author     Pasha <pasha@member.fsf.org>  2024-02-20 18:49:50 +0000
committer  Pasha <pasha@member.fsf.org>  2024-02-20 18:49:50 +0000
commit     5e0b8d508ed51004bd836384293be00950ee62c9 (patch)
tree       e3f16b1aa8b7177032ce3ec429fbad2b1d92a876 /linux
download   gnumach-riscv-5e0b8d508ed51004bd836384293be00950ee62c9.tar.gz
           gnumach-riscv-5e0b8d508ed51004bd836384293be00950ee62c9.tar.bz2
init gnumach copy
Diffstat (limited to 'linux')
-rw-r--r--linux/Makefrag.am788
-rw-r--r--linux/configfrag.ac664
-rw-r--r--linux/dev/README8
-rw-r--r--linux/dev/arch/i386/kernel/irq.c775
-rw-r--r--linux/dev/arch/i386/kernel/setup.c13
-rw-r--r--linux/dev/drivers/block/ahci.c1038
-rw-r--r--linux/dev/drivers/block/floppy.c4288
-rw-r--r--linux/dev/drivers/block/genhd.c1080
-rw-r--r--linux/dev/drivers/net/Space.c582
-rw-r--r--linux/dev/drivers/net/auto_irq.c123
-rw-r--r--linux/dev/drivers/net/net_init.c446
-rw-r--r--linux/dev/drivers/net/wavelan.p.h639
-rw-r--r--linux/dev/drivers/scsi/eata_dma.c1607
-rw-r--r--linux/dev/drivers/scsi/g_NCR5380.c735
-rw-r--r--linux/dev/glue/block.c1770
-rw-r--r--linux/dev/glue/glue.h42
-rw-r--r--linux/dev/glue/kmem.c589
-rw-r--r--linux/dev/glue/misc.c248
-rw-r--r--linux/dev/glue/net.c670
-rw-r--r--linux/dev/include/ahci.h268
-rw-r--r--linux/dev/include/asm-i386/page.h59
-rw-r--r--linux/dev/include/asm-i386/smp.h8
-rw-r--r--linux/dev/include/asm-i386/string.h487
-rw-r--r--linux/dev/include/asm-i386/system.h356
-rw-r--r--linux/dev/include/asm-i386/uaccess.h1
-rw-r--r--linux/dev/include/linux/blk.h471
-rw-r--r--linux/dev/include/linux/blkdev.h73
-rw-r--r--linux/dev/include/linux/compile.h6
-rw-r--r--linux/dev/include/linux/etherdevice.h62
-rw-r--r--linux/dev/include/linux/fs.h803
-rw-r--r--linux/dev/include/linux/genhd.h208
-rw-r--r--linux/dev/include/linux/if.h184
-rw-r--r--linux/dev/include/linux/kernel.h107
-rw-r--r--linux/dev/include/linux/locks.h66
-rw-r--r--linux/dev/include/linux/malloc.h18
-rw-r--r--linux/dev/include/linux/mm.h378
-rw-r--r--linux/dev/include/linux/modversions.h1
-rw-r--r--linux/dev/include/linux/netdevice.h339
-rw-r--r--linux/dev/include/linux/notifier.h96
-rw-r--r--linux/dev/include/linux/pagemap.h150
-rw-r--r--linux/dev/include/linux/pm.h1
-rw-r--r--linux/dev/include/linux/proc_fs.h292
-rw-r--r--linux/dev/include/linux/sched.h521
-rw-r--r--linux/dev/include/linux/skbuff.h466
-rw-r--r--linux/dev/include/linux/threads.h1
-rw-r--r--linux/dev/include/linux/types.h117
-rw-r--r--linux/dev/init/main.c261
-rw-r--r--linux/dev/init/version.c32
-rw-r--r--linux/dev/kernel/dma.c109
-rw-r--r--linux/dev/kernel/printk.c83
-rw-r--r--linux/dev/kernel/resource.c145
-rw-r--r--linux/dev/kernel/sched.c630
-rw-r--r--linux/dev/kernel/softirq.c48
-rw-r--r--linux/dev/lib/vsprintf.c354
-rw-r--r--linux/dev/net/core/dev.c1648
-rw-r--r--linux/pcmcia-cs/clients/3c574_cs.c1349
-rw-r--r--linux/pcmcia-cs/clients/3c589_cs.c1107
-rw-r--r--linux/pcmcia-cs/clients/ax8390.h165
-rw-r--r--linux/pcmcia-cs/clients/axnet_cs.c1936
-rw-r--r--linux/pcmcia-cs/clients/fmvj18x_cs.c1322
-rw-r--r--linux/pcmcia-cs/clients/nmclan_cs.c1744
-rw-r--r--linux/pcmcia-cs/clients/ositech.h358
-rw-r--r--linux/pcmcia-cs/clients/pcnet_cs.c1702
-rw-r--r--linux/pcmcia-cs/clients/smc91c92_cs.c2135
-rw-r--r--linux/pcmcia-cs/clients/xirc2ps_cs.c2091
-rw-r--r--linux/pcmcia-cs/glue/ds.c454
-rw-r--r--linux/pcmcia-cs/glue/pcmcia.c121
-rw-r--r--linux/pcmcia-cs/glue/pcmcia_glue.h264
-rw-r--r--linux/pcmcia-cs/glue/wireless_glue.h158
-rw-r--r--linux/pcmcia-cs/include/linux/crc32.h49
-rw-r--r--linux/pcmcia-cs/include/linux/slab.h12
-rw-r--r--linux/pcmcia-cs/include/pcmcia/bulkmem.h195
-rw-r--r--linux/pcmcia-cs/include/pcmcia/bus_ops.h157
-rw-r--r--linux/pcmcia-cs/include/pcmcia/ciscode.h138
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cisreg.h135
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cistpl.h604
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cs.h441
-rw-r--r--linux/pcmcia-cs/include/pcmcia/cs_types.h70
-rw-r--r--linux/pcmcia-cs/include/pcmcia/driver_ops.h73
-rw-r--r--linux/pcmcia-cs/include/pcmcia/ds.h148
-rw-r--r--linux/pcmcia-cs/include/pcmcia/mem_op.h133
-rw-r--r--linux/pcmcia-cs/include/pcmcia/ss.h133
-rw-r--r--linux/pcmcia-cs/include/pcmcia/version.h9
-rw-r--r--linux/pcmcia-cs/modules/bulkmem.c626
-rw-r--r--linux/pcmcia-cs/modules/cirrus.h188
-rw-r--r--linux/pcmcia-cs/modules/cistpl.c1502
-rw-r--r--linux/pcmcia-cs/modules/cs.c2399
-rw-r--r--linux/pcmcia-cs/modules/cs_internal.h300
-rw-r--r--linux/pcmcia-cs/modules/ds.c1039
-rw-r--r--linux/pcmcia-cs/modules/ene.h59
-rw-r--r--linux/pcmcia-cs/modules/i82365.c2588
-rw-r--r--linux/pcmcia-cs/modules/i82365.h154
-rw-r--r--linux/pcmcia-cs/modules/o2micro.h160
-rw-r--r--linux/pcmcia-cs/modules/pci_fixup.c677
-rw-r--r--linux/pcmcia-cs/modules/ricoh.h161
-rw-r--r--linux/pcmcia-cs/modules/rsrc_mgr.c877
-rw-r--r--linux/pcmcia-cs/modules/smc34c90.h58
-rw-r--r--linux/pcmcia-cs/modules/ti113x.h264
-rw-r--r--linux/pcmcia-cs/modules/topic.h123
-rw-r--r--linux/pcmcia-cs/modules/vg468.h112
-rw-r--r--linux/pcmcia-cs/modules/yenta.h156
-rw-r--r--linux/pcmcia-cs/wireless/hermes.c552
-rw-r--r--linux/pcmcia-cs/wireless/hermes.h456
-rw-r--r--linux/pcmcia-cs/wireless/hermes_rid.h153
-rw-r--r--linux/pcmcia-cs/wireless/ieee802_11.h79
-rw-r--r--linux/pcmcia-cs/wireless/orinoco.c4230
-rw-r--r--linux/pcmcia-cs/wireless/orinoco.h166
-rw-r--r--linux/pcmcia-cs/wireless/orinoco_cs.c705
-rw-r--r--linux/src/COPYING351
-rw-r--r--linux/src/arch/i386/kernel/bios32.c916
-rw-r--r--linux/src/arch/i386/kernel/irq.c582
-rw-r--r--linux/src/arch/i386/lib/delay.c45
-rw-r--r--linux/src/arch/i386/lib/semaphore.S35
-rw-r--r--linux/src/drivers/block/cmd640.c850
-rw-r--r--linux/src/drivers/block/floppy.c4284
-rw-r--r--linux/src/drivers/block/genhd.c761
-rw-r--r--linux/src/drivers/block/ide-cd.c2802
-rw-r--r--linux/src/drivers/block/ide.c3926
-rw-r--r--linux/src/drivers/block/ide.h750
-rw-r--r--linux/src/drivers/block/ide_modes.h226
-rw-r--r--linux/src/drivers/block/rz1000.c59
-rw-r--r--linux/src/drivers/block/triton.c996
-rw-r--r--linux/src/drivers/net/3c501.c856
-rw-r--r--linux/src/drivers/net/3c503.c690
-rw-r--r--linux/src/drivers/net/3c503.h91
-rw-r--r--linux/src/drivers/net/3c505.c1732
-rw-r--r--linux/src/drivers/net/3c505.h245
-rw-r--r--linux/src/drivers/net/3c507.c924
-rw-r--r--linux/src/drivers/net/3c509.c842
-rw-r--r--linux/src/drivers/net/3c515.c1501
-rw-r--r--linux/src/drivers/net/3c59x.c2648
-rw-r--r--linux/src/drivers/net/8390.c829
-rw-r--r--linux/src/drivers/net/8390.h175
-rw-r--r--linux/src/drivers/net/Space.c541
-rw-r--r--linux/src/drivers/net/ac3200.c385
-rw-r--r--linux/src/drivers/net/apricot.c1046
-rw-r--r--linux/src/drivers/net/at1700.c756
-rw-r--r--linux/src/drivers/net/atp.c977
-rw-r--r--linux/src/drivers/net/atp.h274
-rw-r--r--linux/src/drivers/net/auto_irq.c123
-rw-r--r--linux/src/drivers/net/cb_shim.c296
-rw-r--r--linux/src/drivers/net/de4x5.c5942
-rw-r--r--linux/src/drivers/net/de4x5.h1028
-rw-r--r--linux/src/drivers/net/de600.c853
-rw-r--r--linux/src/drivers/net/de620.c1045
-rw-r--r--linux/src/drivers/net/de620.h117
-rw-r--r--linux/src/drivers/net/depca.c1890
-rw-r--r--linux/src/drivers/net/depca.h185
-rw-r--r--linux/src/drivers/net/e2100.c456
-rw-r--r--linux/src/drivers/net/eepro.c1407
-rw-r--r--linux/src/drivers/net/eepro100.c2155
-rw-r--r--linux/src/drivers/net/eexpress.c1285
-rw-r--r--linux/src/drivers/net/epic100.c1560
-rw-r--r--linux/src/drivers/net/eth16i.c1604
-rw-r--r--linux/src/drivers/net/eth82586.h172
-rw-r--r--linux/src/drivers/net/ewrk3.c1920
-rw-r--r--linux/src/drivers/net/ewrk3.h322
-rw-r--r--linux/src/drivers/net/fmv18x.c664
-rw-r--r--linux/src/drivers/net/hamachi.c1315
-rw-r--r--linux/src/drivers/net/hp-plus.c483
-rw-r--r--linux/src/drivers/net/hp.c451
-rw-r--r--linux/src/drivers/net/hp100.c3121
-rw-r--r--linux/src/drivers/net/hp100.h626
-rw-r--r--linux/src/drivers/net/i82586.h413
-rw-r--r--linux/src/drivers/net/intel-gige.c1450
-rw-r--r--linux/src/drivers/net/iow.h6
-rw-r--r--linux/src/drivers/net/kern_compat.h285
-rw-r--r--linux/src/drivers/net/lance.c1293
-rw-r--r--linux/src/drivers/net/myson803.c1650
-rw-r--r--linux/src/drivers/net/natsemi.c1448
-rw-r--r--linux/src/drivers/net/ne.c812
-rw-r--r--linux/src/drivers/net/ne2k-pci.c647
-rw-r--r--linux/src/drivers/net/net_init.c439
-rw-r--r--linux/src/drivers/net/ni52.c1387
-rw-r--r--linux/src/drivers/net/ni52.h310
-rw-r--r--linux/src/drivers/net/ni65.c1228
-rw-r--r--linux/src/drivers/net/ni65.h130
-rw-r--r--linux/src/drivers/net/ns820.c1547
-rw-r--r--linux/src/drivers/net/pci-scan.c659
-rw-r--r--linux/src/drivers/net/pci-scan.h90
-rw-r--r--linux/src/drivers/net/pcnet32.c970
-rw-r--r--linux/src/drivers/net/rtl8139.c1737
-rw-r--r--linux/src/drivers/net/seeq8005.c760
-rw-r--r--linux/src/drivers/net/seeq8005.h156
-rw-r--r--linux/src/drivers/net/sis900.c1803
-rw-r--r--linux/src/drivers/net/sis900.h284
-rw-r--r--linux/src/drivers/net/sk_g16.c2110
-rw-r--r--linux/src/drivers/net/sk_g16.h164
-rw-r--r--linux/src/drivers/net/smc-ultra.c496
-rw-r--r--linux/src/drivers/net/smc-ultra32.c413
-rw-r--r--linux/src/drivers/net/smc9194.c1779
-rw-r--r--linux/src/drivers/net/smc9194.h240
-rw-r--r--linux/src/drivers/net/starfire.c1535
-rw-r--r--linux/src/drivers/net/sundance.c1556
-rw-r--r--linux/src/drivers/net/tlan.c2863
-rw-r--r--linux/src/drivers/net/tlan.h525
-rw-r--r--linux/src/drivers/net/tulip.c3685
-rw-r--r--linux/src/drivers/net/via-rhine.c1427
-rw-r--r--linux/src/drivers/net/wavelan.c4373
-rw-r--r--linux/src/drivers/net/wavelan.h346
-rw-r--r--linux/src/drivers/net/wavelan.p.h635
-rw-r--r--linux/src/drivers/net/wd.c513
-rw-r--r--linux/src/drivers/net/winbond-840.c1558
-rw-r--r--linux/src/drivers/net/yellowfin.c1482
-rw-r--r--linux/src/drivers/net/znet.c746
-rw-r--r--linux/src/drivers/pci/pci.c1322
-rw-r--r--linux/src/drivers/scsi/53c7,8xx.h1584
-rw-r--r--linux/src/drivers/scsi/53c78xx.c6401
-rw-r--r--linux/src/drivers/scsi/53c8xx_d.h2677
-rw-r--r--linux/src/drivers/scsi/53c8xx_u.h97
-rw-r--r--linux/src/drivers/scsi/AM53C974.c2270
-rw-r--r--linux/src/drivers/scsi/AM53C974.h409
-rw-r--r--linux/src/drivers/scsi/BusLogic.c5003
-rw-r--r--linux/src/drivers/scsi/BusLogic.h1775
-rw-r--r--linux/src/drivers/scsi/FlashPoint.c12156
-rw-r--r--linux/src/drivers/scsi/NCR5380.c3246
-rw-r--r--linux/src/drivers/scsi/NCR5380.h369
-rw-r--r--linux/src/drivers/scsi/NCR53c406a.c1079
-rw-r--r--linux/src/drivers/scsi/NCR53c406a.h83
-rw-r--r--linux/src/drivers/scsi/advansys.c15554
-rw-r--r--linux/src/drivers/scsi/advansys.h174
-rw-r--r--linux/src/drivers/scsi/aha152x.c3280
-rw-r--r--linux/src/drivers/scsi/aha152x.h357
-rw-r--r--linux/src/drivers/scsi/aha1542.c1325
-rw-r--r--linux/src/drivers/scsi/aha1542.h170
-rw-r--r--linux/src/drivers/scsi/aha1740.c614
-rw-r--r--linux/src/drivers/scsi/aha1740.h196
-rw-r--r--linux/src/drivers/scsi/aic7xxx.c11404
-rw-r--r--linux/src/drivers/scsi/aic7xxx.h114
-rw-r--r--linux/src/drivers/scsi/aic7xxx/scsi_message.h41
-rw-r--r--linux/src/drivers/scsi/aic7xxx/sequencer.h135
-rw-r--r--linux/src/drivers/scsi/aic7xxx_proc.c384
-rw-r--r--linux/src/drivers/scsi/aic7xxx_reg.h587
-rw-r--r--linux/src/drivers/scsi/aic7xxx_seq.c769
-rw-r--r--linux/src/drivers/scsi/constants.c683
-rw-r--r--linux/src/drivers/scsi/constants.h6
-rw-r--r--linux/src/drivers/scsi/dc390.h147
-rw-r--r--linux/src/drivers/scsi/dtc.c400
-rw-r--r--linux/src/drivers/scsi/dtc.h169
-rw-r--r--linux/src/drivers/scsi/eata.c2331
-rw-r--r--linux/src/drivers/scsi/eata.h60
-rw-r--r--linux/src/drivers/scsi/eata_dma.c1603
-rw-r--r--linux/src/drivers/scsi/eata_dma.h128
-rw-r--r--linux/src/drivers/scsi/eata_dma_proc.c493
-rw-r--r--linux/src/drivers/scsi/eata_dma_proc.h260
-rw-r--r--linux/src/drivers/scsi/eata_generic.h414
-rw-r--r--linux/src/drivers/scsi/eata_pio.c1042
-rw-r--r--linux/src/drivers/scsi/eata_pio.h116
-rw-r--r--linux/src/drivers/scsi/eata_pio_proc.c135
-rw-r--r--linux/src/drivers/scsi/fdomain.c2082
-rw-r--r--linux/src/drivers/scsi/fdomain.h61
-rw-r--r--linux/src/drivers/scsi/g_NCR5380.c729
-rw-r--r--linux/src/drivers/scsi/g_NCR5380.h162
-rw-r--r--linux/src/drivers/scsi/gdth.c3598
-rw-r--r--linux/src/drivers/scsi/gdth.h819
-rw-r--r--linux/src/drivers/scsi/gdth_ioctl.h86
-rw-r--r--linux/src/drivers/scsi/gdth_proc.c656
-rw-r--r--linux/src/drivers/scsi/gdth_proc.h24
-rw-r--r--linux/src/drivers/scsi/hosts.c554
-rw-r--r--linux/src/drivers/scsi/hosts.h405
-rw-r--r--linux/src/drivers/scsi/in2000.c2379
-rw-r--r--linux/src/drivers/scsi/in2000.h465
-rw-r--r--linux/src/drivers/scsi/ncr53c8xx.c10795
-rw-r--r--linux/src/drivers/scsi/ncr53c8xx.h1220
-rw-r--r--linux/src/drivers/scsi/pas16.c576
-rw-r--r--linux/src/drivers/scsi/pas16.h196
-rw-r--r--linux/src/drivers/scsi/ppa.c1464
-rw-r--r--linux/src/drivers/scsi/ppa.h176
-rw-r--r--linux/src/drivers/scsi/qlogicfas.c679
-rw-r--r--linux/src/drivers/scsi/qlogicfas.h43
-rw-r--r--linux/src/drivers/scsi/qlogicisp.c1767
-rw-r--r--linux/src/drivers/scsi/qlogicisp.h98
-rw-r--r--linux/src/drivers/scsi/scripts.h1357
-rw-r--r--linux/src/drivers/scsi/scsi.c3585
-rw-r--r--linux/src/drivers/scsi/scsi.h650
-rw-r--r--linux/src/drivers/scsi/scsi_ioctl.c452
-rw-r--r--linux/src/drivers/scsi/scsi_proc.c302
-rw-r--r--linux/src/drivers/scsi/scsicam.c229
-rw-r--r--linux/src/drivers/scsi/scsiio.c1537
-rw-r--r--linux/src/drivers/scsi/scsiiom.c1540
-rw-r--r--linux/src/drivers/scsi/sd.c1691
-rw-r--r--linux/src/drivers/scsi/sd.h65
-rw-r--r--linux/src/drivers/scsi/sd_ioctl.c128
-rw-r--r--linux/src/drivers/scsi/seagate.c1679
-rw-r--r--linux/src/drivers/scsi/seagate.h139
-rw-r--r--linux/src/drivers/scsi/sr.c1290
-rw-r--r--linux/src/drivers/scsi/sr.h40
-rw-r--r--linux/src/drivers/scsi/sr_ioctl.c607
-rw-r--r--linux/src/drivers/scsi/sym53c8xx.c14696
-rw-r--r--linux/src/drivers/scsi/sym53c8xx.h116
-rw-r--r--linux/src/drivers/scsi/sym53c8xx_comm.h2717
-rw-r--r--linux/src/drivers/scsi/sym53c8xx_defs.h1767
-rw-r--r--linux/src/drivers/scsi/t128.c400
-rw-r--r--linux/src/drivers/scsi/t128.h169
-rw-r--r--linux/src/drivers/scsi/tmscsim.c1930
-rw-r--r--linux/src/drivers/scsi/tmscsim.h680
-rw-r--r--linux/src/drivers/scsi/u14-34f.c1996
-rw-r--r--linux/src/drivers/scsi/u14-34f.h60
-rw-r--r--linux/src/drivers/scsi/ultrastor.c1165
-rw-r--r--linux/src/drivers/scsi/ultrastor.h102
-rw-r--r--linux/src/drivers/scsi/wd7000.c1452
-rw-r--r--linux/src/drivers/scsi/wd7000.h446
-rw-r--r--linux/src/include/asm-i386/atomic.h69
-rw-r--r--linux/src/include/asm-i386/bitops.h201
-rw-r--r--linux/src/include/asm-i386/byteorder.h90
-rw-r--r--linux/src/include/asm-i386/cache.h18
-rw-r--r--linux/src/include/asm-i386/checksum.h121
-rw-r--r--linux/src/include/asm-i386/delay.h18
-rw-r--r--linux/src/include/asm-i386/dma.h271
-rw-r--r--linux/src/include/asm-i386/errno.h132
-rw-r--r--linux/src/include/asm-i386/fcntl.h59
-rw-r--r--linux/src/include/asm-i386/floppy.h289
-rw-r--r--linux/src/include/asm-i386/hardirq.h66
-rw-r--r--linux/src/include/asm-i386/io.h216
-rw-r--r--linux/src/include/asm-i386/ioctl.h75
-rw-r--r--linux/src/include/asm-i386/ioctls.h74
-rw-r--r--linux/src/include/asm-i386/irq.h421
-rw-r--r--linux/src/include/asm-i386/math_emu.h57
-rw-r--r--linux/src/include/asm-i386/page.h62
-rw-r--r--linux/src/include/asm-i386/param.h20
-rw-r--r--linux/src/include/asm-i386/posix_types.h63
-rw-r--r--linux/src/include/asm-i386/processor.h204
-rw-r--r--linux/src/include/asm-i386/ptrace.h60
-rw-r--r--linux/src/include/asm-i386/resource.h39
-rw-r--r--linux/src/include/asm-i386/segment.h380
-rw-r--r--linux/src/include/asm-i386/semaphore.h133
-rw-r--r--linux/src/include/asm-i386/sigcontext.h54
-rw-r--r--linux/src/include/asm-i386/signal.h97
-rw-r--r--linux/src/include/asm-i386/socket.h27
-rw-r--r--linux/src/include/asm-i386/sockios.h12
-rw-r--r--linux/src/include/asm-i386/spinlock.h262
-rw-r--r--linux/src/include/asm-i386/stat.h41
-rw-r--r--linux/src/include/asm-i386/statfs.h25
-rw-r--r--linux/src/include/asm-i386/string.h487
-rw-r--r--linux/src/include/asm-i386/system.h334
-rw-r--r--linux/src/include/asm-i386/termbits.h160
-rw-r--r--linux/src/include/asm-i386/termios.h92
-rw-r--r--linux/src/include/asm-i386/types.h46
-rw-r--r--linux/src/include/asm-i386/unaligned.h16
-rw-r--r--linux/src/include/asm-i386/unistd.h328
-rw-r--r--linux/src/include/asm-i386/vm86.h175
-rw-r--r--linux/src/include/linux/affs_hardblocks.h66
-rw-r--r--linux/src/include/linux/atalk.h157
-rw-r--r--linux/src/include/linux/ax25.h96
-rw-r--r--linux/src/include/linux/binfmts.h65
-rw-r--r--linux/src/include/linux/bios32.h61
-rw-r--r--linux/src/include/linux/blk.h454
-rw-r--r--linux/src/include/linux/blkdev.h66
-rw-r--r--linux/src/include/linux/cdrom.h453
-rw-r--r--linux/src/include/linux/compatmac.h153
-rw-r--r--linux/src/include/linux/compiler-gcc.h112
-rw-r--r--linux/src/include/linux/compiler-gcc3.h23
-rw-r--r--linux/src/include/linux/compiler-gcc4.h57
-rw-r--r--linux/src/include/linux/compiler-gcc5.h67
-rw-r--r--linux/src/include/linux/compiler.h315
-rw-r--r--linux/src/include/linux/config.h43
-rw-r--r--linux/src/include/linux/ctype.h64
-rw-r--r--linux/src/include/linux/delay.h14
-rw-r--r--linux/src/include/linux/errno.h16
-rw-r--r--linux/src/include/linux/etherdevice.h46
-rw-r--r--linux/src/include/linux/fcntl.h6
-rw-r--r--linux/src/include/linux/fd.h377
-rw-r--r--linux/src/include/linux/fddidevice.h42
-rw-r--r--linux/src/include/linux/fdreg.h143
-rw-r--r--linux/src/include/linux/fs.h728
-rw-r--r--linux/src/include/linux/genhd.h136
-rw-r--r--linux/src/include/linux/hdreg.h240
-rw-r--r--linux/src/include/linux/head.h20
-rw-r--r--linux/src/include/linux/icmp.h85
-rw-r--r--linux/src/include/linux/if.h155
-rw-r--r--linux/src/include/linux/if_arp.h130
-rw-r--r--linux/src/include/linux/if_ether.h119
-rw-r--r--linux/src/include/linux/if_fddi.h202
-rw-r--r--linux/src/include/linux/if_tr.h102
-rw-r--r--linux/src/include/linux/igmp.h119
-rw-r--r--linux/src/include/linux/in.h149
-rw-r--r--linux/src/include/linux/inet.h52
-rw-r--r--linux/src/include/linux/init.h30
-rw-r--r--linux/src/include/linux/interrupt.h120
-rw-r--r--linux/src/include/linux/ioctl.h7
-rw-r--r--linux/src/include/linux/ioport.h31
-rw-r--r--linux/src/include/linux/ip.h112
-rw-r--r--linux/src/include/linux/ipc.h67
-rw-r--r--linux/src/include/linux/ipx.h80
-rw-r--r--linux/src/include/linux/kcomp.h52
-rw-r--r--linux/src/include/linux/kdev_t.h114
-rw-r--r--linux/src/include/linux/kernel.h97
-rw-r--r--linux/src/include/linux/kernel_stat.h32
-rw-r--r--linux/src/include/linux/limits.h17
-rw-r--r--linux/src/include/linux/linkage.h59
-rw-r--r--linux/src/include/linux/list.h112
-rw-r--r--linux/src/include/linux/locks.h65
-rw-r--r--linux/src/include/linux/major.h88
-rw-r--r--linux/src/include/linux/malloc.h11
-rw-r--r--linux/src/include/linux/mc146818rtc.h149
-rw-r--r--linux/src/include/linux/md.h275
-rw-r--r--linux/src/include/linux/mm.h375
-rw-r--r--linux/src/include/linux/module.h116
-rw-r--r--linux/src/include/linux/mount.h30
-rw-r--r--linux/src/include/linux/net.h130
-rw-r--r--linux/src/include/linux/netdevice.h313
-rw-r--r--linux/src/include/linux/netrom.h34
-rw-r--r--linux/src/include/linux/notifier.h96
-rw-r--r--linux/src/include/linux/pagemap.h146
-rw-r--r--linux/src/include/linux/param.h6
-rw-r--r--linux/src/include/linux/pci.h1116
-rw-r--r--linux/src/include/linux/personality.h55
-rw-r--r--linux/src/include/linux/posix_types.h50
-rw-r--r--linux/src/include/linux/proc_fs.h292
-rw-r--r--linux/src/include/linux/ptrace.h26
-rw-r--r--linux/src/include/linux/quota.h221
-rw-r--r--linux/src/include/linux/random.h70
-rw-r--r--linux/src/include/linux/resource.h60
-rw-r--r--linux/src/include/linux/rose.h88
-rw-r--r--linux/src/include/linux/route.h79
-rw-r--r--linux/src/include/linux/sched.h496
-rw-r--r--linux/src/include/linux/sem.h112
-rw-r--r--linux/src/include/linux/signal.h6
-rw-r--r--linux/src/include/linux/skbuff.h467
-rw-r--r--linux/src/include/linux/smp.h54
-rw-r--r--linux/src/include/linux/socket.h147
-rw-r--r--linux/src/include/linux/sockios.h98
-rw-r--r--linux/src/include/linux/spinlock.h4
-rw-r--r--linux/src/include/linux/stat.h53
-rw-r--r--linux/src/include/linux/stddef.h15
-rw-r--r--linux/src/include/linux/string.h53
-rw-r--r--linux/src/include/linux/symtab_begin.h45
-rw-r--r--linux/src/include/linux/symtab_end.h15
-rw-r--r--linux/src/include/linux/tasks.h17
-rw-r--r--linux/src/include/linux/tcp.h71
-rw-r--r--linux/src/include/linux/termios.h7
-rw-r--r--linux/src/include/linux/time.h53
-rw-r--r--linux/src/include/linux/timer.h100
-rw-r--r--linux/src/include/linux/tqueue.h143
-rw-r--r--linux/src/include/linux/trdevice.h40
-rw-r--r--linux/src/include/linux/tty.h351
-rw-r--r--linux/src/include/linux/tty_driver.h189
-rw-r--r--linux/src/include/linux/tty_ldisc.h46
-rw-r--r--linux/src/include/linux/types.h96
-rw-r--r--linux/src/include/linux/ucdrom.h96
-rw-r--r--linux/src/include/linux/udp.h29
-rw-r--r--linux/src/include/linux/uio.h26
-rw-r--r--linux/src/include/linux/unistd.h11
-rw-r--r--linux/src/include/linux/utsname.h35
-rw-r--r--linux/src/include/linux/version.h2
-rw-r--r--linux/src/include/linux/vfs.h6
-rw-r--r--linux/src/include/linux/wait.h53
-rw-r--r--linux/src/include/linux/wireless.h479
-rw-r--r--linux/src/include/net/af_unix.h14
-rw-r--r--linux/src/include/net/arp.h17
-rw-r--r--linux/src/include/net/atalkcall.h2
-rw-r--r--linux/src/include/net/ax25.h292
-rw-r--r--linux/src/include/net/ax25call.h2
-rw-r--r--linux/src/include/net/br.h270
-rw-r--r--linux/src/include/net/checksum.h25
-rw-r--r--linux/src/include/net/datalink.h16
-rw-r--r--linux/src/include/net/gc.h46
-rw-r--r--linux/src/include/net/icmp.h43
-rw-r--r--linux/src/include/net/ip.h159
-rw-r--r--linux/src/include/net/ip_alias.h23
-rw-r--r--linux/src/include/net/ip_forward.h11
-rw-r--r--linux/src/include/net/ip_masq.h205
-rw-r--r--linux/src/include/net/ipip.h4
-rw-r--r--linux/src/include/net/ipx.h88
-rw-r--r--linux/src/include/net/ipxcall.h2
-rw-r--r--linux/src/include/net/netlink.h32
-rw-r--r--linux/src/include/net/netrom.h166
-rw-r--r--linux/src/include/net/nrcall.h2
-rw-r--r--linux/src/include/net/p8022.h7
-rw-r--r--linux/src/include/net/p8022call.h2
-rw-r--r--linux/src/include/net/p8022tr.h8
-rw-r--r--linux/src/include/net/p8022trcall.h3
-rw-r--r--linux/src/include/net/protocol.h55
-rw-r--r--linux/src/include/net/psnap.h7
-rw-r--r--linux/src/include/net/psnapcall.h2
-rw-r--r--linux/src/include/net/rarp.h12
-rw-r--r--linux/src/include/net/raw.h44
-rw-r--r--linux/src/include/net/rose.h233
-rw-r--r--linux/src/include/net/rosecall.h2
-rw-r--r--linux/src/include/net/route.h189
-rw-r--r--linux/src/include/net/slhc.h6
-rw-r--r--linux/src/include/net/slhc_vj.h187
-rw-r--r--linux/src/include/net/snmp.h107
-rw-r--r--linux/src/include/net/sock.h613
-rw-r--r--linux/src/include/net/spx.h38
-rw-r--r--linux/src/include/net/tcp.h374
-rw-r--r--linux/src/include/net/udp.h63
-rw-r--r--linux/src/include/scsi/scsi.h205
-rw-r--r--linux/src/include/scsi/scsi_ioctl.h28
-rw-r--r--linux/src/include/scsi/scsicam.h17
-rw-r--r--linux/src/init/main.c1135
-rw-r--r--linux/src/init/version.c30
-rw-r--r--linux/src/kernel/dma.c99
-rw-r--r--linux/src/kernel/printk.c253
-rw-r--r--linux/src/kernel/resource.c129
-rw-r--r--linux/src/kernel/sched.c1747
-rw-r--r--linux/src/kernel/softirq.c54
-rw-r--r--linux/src/lib/ctype.c36
-rw-r--r--linux/src/lib/vsprintf.c306
-rw-r--r--linux/src/net/core/dev.c1629
500 files changed, 347291 insertions, 0 deletions
diff --git a/linux/Makefrag.am b/linux/Makefrag.am
new file mode 100644
index 0000000..2338452
--- /dev/null
+++ b/linux/Makefrag.am
@@ -0,0 +1,788 @@
+# Makefile fragment for Linux device drivers and the glue code.
+
+# Copyright (C) 2006, 2007, 2011 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# Files for device driver support.
+#
+
+if CODE_linux
+noinst_LIBRARIES += \
+ liblinux.a
+gnumach_o_LDADD += \
+ liblinux.a
+endif
+
+liblinux_a_CPPFLAGS = $(AM_CPPFLAGS) \
+ -I$(srcdir)/$(systype)/linux/dev/include \
+ -I$(top_builddir)/linux/dev/include \
+ -I$(srcdir)/linux/dev/include \
+ -I$(top_builddir)/linux/src/include \
+ -I$(srcdir)/linux/src/include
+# Because of the use of `extern inline' in some Linux header files without
+# corresponding text segment definitions, we must always optimize.
+liblinux_a_CFLAGS = -O2 $(AM_CFLAGS)
+
+# Disable warnings that are applied to the core Mach code.
+liblinux_a_CFLAGS += -Wno-missing-prototypes -Wno-strict-prototypes \
+ -Wno-old-style-definition
+
+# See <http://lists.gnu.org/archive/html/bug-hurd/2006-01/msg00148.html>.
+liblinux_a_CFLAGS += \
+ -fno-strict-aliasing
+
+# TODO. Do we really need `-traditional'?
+liblinux_a_CCASFLAGS = $(AM_CCASFLAGS) \
+ -traditional \
+ $(liblinux_a_CPPFLAGS)
+
+liblinux_a_SOURCES = \
+ linux/dev/init/version.c \
+ linux/dev/kernel/softirq.c \
+ linux/src/arch/i386/lib/delay.c \
+ linux/dev/kernel/dma.c \
+ linux/dev/kernel/resource.c \
+ linux/dev/kernel/printk.c \
+ linux/src/arch/i386/kernel/bios32.c \
+ linux/dev/arch/i386/kernel/irq.c \
+ linux/src/lib/ctype.c \
+ linux/dev/lib/vsprintf.c \
+ linux/dev/init/main.c \
+ linux/dev/glue/misc.c \
+ linux/dev/kernel/sched.c \
+ linux/dev/glue/kmem.c \
+ linux/dev/glue/block.c \
+ linux/dev/glue/glue.h \
+ linux/dev/arch/i386/kernel/setup.c
+
+liblinux_a_SOURCES += \
+ linux/src/drivers/pci/pci.c \
+ linux/dev/drivers/block/genhd.c
+
+#
+# Linux device drivers.
+#
+if device_driver_floppy
+liblinux_a_SOURCES += \
+ linux/dev/drivers/block/floppy.c
+endif
+
+if device_driver_ide
+liblinux_a_SOURCES += \
+ linux/src/drivers/block/cmd640.c \
+ linux/src/drivers/block/ide-cd.c \
+ linux/src/drivers/block/ide.c \
+ linux/src/drivers/block/ide.h \
+ linux/dev/drivers/block/ahci.c \
+ linux/dev/include/ahci.h \
+ linux/src/drivers/block/ide_modes.h \
+ linux/src/drivers/block/rz1000.c \
+ linux/src/drivers/block/triton.c
+endif
+
+if device_driver_group_scsi
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/constants.c \
+ linux/src/drivers/scsi/constants.h \
+ linux/src/drivers/scsi/hosts.c \
+ linux/src/drivers/scsi/hosts.h \
+ linux/src/drivers/scsi/scsi.c \
+ linux/src/drivers/scsi/scsi.h \
+ linux/src/drivers/scsi/scsi_ioctl.c \
+ linux/src/drivers/scsi/scsi_proc.c \
+ linux/src/drivers/scsi/scsicam.c \
+ linux/src/drivers/scsi/sd.c \
+ linux/src/drivers/scsi/sd.h \
+ linux/src/drivers/scsi/sd_ioctl.c \
+ linux/src/drivers/scsi/sr.c \
+ linux/src/drivers/scsi/sr.h \
+ linux/src/drivers/scsi/sr_ioctl.c
+endif
+
+if device_driver_53c78xx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/53c7,8xx.h \
+ linux/src/drivers/scsi/53c78xx.c \
+ linux/src/drivers/scsi/53c8xx_d.h \
+ linux/src/drivers/scsi/53c8xx_u.h
+endif
+
+if device_driver_AM53C974
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/AM53C974.c \
+ linux/src/drivers/scsi/AM53C974.h
+endif
+
+if device_driver_BusLogic
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/BusLogic.c \
+ linux/src/drivers/scsi/BusLogic.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/FlashPoint.c
+endif
+
+if device_driver_NCR53c406a
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/NCR53c406a.c \
+ linux/src/drivers/scsi/NCR53c406a.h
+endif
+
+if device_driver_advansys
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/advansys.c \
+ linux/src/drivers/scsi/advansys.h
+endif
+
+if device_driver_aha152x
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aha152x.c \
+ linux/src/drivers/scsi/aha152x.h
+endif
+
+if device_driver_aha1542
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aha1542.c \
+ linux/src/drivers/scsi/aha1542.h
+endif
+
+if device_driver_aha1740
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aha1740.c \
+ linux/src/drivers/scsi/aha1740.h
+endif
+
+if device_driver_aic7xxx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/aic7xxx.c \
+ linux/src/drivers/scsi/aic7xxx.h \
+ linux/src/drivers/scsi/aic7xxx/scsi_message.h \
+ linux/src/drivers/scsi/aic7xxx/sequencer.h \
+ linux/src/drivers/scsi/aic7xxx_reg.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/aic7xxx_proc.c \
+ linux/src/drivers/scsi/aic7xxx_seq.c
+endif
+
+if device_driver_dtc
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/dtc.c \
+ linux/src/drivers/scsi/dtc.h
+endif
+
+if device_driver_eata
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/eata.c \
+ linux/src/drivers/scsi/eata.h \
+ linux/src/drivers/scsi/eata_generic.h
+endif
+
+if device_driver_eata_dma
+liblinux_a_SOURCES += \
+ linux/dev/drivers/scsi/eata_dma.c \
+ linux/src/drivers/scsi/eata_dma.h \
+ linux/src/drivers/scsi/eata_dma_proc.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/eata_dma_proc.c
+endif
+
+if device_driver_eata_pio
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/eata_pio.c \
+ linux/src/drivers/scsi/eata_pio.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/eata_pio_proc.c
+endif
+
+if device_driver_fdomain
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/fdomain.c \
+ linux/src/drivers/scsi/fdomain.h
+endif
+
+if device_driver_g_NCR5380
+liblinux_a_SOURCES += \
+ linux/dev/drivers/scsi/g_NCR5380.c \
+ linux/src/drivers/scsi/g_NCR5380.h
+endif
+
+if device_driver_gdth
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/gdth.c \
+ linux/src/drivers/scsi/gdth.h \
+ linux/src/drivers/scsi/gdth_ioctl.h \
+ linux/src/drivers/scsi/gdth_proc.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/gdth_proc.c
+endif
+
+if device_driver_in2000
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/in2000.c \
+ linux/src/drivers/scsi/in2000.h
+endif
+
+if device_driver_ncr53c8xx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/ncr53c8xx.c \
+ linux/src/drivers/scsi/ncr53c8xx.h
+endif
+
+if device_driver_pas16
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/pas16.c \
+ linux/src/drivers/scsi/pas16.h
+endif
+
+if device_driver_ppa
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/ppa.c \
+ linux/src/drivers/scsi/ppa.h
+endif
+
+if device_driver_qlogicfas
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/qlogicfas.c \
+ linux/src/drivers/scsi/qlogicfas.h
+endif
+
+if device_driver_qlogicisp
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/qlogicisp.c \
+ linux/src/drivers/scsi/qlogicisp.h
+endif
+
+if device_driver_seagate
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/seagate.c \
+ linux/src/drivers/scsi/seagate.h
+endif
+
+if device_driver_sym53c8xx
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/sym53c8xx.c \
+ linux/src/drivers/scsi/sym53c8xx_comm.h \
+ linux/src/drivers/scsi/sym53c8xx.h \
+ linux/src/drivers/scsi/sym53c8xx_defs.h
+endif
+
+if device_driver_t128
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/t128.c \
+ linux/src/drivers/scsi/t128.h
+endif
+
+if device_driver_tmscsim
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/dc390.h \
+ linux/src/drivers/scsi/tmscsim.c \
+ linux/src/drivers/scsi/tmscsim.h
+EXTRA_DIST += \
+ linux/src/drivers/scsi/scsiiom.c
+endif
+
+if device_driver_u14_34f
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/u14-34f.c \
+ linux/src/drivers/scsi/u14-34f.h
+endif
+
+if device_driver_ultrastor
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/ultrastor.c \
+ linux/src/drivers/scsi/ultrastor.h
+endif
+
+if device_driver_wd7000
+liblinux_a_SOURCES += \
+ linux/src/drivers/scsi/wd7000.c \
+ linux/src/drivers/scsi/wd7000.h
+endif
+
+EXTRA_DIST += \
+ linux/src/drivers/scsi/NCR5380.c \
+ linux/src/drivers/scsi/NCR5380.h
+
+if device_driver_group_net
+liblinux_a_SOURCES += \
+ linux/dev/drivers/net/auto_irq.c \
+ linux/dev/glue/net.c \
+ linux/dev/drivers/net/Space.c \
+ linux/dev/net/core/dev.c \
+ linux/dev/drivers/net/net_init.c \
+ linux/src/drivers/net/pci-scan.c \
+ linux/src/drivers/net/pci-scan.h
+endif
+
+if device_driver_3c501
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c501.c
+endif
+
+if device_driver_3c503
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c503.c \
+ linux/src/drivers/net/3c503.h \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_3c505
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c505.c \
+ linux/src/drivers/net/3c505.h
+endif
+
+if device_driver_3c507
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c507.c
+endif
+
+if device_driver_3c509
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c509.c
+endif
+
+if device_driver_3c59x
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c59x.c
+endif
+
+if device_driver_3c515
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/3c515.c
+endif
+
+if device_driver_ac3200
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ac3200.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_apricot
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/apricot.c
+endif
+
+if device_driver_at1700
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/at1700.c
+endif
+
+if device_driver_atp
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/atp.c \
+ linux/src/drivers/net/atp.h
+endif
+
+#if device_driver_cb_shim
+#liblinux_a_SOURCES += \
+# linux/src/drivers/net/cb_shim.c
+#endif
+
+if device_driver_de4x5
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/de4x5.c \
+ linux/src/drivers/net/de4x5.h
+endif
+
+if device_driver_de600
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/de600.c
+endif
+
+if device_driver_de620
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/de620.c \
+ linux/src/drivers/net/de620.h
+endif
+
+if device_driver_depca
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/depca.c \
+ linux/src/drivers/net/depca.h
+endif
+
+if device_driver_e2100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/e2100.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_eepro
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eepro.c
+endif
+
+if device_driver_eepro100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eepro100.c
+endif
+
+if device_driver_eexpress
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eexpress.c \
+ linux/src/drivers/net/eth82586.h
+endif
+
+if device_driver_epic100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/epic100.c
+endif
+
+if device_driver_eth16i
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/eth16i.c
+endif
+
+if device_driver_ewrk3
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ewrk3.c \
+ linux/src/drivers/net/ewrk3.h
+endif
+
+if device_driver_fmv18x
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/fmv18x.c
+endif
+
+if device_driver_hamachi
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hamachi.c
+endif
+
+if device_driver_hp_plus
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hp-plus.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_hp
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hp.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_hp100
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/hp100.c \
+ linux/src/drivers/net/hp100.h
+endif
+
+if device_driver_intel_gige
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/intel-gige.c
+endif
+
+if device_driver_lance
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/lance.c
+endif
+
+if device_driver_myson803
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/myson803.c
+endif
+
+if device_driver_natsemi
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/natsemi.c
+endif
+
+if device_driver_ne
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ne.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_ne2k_pci
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ne2k-pci.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_ni52
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ni52.c \
+ linux/src/drivers/net/ni52.h
+endif
+
+if device_driver_ni65
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ni65.c \
+ linux/src/drivers/net/ni65.h
+endif
+
+if device_driver_ns820
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/ns820.c
+endif
+
+if device_driver_pcnet32
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/pcnet32.c
+endif
+
+if device_driver_rtl8139
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/rtl8139.c
+endif
+
+if device_driver_seeq8005
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/seeq8005.c \
+ linux/src/drivers/net/seeq8005.h
+endif
+
+if device_driver_sis900
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/sis900.c \
+ linux/src/drivers/net/sis900.h
+endif
+
+if device_driver_sk_g16
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/sk_g16.c \
+ linux/src/drivers/net/sk_g16.h
+endif
+
+if device_driver_smc_ultra
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/smc-ultra.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_smc_ultra32
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/smc-ultra32.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_starfire
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/starfire.c
+endif
+
+if device_driver_sundance
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/sundance.c
+endif
+
+if device_driver_tlan
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/tlan.c \
+ linux/src/drivers/net/tlan.h
+endif
+
+if device_driver_tulip
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/tulip.c
+endif
+
+if device_driver_via_rhine
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/via-rhine.c
+endif
+
+if device_driver_wavelan
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/i82586.h \
+ linux/src/drivers/net/wavelan.c \
+ linux/src/drivers/net/wavelan.h \
+ linux/dev/drivers/net/wavelan.p.h
+endif
+
+if device_driver_wd
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/wd.c
+endif
+
+if device_driver_winbond_840
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/winbond-840.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_yellowfin
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/yellowfin.c
+endif
+
+if device_driver_znet
+liblinux_a_SOURCES += \
+ linux/src/drivers/net/znet.c
+endif
+
+EXTRA_DIST += \
+ linux/src/drivers/net/8390.h \
+ linux/src/drivers/net/kern_compat.h
+
+# pcmcia-cs.
+
+liblinux_pcmcia_cs_modules_a_CPPFLAGS = $(liblinux_a_CPPFLAGS) \
+ -I$(srcdir)/linux/pcmcia-cs/include
+liblinux_pcmcia_cs_modules_a_CFLAGS = $(liblinux_a_CFLAGS) \
+ -include $(srcdir)/linux/pcmcia-cs/glue/pcmcia_glue.h
+liblinux_pcmcia_cs_modules_a_SOURCES =
+if device_driver_group_pcmcia
+noinst_LIBRARIES += \
+ liblinux_pcmcia_cs_modules.a
+gnumach_o_LDADD += \
+ liblinux_pcmcia_cs_modules.a
+endif
+
+liblinux_pcmcia_cs_modules_a_SOURCES += \
+ linux/pcmcia-cs/glue/pcmcia.c \
+ linux/pcmcia-cs/glue/pcmcia_glue.h \
+ linux/pcmcia-cs/modules/cs.c \
+ linux/pcmcia-cs/modules/cs_internal.h \
+ linux/pcmcia-cs/modules/ds.c \
+ linux/pcmcia-cs/modules/rsrc_mgr.c \
+ linux/pcmcia-cs/modules/bulkmem.c \
+ linux/pcmcia-cs/modules/cistpl.c \
+ linux/pcmcia-cs/modules/pci_fixup.c
+EXTRA_DIST += \
+ linux/pcmcia-cs/glue/ds.c
+
+if device_driver_i82365
+liblinux_pcmcia_cs_modules_a_SOURCES += \
+ linux/pcmcia-cs/modules/cirrus.h \
+ linux/pcmcia-cs/modules/ene.h \
+ linux/pcmcia-cs/modules/i82365.c \
+ linux/pcmcia-cs/modules/i82365.h \
+ linux/pcmcia-cs/modules/o2micro.h \
+ linux/pcmcia-cs/modules/ricoh.h \
+ linux/pcmcia-cs/modules/smc34c90.h \
+ linux/pcmcia-cs/modules/ti113x.h \
+ linux/pcmcia-cs/modules/topic.h \
+ linux/pcmcia-cs/modules/vg468.h \
+ linux/pcmcia-cs/modules/yenta.h
+endif
+
+liblinux_pcmcia_cs_clients_a_CPPFLAGS = $(liblinux_a_CPPFLAGS) \
+ -DPCMCIA_CLIENT -I$(srcdir)/linux/pcmcia-cs/include
+liblinux_pcmcia_cs_clients_a_CFLAGS = $(liblinux_a_CFLAGS) \
+ -include $(srcdir)/linux/pcmcia-cs/glue/pcmcia_glue.h
+liblinux_pcmcia_cs_clients_a_SOURCES =
+if device_driver_group_pcmcia
+noinst_LIBRARIES += \
+ liblinux_pcmcia_cs_clients.a
+gnumach_o_LDADD += \
+ liblinux_pcmcia_cs_clients.a
+endif
+
+if device_driver_3c574_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/3c574_cs.c
+endif
+
+if device_driver_3c589_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/3c589_cs.c
+endif
+
+if device_driver_axnet_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/ax8390.h \
+ linux/pcmcia-cs/clients/axnet_cs.c
+endif
+
+if device_driver_fmvj18x_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/fmvj18x_cs.c
+endif
+
+if device_driver_nmclan_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/nmclan_cs.c
+endif
+
+if device_driver_pcnet_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/pcnet_cs.c \
+ linux/src/drivers/net/8390.c
+endif
+
+if device_driver_smc91c92_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/ositech.h \
+ linux/pcmcia-cs/clients/smc91c92_cs.c
+endif
+
+if device_driver_xirc2ps_cs
+liblinux_pcmcia_cs_clients_a_SOURCES += \
+ linux/pcmcia-cs/clients/xirc2ps_cs.c
+endif
+
+liblinux_pcmcia_cs_wireless_a_CPPFLAGS = $(liblinux_a_CPPFLAGS) \
+ -I$(srcdir)/linux/pcmcia-cs/include
+liblinux_pcmcia_cs_wireless_a_CFLAGS = $(liblinux_a_CFLAGS) \
+ -include $(srcdir)/linux/pcmcia-cs/glue/wireless_glue.h
+liblinux_pcmcia_cs_wireless_a_SOURCES =
+if device_driver_group_pcmcia
+noinst_LIBRARIES += \
+ liblinux_pcmcia_cs_wireless.a
+gnumach_o_LDADD += \
+ liblinux_pcmcia_cs_wireless.a
+endif
+
+if device_driver_orinoco_cs
+liblinux_pcmcia_cs_wireless_a_SOURCES += \
+ linux/pcmcia-cs/glue/wireless_glue.h \
+ linux/pcmcia-cs/wireless/hermes.c \
+ linux/pcmcia-cs/wireless/hermes.h \
+ linux/pcmcia-cs/wireless/hermes_rid.h \
+ linux/pcmcia-cs/wireless/ieee802_11.h \
+ linux/pcmcia-cs/wireless/orinoco.c \
+ linux/pcmcia-cs/wireless/orinoco.h \
+ linux/pcmcia-cs/wireless/orinoco_cs.c
+endif
+
+#
+# Building a distribution.
+#
+
+EXTRA_DIST += \
+ linux/dev/README \
+ linux/src/COPYING
+
+# Those get #included...
+EXTRA_DIST += \
+ linux/src/drivers/scsi/FlashPoint.c \
+ linux/src/drivers/scsi/eata_pio_proc.c \
+ linux/src/drivers/scsi/scsiiom.c
+
+# Instead of listing each file individually...
+EXTRA_DIST += \
+ linux/dev/include \
+ linux/src/include
+EXTRA_DIST += \
+ linux/pcmcia-cs/include
+dist-hook: dist-hook-linux
+.PHONY: dist-hook-linux
+dist-hook-linux:
+# These symbolic links are copied from the build directory due to including
+# `linux/dev/include linux/src/include' to `EXTRA_DIST' above.
+ rm -f \
+ $(distdir)/linux/dev/include/asm \
+ $(distdir)/linux/src/include/asm
+
+#
+# Architecture specific parts.
+#
+
+if HOST_ix86
+include i386/linux/Makefrag.am
+endif
diff --git a/linux/configfrag.ac b/linux/configfrag.ac
new file mode 100644
index 0000000..c851e56
--- /dev/null
+++ b/linux/configfrag.ac
@@ -0,0 +1,664 @@
+dnl Configure fragment for Linux code snarfed into GNU Mach.
+
+dnl Copyright (C) 1997, 1999, 2004, 2006, 2007 Free Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+#
+# Internals.
+#
+
+[have_linux_code=no]
+
+#
+# Helper functions.
+#
+
+#
+# Calling `device_driver_group group' makes sure that the infrastructure needed
+# for the group `group' will be set-up.
+#
+
+[device_driver_group() {
+ case $1 in
+ '')
+ # No group.
+ :;;
+ block)
+ device_driver_group_block=selected;;
+ net)
+ device_driver_group_net=selected;;
+ pcmcia)
+ # Pull in group `net'.
+ device_driver_group net
+ device_driver_group_pcmcia=selected;;
+ scsi)
+ device_driver_group_scsi=selected;;
+ wireless)
+ # Pull in group `pcmcia'.
+ device_driver_group pcmcia
+ device_driver_group_wireless=selected;;
+ *)]
+ AC_MSG_ERROR([invalid device driver group `$1'])[;;
+ esac
+}]
+
+AC_ARG_ENABLE([linux-groups],
+ AS_HELP_STRING([--disable-linux-groups], [Linux drivers]))
+
+AC_DEFUN([AC_OPTION_Linux_group], [
+AC_ARG_ENABLE([$1-group],
+ AS_HELP_STRING([--enable-$1-group], [$2]),
+ enable_$1_group=$enableval, enable_$1_group=$enable_linux_groups)
+])
+
+#
+# AC_OPTION_Linux_ix86_at(name,description,option[,class]). Process
+# configuration option --enable-`name' (with description `description'). If
+# it's set, then `option' is defined with AC_DEFINE. The option optionally
+# pulls in group `group'; see the comments on device_driver_group for more
+# information. For ix86-at, the value from $enable_default_device_drivers is
+# considered when deciding whether to activate the option by default or not.
+#
+AC_DEFUN([AC_OPTION_Linux_ix86_at], [
+[unset enableval]
+AC_ARG_ENABLE([$1],
+ AS_HELP_STRING([--enable-$1], [$2]))
+[if test x$enable_$4_group = xno;
+then
+ enableval=${enableval-no}
+fi
+#TODO. Could use some M4 magic to avoid a lot of shell code.
+case $host_platform:$host_cpu in
+ at:i?86)
+ case $enable_device_drivers:'$2' in
+ default:*by\ default* | qemu:*for\ qemu*)
+ enableval=${enableval-yes};;
+ *)
+ enableval=${enableval-no};;
+ esac;;
+ *)
+ if [ x"$enableval" = xyes ]; then
+ # TODO. That might not always be true.]
+ AC_MSG_ERROR([cannot enable `$1' in this configuration.])
+ [fi;;
+esac]
+AM_CONDITIONAL([device_driver_]m4_bpatsubst([$1], [-], [_]),
+ [[[ x"$enableval" = xyes ]]])
+[if [ x"$enableval" = xyes ]; then
+ have_linux_code=yes]
+ AC_DEFINE([$3], [1], [option $1: $2])
+ [device_driver_group $4
+fi]])
+
+#
+# AC_OPTION_Linux_ix86_at_nodef() is like AC_OPTION_Linux_ix86_at(), but
+# doesn't consider $enable_default_device_drivers.
+#
+AC_DEFUN([AC_OPTION_Linux_ix86_at_nodef], [
+[unset enableval]
+AC_ARG_ENABLE([$1],
+ AS_HELP_STRING([--enable-$1], [$2]))
+[#TODO.
+case $host_platform:$host_cpu in
+ at:i?86)
+ :;;
+ *)
+ if [ x"$enableval" = xyes ]; then
+ # TODO. That might not always be true.]
+ AC_MSG_ERROR([cannot enable `$1' in this configuration.])
+ [fi;;
+esac]
+AM_CONDITIONAL([device_driver_]m4_bpatsubst([$1], [-], [_]),
+ [[[ x"$enableval" = xyes ]]])
+[if [ x"$enableval" = xyes ]; then
+ have_linux_code=yes]
+ AC_DEFINE([$3], [1], [option $1: $2])
+ [device_driver_group $4
+fi]])
+
+#
+# AC_Linux_DRIVER(machname, description, definition, [group]). Convenience.
+# TODO. The naming of those is nearly everything but reasonable.
+#
+
+AC_DEFUN([AC_Linux_DRIVER], [
+ AC_OPTION_Linux_ix86_at([$1], [Linux device driver for $2; on ix86-at enabled]
+ [by default], [$3], [$4])
+])
+AC_DEFUN([AC_Linux_DRIVER_qemu], [
+ AC_OPTION_Linux_ix86_at([$1], [Linux device driver for $2; on ix86-at enabled]
+ [by default and for qemu], [$3], [$4])
+])
+AC_DEFUN([AC_Linux_DRIVER_nodef], [
+ AC_OPTION_Linux_ix86_at_nodef([$1], [Linux device driver for $2], [$3], [$4])
+])
+
+#
+# Configuration options.
+#
+
+dnl Block drivers.
+AC_OPTION_Linux_group([block], [Block drivers])
+
+AC_Linux_DRIVER_qemu([floppy],
+ [PC floppy],
+ [CONFIG_BLK_DEV_FD],
+ [block])
+AC_Linux_DRIVER_qemu([ide],
+ [IDE disk controllers],
+ [CONFIG_BLK_DEV_IDE],
+ [block])
+
+AC_ARG_ENABLE([ide-forcedma],
+ AS_HELP_STRING([--enable-ide-forcedma], [enable forced use of DMA on IDE]),
+ [test x"$enableval" = xno ||
+ AC_DEFINE([CONFIG_BLK_DEV_FORCE_DMA], [1], [Force DMA on IDE block devices])])
+
+dnl SCSI controllers.
+AC_OPTION_Linux_group([scsi], [SCSI drivers])
+
+# Disabled by default.
+AC_Linux_DRIVER_nodef([53c78xx],
+ [SCSI controller NCR 53C7,8xx],
+ [CONFIG_SCSI_NCR53C7xx],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([AM53C974],
+ [SCSI controller AM53/79C974 (am53c974, am79c974)],
+ [CONFIG_SCSI_AM53C974],
+ [scsi])
+AC_Linux_DRIVER([BusLogic],
+ [SCSI controller BusLogic],
+ [CONFIG_SCSI_BUSLOGIC],
+ [scsi])
+# TODO. What's that? And what about FlashPoint.c?
+dnl Dirty implementation...
+AC_ARG_ENABLE([flashpoint],
+ AS_HELP_STRING([--enable-flashpoint], [SCSI flashpoint]),
+ [test x"$enableval" = xno &&
+ AC_DEFINE([CONFIG_SCSI_OMIT_FLASHPOINT], [], [scsi omit flashpoint])])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([NCR53c406a],
+ [SCSI controller NCR53c406a chip],
+ [CONFIG_SCSI_NCR53C406A],
+ [scsi])
+AC_Linux_DRIVER([advansys],
+ [SCSI controller AdvanSys],
+ [CONFIG_SCSI_ADVANSYS],
+ [scsi])
+AC_Linux_DRIVER([aha152x],
+ [SCSI controller Adaptec AHA-152x/2825 (aha152x, aha2825)],
+ [CONFIG_SCSI_AHA152X],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([aha1542],
+ [SCSI controller Adaptec AHA-1542],
+ [CONFIG_SCSI_AHA1542],
+ [scsi])
+AC_Linux_DRIVER([aha1740],
+ [SCSI controller Adaptec AHA-1740],
+ [CONFIG_SCSI_AHA1740],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([aic7xxx],
+ [SCSI controller Adaptec AIC7xxx],
+ [CONFIG_SCSI_AIC7XXX],
+ [scsi])
+AC_Linux_DRIVER([dtc],
+ [SCSI controller DTC3180/3280 (dtc3180, dtc3280)],
+ [CONFIG_SCSI_DTC3280],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata],
+ [SCSI controller EATA ISA/EISA/PCI
+ (DPT and generic EATA/DMA-compliant boards)],
+ [CONFIG_SCSI_EATA],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata_dma],
+ [SCSI controller EATA-DMA (DPT, NEC, AT&T, SNI, AST, Olivetti, Alphatronix)],
+ [CONFIG_SCSI_EATA_DMA],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([eata_pio],
+ [SCSI controller EATA-PIO (old DPT PM2001, PM2012A)],
+ [CONFIG_SCSI_EATA_PIO],
+ [scsi])
+AC_Linux_DRIVER([fdomain],
+ [SCSI controller Future Domain 16xx],
+ [CONFIG_SCSI_FUTURE_DOMAIN],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([g_NCR5380],
+ [SCSI controller Generic NCR5380/53c400 (ncr5380, ncr53c400)],
+ [CONFIG_SCSI_GENERIC_NCR5380],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([gdth],
+ [GDT SCSI Disk Array Controller],
+ [CONFIG_SCSI_GDTH],
+ [scsi])
+AC_Linux_DRIVER([in2000],
+ [SCSI controller Always IN 2000],
+ [CONFIG_SCSI_IN2000],
+ [scsi])
+AC_Linux_DRIVER([ncr53c8xx],
+ [SCSI controller NCR53C8XX (ncr53c8xx, dc390f, dc390u, dc390w)],
+ [CONFIG_SCSI_NCR53C8XX],
+ [scsi])
+AC_Linux_DRIVER([pas16],
+ [SCSI controller PAS16],
+ [CONFIG_SCSI_PASS16],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([ppa],
+ [IOMEGA Parallel Port ZIP drive],
+ [CONFIG_SCSI_PPA],
+ [scsi])
+AC_Linux_DRIVER([qlogicfas],
+ [SCSI controller Qlogic FAS],
+ [CONFIG_SCSI_QLOGIC_FAS],
+ [scsi])
+AC_Linux_DRIVER([qlogicisp],
+ [SCSI controller Qlogic ISP],
+ [CONFIG_SCSI_QLOGIC_ISP],
+ [scsi])
+AC_Linux_DRIVER([seagate],
+ [SCSI controller Seagate ST02, Future Domain TMC-8xx],
+ [CONFIG_SCSI_SEAGATE],
+ [scsi])
+AC_Linux_DRIVER([sym53c8xx],
+ [SCSI controller Symbios 53C8XX],
+ [CONFIG_SCSI_SYM53C8XX],
+ [scsi])
+AC_Linux_DRIVER([t128],
+ [SCSI controller Trantor T128/T128F/T228 (t128, t128f, t228)],
+ [CONFIG_SCSI_T128],
+ [scsi])
+AC_Linux_DRIVER([tmscsim],
+ [SCSI controller Tekram DC-390(T) (dc390, dc390t)],
+ [CONFIG_SCSI_DC390T],
+ [scsi])
+AC_Linux_DRIVER([u14-34f],
+ [SCSI controller UltraStor 14F/34F],
+ [CONFIG_SCSI_U14_34F],
+ [scsi])
+AC_Linux_DRIVER([ultrastor],
+ [SCSI controller UltraStor],
+ [CONFIG_SCSI_ULTRASTOR],
+ [scsi])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([wd7000],
+ [SCSI controller WD 7000],
+ [CONFIG_SCSI_7000FASST],
+ [scsi])
+
+dnl Ethernet controllers.
+AC_OPTION_Linux_group([net], [Network drivers])
+
+AC_Linux_DRIVER([3c501],
+ [Ethernet controller 3COM 501 (3c501) / Etherlink I],
+ [CONFIG_EL1],
+ [net])
+AC_Linux_DRIVER([3c503],
+ [Ethernet controller 3Com 503 (3c503) / Etherlink II],
+ [CONFIG_EL2],
+ [net])
+AC_Linux_DRIVER([3c505],
+ [Ethernet controller 3Com 505 (3c505, elplus)],
+ [CONFIG_ELPLUS],
+ [net])
+AC_Linux_DRIVER([3c507],
+ [Ethernet controller 3Com 507 (3c507, el16)],
+ [CONFIG_EL16],
+ [net])
+AC_Linux_DRIVER([3c509],
+ [Ethernet controller 3Com 509/579 (3c509, 3c579) / Etherlink III],
+ [CONFIG_EL3],
+ [net])
+AC_Linux_DRIVER([3c59x],
+ [Ethernet controller 3Com 59x/90x
+ (3c59x, 3c590, 3c592, 3c595, 3c597, 3c90x, 3c900, 3c905)
+ "Vortex/Boomerang"],
+ [CONFIG_VORTEX],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([3c515],
+ [Ethernet controller 3Com 515 ISA Fast EtherLink],
+ [CONFIG_3C515],
+ [net])
+AC_Linux_DRIVER([ac3200],
+ [Ethernet controller Ansel Communications EISA 3200],
+ [CONFIG_AC3200],
+ [net])
+AC_Linux_DRIVER([apricot],
+ [Ethernet controller Apricot XEN-II on board ethernet],
+ [CONFIG_APRICOT],
+ [net])
+AC_Linux_DRIVER([at1700],
+ [Ethernet controller AT1700 (Fujitsu 86965)],
+ [CONFIG_AT1700],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([atp],
+ [Ethernet controller AT-LAN-TEC/RealTek pocket adaptor],
+ [CONFIG_ATP],
+ [net])
+dnl FIXME: Can't be enabled since it is a pcmcia driver, and we don't
+dnl have that kind of fluff.
+dnl linux_DRIVER([cb_shim], [CB_SHIM], [cb_shim], [net])
+AC_Linux_DRIVER([de4x5],
+ [Ethernet controller DE4x5 (de4x5, de425, de434, de435, de450, de500)],
+ [CONFIG_DE4X5],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([de600],
+ [Ethernet controller D-Link DE-600],
+ [CONFIG_DE600],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([de620],
+ [Ethernet controller D-Link DE-620],
+ [CONFIG_DE620],
+ [net])
+AC_Linux_DRIVER([depca],
+ [Ethernet controller DEPCA
+ (de100, de101, de200, de201, de202, de210, de422)],
+ [CONFIG_DEPCA],
+ [net])
+AC_Linux_DRIVER([e2100],
+ [Ethernet controller Cabletron E21xx],
+ [CONFIG_E2100],
+ [net])
+AC_Linux_DRIVER([eepro],
+ [Ethernet controller EtherExpressPro],
+ [CONFIG_EEXPRESS_PRO],
+ [net])
+AC_Linux_DRIVER([eepro100],
+ [Ethernet controller Intel EtherExpressPro PCI 10+/100B/100+],
+ [CONFIG_EEXPRESS_PRO100B],
+ [net])
+AC_Linux_DRIVER([eexpress],
+ [Ethernet controller EtherExpress 16],
+ [CONFIG_EEXPRESS],
+ [net])
+AC_Linux_DRIVER([epic100],
+ [Ethernet controller SMC 83c170/175 EPIC/100 (epic, epic100) / EtherPower II],
+ [CONFIG_EPIC],
+ [net])
+AC_Linux_DRIVER([eth16i],
+ [Ethernet controller ICL EtherTeam 16i/32 (eth16i, eth32)],
+ [CONFIG_ETH16I],
+ [net])
+AC_Linux_DRIVER([ewrk3],
+ [Ethernet controller EtherWORKS 3 (ewrk3, de203, de204, de205)],
+ [CONFIG_EWRK3],
+ [net])
+AC_Linux_DRIVER([fmv18x],
+ [Ethernet controller FMV-181/182/183/184],
+ [CONFIG_FMV18X],
+ [net])
+AC_Linux_DRIVER([hamachi],
+ [Ethernet controller Packet Engines "Hamachi" GNIC-2 Gigabit Ethernet],
+ [CONFIG_HAMACHI],
+ [net])
+AC_Linux_DRIVER([hp-plus],
+ [Ethernet controller HP PCLAN+ (27247B and 27252A)],
+ [CONFIG_HPLAN_PLUS],
+ [net])
+AC_Linux_DRIVER([hp],
+ [Ethernet controller HP PCLAN (27245 and other 27xxx series)],
+ [CONFIG_HPLAN],
+ [net])
+AC_Linux_DRIVER([hp100],
+ [Ethernet controller HP 10/100VG PCLAN (ISA, EISA, PCI)
+ (hp100, hpj2577, hpj2573, hpj2585, hp27248b)],
+ [CONFIG_HP100],
+ [net])
+AC_Linux_DRIVER([intel-gige],
+ [Ethernet controller Intel PCI Gigabit Ethernet],
+ [CONFIG_INTEL_GIGE],
+ [net])
+AC_Linux_DRIVER([lance],
+ [Ethernet controller AMD LANCE and PCnet (at1500, ne2100)],
+ [CONFIG_LANCE],
+ [net])
+AC_Linux_DRIVER([myson803],
+ [Ethernet controller Myson MTD803 Ethernet adapter series],
+ [CONFIG_MYSON803],
+ [net])
+AC_Linux_DRIVER([natsemi],
+ [Ethernet controller National Semiconductor DP8381x series PCI Ethernet],
+ [CONFIG_NATSEMI],
+ [net])
+AC_Linux_DRIVER_qemu([ne],
+ [Ethernet controller NE2000/NE1000 ISA (ne, ne1000, ne2000)],
+ [CONFIG_NE2000],
+ [net])
+AC_Linux_DRIVER([ne2k-pci],
+ [Ethernet controller PCI NE2000],
+ [CONFIG_NE2K_PCI],
+ [net])
+AC_Linux_DRIVER([ni52],
+ [Ethernet controller NI5210],
+ [CONFIG_NI52],
+ [net])
+AC_Linux_DRIVER([ni65],
+ [Ethernet controller NI6510],
+ [CONFIG_NI65],
+ [net])
+AC_Linux_DRIVER([ns820],
+ [Ethernet controller National Semiconductor DP8382x series PCI Ethernet],
+ [CONFIG_NS820],
+ [net])
+AC_Linux_DRIVER([pcnet32],
+ [Ethernet controller AMD PCI PCnet32 (PCI bus NE2100 cards)],
+ [CONFIG_PCNET32],
+ [net])
+AC_Linux_DRIVER([rtl8139],
+ [Ethernet controller RealTek 8129/8139 (rtl8129, rtl8139) (not 8019/8029!)],
+ [CONFIG_RTL8139],
+ [net])
+AC_Linux_DRIVER([seeq8005],
+ [Ethernet controller Seeq8005],
+ [CONFIG_SEEQ8005],
+ [net])
+AC_Linux_DRIVER([sis900],
+ [Ethernet controller SiS 900],
+ [CONFIG_SIS900],
+ [net])
+AC_Linux_DRIVER([sk_g16],
+ [Ethernet controller Schneider & Koch G16],
+ [CONFIG_SK_G16],
+ [net])
+AC_Linux_DRIVER([smc-ultra],
+ [Ethernet controller SMC Ultra],
+ [CONFIG_ULTRA],
+ [net])
+AC_Linux_DRIVER([smc-ultra32],
+ [Ethernet controller SMC Ultra32],
+ [CONFIG_ULTRA32],
+ [net])
+AC_Linux_DRIVER([starfire],
+ [Ethernet controller Adaptec Starfire network adapter],
+ [CONFIG_STARFIRE],
+ [net])
+AC_Linux_DRIVER([sundance],
+ [Ethernet controller Sundance ST201 "Alta" PCI Ethernet],
+ [CONFIG_SUNDANCE],
+ [net])
+AC_Linux_DRIVER([tlan],
+ [Ethernet controller TI ThunderLAN],
+ [CONFIG_TLAN],
+ [net])
+AC_Linux_DRIVER([tulip],
+ [Ethernet controller DECchip Tulip (dc21x4x) PCI (elcp, tulip)],
+ [CONFIG_DEC_ELCP],
+ [net])
+AC_Linux_DRIVER([via-rhine],
+ [Ethernet controller VIA Rhine],
+ [CONFIG_VIA_RHINE],
+ [net])
+# Disabled by default.
+AC_Linux_DRIVER_nodef([wavelan],
+ [Ethernet controller AT&T WaveLAN & DEC RoamAbout DS],
+ [CONFIG_WAVELAN],
+ [net])
+AC_Linux_DRIVER([wd],
+ [Ethernet controller WD80x3],
+ [CONFIG_WD80x3],
+ [net])
+AC_Linux_DRIVER([winbond-840],
+ [Ethernet controller Winbond W89c840 PCI Ethernet],
+ [CONFIG_WINBOND840],
+ [net])
+AC_Linux_DRIVER([yellowfin],
+ [Ethernet controller Packet Engines Yellowfin Gigabit-NIC],
+ [CONFIG_YELLOWFIN],
+ [net])
+AC_Linux_DRIVER([znet],
+ [Ethernet controller Zenith Z-Note (znet, znote)],
+ [CONFIG_ZNET],
+ [net])
+
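+dnl Illustration (an assumption, not part of the original fragment): each
+dnl AC_Linux_DRIVER([foo], [description], [CONFIG_FOO], [group]) entry above
+dnl is expected to provide an `--enable-foo' configure switch which, when
+dnl selected, defines CONFIG_FOO and marks the `group' driver group as used.
+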
+dnl PCMCIA device support.
+AC_OPTION_Linux_group([pcmcia], [PCMCIA drivers])
+
+AC_Linux_DRIVER([i82365],
+ [Intel 82365 PC Card controller],
+ [CONFIG_I82365],
+ [pcmcia])
+
+AC_OPTION_Linux_ix86_at([pcmcia-isa],
+ [ISA bus support in the PCMCIA core; enabled by default on ix86-at],
+ [CONFIG_ISA],
+ [pcmcia])
+
+dnl PCMCIA device drivers.
+
+AC_Linux_DRIVER([3c574_cs],
+ [3Com 3c574 ``RoadRunner'' PCMCIA Ethernet],
+ [CONFIG_3C574_CS],
+ [pcmcia])
+AC_Linux_DRIVER([3c589_cs],
+ [3Com 3c589 PCMCIA Ethernet card],
+ [CONFIG_3C589_CS],
+ [pcmcia])
+AC_Linux_DRIVER([axnet_cs],
+ [Asix AX88190-based PCMCIA Ethernet adapters],
+ [CONFIG_AXNET_CS],
+ [pcmcia])
+AC_Linux_DRIVER([fmvj18x_cs],
+ [fmvj18x chipset based PCMCIA Ethernet cards],
+ [CONFIG_FMVJ18X_CS],
+ [pcmcia])
+AC_Linux_DRIVER([nmclan_cs],
+ [New Media Ethernet LAN PCMCIA cards],
+ [CONFIG_NMCLAN_CS],
+ [pcmcia])
+AC_Linux_DRIVER([pcnet_cs],
+ [NS8390-based PCMCIA cards],
+ [CONFIG_PCNET_CS],
+ [pcmcia])
+AC_Linux_DRIVER([smc91c92_cs],
+ [SMC91c92-based PCMCIA cards],
+ [CONFIG_SMC91C92_CS],
+ [pcmcia])
+AC_Linux_DRIVER([xirc2ps_cs],
+ [Xircom CreditCard and Realport PCMCIA ethernet],
+ [CONFIG_XIRC2PS_CS],
+ [pcmcia])
+
+dnl Wireless device drivers.
+AC_OPTION_Linux_group([wireless], [Wireless drivers])
+
+AC_Linux_DRIVER([orinoco_cs],
+ [Hermes or Prism 2 PCMCIA Wireless adapters (Orinoco)],
+ [CONFIG_ORINOCO_CS],
+ [wireless])
+
+#
+# Process device driver groups: kinds of drivers that have gobs of source files
+# that get brought in, need special symbols defined, etc.
+#
+
+[if [ x"$device_driver_group_block" = xselected ]; then]
+ AC_DEFINE([CONFIG_BLOCK], [1], [CONFIG_BLOCK])
+ AM_CONDITIONAL([device_driver_group_block], [true])
+[else] AM_CONDITIONAL([device_driver_group_block], [false])
+[fi
+
+if [ x"$device_driver_group_net" = xselected ]; then]
+ AC_DEFINE([CONFIG_INET], [1], [CONFIG_INET])
+ AM_CONDITIONAL([device_driver_group_net], [true])
+[else] AM_CONDITIONAL([device_driver_group_net], [false])
+[fi
+
+if [ x"$device_driver_group_pcmcia" = xselected ]; then]
+ AC_DEFINE([CONFIG_PCMCIA], [1], [CONFIG_PCMCIA])
+ AM_CONDITIONAL([device_driver_group_pcmcia], [true])
+[else] AM_CONDITIONAL([device_driver_group_pcmcia], [false])
+[fi
+
+if [ x"$device_driver_group_scsi" = xselected ]; then]
+ AC_DEFINE([CONFIG_SCSI], [1], [CONFIG_SCSI])
+ AM_CONDITIONAL([device_driver_group_scsi], [true])
+[else] AM_CONDITIONAL([device_driver_group_scsi], [false])
+[fi
+
+if [ x"$device_driver_group_wireless" = xselected ]; then]
+ AC_DEFINE([CONFIG_WIRELESS], [1], [CONFIG_WIRELESS])
+[fi]
+
+#
+# Internals.
+#
+
+AC_DEFUN([hurd_host_CPU], [
+ AC_DEFINE([CONFIG_M$1], [1], [$1])
+ AC_DEFINE([CPU], [$1], [CPU])])
+
+[if [ "$have_linux_code" = yes ]; then]
+ AM_CONDITIONAL([CODE_linux], [true])
+
+ [case $host_cpu in
+ i386)]
+ hurd_host_CPU([386])[;;
+ i486)]
+ hurd_host_CPU([486])[;;
+ i586)]
+ hurd_host_CPU([586])[;;
+ i686)]
+ hurd_host_CPU([686])[;;
+ *)
+ # TODO. Warn here?]
+ hurd_host_CPU([486])[;;
+ esac]
+
+ # The glue-code-dependent code checks for this.
+ AC_DEFINE([LINUX_DEV], [1], [Linux device drivers.])
+ # Instead of Mach's KERNEL, Linux uses __KERNEL__. Whee.
+ AC_DEFINE([__KERNEL__], [1], [__KERNEL__])
+ [if [ $mach_ncpus -gt 1 ]; then]
+ AC_DEFINE([__SMP__], [1], [__SMP__])
+ [fi]
+
+ # Set up `asm-SYSTYPE' links.
+ AC_CONFIG_LINKS([linux/src/include/asm:linux/src/include/asm-$systype
+ linux/dev/include/asm:linux/dev/include/asm-$systype])
+[else] AM_CONDITIONAL([CODE_linux], [false])
+[fi]
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/linux/dev/README b/linux/dev/README
new file mode 100644
index 0000000..c3ceca1
--- /dev/null
+++ b/linux/dev/README
@@ -0,0 +1,8 @@
+This hierarchy used to contain modified files, based on files from the
+Linux kernel, as opposed to `../src/' containing only files that have not
+been modified (or have only been modified marginally). This policy is
+NO LONGER adhered to, so please don't change (or even add) files
+below here, but instead merge the files in here back into `../src/'
+(which should really be called `../linux-2.0' or similar) or even better
+--- when adding large chunks --- create a more suitable hierarchy like
+we've done with `../pcmcia-cs/'.
diff --git a/linux/dev/arch/i386/kernel/irq.c b/linux/dev/arch/i386/kernel/irq.c
new file mode 100644
index 0000000..3b349cc
--- /dev/null
+++ b/linux/dev/arch/i386/kernel/irq.c
@@ -0,0 +1,775 @@
+/*
+ * Linux IRQ management.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+#include <kern/assert.h>
+#include <kern/cpu_number.h>
+
+#include <i386/spl.h>
+#include <i386/irq.h>
+#include <i386/pit.h>
+
+#define MACH_INCLUDE
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+
+#include <linux/dev/glue/glue.h>
+#include <machine/machspl.h>
+
+#include <device/intr.h>
+
+#if 0
+/* XXX: This is the way it's done in linux 2.2. GNU Mach currently uses intr_count. It should be made using local_{bh/irq}_count instead (through hardirq_enter/exit) for SMP support. */
+unsigned int local_bh_count[NR_CPUS];
+unsigned int local_irq_count[NR_CPUS];
+#else
+#define local_bh_count (&intr_count)
+#define local_irq_count (&intr_count)
+#endif
+
+/*
+ * XXX Move this into more suitable place...
+ * Set if the machine has an EISA bus.
+ */
+int EISA_bus = 0;
+
+/*
+ * Flag indicating an interrupt is being handled.
+ */
+unsigned int intr_count = 0;
+
+/*
+ * List of Linux interrupt handlers.
+ */
+struct linux_action
+{
+ void (*handler) (int, void *, struct pt_regs *);
+ void *dev_id;
+ struct linux_action *next;
+ unsigned long flags;
+ user_intr_t *user_intr;
+};
+
+static struct linux_action *irq_action[NINTR] = {0};
+
+/*
+ * Generic interrupt handler for Linux devices.
+ * Set up a fake `struct pt_regs' then call the real handler.
+ */
+static void
+linux_intr (int irq)
+{
+ struct pt_regs regs;
+ struct linux_action *action = *(irq_action + irq);
+ struct linux_action **prev = &irq_action[irq];
+ unsigned long flags;
+
+ kstat.interrupts[irq]++;
+ intr_count++;
+
+ save_flags (flags);
+ if (action && (action->flags & SA_INTERRUPT))
+ cli ();
+
+ while (action)
+ {
+ // TODO: we might need to check whether the interrupt belongs to
+ // the current device, but we don't do that for now.
+ if (action->user_intr)
+ {
+ if (!deliver_user_intr(&irqtab, irq, action->user_intr))
+ {
+ *prev = action->next;
+ linux_kfree(action);
+ action = *prev;
+ continue;
+ }
+ }
+ else if (action->handler)
+ action->handler (irq, action->dev_id, &regs);
+ prev = &action->next;
+ action = action->next;
+ }
+
+ if (!irq_action[irq])
+ {
+ /* No handler any more, disable interrupt */
+ mask_irq (irq);
+ ivect[irq] = intnull;
+ iunit[irq] = irq;
+ }
+
+ restore_flags (flags);
+
+ intr_count--;
+}
+
+/* IRQ mask according to Linux drivers */
+static unsigned linux_pic_mask;
+
+/* These only record that Linux requested to mask IRQs */
+void
+disable_irq (unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned mask = 1U << irq_nr;
+
+ save_flags (flags);
+ cli ();
+ if (!(linux_pic_mask & mask))
+ {
+ linux_pic_mask |= mask;
+ __disable_irq(irq_nr);
+ }
+ restore_flags (flags);
+}
+
+void
+enable_irq (unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned mask = 1U << irq_nr;
+
+ save_flags (flags);
+ cli ();
+ if (linux_pic_mask & mask)
+ {
+ linux_pic_mask &= ~mask;
+ __enable_irq(irq_nr);
+ }
+ restore_flags (flags);
+}
+
+static int
+setup_x86_irq (int irq, struct linux_action *new)
+{
+ int shared = 0;
+ struct linux_action *old, **p;
+ unsigned long flags;
+
+ p = irq_action + irq;
+ if ((old = *p) != NULL)
+ {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ))
+ return (-EBUSY);
+
+ /* Can't share interrupts unless both are same type */
+ if ((old->flags ^ new->flags) & SA_INTERRUPT)
+ return (-EBUSY);
+
+ /* add new interrupt at end of irq queue */
+ do
+ {
+ p = &old->next;
+ old = *p;
+ }
+ while (old);
+ shared = 1;
+ }
+
+ save_flags (flags);
+ cli ();
+ *p = new;
+
+ if (!shared)
+ {
+ ivect[irq] = linux_intr;
+ iunit[irq] = irq;
+ unmask_irq (irq);
+ }
+ restore_flags (flags);
+ return 0;
+}
+
+int
+install_user_intr_handler (struct irqdev *dev, int id, unsigned long flags,
+ user_intr_t *user_intr)
+{
+ struct linux_action *action;
+ struct linux_action *old;
+ int retval;
+
+ unsigned int irq = dev->irq[id];
+
+ assert (irq < NINTR);
+
+ /* Test whether the irq handler has been set */
+ // TODO I need to protect the array when iterating it.
+ old = irq_action[irq];
+ while (old)
+ {
+ if (old->user_intr && old->user_intr->dst_port == user_intr->dst_port)
+ {
+ printk ("The interrupt handler has already been installed on line %d", irq);
+ return linux_to_mach_error (-EAGAIN);
+ }
+ old = old->next;
+ }
+
+ /*
+ * Hmm... Should I use `kalloc()' ?
+ * By OKUJI Yoshinori.
+ */
+ action = (struct linux_action *)
+ linux_kmalloc (sizeof (struct linux_action), GFP_KERNEL);
+ if (action == NULL)
+ return linux_to_mach_error (-ENOMEM);
+
+ action->handler = NULL;
+ action->next = NULL;
+ action->dev_id = NULL;
+ action->flags = SA_SHIRQ;
+ action->user_intr = user_intr;
+
+ retval = setup_x86_irq (irq, action);
+ if (retval)
+ linux_kfree (action);
+
+ return linux_to_mach_error (retval);
+}
+
+/*
+ * Attach a handler to an IRQ.
+ */
+int
+request_irq (unsigned int irq, void (*handler) (int, void *, struct pt_regs *),
+ unsigned long flags, const char *device, void *dev_id)
+{
+ struct linux_action *action;
+ int retval;
+
+ assert (irq < NINTR);
+
+ if (!handler)
+ return -EINVAL;
+
+ /*
+ * Hmm... Should I use `kalloc()' ?
+ * By OKUJI Yoshinori.
+ */
+ action = (struct linux_action *)
+ linux_kmalloc (sizeof (struct linux_action), GFP_KERNEL);
+ if (action == NULL)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->next = NULL;
+ action->dev_id = dev_id;
+ action->flags = flags;
+ action->user_intr = NULL;
+
+ retval = setup_x86_irq (irq, action);
+ if (retval)
+ linux_kfree (action);
+
+ return retval;
+}
+
+/*
+ * Deallocate an irq.
+ */
+void
+free_irq (unsigned int irq, void *dev_id)
+{
+ struct linux_action *action, **p;
+ unsigned long flags;
+
+ if (irq >= NINTR)
+ panic ("free_irq: bad irq number");
+
+ for (p = irq_action + irq; (action = *p) != NULL; p = &action->next)
+ {
+ if (action->dev_id != dev_id)
+ continue;
+
+ save_flags (flags);
+ cli ();
+ *p = action->next;
+ if (!irq_action[irq])
+ {
+ mask_irq (irq);
+ ivect[irq] = intnull;
+ iunit[irq] = irq;
+ }
+ restore_flags (flags);
+ linux_kfree (action);
+ return;
+ }
+
+ panic ("free_irq: bad irq number");
+}
+
+/*
+ * Set for an irq probe.
+ */
+unsigned long
+probe_irq_on (void)
+{
+ unsigned i, irqs = 0;
+ unsigned long delay;
+
+ assert (curr_ipl[cpu_number()] == 0);
+
+ /*
+ * Allocate all available IRQs.
+ */
+ for (i = NINTR - 1; i > 0; i--)
+ {
+ if (!irq_action[i] && ivect[i] == intnull)
+ {
+ enable_irq (i);
+ irqs |= 1 << i;
+ }
+ }
+
+ /*
+ * Wait for spurious interrupts to mask themselves out.
+ */
+ for (delay = jiffies + HZ / 10; delay > jiffies;)
+ ;
+
+ return (irqs & ~linux_pic_mask);
+}
+
+/*
+ * Return the result of an irq probe.
+ */
+int
+probe_irq_off (unsigned long irqs)
+{
+ unsigned int i;
+
+ assert (curr_ipl[cpu_number()] == 0);
+
+ irqs &= linux_pic_mask;
+
+ /*
+ * Disable unnecessary IRQs.
+ */
+ for (i = NINTR - 1; i > 0; i--)
+ {
+ if (!irq_action[i] && ivect[i] == intnull)
+ {
+ disable_irq (i);
+ }
+ }
+
+ /*
+ * Return IRQ number.
+ */
+ if (!irqs)
+ return 0;
+ i = ffz (~irqs);
+ if (irqs != (irqs & (1 << i)))
+ i = -i;
+ return i;
+}
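+
+/*
+ * Typical probe sequence in a driver (sketch only, not taken from this
+ * file): call probe_irq_on(), make the device raise its interrupt, then
+ * call probe_irq_off() with the returned mask.  A positive return value is
+ * the IRQ that fired, 0 means none fired, and a negative value means
+ * several fired (its absolute value is the lowest one).
+ */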
+
+/*
+ * Reserve IRQs used by Mach drivers.
+ * Must be called before Linux IRQ detection, after Mach IRQ detection.
+ */
+
+static void reserved_mach_handler (int line, void *cookie, struct pt_regs *regs)
+{
+ /* These interrupts are actually handled in Mach. */
+ assert (! "reached");
+}
+
+static const struct linux_action reserved_mach =
+ {
+ reserved_mach_handler, NULL, NULL, 0
+ };
+
+static void
+reserve_mach_irqs (void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NINTR; i++)
+ {
+ if (ivect[i] != intnull)
+ /* This dummy action does not specify SA_SHIRQ, so
+ setup_x86_irq will not try to add a handler to this
+ slot. Therefore, the cast is safe. */
+ irq_action[i] = (struct linux_action *) &reserved_mach;
+ }
+}
+
+#ifdef __SMP__
+unsigned char global_irq_holder = NO_PROC_ID;
+unsigned volatile int global_irq_lock;
+atomic_t global_irq_count;
+
+atomic_t global_bh_count;
+atomic_t global_bh_lock;
+
+/*
+ * "global_cli()" is a special case, in that it can hold the
+ * interrupts disabled for a longish time, and also because
+ * we may be doing TLB invalidates when holding the global
+ * IRQ lock for historical reasons. Thus we may need to check
+ * SMP invalidate events specially by hand here (but not in
+ * any normal spinlocks)
+ */
+#if 0
+/* XXX: check how Mach handles this */
+static inline void check_smp_invalidate(int cpu)
+{
+ if (test_bit(cpu, &smp_invalidate_needed)) {
+ clear_bit(cpu, &smp_invalidate_needed);
+ local_flush_tlb();
+ }
+}
+#endif
+
+static void show(char * str)
+{
+ int i;
+ unsigned long *stack;
+ int cpu = smp_processor_id();
+
+ printk("\n%s, CPU %d:\n", str, cpu);
+ printk("irq: %d [%d %d]\n",
+ atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
+ printk("bh: %d [%d %d]\n",
+ atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
+ stack = (unsigned long *) &stack;
+ for (i = 40; i ; i--) {
+ unsigned long x = *++stack;
+ //if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) {
+ printk("<[%08lx]> ", x);
+ //}
+ }
+}
+
+#define MAXCOUNT 100000000
+
+static inline void wait_on_bh(void)
+{
+ int count = MAXCOUNT;
+ do {
+ if (!--count) {
+ show("wait_on_bh");
+ count = ~0;
+ }
+ /* nothing .. wait for the other bh's to go away */
+ } while (atomic_read(&global_bh_count) != 0);
+}
+
+/*
+ * I had a lockup scenario where a tight loop doing
+ * spin_unlock()/spin_lock() on CPU#1 was racing with
+ * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
+ * apparently the spin_unlock() information did not make it
+ * through to CPU#0 ... nasty, is this by design, do we have to limit
+ * 'memory update oscillation frequency' artificially like here?
+ *
+ * Such 'high frequency update' races can be avoided by careful design, but
+ * some of our major constructs like spinlocks use similar techniques,
+ * it would be nice to clarify this issue. Set this define to 0 if you
+ * want to check whether your system freezes. I suspect the delay done
+ * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
+ * i thought that such things are guaranteed by design, since we use
+ * the 'LOCK' prefix.
+ */
+#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 1
+
+#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
+# define SYNC_OTHER_CORES(x) udelay(x+1)
+#else
+/*
+ * We have to allow irqs to arrive between __sti and __cli
+ */
+# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
+#endif
+
+static inline void wait_on_irq(int cpu)
+{
+ int count = MAXCOUNT;
+
+ for (;;) {
+
+ /*
+ * Wait until all interrupts are gone. Wait
+ * for bottom half handlers unless we're
+ * already executing in one..
+ */
+ if (!atomic_read(&global_irq_count)) {
+ if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
+ break;
+ }
+
+ /* Duh, we have to loop. Release the lock to avoid deadlocks */
+ clear_bit(0,&global_irq_lock);
+
+ for (;;) {
+ if (!--count) {
+ show("wait_on_irq");
+ count = ~0;
+ }
+ __sti();
+ SYNC_OTHER_CORES(cpu);
+ __cli();
+ //check_smp_invalidate(cpu);
+ if (atomic_read(&global_irq_count))
+ continue;
+ if (global_irq_lock)
+ continue;
+ if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
+ continue;
+ if (!test_and_set_bit(0,&global_irq_lock))
+ break;
+ }
+ }
+}
+
+/*
+ * This is called when we want to synchronize with
+ * bottom half handlers. We need to wait until
+ * no other CPU is executing any bottom half handler.
+ *
+ * Don't wait if we're already running in an interrupt
+ * context or are inside a bh handler.
+ */
+void synchronize_bh(void)
+{
+ if (atomic_read(&global_bh_count) && !in_interrupt())
+ wait_on_bh();
+}
+
+/*
+ * This is called when we want to synchronize with
+ * interrupts. We may for example tell a device to
+ * stop sending interrupts: but to make sure there
+ * are no interrupts that are executing on another
+ * CPU we need to call this function.
+ */
+void synchronize_irq(void)
+{
+ if (atomic_read(&global_irq_count)) {
+ /* Stupid approach */
+ cli();
+ sti();
+ }
+}
+
+static inline void get_irqlock(int cpu)
+{
+ if (test_and_set_bit(0,&global_irq_lock)) {
+ /* do we already hold the lock? */
+ if ((unsigned char) cpu == global_irq_holder)
+ return;
+ /* Uhhuh.. Somebody else got it. Wait.. */
+ do {
+ do {
+ //check_smp_invalidate(cpu);
+ } while (test_bit(0,&global_irq_lock));
+ } while (test_and_set_bit(0,&global_irq_lock));
+ }
+ /*
+ * We also have to make sure that nobody else is running
+ * in an interrupt context.
+ */
+ wait_on_irq(cpu);
+
+ /*
+ * Ok, finally..
+ */
+ global_irq_holder = cpu;
+}
+
+#define EFLAGS_IF_SHIFT 9
+
+/*
+ * A global "cli()" while in an interrupt context
+ * turns into just a local cli(). Interrupts
+ * should use spinlocks for the (very unlikely)
+ * case that they ever want to protect against
+ * each other.
+ *
+ * If we already have local interrupts disabled,
+ * this will not turn a local disable into a
+ * global one (problems with spinlocks: this makes
+ * save_flags+cli+sti usable inside a spinlock).
+ */
+void __global_cli(void)
+{
+ unsigned int flags;
+
+ __save_flags(flags);
+ if (flags & (1 << EFLAGS_IF_SHIFT)) {
+ int cpu = smp_processor_id();
+ __cli();
+ if (!local_irq_count[cpu])
+ get_irqlock(cpu);
+ }
+}
+
+void __global_sti(void)
+{
+ int cpu = smp_processor_id();
+
+ if (!local_irq_count[cpu])
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
+ */
+unsigned long __global_save_flags(void)
+{
+ int retval;
+ int local_enabled;
+ unsigned long flags;
+
+ __save_flags(flags);
+ local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
+ /* default to local */
+ retval = 2 + local_enabled;
+
+ /* check for global flags if we're not in an interrupt */
+ if (!local_irq_count[smp_processor_id()]) {
+ if (local_enabled)
+ retval = 1;
+ if (global_irq_holder == (unsigned char) smp_processor_id())
+ retval = 0;
+ }
+ return retval;
+}
+
+void __global_restore_flags(unsigned long flags)
+{
+ switch (flags) {
+ case 0:
+ __global_cli();
+ break;
+ case 1:
+ __global_sti();
+ break;
+ case 2:
+ __cli();
+ break;
+ case 3:
+ __sti();
+ break;
+ default:
+ printk("global_restore_flags: %08lx (%08lx)\n",
+ flags, (&flags)[-1]);
+ }
+}
+
+#endif
+
+static void (*old_clock_handler) ();
+
+void
+init_IRQ (void)
+{
+ char *p;
+ int latch = (CLKNUM + hz / 2) / hz;
+
+ /*
+ * Ensure interrupts are disabled.
+ */
+ (void) splhigh ();
+
+#ifndef APIC
+ /*
+ * Program counter 0 of 8253 to interrupt hz times per second.
+ */
+ outb_p (PIT_C0 | PIT_SQUAREMODE | PIT_READMODE, PITCTL_PORT);
+ outb_p (latch & 0xff, PITCTR0_PORT);
+ outb (latch >> 8, PITCTR0_PORT);
+
+ /*
+ * Install our clock interrupt handler.
+ */
+ old_clock_handler = ivect[0];
+ ivect[0] = linux_timer_intr;
+#endif
+
+ reserve_mach_irqs ();
+
+ /*
+ * Enable interrupts.
+ */
+ (void) spl0 ();
+
+ /*
+ * Check if the machine has an EISA bus.
+ */
+ p = (char *) phystokv(0x0FFFD9);
+ if (*p++ == 'E' && *p++ == 'I' && *p++ == 'S' && *p == 'A')
+ EISA_bus = 1;
+
+ /*
+ * Permanently allocate standard device ports.
+ */
+ request_region (0x00, 0x20, "dma1");
+ request_region (0x20, 0x20, "pic1");
+ request_region (0x40, 0x20, "timer");
+ request_region (0x70, 0x10, "rtc");
+ request_region (0x80, 0x20, "dma page reg");
+ request_region (0xa0, 0x20, "pic2");
+ request_region (0xc0, 0x20, "dma2");
+ request_region (0xf0, 0x10, "npu");
+}
+
+void
+restore_IRQ (void)
+{
+ /*
+ * Disable interrupts.
+ */
+ (void) splhigh ();
+
+#ifndef APIC
+ /*
+ * Restore clock interrupt handler.
+ */
+ ivect[0] = old_clock_handler;
+#endif
+}
+
diff --git a/linux/dev/arch/i386/kernel/setup.c b/linux/dev/arch/i386/kernel/setup.c
new file mode 100644
index 0000000..92b782a
--- /dev/null
+++ b/linux/dev/arch/i386/kernel/setup.c
@@ -0,0 +1,13 @@
+char x86 =
+#if defined(CONFIG_M386)
+3;
+#elif defined(CONFIG_M486)
+4;
+#elif defined(CONFIG_M586)
+5;
+#elif defined(CONFIG_M686)
+6;
+#else
+#error "CPU type is undefined!"
+#endif
+
diff --git a/linux/dev/drivers/block/ahci.c b/linux/dev/drivers/block/ahci.c
new file mode 100644
index 0000000..751c7ca
--- /dev/null
+++ b/linux/dev/drivers/block/ahci.c
@@ -0,0 +1,1038 @@
+/*
+ * Copyright (C) 2013 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <ahci.h>
+#include <kern/assert.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <linux/bios32.h>
+#include <linux/major.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <asm/io.h>
+
+#define MAJOR_NR SCSI_DISK_MAJOR
+#include <linux/blk.h>
+
+/* Standard AHCI BAR for mmio */
+#define AHCI_PCI_BAR 5
+
+/* minor: 3 bits for device number, 5 bits for partition number. */
+
+#define MAX_PORTS 8
+#define PARTN_BITS 5
+#define PARTN_MASK ((1<<PARTN_BITS)-1)
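+
+/* Worked example of the layout above (illustration only, not part of the
+ * original source): with PARTN_BITS == 5, minor 37 (0x25) selects
+ * unit 37 >> PARTN_BITS == 1 and partition 37 & PARTN_MASK == 5, i.e.
+ * partition 5 on the second AHCI port. */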
+
+/* We need to use one DMA scatter element per physical page.
+ * ll_rw_block creates at most 8 buffer heads */
+/* See MAX_BUF */
+#define PRDTL_SIZE 8
+
+#define WAIT_MAX (1*HZ) /* Wait at most 1s for requests completion */
+
+/* AHCI standard structures */
+
+struct ahci_prdt {
+ u32 dba; /* Data base address */
+ u32 dbau; /* upper 32bit */
+ u32 rsv0; /* Reserved */
+
+ u32 dbc; /* Byte count bits 0-21,
+ * bit31 interrupt on completion. */
+};
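+
+/* Illustration (not from the original source): dbc holds the transfer
+ * length minus one, so a 4 KiB scatter element uses dbc == 0xfff; see how
+ * ahci_do_port_request() and ahci_identify() below fill it in. */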
+
+struct ahci_cmd_tbl {
+ u8 cfis[64];
+ u8 acmd[16];
+ u8 rsv[48];
+
+ struct ahci_prdt prdtl[PRDTL_SIZE];
+};
+
+struct ahci_command {
+ u32 opts; /* Command options */
+
+ u32 prdbc; /* Physical Region Descriptor byte count */
+
+ u32 ctba; /* Command Table Descriptor Base Address */
+ u32 ctbau; /* upper 32bit */
+
+ u32 rsv1[4]; /* Reserved */
+};
+
+struct ahci_fis_dma {
+ u8 fis_type;
+ u8 flags;
+ u8 rsved[2];
+ u64 id;
+ u32 rsvd;
+ u32 offset;
+ u32 count;
+ u32 resvd;
+};
+
+struct ahci_fis_pio {
+ u8 fis_type;
+ u8 flags;
+ u8 status;
+ u8 error;
+
+ u8 lba0;
+ u8 lba1;
+ u8 lba2;
+ u8 device;
+
+ u8 lba3;
+ u8 lba4;
+ u8 lba5;
+ u8 rsv2;
+
+ u8 countl;
+ u8 counth;
+ u8 rsv3;
+ u8 e_status;
+
+ u16 tc; /* Transfer Count */
+ u8 rsv4[2];
+};
+
+struct ahci_fis_d2h {
+ u8 fis_type;
+ u8 flags;
+ u8 status;
+ u8 error;
+
+ u8 lba0;
+ u8 lba1;
+ u8 lba2;
+ u8 device;
+
+ u8 lba3;
+ u8 lba4;
+ u8 lba5;
+ u8 rsv2;
+
+ u8 countl;
+ u8 counth;
+ u8 rsv3[2];
+
+ u8 rsv4[4];
+};
+
+struct ahci_fis_dev {
+ u8 rsvd[8];
+};
+
+struct ahci_fis_h2d {
+ u8 fis_type;
+ u8 flags;
+ u8 command;
+ u8 featurel;
+
+ u8 lba0;
+ u8 lba1;
+ u8 lba2;
+ u8 device;
+
+ u8 lba3;
+ u8 lba4;
+ u8 lba5;
+ u8 featureh;
+
+ u8 countl;
+ u8 counth;
+ u8 icc;
+ u8 control;
+
+ u8 rsv1[4];
+};
+
+struct ahci_fis_data {
+ u8 fis_type;
+ u8 flags;
+ u8 rsv1[2];
+ u32 data1[];
+};
+
+struct ahci_fis {
+ struct ahci_fis_dma dma_fis;
+ u8 pad0[4];
+
+ struct ahci_fis_pio pio_fis;
+ u8 pad1[12];
+
+ struct ahci_fis_d2h d2h_fis;
+ u8 pad2[4];
+
+ struct ahci_fis_dev dev_fis;
+
+ u8 ufis[64];
+
+ u8 rsv[0x100 - 0xa0];
+};
+
+struct ahci_port {
+ u32 clb; /* Command List Base address */
+ u32 clbu; /* upper 32bit */
+ u32 fb; /* FIS Base */
+ u32 fbu; /* upper 32bit */
+ u32 is; /* Interrupt Status */
+ u32 ie; /* Interrupt Enable */
+ u32 cmd; /* Command and Status */
+ u32 rsv0; /* Reserved */
+ u32 tfd; /* Task File Data */
+ u32 sig; /* Signature */
+ u32 ssts; /* SATA Status */
+ u32 sctl; /* SATA Control */
+ u32 serr; /* SATA Error */
+ u32 sact; /* SATA Active */
+ u32 ci; /* Command Issue */
+ u32 sntf; /* SATA Notification */
+ u32 fbs; /* FIS-based switch control */
+ u8 rsv1[0x70 - 0x44]; /* Reserved */
+ u8 vendor[0x80 - 0x70]; /* Vendor-specific */
+};
+
+struct ahci_host {
+ u32 cap; /* Host capabilities */
+ u32 ghc; /* Global Host Control */
+ u32 is; /* Interrupt Status */
+ u32 pi; /* Port Implemented */
+ u32 v; /* Version */
+ u32 ccc_ctl; /* Command Completion Coalescing control */
+ u32 ccc_pts; /* Command Completion Coalescing ports */
+ u32 em_loc; /* Enclosure Management location */
+ u32 em_ctrl; /* Enclosure Management control */
+ u32 cap2; /* Host capabilities extended */
+ u32 bohc; /* BIOS/OS Handoff Control and status */
+ u8 rsv[0xa0 - 0x2c]; /* Reserved */
+ u8 vendor[0x100 - 0xa0]; /* Vendor-specific */
+ struct ahci_port ports[]; /* Up to 32 ports */
+};
+
+/* Our own data */
+
+static struct port {
+ /* memory-mapped regions */
+ const volatile struct ahci_host *ahci_host;
+ const volatile struct ahci_port *ahci_port;
+
+ /* host-memory buffers */
+ struct ahci_command *command;
+ struct ahci_fis *fis;
+ struct ahci_cmd_tbl *prdtl;
+
+ struct hd_driveid id;
+ unsigned is_cd;
+ unsigned long long capacity; /* Nr of sectors */
+ u32 status; /* interrupt status */
+ unsigned cls; /* Command list maximum size.
+ We currently only use 1. */
+ struct wait_queue *q; /* IRQ wait queue */
+ struct hd_struct *part; /* drive partition table */
+ unsigned lba48; /* Whether LBA48 is supported */
+ unsigned identify; /* Whether we are just identifying
+ at boot */
+ struct gendisk *gd;
+} ports[MAX_PORTS];
+
+
+/* do_request() gets called by the block layer to push a request to the disk.
+ We push just one; when an interrupt tells us it has completed, we call
+ do_request() ourselves again to push the next request, and so on. */
+
+/* Request completed, either successfully or with an error */
+static void ahci_end_request(int uptodate)
+{
+ struct request *rq = CURRENT;
+ struct buffer_head *bh;
+
+ rq->errors = 0;
+ if (!uptodate) {
+ if (!rq->quiet)
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(rq->rq_dev), rq->sector);
+ }
+
+ for (bh = rq->bh; bh; )
+ {
+ struct buffer_head *next = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate (bh, uptodate);
+ unlock_buffer (bh);
+ bh = next;
+ }
+
+ CURRENT = rq->next;
+ if (rq->sem != NULL)
+ up(rq->sem);
+ rq->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+
+/* Push the request to the controller port */
+static int ahci_do_port_request(struct port *port, unsigned long long sector, struct request *rq)
+{
+ struct ahci_command *command = port->command;
+ struct ahci_cmd_tbl *prdtl = port->prdtl;
+ struct ahci_fis_h2d *fis_h2d;
+ unsigned slot = 0;
+ struct buffer_head *bh;
+ unsigned i;
+
+ rq->rq_status = RQ_SCSI_BUSY;
+
+ /* Shouldn't ever happen: the block glue is limited to 8 blocks */
+ assert(rq->nr_sectors < 0x10000);
+
+ fis_h2d = (void*) &prdtl[slot].cfis;
+ fis_h2d->fis_type = FIS_TYPE_REG_H2D;
+ fis_h2d->flags = 128;
+ if (port->lba48) {
+ if (sector >= 1ULL << 48) {
+ printk("sector %llu beyond LBA48\n", sector);
+ return -EOVERFLOW;
+ }
+ if (rq->cmd == READ)
+ fis_h2d->command = WIN_READDMA_EXT;
+ else
+ fis_h2d->command = WIN_WRITEDMA_EXT;
+ } else {
+ if (sector >= 1ULL << 28) {
+ printk("sector %llu beyond LBA28\n", sector);
+ return -EOVERFLOW;
+ }
+ if (rq->cmd == READ)
+ fis_h2d->command = WIN_READDMA;
+ else
+ fis_h2d->command = WIN_WRITEDMA;
+ }
+
+ fis_h2d->device = 1<<6; /* LBA */
+
+ fis_h2d->lba0 = sector;
+ fis_h2d->lba1 = sector >> 8;
+ fis_h2d->lba2 = sector >> 16;
+
+ fis_h2d->lba3 = sector >> 24;
+ fis_h2d->lba4 = sector >> 32;
+ fis_h2d->lba5 = sector >> 40;
+
+ fis_h2d->countl = rq->nr_sectors;
+ fis_h2d->counth = rq->nr_sectors >> 8;
+
+ command[slot].opts = sizeof(*fis_h2d) / sizeof(u32);
+
+ if (rq->cmd == WRITE)
+ command[slot].opts |= AHCI_CMD_WRITE;
+
+ for (i = 0, bh = rq->bh; bh; i++, bh = bh->b_reqnext)
+ {
+ assert(i < PRDTL_SIZE);
+ assert((((unsigned long) bh->b_data) & ~PAGE_MASK) ==
+ (((unsigned long) bh->b_data + bh->b_size - 1) & ~PAGE_MASK));
+ prdtl[slot].prdtl[i].dbau = 0;
+ prdtl[slot].prdtl[i].dba = vmtophys(bh->b_data);
+ prdtl[slot].prdtl[i].dbc = bh->b_size - 1;
+ }
+
+ command[slot].opts |= i << 16;
+
+ /* Make sure main memory buffers are up to date */
+ mb();
+
+ /* Issue command */
+ writel(1 << slot, &port->ahci_port->ci);
+
+ /* TODO: IRQ timeout handler */
+ return 0;
+}
+
+/* Called by block core to push a request */
+/* TODO: ideally, would have one request queue per port */
+/* TODO: ideally, would use tags to process several requests at a time */
+static void ahci_do_request() /* invoked with cli() */
+{
+ struct request *rq;
+ unsigned minor, unit;
+ unsigned long long block, blockend;
+ struct port *port;
+
+ rq = CURRENT;
+ if (!rq)
+ return;
+
+ if (rq->rq_status != RQ_ACTIVE)
+ /* Current one is already ongoing, let the interrupt handler
+ * push the new one when the current one is finished. */
+ return;
+
+ if (MAJOR(rq->rq_dev) != MAJOR_NR) {
+ printk("bad ahci major %u\n", MAJOR(rq->rq_dev));
+ goto kill_rq;
+ }
+
+ minor = MINOR(rq->rq_dev);
+ unit = minor >> PARTN_BITS;
+ if (unit >= MAX_PORTS) {
+ printk("bad ahci unit %u\n", unit);
+ goto kill_rq;
+ }
+
+ port = &ports[unit];
+
+ /* Compute start sector */
+ block = rq->sector;
+ block += port->part[minor & PARTN_MASK].start_sect;
+
+ /* And check end */
+ blockend = block + rq->nr_sectors;
+ if (blockend < block) {
+ if (!rq->quiet)
+ printk("bad blockend %lu vs %lu\n", (unsigned long) blockend, (unsigned long) block);
+ goto kill_rq;
+ }
+ if (blockend > port->capacity) {
+ if (!rq->quiet)
+ {
+ printk("offset for %u was %lu\n", minor, port->part[minor & PARTN_MASK].start_sect);
+ printk("bad access: block %lu, count= %lu\n", (unsigned long) blockend, (unsigned long) port->capacity);
+ }
+ goto kill_rq;
+ }
+
+ /* Push this to the port */
+ if (ahci_do_port_request(port, block, rq))
+ goto kill_rq;
+ return;
+
+kill_rq:
+ ahci_end_request(0);
+}
+
+/* The given port got an interrupt, terminate the current request if any */
+static void ahci_port_interrupt(struct port *port, u32 status)
+{
+ unsigned slot = 0;
+
+ if (readl(&port->ahci_port->ci) & (1 << slot)) {
+ /* Command still pending */
+ return;
+ }
+
+ if (port->identify) {
+ port->status = status;
+ wake_up(&port->q);
+ return;
+ }
+
+ if (!CURRENT || CURRENT->rq_status != RQ_SCSI_BUSY) {
+ /* No request currently running */
+ return;
+ }
+
+ if (status & (PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_IF_NONFATAL)) {
+ printk("ahci error %x %x\n", status, readl(&port->ahci_port->tfd));
+ ahci_end_request(0);
+ return;
+ }
+
+ ahci_end_request(1);
+}
+
+/* Start of IRQ handler. Iterate over all ports for this host */
+static void ahci_interrupt (int irq, void *host, struct pt_regs *regs)
+{
+ struct port *port;
+ struct ahci_host *ahci_host = host;
+ u32 irq_mask;
+ u32 status;
+
+ irq_mask = readl(&ahci_host->is);
+
+ if (!irq_mask)
+ return;
+
+ for (port = &ports[0]; port < &ports[MAX_PORTS]; port++) {
+ if (port->ahci_host == ahci_host && (irq_mask & (1 << (port->ahci_port - ahci_host->ports)))) {
+ status = readl(&port->ahci_port->is);
+ /* Clear interrupt before possibly triggering others */
+ writel(status, &port->ahci_port->is);
+ ahci_port_interrupt (port, status);
+ }
+ }
+
+ if (CURRENT)
+ /* Still some requests, queue another one */
+ ahci_do_request();
+
+ /* Clear host after clearing ports */
+ writel(irq_mask, &ahci_host->is);
+
+ /* unlock */
+}
+
+static int ahci_ioctl (struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int major, unit;
+
+ if (!inode || !inode->i_rdev)
+ return -EINVAL;
+
+ major = MAJOR(inode->i_rdev);
+ if (major != MAJOR_NR)
+ return -ENOTTY;
+
+ unit = DEVICE_NR(inode->i_rdev);
+ if (unit >= MAX_PORTS)
+ return -EINVAL;
+
+ switch (cmd) {
+ case BLKRRPART:
+ if (!suser()) return -EACCES;
+ if (!ports[unit].gd)
+ return -EINVAL;
+ resetup_one_dev(ports[unit].gd, unit);
+ return 0;
+ default:
+ return -EPERM;
+ }
+}
+
+static int ahci_open (struct inode *inode, struct file *file)
+{
+ int target;
+
+ if (MAJOR(inode->i_rdev) != MAJOR_NR)
+ return -ENXIO;
+
+ target = MINOR(inode->i_rdev) >> PARTN_BITS;
+ if (target >= MAX_PORTS)
+ return -ENXIO;
+
+ if (!ports[target].ahci_port)
+ return -ENXIO;
+
+ return 0;
+}
+
+static void ahci_release (struct inode *inode, struct file *file)
+{
+}
+
+static int ahci_fsync (struct inode *inode, struct file *file)
+{
+ printk("fsync\n");
+ return -ENOSYS;
+}
+
+static struct file_operations ahci_fops = {
+ .lseek = NULL,
+ .read = block_read,
+ .write = block_write,
+ .readdir = NULL,
+ .select = NULL,
+ .ioctl = ahci_ioctl,
+ .mmap = NULL,
+ .open = ahci_open,
+ .release = ahci_release,
+ .fsync = ahci_fsync,
+ .fasync = NULL,
+ .check_media_change = NULL,
+ .revalidate = NULL,
+};
+
+/* Disk timed out while processing identify, interrupt ahci_probe_port */
+static void identify_timeout(unsigned long data)
+{
+ struct port *port = (void*) data;
+
+ wake_up(&port->q);
+}
+
+static struct timer_list identify_timer = { .function = identify_timeout };
+
+static int ahci_identify(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port, struct port *port, unsigned cmd)
+{
+ struct hd_driveid id;
+ struct ahci_fis_h2d *fis_h2d;
+ struct ahci_command *command = port->command;
+ struct ahci_cmd_tbl *prdtl = port->prdtl;
+ unsigned long flags;
+ unsigned slot;
+ unsigned long first_part;
+ unsigned long long timeout;
+ int ret = 0;
+
+ /* Identify device */
+ /* TODO: make this a request */
+ slot = 0;
+
+ fis_h2d = (void*) &prdtl[slot].cfis;
+ fis_h2d->fis_type = FIS_TYPE_REG_H2D;
+ fis_h2d->flags = 128;
+ fis_h2d->command = cmd;
+ fis_h2d->device = 0;
+
+ /* Fetch the 512 identify data */
+ memset(&id, 0, sizeof(id));
+
+ command[slot].opts = sizeof(*fis_h2d) / sizeof(u32);
+
+ first_part = PAGE_ALIGN((unsigned long) &id) - (unsigned long) &id;
+
+ if (first_part && first_part < sizeof(id)) {
+ /* split over two pages */
+
+ command[slot].opts |= (2 << 16);
+
+ prdtl[slot].prdtl[0].dbau = 0;
+ prdtl[slot].prdtl[0].dba = vmtophys((void*) &id);
+ prdtl[slot].prdtl[0].dbc = first_part - 1;
+ prdtl[slot].prdtl[1].dbau = 0;
+ prdtl[slot].prdtl[1].dba = vmtophys((void*) &id + first_part);
+ prdtl[slot].prdtl[1].dbc = sizeof(id) - first_part - 1;
+ }
+ else
+ {
+ command[slot].opts |= (1 << 16);
+
+ prdtl[slot].prdtl[0].dbau = 0;
+ prdtl[slot].prdtl[0].dba = vmtophys((void*) &id);
+ prdtl[slot].prdtl[0].dbc = sizeof(id) - 1;
+ }
+
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->tfd) & (BUSY_STAT | DRQ_STAT))
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for ready\n", port-ports);
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return 3;
+ }
+
+ save_flags(flags);
+ cli();
+
+ port->identify = 1;
+ port->status = 0;
+
+ /* Issue command */
+ mb();
+ writel(1 << slot, &ahci_port->ci);
+
+ timeout = jiffies + WAIT_MAX;
+ identify_timer.expires = timeout;
+ identify_timer.data = (unsigned long) port;
+ add_timer(&identify_timer);
+ while (!port->status) {
+ if (jiffies >= timeout) {
+ printk("sd%u: timeout waiting for identify\n", port-ports);
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ del_timer(&identify_timer);
+ restore_flags(flags);
+ return 3;
+ }
+ sleep_on(&port->q);
+ }
+ del_timer(&identify_timer);
+ restore_flags(flags);
+
+ if ((port->status & PORT_IRQ_TF_ERR) || readl(&ahci_port->is) & PORT_IRQ_TF_ERR)
+ {
+ /* Identify error */
+ port->capacity = 0;
+ port->lba48 = 0;
+ ret = 2;
+ } else {
+ memcpy(&port->id, &id, sizeof(id));
+ port->is_cd = 0;
+
+ ide_fixstring(id.model, sizeof(id.model), 1);
+ ide_fixstring(id.fw_rev, sizeof(id.fw_rev), 1);
+ ide_fixstring(id.serial_no, sizeof(id.serial_no), 1);
+ if (cmd == WIN_PIDENTIFY)
+ {
+ unsigned char type = (id.config >> 8) & 0x1f;
+
+ printk("sd%u: %s, ATAPI ", port - ports, id.model);
+ if (type == 5)
+ {
+ printk("unsupported CDROM drive\n");
+ port->is_cd = 1;
+ port->lba48 = 0;
+ port->capacity = 0;
+ }
+ else
+ {
+ printk("unsupported type %d\n", type);
+ port->lba48 = 0;
+ port->capacity = 0;
+ return 2;
+ }
+ return 0;
+ }
+
+ if (id.command_set_2 & (1U<<10))
+ {
+ port->lba48 = 1;
+ port->capacity = id.lba_capacity_2;
+ if (port->capacity >= (1ULL << 32))
+ {
+ port->capacity = (1ULL << 32) - 1;
+ printk("Warning: truncating disk size to 2TiB\n");
+ }
+ }
+ else
+ {
+ port->lba48 = 0;
+ port->capacity = id.lba_capacity;
+ if (port->capacity > (1ULL << 24))
+ {
+ port->capacity = (1ULL << 24);
+ printk("Warning: truncating disk size to 128GiB\n");
+ }
+ }
+ if (port->capacity/2048 >= 10240)
+ printk("sd%u: %s, %uGB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/(2048*1024)), id.buf_size/2);
+ else
+ printk("sd%u: %s, %uMB w/%dkB Cache\n", (unsigned) (port - ports), id.model, (unsigned) (port->capacity/2048), id.buf_size/2);
+ }
+ port->identify = 0;
+
+ return ret;
+}
+
+/* Probe one AHCI port */
+static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port)
+{
+ struct port *port;
+ void *mem;
+ unsigned cls = ((readl(&ahci_host->cap) >> 8) & 0x1f) + 1;
+ struct ahci_command *command;
+ struct ahci_fis *fis;
+ struct ahci_cmd_tbl *prdtl;
+ vm_size_t size =
+ cls * sizeof(*command)
+ + sizeof(*fis)
+ + cls * sizeof(*prdtl);
+ unsigned i;
+ unsigned long long timeout;
+
+ for (i = 0; i < MAX_PORTS; i++) {
+ if (!ports[i].ahci_port)
+ break;
+ }
+ if (i == MAX_PORTS)
+ return;
+ port = &ports[i];
+
+ /* Has to be 1K-aligned */
+ mem = vmalloc (size);
+ if (!mem)
+ return;
+ assert (!(((unsigned long) mem) & (1024-1)));
+ memset (mem, 0, size);
+
+ port->ahci_host = ahci_host;
+ port->ahci_port = ahci_port;
+ port->cls = cls;
+
+ port->command = command = mem;
+ port->fis = fis = (void*) command + cls * sizeof(*command);
+ port->prdtl = prdtl = (void*) fis + sizeof(*fis);
+
+ /* Stop commands */
+ writel(readl(&ahci_port->cmd) & ~PORT_CMD_START, &ahci_port->cmd);
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports));
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ writel(readl(&ahci_port->cmd) & ~PORT_CMD_FIS_RX, &ahci_port->cmd);
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_FIS_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for FIS completion\n", (unsigned) (port-ports));
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ /* We don't support 64bit */
+ /* Point controller to our buffers */
+ writel(0, &ahci_port->clbu);
+ writel(vmtophys((void*) command), &ahci_port->clb);
+ writel(0, &ahci_port->fbu);
+ writel(vmtophys((void*) fis), &ahci_port->fb);
+
+ /* Clear any previous interrupts */
+ writel(readl(&ahci_port->is), &ahci_port->is);
+ writel(1 << (ahci_port - ahci_host->ports), &ahci_host->is);
+
+ /* And activate them */
+ writel(DEF_PORT_IRQ, &ahci_port->ie);
+ writel(readl(&ahci_host->ghc) | HOST_IRQ_EN, &ahci_host->ghc);
+
+ for (i = 0; i < cls; i++)
+ {
+ command[i].ctbau = 0;
+ command[i].ctba = vmtophys((void*) &prdtl[i]);
+ }
+
+ /* Start commands */
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for list completion\n", (unsigned) (port-ports));
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ writel(readl(&ahci_port->cmd) | PORT_CMD_FIS_RX | PORT_CMD_START, &ahci_port->cmd);
+
+ /* if PxCMD.ATAPI is set, try ATAPI identify; otherwise try ATA identify first, then fall back to ATAPI */
+ if (readl(&ahci_port->cmd) & PORT_CMD_ATAPI ||
+ ahci_identify(ahci_host, ahci_port, port, WIN_IDENTIFY) >= 2)
+ ahci_identify(ahci_host, ahci_port, port, WIN_PIDENTIFY);
+}
+
+/* Probe one AHCI PCI device */
+static void ahci_probe_dev(unsigned char bus, unsigned char device)
+{
+ unsigned char hdrtype;
+ unsigned char dev, fun;
+ const volatile struct ahci_host *ahci_host;
+ const volatile struct ahci_port *ahci_port;
+ unsigned nports, n, i;
+ unsigned port_map;
+ unsigned bar;
+ unsigned char irq;
+
+ dev = PCI_SLOT(device);
+ fun = PCI_FUNC(device);
+
+ /* Get configuration */
+ if (pcibios_read_config_byte(bus, device, PCI_HEADER_TYPE, &hdrtype) != PCIBIOS_SUCCESSFUL) {
+ printk("ahci: %02x:%02x.%x: Can not read configuration", bus, dev, fun);
+ return;
+ }
+ /* Ignore multifunction bit */
+ hdrtype &= ~0x80;
+
+ if (hdrtype != 0) {
+ printk("ahci: %02x:%02x.%x: Unknown hdrtype %d\n", bus, dev, fun, hdrtype);
+ return;
+ }
+
+ if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_5, &bar) != PCIBIOS_SUCCESSFUL) {
+ printk("ahci: %02x:%02x.%x: Can not read BAR 5", bus, dev, fun);
+ return;
+ }
+ if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
+ printk("ahci: %02x:%02x.%x: BAR 5 is I/O?!", bus, dev, fun);
+ return;
+ }
+ bar &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ if (pcibios_read_config_byte(bus, device, PCI_INTERRUPT_LINE, &irq) != PCIBIOS_SUCCESSFUL) {
+ printk("ahci: %02x:%02x.%x: Can not read IRQ", bus, dev, fun);
+ return;
+ }
+
+ printk("AHCI SATA %02x:%02x.%x BAR 0x%x IRQ %u\n", bus, dev, fun, bar, irq);
+
+ /* Map mmio */
+ ahci_host = vremap(bar, 0x2000);
+
+ /* Request IRQ */
+ if (request_irq(irq, &ahci_interrupt, SA_SHIRQ, "ahci", (void*) ahci_host)) {
+ printk("ahci: %02x:%02x.%x: Can not get irq %u\n", bus, dev, fun, irq);
+ return;
+ }
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /* OK, we will handle it. Disable probing on legacy IDE ports it may have. */
+ for (i = 0; i < 6; i++)
+ {
+ unsigned mybar;
+ if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_0 + i*4, &mybar) == PCIBIOS_SUCCESSFUL) {
+ if (!(bar & PCI_BASE_ADDRESS_SPACE_IO))
+ /* Memory, don't care */
+ continue;
+ /* printk("ahci: %02x:%02x.%x: BAR %d is %x\n", bus, dev, fun, i, mybar); */
+ ide_disable_base(mybar & PCI_BASE_ADDRESS_IO_MASK);
+ }
+ }
+#endif
+
+ nports = (readl(&ahci_host->cap) & 0x1f) + 1;
+ port_map = readl(&ahci_host->pi);
+
+ for (n = 0, i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1U << i))
+ n++;
+
+ if (nports != n) {
+ printk("ahci: %02x:%02x.%x: Odd number of ports %u, assuming %u is correct\n", bus, dev, fun, n, nports);
+ port_map = 0;
+ }
+ if (!port_map) {
+ port_map = (1U << nports) - 1;
+ }
+
+ for (i = 0; i < AHCI_MAX_PORTS; i++) {
+ u32 ssts;
+ u8 det, ipm;
+
+ if (!(port_map & (1U << i)))
+ continue;
+
+ ahci_port = &ahci_host->ports[i];
+
+ ssts = readl(&ahci_port->ssts);
+ det = ssts & 0xf;
+ switch (det)
+ {
+ case 0x0:
+ /* Device not present */
+ continue;
+ case 0x1:
+ printk("ahci: %02x:%02x.%x: Port %u communication not established. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ case 0x3:
+ /* Present and communication established */
+ break;
+ case 0x4:
+ printk("ahci: %02x:%02x.%x: Port %u phy offline?!\n", bus, dev, fun, i);
+ continue;
+ default:
+ printk("ahci: %02x:%02x.%x: Unknown port %u DET %x\n", bus, dev, fun, i, det);
+ continue;
+ }
+
+ ipm = (ssts >> 8) & 0xf;
+ switch (ipm)
+ {
+ case 0x0:
+ /* Device not present */
+ continue;
+ case 0x1:
+ /* Active */
+ break;
+ case 0x2:
+ printk("ahci: %02x:%02x.%x: Port %u in Partial power management. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ case 0x6:
+ printk("ahci: %02x:%02x.%x: Port %u in Slumber power management. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ default:
+ printk("ahci: %02x:%02x.%x: Unknown port %u IPM %x\n", bus, dev, fun, i, ipm);
+ continue;
+ }
+
+ /* OK! Probe this port */
+ ahci_probe_port(ahci_host, ahci_port);
+ }
+}
+
+/* genhd callback to set size of disks */
+static void ahci_geninit(struct gendisk *gd)
+{
+ unsigned unit;
+ struct port *port;
+
+ for (unit = 0; unit < gd->nr_real; unit++) {
+ port = &ports[unit];
+ port->part[0].nr_sects = port->capacity;
+ if (!port->part[0].nr_sects)
+ port->part[0].nr_sects = -1;
+ }
+}
+
+/* Probe all AHCI PCI devices */
+void ahci_probe_pci(void)
+{
+ unsigned char bus, device;
+ unsigned short index;
+ int ret;
+ unsigned nports, unit, nminors;
+ struct port *port;
+ struct gendisk *gd, **gdp;
+ int *bs;
+
+ for (index = 0;
+ (ret = pcibios_find_class(PCI_CLASS_STORAGE_SATA_AHCI, index, &bus, &device)) == PCIBIOS_SUCCESSFUL;
+ index++)
+ {
+ /* Note: this prevents us from also having a SCSI controller.
+ * It shouldn't harm too much until we have proper hardware
+ * enumeration.
+ */
+ if (register_blkdev(MAJOR_NR, "sd", &ahci_fops) < 0)
+ printk("could not register ahci\n");
+ ahci_probe_dev(bus, device);
+ }
+
+ for (nports = 0, port = &ports[0]; port < &ports[MAX_PORTS]; port++)
+ if (port->ahci_port)
+ nports++;
+
+ nminors = nports * (1<<PARTN_BITS);
+
+ gd = kmalloc(sizeof(*gd), GFP_KERNEL);
+ gd->sizes = kmalloc(nminors * sizeof(*gd->sizes), GFP_KERNEL);
+ gd->part = kmalloc(nminors * sizeof(*gd->part), GFP_KERNEL);
+ bs = kmalloc(nminors * sizeof(*bs), GFP_KERNEL);
+
+ blksize_size[MAJOR_NR] = bs;
+ for (unit = 0; unit < nminors; unit++)
+ /* We prefer to transfer whole pages */
+ *bs++ = PAGE_SIZE;
+
+ memset(gd->part, 0, nminors * sizeof(*gd->part));
+
+ for (unit = 0; unit < nports; unit++) {
+ ports[unit].gd = gd;
+ ports[unit].part = &gd->part[unit << PARTN_BITS];
+ }
+
+ gd->major = MAJOR_NR;
+ gd->major_name = "sd";
+ gd->minor_shift = PARTN_BITS;
+ gd->max_p = 1<<PARTN_BITS;
+ gd->max_nr = nports;
+ gd->nr_real = nports;
+ gd->init = ahci_geninit;
+ gd->next = NULL;
+
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
+ ;
+ *gdp = gd;
+
+ blk_dev[MAJOR_NR].request_fn = ahci_do_request;
+}
diff --git a/linux/dev/drivers/block/floppy.c b/linux/dev/drivers/block/floppy.c
new file mode 100644
index 0000000..83d66f0
--- /dev/null
+++ b/linux/dev/drivers/block/floppy.c
@@ -0,0 +1,4288 @@
+/*
+ * linux/kernel/floppy.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1993, 1994 Alain Knaff
+ */
+/*
+ * 02.12.91 - Changed to static variables to indicate need for reset
+ * and recalibrate. This makes some things easier (output_byte reset
+ * checking etc), and means less interrupt jumping in case of errors,
+ * so the code is hopefully easier to understand.
+ */
+
+/*
+ * This file is certainly a mess. I've tried my best to get it working,
+ * but I don't like programming floppies, and I have only one anyway.
+ * Urgel. I should check for more errors, and do more graceful error
+ * recovery. Seems there are problems with several drives. I've tried to
+ * correct them. No promises.
+ */
+
+/*
+ * As with hd.c, all routines within this file can (and will) be called
+ * by interrupts, so extreme caution is needed. A hardware interrupt
+ * handler may not sleep, or a kernel panic will happen. Thus I cannot
+ * call "floppy-on" directly, but have to set a special timer interrupt
+ * etc.
+ */
+
+/*
+ * 28.02.92 - made track-buffering routines, based on the routines written
+ * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
+ */
+
+/*
+ * Automatic floppy-detection and formatting written by Werner Almesberger
+ * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
+ * the floppy-change signal detection.
+ */
+
+/*
+ * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
+ * FDC data overrun bug, added some preliminary stuff for vertical
+ * recording support.
+ *
+ * 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
+ *
+ * TODO: Errors are still not counted properly.
+ */
+
+/* 1992/9/20
+ * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
+ * modeled after the freeware MS-DOS program fdformat/88 V1.8 by
+ * Christoph H. Hochst\"atter.
+ * I have fixed the shift values to the ones I always use. Maybe a new
+ * ioctl() should be created to be able to modify them.
+ * There is a bug in the driver that makes it impossible to format a
+ * floppy as the first thing after bootup.
+ */
+
+/*
+ * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
+ * this helped the floppy driver as well. Much cleaner, and still seems to
+ * work.
+ */
+
+/* 1994/6/24 --bbroad-- added the floppy table entries and made
+ * minor modifications to allow 2.88 floppies to be run.
+ */
+
+/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
+ * disk types.
+ */
+
+/*
+ * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
+ * format bug fixes, but unfortunately some new bugs too...
+ */
+
+/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
+ * errors to allow safe writing by specialized programs.
+ */
+
+/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
+ * by defining bit 1 of the "stretch" parameter to mean put sectors on the
+ * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
+ * drives are "upside-down").
+ */
+
+/*
+ * 1995/8/26 -- Andreas Busse -- added Mips support.
+ */
+
+/*
+ * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
+ * features to asm/floppy.h.
+ */
+
+
+#define FLOPPY_SANITY_CHECK
+#undef FLOPPY_SILENT_DCL_CLEAR
+
+#define REALLY_SLOW_IO
+
+#define DEBUGT 2
+#define DCL_DEBUG /* debug disk change line */
+
+/* do print messages for unexpected interrupts */
+static int print_unex=1;
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+/* the following is the mask of allowed drives. By default units 2 and
+ * 3 of both floppy controllers are disabled, because switching on the
+ * motor of these drives causes system hangs on some PCI computers. drive
+ * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
+ * a drive is allowed. */
+static int FLOPPY_IRQ=6;
+static int FLOPPY_DMA=2;
+static int allowed_drive_mask = 0x33;
+
+static int irqdma_allocated = 0;
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/tqueue.h>
+#define FDPATCHES
+#include <linux/fdreg.h>
+
+
+#include <linux/fd.h>
+
+
+#define OLDFDRAWCMD 0x020d /* send a raw command to the FDC */
+
+struct old_floppy_raw_cmd {
+ void *data;
+ long length;
+
+ unsigned char rate;
+ unsigned char flags;
+ unsigned char cmd_count;
+ unsigned char cmd[9];
+ unsigned char reply_count;
+ unsigned char reply[7];
+ int track;
+};
+
+#include <linux/errno.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+
+static int use_virtual_dma=0; /* virtual DMA for Intel */
+static unsigned short virtual_dma_port=0x3f0;
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static int set_dor(int fdc, char mask, char data);
+static inline int __get_order(unsigned long size);
+#include <asm/floppy.h>
+
+
+#define MAJOR_NR FLOPPY_MAJOR
+
+#include <linux/blk.h>
+#include <linux/cdrom.h> /* for the compatibility eject ioctl */
+
+#include <linux/dev/glue/glue.h>
+
+
+#ifndef FLOPPY_MOTOR_MASK
+#define FLOPPY_MOTOR_MASK 0xf0
+#endif
+
+#ifndef fd_get_dma_residue
+#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
+#endif
+
+/* Dma Memory related stuff */
+
+/* Pure 2^n version of get_order */
+static inline int __get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
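+
+/* Examples (illustration only, assuming PAGE_SHIFT == 12, i.e. 4 KiB pages):
+ * __get_order(4096) == 0, __get_order(8192) == 1 and __get_order(20000) == 3,
+ * i.e. the smallest order such that 2^order pages cover the requested size. */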
+
+#ifndef fd_dma_mem_free
+#define fd_dma_mem_free(addr, size) free_pages(addr, __get_order(size))
+#endif
+
+#ifndef fd_dma_mem_alloc
+#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,__get_order(size))
+#endif
+
+/* End dma memory related stuff */
+
+static unsigned int fake_change = 0;
+static int initialising=1;
+
+static inline int TYPE(kdev_t x) {
+ return (MINOR(x)>>2) & 0x1f;
+}
+static inline int DRIVE(kdev_t x) {
+ return (MINOR(x)&0x03) | ((MINOR(x)&0x80) >> 5);
+}
+#define ITYPE(x) (((x)>>2) & 0x1f)
+#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
+#define UNIT(x) ((x) & 0x03) /* drive on fdc */
+#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
+#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
+ /* reverse mapping from unit and fdc to drive */
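+/* Worked example of the mapping above (illustration only): drive 5 is
+ * unit UNIT(5) == 1 on controller FDC(5) == 1, its base minor is
+ * TOMINOR(5) == 0x81, and DRIVE() maps a device with minor 0x81 back to
+ * drive 5; REVDRIVE(1, 1) == 5 likewise recovers the drive number. */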
+#define DP (&drive_params[current_drive])
+#define DRS (&drive_state[current_drive])
+#define DRWE (&write_errors[current_drive])
+#define FDCS (&fdc_state[fdc])
+#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags))
+#define SETF(x) (set_bit(x##_BIT, &DRS->flags))
+#define TESTF(x) (test_bit(x##_BIT, &DRS->flags))
+
+#define UDP (&drive_params[drive])
+#define UDRS (&drive_state[drive])
+#define UDRWE (&write_errors[drive])
+#define UFDCS (&fdc_state[FDC(drive)])
+#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags))
+#define USETF(x) (set_bit(x##_BIT, &UDRS->flags))
+#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags))
+
+#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args)
+
+#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2)
+#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
+
+#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x)))
+
+#define INT_OFF save_flags(flags); cli()
+#define INT_ON restore_flags(flags)
+
+/* read/write */
+#define COMMAND raw_cmd->cmd[0]
+#define DR_SELECT raw_cmd->cmd[1]
+#define TRACK raw_cmd->cmd[2]
+#define HEAD raw_cmd->cmd[3]
+#define SECTOR raw_cmd->cmd[4]
+#define SIZECODE raw_cmd->cmd[5]
+#define SECT_PER_TRACK raw_cmd->cmd[6]
+#define GAP raw_cmd->cmd[7]
+#define SIZECODE2 raw_cmd->cmd[8]
+#define NR_RW 9
+
+/* format */
+#define F_SIZECODE raw_cmd->cmd[2]
+#define F_SECT_PER_TRACK raw_cmd->cmd[3]
+#define F_GAP raw_cmd->cmd[4]
+#define F_FILL raw_cmd->cmd[5]
+#define NR_F 6
+
+/*
+ * Maximum disk size (in kilobytes). This default is used whenever the
+ * current disk size is unknown.
+ * [Now it is rather a minimum]
+ */
+#define MAX_DISK_SIZE 4 /* 3984*/
+
+#define K_64 0x10000 /* 64KB */
+
+/*
+ * globals used by 'result()'
+ */
+#define MAX_REPLIES 16
+static unsigned char reply_buffer[MAX_REPLIES];
+static int inr; /* size of reply buffer, when called from interrupt */
+#define ST0 (reply_buffer[0])
+#define ST1 (reply_buffer[1])
+#define ST2 (reply_buffer[2])
+#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
+#define R_TRACK (reply_buffer[3])
+#define R_HEAD (reply_buffer[4])
+#define R_SECTOR (reply_buffer[5])
+#define R_SIZECODE (reply_buffer[6])
+
+#define SEL_DLY (2*HZ/100)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+/*
+ * this struct defines the different floppy drive types.
+ */
+static struct {
+ struct floppy_drive_params params;
+ const char *name; /* name printed while booting */
+} default_drive_params[]= {
+/* NOTE: the time values in jiffies should be in msec!
+ CMOS drive type
+ | Maximum data rate supported by drive type
+ | | Head load time, msec
+ | | | Head unload time, msec (not used)
+ | | | | Step rate interval, usec
+ | | | | | Time needed for spinup time (jiffies)
+ | | | | | | Timeout for spinning down (jiffies)
+ | | | | | | | Spindown offset (where disk stops)
+ | | | | | | | | Select delay
+ | | | | | | | | | RPS
+ | | | | | | | | | | Max number of tracks
+ | | | | | | | | | | | Interrupt timeout
+ | | | | | | | | | | | | Max nonintlv. sectors
+ | | | | | | | | | | | | | -Max Errors- flags */
+{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
+
+{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
+
+{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
+
+{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
+
+{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
+
+{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
+
+{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
+/* | --autodetected formats--- | | |
+ * read_track | | Name printed when booting
+ * | Native format
+ * Frequency of disk change checks */
+};
+
+static struct floppy_drive_params drive_params[N_DRIVE];
+static struct floppy_drive_struct drive_state[N_DRIVE];
+static struct floppy_write_errors write_errors[N_DRIVE];
+static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
+
+/*
+ * This struct defines the different floppy types.
+ *
+ * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
+ * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
+ * tells if the disk is in Commodore 1581 format, which means side 0 sectors
+ * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
+ * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
+ * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
+ * side 0 is on physical side 0 (but with the misnamed sector IDs).
+ * 'stretch' should probably be renamed to something more general, like
+ * 'options'. Other parameters should be self-explanatory (see also
+ * setfdprm(8)).
+ */
+static struct floppy_struct floppy_type[32] = {
+ { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
+ { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
+ { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
+ { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
+ { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
+ { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
+ { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
+ { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
+ { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
+ { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120"}, /* 9 3.12MB 3.5" */
+
+ { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
+ { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
+ { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
+ { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
+ { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
+ { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
+ { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
+ { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
+ { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
+ { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
+
+ { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
+ { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
+ { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
+ { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
+ { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
+ { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
+ { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
+ { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
+
+ { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
+ { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
+};
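+/*
+ * Illustrative sketch (not compiled): how the 'stretch' bits described
+ * above are consumed by the STRETCH() and PH_HEAD() macros, assuming
+ * FD_STRETCH is bit 0 as the comment describes.  Entry 5 ("h360", a
+ * 360 KB disk in a 1.2 MB drive) has stretch == 1, so every logical
+ * track is doubled when seeking; a format with bit 1 set would make
+ * PH_HEAD() invert the head bit written into the sector IDs.  The
+ * values follow directly from the macro arithmetic.
+ */
+#if 0
+static void stretch_example(void)
+{
+	struct floppy_struct *f = &floppy_type[5];	/* "h360" */
+
+	/* logical track 20 becomes physical track 40 on this format */
+	int phys_track = 20 << STRETCH(f);		/* == 40 */
+
+	/* bit 1 is clear here, so the head bit for DR_SELECT is unchanged */
+	int head_bits = PH_HEAD(f, 0);			/* == 0 */
+
+	printk("track=%d head_bits=%d\n", phys_track, head_bits);
+}
+#endif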
+
+#define NUMBER(x) (sizeof(x) / sizeof(*(x)))
+#define SECTSIZE (_FD_SECTSIZE(*floppy))
+
+/* Auto-detection: Disk type used until the next media change occurs. */
+static struct floppy_struct *current_type[N_DRIVE] = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+/*
+ * User-provided type information. current_type points to
+ * the respective entry of this array.
+ */
+static struct floppy_struct user_params[N_DRIVE];
+
+static int floppy_sizes[256];
+static int floppy_blocksizes[256] = { 0, };
+
+/*
+ * The driver is trying to determine the correct media format
+ * while probing is set. rw_interrupt() clears it after a
+ * successful access.
+ */
+static int probing = 0;
+
+/* Synchronization of FDC access. */
+#define FD_COMMAND_NONE -1
+#define FD_COMMAND_ERROR 2
+#define FD_COMMAND_OKAY 3
+
+static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0;
+static struct wait_queue *fdc_wait = NULL, *command_done = NULL;
+#ifdef MACH
+#define NO_SIGNAL (! issig () || ! interruptible)
+#else
+#define NO_SIGNAL (!(current->signal & ~current->blocked) || !interruptible)
+#endif
+#define CALL(x) if ((x) == -EINTR) return -EINTR
+#define ECALL(x) if ((ret = (x))) return ret;
+#define _WAIT(x,i) CALL(ret=wait_til_done((x),i))
+#define WAIT(x) _WAIT((x),interruptible)
+#define IWAIT(x) _WAIT((x),1)
+
+/* Errors during formatting are counted here. */
+static int format_errors;
+
+/* Format request descriptor. */
+static struct format_descr format_req;
+
+/*
+ * Rate is 0 for 500 kbps, 1 for 300 kbps, 2 for 250 kbps
+ * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
+ * H is head unload time (1=16ms, 2=32ms, etc)
+ */
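+/*
+ * Worked decoding of the encoding above (illustrative): a spec1 byte of
+ * 0xD4 means S == 0xD, i.e. a 3 ms step rate following the F=1ms, E=2ms,
+ * D=3ms pattern, and H == 4, i.e. 4 * 16 ms == 64 ms head unload time;
+ * a rate byte of 2 selects 250 kbps.  The spec bytes actually sent to
+ * the controller are computed in fdc_specify() below.
+ */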
+
+/*
+ * Track buffer
+ * Because these are written to by the DMA controller, they must
+ * not contain a 64k byte boundary crossing, or data will be
+ * corrupted/lost.
+ */
+static char *floppy_track_buffer=0;
+static int max_buffer_sectors=0;
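+/*
+ * Illustrative sketch (not compiled) of the constraint described above.
+ * CROSS_64KB() comes from <asm/floppy.h>; the address is made up for the
+ * example.  A 4 KB transfer starting at physical 0xF800 would end at
+ * 0x107FF and thus cross the 0x10000 boundary, which the ISA DMA
+ * controller cannot handle in a single transfer.
+ */
+#if 0
+	if (CROSS_64KB((void *) 0xF800, 4096))
+		printk("transfer would wrap the DMA page register\n");
+#endif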
+
+static int *errors;
+typedef void (*done_f)(int);
+static struct cont_t {
+ void (*interrupt)(void); /* this is called after the interrupt of the
+ * main command */
+ void (*redo)(void); /* this is called to retry the operation */
+ void (*error)(void); /* this is called to tally an error */
+ done_f done; /* this is called to say if the operation has
+ * succeeded/failed */
+} *cont=NULL;
+
+static void floppy_ready(void);
+static void floppy_start(void);
+static void process_fd_request(void);
+static void recalibrate_floppy(void);
+static void floppy_shutdown(void);
+
+static int floppy_grab_irq_and_dma(void);
+static void floppy_release_irq_and_dma(void);
+
+/*
+ * The "reset" variable should be tested whenever an interrupt is scheduled,
+ * after the commands have been sent. This is to ensure that the driver doesn't
+ * get wedged when the interrupt doesn't come because of a failed command.
+ * reset doesn't need to be tested before sending commands, because
+ * output_byte is automatically disabled when reset is set.
+ */
+#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } }
+static void reset_fdc(void);
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+#define NO_TRACK -1
+#define NEED_1_RECAL -2
+#define NEED_2_RECAL -3
+
+/* */
+static int usage_count = 0;
+
+
+/* buffer related variables */
+static int buffer_track = -1;
+static int buffer_drive = -1;
+static int buffer_min = -1;
+static int buffer_max = -1;
+
+/* fdc related variables, should end up in a struct */
+static struct floppy_fdc_state fdc_state[N_FDC];
+static int fdc; /* current fdc */
+
+static struct floppy_struct *_floppy = floppy_type;
+static unsigned char current_drive = 0;
+static long current_count_sectors = 0;
+static unsigned char sector_t; /* sector in track */
+
+#ifndef fd_eject
+#define fd_eject(x) -EINVAL
+#endif
+
+
+#ifdef DEBUGT
+static long unsigned debugtimer;
+#endif
+
+/*
+ * Debugging
+ * =========
+ */
+static inline void set_debugt(void)
+{
+#ifdef DEBUGT
+ debugtimer = jiffies;
+#endif
+}
+
+static inline void debugt(const char *message)
+{
+#ifdef DEBUGT
+ if (DP->flags & DEBUGT)
+ printk("%s dtime=%lu\n", message, jiffies-debugtimer);
+#endif
+}
+
+typedef void (*timeout_fn)(unsigned long);
+static struct timer_list fd_timeout ={ NULL, NULL, 0, 0,
+ (timeout_fn) floppy_shutdown };
+
+static const char *timeout_message;
+
+#ifdef FLOPPY_SANITY_CHECK
+static void is_alive(const char *message)
+{
+ /* this routine checks whether the floppy driver is "alive" */
+ if (fdc_busy && command_status < 2 && !fd_timeout.prev){
+ DPRINT("timeout handler died: %s\n",message);
+ }
+}
+#endif
+
+#ifdef FLOPPY_SANITY_CHECK
+
+#define OLOGSIZE 20
+
+static void (*lasthandler)(void) = NULL;
+static int interruptjiffies=0;
+static int resultjiffies=0;
+static int resultsize=0;
+static int lastredo=0;
+
+static struct output_log {
+ unsigned char data;
+ unsigned char status;
+ unsigned long jiffies;
+} output_log[OLOGSIZE];
+
+static int output_log_pos=0;
+#endif
+
+#define CURRENTD -1
+#define MAXTIMEOUT -2
+
+static void reschedule_timeout(int drive, const char *message, int marg)
+{
+ if (drive == CURRENTD)
+ drive = current_drive;
+ del_timer(&fd_timeout);
+	if (drive < 0 || drive >= N_DRIVE) {
+ fd_timeout.expires = jiffies + 20*HZ;
+ drive=0;
+ } else
+ fd_timeout.expires = jiffies + UDP->timeout;
+ add_timer(&fd_timeout);
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("reschedule timeout ");
+ printk(message, marg);
+ printk("\n");
+ }
+ timeout_message = message;
+}
+
+static int maximum(int a, int b)
+{
+ if(a > b)
+ return a;
+ else
+ return b;
+}
+#define INFBOUND(a,b) (a)=maximum((a),(b));
+
+static int minimum(int a, int b)
+{
+ if(a < b)
+ return a;
+ else
+ return b;
+}
+#define SUPBOUND(a,b) (a)=minimum((a),(b));
+
+
+/*
+ * Bottom half floppy driver.
+ * ==========================
+ *
+ * This part of the file contains the code talking directly to the hardware,
+ * and also the main service loop (seek-configure-spinup-command)
+ */
+
+/*
+ * disk change.
+ * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
+ * and the last_checked date.
+ *
+ * last_checked is the date of the last check which showed 'no disk change'
+ * FD_DISK_CHANGE is set under two conditions:
+ * 1. The floppy has been changed after some i/o to that floppy already
+ * took place.
+ * 2. No floppy disk is in the drive. This is done in order to ensure that
+ * requests are quickly flushed in case there is no disk in the drive. It
+ * follows that FD_DISK_CHANGE can only be cleared if there is a disk in
+ * the drive.
+ *
+ * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
+ * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
+ * each seek. If a disk is present, the disk change line should also be
+ * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
+ * change line is set, this means either that no disk is in the drive, or
+ * that it has been removed since the last seek.
+ *
+ * This means that we really have a third possibility too:
+ * The floppy has been changed after the last seek.
+ */
+
+static int disk_change(int drive)
+{
+ int fdc=FDC(drive);
+#ifdef FLOPPY_SANITY_CHECK
+ if (jiffies - UDRS->select_date < UDP->select_delay)
+ DPRINT("WARNING disk change called early\n");
+ if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
+ (FDCS->dor & 3) != UNIT(drive) ||
+ fdc != FDC(drive)){
+ DPRINT("probing disk change on unselected drive\n");
+ DPRINT("drive=%d fdc=%d dor=%x\n",drive, FDC(drive),
+ FDCS->dor);
+ }
+#endif
+
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("checking disk change line for drive %d\n",drive);
+ DPRINT("jiffies=%ld\n", jiffies);
+ DPRINT("disk change line=%x\n",fd_inb(FD_DIR)&0x80);
+ DPRINT("flags=%x\n",UDRS->flags);
+ }
+#endif
+ if (UDP->flags & FD_BROKEN_DCL)
+ return UTESTF(FD_DISK_CHANGED);
+ if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80){
+ USETF(FD_VERIFY); /* verify write protection */
+ if (UDRS->maxblock){
+ /* mark it changed */
+ USETF(FD_DISK_CHANGED);
+ }
+
+ /* invalidate its geometry */
+ if (UDRS->keep_data >= 0) {
+ if ((UDP->flags & FTD_MSG) &&
+ current_type[drive] != NULL)
+ DPRINT("Disk type is undefined after "
+ "disk change\n");
+ current_type[drive] = NULL;
+ floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE;
+ }
+
+ /*USETF(FD_DISK_NEWCHANGE);*/
+ return 1;
+ } else {
+ UDRS->last_checked=jiffies;
+ UCLEARF(FD_DISK_NEWCHANGE);
+ }
+ return 0;
+}
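+/*
+ * Compact recap of what disk_change() above does (descriptive only, the
+ * code is authoritative):
+ *
+ *   FD_BROKEN_DCL set          -> just report the cached FD_DISK_CHANGED
+ *   DCL active, maxblock != 0  -> set FD_VERIFY and FD_DISK_CHANGED, drop
+ *                                 the geometry (if keep_data >= 0), return 1
+ *   DCL active, maxblock == 0  -> set FD_VERIFY, likewise drop the geometry,
+ *                                 return 1 (no i/o has happened yet)
+ *   DCL inactive               -> update last_checked, clear
+ *                                 FD_DISK_NEWCHANGE, return 0
+ */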
+
+static inline int is_selected(int dor, int unit)
+{
+ return ((dor & (0x10 << unit)) && (dor &3) == unit);
+}
+
+static int set_dor(int fdc, char mask, char data)
+{
+ register unsigned char drive, unit, newdor,olddor;
+
+ if (FDCS->address == -1)
+ return -1;
+
+ olddor = FDCS->dor;
+ newdor = (olddor & mask) | data;
+ if (newdor != olddor){
+ unit = olddor & 0x3;
+ if (is_selected(olddor, unit) && !is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("calling disk change from set_dor\n");
+ }
+#endif
+ disk_change(drive);
+ }
+ FDCS->dor = newdor;
+ fd_outb(newdor, FD_DOR);
+
+ unit = newdor & 0x3;
+ if (!is_selected(olddor, unit) && is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+ UDRS->select_date = jiffies;
+ }
+ }
+
+ /* FIXME: we should be more graceful here */
+
+ if (newdor & FLOPPY_MOTOR_MASK)
+ floppy_grab_irq_and_dma();
+ if (olddor & FLOPPY_MOTOR_MASK)
+ floppy_release_irq_and_dma();
+ return olddor;
+}
+
+static void twaddle(void)
+{
+ if (DP->select_delay)
+ return;
+ fd_outb(FDCS->dor & ~(0x10<<UNIT(current_drive)),FD_DOR);
+ fd_outb(FDCS->dor, FD_DOR);
+ DRS->select_date = jiffies;
+}
+
+/* reset all driver information about the current fdc. This is needed after
+ * a reset, and after a raw command. */
+static void reset_fdc_info(int mode)
+{
+ int drive;
+
+ FDCS->spec1 = FDCS->spec2 = -1;
+ FDCS->need_configure = 1;
+ FDCS->perp_mode = 1;
+ FDCS->rawcmd = 0;
+ for (drive = 0; drive < N_DRIVE; drive++)
+ if (FDC(drive) == fdc &&
+ (mode || UDRS->track != NEED_1_RECAL))
+ UDRS->track = NEED_2_RECAL;
+}
+
+/* selects the fdc and drive, and enables the fdc's input/dma. */
+static void set_fdc(int drive)
+{
+ if (drive >= 0 && drive < N_DRIVE){
+ fdc = FDC(drive);
+ current_drive = drive;
+ }
+ if (fdc != 1 && fdc != 0) {
+ printk("bad fdc value\n");
+ return;
+ }
+ set_dor(fdc,~0,8);
+#if N_FDC > 1
+ set_dor(1-fdc, ~8, 0);
+#endif
+ if (FDCS->rawcmd == 2)
+ reset_fdc_info(1);
+ if (fd_inb(FD_STATUS) != STATUS_READY)
+ FDCS->reset = 1;
+}
+
+/* locks the driver */
+static int lock_fdc(int drive, int interruptible)
+{
+ unsigned long flags;
+
+ if (!usage_count){
+ printk(KERN_ERR "trying to lock fdc while usage count=0\n");
+ return -1;
+ }
+ if(floppy_grab_irq_and_dma()==-1)
+ return -EBUSY;
+ INT_OFF;
+ while (fdc_busy && NO_SIGNAL)
+ interruptible_sleep_on(&fdc_wait);
+ if (fdc_busy){
+ INT_ON;
+ return -EINTR;
+ }
+ fdc_busy = 1;
+ INT_ON;
+ command_status = FD_COMMAND_NONE;
+ reschedule_timeout(drive, "lock fdc", 0);
+ set_fdc(drive);
+ return 0;
+}
+
+#define LOCK_FDC(drive,interruptible) \
+if (lock_fdc(drive,interruptible)) return -EINTR;
+
+
+/* unlocks the driver */
+static inline void unlock_fdc(void)
+{
+ raw_cmd = 0;
+ if (!fdc_busy)
+ DPRINT("FDC access conflict!\n");
+
+ if (DEVICE_INTR)
+ DPRINT("device interrupt still active at FDC release: %p!\n",
+ DEVICE_INTR);
+ command_status = FD_COMMAND_NONE;
+ del_timer(&fd_timeout);
+ cont = NULL;
+ fdc_busy = 0;
+ floppy_release_irq_and_dma();
+ wake_up(&fdc_wait);
+}
+
+/* switches the motor off after a given timeout */
+static void motor_off_callback(unsigned long nr)
+{
+ unsigned char mask = ~(0x10 << UNIT(nr));
+
+ set_dor(FDC(nr), mask, 0);
+}
+
+static struct timer_list motor_off_timer[N_DRIVE] = {
+ { NULL, NULL, 0, 0, motor_off_callback },
+ { NULL, NULL, 0, 1, motor_off_callback },
+ { NULL, NULL, 0, 2, motor_off_callback },
+ { NULL, NULL, 0, 3, motor_off_callback },
+ { NULL, NULL, 0, 4, motor_off_callback },
+ { NULL, NULL, 0, 5, motor_off_callback },
+ { NULL, NULL, 0, 6, motor_off_callback },
+ { NULL, NULL, 0, 7, motor_off_callback }
+};
+
+/* schedules motor off */
+static void floppy_off(unsigned int drive)
+{
+ unsigned long volatile delta;
+ register int fdc=FDC(drive);
+
+ if (!(FDCS->dor & (0x10 << UNIT(drive))))
+ return;
+
+ del_timer(motor_off_timer+drive);
+
+ /* make spindle stop in a position which minimizes spinup time
+ * next time */
+ if (UDP->rps){
+ delta = jiffies - UDRS->first_read_date + HZ -
+ UDP->spindown_offset;
+ delta = ((delta * UDP->rps) % HZ) / UDP->rps;
+ motor_off_timer[drive].expires = jiffies + UDP->spindown - delta;
+ }
+ add_timer(motor_off_timer+drive);
+}
+
+/*
+ * Cycle through all N_DRIVE floppy drives, for disk change testing,
+ * stopping at the current drive. This is done before any long operation,
+ * to be sure to have up-to-date disk change information.
+ */
+static void scandrives(void)
+{
+ int i, drive, saved_drive;
+
+ if (DP->select_delay)
+ return;
+
+ saved_drive = current_drive;
+ for (i=0; i < N_DRIVE; i++){
+ drive = (saved_drive + i + 1) % N_DRIVE;
+ if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
+ continue; /* skip closed drives */
+ set_fdc(drive);
+ if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
+ (0x10 << UNIT(drive))))
+ /* switch the motor off again, if it was off to
+ * begin with */
+ set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
+ }
+ set_fdc(saved_drive);
+}
+
+static void empty(void)
+{
+}
+
+static struct tq_struct floppy_tq =
+{ 0, 0, 0, 0 };
+
+static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 };
+
+static void cancel_activity(void)
+{
+ CLEAR_INTR;
+ floppy_tq.routine = (void *)(void *) empty;
+ del_timer(&fd_timer);
+}
+
+/* this function makes sure that the disk stays in the drive during the
+ * transfer */
+static void fd_watchdog(void)
+{
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from watchdog\n");
+ }
+#endif
+
+ if (disk_change(current_drive)){
+ DPRINT("disk removed during i/o\n");
+ cancel_activity();
+ cont->done(0);
+ reset_fdc();
+ } else {
+ del_timer(&fd_timer);
+ fd_timer.function = (timeout_fn) fd_watchdog;
+ fd_timer.expires = jiffies + HZ / 10;
+ add_timer(&fd_timer);
+ }
+}
+
+static void main_command_interrupt(void)
+{
+ del_timer(&fd_timer);
+ cont->interrupt();
+}
+
+/* waits for a delay (spinup or select) to pass */
+static int wait_for_completion(int delay, timeout_fn function)
+{
+ if (FDCS->reset){
+ reset_fdc(); /* do the reset during sleep to win time
+			      * if we don't need to sleep; it's a good
+			      * occasion anyway */
+ return 1;
+ }
+
+ if ((signed) (jiffies - delay) < 0){
+ del_timer(&fd_timer);
+ fd_timer.function = function;
+ fd_timer.expires = delay;
+ add_timer(&fd_timer);
+ return 1;
+ }
+ return 0;
+}
+
+static int hlt_disabled=0;
+static void floppy_disable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (!hlt_disabled){
+ hlt_disabled=1;
+#ifdef HAVE_DISABLE_HLT
+ disable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+static void floppy_enable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (hlt_disabled){
+ hlt_disabled=0;
+#ifdef HAVE_DISABLE_HLT
+ enable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+
+static void setup_DMA(void)
+{
+ unsigned long flags;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (raw_cmd->length == 0){
+ int i;
+
+ printk("zero dma transfer size:");
+ for (i=0; i < raw_cmd->cmd_count; i++)
+ printk("%x,", raw_cmd->cmd[i]);
+ printk("\n");
+ cont->done(0);
+ FDCS->reset = 1;
+ return;
+ }
+ if ((long) raw_cmd->kernel_data % 512){
+ printk("non aligned address: %p\n", raw_cmd->kernel_data);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+ if (CROSS_64KB(raw_cmd->kernel_data, raw_cmd->length)) {
+ printk("DMA crossing 64-K boundary %p-%p\n",
+ raw_cmd->kernel_data,
+ raw_cmd->kernel_data + raw_cmd->length);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+#endif
+ INT_OFF;
+ fd_disable_dma();
+ fd_clear_dma_ff();
+ fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
+ fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ)?
+ DMA_MODE_READ : DMA_MODE_WRITE);
+ fd_set_dma_addr(virt_to_bus(raw_cmd->kernel_data));
+ fd_set_dma_count(raw_cmd->length);
+ virtual_dma_port = FDCS->address;
+ fd_enable_dma();
+ INT_ON;
+ floppy_disable_hlt();
+}
+
+void show_floppy(void);
+
+/* waits until the fdc becomes ready */
+static int wait_til_ready(void)
+{
+ int counter, status;
+ if(FDCS->reset)
+ return -1;
+ for (counter = 0; counter < 10000; counter++) {
+ status = fd_inb(FD_STATUS);
+ if (status & STATUS_READY)
+ return status;
+ }
+ if (!initialising) {
+ DPRINT("Getstatus times out (%x) on fdc %d\n",
+ status, fdc);
+ show_floppy();
+ }
+ FDCS->reset = 1;
+ return -1;
+}
+
+/* sends a command byte to the fdc */
+static int output_byte(char byte)
+{
+ int status;
+
+ if ((status = wait_til_ready()) < 0)
+ return -1;
+ if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY){
+ fd_outb(byte,FD_DATA);
+#ifdef FLOPPY_SANITY_CHECK
+ output_log[output_log_pos].data = byte;
+ output_log[output_log_pos].status = status;
+ output_log[output_log_pos].jiffies = jiffies;
+ output_log_pos = (output_log_pos + 1) % OLOGSIZE;
+#endif
+ return 0;
+ }
+ FDCS->reset = 1;
+ if (!initialising) {
+ DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
+ byte, fdc, status);
+ show_floppy();
+ }
+ return -1;
+}
+#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;}
+
+/* gets the response from the fdc */
+static int result(void)
+{
+ int i, status;
+
+ for(i=0; i < MAX_REPLIES; i++) {
+ if ((status = wait_til_ready()) < 0)
+ break;
+ status &= STATUS_DIR|STATUS_READY|STATUS_BUSY|STATUS_DMA;
+ if ((status & ~STATUS_BUSY) == STATUS_READY){
+#ifdef FLOPPY_SANITY_CHECK
+ resultjiffies = jiffies;
+ resultsize = i;
+#endif
+ return i;
+ }
+ if (status == (STATUS_DIR|STATUS_READY|STATUS_BUSY))
+ reply_buffer[i] = fd_inb(FD_DATA);
+ else
+ break;
+ }
+ if(!initialising) {
+ DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
+ fdc, status, i);
+ show_floppy();
+ }
+ FDCS->reset = 1;
+ return -1;
+}
+
+#define MORE_OUTPUT -2
+/* does the fdc need more output? */
+static int need_more_output(void)
+{
+ int status;
+ if( (status = wait_til_ready()) < 0)
+ return -1;
+ if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY)
+ return MORE_OUTPUT;
+ return result();
+}
+
+/* Set perpendicular mode as required, based on data rate, if supported.
+ * The 82077 is now tested. The 1 Mbps data rate is only possible with the 82077-1.
+ */
+static inline void perpendicular_mode(void)
+{
+ unsigned char perp_mode;
+
+ if (raw_cmd->rate & 0x40){
+ switch(raw_cmd->rate & 3){
+ case 0:
+ perp_mode=2;
+ break;
+ case 3:
+ perp_mode=3;
+ break;
+ default:
+ DPRINT("Invalid data rate for perpendicular mode!\n");
+ cont->done(0);
+ FDCS->reset = 1; /* convenient way to return to
+					  * redo without too much hassle (deep
+					  * stack et al.) */
+ return;
+ }
+ } else
+ perp_mode = 0;
+
+ if (FDCS->perp_mode == perp_mode)
+ return;
+ if (FDCS->version >= FDC_82077_ORIG) {
+ output_byte(FD_PERPENDICULAR);
+ output_byte(perp_mode);
+ FDCS->perp_mode = perp_mode;
+ } else if (perp_mode) {
+ DPRINT("perpendicular mode not supported by this FDC.\n");
+ }
+} /* perpendicular_mode */
+
+static int fifo_depth = 0xa;
+static int no_fifo = 0;
+
+static int fdc_configure(void)
+{
+ /* Turn on FIFO */
+ output_byte(FD_CONFIGURE);
+ if(need_more_output() != MORE_OUTPUT)
+ return 0;
+ output_byte(0);
+ output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
+ output_byte(0); /* pre-compensation from track
+ 0 upwards */
+ return 1;
+}
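+/*
+ * Worked example (illustrative): with the defaults declared just above
+ * (fifo_depth == 0xa, no_fifo == 0), the third byte sent by
+ * fdc_configure() is 0x10 | (0 & 0x20) | (0xa & 0xf) == 0x1a, i.e.
+ * "no_fifo" lands in bit 5 and "fifo_depth" in the low nibble.  Setting
+ * no_fifo to 0x20 would make that byte 0x3a instead.
+ */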
+
+#define NOMINAL_DTR 500
+
+/* Issue a "SPECIFY" command to set the step rate time, head unload time,
+ * head load time, and DMA disable flag to values needed by floppy.
+ *
+ * The value "dtr" is the data transfer rate in Kbps. It is needed
+ * to account for the data rate-based scaling done by the 82072 and 82077
+ * FDC types. This parameter is ignored for other types of FDCs (i.e.
+ * 8272a).
+ *
+ * Note that changing the data transfer rate has a (probably deleterious)
+ * effect on the parameters subject to scaling for 82072/82077 FDCs, so
+ * fdc_specify is called again after each data transfer rate
+ * change.
+ *
+ * srt: 1000 to 16000 in microseconds
+ * hut: 16 to 240 milliseconds
+ * hlt: 2 to 254 milliseconds
+ *
+ * These values are rounded up to the next highest available delay time.
+ */
+static void fdc_specify(void)
+{
+ unsigned char spec1, spec2;
+ int srt, hlt, hut;
+ unsigned long dtr = NOMINAL_DTR;
+ unsigned long scale_dtr = NOMINAL_DTR;
+ int hlt_max_code = 0x7f;
+ int hut_max_code = 0xf;
+
+ if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
+ fdc_configure();
+ FDCS->need_configure = 0;
+ /*DPRINT("FIFO enabled\n");*/
+ }
+
+ switch (raw_cmd->rate & 0x03) {
+ case 3:
+ dtr = 1000;
+ break;
+ case 1:
+ dtr = 300;
+ if (FDCS->version >= FDC_82078) {
+			/* choose the default rate table, not the one
+ * where 1 = 2 Mbps */
+ output_byte(FD_DRIVESPEC);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(UNIT(current_drive));
+ output_byte(0xc0);
+ }
+ }
+ break;
+ case 2:
+ dtr = 250;
+ break;
+ }
+
+ if (FDCS->version >= FDC_82072) {
+ scale_dtr = dtr;
+ hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
+ hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
+ }
+
+ /* Convert step rate from microseconds to milliseconds and 4 bits */
+ srt = 16 - (DP->srt*scale_dtr/1000 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ SUPBOUND(srt, 0xf);
+ INFBOUND(srt, 0);
+
+ hlt = (DP->hlt*scale_dtr/2 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hlt < 0x01)
+ hlt = 0x01;
+ else if (hlt > 0x7f)
+ hlt = hlt_max_code;
+
+ hut = (DP->hut*scale_dtr/16 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hut < 0x1)
+ hut = 0x1;
+ else if (hut > 0xf)
+ hut = hut_max_code;
+
+ spec1 = (srt << 4) | hut;
+ spec2 = (hlt << 1) | (use_virtual_dma & 1);
+
+ /* If these parameters did not change, just return with success */
+ if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
+ /* Go ahead and set spec1 and spec2 */
+ output_byte(FD_SPECIFY);
+ output_byte(FDCS->spec1 = spec1);
+ output_byte(FDCS->spec2 = spec2);
+ }
+} /* fdc_specify */
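+/*
+ * Worked example for fdc_specify() above (illustrative arithmetic only):
+ * take the "unknown" drive defaults from default_drive_params[0]
+ * (srt == 8000, hlt == 16, hut == 16) at the 500 kbps rate, so
+ * scale_dtr == NOMINAL_DTR == 500, and assume use_virtual_dma == 0:
+ *
+ *   srt  = 16 - (8000*500/1000 + 499)/500 = 16 - 8 = 8
+ *   hlt  = (16*500/2  + 499)/500          = 8
+ *   hut  = (16*500/16 + 499)/500          = 1
+ *
+ *   spec1 = (srt << 4) | hut = 0x81
+ *   spec2 = (hlt << 1) | 0   = 0x10
+ *
+ * so the controller is sent "SPECIFY 0x81 0x10" until the data rate (and
+ * with it the scaling) changes.
+ */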
+
+/* Set the FDC's data transfer rate on behalf of the specified drive.
+ * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
+ * of the specify command (i.e. using the fdc_specify function).
+ */
+static int fdc_dtr(void)
+{
+ /* If data rate not already set to desired value, set it. */
+ if ((raw_cmd->rate & 3) == FDCS->dtr)
+ return 0;
+
+ /* Set dtr */
+ fd_outb(raw_cmd->rate & 3, FD_DCR);
+
+ /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
+ * need a stabilization period of several milliseconds to be
+ * enforced after data rate changes before R/W operations.
+ * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
+ */
+ FDCS->dtr = raw_cmd->rate & 3;
+ return(wait_for_completion(jiffies+2*HZ/100,
+ (timeout_fn) floppy_ready));
+} /* fdc_dtr */
+
+static void tell_sector(void)
+{
+ printk(": track %d, head %d, sector %d, size %d",
+ R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
+} /* tell_sector */
+
+
+/*
+ * OK, this error interpreting routine is called after a
+ * DMA read/write has succeeded
+ * or failed, so we check the results, and copy any buffers.
+ * hhb: Added better error reporting.
+ * ak: Made this into a separate routine.
+ */
+static int interpret_errors(void)
+{
+ char bad;
+
+ if (inr!=7) {
+ DPRINT("-- FDC reply error");
+ FDCS->reset = 1;
+ return 1;
+ }
+
+ /* check IC to find cause of interrupt */
+ switch (ST0 & ST0_INTR) {
+ case 0x40: /* error occurred during command execution */
+ if (ST1 & ST1_EOC)
+ return 0; /* occurs with pseudo-DMA */
+ bad = 1;
+ if (ST1 & ST1_WP) {
+ DPRINT("Drive is write protected\n");
+ CLEARF(FD_DISK_WRITABLE);
+ cont->done(0);
+ bad = 2;
+ } else if (ST1 & ST1_ND) {
+ SETF(FD_NEED_TWADDLE);
+ } else if (ST1 & ST1_OR) {
+ if (DP->flags & FTD_MSG)
+ DPRINT("Over/Underrun - retrying\n");
+ bad = 0;
+ }else if (*errors >= DP->max_errors.reporting){
+ DPRINT("");
+ if (ST0 & ST0_ECE) {
+ printk("Recalibrate failed!");
+ } else if (ST2 & ST2_CRC) {
+ printk("data CRC error");
+ tell_sector();
+ } else if (ST1 & ST1_CRC) {
+ printk("CRC error");
+ tell_sector();
+ } else if ((ST1 & (ST1_MAM|ST1_ND)) || (ST2 & ST2_MAM)) {
+ if (!probing) {
+ printk("sector not found");
+ tell_sector();
+ } else
+ printk("probe failed...");
+ } else if (ST2 & ST2_WC) { /* seek error */
+ printk("wrong cylinder");
+ } else if (ST2 & ST2_BC) { /* cylinder marked as bad */
+ printk("bad cylinder");
+ } else {
+ printk("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x", ST0, ST1, ST2);
+ tell_sector();
+ }
+ printk("\n");
+
+ }
+ if (ST2 & ST2_WC || ST2 & ST2_BC)
+ /* wrong cylinder => recal */
+ DRS->track = NEED_2_RECAL;
+ return bad;
+ case 0x80: /* invalid command given */
+ DPRINT("Invalid FDC command given!\n");
+ cont->done(0);
+ return 2;
+ case 0xc0:
+ DPRINT("Abnormal termination caused by polling\n");
+ cont->error();
+ return 2;
+ default: /* (0) Normal command termination */
+ return 0;
+ }
+}
+
+/*
+ * This routine is called when everything should be correctly set up
+ * for the transfer (i.e. floppy motor is on, the correct floppy is
+ * selected, and the head is sitting on the right track).
+ */
+static void setup_rw_floppy(void)
+{
+ int i, ready_date, r, flags;
+ timeout_fn function;
+
+ flags = raw_cmd->flags;
+ if (flags & (FD_RAW_READ | FD_RAW_WRITE))
+ flags |= FD_RAW_INTR;
+
+ if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)){
+ ready_date = DRS->spinup_date + DP->spinup;
+ /* If spinup will take a long time, rerun scandrives
+ * again just before spinup completion. Beware that
+ * after scandrives, we must again wait for selection.
+ */
+ if ((signed) (ready_date - jiffies) > DP->select_delay){
+ ready_date -= DP->select_delay;
+ function = (timeout_fn) floppy_start;
+ } else
+ function = (timeout_fn) setup_rw_floppy;
+
+ /* wait until the floppy is spinning fast enough */
+ if (wait_for_completion(ready_date,function))
+ return;
+ }
+
+ if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
+ setup_DMA();
+
+ if (flags & FD_RAW_INTR)
+ SET_INTR(main_command_interrupt);
+
+ r=0;
+ for (i=0; i< raw_cmd->cmd_count; i++)
+ r|=output_byte(raw_cmd->cmd[i]);
+
+#ifdef DEBUGT
+ debugt("rw_command: ");
+#endif
+ if (r){
+ cont->error();
+ reset_fdc();
+ return;
+ }
+
+ if (!(flags & FD_RAW_INTR)){
+ inr = result();
+ cont->interrupt();
+ } else if (flags & FD_RAW_NEED_DISK)
+ fd_watchdog();
+}
+
+static int blind_seek;
+
+/*
+ * This is the routine called after every seek (or recalibrate) interrupt
+ * from the floppy controller.
+ */
+static void seek_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("seek interrupt:");
+#endif
+ if (inr != 2 || (ST0 & 0xF8) != 0x20) {
+ DPRINT("seek failed\n");
+ DRS->track = NEED_2_RECAL;
+ cont->error();
+ cont->redo();
+ return;
+ }
+ if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek){
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of effective seek\n");
+ DPRINT("jiffies=%ld\n", jiffies);
+ }
+#endif
+ CLEARF(FD_DISK_NEWCHANGE); /* effective seek */
+ DRS->select_date = jiffies;
+ }
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+static void check_wp(void)
+{
+ if (TESTF(FD_VERIFY)) {
+ /* check write protection */
+ output_byte(FD_GETSTATUS);
+ output_byte(UNIT(current_drive));
+ if (result() != 1){
+ FDCS->reset = 1;
+ return;
+ }
+ CLEARF(FD_VERIFY);
+ CLEARF(FD_NEED_TWADDLE);
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("checking whether disk is write protected\n");
+ DPRINT("wp=%x\n",ST3 & 0x40);
+ }
+#endif
+ if (!(ST3 & 0x40))
+ SETF(FD_DISK_WRITABLE);
+ else
+ CLEARF(FD_DISK_WRITABLE);
+ }
+}
+
+static void seek_floppy(void)
+{
+ int track;
+
+ blind_seek=0;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from seek\n");
+ }
+#endif
+
+ if (!TESTF(FD_DISK_NEWCHANGE) &&
+ disk_change(current_drive) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK)){
+ /* the media changed flag should be cleared after the seek.
+ * If it isn't, this means that there is really no disk in
+ * the drive.
+ */
+ SETF(FD_DISK_CHANGED);
+ cont->done(0);
+ cont->redo();
+ return;
+ }
+ if (DRS->track <= NEED_1_RECAL){
+ recalibrate_floppy();
+ return;
+ } else if (TESTF(FD_DISK_NEWCHANGE) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK) &&
+ (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
+ /* we seek to clear the media-changed condition. Does anybody
+ * know a more elegant way, which works on all drives? */
+ if (raw_cmd->track)
+ track = raw_cmd->track - 1;
+ else {
+ if (DP->flags & FD_SILENT_DCL_CLEAR){
+ set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
+ blind_seek = 1;
+ raw_cmd->flags |= FD_RAW_NEED_SEEK;
+ }
+ track = 1;
+ }
+ } else {
+ check_wp();
+ if (raw_cmd->track != DRS->track &&
+ (raw_cmd->flags & FD_RAW_NEED_SEEK))
+ track = raw_cmd->track;
+ else {
+ setup_rw_floppy();
+ return;
+ }
+ }
+
+ SET_INTR(seek_interrupt);
+ output_byte(FD_SEEK);
+ output_byte(UNIT(current_drive));
+ LAST_OUT(track);
+#ifdef DEBUGT
+ debugt("seek command:");
+#endif
+}
+
+static void recal_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("recal interrupt:");
+#endif
+ if (inr !=2)
+ FDCS->reset = 1;
+ else if (ST0 & ST0_ECE) {
+ switch(DRS->track){
+ case NEED_1_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 1 recal:");
+#endif
+ /* after a second recalibrate, we still haven't
+ * reached track 0. Probably no drive. Raise an
+ * error, as failing immediately might upset
+ * computers possessed by the Devil :-) */
+ cont->error();
+ cont->redo();
+ return;
+ case NEED_2_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 2 recal:");
+#endif
+ /* If we already did a recalibrate,
+ * and we are not at track 0, this
+ * means we have moved. (The only way
+ * not to move at recalibration is to
+ * be already at track 0.) Clear the
+ * new change flag */
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of second recalibrate\n");
+ }
+#endif
+
+ CLEARF(FD_DISK_NEWCHANGE);
+ DRS->select_date = jiffies;
+ /* fall through */
+ default:
+#ifdef DEBUGT
+ debugt("recal interrupt default:");
+#endif
+ /* Recalibrate moves the head by at
+ * most 80 steps. If after one
+			 * recalibrate we still haven't reached
+ * track 0, this might mean that we
+ * started beyond track 80. Try
+ * again. */
+ DRS->track = NEED_1_RECAL;
+ break;
+ }
+ } else
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+static void print_result(char *message, int inr)
+{
+ int i;
+
+ DPRINT("%s ", message);
+ if (inr >= 0)
+ for (i=0; i<inr; i++)
+ printk("repl[%d]=%x ", i, reply_buffer[i]);
+ printk("\n");
+}
+
+/* interrupt handler */
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ void (*handler)(void) = DEVICE_INTR;
+ int do_print;
+
+ lasthandler = handler;
+ interruptjiffies = jiffies;
+
+ fd_disable_dma();
+ floppy_enable_hlt();
+ CLEAR_INTR;
+ if (fdc >= N_FDC || FDCS->address == -1){
+ /* we don't even know which FDC is the culprit */
+ printk("DOR0=%x\n", fdc_state[0].dor);
+ printk("floppy interrupt on bizarre fdc %d\n",fdc);
+ printk("handler=%p\n", handler);
+ is_alive("bizarre fdc");
+ return;
+ }
+
+ FDCS->reset = 0;
+ /* We have to clear the reset flag here, because apparently on boxes
+ * with level triggered interrupts (PS/2, Sparc, ...), it is needed to
+ * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
+ * emission of the SENSEI's.
+ * It is OK to emit floppy commands because we are in an interrupt
+	 * handler here, and thus need not fear interference from other
+	 * activity.
+ */
+
+ do_print = !handler && print_unex && !initialising;
+
+ inr = result();
+ if(do_print)
+ print_result("unexpected interrupt", inr);
+ if (inr == 0){
+ int max_sensei = 4;
+ do {
+ output_byte(FD_SENSEI);
+ inr = result();
+ if(do_print)
+ print_result("sensei", inr);
+ max_sensei--;
+ } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei);
+ }
+ if (handler) {
+ if(intr_count >= 2) {
+ /* expected interrupt */
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task_irq(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ } else
+ handler();
+ } else
+ FDCS->reset = 1;
+ is_alive("normal interrupt end");
+}
+
+static void recalibrate_floppy(void)
+{
+#ifdef DEBUGT
+ debugt("recalibrate floppy:");
+#endif
+ SET_INTR(recal_interrupt);
+ output_byte(FD_RECALIBRATE);
+ LAST_OUT(UNIT(current_drive));
+}
+
+/*
+ * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
+ */
+static void reset_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("reset interrupt:");
+#endif
+ result(); /* get the status ready for set_fdc */
+ if (FDCS->reset) {
+ printk("reset set in interrupt, calling %p\n", cont->error);
+ cont->error(); /* a reset just after a reset. BAD! */
+ }
+ cont->redo();
+}
+
+/*
+ * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
+ * or by setting the self clearing bit 7 of STATUS (newer FDCs)
+ */
+static void reset_fdc(void)
+{
+ SET_INTR(reset_interrupt);
+ FDCS->reset = 0;
+ reset_fdc_info(0);
+
+ /* Pseudo-DMA may intercept 'reset finished' interrupt. */
+ /* Irrelevant for systems with true DMA (i386). */
+ fd_disable_dma();
+
+ if (FDCS->version >= FDC_82072A)
+ fd_outb(0x80 | (FDCS->dtr &3), FD_STATUS);
+ else {
+ fd_outb(FDCS->dor & ~0x04, FD_DOR);
+ udelay(FD_RESET_DELAY);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+}
+
+void show_floppy(void)
+{
+ int i;
+
+ printk("\n");
+ printk("floppy driver state\n");
+ printk("-------------------\n");
+ printk("now=%ld last interrupt=%d last called handler=%p\n",
+ jiffies, interruptjiffies, lasthandler);
+
+
+#ifdef FLOPPY_SANITY_CHECK
+ printk("timeout_message=%s\n", timeout_message);
+ printk("last output bytes:\n");
+ for (i=0; i < OLOGSIZE; i++)
+ printk("%2x %2x %ld\n",
+ output_log[(i+output_log_pos) % OLOGSIZE].data,
+ output_log[(i+output_log_pos) % OLOGSIZE].status,
+ output_log[(i+output_log_pos) % OLOGSIZE].jiffies);
+ printk("last result at %d\n", resultjiffies);
+ printk("last redo_fd_request at %d\n", lastredo);
+ for (i=0; i<resultsize; i++){
+ printk("%2x ", reply_buffer[i]);
+ }
+ printk("\n");
+#endif
+
+ printk("status=%x\n", fd_inb(FD_STATUS));
+ printk("fdc_busy=%d\n", fdc_busy);
+ if (DEVICE_INTR)
+ printk("DEVICE_INTR=%p\n", DEVICE_INTR);
+ if (floppy_tq.sync)
+ printk("floppy_tq.routine=%p\n", floppy_tq.routine);
+ if (fd_timer.prev)
+ printk("fd_timer.function=%p\n", fd_timer.function);
+ if (fd_timeout.prev){
+ printk("timer_table=%p\n",fd_timeout.function);
+ printk("expires=%ld\n",fd_timeout.expires-jiffies);
+ printk("now=%ld\n",jiffies);
+ }
+ printk("cont=%p\n", cont);
+ printk("CURRENT=%p\n", CURRENT);
+ printk("command_status=%d\n", command_status);
+ printk("\n");
+}
+
+static void floppy_shutdown(void)
+{
+ if (!initialising)
+ show_floppy();
+ cancel_activity();
+ sti();
+
+ floppy_enable_hlt();
+ fd_disable_dma();
+ /* avoid dma going to a random drive after shutdown */
+
+ if (!initialising)
+ DPRINT("floppy timeout called\n");
+ FDCS->reset = 1;
+ if (cont){
+ cont->done(0);
+ cont->redo(); /* this will recall reset when needed */
+ } else {
+ printk("no cont in shutdown!\n");
+ process_fd_request();
+ }
+ is_alive("floppy shutdown");
+}
+/*typedef void (*timeout_fn)(unsigned long);*/
+
+/* start motor, check media-changed condition and write protection */
+static int start_motor(void (*function)(void) )
+{
+ int mask, data;
+
+ mask = 0xfc;
+ data = UNIT(current_drive);
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)){
+ if (!(FDCS->dor & (0x10 << UNIT(current_drive)))){
+ set_debugt();
+ /* no read since this drive is running */
+ DRS->first_read_date = 0;
+ /* note motor start time if motor is not yet running */
+ DRS->spinup_date = jiffies;
+ data |= (0x10 << UNIT(current_drive));
+ }
+ } else
+ if (FDCS->dor & (0x10 << UNIT(current_drive)))
+ mask &= ~(0x10 << UNIT(current_drive));
+
+ /* starts motor and selects floppy */
+ del_timer(motor_off_timer + current_drive);
+ set_dor(fdc, mask, data);
+
+ /* wait_for_completion also schedules reset if needed. */
+ return(wait_for_completion(DRS->select_date+DP->select_delay,
+ (timeout_fn) function));
+}
+
+static void floppy_ready(void)
+{
+ CHECK_RESET;
+ if (start_motor(floppy_ready)) return;
+ if (fdc_dtr()) return;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from floppy_ready\n");
+ }
+#endif
+
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
+ disk_change(current_drive) &&
+ !DP->select_delay)
+ twaddle(); /* this clears the dcl on certain drive/controller
+ * combinations */
+
+ if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)){
+ perpendicular_mode();
+ fdc_specify(); /* must be done here because of hut, hlt ... */
+ seek_floppy();
+ } else
+ setup_rw_floppy();
+}
+
+static void floppy_start(void)
+{
+ reschedule_timeout(CURRENTD, "floppy start", 0);
+
+ scandrives();
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("setting NEWCHANGE in floppy_start\n");
+ }
+#endif
+ SETF(FD_DISK_NEWCHANGE);
+ floppy_ready();
+}
+
+/*
+ * ========================================================================
+ * here ends the bottom half. Exported routines are:
+ * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
+ * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
+ * Initialization also uses output_byte, result, set_dor and
+ * floppy_interrupt.
+ * ========================================================================
+ */
+/*
+ * General purpose continuations.
+ * ==============================
+ */
+
+static void do_wakeup(void)
+{
+ reschedule_timeout(MAXTIMEOUT, "do wakeup", 0);
+ cont = 0;
+ command_status += 2;
+ wake_up(&command_done);
+}
+
+static struct cont_t wakeup_cont={
+ empty,
+ do_wakeup,
+ empty,
+ (done_f)empty
+};
+
+
+static struct cont_t intr_cont={
+ empty,
+ process_fd_request,
+ empty,
+ (done_f) empty
+};
+
+static int wait_til_done(void (*handler)(void), int interruptible)
+{
+ int ret;
+ unsigned long flags;
+
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ INT_OFF;
+ while(command_status < 2 && NO_SIGNAL){
+ is_alive("wait_til_done");
+ if (interruptible)
+ interruptible_sleep_on(&command_done);
+ else
+ sleep_on(&command_done);
+ }
+ if (command_status < 2){
+ cancel_activity();
+ cont = &intr_cont;
+ reset_fdc();
+ INT_ON;
+ return -EINTR;
+ }
+ INT_ON;
+
+ if (FDCS->reset)
+ command_status = FD_COMMAND_ERROR;
+ if (command_status == FD_COMMAND_OKAY)
+ ret=0;
+ else
+ ret=-EIO;
+ command_status = FD_COMMAND_NONE;
+ return ret;
+}
+
+static void generic_done(int result)
+{
+ command_status = result;
+ cont = &wakeup_cont;
+}
+
+static void generic_success(void)
+{
+ cont->done(1);
+}
+
+static void generic_failure(void)
+{
+ cont->done(0);
+}
+
+static void success_and_wakeup(void)
+{
+ generic_success();
+ cont->redo();
+}
+
+
+/*
+ * formatting and rw support.
+ * ==========================
+ */
+
+static int next_valid_format(void)
+{
+ int probed_format;
+
+ probed_format = DRS->probed_format;
+ while(1){
+ if (probed_format >= 8 ||
+ !DP->autodetect[probed_format]){
+ DRS->probed_format = 0;
+ return 1;
+ }
+ if (floppy_type[DP->autodetect[probed_format]].sect){
+ DRS->probed_format = probed_format;
+ return 0;
+ }
+ probed_format++;
+ }
+}
+
+static void bad_flp_intr(void)
+{
+ if (probing){
+ DRS->probed_format++;
+ if (!next_valid_format())
+ return;
+ }
+ (*errors)++;
+ INFBOUND(DRWE->badness, *errors);
+ if (*errors > DP->max_errors.abort)
+ cont->done(0);
+ if (*errors > DP->max_errors.reset)
+ FDCS->reset = 1;
+ else if (*errors > DP->max_errors.recal)
+ DRS->track = NEED_2_RECAL;
+}
+
+static void set_floppy(kdev_t device)
+{
+ if (TYPE(device))
+ _floppy = TYPE(device) + floppy_type;
+ else
+ _floppy = current_type[ DRIVE(device) ];
+}
+
+/*
+ * formatting support.
+ * ===================
+ */
+static void format_interrupt(void)
+{
+ switch (interpret_errors()){
+ case 1:
+ cont->error();
+ case 2:
+ break;
+ case 0:
+ cont->done(1);
+ }
+ cont->redo();
+}
+
+#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2)
+#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1))
+#define CT(x) ((x) | 0x40)
+static void setup_format_params(int track)
+{
+ struct fparm {
+ unsigned char track,head,sect,size;
+ } *here = (struct fparm *)floppy_track_buffer;
+ int il,n;
+ int count,head_shift,track_shift;
+
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->track = track;
+
+ raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
+ FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+ raw_cmd->rate = _floppy->rate & 0x43;
+ raw_cmd->cmd_count = NR_F;
+ COMMAND = FM_MODE(_floppy,FD_FORMAT);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,format_req.head);
+ F_SIZECODE = FD_SIZECODE(_floppy);
+ F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
+ F_GAP = _floppy->fmt_gap;
+ F_FILL = FD_FILL_BYTE;
+
+ raw_cmd->kernel_data = floppy_track_buffer;
+ raw_cmd->length = 4 * F_SECT_PER_TRACK;
+
+ /* allow for about 30ms for data transport per track */
+ head_shift = (F_SECT_PER_TRACK + 5) / 6;
+
+ /* a ``cylinder'' is two tracks plus a little stepping time */
+ track_shift = 2 * head_shift + 3;
+
+ /* position of logical sector 1 on this track */
+ n = (track_shift * format_req.track + head_shift * format_req.head)
+ % F_SECT_PER_TRACK;
+
+ /* determine interleave */
+ il = 1;
+ if (_floppy->fmt_gap < 0x22)
+ il++;
+
+ /* initialize field */
+ for (count = 0; count < F_SECT_PER_TRACK; ++count) {
+ here[count].track = format_req.track;
+ here[count].head = format_req.head;
+ here[count].sect = 0;
+ here[count].size = F_SIZECODE;
+ }
+ /* place logical sectors */
+ for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
+ here[n].sect = count;
+ n = (n+il) % F_SECT_PER_TRACK;
+ if (here[n].sect) { /* sector busy, find next free sector */
+ ++n;
+ if (n>= F_SECT_PER_TRACK) {
+ n-=F_SECT_PER_TRACK;
+ while (here[n].sect) ++n;
+ }
+ }
+ }
+}
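+/*
+ * Worked example for setup_format_params() above (illustrative):
+ * formatting a plain 1.44M disk (floppy_type[7]: 18 sectors per track,
+ * fmt_gap 0x6C, 512-byte sectors so F_SIZECODE == 2) gives
+ *
+ *   F_SECT_PER_TRACK = 18 << 2 >> 2 = 18
+ *   head_shift  = (18 + 5) / 6      = 3
+ *   track_shift = 2 * 3 + 3         = 9
+ *   il = 1                          (fmt_gap >= 0x22, so no interleave)
+ *
+ * so track 0/head 0 gets sector IDs 1..18 in order, track 0/head 1 is
+ * skewed by 3 sectors (logical sector 1 sits in physical slot 3), and
+ * track 1/head 0 is skewed by 9, which hides the head-switch and
+ * stepping delays mentioned in the comments inside the function.
+ */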
+
+static void redo_format(void)
+{
+ buffer_track = -1;
+ setup_format_params(format_req.track << STRETCH(_floppy));
+ floppy_start();
+#ifdef DEBUGT
+ debugt("queue format request");
+#endif
+}
+
+static struct cont_t format_cont={
+ format_interrupt,
+ redo_format,
+ bad_flp_intr,
+ generic_done };
+
+static int do_format(kdev_t device, struct format_descr *tmp_format_req)
+{
+ int ret;
+ int drive=DRIVE(device);
+
+ LOCK_FDC(drive,1);
+ set_floppy(device);
+ if (!_floppy ||
+ _floppy->track > DP->tracks ||
+ tmp_format_req->track >= _floppy->track ||
+ tmp_format_req->head >= _floppy->head ||
+ (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
+ !_floppy->fmt_gap) {
+ process_fd_request();
+ return -EINVAL;
+ }
+ format_req = *tmp_format_req;
+ format_errors = 0;
+ cont = &format_cont;
+ errors = &format_errors;
+ IWAIT(redo_format);
+ process_fd_request();
+ return ret;
+}
+
+/*
+ * Buffer read/write and support
+ * =============================
+ */
+
+/* new request_done. Can handle physical sectors which are smaller than a
+ * logical buffer */
+static void request_done(int uptodate)
+{
+ int block;
+
+ probing = 0;
+ reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
+
+ if (!CURRENT){
+ DPRINT("request list destroyed in floppy request done\n");
+ return;
+ }
+
+ if (uptodate){
+ /* maintain values for invalidation on geometry
+ * change */
+ block = current_count_sectors + CURRENT->sector;
+ INFBOUND(DRS->maxblock, block);
+ if (block > _floppy->sect)
+ DRS->maxtrack = 1;
+
+ /* unlock chained buffers */
+ while (current_count_sectors && CURRENT &&
+ current_count_sectors >= CURRENT->current_nr_sectors){
+ current_count_sectors -= CURRENT->current_nr_sectors;
+ CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+ CURRENT->sector += CURRENT->current_nr_sectors;
+ end_request(1);
+ }
+ if (current_count_sectors && CURRENT){
+ /* "unlock" last subsector */
+ CURRENT->buffer += current_count_sectors <<9;
+ CURRENT->current_nr_sectors -= current_count_sectors;
+ CURRENT->nr_sectors -= current_count_sectors;
+ CURRENT->sector += current_count_sectors;
+ return;
+ }
+
+ if (current_count_sectors && !CURRENT)
+ DPRINT("request list destroyed in floppy request done\n");
+
+ } else {
+ if (CURRENT->cmd == WRITE) {
+ /* record write error information */
+ DRWE->write_errors++;
+ if (DRWE->write_errors == 1) {
+ DRWE->first_error_sector = CURRENT->sector;
+ DRWE->first_error_generation = DRS->generation;
+ }
+ DRWE->last_error_sector = CURRENT->sector;
+ DRWE->last_error_generation = DRS->generation;
+ }
+ end_request(0);
+ }
+}
+
+/* Interrupt handler evaluating the result of the r/w operation */
+static void rw_interrupt(void)
+{
+ int nr_sectors, ssize, eoc;
+
+ if (!DRS->first_read_date)
+ DRS->first_read_date = jiffies;
+
+ nr_sectors = 0;
+ CODE2SIZE;
+
+ if(ST1 & ST1_EOC)
+ eoc = 1;
+ else
+ eoc = 0;
+ nr_sectors = ((R_TRACK-TRACK)*_floppy->head+R_HEAD-HEAD) *
+ _floppy->sect + ((R_SECTOR-SECTOR+eoc) << SIZECODE >> 2) -
+ (sector_t % _floppy->sect) % ssize;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (nr_sectors > current_count_sectors + ssize -
+ (current_count_sectors + sector_t) % ssize +
+ sector_t % ssize){
+ DPRINT("long rw: %x instead of %lx\n",
+ nr_sectors, current_count_sectors);
+ printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
+ printk("rh=%d h=%d\n", R_HEAD, HEAD);
+ printk("rt=%d t=%d\n", R_TRACK, TRACK);
+ printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
+ sector_t, ssize);
+ }
+#endif
+ INFBOUND(nr_sectors,0);
+ SUPBOUND(current_count_sectors, nr_sectors);
+
+ switch (interpret_errors()){
+ case 2:
+ cont->redo();
+ return;
+ case 1:
+ if (!current_count_sectors){
+ cont->error();
+ cont->redo();
+ return;
+ }
+ break;
+ case 0:
+ if (!current_count_sectors){
+ cont->redo();
+ return;
+ }
+ current_type[current_drive] = _floppy;
+ floppy_sizes[TOMINOR(current_drive) ]= _floppy->size>>1;
+ break;
+ }
+
+ if (probing) {
+ if (DP->flags & FTD_MSG)
+ DPRINT("Auto-detected floppy type %s in fd%d\n",
+ _floppy->name,current_drive);
+ current_type[current_drive] = _floppy;
+ floppy_sizes[TOMINOR(current_drive)] = _floppy->size >> 1;
+ probing = 0;
+ }
+
+ if (CT(COMMAND) != FD_READ ||
+ raw_cmd->kernel_data == CURRENT->buffer){
+ /* transfer directly from buffer */
+ cont->done(1);
+ } else if (CT(COMMAND) == FD_READ){
+ buffer_track = raw_cmd->track;
+ buffer_drive = current_drive;
+ INFBOUND(buffer_max, nr_sectors + sector_t);
+ }
+ cont->redo();
+}
+
+/* Compute maximal contiguous buffer size. */
+static int buffer_chain_size(void)
+{
+ struct buffer_head *bh;
+ int size;
+ char *base;
+
+ base = CURRENT->buffer;
+ size = CURRENT->current_nr_sectors << 9;
+ bh = CURRENT->bh;
+
+ if (bh){
+ bh = bh->b_reqnext;
+ while (bh && bh->b_data == base + size){
+ size += bh->b_size;
+ bh = bh->b_reqnext;
+ }
+ }
+ return size >> 9;
+}
+
+/* Compute the maximal transfer size */
+static int transfer_size(int ssize, int max_sector, int max_size)
+{
+ SUPBOUND(max_sector, sector_t + max_size);
+
+ /* alignment */
+ max_sector -= (max_sector % _floppy->sect) % ssize;
+
+ /* transfer size, beginning not aligned */
+ current_count_sectors = max_sector - sector_t ;
+
+ return max_sector;
+}
+
+/*
+ * Move data from/to the track buffer to/from the buffer cache.
+ */
+static void copy_buffer(int ssize, int max_sector, int max_sector_2)
+{
+ int remaining; /* number of transferred 512-byte sectors */
+ struct buffer_head *bh;
+ char *buffer, *dma_buffer;
+ int size;
+
+ max_sector = transfer_size(ssize,
+ minimum(max_sector, max_sector_2),
+ CURRENT->nr_sectors);
+
+ if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
+ buffer_max > sector_t + CURRENT->nr_sectors)
+ current_count_sectors = minimum(buffer_max - sector_t,
+ CURRENT->nr_sectors);
+
+ remaining = current_count_sectors << 9;
+#ifdef FLOPPY_SANITY_CHECK
+ if ((remaining >> 9) > CURRENT->nr_sectors &&
+ CT(COMMAND) == FD_WRITE){
+ DPRINT("in copy buffer\n");
+ printk("current_count_sectors=%ld\n", current_count_sectors);
+ printk("remaining=%d\n", remaining >> 9);
+ printk("CURRENT->nr_sectors=%ld\n",CURRENT->nr_sectors);
+ printk("CURRENT->current_nr_sectors=%ld\n",
+ CURRENT->current_nr_sectors);
+ printk("max_sector=%d\n", max_sector);
+ printk("ssize=%d\n", ssize);
+ }
+#endif
+
+ buffer_max = maximum(max_sector, buffer_max);
+
+ dma_buffer = floppy_track_buffer + ((sector_t - buffer_min) << 9);
+
+ bh = CURRENT->bh;
+ size = CURRENT->current_nr_sectors << 9;
+ buffer = CURRENT->buffer;
+
+ while (remaining > 0){
+ SUPBOUND(size, remaining);
+#ifdef FLOPPY_SANITY_CHECK
+ if (dma_buffer + size >
+ floppy_track_buffer + (max_buffer_sectors << 10) ||
+ dma_buffer < floppy_track_buffer){
+ DPRINT("buffer overrun in copy buffer %d\n",
+ (int) ((floppy_track_buffer - dma_buffer) >>9));
+ printk("sector_t=%d buffer_min=%d\n",
+ sector_t, buffer_min);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
+ if (CT(COMMAND) == FD_READ)
+ printk("read\n");
+			if (CT(COMMAND) == FD_WRITE)
+ printk("write\n");
+ break;
+ }
+ if (((unsigned long)buffer) % 512)
+ DPRINT("%p buffer not aligned\n", buffer);
+#endif
+ if (CT(COMMAND) == FD_READ)
+ memcpy(buffer, dma_buffer, size);
+ else
+ memcpy(dma_buffer, buffer, size);
+ remaining -= size;
+ if (!remaining)
+ break;
+
+ dma_buffer += size;
+ bh = bh->b_reqnext;
+#ifdef FLOPPY_SANITY_CHECK
+ if (!bh){
+ DPRINT("bh=null in copy buffer after copy\n");
+ break;
+ }
+#endif
+ size = bh->b_size;
+ buffer = bh->b_data;
+ }
+#ifdef FLOPPY_SANITY_CHECK
+ if (remaining){
+ if (remaining > 0)
+ max_sector -= remaining >> 9;
+ DPRINT("weirdness: remaining %d\n", remaining>>9);
+ }
+#endif
+}
+
+/*
+ * Formulate a read/write request.
+ * this routine decides where to load the data (directly to buffer, or to
+ * tmp floppy area), how much data to load (the size of the buffer, the whole
+ * track, or a single sector)
+ * All floppy_track_buffer handling goes in here. If we ever add track buffer
+ * allocation on the fly, it should be done here. No other part should need
+ * modification.
+ */
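+/*
+ * Rough recap of the decision below (descriptive only, the code is
+ * authoritative): a read already covered by the track buffer is served
+ * by copy_buffer() alone; a request that is not aligned on a physical
+ * sector, or smaller than one, is turned into a read into the track
+ * buffer first; and a buffer that is DMA-reachable, does not cross a
+ * 64 KB boundary and is large enough is transferred directly, bypassing
+ * the track buffer.
+ */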
+
+static int make_raw_rw_request(void)
+{
+ int aligned_sector_t;
+ int max_sector, max_size, tracksize, ssize;
+
+ set_fdc(DRIVE(CURRENT->rq_dev));
+
+ raw_cmd = &default_raw_cmd;
+	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+ raw_cmd->cmd_count = NR_RW;
+ if (CURRENT->cmd == READ){
+ raw_cmd->flags |= FD_RAW_READ;
+ COMMAND = FM_MODE(_floppy,FD_READ);
+ } else if (CURRENT->cmd == WRITE){
+ raw_cmd->flags |= FD_RAW_WRITE;
+ COMMAND = FM_MODE(_floppy,FD_WRITE);
+ } else {
+ DPRINT("make_raw_rw_request: unknown command\n");
+ return 0;
+ }
+
+ max_sector = _floppy->sect * _floppy->head;
+
+ TRACK = CURRENT->sector / max_sector;
+ sector_t = CURRENT->sector % max_sector;
+ if (_floppy->track && TRACK >= _floppy->track)
+ return 0;
+ HEAD = sector_t / _floppy->sect;
+
+ if (((_floppy->stretch & FD_SWAPSIDES) || TESTF(FD_NEED_TWADDLE)) &&
+ sector_t < _floppy->sect)
+ max_sector = _floppy->sect;
+
+ /* 2M disks have phantom sectors on the first track */
+ if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)){
+ max_sector = 2 * _floppy->sect / 3;
+ if (sector_t >= max_sector){
+ current_count_sectors = minimum(_floppy->sect - sector_t,
+ CURRENT->nr_sectors);
+ return 1;
+ }
+ SIZECODE = 2;
+ } else
+ SIZECODE = FD_SIZECODE(_floppy);
+ raw_cmd->rate = _floppy->rate & 0x43;
+ if ((_floppy->rate & FD_2M) &&
+ (TRACK || HEAD) &&
+ raw_cmd->rate == 2)
+ raw_cmd->rate = 1;
+
+ if (SIZECODE)
+ SIZECODE2 = 0xff;
+ else
+ SIZECODE2 = 0x80;
+ raw_cmd->track = TRACK << STRETCH(_floppy);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,HEAD);
+ GAP = _floppy->gap;
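+ /* CODE2SIZE is assumed (from its use here and the SIZECODE arithmetic
+ * below) to set ssize, the physical sector size expressed in 512-byte
+ * units; the track size and alignment computations that follow work in
+ * these units. */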
+ CODE2SIZE;
+ SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
+ SECTOR = ((sector_t % _floppy->sect) << 2 >> SIZECODE) + 1;
+ tracksize = _floppy->sect - _floppy->sect % ssize;
+ if (tracksize < _floppy->sect){
+ SECT_PER_TRACK ++;
+ if (tracksize <= sector_t % _floppy->sect)
+ SECTOR--;
+ while (tracksize <= sector_t % _floppy->sect){
+ while(tracksize + ssize > _floppy->sect){
+ SIZECODE--;
+ ssize >>= 1;
+ }
+ SECTOR++; SECT_PER_TRACK ++;
+ tracksize += ssize;
+ }
+ max_sector = HEAD * _floppy->sect + tracksize;
+ } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing)
+ max_sector = _floppy->sect;
+
+ aligned_sector_t = sector_t - (sector_t % _floppy->sect) % ssize;
+ max_size = CURRENT->nr_sectors;
+ if ((raw_cmd->track == buffer_track) &&
+ (current_drive == buffer_drive) &&
+ (sector_t >= buffer_min) && (sector_t < buffer_max)) {
+ /* data already in track buffer */
+ if (CT(COMMAND) == FD_READ) {
+ copy_buffer(1, max_sector, buffer_max);
+ return 1;
+ }
+ } else if (aligned_sector_t != sector_t || CURRENT->nr_sectors < ssize){
+ if (CT(COMMAND) == FD_WRITE){
+ if (sector_t + CURRENT->nr_sectors > ssize &&
+ sector_t + CURRENT->nr_sectors < ssize + ssize)
+ max_size = ssize + ssize;
+ else
+ max_size = ssize;
+ }
+ raw_cmd->flags &= ~FD_RAW_WRITE;
+ raw_cmd->flags |= FD_RAW_READ;
+ COMMAND = FM_MODE(_floppy,FD_READ);
+ } else if ((unsigned long)CURRENT->buffer < MAX_DMA_ADDRESS) {
+ unsigned long dma_limit;
+ int direct, indirect;
+
+ indirect= transfer_size(ssize,max_sector,max_buffer_sectors*2) -
+ sector_t;
+
+ /*
+ * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
+ * on a 64 bit machine!
+ */
+ max_size = buffer_chain_size();
+ dma_limit = (MAX_DMA_ADDRESS - ((unsigned long) CURRENT->buffer)) >> 9;
+ if ((unsigned long) max_size > dma_limit) {
+ max_size = dma_limit;
+ }
+ /* 64 kb boundaries */
+ if (CROSS_64KB(CURRENT->buffer, max_size << 9))
+ max_size = (K_64 - ((long) CURRENT->buffer) % K_64)>>9;
+ direct = transfer_size(ssize,max_sector,max_size) - sector_t;
+ /*
+ * We try to read tracks, but if we get too many errors, we
+ * go back to reading just one sector at a time.
+ *
+ * This means we should be able to read a sector even if there
+ * are other bad sectors on this track.
+ */
+ if (!direct ||
+ (indirect * 2 > direct * 3 &&
+ *errors < DP->max_errors.read_track &&
+ /*!TESTF(FD_NEED_TWADDLE) &&*/
+ ((!probing || (DP->read_track&(1<<DRS->probed_format)))))){
+ max_size = CURRENT->nr_sectors;
+ } else {
+ raw_cmd->kernel_data = CURRENT->buffer;
+ raw_cmd->length = current_count_sectors << 9;
+ if (raw_cmd->length == 0){
+ DPRINT("zero dma transfer attempted from make_raw_request\n");
+ DPRINT("indirect=%d direct=%d sector_t=%d",
+ indirect, direct, sector_t);
+ return 0;
+ }
+ return 2;
+ }
+ }
+
+ if (CT(COMMAND) == FD_READ)
+ max_size = max_sector; /* unbounded */
+
+ /* claim buffer track if needed */
+ if (buffer_track != raw_cmd->track || /* bad track */
+ buffer_drive !=current_drive || /* bad drive */
+ sector_t > buffer_max ||
+ sector_t < buffer_min ||
+ ((CT(COMMAND) == FD_READ ||
+ (aligned_sector_t == sector_t && CURRENT->nr_sectors >= ssize))&&
+ max_sector > 2 * max_buffer_sectors + buffer_min &&
+ max_size + sector_t > 2 * max_buffer_sectors + buffer_min)
+ /* not enough space */){
+ buffer_track = -1;
+ buffer_drive = current_drive;
+ buffer_max = buffer_min = aligned_sector_t;
+ }
+ raw_cmd->kernel_data = floppy_track_buffer +
+ ((aligned_sector_t-buffer_min)<<9);
+
+ if (CT(COMMAND) == FD_WRITE){
+ /* copy write buffer to track buffer.
+ * if we get here, we know that the write
+ * is either aligned or the data is already in the buffer
+ * (buffer will be overwritten) */
+#ifdef FLOPPY_SANITY_CHECK
+ if (sector_t != aligned_sector_t && buffer_track == -1)
+ DPRINT("internal error offset !=0 on write\n");
+#endif
+ buffer_track = raw_cmd->track;
+ buffer_drive = current_drive;
+ copy_buffer(ssize, max_sector, 2*max_buffer_sectors+buffer_min);
+ } else
+ transfer_size(ssize, max_sector,
+ 2*max_buffer_sectors+buffer_min-aligned_sector_t);
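+ /* the transfer_size() call above is made for its side effect of
+ * setting current_count_sectors (used just below to compute the dma
+ * transfer length); its return value is ignored here */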
+
+ /* round up current_count_sectors to get dma xfer size */
+ raw_cmd->length = sector_t+current_count_sectors-aligned_sector_t;
+ raw_cmd->length = ((raw_cmd->length -1)|(ssize-1))+1;
+ raw_cmd->length <<= 9;
+#ifdef FLOPPY_SANITY_CHECK
+ if ((raw_cmd->length < current_count_sectors << 9) ||
+ (raw_cmd->kernel_data != CURRENT->buffer &&
+ CT(COMMAND) == FD_WRITE &&
+ (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
+ aligned_sector_t < buffer_min)) ||
+ raw_cmd->length % (128 << SIZECODE) ||
+ raw_cmd->length <= 0 || current_count_sectors <= 0){
+ DPRINT("fractionary current count b=%lx s=%lx\n",
+ raw_cmd->length, current_count_sectors);
+ if (raw_cmd->kernel_data != CURRENT->buffer)
+ printk("addr=%d, length=%ld\n",
+ (int) ((raw_cmd->kernel_data -
+ floppy_track_buffer) >> 9),
+ current_count_sectors);
+ printk("st=%d ast=%d mse=%d msi=%d\n",
+ sector_t, aligned_sector_t, max_sector, max_size);
+ printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
+ printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
+ COMMAND, SECTOR, HEAD, TRACK);
+ printk("buffer drive=%d\n", buffer_drive);
+ printk("buffer track=%d\n", buffer_track);
+ printk("buffer_min=%d\n", buffer_min);
+ printk("buffer_max=%d\n", buffer_max);
+ return 0;
+ }
+
+ if (raw_cmd->kernel_data != CURRENT->buffer){
+ if (raw_cmd->kernel_data < floppy_track_buffer ||
+ current_count_sectors < 0 ||
+ raw_cmd->length < 0 ||
+ raw_cmd->kernel_data + raw_cmd->length >
+ floppy_track_buffer + (max_buffer_sectors << 10)){
+ DPRINT("buffer overrun in schedule dma\n");
+ printk("sector_t=%d buffer_min=%d current_count=%ld\n",
+ sector_t, buffer_min,
+ raw_cmd->length >> 9);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
+ if (CT(COMMAND) == FD_READ)
+ printk("read\n");
+ if (CT(COMMAND) == FD_WRITE)
+ printk("write\n");
+ return 0;
+ }
+ } else if (raw_cmd->length > CURRENT->nr_sectors << 9 ||
+ current_count_sectors > CURRENT->nr_sectors){
+ DPRINT("buffer overrun in direct transfer\n");
+ return 0;
+ } else if (raw_cmd->length < current_count_sectors << 9){
+ DPRINT("more sectors than bytes\n");
+ printk("bytes=%ld\n", raw_cmd->length >> 9);
+ printk("sectors=%ld\n", current_count_sectors);
+ }
+ if (raw_cmd->length == 0){
+ DPRINT("zero dma transfer attempted from make_raw_request\n");
+ return 0;
+ }
+#endif
+ return 2;
+}
+
+static void redo_fd_request(void)
+{
+#define REPEAT {request_done(0); continue; }
+ kdev_t device;
+ int tmp;
+
+ lastredo = jiffies;
+ if (current_drive < N_DRIVE)
+ floppy_off(current_drive);
+
+ if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+ CLEAR_INTR;
+ unlock_fdc();
+ return;
+ }
+
+ while(1){
+ if (!CURRENT) {
+ CLEAR_INTR;
+ unlock_fdc();
+ return;
+ }
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
+ panic(DEVICE_NAME ": request list destroyed");
+ if (CURRENT->bh && !buffer_locked(CURRENT->bh))
+ panic(DEVICE_NAME ": block not locked");
+
+ device = CURRENT->rq_dev;
+ set_fdc(DRIVE(device));
+ reschedule_timeout(CURRENTD, "redo fd request", 0);
+
+ set_floppy(device);
+ raw_cmd = & default_raw_cmd;
+ raw_cmd->flags = 0;
+ if (start_motor(redo_fd_request)) return;
+ disk_change(current_drive);
+ if (test_bit(current_drive, &fake_change) ||
+ TESTF(FD_DISK_CHANGED)){
+ DPRINT("disk absent or changed during operation\n");
+ REPEAT;
+ }
+ if (!_floppy) { /* Autodetection */
+ if (!probing){
+ DRS->probed_format = 0;
+ if (next_valid_format()){
+ DPRINT("no autodetectable formats\n");
+ _floppy = NULL;
+ REPEAT;
+ }
+ }
+ probing = 1;
+ _floppy = floppy_type+DP->autodetect[DRS->probed_format];
+ } else
+ probing = 0;
+ errors = & (CURRENT->errors);
+ tmp = make_raw_rw_request();
+ if (tmp < 2){
+ request_done(tmp);
+ continue;
+ }
+
+ if (TESTF(FD_NEED_TWADDLE))
+ twaddle();
+ floppy_tq.routine = (void *)(void *) floppy_start;
+ queue_task(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+#ifdef DEBUGT
+ debugt("queue fd request");
+#endif
+ return;
+ }
+#undef REPEAT
+}
+
+static struct cont_t rw_cont={
+ rw_interrupt,
+ redo_fd_request,
+ bad_flp_intr,
+ request_done };
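+/* The cont_t callbacks are given positionally; as declared earlier in this
+ * file, the fields are, in order: the interrupt handler run when the command
+ * completes, the redo routine, the error handler and the done routine. The
+ * same ordering applies to the other cont_t initializers below. */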
+
+static struct tq_struct request_tq =
+{ 0, 0, (void *) (void *) redo_fd_request, 0 };
+
+static void process_fd_request(void)
+{
+ cont = &rw_cont;
+ queue_task(&request_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+static void do_fd_request(void)
+{
+ sti();
+ if (fdc_busy){
+ /* fdc busy, this new request will be treated when the
+ current one is done */
+ is_alive("do fd request, old request running");
+ return;
+ }
+ lock_fdc(MAXTIMEOUT,0);
+ process_fd_request();
+ is_alive("do fd request");
+}
+
+static struct cont_t poll_cont={
+ success_and_wakeup,
+ floppy_ready,
+ generic_failure,
+ generic_done };
+
+static int poll_drive(int interruptible, int flag)
+{
+ int ret;
+ /* no auto-sense, just clear dcl */
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->flags= flag;
+ raw_cmd->track=0;
+ raw_cmd->cmd_count=0;
+ cont = &poll_cont;
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("setting NEWCHANGE in poll_drive\n");
+ }
+#endif
+ SETF(FD_DISK_NEWCHANGE);
+ WAIT(floppy_ready);
+ return ret;
+}
+
+/*
+ * User triggered reset
+ * ====================
+ */
+
+static void reset_intr(void)
+{
+ printk("weird, reset interrupt called\n");
+}
+
+static struct cont_t reset_cont={
+ reset_intr,
+ success_and_wakeup,
+ generic_failure,
+ generic_done };
+
+static int user_reset_fdc(int drive, int arg, int interruptible)
+{
+ int ret;
+
+ ret=0;
+ LOCK_FDC(drive,interruptible);
+ if (arg == FD_RESET_ALWAYS)
+ FDCS->reset=1;
+ if (FDCS->reset){
+ cont = &reset_cont;
+ WAIT(reset_fdc);
+ }
+ process_fd_request();
+ return ret;
+}
+
+/*
+ * Misc Ioctl's and support
+ * ========================
+ */
+static int fd_copyout(void *param, const void *address, int size)
+{
+ int ret;
+
+ ECALL(verify_area(VERIFY_WRITE,param,size));
+ memcpy_tofs(param,(void *) address, size);
+ return 0;
+}
+
+static int fd_copyin(void *param, void *address, int size)
+{
+ int ret;
+
+ ECALL(verify_area(VERIFY_READ,param,size));
+ memcpy_fromfs((void *) address, param, size);
+ return 0;
+}
+
+#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x)))
+#define COPYIN(x) ECALL(fd_copyin((void *)param, &(x), sizeof(x)))
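+/* COPYOUT/COPYIN move a whole object to or from the user pointer 'param';
+ * ECALL (defined earlier in this file) is relied on to return the error
+ * code from fd_copyout()/fd_copyin() on behalf of the caller. */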
+
+static inline const char *drive_name(int type, int drive)
+{
+ struct floppy_struct *floppy;
+
+ if (type)
+ floppy = floppy_type + type;
+ else {
+ if (UDP->native_format)
+ floppy = floppy_type + UDP->native_format;
+ else
+ return "(null)";
+ }
+ if (floppy->name)
+ return floppy->name;
+ else
+ return "(null)";
+}
+
+
+/* raw commands */
+static void raw_cmd_done(int flag)
+{
+ int i;
+
+ if (!flag) {
+ raw_cmd->flags |= FD_RAW_FAILURE;
+ raw_cmd->flags |= FD_RAW_HARDFAILURE;
+ } else {
+ raw_cmd->reply_count = inr;
+ for (i=0; i< raw_cmd->reply_count; i++)
+ raw_cmd->reply[i] = reply_buffer[i];
+
+ if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE))
+ raw_cmd->length = fd_get_dma_residue();
+
+ if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
+ (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
+ raw_cmd->flags |= FD_RAW_FAILURE;
+
+ if (disk_change(current_drive))
+ raw_cmd->flags |= FD_RAW_DISK_CHANGE;
+ else
+ raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
+ if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
+ motor_off_callback(current_drive);
+
+ if (raw_cmd->next &&
+ (!(raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
+ ((raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags &FD_RAW_STOP_IF_SUCCESS))) {
+ raw_cmd = raw_cmd->next;
+ return;
+ }
+ }
+ generic_done(flag);
+}
+
+
+static struct cont_t raw_cmd_cont={
+ success_and_wakeup,
+ floppy_start,
+ generic_failure,
+ raw_cmd_done
+};
+
+static inline int raw_cmd_copyout(int cmd, char *param,
+ struct floppy_raw_cmd *ptr)
+{
+ struct old_floppy_raw_cmd old_raw_cmd;
+ int ret;
+
+ while(ptr) {
+ if (cmd == OLDFDRAWCMD) {
+ old_raw_cmd.flags = ptr->flags;
+ old_raw_cmd.data = ptr->data;
+ old_raw_cmd.length = ptr->length;
+ old_raw_cmd.rate = ptr->rate;
+ old_raw_cmd.reply_count = ptr->reply_count;
+ memcpy(old_raw_cmd.reply, ptr->reply, 7);
+ COPYOUT(old_raw_cmd);
+ param += sizeof(old_raw_cmd);
+ } else {
+ COPYOUT(*ptr);
+ param += sizeof(struct floppy_raw_cmd);
+ }
+
+ if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length){
+ if (ptr->length>=0 && ptr->length<=ptr->buffer_length)
+ ECALL(fd_copyout(ptr->data,
+ ptr->kernel_data,
+ ptr->buffer_length -
+ ptr->length));
+ }
+ ptr = ptr->next;
+ }
+ return 0;
+}
+
+
+static void raw_cmd_free(struct floppy_raw_cmd **ptr)
+{
+ struct floppy_raw_cmd *next,*this;
+
+ this = *ptr;
+ *ptr = 0;
+ while(this) {
+ if (this->buffer_length) {
+ fd_dma_mem_free((unsigned long)this->kernel_data,
+ this->buffer_length);
+ this->buffer_length = 0;
+ }
+ next = this->next;
+ kfree(this);
+ this = next;
+ }
+}
+
+
+static inline int raw_cmd_copyin(int cmd, char *param,
+ struct floppy_raw_cmd **rcmd)
+{
+ struct floppy_raw_cmd *ptr;
+ struct old_floppy_raw_cmd old_raw_cmd;
+ int ret;
+ int i;
+
+ *rcmd = 0;
+ while(1) {
+ ptr = (struct floppy_raw_cmd *)
+ kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+ if (!ptr)
+ return -ENOMEM;
+ *rcmd = ptr;
+ if (cmd == OLDFDRAWCMD){
+ COPYIN(old_raw_cmd);
+ ptr->flags = old_raw_cmd.flags;
+ ptr->data = old_raw_cmd.data;
+ ptr->length = old_raw_cmd.length;
+ ptr->rate = old_raw_cmd.rate;
+ ptr->cmd_count = old_raw_cmd.cmd_count;
+ ptr->track = old_raw_cmd.track;
+ ptr->phys_length = 0;
+ ptr->next = 0;
+ ptr->buffer_length = 0;
+ memcpy(ptr->cmd, old_raw_cmd.cmd, 9);
+ param += sizeof(struct old_floppy_raw_cmd);
+ if (ptr->cmd_count > 9)
+ return -EINVAL;
+ } else {
+ COPYIN(*ptr);
+ ptr->next = 0;
+ ptr->buffer_length = 0;
+ param += sizeof(struct floppy_raw_cmd);
+ if (ptr->cmd_count > 33)
+ /* the command may now also take up the space
+ * initially intended for the reply & the
+ * reply count. Needed for long 82078 commands
+ * such as RESTORE, which takes ... 17 command
+ * bytes. Murphy's law #137: When you reserve
+ * 16 bytes for a structure, you'll one day
+ * discover that you really need 17...
+ */
+ return -EINVAL;
+ }
+
+ for (i=0; i< 16; i++)
+ ptr->reply[i] = 0;
+ ptr->resultcode = 0;
+ ptr->kernel_data = 0;
+
+ if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+ if (ptr->length <= 0)
+ return -EINVAL;
+ ptr->kernel_data =(char*)fd_dma_mem_alloc(ptr->length);
+ if (!ptr->kernel_data)
+ return -ENOMEM;
+ ptr->buffer_length = ptr->length;
+ }
+ if ( ptr->flags & FD_RAW_READ )
+ ECALL( verify_area( VERIFY_WRITE, ptr->data,
+ ptr->length ));
+ if (ptr->flags & FD_RAW_WRITE)
+ ECALL(fd_copyin(ptr->data, ptr->kernel_data,
+ ptr->length));
+ rcmd = & (ptr->next);
+ if (!(ptr->flags & FD_RAW_MORE))
+ return 0;
+ ptr->rate &= 0x43;
+ }
+}
+
+
+static int raw_cmd_ioctl(int cmd, void *param)
+{
+ int drive, ret, ret2;
+ struct floppy_raw_cmd *my_raw_cmd;
+
+ if (FDCS->rawcmd <= 1)
+ FDCS->rawcmd = 1;
+ for (drive= 0; drive < N_DRIVE; drive++){
+ if (FDC(drive) != fdc)
+ continue;
+ if (drive == current_drive){
+ if (UDRS->fd_ref > 1){
+ FDCS->rawcmd = 2;
+ break;
+ }
+ } else if (UDRS->fd_ref){
+ FDCS->rawcmd = 2;
+ break;
+ }
+ }
+
+ if (FDCS->reset)
+ return -EIO;
+
+ ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
+ if (ret) {
+ raw_cmd_free(&my_raw_cmd);
+ return ret;
+ }
+
+ raw_cmd = my_raw_cmd;
+ cont = &raw_cmd_cont;
+ ret=wait_til_done(floppy_start,1);
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from raw_cmd ioctl\n");
+ }
+#endif
+
+ if (ret != -EINTR && FDCS->reset)
+ ret = -EIO;
+
+ DRS->track = NO_TRACK;
+
+ ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
+ if (!ret)
+ ret = ret2;
+ raw_cmd_free(&my_raw_cmd);
+ return ret;
+}
+
+static int invalidate_drive(kdev_t rdev)
+{
+ /* invalidate the buffer track to force a reread */
+ set_bit(DRIVE(rdev), &fake_change);
+ process_fd_request();
+ check_disk_change(rdev);
+ return 0;
+}
+
+
+static inline void clear_write_error(int drive)
+{
+ CLEARSTRUCT(UDRWE);
+}
+
+static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+ int drive, int type, kdev_t device)
+{
+ int cnt;
+
+ /* sanity checking for parameters.*/
+ if (g->sect <= 0 ||
+ g->head <= 0 ||
+ g->track <= 0 ||
+ g->track > UDP->tracks>>STRETCH(g) ||
+ /* check if reserved bits are set */
+ (g->stretch&~(FD_STRETCH|FD_SWAPSIDES)) != 0)
+ return -EINVAL;
+ if (type){
+ if (!suser())
+ return -EPERM;
+ LOCK_FDC(drive,1);
+ for (cnt = 0; cnt < N_DRIVE; cnt++){
+ if (ITYPE(drive_state[cnt].fd_device) == type &&
+ drive_state[cnt].fd_ref)
+ set_bit(drive, &fake_change);
+ }
+ floppy_type[type] = *g;
+ floppy_type[type].name="user format";
+ for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
+ floppy_sizes[cnt]= floppy_sizes[cnt+0x80]=
+ floppy_type[type].size>>1;
+ process_fd_request();
+ for (cnt = 0; cnt < N_DRIVE; cnt++){
+ if (ITYPE(drive_state[cnt].fd_device) == type &&
+ drive_state[cnt].fd_ref)
+ check_disk_change(
+ MKDEV(FLOPPY_MAJOR,
+ drive_state[cnt].fd_device));
+ }
+ } else {
+ LOCK_FDC(drive,1);
+ if (cmd != FDDEFPRM)
+ /* notice a disk change immediately, else
+ * we lose our settings immediately */
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ user_params[drive] = *g;
+ if (buffer_drive == drive)
+ SUPBOUND(buffer_max, user_params[drive].sect);
+ current_type[drive] = &user_params[drive];
+ floppy_sizes[drive] = user_params[drive].size >> 1;
+ if (cmd == FDDEFPRM)
+ DRS->keep_data = -1;
+ else
+ DRS->keep_data = 1;
+ /* invalidation. Invalidate only when needed, i.e.
+ * when there are already sectors in the buffer cache
+ * whose number will change. This is useful, because
+ * mtools often changes the geometry of the disk after
+ * looking at the boot block */
+ if (DRS->maxblock > user_params[drive].sect || DRS->maxtrack)
+ invalidate_drive(device);
+ else
+ process_fd_request();
+ }
+ return 0;
+}
+
+/* handle obsolete ioctls */
+static struct translation_entry {
+ int newcmd;
+ int oldcmd;
+ int oldsize; /* size of 0x00xx-style ioctl. Reflects old structures, thus
+ * use numeric values. NO SIZEOFS */
+} translation_table[]= {
+ {FDCLRPRM, 0, 0},
+ {FDSETPRM, 1, 28},
+ {FDDEFPRM, 2, 28},
+ {FDGETPRM, 3, 28},
+ {FDMSGON, 4, 0},
+ {FDMSGOFF, 5, 0},
+ {FDFMTBEG, 6, 0},
+ {FDFMTTRK, 7, 12},
+ {FDFMTEND, 8, 0},
+ {FDSETEMSGTRESH, 10, 0},
+ {FDFLUSH, 11, 0},
+ {FDSETMAXERRS, 12, 20},
+ {OLDFDRAWCMD, 30, 0},
+ {FDGETMAXERRS, 14, 20},
+ {FDGETDRVTYP, 16, 16},
+ {FDSETDRVPRM, 20, 88},
+ {FDGETDRVPRM, 21, 88},
+ {FDGETDRVSTAT, 22, 52},
+ {FDPOLLDRVSTAT, 23, 52},
+ {FDRESET, 24, 0},
+ {FDGETFDCSTAT, 25, 40},
+ {FDWERRORCLR, 27, 0},
+ {FDWERRORGET, 28, 24},
+ {FDRAWCMD, 0, 0},
+ {FDEJECT, 0, 0},
+ {FDTWADDLE, 40, 0} };
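+/* normalize_0x02xx_ioctl() below matches a new-style command against the low
+ * 16 bits of newcmd and takes the transfer size from the ioctl encoding;
+ * xlate_0x00xx_ioctl() matches the pre-1.3.34 numeric codes against oldcmd
+ * and uses oldsize. */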
+
+static inline int normalize_0x02xx_ioctl(int *cmd, int *size)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+ if ((*cmd & 0xffff) == (translation_table[i].newcmd & 0xffff)){
+ *size = _IOC_SIZE(*cmd);
+ *cmd = translation_table[i].newcmd;
+ if (*size > _IOC_SIZE(*cmd)) {
+ printk("ioctl not yet supported\n");
+ return -EFAULT;
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static inline int xlate_0x00xx_ioctl(int *cmd, int *size)
+{
+ int i;
+ /* old ioctls for kernels <= 1.3.33 */
+ /* When the next even release comes around, we'll start
+ * warning against these.
+ * When the next odd release comes around, we'll fail with
+ * -EINVAL */
+ if(strcmp(system_utsname.version, "1.4.0") >= 0)
+ printk("obsolete floppy ioctl %x\n", *cmd);
+ if((system_utsname.version[0] == '1' &&
+ strcmp(system_utsname.version, "1.5.0") >= 0) ||
+ (system_utsname.version[0] >= '2' &&
+ strcmp(system_utsname.version, "2.1.0") >= 0))
+ return -EINVAL;
+ for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+ if (*cmd == translation_table[i].oldcmd) {
+ *size = translation_table[i].oldsize;
+ *cmd = translation_table[i].newcmd;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long param)
+{
+#define IOCTL_MODE_BIT 8
+#define OPEN_WRITE_BIT 16
+#define IOCTL_ALLOWED (filp && (filp->f_mode & IOCTL_MODE_BIT))
+#define OUT(c,x) case c: outparam = (const char *) (x); break
+#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
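+/* OUT(c,x): for command c, point outparam at x; the data is copied back to
+ * user space by the common fd_copyout() call at the end of fd_ioctl().
+ * IN(c,x,tag): for command c, store the 'tag' member of the already
+ * copied-in inparam into *x and return success. */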
+
+ int i,drive,type;
+ kdev_t device;
+ int ret;
+ int size;
+ union inparam {
+ struct floppy_struct g; /* geometry */
+ struct format_descr f;
+ struct floppy_max_errors max_errors;
+ struct floppy_drive_params dp;
+ } inparam; /* parameters coming from user space */
+ const char *outparam; /* parameters passed back to user space */
+
+ device = inode->i_rdev;
+ switch (cmd) {
+ RO_IOCTLS(device,param);
+ }
+ type = TYPE(device);
+ drive = DRIVE(device);
+
+ /* convert compatibility eject ioctls into floppy eject ioctl.
+ * We do this in order to provide a means to eject floppy disks before
+ * installing the new fdutils package */
+ if(cmd == CDROMEJECT || /* CD-ROM eject */
+ cmd == 0x6470 /* SunOS floppy eject */) {
+ DPRINT("obsolete eject ioctl\n");
+ DPRINT("please use floppycontrol --eject\n");
+ cmd = FDEJECT;
+ }
+
+ /* convert the old style command into a new style command */
+ if ((cmd & 0xff00) == 0x0200) {
+ ECALL(normalize_0x02xx_ioctl(&cmd, &size));
+ } else if ((cmd & 0xff00) == 0x0000) {
+ ECALL(xlate_0x00xx_ioctl(&cmd, &size));
+ } else
+ return -EINVAL;
+
+ /* permission checks */
+ if (((cmd & 0x80) && !suser()) ||
+ ((cmd & 0x40) && !IOCTL_ALLOWED))
+ return -EPERM;
+
+ /* verify writability of result, and fail early */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ ECALL(verify_area(VERIFY_WRITE,(void *) param, size));
+
+ /* copyin */
+ CLEARSTRUCT(&inparam);
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ ECALL(fd_copyin((void *)param, &inparam, size))
+
+ switch (cmd) {
+ case FDEJECT:
+ if(UDRS->fd_ref != 1)
+ /* somebody else has this drive open */
+ return -EBUSY;
+ LOCK_FDC(drive,1);
+
+ /* do the actual eject. Fails on
+ * non-Sparc architectures */
+ ret=fd_eject(UNIT(drive));
+
+ USETF(FD_DISK_CHANGED);
+ USETF(FD_VERIFY);
+ process_fd_request();
+ return ret;
+ case FDCLRPRM:
+ LOCK_FDC(drive,1);
+ current_type[drive] = NULL;
+ floppy_sizes[drive] = MAX_DISK_SIZE;
+ UDRS->keep_data = 0;
+ return invalidate_drive(device);
+ case FDSETPRM:
+ case FDDEFPRM:
+ return set_geometry(cmd, & inparam.g,
+ drive, type, device);
+ case FDGETPRM:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1,0));
+ process_fd_request();
+ if (type)
+ outparam = (char *) &floppy_type[type];
+ else
+ outparam = (char *) current_type[drive];
+ if(!outparam)
+ return -ENODEV;
+ break;
+
+ case FDMSGON:
+ UDP->flags |= FTD_MSG;
+ return 0;
+ case FDMSGOFF:
+ UDP->flags &= ~FTD_MSG;
+ return 0;
+
+ case FDFMTBEG:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ ret = UDRS->flags;
+ process_fd_request();
+ if(ret & FD_VERIFY)
+ return -ENODEV;
+ if(!(ret & FD_DISK_WRITABLE))
+ return -EROFS;
+ return 0;
+ case FDFMTTRK:
+ if (UDRS->fd_ref != 1)
+ return -EBUSY;
+ return do_format(device, &inparam.f);
+ case FDFMTEND:
+ case FDFLUSH:
+ LOCK_FDC(drive,1);
+ return invalidate_drive(device);
+
+ case FDSETEMSGTRESH:
+ UDP->max_errors.reporting =
+ (unsigned short) (param & 0x0f);
+ return 0;
+ OUT(FDGETMAXERRS, &UDP->max_errors);
+ IN(FDSETMAXERRS, &UDP->max_errors, max_errors);
+
+ case FDGETDRVTYP:
+ outparam = drive_name(type,drive);
+ SUPBOUND(size,strlen(outparam)+1);
+ break;
+
+ IN(FDSETDRVPRM, UDP, dp);
+ OUT(FDGETDRVPRM, UDP);
+
+ case FDPOLLDRVSTAT:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ process_fd_request();
+ /* fall through */
+ OUT(FDGETDRVSTAT, UDRS);
+
+ case FDRESET:
+ return user_reset_fdc(drive, (int)param, 1);
+
+ OUT(FDGETFDCSTAT,UFDCS);
+
+ case FDWERRORCLR:
+ CLEARSTRUCT(UDRWE);
+ return 0;
+ OUT(FDWERRORGET,UDRWE);
+
+ case OLDFDRAWCMD:
+ case FDRAWCMD:
+ if (type)
+ return -EINVAL;
+ LOCK_FDC(drive,1);
+ set_floppy(device);
+ CALL(i = raw_cmd_ioctl(cmd,(void *) param));
+ process_fd_request();
+ return i;
+
+ case FDTWADDLE:
+ LOCK_FDC(drive,1);
+ twaddle();
+ process_fd_request();
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ return fd_copyout((void *)param, outparam, size);
+ else
+ return 0;
+#undef IOCTL_ALLOWED
+#undef OUT
+#undef IN
+}
+
+static void config_types(void)
+{
+ int first=1;
+ int drive;
+
+ /* read drive info out of physical CMOS */
+ drive=0;
+ if (!UDP->cmos)
+ UDP->cmos= FLOPPY0_TYPE;
+ drive=1;
+ if (!UDP->cmos && FLOPPY1_TYPE)
+ UDP->cmos = FLOPPY1_TYPE;
+
+ /* XXX */
+ /* additional physical CMOS drive detection should go here */
+
+ for (drive=0; drive < N_DRIVE; drive++){
+ if (UDP->cmos >= 16)
+ UDP->cmos = 0;
+ if (UDP->cmos >= 0 && UDP->cmos < NUMBER(default_drive_params))
+ memcpy((char *) UDP,
+ (char *) (&default_drive_params[(int)UDP->cmos].params),
+ sizeof(struct floppy_drive_params));
+ if (UDP->cmos){
+ if (first)
+ printk(KERN_INFO "Floppy drive(s): ");
+ else
+ printk(", ");
+ first=0;
+ if (UDP->cmos > 0){
+ allowed_drive_mask |= 1 << drive;
+ printk("fd%d is %s", drive,
+ default_drive_params[(int)UDP->cmos].name);
+ } else
+ printk("fd%d is unknown type %d",drive,
+ UDP->cmos);
+ }
+ else
+ allowed_drive_mask &= ~(1 << drive);
+ }
+ if (!first)
+ printk("\n");
+}
+
+static int floppy_read(struct inode * inode, struct file * filp,
+ char * buf, int count)
+{
+ int drive = DRIVE(inode->i_rdev);
+
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ return block_read(inode, filp, buf, count);
+}
+
+static int floppy_write(struct inode * inode, struct file * filp,
+ const char * buf, int count)
+{
+ int block;
+ int ret;
+ int drive = DRIVE(inode->i_rdev);
+
+ if (!UDRS->maxblock)
+ UDRS->maxblock=1;/* make change detectable */
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ if (!UTESTF(FD_DISK_WRITABLE))
+ return -EROFS;
+ block = (filp->f_pos + count) >> 9;
+ INFBOUND(UDRS->maxblock, block);
+ ret= block_write(inode, filp, buf, count);
+ return ret;
+}
+
+static void floppy_release(struct inode * inode, struct file * filp)
+{
+ int drive;
+
+ drive = DRIVE(inode->i_rdev);
+
+ if (!filp || (filp->f_mode & (2 | OPEN_WRITE_BIT)))
+ /* if the file is mounted OR (writable now AND writable at
+ * open time) Linus: Does this cover all cases? */
+ block_fsync(inode,filp);
+
+ if (UDRS->fd_ref < 0)
+ UDRS->fd_ref=0;
+ else if (!UDRS->fd_ref--) {
+ DPRINT("floppy_release with fd_ref == 0");
+ UDRS->fd_ref = 0;
+ }
+ floppy_release_irq_and_dma();
+}
+
+/*
+ * floppy_open checks for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc.) and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+#define RETERR(x) do{floppy_release(inode,filp); return -(x);}while(0)
+
+static int floppy_open(struct inode * inode, struct file * filp)
+{
+ int drive;
+ int old_dev;
+ int try;
+ char *tmp;
+
+ if (!filp) {
+ DPRINT("Weird, open called with filp=0\n");
+ return -EIO;
+ }
+
+ drive = DRIVE(inode->i_rdev);
+ if (drive >= N_DRIVE ||
+ !(allowed_drive_mask & (1 << drive)) ||
+ fdc_state[FDC(drive)].version == FDC_NONE)
+ return -ENXIO;
+
+ if (TYPE(inode->i_rdev) >= NUMBER(floppy_type))
+ return -ENXIO;
+ old_dev = UDRS->fd_device;
+ if (UDRS->fd_ref && old_dev != MINOR(inode->i_rdev))
+ return -EBUSY;
+
+ if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)){
+ USETF(FD_DISK_CHANGED);
+ USETF(FD_VERIFY);
+ }
+
+ if (UDRS->fd_ref == -1 ||
+ (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
+ return -EBUSY;
+
+ if (floppy_grab_irq_and_dma())
+ return -EBUSY;
+
+ if (filp->f_flags & O_EXCL)
+ UDRS->fd_ref = -1;
+ else
+ UDRS->fd_ref++;
+
+ if (!floppy_track_buffer){
+ /* if opening an ED drive, reserve a big buffer,
+ * else reserve a small one */
+ if ((UDP->cmos == 6) || (UDP->cmos == 5))
+ try = 64; /* Only 48 actually useful */
+ else
+ try = 32; /* Only 24 actually useful */
+
+ tmp=(char *)fd_dma_mem_alloc(1024 * try);
+ if (!tmp) {
+ try >>= 1; /* buffer only one side */
+ INFBOUND(try, 16);
+ tmp= (char *)fd_dma_mem_alloc(1024*try);
+ }
+ if (!tmp) {
+ DPRINT("Unable to allocate DMA memory\n");
+ RETERR(ENXIO);
+ }
+ if (floppy_track_buffer)
+ fd_dma_mem_free((unsigned long)tmp,try*1024);
+ else {
+ buffer_min = buffer_max = -1;
+ floppy_track_buffer = tmp;
+ max_buffer_sectors = try;
+ }
+ }
+
+ UDRS->fd_device = MINOR(inode->i_rdev);
+ if (old_dev != -1 && old_dev != MINOR(inode->i_rdev)) {
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ invalidate_buffers(MKDEV(FLOPPY_MAJOR,old_dev));
+ }
+
+ /* Allow ioctls if we have write-permissions even if read-only open */
+ if ((filp->f_mode & 2) || (permission(inode,2) == 0))
+ filp->f_mode |= IOCTL_MODE_BIT;
+ if (filp->f_mode & 2)
+ filp->f_mode |= OPEN_WRITE_BIT;
+
+ if (UFDCS->rawcmd == 1)
+ UFDCS->rawcmd = 2;
+
+ if (filp->f_flags & O_NDELAY)
+ return 0;
+ if (filp->f_mode & 3) {
+ UDRS->last_checked = 0;
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ RETERR(ENXIO);
+ }
+ if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
+ RETERR(EROFS);
+ return 0;
+#undef RETERR
+}
+
+/*
+ * Check if the disk has been changed or if a change has been faked.
+ */
+static int check_floppy_change(kdev_t dev)
+{
+ int drive = DRIVE(dev);
+
+ if (MAJOR(dev) != MAJOR_NR) {
+ DPRINT("check_floppy_change: not a floppy\n");
+ return 0;
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
+ return 1;
+
+ if (UDP->checkfreq < jiffies - UDRS->last_checked){
+ lock_fdc(drive,0);
+ poll_drive(0,0);
+ process_fd_request();
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ (!TYPE(dev) && !current_type[drive]))
+ return 1;
+ return 0;
+}
+
+/* revalidate the floppy disk, i.e. trigger format autodetection by reading
+ * the bootblock (block 0). "Autodetection" is also needed to check whether
+ * there is a disk in the drive at all... Thus we also do it for fixed
+ * geometry formats */
+static int floppy_revalidate(kdev_t dev)
+{
+#define NO_GEOM (!current_type[drive] && !TYPE(dev))
+ struct buffer_head * bh;
+ int drive=DRIVE(dev);
+ int cf;
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ NO_GEOM){
+ lock_fdc(drive,0);
+ cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY);
+ if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)){
+ process_fd_request(); /*already done by another thread*/
+ return 0;
+ }
+ UDRS->maxblock = 0;
+ UDRS->maxtrack = 0;
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ clear_bit(drive, &fake_change);
+ UCLEARF(FD_DISK_CHANGED);
+ if (cf)
+ UDRS->generation++;
+ if (NO_GEOM){
+ /* auto-sensing */
+ int size = floppy_blocksizes[MINOR(dev)];
+ if (!size)
+ size = 1024;
+ if (!(bh = getblk(dev,0,size))){
+ process_fd_request();
+ return 1;
+ }
+ if (bh && !buffer_uptodate(bh))
+ ll_rw_block(READ, 1, &bh, 1);
+ process_fd_request();
+ wait_on_buffer(bh);
+ brelse(bh);
+ return 0;
+ }
+ if (cf)
+ poll_drive(0, FD_RAW_NEED_DISK);
+ process_fd_request();
+ }
+ return 0;
+}
+
+static struct file_operations floppy_fops = {
+ NULL, /* lseek - default */
+ floppy_read, /* read - general block-dev read */
+ floppy_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ fd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ floppy_open, /* open */
+ floppy_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_floppy_change, /* media_change */
+ floppy_revalidate, /* revalidate */
+};
+
+/*
+ * Floppy Driver initialization
+ * =============================
+ */
+
+/* Determine the floppy disk controller type */
+/* This routine was written by David C. Niemi */
+static char get_fdc_version(void)
+{
+ int r;
+
+ output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
+ if (FDCS->reset)
+ return FDC_NONE;
+ if ((r = result()) <= 0x00)
+ return FDC_NONE; /* No FDC present ??? */
+ if ((r==1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is an 8272A\n",fdc);
+ return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
+ }
+ if (r != 10) {
+ printk("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+
+ if(!fdc_configure()) {
+ printk(KERN_INFO "FDC %d is an 82072\n",fdc);
+ return FDC_82072; /* 82072 doesn't know CONFIGURE */
+ }
+
+ output_byte(FD_PERPENDICULAR);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(0);
+ } else {
+ printk(KERN_INFO "FDC %d is an 82072A\n", fdc);
+ return FDC_82072A; /* 82072A as found on Sparcs. */
+ }
+
+ output_byte(FD_UNLOCK);
+ r = result();
+ if ((r == 1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc);
+ return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
+ * LOCK/UNLOCK */
+ }
+ if ((r != 1) || (reply_buffer[0] != 0x00)) {
+ printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_PARTID);
+ r = result();
+ if (r != 1) {
+ printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ if (reply_buffer[0] == 0x80) {
+ printk(KERN_INFO "FDC %d is a post-1991 82077\n",fdc);
+ return FDC_82077; /* Revised 82077AA passes all the tests */
+ }
+ switch (reply_buffer[0] >> 5) {
+ case 0x0:
+ /* Either a 82078-1 or a 82078SL running at 5Volt */
+ printk(KERN_INFO "FDC %d is an 82078.\n",fdc);
+ return FDC_82078;
+ case 0x1:
+ printk(KERN_INFO "FDC %d is a 44pin 82078\n",fdc);
+ return FDC_82078;
+ case 0x2:
+ printk(KERN_INFO "FDC %d is a S82078B\n", fdc);
+ return FDC_S82078B;
+ case 0x3:
+ printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n", fdc);
+ return FDC_87306;
+ default:
+ printk(KERN_INFO "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
+ fdc, reply_buffer[0] >> 5);
+ return FDC_82078_UNKN;
+ }
+} /* get_fdc_version */
+
+/* lilo configuration */
+
+/* we make the invert_dcl function global. One day, somebody might
+ * want to centralize all thinkpad-related options into one lilo option;
+ * there are just so many thinkpad-related quirks! */
+void floppy_invert_dcl(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param)
+ default_drive_params[i].params.flags |= 0x80;
+ else
+ default_drive_params[i].params.flags &= ~0x80;
+ }
+ DPRINT("Configuring drives for inverted dcl\n");
+}
+
+static void daring(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param){
+ default_drive_params[i].params.select_delay = 0;
+ default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR;
+ } else {
+ default_drive_params[i].params.select_delay = 2*HZ/100;
+ default_drive_params[i].params.flags &= ~FD_SILENT_DCL_CLEAR;
+ }
+ }
+ DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
+}
+
+static void set_cmos(int *ints, int dummy)
+{
+ int current_drive=0;
+
+ if (ints[0] != 2){
+ DPRINT("wrong number of parameter for cmos\n");
+ return;
+ }
+ current_drive = ints[1];
+ if (current_drive < 0 || current_drive >= 8){
+ DPRINT("bad drive for set_cmos\n");
+ return;
+ }
+ if (current_drive >= 4 && !FDC2)
+ FDC2 = 0x370;
+ if (ints[2] <= 0 ||
+ (ints[2] >= NUMBER(default_drive_params) && ints[2] != 16)){
+ DPRINT("bad cmos code %d\n", ints[2]);
+ return;
+ }
+ DP->cmos = ints[2];
+ DPRINT("setting cmos code to %d\n", ints[2]);
+}
+
+static struct param_table {
+ const char *name;
+ void (*fn)(int *ints, int param);
+ int *var;
+ int def_param;
+} config_params[]={
+ { "allowed_drive_mask", 0, &allowed_drive_mask, 0xff },
+ { "all_drives", 0, &allowed_drive_mask, 0xff },
+ { "asus_pci", 0, &allowed_drive_mask, 0x33 },
+
+ { "daring", daring, 0, 1},
+
+ { "two_fdc", 0, &FDC2, 0x370 },
+ { "one_fdc", 0, &FDC2, 0 },
+
+ { "thinkpad", floppy_invert_dcl, 0, 1 },
+
+ { "nodma", 0, &use_virtual_dma, 1 },
+ { "omnibook", 0, &use_virtual_dma, 1 },
+ { "dma", 0, &use_virtual_dma, 0 },
+
+ { "fifo_depth", 0, &fifo_depth, 0xa },
+ { "nofifo", 0, &no_fifo, 0x20 },
+ { "usefifo", 0, &no_fifo, 0 },
+
+ { "cmos", set_cmos, 0, 0 },
+
+ { "unexpected_interrupts", 0, &print_unex, 1 },
+ { "no_unexpected_interrupts", 0, &print_unex, 0 },
+ { "L40SX", 0, &print_unex, 0 } };
+
+#define FLOPPY_SETUP
+void floppy_setup(char *str, int *ints)
+{
+ int i;
+ int param;
+ if (str)
+ for (i=0; i< ARRAY_SIZE(config_params); i++){
+ if (strcmp(str,config_params[i].name) == 0){
+ if (ints[0])
+ param = ints[1];
+ else
+ param = config_params[i].def_param;
+ if(config_params[i].fn)
+ config_params[i].fn(ints,param);
+ if(config_params[i].var) {
+ DPRINT("%s=%d\n", str, param);
+ *config_params[i].var = param;
+ }
+ return;
+ }
+ }
+ if (str) {
+ DPRINT("unknown floppy option [%s]\n", str);
+
+ DPRINT("allowed options are:");
+ for (i=0; i< ARRAY_SIZE(config_params); i++)
+ printk(" %s",config_params[i].name);
+ printk("\n");
+ } else
+ DPRINT("botched floppy option\n");
+ DPRINT("Read linux/drivers/block/README.fd\n");
+}
+
+int floppy_init(void)
+{
+ int i,unit,drive;
+ int have_no_fdc= -EIO;
+
+ raw_cmd = 0;
+
+ if (register_blkdev(MAJOR_NR,"fd",&floppy_fops)) {
+ printk("Unable to get major %d for floppy\n",MAJOR_NR);
+ return -EBUSY;
+ }
+
+ for (i=0; i<256; i++)
+ if (ITYPE(i))
+ floppy_sizes[i] = floppy_type[ITYPE(i)].size >> 1;
+ else
+ floppy_sizes[i] = MAX_DISK_SIZE;
+
+ blk_size[MAJOR_NR] = floppy_sizes;
+ blksize_size[MAJOR_NR] = floppy_blocksizes;
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
+ config_types();
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ CLEARSTRUCT(FDCS);
+ FDCS->dtr = -1;
+ FDCS->dor = 0x4;
+#ifdef __sparc__
+ /* sparcs don't have a DOR reset which we can fall back on */
+ FDCS->version = FDC_82072A;
+#endif
+ }
+
+ fdc_state[0].address = FDC1;
+#if N_FDC > 1
+ fdc_state[1].address = FDC2;
+#endif
+
+ if (floppy_grab_irq_and_dma()){
+ del_timer(&fd_timeout);
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ return -EBUSY;
+ }
+
+ /* initialise drive state */
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ CLEARSTRUCT(UDRS);
+ CLEARSTRUCT(UDRWE);
+ UDRS->flags = FD_VERIFY | FD_DISK_NEWCHANGE | FD_DISK_CHANGED;
+ UDRS->fd_device = -1;
+ floppy_track_buffer = NULL;
+ max_buffer_sectors = 0;
+ }
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ FDCS->driver_version = FD_DRIVER_VERSION;
+ for (unit=0; unit<4; unit++)
+ FDCS->track[unit] = 0;
+ if (FDCS->address == -1)
+ continue;
+ FDCS->rawcmd = 2;
+ if (user_reset_fdc(-1,FD_RESET_ALWAYS,0)){
+ FDCS->address = -1;
+ FDCS->version = FDC_NONE;
+ continue;
+ }
+ /* Try to determine the floppy controller type */
+ FDCS->version = get_fdc_version();
+ if (FDCS->version == FDC_NONE){
+ FDCS->address = -1;
+ continue;
+ }
+
+ request_region(FDCS->address, 6, "floppy");
+ request_region(FDCS->address+7, 1, "floppy DIR");
+ /* address + 6 is reserved, and may be taken by IDE.
+ * Unfortunately, Adaptec doesn't know this :-( */
+
+ have_no_fdc = 0;
+ /* Not all FDCs seem to be able to handle the version command
+ * properly, so force a reset for the standard FDC clones,
+ * to avoid interrupt garbage.
+ */
+ user_reset_fdc(-1,FD_RESET_ALWAYS,0);
+ }
+ fdc=0;
+ del_timer(&fd_timeout);
+ current_drive = 0;
+ floppy_release_irq_and_dma();
+ initialising=0;
+ if (have_no_fdc) {
+ DPRINT("no floppy controllers found\n");
+ request_tq.routine = (void *)(void *) empty;
+ /*
+ * When we return we may be unloaded. This little
+ * trick forces the immediate_bh handler to have run
+ * before we unload it, lest we cause bad things.
+ */
+ mark_bh(IMMEDIATE_BH);
+ schedule();
+ if (usage_count)
+ floppy_release_irq_and_dma();
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ }
+ return have_no_fdc;
+}
+
+static int floppy_grab_irq_and_dma(void)
+{
+ int i;
+ unsigned long flags;
+
+ INT_OFF;
+ if (usage_count++){
+ INT_ON;
+ return 0;
+ }
+ INT_ON;
+ MOD_INC_USE_COUNT;
+ for (i=0; i< N_FDC; i++){
+ if (fdc_state[i].address != -1){
+ fdc = i;
+ reset_fdc_info(1);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+ }
+ fdc = 0;
+ set_dor(0, ~0, 8); /* avoid immediate interrupt */
+
+ if (fd_request_irq()) {
+ DPRINT("Unable to grab IRQ%d for the floppy driver\n",
+ FLOPPY_IRQ);
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ if (fd_request_dma()) {
+ DPRINT("Unable to grab DMA%d for the floppy driver\n",
+ FLOPPY_DMA);
+ fd_free_irq();
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ fd_outb(FDCS->dor, FD_DOR);
+ fdc = 0;
+ fd_enable_irq();
+ irqdma_allocated=1;
+ return 0;
+}
+
+static void floppy_release_irq_and_dma(void)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ int drive;
+#endif
+ long tmpsize;
+ unsigned long tmpaddr;
+ unsigned long flags;
+
+ INT_OFF;
+ if (--usage_count){
+ INT_ON;
+ return;
+ }
+ INT_ON;
+ if(irqdma_allocated)
+ {
+ fd_disable_dma();
+ fd_free_dma();
+ fd_disable_irq();
+ fd_free_irq();
+ irqdma_allocated=0;
+ }
+
+ set_dor(0, ~0, 8);
+#if N_FDC > 1
+ set_dor(1, ~8, 0);
+#endif
+ floppy_enable_hlt();
+
+ if (floppy_track_buffer && max_buffer_sectors) {
+ tmpsize = max_buffer_sectors*1024;
+ tmpaddr = (unsigned long)floppy_track_buffer;
+ floppy_track_buffer = 0;
+ max_buffer_sectors = 0;
+ buffer_min = buffer_max = -1;
+ fd_dma_mem_free(tmpaddr, tmpsize);
+ }
+
+#ifdef FLOPPY_SANITY_CHECK
+#ifndef __sparc__
+ for (drive=0; drive < N_FDC * 4; drive++)
+ if (motor_off_timer[drive].next)
+ printk("motor off timer %d still active\n", drive);
+#endif
+
+ if (fd_timeout.next)
+ printk("floppy timer still active:%s\n", timeout_message);
+ if (fd_timer.next)
+ printk("auxiliary floppy timer still active\n");
+ if (floppy_tq.sync)
+ printk("task queue still active\n");
+#endif
+ MOD_DEC_USE_COUNT;
+}
+
+
+#ifdef MODULE
+
+char *floppy=NULL;
+
+static void parse_floppy_cfg_string(char *cfg)
+{
+ char *ptr;
+ int ints[11];
+
+ while(*cfg) {
+ for(ptr = cfg;*cfg && *cfg != ' ' && *cfg != '\t'; cfg++);
+ if(*cfg) {
+ *cfg = '\0';
+ cfg++;
+ }
+ if(*ptr)
+ floppy_setup(get_options(ptr,ints),ints);
+ }
+}
+
+static void mod_setup(char *pattern, void (*setup)(char *, int *))
+{
+ unsigned long i;
+ char c;
+ int j;
+ int match;
+ char buffer[100];
+ int ints[11];
+ int length = strlen(pattern)+1;
+
+ match=0;
+ j=1;
+
+ for (i=current->mm->env_start; i< current->mm->env_end; i ++){
+ c= get_fs_byte(i);
+ if (match){
+ if (j==99)
+ c='\0';
+ buffer[j] = c;
+ if (!c || c == ' ' || c == '\t'){
+ if (j){
+ buffer[j] = '\0';
+ setup(get_options(buffer,ints),ints);
+ }
+ j=0;
+ } else
+ j++;
+ if (!c)
+ break;
+ continue;
+ }
+ if ((!j && !c) || (j && c == pattern[j-1]))
+ j++;
+ else
+ j=0;
+ if (j==length){
+ match=1;
+ j=0;
+ }
+ }
+}
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+int init_module(void)
+{
+ printk(KERN_INFO "inserting floppy driver for %s\n", kernel_version);
+
+ if(floppy)
+ parse_floppy_cfg_string(floppy);
+ else
+ mod_setup("floppy=", floppy_setup);
+
+ return floppy_init();
+}
+
+void cleanup_module(void)
+{
+ int fdc, dummy;
+
+ for (fdc=0; fdc<2; fdc++)
+ if (FDCS->address != -1){
+ release_region(FDCS->address, 6);
+ release_region(FDCS->address+7, 1);
+ }
+
+ unregister_blkdev(MAJOR_NR, "fd");
+
+ blk_dev[MAJOR_NR].request_fn = 0;
+ /* eject disk, if any */
+ dummy = fd_eject(0);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#else
+/* eject the boot floppy (if we need the drive for a different root floppy) */
+/* This should only be called at boot time when we're sure that there's no
+ * resource contention. */
+void floppy_eject(void)
+{
+ if(floppy_grab_irq_and_dma()==0)
+ {
+ lock_fdc(MAXTIMEOUT,0);
+ fd_eject(0);
+ process_fd_request();
+ floppy_release_irq_and_dma();
+ }
+}
+#endif
diff --git a/linux/dev/drivers/block/genhd.c b/linux/dev/drivers/block/genhd.c
new file mode 100644
index 0000000..903135c
--- /dev/null
+++ b/linux/dev/drivers/block/genhd.c
@@ -0,0 +1,1080 @@
+/*
+ * Code extracted from
+ * linux/kernel/hd.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *
+ * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * in the early extended-partition checks and added DM partitions
+ *
+ * Support for DiskManager v6.0x added by Mark Lord,
+ * with information provided by OnTrack. This now works for linux fdisk
+ * and LILO, as well as loadlin and bootln. Note that disks other than
+ * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ * More flexible handling of extended partitions - aeb, 950831
+ *
+ * Check partition table on IDE disks for common CHS translations
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+#include <linux/hdreg.h>
+#include <alloca.h>
+#ifdef CONFIG_GPT_DISKLABEL
+#include <linux/blkdev.h>
+#include <kern/kalloc.h>
+#include <stddef.h>
+#endif
+
+#include <asm/system.h>
+
+/*
+ * Many architectures don't like unaligned accesses, which is
+ * frequently the case with the nr_sects and start_sect partition
+ * table entries.
+ */
+#include <asm/unaligned.h>
+
+#ifdef MACH
+#include <machine/spl.h>
+#include <linux/dev/glue/glue.h>
+#endif
+
+#define SYS_IND(p) get_unaligned(&p->sys_ind)
+#define NR_SECTS(p) get_unaligned(&p->nr_sects)
+#define START_SECT(p) get_unaligned(&p->start_sect)
+
+
+struct gendisk *gendisk_head = NULL;
+
+static int current_minor = 0;
+extern int *blk_size[];
+extern void rd_load(void);
+extern void initrd_load(void);
+
+extern int chr_dev_init(void);
+extern int blk_dev_init(void);
+extern int scsi_dev_init(void);
+extern int net_dev_init(void);
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the device name of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf)
+{
+ unsigned int part;
+ const char *maj = hd->major_name;
+#ifdef MACH
+ char unit = (minor >> hd->minor_shift) + '0';
+#else
+ char unit = (minor >> hd->minor_shift) + 'a';
+#endif
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /*
+ * IDE devices use multiple major numbers, but the drives
+ * are named as: {hda,hdb}, {hdc,hdd}, {hde,hdf}, {hdg,hdh}..
+ * This requires special handling here.
+ */
+ switch (hd->major) {
+ case IDE3_MAJOR:
+ unit += 2;
+ case IDE2_MAJOR:
+ unit += 2;
+ case IDE1_MAJOR:
+ unit += 2;
+ case IDE0_MAJOR:
+ maj = "hd";
+ }
+#endif
+ part = minor & ((1 << hd->minor_shift) - 1);
+ if (part)
+#ifdef MACH
+ sprintf(buf, "%s%cs%d", maj, unit, part);
+#else
+ sprintf(buf, "%s%c%d", maj, unit, part);
+#endif
+ else
+ sprintf(buf, "%s%c", maj, unit);
+ return buf;
+}
+
+static void add_partition (struct gendisk *hd, int minor, int start, int size)
+{
+ char buf[8];
+ hd->part[minor].start_sect = start;
+ hd->part[minor].nr_sects = size;
+ printk(" %s", disk_name(hd, minor, buf));
+}
+
+#if defined (MACH) && defined (CONFIG_BSD_DISKLABEL)
+static int mach_minor;
+static void
+add_bsd_partition (struct gendisk *hd, int minor, int slice,
+ int start, int size)
+{
+ char buf[16];
+ hd->part[minor].start_sect = start;
+ hd->part[minor].nr_sects = size;
+ printk (" %s%c", disk_name (hd, mach_minor, buf), slice);
+}
+#endif
+
+static inline int is_extended_partition(struct partition *p)
+{
+ return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+ SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+ SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+}
+
+#ifdef CONFIG_MSDOS_PARTITION
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries. The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start). The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+static void extended_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned long first_sector, first_size, this_sector, this_size;
+ int mask = (1 << hd->minor_shift) - 1;
+ int i;
+
+ first_sector = hd->part[MINOR(dev)].start_sect;
+ first_size = hd->part[MINOR(dev)].nr_sects;
+ this_sector = first_sector;
+
+ while (1) {
+ if ((current_minor & mask) == 0)
+ return;
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ /*
+ * This block is from a device that we're about to stomp on.
+ * So make sure nobody thinks this block is usable.
+ */
+ bh->b_state = 0;
+
+ if (*(unsigned short *) (bh->b_data+510) != 0xAA55)
+ goto done;
+
+ p = (struct partition *) (0x1BE + bh->b_data);
+
+ this_size = hd->part[MINOR(dev)].nr_sects;
+
+ /*
+ * Usually, the first entry is the real data partition,
+ * the 2nd entry is the next extended partition, or empty,
+ * and the 3rd and 4th entries are unused.
+ * However, DRDOS sometimes has the extended partition as
+ * the first entry (when the data partition is empty),
+ * and OS/2 seems to use all four entries.
+ */
+
+ /*
+ * First process the data partition(s)
+ */
+ for (i=0; i<4; i++, p++) {
+ if (!NR_SECTS(p) || is_extended_partition(p))
+ continue;
+
+ /* Check the 3rd and 4th entries -
+ these sometimes contain random garbage */
+ if (i >= 2
+ && START_SECT(p) + NR_SECTS(p) > this_size
+ && (this_sector + START_SECT(p) < first_sector ||
+ this_sector + START_SECT(p) + NR_SECTS(p) >
+ first_sector + first_size))
+ continue;
+
+ add_partition(hd, current_minor, this_sector+START_SECT(p), NR_SECTS(p));
+ current_minor++;
+ if ((current_minor & mask) == 0)
+ goto done;
+ }
+ /*
+ * Next, process the (first) extended partition, if present.
+ * (So far, there seems to be no reason to make
+ * extended_partition() recursive and allow a tree
+ * of extended partitions.)
+ * It should be a link to the next logical partition.
+ * Create a minor for this just long enough to get the next
+ * partition table. The minor will be reused for the next
+ * data partition.
+ */
+ p -= 4;
+ for (i=0; i<4; i++, p++)
+ if(NR_SECTS(p) && is_extended_partition(p))
+ break;
+ if (i == 4)
+ goto done; /* nothing left to do */
+
+ hd->part[current_minor].nr_sects = NR_SECTS(p);
+ hd->part[current_minor].start_sect = first_sector + START_SECT(p);
+ this_sector = first_sector + START_SECT(p);
+ dev = MKDEV(hd->major, current_minor);
+ brelse(bh);
+ }
+done:
+ brelse(bh);
+}
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See extended_partition() for more information.
+ */
+static void bsd_disklabel_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct bsd_disklabel *l;
+ struct bsd_partition *p;
+ int mask = (1 << hd->minor_shift) - 1;
+
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ bh->b_state = 0;
+ l = (struct bsd_disklabel *) (bh->b_data+512);
+ if (l->d_magic != BSD_DISKMAGIC) {
+ brelse(bh);
+ return;
+ }
+
+ p = &l->d_partitions[0];
+ while (p - &l->d_partitions[0] < BSD_MAXPARTITIONS) {
+ if ((current_minor & mask) >= (4 + hd->max_p))
+ break;
+
+ if (p->p_fstype != BSD_FS_UNUSED) {
+#ifdef MACH
+ add_bsd_partition (hd, current_minor,
+ p - &l->d_partitions[0] + 'a',
+ p->p_offset, p->p_size);
+#else
+ add_partition(hd, current_minor, p->p_offset, p->p_size);
+#endif
+ current_minor++;
+ }
+ p++;
+ }
+ brelse(bh);
+
+}
+#endif
+
+#ifdef CONFIG_GPT_DISKLABEL
+/*
+ * Compute a CRC32 but treat some range as if it were zeros.
+ *
+ * Straight copy of ether_crc_le() from linux/pcmcia-cs/include/linux/crc32.h, except for the first if/else
+ */
+static inline unsigned ether_crc_le_hole(int length, unsigned char *data, unsigned int skip_offset, unsigned int skip_length)
+{
+ static unsigned const ethernet_polynomial_le = 0xedb88320U;
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ if(skip_offset == 0 && skip_length-- != 0)
+ current_octet = 0;
+ else
+ --skip_offset;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+/*
+ * Read in a full GPT array into a contiguous chunk, allocates *PP_S bytes into *PP.
+ *
+ * An attempt to do as few round-trips as possible is made by reading a PAGE_SIZE at a time,
+ * since that's the bread() maximum.
+ */
+static int gpt_read_part_table(void **pp, vm_size_t *pp_s, kdev_t dev, int bsize, __u64 first_sector, struct gpt_disklabel_header *h)
+{
+ __u64 lba = first_sector + h->h_part_table_lba;
+ __u32 bytes_left = *pp_s = h->h_part_table_len * h->h_part_table_entry_size;
+ struct buffer_head *bh;
+ void *cur = *pp = (void *)kalloc(*pp_s);
+ if (!cur) {
+ printk(" unable to allocate GPT partition table buffer");
+ return -2;
+ }
+
+ while (bytes_left) {
+ unsigned bytes_to_read = MIN(bytes_left, PAGE_SIZE);
+ if(!(bh = bread(dev, lba, bytes_to_read))) {
+ printk(" unable to read partition table array");
+ return -3;
+ }
+
+ memcpy(cur, bh->b_data, bytes_to_read);
+ cur += bytes_to_read;
+ bytes_left -= bytes_to_read;
+ lba += PAGE_SIZE / bsize;
+
+ brelse(bh);
+ }
+
+ return 0;
+}
+
+/*
+ * Sequence from section 5.3.2 of spec 2.8A:
+ * signature, CRC, lba_current matches, partition table CRC, primary: check backup for validity
+ */
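+/*
+ * Return convention (as used by gpt_partition() below): 0 means this header
+ * and its partition table array check out, a positive value means the header
+ * is invalid and the caller should try the backup GPT, and a negative value
+ * is a hard error (allocation or read failure). On success *PP/*PP_S hold
+ * the partition entry array.
+ */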
+static int gpt_verify_header(void **pp, vm_size_t *pp_s, kdev_t dev, int bsize, __u64 first_sector, __u64 lba, struct gpt_disklabel_header *h)
+{
+ int res;
+ __u32 crc;
+
+ if (memcmp(h->h_signature, GPT_SIGNATURE, strlen(GPT_SIGNATURE)) != 0) {
+ printk(" bad GPT signature \"%c%c%c%c%c%c%c%c\";",
+ h->h_signature[0], h->h_signature[1], h->h_signature[2], h->h_signature[3],
+ h->h_signature[4], h->h_signature[5], h->h_signature[6], h->h_signature[7]);
+ return 1;
+ }
+
+ crc = ether_crc_le_hole(h->h_header_size, (void *)h,
+ offsetof(struct gpt_disklabel_header, h_header_crc), sizeof(h->h_header_crc)) ^ ~0;
+ if (crc != h->h_header_crc) {
+ printk(" bad header CRC: %x != %x;", crc, h->h_header_crc);
+ return 2;
+ }
+
+ if (h->h_lba_current != lba) {
+ printk(" current LBA mismatch: %lld != %lld;", h->h_lba_current, lba);
+ return 3;
+ }
+
+ if (*pp) {
+ kfree((vm_offset_t)*pp, *pp_s);
+ *pp = NULL;
+ }
+ if ((res = gpt_read_part_table(pp, pp_s, dev, bsize, first_sector, h)))
+ return res;
+
+ crc = ether_crc_le_hole(*pp_s, *pp, 0, 0) ^ ~0;
+ if (crc != h->h_part_table_crc) {
+ printk(" bad partition table CRC: %x != %x;", crc, h->h_part_table_crc);
+ return 4;
+ }
+
+ for (int i = h->h_header_size; i < bsize; ++i)
+ res |= ((char*)h)[i];
+ if (res) {
+ printk(" rest of GPT block dirty;");
+ return 5;
+ }
+
+ return 0;
+}
+
+static void gpt_print_part_name(struct gpt_disklabel_part *p)
+{
+ for(int n = 0; n < sizeof(p->p_name) / sizeof(*p->p_name) && p->p_name[n]; ++n)
+ if(p->p_name[n] & ~0xFF)
+ printk("?"); /* Can't support all of Unicode, but don't print garbage at least... */
+ else
+ printk("%c", p->p_name[n]);
+}
+
+#ifdef DEBUG
+static void gpt_print_guid(struct gpt_guid *guid)
+{
+ printk("%08X-%04X-%04X-%02X%02X-", guid->g_time_low, guid->g_time_mid, guid->g_time_high_version, guid->g_clock_sec_high, guid->g_clock_sec_low);
+ for (int i = 0; i < sizeof(guid->g_node_id); ++i)
+ printk("%02X", guid->g_node_id[i]);
+}
+
+static void gpt_dump_header(struct gpt_disklabel_header *h)
+{
+ printk(" [h_signature: \"%c%c%c%c%c%c%c%c\"; ",
+ h->h_signature[0], h->h_signature[1], h->h_signature[2], h->h_signature[3],
+ h->h_signature[4], h->h_signature[5], h->h_signature[6], h->h_signature[7]);
+ printk("h_revision: %x; ", h->h_revision);
+ printk("h_header_size: %u; ", h->h_header_size);
+ printk("h_header_crc: %x; ", h->h_header_crc);
+ printk("h_reserved: %u; ", h->h_reserved);
+ printk("h_lba_current: %llu; ", h->h_lba_current);
+ printk("h_lba_backup: %llu; ", h->h_lba_backup);
+ printk("h_lba_usable_first: %llu; ", h->h_lba_usable_first);
+ printk("h_lba_usable_last: %llu; ", h->h_lba_usable_last);
+ printk("h_guid: "); gpt_print_guid(&h->h_guid); printk("; ");
+ printk("h_part_table_lba: %llu; ", h->h_part_table_lba);
+ printk("h_part_table_len: %u; ", h->h_part_table_len);
+ printk("h_part_table_crc: %x]", h->h_part_table_crc);
+}
+
+static void gpt_dump_part(struct gpt_disklabel_part *p, int i)
+{
+ printk(" part#%d:[", i);
+ printk("p_type: "); gpt_print_guid(&p->p_type);
+ printk("; p_guid:"); gpt_print_guid(&p->p_guid);
+ printk("; p_lba_first: %llu", p->p_lba_first);
+ printk("; p_lba_last: %llu", p->p_lba_last);
+ printk("; p_attrs: %llx", p->p_attrs);
+ printk("; p_name: \""); gpt_print_part_name(p); printk("\"]");
+}
+#else
+static void gpt_dump_header(struct gpt_disklabel_header *h) {}
+static void gpt_dump_part(struct gpt_disklabel_part *p, int i) {}
+#endif
+
+static int gpt_partition(struct gendisk *hd, kdev_t dev, __u64 first_sector, int minor)
+{
+ struct buffer_head *bh;
+ struct gpt_disklabel_header *h;
+ void *pp = NULL; vm_size_t pp_s = 0;
+ int res, bsize = 512;
+	/* Note: the hardware sector size must be set by the driver; SCSI does set it --
+	 * but in practice it always uses 512, see sd_init() in sd.c */
+ if (hardsect_size[MAJOR(dev)] && hardsect_size[MAJOR(dev)][MINOR(dev)])
+ bsize = hardsect_size[MAJOR(dev)][MINOR(dev)];
+ set_blocksize(dev,bsize); /* Must override read block size since GPT has pointers, stolen from amiga_partition(). */
+ if (!(bh = bread(dev, first_sector + 1, bsize))) {
+ printk("unable to read GPT");
+ res = -1;
+ goto done;
+ }
+
+ h = (struct gpt_disklabel_header *)bh->b_data;
+ gpt_dump_header(h);
+
+ res = gpt_verify_header(&pp, &pp_s, dev, bsize, first_sector, 1, h);
+ if (res < 0)
+ goto done;
+ else if (res > 0) {
+ printk(" main GPT dirty, trying backup at %llu;", h->h_lba_backup);
+ __u64 lba = h->h_lba_backup;
+ brelse(bh);
+
+ if (!(bh = bread(dev, first_sector + lba, bsize))) {
+ printk("unable to read backup GPT");
+ res = -4;
+ goto done;
+ }
+
+ h = (struct gpt_disklabel_header *)bh->b_data;
+ gpt_dump_header(h);
+
+ res = gpt_verify_header(&pp, &pp_s, dev, bsize, first_sector, lba, h);
+ if (res < 0)
+ goto done;
+ else if (res > 0) {
+ printk(" backup GPT dirty as well; cowardly refusing to continue");
+ res = -5;
+ goto done;
+ }
+ }
+
+ /* At least one good GPT+array */
+
+ for(int i = 0; i < h->h_part_table_len; ++i, ++minor) {
+ struct gpt_disklabel_part *p =
+ (struct gpt_disklabel_part *) (pp + i * h->h_part_table_entry_size);
+ if(memcmp(&p->p_type, &GPT_GUID_TYPE_UNUSED, sizeof(struct gpt_guid)) == 0)
+ continue;
+ gpt_dump_part(p, i);
+
+ if (minor > hd->max_nr * hd->max_p) {
+ printk(" [ignoring GPT partition %d \"", i); gpt_print_part_name(p); printk("\": too many partitions (max %d)]", hd->max_p);
+ } else {
+ add_partition(hd, minor, first_sector + p->p_lba_first, p->p_lba_last - p->p_lba_first + 1);
+ if(p->p_name[0]) {
+ printk(" ("); gpt_print_part_name(p); printk(")");
+ }
+ }
+ }
+
+done:
+ brelse(bh);
+ set_blocksize(dev,BLOCK_SIZE);
+ kfree((vm_offset_t)pp, pp_s);
+ printk("\n");
+ return !res;
+}
+#endif
+
+static int msdos_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, minor = current_minor;
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned char *data;
+ int mask = (1 << hd->minor_shift) - 1;
+#ifdef CONFIG_BLK_DEV_IDE
+ int tested_for_xlate = 0;
+
+read_mbr:
+#endif
+ if (!(bh = bread(dev,0,1024))) {
+ printk(" unable to read partition table\n");
+ return -1;
+ }
+ data = (unsigned char *)bh->b_data;
+ /* In some cases we modify the geometry */
+ /* of the drive (below), so ensure that */
+ /* nobody else tries to re-use this data. */
+ bh->b_state = 0;
+#ifdef CONFIG_BLK_DEV_IDE
+check_table:
+#endif
+ if (*(unsigned short *) (0x1fe + data) != 0xAA55) {
+ brelse(bh);
+ return 0;
+ }
+ p = (struct partition *) (0x1be + data);
+
+#ifdef CONFIG_BLK_DEV_IDE
+ if (!tested_for_xlate++) { /* Do this only once per disk */
+ /*
+ * Look for various forms of IDE disk geometry translation
+ */
+ extern int ide_xlate_1024(kdev_t, int, const char *);
+ unsigned int sig = *(unsigned short *)(data + 2);
+ if (SYS_IND(p) == EZD_PARTITION) {
+ /*
+ * The remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, -1, " [EZD]")) {
+ data += 512;
+ goto check_table;
+ }
+ } else if (SYS_IND(p) == DM6_PARTITION) {
+
+ /*
+ * Everything on the disk is offset by 63 sectors,
+ * including a "new" MBR with its own partition table,
+ * and the remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, 1, " [DM6:DDO]")) {
+ brelse(bh);
+ goto read_mbr; /* start over with new MBR */
+ }
+ } else if (sig <= 0x1ae && *(unsigned short *)(data + sig) == 0x55AA
+ && (1 & *(unsigned char *)(data + sig + 2)) )
+ {
+ /*
+ * DM6 signature in MBR, courtesy of OnTrack
+ */
+ (void) ide_xlate_1024 (dev, 0, " [DM6:MBR]");
+ } else if (SYS_IND(p) == DM6_AUX1PARTITION || SYS_IND(p) == DM6_AUX3PARTITION) {
+ /*
+ * DM6 on other than the first (boot) drive
+ */
+ (void) ide_xlate_1024(dev, 0, " [DM6:AUX]");
+ } else {
+ /*
+ * Examine the partition table for common translations.
+			 * This is necessary for drives in situations where
+ * the translated geometry is unavailable from the BIOS.
+ */
+ for (i = 0; i < 4 ; i++) {
+ struct partition *q = &p[i];
+ if (NR_SECTS(q)
+ && (q->sector & 63) == 1
+ && (q->end_sector & 63) == 63) {
+ unsigned int heads = q->end_head + 1;
+ if (heads == 32 || heads == 64 || heads == 128 || heads == 255) {
+
+ (void) ide_xlate_1024(dev, heads, " [PTBL]");
+ break;
+ }
+ }
+ }
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDE */
+
+ current_minor += 4; /* first "extra" minor (for extended partitions) */
+ for (i=1 ; i<=4 ; minor++,i++,p++) {
+ if (!NR_SECTS(p))
+ continue;
+#ifdef CONFIG_GPT_DISKLABEL
+ if (SYS_IND(p) == GPT_PARTITION) {
+ brelse(bh);
+ return gpt_partition(hd, dev, first_sector, minor);
+ } else
+#endif
+ add_partition(hd, minor, first_sector+START_SECT(p), NR_SECTS(p));
+ if (is_extended_partition(p)) {
+ printk(" <");
+ /*
+ * If we are rereading the partition table, we need
+ * to set the size of the partition so that we will
+ * be able to bread the block containing the extended
+ * partition info.
+ */
+ hd->sizes[minor] = hd->part[minor].nr_sects
+ >> (BLOCK_SIZE_BITS - 9);
+ extended_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ /* prevent someone doing mkfs or mkswap on an
+ extended partition, but leave room for LILO */
+ if (hd->part[minor].nr_sects > 2)
+ hd->part[minor].nr_sects = 2;
+ }
+#ifdef CONFIG_BSD_DISKLABEL
+ if (SYS_IND(p) == BSD_PARTITION) {
+ printk(" <");
+#ifdef MACH
+ mach_minor = minor;
+#endif
+ bsd_disklabel_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ }
+#endif
+ }
+ /*
+ * Check for old-style Disk Manager partition table
+ */
+ if (*(unsigned short *) (data+0xfc) == 0x55AA) {
+ p = (struct partition *) (0x1be + data);
+ for (i = 4 ; i < 16 ; i++, current_minor++) {
+ p--;
+ if ((current_minor & mask) == 0)
+ break;
+ if (!(START_SECT(p) && NR_SECTS(p)))
+ continue;
+ add_partition(hd, current_minor, START_SECT(p), NR_SECTS(p));
+ }
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_MSDOS_PARTITION */
+
+#ifdef CONFIG_OSF_PARTITION
+
+static int osf_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ int i;
+ int mask = (1 << hd->minor_shift) - 1;
+ struct buffer_head *bh;
+ struct disklabel {
+ u32 d_magic;
+ u16 d_type,d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ u32 d_secsize;
+ u32 d_nsectors;
+ u32 d_ntracks;
+ u32 d_ncylinders;
+ u32 d_secpercyl;
+ u32 d_secprtunit;
+ u16 d_sparespertrack;
+ u16 d_sparespercyl;
+ u32 d_acylinders;
+ u16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ u32 d_headswitch, d_trkseek, d_flags;
+ u32 d_drivedata[5];
+ u32 d_spare[5];
+ u32 d_magic2;
+ u16 d_checksum;
+ u16 d_npartitions;
+ u32 d_bbsize, d_sbsize;
+ struct d_partition {
+ u32 p_size;
+ u32 p_offset;
+ u32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ u16 p_cpg;
+ } d_partitions[8];
+ } * label;
+ struct d_partition * partition;
+#define DISKLABELMAGIC (0x82564557UL)
+
+ if (!(bh = bread(dev,0,1024))) {
+ printk("unable to read partition table\n");
+ return -1;
+ }
+ label = (struct disklabel *) (bh->b_data+64);
+ partition = label->d_partitions;
+ if (label->d_magic != DISKLABELMAGIC) {
+ printk("magic: %08x\n", label->d_magic);
+ brelse(bh);
+ return 0;
+ }
+ if (label->d_magic2 != DISKLABELMAGIC) {
+ printk("magic2: %08x\n", label->d_magic2);
+ brelse(bh);
+ return 0;
+ }
+ for (i = 0 ; i < label->d_npartitions; i++, partition++) {
+ if ((current_minor & mask) == 0)
+ break;
+ if (partition->p_size)
+ add_partition(hd, current_minor,
+ first_sector+partition->p_offset,
+ partition->p_size);
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_OSF_PARTITION */
+
+#ifdef CONFIG_SUN_PARTITION
+
+static int sun_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, csum;
+ unsigned short *ush;
+ struct buffer_head *bh;
+ struct sun_disklabel {
+ unsigned char info[128]; /* Informative text string */
+ unsigned char spare[292]; /* Boot information etc. */
+ unsigned short rspeed; /* Disk rotational speed */
+ unsigned short pcylcount; /* Physical cylinder count */
+ unsigned short sparecyl; /* extra sects per cylinder */
+ unsigned char spare2[4]; /* More magic... */
+ unsigned short ilfact; /* Interleave factor */
+ unsigned short ncyl; /* Data cylinder count */
+ unsigned short nacyl; /* Alt. cylinder count */
+ unsigned short ntrks; /* Tracks per cylinder */
+ unsigned short nsect; /* Sectors per track */
+ unsigned char spare3[4]; /* Even more magic... */
+ struct sun_partition {
+ __u32 start_cylinder;
+ __u32 num_sectors;
+ } partitions[8];
+ unsigned short magic; /* Magic number */
+ unsigned short csum; /* Label xor'd checksum */
+ } * label;
+ struct sun_partition *p;
+ int other_endian;
+ unsigned long spc;
+#define SUN_LABEL_MAGIC 0xDABE
+#define SUN_LABEL_MAGIC_SWAPPED 0xBEDA
+/* No need to optimize these macros since they are called only when reading
+ * the partition table. This occurs only at each disk change. */
+#define SWAP16(x) (other_endian ? (((__u16)(x) & 0xFF) << 8) \
+ | (((__u16)(x) & 0xFF00) >> 8) \
+ : (__u16)(x))
+#define SWAP32(x) (other_endian ? (((__u32)(x) & 0xFF) << 24) \
+ | (((__u32)(x) & 0xFF00) << 8) \
+ | (((__u32)(x) & 0xFF0000) >> 8) \
+ | (((__u32)(x) & 0xFF000000) >> 24) \
+ : (__u32)(x))
+
+ if(!(bh = bread(dev, 0, 1024))) {
+ printk("Dev %s: unable to read partition table\n",
+ kdevname(dev));
+ return -1;
+ }
+ label = (struct sun_disklabel *) bh->b_data;
+ p = label->partitions;
+ if (label->magic != SUN_LABEL_MAGIC && label->magic != SUN_LABEL_MAGIC_SWAPPED) {
+ printk("Dev %s Sun disklabel: bad magic %04x\n",
+ kdevname(dev), label->magic);
+ brelse(bh);
+ return 0;
+ }
+ other_endian = (label->magic == SUN_LABEL_MAGIC_SWAPPED);
+ /* Look at the checksum */
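+	/* XOR of all 16-bit words in the label, including the stored csum
+	 * field, must come out to zero for a valid label. */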
+ ush = ((unsigned short *) (label+1)) - 1;
+ for(csum = 0; ush >= ((unsigned short *) label);)
+ csum ^= *ush--;
+ if(csum) {
+ printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
+ kdevname(dev));
+ brelse(bh);
+ return 0;
+ }
+ /* All Sun disks have 8 partition entries */
+ spc = SWAP16(label->ntrks) * SWAP16(label->nsect);
+ for(i=0; i < 8; i++, p++) {
+ unsigned long st_sector;
+
+ /* We register all partitions, even if zero size, so that
+ * the minor numbers end up ok as per SunOS interpretation.
+ */
+ st_sector = first_sector + SWAP32(p->start_cylinder) * spc;
+ add_partition(hd, current_minor, st_sector, SWAP32(p->num_sectors));
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+#undef SWAP16
+#undef SWAP32
+}
+
+#endif /* CONFIG_SUN_PARTITION */
+
+#ifdef CONFIG_AMIGA_PARTITION
+#include <asm/byteorder.h>
+#include <linux/affs_hardblocks.h>
+
+static __inline__ __u32
+checksum_block(__u32 *m, int size)
+{
+ __u32 sum = 0;
+
+ while (size--)
+ sum += htonl(*m++);
+ return sum;
+}
+
+static int
+amiga_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ struct buffer_head *bh;
+ struct RigidDiskBlock *rdb;
+ struct PartitionBlock *pb;
+ int start_sect;
+ int nr_sects;
+ int blk;
+ int part, res;
+
+ set_blocksize(dev,512);
+ res = 0;
+
+ for (blk = 0; blk < RDB_ALLOCATION_LIMIT; blk++) {
+ if(!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read RDB block %d\n",dev,blk);
+ goto rdb_done;
+ }
+ if (*(__u32 *)bh->b_data == htonl(IDNAME_RIGIDDISK)) {
+ rdb = (struct RigidDiskBlock *)bh->b_data;
+ if (checksum_block((__u32 *)bh->b_data,htonl(rdb->rdb_SummedLongs) & 0x7F)) {
+ printk("Dev %d: RDB in block %d has bad checksum\n",dev,blk);
+ brelse(bh);
+ continue;
+ }
+ printk(" RDSK");
+ blk = htonl(rdb->rdb_PartitionList);
+ brelse(bh);
+ for (part = 1; blk > 0 && part <= 16; part++) {
+ if (!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read partition block %d\n",
+ dev,blk);
+ goto rdb_done;
+ }
+ pb = (struct PartitionBlock *)bh->b_data;
+ blk = htonl(pb->pb_Next);
+ if (pb->pb_ID == htonl(IDNAME_PARTITION) && checksum_block(
+ (__u32 *)pb,htonl(pb->pb_SummedLongs) & 0x7F) == 0 ) {
+
+ /* Tell Kernel about it */
+
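+					/* nr_sects = (HighCyl + 1 - LowCyl) * Surfaces * BlocksPerTrack
+					 * (DosEnvec fields: [9]=LowCyl, [10]=HighCyl, [3]=Surfaces,
+					 * [5]=BlocksPerTrack). */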
+ if (!(nr_sects = (htonl(pb->pb_Environment[10]) + 1 -
+ htonl(pb->pb_Environment[9])) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]))) {
+ continue;
+ }
+ start_sect = htonl(pb->pb_Environment[9]) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]);
+ add_partition(hd,current_minor,start_sect,nr_sects);
+ current_minor++;
+ res = 1;
+ }
+ brelse(bh);
+ }
+ printk("\n");
+ break;
+ }
+ }
+
+rdb_done:
+ set_blocksize(dev,BLOCK_SIZE);
+ return res;
+}
+#endif /* CONFIG_AMIGA_PARTITION */
+
+static void check_partition(struct gendisk *hd, kdev_t dev)
+{
+ static int first_time = 1;
+ unsigned long first_sector;
+ char buf[8];
+
+ if (first_time)
+ printk("Partition check (DOS partitions):\n");
+ first_time = 0;
+ first_sector = hd->part[MINOR(dev)].start_sect;
+
+ /*
+ * This is a kludge to allow the partition check to be
+ * skipped for specific drives (e.g. IDE cd-rom drives)
+ */
+ if ((int)first_sector == -1) {
+ hd->part[MINOR(dev)].start_sect = 0;
+ return;
+ }
+
+ printk(" %s:", disk_name(hd, MINOR(dev), buf));
+#ifdef CONFIG_MSDOS_PARTITION
+ if (msdos_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ if (osf_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ if(sun_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+ if(amiga_partition(hd, dev, first_sector))
+ return;
+#endif
+ printk(" unknown partition table\n");
+}
+
+/* This function is used to re-read partition tables for removable disks.
+ Much of the cleanup from the old partition tables should have already been
+ done */
+
+/* This function will re-read the partition tables for a given device,
+and set things back up again. There are some important caveats,
+however. You must ensure that no one is using the device, and no one
+can start using the device while this function is being executed. */
+
+void resetup_one_dev(struct gendisk *dev, int drive)
+{
+ int i;
+ int first_minor = drive << dev->minor_shift;
+ int end_minor = first_minor + dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+
+ /*
+ * We need to set the sizes array before we will be able to access
+ * any of the partitions on this device.
+ */
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = first_minor; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+static void setup_dev(struct gendisk *dev)
+{
+ int i, drive;
+ int end_minor = dev->max_nr * dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ for (i = 0 ; i < end_minor; i++) {
+ dev->part[i].start_sect = 0;
+ dev->part[i].nr_sects = 0;
+ }
+ dev->init(dev);
+ for (drive = 0 ; drive < dev->nr_real ; drive++) {
+ int first_minor = drive << dev->minor_shift;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+ }
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = 0; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+void device_setup(void)
+{
+ extern void console_map_init(void);
+ struct gendisk *p;
+ int nr=0;
+
+#ifdef CONFIG_BLK_DEV_IDE
+ extern char *kernel_cmdline;
+ char *c, *param, *white;
+
+ for (c = kernel_cmdline; c; )
+ {
+ param = strstr(c, " ide");
+ if (!param)
+ param = strstr(c, " hd");
+ if (!param)
+ break;
+ if (param) {
+ param++;
+ white = strchr(param, ' ');
+ if (!white) {
+ ide_setup(param);
+ c = NULL;
+ } else {
+ char *word = alloca(white - param + 1);
+ strncpy(word, param, white - param);
+ word[white-param] = '\0';
+ ide_setup(word);
+ c = white + 1;
+ }
+ }
+ }
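+	/* For illustration (hypothetical values): with a command line such as
+	 *   "root=device:hd0s1 hd1=noprobe", the loop above finds the word
+	 * "hd1=noprobe" and hands it to ide_setup(). */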
+#endif
+#ifndef MACH
+ chr_dev_init();
+#endif
+ blk_dev_init();
+ sti();
+#ifdef CONFIG_SCSI
+ scsi_dev_init();
+#endif
+#ifdef CONFIG_INET
+ net_dev_init();
+#endif
+#ifndef MACH
+ console_map_init();
+#endif
+
+ for (p = gendisk_head ; p ; p=p->next) {
+ setup_dev(p);
+ nr += p->nr_real;
+ }
+#ifdef CONFIG_BLK_DEV_RAM
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start && mount_initrd) initrd_load();
+ else
+#endif
+ rd_load();
+#endif
+}
diff --git a/linux/dev/drivers/net/Space.c b/linux/dev/drivers/net/Space.c
new file mode 100644
index 0000000..213fa9b
--- /dev/null
+++ b/linux/dev/drivers/net/Space.c
@@ -0,0 +1,582 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Holds initial configuration information for devices.
+ *
+ * NOTE: This file is a nice idea, but its current format does not work
+ * well for drivers that support multiple units, like the SLIP
+ * driver. We should actually have only one pointer to a driver
+ * here, with the driver knowing how many units it supports.
+ * Currently, the SLIP driver abuses the "base_addr" integer
+ * field of the 'device' structure to store the unit number...
+ * -FvK
+ *
+ * Version: @(#)Space.c 1.0.8 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald J. Becker, <becker@super.org>
+ *
+ * FIXME:
+ * Sort the device chain fastest first.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+
+#define NEXT_DEV NULL
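+
+/* Each optional device below links to the previously defined entries through
+   NEXT_DEV, which is redefined after every entry; the resulting chain is
+   finally anchored at dev_base near the bottom of this file. */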
+
+
+/* A unified ethernet device probe. This is the easiest way to have every
+ ethernet adaptor have the name "eth[0123...]".
+ */
+
+extern int tulip_probe(struct device *dev);
+extern int hp100_probe(struct device *dev);
+extern int ultra_probe(struct device *dev);
+extern int ultra32_probe(struct device *dev);
+extern int wd_probe(struct device *dev);
+extern int el2_probe(struct device *dev);
+extern int ne_probe(struct device *dev);
+extern int ne2k_pci_probe(struct device *dev);
+extern int hp_probe(struct device *dev);
+extern int hp_plus_probe(struct device *dev);
+extern int znet_probe(struct device *);
+extern int express_probe(struct device *);
+extern int eepro_probe(struct device *);
+extern int el3_probe(struct device *);
+extern int at1500_probe(struct device *);
+extern int at1700_probe(struct device *);
+extern int fmv18x_probe(struct device *);
+extern int eth16i_probe(struct device *);
+extern int depca_probe(struct device *);
+extern int apricot_probe(struct device *);
+extern int ewrk3_probe(struct device *);
+extern int de4x5_probe(struct device *);
+extern int el1_probe(struct device *);
+extern int via_rhine_probe(struct device *);
+extern int natsemi_probe(struct device *);
+extern int ns820_probe(struct device *);
+extern int winbond840_probe(struct device *);
+extern int hamachi_probe(struct device *);
+extern int sundance_probe(struct device *);
+extern int starfire_probe(struct device *);
+extern int myson803_probe(struct device *);
+extern int igige_probe(struct device *);
+#if defined(CONFIG_WAVELAN)
+extern int wavelan_probe(struct device *);
+#endif /* defined(CONFIG_WAVELAN) */
+extern int el16_probe(struct device *);
+extern int elplus_probe(struct device *);
+extern int ac3200_probe(struct device *);
+extern int e2100_probe(struct device *);
+extern int ni52_probe(struct device *);
+extern int ni65_probe(struct device *);
+extern int SK_init(struct device *);
+extern int seeq8005_probe(struct device *);
+extern int tc59x_probe(struct device *);
+extern int dgrs_probe(struct device *);
+extern int smc_init( struct device * );
+extern int sparc_lance_probe(struct device *);
+extern int atarilance_probe(struct device *);
+extern int a2065_probe(struct device *);
+extern int ariadne_probe(struct device *);
+extern int hydra_probe(struct device *);
+extern int yellowfin_probe(struct device *);
+extern int eepro100_probe(struct device *);
+extern int epic100_probe(struct device *);
+extern int rtl8139_probe(struct device *);
+extern int sis900_probe(struct device *);
+extern int tlan_probe(struct device *);
+extern int isa515_probe(struct device *);
+extern int pcnet32_probe(struct device *);
+extern int lance_probe(struct device *);
+/* Detachable devices ("pocket adaptors") */
+extern int atp_init(struct device *);
+extern int de600_probe(struct device *);
+extern int de620_probe(struct device *);
+extern int tc515_probe(struct device *);
+
+static int
+ethif_probe(struct device *dev)
+{
+ u_long base_addr = dev->base_addr;
+
+ if ((base_addr == 0xffe0) || (base_addr == 1))
+ return 1; /* ENXIO */
+
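+	/* Each probe routine returns 0 on success, so the `&&' chain below
+	 * stops at the first driver that claims the device; if every probe
+	 * fails, the chain reaches the final `&& 1' and we return 1. */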
+ if (1
+ /* All PCI probes are safe, and thus should be first. */
+#ifdef CONFIG_DE4X5 /* DEC DE425, DE434, DE435 adapters */
+ && de4x5_probe(dev)
+#endif
+#ifdef CONFIG_DGRS
+ && dgrs_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO100B /* Intel EtherExpress Pro100B */
+ && eepro100_probe(dev)
+#endif
+#ifdef CONFIG_EPIC
+ && epic100_probe(dev)
+#endif
+#if defined(CONFIG_HP100)
+ && hp100_probe(dev)
+#endif
+#if defined(CONFIG_NE2K_PCI)
+ && ne2k_pci_probe(dev)
+#endif
+#ifdef CONFIG_PCNET32
+ && pcnet32_probe(dev)
+#endif
+#ifdef CONFIG_RTL8139
+ && rtl8139_probe(dev)
+#endif
+#ifdef CONFIG_SIS900
+ && sis900_probe(dev)
+#endif
+#ifdef CONFIG_VIA_RHINE
+ && via_rhine_probe(dev)
+#endif
+#ifdef CONFIG_NATSEMI
+ && natsemi_probe(dev)
+#endif
+#ifdef CONFIG_NS820
+ && ns820_probe(dev)
+#endif
+#ifdef CONFIG_WINBOND840
+ && winbond840_probe(dev)
+#endif
+#ifdef CONFIG_HAMACHI
+ && hamachi_probe(dev)
+#endif
+#ifdef CONFIG_SUNDANCE
+ && sundance_probe(dev)
+#endif
+#ifdef CONFIG_STARFIRE
+ && starfire_probe(dev)
+#endif
+#ifdef CONFIG_MYSON803
+ && myson803_probe(dev)
+#endif
+#ifdef CONFIG_INTEL_GIGE
+ && igige_probe(dev)
+#endif
+#if defined(CONFIG_DEC_ELCP)
+ && tulip_probe(dev)
+#endif
+#ifdef CONFIG_YELLOWFIN
+ && yellowfin_probe(dev)
+#endif
+ /* Next mostly-safe EISA-only drivers. */
+#ifdef CONFIG_AC3200 /* Ansel Communications EISA 3200. */
+ && ac3200_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA32)
+ && ultra32_probe(dev)
+#endif
+ /* Third, sensitive ISA boards. */
+#ifdef CONFIG_AT1700
+ && at1700_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA)
+ && ultra_probe(dev)
+#endif
+#if defined(CONFIG_SMC9194)
+ && smc_init(dev)
+#endif
+#if defined(CONFIG_WD80x3)
+ && wd_probe(dev)
+#endif
+#if defined(CONFIG_EL2) /* 3c503 */
+ && el2_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN)
+ && hp_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN_PLUS)
+ && hp_plus_probe(dev)
+#endif
+#if defined(CONFIG_SEEQ8005)
+ && seeq8005_probe(dev)
+#endif
+#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
+ && e2100_probe(dev)
+#endif
+#if defined(CONFIG_NE2000)
+ && ne_probe(dev)
+#endif
+#ifdef CONFIG_AT1500
+ && at1500_probe(dev)
+#endif
+#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */
+ && fmv18x_probe(dev)
+#endif
+#ifdef CONFIG_ETH16I
+ && eth16i_probe(dev) /* ICL EtherTeam 16i/32 */
+#endif
+#ifdef CONFIG_EL3 /* 3c509 */
+ && el3_probe(dev)
+#endif
+#if defined(CONFIG_VORTEX)
+ && tc59x_probe(dev)
+#endif
+#ifdef CONFIG_3C515 /* 3c515 */
+ && tc515_probe(dev)
+#endif
+#ifdef CONFIG_ZNET /* Zenith Z-Note and some IBM Thinkpads. */
+ && znet_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
+ && express_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
+ && eepro_probe(dev)
+#endif
+#ifdef CONFIG_DEPCA /* DEC DEPCA */
+ && depca_probe(dev)
+#endif
+#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
+ && ewrk3_probe(dev)
+#endif
+#ifdef CONFIG_APRICOT /* Apricot I82596 */
+ && apricot_probe(dev)
+#endif
+#ifdef CONFIG_EL1 /* 3c501 */
+ && el1_probe(dev)
+#endif
+#if defined(CONFIG_WAVELAN) /* WaveLAN */
+ && wavelan_probe(dev)
+#endif /* defined(CONFIG_WAVELAN) */
+#ifdef CONFIG_EL16 /* 3c507 */
+ && el16_probe(dev)
+#endif
+#ifdef CONFIG_ELPLUS /* 3c505 */
+ && elplus_probe(dev)
+#endif
+#ifdef CONFIG_DE600 /* D-Link DE-600 adapter */
+ && de600_probe(dev)
+#endif
+#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
+ && de620_probe(dev)
+#endif
+#if defined(CONFIG_SK_G16)
+ && SK_init(dev)
+#endif
+#ifdef CONFIG_NI52
+ && ni52_probe(dev)
+#endif
+#ifdef CONFIG_NI65
+ && ni65_probe(dev)
+#endif
+#ifdef CONFIG_LANCE /* ISA LANCE boards */
+ && lance_probe(dev)
+#endif
+#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
+ && atarilance_probe(dev)
+#endif
+#ifdef CONFIG_A2065 /* Commodore/Ameristar A2065 Ethernet Board */
+ && a2065_probe(dev)
+#endif
+#ifdef CONFIG_ARIADNE /* Village Tronic Ariadne Ethernet Board */
+ && ariadne_probe(dev)
+#endif
+#ifdef CONFIG_HYDRA /* Hydra Systems Amiganet Ethernet board */
+ && hydra_probe(dev)
+#endif
+#ifdef CONFIG_SUNLANCE
+ && sparc_lance_probe(dev)
+#endif
+#ifdef CONFIG_TLAN
+ && tlan_probe(dev)
+#endif
+#ifdef CONFIG_LANCE
+ && lance_probe(dev)
+#endif
+ && 1 ) {
+ return 1; /* -ENODEV or -EAGAIN would be more accurate. */
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SDLA
+ extern int sdla_init(struct device *);
+ static struct device sdla0_dev = { "sdla0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, sdla_init, };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&sdla0_dev)
+#endif
+
+#ifdef CONFIG_NETROM
+ extern int nr_init(struct device *);
+
+ static struct device nr3_dev = { "nr3", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, nr_init, };
+ static struct device nr2_dev = { "nr2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr3_dev, nr_init, };
+ static struct device nr1_dev = { "nr1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr2_dev, nr_init, };
+ static struct device nr0_dev = { "nr0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &nr1_dev, nr_init, };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&nr0_dev)
+#endif
+
+/* Run-time ATtachable (Pocket) devices have a different (not "eth#") name. */
+#ifdef CONFIG_ATP /* AT-LAN-TEC (RealTek) pocket adaptor. */
+static struct device atp_dev = {
+ "atp0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, atp_init, /* ... */ };
+# undef NEXT_DEV
+# define NEXT_DEV (&atp_dev)
+#endif
+
+#ifdef CONFIG_ARCNET
+ extern int arcnet_probe(struct device *dev);
+ static struct device arcnet_dev = {
+ "arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, };
+# undef NEXT_DEV
+# define NEXT_DEV (&arcnet_dev)
+#endif
+
+/* In Mach, by default allow at least 2 interfaces. */
+#ifdef MACH
+#ifndef ETH1_ADDR
+# define ETH1_ADDR 0
+#endif
+#ifndef ETH1_IRQ
+# define ETH1_IRQ 0
+#endif
+#endif
+
+/* The first device defaults to I/O base '0', which means autoprobe. */
+#ifndef ETH0_ADDR
+# define ETH0_ADDR 0
+#endif
+#ifndef ETH0_IRQ
+# define ETH0_IRQ 0
+#endif
+/* "eth0" defaults to autoprobe (== 0), other use a base of 0xffe0 (== -0x20),
+ which means "don't probe". These entries exist to only to provide empty
+ slots which may be enabled at boot-time. */
+
+static struct device eth7_dev = {
+ "eth7", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, NEXT_DEV, ethif_probe };
+static struct device eth6_dev = {
+ "eth6", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth7_dev, ethif_probe };
+static struct device eth5_dev = {
+ "eth5", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth6_dev, ethif_probe };
+static struct device eth4_dev = {
+ "eth4", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth5_dev, ethif_probe };
+static struct device eth3_dev = {
+ "eth3", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth4_dev, ethif_probe };
+static struct device eth2_dev = {
+ "eth2", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth3_dev, ethif_probe };
+
+#ifdef MACH
+static struct device eth1_dev = {
+ "eth1", 0, 0, 0, 0, ETH1_ADDR, ETH1_IRQ, 0, 0, 0, &eth2_dev, ethif_probe };
+#else
+static struct device eth1_dev = {
+ "eth1", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth2_dev, ethif_probe };
+#endif
+
+static struct device eth0_dev = {
+ "eth0", 0, 0, 0, 0, ETH0_ADDR, ETH0_IRQ, 0, 0, 0, &eth1_dev, ethif_probe };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&eth0_dev)
+
+#if defined(PLIP) || defined(CONFIG_PLIP)
+ extern int plip_init(struct device *);
+ static struct device plip2_dev = {
+ "plip2", 0, 0, 0, 0, 0x278, 2, 0, 0, 0, NEXT_DEV, plip_init, };
+ static struct device plip1_dev = {
+ "plip1", 0, 0, 0, 0, 0x378, 7, 0, 0, 0, &plip2_dev, plip_init, };
+ static struct device plip0_dev = {
+ "plip0", 0, 0, 0, 0, 0x3BC, 5, 0, 0, 0, &plip1_dev, plip_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&plip0_dev)
+#endif /* PLIP */
+
+#if defined(SLIP) || defined(CONFIG_SLIP)
+ /* To be exact, this node just hooks the initialization
+ routines to the device structures. */
+extern int slip_init_ctrl_dev(struct device *);
+static struct device slip_bootstrap = {
+ "slip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, slip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&slip_bootstrap)
+#endif /* SLIP */
+
+#if defined(CONFIG_STRIP)
+extern int strip_init_ctrl_dev(struct device *);
+static struct device strip_bootstrap = {
+ "strip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, strip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&strip_bootstrap)
+#endif /* STRIP */
+
+#if defined(CONFIG_PPP)
+extern int ppp_init(struct device *);
+static struct device ppp_bootstrap = {
+ "ppp_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, ppp_init, };
+#undef NEXT_DEV
+#define NEXT_DEV (&ppp_bootstrap)
+#endif /* PPP */
+
+#ifdef CONFIG_DUMMY
+ extern int dummy_init(struct device *dev);
+ static struct device dummy_dev = {
+ "dummy", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, dummy_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&dummy_dev)
+#endif
+
+#ifdef CONFIG_EQUALIZER
+extern int eql_init(struct device *dev);
+struct device eql_dev = {
+ "eql", /* Master device for IP traffic load
+ balancing */
+ 0x0, 0x0, 0x0, 0x0, /* recv end/start; mem end/start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ eql_init /* set up the rest */
+};
+# undef NEXT_DEV
+# define NEXT_DEV (&eql_dev)
+#endif
+
+#ifdef CONFIG_IBMTR
+
+ extern int tok_probe(struct device *dev);
+ static struct device ibmtr_dev1 = {
+ "tr1", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa24, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev1)
+
+
+ static struct device ibmtr_dev0 = {
+ "tr0", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa20, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev0)
+
+#endif
+
+#ifdef CONFIG_DEFXX
+ extern int dfx_probe(struct device *dev);
+ static struct device fddi7_dev =
+ {"fddi7", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, dfx_probe};
+ static struct device fddi6_dev =
+ {"fddi6", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi7_dev, dfx_probe};
+ static struct device fddi5_dev =
+ {"fddi5", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi6_dev, dfx_probe};
+ static struct device fddi4_dev =
+ {"fddi4", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi5_dev, dfx_probe};
+ static struct device fddi3_dev =
+ {"fddi3", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi4_dev, dfx_probe};
+ static struct device fddi2_dev =
+ {"fddi2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi3_dev, dfx_probe};
+ static struct device fddi1_dev =
+ {"fddi1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi2_dev, dfx_probe};
+ static struct device fddi0_dev =
+ {"fddi0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi1_dev, dfx_probe};
+
+#undef NEXT_DEV
+#define NEXT_DEV (&fddi0_dev)
+#endif
+
+#ifdef CONFIG_NET_IPIP
+ extern int tunnel_init(struct device *);
+
+ static struct device tunnel_dev1 =
+ {
+ "tunl1", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+
+ static struct device tunnel_dev0 =
+ {
+ "tunl0", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ &tunnel_dev1, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&tunnel_dev0)
+
+#endif
+
+#ifdef CONFIG_APFDDI
+ extern int apfddi_init(struct device *dev);
+ static struct device fddi_dev = {
+ "fddi", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, apfddi_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&fddi_dev)
+#endif
+
+#ifdef CONFIG_APBIF
+ extern int bif_init(struct device *dev);
+ static struct device bif_dev = {
+ "bif", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, bif_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&bif_dev)
+#endif
+
+#ifdef MACH
+struct device *dev_base = &eth0_dev;
+#else
+extern int loopback_init(struct device *dev);
+struct device loopback_dev = {
+ "lo", /* Software Loopback interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ loopback_init /* loopback_init should set up the rest */
+};
+
+struct device *dev_base = &loopback_dev;
+#endif
diff --git a/linux/dev/drivers/net/auto_irq.c b/linux/dev/drivers/net/auto_irq.c
new file mode 100644
index 0000000..73cfe34
--- /dev/null
+++ b/linux/dev/drivers/net/auto_irq.c
@@ -0,0 +1,123 @@
+/* auto_irq.c: Auto-configure IRQ lines for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This code is a general-purpose IRQ line detector for devices with
+ jumpered IRQ lines. If you can make the device raise an IRQ (and
+ that IRQ line isn't already being used), these routines will tell
+ you what IRQ line it's using -- perfect for those oh-so-cool boot-time
+ device probes!
+
+ To use this, first call autoirq_setup(timeout). TIMEOUT is how many
+  'jiffies' (1/100 sec.) to spend detecting other devices that have active IRQ lines,
+ and can usually be zero at boot. 'autoirq_setup()' returns the bit
+ vector of nominally-available IRQ lines (lines may be physically in-use,
+ but not yet registered to a device).
+ Next, set up your device to trigger an interrupt.
+ Finally call autoirq_report(TIMEOUT) to find out which IRQ line was
+ most recently active. The TIMEOUT should usually be zero, but may
+ be set to the number of jiffies to wait for a slow device to raise an IRQ.
+
+ The idea of using the setup timeout to filter out bogus IRQs came from
+ the serial driver.
+ */
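+
+/* A typical boot-time probe, following the steps above (sketch only --
+   "wd_trigger_interrupt(ioaddr)" stands for whatever device-specific poke
+   makes the board raise an interrupt):
+
+	autoirq_setup(0);
+	wd_trigger_interrupt(ioaddr);
+	dev->irq = autoirq_report(0);
+	if (dev->irq == 0)
+		printk(" failed to detect IRQ line.\n");
+ */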
+
+
+#ifdef version
+static const char *version =
+"auto_irq.c:v1.11 Donald Becker (becker@cesdis.gsfc.nasa.gov)";
+#endif
+
+#include <sys/types.h>
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/netdevice.h>
+
+void *irq2dev_map[NR_IRQS] = {0, 0, /* ... zeroed */ };
+
+unsigned long irqs_busy = 0x2147; /* The set of fixed IRQs (keyboard, timer, etc) */
+unsigned long irqs_used = 0x0001; /* The set of fixed IRQs sometimes enabled. */
+unsigned long irqs_reserved = 0x0000; /* An advisory "reserved" table. */
+unsigned long irqs_shared = 0x0000; /* IRQ lines "shared" among conforming cards. */
+
+static volatile unsigned long irq_bitmap; /* The irqs we actually found. */
+static unsigned long irq_handled; /* The irq lines we have a handler on. */
+static volatile int irq_number; /* The latest irq number we actually found. */
+
+static void
+autoirq_probe (int irq, void *dev_id, struct pt_regs *regs)
+{
+ irq_number = irq;
+ set_bit (irq, (void *) &irq_bitmap); /* irq_bitmap |= 1 << irq; */
+ /* This code used to disable the irq. However, the interrupt stub
+ * would then re-enable the interrupt with (potentially) disastrous
+ * consequences
+ */
+ free_irq (irq, dev_id);
+ return;
+}
+
+int
+autoirq_setup (int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies + waittime;
+ unsigned long boguscount = (waittime * loops_per_sec) / 100;
+
+ irq_handled = 0;
+ irq_bitmap = 0;
+
+ for (i = 0; i < 16; i++)
+ {
+ if (test_bit (i, &irqs_busy) == 0
+ && request_irq (i, autoirq_probe, SA_INTERRUPT, "irq probe", NULL) == 0)
+ set_bit (i, (void *) &irq_handled); /* irq_handled |= 1 << i; */
+ }
+ /* Update our USED lists. */
+ irqs_used |= ~irq_handled;
+
+ /* Hang out at least <waittime> jiffies waiting for bogus IRQ hits. */
+ while (timeout > jiffies && --boguscount > 0)
+ ;
+
+ irq_handled &= ~irq_bitmap;
+
+ irq_number = 0; /* We are interested in new interrupts from now on */
+
+ return irq_handled;
+}
+
+int
+autoirq_report (int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies + waittime;
+ unsigned long boguscount = (waittime * loops_per_sec) / 100;
+
+ /* Hang out at least <waittime> jiffies waiting for the IRQ. */
+
+ while (timeout > jiffies && --boguscount > 0)
+ if (irq_number)
+ break;
+
+ irq_handled &= ~irq_bitmap; /* This eliminates the already reset handlers */
+
+ /* Retract the irq handlers that we installed. */
+ for (i = 0; i < 16; i++)
+ {
+ if (test_bit (i, (void *) &irq_handled))
+ free_irq (i, NULL);
+ }
+ return irq_number;
+}
diff --git a/linux/dev/drivers/net/net_init.c b/linux/dev/drivers/net/net_init.c
new file mode 100644
index 0000000..46dbb17
--- /dev/null
+++ b/linux/dev/drivers/net/net_init.c
@@ -0,0 +1,446 @@
+/* netdrv_init.c: Initialization for network devices. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This file contains the initialization for the "pl14+" style ethernet
+ drivers. It should eventually replace most of drivers/net/Space.c.
+   Its primary advantage is that it's able to allocate low-memory buffers.
+ A secondary advantage is that the dangerous NE*000 netcards can reserve
+ their I/O port region before the SCSI probes start.
+
+ Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
+ ethdev_index[MAX_ETH_CARDS]
+ register_netdev() / unregister_netdev()
+
+ Modifications by Wolfgang Walter
+ Use dev_close cleanly so we always shut things down tidily.
+
+ Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
+
+ 14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function.
+
+ August 12, 1996 - Lawrence V. Stefani: Added fddi_change_mtu() and
+ fddi_setup() functions.
+ Sept. 10, 1996 - Lawrence V. Stefani: Increased hard_header_len to
+ include 3 pad bytes.
+*/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/malloc.h>
+#include <linux/if_ether.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/trdevice.h>
+#include <linux/if_arp.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+
+/* The network devices currently exist only in the socket namespace, so these
+ entries are unused. The only ones that make sense are
+ open start the ethercard
+ close stop the ethercard
+ ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
+ One can also imagine getting raw packets using
+ read & write
+ but this is probably better handled by a raw packet socket.
+
+ Given that almost all of these functions are handled in the current
+ socket-based scheme, putting ethercard devices in /dev/ seems pointless.
+
+ [Removed all support for /dev network devices. When someone adds
+ streams then by magic we get them, but otherwise they are un-needed
+ and a space waste]
+*/
+
+/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */
+#define MAX_ETH_CARDS 16		/* same as the number of irqs in irq2dev[] */
+static struct device *ethdev_index[MAX_ETH_CARDS];
+
+
+/* Fill in the fields of the device structure with ethernet-generic values.
+
+ If no device structure is passed, a new one is constructed, complete with
+ a SIZEOF_PRIVATE private data area.
+
+ If an empty string area is passed as dev->name, or a new structure is made,
+ a new name string is constructed. The passed string area should be 8 bytes
+ long.
+ */
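+
+/* Typical use from a driver's probe routine (sketch only; "struct my_priv"
+   and the fields filled in afterwards are placeholders, not part of this file):
+
+	struct device *dev = init_etherdev(NULL, sizeof(struct my_priv));
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+	dev->open = &my_open;
+ */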
+
+struct device *
+init_etherdev(struct device *dev, int sizeof_priv)
+{
+ int new_device = 0;
+ int i;
+
+ /* Use an existing correctly named device in Space.c:dev_base. */
+ if (dev == NULL) {
+ int alloc_size = sizeof(struct device) + sizeof("eth%d ")
+ + sizeof_priv + 3;
+ struct device *cur_dev;
+ char pname[8]; /* Putative name for the device. */
+
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(pname, "eth%d", i);
+ for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next)
+ if (strcmp(pname, cur_dev->name) == 0) {
+ dev = cur_dev;
+ dev->init = NULL;
+ sizeof_priv = (sizeof_priv + 3) & ~3;
+ dev->priv = sizeof_priv
+ ? kmalloc(sizeof_priv, GFP_KERNEL)
+ : NULL;
+ if (dev->priv) memset(dev->priv, 0, sizeof_priv);
+ goto found;
+ }
+ }
+
+ alloc_size &= ~3; /* Round to dword boundary. */
+
+ dev = (struct device *)kmalloc(alloc_size, GFP_KERNEL);
+ memset(dev, 0, alloc_size);
+ if (sizeof_priv)
+ dev->priv = (void *) (dev + 1);
+ dev->name = sizeof_priv + (char *)(dev + 1);
+ new_device = 1;
+ }
+
+ found: /* From the double loop above. */
+
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ ether_setup(dev); /* Hmmm, should this be called here? */
+
+ if (new_device) {
+ /* Append the device to the device queue. */
+ struct device **old_devp = &dev_base;
+ while ((*old_devp)->next)
+ old_devp = & (*old_devp)->next;
+ (*old_devp)->next = dev;
+ dev->next = 0;
+ }
+ return dev;
+}
+
+
+static int eth_mac_addr(struct device *dev, void *p)
+{
+ struct sockaddr *addr=p;
+ if(dev->start)
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ return 0;
+}
+
+static int eth_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef CONFIG_FDDI
+
+static int fddi_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
+ return(-EINVAL);
+ dev->mtu = new_mtu;
+ return(0);
+}
+
+#endif
+
+void ether_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ /* register boot-defined "eth" devices */
+ if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) {
+ i = simple_strtoul(dev->name + 3, NULL, 0);
+ if (ethdev_index[i] == NULL) {
+ ethdev_index[i] = dev;
+ }
+ else if (dev != ethdev_index[i]) {
+ /* Really shouldn't happen! */
+#ifdef MACH
+ panic("ether_setup: Ouch! Someone else took %s\n",
+ dev->name);
+#else
+ printk("ether_setup: Ouch! Someone else took %s\n",
+ dev->name);
+#endif
+ }
+ }
+
+#ifndef MACH
+ dev->change_mtu = eth_change_mtu;
+ dev->hard_header = eth_header;
+ dev->rebuild_header = eth_rebuild_header;
+ dev->set_mac_address = eth_mac_addr;
+ dev->header_cache_bind = eth_header_cache_bind;
+ dev->header_cache_update= eth_header_cache_update;
+#endif
+
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = 1500; /* eth_mtu */
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 100; /* Ethernet wants good queues */
+
+ memset(dev->broadcast,0xFF, ETH_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#ifdef CONFIG_TR
+
+void tr_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->hard_header = tr_header;
+ dev->rebuild_header = tr_rebuild_header;
+
+ dev->type = ARPHRD_IEEE802;
+ dev->hard_header_len = TR_HLEN;
+ dev->mtu = 2000; /* bug in fragmenter...*/
+ dev->addr_len = TR_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on tr */
+
+ memset(dev->broadcast,0xFF, TR_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#endif
+
+#ifdef CONFIG_FDDI
+
+void fddi_setup(struct device *dev)
+ {
+ int i;
+
+ /*
+ * Fill in the fields of the device structure with FDDI-generic values.
+ * This should be in a common file instead of per-driver.
+ */
+ for (i=0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->change_mtu = fddi_change_mtu;
+ dev->hard_header = fddi_header;
+ dev->rebuild_header = fddi_rebuild_header;
+
+ dev->type = ARPHRD_FDDI;
+ dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
+ dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
+ dev->addr_len = FDDI_K_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on FDDI */
+
+ memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
+
+ /* New-style flags */
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+ return;
+ }
+
+#endif
+
+int ether_config(struct device *dev, struct ifmap *map)
+{
+ if (map->mem_start != (u_long)(-1))
+ dev->mem_start = map->mem_start;
+ if (map->mem_end != (u_long)(-1))
+ dev->mem_end = map->mem_end;
+ if (map->base_addr != (u_short)(-1))
+ dev->base_addr = map->base_addr;
+ if (map->irq != (u_char)(-1))
+ dev->irq = map->irq;
+ if (map->dma != (u_char)(-1))
+ dev->dma = map->dma;
+ if (map->port != (u_char)(-1))
+ dev->if_port = map->port;
+ return 0;
+}
+
+int register_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i=MAX_ETH_CARDS;
+
+ save_flags(flags);
+ cli();
+
+ if (dev && dev->init) {
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ printk("loading device '%s'...\n", dev->name);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ sti(); /* device probes assume interrupts enabled */
+ if (dev->init(dev) != 0) {
+ if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL;
+ restore_flags(flags);
+ return -EIO;
+ }
+ cli();
+
+ /* Add device to end of chain */
+ if (dev_base) {
+ while (d->next)
+ d = d->next;
+ d->next = dev;
+ }
+ else
+ dev_base = dev;
+ dev->next = NULL;
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+void unregister_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i;
+
+ save_flags(flags);
+ cli();
+
+ if (dev == NULL)
+ {
+ printk("was NULL\n");
+ restore_flags(flags);
+ return;
+ }
+ /* else */
+ if (dev->start)
+ printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name);
+
+ /*
+ * must jump over main_device+aliases
+ * avoid alias devices unregistration so that only
+ * net_alias module manages them
+ */
+#ifdef CONFIG_NET_ALIAS
+ if (dev_base == dev)
+ dev_base = net_alias_nextdev(dev);
+ else
+ {
+ while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */
+ d = net_alias_nextdev(d);
+
+ if (d && (net_alias_nextdev(d) == dev))
+ {
+ /*
+		 * Critical: bypass by considering devices as blocks (maindev+aliases)
+ */
+ net_alias_nextdev_set(d, net_alias_nextdev(dev));
+ }
+#else
+ if (dev_base == dev)
+ dev_base = dev->next;
+ else
+ {
+ while (d && (d->next != dev))
+ d = d->next;
+
+ if (d && (d->next == dev))
+ {
+ d->next = dev->next;
+ }
+#endif
+ else
+ {
+ printk("unregister_netdev: '%s' not found\n", dev->name);
+ restore_flags(flags);
+ return;
+ }
+ }
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ {
+ if (ethdev_index[i] == dev)
+ {
+ ethdev_index[i] = NULL;
+ break;
+ }
+ }
+
+ restore_flags(flags);
+
+ /*
+	 * An interface can e.g. still be used in a route even though it is not up,
+	 * so we call dev_close (which has been changed: it will take a device down
+	 * even if dev->flags == 0, but it will not call dev->stop if IFF_UP
+	 * is not set).
+ * This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev),
+ * dev_mc_discard(dev), ....
+ */
+
+ dev_close(dev);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/dev/drivers/net/wavelan.p.h b/linux/dev/drivers/net/wavelan.p.h
new file mode 100644
index 0000000..0549844
--- /dev/null
+++ b/linux/dev/drivers/net/wavelan.p.h
@@ -0,0 +1,639 @@
+/*
+ * Wavelan ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ *
+ * This file contains all definitions and declarations necessary for the
+ * wavelan ISA driver. This file is a private header, so it should
+ * be included only by wavelan.c !!!
+ */
+
+#ifndef WAVELAN_P_H
+#define WAVELAN_P_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * This driver provides a Linux interface to the Wavelan ISA hardware.
+ * The Wavelan is a product of Lucent ("http://wavelan.netland.nl/");
+ * this division was formerly part of NCR and then AT&T.
+ * Wavelan hardware is also distributed by DEC (RoamAbout), Digital Ocean and
+ * Aironet (Arlan). If you have one of those products, you will need to
+ * make some changes below...
+ *
+ * This driver is still beta software. A lot of bugs have been corrected,
+ * a lot of functionality is implemented, and the whole thing appears pretty
+ * stable, but there are still some areas for improvement (encryption,
+ * performance...).
+ *
+ * To learn how to use this driver, read the NET3 HOWTO.
+ * If you want to exploit the many other features, look at the comments
+ * in the code...
+ *
+ * This driver is the result of the effort of many people (see below).
+ */
+
+/* ------------------------ SPECIFIC NOTES ------------------------ */
+/*
+ * wavelan.o is darn too big
+ * -------------------------
+ * That's true ! There is a very simple way to reduce the driver
+ * object by 33% (yes !). Comment out the following line :
+ * #include <linux/wireless.h>
+ *
+ * MAC address and hardware detection :
+ * ----------------------------------
+ * The wavelan detection code checks that the first 3
+ * octets of the MAC address match the company code. This type of
+ * detection works well for AT&T cards (because the AT&T code is
+ * hardcoded in wavelan.h), but of course will fail for other
+ * manufacturers.
+ *
+ * If you are sure that your card is derived from the wavelan,
+ * here is the way to configure it :
+ * 1) Get your MAC address
+ * a) With your card utilities (wfreqsel, instconf, ...)
+ * b) With the driver :
+ * o compile the kernel with DEBUG_CONFIG_INFO enabled
+ *	     o Boot and look at the card messages
+ * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan.h)
+ * 3) Compile & verify
+ * 4) Send me the MAC code - I will include it in the next version...
+ *
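+ *
+ *	For example (illustrative octets -- use the 3 you found above), a
+ *	card whose address starts 08:00:0E would need the entry
+ *	{ 0x08, 0x00, 0x0E } added to MAC_ADDRESSES[][3].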
+ * "CU Inactive" message at boot up :
+ * -----------------------------------
+ * It seem that there is some weird timings problems with the
+ * Intel microcontroler. In fact, this message is triggered by a
+ * bad reading of the on board ram the first time we read the
+ * control block. If you ignore this message, all is ok (but in
+ * fact, currently, it reset the wavelan hardware).
+ *
+ * To get rid of that problem, there is two solution. The first
+ * is to add a dummy read of the scb at the end of
+ * wv_82586_config. The second is to add the timers
+ * wv_synchronous_cmd and wv_ack (the udelay just after the
+ * waiting loops - seem that the controler is not totally ready
+ * when it say it is !).
+ *
+ * In the current code, I use the second solution (to be
+ * consistent with the original solution of Bruce Janson).
+ */
+
+/* --------------------- WIRELESS EXTENSIONS --------------------- */
+/*
+ * This driver is the first one to support "wireless extensions".
+ * This set of extensions provides a way to control the wireless
+ * characteristics of the hardware in a standard manner, and supports
+ * applications that take advantage of it (like Mobile IP).
+ *
+ * You will need to enable the CONFIG_NET_RADIO define in the kernel
+ * configuration to enable the wireless extensions (this is the one
+ * giving access to the radio network device choice).
+ *
+ * It might also be a good idea to fetch the wireless tools to
+ * configure the device and play a bit.
+ */
+
+/* ---------------------------- FILES ---------------------------- */
+/*
+ * wavelan.c : The actual code for the driver - C functions
+ *
+ * wavelan.p.h : Private header : local types / vars for the driver
+ *
+ * wavelan.h : Description of the hardware interface & structs
+ *
+ * i82586.h : Description of the Ethernet controller
+ */
+
+/* --------------------------- HISTORY --------------------------- */
+/*
+ * (Compiled from information in the drivers' headers. It may not be accurate,
+ * and I guarantee nothing except my best effort...)
+ *
+ * The history of the Wavelan drivers is as complicated as the history of
+ * the Wavelan itself (NCR -> AT&T -> Lucent).
+ *
+ * It all started with Anders Klemets <klemets@paul.rutgers.edu>,
+ * writing a Wavelan ISA driver for the MACH microkernel. Girish
+ * Welling <welling@paul.rutgers.edu> had also worked on it.
+ * Keith Moore modified this for the Pcmcia hardware.
+ *
+ * Robert Morris <rtm@das.harvard.edu> ported these two drivers to BSDI
+ * and added specific Pcmcia support (there is currently no equivalent
+ * of the PCMCIA package under BSD...).
+ *
+ * Jim Binkley <jrb@cs.pdx.edu> ported both BSDI drivers to FreeBSD.
+ *
+ * Bruce Janson <bruce@cs.usyd.edu.au> ported the BSDI ISA driver to Linux.
+ *
+ * Anthony D. Joseph <adj@lcs.mit.edu> started modifying Bruce's driver
+ * (with help of the BSDI PCMCIA driver) for PCMCIA.
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished his work.
+ * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to correctly start
+ * 2.00 cards (2.4 GHz with frequency selection).
+ * David Hinds <dhinds@hyper.stanford.edu> integrated the whole thing in his
+ * Pcmcia package (+ bug corrections).
+ *
+ * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
+ * patches to the Pcmcia driver. Afterwards, I added code in the ISA driver
+ * for Wireless Extensions and full support of frequency selection
+ * cards. Then, I did the same to the Pcmcia driver plus some
+ * reorganisation. Finally, I came back to the ISA driver to
+ * upgrade it to the same level as the Pcmcia one and reorganise
+ * the code.
+ * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
+ * much needed information on the Wavelan hardware.
+ */
+
+/* The original copyrights and literature mention other names and
+ * credits. I don't know what their part in this development was...
+ */
+
+/* By the way : for the copyright & legal stuff :
+ * Almost everybody wrote code under the GNU or BSD license (or alike),
+ * and wants their original copyright to remain somewhere in the
+ * code (for myself, I go with the GPL).
+ * Nobody wants to take responsibility for anything, except the fame...
+ */
+
+/* --------------------------- CREDITS --------------------------- */
+/*
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@cesdis.gsfc.nasa.gov),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Brent Elphick <belphick@uwaterloo.ca>,
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Jean Tourrilhes (jt@hplb.hpl.hp.com),
+ * Girish Welling (welling@paul.rutgers.edu),
+ * Clark Woodworth <clark@hiway1.exit109.com>
+ * Yongguang Zhang <ygz@isl.hrl.hac.com>...
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (iialan@iiit.swan.ac.uk),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Stanislav Sinyagin <stas@isf.ru>
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Additional Credits:
+ *
+ * My development has been done under Linux 2.0.x (Debian 1.1) with
+ * an HP Vectra XP/60.
+ *
+ */
+
+/* ------------------------- IMPROVEMENTS ------------------------- */
+/*
+ * I proudly present :
+ *
+ * Changes made in first pre-release :
+ * ----------------------------------
+ * - Reorganisation of the code, function name change
+ * - Creation of private header (wavelan.p.h)
+ * - Reorganised debug messages
+ * - More comments, history, ...
+ * - mmc_init : configure the PSA if not done
+ * - mmc_init : correct default value of level threshold for pcmcia
+ *	- mmc_init : 2.00 detection & better code for 2.00 init
+ * - better info at startup
+ * - irq setting (note : this setting is permanent...)
+ * - Watchdog : change strategy (+ solve module removal problems)
+ * - add wireless extensions (ioctl & get_wireless_stats)
+ *	  get/set nwid/frequency on the fly, info for /proc/net/wireless
+ * - More wireless extension : SETSPY and GETSPY
+ * - Make wireless extensions optional
+ * - Private ioctl to set/get quality & level threshold, histogram
+ * - Remove /proc/net/wavelan
+ *	- Suppress useless stuff from lp (net_local)
+ * - kernel 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
+ * - Add message level (debug stuff in /var/adm/debug & errors not
+ * displayed at console and still in /var/adm/messages)
+ * - multi device support
+ * - Start fixing the probe (init code)
+ * - More inlines
+ * - man page
+ *	- Lots of other minor details & cleanups
+ *
+ * Changes made in second pre-release :
+ * ----------------------------------
+ * - Cleanup init code (probe & module init)
+ * - Better multi device support (module)
+ *	- name assignment (module)
+ *
+ * Changes made in third pre-release :
+ * ---------------------------------
+ * - Be more conservative on timers
+ * - Preliminary support for multicast (I still lack some details...)
+ *
+ * Changes made in fourth pre-release :
+ * ----------------------------------
+ * - multicast (revisited and finished)
+ * - Avoid reset in set_multicast_list (a really big hack)
+ *	  if somebody could apply this code to other i82586 based drivers...
+ * - Share on board memory 75% RU / 25% CU (instead of 50/50)
+ *
+ * Changes made for release in 2.1.15 :
+ * ----------------------------------
+ * - Change the detection code for multi manufacturer code support
+ *
+ * Changes made for release in 2.1.17 :
+ * ----------------------------------
+ * - Update to wireless extensions changes
+ * - Silly bug in card initial configuration (psa_conf_status)
+ *
+ * Changes made for release in 2.1.27 & 2.0.30 :
+ * -------------------------------------------
+ * - Small bug in debug code (probably not the last one...)
+ *	- Remove extern keyword from wavelan_probe()
+ * - Level threshold is now a standard wireless extension (version 4 !)
+ *
+ * Changes made for release in 2.1.36 :
+ * ----------------------------------
+ * - Encryption setting from Brent Elphick (thanks a lot !)
+ * - 'ioaddr' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
+ *
+ * Wishes & dreams :
+ * ---------------
+ * - Roaming
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+#include <linux/timer.h>
+
+#include <linux/wireless.h> /* Wireless extensions */
+
+/* Wavelan declarations */
+#ifdef MACH
+#include <linuxdev/drivers/net/i82586.h>
+#else
+#include "i82586.h"
+#endif
+#include "wavelan.h"
+
+/****************************** DEBUG ******************************/
+
+#undef DEBUG_MODULE_TRACE /* Module insertion/removal */
+#undef DEBUG_CALLBACK_TRACE /* Calls made by Linux */
+#undef DEBUG_INTERRUPT_TRACE /* Calls to handler */
+#undef DEBUG_INTERRUPT_INFO /* type of interrupt & so on */
+#define DEBUG_INTERRUPT_ERROR /* problems */
+#undef DEBUG_CONFIG_TRACE /* Trace the config functions */
+#undef DEBUG_CONFIG_INFO /* What's going on... */
+#define DEBUG_CONFIG_ERRORS /* Errors on configuration */
+#undef DEBUG_TX_TRACE /* Transmission calls */
+#undef DEBUG_TX_INFO		/* Header of the transmitted packet */
+#define DEBUG_TX_ERROR /* unexpected conditions */
+#undef DEBUG_RX_TRACE		/* Reception calls */
+#undef DEBUG_RX_INFO		/* Header of the received packet */
+#define DEBUG_RX_ERROR /* unexpected conditions */
+#undef DEBUG_PACKET_DUMP 16 /* Dump packet on the screen */
+#undef DEBUG_IOCTL_TRACE /* Misc call by Linux */
+#undef DEBUG_IOCTL_INFO /* Various debug info */
+#define DEBUG_IOCTL_ERROR /* What's going wrong */
+#define DEBUG_BASIC_SHOW /* Show basic startup info */
+#undef DEBUG_VERSION_SHOW /* Print version info */
+#undef DEBUG_PSA_SHOW /* Dump psa to screen */
+#undef DEBUG_MMC_SHOW /* Dump mmc to screen */
+#undef DEBUG_SHOW_UNUSED /* Show also unused fields */
+#undef DEBUG_I82586_SHOW /* Show i82586 status */
+#undef DEBUG_DEVICE_SHOW /* Show device parameters */
+
+/* Options : */
+#define USE_PSA_CONFIG /* Use info from the PSA */
+#define IGNORE_NORMAL_XMIT_ERRS /* Don't bother with normal conditions */
+#undef STRUCT_CHECK /* Verify padding of structures */
+#undef PSA_CRC /* Check CRC in PSA */
+#undef OLDIES /* Old code (to redo) */
+#undef RECORD_SNR /* To redo */
+#undef EEPROM_IS_PROTECTED /* Doesn't seem to be necessary */
+#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+/* Warning : this stuff will slow down the driver... */
+#define WIRELESS_SPY /* Enable spying addresses */
+#undef HISTOGRAM /* Enable histogram of sig level... */
+#endif
+
+/************************ CONSTANTS & MACROS ************************/
+
+#ifdef DEBUG_VERSION_SHOW
+static const char *version = "wavelan.c : v16 (wireless extensions) 17/4/97\n";
+#endif
+
+/* Watchdog timeout (in jiffies) */
+#define WATCHDOG_JIFFIES 32 /* TODO: express in HZ. */
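+/*
+ * Sketch for the TODO above : with HZ = 100 on i386, 32 jiffies is
+ * roughly a third of a second, so an HZ-based equivalent could be
+ * something like :
+ *
+ *	#define WATCHDOG_JIFFIES	(HZ / 3)
+ */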
+
+/* Macro to get the number of elements in an array */
+#define NELS(a) (sizeof(a) / sizeof(a[0]))
+
+/* ------------------------ PRIVATE IOCTL ------------------------ */
+
+#define SIOCSIPQTHR SIOCDEVPRIVATE /* Set quality threshold */
+#define SIOCGIPQTHR SIOCDEVPRIVATE + 1 /* Get quality threshold */
+#define SIOCSIPLTHR SIOCDEVPRIVATE + 2 /* Set level threshold */
+#define SIOCGIPLTHR SIOCDEVPRIVATE + 3 /* Get level threshold */
+
+#define SIOCSIPHISTO SIOCDEVPRIVATE + 6 /* Set histogram ranges */
+#define SIOCGIPHISTO SIOCDEVPRIVATE + 7 /* Get histogram values */
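+
+/*
+ * A sketch (not part of the driver) of how an application might issue
+ * one of these private ioctls, assuming the usual wireless extension
+ * convention of passing a struct iwreq ; the values are made up :
+ *
+ *	struct iwreq	wrq;
+ *	int		skfd = socket(AF_INET, SOCK_DGRAM, 0);
+ *	strncpy(wrq.ifr_name, "eth1", IFNAMSIZ);
+ *	ioctl(skfd, SIOCGIPQTHR, &wrq);		(read quality threshold)
+ */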
+
+/* ----------------------- VERSION SUPPORT ----------------------- */
+
+/* This ugly patch is needed to cope with old versions of the kernel */
+#ifndef copy_from_user
+#define copy_from_user memcpy_fromfs
+#define copy_to_user memcpy_tofs
+#endif
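+
+/*
+ * With the shim above, a single source line works both on 2.0.x
+ * kernels (where it expands to memcpy_fromfs) and on 2.1.x kernels.
+ * A hypothetical example from an ioctl handler :
+ *
+ *	copy_from_user(&threshold, wrq->u.data.pointer, sizeof(threshold));
+ */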
+
+/****************************** TYPES ******************************/
+
+/* Shortcuts */
+typedef struct device device;
+typedef struct enet_statistics en_stats;
+typedef struct iw_statistics iw_stats;
+typedef struct iw_quality iw_qual;
+typedef struct iw_freq iw_freq;
+typedef struct net_local net_local;
+typedef struct timer_list timer_list;
+
+/* Basic types */
+typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
+
+/*
+ * Static specific data for the interface.
+ *
+ * For each network interface, Linux keeps data in two structures. "device"
+ * keeps the generic data (same format for everybody) and "net_local" keeps
+ * the additional specific data.
+ * Note that some of this specific data is in fact generic (en_stats, for
+ * example).
+ */
+struct net_local
+{
+ net_local * next; /* Linked list of the devices */
+ device * dev; /* Reverse link... */
+ en_stats stats; /* Ethernet interface statistics */
+ int nresets; /* Number of hw resets */
+  u_char	reconfig_82586;	/* Need to reconfigure the controller */
+ u_char promiscuous; /* Promiscuous mode */
+ int mc_count; /* Number of multicast addresses */
+ timer_list watchdog; /* To avoid blocking state */
+ u_short hacr; /* Current host interface state */
+
+ int tx_n_in_use;
+ u_short rx_head;
+ u_short rx_last;
+ u_short tx_first_free;
+ u_short tx_first_in_use;
+
+#ifdef WIRELESS_EXT
+ iw_stats wstats; /* Wireless specific stats */
+#endif
+
+#ifdef WIRELESS_SPY
+ int spy_number; /* Number of addresses to spy */
+ mac_addr spy_address[IW_MAX_SPY]; /* The addresses to spy */
+ iw_qual spy_stat[IW_MAX_SPY]; /* Statistics gathered */
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ int his_number; /* Number of intervals */
+ u_char his_range[16]; /* Boundaries of interval ]n-1; n] */
+ u_long his_sum[16]; /* Sum in interval */
+#endif /* HISTOGRAM */
+};
+
+/**************************** PROTOTYPES ****************************/
+
+/* ----------------------- MISC SUBROUTINES ------------------------ */
+static inline unsigned long /* flags */
+ wv_splhi(void); /* Disable interrupts */
+static inline void
+ wv_splx(unsigned long); /* ReEnable interrupts : flags */
+static u_char
+ wv_irq_to_psa(int);
+static int
+ wv_psa_to_irq(u_char);
+/* ------------------- HOST ADAPTER SUBROUTINES ------------------- */
+static inline u_short /* data */
+ hasr_read(u_long); /* Read the host interface : base address */
+static inline void
+ hacr_write(u_long, /* Write to host interface : base address */
+ u_short), /* data */
+ hacr_write_slow(u_long,
+ u_short),
+ set_chan_attn(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_hacr_reset(u_long), /* ioaddr */
+ wv_16_off(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_16_on(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_ints_off(device *),
+ wv_ints_on(device *);
+/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
+static void
+ psa_read(u_long, /* Read the Parameter Storage Area */
+ u_short, /* hacr */
+ int, /* offset in PSA */
+ u_char *, /* buffer to fill */
+ int), /* size to read */
+ psa_write(u_long, /* Write to the PSA */
+ u_short, /* hacr */
+ int, /* Offset in psa */
+ u_char *, /* Buffer in memory */
+ int); /* Length of buffer */
+static inline void
+ mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */
+ u_short,
+ u_char),
+ mmc_write(u_long, /* Write n bytes to the MMC */
+ u_char,
+ u_char *,
+ int);
+static inline u_char /* Read 1 byte from the MMC */
+ mmc_in(u_long,
+ u_short);
+static inline void
+ mmc_read(u_long, /* Read n bytes from the MMC */
+ u_char,
+ u_char *,
+ int),
+ fee_wait(u_long, /* Wait for frequency EEprom : base address */
+ int, /* Base delay to wait for */
+		   int);	/* Number of times to wait */
+static void
+ fee_read(u_long, /* Read the frequency EEprom : base address */
+ u_short, /* destination offset */
+ u_short *, /* data buffer */
+ int); /* number of registers */
+/* ---------------------- I82586 SUBROUTINES ----------------------- */
+static /*inline*/ void
+ obram_read(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static inline void
+ obram_write(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static void
+ wv_ack(device *);
+static inline int
+ wv_synchronous_cmd(device *,
+ const char *),
+ wv_config_complete(device *,
+ u_long,
+ net_local *);
+static int
+ wv_complete(device *,
+ u_long,
+ net_local *);
+static inline void
+ wv_82586_reconfig(device *);
+/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
+#ifdef DEBUG_I82586_SHOW
+static void
+ wv_scb_show(unsigned short);
+#endif
+static inline void
+ wv_init_info(device *); /* display startup info */
+/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
+static en_stats *
+ wavelan_get_stats(device *); /* Give stats /proc/net/dev */
+static void
+ wavelan_set_multicast_list(device *);
+/* ----------------------- PACKET RECEPTION ----------------------- */
+static inline void
+ wv_packet_read(device *, /* Read a packet from a frame */
+ u_short,
+ int),
+ wv_receive(device *); /* Read all packets waiting */
+/* --------------------- PACKET TRANSMISSION --------------------- */
+static inline void
+ wv_packet_write(device *, /* Write a packet to the Tx buffer */
+ void *,
+ short);
+static int
+ wavelan_packet_xmit(struct sk_buff *, /* Send a packet */
+ device *);
+/* -------------------- HARDWARE CONFIGURATION -------------------- */
+static inline int
+ wv_mmc_init(device *), /* Initialize the modem */
+ wv_ru_start(device *), /* Start the i82586 receiver unit */
+ wv_cu_start(device *), /* Start the i82586 command unit */
+ wv_82586_start(device *); /* Start the i82586 */
+static void
+ wv_82586_config(device *); /* Configure the i82586 */
+static inline void
+ wv_82586_stop(device *);
+static int
+ wv_hw_reset(device *), /* Reset the wavelan hardware */
+ wv_check_ioaddr(u_long, /* ioaddr */
+ u_char *); /* mac address (read) */
+/* ---------------------- INTERRUPT HANDLING ---------------------- */
+static void
+ wavelan_interrupt(int, /* Interrupt handler */
+ void *,
+ struct pt_regs *);
+static void
+ wavelan_watchdog(u_long); /* Transmission watchdog */
+/* ------------------- CONFIGURATION CALLBACKS ------------------- */
+static int
+ wavelan_open(device *), /* Open the device */
+ wavelan_close(device *), /* Close the device */
+ wavelan_config(device *); /* Configure one device */
+extern int
+ wavelan_probe(device *); /* See Space.c */
+
+/**************************** VARIABLES ****************************/
+
+/*
+ * This is the root of the linked list of wavelan drivers.
+ * It is used to verify that we don't reuse the same base address
+ * for two different drivers and to perform the cleanup when
+ * removing the module.
+ */
+static net_local * wavelan_list = (net_local *) NULL;
+
+/*
+ * This table is used to translate the psa value to irq number
+ * and vice versa...
+ */
+static u_char irqvals[] =
+{
+ 0, 0, 0, 0x01,
+ 0x02, 0x04, 0, 0x08,
+ 0, 0, 0x10, 0x20,
+ 0x40, 0, 0, 0x80,
+};
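+
+/*
+ * For illustration, a sketch of how wv_psa_to_irq() (declared above)
+ * might use this table -- the real definition is in wavelan.c :
+ *
+ *	int irq;
+ *	for (irq = 0; irq < NELS(irqvals); irq++)
+ *		if (irqvals[irq] == psa_irq)
+ *			return irq;
+ *	return -1;		(not a valid irq setting)
+ */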
+
+/*
+ * Table of the available i/o addresses (base addresses) for wavelan
+ */
+static unsigned short iobase[] =
+{
+#if 0
+ /* Leave out 0x3C0 for now -- seems to clash with some video
+ * controllers.
+ * Leave out the others too -- we will always use 0x390 and leave
+ * 0x300 for the Ethernet device.
+ * Jean II : 0x3E0 is really fine as well...
+ */
+ 0x300, 0x390, 0x3E0, 0x3C0
+#endif /* 0 */
+ 0x390, 0x3E0
+};
+
+#ifdef MODULE
+/* Name of the devices (memory allocation) */
+static char devname[4][IFNAMSIZ] = { "", "", "", "" };
+
+/* Parameters set by insmod */
+static int io[4] = { 0, 0, 0, 0 };
+static int irq[4] = { 0, 0, 0, 0 };
+static char * name[4] = { devname[0], devname[1], devname[2], devname[3] };
+#endif /* MODULE */
+
+#endif /* WAVELAN_P_H */
diff --git a/linux/dev/drivers/scsi/eata_dma.c b/linux/dev/drivers/scsi/eata_dma.c
new file mode 100644
index 0000000..e902ea1
--- /dev/null
+++ b/linux/dev/drivers/scsi/eata_dma.c
@@ -0,0 +1,1607 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all ISA based EATA-DMA boards *
+ * like PM2011, PM2021, PM2041, PM3021 *
+ * -supports all EISA based EATA-DMA boards *
+ * like PM2012B, PM2022, PM2122, PM2322, PM2042, *
+ * PM3122, PM3222, PM3332 *
+ * -supports all PCI based EATA-DMA boards *
+ * like PM2024, PM2124, PM2044, PM2144, PM3224, *
+ * PM3334 *
+ * -supports the Wide, Ultra Wide and Differential *
+ * versions of the boards *
+ * -supports multiple HBAs with & without IRQ sharing *
+ * -supports all SCSI channels on multi channel boards *
+ * -supports ix86 and MIPS, untested on ALPHA *
+ * -needs identical IDs on all channels of a HBA *
+ * -can be loaded as module *
+ * -displays statistical and hardware information *
+ * in /proc/scsi/eata_dma *
+ * -provides rudimentary latency measurement *
+ * possibilities via /proc/scsi/eata_dma/<hostnum> *
+ * *
+ * (c)1993-96 Michael Neuffer *
+ * mike@i-Connect.Net *
+ * neuffer@mail.uni-mainz.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ * I have to thank DPT for their excellent support. It took *
+ * me almost a year and a stopover at their HQ, on my first *
+ * trip to the USA, to get it, but since then they've been *
+ * very helpful and tried to give me all the infos and *
+ * support I need. *
+ * *
+ * Thanks also to Simon Shapiro, Greg Hosler and Mike *
+ * Jagdis who did a lot of testing and found quite a number *
+ * of bugs during the development. *
+ ************************************************************
+ * last change: 96/10/21 OS: Linux 2.0.23 *
+ ************************************************************/
+
+/* Look in eata_dma.h for configuration and revision information */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#ifdef MACH
+#define flush_cache_all()
+#else
+#include <asm/pgtable.h>
+#endif
+#ifdef __mips__
+#include <asm/cachectl.h>
+#endif
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include "eata_dma.h"
+#include "eata_dma_proc.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_eata_dma = {
+ PROC_SCSI_EATA, 8, "eata_dma",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static u32 ISAbases[] =
+{0x1F0, 0x170, 0x330, 0x230};
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static struct eata_sp *status = 0; /* Statuspacket array */
+static void *dma_scratch = 0;
+
+static struct eata_register *fake_int_base;
+static int fake_int_result;
+static int fake_int_happened;
+
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
+void eata_scsi_done (Scsi_Cmnd * scmd)
+{
+ scmd->request.rq_status = RQ_SCSI_DONE;
+
+ if (scmd->request.sem != NULL)
+ up(scmd->request.sem);
+
+ return;
+}
+
+void eata_fake_int_handler(s32 irq, void *dev_id, struct pt_regs * regs)
+{
+ fake_int_result = inb((ulong)fake_int_base + HA_RSTATUS);
+ fake_int_happened = TRUE;
+ DBG(DBG_INTR3, printk("eata_fake_int_handler called irq%d base %p"
+ " res %#x\n", irq, fake_int_base, fake_int_result));
+ return;
+}
+
+#include "eata_dma_proc.c"
+
+#ifdef MODULE
+int eata_release(struct Scsi_Host *sh)
+{
+ uint i;
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL);
+ else reg_IRQ[sh->irq]--;
+
+ scsi_init_free((void *)status, 512);
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+ for (i = 0; i < sh->can_queue; i++){ /* Free all SG arrays */
+ if(SD(sh)->ccb[i].sg_list != NULL)
+ scsi_init_free((void *) SD(sh)->ccb[i].sg_list,
+ sh->sg_tablesize * sizeof(struct eata_sg_list));
+ }
+
+ if (SD(sh)->channel == 0) {
+ if (sh->dma_channel != BUSMASTER) free_dma(sh->dma_channel);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
+
+inline void eata_latency_in(struct eata_ccb *cp, hostdata *hd)
+{
+ uint time;
+ time = jiffies - cp->timestamp;
+ if(hd->all_lat[1] > time)
+ hd->all_lat[1] = time;
+ if(hd->all_lat[2] < time)
+ hd->all_lat[2] = time;
+ hd->all_lat[3] += time;
+ hd->all_lat[0]++;
+ if((cp->rw_latency) == WRITE) { /* was WRITE */
+ if(hd->writes_lat[cp->sizeindex][1] > time)
+ hd->writes_lat[cp->sizeindex][1] = time;
+ if(hd->writes_lat[cp->sizeindex][2] < time)
+ hd->writes_lat[cp->sizeindex][2] = time;
+ hd->writes_lat[cp->sizeindex][3] += time;
+ hd->writes_lat[cp->sizeindex][0]++;
+ } else if((cp->rw_latency) == READ) {
+ if(hd->reads_lat[cp->sizeindex][1] > time)
+ hd->reads_lat[cp->sizeindex][1] = time;
+ if(hd->reads_lat[cp->sizeindex][2] < time)
+ hd->reads_lat[cp->sizeindex][2] = time;
+ hd->reads_lat[cp->sizeindex][3] += time;
+ hd->reads_lat[cp->sizeindex][0]++;
+ }
+}
+
+inline void eata_latency_out(struct eata_ccb *cp, Scsi_Cmnd *cmd)
+{
+ int x, z;
+ short *sho;
+ long *lon;
+ x = 0; /* just to keep GCC quiet */
+ cp->timestamp = jiffies; /* For latency measurements */
+ switch(cmd->cmnd[0]) {
+ case WRITE_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = READ;
+ break;
+ default:
+ cp->rw_latency = OTHER;
+ break;
+ }
+ if (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 ||
+ cmd->cmnd[0] == WRITE_12 || cmd->cmnd[0] == READ_6 ||
+ cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_12) {
+ for(z = 0; (x > (1 << z)) && (z <= 11); z++)
+ /* nothing */;
+ cp->sizeindex = z;
+ }
+}
+
+
+void eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ uint i, result = 0;
+ uint hba_stat, scsi_stat, eata_stat;
+ Scsi_Cmnd *cmd;
+ struct eata_ccb *ccb;
+ struct eata_sp *sp;
+ uint base;
+ uint x;
+ struct Scsi_Host *sh;
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if (sh->irq != irq)
+ continue;
+
+ while(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+
+ int_counter++;
+
+ sp = &SD(sh)->sp;
+#ifdef __mips__
+ sys_cacheflush(sp, sizeof(struct eata_sp), 2);
+#endif
+ ccb = sp->ccb;
+
+ if(ccb == NULL) {
+ eata_stat = inb((uint)sh->base + HA_RSTATUS);
+ printk("eata_dma: int_handler, Spurious IRQ %d "
+ "received. CCB pointer not set.\n", irq);
+ break;
+ }
+
+ cmd = ccb->cmd;
+ base = (uint) cmd->host->base;
+ hba_stat = sp->hba_stat;
+
+ scsi_stat = (sp->scsi_stat >> 1) & 0x1f;
+
+ if (sp->EOC == FALSE) {
+ eata_stat = inb(base + HA_RSTATUS);
+ printk(KERN_WARNING "eata_dma: int_handler, board: %x cmd %lx "
+ "returned unfinished.\n"
+ "EATA: %x HBA: %x SCSI: %x spadr %lx spadrirq %lx, "
+ "irq%d\n", base, (long)ccb, eata_stat, hba_stat,
+ scsi_stat,(long)&status, (long)&status[irq], irq);
+ cmd->result = DID_ERROR << 16;
+ ccb->status = FREE;
+ cmd->scsi_done(cmd);
+ break;
+ }
+
+ sp->EOC = FALSE; /* Clean out this flag */
+
+ if (ccb->status == LOCKED || ccb->status == RESET) {
+	    printk("eata_dma: int_handler, reset command pid %ld returned"
+ "\n", cmd->pid);
+ DBG(DBG_INTR && DBG_DELAY, DELAY(1));
+ }
+
+ eata_stat = inb(base + HA_RSTATUS);
+ DBG(DBG_INTR, printk("IRQ %d received, base %#.4x, pid %ld, "
+ "target: %x, lun: %x, ea_s: %#.2x, hba_s: "
+ "%#.2x \n", irq, base, cmd->pid, cmd->target,
+ cmd->lun, eata_stat, hba_stat));
+
+ switch (hba_stat) {
+ case HA_NO_ERROR: /* NO Error */
+ if(HD(cmd)->do_latency == TRUE && ccb->timestamp)
+ eata_latency_in(ccb, HD(cmd));
+ result = DID_OK << 16;
+ break;
+ case HA_ERR_SEL_TO: /* Selection Timeout */
+ case HA_ERR_CMD_TO: /* Command Timeout */
+ result = DID_TIME_OUT << 16;
+ break;
+ case HA_BUS_RESET: /* SCSI Bus Reset Received */
+ result = DID_RESET << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: BUS RESET "
+ "received on cmd %ld\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_INIT_POWERUP: /* Initial Controller Power-up */
+ if (cmd->device->type != TYPE_TAPE)
+ result = DID_BUS_BUSY << 16;
+ else
+ result = DID_ERROR << 16;
+
+ for (i = 0; i < MAXTARGET; i++)
+ DBG(DBG_STATUS, printk(KERN_DEBUG "scsi%d: cmd pid %ld "
+ "returned with INIT_POWERUP\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_CP_ABORT_NA:
+ case HA_CP_ABORTED:
+ result = DID_ABORT << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: aborted cmd "
+ "returned\n", HD(cmd)->HBA_number));
+ break;
+ case HA_CP_RESET_NA:
+ case HA_CP_RESET:
+ HD(cmd)->resetlevel[cmd->channel] = 0;
+ result = DID_RESET << 16;
+	    DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: reset cmd "
+				   "pid %ld returned\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ case HA_SCSI_HUNG: /* SCSI Hung */
+ printk(KERN_ERR "scsi%d: SCSI hung\n", HD(cmd)->HBA_number);
+ result = DID_ERROR << 16;
+ break;
+ case HA_RSENSE_FAIL: /* Auto Request-Sense Failed */
+ DBG(DBG_STATUS, printk(KERN_ERR "scsi%d: Auto Request Sense "
+ "Failed\n", HD(cmd)->HBA_number));
+ result = DID_ERROR << 16;
+ break;
+ case HA_UNX_BUSPHASE: /* Unexpected Bus Phase */
+ case HA_UNX_BUS_FREE: /* Unexpected Bus Free */
+ case HA_BUS_PARITY: /* Bus Parity Error */
+ case HA_UNX_MSGRJCT: /* Unexpected Message Reject */
+ case HA_RESET_STUCK: /* SCSI Bus Reset Stuck */
+ case HA_PARITY_ERR: /* Controller Ram Parity */
+ default:
+ result = DID_ERROR << 16;
+ break;
+ }
+ cmd->result = result | (scsi_stat << 1);
+
+#if DBG_INTR2
+ if (scsi_stat || result || hba_stat || eata_stat != 0x50
+ || cmd->scsi_done == NULL || cmd->device->id == 7)
+ printk("HBA: %d, channel %d, id: %d, lun %d, pid %ld:\n"
+ "eata_stat %#x, hba_stat %#.2x, scsi_stat %#.2x, "
+ "sense_key: %#x, result: %#.8x\n", x,
+ cmd->device->channel, cmd->device->id, cmd->device->lun,
+ cmd->pid, eata_stat, hba_stat, scsi_stat,
+ cmd->sense_buffer[2] & 0xf, cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+#endif
+
+ ccb->status = FREE; /* now we can release the slot */
+ cmd->scsi_done(cmd);
+ }
+ }
+
+ return;
+}
+
+inline int eata_send_command(u32 addr, u32 base, u8 command)
+{
+ long loop = R_LIMIT;
+
+ while (inb(base + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0)
+ return(FALSE);
+
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ /* And now the address in nice little byte chunks */
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ outb(command, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+inline int eata_send_immediate(u32 base, u32 addr, u8 ifc, u8 code, u8 code2)
+{
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ outb(0x0, base + HA_WDMAADDR - 1);
+ if(addr){
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ } else {
+ outb(0x0, base + HA_WDMAADDR);
+ outb(0x0, base + HA_WDMAADDR + 1);
+ outb(code2, base + HA_WCODE2);
+ outb(code, base + HA_WCODE);
+ }
+
+ outb(ifc, base + HA_WIFC);
+ outb(EATA_CMD_IMMEDIATE, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+int eata_queue(Scsi_Cmnd * cmd, void (* done) (Scsi_Cmnd *))
+{
+ unsigned int i, x, y;
+ ulong flags;
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *ccb;
+ struct scatterlist *sl;
+
+
+ save_flags(flags);
+ cli();
+
+#if 0
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_queue.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+#endif
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->sense_buffer[0] != 0) {
+ DBG(DBG_REQSENSE, printk(KERN_DEBUG "Tried to REQUEST SENSE\n"));
+ cmd->result = DID_OK << 16;
+ done(cmd);
+
+ return(0);
+ }
+
+ /* check for free slot */
+ for (y = hd->last_ccb + 1, x = 0; x < sh->can_queue; x++, y++) {
+ if (y >= sh->can_queue)
+ y = 0;
+ if (hd->ccb[y].status == FREE)
+ break;
+ }
+
+ hd->last_ccb = y;
+
+ if (x >= sh->can_queue) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk(KERN_CRIT "eata_queue pid %ld, HBA QUEUE FULL..., "
+ "returning DID_BUS_BUSY\n", cmd->pid));
+ done(cmd);
+ restore_flags(flags);
+ return(0);
+ }
+ ccb = &hd->ccb[y];
+
+ memset(ccb, 0, sizeof(struct eata_ccb) - sizeof(struct eata_sg_list *));
+
+ ccb->status = USED; /* claim free slot */
+
+ restore_flags(flags);
+
+ DBG(DBG_QUEUE, printk("eata_queue pid %ld, target: %x, lun: %x, y %d\n",
+ cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ if(hd->do_latency == TRUE)
+ eata_latency_out(ccb, cmd);
+
+ cmd->scsi_done = (void *)done;
+
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ ccb->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ ccb->DataIn = TRUE; /* Input mode */
+ }
+
+ /* FIXME: This will have to be changed once the midlevel driver
+ * allows different HBA IDs on every channel.
+ */
+ if (cmd->target == sh->this_id)
+ ccb->Interpret = TRUE; /* Interpret command */
+
+ if (cmd->use_sg) {
+ ccb->scatter = TRUE; /* SG mode */
+ if (ccb->sg_list == NULL) {
+ ccb->sg_list = kmalloc(sh->sg_tablesize * sizeof(struct eata_sg_list),
+ GFP_ATOMIC | GFP_DMA);
+ }
+ if (ccb->sg_list == NULL)
+ panic("eata_dma: Run out of DMA memory for SG lists !\n");
+ ccb->cp_dataDMA = htonl(virt_to_bus(ccb->sg_list));
+
+ ccb->cp_datalen = htonl(cmd->use_sg * sizeof(struct eata_sg_list));
+ sl=(struct scatterlist *)cmd->request_buffer;
+ for(i = 0; i < cmd->use_sg; i++, sl++){
+ ccb->sg_list[i].data = htonl(virt_to_bus(sl->address));
+ ccb->sg_list[i].len = htonl((u32) sl->length);
+ }
+ } else {
+ ccb->scatter = FALSE;
+ ccb->cp_datalen = htonl(cmd->request_bufflen);
+ ccb->cp_dataDMA = htonl(virt_to_bus(cmd->request_buffer));
+ }
+
+ ccb->Auto_Req_Sen = TRUE;
+ ccb->cp_reqDMA = htonl(virt_to_bus(cmd->sense_buffer));
+ ccb->reqlen = sizeof(cmd->sense_buffer);
+
+ ccb->cp_id = cmd->target;
+ ccb->cp_channel = cmd->channel;
+ ccb->cp_lun = cmd->lun;
+ ccb->cp_dispri = TRUE;
+ ccb->cp_identify = TRUE;
+ memcpy(ccb->cp_cdb, cmd->cmnd, cmd->cmd_len);
+
+ ccb->cp_statDMA = htonl(virt_to_bus(&(hd->sp)));
+
+ ccb->cp_viraddr = ccb; /* This will be passed thru, so we don't need to
+ * convert it */
+ ccb->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ if(eata_send_command((u32) ccb, (u32) sh->base, EATA_CMD_DMA_SEND_CP) == FALSE) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk("eata_queue target %d, pid %ld, HBA busy, "
+ "returning DID_BUS_BUSY\n",cmd->target, cmd->pid));
+ ccb->status = FREE;
+ done(cmd);
+ return(0);
+ }
+ DBG(DBG_QUEUE, printk("Queued base %#.4x pid: %ld target: %x lun: %x "
+ "slot %d irq %d\n", (s32)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ return(0);
+}
+
+
+int eata_abort(Scsi_Cmnd * cmd)
+{
+ ulong loop = HZ / 2;
+ ulong flags;
+ int x;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_abort called pid: %ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+  /* Some interrupt controllers seem to lose interrupts */
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_abort.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY) {
+ if (--loop == 0) {
+ printk("eata_dma: abort, timeout error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ }
+ if (CD(cmd)->status == RESET) {
+ printk("eata_dma: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ DBG(DBG_ABNORM, printk("eata_dma: abort, queue slot locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_dma: abort: invalid slot status\n");
+}
+
+int eata_reset(Scsi_Cmnd * cmd, unsigned int resetflags)
+{
+ uint x;
+ ulong loop = loops_per_sec / 3;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_reset called pid:%ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_reset.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ if (HD(cmd)->state == RESET) {
+ printk("eata_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk("eata_reset: exit, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ if (HD(cmd)->ccb[x].status == LOCKED) {
+ HD(cmd)->ccb[x].status = FREE;
+ printk("eata_reset: locked slot %d forced free.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ continue;
+ }
+
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+
+ if (sp == NULL)
+ panic("eata_reset: slot %d, sp==NULL.\n", x);
+
+ printk("eata_reset: slot %d in reset, pid %ld.\n", x, sp->pid);
+
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ if (sp == cmd)
+ success = TRUE;
+ }
+
+ /* hard reset the HBA */
+ inb((u32) (cmd->host->base) + HA_RSTATUS); /* This might cause trouble */
+ eata_send_command(0, (u32) cmd->host->base, EATA_CMD_RESET);
+
+ HD(cmd)->state = RESET;
+
+ DBG(DBG_ABNORM, printk("eata_reset: board reset done, enabling "
+ "interrupts.\n"));
+
+    DELAY(2); /* In theory we should get interrupts and free all
+	       * used queue slots */
+
+ DBG(DBG_ABNORM, printk("eata_reset: interrupts disabled again.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt and those that
+ * are still LOCKED from the last reset */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(cmd)->ccb[x].status = LOCKED;
+
+ printk("eata_reset: slot %d locked, DID_RESET, pid %ld done.\n",
+ x, sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ sp->scsi_done(sp);
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, pending.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PENDING);
+ } else {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
+/* Here we try to determine the optimum queue depth for
+ * each attached device.
+ *
+ * At the moment the algorithm is rather simple
+ */
+static void eata_select_queue_depths(struct Scsi_Host *host,
+ Scsi_Device *devicelist)
+{
+ Scsi_Device *device;
+ int devcount = 0;
+ int factor = 0;
+
+#if CRIPPLE_QUEUE
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host)
+ device->queue_depth = 2;
+ }
+#else
+    /* First we do a sample run to find out what we have */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if (device->host == host) {
+ devcount++;
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ factor += TYPE_DISK_QUEUE;
+ break;
+ case TYPE_TAPE:
+ factor += TYPE_TAPE_QUEUE;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ factor += TYPE_ROM_QUEUE;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ factor += TYPE_OTHER_QUEUE;
+ break;
+ }
+ }
+ }
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: needed queueslots %d\n",
+ host->host_no, factor));
+
+ if(factor == 0) /* We don't want to get a DIV BY ZERO error */
+ factor = 1;
+
+ factor = (SD(host)->queuesize * 10) / factor;
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: using factor %dE-1\n",
+ host->host_no, factor));
+
+    /* Now that we have the factor we can set the individual queue sizes */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host) {
+ if(SD(device->host)->bustype != IS_ISA){
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ device->queue_depth = (TYPE_DISK_QUEUE * factor) / 10;
+ break;
+ case TYPE_TAPE:
+ device->queue_depth = (TYPE_TAPE_QUEUE * factor) / 10;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ device->queue_depth = (TYPE_ROM_QUEUE * factor) / 10;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ device->queue_depth = (TYPE_OTHER_QUEUE * factor) / 10;
+ break;
+ }
+ } else /* ISA forces us to limit the queue depth because of the
+ * bounce buffer memory overhead. I know this is cruel */
+ device->queue_depth = 2;
+
+ /*
+	     * It turned out that we need to set an upper limit on the number of
+	     * commands we can allow to queue for a single device on the bus.
+ * If we get above that limit, the broken midlevel SCSI code
+ * will produce bogus timeouts and aborts en masse. :-(
+ */
+ if(device->queue_depth > UPPER_DEVICE_QUEUE_LIMIT)
+ device->queue_depth = UPPER_DEVICE_QUEUE_LIMIT;
+ if(device->queue_depth == 0)
+ device->queue_depth = 1;
+
+ printk(KERN_INFO "scsi%d: queue depth for target %d on channel %d "
+ "set to %d\n", host->host_no, device->id, device->channel,
+ device->queue_depth);
+ }
+ }
+#endif
+}
+
+#if CHECK_BLINK
+int check_blink_state(long base)
+{
+ ushort loops = 10;
+ u32 blinkindicator;
+ u32 state = 0x12345678;
+ u32 oldstate = 0;
+
+ blinkindicator = htonl(0x54504442);
+ while ((loops--) && (state != oldstate)) {
+ oldstate = state;
+ state = inl((uint) base + 1);
+ }
+
+ DBG(DBG_BLINK, printk("Did Blink check. Status: %d\n",
+ (state == oldstate) && (state == blinkindicator)));
+
+ if ((state == oldstate) && (state == blinkindicator))
+ return(TRUE);
+ else
+ return (FALSE);
+}
+#endif
+
+char * get_board_data(u32 base, u32 irq, u32 id)
+{
+ struct eata_ccb *cp;
+ struct eata_sp *sp;
+ static char *buff;
+ ulong i;
+
+ cp = (struct eata_ccb *) scsi_init_malloc(sizeof(struct eata_ccb),
+ GFP_ATOMIC | GFP_DMA);
+ sp = (struct eata_sp *) scsi_init_malloc(sizeof(struct eata_sp),
+ GFP_ATOMIC | GFP_DMA);
+
+ buff = dma_scratch;
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(sp, 0, sizeof(struct eata_sp));
+ memset(buff, 0, 256);
+
+ cp->DataIn = TRUE;
+ cp->Interpret = TRUE; /* Interpret command */
+ cp->cp_dispri = TRUE;
+ cp->cp_identify = TRUE;
+
+ cp->cp_datalen = htonl(56);
+ cp->cp_dataDMA = htonl(virt_to_bus(buff));
+ cp->cp_statDMA = htonl(virt_to_bus(sp));
+ cp->cp_viraddr = cp;
+
+ cp->cp_id = id;
+ cp->cp_lun = 0;
+
+ cp->cp_cdb[0] = INQUIRY;
+ cp->cp_cdb[1] = 0;
+ cp->cp_cdb[2] = 0;
+ cp->cp_cdb[3] = 0;
+ cp->cp_cdb[4] = 56;
+ cp->cp_cdb[5] = 0;
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ eata_send_command((u32) cp, (u32) base, EATA_CMD_DMA_SEND_CP);
+
+ i = jiffies + (3 * HZ);
+ while (fake_int_happened == FALSE && jiffies <= i)
+ barrier();
+
+ DBG(DBG_INTR3, printk(KERN_DEBUG "fake_int_result: %#x hbastat %#x "
+ "scsistat %#x, buff %p sp %p\n",
+ fake_int_result, (u32) (sp->hba_stat /*& 0x7f*/),
+ (u32) sp->scsi_stat, buff, sp));
+
+ scsi_init_free((void *)cp, sizeof(struct eata_ccb));
+ scsi_init_free((void *)sp, sizeof(struct eata_sp));
+
+ if ((fake_int_result & HA_SERROR) || jiffies > i){
+ printk(KERN_WARNING "eata_dma: trying to reset HBA at %x to clear "
+ "possible blink state\n", base);
+ /* hard reset the HBA */
+ inb((u32) (base) + HA_RSTATUS);
+ eata_send_command(0, base, EATA_CMD_RESET);
+ DELAY(1);
+ return (NULL);
+ } else
+ return (buff);
+}
+
+
+int get_conf_PIO(u32 base, struct get_conf *buf)
+{
+ ulong loop = R_LIMIT;
+ u16 *p;
+
+ if(check_region(base, 9))
+ return (FALSE);
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return (FALSE);
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ DBG(DBG_PIO && DBG_PROBE,
+ printk("Issuing PIO READ CONFIG to HBA at %#x\n", base));
+ eata_send_command(0, base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = R_LIMIT;
+ for (p = (u16 *) buf;
+ (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ return (FALSE);
+
+ loop = R_LIMIT;
+ *p = inw(base + HA_RDATA);
+ }
+
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? */
+ if (htonl(EATA_SIGNATURE) == buf->signature) {
+ DBG(DBG_PIO&&DBG_PROBE, printk("EATA Controller found at %x "
+ "EATA Level: %x\n", (uint) base,
+ (uint) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ return (TRUE);
+ }
+ } else {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+ "for HBA at %lx\n", (long)base));
+ }
+ return (FALSE);
+}
+
+
+void print_config(struct get_conf *gc)
+{
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d DMAS:%d\n",
+ (u32) ntohl(gc->len), gc->version,
+ gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support,
+ gc->DMA_support);
+ printk("DMAV:%d HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n",
+ gc->DMA_valid, gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+ gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d DMAC:%d FORCADR:%d SG_64K:%d SG_UAE:%d MID:%d "
+ "MCH:%d MLUN:%d\n",
+ gc->IRQ, gc->IRQ_TR, (8 - gc->DMA_channel) & 7, gc->FORCADR,
+ gc->SG_64K, gc->SG_UAE, gc->MAX_ID, gc->MAX_CHAN, gc->MAX_LUN);
+ printk("RIDQ:%d PCI:%d EISA:%d\n",
+ gc->ID_qest, gc->is_PCI, gc->is_EISA);
+ DBG(DPT_DEBUG, DELAY(14));
+}
+
+short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt,
+ u8 bustype)
+{
+ ulong size = 0;
+ unchar dma_channel = 0;
+ char *buff = 0;
+ unchar bugs = 0;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+ int x;
+
+
+ DBG(DBG_REGISTER, print_config(gc));
+
+ if (gc->DMA_support == FALSE) {
+ printk("The EATA HBA at %#.4x does not support DMA.\n"
+ "Please use the EATA-PIO driver.\n", base);
+ return (FALSE);
+ }
+ if(gc->HAA_valid == FALSE || ntohl(gc->len) < 0x22)
+ gc->MAX_CHAN = 0;
+
+ if (reg_IRQ[gc->IRQ] == FALSE) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, (void *) eata_fake_int_handler, SA_INTERRUPT,
+ "eata_dma", NULL)){
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.", gc->IRQ);
+ return (FALSE);
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ] == TRUE) {
+ printk("Can't support more than one HBA on this IRQ,\n"
+ " if the IRQ is edge triggered. Sorry.\n");
+ return (FALSE);
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+
+ /* If DMA is supported but DMA_valid isn't set to indicate that
+     * the channel number is given, we must have pre 2.0 firmware (1.7?),
+     * which leaves us to guess, since the "newer ones" also don't set the
+ * DMA_valid bit.
+ */
+ if (gc->DMA_support && !gc->DMA_valid && gc->DMA_channel) {
+ printk(KERN_WARNING "eata_dma: If you are using a pre 2.0 firmware "
+ "please update it !\n"
+ " You can get new firmware releases from ftp.dpt.com\n");
+ gc->DMA_channel = (base == 0x1f0 ? 3 /* DMA=5 */ : 2 /* DMA=6 */);
+ gc->DMA_valid = TRUE;
+ }
+
+    /* if gc->DMA_valid is set, it must be an ISA HBA and we have to register it */
+ dma_channel = BUSMASTER;
+ if (gc->DMA_valid) {
+ if (request_dma(dma_channel = (8 - gc->DMA_channel) & 7, "eata_dma")) {
+ printk(KERN_WARNING "Unable to allocate DMA channel %d for ISA HBA"
+ " at %#.4x.\n", dma_channel, base);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (dma_channel != BUSMASTER) {
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ if (bustype != IS_EISA && bustype != IS_ISA)
+ buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]);
+
+ if (buff == NULL) {
+ if (bustype == IS_EISA || bustype == IS_ISA) {
+ bugs = bugs || BROKEN_INQUIRY;
+ } else {
+ if (gc->DMA_support == FALSE)
+ printk(KERN_WARNING "HBA at %#.4x doesn't support DMA. "
+ "Sorry\n", base);
+ else
+ printk(KERN_WARNING "HBA at %#.4x does not react on INQUIRY. "
+ "Sorry.\n", base);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (gc->DMA_support == FALSE && buff != NULL)
+ printk(KERN_WARNING "HBA %.12sat %#.4x doesn't set the DMA_support "
+ "flag correctly.\n", &buff[16], base);
+
+ request_region(base, 9, "eata_dma"); /* We already checked the
+ * availability, so this
+ * should not fail.
+ */
+
+ if(ntohs(gc->queuesiz) == 0) {
+ gc->queuesiz = ntohs(64);
+ printk(KERN_WARNING "Warning: Queue size has to be corrected. Assuming"
+ " 64 queueslots\n"
+ " This might be a PM2012B with a defective Firmware\n"
+ " Contact DPT support@dpt.com for an upgrade\n");
+ }
+
+ size = sizeof(hostdata) + ((sizeof(struct eata_ccb) + sizeof(long))
+ * ntohs(gc->queuesiz));
+
+ DBG(DBG_REGISTER, printk("scsi_register size: %ld\n", size));
+
+ sh = scsi_register(tpnt, size);
+
+ if(sh != NULL) {
+
+ hd = SD(sh);
+
+ memset(hd->reads, 0, sizeof(u32) * 26);
+
+ sh->select_queue_depths = eata_select_queue_depths;
+
+ hd->bustype = bustype;
+
+ /*
+	 * If we are using an ISA board, we can't use extended SG,
+ * because we would need excessive amounts of memory for
+ * bounce buffers.
+ */
+ if (gc->SG_64K==TRUE && ntohs(gc->SGsiz)==64 && hd->bustype!=IS_ISA){
+ sh->sg_tablesize = SG_SIZE_BIG;
+ } else {
+ sh->sg_tablesize = ntohs(gc->SGsiz);
+ if (sh->sg_tablesize > SG_SIZE || sh->sg_tablesize == 0) {
+ if (sh->sg_tablesize == 0)
+ printk(KERN_WARNING "Warning: SG size had to be fixed.\n"
+ "This might be a PM2012 with a defective Firmware"
+ "\nContact DPT support@dpt.com for an upgrade\n");
+ sh->sg_tablesize = SG_SIZE;
+ }
+ }
+ hd->sgsize = sh->sg_tablesize;
+ }
+
+ if(sh != NULL) {
+ sh->can_queue = hd->queuesize = ntohs(gc->queuesiz);
+ sh->cmd_per_lun = 0;
+ }
+
+ if(sh == NULL) {
+ DBG(DBG_REGISTER, printk(KERN_NOTICE "eata_dma: couldn't register HBA"
+				 " at %x\n", base));
+ scsi_unregister(sh);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+
+
+ hd->broken_INQUIRY = (bugs & BROKEN_INQUIRY);
+
+ if(hd->broken_INQUIRY == TRUE) {
+ strcpy(hd->vendor, "DPT");
+ strcpy(hd->name, "??????????");
+ strcpy(hd->revision, "???.?");
+ hd->firmware_revision = 0;
+ } else {
+ strncpy(hd->vendor, &buff[8], 8);
+ hd->vendor[8] = 0;
+ strncpy(hd->name, &buff[16], 17);
+ hd->name[17] = 0;
+ hd->revision[0] = buff[32];
+ hd->revision[1] = buff[33];
+ hd->revision[2] = buff[34];
+ hd->revision[3] = '.';
+ hd->revision[4] = buff[35];
+ hd->revision[5] = 0;
+ hd->firmware_revision = (buff[32] << 24) + (buff[33] << 16)
+ + (buff[34] << 8) + buff[35];
+ }
+
+ if (hd->firmware_revision >= (('0'<<24) + ('7'<<16) + ('G'<< 8) + '0'))
+ hd->immediate_support = 1;
+ else
+ hd->immediate_support = 0;
+
+ switch (ntohl(gc->len)) {
+ case 0x1c:
+ hd->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ hd->EATA_revision = 'b';
+ break;
+ case 0x22:
+ hd->EATA_revision = 'c';
+ break;
+ case 0x24:
+ hd->EATA_revision = 'z';
+ default:
+ hd->EATA_revision = '?';
+ }
+
+
+ if(ntohl(gc->len) >= 0x22) {
+ sh->max_id = gc->MAX_ID + 1;
+ sh->max_lun = gc->MAX_LUN + 1;
+ } else {
+ sh->max_id = 8;
+ sh->max_lun = 8;
+ }
+
+ hd->HBA_number = sh->host_no;
+ hd->channel = gc->MAX_CHAN;
+ sh->max_channel = gc->MAX_CHAN;
+ sh->unique_id = base;
+ sh->base = (char *) base;
+ sh->io_port = base;
+ sh->n_io_port = 9;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = dma_channel;
+
+ /* FIXME:
+ * SCSI midlevel code should support different HBA ids on every channel
+ */
+ sh->this_id = gc->scsi_id[3];
+
+ if (gc->SECOND)
+ hd->primary = FALSE;
+ else
+ hd->primary = TRUE;
+
+ sh->wish_block = FALSE;
+
+ if (hd->bustype != IS_ISA) {
+ sh->unchecked_isa_dma = FALSE;
+ } else {
+ sh->unchecked_isa_dma = TRUE; /* We're doing ISA DMA */
+ }
+
+ for(x = 0; x <= 11; x++){ /* Initialize min. latency */
+ hd->writes_lat[x][1] = 0xffffffff;
+ hd->reads_lat[x][1] = 0xffffffff;
+ }
+ hd->all_lat[1] = 0xffffffff;
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if(hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+
+ return (TRUE);
+}
+
+
+
+void find_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ u32 base;
+ int i;
+
+#if CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+ pal1 = inb((u16)base - 8);
+ pal2 = inb((u16)base - 7);
+ pal3 = inb((u16)base - 6);
+
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) && (pal3 == NEC_ID3))||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) && (pal3 == ATT_ID3))){
+ DBG(DBG_PROBE, printk("EISA EATA id tags found: %x %x %x \n",
+ (int)pal1, (int)pal2, (int)pal3));
+#endif
+ if (get_conf_PIO(base, buf) == TRUE) {
+ if (buf->IRQ) {
+ DBG(DBG_EISA, printk("Registering EISA HBA\n"));
+ register_HBA(base, buf, tpnt, IS_EISA);
+ } else
+ printk("eata_dma: No valid IRQ. HBA removed from list\n");
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(base))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ /* Nothing found here so we take it from the list */
+ EISAbases[i] = 0;
+#if CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
+
+void find_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (ISAbases[i]) {
+ if (get_conf_PIO(ISAbases[i],buf) == TRUE){
+ DBG(DBG_ISA, printk("Registering ISA HBA\n"));
+ register_HBA(ISAbases[i], buf, tpnt, IS_ISA);
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(ISAbases[i]))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ ISAbases[i] = 0;
+ }
+ }
+ return;
+}
+
+void find_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+ printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
+#else
+
+ u8 pci_bus, pci_device_fn;
+ static s16 pci_index = 0; /* Device index to PCI BIOS calls */
+ u32 base = 0;
+ u16 com_adr;
+ u16 rev_device;
+ u32 error, i, x;
+ u8 pal1, pal2, pal3;
+
+ if (pcibios_present()) {
+ for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+ if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+ pci_index, &pci_bus, &pci_device_fn))
+ break;
+ DBG(DBG_PROBE && DBG_PCI,
+ printk("eata_dma: find_PCI, HBA at bus %d, device %d,"
+ " function %d, index %d\n", (s32)pci_bus,
+ (s32)((pci_device_fn & 0xf8) >> 3),
+ (s32)(pci_device_fn & 7), pci_index));
+
+ if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_CLASS_DEVICE, &rev_device))) {
+ if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+ if (!(error = pcibios_read_config_word(pci_bus,
+ pci_device_fn, PCI_COMMAND,
+ (u16 *) & com_adr))) {
+ if (!((com_adr & PCI_COMMAND_IO) &&
+ (com_adr & PCI_COMMAND_MASTER))) {
+ printk("eata_dma: find_PCI, HBA has IO or"
+ " BUSMASTER mode disabled\n");
+ continue;
+ }
+ } else
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_COMMAND\n", error);
+ } else
+ printk("eata_dma: find_PCI, DEVICECLASSID %x didn't match\n",
+ rev_device);
+ } else {
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_CLASS_BASE\n",
+ error);
+ continue;
+ }
+
+ if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, (int *) &base))){
+
+ /* Check if the address is valid */
+ if (base & 0x01) {
+ base &= 0xfffffffe;
+ /* EISA tag there ? */
+ pal1 = inb(base);
+ pal2 = inb(base + 1);
+ pal3 = inb(base + 2);
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) &&
+ (pal3 == NEC_ID3)) ||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) &&
+ (pal3 == ATT_ID3)))
+ base += 0x08;
+ else
+ base += 0x10; /* Now, THIS is the real address */
+
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_conf_PIO(base, buf) == TRUE) {
+
+		  /* OK. We made it this far, so we can go ahead
+		   * and register it. We only have to check and,
+		   * if necessary, remove it from the EISA and ISA lists.
+		   */
+ DBG(DBG_PCI, printk("Registering PCI HBA\n"));
+ register_HBA(base, buf, tpnt, IS_PCI);
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88)
+ EISAbases[(base >> 12) & 0x0f] = 0;
+ continue; /* break; */
+ }
+#if CHECK_BLINK
+ else if (check_blink_state(base) == TRUE) {
+		      printk("eata_dma: HBA is in BLINK state.\n"
+			     "Consult your HBA's manual to correct this.\n");
+ }
+#endif
+ }
+ }
+ } else {
+ printk("eata_dma: error %x while reading "
+ "PCI_BASE_ADDRESS_0\n", error);
+ }
+ }
+ } else {
+	printk("eata_dma: No BIOS32 extensions present. This driver release "
+	       "still depends on them.\n"
+	       "          Skipping scan for PCI HBAs.\n");
+ }
+#endif /* #ifndef CONFIG_PCI */
+ return;
+}
+
+int eata_detect(Scsi_Host_Template * tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+ printk("Using lots of delays to let you read the debugging output\n"));
+
+ tpnt->proc_dir = &proc_scsi_eata_dma;
+
+ status = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ dma_scratch = scsi_init_malloc(1024, GFP_ATOMIC | GFP_DMA);
+
+ if(status == NULL || dma_scratch == NULL) {
+ printk("eata_dma: can't allocate enough memory to probe for hosts !\n");
+ return(0);
+ }
+
+ dma_scratch += 4;
+
+ find_PCI(&gc, tpnt);
+
+ find_EISA(&gc, tpnt);
+
+ find_ISA(&gc, tpnt);
+
+ for (i = 0; i < MAXIRQ; i++) { /* Now that we know what we have, we */
+ if (reg_IRQ[i] >= 1){ /* exchange the interrupt handler which */
+ free_irq(i, NULL); /* we used for probing with the real one */
+ request_irq(i, (void *)(eata_int_handler), SA_INTERRUPT|SA_SHIRQ,
+ "eata_dma", NULL);
+ }
+ }
+
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) driver version: %d.%d%s"
+ "\ndeveloped in co-operation with DPT\n"
+ "(c) 1993-96 Michael Neuffer, mike@i-Connect.Net\n",
+ VER_MAJOR, VER_MINOR, VER_SUB);
+ printk("Registered HBAs:");
+ printk("\nHBA no. Boardtype Revis EATA Bus BaseIO IRQ"
+ " DMA Ch ID Pr QS S/G IS\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.12s v%s 2.0%c %s %#.4x %2d",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+ (u32) HBA_ptr->base, HBA_ptr->irq);
+ if(HBA_ptr->dma_channel != BUSMASTER)
+ printk(" %2x ", HBA_ptr->dma_channel);
+ else
+ printk(" %s", "BMST");
+ printk(" %d %d %c %3d %3d %c\n",
+ SD(HBA_ptr)->channel+1, HBA_ptr->this_id,
+ (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+ HBA_ptr->can_queue, HBA_ptr->sg_tablesize,
+ (SD(HBA_ptr)->immediate_support == TRUE)?'Y':'N');
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ } else {
+ scsi_init_free((void *)status, 512);
+ }
+
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+
+ DBG(DPT_DEBUG, DELAY(12));
+
+ return(registered_HBAs);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = EATA_DMA;
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/dev/drivers/scsi/g_NCR5380.c b/linux/dev/drivers/scsi/g_NCR5380.c
new file mode 100644
index 0000000..687dd36
--- /dev/null
+++ b/linux/dev/drivers/scsi/g_NCR5380.c
@@ -0,0 +1,735 @@
+/*
+ * Generic NCR5380 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * TODO : flesh out DMA support, find someone actually using this (I have
+ * a memory-mapped Trantor board that works fine)
+ */
+
+/*
+ * Options :
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. With command line overrides - NCR5380=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is
+ * specified as an array of address, irq, dma, board tuples. Ie, for
+ * one board at 0x350, IRQ5, no dma, I could say
+ * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}
+ *
+ *     -1 should be specified for no interrupt or a DMA interrupt, -2 to
+ * 	autoprobe for an IRQ line if overridden on the command line.
+ *
+ * 3. When included as a module, with arguments passed on the command line:
+ * ncr_irq=xx the interrupt
+ * ncr_addr=xx the port or base address (for port or memory
+ * mapped, resp.)
+ * ncr_dma=xx the DMA
+ * ncr_5380=1 to set up for a NCR5380 board
+ * ncr_53c400=1 to set up for a NCR53C400 board
+ * e.g.
+ * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+ * for a port mapped NCR5380 board or
+ * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+ * for a memory mapped NCR53C400 board with interrupts disabled.
+ *
+ *     255 should be specified for no interrupt or a DMA interrupt, 254 to
+ * 	autoprobe for an IRQ line if overridden on the command line.
+ *
+ */
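+/*
+ * An illustration of method 1 above (the values here are hypothetical, not a
+ * recommended configuration): appending
+ *     ncr5380=0x350,5
+ * to the LILO command line describes an NCR5380 with its base address at
+ * 0x350 on IRQ 5; with only two parameters given, the DMA channel defaults
+ * to DMA_NONE, as internal_setup() below assumes.
+ */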
+
+#ifdef MACH
+#define GENERIC_NCR5380_OVERRIDE {{(NCR5380_map_type)0x350,5,0,BOARD_NCR53C400}};
+#define CONFIG_SCSI_GENERIC_NCR53C400
+#define CONFIG_SCSI_G_NCR5380_MEM
+#endif
+
+#define AUTOPROBE_IRQ
+#define AUTOSENSE
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SCSI_GENERIC_NCR53C400
+#define NCR53C400_PSEUDO_DMA 1
+#define PSEUDO_DMA
+#define NCR53C400
+#define NCR5380_STATS
+#undef NCR5380_STAT_LIMIT
+#endif
+#if defined(CONFIG_SCSI_G_NCR5380_PORT) && defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You can not configure the Generic NCR 5380 SCSI Driver for memory mapped I/O and port mapped I/O at the same time (yet)
+#endif
+#if !defined(CONFIG_SCSI_G_NCR5380_PORT) && !defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You must configure the Generic NCR 5380 SCSI Driver for one of memory mapped I/O and port mapped I/O.
+#endif
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "g_NCR5380.h"
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_g_ncr5380 = {
+ PROC_SCSI_GENERIC_NCR5380, 9, "g_NCR5380",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define NCR_NOT_SET 0
+static int ncr_irq=NCR_NOT_SET;
+static int ncr_dma=NCR_NOT_SET;
+static int ncr_addr=NCR_NOT_SET;
+static int ncr_5380=NCR_NOT_SET;
+static int ncr_53c400=NCR_NOT_SET;
+
+static struct override {
+ NCR5380_implementation_fields;
+ int irq;
+ int dma;
+ int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
+} overrides
+#ifdef GENERIC_NCR5380_OVERRIDE
+ [] = GENERIC_NCR5380_OVERRIDE
+#else
+ [1] = {{0,},};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+/*
+ * Function : static internal_setup(int board, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : board - either BOARD_NCR5380 for a normal NCR5380 board,
+ * or BOARD_NCR53C400 for a NCR53C400 board. str - unused, ints -
+ * array of integer parameters with ints[0] equal to the number of ints.
+ *
+ */
+
+static void internal_setup(int board, char *str, int *ints) {
+ static int commandline_current = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ if (ints[0] != 2 && ints[0] != 3) {
+ printk("generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
+ return;
+	}
+	break;
+ case BOARD_NCR53C400:
+ if (ints[0] != 2) {
+ printk("generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
+ return;
+ }
+ }
+
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type)ints[1];
+ overrides[commandline_current].irq = ints[2];
+ if (ints[0] == 3)
+ overrides[commandline_current].dma = ints[3];
+ else
+ overrides[commandline_current].dma = DMA_NONE;
+ overrides[commandline_current].board = board;
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : generic_NCR5380_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR5380_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR5380, str, ints);
+}
+
+/*
+ * Function : generic_NCR53C400_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR53C400_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR53C400, str, ints);
+}
+
+/*
+ * Function : int generic_NCR5380_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : initializes generic NCR5380 driver based on the
+ * command line / compile time port and irq definitions.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int generic_NCR5380_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0;
+ int count;
+ int flags = 0;
+ struct Scsi_Host *instance;
+
+ if (ncr_irq != NCR_NOT_SET)
+ overrides[0].irq=ncr_irq;
+ if (ncr_dma != NCR_NOT_SET)
+ overrides[0].dma=ncr_dma;
+ if (ncr_addr != NCR_NOT_SET)
+ overrides[0].NCR5380_map_name=(NCR5380_map_type)ncr_addr;
+ if (ncr_5380 != NCR_NOT_SET)
+ overrides[0].board=BOARD_NCR5380;
+ else if (ncr_53c400 != NCR_NOT_SET)
+ overrides[0].board=BOARD_NCR53C400;
+
+ tpnt->proc_dir = &proc_scsi_g_ncr5380;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ if (!(overrides[current_override].NCR5380_map_name))
+ continue;
+
+ switch (overrides[current_override].board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA;
+ break;
+ case BOARD_NCR53C400:
+ flags = FLAG_NCR53C400;
+ break;
+ }
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name;
+
+ NCR5380_init(instance, flags);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, generic_NCR5380_intr, SA_INTERRUPT, "NCR5380", NULL)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+ printk("scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int)instance->NCR5380_instance_name);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+const char * generic_NCR5380_info (struct Scsi_Host* host) {
+ static const char string[]="Generic NCR5380/53C400 Driver";
+ return string;
+}
+
+int generic_NCR5380_release_resources(struct Scsi_Host * instance)
+{
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+ if (instance->irq != IRQ_NONE)
+ free_irq(instance->irq, NULL);
+
+ return 0;
+}
+
+#ifdef BIOSPARAM
+/*
+ * Function : int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, but I could be incorrect.  Someone
+ * using hard disks on a Trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the Linux fdisk program
+ * and matching the H-C-S coordinates to what DOS uses.
+ */
+
+int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
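+/* Worked example (illustrative figures only): with 64 heads and 32 sectors
+ * per track, a cylinder covers 64 * 32 = 2048 sectors, so a 2097152-sector
+ * (1 GiB) disk reports 2097152 >> 11 = 1024 cylinders. */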
+#endif
+
+#if NCR53C400_PSEUDO_DMA
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: About to read %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: %d blocks left\n", blocks);
+#endif
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ if (blocks)
+ printk("53C400r: blocks still == %d\n", blocks);
+ else
+ printk("53C400r: Exiting loop\n");
+#endif
+ break;
+ }
+
+#if 1
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Waiting for buffer, bl=%d\n", bl);
+#endif
+
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring 128 bytes\n");
+#endif
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: EXTRA: Waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+ if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ printk("53C400r: no 53C80 gated irq after transfer");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: Got 53C80 interrupt and tried to clear it\n");
+#endif
+
+/* DON'T DO THIS - THEY NEVER ARRIVE!
+ printk("53C400r: Waiting for 53C80 registers\n");
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+*/
+
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
+ printk("53C400r: no end dma signal\n");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: end dma as expected\n");
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ return 0;
+}
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: About to write %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ if (blocks)
+ printk("53C400w: exiting loop, blocks still == %d\n", blocks);
+ else
+ printk("53C400w: exiting loop\n");
+#endif
+ break;
+ }
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: %d blocks left\n", blocks);
+
+ printk("53C400w: waiting for buffer, bl=%d\n", bl);
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: EXTRA waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+#if 0
+ printk("53C400w: waiting for registers to be available\n");
+ THEY NEVER DO!
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+ printk("53C400w: Got em\n");
+#endif
+
+ /* Let's wait for this instead - could be ugly */
+ /* All documentation says to check for this. Maybe my hardware is too
+ * fast. Waiting for it seems to work fine! KLL
+ */
+ while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ ;
+
+ /*
+ * I know. i is certainly != 0 here but the loop is new. See previous
+ * comment.
+ */
+ if (i) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got 53C80 gated irq (last block)\n");
+#endif
+ if (!((i=NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER))
+ printk("53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n",i);
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: Got END OF DMA\n");
+#endif
+ }
+ else
+ printk("53C400w: no 53C80 gated irq after transfer (last block)\n");
+
+#if 0
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
+ printk("53C400w: no end dma signal\n");
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: waiting for last byte...\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got last byte.\n");
+ printk("53C400w: pwrite exiting with status 0, whoopee!\n");
+#endif
+ return 0;
+}
+#endif /* PSEUDO_DMA */
+
+#include "NCR5380.c"
+
+#define PRINTP(x) len += sprintf(buffer+len, x)
+#define ANDP ,
+
+static int sprint_opcode(char* buffer, int len, int opcode) {
+ int start = len;
+ PRINTP("0x%02x " ANDP opcode);
+ return len-start;
+}
+
+static int sprint_command (char* buffer, int len, unsigned char *command) {
+ int i,s,start=len;
+ len += sprint_opcode(buffer, len, command[0]);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ PRINTP("%02x " ANDP command[i]);
+ PRINTP("\n");
+ return len-start;
+}
+
+static int sprint_Scsi_Cmnd (char* buffer, int len, Scsi_Cmnd *cmd) {
+ int start = len;
+ PRINTP("host number %d destination target %d, lun %d\n" ANDP
+ cmd->host->host_no ANDP
+ cmd->target ANDP
+ cmd->lun);
+ PRINTP(" command = ");
+ len += sprint_command (buffer, len, cmd->cmnd);
+ return len-start;
+}
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout)
+{
+ int len = 0;
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ struct Scsi_Host *scsi_ptr;
+ Scsi_Cmnd *ptr;
+ Scsi_Device *dev;
+ struct NCR5380_hostdata *hostdata;
+
+ cli();
+
+ for (scsi_ptr = first_instance; scsi_ptr; scsi_ptr=scsi_ptr->next)
+ if (scsi_ptr->host_no == hostno)
+ break;
+ NCR5380_setup(scsi_ptr);
+ hostdata = (struct NCR5380_hostdata *)scsi_ptr->hostdata;
+
+ PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name);
+ PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE);
+ PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE);
+#ifdef NCR53C400
+ PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE);
+ PRINTP("NCR53C400 card%s detected\n" ANDP (((struct NCR5380_hostdata *)scsi_ptr->hostdata)->flags & FLAG_NCR53C400)?"":" not");
+# if NCR53C400_PSEUDO_DMA
+ PRINTP("NCR53C400 pseudo DMA used\n");
+# endif
+#else
+ PRINTP("NO NCR53C400 driver extensions\n");
+#endif
+ PRINTP("Using %s mapping at %s 0x%x, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name);
+ if (scsi_ptr->irq == IRQ_NONE)
+ PRINTP("no interrupt\n");
+ else
+ PRINTP("on interrupt %d\n" ANDP scsi_ptr->irq);
+
+#ifdef NCR5380_STATS
+ if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue)
+ PRINTP("There are commands pending, transfer rates may be crud\n");
+ if (hostdata->pendingr)
+ PRINTP(" %d pending reads" ANDP hostdata->pendingr);
+ if (hostdata->pendingw)
+ PRINTP(" %d pending writes" ANDP hostdata->pendingw);
+ if (hostdata->pendingr || hostdata->pendingw)
+ PRINTP("\n");
+ for (dev = scsi_devices; dev; dev=dev->next) {
+ if (dev->host == scsi_ptr) {
+ unsigned long br = hostdata->bytes_read[dev->id];
+ unsigned long bw = hostdata->bytes_write[dev->id];
+ long tr = hostdata->time_read[dev->id] / HZ;
+ long tw = hostdata->time_write[dev->id] / HZ;
+
+ PRINTP(" T:%d %s " ANDP dev->id ANDP (dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int)dev->type] : "Unknown");
+ for (i=0; i<8; i++)
+ if (dev->vendor[i] >= 0x20)
+ *(buffer+(len++)) = dev->vendor[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<16; i++)
+ if (dev->model[i] >= 0x20)
+ *(buffer+(len++)) = dev->model[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<4; i++)
+ if (dev->rev[i] >= 0x20)
+ *(buffer+(len++)) = dev->rev[i];
+ *(buffer+(len++)) = ' ';
+
+ PRINTP("\n%10ld kb read in %5ld secs" ANDP br/1024 ANDP tr);
+ if (tr)
+ PRINTP(" @ %5ld bps" ANDP br / tr);
+
+ PRINTP("\n%10ld kb written in %5ld secs" ANDP bw/1024 ANDP tw);
+ if (tw)
+ PRINTP(" @ %5ld bps" ANDP bw / tw);
+ PRINTP("\n");
+ }
+ }
+#endif
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ PRINTP("REQ not asserted, phase unknown.\n");
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i)
+ ;
+ PRINTP("Phase %s\n" ANDP phases[i].name);
+ }
+
+ if (!hostdata->connected) {
+ PRINTP("No currently connected command\n");
+ } else {
+ len += sprint_Scsi_Cmnd (buffer, len, (Scsi_Cmnd *) hostdata->connected);
+ }
+
+ PRINTP("issue_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ PRINTP("disconnected_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ sti();
+ return len;
+}
+
+#undef PRINTP
+#undef ANDP
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = GENERIC_NCR5380;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
new file mode 100644
index 0000000..a830781
--- /dev/null
+++ b/linux/dev/glue/block.c
@@ -0,0 +1,1770 @@
+/*
+ * Linux block driver support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/drivers/block/ll_rw_blk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+ */
+
+/*
+ * linux/fs/block_dev.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * linux/fs/buffer.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <machine/spl.h>
+#include <mach/mach_types.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+
+#include <kern/kalloc.h>
+#include <kern/list.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/disk_status.h>
+#include <device/device_reply.user.h>
+#include <device/device_emul.h>
+#include <device/ds_routines.h>
+
+/* TODO. This should be fixed to not be i386 specific. */
+#include <i386at/disk.h>
+
+#define MACH_INCLUDE
+#include <linux/fs.h>
+#include <linux/blk.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/major.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/malloc.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+
+#include <linux/dev/glue/glue.h>
+
+#ifdef PAE
+#define VM_PAGE_LINUX VM_PAGE_DMA32
+#else
+#define VM_PAGE_LINUX VM_PAGE_HIGHMEM
+#endif
+
+/* This task queue is not used in Mach: just for fixing undefined symbols. */
+DECLARE_TASK_QUEUE (tq_disk);
+
+/* Location of the VTOC in units of sectors (512 bytes). */
+#define PDLOCATION 29
+
+/* Linux kernel variables. */
+
+/* Temporary data allocated on the stack. */
+struct temp_data
+{
+ struct inode inode;
+ struct file file;
+ struct request req;
+ struct list pages;
+};
+
+/* One of these exists for each
+ driver associated with a major number. */
+struct device_struct
+{
+ const char *name; /* device name */
+ struct file_operations *fops; /* operations vector */
+ int busy:1; /* driver is being opened/closed */
+ int want:1; /* someone wants to open/close driver */
+ struct gendisk *gd; /* DOS partition information */
+ int default_slice; /* what slice to use when none is given */
+ struct disklabel **labels; /* disklabels for each DOS partition */
+};
+
+/* An entry in the Mach name to Linux major number conversion table. */
+struct name_map
+{
+ const char *name; /* Mach name for device */
+ unsigned major; /* Linux major number */
+ unsigned unit; /* Linux unit number */
+ int read_only; /* 1 if device is read only */
+};
+
+/* Driver operation table. */
+static struct device_struct blkdevs[MAX_BLKDEV];
+
+/* Driver request function table. */
+struct blk_dev_struct blk_dev[MAX_BLKDEV] =
+{
+ { NULL, NULL }, /* 0 no_dev */
+ { NULL, NULL }, /* 1 dev mem */
+ { NULL, NULL }, /* 2 dev fd */
+ { NULL, NULL }, /* 3 dev ide0 or hd */
+ { NULL, NULL }, /* 4 dev ttyx */
+ { NULL, NULL }, /* 5 dev tty */
+ { NULL, NULL }, /* 6 dev lp */
+ { NULL, NULL }, /* 7 dev pipes */
+ { NULL, NULL }, /* 8 dev sd */
+ { NULL, NULL }, /* 9 dev st */
+ { NULL, NULL }, /* 10 */
+ { NULL, NULL }, /* 11 */
+ { NULL, NULL }, /* 12 */
+ { NULL, NULL }, /* 13 */
+ { NULL, NULL }, /* 14 */
+ { NULL, NULL }, /* 15 */
+ { NULL, NULL }, /* 16 */
+ { NULL, NULL }, /* 17 */
+ { NULL, NULL }, /* 18 */
+ { NULL, NULL }, /* 19 */
+ { NULL, NULL }, /* 20 */
+ { NULL, NULL }, /* 21 */
+ { NULL, NULL } /* 22 dev ide1 */
+};
+
+/*
+ * blk_size contains the size of all block-devices in units of 1024 byte
+ * sectors:
+ *
+ * blk_size[MAJOR][MINOR]
+ *
+ * if (!blk_size[MAJOR]) then no minor size checking is done.
+ */
+int *blk_size[MAX_BLKDEV] = { NULL, NULL, };
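+/* Illustrative only: a minor that is 2 GiB in size would have
+   blk_size[MAJOR][MINOR] == 2 * 1024 * 1024 = 2097152, i.e. its size
+   expressed in 1024-byte units. */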
+
+/*
+ * blksize_size contains the size of all block-devices:
+ *
+ * blksize_size[MAJOR][MINOR]
+ *
+ * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
+ */
+int *blksize_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/*
+ * hardsect_size contains the size of the hardware sector of a device.
+ *
+ * hardsect_size[MAJOR][MINOR]
+ *
+ * if (!hardsect_size[MAJOR])
+ * then 512 bytes is assumed.
+ * else
+ * sector_size is hardsect_size[MAJOR][MINOR]
+ * This is currently set by some SCSI devices and read by the msdos fs driver.
+ * This might have some other uses later.
+ */
+int *hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
+
+/* This specifies how many sectors to read ahead on the disk.
+ This is unused in Mach. It is here to make drivers compile. */
+int read_ahead[MAX_BLKDEV] = {0, };
+
+/* Used to wait on when there are no free requests.
+ This is unused in Mach. It is here to make drivers compile. */
+struct wait_queue *wait_for_request = NULL;
+
+/* Initialize block drivers. */
+int
+blk_dev_init ()
+{
+#ifdef CONFIG_BLK_DEV_IDE
+ extern char *kernel_cmdline;
+ if (strncmp(kernel_cmdline, "noide", 5) &&
+ !strstr(kernel_cmdline, " noide"))
+ ide_init ();
+#endif
+#ifdef CONFIG_BLK_DEV_FD
+ floppy_init ();
+#else
+ outb_p (0xc, 0x3f2);
+#endif
+ return 0;
+}
+
+/* Return 1 if major number MAJOR corresponds to a disk device. */
+static inline int
+disk_major (int major)
+{
+ return (major == IDE0_MAJOR
+ || major == IDE1_MAJOR
+ || major == IDE2_MAJOR
+ || major == IDE3_MAJOR
+ || major == SCSI_DISK_MAJOR);
+}
+
+/* Linux kernel block support routines. */
+
+/* Register a driver for major number MAJOR,
+ with name NAME, and operations vector FOPS. */
+int
+register_blkdev (unsigned major, const char *name,
+ struct file_operations *fops)
+{
+ if (major == 0)
+ {
+ for (major = MAX_BLKDEV - 1; major > 0; major--)
+ if (blkdevs[major].fops == NULL)
+ goto out;
+ return -EBUSY;
+ }
+ if (major >= MAX_BLKDEV)
+ return -EINVAL;
+ if (blkdevs[major].fops && blkdevs[major].fops != fops)
+ return -EBUSY;
+
+out:
+ blkdevs[major].name = name;
+ blkdevs[major].fops = fops;
+ blkdevs[major].busy = 0;
+ blkdevs[major].want = 0;
+ blkdevs[major].gd = NULL;
+ blkdevs[major].default_slice = 0;
+ blkdevs[major].labels = NULL;
+ return 0;
+}
+
+/* Unregister the driver associated with
+ major number MAJOR and having the name NAME. */
+int
+unregister_blkdev (unsigned major, const char *name)
+{
+ if (major >= MAX_BLKDEV)
+ return -EINVAL;
+ if (! blkdevs[major].fops || strcmp (blkdevs[major].name, name))
+ return -EINVAL;
+ blkdevs[major].fops = NULL;
+ if (blkdevs[major].labels)
+ {
+ assert (blkdevs[major].gd);
+ kfree ((vm_offset_t) blkdevs[major].labels,
+ (sizeof (struct disklabel *)
+ * blkdevs[major].gd->max_p * blkdevs[major].gd->max_nr));
+ }
+ return 0;
+}
+
+void
+set_blocksize (kdev_t dev, int size)
+{
+ if (! blksize_size[MAJOR (dev)])
+ return;
+
+ switch (size)
+ {
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ panic ("Invalid blocksize passed to set_blocksize");
+ break;
+ }
+ blksize_size[MAJOR (dev)][MINOR (dev)] = size;
+}
+
+/* Allocate a buffer SIZE bytes long. */
+static void *
+alloc_buffer (int size)
+{
+ vm_page_t m;
+ struct temp_data *d;
+
+ assert (size <= PAGE_SIZE);
+
+ if (! linux_auto_config)
+ {
+ while ((m = vm_page_grab (VM_PAGE_DMA32)) == 0)
+ VM_PAGE_WAIT (0);
+ d = current_thread ()->pcb->data;
+ assert (d);
+ list_insert_tail (&d->pages, &m->node);
+ return (void *) phystokv(m->phys_addr);
+ }
+ return (void *) __get_free_pages (GFP_KERNEL, 0, ~0UL);
+}
+
+/* Free buffer P which is SIZE bytes long. */
+static void
+free_buffer (void *p, int size)
+{
+ struct temp_data *d;
+ vm_page_t m, tmp;
+
+ assert (size <= PAGE_SIZE);
+
+ if (! linux_auto_config)
+ {
+ d = current_thread ()->pcb->data;
+ assert (d);
+ list_for_each_entry_safe (&d->pages, m, tmp, node)
+ {
+ if (phystokv(m->phys_addr) == (vm_offset_t) p)
+ {
+ list_remove (&m->node);
+ VM_PAGE_FREE (m);
+ return;
+ }
+ }
+ panic ("free_buffer");
+ }
+ free_pages ((unsigned long) p, 0);
+}
+
+/* Allocate a buffer of SIZE bytes and
+ associate it with block number BLOCK of device DEV. */
+struct buffer_head *
+getblk (kdev_t dev, int block, int size)
+{
+ struct buffer_head *bh;
+
+ assert (size <= PAGE_SIZE);
+
+ bh = (struct buffer_head *) kalloc (sizeof (struct buffer_head));
+ if (bh)
+ {
+ memset (bh, 0, sizeof (struct buffer_head));
+ bh->b_data = alloc_buffer (size);
+ if (! bh->b_data)
+ {
+ kfree ((vm_offset_t) bh, sizeof (struct buffer_head));
+ return NULL;
+ }
+ bh->b_dev = dev;
+ bh->b_size = size;
+ bh->b_state = 1 << BH_Lock;
+ bh->b_blocknr = block;
+ }
+ return bh;
+}
+
+/* Release buffer BH previously allocated by getblk. */
+void
+__brelse (struct buffer_head *bh)
+{
+ free_buffer (bh->b_data, bh->b_size);
+ kfree ((vm_offset_t) bh, sizeof (*bh));
+}
+
+/* Allocate a buffer of SIZE bytes and fill it with data
+ from device DEV starting at block number BLOCK. */
+struct buffer_head *
+bread (kdev_t dev, int block, int size)
+{
+ struct buffer_head *bh;
+
+ bh = getblk (dev, block, size);
+ if (bh)
+ {
+ ll_rw_block (READ, 1, &bh, 0);
+ wait_on_buffer (bh);
+ if (! buffer_uptodate (bh))
+ {
+ __brelse (bh);
+ return NULL;
+ }
+ }
+ return bh;
+}
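+/* Typical use, as in read_bsd_label () and read_vtoc () below: call
+   bread (dev, block, bsize), inspect bh->b_data, and release the buffer
+   with __brelse (bh); a NULL return means the read failed. */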
+
+/* Return the block size for device DEV in *BSIZE and
+ log2(block size) in *BSHIFT. */
+static void
+get_block_size (kdev_t dev, int *bsize, int *bshift)
+{
+ int i;
+
+ *bsize = BLOCK_SIZE;
+ if (blksize_size[MAJOR (dev)]
+ && blksize_size[MAJOR (dev)][MINOR (dev)])
+ *bsize = blksize_size[MAJOR (dev)][MINOR (dev)];
+ for (i = *bsize, *bshift = 0; i != 1; i >>= 1, (*bshift)++)
+ ;
+}
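+/* A small sketch of the invariant computed above: *bshift is log2 (*bsize),
+   so with the default BLOCK_SIZE of 1024 we get *bshift == 10, and callers
+   convert block numbers to 512-byte sectors with block << (bshift - 9). */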
+
+/* Enqueue request REQ on a driver's queue. */
+static inline void
+enqueue_request (struct request *req)
+{
+ struct request *tmp;
+ struct blk_dev_struct *dev;
+
+ dev = blk_dev + MAJOR (req->rq_dev);
+ cli ();
+ tmp = dev->current_request;
+ if (! tmp)
+ {
+ dev->current_request = req;
+ (*dev->request_fn) ();
+ sti ();
+ return;
+ }
+ while (tmp->next)
+ {
+ if ((IN_ORDER (tmp, req) || ! IN_ORDER (tmp, tmp->next))
+ && IN_ORDER (req, tmp->next))
+ break;
+ tmp = tmp->next;
+ }
+ req->next = tmp->next;
+ tmp->next = req;
+ if (scsi_blk_major (MAJOR (req->rq_dev)))
+ (*dev->request_fn) ();
+ sti ();
+}
+
+int
+check_rw_block (int nr, struct buffer_head **bh)
+{
+ int i, bshift, bsize;
+ get_block_size (bh[0]->b_dev, &bsize, &bshift);
+ loff_t sectorl = bh[0]->b_blocknr << (bshift - 9);
+
+ for (i = 0; i < nr; i++)
+ {
+ sectorl += bh[i]->b_size >> 9;
+ unsigned long sector = sectorl;
+ if (sector != sectorl)
+ return -EOVERFLOW;
+ }
+
+ return 0;
+}
+
+/* Perform the I/O operation RW on the buffer list BH
+ containing NR buffers. */
+void
+ll_rw_block (int rw, int nr, struct buffer_head **bh, int quiet)
+{
+ int i, bshift, bsize;
+ unsigned major;
+ struct request *r;
+ static struct request req;
+
+ major = MAJOR (bh[0]->b_dev);
+ assert (major < MAX_BLKDEV);
+
+ get_block_size (bh[0]->b_dev, &bsize, &bshift);
+
+ if (! linux_auto_config)
+ {
+ assert (current_thread ()->pcb->data);
+ r = &((struct temp_data *) current_thread ()->pcb->data)->req;
+ }
+ else
+ r = &req;
+
+ for (i = 0, r->nr_sectors = 0; i < nr - 1; i++)
+ {
+ r->nr_sectors += bh[i]->b_size >> 9;
+ bh[i]->b_reqnext = bh[i + 1];
+ }
+ r->nr_sectors += bh[i]->b_size >> 9;
+ bh[i]->b_reqnext = NULL;
+
+ r->rq_status = RQ_ACTIVE;
+ r->rq_dev = bh[0]->b_dev;
+ r->cmd = rw;
+ r->errors = 0;
+ r->quiet = quiet;
+ r->sector = bh[0]->b_blocknr << (bshift - 9);
+ r->current_nr_sectors = bh[0]->b_size >> 9;
+ r->buffer = bh[0]->b_data;
+ r->bh = bh[0];
+ r->bhtail = bh[nr - 1];
+ r->sem = NULL;
+ r->next = NULL;
+
+ enqueue_request (r);
+}
+
+#define BSIZE (1 << bshift)
+#define BMASK (BSIZE - 1)
+
+/* Perform read/write operation RW on device DEV
+ starting at *off to/from buffer *BUF of size *RESID.
+ The device block size is given by BSHIFT. *OFF and
+ *RESID may be non-multiples of the block size.
+ *OFF, *BUF and *RESID are updated if the operation
+ completed successfully. */
+static int
+rdwr_partial (int rw, kdev_t dev, loff_t *off,
+ char **buf, int *resid, int bshift)
+{
+ int c, err = 0, o;
+ long sect, nsect;
+ struct buffer_head bhead, *bh = &bhead;
+ struct gendisk *gd;
+ loff_t blkl;
+
+ memset (bh, 0, sizeof (struct buffer_head));
+ bh->b_state = 1 << BH_Lock;
+ bh->b_dev = dev;
+ blkl = *off >> bshift;
+ bh->b_blocknr = blkl;
+ if (bh->b_blocknr != blkl)
+ return -EOVERFLOW;
+ bh->b_size = BSIZE;
+
+  /* Check whether this device's size is not a whole number of blocks. */
+ for (gd = gendisk_head, nsect = -1; gd; gd = gd->next)
+ if (gd->major == MAJOR (dev))
+ {
+ nsect = gd->part[MINOR (dev)].nr_sects;
+ break;
+ }
+ if (nsect > 0)
+ {
+ loff_t sectl;
+ sectl = bh->b_blocknr << (bshift - 9);
+ sect = sectl;
+ assert ((nsect - sect) > 0);
+ if (nsect - sect < (BSIZE >> 9))
+ bh->b_size = (nsect - sect) << 9;
+ }
+ bh->b_data = alloc_buffer (bh->b_size);
+ if (! bh->b_data)
+ return -ENOMEM;
+ err = check_rw_block (1, &bh);
+ if (err)
+ goto out;
+ ll_rw_block (READ, 1, &bh, 0);
+ wait_on_buffer (bh);
+ if (buffer_uptodate (bh))
+ {
+ o = *off & BMASK;
+ c = bh->b_size - o;
+ if (c > *resid)
+ c = *resid;
+ if (rw == READ)
+ memcpy (*buf, bh->b_data + o, c);
+ else
+ {
+ memcpy (bh->b_data + o, *buf, c);
+ bh->b_state = (1 << BH_Dirty) | (1 << BH_Lock);
+ err = check_rw_block (1, &bh);
+ if (err)
+ goto out;
+ ll_rw_block (WRITE, 1, &bh, 0);
+ wait_on_buffer (bh);
+ if (! buffer_uptodate (bh))
+ {
+ err = -EIO;
+ goto out;
+ }
+ }
+ *buf += c;
+ *resid -= c;
+ *off += c;
+ }
+ else
+ err = -EIO;
+out:
+ free_buffer (bh->b_data, bh->b_size);
+ return err;
+}
+
+#define BH_Bounce 16
+#define MAX_BUF 8
+
+/* Perform read/write operation RW on device DEV
+ starting at *off to/from buffer *BUF of size *RESID.
+ The device block size is given by BSHIFT. *OFF and
+ *RESID must be multiples of the block size.
+ *OFF, *BUF and *RESID are updated if the operation
+ completed successfully. */
+static int
+rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
+{
+ int cc, err = 0, i, j, nb, nbuf;
+ loff_t blkl;
+ long blk, newblk;
+ struct buffer_head bhead[MAX_BUF], *bh, *bhp[MAX_BUF];
+ phys_addr_t pa;
+
+ assert ((*off & BMASK) == 0);
+
+ nbuf = *resid >> bshift;
+ blkl = *off >> bshift;
+ blk = blkl;
+ if (blk != blkl)
+ return -EOVERFLOW;
+ for (i = nb = 0, bh = bhead; nb < nbuf; bh++)
+ {
+ memset (bh, 0, sizeof (*bh));
+ bh->b_dev = dev;
+ bh->b_blocknr = blk;
+ set_bit (BH_Lock, &bh->b_state);
+ if (rw == WRITE)
+ set_bit (BH_Dirty, &bh->b_state);
+ cc = PAGE_SIZE - (((int) *buf + (nb << bshift)) & PAGE_MASK);
+ pa = pmap_extract (vm_map_pmap (device_io_map),
+ (((vm_offset_t) *buf) + (nb << bshift)));
+ if (cc >= BSIZE && (((int) *buf + (nb << bshift)) & 511) == 0
+ && pa + cc <= VM_PAGE_DIRECTMAP_LIMIT)
+ cc &= ~BMASK;
+ else
+ {
+ cc = PAGE_SIZE;
+ set_bit (BH_Bounce, &bh->b_state);
+ }
+ if (cc > ((nbuf - nb) << bshift))
+ cc = (nbuf - nb) << bshift;
+ if (! test_bit (BH_Bounce, &bh->b_state))
+ bh->b_data = (char *) phystokv(pa);
+ else
+ {
+ bh->b_data = alloc_buffer (cc);
+ if (! bh->b_data)
+ {
+ err = -ENOMEM;
+ break;
+ }
+ if (rw == WRITE)
+ memcpy (bh->b_data, *buf + (nb << bshift), cc);
+ }
+ bh->b_size = cc;
+ bhp[i] = bh;
+ nb += cc >> bshift;
+ newblk = blk + (cc >> bshift);
+ if (newblk < blk)
+ {
+ err = -EOVERFLOW;
+ break;
+ }
+ blk = newblk;
+ if (++i == MAX_BUF)
+ break;
+ }
+ if (! err)
+ err = check_rw_block (i, bhp);
+ if (! err)
+ {
+ assert (i > 0);
+ ll_rw_block (rw, i, bhp, 0);
+ wait_on_buffer (bhp[i - 1]);
+ }
+ for (bh = bhead, cc = 0, j = 0; j < i; cc += bh->b_size, bh++, j++)
+ {
+ if (! err && buffer_uptodate (bh)
+ && rw == READ && test_bit (BH_Bounce, &bh->b_state))
+ memcpy (*buf + cc, bh->b_data, bh->b_size);
+ else if (! err && ! buffer_uptodate (bh))
+ err = -EIO;
+ if (test_bit (BH_Bounce, &bh->b_state))
+ free_buffer (bh->b_data, bh->b_size);
+ }
+ if (! err)
+ {
+ *buf += cc;
+ *resid -= cc;
+ *off += cc;
+ }
+ return err;
+}
+
+/* Perform read/write operation RW on device DEV
+ starting at *off to/from buffer BUF of size COUNT.
+ *OFF is updated if the operation completed successfully. */
+static int
+do_rdwr (int rw, kdev_t dev, loff_t *off, char *buf, int count)
+{
+ int bsize, bshift, err = 0, resid = count;
+
+ get_block_size (dev, &bsize, &bshift);
+ if (*off & BMASK)
+ err = rdwr_partial (rw, dev, off, &buf, &resid, bshift);
+ while (resid >= bsize && ! err)
+ err = rdwr_full (rw, dev, off, &buf, &resid, bshift);
+ if (! err && resid)
+ err = rdwr_partial (rw, dev, off, &buf, &resid, bshift);
+ return err ? err : count - resid;
+}
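+/* Worked example (illustrative numbers): with a 1024-byte block size, a
+   3000-byte transfer starting at byte offset 500 is served as a 524-byte
+   partial (the rest of the first block), then 2048 bytes of whole blocks,
+   then a final 428-byte partial; 524 + 2048 + 428 = 3000. */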
+
+int
+block_write (struct inode *inode, struct file *filp,
+ const char *buf, int count)
+{
+ return do_rdwr (WRITE, inode->i_rdev, &filp->f_pos, (char *) buf, count);
+}
+
+int
+block_read (struct inode *inode, struct file *filp, char *buf, int count)
+{
+ return do_rdwr (READ, inode->i_rdev, &filp->f_pos, buf, count);
+}
+
+/*
+ * This routine checks whether removable media has been changed,
+ * and invalidates all buffer-cache entries in that case. This
+ * is a relatively slow routine, so we have to try to minimize using
+ * it. Thus it is called only upon a 'mount' or 'open'. This
+ * is the best way of combining speed and utility, I think.
+ * People changing diskettes in the middle of an operation deserve
+ * to lose :-)
+ */
+int
+check_disk_change (kdev_t dev)
+{
+ unsigned i;
+ struct file_operations * fops;
+
+ i = MAJOR(dev);
+ if (i >= MAX_BLKDEV || (fops = blkdevs[i].fops) == NULL)
+ return 0;
+ if (fops->check_media_change == NULL)
+ return 0;
+ if (! (*fops->check_media_change) (dev))
+ return 0;
+
+ /* printf ("Disk change detected on device %s\n", kdevname(dev));*/
+
+ if (fops->revalidate)
+ (*fops->revalidate) (dev);
+
+ return 1;
+}
+
+/* Mach device interface routines. */
+
+/* Mach name to Linux major/minor number mapping table. */
+static struct name_map name_to_major[] =
+{
+ /* IDE disks */
+ { "hd0", IDE0_MAJOR, 0, 0 },
+ { "hd1", IDE0_MAJOR, 1, 0 },
+ { "hd2", IDE1_MAJOR, 0, 0 },
+ { "hd3", IDE1_MAJOR, 1, 0 },
+ { "hd4", IDE2_MAJOR, 0, 0 },
+ { "hd5", IDE2_MAJOR, 1, 0 },
+ { "hd6", IDE3_MAJOR, 0, 0 },
+ { "hd7", IDE3_MAJOR, 1, 0 },
+
+ /* IDE CDROMs */
+ { "wcd0", IDE0_MAJOR, 0, 1 },
+ { "wcd1", IDE0_MAJOR, 1, 1 },
+ { "wcd2", IDE1_MAJOR, 0, 1 },
+ { "wcd3", IDE1_MAJOR, 1, 1 },
+ { "wcd4", IDE2_MAJOR, 0, 1 },
+ { "wcd5", IDE2_MAJOR, 1, 1 },
+ { "wcd6", IDE3_MAJOR, 0, 1 },
+ { "wcd7", IDE3_MAJOR, 1, 1 },
+
+ /* SCSI disks */
+ { "sd0", SCSI_DISK_MAJOR, 0, 0 },
+ { "sd1", SCSI_DISK_MAJOR, 1, 0 },
+ { "sd2", SCSI_DISK_MAJOR, 2, 0 },
+ { "sd3", SCSI_DISK_MAJOR, 3, 0 },
+ { "sd4", SCSI_DISK_MAJOR, 4, 0 },
+ { "sd5", SCSI_DISK_MAJOR, 5, 0 },
+ { "sd6", SCSI_DISK_MAJOR, 6, 0 },
+ { "sd7", SCSI_DISK_MAJOR, 7, 0 },
+
+ /* SCSI CDROMs */
+ { "cd0", SCSI_CDROM_MAJOR, 0, 1 },
+ { "cd1", SCSI_CDROM_MAJOR, 1, 1 },
+
+ /* Floppy disks */
+ { "fd0", FLOPPY_MAJOR, 0, 0 },
+ { "fd1", FLOPPY_MAJOR, 1, 0 },
+};
+
+#define NUM_NAMES (sizeof (name_to_major) / sizeof (name_to_major[0]))
+
+/* One of these is associated with each open instance of a device. */
+struct block_data
+{
+ const char *name; /* Mach name for device */
+ int want:1; /* someone is waiting for I/O to complete */
+ int open_count; /* number of opens */
+ int iocount; /* number of pending I/O operations */
+ int part; /* BSD partition number (-1 if none) */
+ int flags; /* Linux file flags */
+ int mode; /* Linux file mode */
+ kdev_t dev; /* Linux device number */
+ ipc_port_t port; /* port representing device */
+ struct device_struct *ds; /* driver operation table entry */
+ struct device device; /* generic device header */
+ struct name_map *np; /* name to inode map */
+ struct block_data *next; /* forward link */
+};
+
+/* List of open devices. */
+static struct block_data *open_list;
+
+/* Forward declarations. */
+
+extern struct device_emulation_ops linux_block_emulation_ops;
+
+static io_return_t device_close (void *);
+static io_return_t device_close_forced (void *, int);
+
+/* Return a send right for block device BD. */
+static ipc_port_t
+dev_to_port (void *bd)
+{
+ return (bd
+ ? ipc_port_make_send (((struct block_data *) bd)->port)
+ : IP_NULL);
+}
+
+/* Return 1 if C is a letter of the alphabet. */
+static inline int
+isalpha (int c)
+{
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+/* Return 1 if C is a digit. */
+static inline int
+isdigit (int c)
+{
+ return c >= '0' && c <= '9';
+}
+
+/* Find the name map entry for device NAME.
+ Set *SLICE to be the DOS partition and
+ *PART the BSD/Mach partition, if any. */
+static struct name_map *
+find_name (char *name, int *slice, int *part)
+{
+ char *p, *q;
+ int i, len;
+ struct name_map *np;
+
+ /* Parse name into name, unit, DOS partition (slice) and partition. */
+ for (*slice = 0, *part = -1, p = name; isalpha (*p); p++)
+ ;
+ if (p == name || ! isdigit (*p))
+ return NULL;
+ do
+ p++;
+ while (isdigit (*p));
+ if (*p)
+ {
+ q = p;
+ if (*q == 's' && isdigit (*(q + 1)))
+ {
+ q++;
+ do
+ *slice = *slice * 10 + *q++ - '0';
+ while (isdigit (*q));
+ if (! *q)
+ goto find_major;
+ }
+ if (! isalpha (*q) || *(q + 1))
+ return NULL;
+ *part = *q - 'a';
+ }
+
+find_major:
+ /* Convert name to major number. */
+ for (i = 0, np = name_to_major; i < NUM_NAMES; i++, np++)
+ {
+ len = strlen (np->name);
+ if (len == (p - name) && ! strncmp (np->name, name, len))
+ return np;
+ }
+ return NULL;
+}
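+/* Parsing examples (illustrative, following the rules above): "sd0s1a"
+   matches the "sd0" entry with *slice = 1 and *part = 0, while a bare
+   "hd2" matches the "hd2" entry with *slice = 0 and *part = -1, meaning
+   no BSD/Mach partition was given. */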
+
+/* Attempt to read a BSD disklabel from device DEV. */
+static struct disklabel *
+read_bsd_label (kdev_t dev)
+{
+ int bsize, bshift;
+ struct buffer_head *bh;
+ struct disklabel *dlp, *lp = NULL;
+
+ get_block_size (dev, &bsize, &bshift);
+ bh = bread (dev, LBLLOC >> (bshift - 9), bsize);
+ if (bh)
+ {
+ dlp = (struct disklabel *) (bh->b_data + ((LBLLOC << 9) & (bsize - 1)));
+ if (dlp->d_magic == DISKMAGIC && dlp->d_magic2 == DISKMAGIC)
+ {
+ lp = (struct disklabel *) kalloc (sizeof (*lp));
+ assert (lp);
+ memcpy (lp, dlp, sizeof (*lp));
+ }
+ __brelse (bh);
+ }
+ return lp;
+}
+
+/* Attempt to read a VTOC from device DEV. */
+static struct disklabel *
+read_vtoc (kdev_t dev)
+{
+ int bshift, bsize, i;
+ struct buffer_head *bh;
+ struct evtoc *evp;
+ struct disklabel *lp = NULL;
+
+ get_block_size (dev, &bsize, &bshift);
+ bh = bread (dev, PDLOCATION >> (bshift - 9), bsize);
+ if (bh)
+ {
+ evp = (struct evtoc *) (bh->b_data + ((PDLOCATION << 9) & (bsize - 1)));
+ if (evp->sanity == VTOC_SANE)
+ {
+ lp = (struct disklabel *) kalloc (sizeof (*lp));
+ assert (lp);
+ lp->d_npartitions = evp->nparts;
+ if (lp->d_npartitions > MAXPARTITIONS)
+ lp->d_npartitions = MAXPARTITIONS;
+ for (i = 0; i < lp->d_npartitions; i++)
+ {
+ lp->d_partitions[i].p_size = evp->part[i].p_size;
+ lp->d_partitions[i].p_offset = evp->part[i].p_start;
+ lp->d_partitions[i].p_fstype = FS_BSDFFS;
+ }
+ }
+ __brelse (bh);
+ }
+ return lp;
+}
+
+/* Initialize BSD/Mach partition table for device
+ specified by NP, DS and *DEV. Check SLICE and *PART for validity. */
+static kern_return_t
+init_partition (struct name_map *np, kdev_t *dev,
+ struct device_struct *ds, int slice, int *part)
+{
+ int i, j;
+ struct disklabel *lp;
+ struct gendisk *gd = ds->gd;
+ struct partition *p;
+ struct temp_data *d = current_thread ()->pcb->data;
+
+ if (! gd)
+ {
+ *part = -1;
+ return 0;
+ }
+ if (ds->labels)
+ goto check;
+ ds->labels = (struct disklabel **) kalloc (sizeof (struct disklabel *)
+ * gd->max_nr * gd->max_p);
+ if (! ds->labels)
+ return D_NO_MEMORY;
+ memset ((void *) ds->labels, 0,
+ sizeof (struct disklabel *) * gd->max_nr * gd->max_p);
+ for (i = 1; i < gd->max_p; i++)
+ {
+ d->inode.i_rdev = *dev | i;
+ if (gd->part[MINOR (d->inode.i_rdev)].nr_sects <= 0
+ || gd->part[MINOR (d->inode.i_rdev)].start_sect < 0)
+ continue;
+ d->file.f_flags = 0;
+ d->file.f_mode = O_RDONLY;
+ if (ds->fops->open && (*ds->fops->open) (&d->inode, &d->file))
+ continue;
+ lp = read_bsd_label (d->inode.i_rdev);
+ if (! lp && gd->part[MINOR (d->inode.i_rdev)].nr_sects > PDLOCATION)
+ lp = read_vtoc (d->inode.i_rdev);
+ if (ds->fops->release)
+ (*ds->fops->release) (&d->inode, &d->file);
+ if (lp)
+ {
+ if (ds->default_slice == 0)
+ ds->default_slice = i;
+ for (j = 0, p = lp->d_partitions; j < lp->d_npartitions; j++, p++)
+ {
+ if (p->p_offset < 0 || p->p_size <= 0)
+ continue;
+
+ /* Sanity check. */
+ if (p->p_size > gd->part[MINOR (d->inode.i_rdev)].nr_sects)
+ p->p_size = gd->part[MINOR (d->inode.i_rdev)].nr_sects;
+ }
+ }
+ ds->labels[MINOR (d->inode.i_rdev)] = lp;
+ }
+
+check:
+ if (*part >= 0 && slice == 0)
+ slice = ds->default_slice;
+ if (*part >= 0 && slice == 0)
+ return D_NO_SUCH_DEVICE;
+ *dev = MKDEV (MAJOR (*dev), MINOR (*dev) | slice);
+ if (slice >= gd->max_p
+ || gd->part[MINOR (*dev)].start_sect < 0
+ || gd->part[MINOR (*dev)].nr_sects <= 0)
+ return D_NO_SUCH_DEVICE;
+ if (*part >= 0)
+ {
+ lp = ds->labels[MINOR (*dev)];
+ if (! lp
+ || *part >= lp->d_npartitions
+ || lp->d_partitions[*part].p_offset < 0
+ || lp->d_partitions[*part].p_size <= 0)
+ return D_NO_SUCH_DEVICE;
+ }
+ return 0;
+}
+
+#define DECL_DATA struct temp_data td
+#define INIT_DATA() \
+{ \
+ list_init (&td.pages); \
+ td.inode.i_rdev = bd->dev; \
+ td.file.f_mode = bd->mode; \
+ td.file.f_flags = bd->flags; \
+ current_thread ()->pcb->data = &td; \
+}
+
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, char *name, device_t *devp)
+{
+ int part, slice, err;
+ unsigned major, minor;
+ kdev_t dev;
+ ipc_port_t notify;
+ struct block_data *bd = NULL, *bdp;
+ struct device_struct *ds;
+ struct gendisk *gd;
+ struct name_map *np;
+ DECL_DATA;
+
+ np = find_name (name, &slice, &part);
+ if (! np)
+ return D_NO_SUCH_DEVICE;
+ major = np->major;
+ ds = &blkdevs[major];
+
+ /* Check that driver exists. */
+ if (! ds->fops)
+ return D_NO_SUCH_DEVICE;
+
+ /* Wait for any other open/close calls to finish. */
+ ds = &blkdevs[major];
+ while (ds->busy)
+ {
+ ds->want = 1;
+ assert_wait ((event_t) ds, FALSE);
+ schedule ();
+ }
+ ds->busy = 1;
+
+ /* Compute minor number. */
+ if (! ds->gd)
+ {
+ for (gd = gendisk_head; gd && gd->major != major; gd = gd->next)
+ ;
+ ds->gd = gd;
+ }
+ minor = np->unit;
+ gd = ds->gd;
+ if (gd)
+ minor <<= gd->minor_shift;
+ dev = MKDEV (major, minor);
+
+ list_init (&td.pages);
+ current_thread ()->pcb->data = &td;
+
+ /* Check partition. */
+ err = init_partition (np, &dev, ds, slice, &part);
+ if (err)
+ goto out;
+
+ /* Initialize file structure. */
+ switch (mode & (D_READ|D_WRITE))
+ {
+ case D_WRITE:
+ td.file.f_mode = O_WRONLY;
+ break;
+
+ case D_READ|D_WRITE:
+ td.file.f_mode = O_RDWR;
+ break;
+
+ default:
+ td.file.f_mode = O_RDONLY;
+ break;
+ }
+ td.file.f_flags = (mode & D_NODELAY) ? O_NDELAY : 0;
+
+ /* Check if the device is currently open. */
+ for (bdp = open_list; bdp; bdp = bdp->next)
+ if (bdp->dev == dev
+ && bdp->part == part
+ && bdp->mode == td.file.f_mode
+ && bdp->flags == td.file.f_flags)
+ {
+ bd = bdp;
+ goto out;
+ }
+
+ /* Open the device. */
+ if (ds->fops->open)
+ {
+ td.inode.i_rdev = dev;
+ err = (*ds->fops->open) (&td.inode, &td.file);
+ if (err)
+ {
+ err = linux_to_mach_error (err);
+ goto out;
+ }
+ }
+
+ /* Allocate and initialize device data. */
+ bd = (struct block_data *) kalloc (sizeof (struct block_data));
+ if (! bd)
+ {
+ err = D_NO_MEMORY;
+ goto bad;
+ }
+ bd->want = 0;
+ bd->open_count = 0;
+ bd->iocount = 0;
+ bd->part = part;
+ bd->ds = ds;
+ bd->device.emul_data = bd;
+ bd->device.emul_ops = &linux_block_emulation_ops;
+ bd->dev = dev;
+ bd->mode = td.file.f_mode;
+ bd->flags = td.file.f_flags;
+ bd->port = ipc_port_alloc_kernel ();
+ if (bd->port == IP_NULL)
+ {
+ err = KERN_RESOURCE_SHORTAGE;
+ goto bad;
+ }
+ ipc_kobject_set (bd->port, (ipc_kobject_t) &bd->device, IKOT_DEVICE);
+ notify = ipc_port_make_sonce (bd->port);
+ ip_lock (bd->port);
+ ipc_port_nsrequest (bd->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+ goto out;
+
+bad:
+ if (ds->fops->release)
+ (*ds->fops->release) (&td.inode, &td.file);
+
+out:
+ ds->busy = 0;
+ if (ds->want)
+ {
+ ds->want = 0;
+ thread_wakeup ((event_t) ds);
+ }
+
+ if (bd && bd->open_count > 0)
+ {
+ if (err)
+ *devp = NULL;
+ else
+ {
+ *devp = &bd->device;
+ bd->open_count++;
+ }
+ return err;
+ }
+
+ if (err)
+ {
+ if (bd)
+ {
+ if (bd->port != IP_NULL)
+ {
+ ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (bd->port);
+ *devp = (device_t) IP_NULL;
+ }
+ kfree ((vm_offset_t) bd, sizeof (struct block_data));
+ bd = NULL;
+ }
+ }
+ else
+ {
+ bd->open_count = 1;
+ bd->next = open_list;
+ open_list = bd;
+      *devp = &bd->device;
+ }
+
+ if (!IP_VALID (reply_port) && ! err)
+ device_close (bd);
+ return err;
+}
+
+static io_return_t
+device_close_forced (void *d, int force)
+{
+ struct block_data *bd = d, *bdp, **prev;
+ struct device_struct *ds = bd->ds;
+ DECL_DATA;
+
+ INIT_DATA ();
+
+ /* Wait for any other open/close to complete. */
+ while (ds->busy)
+ {
+ ds->want = 1;
+ assert_wait ((event_t) ds, FALSE);
+ schedule ();
+ }
+ ds->busy = 1;
+
+ if (force || --bd->open_count == 0)
+ {
+ /* Wait for pending I/O to complete. */
+ while (bd->iocount > 0)
+ {
+ bd->want = 1;
+ assert_wait ((event_t) bd, FALSE);
+ schedule ();
+ }
+
+ /* Remove device from open list. */
+ prev = &open_list;
+ bdp = open_list;
+ while (bdp)
+ {
+ if (bdp == bd)
+ {
+ *prev = bdp->next;
+ break;
+ }
+ prev = &bdp->next;
+ bdp = bdp->next;
+ }
+
+ assert (bdp == bd);
+
+ if (ds->fops->release)
+ (*ds->fops->release) (&td.inode, &td.file);
+
+ ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (bd->port);
+ kfree ((vm_offset_t) bd, sizeof (struct block_data));
+ }
+
+ ds->busy = 0;
+ if (ds->want)
+ {
+ ds->want = 0;
+ thread_wakeup ((event_t) ds);
+ }
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_close (void *d)
+{
+ return device_close_forced (d, 0);
+}
+
+
+#define MAX_COPY (VM_MAP_COPY_PAGE_LIST_MAX << PAGE_SHIFT)
+
+/* Check block BN and size COUNT for I/O validity
+   to/from device BD.  Set *OFF to the byte offset
+ where I/O is to begin and return the size of transfer. */
+static int
+check_limit (struct block_data *bd, loff_t *off, long bn, int count)
+{
+ int major, minor;
+ long maxsz, sz;
+ struct disklabel *lp = NULL;
+
+ if (count <= 0)
+ return count;
+
+ major = MAJOR (bd->dev);
+ minor = MINOR (bd->dev);
+
+ if (bd->ds->gd)
+ {
+ if (bd->part >= 0)
+ {
+ assert (bd->ds->labels);
+ assert (bd->ds->labels[minor]);
+ lp = bd->ds->labels[minor];
+ maxsz = lp->d_partitions[bd->part].p_size;
+ }
+ else
+ maxsz = bd->ds->gd->part[minor].nr_sects;
+ }
+ else
+ {
+ assert (blk_size[major]);
+ maxsz = blk_size[major][minor] << (BLOCK_SIZE_BITS - 9);
+ }
+ assert (maxsz > 0);
+ sz = maxsz - bn;
+ if (sz <= 0)
+ return sz;
+ if (sz < ((count + 511) >> 9))
+ count = sz << 9;
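+  /* Translate BN from a sector relative to the BSD disklabel partition
+     into a sector relative to the Linux minor device being accessed.  */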
+ if (lp)
+ bn += (lp->d_partitions[bd->part].p_offset
+ - bd->ds->gd->part[minor].start_sect);
+ *off = (loff_t) bn << 9;
+ bd->iocount++;
+ return count;
+}
+
+static io_return_t
+device_write (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int orig_count,
+ int *bytes_written)
+{
+ int resid, amt, i;
+ int count = (int) orig_count;
+ io_return_t err = 0;
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ vm_offset_t addr, uaddr;
+ vm_size_t len, size;
+ struct block_data *bd = d;
+ DECL_DATA;
+
+ INIT_DATA ();
+
+ *bytes_written = 0;
+
+ if (bd->mode == O_RDONLY)
+ return D_INVALID_OPERATION;
+ if (! bd->ds->fops->write)
+ return D_READ_ONLY;
+ count = check_limit (bd, &td.file.f_pos, bn, count);
+ if (count < 0)
+ return D_INVALID_SIZE;
+ if (count == 0)
+ {
+ vm_map_copy_discard (copy);
+ return 0;
+ }
+
+ resid = count;
+ uaddr = copy->offset;
+
+ /* Allocate a kernel buffer. */
+ size = round_page (uaddr + count) - trunc_page (uaddr);
+ if (size > MAX_COPY)
+ size = MAX_COPY;
+ addr = vm_map_min (device_io_map);
+ err = vm_map_enter (device_io_map, &addr, size, 0, TRUE,
+ NULL, 0, FALSE, VM_PROT_READ|VM_PROT_WRITE,
+ VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE);
+ if (err)
+ {
+ vm_map_copy_discard (copy);
+ goto out;
+ }
+
+ /* Determine size of I/O this time around. */
+ len = size - (uaddr & PAGE_MASK);
+ if (len > resid)
+ len = resid;
+
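+  /* Write in windows of at most MAX_COPY bytes: map the caller's page
+     list into the kernel buffer, hand it to the Linux driver's write
+     routine, then copy in the next chunk of user data.  */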
+ while (1)
+ {
+ /* Map user pages. */
+ for (i = 0; i < copy->cpy_npages; i++)
+ pmap_enter (vm_map_pmap (device_io_map),
+ addr + (i << PAGE_SHIFT),
+ copy->cpy_page_list[i]->phys_addr,
+ VM_PROT_READ|VM_PROT_WRITE, TRUE);
+
+ /* Do the write. */
+ amt = (*bd->ds->fops->write) (&td.inode, &td.file,
+ (char *) addr + (uaddr & PAGE_MASK), len);
+
+ /* Unmap pages and deallocate copy. */
+ pmap_remove (vm_map_pmap (device_io_map),
+ addr, addr + (copy->cpy_npages << PAGE_SHIFT));
+ vm_map_copy_discard (copy);
+
+ /* Check result of write. */
+ if (amt > 0)
+ {
+ resid -= amt;
+ if (resid == 0)
+ break;
+ uaddr += amt;
+ }
+ else
+ {
+ if (amt < 0)
+ err = linux_to_mach_error (amt);
+ break;
+ }
+
+ /* Determine size of I/O this time around and copy in pages. */
+ len = round_page (uaddr + resid) - trunc_page (uaddr);
+ if (len > MAX_COPY)
+ len = MAX_COPY;
+ len -= uaddr & PAGE_MASK;
+ if (len > resid)
+ len = resid;
+ err = vm_map_copyin_page_list (current_map (), uaddr, len,
+ FALSE, FALSE, &copy, FALSE);
+ if (err)
+ break;
+ }
+
+ /* Delete kernel buffer. */
+ vm_map_remove (device_io_map, addr, addr + size);
+
+out:
+ if (--bd->iocount == 0 && bd->want)
+ {
+ bd->want = 0;
+ thread_wakeup ((event_t) bd);
+ }
+ if (IP_VALID (reply_port))
+ ds_device_write_reply (reply_port, reply_port_type, err, count - resid);
+ return MIG_NO_REPLY;
+}
+
+static io_return_t
+device_read (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, int count, io_buf_ptr_t *data,
+ unsigned *bytes_read)
+{
+ boolean_t dirty;
+ int resid, amt;
+ io_return_t err = 0;
+ struct list pages;
+ vm_map_copy_t copy;
+ vm_offset_t addr, offset, alloc_offset, o;
+ vm_object_t object;
+ vm_page_t m;
+ vm_size_t len, size;
+ struct block_data *bd = d;
+ DECL_DATA;
+
+ INIT_DATA ();
+
+ *data = 0;
+ *bytes_read = 0;
+
+ if (! bd->ds->fops->read)
+ return D_INVALID_OPERATION;
+ count = check_limit (bd, &td.file.f_pos, bn, count);
+ if (count < 0)
+ return D_INVALID_SIZE;
+ if (count == 0)
+ return 0;
+
+ /* Allocate an object to hold the data. */
+ size = round_page (count);
+ object = vm_object_allocate (size);
+ if (! object)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+ alloc_offset = offset = 0;
+ resid = count;
+
+ /* Allocate a kernel buffer. */
+ addr = vm_map_min (device_io_map);
+ if (size > MAX_COPY)
+ size = MAX_COPY;
+ err = vm_map_enter (device_io_map, &addr, size, 0, TRUE, NULL,
+ 0, FALSE, VM_PROT_READ|VM_PROT_WRITE,
+ VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE);
+ if (err)
+ goto out;
+
+ list_init (&pages);
+
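+  /* Read in windows of at most MAX_COPY bytes: grab free pages, map
+     them into the kernel buffer for the driver's read routine, then
+     move the filled pages into the VM object (or free them if the
+     read failed).  */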
+ while (resid)
+ {
+ /* Determine size of I/O this time around. */
+ len = round_page (offset + resid) - trunc_page (offset);
+ if (len > MAX_COPY)
+ len = MAX_COPY;
+
+ /* Map any pages left from previous operation. */
+ o = trunc_page (offset);
+ list_for_each_entry (&pages, m, node)
+ {
+ pmap_enter (vm_map_pmap (device_io_map),
+ addr + o - trunc_page (offset),
+ m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ o += PAGE_SIZE;
+ }
+ assert (o == alloc_offset);
+
+ /* Allocate and map pages. */
+ while (alloc_offset < trunc_page (offset) + len)
+ {
+ while ((m = vm_page_grab (VM_PAGE_LINUX)) == 0)
+ VM_PAGE_WAIT (0);
+ assert (! m->active && ! m->inactive);
+ m->busy = TRUE;
+ list_insert_tail (&pages, &m->node);
+ pmap_enter (vm_map_pmap (device_io_map),
+ addr + alloc_offset - trunc_page (offset),
+ m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+ alloc_offset += PAGE_SIZE;
+ }
+
+ /* Do the read. */
+ amt = len - (offset & PAGE_MASK);
+ if (amt > resid)
+ amt = resid;
+ amt = (*bd->ds->fops->read) (&td.inode, &td.file,
+ (char *) addr + (offset & PAGE_MASK), amt);
+
+ /* Compute number of pages to insert in object. */
+ o = trunc_page (offset);
+ if (amt > 0)
+ {
+ dirty = TRUE;
+ resid -= amt;
+ if (resid == 0)
+ {
+ /* Zero any unused space. */
+ if (offset + amt < o + len)
+ memset ((void *) (addr + offset - o + amt),
+ 0, o + len - offset - amt);
+ offset = o + len;
+ }
+ else
+ offset += amt;
+ }
+ else
+ {
+ dirty = FALSE;
+ offset = o + len;
+ }
+
+ /* Unmap pages and add them to the object. */
+ pmap_remove (vm_map_pmap (device_io_map), addr, addr + len);
+ vm_object_lock (object);
+ while (o < trunc_page (offset))
+ {
+ m = list_first_entry (&pages, struct vm_page, node);
+ assert (! list_end (&pages, &m->node));
+ list_remove (&m->node);
+ assert (m->busy);
+ vm_page_lock_queues ();
+ if (dirty)
+ {
+ PAGE_WAKEUP_DONE (m);
+ m->dirty = TRUE;
+ vm_page_insert (m, object, o);
+ }
+ else
+ vm_page_free (m);
+ vm_page_unlock_queues ();
+ o += PAGE_SIZE;
+ }
+ vm_object_unlock (object);
+ if (amt <= 0)
+ {
+ if (amt < 0)
+ err = linux_to_mach_error (amt);
+ break;
+ }
+ }
+
+ /* Delete kernel buffer. */
+ vm_map_remove (device_io_map, addr, addr + size);
+
+ assert (list_empty (&pages));
+
+out:
+ if (! err)
+ err = vm_map_copyin_object (object, 0, round_page (count), &copy);
+ if (! err)
+ {
+ *data = (io_buf_ptr_t) copy;
+ *bytes_read = count - resid;
+ }
+ else
+ vm_object_deallocate (object);
+ if (--bd->iocount == 0 && bd->want)
+ {
+ bd->want = 0;
+ thread_wakeup ((event_t) bd);
+ }
+ return err;
+}
+
+static io_return_t
+device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *status_count)
+{
+ struct block_data *bd = d;
+
+ switch (flavor)
+ {
+ case DEV_GET_SIZE:
+ if (disk_major (MAJOR (bd->dev)))
+ {
+ assert (bd->ds->gd);
+
+ if (bd->part >= 0)
+ {
+ struct disklabel *lp;
+
+ assert (bd->ds->labels);
+ lp = bd->ds->labels[MINOR (bd->dev)];
+ assert (lp);
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = lp->d_partitions[bd->part].p_size << 9);
+ }
+ else
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = bd->ds->gd->part[MINOR (bd->dev)].nr_sects << 9);
+ }
+ else
+ {
+ assert (blk_size[MAJOR (bd->dev)]);
+ (status[DEV_GET_SIZE_DEVICE_SIZE]
+ = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)]
+ << BLOCK_SIZE_BITS));
+ }
+ /* It would be nice to return the block size as reported by
+ the driver, but a lot of user level code assumes the sector
+ size to be 512. */
+ status[DEV_GET_SIZE_RECORD_SIZE] = 512;
+ /* Always return DEV_GET_SIZE_COUNT. This is what all native
+ Mach drivers do, and makes it possible to detect the absence
+ of the call by setting it to a different value on input. MiG
+ makes sure that we will never return more integers than the
+ user asked for. */
+ *status_count = DEV_GET_SIZE_COUNT;
+ break;
+
+ case DEV_GET_RECORDS:
+ if (disk_major (MAJOR (bd->dev)))
+ {
+ assert (bd->ds->gd);
+
+ if (bd->part >= 0)
+ {
+ struct disklabel *lp;
+
+ assert (bd->ds->labels);
+ lp = bd->ds->labels[MINOR (bd->dev)];
+ assert (lp);
+ (status[DEV_GET_RECORDS_DEVICE_RECORDS]
+ = lp->d_partitions[bd->part].p_size);
+ }
+ else
+ (status[DEV_GET_RECORDS_DEVICE_RECORDS]
+ = bd->ds->gd->part[MINOR (bd->dev)].nr_sects);
+ }
+ else
+ {
+ assert (blk_size[MAJOR (bd->dev)]);
+ status[DEV_GET_RECORDS_DEVICE_RECORDS]
+ = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)]
+ << (BLOCK_SIZE_BITS - 9));
+ }
+ /* It would be nice to return the block size as reported by
+ the driver, but a lot of user level code assumes the sector
+ size to be 512. */
+ status[DEV_GET_RECORDS_RECORD_SIZE] = 512;
+ /* Always return DEV_GET_RECORDS_COUNT. This is what all native
+ Mach drivers do, and makes it possible to detect the absence
+ of the call by setting it to a different value on input. MiG
+ makes sure that we will never return more integers than the
+ user asked for. */
+ *status_count = DEV_GET_RECORDS_COUNT;
+ break;
+
+ default:
+ return D_INVALID_OPERATION;
+ }
+
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_set_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t status_count)
+{
+ struct block_data *bd = d;
+
+ switch (flavor)
+ {
+ case BLKRRPART:
+ {
+ DECL_DATA;
+ INIT_DATA();
+ return (*bd->ds->fops->ioctl) (&td.inode, &td.file, flavor, 0);
+ }
+ }
+
+ return D_INVALID_OPERATION;
+}
+
+
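+/* Handle a no-senders notification on a block device port: the last
+   send right is gone, so force the device closed.  */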
+static void
+device_no_senders (mach_no_senders_notification_t *ns)
+{
+ device_t dev;
+
+ dev = dev_port_lookup((ipc_port_t) ns->not_header.msgh_remote_port);
+ assert(dev);
+ device_close_forced (dev->emul_data, 1);
+}
+
+struct device_emulation_ops linux_block_emulation_ops =
+{
+ NULL,
+ NULL,
+ dev_to_port,
+ device_open,
+ device_close,
+ device_write,
+ NULL,
+ device_read,
+ NULL,
+ device_set_status,
+ device_get_status,
+ NULL,
+ NULL,
+ device_no_senders,
+ NULL,
+ NULL
+};
diff --git a/linux/dev/glue/glue.h b/linux/dev/glue/glue.h
new file mode 100644
index 0000000..e94ff55
--- /dev/null
+++ b/linux/dev/glue/glue.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2011 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_DEV_GLUE_GLUE_H
+#define LINUX_DEV_GLUE_GLUE_H
+
+#include <vm/vm_types.h>
+#include <mach/machine/vm_types.h>
+
+extern int linux_auto_config;
+
+extern unsigned long alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
+extern void free_contig_mem (vm_page_t, unsigned);
+extern void init_IRQ (void);
+extern void restore_IRQ (void);
+extern void linux_kmem_init (void);
+extern void linux_net_emulation_init (void);
+extern void device_setup (void);
+extern void linux_timer_intr (void);
+extern void linux_sched_init (void);
+extern void pcmcia_init (void);
+extern void linux_soft_intr (void);
+extern int issig (void);
+extern int linux_to_mach_error (int);
+extern char *get_options(char *str, int *ints);
+
+#endif /* LINUX_DEV_GLUE_GLUE_H */
diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c
new file mode 100644
index 0000000..509229d
--- /dev/null
+++ b/linux/dev/glue/kmem.c
@@ -0,0 +1,589 @@
+/*
+ * Linux memory allocation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ *
+ */
+
+#include <sys/types.h>
+
+#include <mach/mach_types.h>
+#include <mach/vm_param.h>
+
+#include <kern/assert.h>
+#include <kern/kalloc.h>
+#include <kern/printf.h>
+
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+
+#include <linux/dev/glue/glue.h>
+
+/* Amount of memory to reserve for Linux memory allocator.
+ We reserve 64K chunks to stay within DMA limits.
+ Increase MEM_CHUNKS if the kernel is running out of memory. */
+#define MEM_CHUNK_SIZE (64 * 1024)
+#define MEM_CHUNKS 32
+#define MEM_DMA_LIMIT (16 * 1024 * 1024)
+
+/* Minimum amount that linux_kmalloc will allocate. */
+#define MIN_ALLOC 12
+
+#ifndef NBPW
+#define NBPW 32
+#endif
+
+/* Memory block header. */
+struct blkhdr
+{
+ unsigned short free; /* 1 if block is free */
+ unsigned short size; /* size of block */
+};
+
+/* This structure heads a page allocated by linux_kmalloc. */
+struct pagehdr
+{
+ unsigned size; /* size (multiple of PAGE_SIZE) */
+ struct pagehdr *next; /* next header in list */
+};
+
+/* This structure describes a memory chunk. */
+struct chunkhdr
+{
+ unsigned long start; /* start address */
+ unsigned long end; /* end address */
+ unsigned long bitmap; /* busy/free bitmap of pages */
+};
+
+/* Chunks from which pages are allocated. */
+static struct chunkhdr pages_free[MEM_CHUNKS];
+
+/* Memory list maintained by linux_kmalloc. */
+static struct pagehdr *memlist;
+
+/* Some statistics. */
+int num_block_coalesce = 0;
+int num_page_collect = 0;
+int linux_mem_avail;
+
+/* Initialize the Linux memory allocator. */
+void
+linux_kmem_init ()
+{
+ int i, j;
+ vm_page_t p, pages;
+
+ for (i = 0; i < MEM_CHUNKS; i++)
+ {
+ /* Allocate memory. */
+ pages_free[i].start = (unsigned long) alloc_contig_mem (MEM_CHUNK_SIZE,
+ MEM_DMA_LIMIT,
+ 0xffff, &pages);
+
+ assert (pages_free[i].start);
+ assert ((pages_free[i].start & 0xffff) == 0);
+
+ /* Sanity check: ensure pages are contiguous and within DMA limits. */
+ for (p = pages, j = 0; j < MEM_CHUNK_SIZE - PAGE_SIZE; j += PAGE_SIZE)
+ {
+ assert (p->phys_addr < MEM_DMA_LIMIT);
+ assert (p->phys_addr + PAGE_SIZE == (p + 1)->phys_addr);
+ p++;
+ }
+
+ pages_free[i].end = pages_free[i].start + MEM_CHUNK_SIZE;
+
+ /* Initialize free page bitmap. */
+ pages_free[i].bitmap = 0;
+ j = MEM_CHUNK_SIZE >> PAGE_SHIFT;
+ while (--j >= 0)
+ pages_free[i].bitmap |= 1 << j;
+ }
+
+ linux_mem_avail = (MEM_CHUNKS * MEM_CHUNK_SIZE) >> PAGE_SHIFT;
+}
+
+/* Return the number by which the page size should be
+ shifted such that the resulting value is >= SIZE. */
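+/* For example, with 4 KiB pages get_page_order (4096) is 0,
+   get_page_order (4097) is 1 and get_page_order (8192) is 1.  */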
+static unsigned long
+get_page_order (int size)
+{
+ unsigned long order;
+
+ for (order = 0; (PAGE_SIZE << order) < size; order++)
+ ;
+ return order;
+}
+
+#ifdef LINUX_DEV_DEBUG
+static void
+check_page_list (int line)
+{
+ unsigned size;
+ struct pagehdr *ph;
+ struct blkhdr *bh;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ if ((int) ph & PAGE_MASK)
+ panic ("%s:%d: page header not aligned", __FILE__, line);
+
+ size = 0;
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ size += bh->size + sizeof (struct blkhdr);
+ bh = (void *) (bh + 1) + bh->size;
+ }
+
+ if (size + sizeof (struct pagehdr) != ph->size)
+ panic ("%s:%d: memory list destroyed", __FILE__, line);
+ }
+}
+#else
+#define check_page_list(line)
+#endif
+
+/* Merge adjacent free blocks in the memory list. */
+static void
+coalesce_blocks ()
+{
+ struct pagehdr *ph;
+ struct blkhdr *bh, *bhp, *ebh;
+
+ num_block_coalesce++;
+
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ ebh = (struct blkhdr *) ((void *) ph + ph->size);
+ while (1)
+ {
+ /* Skip busy blocks. */
+ while (bh < ebh && !bh->free)
+ bh = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bh == ebh)
+ break;
+
+ /* Merge adjacent free blocks. */
+ while (1)
+ {
+ bhp = (struct blkhdr *) ((void *) (bh + 1) + bh->size);
+ if (bhp == ebh)
+ {
+ bh = bhp;
+ break;
+ }
+ if (!bhp->free)
+ {
+ bh = (struct blkhdr *) ((void *) (bhp + 1) + bhp->size);
+ break;
+ }
+ bh->size += bhp->size + sizeof (struct blkhdr);
+ }
+ }
+ }
+}
+
+/* Allocate SIZE bytes of memory.
+ The PRIORITY parameter specifies various flags
+ such as DMA, atomicity, etc. It is not used by Mach. */
+void *
+linux_kmalloc (unsigned int size, int priority)
+{
+ int order, coalesced = 0;
+ unsigned long flags;
+ struct pagehdr *ph;
+ struct blkhdr *bh, *new_bh;
+
+ if (size < MIN_ALLOC)
+ size = MIN_ALLOC;
+ else
+ size = (size + sizeof (int) - 1) & ~(sizeof (int) - 1);
+
+ assert (size <= (MEM_CHUNK_SIZE
+ - sizeof (struct pagehdr)
+ - sizeof (struct blkhdr)));
+
+ save_flags (flags);
+ cli ();
+
+again:
+ check_page_list (__LINE__);
+
+ /* Walk the page list and find the first free block with size
+ greater than or equal to the one required. */
+ for (ph = memlist; ph; ph = ph->next)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ while (bh < (struct blkhdr *) ((void *) ph + ph->size))
+ {
+ if (bh->free && bh->size >= size)
+ {
+ bh->free = 0;
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ /* Split the current block and create a new free block. */
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+ return bh + 1;
+ }
+ bh = (void *) (bh + 1) + bh->size;
+ }
+ }
+
+ check_page_list (__LINE__);
+
+ /* Allocation failed; coalesce free blocks and try again. */
+ if (!coalesced)
+ {
+ coalesce_blocks ();
+ coalesced = 1;
+ goto again;
+ }
+
+ /* Allocate more pages. */
+ order = get_page_order (size
+ + sizeof (struct pagehdr)
+ + sizeof (struct blkhdr));
+ ph = (struct pagehdr *) __get_free_pages (GFP_KERNEL, order, ~0UL);
+ if (!ph)
+ {
+ restore_flags (flags);
+ return NULL;
+ }
+
+ ph->size = PAGE_SIZE << order;
+ ph->next = memlist;
+ memlist = ph;
+ bh = (struct blkhdr *) (ph + 1);
+ bh->free = 0;
+ bh->size = ph->size - sizeof (struct pagehdr) - sizeof (struct blkhdr);
+ if (bh->size - size >= MIN_ALLOC + sizeof (struct blkhdr))
+ {
+ new_bh = (void *) (bh + 1) + size;
+ new_bh->free = 1;
+ new_bh->size = bh->size - size - sizeof (struct blkhdr);
+ bh->size = size;
+ }
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+ return bh + 1;
+}
+
+/* Free memory P previously allocated by linux_kmalloc. */
+void
+linux_kfree (void *p)
+{
+ unsigned long flags;
+ struct blkhdr *bh;
+ struct pagehdr *ph;
+
+ assert (((int) p & (sizeof (int) - 1)) == 0);
+
+ save_flags (flags);
+ cli ();
+
+ check_page_list (__LINE__);
+
+ for (ph = memlist; ph; ph = ph->next)
+ if (p >= (void *) ph && p < (void *) ph + ph->size)
+ break;
+
+ assert (ph);
+
+ bh = (struct blkhdr *) p - 1;
+
+ assert (!bh->free);
+ assert (bh->size >= MIN_ALLOC);
+ assert ((bh->size & (sizeof (int) - 1)) == 0);
+
+ bh->free = 1;
+
+ check_page_list (__LINE__);
+
+ restore_flags (flags);
+}
+
+/* Free any pages that are not in use.
+ Called by __get_free_pages when pages are running low. */
+static void
+collect_kmalloc_pages ()
+{
+ struct blkhdr *bh;
+ struct pagehdr *ph, **prev_ph;
+
+ check_page_list (__LINE__);
+
+ coalesce_blocks ();
+
+ check_page_list (__LINE__);
+
+ ph = memlist;
+ prev_ph = &memlist;
+ while (ph)
+ {
+ bh = (struct blkhdr *) (ph + 1);
+ if (bh->free && (void *) (bh + 1) + bh->size == (void *) ph + ph->size)
+ {
+ *prev_ph = ph->next;
+ free_pages ((unsigned long) ph, get_page_order (ph->size));
+ ph = *prev_ph;
+ }
+ else
+ {
+ prev_ph = &ph->next;
+ ph = ph->next;
+ }
+ }
+
+ check_page_list (__LINE__);
+}
+
+/* Allocate ORDER + 1 physically contiguous pages.
+ PRIORITY and DMA are not used in Mach.
+
+ XXX: This needs to be dynamic. To do that we need to make
+ the Mach page manipulation routines interrupt safe and they
+   must provide machine dependent hooks.  */
+unsigned long
+__get_free_pages (int priority, unsigned long order, int dma)
+{
+ int i, pages_collected = 0;
+ unsigned bits, off, j, len;
+ unsigned long flags;
+
+ assert ((PAGE_SIZE << order) <= MEM_CHUNK_SIZE);
+
+ /* Construct bitmap of contiguous pages. */
+ bits = 0;
+ j = 0;
+ len = 0;
+ while (len < (PAGE_SIZE << order))
+ {
+ bits |= 1 << j++;
+ len += PAGE_SIZE;
+ }
+
+ save_flags (flags);
+ cli ();
+again:
+
+ /* Search each chunk for the required number of contiguous pages. */
+ for (i = 0; i < MEM_CHUNKS; i++)
+ {
+ off = 0;
+ j = bits;
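+      /* Slide the page-run mask across the chunk's free bitmap one
+	 page at a time, looking for a window that is entirely free.  */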
+ while (MEM_CHUNK_SIZE - off >= (PAGE_SIZE << order))
+ {
+ if ((pages_free[i].bitmap & j) == j)
+ {
+ pages_free[i].bitmap &= ~j;
+ linux_mem_avail -= order + 1;
+ restore_flags (flags);
+ return pages_free[i].start + off;
+ }
+ j <<= 1;
+ off += PAGE_SIZE;
+ }
+ }
+
+ /* Allocation failed; collect kmalloc and buffer pages
+ and try again. */
+ if (!pages_collected)
+ {
+ num_page_collect++;
+ collect_kmalloc_pages ();
+ pages_collected = 1;
+ goto again;
+ }
+
+ printf ("%s:%d: __get_free_pages: ran out of pages\n", __FILE__, __LINE__);
+
+ restore_flags (flags);
+ return 0;
+}
+
+/* Free ORDER + 1 physically contiguous
+   pages starting at address ADDR. */
+void
+free_pages (unsigned long addr, unsigned long order)
+{
+ int i;
+ unsigned bits, len, j;
+ unsigned long flags;
+
+ assert ((addr & PAGE_MASK) == 0);
+
+ for (i = 0; i < MEM_CHUNKS; i++)
+ if (addr >= pages_free[i].start && addr < pages_free[i].end)
+ break;
+
+ assert (i < MEM_CHUNKS);
+
+  /* Construct bitmap of contiguous pages. */
+ len = 0;
+ j = 0;
+ bits = 0;
+ while (len < (PAGE_SIZE << order))
+ {
+ bits |= 1 << j++;
+ len += PAGE_SIZE;
+ }
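+  /* Align the mask with the pages' position inside the chunk before
+     marking them free again.  */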
+ bits <<= (addr - pages_free[i].start) >> PAGE_SHIFT;
+
+ save_flags (flags);
+ cli ();
+
+ assert ((pages_free[i].bitmap & bits) == 0);
+
+ pages_free[i].bitmap |= bits;
+ linux_mem_avail += order + 1;
+ restore_flags (flags);
+}
+
+
+/* vmalloc management routines. */
+struct vmalloc_struct
+{
+ struct vmalloc_struct *prev;
+ struct vmalloc_struct *next;
+ vm_offset_t start;
+ vm_size_t size;
+};
+
+static struct vmalloc_struct
+vmalloc_list = { &vmalloc_list, &vmalloc_list, 0, 0 };
+
+static inline void
+vmalloc_list_insert (vm_offset_t start, vm_size_t size)
+{
+ struct vmalloc_struct *p;
+
+ p = (struct vmalloc_struct *) kalloc (sizeof (struct vmalloc_struct));
+ if (p == NULL)
+ panic ("kernel memory is exhausted");
+
+ p->prev = vmalloc_list.prev;
+ p->next = &vmalloc_list;
+ vmalloc_list.prev->next = p;
+ vmalloc_list.prev = p;
+
+ p->start = start;
+ p->size = size;
+}
+
+static struct vmalloc_struct *
+vmalloc_list_lookup (vm_offset_t start)
+{
+ struct vmalloc_struct *p;
+
+ for (p = vmalloc_list.next; p != &vmalloc_list; p = p->next)
+ {
+ if (p->start == start)
+ return p;
+ }
+
+ return NULL;
+}
+
+static inline void
+vmalloc_list_remove (struct vmalloc_struct *p)
+{
+ p->next->prev = p->prev;
+ p->prev->next = p->next;
+
+ kfree ((vm_offset_t) p, sizeof (struct vmalloc_struct));
+}
+
+/* Allocate SIZE bytes of memory. The pages need not be contiguous. */
+void *
+vmalloc (unsigned long size)
+{
+ kern_return_t ret;
+ vm_offset_t addr;
+
+ ret = kmem_alloc_wired (kernel_map, &addr, round_page (size));
+ if (ret != KERN_SUCCESS)
+ return NULL;
+
+ vmalloc_list_insert (addr, round_page (size));
+ return (void *) addr;
+}
+
+/* Free vmalloc'ed and vremap'ed virtual address space. */
+void
+vfree (void *addr)
+{
+ struct vmalloc_struct *p;
+
+ p = vmalloc_list_lookup ((vm_offset_t) addr);
+ if (!p)
+ panic ("vmalloc_list_lookup failure");
+
+ kmem_free (kernel_map, (vm_offset_t) addr, p->size);
+ vmalloc_list_remove (p);
+}
+
+unsigned long
+vmtophys (void *addr)
+{
+ return kvtophys((vm_offset_t) addr);
+}
+
+/* XXX: Quick hacking. */
+/* Remap physical address into virtual address. */
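+/* A hypothetical use: a driver could map a page of device registers
+   with `regs = vremap (phys_base, PAGE_SIZE)' and release the mapping
+   later with `vfree (regs)'.  */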
+
+#include <vm/pmap.h>
+
+void *
+vremap (unsigned long offset, unsigned long size)
+{
+ vm_offset_t addr;
+ kern_return_t ret;
+
+ assert(page_aligned(offset));
+
+ ret = kmem_valloc (kernel_map, &addr, round_page (size));
+ if (ret != KERN_SUCCESS)
+ return NULL;
+
+ (void) pmap_map_bd (addr, offset, offset + round_page (size),
+ VM_PROT_READ | VM_PROT_WRITE);
+
+ vmalloc_list_insert (addr, round_page (size));
+ return (void *) addr;
+}
diff --git a/linux/dev/glue/misc.c b/linux/dev/glue/misc.c
new file mode 100644
index 0000000..5646e5e
--- /dev/null
+++ b/linux/dev/glue/misc.c
@@ -0,0 +1,248 @@
+/*
+ * Miscellaneous routines and data for Linux emulation.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/fs/proc/scsi.c
+ * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ *
+ * The original version was derived from linux/fs/proc/net.c,
+ * which is Copyright (C) 1991, 1992 Linus Torvalds.
+ * Much has been rewritten, but some of the code still remains.
+ *
+ * /proc/scsi directory handling functions
+ *
+ * last change: 95/07/04
+ *
+ * Initial version: March '95
+ * 95/05/15 Added subdirectories for each driver and show every
+ * registered HBA as a single file.
+ * 95/05/30 Added rudimentary write support for parameter passing
+ * 95/07/04 Fixed bugs in directory handling
+ * 95/09/13 Update to support the new proc-dir tree
+ *
+ * TODO: Improve support to write to the driver files
+ * Add some more comments
+ */
+
+/*
+ * linux/fs/buffer.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <mach/vm_param.h>
+#include <kern/thread.h>
+#include <kern/printf.h>
+#include <kern/mach_host.server.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <device/device_types.h>
+
+#define MACH_INCLUDE
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/blk.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel_stat.h>
+#include <linux/dev/glue/glue.h>
+
+int (*dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout) = 0;
+
+struct kernel_stat kstat;
+
+int
+linux_to_mach_error (int err)
+{
+ switch (err)
+ {
+ case 0:
+ return D_SUCCESS;
+
+ case -EPERM:
+ return D_INVALID_OPERATION;
+
+ case -EIO:
+ return D_IO_ERROR;
+
+ case -ENXIO:
+ return D_NO_SUCH_DEVICE;
+
+ case -EACCES:
+ return D_INVALID_OPERATION;
+
+ case -EFAULT:
+ return D_INVALID_SIZE;
+
+ case -EBUSY:
+ return D_ALREADY_OPEN;
+
+ case -EINVAL:
+ return D_INVALID_SIZE;
+
+ case -EROFS:
+ return D_READ_ONLY;
+
+ case -EWOULDBLOCK:
+ return D_WOULD_BLOCK;
+
+ case -ENOMEM:
+ return D_NO_MEMORY;
+
+ default:
+ printf ("linux_to_mach_error: unknown code %d\n", err);
+ return D_IO_ERROR;
+ }
+}
+
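+/* Stand-in for Linux's pending-signal check: report whether the
+   current thread's wait was interrupted rather than awakened
+   normally.  */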
+int
+issig ()
+{
+ if (!current_thread())
+ return 0;
+ return current_thread ()->wait_result != THREAD_AWAKENED;
+}
+
+int
+block_fsync (struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
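+/* Check that the current task's address space allows access of type RW
+   (VERIFY_READ or VERIFY_WRITE) to SIZE bytes at P.  Returns 0 on
+   success or -EFAULT, following the Linux convention.  */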
+int
+verify_area (int rw, const void *p, unsigned long size)
+{
+ vm_prot_t prot = (rw == VERIFY_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
+ vm_offset_t addr = trunc_page ((vm_offset_t) p);
+ vm_size_t len = round_page ((vm_size_t) size);
+ vm_map_entry_t entry;
+
+ vm_map_lock_read (current_map ());
+
+ while (1)
+ {
+ if (!vm_map_lookup_entry (current_map (), addr, &entry)
+ || (entry->protection & prot) != prot)
+ {
+ vm_map_unlock_read (current_map ());
+ return -EFAULT;
+ }
+ if (entry->vme_end - entry->vme_start >= len)
+ break;
+ len -= entry->vme_end - entry->vme_start;
+ addr += entry->vme_end - entry->vme_start;
+ }
+
+ vm_map_unlock_read (current_map ());
+ return 0;
+}
+
+/*
+ * Print device name (in decimal, hexadecimal or symbolic) -
+ * at present hexadecimal only.
+ * Note: returns pointer to static data!
+ */
+char *
+kdevname (kdev_t dev)
+{
+ static char buffer[32];
+ linux_sprintf (buffer, "%02x:%02x", MAJOR (dev), MINOR (dev));
+ return buffer;
+}
+
+/* RO fail safe mechanism */
+
+static long ro_bits[MAX_BLKDEV][8];
+
+int
+is_read_only (kdev_t dev)
+{
+ int minor, major;
+
+ major = MAJOR (dev);
+ minor = MINOR (dev);
+ if (major < 0 || major >= MAX_BLKDEV)
+ return 0;
+ return ro_bits[major][minor >> 5] & (1 << (minor & 31));
+}
+
+void
+set_device_ro (kdev_t dev, int flag)
+{
+ int minor, major;
+
+ major = MAJOR (dev);
+ minor = MINOR (dev);
+ if (major < 0 || major >= MAX_BLKDEV)
+ return;
+ if (flag)
+ ro_bits[major][minor >> 5] |= 1 << (minor & 31);
+ else
+ ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
+}
+
+struct proc_dir_entry proc_scsi;
+struct inode_operations proc_scsi_inode_operations;
+struct proc_dir_entry proc_net;
+struct inode_operations proc_net_inode_operations;
+
+int
+proc_register (struct proc_dir_entry *xxx1, struct proc_dir_entry *xxx2)
+{
+ return 0;
+}
+
+int
+proc_unregister (struct proc_dir_entry *xxx1, int xxx2)
+{
+ return 0;
+}
+
+void
+add_blkdev_randomness (int major)
+{
+}
+
+void
+do_gettimeofday (struct timeval *tv)
+{
+ /*
+ * XXX: The first argument should be mach_host_self (), but that's too
+ * expensive, and the host argument is not used by host_get_time (),
+ * only checked not to be HOST_NULL.
+ */
+ time_value64_t tv64;
+ host_get_time64 ((host_t) 1, &tv64);
+ tv->tv_sec = tv64.seconds;
+ tv->tv_usec = tv64.nanoseconds / 1000;
+}
+
+int
+dev_get_info (char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ return 0;
+}
diff --git a/linux/dev/glue/net.c b/linux/dev/glue/net.c
new file mode 100644
index 0000000..dd80622
--- /dev/null
+++ b/linux/dev/glue/net.c
@@ -0,0 +1,670 @@
+/*
+ * Linux network driver support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Ethernet-type device handling.
+ *
+ * Version: @(#)eth.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Mr Linux : Arp problems
+ * Alan Cox : Generic queue tidyup (very tiny here)
+ * Alan Cox : eth_header ntohs should be htons
+ * Alan Cox : eth_rebuild_header missing an htons and
+ * minor other things.
+ * Tegge : Arp bug fixes.
+ * Florian : Removed many unnecessary functions, code cleanup
+ * and changes for new arp and skbuff.
+ * Alan Cox : Redid header building to reflect new format.
+ * Alan Cox : ARP only when compiled with CONFIG_INET
+ * Greg Page : 802.2 and SNAP stuff.
+ * Alan Cox : MAC layer pointers/new format.
+ * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
+ * Alan Cox : Protect against forwarding explosions with
+ * older network drivers and IFF_ALLMULTI
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <sys/types.h>
+#include <machine/spl.h>
+#include <machine/vm_param.h>
+
+#include <mach/mach_types.h>
+#include <mach/kern_return.h>
+#include <mach/mig_errors.h>
+#include <mach/port.h>
+#include <mach/vm_param.h>
+#include <mach/notify.h>
+
+#include <kern/kalloc.h>
+#include <kern/printf.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/if_hdr.h>
+#include <device/if_ether.h>
+#include <device/if_hdr.h>
+#include <device/net_io.h>
+#include <device/device_reply.user.h>
+#include <device/device_emul.h>
+#include <device/ds_routines.h>
+
+#define MACH_INCLUDE
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <linux/dev/glue/glue.h>
+
+/* One of these is associated with each instance of a device. */
+struct net_data
+{
+ ipc_port_t port; /* device port */
+ struct ifnet ifnet; /* Mach ifnet structure (needed for filters) */
+ struct device device; /* generic device structure */
+ struct linux_device *dev; /* Linux network device structure */
+};
+
+/* List of sk_buffs waiting to be freed. */
+static struct sk_buff_head skb_done_list;
+
+/* Forward declarations. */
+
+extern struct device_emulation_ops linux_net_emulation_ops;
+
+static int print_packet_size = 0;
+
+/* Linux kernel network support routines. */
+
+/* Requeue packet SKB for transmission after the interface DEV
+ has timed out. The priority of the packet is PRI.
+ In Mach, we simply drop the packet like the native drivers. */
+void
+dev_queue_xmit (struct sk_buff *skb, struct linux_device *dev, int pri)
+{
+ dev_kfree_skb (skb, FREE_WRITE);
+}
+
+/* Close the device DEV. */
+int
+dev_close (struct linux_device *dev)
+{
+ return 0;
+}
+
+/* Network software interrupt handler. */
+void
+net_bh (void)
+{
+ int len;
+ struct sk_buff *skb;
+ struct linux_device *dev;
+
+ /* Start transmission on interfaces. */
+ for (dev = dev_base; dev; dev = dev->next)
+ {
+ if (dev->base_addr && dev->base_addr != 0xffe0)
+ while (1)
+ {
+ skb = skb_dequeue (&dev->buffs[0]);
+ if (skb)
+ {
+ len = skb->len;
+ if ((*dev->hard_start_xmit) (skb, dev))
+ {
+ skb_queue_head (&dev->buffs[0], skb);
+ mark_bh (NET_BH);
+ break;
+ }
+ else if (print_packet_size)
+ printf ("net_bh: length %d\n", len);
+ }
+ else
+ break;
+ }
+ }
+}
+
+/* Free all sk_buffs on the done list.
+ This routine is called by the iodone thread in ds_routines.c. */
+void
+free_skbuffs ()
+{
+ struct sk_buff *skb;
+
+ while (1)
+ {
+ skb = skb_dequeue (&skb_done_list);
+ if (skb)
+ {
+ if (skb->copy)
+ {
+ vm_map_copy_discard (skb->copy);
+ skb->copy = NULL;
+ }
+ if (IP_VALID (skb->reply))
+ {
+ ds_device_write_reply (skb->reply, skb->reply_type, 0, skb->len);
+ skb->reply = IP_NULL;
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+ }
+ else
+ break;
+ }
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space. */
+struct sk_buff *
+alloc_skb (unsigned int size, int priority)
+{
+ return dev_alloc_skb (size);
+}
+
+/* Free SKB. */
+void
+kfree_skb (struct sk_buff *skb, int priority)
+{
+ dev_kfree_skb (skb, priority);
+}
+
+/* Allocate an sk_buff with SIZE bytes of data space. */
+struct sk_buff *
+dev_alloc_skb (unsigned int size)
+{
+ struct sk_buff *skb;
+ unsigned char *bptr;
+ int len = size;
+
+ size = (size + 15) & ~15;
+ size += sizeof (struct sk_buff);
+
+ bptr = linux_kmalloc (size, GFP_KERNEL);
+ if (bptr == NULL)
+ return NULL;
+
+ /* XXX: In Mach, a sk_buff is located at the head,
+ while it's located at the tail in Linux. */
+ skb = bptr;
+ skb->dev = NULL;
+ skb->reply = IP_NULL;
+ skb->copy = NULL;
+ skb->len = 0;
+ skb->prev = skb->next = NULL;
+ skb->list = NULL;
+ skb->data = bptr + sizeof (struct sk_buff);
+ skb->tail = skb->data;
+ skb->head = skb->data;
+ skb->end = skb->data + len;
+
+ return skb;
+}
+
+/* Free the sk_buff SKB. */
+void
+dev_kfree_skb (struct sk_buff *skb, int mode)
+{
+ unsigned flags;
+
+ /* Queue sk_buff on done list if there is a
+ page list attached or we need to send a reply.
+     Wake up the iodone thread to process the list. */
+ if (skb->copy || IP_VALID (skb->reply))
+ {
+ skb_queue_tail (&skb_done_list, skb);
+ save_flags (flags);
+ thread_wakeup ((event_t) & io_done_list);
+ restore_flags (flags);
+ return;
+ }
+ linux_kfree (skb);
+}
+
+/* Accept packet SKB received on an interface. */
+void
+netif_rx (struct sk_buff *skb)
+{
+ ipc_kmsg_t kmsg;
+ struct ether_header *eh;
+ struct packet_header *ph;
+ struct linux_device *dev = skb->dev;
+
+ assert (skb != NULL);
+
+ if (print_packet_size)
+ printf ("netif_rx: length %ld\n", skb->len);
+
+ /* Allocate a kernel message buffer. */
+ kmsg = net_kmsg_get ();
+ if (!kmsg)
+ {
+ dev_kfree_skb (skb, FREE_READ);
+ return;
+ }
+
+ /* Copy packet into message buffer. */
+ eh = (struct ether_header *) (net_kmsg (kmsg)->header);
+ ph = (struct packet_header *) (net_kmsg (kmsg)->packet);
+ memcpy (eh, skb->data, sizeof (struct ether_header));
+
+ /* packet is prefixed with a struct packet_header,
+ see include/device/net_status.h. */
+ memcpy (ph + 1, skb->data + sizeof (struct ether_header),
+ skb->len - sizeof (struct ether_header));
+ ph->type = eh->ether_type;
+ ph->length = (skb->len - sizeof (struct ether_header)
+ + sizeof (struct packet_header));
+
+ dev_kfree_skb (skb, FREE_READ);
+
+ net_kmsg(kmsg)->sent = FALSE; /* Mark packet as received. */
+
+ /* Pass packet up to the microkernel. */
+ net_packet (&dev->net_data->ifnet, kmsg,
+ ph->length, ethernet_priority (kmsg));
+}
+
+/* Mach device interface routines. */
+
+/* Return a send right associated with network device ND. */
+static ipc_port_t
+dev_to_port (void *nd)
+{
+ return (nd
+ ? ipc_port_make_send (((struct net_data *) nd)->port)
+ : IP_NULL);
+}
+
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, char *name, device_t *devp)
+{
+ io_return_t err = D_SUCCESS;
+ ipc_port_t notify;
+ struct ifnet *ifp;
+ struct linux_device *dev;
+ struct net_data *nd;
+
+ /* Search for the device. */
+ for (dev = dev_base; dev; dev = dev->next)
+ if (dev->base_addr
+ && dev->base_addr != 0xffe0
+ && !strcmp (name, dev->name))
+ break;
+ if (!dev)
+ return D_NO_SUCH_DEVICE;
+
+ /* Allocate and initialize device data if this is the first open. */
+ nd = dev->net_data;
+ if (!nd)
+ {
+ dev->net_data = nd = ((struct net_data *)
+ kalloc (sizeof (struct net_data)));
+ if (!nd)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+ nd->dev = dev;
+ nd->device.emul_data = nd;
+ nd->device.emul_ops = &linux_net_emulation_ops;
+ nd->port = ipc_port_alloc_kernel ();
+ if (nd->port == IP_NULL)
+ {
+ err = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ ipc_kobject_set (nd->port, (ipc_kobject_t) & nd->device, IKOT_DEVICE);
+ notify = ipc_port_make_sonce (nd->port);
+ ip_lock (nd->port);
+ ipc_port_nsrequest (nd->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+
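+      /* Fill in the Mach ifnet so that the packet filter machinery
+	 can use the parameters of the Linux device.  */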
+ ifp = &nd->ifnet;
+ ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
+ ifp->if_flags = IFF_UP | IFF_RUNNING;
+ ifp->if_mtu = dev->mtu;
+ ifp->if_header_size = dev->hard_header_len;
+ ifp->if_header_format = dev->type;
+ ifp->if_address_size = dev->addr_len;
+ ifp->if_address = dev->dev_addr;
+ if_init_queues (ifp);
+
+ if (dev->open)
+ {
+ if ((*dev->open) (dev))
+ err = D_NO_SUCH_DEVICE;
+ }
+
+ out:
+ if (err)
+ {
+ if (nd)
+ {
+ if (nd->port != IP_NULL)
+ {
+ ipc_kobject_set (nd->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel (nd->port);
+ }
+ kfree ((vm_offset_t) nd, sizeof (struct net_data));
+ nd = NULL;
+ dev->net_data = NULL;
+ }
+ }
+ else
+ {
+ /* IPv6 heavily relies on multicasting (especially router and
+ neighbor solicits and advertisements), so enable reception of
+ those multicast packets by setting `LINUX_IFF_ALLMULTI'. */
+ dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI;
+ skb_queue_head_init (&dev->buffs[0]);
+
+ if (dev->set_multicast_list)
+ dev->set_multicast_list (dev);
+ }
+ if (IP_VALID (reply_port))
+ ds_device_open_reply (reply_port, reply_port_type,
+ err, dev_to_port (nd));
+ return MIG_NO_REPLY;
+ }
+
+ *devp = &nd->device;
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_write (void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t bn, io_buf_ptr_t data, unsigned int count,
+ int *bytes_written)
+{
+ int s;
+ vm_map_copy_t copy = (vm_map_copy_t) data;
+ char *map_data;
+ vm_offset_t map_addr;
+ vm_size_t map_size;
+ struct net_data *nd = d;
+ struct linux_device *dev = nd->dev;
+ struct sk_buff *skb;
+ kern_return_t kr;
+
+ if (count == 0 || count > dev->mtu + dev->hard_header_len)
+ return D_INVALID_SIZE;
+
+ /* Allocate a sk_buff. */
+ skb = dev_alloc_skb (count);
+ if (!skb)
+ return D_NO_MEMORY;
+
+ /* Map user data. */
+ kr = kmem_io_map_copyout(device_io_map, (vm_offset_t *)&map_data,
+ &map_addr, &map_size, copy, count);
+
+ if (kr) {
+ dev_kfree_skb (skb, FREE_WRITE);
+ return D_NO_MEMORY;
+ }
+
+ /* XXX The underlying physical pages of the mapping could be highmem,
+ for which drivers require the use of a bounce buffer. */
+ memcpy (skb->data, map_data, count);
+ kmem_io_map_deallocate (device_io_map, map_addr, map_size);
+ vm_map_copy_discard (copy);
+
+ skb->len = count;
+ skb->head = skb->data;
+ skb->tail = skb->data + skb->len;
+ skb->end = skb->tail;
+ skb->dev = dev;
+ skb->reply = reply_port;
+ skb->reply_type = reply_port_type;
+
+ /* Queue packet for transmission and schedule a software interrupt. */
+ s = splimp ();
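+  /* If packets are already queued (keep them in order) or the driver
+     refuses the packet right now, queue it and let net_bh retry from
+     the software interrupt.  */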
+ if (dev->buffs[0].next != (struct sk_buff *) &dev->buffs[0]
+ || (*dev->hard_start_xmit) (skb, dev))
+ {
+ __skb_queue_tail (&dev->buffs[0], skb);
+ mark_bh (NET_BH);
+ }
+ splx (s);
+
+ /* Send packet to filters. */
+ {
+ struct packet_header *packet;
+ struct ether_header *header;
+ ipc_kmsg_t kmsg;
+
+ kmsg = net_kmsg_get ();
+
+ if (kmsg != IKM_NULL)
+ {
+ /* Suitable for Ethernet only. */
+ header = (struct ether_header *) (net_kmsg (kmsg)->header);
+ packet = (struct packet_header *) (net_kmsg (kmsg)->packet);
+ memcpy (header, skb->data, sizeof (struct ether_header));
+
+ /* packet is prefixed with a struct packet_header,
+ see include/device/net_status.h. */
+ memcpy (packet + 1, skb->data + sizeof (struct ether_header),
+ skb->len - sizeof (struct ether_header));
+ packet->length = skb->len - sizeof (struct ether_header)
+ + sizeof (struct packet_header);
+ packet->type = header->ether_type;
+ net_kmsg (kmsg)->sent = TRUE; /* Mark packet as sent. */
+ s = splimp ();
+ net_packet (&dev->net_data->ifnet, kmsg, packet->length,
+ ethernet_priority (kmsg));
+ splx (s);
+ }
+ }
+
+ return MIG_NO_REPLY;
+}
+
+
+static io_return_t
+device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t *count)
+{
+ if (flavor == NET_FLAGS)
+ {
+ struct net_data *net = (struct net_data *) d;
+
+ if (*count != 1)
+ return D_INVALID_SIZE;
+
+ status[0] = net->dev->flags;
+ return D_SUCCESS;
+ }
+
+ if(flavor >= SIOCIWFIRST && flavor <= SIOCIWLAST)
+ {
+ /* handle wireless ioctl */
+ if(! IW_IS_GET(flavor))
+ return D_INVALID_OPERATION;
+
+ if(*count * sizeof(int) < sizeof(struct ifreq))
+ return D_INVALID_OPERATION;
+
+ struct net_data *nd = d;
+ struct linux_device *dev = nd->dev;
+
+ if(! dev->do_ioctl)
+ return D_INVALID_OPERATION;
+
+ int result;
+
+ if (flavor == SIOCGIWRANGE || flavor == SIOCGIWENCODE
+ || flavor == SIOCGIWESSID || flavor == SIOCGIWNICKN
+ || flavor == SIOCGIWSPY)
+ {
+ /*
+ * These ioctls require an `iw_point' as their argument (i.e.
+	   * they want to return some data to userspace).
+ * Therefore supply some sane values and carry the data back
+ * to userspace right behind the `struct iwreq'.
+ */
+ struct iw_point *iwp = &((struct iwreq *) status)->u.data;
+ iwp->length = *count * sizeof (dev_status_t) - sizeof (struct ifreq);
+ iwp->pointer = (void *) status + sizeof (struct ifreq);
+
+ result = dev->do_ioctl (dev, (struct ifreq *) status, flavor);
+
+ *count = ((sizeof (struct ifreq) + iwp->length)
+ / sizeof (dev_status_t));
+ if (iwp->length % sizeof (dev_status_t))
+ (*count) ++;
+ }
+ else
+ {
+ *count = sizeof(struct ifreq) / sizeof(int);
+ result = dev->do_ioctl(dev, (struct ifreq *) status, flavor);
+ }
+
+ return result ? D_IO_ERROR : D_SUCCESS;
+ }
+ else
+ {
+ /* common get_status request */
+ return net_getstat (&((struct net_data *) d)->ifnet, flavor,
+ status, count);
+ }
+}
+
+
+static io_return_t
+device_set_status(void *d, dev_flavor_t flavor, dev_status_t status,
+ mach_msg_type_number_t count)
+{
+ if (flavor == NET_FLAGS)
+ {
+ if (count != 1)
+ return D_INVALID_SIZE;
+
+ short flags = status[0];
+ struct net_data *net = (struct net_data *) d;
+
+ dev_change_flags (net->dev, flags);
+
+ /* Change the flags of the Mach device, too. */
+ net->ifnet.if_flags = net->dev->flags;
+ return D_SUCCESS;
+ }
+
+ if(flavor < SIOCIWFIRST || flavor > SIOCIWLAST)
+ return D_INVALID_OPERATION;
+
+ if(! IW_IS_SET(flavor))
+ return D_INVALID_OPERATION;
+
+ if(count * sizeof(int) < sizeof(struct ifreq))
+ return D_INVALID_OPERATION;
+
+ struct net_data *nd = d;
+ struct linux_device *dev = nd->dev;
+
+ if(! dev->do_ioctl)
+ return D_INVALID_OPERATION;
+
+ if((flavor == SIOCSIWENCODE || flavor == SIOCSIWESSID
+ || flavor == SIOCSIWNICKN || flavor == SIOCSIWSPY)
+ && ((struct iwreq *) status)->u.data.pointer)
+ {
+ struct iw_point *iwp = &((struct iwreq *) status)->u.data;
+
+      /* Safety check: make sure the status array is long enough. */
+ if(count * sizeof(int) < sizeof(struct ifreq) + iwp->length)
+ return D_INVALID_OPERATION;
+
+      /* Make sure iwp->pointer points to the correct address. */
+ if(iwp->pointer) iwp->pointer = (void *) status + sizeof(struct ifreq);
+ }
+
+ int result = dev->do_ioctl(dev, (struct ifreq *) status, flavor);
+ return result ? D_IO_ERROR : D_SUCCESS;
+}
+
+
+static io_return_t
+device_set_filter (void *d, ipc_port_t port, int priority,
+ filter_t * filter, unsigned filter_count)
+{
+ return net_set_filter (&((struct net_data *) d)->ifnet,
+ port, priority, filter, filter_count);
+}
+
+struct device_emulation_ops linux_net_emulation_ops =
+{
+ NULL,
+ NULL,
+ dev_to_port,
+ device_open,
+ NULL,
+ device_write,
+ NULL,
+ NULL,
+ NULL,
+ device_set_status,
+ device_get_status,
+ device_set_filter,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+/* Do any initialization required for network devices. */
+void
+linux_net_emulation_init ()
+{
+ skb_queue_head_init (&skb_done_list);
+}
diff --git a/linux/dev/include/ahci.h b/linux/dev/include/ahci.h
new file mode 100644
index 0000000..31977b6
--- /dev/null
+++ b/linux/dev/include/ahci.h
@@ -0,0 +1,268 @@
+#ifndef _GNUMACH_AHCI_H
+#define _GNUMACH_AHCI_H
+extern void ahci_probe_pci(void);
+
+/* From linux 3.9's drivers/ata/ahci.h */
+
+/*
+ * ahci.h - Common AHCI SATA definitions and declarations
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+enum {
+ AHCI_MAX_PORTS = 32,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
+ AHCI_RX_FIS_SZ = 256,
+ AHCI_CMD_TBL_CDB = 0x40,
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+ AHCI_RX_FIS_SZ,
+ AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
+ AHCI_CMD_TBL_AR_SZ +
+ (AHCI_RX_FIS_SZ * 16),
+ AHCI_IRQ_ON_SG = (1 << 31),
+ AHCI_CMD_ATAPI = (1 << 5),
+ AHCI_CMD_WRITE = (1 << 6),
+ AHCI_CMD_PREFETCH = (1 << 7),
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
+ RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+ HOST_CTL = 0x04, /* global host control */
+ HOST_IRQ_STAT = 0x08, /* interrupt status */
+ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
+ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
+ HOST_EM_LOC = 0x1c, /* Enclosure Management location */
+ HOST_EM_CTL = 0x20, /* Enclosure Management Control */
+ HOST_CAP2 = 0x24, /* host capabilities, extended */
+
+ /* HOST_CTL bits */
+ HOST_RESET = (1 << 0), /* reset controller; self-clear */
+ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+
+ /* HOST_CAP bits */
+ HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
+ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
+ HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
+ HOST_CAP_PART = (1 << 13), /* Partial state capable */
+ HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
+ HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
+ HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
+ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
+ HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
+ HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_LED = (1 << 25), /* Supports activity LED */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+ /* HOST_CAP2 bits */
+ HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
+ HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
+ HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
+ HOST_CAP2_SDS = (1 << 3), /* Support device sleep */
+ HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */
+ HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */
+
+ /* registers for each SATA port */
+ PORT_LST_ADDR = 0x00, /* command list DMA addr */
+ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
+ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
+ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
+ PORT_IRQ_STAT = 0x10, /* interrupt status */
+ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
+ PORT_CMD = 0x18, /* port command */
+ PORT_TFDATA = 0x20, /* taskfile data */
+ PORT_SIG = 0x24, /* device TF signature */
+ PORT_CMD_ISSUE = 0x38, /* command issue */
+ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
+ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
+ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
+ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+ PORT_FBS = 0x40, /* FIS-based Switching */
+ PORT_DEVSLP = 0x44, /* device sleep */
+
+ /* PORT_IRQ_{STAT,MASK} bits */
+ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
+ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
+ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
+ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_PHYRDY |
+ PORT_IRQ_UNK_FIS |
+ PORT_IRQ_BAD_PMP,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+ /* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
+ PORT_CMD_PMP = (1 << 17), /* PMP attached */
+ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = (1 << 3), /* Command list override */
+ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
+ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
+ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+
+ /* PORT_FBS bits */
+ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
+ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
+ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
+ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
+ PORT_FBS_SDE = (1 << 2), /* FBS single device error */
+ PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
+ PORT_FBS_EN = (1 << 0), /* Enable FBS */
+
+ /* PORT_DEVSLP bits */
+ PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */
+ PORT_DEVSLP_DM_MASK = (0xf << 25), /* DITO multiplier mask */
+ PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */
+ PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */
+ PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */
+ PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */
+ PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */
+
+ /* hpriv->flags bits */
+
+#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
+
+ AHCI_HFLAG_NO_NCQ = (1 << 0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
+ link offline */
+ AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
+ AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+ AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
+ AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
+ port start (wait until
+ error-handling stage) */
+ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
+
+ /* ap->flags bits */
+
+ /*
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
+ */
+
+ ICH_MAP = 0x90, /* ICH MAP register */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
+ /* em_ctl bits */
+ EM_CTL_RST = (1 << 9), /* Reset */
+ EM_CTL_TM = (1 << 8), /* Transmit Message */
+ EM_CTL_MR = (1 << 0), /* Message Received */
+ EM_CTL_ALHD = (1 << 26), /* Activity LED */
+ EM_CTL_XMT = (1 << 25), /* Transmit Only */
+ EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
+ EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */
+ EM_CTL_SES = (1 << 18), /* SES-2 messages supported */
+ EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */
+ EM_CTL_LED = (1 << 16), /* LED messages supported */
+
+ /* em message type */
+ EM_MSG_TYPE_LED = (1 << 0), /* LED */
+ EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
+ EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
+ EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
+
+ FIS_TYPE_REG_H2D = 0x27,
+ FIS_TYPE_REG_D2H = 0x34,
+ FIS_TYPE_DMA_ACT = 0x39,
+ FIS_TYPE_DMA_SETUP = 0x41,
+ FIS_TYPE_DATA = 0x46,
+ FIS_TYPE_BIST = 0x58,
+ FIS_TYPE_PIO_SETUP = 0x5F,
+ FIS_TYPE_DEV_BITS = 0xA1,
+};
+
+/* End from linux 3.9 */
+
+#endif /* _GNUMACH_AHCI_H */
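/*
 * A minimal sketch of how a port interrupt handler might consume the
 * PORT_* offsets and PORT_IRQ_* bits defined above: read PORT_IRQ_STAT,
 * write the observed bits back to acknowledge them, then test the error
 * summary.  The readl()/writel() accessors and the port_mmio base
 * pointer are assumptions for illustration, not taken from this header.
 */
static int ahci_port_intr_sketch(void *port_mmio)
{
	unsigned int status = readl(port_mmio + PORT_IRQ_STAT);

	writel(status, port_mmio + PORT_IRQ_STAT);	/* ack what we saw */

	if (status & PORT_IRQ_ERROR)
		return -1;	/* caller should freeze and reset the port */
	if (status & PORT_IRQ_D2H_REG_FIS)
		return 1;	/* a command completed */
	return 0;
}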
diff --git a/linux/dev/include/asm-i386/page.h b/linux/dev/include/asm-i386/page.h
new file mode 100644
index 0000000..be81848
--- /dev/null
+++ b/linux/dev/include/asm-i386/page.h
@@ -0,0 +1,59 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+#include <mach/vm_param.h>
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map.. */
+#define PAGE_OFFSET 0
+#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
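/*
 * A small usage sketch of the two helpers above, assuming the usual i386
 * values PAGE_SIZE = 4096 and PAGE_SHIFT = 12 provided by
 * <mach/vm_param.h> (they are not defined in this file itself).
 */
static unsigned long page_align_example(unsigned long addr)
{
	/* e.g. addr = 0x12345: PAGE_ALIGN gives 0x13000, MAP_NR gives 0x13,
	   since PAGE_OFFSET is 0 here. */
	return MAP_NR(PAGE_ALIGN(addr));
}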
diff --git a/linux/dev/include/asm-i386/smp.h b/linux/dev/include/asm-i386/smp.h
new file mode 100644
index 0000000..fabe01d
--- /dev/null
+++ b/linux/dev/include/asm-i386/smp.h
@@ -0,0 +1,8 @@
+#ifndef _I386_SMP_H
+#define _I386_SMP_H
+
+#include <machine/cpu_number.h>
+
+#define smp_processor_id() cpu_number()
+
+#endif /* _I386_SMP_H */
diff --git a/linux/dev/include/asm-i386/string.h b/linux/dev/include/asm-i386/string.h
new file mode 100644
index 0000000..f41ca5c
--- /dev/null
+++ b/linux/dev/include/asm-i386/string.h
@@ -0,0 +1,487 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ *
+ * Also, the byte strings actually work correctly. Forget
+ * the i486 routines for now as they may be broken..
+ */
+#if FIXED_486_STRING && (CPU == 486 || CPU == 586)
+#include <asm/string-486.h>
+#else
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, this should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strtok,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used through-out, making for "slightly" unclear code :-)
+ *
+ * NO Copyright (C) 1991, 1992 Linus Torvalds,
+ * consider these trivial functions to be PD.
+ */
+
+#define __HAVE_ARCH_STRCPY
+static inline char * strcpy(char * dest,const char *src)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2)
+ :"0" (src),"1" (dest) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+static inline char * strncpy(char * dest,const char *src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %2\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "rep\n\t"
+ "stosb\n"
+ "2:"
+ : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
+ :"0" (src),"1" (dest),"2" (count) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+static inline char * strcat(char * dest,const char * src)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+static inline char * strncat(char * dest,const char * src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n\t"
+ "movl %8,%3\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %2,%2\n\t"
+ "stosb"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
+ : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char * cs,const char * ct)
+{
+int d0, d1;
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "scasb\n\t"
+ "jne 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "jmp 3f\n"
+ "2:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "3:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1)
+ :"1" (cs),"2" (ct));
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+static inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "scasb\n\t"
+ "jne 3f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %%eax,%%eax\n\t"
+ "jmp 4f\n"
+ "3:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "4:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+ :"1" (cs),"2" (ct),"3" (count));
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+static inline char * strchr(const char * s, int c)
+{
+int d0;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "je 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "movl $1,%1\n"
+ "2:\tmovl %1,%0\n\t"
+ "decl %0"
+ :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+static inline char * strrchr(const char * s, int c)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "jne 2f\n\t"
+ "leal -1(%%esi),%0\n"
+ "2:\ttestb %%al,%%al\n\t"
+ "jne 1b"
+ :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+static inline size_t strlen(const char * s)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %0\n\t"
+ "decl %0"
+ :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
+return __res;
+}
+
+static inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+ : "memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+ switch (n) {
+ case 0:
+ return to;
+ case 1:
+ *(unsigned char *)to = *(const unsigned char *)from;
+ return to;
+ case 2:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ return to;
+ case 3:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ *(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
+ return to;
+ case 4:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ return to;
+ case 6: /* for Ethernet addresses */
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
+ return to;
+ case 8:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ return to;
+ case 12:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ return to;
+ case 16:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ return to;
+ case 20:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ *(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
+ return to;
+ }
+#define COMMON(x) \
+__asm__ __volatile__( \
+ "cld\n\t" \
+ "rep ; movsl" \
+ x \
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2) \
+ : "0" (n/4),"1" ((long) to),"2" ((long) from) \
+ : "memory");
+{
+ int d0, d1, d2;
+ switch (n % 4) {
+ case 0: COMMON(""); return to;
+ case 1: COMMON("\n\tmovsb"); return to;
+ case 2: COMMON("\n\tmovsw"); return to;
+ default: COMMON("\n\tmovsw\n\tmovsb"); return to;
+ }
+}
+
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#define __HAVE_ARCH_MEMMOVE
+static inline void * memmove(void * dest,const void * src, size_t n)
+{
+int d0, d1, d2;
+if (dest<src)
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "movsb"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),"1" (src),"2" (dest)
+ : "memory");
+else
+__asm__ __volatile__(
+ "std\n\t"
+ "rep\n\t"
+ "movsb\n\t"
+ "cld"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),
+ "1" (n-1+(const char *)src),
+ "2" (n-1+(char *)dest)
+ :"memory");
+return dest;
+}
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+static inline void * memchr(const void * cs,int c,size_t count)
+{
+int d0;
+register void * __res;
+if (!count)
+ return NULL;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1f\n\t"
+ "movl $1,%0\n"
+ "1:\tdecl %0"
+ :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
+return __res;
+}
+
+static inline void * __memset_generic(void * s, char c,size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "stosb"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c),"1" (s),"0" (count)
+ :"memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; stosl\n\t"
+ "testb $2,%b3\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b3\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+ :"memory");
+return (s);
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+static inline size_t strnlen(const char * s, size_t count)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "movl %2,%0\n\t"
+ "jmp 2f\n"
+ "1:\tcmpb $0,(%0)\n\t"
+ "je 3f\n\t"
+ "incl %0\n"
+ "2:\tdecl %1\n\t"
+ "cmpl $-1,%1\n\t"
+ "jne 1b\n"
+ "3:\tsubl %2,%0"
+ :"=a" (__res), "=&d" (d0)
+ :"c" (s),"1" (count));
+return __res;
+}
+/* end of additional stuff */
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count are constant..
+ */
+static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern;
+ *(2+(unsigned char *)s) = pattern;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+#define COMMON(x) \
+__asm__ __volatile__("cld\n\t" \
+ "rep ; stosl" \
+ x \
+ : "=&c" (d0), "=&D" (d1) \
+ : "a" (pattern),"0" (count/4),"1" ((long) s) \
+ : "memory")
+{
+ int d0, d1;
+ switch (count % 4) {
+ case 0: COMMON(""); return s;
+ case 1: COMMON("\n\tstosb"); return s;
+ case 2: COMMON("\n\tstosw"); return s;
+ default: COMMON("\n\tstosw\n\tstosb"); return s;
+ }
+}
+
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+static inline void * memscan(void * addr, int c, size_t size)
+{
+ if (!size)
+ return addr;
+ __asm__("cld\n"
+ "repnz; scasb\n"
+ "jnz 1f\n"
+ "dec %%edi\n"
+ "1:\n"
+ : "=D" (addr), "=c" (size)
+ : "0" (addr), "1" (size), "a" (c));
+ return addr;
+}
+
+#endif
+#endif
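/*
 * A hedged sketch of what the memcpy() macro above buys: when the length
 * is a compile-time constant the call expands to __constant_memcpy() and
 * its unrolled cases, otherwise it falls back to the rep/movs loop in
 * __memcpy().  The struct and function names here are invented for
 * illustration only.
 */
struct ether_addr_example { unsigned char addr[6]; };

static void memcpy_dispatch_example(struct ether_addr_example *dst,
				    const struct ether_addr_example *src,
				    char *buf, const char *msg, size_t n)
{
	memcpy(dst->addr, src->addr, 6);	/* constant length: unrolled case 6 */
	memcpy(buf, msg, n);			/* runtime length: rep ; movsl path */
}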
diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h
new file mode 100644
index 0000000..5187c5e
--- /dev/null
+++ b/linux/dev/include/asm-i386/system.h
@@ -0,0 +1,356 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <i386/ipl.h> /* curr_ipl[], splx */
+#include <kern/cpu_number.h>
+
+#include <asm/segment.h>
+
+/*
+ * Entry into gdt where to find first TSS. GDT layout:
+ * 0 - null
+ * 1 - not used
+ * 2 - kernel code segment
+ * 3 - kernel data segment
+ * 4 - user code segment
+ * 5 - user data segment
+ * ...
+ * 8 - TSS #0
+ * 9 - LDT #0
+ * 10 - TSS #1
+ * 11 - LDT #1
+ */
+#define FIRST_TSS_ENTRY 8
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
+#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
+#define store_TR(n) \
+__asm__("str %%ax\n\t" \
+ "subl %2,%%eax\n\t" \
+ "shrl $4,%%eax" \
+ :"=a" (n) \
+ :"0" (0),"i" (FIRST_TSS_ENTRY<<3))
+
+/* This special macro can be used to load a debugging register */
+
+#define loaddebug(tsk,register) \
+ __asm__("movl %0,%%edx\n\t" \
+ "movl %%edx,%%db" #register "\n\t" \
+ : /* no output */ \
+ :"m" (tsk->debugreg[register]) \
+ :"dx");
+
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * This also clears the TS-flag if the task we switched to has used
+ * the math co-processor latest.
+ *
+ * It also reloads the debug regs if necessary..
+ */
+
+
+#ifdef __SMP__
+ /*
+ * Keep the lock depth straight. If we switch on an interrupt from
+ * kernel->user task we need to lose a depth, and if we switch the
+ * other way we need to gain a depth. Same layer switches come out
+ * the same.
+ *
+ * We spot a switch in user mode because the kernel counter is the
+ * same as the interrupt counter depth. (We never switch during the
+ * message/invalidate IPI).
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process.
+ */
+
+#define switch_to(prev,next) do { \
+ cli();\
+ if(prev->flags&PF_USEDFPU) \
+ { \
+ __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
+ __asm__ __volatile__("fwait"); \
+ prev->flags&=~PF_USEDFPU; \
+ } \
+ prev->lock_depth=syscall_count; \
+ kernel_counter+=next->lock_depth-prev->lock_depth; \
+ syscall_count=next->lock_depth; \
+__asm__("pushl %%edx\n\t" \
+ "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
+ "movl 0x20(%%edx), %%edx\n\t" \
+ "shrl $22,%%edx\n\t" \
+ "and $0x3C,%%edx\n\t" \
+ "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
+ "popl %%edx\n\t" \
+ "ljmp %0\n\t" \
+ "sti\n\t" \
+ : /* no output */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "c" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+
+#else
+#define switch_to(prev,next) do { \
+__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \
+ "ljmp %0\n\t" \
+ "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
+ "jne 1f\n\t" \
+ "clts\n" \
+ "1:" \
+ : /* no outputs */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "r" (prev), "r" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+#endif
+
+#define _set_base(addr,base) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%1\n\t" \
+ "movb %%dh,%2" \
+ : /* no output */ \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "d" (base) \
+ :"dx")
+
+#define _set_limit(addr,limit) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %1,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%1" \
+ : /* no output */ \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "d" (limit) \
+ :"dx")
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define stts() \
+__asm__ __volatile__ ( \
+ "movl %%cr0,%%eax\n\t" \
+ "orl $8,%%eax\n\t" \
+ "movl %%eax,%%cr0" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ :"ax")
+
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+#define mb() __asm__ __volatile__ ("" : : :"memory")
+#define __sti() __asm__ __volatile__ ("sti": : :"memory")
+#define __cli() __asm__ __volatile__ ("cli": : :"memory")
+#define __save_flags(x) (x = ((curr_ipl[cpu_number()] > 0) ? 0 : (1 << 9)))
+#define __restore_flags(x) splx((x & (1 << 9)) ? 0 : 7)
+
+#ifdef __SMP__
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern unsigned long __global_save_flags(void);
+extern void __global_restore_flags(unsigned long);
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define save_flags(x) ((x)=__global_save_flags())
+#define restore_flags(x) __global_restore_flags(x)
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+
+#endif
+
+
+#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+ "movw %2,%%dx\n\t" \
+ "movl %%eax,%0\n\t" \
+ "movl %%edx,%1" \
+ :"=m" (*((long *) (gate_addr))), \
+ "=m" (*(1+(long *) (gate_addr))) \
+ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+ "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
+ :"ax","dx")
+
+#define set_intr_gate(n,addr) \
+ _set_gate(&idt[n],14,0,addr)
+
+#define set_trap_gate(n,addr) \
+ _set_gate(&idt[n],15,0,addr)
+
+#define set_system_gate(n,addr) \
+ _set_gate(&idt[n],15,3,addr)
+
+#define set_call_gate(a,addr) \
+ _set_gate(a,12,3,addr)
+
+#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
+ *((gate_addr)+1) = ((base) & 0xff000000) | \
+ (((base) & 0x00ff0000)>>16) | \
+ ((limit) & 0xf0000) | \
+ ((dpl)<<13) | \
+ (0x00408000) | \
+ ((type)<<8); \
+ *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
+ ((limit) & 0x0ffff); }
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
+ "movw %%ax,%2\n\t" \
+ "rorl $16,%%eax\n\t" \
+ "movb %%al,%3\n\t" \
+ "movb $" type ",%4\n\t" \
+ "movb $0x00,%5\n\t" \
+ "movb %%ah,%6\n\t" \
+ "rorl $16,%%eax" \
+ : /* no output */ \
+ :"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
+ "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
+ )
+
+#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
+#define set_ldt_desc(n,addr,size) \
+ _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt;
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#ifndef MACH
+#define HAVE_DISABLE_HLT
+#endif
+void disable_hlt(void);
+void enable_hlt(void);
+
+static __inline__ unsigned long long rdmsr(unsigned int msr)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdmsr"
+ : "=A" (ret)
+ : "c" (msr));
+ return ret;
+}
+
+static __inline__ void wrmsr(unsigned int msr,unsigned long long val)
+{
+ __asm__ __volatile__("wrmsr"
+ : /* no Outputs */
+ : "c" (msr), "A" (val));
+}
+
+
+static __inline__ unsigned long long rdtsc(void)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdtsc"
+ : "=A" (ret)
+ : /* no inputs */);
+ return ret;
+}
+
+static __inline__ unsigned long long rdpmc(unsigned int counter)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdpmc"
+ : "=A" (ret)
+ : "c" (counter));
+ return ret;
+}
+
+#endif
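/*
 * The usual Linux driver idiom for a short critical section, which this
 * header maps onto gnumach's curr_ipl[]/splx() machinery: save_flags()
 * records whether interrupts are currently enabled, cli() masks them, and
 * restore_flags() puts the ipl back.  A minimal sketch; the data being
 * protected is left as a placeholder.
 */
static void critical_section_example(void)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	/* ... touch data shared with an interrupt handler ... */
	restore_flags(flags);
}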
diff --git a/linux/dev/include/asm-i386/uaccess.h b/linux/dev/include/asm-i386/uaccess.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/asm-i386/uaccess.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/blk.h b/linux/dev/include/linux/blk.h
new file mode 100644
index 0000000..b924a14
--- /dev/null
+++ b/linux/dev/include/linux/blk.h
@@ -0,0 +1,471 @@
+/* Is this okay? by OKUJI Yoshinori */
+#ifndef _BLK_H
+#define _BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/locks.h>
+#include <linux/malloc.h>
+#include <linux/config.h>
+#include <linux/md.h>
+
+/*
+ * NR_REQUEST is the number of entries in the request-queue.
+ * NOTE that writes may use only the low 2/3 of these: reads
+ * take precedence.
+ */
+#define NR_REQUEST 64
+
+/*
+ * This is used in the elevator algorithm. We don't prioritise reads
+ * over writes any more --- although reads are more time-critical than
+ * writes, by treating them equally we increase filesystem throughput.
+ * This turns out to give better overall performance. -- sct
+ */
+#define IN_ORDER(s1,s2) \
+((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \
+(s1)->sector < (s2)->sector)))
+
+/*
+ * These will have to be changed to be aware of different buffer
+ * sizes etc.. It actually needs a major cleanup.
+ */
+#if defined(IDE_DRIVER) || defined(MD_DRIVER)
+#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
+#else
+#define SECTOR_MASK (blksize_size[MAJOR_NR] && \
+ blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] ? \
+ ((blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] >> 9) - 1) : \
+ ((BLOCK_SIZE >> 9) - 1))
+#endif /* IDE_DRIVER */
+
+#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0)
+
+#ifdef CONFIG_CDU31A
+extern int cdu31a_init(void);
+#endif /* CONFIG_CDU31A */
+#ifdef CONFIG_MCD
+extern int mcd_init(void);
+#endif /* CONFIG_MCD */
+#ifdef CONFIG_MCDX
+extern int mcdx_init(void);
+#endif /* CONFIG_MCDX */
+#ifdef CONFIG_SBPCD
+extern int sbpcd_init(void);
+#endif /* CONFIG_SBPCD */
+#ifdef CONFIG_AZTCD
+extern int aztcd_init(void);
+#endif /* CONFIG_AZTCD */
+#ifdef CONFIG_CDU535
+extern int sony535_init(void);
+#endif /* CONFIG_CDU535 */
+#ifdef CONFIG_GSCD
+extern int gscd_init(void);
+#endif /* CONFIG_GSCD */
+#ifdef CONFIG_CM206
+extern int cm206_init(void);
+#endif /* CONFIG_CM206 */
+#ifdef CONFIG_OPTCD
+extern int optcd_init(void);
+#endif /* CONFIG_OPTCD */
+#ifdef CONFIG_SJCD
+extern int sjcd_init(void);
+#endif /* CONFIG_SJCD */
+#ifdef CONFIG_CDI_INIT
+extern int cdi_init(void);
+#endif /* CONFIG_CDI_INIT */
+#ifdef CONFIG_BLK_DEV_HD
+extern int hd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_IDE
+extern int ide_init(void);
+extern void ide_disable_base(unsigned base);
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+extern int xd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_LOOP
+extern int loop_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_MD
+extern int md_init(void);
+#endif /* CONFIG_BLK_DEV_MD */
+
+extern void set_device_ro(kdev_t dev,int flag);
+void add_blkdev_randomness(int major);
+
+extern int floppy_init(void);
+extern void rd_load(void);
+extern int rd_init(void);
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+extern unsigned long initrd_start,initrd_end;
+extern int mount_initrd; /* zero if initrd should not be mounted */
+void initrd_init(void);
+
+#endif
+
+#define RO_IOCTLS(dev,where) \
+ case BLKROSET: { int __err; if (!suser()) return -EACCES; \
+ __err = verify_area(VERIFY_READ, (void *) (where), sizeof(long)); \
+ if (!__err) set_device_ro((dev),get_fs_long((long *) (where))); return __err; } \
+ case BLKROGET: { int __err = verify_area(VERIFY_WRITE, (void *) (where), sizeof(long)); \
+ if (!__err) put_fs_long(0!=is_read_only(dev),(long *) (where)); return __err; }
+
+#if defined(MAJOR_NR) || defined(IDE_DRIVER)
+
+/*
+ * Add entries as needed.
+ */
+
+#ifdef IDE_DRIVER
+
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+#define DEVICE_ON(device) /* nothing */
+#define DEVICE_OFF(device) /* nothing */
+
+#elif (MAJOR_NR == RAMDISK_MAJOR)
+
+/* ram disk */
+#define DEVICE_NAME "ramdisk"
+#define DEVICE_REQUEST rd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+#define DEVICE_NO_RANDOM
+
+#elif (MAJOR_NR == FLOPPY_MAJOR)
+
+static void floppy_off(unsigned int nr);
+
+#define DEVICE_NAME "floppy"
+#define DEVICE_INTR do_floppy
+#define DEVICE_REQUEST do_fd_request
+#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
+
+#elif (MAJOR_NR == HD_MAJOR)
+
+/* harddisk: timeout is 6 seconds.. */
+#define DEVICE_NAME "harddisk"
+#define DEVICE_INTR do_hd
+#define DEVICE_TIMEOUT HD_TIMER
+#define TIMEOUT_VALUE (6*HZ)
+#define DEVICE_REQUEST do_hd_request
+#define DEVICE_NR(device) (MINOR(device)>>6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_DISK_MAJOR)
+
+#define DEVICE_NAME "scsidisk"
+#define DEVICE_INTR do_sd
+#define TIMEOUT_VALUE (2*HZ)
+#define DEVICE_REQUEST do_sd_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+/* Kludge to use the same number for both char and block major numbers */
+#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
+
+#define DEVICE_NAME "scsitape"
+#define DEVICE_INTR do_st
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
+
+#define DEVICE_NAME "CD-ROM"
+#define DEVICE_INTR do_sr
+#define DEVICE_REQUEST do_sr_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == XT_DISK_MAJOR)
+
+#define DEVICE_NAME "xt disk"
+#define DEVICE_REQUEST do_xd_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
+
+#define DEVICE_NAME "CDU31A"
+#define DEVICE_REQUEST do_cdu31a_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcd */
+#define DEVICE_REQUEST do_mcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcdx */
+#define DEVICE_REQUEST do_mcdx_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #1"
+#define DEVICE_REQUEST do_sbpcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #2"
+#define DEVICE_REQUEST do_sbpcd2_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #3"
+#define DEVICE_REQUEST do_sbpcd3_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #4"
+#define DEVICE_REQUEST do_sbpcd4_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
+
+#define DEVICE_NAME "Aztech CD-ROM"
+#define DEVICE_REQUEST do_aztcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
+
+#define DEVICE_NAME "SONY-CDU535"
+#define DEVICE_INTR do_cdu535
+#define DEVICE_REQUEST do_cdu535_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
+
+#define DEVICE_NAME "Goldstar R420"
+#define DEVICE_REQUEST do_gscd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CM206_CDROM_MAJOR)
+#define DEVICE_NAME "Philips/LMS cd-rom cm206"
+#define DEVICE_REQUEST do_cm206_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
+
+#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
+#define DEVICE_REQUEST do_optcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
+
+#define DEVICE_NAME "Sanyo H94A CD-ROM"
+#define DEVICE_REQUEST do_sjcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif /* MAJOR_NR == whatever */
+
+#if (MAJOR_NR != SCSI_TAPE_MAJOR)
+#if !defined(IDE_DRIVER)
+
+#ifndef CURRENT
+#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#endif
+
+#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
+
+#ifdef DEVICE_INTR
+static void (*DEVICE_INTR)(void) = NULL;
+#endif
+#ifdef DEVICE_TIMEOUT
+
+#define SET_TIMER \
+((timer_table[DEVICE_TIMEOUT].expires = jiffies + TIMEOUT_VALUE), \
+(timer_active |= 1<<DEVICE_TIMEOUT))
+
+#define CLEAR_TIMER \
+timer_active &= ~(1<<DEVICE_TIMEOUT)
+
+#define SET_INTR(x) \
+if ((DEVICE_INTR = (x)) != NULL) \
+ SET_TIMER; \
+else \
+ CLEAR_TIMER;
+
+#else
+
+#define SET_INTR(x) (DEVICE_INTR = (x))
+
+#endif /* DEVICE_TIMEOUT */
+
+static void (DEVICE_REQUEST)(void);
+
+#ifdef DEVICE_INTR
+#define CLEAR_INTR SET_INTR(NULL)
+#else
+#define CLEAR_INTR
+#endif
+
+#define INIT_REQUEST \
+ if (!CURRENT) {\
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+
+#endif /* !defined(IDE_DRIVER) */
+
+/* end_request() - SCSI devices have their own version */
+/* - IDE drivers have their own copy too */
+
+#if ! SCSI_BLK_MAJOR(MAJOR_NR)
+
+#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
+#else
+
+#ifdef IDE_DRIVER
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
+ struct request *req = hwgroup->rq;
+#else
+static void end_request(int uptodate) {
+ struct request *req = CURRENT;
+#endif /* IDE_DRIVER */
+ struct buffer_head * bh;
+#ifndef MACH
+ int nsect;
+#endif
+
+ req->errors = 0;
+ if (!uptodate) {
+ if (!req->quiet)
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+#ifdef MACH
+ for (bh = req->bh; bh; )
+ {
+ struct buffer_head *next = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate (bh, 0);
+ unlock_buffer (bh);
+ bh = next;
+ }
+ req->bh = NULL;
+#else
+ if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
+ req->nr_sectors--;
+ req->nr_sectors &= ~(nsect - 1);
+ req->sector += nsect;
+ req->sector &= ~(nsect - 1);
+ }
+#endif
+ }
+
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+
+ /*
+ * This is our 'MD IO has finished' event handler.
+	 * Note that b_state should be cached in a register
+	 * anyway, so the overhead of this checking is almost
+ * zero. But anyways .. we never get OO for free :)
+ */
+ if (test_bit(BH_MD, &bh->b_state)) {
+ struct md_personality * pers=(struct md_personality *)bh->personality;
+ pers->end_request(bh,uptodate);
+ }
+ /*
+ * the normal (nonmirrored and no RAID5) case:
+ */
+ else {
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ }
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ return;
+ }
+ }
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+#endif
+#ifdef IDE_DRIVER
+ blk_dev[MAJOR(req->rq_dev)].current_request = req->next;
+ hwgroup->rq = NULL;
+#else
+ DEVICE_OFF(req->rq_dev);
+ CURRENT = req->next;
+#endif /* IDE_DRIVER */
+ if (req->sem != NULL)
+ up(req->sem);
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
+#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
+#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */
+
+#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
+
+#endif /* _BLK_H */
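/*
 * A hedged sketch of the include protocol this header expects: a block
 * driver defines MAJOR_NR before including <linux/blk.h>, which selects
 * the DEVICE_* macros in the #elif chain above and instantiates the
 * static end_request() helper; the driver then loops over the queue in
 * its request function.  The ramdisk major is used only as an example
 * and the actual data transfer is elided.
 */
#define MAJOR_NR RAMDISK_MAJOR
#include <linux/blk.h>

static void rd_request(void)	/* must match DEVICE_REQUEST for this major */
{
	while (1) {
		INIT_REQUEST;	/* returns when the queue is empty */
		/* ... move CURRENT->current_nr_sectors sectors at
		   CURRENT->sector to or from CURRENT->buffer,
		   according to CURRENT->cmd (READ or WRITE) ... */
		end_request(1);	/* success: complete buffers, advance queue */
	}
}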
diff --git a/linux/dev/include/linux/blkdev.h b/linux/dev/include/linux/blkdev.h
new file mode 100644
index 0000000..5bf0a28
--- /dev/null
+++ b/linux/dev/include/linux/blkdev.h
@@ -0,0 +1,73 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+
+/*
+ * Ok, this is an expanded form so that we can use the same
+ * request for paging requests when that is implemented. In
+ * paging, 'bh' is NULL, and the semaphore is used to wait
+ * for read/write completion.
+ */
+struct request {
+ volatile int rq_status; /* should split this into a few status bits */
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+ kdev_t rq_dev;
+ int cmd; /* READ or WRITE */
+ int errors;
+ int quiet;
+ unsigned long sector;
+ unsigned long nr_sectors;
+ unsigned long current_nr_sectors;
+ char * buffer;
+ struct semaphore * sem;
+ struct buffer_head * bh;
+ struct buffer_head * bhtail;
+ struct request * next;
+};
+
+struct blk_dev_struct {
+ void (*request_fn)(void);
+ struct request * current_request;
+ struct request plug;
+ struct tq_struct plug_tq;
+};
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern struct wait_queue * wait_for_request;
+extern void resetup_one_dev(struct gendisk *dev, int drive);
+
+#ifdef MACH
+extern inline void unplug_device(void *data) { }
+#else
+extern void unplug_device(void * data);
+#endif
+
+extern void make_request(int major,int rw, struct buffer_head * bh);
+
+/* md needs this function to remap requests */
+extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
+extern int md_make_request (int major, int rw, struct buffer_head * bh);
+extern int md_error (kdev_t mddev, kdev_t rdev);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * hardsect_size[MAX_BLKDEV];
+
+#endif
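/*
 * A small sketch of how a request's buffer chain hangs together: the
 * buffer_heads belonging to one struct request are linked through
 * b_reqnext (declared in <linux/fs.h> below).  This helper is an
 * illustration only, not code from this tree.
 */
static unsigned long request_total_bytes(struct request *req)
{
	struct buffer_head *bh;
	unsigned long total = 0;

	for (bh = req->bh; bh != NULL; bh = bh->b_reqnext)
		total += bh->b_size;

	return total;
}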
diff --git a/linux/dev/include/linux/compile.h b/linux/dev/include/linux/compile.h
new file mode 100644
index 0000000..7d43a20
--- /dev/null
+++ b/linux/dev/include/linux/compile.h
@@ -0,0 +1,6 @@
+#define UTS_VERSION "#11 Fri Apr 24 23:03:10 JST 1998"
+#define LINUX_COMPILE_TIME "23:03:10"
+#define LINUX_COMPILE_BY "somebody"
+#define LINUX_COMPILE_HOST "unknown"
+#define LINUX_COMPILE_DOMAIN "somewhere.org"
+#define LINUX_COMPILER "gcc version 2.7.2.3"
diff --git a/linux/dev/include/linux/etherdevice.h b/linux/dev/include/linux/etherdevice.h
new file mode 100644
index 0000000..eb262b2
--- /dev/null
+++ b/linux/dev/include/linux/etherdevice.h
@@ -0,0 +1,62 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_ETHERDEVICE_H
+#define _LINUX_ETHERDEVICE_H
+
+
+#include <linux/if_ether.h>
+
+#ifdef __KERNEL__
+extern int eth_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int eth_rebuild_header(void *buff, struct device *dev,
+ unsigned long dst, struct sk_buff *skb);
+
+/* This causes skb->protocol = 0. I am not sure if this is really ok.
+ * Last modified: 19980402 by OKUJI Yoshinori <okuji@kmc.kyoto-u.ac.jp>
+ */
+#ifdef MACH
+#define eth_type_trans(skb, dev) ((unsigned short)0)
+#else
+extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev);
+#endif
+
+extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev,
+ unsigned short htype, __u32 daddr);
+extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+
+#ifdef MACH
+#define eth_copy_and_sum(dest, src, length, base) \
+ memcpy((dest)->data, src, length)
+#else
+extern void eth_copy_and_sum(struct sk_buff *dest,
+ unsigned char *src, int length, int base);
+#endif
+
+extern struct device * init_etherdev(struct device *, int);
+
+#endif
+
+#endif /* _LINUX_ETHERDEVICE_H */
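/*
 * A hedged sketch of the receive-path idiom these MACH definitions keep
 * working: under MACH, eth_copy_and_sum() degenerates to a plain memcpy
 * into skb->data and eth_type_trans() always yields 0, so code copied
 * from a Linux NIC driver still compiles.  dev_alloc_skb(), skb_reserve(),
 * skb_put() and netif_rx() are the usual helpers assumed to be provided
 * by <linux/skbuff.h> and the glue layer.
 */
static void rx_example(struct device *dev, unsigned char *pkt, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (skb == NULL)
		return;			/* drop the packet if no memory */
	skb_reserve(skb, 2);		/* align the IP header */
	skb_put(skb, len);
	eth_copy_and_sum(skb, pkt, len, 0);	/* plain memcpy under MACH */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}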
diff --git a/linux/dev/include/linux/fs.h b/linux/dev/include/linux/fs.h
new file mode 100644
index 0000000..def2bc9
--- /dev/null
+++ b/linux/dev/include/linux/fs.h
@@ -0,0 +1,803 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table
+ * structures etc.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/limits.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/vfs.h>
+#include <linux/net.h>
+#include <linux/kdev_t.h>
+#include <linux/ioctl.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but I'll fix
+ * that later. Anyway, now the file code is no longer dependent
+ * on bitmaps in unsigned longs, but uses the new fd_set structure..
+ *
+ * Some programs (notably those using select()) may have to be
+ * recompiled to take full advantage of the new limits..
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define NR_OPEN 256
+
+#define NR_SUPER 64
+#define BLOCK_SIZE 1024
+#define BLOCK_SIZE_BITS 10
+
+/* And dynamically-tunable limits and defaults: */
+extern int max_inodes, nr_inodes;
+extern int max_files, nr_files;
+#define NR_INODE 3072 /* this should be bigger than NR_FILE */
+#define NR_FILE 1024 /* this can well be larger on a larger system */
+
+#define MAY_EXEC 1
+#define MAY_WRITE 2
+#define MAY_READ 4
+
+#define FMODE_READ 1
+#define FMODE_WRITE 2
+
+#define READ 0
+#define WRITE 1
+#define READA 2 /* read-ahead - don't block if no resources */
+#define WRITEA 3 /* write-ahead - don't block if no resources */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define NIL_FILP ((struct file *)0)
+#define SEL_IN 1
+#define SEL_OUT 2
+#define SEL_EX 4
+
+/*
+ * These are the fs-independent mount-flags: up to 16 flags are supported
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define S_WRITE 128 /* Write on file/directory/symlink */
+#define S_APPEND 256 /* Append-only file */
+#define S_IMMUTABLE 512 /* Immutable file */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define S_BAD_INODE 2048 /* Marker for unreadable inodes */
+#define S_ZERO_WR 4096 /* Device accepts 0 length writes */
+/*
+ * Flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME)
+
+/*
+ * Magic mount flag number. Has to be or-ed to the flag values.
+ */
+#define MS_MGC_VAL 0xC0ED0000 /* magic flag number to indicate "new" flags */
+#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */
+
+/*
+ * Note that read-only etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ */
+#define IS_RDONLY(inode) (((inode)->i_sb) && ((inode)->i_sb->s_flags & MS_RDONLY))
+#define IS_NOSUID(inode) ((inode)->i_flags & MS_NOSUID)
+#define IS_NODEV(inode) ((inode)->i_flags & MS_NODEV)
+#define IS_NOEXEC(inode) ((inode)->i_flags & MS_NOEXEC)
+#define IS_SYNC(inode) ((inode)->i_flags & MS_SYNCHRONOUS)
+#define IS_MANDLOCK(inode) ((inode)->i_flags & MS_MANDLOCK)
+
+#define IS_WRITABLE(inode) ((inode)->i_flags & S_WRITE)
+#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+#define IS_NOATIME(inode) ((inode)->i_flags & MS_NOATIME)
+#define IS_ZERO_WR(inode) ((inode)->i_flags & S_ZERO_WR)
+
+#define UPDATE_ATIME(inode) \
+ if (!IS_NOATIME(inode) && !IS_RDONLY(inode)) { \
+ inode->i_atime = CURRENT_TIME; \
+ inode->i_dirt = 1; \
+ }
+
+/* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
+#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96) /* return device size */
+#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */
+#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+
+#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+#define FIBMAP _IO(0x00,1) /* bmap access */
+#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+
+#ifdef __KERNEL__
+
+#include <asm/semaphore.h>
+#include <asm/bitops.h>
+
+extern void buffer_init(void);
+extern unsigned long inode_init(unsigned long start, unsigned long end);
+extern unsigned long file_table_init(unsigned long start, unsigned long end);
+extern unsigned long name_cache_init(unsigned long start, unsigned long end);
+
+typedef char buffer_block[BLOCK_SIZE];
+
+/* bh state bits */
+#define BH_Uptodate 0 /* 1 if the buffer contains valid data */
+#define BH_Dirty 1 /* 1 if the buffer is dirty */
+#define BH_Lock 2 /* 1 if the buffer is locked */
+#define BH_Req 3 /* 0 if the buffer has been invalidated */
+#define BH_Touched 4 /* 1 if the buffer has been touched (aging) */
+#define BH_Has_aged 5 /* 1 if the buffer has been aged (aging) */
+#define BH_Protected 6 /* 1 if the buffer is protected */
+#define BH_FreeOnIO 7 /* 1 to discard the buffer_head after IO */
+#define BH_MD 8 /* 1 if the buffer is an MD request */
+
+/*
+ * Try to keep the most commonly used fields in single cache lines (16
+ * bytes) to improve performance. This ordering should be
+ * particularly beneficial on 32-bit processors.
+ *
+ * We use the first 16 bytes for the data which is used in searches
+ * over the block hash lists (ie. getblk(), find_buffer() and
+ * friends).
+ *
+ * The second 16 bytes we use for lru buffer scans, as used by
+ * sync_buffers() and refill_freelist(). -- sct
+ */
+struct buffer_head {
+ /* First cache line: */
+ unsigned long b_blocknr; /* block number */
+ kdev_t b_dev; /* device (B_FREE = free) */
+ kdev_t b_rdev; /* Real device */
+ unsigned long b_rsector; /* Real buffer location on disk */
+ struct buffer_head * b_next; /* Hash queue list */
+ struct buffer_head * b_this_page; /* circular list of buffers in one page */
+
+ /* Second cache line: */
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head * b_next_free;
+ unsigned int b_count; /* users using this block */
+ unsigned long b_size; /* block size */
+
+ /* Non-performance-critical data follows. */
+ char * b_data; /* pointer to data block (1024 bytes) */
+ unsigned int b_list; /* List that this buffer appears */
+ unsigned long b_flushtime; /* Time when this (dirty) buffer
+ * should be written */
+ unsigned long b_lru_time; /* Time when this buffer was
+ * last used. */
+ struct wait_queue * b_wait;
+ struct buffer_head * b_prev; /* doubly linked list of hash-queue */
+ struct buffer_head * b_prev_free; /* doubly linked list of buffers */
+ struct buffer_head * b_reqnext; /* request queue */
+
+/*
+ * Some MD stuff like RAID5 needs special event handlers and
+ * special private buffer_head fields:
+ */
+ void * personality;
+ void * private_bh;
+};
+
+static inline int buffer_uptodate(struct buffer_head * bh)
+{
+ return test_bit(BH_Uptodate, &bh->b_state);
+}
+
+static inline int buffer_dirty(struct buffer_head * bh)
+{
+ return test_bit(BH_Dirty, &bh->b_state);
+}
+
+static inline int buffer_locked(struct buffer_head * bh)
+{
+ return test_bit(BH_Lock, &bh->b_state);
+}
+
+static inline int buffer_req(struct buffer_head * bh)
+{
+ return test_bit(BH_Req, &bh->b_state);
+}
+
+static inline int buffer_touched(struct buffer_head * bh)
+{
+ return test_bit(BH_Touched, &bh->b_state);
+}
+
+static inline int buffer_has_aged(struct buffer_head * bh)
+{
+ return test_bit(BH_Has_aged, &bh->b_state);
+}
+
+static inline int buffer_protected(struct buffer_head * bh)
+{
+ return test_bit(BH_Protected, &bh->b_state);
+}
+
+#ifndef MACH
+#include <linux/pipe_fs_i.h>
+#include <linux/minix_fs_i.h>
+#include <linux/ext_fs_i.h>
+#include <linux/ext2_fs_i.h>
+#include <linux/hpfs_fs_i.h>
+#include <linux/msdos_fs_i.h>
+#include <linux/umsdos_fs_i.h>
+#include <linux/iso_fs_i.h>
+#include <linux/nfs_fs_i.h>
+#include <linux/xia_fs_i.h>
+#include <linux/sysv_fs_i.h>
+#include <linux/affs_fs_i.h>
+#include <linux/ufs_fs_i.h>
+#endif
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+#define ATTR_ATIME_SET 128
+#define ATTR_MTIME_SET 256
+#define ATTR_FORCE 512 /* Not a change, but a change it */
+
+/*
+ * This is the Inode Attributes structure, used for notify_change(). It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about. Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ off_t ia_size;
+ time_t ia_atime;
+ time_t ia_mtime;
+ time_t ia_ctime;
+};
+
+#include <linux/quota.h>
+
+#ifdef MACH
+/* Suppress GCC's warnings. by OKUJI Yoshinori. */
+struct vm_area_struct;
+struct page;
+
+struct inode
+{
+ umode_t i_mode;
+ kdev_t i_rdev;
+};
+
+struct file
+{
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ int f_resid;
+ void *f_object;
+ void *f_np;
+};
+
+#else /* !MACH */
+
+struct inode {
+ kdev_t i_dev;
+ unsigned long i_ino;
+ umode_t i_mode;
+ nlink_t i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ kdev_t i_rdev;
+ off_t i_size;
+ time_t i_atime;
+ time_t i_mtime;
+ time_t i_ctime;
+ unsigned long i_blksize;
+ unsigned long i_blocks;
+ unsigned long i_version;
+ unsigned long i_nrpages;
+ struct semaphore i_sem;
+ struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct wait_queue *i_wait;
+ struct file_lock *i_flock;
+ struct vm_area_struct *i_mmap;
+ struct page *i_pages;
+ struct dquot *i_dquot[MAXQUOTAS];
+ struct inode *i_next, *i_prev;
+ struct inode *i_hash_next, *i_hash_prev;
+ struct inode *i_bound_to, *i_bound_by;
+ struct inode *i_mount;
+ unsigned long i_count;
+ unsigned short i_flags;
+ unsigned short i_writecount;
+ unsigned char i_lock;
+ unsigned char i_dirt;
+ unsigned char i_pipe;
+ unsigned char i_sock;
+ unsigned char i_seek;
+ unsigned char i_update;
+ unsigned char i_condemned;
+ union {
+ struct pipe_inode_info pipe_i;
+ struct minix_inode_info minix_i;
+ struct ext_inode_info ext_i;
+ struct ext2_inode_info ext2_i;
+ struct hpfs_inode_info hpfs_i;
+ struct msdos_inode_info msdos_i;
+ struct umsdos_inode_info umsdos_i;
+ struct iso_inode_info isofs_i;
+ struct nfs_inode_info nfs_i;
+ struct xiafs_inode_info xiafs_i;
+ struct sysv_inode_info sysv_i;
+ struct affs_inode_info affs_i;
+ struct ufs_inode_info ufs_i;
+ struct socket socket_i;
+ void * generic_ip;
+ } u;
+};
+
+struct fown_struct {
+ int pid; /* pid or -pgrp where SIGIO should be sent */
+ uid_t uid, euid; /* uid/euid of process setting the owner */
+};
+
+struct file {
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ unsigned short f_count;
+ unsigned long f_reada, f_ramax, f_raend, f_ralen, f_rawin;
+ struct file *f_next, *f_prev;
+ struct fown_struct f_owner;
+ struct inode * f_inode;
+ struct file_operations * f_op;
+ unsigned long f_version;
+ void *private_data; /* needed for tty driver, and maybe others */
+};
+#endif /* !MACH */
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_BROKEN 4 /* broken flock() emulation */
+#define FL_ACCESS 8 /* for processes suspended by mandatory locking */
+
+struct file_lock {
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_nextlink; /* doubly linked list of all locks */
+ struct file_lock *fl_prevlink; /* used to simplify lock removal */
+ struct file_lock *fl_nextblock; /* circular list of blocked processes */
+ struct file_lock *fl_prevblock;
+ struct task_struct *fl_owner;
+ struct wait_queue *fl_wait;
+ struct file *fl_file;
+ unsigned char fl_flags;
+ unsigned char fl_type;
+ off_t fl_start;
+ off_t fl_end;
+};
+
+#include <linux/fcntl.h>
+
+extern int fcntl_getlk(unsigned int fd, struct flock *l);
+extern int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l);
+extern void locks_remove_locks(struct task_struct *task, struct file *filp);
+
+#include <linux/stat.h>
+
+#define FLOCK_VERIFY_READ 1
+#define FLOCK_VERIFY_WRITE 2
+
+extern int locks_mandatory_locked(struct inode *inode);
+extern int locks_mandatory_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count);
+
+#ifndef MACH
+extern inline int locks_verify_locked(struct inode *inode)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_locked(inode));
+ return (0);
+}
+extern inline int locks_verify_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_area(read_write, inode, filp, offset,
+ count));
+ return (0);
+}
+#endif
+
+struct fasync_struct {
+ int magic;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+extern int fasync_helper(struct inode *, struct file *, int, struct fasync_struct **);
+
+#ifndef MACH
+#include <linux/minix_fs_sb.h>
+#include <linux/ext_fs_sb.h>
+#include <linux/ext2_fs_sb.h>
+#include <linux/hpfs_fs_sb.h>
+#include <linux/msdos_fs_sb.h>
+#include <linux/iso_fs_sb.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/xia_fs_sb.h>
+#include <linux/sysv_fs_sb.h>
+#include <linux/affs_fs_sb.h>
+#include <linux/ufs_fs_sb.h>
+
+struct super_block {
+ kdev_t s_dev;
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_lock;
+ unsigned char s_rd_only;
+ unsigned char s_dirt;
+ struct file_system_type *s_type;
+ struct super_operations *s_op;
+ struct dquot_operations *dq_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ unsigned long s_time;
+ struct inode * s_covered;
+ struct inode * s_mounted;
+ struct wait_queue * s_wait;
+ union {
+ struct minix_sb_info minix_sb;
+ struct ext_sb_info ext_sb;
+ struct ext2_sb_info ext2_sb;
+ struct hpfs_sb_info hpfs_sb;
+ struct msdos_sb_info msdos_sb;
+ struct isofs_sb_info isofs_sb;
+ struct nfs_sb_info nfs_sb;
+ struct xiafs_sb_info xiafs_sb;
+ struct sysv_sb_info sysv_sb;
+ struct affs_sb_info affs_sb;
+ struct ufs_sb_info ufs_sb;
+ void *generic_sbp;
+ } u;
+};
+#endif /* !MACH */
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+typedef int (*filldir_t)(void *, const char *, int, off_t, ino_t);
+
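+/*
+ * Illustrative sketch only (not part of the original header): a minimal
+ * filldir callback matching the typedef above copies one entry into a
+ * caller-supplied buffer; "struct my_dirent" is a hypothetical type.
+ *
+ *   static int fill_one(void *buf, const char *name, int namlen,
+ *                       off_t offset, ino_t ino)
+ *   {
+ *           struct my_dirent *d = buf;
+ *
+ *           memcpy(d->d_name, name, namlen);
+ *           d->d_name[namlen] = '\0';
+ *           d->d_ino = ino;
+ *           d->d_off = offset;
+ *           return 0;               (0 = continue, non-zero = stop)
+ *   }
+ */
+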
+struct file_operations {
+ int (*lseek) (struct inode *, struct file *, off_t, int);
+ int (*read) (struct inode *, struct file *, char *, int);
+ int (*write) (struct inode *, struct file *, const char *, int);
+ int (*readdir) (struct inode *, struct file *, void *, filldir_t);
+ int (*select) (struct inode *, struct file *, int, select_table *);
+ int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct inode *, struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ void (*release) (struct inode *, struct file *);
+ int (*fsync) (struct inode *, struct file *);
+ int (*fasync) (struct inode *, struct file *, int);
+ int (*check_media_change) (kdev_t dev);
+ int (*revalidate) (kdev_t dev);
+};
+
+struct inode_operations {
+ struct file_operations * default_file_ops;
+ int (*create) (struct inode *,const char *,int,int,struct inode **);
+ int (*lookup) (struct inode *,const char *,int,struct inode **);
+ int (*link) (struct inode *,struct inode *,const char *,int);
+ int (*unlink) (struct inode *,const char *,int);
+ int (*symlink) (struct inode *,const char *,int,const char *);
+ int (*mkdir) (struct inode *,const char *,int,int);
+ int (*rmdir) (struct inode *,const char *,int);
+ int (*mknod) (struct inode *,const char *,int,int,int);
+ int (*rename) (struct inode *,const char *,int,struct inode *,const char *,int, int);
+ int (*readlink) (struct inode *,char *,int);
+ int (*follow_link) (struct inode *,struct inode *,int,int,struct inode **);
+ int (*readpage) (struct inode *, struct page *);
+ int (*writepage) (struct inode *, struct page *);
+ int (*bmap) (struct inode *,int);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
+ int (*smap) (struct inode *,int);
+};
+
+struct super_operations {
+ void (*read_inode) (struct inode *);
+ int (*notify_change) (struct inode *, struct iattr *);
+ void (*write_inode) (struct inode *);
+ void (*put_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
+ void (*statfs) (struct super_block *, struct statfs *, int);
+ int (*remount_fs) (struct super_block *, int *, char *);
+};
+
+struct dquot_operations {
+ void (*initialize) (struct inode *, short);
+ void (*drop) (struct inode *);
+ int (*alloc_block) (const struct inode *, unsigned long);
+ int (*alloc_inode) (const struct inode *, unsigned long);
+ void (*free_block) (const struct inode *, unsigned long);
+ void (*free_inode) (const struct inode *, unsigned long);
+ int (*transfer) (struct inode *, struct iattr *, char);
+};
+
+struct file_system_type {
+ struct super_block *(*read_super) (struct super_block *, void *, int);
+ const char *name;
+ int requires_dev;
+ struct file_system_type * next;
+};
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+
+asmlinkage int sys_open(const char *, int, int);
+asmlinkage int sys_close(unsigned int); /* yes, it's really unsigned */
+asmlinkage int sys_read(unsigned int, char *, int);
+
+extern void kill_fasync(struct fasync_struct *fa, int sig);
+
+extern int getname(const char * filename, char **result);
+extern void putname(char * name);
+extern int do_truncate(struct inode *, unsigned long);
+extern int register_blkdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_blkdev(unsigned int major, const char * name);
+extern int blkdev_open(struct inode * inode, struct file * filp);
+extern void blkdev_release (struct inode * inode);
+extern struct file_operations def_blk_fops;
+extern struct inode_operations blkdev_inode_operations;
+
+extern int register_chrdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_chrdev(unsigned int major, const char * name);
+extern int chrdev_open(struct inode * inode, struct file * filp);
+extern struct file_operations def_chr_fops;
+extern struct inode_operations chrdev_inode_operations;
+
+extern void init_fifo(struct inode * inode);
+extern struct inode_operations fifo_inode_operations;
+
+extern struct file_operations connecting_fifo_fops;
+extern struct file_operations read_fifo_fops;
+extern struct file_operations write_fifo_fops;
+extern struct file_operations rdwr_fifo_fops;
+extern struct file_operations read_pipe_fops;
+extern struct file_operations write_pipe_fops;
+extern struct file_operations rdwr_pipe_fops;
+
+extern struct file_system_type *get_fs_type(const char *name);
+
+extern int fs_may_mount(kdev_t dev);
+extern int fs_may_umount(kdev_t dev, struct inode * mount_root);
+extern int fs_may_remount_ro(kdev_t dev);
+
+extern struct file *first_file;
+extern struct super_block *super_blocks;
+
+extern void refile_buffer(struct buffer_head * buf);
+extern void set_writetime(struct buffer_head * buf, int flag);
+extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**, int);
+
+extern int nr_buffers;
+extern int buffermem;
+extern int nr_buffer_heads;
+
+#define BUF_CLEAN 0
+#define BUF_LOCKED 1 /* Buffers scheduled for write */
+#define BUF_LOCKED1 2 /* Supers, inodes */
+#define BUF_DIRTY 3 /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST 4
+
+#ifdef MACH
+static inline void
+mark_buffer_uptodate (struct buffer_head *bh, int on)
+{
+ if (on)
+ set_bit (BH_Uptodate, &bh->b_state);
+ else
+ clear_bit (BH_Uptodate, &bh->b_state);
+}
+#else
+void mark_buffer_uptodate(struct buffer_head * bh, int on);
+#endif
+
+static inline void mark_buffer_clean(struct buffer_head * bh)
+{
+#ifdef MACH
+ clear_bit (BH_Dirty, &bh->b_state);
+#else
+ if (clear_bit(BH_Dirty, &bh->b_state)) {
+ if (bh->b_list == BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+static inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
+{
+#ifdef MACH
+ set_bit (BH_Dirty, &bh->b_state);
+#else
+ if (!set_bit(BH_Dirty, &bh->b_state)) {
+ set_writetime(bh, flag);
+ if (bh->b_list != BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+extern int check_disk_change(kdev_t dev);
+
+#ifdef MACH
+#define invalidate_inodes(dev)
+#else
+extern void invalidate_inodes(kdev_t dev);
+#endif
+
+extern void invalidate_inode_pages(struct inode *);
+
+#ifdef MACH
+#define invalidate_buffers(dev)
+#else
+extern void invalidate_buffers(kdev_t dev);
+#endif
+
+extern int floppy_is_wp(int minor);
+extern void sync_inodes(kdev_t dev);
+
+#ifdef MACH
+#define sync_dev(dev)
+#define fsync_dev(dev)
+#else
+extern void sync_dev(kdev_t dev);
+extern int fsync_dev(kdev_t dev);
+#endif
+
+extern void sync_supers(kdev_t dev);
+extern int bmap(struct inode * inode,int block);
+extern int notify_change(struct inode *, struct iattr *);
+extern int namei(const char * pathname, struct inode ** res_inode);
+extern int lnamei(const char * pathname, struct inode ** res_inode);
+
+#ifdef MACH
+#define permission(inode, mask) 0
+#else
+extern int permission(struct inode * inode,int mask);
+#endif
+
+extern int get_write_access(struct inode *inode);
+extern void put_write_access(struct inode *inode);
+extern int open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base);
+extern int do_mknod(const char * filename, int mode, dev_t dev);
+extern int do_pipe(int *);
+extern void iput(struct inode * inode);
+extern struct inode * __iget(struct super_block * sb,int nr,int crsmnt);
+extern struct inode * get_empty_inode(void);
+extern void insert_inode_hash(struct inode *);
+extern void clear_inode(struct inode *);
+extern struct inode * get_pipe_inode(void);
+extern void make_bad_inode(struct inode *);
+extern int get_unused_fd(void);
+extern void put_unused_fd(int);
+extern struct file * get_empty_filp(void);
+extern int close_fp(struct file *filp);
+extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size);
+extern struct buffer_head * getblk(kdev_t dev, int block, int size);
+extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[], int quiet);
+extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer);
+extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer);
+extern int is_read_only(kdev_t dev);
+extern void __brelse(struct buffer_head *buf);
+extern inline void brelse(struct buffer_head *buf)
+{
+ if (buf)
+ __brelse(buf);
+}
+extern void __bforget(struct buffer_head *buf);
+extern inline void bforget(struct buffer_head *buf)
+{
+ if (buf)
+ __bforget(buf);
+}
+extern void set_blocksize(kdev_t dev, int size);
+extern struct buffer_head * bread(kdev_t dev, int block, int size);
+extern struct buffer_head * breada(kdev_t dev,int block, int size,
+ unsigned int pos, unsigned int filesize);
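+
+/*
+ * Illustrative sketch only (not in the original header): the usual
+ * buffer-cache pattern is read, modify, mark dirty, release.  The
+ * device, block number and 1024-byte size below are placeholders.
+ *
+ *   struct buffer_head *bh = bread(dev, block, 1024);
+ *   if (!bh)
+ *           return -EIO;
+ *   ...                             modify bh->b_data here
+ *   mark_buffer_dirty(bh, 1);
+ *   brelse(bh);
+ */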
+
+extern int generic_readpage(struct inode *, struct page *);
+extern int generic_file_read(struct inode *, struct file *, char *, int);
+extern int generic_file_mmap(struct inode *, struct file *, struct vm_area_struct *);
+extern int brw_page(int, struct page *, kdev_t, int [], int, int);
+
+extern void put_super(kdev_t dev);
+unsigned long generate_cluster(kdev_t dev, int b[], int size);
+extern kdev_t ROOT_DEV;
+
+extern void show_buffers(void);
+extern void mount_root(void);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern kdev_t real_root_dev;
+extern int change_root(kdev_t new_root_dev,const char *put_old);
+#endif
+
+extern int char_read(struct inode *, struct file *, char *, int);
+extern int block_read(struct inode *, struct file *, char *, int);
+extern int read_ahead[];
+
+extern int char_write(struct inode *, struct file *, const char *, int);
+extern int block_write(struct inode *, struct file *, const char *, int);
+
+extern int block_fsync(struct inode *, struct file *);
+extern int file_fsync(struct inode *, struct file *);
+
+extern void dcache_add(struct inode *, const char *, int, unsigned long);
+extern int dcache_lookup(struct inode *, const char *, int, unsigned long *);
+
+extern int inode_change_ok(struct inode *, struct iattr *);
+extern void inode_setattr(struct inode *, struct iattr *);
+
+extern inline struct inode * iget(struct super_block * sb,int nr)
+{
+ return __iget(sb, nr, 1);
+}
+
+/* kludge to get SCSI modules working */
+#ifndef MACH
+#include <linux/minix_fs.h>
+#include <linux/minix_fs_sb.h>
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/dev/include/linux/genhd.h b/linux/dev/include/linux/genhd.h
new file mode 100644
index 0000000..f19015d
--- /dev/null
+++ b/linux/dev/include/linux/genhd.h
@@ -0,0 +1,208 @@
+#ifndef _LINUX_GENHD_H
+#define _LINUX_GENHD_H
+
+/*
+ * genhd.h Copyright (C) 1992 Drew Eckhardt
+ * Generic hard disk header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#include <linux/config.h>
+
+#define CONFIG_MSDOS_PARTITION 1
+
+#ifdef __alpha__
+#define CONFIG_OSF_PARTITION 1
+#endif
+
+#if defined(__sparc__) || defined(CONFIG_SMD_DISKLABEL)
+#define CONFIG_SUN_PARTITION 1
+#endif
+
+/* These three have identical behaviour; use the second one if DOS fdisk gets
+ confused about extended/logical partitions starting past cylinder 1023. */
+#define DOS_EXTENDED_PARTITION 5
+#define LINUX_EXTENDED_PARTITION 0x85
+#define WIN98_EXTENDED_PARTITION 0x0f
+
+#define DM6_PARTITION 0x54 /* has DDO: use xlated geom & offset */
+#define EZD_PARTITION 0x55 /* EZ-DRIVE: same as DM6 (we think) */
+#define DM6_AUX1PARTITION 0x51 /* no DDO: use xlated geom */
+#define DM6_AUX3PARTITION 0x53 /* no DDO: use xlated geom */
+
+#ifdef MACH_INCLUDE
+struct linux_partition
+{
+#else
+struct partition {
+#endif
+ unsigned char boot_ind; /* 0x80 - active */
+ unsigned char head; /* starting head */
+ unsigned char sector; /* starting sector */
+ unsigned char cyl; /* starting cylinder */
+ unsigned char sys_ind; /* What partition type */
+ unsigned char end_head; /* end head */
+ unsigned char end_sector; /* end sector */
+ unsigned char end_cyl; /* end cylinder */
+ unsigned int start_sect; /* starting sector counting from 0 */
+ unsigned int nr_sects; /* nr of sectors in partition */
+} __attribute((packed)); /* Give a polite hint to egcs/alpha to generate
+ unaligned operations */
+
+struct hd_struct {
+ long start_sect;
+ long nr_sects;
+};
+
+struct gendisk {
+ int major; /* major number of driver */
+ const char *major_name; /* name of major driver */
+ int minor_shift; /* number of times minor is shifted to
+ get real minor */
+ int max_p; /* maximum partitions per device */
+ int max_nr; /* maximum number of real devices */
+
+ void (*init)(struct gendisk *); /* Initialization called before we do our thing */
+ struct hd_struct *part; /* partition table */
+ int *sizes; /* device size in blocks, copied to blk_size[] */
+ int nr_real; /* number of real devices */
+
+ void *real_devices; /* internal use */
+ struct gendisk *next;
+};
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
+ */
+
+#define BSD_PARTITION 0xa5 /* Partition ID */
+
+#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
+#define BSD_MAXPARTITIONS 8
+#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
+struct bsd_disklabel {
+ __u32 d_magic; /* the magic number */
+ __s16 d_type; /* drive type */
+ __s16 d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ char d_packname[16]; /* pack identifier */
+ __u32 d_secsize; /* # of bytes per sector */
+ __u32 d_nsectors; /* # of data sectors per track */
+ __u32 d_ntracks; /* # of tracks per cylinder */
+ __u32 d_ncylinders; /* # of data cylinders per unit */
+ __u32 d_secpercyl; /* # of data sectors per cylinder */
+ __u32 d_secperunit; /* # of data sectors per unit */
+ __u16 d_sparespertrack; /* # of spare sectors per track */
+ __u16 d_sparespercyl; /* # of spare sectors per cylinder */
+ __u32 d_acylinders; /* # of alt. cylinders per unit */
+ __u16 d_rpm; /* rotational speed */
+ __u16 d_interleave; /* hardware sector interleave */
+ __u16 d_trackskew; /* sector 0 skew, per track */
+ __u16 d_cylskew; /* sector 0 skew, per cylinder */
+ __u32 d_headswitch; /* head switch time, usec */
+ __u32 d_trkseek; /* track-to-track seek, usec */
+ __u32 d_flags; /* generic flags */
+#define NDDATA 5
+ __u32 d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ __u32 d_spare[NSPARE]; /* reserved for future use */
+ __u32 d_magic2; /* the magic number (again) */
+ __u16 d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ __u16 d_npartitions; /* number of partitions in following */
+ __u32 d_bbsize; /* size of boot area at sn0, bytes */
+ __u32 d_sbsize; /* max size of fs superblock, bytes */
+ struct bsd_partition { /* the partition table */
+ __u32 p_size; /* number of sectors in partition */
+ __u32 p_offset; /* starting sector */
+ __u32 p_fsize; /* filesystem basic fragment size */
+ __u8 p_fstype; /* filesystem type, see below */
+ __u8 p_frag; /* filesystem fragments per block */
+ __u16 p_cpg; /* filesystem cylinders per group */
+ } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
+};
+
+#endif /* CONFIG_BSD_DISKLABEL */
+
+#ifdef CONFIG_GPT_DISKLABEL
+/*
+ * GPT disklabel support by наб <nabijaczleweli@gmail.com>
+ *
+ * Based on UEFI specification 2.8A (current as of May 2020):
+ * https://uefi.org/specifications
+ * https://uefi.org/sites/default/files/resources/UEFI_Spec_2_8_A_Feb14.pdf
+ *
+ * CRC32 behaviour (final ^ ~0) courtesy of util-linux documentation:
+ * https://git.kernel.org/pub/scm/utils/util-linux/util-linux.git/tree/libblkid/src/partitions/gpt.c?id=042f62dfc514da177c148c257e4dcb32e5f8379d#n104
+ */
+
+#define GPT_PARTITION 0xee /* Partition ID in MBR */
+
+#define GPT_GUID_SIZE 16
+struct gpt_guid {
+ __u32 g_time_low; /* Low field of timestamp */
+ __u16 g_time_mid; /* Medium field of timestamp */
+ __u16 g_time_high_version; /* High field of timestamp and version */
+ __u8 g_clock_sec_high; /* High field of clock sequence and variant */
+ __u8 g_clock_sec_low; /* Low field of clock sequence */
+ __u8 g_node_id[6]; /* Spatially unique node identifier (MAC address or urandom) */
+} __attribute((packed));
+typedef char __gpt_guid_right_size[(sizeof(struct gpt_guid) == GPT_GUID_SIZE) ? 1 : -1];
+
+static const struct gpt_guid GPT_GUID_TYPE_UNUSED = {0,0,0,0,0,{0,0,0,0,0,0}};
+
+#define GPT_SIGNATURE "EFI PART" /* The header signature */
+#define GPT_REVISION (0x00010000UL) /* Little-endian on disk */
+#define GPT_HEADER_SIZE 92
+#define GPT_MAXPARTITIONS 128
+struct gpt_disklabel_header {
+ char h_signature[8]; /* Must match GPT_SIGNATURE */
+ __u32 h_revision; /* Disklabel revision, must match GPT_REVISION */
+ __u32 h_header_size; /* Must match GPT_HEADER_SIZE */
+ __u32 h_header_crc; /* CRC32 of header, zero for calculation */
+ __u32 h_reserved; /* Must be zero */
+ __u64 h_lba_current; /* LBA of this copy of the header */
+ __u64 h_lba_backup; /* LBA of the second (backup) copy of the header */
+ __u64 h_lba_usable_first; /* First usable LBA for partitions (last LBA of primary table + 1) */
+ __u64 h_lba_usable_last; /* Last usable LBA for partitions (first LBA of secondary table - 1) */
+ struct gpt_guid h_guid; /* ID of the disk */
+ __u64 h_part_table_lba; /* First LBA of the partition table (usually 2 in primary header) */
+ __u32 h_part_table_len; /* Amount of entries in the partition table */
+ __u32 h_part_table_entry_size; /* Size of each partition entry (usually 128) */
+ __u32 h_part_table_crc; /* CRC32 of entire partition table, starts at h_part_table_lba, is h_part_table_len*h_part_table_entry_size long */
+ /* Rest of block must be zero */
+} __attribute((packed));
+typedef char __gpt_header_right_size[(sizeof(struct gpt_disklabel_header) == GPT_HEADER_SIZE) ? 1 : -1];
+
+/* 3-47: reserved; 48-63: defined for individual partition types. */
+#define GPT_PARTITION_ATTR_PLATFORM_REQUIRED (1ULL << 0) /* Required by the platform to function */
+#define GPT_PARTITION_ATTR_EFI_IGNORE (1ULL << 1) /* To be ignored by the EFI firmware */
+#define GPT_PARTITION_ATTR_BIOS_BOOTABLE (1ULL << 2) /* Equivalent to MBR active flag */
+
+#define GPT_PARTITION_ENTRY_SIZE 128 /* Minimum size, implementations must respect bigger vendor-specific entries */
+struct gpt_disklabel_part {
+ struct gpt_guid p_type; /* Partition type GUID */
+ struct gpt_guid p_guid; /* ID of the partition */
+ __u64 p_lba_first; /* First LBA of the partition */
+ __u64 p_lba_last; /* Last LBA of the partition */
+ __u64 p_attrs; /* Partition attribute bitfield, see above */
+ __u16 p_name[36]; /* Display name of partition, UTF-16 */
+} __attribute((packed));
+typedef char __gpt_part_entry_right_size[(sizeof(struct gpt_disklabel_part) == GPT_PARTITION_ENTRY_SIZE) ? 1 : -1];
+#endif /* CONFIG_GPT_DISKLABEL */
+
+extern struct gendisk *gendisk_head; /* linked list of disks */
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the device name of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf);
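+
+/*
+ * Illustrative only ("hd" and "minor" are placeholders): a partition
+ * scan can format each name into a short local buffer, e.g.
+ *
+ *   char buf[32];
+ *   printk(" %s", disk_name(hd, minor, buf));
+ */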
+
+#endif
diff --git a/linux/dev/include/linux/if.h b/linux/dev/include/linux/if.h
new file mode 100644
index 0000000..50dd138
--- /dev/null
+++ b/linux/dev/include/linux/if.h
@@ -0,0 +1,184 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the INET interface module.
+ *
+ * Version: @(#)if.h 1.0.2 04/18/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+
+/* Standard interface flags. */
+
+#ifdef MACH_INCLUDE
+
+#define LINUX_IFF_UP 0x1 /* interface is up */
+#define LINUX_IFF_BROADCAST 0x2 /* broadcast address valid */
+#define LINUX_IFF_DEBUG 0x4 /* turn on debugging */
+#define LINUX_IFF_LOOPBACK 0x8 /* is a loopback net */
+#define LINUX_IFF_POINTOPOINT 0x10 /* interface has p-p link */
+#define LINUX_IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define LINUX_IFF_RUNNING 0x40 /* resources allocated */
+#define LINUX_IFF_NOARP 0x80 /* no ARP protocol */
+#define LINUX_IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define LINUX_IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define LINUX_IFF_MASTER 0x400 /* master of a load balancer */
+#define LINUX_IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define LINUX_IFF_MULTICAST 0x1000 /* Supports multicast */
+#define LINUX_IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+
+#else /* !MACH_INCLUDE */
+
+#define IFF_UP 0x1 /* interface is up */
+#define IFF_BROADCAST 0x2 /* broadcast address valid */
+#define IFF_DEBUG 0x4 /* turn on debugging */
+#define IFF_LOOPBACK 0x8 /* is a loopback net */
+#define IFF_POINTOPOINT 0x10 /* interface has p-p link */
+#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_NOARP 0x80 /* no ARP protocol */
+#define IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define IFF_MASTER 0x400 /* master of a load balancer */
+#define IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define IFF_MULTICAST 0x1000 /* Supports multicast */
+#define IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+#endif /* !MACH_INCLUDE */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address
+ * families, are allocated and attached when an address is set,
+ * and are linked together so all addresses for an interface can
+ * be located.
+ */
+
+struct ifaddr
+{
+ struct sockaddr ifa_addr; /* address of interface */
+ union {
+ struct sockaddr ifu_broadaddr;
+ struct sockaddr ifu_dstaddr;
+ } ifa_ifu;
+ struct iface *ifa_ifp; /* back-pointer to interface */
+ struct ifaddr *ifa_next; /* next address for interface */
+};
+
+#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */
+#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */
+
+/*
+ * Device mapping structure. I'd just gone off and designed a
+ * beautiful scheme using only loadable modules with arguments
+ * for driver options and along come the PCMCIA people 8)
+ *
+ * Ah well. The get() side of this is good for WDSETUP, and it'll
+ * be handy for debugging things. The set side is fine for now and
+ * being very small might be worth keeping for clean configuration.
+ */
+
+struct ifmap
+{
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+ /* 3 bytes spare */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+
+struct ifreq
+{
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_map ifr_ifru.ifru_map /* device map */
+#define ifr_slave ifr_ifru.ifru_slave /* slave device */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
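+
+/*
+ * Sketch, not part of the original header: a driver ioctl handler
+ * normally reads the request through the aliases above; "ifr" and
+ * "dev" are placeholder variables.
+ *
+ *   if (ifr->ifr_mtu < 68)
+ *           return -EINVAL;
+ *   dev->mtu = ifr->ifr_mtu;
+ */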
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for machine (useful for programs which
+ * must know all networks accessible).
+ */
+
+struct ifconf
+{
+ int ifc_len; /* size of buffer */
+ union
+ {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
+
+#endif /* _LINUX_IF_H */
diff --git a/linux/dev/include/linux/kernel.h b/linux/dev/include/linux/kernel.h
new file mode 100644
index 0000000..9c60b41
--- /dev/null
+++ b/linux/dev/include/linux/kernel.h
@@ -0,0 +1,107 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often-used function prototypes etc
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+
+#define INT_MAX ((int)(~0U>>1))
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define ULONG_MAX (~0UL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+
+# define NORET_TYPE /**/
+# define ATTRIB_NORET __attribute__((noreturn))
+# define NORET_AND noreturn,
+
+extern void math_error(void);
+
+/* Use Mach's panic. */
+#include <kern/debug.h>
+
+NORET_TYPE void do_exit(long error_code)
+ ATTRIB_NORET;
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+
+extern int linux_sprintf(char *buf, const char *fmt, ...);
+extern int linux_vsprintf(char *buf, const char *fmt, va_list args);
+
+#ifndef MACH_INCLUDE
+#define sprintf linux_sprintf
+#define vsprintf linux_vsprintf
+#endif
+
+extern int session_of_pgrp(int pgrp);
+
+extern int kill_proc(int pid, int sig, int priv);
+extern int kill_pg(int pgrp, int sig, int priv);
+extern int kill_sl(int sess, int sig, int priv);
+
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+#if DEBUG
+#define pr_debug(fmt,arg...) \
+ printk(KERN_DEBUG fmt,##arg)
+#else
+#define pr_debug(fmt,arg...) \
+ do { } while (0)
+#endif
+
+#define pr_info(fmt,arg...) \
+ printk(KERN_INFO fmt,##arg)
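+
+/*
+ * Example use (illustrative only; "dev" and "status" are placeholders);
+ * pr_debug compiles away unless DEBUG is set:
+ *
+ *   pr_info("%s: 10baseT link beat detected\n", dev->name);
+ *   pr_debug("status register %#4.4x\n", status);
+ */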
+
+/*
+ * "suser()" checks against the effective user id, while "fsuser()"
+ * is used for file permission checking and checks against the fsuid..
+ */
+#ifdef MACH
+#define fsuser() 1
+#else
+#define fsuser() (current->fsuid == 0)
+#endif
+
+/*
+ * Display an IP address in readable format.
+ */
+
+#define NIPQUAD(addr) \
+ (((addr) >> 0) & 0xff), \
+ (((addr) >> 8) & 0xff), \
+ (((addr) >> 16) & 0xff), \
+ (((addr) >> 24) & 0xff)
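+
+/*
+ * NIPQUAD expands to four comma-separated byte values, so the matching
+ * format string needs four conversions (illustrative only):
+ *
+ *   printk(KERN_INFO "host %d.%d.%d.%d unreachable\n", NIPQUAD(addr));
+ */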
+
+#endif /* __KERNEL__ */
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ long uptime; /* Seconds since boot */
+ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
+ unsigned long totalram; /* Total usable main memory size */
+ unsigned long freeram; /* Available memory size */
+ unsigned long sharedram; /* Amount of shared memory */
+ unsigned long bufferram; /* Memory used by buffers */
+ unsigned long totalswap; /* Total swap space size */
+ unsigned long freeswap; /* swap space still available */
+ unsigned short procs; /* Number of current processes */
+ char _f[22]; /* Pads structure to 64 bytes */
+};
+
+#endif
diff --git a/linux/dev/include/linux/locks.h b/linux/dev/include/linux/locks.h
new file mode 100644
index 0000000..ae063fb
--- /dev/null
+++ b/linux/dev/include/linux/locks.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_LOCKS_H
+#define _LINUX_LOCKS_H
+
+#ifndef _LINUX_MM_H
+#include <linux/mm.h>
+#endif
+#ifndef _LINUX_PAGEMAP_H
+#include <linux/pagemap.h>
+#endif
+
+/*
+ * Unlocked, temporary IO buffer_heads get moved to the reuse_list
+ * once their page becomes unlocked.
+ */
+extern struct buffer_head *reuse_list;
+
+/*
+ * Buffer cache locking - note that interrupts may only unlock, not
+ * lock buffers.
+ */
+extern void __wait_on_buffer(struct buffer_head *);
+
+static inline void wait_on_buffer(struct buffer_head * bh)
+{
+ if (test_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+static inline void lock_buffer(struct buffer_head * bh)
+{
+ while (set_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+void unlock_buffer(struct buffer_head *);
+
+#ifndef MACH
+/*
+ * super-block locking. Again, interrupts may only unlock
+ * a super-block (although even this isn't done right now;
+ * nfs may need it).
+ */
+extern void __wait_on_super(struct super_block *);
+
+static inline void wait_on_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+}
+
+static inline void lock_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+ sb->s_lock = 1;
+}
+
+static inline void unlock_super(struct super_block * sb)
+{
+ sb->s_lock = 0;
+ wake_up(&sb->s_wait);
+}
+#endif /* !MACH */
+
+#endif /* _LINUX_LOCKS_H */
+
diff --git a/linux/dev/include/linux/malloc.h b/linux/dev/include/linux/malloc.h
new file mode 100644
index 0000000..50d8114
--- /dev/null
+++ b/linux/dev/include/linux/malloc.h
@@ -0,0 +1,18 @@
+#ifndef _LINUX_MALLOC_H
+#define _LINUX_MALLOC_H
+
+#include <linux/mm.h>
+#include <asm/cache.h>
+
+#ifndef MACH_INCLUDE
+#define kmalloc linux_kmalloc
+#define kfree linux_kfree
+#define kfree_s linux_kfree_s
+#endif
+
+extern void *linux_kmalloc (unsigned int size, int priority);
+extern void linux_kfree (void *obj);
+
+#define linux_kfree_s(a,b) linux_kfree(a)
+
+#endif /* _LINUX_MALLOC_H */
diff --git a/linux/dev/include/linux/mm.h b/linux/dev/include/linux/mm.h
new file mode 100644
index 0000000..b0c3ab0
--- /dev/null
+++ b/linux/dev/include/linux/mm.h
@@ -0,0 +1,378 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+extern unsigned long high_memory;
+
+#include <asm/page.h>
+#include <asm/atomic.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+extern int verify_area(int, const void *, unsigned long);
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a memory VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* VM area parameters */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ pgprot_t vm_page_prot;
+ unsigned short vm_flags;
+/* AVL tree of VM areas per task, sorted by address */
+ short vm_avl_height;
+ struct vm_area_struct * vm_avl_left;
+ struct vm_area_struct * vm_avl_right;
+/* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct * vm_next;
+/* for areas with inode, the circular list inode->i_mmap */
+/* for shm areas, the circular list of attaches */
+/* otherwise unused */
+ struct vm_area_struct * vm_next_share;
+ struct vm_area_struct * vm_prev_share;
+/* more */
+ struct vm_operations_struct * vm_ops;
+ unsigned long vm_offset;
+ struct inode * vm_inode;
+ unsigned long vm_pte; /* shared mem */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x0001 /* currently active flags */
+#define VM_WRITE 0x0002
+#define VM_EXEC 0x0004
+#define VM_SHARED 0x0008
+
+#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x0020
+#define VM_MAYEXEC 0x0040
+#define VM_MAYSHARE 0x0080
+
+#define VM_GROWSDOWN 0x0100 /* general info on the segment */
+#define VM_GROWSUP 0x0200
+#define VM_SHM 0x0400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x1000
+#define VM_LOCKED 0x2000
+
+#define VM_STACK_FLAGS 0x0177
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
+ void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
+ int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
+ void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
+ unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
+ unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
+ unsigned long page);
+ int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
+ pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
+};
+
+/*
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in page cache lookup, the second line
+ * is used for linear searches (eg. clock algorithm scans).
+ */
+typedef struct page {
+ /* these must be first (free area handling) */
+ struct page *next;
+ struct page *prev;
+ struct inode *inode;
+ unsigned long offset;
+ struct page *next_hash;
+ atomic_t count;
+ unsigned flags; /* atomic flags, some possibly updated asynchronously */
+ unsigned dirty:16,
+ age:8;
+ struct wait_queue *wait;
+ struct page *prev_hash;
+ struct buffer_head * buffers;
+ unsigned long swap_unlock_entry;
+ unsigned long map_nr; /* page->map_nr == page - mem_map */
+} mem_map_t;
+
+/* Page flag bit values */
+#define PG_locked 0
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+#define PG_free_after 4
+#define PG_decr_after 5
+#define PG_swap_unlock_after 6
+#define PG_DMA 7
+#define PG_reserved 31
+
+/* Make it prettier to test the above... */
+#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
+#define PageError(page) (test_bit(PG_error, &(page)->flags))
+#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
+#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
+#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
+#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
+#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
+#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
+#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
+#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
+
+/*
+ * page->reserved denotes a page which must never be accessed (which
+ * may not even be present).
+ *
+ * page->dma is set for those pages which lie in the range of
+ * physical addresses capable of carrying DMA transfers.
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used for kmalloc() or anyone else who does a
+ * get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->inode is the inode, and page->offset is the file offset
+ * of the page (not necessarily a multiple of PAGE_SIZE).
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page.
+ *
+ * All pages belonging to an inode make up a doubly linked list
+ * inode->i_pages, using the fields page->next and page->prev. (These
+ * fields are also used for freelist management when page->count==0.)
+ * There is also a hash table mapping (inode,offset) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->prev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, page->locked is true. This bit is set before I/O
+ * and reset when I/O completes. page->wait is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * page->uptodate tells whether the page's contents are valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ * When a write completes, and page->free_after is true, the page is
+ * freed without any further delay.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * page->referenced bit, which is set any time the system accesses
+ * that page through the (inode,offset) hash table.
+ * There is also the page->age counter, which implements a linear
+ * decay (why not an exponential decay?), see swapctl.h.
+ */
+
+extern mem_map_t * mem_map;
+
+/*
+ * This is timing-critical - most of the time in getting a new page
+ * goes to clearing the page. If you want a page without the clearing
+ * overhead, just use __get_free_page() directly..
+ */
+#define __get_free_page(priority) __get_free_pages((priority),0,0)
+#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
+extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
+
+static inline unsigned long get_free_page(int priority)
+{
+ unsigned long page;
+
+ page = __get_free_page(priority);
+ if (page)
+ memset((void *) page, 0, PAGE_SIZE);
+ return page;
+}
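+
+/*
+ * Typical use (sketch only): grab a zeroed page at the desired priority
+ * (GFP_KERNEL is defined further down) and release it with the matching
+ * free_page() declared below.
+ *
+ *   unsigned long page = get_free_page(GFP_KERNEL);
+ *   if (!page)
+ *           return -ENOMEM;
+ *   ...
+ *   free_page(page);
+ */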
+
+/* memory.c & swap.c*/
+
+#define free_page(addr) free_pages((addr),0)
+extern void free_pages(unsigned long addr, unsigned long order);
+extern void __free_page(struct page *);
+
+extern void show_free_areas(void);
+extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
+ unsigned long address);
+
+extern void free_page_tables(struct mm_struct * mm);
+extern void clear_page_tables(struct task_struct * tsk);
+extern int new_page_tables(struct task_struct * tsk);
+extern int copy_page_tables(struct task_struct * to);
+
+extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern void vmtruncate(struct inode * inode, unsigned long offset);
+extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
+extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+
+extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
+extern void mem_init(unsigned long start_mem, unsigned long end_mem);
+extern void show_mem(void);
+extern void oom(struct task_struct * tsk);
+extern void si_meminfo(struct sysinfo * val);
+
+/* vmalloc.c */
+
+extern void * vmalloc(unsigned long size);
+extern void * vremap(unsigned long offset, unsigned long size);
+extern void vfree(void * addr);
+extern int vread(char *buf, char *addr, int count);
+extern unsigned long vmtophys (void *);
+
+/* mmap.c */
+extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long off);
+extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
+extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void remove_shared_vm_struct(struct vm_area_struct *);
+extern void build_mmap_avl(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+extern int do_munmap(unsigned long, size_t);
+extern unsigned long get_unmapped_area(unsigned long, unsigned long);
+
+/* filemap.c */
+extern unsigned long page_unuse(unsigned long);
+extern int shrink_mmap(int, int, int);
+extern void truncate_inode_pages(struct inode *, unsigned long);
+
+#define GFP_BUFFER 0x00
+#define GFP_ATOMIC 0x01
+#define GFP_USER 0x02
+#define GFP_KERNEL 0x03
+#define GFP_NOBUFFER 0x04
+#define GFP_NFS 0x05
+#define GFP_IO 0x06
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA 0x80
+
+#define GFP_LEVEL_MASK 0xf
+
+#ifndef MACH
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ address &= PAGE_MASK;
+ grow = vma->vm_start - address;
+ if (vma->vm_end - address
+ > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
+ (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
+ > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+ vma->vm_start = address;
+ vma->vm_offset -= grow;
+ vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
+ return 0;
+}
+
+#define avl_empty (struct vm_area_struct *) NULL
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct * result = NULL;
+
+ if (mm) {
+ struct vm_area_struct * tree = mm->mmap_avl;
+ for (;;) {
+ if (tree == avl_empty)
+ break;
+ if (tree->vm_end > addr) {
+ result = tree;
+ if (tree->vm_start <= addr)
+ break;
+ tree = tree->vm_avl_left;
+ } else
+ tree = tree->vm_avl_right;
+ }
+ }
+ return result;
+}
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma;
+
+ vma = find_vma(mm,start_addr);
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
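+
+/*
+ * Illustrative sketch (caller code, not from this header): an address
+ * range check can ask whether anything is mapped in [start, start+len);
+ * "start" and "len" are placeholders.
+ *
+ *   struct vm_area_struct *vma;
+ *
+ *   vma = find_vma_intersection(current->mm, start, start + len);
+ *   if (!vma)
+ *           return -EFAULT;
+ */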
+#endif /* !MACH */
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/dev/include/linux/modversions.h b/linux/dev/include/linux/modversions.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/linux/modversions.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/netdevice.h b/linux/dev/include/linux/netdevice.h
new file mode 100644
index 0000000..e1a9a34
--- /dev/null
+++ b/linux/dev/include/linux/netdevice.h
@@ -0,0 +1,339 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.11 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@super.org>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ * Added extern for fddi_setup()
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <linux/config.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+
+/* for future expansion when we will have different priorities. */
+#define DEV_NUMBUFFS 3
+#define MAX_ADDR_LEN 7
+#ifndef CONFIG_AX25
+#ifndef CONFIG_TR
+#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE)
+#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
+#else
+#define MAX_HEADER 80 /* We need to allow for having tunnel headers */
+#endif /* IPIP */
+#else
+#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
+#endif /* TR */
+#else
+#define MAX_HEADER 96 /* AX.25 + NetROM */
+#endif /* AX25 */
+
+#define IS_MYADDR 1 /* address is (one of) our own */
+#define IS_LOOPBACK 2 /* address is for LOOPBACK */
+#define IS_BROADCAST 3 /* address is a valid broadcast */
+#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
+#define IS_MULTICAST 5 /* Multicast IP address */
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+
+/*
+ * We tag multicasts with these structures.
+ */
+
+struct dev_mc_list
+{
+ struct dev_mc_list *next;
+ char dmi_addr[MAX_ADDR_LEN];
+ unsigned short dmi_addrlen;
+ unsigned short dmi_users;
+};
+
+struct hh_cache
+{
+ struct hh_cache *hh_next;
+ void *hh_arp; /* Opaque pointer, used by
+ * any address resolution module,
+ * not only ARP.
+ */
+ int hh_refcnt; /* number of users */
+ unsigned short hh_type; /* protocol identifier, e.g. ETH_P_IP */
+ char hh_uptodate; /* hh_data is valid */
+ char hh_data[16]; /* cached hardware header */
+};
+
+/*
+ * The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ */
+
+#ifdef MACH
+
+#ifndef MACH_INCLUDE
+#define device linux_device
+#endif
+
+struct linux_device
+
+#else
+
+struct device
+
+#endif
+{
+
+ /*
+ * This is the first field of the "visible" part of this structure
+ * (i.e. as seen by users in the "Space.c" file). It is the name of
+ * the interface.
+ */
+ char *name;
+
+ /* I/O specific fields - FIXME: Merge these and struct ifmap into one */
+ unsigned long rmem_end; /* shmem "recv" end */
+ unsigned long rmem_start; /* shmem "recv" start */
+ unsigned long mem_end; /* shared mem end */
+ unsigned long mem_start; /* shared mem start */
+ unsigned long base_addr; /* device I/O address */
+ unsigned char irq; /* device IRQ number */
+
+ /* Low-level status flags. */
+ volatile unsigned char start, /* start an operation */
+ interrupt; /* interrupt arrived */
+ unsigned long tbusy; /* transmitter busy must be long for bitops */
+
+ struct linux_device *next;
+
+ /* The device initialization function. Called only once. */
+ int (*init)(struct linux_device *dev);
+
+ /* Some hardware also needs these fields, but they are not part of the
+ usual set specified in Space.c. */
+ unsigned char if_port; /* Selectable AUI, TP,..*/
+ unsigned char dma; /* DMA channel */
+
+ struct enet_statistics* (*get_stats)(struct linux_device *dev);
+
+ /*
+ * This marks the end of the "visible" part of the structure. All
+ * fields hereafter are internal to the system, and may change at
+ * will (read: may be cleaned up at will).
+ */
+
+ /* These may be needed for future network-power-down code. */
+ unsigned long trans_start; /* Time (in jiffies) of last Tx */
+ unsigned long last_rx; /* Time of last Rx */
+
+ unsigned short flags; /* interface flags (a la BSD) */
+ unsigned short family; /* address family ID (AF_INET) */
+ unsigned short metric; /* routing metric (not used) */
+ unsigned short mtu; /* interface MTU value */
+ unsigned short type; /* interface hardware type */
+ unsigned short hard_header_len; /* hardware hdr length */
+ void *priv; /* pointer to private data */
+
+ /* Interface address info. */
+ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+ unsigned char pad; /* make dev_addr aligned to 8 bytes */
+ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
+ unsigned char addr_len; /* hardware address length */
+ unsigned long pa_addr; /* protocol address */
+ unsigned long pa_brdaddr; /* protocol broadcast addr */
+ unsigned long pa_dstaddr; /* protocol P-P other side addr */
+ unsigned long pa_mask; /* protocol netmask */
+ unsigned short pa_alen; /* protocol address length */
+
+ struct dev_mc_list *mc_list; /* Multicast mac addresses */
+ int mc_count; /* Number of installed mcasts */
+
+ struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */
+ __u32 tx_queue_len; /* Max frames per queue allowed */
+
+ /* For load balancing driver pair support */
+
+ unsigned long pkt_queue; /* Packets queued */
+ struct linux_device *slave; /* Slave device */
+ struct net_alias_info *alias_info; /* main dev alias info */
+ struct net_alias *my_alias; /* alias devs */
+
+ /* Pointer to the interface buffers. */
+ struct sk_buff_head buffs[DEV_NUMBUFFS];
+
+ /* Pointers to interface service routines. */
+ int (*open)(struct linux_device *dev);
+ int (*stop)(struct linux_device *dev);
+ int (*hard_start_xmit) (struct sk_buff *skb,
+ struct linux_device *dev);
+ int (*hard_header) (struct sk_buff *skb,
+ struct linux_device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+ int (*rebuild_header)(void *eth, struct linux_device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+#define HAVE_MULTICAST
+ void (*set_multicast_list)(struct linux_device *dev);
+#define HAVE_SET_MAC_ADDR
+ int (*set_mac_address)(struct linux_device *dev, void *addr);
+#define HAVE_PRIVATE_IOCTL
+ int (*do_ioctl)(struct linux_device *dev, struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+ int (*set_config)(struct linux_device *dev, struct ifmap *map);
+#define HAVE_HEADER_CACHE
+ void (*header_cache_bind)(struct hh_cache **hhp, struct linux_device *dev, unsigned short htype, __u32 daddr);
+ void (*header_cache_update)(struct hh_cache *hh, struct linux_device *dev, unsigned char * haddr);
+#define HAVE_CHANGE_MTU
+ int (*change_mtu)(struct linux_device *dev, int new_mtu);
+
+ struct iw_statistics* (*get_wireless_stats)(struct linux_device *dev);
+
+#ifdef MACH
+
+#ifdef MACH_INCLUDE
+ struct net_data *net_data;
+#else
+ void *net_data;
+#endif
+
+#endif
+};
+
+
+struct packet_type {
+ unsigned short type; /* This is really htons(ether_type). */
+ struct linux_device * dev;
+ int (*func) (struct sk_buff *, struct linux_device *,
+ struct packet_type *);
+ void *data;
+ struct packet_type *next;
+};
+
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+
+/* Used by dev_rint */
+#define IN_SKBUFF 1
+
+extern volatile unsigned long in_bh;
+
+extern struct linux_device loopback_dev;
+extern struct linux_device *dev_base;
+extern struct packet_type *ptype_base[16];
+
+
+extern int ip_addr_match(unsigned long addr1, unsigned long addr2);
+extern int ip_chk_addr(unsigned long addr);
+extern struct linux_device *ip_dev_bynet(unsigned long daddr, unsigned long mask);
+extern unsigned long ip_my_addr(void);
+extern unsigned long ip_get_mask(unsigned long addr);
+extern struct linux_device *ip_dev_find(unsigned long addr);
+extern struct linux_device *dev_getbytype(unsigned short type);
+
+extern void dev_add_pack(struct packet_type *pt);
+extern void dev_remove_pack(struct packet_type *pt);
+extern struct linux_device *dev_get(const char *name);
+extern int dev_open(struct linux_device *dev);
+extern int dev_close(struct linux_device *dev);
+extern void dev_queue_xmit(struct sk_buff *skb, struct linux_device *dev,
+ int pri);
+
+#define HAVE_NETIF_RX 1
+extern void netif_rx(struct sk_buff *skb);
+extern void net_bh(void);
+
+#ifdef MACH
+#define dev_tint(dev)
+#else
+extern void dev_tint(struct linux_device *dev);
+#endif
+
+extern int dev_change_flags(struct linux_device *dev, short flags);
+extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int dev_ioctl(unsigned int cmd, void *);
+
+extern void dev_init(void);
+
+/* Locking protection for page faults during outputs to devices unloaded during the fault */
+
+extern int dev_lockct;
+
+/*
+ * These two don't currently need to be interrupt-safe
+ * but they may need to be soon. Do it properly anyway.
+ */
+
+extern __inline__ void dev_lock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct++;
+ restore_flags(flags);
+}
+
+extern __inline__ void dev_unlock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct--;
+ restore_flags(flags);
+}
+
+/*
+ * This almost never occurs and isn't in a performance-critical path,
+ * so we can afford to be relaxed about it
+ */
+
+extern __inline__ void dev_lock_wait(void)
+{
+ while(dev_lockct)
+ schedule();
+}
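+/*
+ * Illustrative sketch, not part of the original header: output paths
+ * that may page-fault bracket the device access with the counting
+ * lock above, while an unloader waits for the count to drain.
+ * example_output() is a hypothetical caller.
+ */
+#if 0
+static void
+example_output (struct linux_device *dev, struct sk_buff *skb)
+{
+  dev_lock_list ();
+  dev_queue_xmit (skb, dev, 0);		/* output that may fault */
+  dev_unlock_list ();
+}
+#endif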
+
+
+/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+
+extern void ether_setup(struct linux_device *dev);
+extern void tr_setup(struct linux_device *dev);
+extern void fddi_setup(struct linux_device *dev);
+extern int ether_config(struct linux_device *dev, struct ifmap *map);
+/* Support for loadable net-drivers */
+extern int register_netdev(struct linux_device *dev);
+extern void unregister_netdev(struct linux_device *dev);
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+/* Functions used for multicast support */
+extern void dev_mc_upload(struct linux_device *dev);
+extern void dev_mc_delete(struct linux_device *dev, void *addr, int alen, int all);
+extern void dev_mc_add(struct linux_device *dev, void *addr, int alen, int newonly);
+extern void dev_mc_discard(struct linux_device *dev);
+/* This is the wrong place but it'll do for the moment */
+extern void ip_mc_allhost(struct linux_device *dev);
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DEV_H */
diff --git a/linux/dev/include/linux/notifier.h b/linux/dev/include/linux/notifier.h
new file mode 100644
index 0000000..b3c9ccf
--- /dev/null
+++ b/linux/dev/include/linux/notifier.h
@@ -0,0 +1,96 @@
+/*
+ * Routines to manage notifier chains for passing status changes to any
+ * interested routines. We need this instead of hard coded call lists so
+ * that modules can poke their nose into the innards. The network devices
+ * needed them so here they are for the rest of you.
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+
+struct notifier_block
+{
+ int (*notifier_call)(struct notifier_block *this, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+};
+
+
+#ifdef __KERNEL__
+
+#define NOTIFY_DONE 0x0000 /* Don't care */
+#define NOTIFY_OK 0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+
+extern __inline__ int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+{
+ while(*list)
+ {
+ if(n->priority > (*list)->priority)
+ break;
+ list= &((*list)->next);
+ }
+ n->next = *list;
+ *list=n;
+ return 0;
+}
+
+/*
+ * Warning to any non GPL module writers out there.. these functions are
+ * GPL'd
+ */
+
+extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+{
+ while((*nl)!=NULL)
+ {
+ if((*nl)==n)
+ {
+ *nl=n->next;
+ return 0;
+ }
+ nl=&((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+/*
+ * This is one of these things that is generally shorter inline
+ */
+
+extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+{
+ int ret=NOTIFY_DONE;
+ struct notifier_block *nb = *n;
+ while(nb)
+ {
+ ret=nb->notifier_call(nb,val,v);
+ if(ret&NOTIFY_STOP_MASK)
+ return ret;
+ nb=nb->next;
+ }
+ return ret;
+}
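+/*
+ * Illustrative sketch, not part of the original header: a client hangs
+ * a notifier_block on a chain and the chain's owner runs
+ * notifier_call_chain() when something happens.  example_event,
+ * example_notifier and example_chain are hypothetical names.
+ */
+#if 0
+static int
+example_event (struct notifier_block *self, unsigned long what, void *data)
+{
+  return NOTIFY_DONE;			/* we never veto anything */
+}
+
+static struct notifier_block example_notifier = { example_event, NULL, 0 };
+static struct notifier_block *example_chain = NULL;
+
+/* notifier_chain_register (&example_chain, &example_notifier);
+   notifier_call_chain (&example_chain, 1, NULL);
+   notifier_chain_unregister (&example_chain, &example_notifier);  */
+#endif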
+
+
+/*
+ * Declared notifiers so far. I can imagine quite a few more chains
+ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+
+/* netdevice notifier chain */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+ detected a hardware crash and restarted
+ - we can use this eg to kick tcp sessions
+ once done */
+#endif
+#endif
diff --git a/linux/dev/include/linux/pagemap.h b/linux/dev/include/linux/pagemap.h
new file mode 100644
index 0000000..6e21f3d
--- /dev/null
+++ b/linux/dev/include/linux/pagemap.h
@@ -0,0 +1,150 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+#include <asm/system.h>
+
+/*
+ * Page-mapping primitive inline functions
+ *
+ * Copyright 1995 Linus Torvalds
+ */
+
+#ifndef MACH
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/swapctl.h>
+
+static inline unsigned long page_address(struct page * page)
+{
+ return PAGE_OFFSET + PAGE_SIZE * page->map_nr;
+}
+
+#define PAGE_HASH_BITS 11
+#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
+
+#define PAGE_AGE_VALUE ((PAGE_INITIAL_AGE)+(PAGE_ADVANCE))
+
+extern unsigned long page_cache_size; /* # of pages currently in the hash table */
+extern struct page * page_hash_table[PAGE_HASH_SIZE];
+
+/*
+ * We use a power-of-two hash table to avoid a modulus,
+ * and get a reasonable hash by knowing roughly how the
+ * inode pointer and offsets are distributed (ie, we
+ * roughly know which bits are "significant")
+ */
+static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
+{
+#define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
+#define o (offset >> PAGE_SHIFT)
+#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
+ return s(i+o) & (PAGE_HASH_SIZE-1);
+#undef i
+#undef o
+#undef s
+}
+
+#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset))
+
+static inline struct page * __find_page(struct inode * inode, unsigned long offset, struct page *page)
+{
+ goto inside;
+ for (;;) {
+ page = page->next_hash;
+inside:
+ if (!page)
+ goto not_found;
+ if (page->inode != inode)
+ continue;
+ if (page->offset == offset)
+ break;
+ }
+ /* Found the page. */
+ atomic_inc(&page->count);
+ set_bit(PG_referenced, &page->flags);
+not_found:
+ return page;
+}
+
+static inline struct page *find_page(struct inode * inode, unsigned long offset)
+{
+ return __find_page(inode, offset, *page_hash(inode, offset));
+}
+
+static inline void remove_page_from_hash_queue(struct page * page)
+{
+ struct page **p;
+ struct page *next_hash, *prev_hash;
+
+ next_hash = page->next_hash;
+ prev_hash = page->prev_hash;
+ page->next_hash = NULL;
+ page->prev_hash = NULL;
+ if (next_hash)
+ next_hash->prev_hash = prev_hash;
+ if (prev_hash)
+ prev_hash->next_hash = next_hash;
+ p = page_hash(page->inode,page->offset);
+ if (*p == page)
+ *p = next_hash;
+ page_cache_size--;
+}
+
+static inline void __add_page_to_hash_queue(struct page * page, struct page **p)
+{
+ page_cache_size++;
+ set_bit(PG_referenced, &page->flags);
+ page->age = PAGE_AGE_VALUE;
+ page->prev_hash = NULL;
+ if ((page->next_hash = *p) != NULL)
+ page->next_hash->prev_hash = page;
+ *p = page;
+}
+
+static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
+{
+ __add_page_to_hash_queue(page, page_hash(inode,offset));
+}
+
+
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+ struct inode * inode = page->inode;
+
+ page->inode = NULL;
+ inode->i_nrpages--;
+ if (inode->i_pages == page)
+ inode->i_pages = page->next;
+ if (page->next)
+ page->next->prev = page->prev;
+ if (page->prev)
+ page->prev->next = page->next;
+ page->next = NULL;
+ page->prev = NULL;
+}
+
+static inline void add_page_to_inode_queue(struct inode * inode, struct page * page)
+{
+ struct page **p = &inode->i_pages;
+
+ inode->i_nrpages++;
+ page->inode = inode;
+ page->prev = NULL;
+ if ((page->next = *p) != NULL)
+ page->next->prev = page;
+ *p = page;
+}
+
+extern void __wait_on_page(struct page *);
+static inline void wait_on_page(struct page * page)
+{
+ if (PageLocked(page))
+ __wait_on_page(page);
+}
+
+extern void update_vm_cache(struct inode *, unsigned long, const char *, int);
+
+#endif /* !MACH */
+
+#endif
diff --git a/linux/dev/include/linux/pm.h b/linux/dev/include/linux/pm.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/linux/pm.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/proc_fs.h b/linux/dev/include/linux/proc_fs.h
new file mode 100644
index 0000000..8ce0bb2
--- /dev/null
+++ b/linux/dev/include/linux/proc_fs.h
@@ -0,0 +1,292 @@
+#ifndef _LINUX_PROC_FS_H
+#define _LINUX_PROC_FS_H
+
+#include <linux/fs.h>
+#include <linux/malloc.h>
+
+/*
+ * The proc filesystem constants/structures
+ */
+
+/*
+ * We always define these enumerators
+ */
+
+enum root_directory_inos {
+ PROC_ROOT_INO = 1,
+ PROC_LOADAVG,
+ PROC_UPTIME,
+ PROC_MEMINFO,
+ PROC_KMSG,
+ PROC_VERSION,
+ PROC_CPUINFO,
+ PROC_PCI,
+ PROC_SELF, /* will change inode # */
+ PROC_NET,
+ PROC_SCSI,
+ PROC_MALLOC,
+ PROC_KCORE,
+ PROC_MODULES,
+ PROC_STAT,
+ PROC_DEVICES,
+ PROC_INTERRUPTS,
+ PROC_FILESYSTEMS,
+ PROC_KSYMS,
+ PROC_DMA,
+ PROC_IOPORTS,
+#ifdef __SMP_PROF__
+ PROC_SMP_PROF,
+#endif
+ PROC_PROFILE, /* whether enabled or not */
+ PROC_CMDLINE,
+ PROC_SYS,
+ PROC_MTAB,
+ PROC_MD,
+ PROC_RTC,
+ PROC_LOCKS
+};
+
+enum pid_directory_inos {
+ PROC_PID_INO = 2,
+ PROC_PID_STATUS,
+ PROC_PID_MEM,
+ PROC_PID_CWD,
+ PROC_PID_ROOT,
+ PROC_PID_EXE,
+ PROC_PID_FD,
+ PROC_PID_ENVIRON,
+ PROC_PID_CMDLINE,
+ PROC_PID_STAT,
+ PROC_PID_STATM,
+ PROC_PID_MAPS
+};
+
+enum pid_subdirectory_inos {
+ PROC_PID_FD_DIR = 1
+};
+
+enum net_directory_inos {
+ PROC_NET_UNIX = 128,
+ PROC_NET_ARP,
+ PROC_NET_ROUTE,
+ PROC_NET_DEV,
+ PROC_NET_RAW,
+ PROC_NET_TCP,
+ PROC_NET_UDP,
+ PROC_NET_SNMP,
+ PROC_NET_RARP,
+ PROC_NET_IGMP,
+ PROC_NET_IPMR_VIF,
+ PROC_NET_IPMR_MFC,
+ PROC_NET_IPFWFWD,
+ PROC_NET_IPFWIN,
+ PROC_NET_IPFWOUT,
+ PROC_NET_IPACCT,
+ PROC_NET_IPMSQHST,
+ PROC_NET_WIRELESS,
+ PROC_NET_IPX_INTERFACE,
+ PROC_NET_IPX_ROUTE,
+ PROC_NET_IPX,
+ PROC_NET_ATALK,
+ PROC_NET_AT_ROUTE,
+ PROC_NET_ATIF,
+ PROC_NET_AX25_ROUTE,
+ PROC_NET_AX25,
+ PROC_NET_AX25_CALLS,
+ PROC_NET_NR_NODES,
+ PROC_NET_NR_NEIGH,
+ PROC_NET_NR,
+ PROC_NET_SOCKSTAT,
+ PROC_NET_RTCACHE,
+ PROC_NET_AX25_BPQETHER,
+ PROC_NET_ALIAS_TYPES,
+ PROC_NET_ALIASES,
+ PROC_NET_IP_MASQ_APP,
+ PROC_NET_STRIP_STATUS,
+ PROC_NET_STRIP_TRACE,
+ PROC_NET_IPAUTOFW,
+ PROC_NET_RS_NODES,
+ PROC_NET_RS_NEIGH,
+ PROC_NET_RS_ROUTES,
+ PROC_NET_RS,
+ PROC_NET_Z8530,
+ PROC_NET_LAST
+};
+
+enum scsi_directory_inos {
+ PROC_SCSI_SCSI = 256,
+ PROC_SCSI_ADVANSYS,
+ PROC_SCSI_EATA,
+ PROC_SCSI_EATA_PIO,
+ PROC_SCSI_AHA152X,
+ PROC_SCSI_AHA1542,
+ PROC_SCSI_AHA1740,
+ PROC_SCSI_AIC7XXX,
+ PROC_SCSI_BUSLOGIC,
+ PROC_SCSI_U14_34F,
+ PROC_SCSI_FDOMAIN,
+ PROC_SCSI_GENERIC_NCR5380,
+ PROC_SCSI_IN2000,
+ PROC_SCSI_PAS16,
+ PROC_SCSI_QLOGICFAS,
+ PROC_SCSI_QLOGICISP,
+ PROC_SCSI_SEAGATE,
+ PROC_SCSI_T128,
+ PROC_SCSI_DC390WUF,
+ PROC_SCSI_DC390T,
+ PROC_SCSI_NCR53C7xx,
+ PROC_SCSI_NCR53C8XX,
+ PROC_SCSI_ULTRASTOR,
+ PROC_SCSI_7000FASST,
+ PROC_SCSI_EATA2X,
+ PROC_SCSI_AM53C974,
+ PROC_SCSI_SSC,
+ PROC_SCSI_NCR53C406A,
+ PROC_SCSI_PPA,
+ PROC_SCSI_ESP,
+ PROC_SCSI_A3000,
+ PROC_SCSI_A2091,
+ PROC_SCSI_GVP11,
+ PROC_SCSI_ATARI,
+ PROC_SCSI_GDTH,
+ PROC_SCSI_IDESCSI,
+ PROC_SCSI_SCSI_DEBUG,
+ PROC_SCSI_NOT_PRESENT,
+ PROC_SCSI_FILE, /* I'm assuming here that we */
+ PROC_SCSI_LAST = (PROC_SCSI_FILE + 16) /* won't ever see more than */
+}; /* 16 HBAs in one machine */
+
+/* Finally, the dynamically allocatable proc entries are reserved: */
+
+#define PROC_DYNAMIC_FIRST 4096
+#define PROC_NDYNAMIC 4096
+
+#define PROC_SUPER_MAGIC 0x9fa0
+
+/*
+ * This is not completely implemented yet. The idea is to
+ * create an in-memory tree (like the actual /proc filesystem
+ * tree) of these proc_dir_entries, so that we can dynamically
+ * add new files to /proc.
+ *
+ * The "next" pointer creates a linked list of one /proc directory,
+ * while parent/subdir create the directory structure (every
+ * /proc file has a parent, but "subdir" is NULL for all
+ * non-directory entries).
+ *
+ * "get_info" is called at "read", while "fill_inode" is used to
+ * fill in file type/protection/owner information specific to the
+ * particular /proc file.
+ */
+struct proc_dir_entry {
+ unsigned short low_ino;
+ unsigned short namelen;
+ const char *name;
+ mode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ unsigned long size;
+ struct inode_operations * ops;
+ int (*get_info)(char *, char **, off_t, int, int);
+ void (*fill_inode)(struct inode *);
+ struct proc_dir_entry *next, *parent, *subdir;
+ void *data;
+};
+
+extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+extern struct proc_dir_entry proc_root;
+extern struct proc_dir_entry proc_net;
+extern struct proc_dir_entry proc_scsi;
+extern struct proc_dir_entry proc_sys;
+extern struct proc_dir_entry proc_pid;
+extern struct proc_dir_entry proc_pid_fd;
+
+extern struct inode_operations proc_scsi_inode_operations;
+
+extern void proc_root_init(void);
+extern void proc_base_init(void);
+extern void proc_net_init(void);
+
+extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *);
+extern int proc_register_dynamic(struct proc_dir_entry *,
+ struct proc_dir_entry *);
+extern int proc_unregister(struct proc_dir_entry *, int);
+
+static inline int proc_net_register(struct proc_dir_entry * x)
+{
+ return proc_register(&proc_net, x);
+}
+
+static inline int proc_net_unregister(int x)
+{
+ return proc_unregister(&proc_net, x);
+}
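+/*
+ * Illustrative sketch, not part of the original header: a driver that
+ * wants a file under /proc/net fills in a proc_dir_entry and registers
+ * it against proc_net.  The entry, its name and example_get_info are
+ * hypothetical; S_IFREG/S_IRUGO come from the fs headers included above.
+ */
+#if 0
+static int example_get_info (char *buf, char **start, off_t off,
+			     int len, int dummy);
+
+static struct proc_dir_entry example_proc_entry = {
+  0, 7, "example",			/* low_ino, namelen, name */
+  S_IFREG | S_IRUGO, 1, 0, 0,		/* mode, nlink, uid, gid */
+  0, NULL,				/* size, inode ops */
+  example_get_info			/* read handler */
+};
+
+/* proc_register_dynamic (&proc_net, &example_proc_entry); */
+#endif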
+
+static inline int proc_scsi_register(struct proc_dir_entry *driver,
+ struct proc_dir_entry *x)
+{
+ x->ops = &proc_scsi_inode_operations;
+ if(x->low_ino < PROC_SCSI_FILE){
+ return(proc_register(&proc_scsi, x));
+ }else{
+ return(proc_register(driver, x));
+ }
+}
+
+static inline int proc_scsi_unregister(struct proc_dir_entry *driver, int x)
+{
+ extern void scsi_init_free(char *ptr, unsigned int size);
+
+ if(x <= PROC_SCSI_FILE)
+ return(proc_unregister(&proc_scsi, x));
+ else {
+ struct proc_dir_entry **p = &driver->subdir, *dp;
+ int ret;
+
+ while ((dp = *p) != NULL) {
+ if (dp->low_ino == x)
+ break;
+ p = &dp->next;
+ }
+ ret = proc_unregister(driver, x);
+ scsi_init_free((char *) dp, sizeof(struct proc_dir_entry) + 4);
+ return(ret);
+ }
+}
+
+extern struct super_block *proc_read_super(struct super_block *,void *,int);
+extern int init_proc_fs(void);
+extern struct inode * proc_get_inode(struct super_block *, int, struct proc_dir_entry *);
+extern void proc_statfs(struct super_block *, struct statfs *, int);
+extern void proc_read_inode(struct inode *);
+extern void proc_write_inode(struct inode *);
+extern int proc_match(int, const char *, struct proc_dir_entry *);
+
+/*
+ * These are generic /proc routines that use the internal
+ * "struct proc_dir_entry" tree to traverse the filesystem.
+ *
+ * The /proc root directory has extended versions to take care
+ * of the /proc/<pid> subdirectories.
+ */
+extern int proc_readdir(struct inode *, struct file *, void *, filldir_t);
+extern int proc_lookup(struct inode *, const char *, int, struct inode **);
+
+extern struct inode_operations proc_dir_inode_operations;
+extern struct inode_operations proc_net_inode_operations;
+extern struct inode_operations proc_netdir_inode_operations;
+extern struct inode_operations proc_scsi_inode_operations;
+extern struct inode_operations proc_mem_inode_operations;
+extern struct inode_operations proc_sys_inode_operations;
+extern struct inode_operations proc_array_inode_operations;
+extern struct inode_operations proc_arraylong_inode_operations;
+extern struct inode_operations proc_kcore_inode_operations;
+extern struct inode_operations proc_profile_inode_operations;
+extern struct inode_operations proc_kmsg_inode_operations;
+extern struct inode_operations proc_link_inode_operations;
+extern struct inode_operations proc_fd_inode_operations;
+
+#endif
diff --git a/linux/dev/include/linux/sched.h b/linux/dev/include/linux/sched.h
new file mode 100644
index 0000000..3e7bcd4
--- /dev/null
+++ b/linux/dev/include/linux/sched.h
@@ -0,0 +1,521 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+/*
+ * define DEBUG if you want the wait-queues to have some extra
+ * debugging code. It's not normally used, but might catch some
+ * wait-queue coding errors.
+ *
+ * #define DEBUG
+ */
+
+#include <asm/param.h> /* for HZ */
+
+extern unsigned long event;
+
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/tasks.h>
+#include <linux/kernel.h>
+
+#include <asm/system.h>
+#include <asm/semaphore.h>
+#include <asm/page.h>
+
+#include <linux/smp.h>
+#include <linux/tty.h>
+#include <linux/sem.h>
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+#define CLONE_VM 0x00000100 /* set if VM shared between processes */
+#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
+#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
+#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */
+#define CLONE_PID 0x00001000 /* set if pid shared */
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
+
+#define CT_TO_SECS(x) ((x) / HZ)
+#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
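+/*
+ * Illustrative sketch, not part of the original header: once every
+ * LOAD_FREQ ticks the timer code feeds the run-queue length, scaled to
+ * fixed point, into CALC_LOAD for each decay constant.
+ * count_active_tasks() is a hypothetical helper.
+ */
+#if 0
+static void
+example_calc_load (void)
+{
+  unsigned long active = count_active_tasks () * FIXED_1;
+
+  CALC_LOAD (avenrun[0], EXP_1, active);	/*  1 min */
+  CALC_LOAD (avenrun[1], EXP_5, active);	/*  5 min */
+  CALC_LOAD (avenrun[2], EXP_15, active);	/* 15 min */
+}
+#endif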
+
+extern int nr_running, nr_tasks;
+extern int last_pid;
+
+#define FIRST_TASK task[0]
+#define LAST_TASK task[NR_TASKS-1]
+
+#include <linux/head.h>
+#include <linux/fs.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+
+#include <asm/processor.h>
+
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define TASK_ZOMBIE 3
+#define TASK_STOPPED 4
+#define TASK_SWAPPING 5
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+
+struct sched_param {
+ int sched_priority;
+};
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __KERNEL__
+
+extern void sched_init(void);
+extern void show_state(void);
+extern void trap_init(void);
+
+asmlinkage void schedule(void);
+
+/* Open file table structure */
+struct files_struct {
+ int count;
+ fd_set close_on_exec;
+ fd_set open_fds;
+ struct file * fd[NR_OPEN];
+};
+
+#define INIT_FILES { \
+ 1, \
+ { { 0, } }, \
+ { { 0, } }, \
+ { NULL, } \
+}
+
+struct fs_struct {
+ int count;
+ unsigned short umask;
+ struct inode * root, * pwd;
+};
+
+#define INIT_FS { \
+ 1, \
+ 0022, \
+ NULL, NULL \
+}
+
+struct mm_struct {
+ int count;
+ pgd_t * pgd;
+ unsigned long context;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack, start_mmap;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+ struct vm_area_struct * mmap;
+ struct vm_area_struct * mmap_avl;
+ struct semaphore mmap_sem;
+};
+
+#define INIT_MM { \
+ 1, \
+ swapper_pg_dir, \
+ 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, \
+ 0, \
+ &init_mmap, &init_mmap, MUTEX }
+
+struct signal_struct {
+ int count;
+ struct sigaction action[32];
+};
+
+#define INIT_SIGNALS { \
+ 1, \
+ { {0,}, } }
+
+struct task_struct {
+/* these are hardcoded - don't touch */
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ long counter;
+ long priority;
+ unsigned long signal;
+ unsigned long blocked; /* bitmap of masked signals */
+ unsigned long flags; /* per process flags, defined below */
+ int errno;
+ long debugreg[8]; /* Hardware debugging registers */
+ struct exec_domain *exec_domain;
+/* various fields */
+ struct linux_binfmt *binfmt;
+ struct task_struct *next_task, *prev_task;
+ struct task_struct *next_run, *prev_run;
+ unsigned long saved_kernel_stack;
+ unsigned long kernel_stack_page;
+ int exit_code, exit_signal;
+ /* ??? */
+ unsigned long personality;
+ int dumpable:1;
+ int did_exec:1;
+ /* shouldn't this be pid_t? */
+ int pid;
+ int pgrp;
+ int tty_old_pgrp;
+ int session;
+ /* boolean value for session group leader */
+ int leader;
+ int groups[NGROUPS];
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->p_pptr->pid)
+ */
+ struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
+ struct wait_queue *wait_chldexit; /* for wait4() */
+ unsigned short uid,euid,suid,fsuid;
+ unsigned short gid,egid,sgid,fsgid;
+ unsigned long timeout, policy, rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+ struct timer_list real_timer;
+ long utime, stime, cutime, cstime, start_time;
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
+ int swappable:1;
+ unsigned long swap_address;
+ unsigned long old_maj_flt; /* old value of maj_flt */
+ unsigned long dec_flt; /* page fault count of the last time */
+ unsigned long swap_cnt; /* number of pages to swap on next pass */
+/* limits */
+ struct rlimit rlim[RLIM_NLIMITS];
+ unsigned short used_math;
+ char comm[16];
+/* file system info */
+ int link_count;
+ struct tty_struct *tty; /* NULL if no tty */
+/* ipc stuff */
+ struct sem_undo *semundo;
+ struct sem_queue *semsleeping;
+/* ldt for this task - used by Wine. If NULL, default_ldt is used */
+ struct desc_struct *ldt;
+/* tss for this task */
+ struct thread_struct tss;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* memory management info */
+ struct mm_struct *mm;
+/* signal handlers */
+ struct signal_struct *sig;
+#ifdef __SMP__
+ int processor;
+ int last_processor;
+ int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+#endif
+};
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+#define PF_PTRACED 0x00000010 /* set if ptrace (0) has been called. */
+#define PF_TRACESYS 0x00000020 /* tracing system calls */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+
+#define PF_STARTING 0x00000002 /* being created */
+#define PF_EXITING 0x00000004 /* getting shut down */
+
+#define PF_USEDFPU 0x00100000 /* Process used the FPU this quantum (SMP only) */
+#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k) */
+
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed. 8MB seems reasonable.
+ */
+#define _STK_LIM (8*1024*1024)
+
+#define DEF_PRIORITY (20*HZ/100) /* 200 ms time slices */
+
+/*
+ * INIT_TASK is used to set up the first task table; touch it at
+ * your own risk! Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK \
+/* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \
+/* debugregs */ { 0, }, \
+/* exec domain */&default_exec_domain, \
+/* binfmt */ NULL, \
+/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* stack */ 0,(unsigned long) &init_kernel_stack, \
+/* ec,brk... */ 0,0,0,0,0, \
+/* pid etc.. */ 0,0,0,0,0, \
+/* suppl grps*/ {NOGROUP,}, \
+/* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \
+/* uid etc */ 0,0,0,0,0,0,0,0, \
+/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \
+/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
+/* utime */ 0,0,0,0,0, \
+/* flt */ 0,0,0,0,0,0, \
+/* swp */ 0,0,0,0,0, \
+/* rlimits */ INIT_RLIMITS, \
+/* math */ 0, \
+/* comm */ "swapper", \
+/* fs info */ 0,NULL, \
+/* ipc */ NULL, NULL, \
+/* ldt */ NULL, \
+/* tss */ INIT_TSS, \
+/* fs */ &init_fs, \
+/* files */ &init_files, \
+/* mm */ &init_mm, \
+/* signals */ &init_signals, \
+}
+
+extern struct mm_struct init_mm;
+extern struct task_struct init_task;
+extern struct task_struct *task[NR_TASKS];
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *current_set[NR_CPUS];
+/*
+ * On a single processor system this comes out as current_set[0] when cpp
+ * has finished with it, which gcc will optimise away.
+ */
+#define current (0+current_set[smp_processor_id()]) /* Current on this processor */
+extern unsigned long volatile jiffies;
+extern unsigned long itimer_ticks;
+extern unsigned long itimer_next;
+extern struct timeval xtime;
+extern int need_resched;
+extern void do_timer(struct pt_regs *);
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+extern int securelevel; /* system security level */
+
+#define CURRENT_TIME (xtime.tv_sec)
+
+extern void sleep_on(struct wait_queue ** p);
+extern void interruptible_sleep_on(struct wait_queue ** p);
+extern void wake_up(struct wait_queue ** p);
+extern void wake_up_interruptible(struct wait_queue ** p);
+extern void wake_up_process(struct task_struct * tsk);
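+/*
+ * Illustrative sketch, not part of the original header: the usual
+ * 2.0-era driver pattern sleeps on a wait queue in process context and
+ * wakes it from the interrupt handler.  The example_* names are
+ * hypothetical.
+ */
+#if 0
+static struct wait_queue *example_wait = NULL;
+static volatile int example_done = 0;
+
+static void
+example_wait_for_completion (void)	/* process context */
+{
+  while (!example_done)
+    interruptible_sleep_on (&example_wait);
+}
+
+static void
+example_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+  example_done = 1;
+  wake_up_interruptible (&example_wait);
+}
+#endif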
+
+extern void notify_parent(struct task_struct * tsk, int signal);
+extern void force_sig(unsigned long sig,struct task_struct * p);
+extern int send_sig(unsigned long sig,struct task_struct * p,int priv);
+extern int in_group_p(gid_t grp);
+
+extern int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id);
+extern void free_irq(unsigned int irq, void *dev_id);
+
+/*
+ * This has now become a routine instead of a macro, it sets a flag if
+ * it returns true (to do BSD-style accounting where the process is flagged
+ * if it uses root privs). The implication of this is that you should do
+ * normal permissions checks first, and check suser() last.
+ */
+#ifdef MACH
+
+extern inline int
+suser(void)
+{
+ return 1;
+}
+
+#else
+
+extern inline int suser(void)
+{
+ if (current->euid == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_mm(struct task_struct *);
+extern void exit_fs(struct task_struct *);
+extern void exit_files(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void release_thread(struct task_struct *);
+
+extern int do_execve(char *, char **, char **, struct pt_regs *);
+extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
+
+extern void add_wait_queue(struct wait_queue **p, struct wait_queue *wait);
+extern void remove_wait_queue(struct wait_queue **p, struct wait_queue *wait);
+
+/* See if we have a valid user level fd.
+ * If it makes sense, return the file structure it references.
+ * Otherwise return NULL.
+ */
+
+#ifdef MACH
+
+extern void __add_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void add_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void __remove_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void remove_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+
+#else /* !MACH */
+
+extern inline struct file *file_from_fd(const unsigned int fd)
+{
+
+ if (fd >= NR_OPEN)
+ return NULL;
+ /* either valid or null */
+ return current->files->fd[fd];
+}
+
+/*
+ * The wait-queues are circular lists, and you have to be *very* sure
+ * to keep them correct. Use only these two functions to add/remove
+ * entries in the queues.
+ */
+extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue *head = *p;
+ struct wait_queue *next = WAIT_QUEUE_HEAD(p);
+
+ if (head)
+ next = head;
+ *p = wait;
+ wait->next = next;
+}
+
+extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __add_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue * next = wait->next;
+ struct wait_queue * head = next;
+
+ for (;;) {
+ struct wait_queue * nextlist = head->next;
+ if (nextlist == wait)
+ break;
+ head = nextlist;
+ }
+ head->next = next;
+}
+
+extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __remove_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+extern inline void select_wait(struct wait_queue ** wait_address, select_table * p)
+{
+ struct select_table_entry * entry;
+
+ if (!p || !wait_address)
+ return;
+ if (p->nr >= __MAX_SELECT_TABLE_ENTRIES)
+ return;
+ entry = p->entry + p->nr;
+ entry->wait_address = wait_address;
+ entry->wait.task = current;
+ entry->wait.next = NULL;
+ add_wait_queue(wait_address,&entry->wait);
+ p->nr++;
+}
+
+#endif /* !MACH */
+
+#define REMOVE_LINKS(p) do { unsigned long flags; \
+ save_flags(flags) ; cli(); \
+ (p)->next_task->prev_task = (p)->prev_task; \
+ (p)->prev_task->next_task = (p)->next_task; \
+ restore_flags(flags); \
+ if ((p)->p_osptr) \
+ (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
+ if ((p)->p_ysptr) \
+ (p)->p_ysptr->p_osptr = (p)->p_osptr; \
+ else \
+ (p)->p_pptr->p_cptr = (p)->p_osptr; \
+ } while (0)
+
+#define SET_LINKS(p) do { unsigned long flags; \
+ save_flags(flags); cli(); \
+ (p)->next_task = &init_task; \
+ (p)->prev_task = init_task.prev_task; \
+ init_task.prev_task->next_task = (p); \
+ init_task.prev_task = (p); \
+ restore_flags(flags); \
+ (p)->p_ysptr = NULL; \
+ if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
+ (p)->p_osptr->p_ysptr = p; \
+ (p)->p_pptr->p_cptr = p; \
+ } while (0)
+
+#define for_each_task(p) \
+ for (p = &init_task ; (p = p->next_task) != &init_task ; )
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/dev/include/linux/skbuff.h b/linux/dev/include/linux/skbuff.h
new file mode 100644
index 0000000..c55e529
--- /dev/null
+++ b/linux/dev/include/linux/skbuff.h
@@ -0,0 +1,466 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/time.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+
+#define CONFIG_SKB_CHECK 0
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+
+
+#define FREE_READ 1
+#define FREE_WRITE 0
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+struct sk_buff_head
+{
+ struct sk_buff * next;
+ struct sk_buff * prev;
+ __u32 qlen; /* Must be same length as a pointer
+ for using debugging */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+};
+
+
+struct sk_buff
+{
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+ struct sk_buff_head * list; /* List we are on */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+ struct sk_buff *link3; /* Link for IP protocol level buffer chains */
+ struct sock *sk; /* Socket we are owned by */
+ unsigned long when; /* used to compute rtt's */
+ struct timeval stamp; /* Time we arrived */
+ struct linux_device *dev; /* Device we arrived on/are leaving by */
+ union
+ {
+ struct tcphdr *th;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ unsigned char *raw;
+ /* for passing file handles in a unix domain socket */
+ void *filp;
+ } h;
+
+ union
+ {
+ /* As yet incomplete physical layer views */
+ unsigned char *raw;
+ struct ethhdr *ethernet;
+ } mac;
+
+ struct iphdr *ip_hdr; /* For IPPROTO_RAW */
+ unsigned long len; /* Length of actual data */
+ unsigned long csum; /* Checksum */
+ __u32 saddr; /* IP source address */
+ __u32 daddr; /* IP target address */
+ __u32 raddr; /* IP next hop address */
+ __u32 seq; /* TCP sequence number */
+ __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
+ __u32 ack_seq; /* TCP ack sequence number */
+ unsigned char proto_priv[16]; /* Protocol private data */
+ volatile char acked, /* Are we acked ? */
+ used, /* Are we in use ? */
+ free, /* How to free this buffer */
+ arp; /* Has IP/ARP resolution finished */
+ unsigned char tries, /* Times tried */
+ lock, /* Are we locked ? */
+ localroute, /* Local routing asserted for this frame */
+ pkt_type, /* Packet class */
+ pkt_bridged, /* Tracker for bridging */
+ ip_summed; /* Driver fed us an IP checksum */
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+ unsigned short users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned int truesize; /* Buffer size */
+
+ atomic_t count; /* reference count */
+ struct sk_buff *data_skb; /* Link to the actual data skb */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+ __u16 redirport; /* Redirect port */
+#ifdef MACH
+#ifdef MACH_INCLUDE
+ ipc_port_t reply;
+ mach_msg_type_name_t reply_type;
+ vm_map_copy_t copy;
+#else
+ void *reply;
+ unsigned reply_type;
+ void *copy;
+#endif
+#endif
+};
+
+#ifdef CONFIG_SKB_LARGE
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+#else
+#define SK_WMEM_MAX 32767
+#define SK_RMEM_MAX 32767
+#endif
+
+#if CONFIG_SKB_CHECK
+#define SK_FREED_SKB 0x0DE2C0DE
+#define SK_GOOD_SKB 0xDEC0DED1
+#define SK_HEAD_SKB 0x12231298
+#endif
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/malloc.h>
+
+#include <asm/system.h>
+
+#if 0
+extern void print_skb(struct sk_buff *);
+#endif
+extern void kfree_skb(struct sk_buff *skb, int rw);
+extern void skb_queue_head_init(struct sk_buff_head *list);
+extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
+extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
+extern struct sk_buff * skb_dequeue(struct sk_buff_head *list);
+extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_append(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_unlink(struct sk_buff *buf);
+extern __u32 skb_queue_len(struct sk_buff_head *list);
+extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * dev_alloc_skb(unsigned int size);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
+extern void skb_device_lock(struct sk_buff *skb);
+extern void skb_device_unlock(struct sk_buff *skb);
+extern void dev_kfree_skb(struct sk_buff *skb, int mode);
+extern int skb_device_locked(struct sk_buff *skb);
+extern unsigned char * skb_put(struct sk_buff *skb, int len);
+extern unsigned char * skb_push(struct sk_buff *skb, int len);
+extern unsigned char * skb_pull(struct sk_buff *skb, int len);
+extern int skb_headroom(struct sk_buff *skb);
+extern int skb_tailroom(struct sk_buff *skb);
+extern void skb_reserve(struct sk_buff *skb, int len);
+extern void skb_trim(struct sk_buff *skb, int len);
+
+extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/*
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. On an interrupt-driven
+ * system, cli(), peek the buffer, copy the data, then sti().
+ */
+extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/*
+ * Return the length of an sk_buff queue
+ */
+
+extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+#if CONFIG_SKB_CHECK
+extern int skb_check(struct sk_buff *skb,int,int, char *);
+#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__)
+#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__)
+#else
+#define IS_SKB(skb)
+#define IS_SKB_HEAD(skb)
+
+extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_head(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+
+extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_tail(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list.
+ */
+
+extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ save_flags(flags);
+ cli();
+ result = __skb_dequeue(list);
+ restore_flags(flags);
+ return result;
+}
+
+/*
+ * Insert a packet on a list.
+ */
+
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/*
+ * Place a packet before a given packet in a list
+ */
+extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old->prev, old, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old, old->next, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_.
+ */
+
+extern __inline__ void skb_unlink(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if(skb->list)
+ __skb_unlink(skb, skb->list);
+ restore_flags(flags);
+}
+
+/*
+ * Add data to an sk_buff
+ */
+extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end)
+ {
+ panic("skput:over: %d", len);
+ }
+ return tmp;
+}
+
+extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head)
+ {
+ panic("skpush:under: %d", len);
+ }
+ return skb->data;
+}
+
+extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len)
+{
+ if(len > skb->len)
+ return NULL;
+ skb->data+=len;
+ skb->len-=len;
+ return skb->data;
+}
+
+extern __inline__ int skb_headroom(struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+extern __inline__ int skb_tailroom(struct sk_buff *skb)
+{
+ return skb->end-skb->tail;
+}
+
+extern __inline__ void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+
+extern __inline__ void skb_trim(struct sk_buff *skb, int len)
+{
+ if(skb->len>len)
+ {
+ skb->len=len;
+ skb->tail=skb->data+len;
+ }
+}
+
+#endif
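+/*
+ * Illustrative sketch, not part of the original header: a typical
+ * receive path allocates a buffer, reserves alignment headroom, copies
+ * the frame in with skb_put() and hands it to the stack.  netif_rx()
+ * and eth_type_trans() live in netdevice.h/etherdevice.h; example_rx
+ * and its arguments are hypothetical.
+ */
+#if 0
+static void
+example_rx (struct linux_device *dev, unsigned char *frame, int len)
+{
+  struct sk_buff *skb = dev_alloc_skb (len + 2);
+
+  if (skb == NULL)
+    return;				/* drop on allocation failure */
+  skb_reserve (skb, 2);			/* 16-byte align the IP header */
+  memcpy (skb_put (skb, len), frame, len);
+  skb->dev = dev;
+  skb->protocol = eth_type_trans (skb, dev);
+  netif_rx (skb);
+}
+#endif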
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern int datagram_select(struct sock *sk, int sel_type, select_table *wait);
+extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
+extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux/dev/include/linux/threads.h b/linux/dev/include/linux/threads.h
new file mode 100644
index 0000000..9d841c9
--- /dev/null
+++ b/linux/dev/include/linux/threads.h
@@ -0,0 +1 @@
+/* Dummy file. */
diff --git a/linux/dev/include/linux/types.h b/linux/dev/include/linux/types.h
new file mode 100644
index 0000000..eb086c2
--- /dev/null
+++ b/linux/dev/include/linux/types.h
@@ -0,0 +1,117 @@
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+#include <linux/posix_types.h>
+#include <asm/types.h>
+
+#ifndef __KERNEL_STRICT_NAMES
+
+typedef __kernel_fd_set fd_set;
+
+#ifndef MACH_INCLUDE
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+#endif
+
+#ifdef MACH_INCLUDE
+#define off_t long
+#else
+typedef __kernel_off_t off_t;
+#endif
+
+typedef __kernel_pid_t pid_t;
+
+#ifdef MACH_INCLUDE
+#define uid_t unsigned short
+#define gid_t unsigned short
+#define daddr_t int
+#else
+typedef __kernel_uid_t uid_t;
+typedef __kernel_gid_t gid_t;
+typedef __kernel_daddr_t daddr_t;
+#endif
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __kernel_loff_t loff_t;
+#endif
+
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+#ifndef MACH_INCLUDE
+typedef __kernel_size_t size_t;
+#endif
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+#ifndef MACH_INCLUDE
+typedef __kernel_ssize_t ssize_t;
+#endif
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+#ifdef MACH_INCLUDE
+#define time_t long
+#else
+typedef __kernel_time_t time_t;
+#endif
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+
+#ifndef _CADDR_T
+#define _CADDR_T
+#ifndef MACH_INCLUDE
+typedef __kernel_caddr_t caddr_t;
+#endif
+#endif
+
+#ifndef MACH_INCLUDE
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+#endif
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#endif /* __KERNEL_STRICT_NAMES */
+
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+/* Yes, this is ugly. But that's why it is called glue code. */
+
+#define _MACH_SA_SYS_TYPES_H_
+
+
+#endif /* _LINUX_TYPES_H */
diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c
new file mode 100644
index 0000000..6d85395
--- /dev/null
+++ b/linux/dev/init/main.c
@@ -0,0 +1,261 @@
+/*
+ * Linux initialization.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/init/main.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <mach/machine.h>
+
+#include <vm/vm_page.h>
+#include <kern/kalloc.h>
+
+#include <machine/spl.h>
+#include <machine/pmap.h>
+#include <machine/vm_param.h>
+#include <machine/model_dep.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dev/glue/glue.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+/*
+ * Timing loop count.
+ */
+unsigned long loops_per_sec = 1;
+
+#if defined(__SMP__) && defined(__i386__)
+unsigned long smp_loops_per_tick = 1000000;
+#endif
+
+/*
+ * End of physical memory.
+ */
+unsigned long high_memory;
+
+/*
+ * Flag to indicate auto-configuration is in progress.
+ */
+int linux_auto_config = 1;
+
+/*
+ * Hard drive parameters obtained from the BIOS.
+ */
+struct drive_info_struct
+{
+ char dummy[32];
+} drive_info;
+
+/*
+ * Forward declarations.
+ */
+static void calibrate_delay (void);
+
+/*
+ * Amount of contiguous memory to allocate for initialization.
+ */
+#define CONTIG_ALLOC (512 * 1024)
+
+/*
+ * Initialize Linux drivers.
+ */
+void
+linux_init (void)
+{
+ int addr;
+ unsigned long memory_start, memory_end;
+ vm_page_t pages;
+
+ /*
+ * Initialize memory size.
+ */
+ high_memory = vm_page_seg_end(VM_PAGE_SEL_DIRECTMAP);
+ init_IRQ ();
+ linux_sched_init ();
+
+ /*
+ * Set loop count.
+ */
+ calibrate_delay ();
+
+ /*
+ * Initialize drive info.
+ */
+ addr = *((unsigned *) phystokv (0x104));
+ memcpy (&drive_info,
+ (void *) ((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16);
+ addr = *((unsigned *) phystokv (0x118));
+ memcpy ((char *) &drive_info + 16,
+ (void *) ((addr & 0xffff) + ((addr >> 12) & 0xffff0)), 16);
+
+ /*
+ * Initialize Linux memory allocator.
+ */
+ linux_kmem_init ();
+
+ /*
+ * Allocate contiguous memory below 16 MB.
+ */
+ memory_start = alloc_contig_mem (CONTIG_ALLOC, 16 * 1024 * 1024, 0, &pages);
+ if (memory_start == 0)
+ panic ("linux_init: alloc_contig_mem failed");
+ memory_end = memory_start + CONTIG_ALLOC;
+
+ /*
+ * Initialize PCI bus.
+ */
+ memory_start = pci_init (memory_start, memory_end);
+
+ if (memory_start > memory_end)
+    panic ("linux_init: ran out of memory");
+
+ /*
+ * Initialize devices.
+ */
+#ifdef CONFIG_INET
+ linux_net_emulation_init ();
+#endif
+
+ device_setup ();
+
+#ifdef CONFIG_PCMCIA
+ /*
+ * Initialize pcmcia.
+ */
+ pcmcia_init ();
+#endif
+
+ restore_IRQ ();
+
+ linux_auto_config = 0;
+}
+
+#ifndef NBPW
+#define NBPW 32
+#endif
+
+/*
+ * Allocate contiguous memory with the given constraints.
+ */
+unsigned long
+alloc_contig_mem (unsigned size, unsigned limit,
+ unsigned mask, vm_page_t * pages)
+{
+ vm_page_t p;
+
+ p = vm_page_grab_contig(size, VM_PAGE_SEL_DMA);
+
+ if (p == NULL)
+ return 0;
+
+ if (pages)
+ *pages = p;
+
+ return phystokv(vm_page_to_pa(p));
+}
+
+/*
+ * Free memory allocated by alloc_contig_mem.
+ */
+void
+free_contig_mem (vm_page_t pages, unsigned size)
+{
+ vm_page_free_contig(pages, size);
+}
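+/*
+ * Illustrative sketch, not part of the original file: an ISA driver
+ * that needs a DMA-able buffer below 16 MB could use the helpers above
+ * roughly as follows (size and names are hypothetical).
+ */
+#if 0
+static unsigned long example_buf;
+static vm_page_t example_pages;
+
+static int
+example_grab_bounce_buffer (void)
+{
+  example_buf = alloc_contig_mem (64 * 1024, 16 * 1024 * 1024,
+				  0, &example_pages);
+  return example_buf ? 0 : -1;
+}
+#endif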
+
+/* This is the number of bits of precision for the loops_per_second. Each
+ * bit takes on average 1.5/HZ seconds. This (like the original) is a little
+ * better than 1%
+ */
+#define LPS_PREC 8
+
+static void
+calibrate_delay (void)
+{
+ int ticks;
+ int loopbit;
+ int lps_precision = LPS_PREC;
+
+ loops_per_sec = (1 << 12);
+
+#ifndef MACH
+ printk ("Calibrating delay loop.. ");
+#endif
+ while (loops_per_sec <<= 1)
+ {
+ /* wait for "start of" clock tick */
+ ticks = jiffies;
+ while (ticks == jiffies)
+ /* nothing */ ;
+ /* Go .. */
+ ticks = jiffies;
+ __delay (loops_per_sec);
+ ticks = jiffies - ticks;
+ if (ticks)
+ break;
+ }
+
+ /* Do a binary approximation to get loops_per_second set to equal one clock
+ * (up to lps_precision bits)
+ */
+ loops_per_sec >>= 1;
+ loopbit = loops_per_sec;
+ while (lps_precision-- && (loopbit >>= 1))
+ {
+ loops_per_sec |= loopbit;
+ ticks = jiffies;
+ while (ticks == jiffies);
+ ticks = jiffies;
+ __delay (loops_per_sec);
+ if (jiffies != ticks) /* longer than 1 tick */
+ loops_per_sec &= ~loopbit;
+ }
+
+ /* finally, adjust loops per second in terms of seconds instead of clocks */
+ loops_per_sec *= HZ;
+ /* Round the value and print it */
+#ifndef MACH
+ printk ("ok - %lu.%02lu BogoMIPS\n",
+ (loops_per_sec + 2500) / 500000,
+ ((loops_per_sec + 2500) / 5000) % 100);
+#endif
+
+#if defined(__SMP__) && defined(__i386__)
+ smp_loops_per_tick = loops_per_sec / 400;
+#endif
+}
diff --git a/linux/dev/init/version.c b/linux/dev/init/version.c
new file mode 100644
index 0000000..1989483
--- /dev/null
+++ b/linux/dev/init/version.c
@@ -0,0 +1,32 @@
+/*
+ * linux/version.c
+ *
+ * Copyright (C) 1992 Theodore Ts'o
+ *
+ * May be freely distributed as part of Linux.
+ */
+
+#define MACH_INCLUDE
+#include <linux/config.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/compile.h>
+
+/* make the "checkconfig" script happy: we really need to include config.h */
+#ifdef CONFIG_BOGUS
+#endif
+
+#define version(a) Version_ ## a
+#define version_string(a) version(a)
+
+int version_string (LINUX_VERSION_CODE) = 0;
+
+struct new_utsname system_utsname =
+{
+ UTS_SYSNAME, UTS_NODENAME, UTS_RELEASE, UTS_VERSION,
+ UTS_MACHINE, UTS_DOMAINNAME
+};
+
+const char *linux_banner =
+"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
diff --git a/linux/dev/kernel/dma.c b/linux/dev/kernel/dma.c
new file mode 100644
index 0000000..bbda4bb
--- /dev/null
+++ b/linux/dev/kernel/dma.c
@@ -0,0 +1,109 @@
+/* $Id: dma.c,v 1.1 1999/04/26 05:49:35 tb Exp $
+ * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
+ *
+ * Written by Hennus Bergman, 1992.
+ *
+ * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
+ * In the previous version the reported device could end up being wrong,
+ * if a device requested a DMA channel that was already in use.
+ * [It also happened to remove the sizeof(char *) == sizeof(int)
+ * assumption introduced because of those /proc/dma patches. -- Hennus]
+ */
+
+#define MACH_INCLUDE
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+
+
+/* A note on resource allocation:
+ *
+ * All drivers needing DMA channels, should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
+ * When releasing them, first release the DMA, then release the IRQ.
+ * If you don't, you may cause allocation requests to fail unnecessarily.
+ * This doesn't really matter now, but it will once we get real semaphores
+ * in the kernel.
+ */
+
+
+
+/* Channel n is busy iff dma_chan_busy[n].lock != 0.
+ * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
+ * DMA4 is reserved for cascading.
+ */
+
+struct dma_chan
+{
+ int lock;
+ const char *device_id;
+};
+
+static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] =
+{
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 1, "cascade" },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 }
+};
+
+#ifndef MACH
+int
+get_dma_list (char *buf)
+{
+ int i, len = 0;
+
+ for (i = 0 ; i < MAX_DMA_CHANNELS ; i++)
+ {
+ if (dma_chan_busy[i].lock)
+ {
+ len += linux_sprintf (buf+len, "%2d: %s\n",
+ i,
+ dma_chan_busy[i].device_id);
+ }
+ }
+ return len;
+} /* get_dma_list */
+#endif
+
+int
+request_dma (unsigned int dmanr, const char *device_id)
+{
+ if (dmanr >= MAX_DMA_CHANNELS)
+ return -EINVAL;
+
+ if (xchg (&dma_chan_busy[dmanr].lock, 1) != 0)
+ return -EBUSY;
+
+ dma_chan_busy[dmanr].device_id = device_id;
+
+ /* old flag was 0, now contains 1 to indicate busy */
+ return 0;
+} /* request_dma */
+
+
+void
+free_dma (unsigned int dmanr)
+{
+ if (dmanr >= MAX_DMA_CHANNELS)
+ {
+ printk ("Trying to free DMA%d\n", dmanr);
+ return;
+ }
+
+ if (xchg (&dma_chan_busy[dmanr].lock, 0) == 0)
+ {
+ printk ("Trying to free free DMA%d\n", dmanr);
+ return;
+ }
+} /* free_dma */
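+/*
+ * Illustrative sketch, not part of the original file: following the
+ * ordering rule above, the IRQ is acquired before the DMA channel and
+ * the two are released in the opposite order.  Names are hypothetical;
+ * request_irq()/free_irq() are declared in <linux/sched.h>.
+ */
+#if 0
+static void example_interrupt (int irq, void *dev_id, struct pt_regs *regs);
+
+static int
+example_attach (unsigned int irq, unsigned int dmanr, void *dev_id)
+{
+  if (request_irq (irq, example_interrupt, 0, "example", dev_id))
+    return -EBUSY;
+  if (request_dma (dmanr, "example"))
+    {
+      free_irq (irq, dev_id);
+      return -EBUSY;
+    }
+  return 0;
+}
+
+static void
+example_detach (unsigned int irq, unsigned int dmanr, void *dev_id)
+{
+  free_dma (dmanr);
+  free_irq (irq, dev_id);
+}
+#endif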
diff --git a/linux/dev/kernel/printk.c b/linux/dev/kernel/printk.c
new file mode 100644
index 0000000..7c65d30
--- /dev/null
+++ b/linux/dev/kernel/printk.c
@@ -0,0 +1,83 @@
+/*
+ * Linux kernel print routine.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/kernel/printk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#define MACH_INCLUDE
+#include <stdarg.h>
+#include <asm/system.h>
+#include <kern/assert.h>
+#include <kern/printf.h>
+#include <device/cons.h>
+
+static char buf[2048];
+
+#define DEFAULT_MESSAGE_LOGLEVEL 4
+#define DEFAULT_CONSOLE_LOGLEVEL 7
+
+int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+
+int
+printk (char *fmt, ...)
+{
+ va_list args;
+ int n;
+ unsigned long flags;
+ char *p, *msg, *buf_end;
+ static int msg_level = -1;
+
+ save_flags (flags);
+ cli ();
+ va_start (args, fmt);
+ n = vsnprintf (buf + 3, sizeof (buf) - 3, fmt, args);
+ assert (n <= sizeof (buf) - 3);
+ buf_end = buf + 3 + n;
+ va_end (args);
+ for (p = buf + 3; p < buf_end; p++)
+ {
+ msg = p;
+ if (msg_level < 0)
+ {
+ if (p[0] != '<' || p[1] < '0' || p[1] > '7' || p[2] != '>')
+ {
+ p -= 3;
+ p[0] = '<';
+ p[1] = DEFAULT_MESSAGE_LOGLEVEL + '0';
+ p[2] = '>';
+ }
+ else
+ msg += 3;
+ msg_level = p[1] - '0';
+ }
+ for (; p < buf_end; p++)
+ if (*p == '\n')
+ break;
+ if (msg_level < console_loglevel)
+ while (msg <= p)
+ cnputc (*msg++);
+ if (*p == '\n')
+ msg_level = -1;
+ }
+ restore_flags (flags);
+ return n;
+}
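
The loop above recognises an optional "<n>" prefix (n from 0 to 7) as the message log level; unprefixed text is tagged with DEFAULT_MESSAGE_LOGLEVEL, and only messages whose level is below console_loglevel reach the console. Illustrative calls (the driver name and variable are made up):

    /* Level 3: printed as long as console_loglevel is above 3.  */
    printk ("<3>mydev: command timed out on unit %d\n", unit);

    /* No prefix: logged at DEFAULT_MESSAGE_LOGLEVEL (4).  */
    printk ("mydev: reset complete\n");
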
diff --git a/linux/dev/kernel/resource.c b/linux/dev/kernel/resource.c
new file mode 100644
index 0000000..ba107e8
--- /dev/null
+++ b/linux/dev/kernel/resource.c
@@ -0,0 +1,145 @@
+/*
+ * linux/kernel/resource.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * David Hinds
+ *
+ * Kernel io-region resource management
+ */
+
+#include <sys/types.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+#define IOTABLE_SIZE 128
+
+typedef struct resource_entry_t
+{
+ u_long from, num;
+ const char *name;
+ struct resource_entry_t *next;
+} resource_entry_t;
+
+static resource_entry_t iolist = { 0, 0, "", NULL };
+
+static resource_entry_t iotable[IOTABLE_SIZE];
+
+/*
+ * This generates the report for /proc/ioports
+ */
+#ifndef MACH
+int
+get_ioport_list (char *buf)
+{
+ resource_entry_t *p;
+ int len = 0;
+
+ for (p = iolist.next; (p) && (len < 4000); p = p->next)
+ len += linux_sprintf (buf+len, "%04lx-%04lx : %s\n",
+ p->from, p->from+p->num-1, p->name);
+ if (p)
+ len += linux_sprintf (buf+len, "4K limit reached!\n");
+ return len;
+}
+#endif
+
+/*
+ * The workhorse function: find where to put a new entry
+ */
+static resource_entry_t *
+find_gap (resource_entry_t *root, u_long from, u_long num)
+{
+ unsigned long flags;
+ resource_entry_t *p;
+
+ if (from > from+num-1)
+ return NULL;
+ save_flags (flags);
+ cli ();
+ for (p = root; ; p = p->next)
+ {
+ if ((p != root) && (p->from+p->num-1 >= from))
+ {
+ p = NULL;
+ break;
+ }
+ if ((p->next == NULL) || (p->next->from > from+num-1))
+ break;
+ }
+ restore_flags (flags);
+ return p;
+}
+
+/*
+ * Call this from the device driver to register the ioport region.
+ */
+void
+request_region (unsigned int from, unsigned int num, const char *name)
+{
+ resource_entry_t *p;
+ int i;
+
+ for (i = 0; i < IOTABLE_SIZE; i++)
+ if (iotable[i].num == 0)
+ break;
+ if (i == IOTABLE_SIZE)
+ printk ("warning: ioport table is full\n");
+ else
+ {
+ p = find_gap (&iolist, from, num);
+ if (p == NULL)
+ return;
+ iotable[i].name = name;
+ iotable[i].from = from;
+ iotable[i].num = num;
+ iotable[i].next = p->next;
+ p->next = &iotable[i];
+ return;
+ }
+}
+
+/*
+ * Call this when the device driver is unloaded
+ */
+void
+release_region (unsigned int from, unsigned int num)
+{
+ resource_entry_t *p, *q;
+
+ for (p = &iolist; ; p = q)
+ {
+ q = p->next;
+ if (q == NULL)
+ break;
+ if ((q->from == from) && (q->num == num))
+ {
+ q->num = 0;
+ p->next = q->next;
+ return;
+ }
+ }
+}
+
+/*
+ * Call this to check the ioport region before probing
+ */
+int
+check_region (unsigned int from, unsigned int num)
+{
+ return (find_gap (&iolist, from, num) == NULL) ? -EBUSY : 0;
+}
+
+/* Called from init/main.c to reserve IO ports. */
+void
+reserve_setup(char *str, int *ints)
+{
+ int i;
+
+ for (i = 1; i < ints[0]; i += 2)
+ request_region (ints[i], ints[i+1], "reserved");
+}
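
Drivers are expected to call check_region() before probing an I/O range, request_region() once the hardware has been identified, and release_region() on teardown. A sketch of that sequence for a hypothetical card at port 0x300 (names and addresses are illustrative only):

    #define MYDEV_BASE	0x300	/* illustrative I/O base */
    #define MYDEV_EXTENT	16

    static int
    mydev_probe (void)
    {
      /* Refuse to poke at ports someone else already owns.  */
      if (check_region (MYDEV_BASE, MYDEV_EXTENT))
        return -EBUSY;

      /* ... probe the hardware at MYDEV_BASE ... */

      /* Claim the range so it appears in the ioport table.  */
      request_region (MYDEV_BASE, MYDEV_EXTENT, "mydev");
      return 0;
    }

    static void
    mydev_release (void)
    {
      release_region (MYDEV_BASE, MYDEV_EXTENT);
    }
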
diff --git a/linux/dev/kernel/sched.c b/linux/dev/kernel/sched.c
new file mode 100644
index 0000000..f87482e
--- /dev/null
+++ b/linux/dev/kernel/sched.c
@@ -0,0 +1,630 @@
+/*
+ * Linux scheduling support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/kernel/sched.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <machine/spl.h>
+
+#include <mach/boolean.h>
+
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+#include <kern/printf.h>
+
+#include <machine/machspl.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/dev/glue/glue.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+int securelevel = 0;
+
+static void timer_bh (void);
+
+DECLARE_TASK_QUEUE (tq_timer);
+DECLARE_TASK_QUEUE (tq_immediate);
+DECLARE_TASK_QUEUE (tq_scheduler);
+
+static struct wait_queue **auto_config_queue;
+
+static inline void
+handle_soft_intr (void)
+{
+ if (bh_active & bh_mask)
+ {
+ intr_count = 1;
+ linux_soft_intr ();
+ intr_count = 0;
+ }
+}
+
+static void
+tqueue_bh (void)
+{
+ run_task_queue(&tq_timer);
+}
+
+static void
+immediate_bh (void)
+{
+ run_task_queue (&tq_immediate);
+}
+
+void
+add_wait_queue (struct wait_queue **q, struct wait_queue *wait)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ save_flags (flags);
+ cli ();
+ assert_wait ((event_t) q, FALSE);
+ restore_flags (flags);
+ return;
+ }
+
+ if (auto_config_queue)
+ printf ("add_wait_queue: queue not empty\n");
+ auto_config_queue = q;
+}
+
+void
+remove_wait_queue (struct wait_queue **q, struct wait_queue *wait)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ save_flags (flags);
+ thread_wakeup ((event_t) q);
+ restore_flags (flags);
+ return;
+ }
+
+ auto_config_queue = NULL;
+}
+
+static inline int
+waking_non_zero (struct semaphore *sem)
+{
+ int ret;
+ unsigned long flags;
+
+ get_buzz_lock (&sem->lock);
+ save_flags (flags);
+ cli ();
+
+ if ((ret = (sem->waking > 0)))
+ sem->waking--;
+
+ restore_flags (flags);
+ give_buzz_lock (&sem->lock);
+ return ret;
+}
+
+void
+__up (struct semaphore *sem)
+{
+ atomic_inc (&sem->waking);
+ wake_up (&sem->wait);
+}
+
+int
+__do_down (struct semaphore *sem, int task_state)
+{
+ unsigned long flags;
+ int ret = 0;
+ int s;
+
+ if (!linux_auto_config)
+ {
+ save_flags (flags);
+ s = splhigh ();
+ for (;;)
+ {
+ if (waking_non_zero (sem))
+ break;
+
+ if (task_state == TASK_INTERRUPTIBLE && issig ())
+ {
+ ret = -EINTR;
+ atomic_inc (&sem->count);
+ break;
+ }
+
+ assert_wait ((event_t) &sem->wait,
+ task_state == TASK_INTERRUPTIBLE ? TRUE : FALSE);
+ splx (s);
+ schedule ();
+ s = splhigh ();
+ }
+ splx (s);
+ restore_flags (flags);
+ return ret;
+ }
+
+ while (!waking_non_zero (sem))
+ {
+ if (task_state == TASK_INTERRUPTIBLE && issig ())
+ {
+ ret = -EINTR;
+ atomic_inc (&sem->count);
+ break;
+ }
+ schedule ();
+ }
+
+ return ret;
+}
+
+void
+__down (struct semaphore *sem)
+{
+ __do_down(sem, TASK_UNINTERRUPTIBLE);
+}
+
+int
+__down_interruptible (struct semaphore *sem)
+{
+ return __do_down (sem, TASK_INTERRUPTIBLE);
+}
+
+void
+__sleep_on (struct wait_queue **q, int state)
+{
+ unsigned long flags;
+
+ if (!q)
+ return;
+ save_flags (flags);
+ if (!linux_auto_config)
+ {
+ assert_wait ((event_t) q, state == TASK_INTERRUPTIBLE ? TRUE : FALSE);
+ sti ();
+ schedule ();
+ restore_flags (flags);
+ return;
+ }
+
+ add_wait_queue (q, NULL);
+ sti ();
+ while (auto_config_queue)
+ schedule ();
+ restore_flags (flags);
+}
+
+void
+sleep_on (struct wait_queue **q)
+{
+ __sleep_on (q, TASK_UNINTERRUPTIBLE);
+}
+
+void
+interruptible_sleep_on (struct wait_queue **q)
+{
+ __sleep_on (q, TASK_INTERRUPTIBLE);
+}
+
+void
+wake_up (struct wait_queue **q)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ if (q != &wait_for_request) /* ??? by OKUJI Yoshinori. */
+ {
+ save_flags (flags);
+ thread_wakeup ((event_t) q);
+ restore_flags (flags);
+ }
+ return;
+ }
+
+ if (auto_config_queue == q)
+ auto_config_queue = NULL;
+}
+
+void
+__wait_on_buffer (struct buffer_head *bh)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ if (! linux_auto_config)
+ {
+ while (1)
+ {
+ cli ();
+ run_task_queue (&tq_disk);
+ if (! buffer_locked (bh))
+ break;
+ bh->b_wait = (struct wait_queue *) 1;
+ assert_wait ((event_t) bh, FALSE);
+ sti ();
+ schedule ();
+ }
+ restore_flags (flags);
+ return;
+ }
+
+ sti ();
+ while (buffer_locked (bh))
+ {
+ run_task_queue (&tq_disk);
+ schedule ();
+ }
+ restore_flags (flags);
+}
+
+void
+unlock_buffer (struct buffer_head *bh)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ cli ();
+ clear_bit (BH_Lock, &bh->b_state);
+ if (bh->b_wait && ! linux_auto_config)
+ {
+ bh->b_wait = NULL;
+ thread_wakeup ((event_t) bh);
+ }
+ restore_flags (flags);
+}
+
+void
+schedule (void)
+{
+ if (intr_count)
+ printk ("Aiee: scheduling in interrupt %p\n",
+ __builtin_return_address (0));
+
+ handle_soft_intr ();
+ run_task_queue (&tq_scheduler);
+
+ if (!linux_auto_config)
+ thread_block (0);
+}
+
+void
+linux_sched_init (void)
+{
+ /*
+ * Install software interrupt handlers.
+ */
+ init_bh (TIMER_BH, timer_bh);
+ init_bh (TQUEUE_BH, tqueue_bh);
+ init_bh (IMMEDIATE_BH, immediate_bh);
+}
+
+/*
+ * Linux timers.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+unsigned long volatile jiffies = 0;
+
+/*
+ * Mask of active timers.
+ */
+unsigned long timer_active = 0;
+
+/*
+ * List of timeout routines.
+ */
+struct timer_struct timer_table[32];
+
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+#define SLOW_BUT_DEBUGGING_TIMERS 0
+
+struct timer_vec
+ {
+ int index;
+ struct timer_list *vec[TVN_SIZE];
+ };
+
+struct timer_vec_root
+ {
+ int index;
+ struct timer_list *vec[TVR_SIZE];
+ };
+
+static struct timer_vec tv5 =
+{0};
+static struct timer_vec tv4 =
+{0};
+static struct timer_vec tv3 =
+{0};
+static struct timer_vec tv2 =
+{0};
+static struct timer_vec_root tv1 =
+{0};
+
+static struct timer_vec *const tvecs[] =
+{
+ (struct timer_vec *) &tv1, &tv2, &tv3, &tv4, &tv5
+};
+
+#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+
+static unsigned long timer_jiffies = 0;
+
+static inline void
+insert_timer (struct timer_list *timer, struct timer_list **vec, int idx)
+{
+ if ((timer->next = vec[idx]))
+ vec[idx]->prev = timer;
+ vec[idx] = timer;
+ timer->prev = (struct timer_list *) &vec[idx];
+}
+
+static inline void
+internal_add_timer (struct timer_list *timer)
+{
+ /*
+ * must be cli-ed when calling this
+ */
+ unsigned long expires = timer->expires;
+ unsigned long idx = expires - timer_jiffies;
+
+ if (idx < TVR_SIZE)
+ {
+ int i = expires & TVR_MASK;
+ insert_timer (timer, tv1.vec, i);
+ }
+ else if (idx < 1 << (TVR_BITS + TVN_BITS))
+ {
+ int i = (expires >> TVR_BITS) & TVN_MASK;
+ insert_timer (timer, tv2.vec, i);
+ }
+ else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS))
+ {
+ int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+ insert_timer (timer, tv3.vec, i);
+ }
+ else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS))
+ {
+ int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+ insert_timer (timer, tv4.vec, i);
+ }
+ else if (expires < timer_jiffies)
+ {
+ /* can happen if you add a timer with expires == jiffies,
+ * or you set a timer to go off in the past
+ */
+ insert_timer (timer, tv1.vec, tv1.index);
+ }
+ else if (idx < 0xffffffffUL)
+ {
+ int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+ insert_timer (timer, tv5.vec, i);
+ }
+ else
+ {
+ /* Can only get here on architectures with 64-bit jiffies */
+ timer->next = timer->prev = timer;
+ }
+}
+
+void
+add_timer (struct timer_list *timer)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ cli ();
+#if SLOW_BUT_DEBUGGING_TIMERS
+ if (timer->next || timer->prev)
+ {
+ printk ("add_timer() called with non-zero list from %p\n",
+ __builtin_return_address (0));
+ goto out;
+ }
+#endif
+ internal_add_timer (timer);
+#if SLOW_BUT_DEBUGGING_TIMERS
+out:
+#endif
+ restore_flags (flags);
+}
+
+static inline int
+detach_timer (struct timer_list *timer)
+{
+ int ret = 0;
+ struct timer_list *next, *prev;
+
+ next = timer->next;
+ prev = timer->prev;
+ if (next)
+ {
+ next->prev = prev;
+ }
+ if (prev)
+ {
+ ret = 1;
+ prev->next = next;
+ }
+ return ret;
+}
+
+int
+del_timer (struct timer_list *timer)
+{
+ int ret;
+ unsigned long flags;
+
+ save_flags (flags);
+ cli ();
+ ret = detach_timer (timer);
+ timer->next = timer->prev = 0;
+ restore_flags (flags);
+ return ret;
+}
+
+static inline void
+run_old_timers (void)
+{
+ struct timer_struct *tp;
+ unsigned long mask;
+
+ for (mask = 1, tp = timer_table + 0; mask; tp++, mask += mask)
+ {
+ if (mask > timer_active)
+ break;
+ if (!(mask & timer_active))
+ continue;
+ if (tp->expires > jiffies)
+ continue;
+ timer_active &= ~mask;
+ tp->fn ();
+ sti ();
+ }
+}
+
+static inline void
+cascade_timers (struct timer_vec *tv)
+{
+ /* cascade all the timers from tv up one level */
+ struct timer_list *timer;
+
+ timer = tv->vec[tv->index];
+ /*
+ * We are removing _all_ timers from the list, so we don't have to
+ * detach them individually, just clear the list afterwards.
+ */
+ while (timer)
+ {
+ struct timer_list *tmp = timer;
+ timer = timer->next;
+ internal_add_timer (tmp);
+ }
+ tv->vec[tv->index] = NULL;
+ tv->index = (tv->index + 1) & TVN_MASK;
+}
+
+static inline void
+run_timer_list (void)
+{
+ cli ();
+ while ((long) (jiffies - timer_jiffies) >= 0)
+ {
+ struct timer_list *timer;
+
+ if (!tv1.index)
+ {
+ int n = 1;
+
+ do
+ {
+ cascade_timers (tvecs[n]);
+ }
+ while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+ }
+ while ((timer = tv1.vec[tv1.index]))
+ {
+ void (*fn) (unsigned long) = timer->function;
+ unsigned long data = timer->data;
+
+ detach_timer (timer);
+ timer->next = timer->prev = NULL;
+ sti ();
+ fn (data);
+ cli ();
+ }
+ ++timer_jiffies;
+ tv1.index = (tv1.index + 1) & TVR_MASK;
+ }
+ sti ();
+}
+
+/*
+ * Timer software interrupt handler.
+ */
+static void
+timer_bh (void)
+{
+ run_old_timers ();
+ run_timer_list ();
+}
+
+#if 0
+int linux_timer_print = 0;
+#endif
+
+/*
+ * Timer interrupt handler.
+ */
+void
+linux_timer_intr (void)
+{
+ if (cpu_number() != master_cpu)
+ return;
+
+ (*(unsigned long *) &jiffies)++;
+ mark_bh (TIMER_BH);
+ if (tq_timer)
+ mark_bh (TQUEUE_BH);
+#if 0
+ if (linux_timer_print)
+ printf ("linux_timer_intr: hello\n");
+#endif
+}
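
The timer wheel above is driven from timer_bh(); drivers use it through add_timer()/del_timer() on a struct timer_list. A minimal sketch of a one-second watchdog, assuming the usual <linux/timer.h> helpers (init_timer, HZ) and hypothetical mydev_* names:

    static struct timer_list mydev_timer;

    static void
    mydev_timeout (unsigned long data)
    {
      /* Runs from timer_bh() with interrupts enabled.  */
    }

    static void
    mydev_start_watchdog (void)
    {
      init_timer (&mydev_timer);		/* clears next/prev */
      mydev_timer.function = mydev_timeout;
      mydev_timer.data = 0;
      mydev_timer.expires = jiffies + HZ;	/* roughly one second from now */
      add_timer (&mydev_timer);
    }

    static void
    mydev_stop_watchdog (void)
    {
      del_timer (&mydev_timer);		/* harmless if it already fired */
    }
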
diff --git a/linux/dev/kernel/softirq.c b/linux/dev/kernel/softirq.c
new file mode 100644
index 0000000..ac95a7d
--- /dev/null
+++ b/linux/dev/kernel/softirq.c
@@ -0,0 +1,48 @@
+/*
+ * linux/kernel/softirq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ *
+ * do_bottom_half() runs at normal kernel priority: all interrupts
+ * enabled. do_bottom_half() is atomic with respect to itself: a
+ * bottom_half handler need not be re-entrant.
+ */
+
+#define MACH_INCLUDE
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+
+#include <linux/dev/glue/glue.h>
+
+int bh_mask_count[32];
+unsigned int bh_active = 0;
+unsigned int bh_mask = 0;
+void (*bh_base[32]) (void);
+
+void
+linux_soft_intr (void)
+{
+ unsigned int active;
+ unsigned int mask, left;
+ void (**bh) (void);
+
+ sti ();
+ bh = bh_base;
+ active = bh_active & bh_mask;
+ for (mask = 1, left = ~0; left & active; bh++, mask += mask, left += left)
+ {
+ if (mask & active)
+ {
+ void (*fn) (void);
+ bh_active &= ~mask;
+ fn = *bh;
+ if (!fn)
+ goto bad_bh;
+ fn ();
+ }
+ }
+ return;
+bad_bh:
+ printk ("linux_soft_intr:bad interrupt handler entry %08x\n", mask);
+}
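
A bottom half is registered once with init_bh(), marked pending from the (fast) hardware interrupt handler with mark_bh(), and run later by linux_soft_intr() with interrupts enabled. A sketch of that split; MYDEV_BH is hypothetical, the real slot numbers (TIMER_BH, NET_BH, ...) come from <linux/interrupt.h>:

    static void
    mydev_bottom_half (void)
    {
      /* Slow work deferred out of the hardware interrupt handler.  */
    }

    void
    mydev_init (void)
    {
      init_bh (MYDEV_BH, mydev_bottom_half);	/* register and unmask */
    }

    static void
    mydev_intr (int irq, void *dev_id, struct pt_regs *regs)
    {
      /* ... acknowledge the hardware quickly ... */
      mark_bh (MYDEV_BH);	/* mydev_bottom_half() runs on the next soft pass */
    }
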
diff --git a/linux/dev/lib/vsprintf.c b/linux/dev/lib/vsprintf.c
new file mode 100644
index 0000000..541ec65
--- /dev/null
+++ b/linux/dev/lib/vsprintf.c
@@ -0,0 +1,354 @@
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+#include <sys/types.h>
+
+#define MACH_INCLUDE
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+unsigned long
+simple_strtoul (const char *cp, char **endp, unsigned int base)
+{
+ unsigned long result = 0, value;
+
+ if (!base)
+ {
+ base = 10;
+ if (*cp == '0')
+ {
+ base = 8;
+ cp++;
+ if ((*cp == 'x') && isxdigit (cp[1]))
+ {
+ cp++;
+ base = 16;
+ }
+ }
+ }
+ while (isxdigit (*cp)
+ && (value = isdigit (*cp) ? *cp - '0'
+ : (islower (*cp) ? toupper (*cp) : *cp) - 'A' + 10) < base)
+ {
+ result = result * base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *) cp;
+ return result;
+}
+
+/* we use this so that we can do without the ctype library */
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+
+static int
+skip_atoi (const char **s)
+{
+ int i = 0;
+
+ while (is_digit (**s))
+ i = i * 10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) n) % (unsigned) base; \
+n = ((unsigned long) n) / (unsigned) base; \
+__res; })
+
+static char *
+number (char *str, long num, int base, int size, int precision, int type)
+{
+ char c, sign, tmp[66];
+ const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN)
+ {
+ if (num < 0)
+ {
+ sign = '-';
+ num = -num;
+ size--;
+ }
+ else if (type & PLUS)
+ {
+ sign = '+';
+ size--;
+ }
+ else if (type & SPACE)
+ {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL)
+ {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++] = '0';
+ else
+ while (num != 0)
+ tmp[i++] = digits[do_div (num, base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type & (ZEROPAD + LEFT)))
+ while (size-- > 0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+ if (type & SPECIAL)
+ {
+ if (base == 8)
+ {
+ *str++ = '0';
+ }
+ else if (base == 16)
+ {
+ *str++ = '0';
+ *str++ = digits[33];
+ }
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ *str++ = c;
+ while (i < precision--)
+ *str++ = '0';
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+int
+linux_vsprintf (char *buf, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long num;
+ int i, base;
+ char *str;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ * number of chars for a string
+ */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+
+ for (str = buf; *fmt; ++fmt)
+ {
+ if (*fmt != '%')
+ {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt)
+ {
+ case '-':
+ flags |= LEFT;
+ goto repeat;
+ case '+':
+ flags |= PLUS;
+ goto repeat;
+ case ' ':
+ flags |= SPACE;
+ goto repeat;
+ case '#':
+ flags |= SPECIAL;
+ goto repeat;
+ case '0':
+ flags |= ZEROPAD;
+ goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (is_digit (*fmt))
+ field_width = skip_atoi (&fmt);
+ else if (*fmt == '*')
+ {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg (args, int);
+ if (field_width < 0)
+ {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.')
+ {
+ ++fmt;
+ if (is_digit (*fmt))
+ precision = skip_atoi (&fmt);
+ else if (*fmt == '*')
+ {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg (args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L')
+ {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt)
+ {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ *str++ = ' ';
+ *str++ = (unsigned char) va_arg (args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg (args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen (s, precision);
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ *str++ = ' ';
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1)
+ {
+ field_width = 2 * sizeof (void *);
+ flags |= ZEROPAD;
+ }
+ str = number (str,
+ (unsigned long) va_arg (args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ if (qualifier == 'l')
+ {
+ long *ip = va_arg (args, long *);
+ *ip = (str - buf);
+ }
+ else
+ {
+ int *ip = va_arg (args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (*fmt != '%')
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'l')
+ num = va_arg (args, unsigned long);
+ else if (qualifier == 'h')
+ if (flags & SIGN)
+ num = (short) va_arg (args, int);
+ else
+ num = (unsigned short) va_arg (args, unsigned int);
+ else if (flags & SIGN)
+ num = va_arg (args, int);
+ else
+ num = va_arg (args, unsigned int);
+ str = number (str, num, base, field_width, precision, flags);
+ }
+ *str = '\0';
+ return str - buf;
+}
+
+int
+linux_sprintf (char *buf, const char *fmt,...)
+{
+ va_list args;
+ int i;
+
+ va_start (args, fmt);
+ i = linux_vsprintf (buf, fmt, args);
+ va_end (args);
+ return i;
+}
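
linux_vsprintf()/linux_sprintf() implement the subset of printf formatting the drivers rely on (%d/%u/%o/%x with width, precision and flags, plus %s, %c, %p and %n) and return the number of characters written. For example, an ioport line in the style of get_ioport_list() in resource.c comes out of a call like this (buffer and values are illustrative):

    char line[64];
    int len;

    len = linux_sprintf (line, "%04lx-%04lx : %s\n",
                         (unsigned long) 0x300,
                         (unsigned long) 0x30f, "mydev");
    /* line now holds "0300-030f : mydev\n"; len excludes the trailing '\0'.  */
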
diff --git a/linux/dev/net/core/dev.c b/linux/dev/net/core/dev.c
new file mode 100644
index 0000000..cbdf8cc
--- /dev/null
+++ b/linux/dev/net/core/dev.c
@@ -0,0 +1,1648 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dhinds@allegro.stanford.edu>
+ *
+ * Changes:
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant stunts to
+ * keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
+ * a function call per packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close changes.
+ * Rudi Cilibrasi : Pass the right thing to set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to make it work out
+ * on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
+ * is no device open function.
+ * Lawrence V. Stefani : Changed set MTU ioctl to not assume
+ * min MTU of 68 bytes for devices
+ * that have change MTU functions.
+ *
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/slhc.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <net/br.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h>
+#endif /* CONFIG_NET_RADIO */
+
+#ifndef MACH
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ */
+
+struct packet_type *ptype_base[16];
+struct packet_type *ptype_all = NULL; /* Taps */
+
+/*
+ * Device list lock
+ */
+
+int dev_lockct=0;
+
+/*
+ * Our notifier list
+ */
+
+struct notifier_block *netdev_chain=NULL;
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the bottom half handler.
+ */
+
+static struct sk_buff_head backlog;
+
+/*
+ * We don't overdo the queue or we will thrash memory badly.
+ */
+
+static int backlog_size = 0;
+
+/*
+ * Return the lesser of the two values.
+ */
+
+static __inline__ unsigned long min(unsigned long a, unsigned long b)
+{
+ return (a < b)? a : b;
+}
+
+
+/******************************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************************/
+
+/*
+ * For efficiency
+ */
+
+static int dev_nit=0;
+
+/*
+ * Add a protocol ID to the list. Now that the input handler is
+ * smarter we can dispense with all the messy stuff that used to be
+ * here.
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ int hash;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit++;
+ pt->next=ptype_all;
+ ptype_all=pt;
+ }
+ else
+ {
+ hash=ntohs(pt->type)&15;
+ pt->next = ptype_base[hash];
+ ptype_base[hash] = pt;
+ }
+}
+
+
+/*
+ * Remove a protocol ID from the list.
+ */
+
+void dev_remove_pack(struct packet_type *pt)
+{
+ struct packet_type **pt1;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit--;
+ pt1=&ptype_all;
+ }
+ else
+ pt1=&ptype_base[ntohs(pt->type)&15];
+ for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
+ {
+ if(pt==(*pt1))
+ {
+ *pt1=pt->next;
+ return;
+ }
+ }
+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+}
+
+/*****************************************************************************************
+
+ Device Interface Subroutines
+
+******************************************************************************************/
+
+/*
+ * Find an interface by name.
+ */
+
+struct device *dev_get(const char *name)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (strcmp(dev->name, name) == 0)
+ return(dev);
+ }
+ return NULL;
+}
+
+/*
+ * Find and possibly load an interface.
+ */
+
+#ifdef CONFIG_KERNELD
+
+extern __inline__ void dev_load(const char *name)
+{
+ if(!dev_get(name) && suser()) {
+#ifdef CONFIG_NET_ALIAS
+ const char *sptr;
+
+ for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break;
+ if (!(*sptr && *(sptr+1)))
+#endif
+ request_module(name);
+ }
+}
+
+#endif
+
+/*
+ * Prepare an interface for use.
+ */
+
+int dev_open(struct device *dev)
+{
+ int ret = -ENODEV;
+
+ /*
+ * Call device private open method
+ */
+ if (dev->open)
+ ret = dev->open(dev);
+
+ /*
+ * If it went open OK then set the flags
+ */
+
+ if (ret == 0)
+ {
+ dev->flags |= (IFF_UP | IFF_RUNNING);
+ /*
+ * Initialise multicasting status
+ */
+ dev_mc_upload(dev);
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ }
+ return(ret);
+}
+
+
+/*
+ * Completely shutdown an interface.
+ */
+
+int dev_close(struct device *dev)
+{
+ int ct=0;
+
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ */
+
+ if ((dev->flags & IFF_UP) && dev->stop)
+ dev->stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags&=~(IFF_UP|IFF_RUNNING);
+
+ /*
+ * Tell people we are going down
+ */
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+ /*
+ * Flush the multicast chain
+ */
+ dev_mc_discard(dev);
+
+ /*
+ * Purge any queued packets when we down the link
+ */
+ while(ct<DEV_NUMBUFFS)
+ {
+ struct sk_buff *skb;
+ while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
+ if(skb->free)
+ kfree_skb(skb,FREE_WRITE);
+ ct++;
+ }
+ return(0);
+}
+
+
+/*
+ * Device change register/unregister. These are not inline or static
+ * as we export them to the world.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_register(&netdev_chain, nb);
+}
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&netdev_chain,nb);
+}
+
+/*
+ * Send (or queue for sending) a packet.
+ *
+ * IMPORTANT: When this is called to resend frames, the caller MUST
+ * already have locked the sk_buff. Apart from that we do the
+ * rest of the magic.
+ */
+
+static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ unsigned long flags;
+ struct sk_buff_head *list;
+ int retransmission = 0; /* used to say if the packet should go */
+ /* at the front or the back of the */
+ /* queue - front is a retransmit try */
+
+ if(pri>=0 && !skb_device_locked(skb))
+ skb_device_lock(skb); /* Shove a lock on the frame */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb->dev = dev;
+
+ /*
+ * Negative priority is used to flag a frame that is being pulled from the
+ * queue front as a retransmit attempt. It therefore goes back on the queue
+ * start on a failure.
+ */
+
+ if (pri < 0)
+ {
+ pri = -pri-1;
+ retransmission = 1;
+ }
+
+#ifdef CONFIG_NET_DEBUG
+ if (pri >= DEV_NUMBUFFS)
+ {
+ printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
+ pri = 1;
+ }
+#endif
+
+ /*
+ * If the address has not been resolved, call the device header rebuilder.
+ * This can cover all protocols and technically not just ARP either.
+ */
+
+ if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
+ return;
+ }
+
+ /*
+ *
+ * If dev is an alias, switch to its main device.
+ * "arp" resolution has been made with alias device, so
+ * arp entries refer to alias, not main.
+ *
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ skb->dev = dev = net_alias_dev_tx(dev);
+#endif
+
+ /*
+ * If we are bridging and this is directly generated output
+ * pass the frame via the bridge.
+ */
+
+#ifdef CONFIG_BRIDGE
+ if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
+ {
+ if(br_tx_frame(skb))
+ return;
+ }
+#endif
+
+ list = dev->buffs + pri;
+
+ save_flags(flags);
+ /* if this isn't a retransmission, use the first packet instead... */
+ if (!retransmission) {
+ if (skb_queue_len(list)) {
+ /* avoid overrunning the device queue.. */
+ if (skb_queue_len(list) > dev->tx_queue_len) {
+ dev_kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+ }
+
+ /* copy outgoing packets to any sniffer packet handlers */
+ if (dev_nit) {
+ struct packet_type *ptype;
+ skb->stamp=xtime;
+ for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
+ {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ ((struct sock *)ptype->data != skb->sk))
+ {
+ struct sk_buff *skb2;
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+ break;
+ /* FIXME?: Wrong when the hard_header_len
+ * is an upper bound. Is this even
+ * used anywhere?
+ */
+ skb2->h.raw = skb2->data + dev->hard_header_len;
+ /* On soft header devices we
+ * yank the header before mac.raw
+ * back off. This is set by
+ * dev->hard_header().
+ */
+ if (dev->flags&IFF_SOFTHEADERS)
+ skb_pull(skb2,skb2->mac.raw-skb2->data);
+ skb2->mac.raw = skb2->data;
+ ptype->func(skb2, skb->dev, ptype);
+ }
+ }
+ }
+
+ if (skb_queue_len(list)) {
+ cli();
+ skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
+ __skb_queue_tail(list, skb);
+ skb = __skb_dequeue(list);
+ skb_device_lock(skb); /* New buffer needs locking down */
+ restore_flags(flags);
+ }
+ }
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ /*
+ * Packet is now solely the responsibility of the driver
+ */
+ return;
+ }
+
+ /*
+ * Transmission failed, put skb back into a list. Once on the list it's safe and
+ * no longer device locked (it can be freed safely from the device queue)
+ */
+ cli();
+ skb_device_unlock(skb);
+ __skb_queue_head(list,skb);
+ restore_flags(flags);
+}
+
+void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ start_bh_atomic();
+ do_dev_queue_xmit(skb, dev, pri);
+ end_bh_atomic();
+}
+
+/*
+ * Receive a packet from a device driver and queue it for the upper
+ * (protocol) levels. It always succeeds. This is the recommended
+ * interface to use.
+ */
+
+void netif_rx(struct sk_buff *skb)
+{
+ static int dropping = 0;
+
+ /*
+ * Any received buffers are un-owned and should be discarded
+ * when freed. These will be updated later as the frames get
+ * owners.
+ */
+
+ skb->sk = NULL;
+ skb->free = 1;
+ if(skb->stamp.tv_sec==0)
+ skb->stamp = xtime;
+
+ /*
+ * Check that we aren't overdoing things.
+ */
+
+ if (!backlog_size)
+ dropping = 0;
+ else if (backlog_size > 300)
+ dropping = 1;
+
+ if (dropping)
+ {
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Add it to the "backlog" queue.
+ */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb_queue_tail(&backlog,skb);
+ backlog_size++;
+
+ /*
+ * If any packet arrived, mark it for processing after the
+ * hardware interrupt returns.
+ */
+
+ mark_bh(NET_BH);
+ return;
+}
+
+/*
+ * This routine causes all interfaces to try to send some data.
+ */
+
+static void dev_transmit(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags != 0 && !dev->tbusy) {
+ /*
+ * Kick the device
+ */
+ dev_tint(dev);
+ }
+ }
+}
+
+
+/**********************************************************************************
+
+ Receive Queue Processor
+
+***********************************************************************************/
+
+/*
+ * When we are called the queue is ready to grab, the interrupts are
+ * on and hardware can interrupt and queue to the receive queue as we
+ * run with no problems.
+ * This is run as a bottom half after an interrupt handler that does
+ * mark_bh(NET_BH);
+ */
+
+void net_bh(void)
+{
+ struct packet_type *ptype;
+ struct packet_type *pt_prev;
+ unsigned short type;
+
+ /*
+ * Can we send anything now? We want to clear the
+ * decks for any more sends that get done as we
+ * process the input. This also minimises the
+ * latency on a transmit interrupt bh.
+ */
+
+ dev_transmit();
+
+ /*
+ * Any data left to process? This may occur because a
+ * mark_bh() is done after we empty the queue, including
+ * one from a device that does a mark_bh() just afterwards.
+ */
+
+ /*
+ * While the queue is not empty..
+ *
+ * Note that the queue never shrinks due to
+ * an interrupt, so we can do this test without
+ * disabling interrupts.
+ */
+
+ while (!skb_queue_empty(&backlog)) {
+ struct sk_buff * skb = backlog.next;
+
+ /*
+ * We have a packet. Therefore the queue has shrunk
+ */
+ cli();
+ __skb_unlink(skb, &backlog);
+ backlog_size--;
+ sti();
+
+
+#ifdef CONFIG_BRIDGE
+
+ /*
+ * If we are bridging then pass the frame up to the
+ * bridging code. If it is bridged then move on
+ */
+
+ if (br_stats.flags & BR_UP)
+ {
+ /*
+ * We pass the bridge a complete frame. This means
+ * recovering the MAC header first.
+ */
+
+ int offset=skb->data-skb->mac.raw;
+ cli();
+ skb_push(skb,offset); /* Put header back on for bridge */
+ if(br_receive_frame(skb))
+ {
+ sti();
+ continue;
+ }
+ /*
+ * Pull the MAC header off for the copy going to
+ * the upper layers.
+ */
+ skb_pull(skb,offset);
+ sti();
+ }
+#endif
+
+ /*
+ * Bump the pointer to the next structure.
+ *
+ * On entry to the protocol layer, skb->data and
+ * skb->h.raw point to the MAC and encapsulated data.
+ */
+
+ skb->h.raw = skb->data;
+
+ /*
+ * Fetch the packet protocol ID.
+ */
+
+ type = skb->protocol;
+
+ /*
+ * We got a packet ID. Now loop over the "known protocols"
+ * list. There are two lists. The ptype_all list of taps (normally empty)
+ * and the main protocol list which is hashed perfectly for normal protocols.
+ */
+
+ pt_prev = NULL;
+ for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
+ {
+ if(!ptype->dev || ptype->dev == skb->dev) {
+ if(pt_prev) {
+ struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
+ if(skb2)
+ pt_prev->func(skb2,skb->dev, pt_prev);
+ }
+ pt_prev=ptype;
+ }
+ }
+
+ for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
+ {
+ /*
+ * We already have a match queued. Deliver
+ * to it and then remember the new match
+ */
+ if(pt_prev)
+ {
+ struct sk_buff *skb2;
+
+ skb2=skb_clone(skb, GFP_ATOMIC);
+
+ /*
+ * Kick the protocol handler. This should be fast
+ * and efficient code.
+ */
+
+ if(skb2)
+ pt_prev->func(skb2, skb->dev, pt_prev);
+ }
+ /* Remember the current last to do */
+ pt_prev=ptype;
+ }
+ } /* End of protocol list loop */
+
+ /*
+ * Is there a last item to send to ?
+ */
+
+ if(pt_prev)
+ pt_prev->func(skb, skb->dev, pt_prev);
+ /*
+ * Has an unknown packet been received?
+ */
+
+ else
+ kfree_skb(skb, FREE_WRITE);
+ /*
+ * Again, see if we can transmit anything now.
+ * [Ought to take this out judging by tests it slows
+ * us down not speeds us up]
+ */
+#ifdef XMIT_EVERY
+ dev_transmit();
+#endif
+ } /* End of queue loop */
+
+ /*
+ * We have emptied the queue
+ */
+
+ /*
+ * One last output flush.
+ */
+
+#ifdef XMIT_AFTER
+ dev_transmit();
+#endif
+}
+
+
+/*
+ * This routine is called when a device driver (i.e. an
+ * interface) is ready to transmit a packet.
+ */
+
+void dev_tint(struct device *dev)
+{
+ int i;
+ unsigned long flags;
+ struct sk_buff_head * head;
+
+ /*
+ * aliases do not transmit (for now :) )
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev)) return;
+#endif
+ head = dev->buffs;
+ save_flags(flags);
+ cli();
+
+ /*
+ * Work the queues in priority order
+ */
+ for(i = 0;i < DEV_NUMBUFFS; i++,head++)
+ {
+
+ while (!skb_queue_empty(head)) {
+ struct sk_buff *skb;
+
+ skb = head->next;
+ __skb_unlink(skb, head);
+ /*
+ * Stop anyone freeing the buffer while we retransmit it
+ */
+ skb_device_lock(skb);
+ restore_flags(flags);
+ /*
+ * Feed them to the output stage and if it fails
+ * indicate they re-queue at the front.
+ */
+ do_dev_queue_xmit(skb,dev,-i - 1);
+ /*
+ * If we can take no more then stop here.
+ */
+ if (dev->tbusy)
+ return;
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size shortly, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char *arg)
+{
+ struct ifconf ifc;
+ struct ifreq ifr;
+ struct device *dev;
+ char *pos;
+ int len;
+ int err;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
+ if(err)
+ return err;
+ memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
+ len = ifc.ifc_len;
+ pos = ifc.ifc_buf;
+
+ /*
+ * We now walk the device list filling each active device
+ * into the array.
+ */
+
+ err=verify_area(VERIFY_WRITE,pos,len);
+ if(err)
+ return err;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
+ continue;
+ /*
+ * Have we run out of space here ?
+ */
+
+ if (len < sizeof(struct ifreq))
+ break;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ strcpy(ifr.ifr_name, dev->name);
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+
+
+ /*
+ * Write this block to the caller's space.
+ */
+
+ memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
+ pos += sizeof(struct ifreq);
+ len -= sizeof(struct ifreq);
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+
+ ifc.ifc_len = (pos - ifc.ifc_buf);
+ ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
+ memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
+
+ /*
+ * Report how much was filled in
+ */
+
+ return(pos - arg);
+}
+
+
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+
+#ifdef CONFIG_PROC_FS
+static int sprintf_stats(char *buffer, struct device *dev)
+{
+ struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
+ int size;
+
+ if (stats)
+ size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
+ dev->name,
+ stats->rx_packets, stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors
+ + stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors + stats->tx_aborted_errors
+ + stats->tx_window_errors + stats->tx_heartbeat_errors);
+ else
+ size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ return size;
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
+ * to create /proc/net/dev
+ */
+
+int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+
+ struct device *dev;
+
+
+ size = sprintf(buffer, "Inter-| Receive | Transmit\n"
+ " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Print one entry of /proc/net/wireless
+ * This is a clone of /proc/net/dev (just above)
+ */
+static int
+sprintf_wireless_stats(char * buffer,
+ struct device * dev)
+{
+ /* Get stats from the driver */
+ struct iw_statistics *stats = (dev->get_wireless_stats ?
+ dev->get_wireless_stats(dev) :
+ (struct iw_statistics *) NULL);
+ int size;
+
+ if(stats != (struct iw_statistics *) NULL)
+ size = sprintf(buffer,
+ "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n",
+ dev->name,
+ stats->status,
+ stats->qual.qual,
+ stats->qual.updated & 1 ? '.' : ' ',
+ stats->qual.level,
+ stats->qual.updated & 2 ? '.' : ' ',
+ stats->qual.noise,
+ stats->qual.updated & 3 ? '.' : ' ',
+ stats->discard.nwid,
+ stats->discard.code,
+ stats->discard.misc);
+ else
+ size = 0;
+
+ return size;
+}
+
+/*
+ * Print info for /proc/net/wireless (print all entries)
+ * This is a clone of /proc/net/dev (just above)
+ */
+int
+dev_get_wireless_info(char * buffer,
+ char ** start,
+ off_t offset,
+ int length,
+ int dummy)
+{
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ int size;
+
+ struct device * dev;
+
+ size = sprintf(buffer,
+ "Inter-|sta| Quality | Discarded packets\n"
+ " face |tus|link level noise| nwid crypt misc\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for(dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_wireless_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos < offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos > offset + length)
+ break;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if(len > length)
+ len = length; /* Ending slop */
+
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+
+/*
+ * This checks bitmasks for the ioctl calls for devices.
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
+
+/*
+ * Perform the SIOCxIFxxx calls.
+ *
+ * The socket layer has seen an ioctl the address family thinks is
+ * for the device. At this point we get invoked to make a decision
+ */
+
+static int dev_ifsioc(void *arg, unsigned int getset)
+{
+ struct ifreq ifr;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+
+ int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
+ if(err)
+ return err;
+
+ memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ /*
+ *
+ * net_alias_dev_get(): dev_get() with added alias naming magic.
+ * only allow alias creation/deletion if (getset==SIOCSIFADDR)
+ *
+ */
+
+#ifdef CONFIG_KERNELD
+ dev_load(ifr.ifr_name);
+#endif
+
+#ifdef CONFIG_NET_ALIAS
+ if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
+ return(err);
+#else
+ if ((dev = dev_get(ifr.ifr_name)) == NULL)
+ return(-ENODEV);
+#endif
+ switch(getset)
+ {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr.ifr_flags = (dev->flags & ~IFF_SOFTHEADERS);
+ goto rarok;
+
+ case SIOCSIFFLAGS: /* Set interface flags */
+ {
+ int old_flags = dev->flags;
+
+ if(securelevel>0)
+ ifr.ifr_flags&=~IFF_PROMISC;
+ /*
+ * We are not allowed to potentially close/unload
+ * a device until we get this lock.
+ */
+
+ dev_lock_wait();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (ifr.ifr_flags & (
+ IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
+ | IFF_MULTICAST)) | (dev->flags & (IFF_SOFTHEADERS|IFF_UP));
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+
+ /*
+ * Have we downed the interface? We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ if ((old_flags^ifr.ifr_flags)&IFF_UP) /* Bit is different ? */
+ {
+ if(old_flags&IFF_UP) /* Gone down */
+ ret=dev_close(dev);
+ else /* Come up */
+ {
+ ret=dev_open(dev);
+ if(ret<0)
+ dev->flags&=~IFF_UP; /* Open failed */
+ }
+ }
+ else
+ ret=0;
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+ }
+ break;
+
+ case SIOCGIFADDR: /* Get interface address (and family) */
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+ }
+ else
+ {
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_port = 0;
+ }
+ goto rarok;
+
+ case SIOCSIFADDR: /* Set interface address (and family) */
+
+ /*
+ * BSDism. SIOCSIFADDR family=AF_UNSPEC sets the
+ * physical address. We can cope with this now.
+ */
+
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel>0)
+ return -EPERM;
+ ret=dev->set_mac_address(dev,&ifr.ifr_addr);
+ }
+ else
+ {
+ u32 new_pa_addr = (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr;
+ u16 new_family = ifr.ifr_addr.sa_family;
+
+ if (new_family == dev->family &&
+ new_pa_addr == dev->pa_addr) {
+ ret =0;
+ break;
+ }
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+
+ /*
+ * if dev is an alias, must rehash to update
+ * address change
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ net_alias_dev_rehash(dev ,&ifr.ifr_addr);
+#endif
+ dev->pa_addr = new_pa_addr;
+ dev->family = new_family;
+
+#ifdef CONFIG_INET
+ /* This is naughty. When net-032e comes out it wants moving into the net032
+ code, not the kernel. Till then it can sit here (SIGH) */
+ if (!dev->pa_mask)
+ dev->pa_mask = ip_get_mask(dev->pa_addr);
+#endif
+ if (!dev->pa_brdaddr)
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFBRDADDR: /* Get the broadcast address */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+ dev->pa_brdaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
+ dev->pa_dstaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ {
+ unsigned long mask = (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr;
+ ret = -EINVAL;
+ /*
+ * The mask we set must be legal.
+ */
+ if (bad_mask(mask,0))
+ break;
+ dev->pa_mask = mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
+
+ ifr.ifr_metric = dev->metric;
+ goto rarok;
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
+ dev->metric = ifr.ifr_metric;
+ ret=0;
+ break;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr.ifr_mtu = dev->mtu;
+ goto rarok;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+
+ if (dev->change_mtu)
+ ret = dev->change_mtu(dev, ifr.ifr_mtu);
+ else
+ {
+ /*
+ * MTU must be positive.
+ */
+
+ if(ifr.ifr_mtu<68)
+ return -EINVAL;
+
+ dev->mtu = ifr.ifr_mtu;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
+ do not support it */
+ ret = -EINVAL;
+ break;
+
+ case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
+ ret = -EINVAL;
+ break;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+
+ case SIOCSIFHWADDR:
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel > 0)
+ return -EPERM;
+ if(ifr.ifr_hwaddr.sa_family!=dev->type)
+ return -EINVAL;
+ ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
+ break;
+
+ case SIOCGIFMAP:
+ ifr.ifr_map.mem_start=dev->mem_start;
+ ifr.ifr_map.mem_end=dev->mem_end;
+ ifr.ifr_map.base_addr=dev->base_addr;
+ ifr.ifr_map.irq=dev->irq;
+ ifr.ifr_map.dma=dev->dma;
+ ifr.ifr_map.port=dev->if_port;
+ goto rarok;
+
+ case SIOCSIFMAP:
+ if(dev->set_config==NULL)
+ return -EOPNOTSUPP;
+ return dev->set_config(dev,&ifr.ifr_map);
+
+ case SIOCADDMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
+ return 0;
+
+ case SIOCDELMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
+ return 0;
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if((getset >= SIOCDEVPRIVATE) &&
+ (getset <= (SIOCDEVPRIVATE + 15))) {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ break;
+ }
+
+#ifdef CONFIG_NET_RADIO
+ if((getset >= SIOCIWFIRST) &&
+ (getset <= SIOCIWLAST))
+ {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ /* Perform the ioctl */
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ /* If return args... */
+ if(IW_IS_GET(getset))
+ memcpy_tofs(arg, &ifr,
+ sizeof(struct ifreq));
+ break;
+ }
+#endif /* CONFIG_NET_RADIO */
+
+ ret = -EINVAL;
+ }
+ return(ret);
+/*
+ * The load of calls that return an ifreq and ok (saves memory).
+ */
+rarok:
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ return 0;
+}
+
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+int dev_ioctl(unsigned int cmd, void *arg)
+{
+ switch(cmd)
+ {
+ case SIOCGIFCONF:
+ (void) dev_ifconf((char *) arg);
+ return 0;
+
+ /*
+ * Ioctl calls that can be done by all.
+ */
+
+ case SIOCGIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ return dev_ifsioc(arg, cmd);
+
+ /*
+ * Ioctl calls requiring the power of a superuser
+ */
+
+ case SIOCSIFFLAGS:
+ case SIOCSIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMEM:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMAP:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+
+ default:
+ if((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15))) {
+ return dev_ifsioc(arg, cmd);
+ }
+#ifdef CONFIG_NET_RADIO
+ if((cmd >= SIOCIWFIRST) &&
+ (cmd <= SIOCIWLAST))
+ {
+ if((IW_IS_SET(cmd)) && (!suser()))
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+ }
+#endif /* CONFIG_NET_RADIO */
+ return -EINVAL;
+ }
+}
+#endif /* !MACH */
+
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+extern int lance_init(void);
+extern int pi_init(void);
+extern void sdla_setup(void);
+extern int dlci_setup(void);
+
+int net_dev_init(void)
+{
+ struct device *dev, **dp;
+
+ /*
+ * Initialise the packet receive queue.
+ */
+
+#ifndef MACH
+ skb_queue_head_init(&backlog);
+#endif
+
+ /*
+ * The bridge has to be up before the devices
+ */
+
+#ifdef CONFIG_BRIDGE
+ br_init();
+#endif
+
+ /*
+ * This is Very Ugly(tm).
+ *
+ * Some devices want to be initialized early..
+ */
+#if defined(CONFIG_PI)
+ pi_init();
+#endif
+#if defined(CONFIG_PT)
+ pt_init();
+#endif
+#if defined(CONFIG_DLCI)
+ dlci_setup();
+#endif
+#if defined(CONFIG_SDLA)
+ sdla_setup();
+#endif
+ /*
+	 *	If SLHC is present, it needs attaching so that other code sees it
+	 *	even if it is never opened.
+ */
+#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
+ || defined(CONFIG_PPP) \
+ || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
+ slhc_install();
+#endif
+
+ /*
+ * Add the devices.
+ * If the call to dev->init fails, the dev is removed
+ * from the chain disconnecting the device until the
+ * next reboot.
+ */
+
+ dp = &dev_base;
+ while ((dev = *dp) != NULL)
+ {
+ int i;
+ for (i = 0; i < DEV_NUMBUFFS; i++) {
+ skb_queue_head_init(dev->buffs + i);
+ }
+
+ if (dev->init && dev->init(dev))
+ {
+ /*
+ * It failed to come up. Unhook it.
+ */
+ *dp = dev->next;
+ }
+ else
+ {
+ dp = &dev->next;
+ }
+ }
+
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_DEV, 3, "dev",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_info
+ });
+#endif
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_WIRELESS, 8, "wireless",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_wireless_info
+ });
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+ /*
+ * Initialise net_alias engine
+ *
+ * - register net_alias device notifier
+ * - register proc entries: /proc/net/alias_types
+ * /proc/net/aliases
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ net_alias_init();
+#endif
+
+ init_bh(NET_BH, net_bh);
+ return 0;
+}
+
+/*
+ * Change the flags of device DEV to FLAGS.
+ */
+int dev_change_flags (struct device *dev, short flags)
+{
+ if (securelevel > 0)
+ flags &= ~IFF_PROMISC;
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (flags &
+ (IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE
+ | IFF_MASTER | IFF_MULTICAST))
+ | (dev->flags & (IFF_SOFTHEADERS|IFF_UP));
+
+ /* The flags are taken into account (multicast, promiscuous, ...)
+ in the set_multicast_list handler. */
+ if ((dev->flags & IFF_UP) && dev->set_multicast_list != NULL)
+ dev->set_multicast_list (dev);
+
+ return 0;
+}
+
diff --git a/linux/pcmcia-cs/clients/3c574_cs.c b/linux/pcmcia-cs/clients/3c574_cs.c
new file mode 100644
index 0000000..9dc045a
--- /dev/null
+++ b/linux/pcmcia-cs/clients/3c574_cs.c
@@ -0,0 +1,1349 @@
+/* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner".
+
+ Written 1993-1998 by
+ Donald Becker, becker@scyld.com, (driver core) and
+ David Hinds, dahinds@users.sourceforge.net (from his PC card code).
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+ This driver derives from Donald Becker's 3c509 core, which has the
+ following copyright:
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+*/
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c574 PC card Fast Ethernet
+Adapter.
+
+II. Board-specific settings
+
+None -- PC cards are autoconfigured.
+
+III. Driver operation
+
+The 3c574 uses a Boomerang-style interface, without the bus-master capability.
+See the Boomerang driver and documentation for most details.
+
+IV. Notes and chip documentation.
+
+Two added registers are used to enhance PIO performance, RunnerRdCtrl and
+RunnerWrCtrl. These are 11 bit down-counters that are preloaded with the
+count of word (16 bits) reads or writes the driver is about to do to the Rx
+or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card
+translation latency by buffering the I/O operations with an 8 word FIFO.
+Note: No other chip accesses are permitted when this buffer is used.
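+
+For illustration only (this version of the driver does not do it), a burst of
+nwords 16-bit writes to the Tx FIFO could preload the 11-bit counter and then
+stream the data (nwords and buf are hypothetical names here):
+
+	outw(nwords & 0x07ff, ioaddr + RunnerWrCtrl);
+	outsw(ioaddr + TX_FIFO, buf, nwords);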
+
+A second enhancement is that both attribute and common memory space
+0x0800-0x0fff can be translated to the PIO FIFO. Thus memory operations (faster
+with *some* PCcard bridges) may be used instead of I/O operations.
+This is enabled by setting the 0x10 bit in the PCMCIA LAN COR.
+
+Some slow PC card bridges work better if they never see a WAIT signal.
+This is configured by setting the 0x20 bit in the PCMCIA LAN COR.
+Only do this after testing that it is reliable and improves performance.
+
+The upper five bits of RunnerRdCtrl are used to window into PCcard
+configuration space registers. Window 0 is the regular Boomerang/Odie
+register set, 1-5 are various PC card control registers, and 16-31 are
+the (reversed!) CIS table.
+
+A final note: writing the InternalConfig register in window 3 with an
+invalid ramWidth is Very Bad.
+
+V. References
+
+http://www.scyld.com/expert/NWay.html
+http://www.national.com/pf/DP/DP83840.html
+
+Thanks to Terry Murphy of 3Com for providing development information for
+earlier 3Com products.
+
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/mem_op.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Now-standard PC card module parameters. */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+INT_MODULE_PARM(max_interrupt_work, 32);
+
+/* Force full duplex modes? */
+INT_MODULE_PARM(full_duplex, 0);
+
+/* Autodetect link polarity reversal? */
+INT_MODULE_PARM(auto_polarity, 1);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"3c574_cs.c 1.70 2003/08/25 15:57:40 Donald Becker/David Hinds, becker@scyld.com.\n";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((800*HZ)/1000)
+
+/* To minimize the size of the driver source and make the driver more
+   readable, not all constants are symbolically defined.
+ You'll need the manual if you want to understand driver details anyway. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum el3_cmds {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,
+};
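+/* For example, a command and its 11-bit parameter are simply added, as in
+   outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD) in el3_start_xmit()
+   below, which asks for a TxAvailable interrupt once room for a 1536-byte
+   packet is free (the threshold is given in dwords). */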
+
+enum elxl_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+enum Window0 {
+ Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+/* These assume the larger EEPROM. */
+enum Win0_EEPROM_cmds {
+ EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the "Odie" this window is always mapped at offsets 0x10-0x1f.
+ Except for TxFree, which is overlapped by RunnerWrCtrl. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */
+ RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c,
+};
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+union wn3_config {
+ int i;
+ struct w3_config_fields {
+ unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+ int pad8:8;
+ unsigned int ram_split:2, pad18:2, xcvr:3, pad21:1, autoselect:1;
+ int pad24:7;
+ } u;
+};
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+
+
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+
+struct el3_private {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats stats;
+ u16 advertising, partner; /* NWay media advertisement */
+ unsigned char phys; /* MII device address */
+ unsigned int
+ autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
+ /* for transceiver monitoring */
+ struct timer_list media;
+ u_short media_status;
+ u_short fast_poll;
+ u_long last_irq;
+};
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Index of functions. */
+
+static void tc574_config(dev_link_t *link);
+static void tc574_release(u_long arg);
+static int tc574_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static void mdio_sync(ioaddr_t ioaddr, int bits);
+static int mdio_read(ioaddr_t ioaddr, int phy_id, int location);
+static void mdio_write(ioaddr_t ioaddr, int phy_id, int location, int value);
+static u_short read_eeprom(ioaddr_t ioaddr, int index);
+static void tc574_wait_for_completion(struct net_device *dev, int cmd);
+
+static void tc574_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static int el3_open(struct net_device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev, int worklimit);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+
+static dev_info_t dev_info = "3c574_cs";
+
+static dev_link_t *tc574_attach(void);
+static void tc574_detach(dev_link_t *);
+
+static dev_link_t *dev_list = NULL;
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ tc574_detach(link);
+ }
+}
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+#if CS_RELEASE_CODE < 0x2911
+ CardServices(ReportError, dev_info, (void *)func, (void *)ret);
+#else
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+#endif
+}
+
+/*
+ tc574_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+*/
+
+static dev_link_t *tc574_attach(void)
+{
+ struct el3_private *lp;
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ int i, ret;
+
+ DEBUG(0, "3c574_attach()\n");
+ flush_stale_links();
+
+ /* Create the PC card device object. */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ init_timer(&link->release);
+ link->release.function = &tc574_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &el3_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ /* The EL3-specific entries in the device structure. */
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->get_stats = &el3_get_stats;
+ dev->do_ioctl = &el3_ioctl;
+ dev->set_multicast_list = &set_rx_mode;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &el3_open;
+ dev->stop = &el3_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = el3_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &tc574_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ tc574_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* tc574_attach */
+
+/*
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+*/
+
+static void tc574_detach(dev_link_t *link)
+{
+ struct el3_private *lp = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "3c574_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ tc574_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&lp->dev);
+ kfree(lp);
+
+} /* tc574_detach */
+
+/*
+ tc574_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+static void tc574_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32];
+ int last_fn, last_ret, i, j;
+ ioaddr_t ioaddr;
+ u16 *phys_addr;
+ char *cardname;
+
+ phys_addr = (u16 *)dev->dev_addr;
+
+ DEBUG(0, "3c574_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->io.IOAddrLines = 16;
+ for (i = j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ ioaddr = dev->base_addr;
+ copy_dev_name(lp->node, dev);
+ link->dev = &lp->node;
+
+ /* The 3c574 normally uses an EEPROM for configuration info, including
+	   the hardware address. Future products may include a modem chip
+ and put the address in the CIS. */
+ tuple.DesiredTuple = 0x88;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) {
+ CardServices(GetTupleData, handle, &tuple);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(buf[i]);
+ } else {
+ EL3WINDOW(0);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (phys_addr[0] == 0x6060) {
+ printk(KERN_NOTICE "3c574_cs: IO port conflict at 0x%03lx"
+ "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS &&
+ CardServices(GetTupleData, handle, &tuple) == CS_SUCCESS &&
+ CardServices(ParseTuple, handle, &tuple, &parse) == CS_SUCCESS) {
+ cardname = parse.version_1.str + parse.version_1.ofs[1];
+ } else
+ cardname = "3Com 3c574";
+
+ printk(KERN_INFO "%s: %s at io %#3lx, irq %d, hw_addr ",
+ dev->name, cardname, dev->base_addr, dev->irq);
+
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : ".\n"));
+
+ {
+ u_char mcr, *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ union wn3_config config;
+ outw(2<<11, ioaddr + RunnerRdCtrl);
+ mcr = inb(ioaddr + 2);
+ outw(0<<11, ioaddr + RunnerRdCtrl);
+ printk(KERN_INFO " ASIC rev %d,", mcr>>3);
+ EL3WINDOW(3);
+ config.i = inl(ioaddr + Wn3_Config);
+ printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n",
+ 8 << config.u.ram_size, ram_split[config.u.ram_split],
+ config.u.autoselect ? "autoselect " : "");
+ lp->default_media = config.u.xcvr;
+ lp->autoselect = config.u.autoselect;
+ }
+
+ {
+ int phy;
+
+ /* Roadrunner only: Turn on the MII transceiver */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ EL3WINDOW(4);
+ for (phy = 1; phy <= 32; phy++) {
+ int mii_status;
+ mdio_sync(ioaddr, 32);
+ mii_status = mdio_read(ioaddr, phy & 0x1f, 1);
+ if (mii_status != 0xffff) {
+ lp->phys = phy & 0x1f;
+ DEBUG(0, " MII transceiver at index %d, status %x.\n",
+ phy, mii_status);
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required = 1;
+ break;
+ }
+ }
+ if (phy > 32) {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ goto failed;
+ }
+ i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ lp->advertising = mdio_read(ioaddr, lp->phys, 4);
+ if (full_duplex) {
+ /* Only advertise the FD media types. */
+ lp->advertising &= ~0x02a0;
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ }
+ }
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ tc574_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+} /* tc574_config */
+
+/*
+ After a card is removed, tc574_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+*/
+
+static void tc574_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "3c574_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "3c574_cs: release postponed, '%s' still open\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* tc574_release */
+
+/*
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+*/
+
+static int tc574_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(1, "3c574_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ tc574_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ tc574_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* tc574_event */
+
+static void dump_status(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
+ "%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
+ inw(ioaddr+TxFree));
+ EL3WINDOW(4);
+ printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
+ " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc574_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 1500;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n",
+ dev->name, cmd);
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static u_short read_eeprom(ioaddr_t ioaddr, int index)
+{
+ int timer;
+ outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 usec for the read to take place. */
+ for (timer = 1620; timer >= 0; timer--) {
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ break;
+ }
+ return inw(ioaddr + Wn0EepromData);
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+   The maximum data clock rate is 2.5 MHz. The timing is easily met by the
+ slow PC card interface. */
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(ioaddr_t ioaddr, int bits)
+{
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ }
+}
+
+static int mdio_read(ioaddr_t ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+	/* Read the two transition bits, 16 data bits, and the wire-idle bit. */
+ for (i = 19; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(ioaddr_t ioaddr, int phy_id, int location, int value)
+{
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ }
+ /* Leave the interface idle. */
+ for (i = 1; i >= 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ }
+
+ return;
+}
+
+/* Reset and restore all of the 3c574 registers. */
+static void tc574_reset(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ int i, ioaddr = dev->base_addr;
+
+ tc574_wait_for_completion(dev, TotalReset|0x10);
+
+ /* Clear any transactions in progress. */
+ outw(0, ioaddr + RunnerWrCtrl);
+ outw(0, ioaddr + RunnerRdCtrl);
+
+ /* Set the station address and mask. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+
+ /* Reset config options */
+ EL3WINDOW(3);
+ outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
+ ioaddr + Wn3_Config);
+
+ /* Roadrunner only: Turn on the MII transceiver. */
+ outw(0x8040, ioaddr + Wn3_Options);
+ mdelay(1);
+ outw(0xc040, ioaddr + Wn3_Options);
+ tc574_wait_for_completion(dev, TxReset);
+ tc574_wait_for_completion(dev, RxReset);
+ mdelay(1);
+ outw(0x8040, ioaddr + Wn3_Options);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ inb(ioaddr + 13);
+
+ /* .. enable any extra statistics bits.. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+ /* .. re-sync MII and re-fill what NWay is advertising. */
+ mdio_sync(ioaddr, 32);
+ mdio_write(ioaddr, lp->phys, 4, lp->advertising);
+ if (!auto_polarity) {
+ /* works for TDK 78Q2120 series MII's */
+ int i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
+ mdio_write(ioaddr, lp->phys, 16, i);
+ }
+
+ /* Switch to register set 1 for normal use, just for TxFree. */
+ EL3WINDOW(1);
+
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure | RxEarly, ioaddr + EL3_CMD);
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+
+ tc574_reset(dev);
+ lp->media.function = &media_check;
+ lp->media.data = (u_long)lp;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ DEBUG(2, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+ dump_status(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TxStatus);
+ if (!(tx_status & 0x84)) break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc574_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ DEBUG(1, "%s: transmit error: status 0x%02x\n",
+ dev->name, tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+}
+
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ tx_timeout_check(dev, el3_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ DEBUG(3, "%s: el3_start_xmit(length = %ld) called, "
+ "status %4.4x.\n", dev->name, (long)skb->len,
+ inw(ioaddr + EL3_STATUS));
+
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0, ioaddr + TX_FIFO);
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
+
+ dev->trans_start = jiffies;
+
+ /* TxFree appears only in Window 1, not offset 0x1c. */
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_start_queue(dev);
+ } else
+		/* Interrupt us when the FIFO has room for a max-sized packet.
+ The threshold is in units of dwords. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+
+ DEV_KFREE_SKB (skb);
+ pop_tx_status(dev);
+
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct el3_private *lp = dev_id;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr, status;
+ int work_budget = max_interrupt_work;
+
+ if (!netif_device_present(dev))
+ return;
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | RxEarly | StatsFull)) {
+ if (!netif_device_present(dev) ||
+ ((status & 0xe000) != 0x2000)) {
+ DEBUG(1, "%s: Interrupt from dead card\n", dev->name);
+ break;
+ }
+
+ if (status & RxComplete)
+ work_budget = el3_rx(dev, work_budget);
+
+ if (status & TxAvailable) {
+ DEBUG(3, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+
+ if (status & TxComplete)
+ pop_tx_status(dev);
+
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull)
+ update_stats(dev);
+ if (status & RxEarly) {
+ work_budget = el3_rx(dev, work_budget);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ EL3WINDOW(1);
+ printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
+ " register %04x.\n", dev->name, fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc574_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc574_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--work_budget < 0) {
+ DEBUG(0, "%s: Too much work in interrupt, "
+ "status %4.4x.\n", dev->name, status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+
+ DEBUG(3, "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+ return;
+}
+
+/*
+ This timer serves two purposes: to check for missed interrupts
+ (and as a last resort, poll the NIC for events), and to monitor
+ the MII, reporting changes in cable status.
+*/
+static void media_check(u_long arg)
+{
+ struct el3_private *lp = (struct el3_private *)arg;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_long flags;
+ u_short /* cable, */ media, partner;
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ (inb(ioaddr + Timer) == 0xff)) {
+ if (!lp->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ el3_interrupt(dev->irq, lp, NULL);
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + 2;
+ add_timer(&lp->media);
+ return;
+ }
+
+ save_flags(flags);
+ cli();
+#if 0
+ outw(2<<11, ioaddr + RunnerRdCtrl);
+ cable = inb(ioaddr);
+ outb(0x20, ioaddr);
+ outw(0, ioaddr + RunnerRdCtrl);
+#endif
+ EL3WINDOW(4);
+ media = mdio_read(ioaddr, lp->phys, 1);
+ partner = mdio_read(ioaddr, lp->phys, 5);
+ EL3WINDOW(1);
+ restore_flags(flags);
+
+#if 0
+ if (cable & 0x20)
+ printk(KERN_INFO "%s: cable %s\n", dev->name,
+ ((cable & 0x08) ? "fixed" : "problem"));
+#endif
+ if (media != lp->media_status) {
+ if ((media ^ lp->media_status) & 0x0004)
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (lp->media_status & 0x0004) ? "lost" : "found");
+ if ((media ^ lp->media_status) & 0x0020) {
+ lp->partner = 0;
+ if (lp->media_status & 0x0020) {
+ printk(KERN_INFO "%s: autonegotiation restarted\n",
+ dev->name);
+ } else if (partner) {
+ partner &= lp->advertising;
+ lp->partner = partner;
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((partner & 0x0180) ? "100" : "10"),
+ ((partner & 0x0140) ? 'F' : 'H'));
+ } else {
+ printk(KERN_INFO "%s: link partner did not autonegotiate\n",
+ dev->name);
+ }
+
+ EL3WINDOW(3);
+ outb((partner & 0x0140 ? 0x20 : 0) |
+ (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+ EL3WINDOW(1);
+
+ }
+ if (media & 0x0010)
+ printk(KERN_INFO "%s: remote fault detected\n",
+ dev->name);
+ if (media & 0x0002)
+ printk(KERN_INFO "%s: jabber detected\n", dev->name);
+ lp->media_status = media;
+ }
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+
+ if (netif_device_present(dev))
+ update_stats(dev);
+ return &lp->stats;
+}
+
+/* Update statistics.
+   Surprisingly, this need not be run single-threaded, but it effectively is.
+ The counters clear when read, so the adds must merely be atomic.
+ */
+static void update_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u8 rx, tx, up;
+
+ DEBUG(2, "%s: updating the statistics.\n", dev->name);
+
+ if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */
+ return;
+
+ /* Unlike the 3c509 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ up = inb(ioaddr + 9);
+ lp->stats.tx_packets += (up&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ rx = inw(ioaddr + 10);
+ tx = inw(ioaddr + 12);
+
+ EL3WINDOW(4);
+ /* BadSSD */ inb(ioaddr + 12);
+ up = inb(ioaddr + 13);
+
+ add_tx_bytes(&lp->stats, tx + ((up & 0xf0) << 12));
+
+ EL3WINDOW(1);
+}
+
+static int el3_rx(struct net_device *dev, int worklimit)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ short rx_status;
+
+ DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
+ (--worklimit >= 0)) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+
+ DEBUG(3, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ ((pkt_len+3)>>2));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ add_rx_bytes(&lp->stats, pkt_len);
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of"
+ " size %d.\n", dev->name, pkt_len);
+ lp->stats.rx_dropped++;
+ }
+ }
+ tc574_wait_for_completion(dev, RxDiscard);
+ }
+
+ return worklimit;
+}
+
+/* Provide ioctl() calls to examine the MII xcvr state. */
+static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ int phy = lp->phys & 0x1f;
+
+	DEBUG(2, "%s: In ioctl(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n",
+ dev->name, rq->ifr_ifrn.ifrn_name, cmd,
+ data[0], data[1], data[2], data[3]);
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+ data[0] = phy;
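+		/* Fall through and read the requested register from that PHY. */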
+ case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+ {
+ int saved_window;
+ long flags;
+
+ save_flags(flags);
+ cli();
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ EL3WINDOW(saved_window);
+ restore_flags(flags);
+ return 0;
+ }
+ case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+ {
+ int saved_window;
+ long flags;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ save_flags(flags);
+ cli();
+ saved_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ EL3WINDOW(saved_window);
+ restore_flags(flags);
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
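+
+/* Illustrative sketch, not part of the driver: from user space the private
+   ioctls above could be exercised roughly as follows, assuming an open
+   socket descriptor fd and the interface name "eth0" (both hypothetical).
+   The first call returns the PHY address in data[0]; the second reads MII
+   register data[1] of that PHY and returns the value in data[3].
+
+	struct ifreq ifr;
+	u_short *data = (u_short *)&ifr.ifr_data;
+	strcpy(ifr.ifr_name, "eth0");
+	ioctl(fd, SIOCDEVPRIVATE, &ifr);
+	data[1] = 1;
+	ioctl(fd, SIOCDEVPRIVATE + 1, &ifr);
+*/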
+
+/* The Odie chip has a 64-bin multicast filter, but the bit layout is not
+   documented. Until it is, we revert to receiving all multicast frames when
+ any multicast reception is desired.
+ Note: My other drivers emit a log message whenever promiscuous mode is
+ entered to help detect password sniffers. This is less desirable on
+ typical PC card machines, so we omit the message.
+ */
+
+static void set_rx_mode(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ if (dev->flags & IFF_PROMISC)
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+static int el3_close(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ struct el3_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
+
+ if (DEV_OK(link)) {
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ /* Note: Switching to window 0 may disable the IRQ. */
+ EL3WINDOW(0);
+
+ update_stats(dev);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&lp->media);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int __init init_3c574_cs(void)
+{
+ servinfo_t serv;
+
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "3c574_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &tc574_attach, &tc574_detach);
+ return 0;
+}
+
+static void __exit exit_3c574_cs(void)
+{
+ DEBUG(0, "3c574_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ tc574_detach(dev_list);
+}
+
+module_init(init_3c574_cs);
+module_exit(exit_3c574_cs);
+
+/*
+ * Local variables:
+ * compile-command: "make 3c574_cs.o"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/pcmcia-cs/clients/3c589_cs.c b/linux/pcmcia-cs/clients/3c589_cs.c
new file mode 100644
index 0000000..9794b82
--- /dev/null
+++ b/linux/pcmcia-cs/clients/3c589_cs.c
@@ -0,0 +1,1107 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for the 3com 3c589 card.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ 3c589_cs.c 1.167 2003/08/25 15:57:40
+
+ The network driver code is based on Donald Becker's 3c589 code:
+
+ Written 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+/* To minimize the size of the driver source, I only define operating
+ constants if they are used several times. You'll need the manual
+ if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_TIMER 0x0a
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+#define EEPROM_READ 0x0080
+#define EEPROM_BUSY 0x8000
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,
+};
+
+enum c509status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+};
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+#define MEDIA_LED 0x0001 /* Enable link light on 3C589E cards. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+struct el3_private {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats stats;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u_short media_status;
+ u_short fast_poll;
+ u_long last_irq;
+};
+
+static char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Special hook for setting if_port when module is loaded */
+INT_MODULE_PARM(if_port, 0);
+
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"3c589_cs.c 1.167 2003/08/25 15:57:40 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+static void tc589_config(dev_link_t *link);
+static void tc589_release(u_long arg);
+static int tc589_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static u_short read_eeprom(ioaddr_t ioaddr, int index);
+static void tc589_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static int el3_config(struct net_device *dev, struct ifmap *map);
+static int el3_open(struct net_device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(struct net_device *dev);
+static struct net_device_stats *el3_get_stats(struct net_device *dev);
+static int el3_rx(struct net_device *dev);
+static int el3_close(struct net_device *dev);
+static void el3_tx_timeout(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+
+static dev_info_t dev_info = "3c589_cs";
+
+static dev_link_t *tc589_attach(void);
+static void tc589_detach(dev_link_t *);
+
+static dev_link_t *dev_list = NULL;
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ tc589_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ tc589_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *tc589_attach(void)
+{
+ struct el3_private *lp;
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ int i, ret;
+
+ DEBUG(0, "3c589_attach()\n");
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ init_timer(&link->release);
+ link->release.function = &tc589_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &el3_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ /* The EL3-specific entries in the device structure. */
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->set_config = &el3_config;
+ dev->get_stats = &el3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &el3_open;
+ dev->stop = &el3_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = el3_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &tc589_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ tc589_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* tc589_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void tc589_detach(dev_link_t *link)
+{
+ struct el3_private *lp = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "3c589_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ tc589_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&lp->dev);
+ kfree(lp);
+
+} /* tc589_detach */
+
+/*======================================================================
+
+ tc589_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+static void tc589_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32], *phys_addr;
+ int last_fn, last_ret, i, j, multi = 0;
+ ioaddr_t ioaddr;
+ char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+
+ DEBUG(0, "3c589_config(0x%p)\n", link);
+
+ phys_addr = (u_short *)dev->dev_addr;
+ tuple.Attributes = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Is this a 3c562? */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if ((CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) &&
+ (CardServices(GetTupleData, handle, &tuple) == CS_SUCCESS)) {
+ if (le16_to_cpu(buf[0]) != MANFID_3COM)
+ printk(KERN_INFO "3c589_cs: hmmm, is this really a "
+ "3Com card??\n");
+ multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562);
+ }
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ link->io.IOAddrLines = 16;
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80)) continue;
+ link->io.BasePort1 = j ^ 0x300;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestIO, i);
+ goto failed;
+ }
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "3c589_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ the hardware address. The 3c562 puts the address in the CIS. */
+ tuple.DesiredTuple = 0x88;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) {
+ CardServices(GetTupleData, handle, &tuple);
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(buf[i]);
+ } else {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == 0x6060) {
+ printk(KERN_NOTICE "3c589_cs: IO port conflict at 0x%03lx"
+ "-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ copy_dev_name(lp->node, dev);
+ link->dev = &lp->node;
+
+	/* The address and resource configuration registers aren't loaded from
+ the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
+ outw(0x3f00, ioaddr + 8);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "3c589_cs: invalid if_port requested\n");
+
+ printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, hw_addr ",
+ dev->name, (multi ? "562" : "589"), dev->base_addr,
+ dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ i = inl(ioaddr);
+ printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (i & 7) ? 32 : 8, ram_split[(i >> 16) & 3],
+ if_names[dev->if_port]);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ tc589_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+} /* tc589_config */
+
+/*======================================================================
+
+ After a card is removed, tc589_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void tc589_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "3c589_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "3c589_cs: release postponed, '%s' still open\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* tc589_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int tc589_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct el3_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(1, "3c589_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ tc589_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ tc589_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* tc589_event */
+
+/*====================================================================*/
+
+/*
+ Use this for commands that may take time to finish
+*/
+static void tc589_wait_for_completion(struct net_device *dev, int cmd)
+{
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+ if (i == 0)
+ printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n",
+ dev->name, cmd);
+}
+
+/*
+ Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+*/
+static u_short read_eeprom(ioaddr_t ioaddr, int index)
+{
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
+}
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void tc589_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0: case 1: outw(0, ioaddr + 6); break;
+ case 2: outw(3<<14, ioaddr + 6); break;
+ case 3: outw(1<<14, ioaddr + 6); break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+}
+
+static void dump_status(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
+ "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
+ inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS),
+ inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
+ " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
+ inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ EL3WINDOW(1);
+}
+
+/* Reset and restore all of the 3c589 registers. */
+static void tc589_reset(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ /* Accept b-cast and phys addr only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | AdapterFailure, ioaddr + EL3_CMD);
+}
+
+static int el3_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int el3_open(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+
+ tc589_reset(dev);
+ lp->media.function = &media_check;
+ lp->media.data = (u_long)lp;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+
+ DEBUG(1, "%s: opened, status %4.4x.\n",
+ dev->name, inw(dev->base_addr + EL3_STATUS));
+
+ return 0;
+}
+
+static void el3_tx_timeout(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+ dump_status(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+}
+
+static void pop_tx_status(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84)) break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ DEBUG(1, "%s: transmit error: status 0x%02x\n",
+ dev->name, tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+}
+
+static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ tx_timeout_check(dev, el3_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ DEBUG(3, "%s: el3_start_xmit(length = %ld) called, "
+ "status %4.4x.\n", dev->name, (long)skb->len,
+ inw(ioaddr + EL3_STATUS));
+
+ add_tx_bytes(&((struct el3_private *)dev->priv)->stats, skb->len);
+
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+
+ dev->trans_start = jiffies;
+ if (inw(ioaddr + TX_FREE) > 1536) {
+ netif_start_queue(dev);
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+
+ DEV_KFREE_SKB(skb);
+ pop_tx_status(dev);
+
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct el3_private *lp = dev_id;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr, status;
+ int i = 0;
+
+ if (!netif_device_present(dev))
+ return;
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | StatsFull)) {
+ if (!netif_device_present(dev) ||
+ ((status & 0xe000) != 0x2000)) {
+ DEBUG(1, "%s: interrupt from dead card\n", dev->name);
+ break;
+ }
+
+ if (status & RxComplete)
+ el3_rx(dev);
+
+ if (status & TxAvailable) {
+ DEBUG(3, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+
+ if (status & TxComplete)
+ pop_tx_status(dev);
+
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) { /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
+ " register %04x.\n", dev->name, fifo_diag);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc589_wait_for_completion(dev, RxReset);
+ set_multicast_list(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (++i > 10) {
+ printk(KERN_NOTICE "%s: infinite loop in interrupt, "
+ "status %4.4x.\n", dev->name, status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ }
+
+ lp->last_irq = jiffies;
+ DEBUG(3, "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS));
+ return;
+}
+
+static void media_check(u_long arg)
+{
+ struct el3_private *lp = (struct el3_private *)(arg);
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short media, errs;
+ u_long flags;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ EL3WINDOW(1);
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ (inb(ioaddr + EL3_TIMER) == 0xff)) {
+ if (!lp->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ el3_interrupt(dev->irq, lp, NULL);
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + 1;
+ add_timer(&lp->media);
+ return;
+ }
+
+ save_flags(flags);
+ cli();
+ EL3WINDOW(4);
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (jiffies - lp->last_irq < HZ) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ lp->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
+ }
+
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
+ ((lp->media_status ^ media) & 0x0010))
+ printk(KERN_INFO "%s: coax cable %s\n", dev->name,
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ printk(KERN_INFO "%s: flipped to 10baseT\n",
+ dev->name);
+ else
+ tc589_set_xcvr(dev, 2);
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ printk(KERN_INFO "%s: flipped to 10base2\n",
+ dev->name);
+ }
+ }
+ lp->media_status = media;
+ }
+
+ EL3WINDOW(1);
+ restore_flags(flags);
+
+reschedule:
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
+}
+
+static struct net_device_stats *el3_get_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ unsigned long flags;
+ dev_link_t *link = &lp->link;
+
+ if (DEV_OK(link)) {
+ save_flags(flags);
+ cli();
+ update_stats(dev);
+ restore_flags(flags);
+ }
+ return &lp->stats;
+}
+
+/*
+ Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+*/
+static void update_stats(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(2, "%s: updating the statistics.\n", dev->name);
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Rx octets */ inw(ioaddr + 10);
+ /* Tx octets */ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+}
+
+static int el3_rx(struct net_device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
+
+ DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ (--worklimit >= 0)) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+
+ DEBUG(3, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len+3)>>2);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ add_rx_bytes(&lp->stats, pkt_len);
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of"
+ " size %d.\n", dev->name, pkt_len);
+ lp->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
+ }
+ if (worklimit == 0)
+ printk(KERN_NOTICE "%s: too much work in el3_rx!\n", dev->name);
+ return 0;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct el3_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (!(DEV_OK(link))) return;
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
+}
+
+static int el3_close(struct net_device *dev)
+{
+ struct el3_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
+
+ if (DEV_OK(link)) {
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
+ }
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&lp->media);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*====================================================================*/
+
+static int __init init_3c589_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "3c589_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &tc589_attach, &tc589_detach);
+ return 0;
+}
+
+static void __exit exit_3c589_cs(void)
+{
+ DEBUG(0, "3c589_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ tc589_detach(dev_list);
+}
+
+module_init(init_3c589_cs);
+module_exit(exit_3c589_cs);
diff --git a/linux/pcmcia-cs/clients/ax8390.h b/linux/pcmcia-cs/clients/ax8390.h
new file mode 100644
index 0000000..8be1197
--- /dev/null
+++ b/linux/pcmcia-cs/clients/ax8390.h
@@ -0,0 +1,165 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/config.h>
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+
+#define TX_2X_PAGES 12
+#define TX_1X_PAGES 6
+
+#define TX_PAGES TX_2X_PAGES
+
+#define ETHER_ADDR_LEN 6
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
+
+#ifdef notdef
+extern int ei_debug;
+#else
+#define ei_debug 1
+#endif
+
+#ifndef HAVE_AUTOIRQ
+/* From auto_irq.c */
+extern void autoirq_setup(int waittime);
+extern unsigned long autoirq_report(int waittime);
+#endif
+
+/* Most of these entries should be in 'struct net_device' (or most of the
+ things in there should be here!) */
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct net_device *);
+ void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct net_device *, int, const unsigned char *, int);
+ void (*block_input)(struct net_device *, int, struct sk_buff *, int);
+ unsigned char mcfilter[8];
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
+ unsigned char txqueue; /* Tx Packet buffer queue length. */
+ short tx1, tx2; /* Packet lengths for ping-pong tx. */
+ short lasttx; /* Alpha version consistency check. */
+ unsigned char saved_irq; /* Original dev->irq value. */
+ struct net_device_stats stat; /* The new statistics table. */
+ spinlock_t page_lock; /* Page register locks */
+ unsigned long priv; /* Private field to store bus IDs etc. */
+};
+
+/* The maximum number of 8390 interrupt service routines called per IRQ. */
+#define MAX_SERVICE 12
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
+#define TX_TIMEOUT (20*HZ/100)
+
+#define ei_status (*(struct ei_device *)(dev->priv))
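+/* So e.g. ei_status.txing reads the Tx-active flag of the struct ei_device
+   hung off dev->priv; dev->priv must point at such a structure. */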
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+#define E8390_RXCONFIG	0x4	/* EN0_RXCR: broadcasts, no multicast, errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
+
+#define E8390_CMD 0x00 /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */
+#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */
+#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */
+#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */
+#define EN0_TSR 0x04 /* Transmit status reg RD */
+#define EN0_TPSR 0x04 /* Transmit starting page WR */
+#define EN0_NCR 0x05 /* Number of collision reg RD */
+#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */
+#define EN0_FIFO 0x06 /* FIFO RD */
+#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */
+#define EN0_ISR 0x07 /* Interrupt status reg RD WR */
+#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */
+#define EN0_RSARLO 0x08 /* Remote start address reg 0 */
+#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */
+#define EN0_RSARHI 0x09 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */
+#define EN0_RSR 0x0c /* rx status reg RD */
+#define EN0_RXCR 0x0c /* RX configuration reg WR */
+#define EN0_TXCR 0x0d /* TX configuration reg WR */
+#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */
+#define EN0_DCFG 0x0e /* Data configuration reg WR */
+#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */
+#define EN0_IMR 0x0f /* Interrupt mask reg WR */
+#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS 0x01 /* This board's physical enet addr RD WR */
+#define EN1_PHYS_SHIFT(i) (i+1) /* Get and set mac address */
+#define EN1_CURPAG 0x07 /* Current memory page RD WR */
+#define EN1_MULT 0x08 /* Multicast filter mask array (8 bytes) RD WR */
+#define EN1_MULT_SHIFT(i) (8+i) /* Get and set multicast filter */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY 0x20 /* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+#endif /* _8390_h */
diff --git a/linux/pcmcia-cs/clients/axnet_cs.c b/linux/pcmcia-cs/clients/axnet_cs.c
new file mode 100644
index 0000000..2e7d9ed
--- /dev/null
+++ b/linux/pcmcia-cs/clients/axnet_cs.c
@@ -0,0 +1,1936 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for Asix AX88190-based cards
+
+    The Asix AX88190 is an NS8390-derived chipset with a few nasty
+    idiosyncrasies that make it very inconvenient to support with a
+ standard 8390 driver. This driver is based on pcnet_cs, with the
+ tweaked 8390 code grafted on the end. Much of what I did was to
+ clean up and update a similar driver supplied by Asix, which was
+ adapted by William Lee, william@asix.com.tw.
+
+ Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net
+
+ axnet_cs.c 1.31 2003/08/25 15:57:40
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include "ax8390.h"
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#define AXNET_CMD 0x00
+#define AXNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define AXNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define AXNET_MII_EEP 0x14 /* Offset of MII access port */
+#define AXNET_TEST 0x15 /* Offset of TEST Register port */
+#define AXNET_GPIO 0x17 /* Offset of General Purpose Register Port */
+
+#define AXNET_START_PG 0x40 /* First page of TX buffer */
+#define AXNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#define AXNET_RDC_TIMEOUT 0x02 /* Max wait in jiffies for Tx RDC */
+
+#define IS_AX88190 0x0001
+#define IS_AX88790 0x0002
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"axnet_cs.c 1.31 2003/08/25 15:57:40 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+static void axnet_config(dev_link_t *link);
+static void axnet_release(u_long arg);
+static int axnet_event(event_t event, int priority,
+ event_callback_args_t *args);
+static int axnet_open(struct net_device *dev);
+static int axnet_close(struct net_device *dev);
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
+static void ei_watchdog(u_long arg);
+static void axnet_reset_8390(struct net_device *dev);
+
+static int mdio_read(ioaddr_t addr, int phy_id, int loc);
+static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value);
+
+static void get_8390_hdr(struct net_device *,
+ struct e8390_pkt_hdr *, int);
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page);
+
+static dev_link_t *axnet_attach(void);
+static void axnet_detach(dev_link_t *);
+
+static dev_info_t dev_info = "axnet_cs";
+static dev_link_t *dev_list;
+
+static int axdev_init(struct net_device *dev);
+static void AX88190_init(struct net_device *dev, int startp);
+static int ax_open(struct net_device *dev);
+static int ax_close(struct net_device *dev);
+static void ax_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*====================================================================*/
+
+typedef struct axnet_dev_t {
+ struct net_device dev; /* so &dev == &axnet_dev_t */
+ dev_link_t link;
+ dev_node_t node;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+} axnet_dev_t;
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ axnet_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+    We never need to do anything when an axnet device is "initialized"
+ by the net software, because we only register already-found cards.
+
+======================================================================*/
+
+static int axnet_init(struct net_device *dev)
+{
+ return 0;
+}
+
+/*======================================================================
+
+ axnet_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *axnet_attach(void)
+{
+ axnet_dev_t *info;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "axnet_attach()\n");
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) return NULL;
+ memset(info, 0, sizeof(*info));
+ link = &info->link; dev = &info->dev;
+ link->priv = info;
+
+ init_timer(&link->release);
+ link->release.function = &axnet_release;
+ link->release.data = (u_long)link;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ axdev_init(dev);
+ init_dev_name(dev, info->node);
+ dev->init = &axnet_init;
+ dev->open = &axnet_open;
+ dev->stop = &axnet_close;
+ dev->do_ioctl = &axnet_ioctl;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &axnet_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(link->handle, RegisterClient, ret);
+ axnet_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* axnet_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void axnet_detach(dev_link_t *link)
+{
+ axnet_dev_t *info = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "axnet_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ axnet_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&info->dev);
+ kfree(info);
+
+} /* axnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+
+======================================================================*/
+
+static int get_prom(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i, j;
+
+ /* This is based on drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x01, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF|0x40, EN0_RXCR}, /* 0x60 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {0x10, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0400. */
+ {0x04, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->conf.ConfigBase != 0x03c0)
+ return 0;
+
+ axnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + AXNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ return 1;
+} /* get_prom */
+
+/*======================================================================
+
+ axnet_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+#define CFG_CHECK(fn, args...) \
+if (CardServices(fn, args) != 0) goto next_entry
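+
+/* CS_CHECK() records a failing Card Services call in last_fn/last_ret and
+   jumps to the cs_failed label, where it is reported via cs_error();
+   CFG_CHECK() simply skips to the next CIS config table entry. */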
+
+static int try_io_port(dev_link_t *link)
+{
+ int j, ret;
+ if (link->io.NumPorts1 == 32) {
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (link->io.NumPorts2 > 0) {
+ /* for master/slave multifunction cards */
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ }
+ } else {
+ /* This should be two 16-port windows */
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ }
+ if (link->io.BasePort1 == 0) {
+ link->io.IOAddrLines = 16;
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ link->io.BasePort2 = (j ^ 0x300) + 0x10;
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+ } else {
+ return CardServices(RequestIO, link->handle, &link->io);
+ }
+}
+
+static void axnet_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ axnet_dev_t *info = link->priv;
+ struct net_device *dev = &info->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ int i, j, last_ret, last_fn;
+ u_short buf[64];
+ config_info_t conf;
+
+ DEBUG(0, "axnet_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ /* don't trust the CIS on this; Linksys got it wrong */
+ link->conf.Present = 0x63;
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up current Vcc */
+ CS_CHECK(GetConfigurationInfo, handle, &conf);
+ link->conf.Vcc = conf.Vcc;
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Attributes = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ while (last_ret == CS_SUCCESS) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_io_t *io = &(parse.cftable_entry.io);
+
+ CFG_CHECK(GetTupleData, handle, &tuple);
+ CFG_CHECK(ParseTuple, handle, &tuple, &parse);
+ if ((cfg->index == 0) || (cfg->io.nwin == 0))
+ goto next_entry;
+
+ link->conf.ConfigIndex = 0x05;
+ /* For multifunction cards, by convention, we configure the
+ network function with window 0, and serial with window 1 */
+ if (io->nwin > 1) {
+ i = (io->win[1].len > io->win[0].len);
+ link->io.BasePort2 = io->win[1-i].base;
+ link->io.NumPorts2 = io->win[1-i].len;
+ } else {
+ i = link->io.NumPorts2 = 0;
+ }
+ link->io.BasePort1 = io->win[i].base;
+ link->io.NumPorts1 = io->win[i].len;
+ link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ if (link->io.NumPorts1 + link->io.NumPorts2 >= 32) {
+ last_ret = try_io_port(link);
+ if (last_ret == CS_SUCCESS) break;
+ }
+ next_entry:
+ last_ret = CardServices(GetNextTuple, handle, &tuple);
+ }
+ if (last_ret != CS_SUCCESS) {
+ cs_error(handle, RequestIO, last_ret);
+ goto failed;
+ }
+
+ CS_CHECK(RequestIRQ, handle, &link->irq);
+
+ if (link->io.NumPorts2 == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ CS_CHECK(RequestConfiguration, handle, &link->conf);
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ if (!get_prom(link)) {
+ printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n");
+ printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n");
+ unregister_netdev(dev);
+ goto failed;
+ }
+
+ ei_status.name = "AX88190";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = AXNET_START_PG;
+ ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
+ ei_status.stop_page = AXNET_STOP_PG;
+ ei_status.reset_8390 = &axnet_reset_8390;
+ ei_status.get_8390_hdr = &get_8390_hdr;
+ ei_status.block_input = &block_input;
+ ei_status.block_output = &block_output;
+
+ copy_dev_name(info->node, dev);
+ link->dev = &info->node;
+
+ if (inb(dev->base_addr + AXNET_TEST) != 0)
+ info->flags |= IS_AX88790;
+ else
+ info->flags |= IS_AX88190;
+
+ printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, hw_addr ",
+ dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
+ dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ if (info->flags & IS_AX88790)
+ outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+
+
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
+ if (i == 32) {
+ conf_reg_t reg = { 0, CS_WRITE, CISREG_CCSR, 0x04 };
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+ }
+
+ info->phy_id = (i < 32) ? i : -1;
+ if (i < 32) {
+ DEBUG(0, " MII transceiver at index %d, status %x.\n", i, j);
+ } else {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ }
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ axnet_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+} /* axnet_config */
+
+/*======================================================================
+
+ After a card is removed, axnet_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void axnet_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "axnet_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "axnet_cs: release postponed, '%s' still open\n",
+ ((axnet_dev_t *)(link->priv))->node.dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* axnet_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int axnet_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ axnet_dev_t *info = link->priv;
+
+ DEBUG(2, "axnet_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(&info->dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ axnet_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(&info->dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ axnet_reset_8390(&info->dev);
+ AX88190_init(&info->dev, 1);
+ netif_device_attach(&info->dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* axnet_event */
+
+/*======================================================================
+
+ MII interface support
+
+======================================================================*/
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DATA_WRITE0 0x00
+#define MDIO_DATA_WRITE1 0x08
+#define MDIO_DATA_READ 0x04
+#define MDIO_MASK 0x0f
+#define MDIO_ENB_IN 0x02
+
+static void mdio_sync(ioaddr_t addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ outb_p(MDIO_DATA_WRITE1, addr);
+ outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
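+/* Bit-bang one MDIO read transaction: after the preamble written by
+   mdio_sync(), the start/opcode bits and the 5-bit PHY and register
+   addresses are clocked out MSB first, and the 16-bit result is then
+   clocked back in through MDIO_DATA_READ. */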
+static int mdio_read(ioaddr_t addr, int phy_id, int loc)
+{
+ u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ for (i = 14; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb_p(dat, addr);
+ outb_p(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb_p(MDIO_ENB_IN, addr);
+ outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*====================================================================*/
+
+static int axnet_open(struct net_device *dev)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "axnet_open('%s')\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ request_irq(dev->irq, ei_irq_wrapper, SA_SHIRQ, dev_info, dev);
+
+ info->link_status = 0x00;
+ info->watchdog.function = &ei_watchdog;
+ info->watchdog.data = (u_long)info;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ax_open(dev);
+} /* axnet_open */
+
+/*====================================================================*/
+
+static int axnet_close(struct net_device *dev)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "axnet_close('%s')\n", dev->name);
+
+ ax_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&info->watchdog);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* axnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+    an 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void axnet_reset_8390(struct net_device *dev)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+ outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET);
+
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n",
+ dev->name);
+
+} /* axnet_reset_8390 */
+
+/*====================================================================*/
+
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
+{
+ axnet_dev_t *info = dev_id;
+ info->stale = 0;
+ ax_interrupt(irq, dev_id, regs);
+}
+
+static void ei_watchdog(u_long arg)
+{
+ axnet_dev_t *info = (axnet_dev_t *)(arg);
+ struct net_device *dev = &info->dev;
+ ioaddr_t nic_base = dev->base_addr;
+ ioaddr_t mii_addr = nic_base + AXNET_MII_EEP;
+ u_short link;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for pending interrupt with expired latency timer: with
+ this, we can limp along even if the interrupt is blocked */
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ ei_irq_wrapper(dev->irq, dev, NULL);
+ info->fast_poll = HZ;
+ }
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ if (info->phy_id < 0)
+ goto reschedule;
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ info->phy_id = -1;
+ goto reschedule;
+ }
+
+ link &= 0x0004;
+ if (link != info->link_status) {
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ if (link) {
+ info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
+ if (p)
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
+ else
+ printk(KERN_INFO "%s: link partner did not autonegotiate\n",
+ dev->name);
+ AX88190_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+/*====================================================================*/
+
+static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ u16 *data = (u16 *)&rq->ifr_data;
+ ioaddr_t mii_addr = dev->base_addr + AXNET_MII_EEP;
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ data[0] = info->phy_id;
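+	/* Fall through... */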
+ case SIOCDEVPRIVATE+1:
+ data[3] = mdio_read(mii_addr, data[0], data[1] & 0x1f);
+ return 0;
+ case SIOCDEVPRIVATE+2:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(mii_addr, data[0], data[1] & 0x1f, data[2]);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
+static void get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ insw(nic_base + AXNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+}
+
+/*====================================================================*/
+
+static void block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+#ifdef PCMCIA_DEBUG
+ if ((ei_debug > 4) && (count != 4))
+ printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4);
+#endif
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
+
+ insw(nic_base + AXNET_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+
+}
+
+/*====================================================================*/
+
+static void block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4)
+ printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD);
+ outsw(nic_base + AXNET_DATAPORT, buf, count>>1);
+}
+
+/*====================================================================*/
+
+static int __init init_axnet_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "axnet_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &axnet_attach, &axnet_detach);
+ return 0;
+}
+
+static void __exit exit_axnet_cs(void)
+{
+ DEBUG(0, "axnet_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ axnet_detach(dev_list);
+}
+
+module_init(init_axnet_cs);
+module_exit(exit_axnet_cs);
+
+/*====================================================================*/
+
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+    Seeing how at least eight drivers use this code (not counting the
+    PCMCIA ones), it is easy to break some card by what seems like a
+    simple, innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+ Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
+ Paul Gortmaker : tweak ANK's above multicast changes a bit.
+ Paul Gortmaker : update packet statistics for v2.1.x
+	Alan Cox		: support arbitrary stupid port mappings on the
+ 68K Macintosh. Support >16bit I/O spaces
+ Paul Gortmaker : add kmod support for auto-loading of the 8390
+ module by all drivers that require it.
+ Alan Cox : Spinlocking work, added 'BUG_83C690'
+ Paul Gortmaker : Separate out Tx timeout code from Tx path.
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+static const char *version_8390 =
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n";
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#include <linux/etherdevice.h>
+
+#define BUG_83C690
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct net_device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct net_device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
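+
+/* For reference (illustrative only, mirroring what axnet_config() already
+   does earlier in this file): a board driver wires these hooks up through
+   the ei_status macro, e.g.
+
+	ei_status.reset_8390   = &axnet_reset_8390;
+	ei_status.get_8390_hdr = &get_8390_hdr;
+	ei_status.block_input  = &block_input;
+	ei_status.block_output = &block_output;
+*/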
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef ei_debug
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct net_device *dev);
+static void ei_tx_err(struct net_device *dev);
+static void ei_tx_timeout(struct net_device *dev);
+static void ei_receive(struct net_device *dev);
+static void ei_rx_overrun(struct net_device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct net_device *dev);
+static void do_set_multicast_list(struct net_device *dev);
+
+/*
+ * SMP and the 8390 setup.
+ *
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
+ * a page register that controls bank and packet buffer access. We guard
+ * this with ei_local->page_lock. Nobody should assume or set a page other
+ * than zero while the lock is not held. Lock holders must restore page 0
+ * before unlocking. Even pure readers must take the lock so that they are
+ * protected while in page 0.
+ *
+ * To make life difficult, the chip can also be very slow. We therefore can't
+ * just use spinlocks. For the longer lockups we disable the irq the device
+ * sits on and hold the lock. We must hold the lock because there is a dual
+ * processor case other than interrupts (get stats/set multicast list in
+ * parallel with each other and with transmit).
+ *
+ * Note: in theory we could just disable the irq on the card, _but_ there is
+ * a latency on SMP irq delivery. We can easily end up going "disable irq",
+ * "sync irqs", enter lock, take the queued irq. So we waddle instead of flying.
+ *
+ * Finally, by special arrangement and for the purpose of being generally
+ * annoying, the transmit function is called bh-atomic. That places
+ * restrictions on the user-context callers, as disable_irq won't save
+ * them.
+ */
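+
+/* In practice this means two locking patterns below: short register accesses
+   take only spin_lock_irqsave(&ei_local->page_lock, flags), while the slow
+   paths (the reset in ei_tx_timeout, the packet upload in ei_start_xmit)
+   first do disable_irq_nosync(dev->irq), then take the plain spin_lock, and
+   re-enable the irq after unlocking. */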
+
+/**
+ * ax_open - Open/initialize the board.
+ * @dev: network device to initialize
+ *
+ * This routine goes all-out, setting everything
+ * up anew at each open, even though many of these registers should only
+ * need to be set once at boot.
+ */
+static int ax_open(struct net_device *dev)
+{
+ unsigned long flags;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* This can't happen unless somebody forgot to call axdev_init(). */
+ if (ei_local == NULL)
+ {
+ printk(KERN_EMERG "%s: ax_open passed a non-existent device!\n", dev->name);
+ return -ENXIO;
+ }
+
+#ifdef HAVE_TX_TIMEOUT
+ /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
+ wrapper that does e.g. media check & then calls ei_tx_timeout. */
+ if (dev->tx_timeout == NULL)
+ dev->tx_timeout = ei_tx_timeout;
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /*
+ * Grab the page lock so we own the register set, then call
+ * the init function.
+ */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ AX88190_init(dev, 1);
+ /* Set the flag before we drop the lock. That way the IRQ arrives
+ after it's set and we get no silly warnings. */
+ netif_mark_up(dev);
+ netif_start_queue(dev);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+#define dev_lock(dev) (((struct ei_device *)(dev)->priv)->page_lock)
+
+/**
+ * ax_close - shut down network device
+ * @dev: network device to close
+ *
+ * Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
+ */
+int ax_close(struct net_device *dev)
+{
+ unsigned long flags;
+
+ /*
+ * Hold the page lock during close
+ */
+
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ AX88190_init(dev, 0);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/**
+ * ei_tx_timeout - handle transmit time out condition
+ * @dev: network device which has apparently fallen asleep
+ *
+ * Called by kernel when device never acknowledges a transmit has
+ * completed (or failed) - i.e. never posted a Tx related interrupt.
+ */
+
+void ei_tx_timeout(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ unsigned long flags;
+
+ ei_local->stat.tx_errors++;
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ txsr = inb(e8390_base+EN0_TSR);
+ isr = inb(e8390_base+EN0_ISR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets)
+ {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Ugly but a reset can be slow, yet must be protected */
+
+ disable_irq_nosync(dev->irq);
+ spin_lock(&ei_local->page_lock);
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ AX88190_init(dev, 1);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_start_xmit - begin packet transmission
+ * @skb: packet to be sent
+ * @dev: network device to which packet is sent
+ *
+ * Sends a packet to an 8390 network device.
+ */
+
+static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int length, send_length, output_page;
+ unsigned long flags;
+
+ tx_timeout_check(dev, ei_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ length = skb->len;
+
+ /* Mask interrupts from the ethercard.
+ SMP: We have to grab the lock here, otherwise the IRQ handler
+ on another CPU can flip the register window and race the IRQ mask set.
+ We end up trashing the mcast filter instead of disabling irqs if we
+ don't lock. */
+
+ spin_lock_irqsave(&ei_local->page_lock, flags);
+ outb_p(0x00, e8390_base + EN0_IMR);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ /*
+ * Slow phase with lock held.
+ */
+
+ disable_irq_nosync(dev->irq);
+
+ spin_lock(&ei_local->page_lock);
+
+ ei_local->irqlock = 1;
+
+ send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0)
+ {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ }
+ else if (ei_local->tx2 == 0)
+ {
+ output_page = ei_local->tx_start_page + TX_1X_PAGES;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ }
+ else
+ { /* We should never get here. */
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ netif_stop_queue(dev);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+ ei_local->stat.tx_errors++;
+ return 1;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ ei_block_output(dev, length, skb->data, output_page);
+ if (! ei_local->txing)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ if (output_page == ei_local->tx_start_page)
+ {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ }
+ else
+ {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ }
+ else ei_local->txqueue++;
+
+ if (ei_local->tx1 && ei_local->tx2)
+ netif_stop_queue(dev);
+ else
+ netif_start_queue(dev);
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ enable_irq(dev->irq);
+
+ DEV_KFREE_SKB (skb);
+ add_tx_bytes(&ei_local->stat, send_length);
+
+ return 0;
+}
+
+/**
+ * ax_interrupt - handle the interrupts from an 8390
+ * @irq: interrupt number
+ * @dev_id: a pointer to the net_device
+ * @regs: unused
+ *
+ * Handle the ether interface interrupts. We pull packets from
+ * the 8390 via the card specific functions and fire them at the networking
+ * stack. We also handle transmit completions and wake the transmit path if
+ * necessary. We also update the counters and do other housekeeping as
+ * needed.
+ */
+
+static void ax_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct net_device *dev = dev_id;
+ long e8390_base;
+ int interrupts, nr_serviced = 0, i;
+ struct ei_device *ei_local;
+
+ if (dev == NULL)
+ {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ e8390_base = dev->base_addr;
+ ei_local = (struct ei_device *) dev->priv;
+
+ /*
+ * Protect the irq test too.
+ */
+
+ spin_lock(&ei_local->page_lock);
+
+ if (ei_local->irqlock)
+ {
+#if 1 /* This might just be an interrupt for a PCI device sharing this line */
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+#endif
+ spin_unlock(&ei_local->page_lock);
+ return;
+ }
+
+ if (ei_debug > 3)
+ printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
+ inb_p(e8390_base + EN0_ISR));
+
+ outb_p(0x00, e8390_base + EN0_ISR);
+ ei_local->irqlock = 1;
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE)
+ {
+ if (!netif_running(dev) || (interrupts == 0xff)) {
+ if (ei_debug > 1)
+ printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ interrupts = 0;
+ break;
+ }
+ /* AX88190 bug fix: the ISR bits may not clear on the first ack,
+ so re-write the acknowledgement (bounded to 10 tries) until they do. */
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ for (i = 0; i < 10; i++) {
+ if (!(inb(e8390_base + EN0_ISR) & interrupts))
+ break;
+ outb_p(0, e8390_base + EN0_ISR);
+ outb_p(interrupts, e8390_base + EN0_ISR);
+ }
+ if (interrupts & ENISR_OVER)
+ ei_rx_overrun(dev);
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
+ {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX)
+ ei_tx_intr(dev);
+ else if (interrupts & ENISR_TX_ERR)
+ ei_tx_err(dev);
+
+ if (interrupts & ENISR_COUNTERS)
+ {
+ ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
+ }
+ }
+
+ if (interrupts && ei_debug)
+ {
+ if (nr_serviced >= MAX_SERVICE)
+ {
+ /* 0xFF is valid for a card removal */
+ if(interrupts!=0xFF)
+ printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ spin_unlock(&ei_local->page_lock);
+ return;
+}
+
+/**
+ * ei_tx_err - handle transmitter error
+ * @dev: network device which threw the exception
+ *
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ *
+ * Called with lock held.
+ */
+
+static void ei_tx_err(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned char txsr = inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+
+#ifdef VERBOSE_ERROR_DUMP
+ printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ if (txsr & ENTSR_ABT)
+ printk("excess-collisions ");
+ if (txsr & ENTSR_ND)
+ printk("non-deferral ");
+ if (txsr & ENTSR_CRS)
+ printk("lost-carrier ");
+ if (txsr & ENTSR_FU)
+ printk("FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ printk("lost-heartbeat ");
+ printk("\n");
+#endif
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+}
+
+/**
+ * ei_tx_intr - transmit interrupt handler
+ * @dev: network device for which tx intr is handled
+ *
+ * We have finished a transmit: check for errors and then trigger the next
+ * packet to be sent. Called with lock held.
+ */
+
+static void ei_tx_intr(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int status = inb(e8390_base + EN0_TSR);
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+
+ if (ei_local->tx1 < 0)
+ {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ if (ei_local->tx2 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1,
+ ei_local->lasttx = 2;
+ }
+ else ei_local->lasttx = 20, ei_local->txing = 0;
+ }
+ else if (ei_local->tx2 < 0)
+ {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ if (ei_local->tx1 > 0)
+ {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ }
+ else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ }
+// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
+// dev->name, ei_local->lasttx);
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else
+ {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT)
+ {
+ ei_local->stat.tx_aborted_errors++;
+ ei_local->stat.collisions += 16;
+ }
+ if (status & ENTSR_CRS)
+ ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU)
+ ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH)
+ ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC)
+ ei_local->stat.tx_window_errors++;
+ }
+ netif_wake_queue(dev);
+}
+
+/**
+ * ei_receive - receive some packets
+ * @dev: network device with which receive will be run
+ *
+ * We have a good packet(s), get it/them out of the buffers.
+ * Called with lock held.
+ */
+
+static void ei_receive(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+
+ while (++rx_pkt_count < 10)
+ {
+ int pkt_len, pkt_stat;
+
+ /* Get the rx page (incoming packet pointer). */
+ rxing_page = inb_p(e8390_base + EN1_CURPAG -1);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone that is claimed to have a problem.)
+
+ Keep quiet if it looks like a card removal. One problem here
+ is that some clones crash in roughly the same way.
+ */
+ if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
+ printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+ pkt_stat = rx_frame.status;
+
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
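+ /* Worked example of the 256-byte page arithmetic: a frame of pkt_len
+ 1510 plus the 4-byte 8390 header gives 1 + (1514 >> 8) = 6 pages, so
+ next_frame = this_frame + 6. (The value is recomputed from
+ rx_frame.next further down.) */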
+
+ if (pkt_len < 60 || pkt_len > 1518)
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ ei_local->stat.rx_length_errors++;
+ }
+ else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ break;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ ei_local->stat.rx_packets++;
+ add_rx_bytes(&ei_local->stat, pkt_len);
+ if (pkt_stat & ENRSR_PHY)
+ ei_local->stat.multicast++;
+ }
+ }
+ else
+ {
+ if (ei_debug)
+ printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ ei_local->stat.rx_errors++;
+ /* NB: The NIC counts CRC, frame and missed errors. */
+ if (pkt_stat & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here for avoiding bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ return;
+}
+
+/**
+ * ei_rx_overrun - handle receiver overrun
+ * @dev: network device which threw exception
+ *
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ * Called with lock held. Don't call this with the interrupts off or your
+ * computer will hate you - it takes 10ms or so.
+ */
+
+static void ei_rx_overrun(struct net_device *dev)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ long e8390_base = dev->base_addr;
+ unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+
+ mdelay(10);
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ outb_p(0x00, e8390_base+EN0_RCNTLO);
+ outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+
+ if (was_txing)
+ {
+ unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed)
+ must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR);
+ if (must_resend)
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+}
+
+/*
+ * Collect the stats. This is called unlocked and from several contexts.
+ */
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned long flags;
+
+ /* If the card is stopped, just return the present stats. */
+ if (!netif_running(dev))
+ return &ei_local->stat;
+
+ spin_lock_irqsave(&ei_local->page_lock,flags);
+ /* Read the counter registers, assuming we are in page 0. */
+ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
+
+ return &ei_local->stat;
+}
+
+/**
+ * do_set_multicast_list - set/clear multicast filter
+ * @dev: net device for which multicast filter is adjusted
+ *
+ * Set or clear the multicast filter for this adaptor. May be called
+ * from a BH in 2.1.x. Must be called with lock held.
+ */
+
+static void do_set_multicast_list(struct net_device *dev)
+{
+ long e8390_base = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
+ else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
+ else
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+}
+
+/*
+ * Called without the lock held. This is invoked from user context and may
+ * run in parallel with just about everything else. It's also fairly quick and
+ * not called too often, but must protect against both bh and irq users.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_lock(dev), flags);
+ do_set_multicast_list(dev);
+ spin_unlock_irqrestore(&dev_lock(dev), flags);
+}
+
+/**
+ * axdev_init - init rest of 8390 device struct
+ * @dev: network device structure to init
+ *
+ * Initialize the rest of the 8390 device structure. Do NOT __init
+ * this, as it is used by 8390 based modular drivers too.
+ */
+
+static int axdev_init(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s", version_8390);
+
+ if (dev->priv == NULL)
+ {
+ struct ei_device *ei_local;
+
+ dev->priv = kmalloc(sizeof(struct ei_device), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct ei_device));
+ ei_local = (struct ei_device *)dev->priv;
+ spin_lock_init(&ei_local->page_lock);
+ }
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ ether_setup(dev);
+
+ return 0;
+}
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+
+/**
+ * AX88190_init - initialize 8390 hardware
+ * @dev: network device to initialize
+ * @startp: boolean. non-zero value to initiate chip processing
+ *
+ * Must be called with lock held.
+ */
+
+static void AX88190_init(struct net_device *dev, int startp)
+{
+ axnet_dev_t *info = (axnet_dev_t *)dev;
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int i;
+ int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+
+ if(sizeof(struct e8390_pkt_hdr)!=4)
+ panic("8390.c: header struct mispacked\n");
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital! */
+ outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS says 0x26 */
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers. */
+
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
+ for(i = 0; i < 6; i++)
+ {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
+ if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
+ printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
+ }
+ /*
+ * Initialize the multicast list to accept-all. If we enable multicast
+ * the higher levels can do the filtering.
+ */
+ for (i = 0; i < 8; i++)
+ outb_p(0xff, e8390_base + EN1_MULT + i);
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ netif_start_queue(dev);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+
+ if (startp)
+ {
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
+ outb_p(E8390_TXCONFIG | info->duplex_flag,
+ e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on, */
+ do_set_multicast_list(dev); /* (re)load the mcast table */
+ }
+}
+
+/* Trigger a transmit start, assuming the length is valid.
+ Always called with the page lock held */
+
+static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
+ int start_page)
+{
+ long e8390_base = dev->base_addr;
+ struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) dev->priv;
+
+ if (inb_p(e8390_base) & E8390_TRANS)
+ {
+ printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
+}
diff --git a/linux/pcmcia-cs/clients/fmvj18x_cs.c b/linux/pcmcia-cs/clients/fmvj18x_cs.c
new file mode 100644
index 0000000..bd492e8
--- /dev/null
+++ b/linux/pcmcia-cs/clients/fmvj18x_cs.c
@@ -0,0 +1,1322 @@
+/*======================================================================
+ fmvj18x_cs.c 2.8 2002/03/23
+
+ A fmvj18x (and its compatibles) PCMCIA client driver
+
+ Contributed by Shingo Fujimoto, shingo@flab.fujitsu.co.jp
+
+ TDK LAK-CD021 and CONTEC C-NET(PC)C support added by
+ Nobuhiro Katayama, kata-n@po.iijnet.or.jp
+
+ The PCMCIA client code is based on code written by David Hinds.
+ Network code is based on the "FMV-18x driver" by Yutaka TAMIYA
+ but is actually largely Donald Becker's AT1700 driver, which
+ carries the following attribution:
+
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Bit map of interrupts to choose from */
+/* This means pick from 15, 14, 12, 11, 10, 9, 7, 5, 4, and 3 */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+/* SRAM configuration */
+/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */
+INT_MODULE_PARM(sram_config, 0);
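+
+/* Illustrative usage only (not from the original source): these parameters
+   are supplied at module load time, e.g. something like
+	insmod fmvj18x_cs.o irq_list=3,5,9,10 sram_config=1
+   to restrict IRQ probing and select the 8KB*2 TX buffer layout. */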
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version = "fmvj18x_cs.c 2.8 2002/03/23";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+/*
+ PCMCIA event handlers
+ */
+static void fmvj18x_config(dev_link_t *link);
+static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id);
+static int fmvj18x_setup_mfc(dev_link_t *link);
+static void fmvj18x_release(u_long arg);
+static int fmvj18x_event(event_t event, int priority,
+ event_callback_args_t *args);
+static dev_link_t *fmvj18x_attach(void);
+static void fmvj18x_detach(dev_link_t *);
+
+/*
+ LAN controller(MBH86960A) specific routines
+ */
+static int fjn_config(struct net_device *dev, struct ifmap *map);
+static int fjn_open(struct net_device *dev);
+static int fjn_close(struct net_device *dev);
+static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void fjn_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void fjn_rx(struct net_device *dev);
+static void fjn_reset(struct net_device *dev);
+static struct net_device_stats *fjn_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static void fjn_tx_timeout(struct net_device *dev);
+
+static dev_info_t dev_info = "fmvj18x_cs";
+static dev_link_t *dev_list;
+
+/*
+ card type
+ */
+typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
+ XXX10304
+} cardtype_t;
+
+/*
+ driver specific data structure
+*/
+typedef struct local_info_t {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats stats;
+ long open_time;
+ uint tx_started:1;
+ uint tx_queue;
+ u_short tx_queue_len;
+ cardtype_t cardtype;
+ u_short sent;
+ u_char mc_filter[8];
+} local_info_t;
+
+#define MC_FILTERBREAK 64
+
+/*====================================================================*/
+/*
+ ioport offset from the base address
+ */
+#define TX_STATUS 0 /* transmit status register */
+#define RX_STATUS 1 /* receive status register */
+#define TX_INTR 2 /* transmit interrupt mask register */
+#define RX_INTR 3 /* receive interrupt mask register */
+#define TX_MODE 4 /* transmit mode register */
+#define RX_MODE 5 /* receive mode register */
+#define CONFIG_0 6 /* configuration register 0 */
+#define CONFIG_1 7 /* configuration register 1 */
+
+#define NODE_ID 8 /* node ID register (bank 0) */
+#define MAR_ADR 8 /* multicast address registers (bank 1) */
+
+#define DATAPORT 8 /* buffer mem port registers (bank 2) */
+#define TX_START 10 /* transmit start register */
+#define COL_CTRL 11 /* 16 collision control register */
+#define BMPR12 12 /* reserved */
+#define BMPR13 13 /* reserved */
+#define RX_SKIP 14 /* skip received packet register */
+
+#define LAN_CTRL 16 /* LAN card control register */
+
+#define MAC_ID 0x1a /* hardware address */
+#define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */
+
+/*
+ control bits
+ */
+#define ENA_TMT_OK 0x80
+#define ENA_TMT_REC 0x20
+#define ENA_COL 0x04
+#define ENA_16_COL 0x02
+#define ENA_TBUS_ERR 0x01
+
+#define ENA_PKT_RDY 0x80
+#define ENA_BUS_ERR 0x40
+#define ENA_LEN_ERR 0x08
+#define ENA_ALG_ERR 0x04
+#define ENA_CRC_ERR 0x02
+#define ENA_OVR_FLO 0x01
+
+/* flags */
+#define F_TMT_RDY 0x80 /* can accept new packet */
+#define F_NET_BSY 0x40 /* carrier is detected */
+#define F_TMT_OK 0x20 /* send packet successfully */
+#define F_SRT_PKT 0x10 /* short packet error */
+#define F_COL_ERR 0x04 /* collision error */
+#define F_16_COL 0x02 /* 16 collision error */
+#define F_TBUS_ERR 0x01 /* bus read error */
+
+#define F_PKT_RDY 0x80 /* packet(s) in buffer */
+#define F_BUS_ERR 0x40 /* bus read error */
+#define F_LEN_ERR 0x08 /* short packet */
+#define F_ALG_ERR 0x04 /* frame error */
+#define F_CRC_ERR 0x02 /* CRC error */
+#define F_OVR_FLO 0x01 /* overflow error */
+
+#define F_BUF_EMP 0x40 /* receive buffer is empty */
+
+#define F_SKP_PKT 0x05 /* drop packet in buffer */
+
+/* default bitmaps */
+#define D_TX_INTR ( ENA_TMT_OK )
+#define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \
+ | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO )
+#define TX_STAT_M ( F_TMT_RDY )
+#define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \
+ | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO )
+
+/* commands */
+#define D_TX_MODE 0x06 /* no tests, detect carrier */
+#define ID_MATCHED 0x02 /* (RX_MODE) */
+#define RECV_ALL 0x03 /* (RX_MODE) */
+#define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */
+#define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */
+#define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */
+#define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */
+#define BANK_0 0xa0 /* bank 0 (CONFIG_1) */
+#define BANK_1 0xa4 /* bank 1 (CONFIG_1) */
+#define BANK_2 0xa8 /* bank 2 (CONFIG_1) */
+#define CHIP_OFF 0x80 /* control chip power off (CONFIG_1) */
+#define DO_TX 0x80 /* do transmit packet */
+#define SEND_PKT 0x81 /* send a packet */
+#define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */
+#define MANU_MODE 0x03 /* Stop and skip packet on 16 col */
+#define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */
+#define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */
+#define INTR_OFF 0x0d /* LAN controller ignores interrupts */
+#define INTR_ON 0x1d /* LAN controller will catch interrupts */
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+#define BANK_0U 0x20 /* bank 0 (CONFIG_1) */
+#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */
+#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ fmvj18x_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*====================================================================*/
+
+static dev_link_t *fmvj18x_attach(void)
+{
+ local_info_t *lp;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "fmvj18x_attach()\n");
+ flush_stale_links();
+
+ /* Make up a FMVJ18x specific data structure */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ init_timer(&link->release);
+ link->release.function = &fmvj18x_release;
+ link->release.data = (u_long)link;
+
+ /* The io structure describes IO port mapping */
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 5;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &fjn_interrupt;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* The FMVJ18x specific entries in the device structure. */
+ dev->hard_start_xmit = &fjn_start_xmit;
+ dev->set_config = &fjn_config;
+ dev->get_stats = &fjn_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &fjn_open;
+ dev->stop = &fjn_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = fjn_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &fmvj18x_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ fmvj18x_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* fmvj18x_attach */
+
+/*====================================================================*/
+
+static void fmvj18x_detach(dev_link_t *link)
+{
+ local_info_t *lp = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "fmvj18x_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ fmvj18x_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&lp->dev);
+ kfree(lp);
+
+} /* fmvj18x_detach */
+
+/*====================================================================*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
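+
+/* CS_CHECK records the failing Card Services call and its return code in the
+   caller's last_fn/last_ret locals and jumps to the cs_failed label, where
+   cs_error() reports them; the callers below rely on those names existing. */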
+
+static int mfc_try_io_port(dev_link_t *link)
+{
+ int i, ret;
+ static ioaddr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+
+ for (i = 0; i < 5; i++) {
+ link->io.BasePort2 = serial_base[i];
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ if (link->io.BasePort2 == 0) {
+ link->io.NumPorts2 = 0;
+ printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n");
+ }
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+}
+
+static int ungermann_try_io_port(dev_link_t *link)
+{
+ int ret;
+ ioaddr_t ioaddr;
+ /*
+ The Ungermann-Bass Access/CARD accepts only 0x300, 0x320, 0x340,
+ 0x360, 0x380 and 0x3c0 as I/O port bases.
+ */
+ for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) {
+ link->io.BasePort1 = ioaddr;
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) {
+ /* calculate ConfigIndex value */
+ link->conf.ConfigIndex =
+ ((link->io.BasePort1 & 0x0f0) >> 3) | 0x22;
+ return ret;
+ }
+ }
+ return ret; /* RequestIO failed */
+}
+
+static void fmvj18x_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ local_info_t *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32];
+ int i, last_fn, last_ret, ret;
+ ioaddr_t ioaddr;
+ cardtype_t cardtype;
+ char *card_name = "unknown";
+ u_char *node_id;
+
+ DEBUG(0, "fmvj18x_config(0x%p)\n", link);
+
+ /*
+ This reads the card's CONFIG tuple to find its configuration
+ registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleData = (u_char *)buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ tuple.TupleOffset = 0;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) {
+ /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigIndex = parse.cftable_entry.index;
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, handle, &tuple);
+ else
+ buf[0] = 0xffff;
+ switch (le16_to_cpu(buf[0])) {
+ case MANFID_TDK:
+ cardtype = TDK;
+ if (le16_to_cpu(buf[1]) == PRODID_TDK_CF010) {
+ cs_status_t status;
+ CardServices(GetStatus, handle, &status);
+ if (status.CardState & CS_EVENT_3VCARD)
+ link->conf.Vcc = 33; /* inserted in 3.3V slot */
+ } else if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410) {
+ /* MultiFunction Card */
+ link->conf.ConfigBase = 0x800;
+ link->conf.ConfigIndex = 0x47;
+ link->io.NumPorts2 = 8;
+ }
+ break;
+ case MANFID_CONTEC:
+ cardtype = CONTEC;
+ break;
+ case MANFID_FUJITSU:
+ if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10302)
+ /* The RATOC REX-5588/9822/4886's PRODID is 0004 (=MBH10302),
+ but these are MBH10304-based cards. */
+ cardtype = MBH10304;
+ else if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304)
+ cardtype = MBH10304;
+ else
+ cardtype = LA501;
+ break;
+ default:
+ cardtype = MBH10304;
+ }
+ } else {
+ /* old type card */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if (CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS)
+ CS_CHECK(GetTupleData, handle, &tuple);
+ else
+ buf[0] = 0xffff;
+ switch (le16_to_cpu(buf[0])) {
+ case MANFID_FUJITSU:
+ if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304) {
+ cardtype = XXX10304; /* MBH10304 with buggy CIS */
+ link->conf.ConfigIndex = 0x20;
+ } else {
+ cardtype = MBH10302; /* NextCom NC5310, etc. */
+ link->conf.ConfigIndex = 1;
+ }
+ break;
+ case MANFID_UNGERMANN:
+ cardtype = UNGERMANN;
+ break;
+ default:
+ cardtype = MBH10302;
+ link->conf.ConfigIndex = 1;
+ }
+ }
+
+ if (link->io.NumPorts2 != 0) {
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ ret = mfc_try_io_port(link);
+ if (ret != CS_SUCCESS) goto cs_failed;
+ } else if (cardtype == UNGERMANN) {
+ ret = ungermann_try_io_port(link);
+ if (ret != CS_SUCCESS) goto cs_failed;
+ } else {
+ CS_CHECK(RequestIO, link->handle, &link->io);
+ }
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ if (link->io.BasePort2 != 0)
+ fmvj18x_setup_mfc(link);
+
+ ioaddr = dev->base_addr;
+
+ /* Reset controller */
+ if (sram_config == 0)
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set hardware address */
+ switch (cardtype) {
+ case MBH10304:
+ case TDK:
+ case LA501:
+ case CONTEC:
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetTupleData, handle, &tuple);
+ if (cardtype == MBH10304) {
+ /* MBH10304's CIS_FUNCE is corrupted */
+ node_id = &(tuple.TupleData[5]);
+ card_name = "FMV-J182";
+ } else {
+ while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) {
+ CS_CHECK(GetNextTuple, handle, &tuple) ;
+ CS_CHECK(GetTupleData, handle, &tuple) ;
+ }
+ node_id = &(tuple.TupleData[2]);
+ if( cardtype == TDK ) {
+ card_name = "TDK LAK-CD021";
+ } else if( cardtype == LA501 ) {
+ card_name = "LA501";
+ } else {
+ card_name = "C-NET(PC)C";
+ }
+ }
+ /* Read MACID from CIS */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = node_id[i];
+ break;
+ case UNGERMANN:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ card_name = "Access/CARD";
+ break;
+ case XXX10304:
+ /* Read MACID from Buggy CIS */
+ if (fmvj18x_get_hwinfo(link, tuple.TupleData) == -1) {
+ printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n");
+ unregister_netdev(dev);
+ goto failed;
+ }
+ for (i = 0 ; i < 6; i++) {
+ dev->dev_addr[i] = tuple.TupleData[i];
+ }
+ card_name = "FMV-J182";
+ break;
+ case MBH10302:
+ default:
+ /* Read MACID from register */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ card_name = "FMV-J181";
+ break;
+ }
+
+ copy_dev_name(lp->node, dev);
+ link->dev = &lp->node;
+
+ lp->cardtype = cardtype;
+ /* print current configuration */
+ printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, hw_addr ",
+ dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
+ dev->base_addr, dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ /* All Card Services errors end up here */
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ fmvj18x_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+
+} /* fmvj18x_config */
+/*====================================================================*/
+
+static int fmvj18x_get_hwinfo(dev_link_t *link, u_char *node_id)
+{
+ win_req_t req;
+ memreq_t mem;
+ u_char *base;
+ int i, j;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return -1;
+ }
+
+ base = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ mem.CardOffset = 0;
+ CardServices(MapMemPage, link->win, &mem);
+
+ /*
+ * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
+ * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff
+ * 'xx' is garbage.
+ * 'yy' is MAC address.
+ */
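+ /* The window maps attribute memory, which is only valid on even byte
+ addresses; hence every CIS byte below is read at base + offset*2. */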
+ for (i = 0; i < 0x200; i++) {
+ if (readb(base+i*2) == 0x22) {
+ if (readb(base+(i-1)*2) == 0xff
+ && readb(base+(i+5)*2) == 0x04
+ && readb(base+(i+6)*2) == 0x06
+ && readb(base+(i+13)*2) == 0xff)
+ break;
+ }
+ }
+
+ if (i != 0x200) {
+ for (j = 0 ; j < 6; j++,i++) {
+ node_id[j] = readb(base+(i+7)*2);
+ }
+ }
+
+ iounmap(base);
+ j = CardServices(ReleaseWindow, link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return (i != 0x200) ? 0 : -1;
+
+} /* fmvj18x_get_hwinfo */
+/*====================================================================*/
+
+static int fmvj18x_setup_mfc(dev_link_t *link)
+{
+ win_req_t req;
+ memreq_t mem;
+ u_char *base;
+ int i, j;
+ local_info_t *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return -1;
+ }
+
+ base = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ mem.CardOffset = 0;
+ CardServices(MapMemPage, link->win, &mem);
+
+ ioaddr = dev->base_addr;
+ writeb(0x47, base+0x800); /* Config Option Register of LAN */
+ writeb(0x0, base+0x802); /* Config and Status Register */
+
+ writeb(ioaddr & 0xff, base+0x80a); /* I/O Base(Low) of LAN */
+ writeb((ioaddr >> 8) & 0xff, base+0x80c); /* I/O Base(High) of LAN */
+
+ writeb(0x45, base+0x820); /* Config Option Register of Modem */
+ writeb(0x8, base+0x822); /* Config and Status Register */
+
+ iounmap(base);
+ j = CardServices(ReleaseWindow, link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return 0;
+
+}
+/*====================================================================*/
+
+static void fmvj18x_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "fmvj18x_release(0x%p)\n", link);
+
+ /*
+ If the device is currently in use, we won't release until it
+ is actually closed.
+ */
+ if (link->open) {
+ DEBUG(1, "fmvj18x_cs: release postponed, '%s' "
+ "still open\n", link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ /* Don't bother checking to see if these succeed or not */
+ CardServices(ReleaseWindow, link->win);
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* fmvj18x_release */
+
+/*====================================================================*/
+
+static int fmvj18x_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ local_info_t *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(1, "fmvj18x_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ fmvj18x_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ fjn_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* fmvj18x_event */
+
+/*====================================================================*/
+
+static int __init init_fmvj18x_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "fmvj18x: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &fmvj18x_attach, &fmvj18x_detach);
+ return 0;
+}
+
+static void __exit exit_fmvj18x_cs(void)
+{
+ DEBUG(0, "fmvj18x_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ fmvj18x_detach(dev_list);
+}
+
+module_init(init_fmvj18x_cs);
+module_exit(exit_fmvj18x_cs);
+
+/*====================================================================*/
+
+static void fjn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ local_info_t *lp = dev_id;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr;
+ unsigned short tx_stat, rx_stat;
+
+ if (lp == NULL) {
+ printk(KERN_NOTICE "fjn_interrupt(): irq %d for "
+ "unknown device.\n", irq);
+ return;
+ }
+ ioaddr = dev->base_addr;
+
+ /* avoid multiple interrupts */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ /* get status */
+ tx_stat = inb(ioaddr + TX_STATUS);
+ rx_stat = inb(ioaddr + RX_STATUS);
+
+ /* clear status */
+ outb(tx_stat, ioaddr + TX_STATUS);
+ outb(rx_stat, ioaddr + RX_STATUS);
+
+ DEBUG(4, "%s: interrupt, rx_status %02x.\n", dev->name, rx_stat);
+ DEBUG(4, " tx_status %02x.\n", tx_stat);
+
+ if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ /* there is packet(s) in rx buffer */
+ fjn_rx(dev);
+ }
+ if (tx_stat & F_TMT_RDY) {
+ lp->stats.tx_packets += lp->sent ;
+ lp->sent = 0 ;
+ if (lp->tx_queue) {
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ } else {
+ lp->tx_started = 0;
+ }
+ netif_wake_queue(dev);
+ }
+ DEBUG(4, "%s: exiting interrupt,\n", dev->name);
+ DEBUG(4, " tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat);
+
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+
+} /* fjn_interrupt */
+
+/*====================================================================*/
+
+static void fjn_tx_timeout(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
+ dev->name, htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & F_TMT_RDY
+ ? "IRQ conflict" : "network cable problem");
+ printk(KERN_NOTICE "%s: timeout registers: %04x %04x %04x "
+ "%04x %04x %04x %04x %04x.\n",
+ dev->name, htons(inw(ioaddr + 0)),
+ htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
+ htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
+ htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
+ htons(inw(ioaddr +14)));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ cli();
+
+ fjn_reset(dev);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->sent = 0;
+ lp->open_time = jiffies;
+ sti();
+ netif_wake_queue(dev);
+}
+
+static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ tx_timeout_check(dev, fjn_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ if (length > ETH_FRAME_LEN) {
+ printk(KERN_NOTICE "%s: Attempting to send a large packet"
+ " (%d bytes).\n", dev->name, length);
+ return 1;
+ }
+
+ DEBUG(4, "%s: Transmitting a packet of length %lu.\n",
+ dev->name, (unsigned long)skb->len);
+ add_tx_bytes(&lp->stats, skb->len);
+
+ /* Disable both interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ /* wait for a while */
+ udelay(1);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += ((length+3) & ~1);
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(DO_TX | lp->tx_queue, ioaddr + TX_START);
+ lp->sent = lp->tx_queue ;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ netif_start_queue(dev);
+ } else {
+ if( sram_config == 0 ) {
+ if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ } else {
+ if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) &&
+ lp->tx_queue < 127 )
+ /* Yes, there is room for one more packet. */
+ netif_start_queue(dev);
+ }
+ }
+
+ /* Re-enable interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+ }
+ DEV_KFREE_SKB (skb);
+
+ return 0;
+} /* fjn_start_xmit */
+
+/*====================================================================*/
+
+static void fjn_reset(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ DEBUG(4, "fjn_reset(%s) called.\n",dev->name);
+
+ /* Reset controller */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
+ /* Power On chip and select bank 0 */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_0, ioaddr + CONFIG_1);
+ else
+ outb(BANK_0U, ioaddr + CONFIG_1);
+
+ /* Set Tx modes */
+ outb(D_TX_MODE, ioaddr + TX_MODE);
+ /* set Rx modes */
+ outb(ID_MATCHED, ioaddr + RX_MODE);
+
+ /* Set hardware address */
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + NODE_ID + i);
+
+ /* Switch to bank 1 */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_1, ioaddr + CONFIG_1);
+ else
+ outb(BANK_1U, ioaddr + CONFIG_1);
+
+ /* set the multicast table to accept none. */
+ for (i = 0; i < 6; i++)
+ outb(0x00, ioaddr + MAR_ADR + i);
+
+ /* Switch to bank 2 (runtime mode) */
+ if (lp->cardtype == MBH10302)
+ outb(BANK_2, ioaddr + CONFIG_1);
+ else
+ outb(BANK_2U, ioaddr + CONFIG_1);
+
+ /* set 16col ctrl bits */
+ if( lp->cardtype == TDK || lp->cardtype == CONTEC)
+ outb(TDK_AUTO_MODE, ioaddr + COL_CTRL);
+ else
+ outb(AUTO_MODE, ioaddr + COL_CTRL);
+
+ /* clear Reserved Regs */
+ outb(0x00, ioaddr + BMPR12);
+ outb(0x00, ioaddr + BMPR13);
+
+ /* reset Skip packet reg. */
+ outb(0x01, ioaddr + RX_SKIP);
+
+ /* Enable Tx and Rx */
+ if( sram_config == 0 )
+ outb(CONFIG0_DFL, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_DFL_1, ioaddr + CONFIG_0);
+
+ /* Init receive pointer ? */
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ /* Clear all status */
+ outb(0xff, ioaddr + TX_STATUS);
+ outb(0xff, ioaddr + RX_STATUS);
+
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ /* Turn on Rx interrupts */
+ outb(D_TX_INTR, ioaddr + TX_INTR);
+ outb(D_RX_INTR, ioaddr + RX_INTR);
+
+ /* Turn on interrupts from LAN card controller */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_ON, ioaddr + LAN_CTRL);
+} /* fjn_reset */
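+
+/* The CONFIG_1 writes above select register banks: bank 0 exposes the
+   node ID registers, bank 1 the multicast address table, and bank 2
+   the runtime (BMPR) registers used during normal operation, which is
+   presumably why the reset sequence walks through them in that order. */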
+
+/*====================================================================*/
+
+static void fjn_rx(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int boguscount = 10; /* 5 -> 10: by agy 19940922 */
+
+ DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n",
+ dev->name, inb(ioaddr + RX_STATUS));
+
+ while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
+ u_short status = inw(ioaddr + DATAPORT);
+
+ DEBUG(4, "%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ break;
+ }
+#endif
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & F_LEN_ERR) lp->stats.rx_length_errors++;
+ if (status & F_ALG_ERR) lp->stats.rx_frame_errors++;
+ if (status & F_CRC_ERR) lp->stats.rx_crc_errors++;
+ if (status & F_OVR_FLO) lp->stats.rx_over_errors++;
+ } else {
+ u_short pkt_len = inw(ioaddr + DATAPORT);
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk(KERN_NOTICE "%s: The FMV-18x claimed a very "
+ "large packet, size %d.\n", dev->name, pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk(KERN_NOTICE "%s: Memory squeeze, dropping "
+ "packet (len %d).\n", dev->name, pkt_len);
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ skb_reserve(skb, 2);
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 5) {
+ int i;
+ printk(KERN_DEBUG "%s: Rxed packet of length %d: ",
+ dev->name, pkt_len);
+ for (i = 0; i < 14; i++)
+ printk(" %02x", skb->data[i]);
+ printk(".\n");
+ }
+#endif
+
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ add_rx_bytes(&lp->stats, pkt_len);
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a netif_wake_queue() for us and will work on them
+ when we get to the bottom-half routine. */
+/*
+ if (lp->cardtype != TDK) {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP)
+ break;
+ (void)inw(ioaddr + DATAPORT); /+ dummy status read +/
+ outb(F_SKP_PKT, ioaddr + RX_SKIP);
+ }
+
+ if (i > 0)
+ DEBUG(5, "%s: Exint Rx packet with mode %02x after "
+ "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
+ }
+*/
+
+ return;
+} /* fjn_rx */
+
+/*====================================================================*/
+
+static int fjn_config(struct net_device *dev, struct ifmap *map)
+{
+    return 0;
+}
+
+static int fjn_open(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(4, "fjn_open('%s').\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+
+ fjn_reset(dev);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->open_time = jiffies;
+ netif_mark_up(dev);
+ netif_start_queue(dev);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+} /* fjn_open */
+
+/*====================================================================*/
+
+static int fjn_close(struct net_device *dev)
+{
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ dev_link_t *link = &lp->link;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(4, "fjn_close('%s').\n", dev->name);
+
+ lp->open_time = 0;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ if( sram_config == 0 )
+ outb(CONFIG0_RST ,ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(CHIP_OFF ,ioaddr + CONFIG_1);
+
+ /* Set the ethernet adaptor disable IRQ */
+ if (lp->cardtype == MBH10302)
+ outb(INTR_OFF, ioaddr + LAN_CTRL);
+
+ link->open--;
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* fjn_close */
+
+/*====================================================================*/
+
+static struct net_device_stats *fjn_get_stats(struct net_device *dev)
+{
+ local_info_t *lp = (local_info_t *)dev->priv;
+ return &lp->stats;
+} /* fjn_get_stats */
+
+/*====================================================================*/
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+static void set_rx_mode(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ struct local_info_t *lp = (struct local_info_t *)dev->priv;
+ u_char mc_filter[8]; /* Multicast hash filter */
+ u_long flags;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (dev->mc_count > MC_FILTERBREAK
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (dev->mc_count == 0) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ }
+
+ save_flags(flags);
+ cli();
+ if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
+ int saved_bank = inb(ioaddr + CONFIG_1);
+ /* Switch to bank 1 and set the multicast table. */
+ outb(0xe4, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + 8 + i);
+ memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
+ outb(saved_bank, ioaddr + CONFIG_1);
+ }
+ restore_flags(flags);
+}
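+
+/* Rx filtering summary for set_rx_mode(): RX_MODE 3 is promiscuous,
+   2 is the normal mode in which the 64-bit multicast hash decides
+   which multicast frames pass, and 1 ignores almost all multicasts.
+   Each multicast address selects one of the 64 hash bits via the low
+   six bits of its little-endian Ethernet CRC (ether_crc_le() & 0x3f),
+   and the hash bytes are written into bank 1 of the controller. */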
diff --git a/linux/pcmcia-cs/clients/nmclan_cs.c b/linux/pcmcia-cs/clients/nmclan_cs.c
new file mode 100644
index 0000000..2f6fb08
--- /dev/null
+++ b/linux/pcmcia-cs/clients/nmclan_cs.c
@@ -0,0 +1,1744 @@
+/* ----------------------------------------------------------------------------
+Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN.
+ nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao
+
+ The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media
+ Access Controller for Ethernet (MACE). It is essentially the Am2150
+ PCMCIA Ethernet card contained in the Am2150 Demo Kit.
+
+Written by Roger C. Pao <rpao@paonet.org>
+ Copyright 1995 Roger C. Pao
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+Ported to Linux 1.3.* network driver environment by
+ Matti Aarnio <mea@utu.fi>
+
+References
+
+ Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993
+ Am79C940 (MACE) Data Sheet, 1994
+ Am79C90 (C-LANCE) Data Sheet, 1994
+ Linux PCMCIA Programmer's Guide v1.17
+ /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8
+
+ Eric Mears, New Media Corporation
+ Tom Pollard, New Media Corporation
+ Dean Siasoyco, New Media Corporation
+ Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com>
+ Donald Becker <becker@scyld.com>
+ David Hinds <dahinds@users.sourceforge.net>
+
+ The Linux client driver is based on the 3c589_cs.c client driver by
+ David Hinds.
+
+ The Linux network driver outline is based on the 3c589_cs.c driver,
+ the 8390.c driver, and the example skeleton.c kernel code, which are
+ by Donald Becker.
+
+ The Am2150 network driver hardware interface code is based on the
+ OS/9000 driver for the New Media Ethernet LAN by Eric Mears.
+
+ Special thanks for testing and help in debugging this driver goes
+ to Ken Lesniak.
+
+-------------------------------------------------------------------------------
+Driver Notes and Issues
+-------------------------------------------------------------------------------
+
+1. Developed on a Dell 320SLi
+ PCMCIA Card Services 2.6.2
+ Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386
+
+2. rc.pcmcia may require loading pcmcia_core with io_speed=300:
+ 'insmod pcmcia_core.o io_speed=300'.
+   This avoids a problem on fast systems where rx_framecnt can return
+   random values.
+
+3. If hot extraction does not work for you, use 'ifconfig eth0 down'
+ before extraction.
+
+4. There is a bad slow-down problem in this driver.
+
+5. Future: Multicast processing. In the meantime, do _not_ compile your
+ kernel with multicast ip enabled.
+
+-------------------------------------------------------------------------------
+History
+-------------------------------------------------------------------------------
+Log: nmclan_cs.c,v
+ * Revision 0.16 1995/07/01 06:42:17 rpao
+ * Bug fix: nmclan_reset() called CardServices incorrectly.
+ *
+ * Revision 0.15 1995/05/24 08:09:47 rpao
+ * Re-implement MULTI_TX dev->tbusy handling.
+ *
+ * Revision 0.14 1995/05/23 03:19:30 rpao
+ * Added, in nmclan_config(), "tuple.Attributes = 0;".
+ * Modified MACE ID check to ignore chip revision level.
+ * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
+ *
+ * Revision 0.13 1995/05/18 05:56:34 rpao
+ * Statistics changes.
+ * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
+ * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
+ *
+ * Revision 0.12 1995/05/14 00:12:23 rpao
+ * Statistics overhaul.
+ *
+
+95/05/13 rpao V0.10a
+ Bug fix: MACE statistics counters used wrong I/O ports.
+ Bug fix: mace_interrupt() needed to allow statistics to be
+ processed without RX or TX interrupts pending.
+95/05/11 rpao V0.10
+ Multiple transmit request processing.
+ Modified statistics to use MACE counters where possible.
+95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
+ *Released
+95/05/10 rpao V0.08
+ Bug fix: Make all non-exported functions private by using
+ static keyword.
+ Bug fix: Test IntrCnt _before_ reading MACE_IR.
+95/05/10 rpao V0.07 Statistics.
+95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
+
+---------------------------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------------
+Conditional Compilation Options
+---------------------------------------------------------------------------- */
+
+#define MULTI_TX 0
+#define RESET_ON_TIMEOUT 1
+#define TX_INTERRUPTABLE 1
+#define RESET_XILINX 0
+
+/* ----------------------------------------------------------------------------
+Include Files
+---------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+/* ----------------------------------------------------------------------------
+Defines
+---------------------------------------------------------------------------- */
+
+#define ETHER_ADDR_LEN ETH_ALEN
+ /* 6 bytes in an Ethernet Address */
+#define MACE_LADRF_LEN 8
+ /* 8 bytes in Logical Address Filter */
+
+/* Loop Control Defines */
+#define MACE_MAX_IR_ITERATIONS 10
+#define MACE_MAX_RX_ITERATIONS 12
+ /*
+ TBD: Dean brought this up, and I assumed the hardware would
+ handle it:
+
+ If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
+ non-zero when the isr exits. We may not get another interrupt
+ to process the remaining packets for some time.
+ */
+
+/*
+The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
+which manages the interface between the MACE and the PCMCIA bus. It
+also includes buffer management for the 32K x 8 SRAM to control up to
+four transmit and 12 receive frames at a time.
+*/
+#define AM2150_MAX_TX_FRAMES 4
+#define AM2150_MAX_RX_FRAMES 12
+
+/* Am2150 Ethernet Card I/O Mapping */
+#define AM2150_RCV 0x00
+#define AM2150_XMT 0x04
+#define AM2150_XMT_SKIP 0x09
+#define AM2150_RCV_NEXT 0x0A
+#define AM2150_RCV_FRAME_COUNT 0x0B
+#define AM2150_MACE_BANK 0x0C
+#define AM2150_MACE_BASE 0x10
+
+/* MACE Registers */
+#define MACE_RCVFIFO 0
+#define MACE_XMTFIFO 1
+#define MACE_XMTFC 2
+#define MACE_XMTFS 3
+#define MACE_XMTRC 4
+#define MACE_RCVFC 5
+#define MACE_RCVFS 6
+#define MACE_FIFOFC 7
+#define MACE_IR 8
+#define MACE_IMR 9
+#define MACE_PR 10
+#define MACE_BIUCC 11
+#define MACE_FIFOCC 12
+#define MACE_MACCC 13
+#define MACE_PLSCC 14
+#define MACE_PHYCC 15
+#define MACE_CHIPIDL 16
+#define MACE_CHIPIDH 17
+#define MACE_IAC 18
+/* Reserved */
+#define MACE_LADRF 20
+#define MACE_PADR 21
+/* Reserved */
+/* Reserved */
+#define MACE_MPC 24
+/* Reserved */
+#define MACE_RNTPC 26
+#define MACE_RCVCC 27
+/* Reserved */
+#define MACE_UTR 29
+#define MACE_RTR1 30
+#define MACE_RTR2 31
+
+/* MACE Bit Masks */
+#define MACE_XMTRC_EXDEF 0x80
+#define MACE_XMTRC_XMTRC 0x0F
+
+#define MACE_XMTFS_XMTSV 0x80
+#define MACE_XMTFS_UFLO 0x40
+#define MACE_XMTFS_LCOL 0x20
+#define MACE_XMTFS_MORE 0x10
+#define MACE_XMTFS_ONE 0x08
+#define MACE_XMTFS_DEFER 0x04
+#define MACE_XMTFS_LCAR 0x02
+#define MACE_XMTFS_RTRY 0x01
+
+#define MACE_RCVFS_RCVSTS 0xF000
+#define MACE_RCVFS_OFLO 0x8000
+#define MACE_RCVFS_CLSN 0x4000
+#define MACE_RCVFS_FRAM 0x2000
+#define MACE_RCVFS_FCS 0x1000
+
+#define MACE_FIFOFC_RCVFC 0xF0
+#define MACE_FIFOFC_XMTFC 0x0F
+
+#define MACE_IR_JAB 0x80
+#define MACE_IR_BABL 0x40
+#define MACE_IR_CERR 0x20
+#define MACE_IR_RCVCCO 0x10
+#define MACE_IR_RNTPCO 0x08
+#define MACE_IR_MPCO 0x04
+#define MACE_IR_RCVINT 0x02
+#define MACE_IR_XMTINT 0x01
+
+#define MACE_MACCC_PROM 0x80
+#define MACE_MACCC_DXMT2PD 0x40
+#define MACE_MACCC_EMBA 0x20
+#define MACE_MACCC_RESERVED 0x10
+#define MACE_MACCC_DRCVPA 0x08
+#define MACE_MACCC_DRCVBC 0x04
+#define MACE_MACCC_ENXMT 0x02
+#define MACE_MACCC_ENRCV 0x01
+
+#define MACE_PHYCC_LNKFL 0x80
+#define MACE_PHYCC_DLNKTST 0x40
+#define MACE_PHYCC_REVPOL 0x20
+#define MACE_PHYCC_DAPC 0x10
+#define MACE_PHYCC_LRT 0x08
+#define MACE_PHYCC_ASEL 0x04
+#define MACE_PHYCC_RWAKE 0x02
+#define MACE_PHYCC_AWAKE 0x01
+
+#define MACE_IAC_ADDRCHG 0x80
+#define MACE_IAC_PHYADDR 0x04
+#define MACE_IAC_LOGADDR 0x02
+
+#define MACE_UTR_RTRE 0x80
+#define MACE_UTR_RTRD 0x40
+#define MACE_UTR_RPA 0x20
+#define MACE_UTR_FCOLL 0x10
+#define MACE_UTR_RCVFCSE 0x08
+#define MACE_UTR_LOOP_INCL_MENDEC 0x06
+#define MACE_UTR_LOOP_NO_MENDEC 0x04
+#define MACE_UTR_LOOP_EXTERNAL 0x02
+#define MACE_UTR_LOOP_NONE 0x00
+#define MACE_UTR_RESERVED 0x01
+
+/* Switch MACE register bank (only 0 and 1 are valid) */
+#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK)
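+/* Only MACE registers 0-15 are directly addressable at the I/O base;
+   registers 16-31 (chip ID, LADRF, PADR, the statistics counters, UTR,
+   ...) live in bank 1 and are reached through this bank register, which
+   is what mace_read()/mace_write() below do with interrupts disabled. */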
+
+#define MACE_IMR_DEFAULT \
+ (0xFF - \
+ ( \
+ MACE_IR_CERR | \
+ MACE_IR_RCVCCO | \
+ MACE_IR_RNTPCO | \
+ MACE_IR_MPCO | \
+ MACE_IR_RCVINT | \
+ MACE_IR_XMTINT \
+ ) \
+ )
+#undef MACE_IMR_DEFAULT
+#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */
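+/* The masked-out definition above works out to 0xC0 (only JAB and BABL
+   left masked); redefining it as 0x00 unmasks every MACE interrupt
+   source so the handler can fold all of the counter overflows into the
+   driver statistics. */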
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* ----------------------------------------------------------------------------
+Type Definitions
+---------------------------------------------------------------------------- */
+
+typedef struct _mace_statistics {
+ /* MACE_XMTFS */
+ int xmtsv;
+ int uflo;
+ int lcol;
+ int more;
+ int one;
+ int defer;
+ int lcar;
+ int rtry;
+
+ /* MACE_XMTRC */
+ int exdef;
+ int xmtrc;
+
+ /* RFS1--Receive Status (RCVSTS) */
+ int oflo;
+ int clsn;
+ int fram;
+ int fcs;
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ int rfs_rntpc;
+
+ /* RFS3--Receive Collision Count (RCVCC) */
+ int rfs_rcvcc;
+
+ /* MACE_IR */
+ int jab;
+ int babl;
+ int cerr;
+ int rcvcco;
+ int rntpco;
+ int mpco;
+
+ /* MACE_MPC */
+ int mpc;
+
+ /* MACE_RNTPC */
+ int rntpc;
+
+ /* MACE_RCVCC */
+ int rcvcc;
+} mace_statistics;
+
+typedef struct _mace_private {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats linux_stats; /* Linux statistics counters */
+ mace_statistics mace_stats; /* MACE chip statistics counters */
+
+ /* restore_multicast_list() state variables */
+ int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */
+ int multicast_num_addrs;
+
+ char tx_free_frames; /* Number of free transmit frame buffers */
+ char tx_irq_disabled; /* MACE TX interrupt disabled */
+} mace_private;
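+
+/* tx_free_frames tracks how many of the AM2150_MAX_TX_FRAMES on-card
+   transmit buffers are still available, and tx_irq_disabled marks the
+   window in mace_start_xmit() during which TX interrupts are masked so
+   that mace_interrupt() can detect and report unexpected re-entry. */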
+
+/* ----------------------------------------------------------------------------
+Private Global Variables
+---------------------------------------------------------------------------- */
+
+#ifdef PCMCIA_DEBUG
+static char rcsid[] =
+"nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao";
+static char *version =
+"nmclan_cs 0.16 (Roger C. Pao)";
+#endif
+
+static dev_info_t dev_info="nmclan_cs";
+static dev_link_t *dev_list=NULL;
+
+static char *if_names[]={
+ "Auto", "10baseT", "BNC",
+};
+
+/* ----------------------------------------------------------------------------
+Parameters
+ These are the parameters that can be set during loading with
+ 'insmod'.
+---------------------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("New Media PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */
+INT_MODULE_PARM(if_port, 0);
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+#else
+#define DEBUG(n, args...)
+#endif
+
+/* ----------------------------------------------------------------------------
+Function Prototypes
+---------------------------------------------------------------------------- */
+
+static void nmclan_config(dev_link_t *link);
+static void nmclan_release(u_long arg);
+static int nmclan_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static void nmclan_reset(struct net_device *dev);
+static int mace_config(struct net_device *dev, struct ifmap *map);
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev);
+static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static struct net_device_stats *mace_get_stats(struct net_device *dev);
+static int mace_rx(struct net_device *dev, unsigned char RxCnt);
+static void restore_multicast_list(struct net_device *dev);
+
+static void set_multicast_list(struct net_device *dev);
+
+static dev_link_t *nmclan_attach(void);
+static void nmclan_detach(dev_link_t *);
+
+/* ----------------------------------------------------------------------------
+flush_stale_links
+ Clean up stale device structures
+---------------------------------------------------------------------------- */
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ nmclan_detach(link);
+ }
+}
+
+/* ----------------------------------------------------------------------------
+cs_error
+ Report a Card Services related error.
+---------------------------------------------------------------------------- */
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/* ----------------------------------------------------------------------------
+nmclan_attach
+ Creates an "instance" of the driver, allocating local data
+ structures for one device. The device is registered with Card
+ Services.
+---------------------------------------------------------------------------- */
+
+static dev_link_t *nmclan_attach(void)
+{
+ mace_private *lp;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "nmclan_attach()\n");
+ DEBUG(1, "%s\n", rcsid);
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ lp = kmalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp) return NULL;
+ memset(lp, 0, sizeof(*lp));
+ link = &lp->link; dev = &lp->dev;
+ link->priv = dev->priv = link->irq.Instance = lp;
+
+ init_timer(&link->release);
+ link->release.function = &nmclan_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 32;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 5;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &mace_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ dev->hard_start_xmit = &mace_start_xmit;
+ dev->set_config = &mace_config;
+ dev->get_stats = &mace_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ ether_setup(dev);
+ init_dev_name(dev, lp->node);
+ dev->open = &mace_open;
+ dev->stop = &mace_close;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = mace_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &nmclan_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ nmclan_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* nmclan_attach */
+
+/* ----------------------------------------------------------------------------
+nmclan_detach
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+---------------------------------------------------------------------------- */
+
+static void nmclan_detach(dev_link_t *link)
+{
+ mace_private *lp = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "nmclan_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ nmclan_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&lp->dev);
+ kfree(lp);
+
+} /* nmclan_detach */
+
+/* ----------------------------------------------------------------------------
+mace_read
+ Reads a MACE register. This is bank independent; however, the
+ caller must ensure that this call is not interruptible. We are
+ assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static int mace_read(ioaddr_t ioaddr, int reg)
+{
+ int data = 0xFF;
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ data = inb(ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ save_flags(flags);
+ cli();
+ MACEBANK(1);
+ data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ restore_flags(flags);
+ break;
+ }
+ return (data & 0xFF);
+} /* mace_read */
+
+/* ----------------------------------------------------------------------------
+mace_write
+ Writes to a MACE register. This is bank independent; however,
+ the caller must ensure that this call is not interruptible. We
+ are assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static void mace_write(ioaddr_t ioaddr, int reg, int data)
+{
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ save_flags(flags);
+ cli();
+ MACEBANK(1);
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ restore_flags(flags);
+ break;
+ }
+} /* mace_write */
+
+/* ----------------------------------------------------------------------------
+mace_init
+ Resets the MACE chip.
+---------------------------------------------------------------------------- */
+static void mace_init(ioaddr_t ioaddr, char *enet_addr)
+{
+ int i;
+
+ /* MACE Software reset */
+ mace_write(ioaddr, MACE_BIUCC, 1);
+ while (mace_read(ioaddr, MACE_BIUCC) & 0x01) {
+ /* Wait for reset bit to be cleared automatically after <= 200ns */;
+ }
+ mace_write(ioaddr, MACE_BIUCC, 0);
+
+ /* The Am2150 requires that the MACE FIFOs operate in burst mode. */
+ mace_write(ioaddr, MACE_FIFOCC, 0x0F);
+
+ mace_write(ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */
+ mace_write(ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */
+
+ /*
+ * Bit 2-1 PORTSEL[1-0] Port Select.
+ * 00 AUI/10Base-2
+ * 01 10Base-T
+ * 10 DAI Port (reserved in Am2150)
+ * 11 GPSI
+ * For this card, only the first two are valid.
+ * So, PLSCC should be set to
+ * 0x00 for 10Base-2
+ * 0x02 for 10Base-T
+ * Or just set ASEL in PHYCC below!
+ */
+ switch (if_port) {
+ case 1:
+ mace_write(ioaddr, MACE_PLSCC, 0x02);
+ break;
+ case 2:
+ mace_write(ioaddr, MACE_PLSCC, 0x00);
+ break;
+ default:
+ mace_write(ioaddr, MACE_PHYCC, /* ASEL */ 4);
+ /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
+ and the MACE device will automatically select the operating media
+ interface port. */
+ break;
+ }
+
+ mace_write(ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR);
+ /* Poll ADDRCHG bit */
+ while (mace_read(ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ ;
+ /* Set PADR register */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ mace_write(ioaddr, MACE_PADR, enet_addr[i]);
+
+ /* MAC Configuration Control Register should be written last */
+ /* Let set_multicast_list set this. */
+ /* mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */
+ mace_write(ioaddr, MACE_MACCC, 0x00);
+} /* mace_init */
+
+/* ----------------------------------------------------------------------------
+nmclan_config
+ This routine is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+---------------------------------------------------------------------------- */
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+static void nmclan_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ mace_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[64];
+ int i, last_ret, last_fn;
+ ioaddr_t ioaddr;
+
+ DEBUG(0, "nmclan_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ CS_CHECK(RequestIO, handle, &link->io);
+ CS_CHECK(RequestIRQ, handle, &link->irq);
+ CS_CHECK(RequestConfiguration, handle, &link->conf);
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ i = register_netdev(dev);
+ if (i != 0) {
+ printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ ioaddr = dev->base_addr;
+
+ /* Read the ethernet address from the CIS. */
+ tuple.DesiredTuple = 0x80 /* CISTPL_CFTABLE_ENTRY_MISC */;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
+
+ /* Verify configuration by reading the MACE ID. */
+ {
+ char sig[2];
+
+ sig[0] = mace_read(ioaddr, MACE_CHIPIDL);
+ sig[1] = mace_read(ioaddr, MACE_CHIPIDH);
+ if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
+ DEBUG(0, "nmclan_cs configured: mace id=%x %x\n",
+ sig[0], sig[1]);
+ } else {
+ printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
+ " be 0x40 0x?9\n", sig[0], sig[1]);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+ }
+ }
+
+ mace_init(ioaddr, dev->dev_addr);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if (if_port <= 2)
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
+
+#if 0
+ /* Determine which port we are using if auto is selected */
+ if (if_port==0) {
+ mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+ DEBUG(2, "%s: mace_phycc 0x%X.\n", dev->name,
+ mace_read(ioaddr, MACE_PHYCC));
+ if (mace_read(ioaddr, MACE_PHYCC) & MACE_PHYCC_LNKFL)
+ /* 10base-T receiver is in link fail, MACE is using AUI port. */
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+ mace_write(ioaddr, MACE_MACCC, 0x00);
+ }
+ /* Unfortunately, this doesn't seem to work. LNKFL is always set.
+ LNKFL is supposed to be opposite the green LED on the edge of the card.
+ It doesn't work if it is checked and printed in _open() either.
+ It does work if checked in _start_xmit(), but that's not a good place
+ to printk. */
+#endif
+
+ copy_dev_name(lp->node, dev);
+ link->dev = &lp->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+
+ printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port, hw_addr ",
+ dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ nmclan_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+} /* nmclan_config */
+
+/* ----------------------------------------------------------------------------
+nmclan_release
+ After a card is removed, nmclan_release() will unregister the
+ net device, and release the PCMCIA configuration. If the device
+ is still open, this will be postponed until it is closed.
+---------------------------------------------------------------------------- */
+static void nmclan_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "nmclan_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "nmclan_cs: release postponed, '%s' "
+ "still open\n", link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* nmclan_release */
+
+/* ----------------------------------------------------------------------------
+nmclan_event
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+---------------------------------------------------------------------------- */
+static int nmclan_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ mace_private *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(1, "nmclan_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ nmclan_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ nmclan_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ case CS_EVENT_RESET_REQUEST:
+ return 1;
+ break;
+ }
+ return 0;
+} /* nmclan_event */
+
+/* ----------------------------------------------------------------------------
+nmclan_reset
+ Reset and restore all of the Xilinx and MACE registers.
+---------------------------------------------------------------------------- */
+static void nmclan_reset(struct net_device *dev)
+{
+ mace_private *lp = dev->priv;
+
+#if RESET_XILINX
+ dev_link_t *link = &lp->link;
+ conf_reg_t reg;
+ u_long OrigCorValue;
+
+ /* Save original COR value */
+ reg.Function = 0;
+ reg.Action = CS_READ;
+ reg.Offset = CISREG_COR;
+ reg.Value = 0;
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ OrigCorValue = reg.Value;
+
+ /* Reset Xilinx */
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_COR;
+ DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
+ OrigCorValue);
+ reg.Value = COR_SOFT_RESET;
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ /* Need to wait for 20 ms for PCMCIA to finish reset. */
+
+ /* Restore original COR configuration index */
+ reg.Value = COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK);
+ CardServices(AccessConfigurationRegister, link->handle, &reg);
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+#endif /* #if RESET_XILINX */
+
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ /* Reinitialize the MACE chip for operation. */
+ mace_init(dev->base_addr, dev->dev_addr);
+ mace_write(dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT);
+
+ /* Restore the multicast list and enable TX and RX. */
+ restore_multicast_list(dev);
+} /* nmclan_reset */
+
+/* ----------------------------------------------------------------------------
+mace_config
+ [Someone tell me what this is supposed to do? Is if_port a defined
+ standard? If so, there should be defines to indicate 1=10Base-T,
+ 2=10Base-2, etc. including limited automatic detection.]
+---------------------------------------------------------------------------- */
+static int mace_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 2) {
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n", dev->name,
+ if_names[dev->if_port]);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+} /* mace_config */
+
+/* ----------------------------------------------------------------------------
+mace_open
+ Open device driver.
+---------------------------------------------------------------------------- */
+static int mace_open(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ mace_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ MACEBANK(0);
+
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+ nmclan_reset(dev);
+
+ return 0; /* Always succeed */
+} /* mace_open */
+
+/* ----------------------------------------------------------------------------
+mace_close
+ Closes device driver.
+---------------------------------------------------------------------------- */
+static int mace_close(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ mace_private *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(2, "%s: shutting down ethercard.\n", dev->name);
+
+ /* Mask off all interrupts from the MACE chip. */
+ outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* mace_close */
+
+/* ----------------------------------------------------------------------------
+mace_start_xmit
+ This routine begins the packet transmit function. When completed,
+ it will generate a transmit interrupt.
+
+ According to /usr/src/linux/net/inet/dev.c, if _start_xmit
+ returns 0, the "packet is now solely the responsibility of the
+ driver." If _start_xmit returns non-zero, the "transmission
+ failed, put skb back into a list."
+---------------------------------------------------------------------------- */
+
+static void mace_tx_timeout(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ dev_link_t *link = &lp->link;
+
+ printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name);
+#if RESET_ON_TIMEOUT
+ printk("resetting card\n");
+ CardServices(ResetCard, link->handle);
+#else /* #if RESET_ON_TIMEOUT */
+ printk("NOT resetting card\n");
+#endif /* #if RESET_ON_TIMEOUT */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ tx_timeout_check(dev, mace_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ DEBUG(3, "%s: mace_start_xmit(length = %ld) called.\n",
+ dev->name, (long)skb->len);
+
+#if (!TX_INTERRUPTABLE)
+ /* Disable MACE TX interrupts. */
+ outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
+ ioaddr + AM2150_MACE_BASE + MACE_IMR);
+ lp->tx_irq_disabled=1;
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ {
+ /* This block must not be interrupted by another transmit request!
+ mace_tx_timeout will take care of timer-based retransmissions from
+ the upper layers. The interrupt handler is guaranteed never to
+ service a transmit interrupt while we are in here.
+ */
+
+ add_tx_bytes(&lp->linux_stats, skb->len);
+ lp->tx_free_frames--;
+
+ /* WARNING: Write the _exact_ number of bytes written in the header! */
+ /* Put out the word header [must be an outw()] . . . */
+ outw(skb->len, ioaddr + AM2150_XMT);
+ /* . . . and the packet [may be any combination of outw() and outb()] */
+ outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
+ if (skb->len & 1) {
+ /* Odd byte transfer */
+ outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
+ }
+
+ dev->trans_start = jiffies;
+
+#if MULTI_TX
+ if (lp->tx_free_frames > 0)
+ netif_start_queue(dev);
+#endif /* #if MULTI_TX */
+ }
+
+#if (!TX_INTERRUPTABLE)
+ /* Re-enable MACE TX interrupts. */
+ lp->tx_irq_disabled=0;
+ outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ DEV_KFREE_SKB(skb);
+
+ return 0;
+} /* mace_start_xmit */
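+
+/* Transmit path, as implemented above: a 16-bit length word is written
+   to AM2150_XMT first, followed by the frame data (word transfers plus
+   a trailing byte for odd lengths); the Xilinx then queues the frame in
+   one of the four on-card transmit buffers, and XMTINT is handled in
+   mace_interrupt() once the transmit completes. */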
+
+/* ----------------------------------------------------------------------------
+mace_interrupt
+ The interrupt handler.
+---------------------------------------------------------------------------- */
+static void mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ mace_private *lp = (mace_private *)dev_id;
+ struct net_device *dev = &lp->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ int status;
+ int IntrCnt = MACE_MAX_IR_ITERATIONS;
+
+ if (dev == NULL) {
+ DEBUG(2, "mace_interrupt(): irq 0x%X for unknown device.\n",
+ irq);
+ return;
+ }
+
+ if (lp->tx_irq_disabled) {
+ printk(
+ (lp->tx_irq_disabled?
+ KERN_NOTICE "%s: Interrupt with tx_irq_disabled "
+ "[isr=%02X, imr=%02X]\n":
+ KERN_NOTICE "%s: Re-entering the interrupt handler "
+ "[isr=%02X, imr=%02X]\n"),
+ dev->name,
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)
+ );
+ /* WARNING: MACE_IR has been read! */
+ return;
+ }
+
+ if (!netif_device_present(dev)) {
+ DEBUG(2, "%s: interrupt from dead card\n", dev->name);
+ goto exception;
+ }
+
+ do {
+ /* WARNING: MACE_IR is a READ/CLEAR port! */
+ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+
+ DEBUG(3, "mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
+
+ if (status & MACE_IR_RCVINT) {
+ mace_rx(dev, MACE_MAX_RX_ITERATIONS);
+ }
+
+ if (status & MACE_IR_XMTINT) {
+ unsigned char fifofc;
+ unsigned char xmtrc;
+ unsigned char xmtfs;
+
+ fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
+ if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
+ lp->linux_stats.tx_errors++;
+ outb(0xFF, ioaddr + AM2150_XMT_SKIP);
+ }
+
+ /* Transmit Retry Count (XMTRC, reg 4) */
+ xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
+ if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
+ lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);
+
+ if (
+ (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) &
+ MACE_XMTFS_XMTSV /* Transmit Status Valid */
+ ) {
+ lp->mace_stats.xmtsv++;
+
+ if (xmtfs & ~MACE_XMTFS_XMTSV) {
+ if (xmtfs & MACE_XMTFS_UFLO) {
+ /* Underflow. Indicates that the Transmit FIFO emptied before
+ the end of frame was reached. */
+ lp->mace_stats.uflo++;
+ }
+ if (xmtfs & MACE_XMTFS_LCOL) {
+ /* Late Collision */
+ lp->mace_stats.lcol++;
+ }
+ if (xmtfs & MACE_XMTFS_MORE) {
+ /* MORE than one retry was needed */
+ lp->mace_stats.more++;
+ }
+ if (xmtfs & MACE_XMTFS_ONE) {
+ /* Exactly ONE retry occurred */
+ lp->mace_stats.one++;
+ }
+ if (xmtfs & MACE_XMTFS_DEFER) {
+ /* Transmission was deferred */
+ lp->mace_stats.defer++;
+ }
+ if (xmtfs & MACE_XMTFS_LCAR) {
+ /* Loss of carrier */
+ lp->mace_stats.lcar++;
+ }
+ if (xmtfs & MACE_XMTFS_RTRY) {
+ /* Retry error: transmit aborted after 16 attempts */
+ lp->mace_stats.rtry++;
+ }
+ } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */
+
+ } /* if (xmtfs & MACE_XMTFS_XMTSV) */
+
+ lp->linux_stats.tx_packets++;
+ lp->tx_free_frames++;
+ netif_wake_queue(dev);
+ } /* if (status & MACE_IR_XMTINT) */
+
+ if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
+ if (status & MACE_IR_JAB) {
+ /* Jabber Error. Excessive transmit duration (20-150ms). */
+ lp->mace_stats.jab++;
+ }
+ if (status & MACE_IR_BABL) {
+ /* Babble Error. >1518 bytes transmitted. */
+ lp->mace_stats.babl++;
+ }
+ if (status & MACE_IR_CERR) {
+ /* Collision Error. CERR indicates the absence of the
+ Signal Quality Error Test message after a packet
+ transmission. */
+ lp->mace_stats.cerr++;
+ }
+ if (status & MACE_IR_RCVCCO) {
+ /* Receive Collision Count Overflow */
+ lp->mace_stats.rcvcco++;
+ }
+ if (status & MACE_IR_RNTPCO) {
+ /* Runt Packet Count Overflow */
+ lp->mace_stats.rntpco++;
+ }
+ if (status & MACE_IR_MPCO) {
+ /* Missed Packet Count Overflow */
+ lp->mace_stats.mpco++;
+ }
+ } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */
+
+ } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));
+
+exception:
+ return;
+} /* mace_interrupt */
+
+/* ----------------------------------------------------------------------------
+mace_rx
+ Receives packets.
+---------------------------------------------------------------------------- */
+static int mace_rx(struct net_device *dev, unsigned char RxCnt)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ unsigned char rx_framecnt;
+ unsigned short rx_status;
+
+ while (
+ ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
+ (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
+ (RxCnt--)
+ ) {
+ rx_status = inw(ioaddr + AM2150_RCV);
+
+ DEBUG(3, "%s: in mace_rx(), framecnt 0x%X, rx_status"
+ " 0x%X.\n", dev->name, rx_framecnt, rx_status);
+
+ if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
+ lp->linux_stats.rx_errors++;
+ if (rx_status & MACE_RCVFS_OFLO) {
+ lp->mace_stats.oflo++;
+ }
+ if (rx_status & MACE_RCVFS_CLSN) {
+ lp->mace_stats.clsn++;
+ }
+ if (rx_status & MACE_RCVFS_FRAM) {
+ lp->mace_stats.fram++;
+ }
+ if (rx_status & MACE_RCVFS_FCS) {
+ lp->mace_stats.fcs++;
+ }
+ } else {
+ short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
+ /* Auto Strip is off, always subtract 4 */
+ struct sk_buff *skb;
+
+ lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
+ /* runt packet count */
+ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
+ /* rcv collision count */
+
+ DEBUG(3, " receiving packet size 0x%X rx_status"
+ " 0x%X.\n", pkt_len, rx_status);
+
+ skb = dev_alloc_skb(pkt_len+2);
+
+ if (skb != NULL) {
+ skb->dev = dev;
+
+ skb_reserve(skb, 2);
+ insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
+ if (pkt_len & 1)
+ *(skb->tail-1) = inb(ioaddr + AM2150_RCV);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
+
+ dev->last_rx = jiffies;
+ lp->linux_stats.rx_packets++;
+ add_rx_bytes(&lp->linux_stats, pkt_len); /* skb already handed to netif_rx() above */
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ continue;
+ } else {
+ DEBUG(1, "%s: couldn't allocate a sk_buff of size"
+ " %d.\n", dev->name, pkt_len);
+ lp->linux_stats.rx_dropped++;
+ }
+ }
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ } /* while */
+
+ return 0;
+} /* mace_rx */
+
+/* ----------------------------------------------------------------------------
+pr_linux_stats
+---------------------------------------------------------------------------- */
+static void pr_linux_stats(struct net_device_stats *pstats)
+{
+ DEBUG(2, "pr_linux_stats\n");
+ DEBUG(2, " rx_packets=%-7ld tx_packets=%ld\n",
+ (long)pstats->rx_packets, (long)pstats->tx_packets);
+ DEBUG(2, " rx_errors=%-7ld tx_errors=%ld\n",
+ (long)pstats->rx_errors, (long)pstats->tx_errors);
+ DEBUG(2, " rx_dropped=%-7ld tx_dropped=%ld\n",
+ (long)pstats->rx_dropped, (long)pstats->tx_dropped);
+ DEBUG(2, " multicast=%-7ld collisions=%ld\n",
+ (long)pstats->multicast, (long)pstats->collisions);
+
+ DEBUG(2, " rx_length_errors=%-7ld rx_over_errors=%ld\n",
+ (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
+ DEBUG(2, " rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
+ (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
+ DEBUG(2, " rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
+ (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
+
+ DEBUG(2, " tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
+ (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
+ DEBUG(2, " tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
+ (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
+ DEBUG(2, " tx_window_errors=%ld\n",
+ (long)pstats->tx_window_errors);
+} /* pr_linux_stats */
+
+/* ----------------------------------------------------------------------------
+pr_mace_stats
+---------------------------------------------------------------------------- */
+static void pr_mace_stats(mace_statistics *pstats)
+{
+ DEBUG(2, "pr_mace_stats\n");
+
+ DEBUG(2, " xmtsv=%-7d uflo=%d\n",
+ pstats->xmtsv, pstats->uflo);
+ DEBUG(2, " lcol=%-7d more=%d\n",
+ pstats->lcol, pstats->more);
+ DEBUG(2, " one=%-7d defer=%d\n",
+ pstats->one, pstats->defer);
+ DEBUG(2, " lcar=%-7d rtry=%d\n",
+ pstats->lcar, pstats->rtry);
+
+ /* MACE_XMTRC */
+ DEBUG(2, " exdef=%-7d xmtrc=%d\n",
+ pstats->exdef, pstats->xmtrc);
+
+ /* RFS1--Receive Status (RCVSTS) */
+ DEBUG(2, " oflo=%-7d clsn=%d\n",
+ pstats->oflo, pstats->clsn);
+ DEBUG(2, " fram=%-7d fcs=%d\n",
+ pstats->fram, pstats->fcs);
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ /* RFS3--Receive Collision Count (RCVCC) */
+ DEBUG(2, " rfs_rntpc=%-7d rfs_rcvcc=%d\n",
+ pstats->rfs_rntpc, pstats->rfs_rcvcc);
+
+ /* MACE_IR */
+ DEBUG(2, " jab=%-7d babl=%d\n",
+ pstats->jab, pstats->babl);
+ DEBUG(2, " cerr=%-7d rcvcco=%d\n",
+ pstats->cerr, pstats->rcvcco);
+ DEBUG(2, " rntpco=%-7d mpco=%d\n",
+ pstats->rntpco, pstats->mpco);
+
+ /* MACE_MPC */
+ DEBUG(2, " mpc=%d\n", pstats->mpc);
+
+ /* MACE_RNTPC */
+ DEBUG(2, " rntpc=%d\n", pstats->rntpc);
+
+ /* MACE_RCVCC */
+ DEBUG(2, " rcvcc=%d\n", pstats->rcvcc);
+
+} /* pr_mace_stats */
+
+/* ----------------------------------------------------------------------------
+update_stats
+ Update statistics. We change to register window 1, so this
+ should be run single-threaded if the device is active. This is
+ expected to be a rare operation, and it's simpler for the rest
+ of the driver to assume that window 0 is always valid rather
+ than use a special window-state variable.
+
+ oflo & uflo should _never_ occur since it would mean the Xilinx
+ was not able to transfer data between the MACE FIFO and the
+ card's SRAM fast enough. If this happens, something is
+ seriously wrong with the hardware.
+---------------------------------------------------------------------------- */
+static void update_stats(ioaddr_t ioaddr, struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+
+ lp->mace_stats.rcvcc += mace_read(ioaddr, MACE_RCVCC);
+ lp->mace_stats.rntpc += mace_read(ioaddr, MACE_RNTPC);
+ lp->mace_stats.mpc += mace_read(ioaddr, MACE_MPC);
+ /* At this point, mace_stats is fully updated for this call.
+ We may now update the linux_stats. */
+
+ /* The MACE has no equivalent for the linux_stats fields that are
+ commented out. */
+
+#if 0
+ /* These must be tracked in the main body of the driver. */
+ lp->linux_stats.rx_packets;
+ lp->linux_stats.tx_packets;
+ lp->linux_stats.rx_errors;
+ lp->linux_stats.tx_errors;
+ lp->linux_stats.rx_dropped;
+ lp->linux_stats.tx_dropped;
+#endif
+ /* lp->linux_stats.multicast; */
+ lp->linux_stats.collisions =
+ lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
+ /* Collision: The MACE may retry sending a packet 15 times
+ before giving up. The retry count is in XMTRC.
+ Does each retry constitute a collision?
+ If so, why doesn't the RCVCC record these collisions? */
+
+ /* detailed rx_errors: */
+ lp->linux_stats.rx_length_errors =
+ lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
+ /* lp->linux_stats.rx_over_errors */
+ lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
+ lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
+ lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
+ lp->linux_stats.rx_missed_errors =
+ lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
+
+ /* detailed tx_errors */
+ lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
+ lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
+ /* LCAR usually results from bad cabling. */
+ lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
+ lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+ /* lp->linux_stats.tx_window_errors; */
+
+ return;
+} /* update_stats */
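+
+/* The MACE hardware counters read above (RCVCC, RNTPC, MPC) are only
+   eight bits wide; the corresponding *CO interrupt bits count their
+   overflows, which is why the totals are reconstructed here as
+   overflow_count * 256 plus the current register value. */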
+
+/* ----------------------------------------------------------------------------
+mace_get_stats
+ Gathers ethernet statistics from the MACE chip.
+---------------------------------------------------------------------------- */
+static struct net_device_stats *mace_get_stats(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+
+ update_stats(dev->base_addr, dev);
+
+ DEBUG(1, "%s: updating the statistics.\n", dev->name);
+ pr_linux_stats(&lp->linux_stats);
+ pr_mace_stats(&lp->mace_stats);
+
+ return &lp->linux_stats;
+} /* mace_get_stats */
+
+/* ----------------------------------------------------------------------------
+updateCRC
+ Modified from Am79C90 data sheet.
+---------------------------------------------------------------------------- */
+
+#if BROKEN_MULTICAST
+
+static void updateCRC(int *CRC, int bit)
+{
+ int poly[]={
+ 1,1,1,0, 1,1,0,1,
+ 1,0,1,1, 1,0,0,0,
+ 1,0,0,0, 0,0,1,1,
+ 0,0,1,0, 0,0,0,0
+ }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the
+ CRC generator polynomial. */
+
+ int j;
+
+ /* shift CRC and control bit (CRC[32]) */
+ for (j = 32; j > 0; j--)
+ CRC[j] = CRC[j-1];
+ CRC[0] = 0;
+
+ /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */
+ if (bit ^ CRC[32])
+ for (j = 0; j < 32; j++)
+ CRC[j] ^= poly[j];
+} /* updateCRC */
+
+/* ----------------------------------------------------------------------------
+BuildLAF
+ Build logical address filter.
+ Modified from Am79C90 data sheet.
+
+Input
+ ladrf: logical address filter (contents initialized to 0)
+ adr: ethernet address
+---------------------------------------------------------------------------- */
+static void BuildLAF(int *ladrf, int *adr)
+{
+ int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */
+
+ int i, byte; /* temporary array indices */
+ int hashcode; /* the output object */
+
+ CRC[32]=0;
+
+ for (byte = 0; byte < 6; byte++)
+ for (i = 0; i < 8; i++)
+ updateCRC(CRC, (adr[byte] >> i) & 1);
+
+ hashcode = 0;
+ for (i = 0; i < 6; i++)
+ hashcode = (hashcode << 1) + CRC[i];
+
+ byte = hashcode >> 3;
+ ladrf[byte] |= (1 << (hashcode & 7));
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 2) {
+ printk(KERN_DEBUG " adr =");
+ for (i = 0; i < 6; i++)
+ printk(" %02X", adr[i]);
+ printk("\n" KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63]"
+ " =", hashcode);
+ for (i = 0; i < 8; i++)
+ printk(" %02X", ladrf[i]);
+ printk("\n");
+ }
+#endif
+} /* BuildLAF */
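+
+/* As in the Am79C90 data sheet: a CRC-32 of the destination address is
+   computed bit-serially, six bits of the result form a hash code, and
+   that code selects one of the 64 bits of the 8-byte logical address
+   filter (byte = hashcode >> 3, bit = hashcode & 7). */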
+
+/* ----------------------------------------------------------------------------
+restore_multicast_list
+ Restores the multicast filter for MACE chip to the last
+ set_multicast_list() call.
+
+Input
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+static void restore_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ int num_addrs = lp->multicast_num_addrs;
+ int *ladrf = lp->multicast_ladrf;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i;
+
+ DEBUG(2, "%s: restoring Rx mode to %d addresses.\n",
+ dev->name, num_addrs);
+
+ if (num_addrs > 0) {
+
+ DEBUG(1, "Attempt to restore multicast list detected.\n");
+
+ mace_write(ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
+ /* Poll ADDRCHG bit */
+ while (mace_read(ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ ;
+ /* Set LADRF register */
+ for (i = 0; i < MACE_LADRF_LEN; i++)
+ mace_write(ioaddr, MACE_LADRF, ladrf[i]);
+
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ } else if (num_addrs < 0) {
+
+ /* Promiscuous mode: receive all packets */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+
+ } else {
+
+ /* Normal mode */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ }
+} /* restore_multicast_list */
+
+/* ----------------------------------------------------------------------------
+set_multicast_list
+ Set or clear the multicast filter for this adaptor.
+
+Input
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+Output
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+ int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ int i;
+ struct dev_mc_list *dmi = dev->mc_list;
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 1) {
+ static int old = 0;
+ if (dev->mc_count != old) {
+ old = dev->mc_count;
+ DEBUG(0, "%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ /* Set multicast_num_addrs. */
+ lp->multicast_num_addrs = dev->mc_count;
+
+ /* Set multicast_ladrf. */
+  if (lp->multicast_num_addrs > 0) {
+ /* Calculate multicast logical address filter */
+ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
+    for (i = 0; i < dev->mc_count; i++) {
+      int k;
+      /* Copy bytewise: BuildLAF() expects one octet per array element. */
+      for (k = 0; k < ETHER_ADDR_LEN; k++)
+        adr[k] = dmi->dmi_addr[k];
+      dmi = dmi->next;
+      BuildLAF(lp->multicast_ladrf, adr);
+    }
+ }
+
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+#endif /* BROKEN_MULTICAST */
+
+static void restore_multicast_list(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name,
+ ((mace_private *)(dev->priv))->multicast_num_addrs);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode: receive all packets */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+ } else {
+ /* Normal mode */
+ mace_write(ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+ }
+} /* restore_multicast_list */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = (mace_private *)dev->priv;
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 1) {
+ static int old = 0;
+ if (dev->mc_count != old) {
+ old = dev->mc_count;
+ DEBUG(0, "%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ lp->multicast_num_addrs = dev->mc_count;
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+/* ----------------------------------------------------------------------------
+init_nmclan_cs
+---------------------------------------------------------------------------- */
+
+static int __init init_nmclan_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "nmclan_cs: Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &nmclan_attach, &nmclan_detach);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------
+exit_nmclan_cs
+---------------------------------------------------------------------------- */
+
+static void __exit exit_nmclan_cs(void)
+{
+ DEBUG(0, "nmclan_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ nmclan_detach(dev_list);
+}
+
+module_init(init_nmclan_cs);
+module_exit(exit_nmclan_cs);
diff --git a/linux/pcmcia-cs/clients/ositech.h b/linux/pcmcia-cs/clients/ositech.h
new file mode 100644
index 0000000..4126efc
--- /dev/null
+++ b/linux/pcmcia-cs/clients/ositech.h
@@ -0,0 +1,358 @@
+/*
+ This file contains the firmware of Seven of Diamonds from OSITECH.
+ (Special thanks to Kevin MacPherson of OSITECH)
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+*/
+
+ static const u_char __Xilinx7OD[] = {
+ 0xFF, 0x04, 0xA0, 0x36, 0xF3, 0xEC, 0xFF, 0xFF, 0xFF, 0xDF, 0xFB, 0xFF,
+ 0xF3, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0x3F, 0xFF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x7F, 0xFE, 0xFF,
+ 0xCE, 0xFE, 0xFE, 0xFE,
+ 0xFE, 0xDE, 0xBD, 0xDD, 0xFD, 0xFF, 0xFD, 0xCF, 0xF7, 0xBF, 0x7F, 0xFF,
+ 0x7F, 0x3F, 0xFE, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xBC, 0xFF, 0xFF, 0xBD, 0xB5, 0x7F, 0x7F, 0xBF, 0xBF,
+ 0x7F, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDE,
+ 0xFE, 0xFE, 0xFA, 0xDE,
+ 0xBD, 0xFD, 0xED, 0xFD, 0xFD, 0xCF, 0xEF, 0xEF, 0xEF, 0xEF, 0xC7, 0xDF,
+ 0xDF, 0xDF, 0xDF, 0xDF,
+ 0xFF, 0x7E, 0xFE, 0xFD, 0x7D, 0x6D, 0xEE, 0xFE, 0x7C, 0xFB, 0xF4, 0xFB,
+ 0xCF, 0xDB, 0xDF, 0xFF,
+ 0xFF, 0xBB, 0x7F, 0xFF, 0x7F, 0xFF, 0xF7, 0xFF, 0x9E, 0xBF, 0x3B, 0xBF,
+ 0xBF, 0x7F, 0x7F, 0x7F,
+ 0x7E, 0x6F, 0xDF, 0xEF, 0xF5, 0xF6, 0xFD, 0xF6, 0xF5, 0xED, 0xEB, 0xFF,
+ 0xEF, 0xEF, 0xEF, 0x7E,
+ 0x7F, 0x7F, 0x6F, 0x7F, 0xFF, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xEF, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0x1F, 0x1F, 0xEE, 0xFF, 0xBC,
+ 0xB7, 0xFF, 0xDF, 0xFF,
+ 0xDF, 0xEF, 0x3B, 0xE3, 0xD3, 0xFF, 0xFB, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF,
+ 0xFF, 0xBA, 0xBF, 0x2D,
+ 0xDB, 0xBD, 0xFD, 0xDB, 0xDF, 0xFA, 0xFB, 0xFF, 0xEF, 0xFB, 0xDB, 0xF3,
+ 0xFF, 0xDF, 0xFD, 0x7F,
+ 0xEF, 0xFB, 0xFF, 0xFF, 0xBE, 0xBF, 0x27, 0xBA, 0xFE, 0xFB, 0xDF, 0xFF,
+ 0xF6, 0xFF, 0xFF, 0xEF,
+ 0xFB, 0xDB, 0xF3, 0xD9, 0x9A, 0x3F, 0xFF, 0xAF, 0xBF, 0xFF, 0xFF, 0xBE,
+ 0x3F, 0x37, 0xBD, 0x96,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE, 0xFB, 0xF3, 0xF3, 0xEB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xFA, 0xBC, 0xAE, 0xFE, 0xBE, 0xFE, 0xBB, 0x7F, 0xFD, 0xFF,
+ 0x7F, 0xEF, 0xF7, 0xFB,
+ 0xBB, 0xD7, 0xF7, 0x7F, 0xFF, 0xF7, 0xFF, 0xFF, 0xF7, 0xBC, 0xED, 0xFD,
+ 0xBD, 0x9D, 0x7D, 0x7B,
+ 0xFB, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFE, 0xFD, 0xFD, 0xFE, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xAA, 0xB9, 0xBF, 0x8F, 0xBF, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0x7F, 0xCF,
+ 0xFB, 0xEB, 0xCB, 0xEB,
+ 0xEE, 0xFF, 0xFF, 0xD7, 0xFF, 0xFF, 0xFF, 0x3E, 0x33, 0x3F, 0x1C, 0x7C,
+ 0xFC, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xCF, 0xD3, 0xF3, 0xE3, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEB, 0xFE, 0x35,
+ 0x3F, 0x3D, 0xFD, 0xFD, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xEF, 0x6F, 0xE3,
+ 0xE3, 0xE3, 0xEF, 0xFF,
+ 0xFF, 0xDF, 0xFF, 0xFF, 0xF7, 0xFE, 0x3E, 0x5E, 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFD, 0xFF, 0xFF,
+ 0xAF, 0xCF, 0xF2, 0xCB, 0xCF, 0x8E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD,
+ 0xFC, 0x3E, 0x1F, 0x9E,
+ 0xAD, 0xFD, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xEF, 0xFF, 0xB3, 0xF7, 0xE7,
+ 0xF7, 0xFA, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEE, 0xEB, 0xAB, 0xAF, 0x9F, 0xE3, 0x7F, 0xFF, 0xDE,
+ 0xFF, 0x7F, 0xEE, 0xFF,
+ 0xFF, 0xFB, 0x3A, 0xFA, 0xFF, 0xF2, 0x77, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF,
+ 0xFE, 0xBD, 0xAE, 0xDE,
+ 0x7D, 0x7D, 0xFD, 0xFF, 0xBF, 0xEE, 0xFF, 0xFD, 0xFF, 0xDB, 0xFB, 0xFF,
+ 0xF7, 0xEF, 0xFB, 0xFF,
+ 0xFF, 0xFE, 0xFF, 0x2D, 0xAF, 0xB9, 0xFD, 0x79, 0xFB, 0xFA, 0xFF, 0xBF,
+ 0xEF, 0xFF, 0xFF, 0x91,
+ 0xFA, 0xFB, 0xDF, 0xF7, 0xF7, 0xFF, 0xFF, 0xFF, 0xFC, 0xCF, 0x37, 0xBF,
+ 0xBF, 0xFF, 0x7F, 0x7F,
+ 0xFF, 0xFF, 0xFF, 0xAF, 0xFF, 0xFF, 0xF3, 0xFB, 0xFB, 0xFF, 0xF5, 0xEF,
+ 0xFF, 0xFF, 0xF7, 0xFA,
+ 0xFF, 0xFF, 0xEE, 0xFA, 0xFE, 0xFB, 0x55, 0xDD, 0xFF, 0x7F, 0xAF, 0xFE,
+ 0xFF, 0xFB, 0xFB, 0xF5,
+ 0xFF, 0xF7, 0xEF, 0xFF, 0xFF, 0xFF, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D,
+ 0x7B, 0x7B, 0x7B, 0x7B,
+ 0xFB, 0xAE, 0xFF, 0xFD, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xDA, 0xB7, 0x61,
+ 0xFF, 0xB9, 0x59, 0xF3, 0x73, 0xF3, 0xDF, 0x7F, 0x6F, 0xDF, 0xEF, 0xF7,
+ 0xEB, 0xEB, 0xD7, 0xFF,
+ 0xD7, 0xFF, 0xFF, 0xF7, 0xFE, 0x7F, 0xFB, 0x3E, 0x38, 0x73, 0xF6, 0x7F,
+ 0xFC, 0xFF, 0xFF, 0xCF,
+ 0xFF, 0xB7, 0xFB, 0xB3, 0xB3, 0x67, 0xFF, 0xE7, 0xFD, 0xFF, 0xEF, 0xF6,
+ 0x7F, 0xB7, 0xBC, 0xF5,
+ 0x7B, 0xF6, 0xF7, 0xF5, 0xFF, 0xFF, 0xEF, 0xFF, 0xF7, 0xFF, 0xF7, 0xCE,
+ 0xE7, 0xFF, 0x9F, 0xFF,
+ 0xFF, 0xF5, 0xFE, 0x7D, 0xFF, 0x5F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xFF, 0xF6,
+ 0xCB, 0xDB, 0xEE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFE, 0x7F, 0xBE,
+ 0x1E, 0x3E, 0xFE, 0xFF,
+ 0x7D, 0xFE, 0xFF, 0xFF, 0xEF, 0xBF, 0xE7, 0xFF, 0xE3, 0xE3, 0xFF, 0xDF,
+ 0xE7, 0xFF, 0xFF, 0xFF,
+ 0xB8, 0xEF, 0xB7, 0x2F, 0xEE, 0xFF, 0xDF, 0xFF, 0xBF, 0xFF, 0x7F, 0xEF,
+ 0xEB, 0xBF, 0xA3, 0xD3,
+ 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xFD, 0x3F, 0xCF, 0xFD,
+ 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xFB, 0xBF, 0xBB, 0xBF, 0xDB, 0xFD, 0xFB, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x3E, 0xFE,
+ 0x3F, 0xBA, 0xBA, 0xFE, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xEF, 0xC3, 0x7F,
+ 0xB2, 0x9B, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0x3C, 0xFF, 0x3F, 0x3C, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xAF, 0xF3, 0xFE, 0xF3, 0xE3, 0xEB, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xF7,
+ 0x9A, 0xFE, 0xAF, 0x9E,
+ 0xBE, 0xFE, 0xFF, 0xDF, 0xFF, 0xFF, 0x7B, 0xEF, 0xF7, 0xBF, 0xFB, 0xFB,
+ 0xFB, 0xFF, 0xFF, 0x7F,
+ 0xFF, 0xFF, 0xFF, 0xBC, 0xBD, 0xFD, 0xBD, 0xDD, 0x7D, 0x7B, 0x7B, 0x7B,
+ 0x7B, 0xFB, 0xAE, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xF7, 0x9A, 0xFF,
+ 0x9F, 0xFF, 0xAF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xCF, 0xF3, 0xFF, 0xEB, 0xFF, 0xEB, 0xFF,
+ 0xFF, 0xBF, 0xFF, 0xFF,
+ 0xEF, 0xFE, 0xFF, 0x37, 0xFC, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xCF, 0xEF, 0xFD, 0xF3,
+ 0xFF, 0xEE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0xFD, 0x2F, 0xFD,
+ 0xFF, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF, 0xCF, 0xFF, 0xF3, 0xBF, 0x69, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE,
+ 0xFB, 0x9F, 0xFF, 0xBF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x87,
+ 0xFE, 0xDA, 0xEF, 0xCF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xBF, 0xEF, 0xEF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xFD, 0xFF, 0x7B, 0xFF, 0xEB, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEB, 0xF8, 0xFF, 0xEF,
+ 0xAF, 0xFF, 0xFF, 0xBD, 0xFF, 0xFF, 0xFF, 0x7F, 0xEE, 0x7F, 0xEF, 0xFF,
+ 0xBB, 0xFF, 0xBF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xF7, 0xF6, 0xFB, 0xBD, 0xFD, 0xDD, 0xF5, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xAF,
+ 0xFF, 0x5F, 0xF5, 0xDF, 0xFF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF6,
+ 0xF3, 0xFF, 0xDE, 0xFE,
+ 0xEF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xDE, 0xDF, 0x5F, 0xDF,
+ 0xFD, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xAF, 0xFF, 0xFF,
+ 0xEF, 0xED, 0xFF, 0xDF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xDA, 0xBD, 0xBE,
+ 0xAE, 0xFE, 0x7F, 0xFD,
+ 0xDF, 0xFF, 0xFF, 0x7F, 0xEF, 0xFF, 0xFB, 0xFB, 0xFB, 0x7F, 0xF7, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xBC, 0xFD, 0xBD, 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE,
+ 0xFF, 0xFF, 0xFD, 0xFF,
+ 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x9F, 0xBF, 0xBF, 0xCF,
+ 0x7F, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xFF, 0xEB, 0xEB, 0xEB, 0xFF, 0xD7, 0xFE, 0xFF, 0xFF,
+ 0xBF, 0xE7, 0xFE, 0xBF,
+ 0x7F, 0xFC, 0xFF, 0xFF, 0xED, 0xFF, 0xFF, 0xFF, 0xFF, 0x4F, 0xFF, 0xFB,
+ 0xFB, 0xFF, 0xFF, 0xDD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBD, 0xDF, 0x9D, 0xFD, 0xDF, 0xB9,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFB, 0xEF, 0xEB, 0xFF, 0xDE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF6, 0x9F, 0xFF, 0xFC,
+ 0xFE, 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xDF, 0xFA, 0xCD, 0xCF,
+ 0xBF, 0x9F, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xFE, 0xBF, 0xFF, 0xDF, 0xEF, 0x5F, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0x6F, 0xFF,
+ 0xBB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7E, 0xFF,
+ 0x5F, 0xFF, 0xBF, 0xBF,
+ 0xF9, 0xFF, 0xFF, 0xFF, 0x7F, 0x6E, 0x7B, 0xFF, 0xEF, 0xFD, 0xEB, 0xDF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xB6, 0x3E, 0xFC, 0xFD, 0xBF, 0x7E, 0xFB, 0xFF, 0xFF, 0xFF, 0xF7,
+ 0xEF, 0xF7, 0xF3, 0xF7,
+ 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6E, 0x35, 0x79, 0xFF,
+ 0xBF, 0xFC, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF, 0xFB, 0x53, 0xDF, 0xFF, 0xEB, 0xBF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xBC,
+ 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xF5,
+ 0xFF, 0xF7, 0xFF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xAA, 0xEE, 0xFE, 0x3F, 0x7D,
+ 0xFD, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xAF, 0x77, 0xFB, 0xFB, 0xFF, 0xFB, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xBE, 0xBD, 0xBD,
+ 0xBD, 0xBD, 0xFD, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAE, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFC,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x9A, 0xD9, 0xB8, 0xFF, 0xFF, 0x79, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xCF,
+ 0xFB, 0xFF, 0xEB, 0xFF, 0xEB, 0xD7, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xDE,
+ 0xF8, 0xFB, 0xFE, 0x3F,
+ 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xAD, 0xBF, 0xFA, 0xFF, 0x73,
+ 0xDF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x3A, 0xF5, 0xB7, 0xFC, 0x3F, 0xF9, 0xFD, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xEF, 0xF3, 0xFF,
+ 0xBF, 0xFE, 0xF3, 0x9F, 0xFE, 0xFF, 0xFF, 0xFF, 0xF7, 0x3E, 0xFF, 0xFF,
+ 0xFF, 0xBF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xD3, 0xFE, 0xDB, 0xFF, 0xDB, 0xDF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x3E, 0xFF, 0xBF, 0xFF, 0x7F, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0x8F,
+ 0xF3, 0xFF, 0xED, 0xFF,
+ 0xF7, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF6, 0x3C, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x9F, 0xEF, 0xEF, 0xD1, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x7E, 0xBF,
+ 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBB, 0xEF, 0xDF, 0xF1,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE, 0x3E, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xBF,
+ 0xEF, 0xFD, 0xC3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFC, 0x3E, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x2E, 0xEF, 0xF3, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xAF, 0xFB,
+ 0xFB, 0xFD, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xF2, 0xD6, 0xED,
+ 0xBD, 0xBD, 0xBD, 0x7D,
+ 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x92, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xAF, 0xEB, 0xEB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFE, 0x2E, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x4F, 0xEF, 0xF3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE,
+ 0x3C, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xCE,
+ 0xC3, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x5D, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xCF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF7, 0xEE, 0x3E, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xDF, 0xE2, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF6, 0xBE, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x7F, 0xEE,
+ 0x5F, 0xE6, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3E,
+ 0x7D, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF3, 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xF7, 0x36, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xD3, 0xF6,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x7F, 0xEE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xAF, 0xEF, 0xEB, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBA, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE,
+ 0xFB, 0xFA, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xD6, 0xFD, 0xBD, 0xBD, 0xBD,
+ 0x7D, 0x7B, 0x7B, 0x7B,
+ 0x7B, 0xFB, 0xAE, 0xFF, 0x7E, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xBA, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xEB, 0x6B,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xBE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x4F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0x3E, 0x6E, 0xFC, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xC3, 0xC9, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x3E, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFB,
+ 0xD5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFE,
+ 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x6F, 0xEF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFB,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF6, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE,
+ 0xEF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFF, 0xFE, 0xFF, 0xF7, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xFA, 0xEF, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xFF, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xFE, 0xEF, 0xBF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xA7, 0xFF, 0xFC, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xFE, 0xAE, 0xFF, 0xFF, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7,
+ 0xF7, 0xFA, 0xFF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xF7, 0xBE, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B,
+ 0x7B, 0x7B, 0xFB, 0xAF,
+ 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCA,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x6F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xCF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xDF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xE7, 0xF2, 0xFC,
+ 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xAE, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7E, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFE, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xEF, 0xDD, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xAF, 0xEF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBA, 0xFE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFA, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xF6, 0x9C, 0xBD, 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB,
+ 0xAE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0x7A, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xDF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x6F, 0xEF, 0xF7, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xF7, 0xFE,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xEB,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x9E, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xEF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xEF, 0xCB, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFD,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xBE, 0xFD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xEF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFB, 0xAF, 0x7F, 0xFF,
+ 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xBF, 0xFF,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xAE,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0x7F, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xF7, 0xBC, 0xBD,
+ 0xBD, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xAF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFA, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0x7F,
+ 0xAF, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF,
+ 0xFE, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFB, 0xFF,
+ 0xFF, 0xFF, 0xEF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xEF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xFE, 0xFF, 0x9F, 0x9F,
+ 0x9F, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0xFF, 0xEF, 0xDF, 0xDF, 0xDF, 0xDF, 0xCF, 0xB7, 0xBF, 0xBF,
+ 0xBF, 0xBF, 0xFF, 0xBC,
+ 0xB9, 0x9D, 0xBD, 0xBD, 0x7D, 0x7B, 0x7B, 0x7B, 0x7B, 0xFB, 0xEF, 0xD7,
+ 0xF5, 0xF3, 0xF1, 0xD1,
+ 0x65, 0xE3, 0xE3, 0xE3, 0xA3, 0xFF, 0xFE, 0x7F, 0xFE, 0xDE, 0xDE, 0xFF,
+ 0xBD, 0xBD, 0xBD, 0xBD,
+ 0xDF, 0xEF, 0xFB, 0xF7, 0xF3, 0xF3, 0xF3, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
+ 0xFB, 0xFE, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+
+ };
diff --git a/linux/pcmcia-cs/clients/pcnet_cs.c b/linux/pcmcia-cs/clients/pcnet_cs.c
new file mode 100644
index 0000000..8b3c1ec
--- /dev/null
+++ b/linux/pcmcia-cs/clients/pcnet_cs.c
@@ -0,0 +1,1702 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for NS8390-based cards
+
+ This driver supports the D-Link DE-650 and Linksys EthernetCard
+ cards, the newer D-Link and Linksys combo cards, Accton EN2212
+ cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory
+ mode, and the IBM Credit Card Adapter, the NE4100, the Thomas
+ Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory
+ mode. It will also handle the Socket EA card in either mode.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ pcnet_cs.c 1.153 2003/11/09 18:53:09
+
+ The network driver code is based on Donald Becker's NE2000 code:
+
+ Written 1992,1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU General Public License,
+ incorporated herein by reference.
+ Donald Becker may be reached at becker@scyld.com
+
+ Based also on Keith Moore's changes to Don Becker's code, for IBM
+ CCAE support. Drivers merged back together, and shared-memory
+ Socket EA support added, by Ken Raeburn, September 1995.
+
+======================================================================*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include <../drivers/net/8390.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#define PCNET_CMD 0x00
+#define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define PCNET_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define PCNET_MISC 0x18 /* For IBM CCAE and Socket EA cards */
+
+#define PCNET_START_PG 0x40 /* First page of TX buffer */
+#define PCNET_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* Socket EA cards have a larger packet buffer */
+#define SOCKET_START_PG 0x01
+#define SOCKET_STOP_PG 0xff
+
+#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
+
+static char *if_names[] = { "auto", "10baseT", "10base2"};
+
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+MODULE_PARM(pc_debug, "i");
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"pcnet_cs.c 1.153 2003/11/09 18:53:09 (David Hinds)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/* Bit map of interrupts to choose from */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+INT_MODULE_PARM(if_port, 1); /* Transceiver type */
+INT_MODULE_PARM(use_big_buf, 1); /* use 64K packet buffer? */
+INT_MODULE_PARM(mem_speed, 0); /* shared mem speed, in ns */
+INT_MODULE_PARM(delay_output, 0); /* pause after xmit? */
+INT_MODULE_PARM(delay_time, 4); /* in usec */
+INT_MODULE_PARM(use_shmem, -1); /* use shared memory? */
+INT_MODULE_PARM(full_duplex, 0); /* full duplex? */
+
+/* Ugh! Let the user hardwire the hardware address for queer cards */
+static int hw_addr[6] = { 0, /* ... */ };
+MODULE_PARM(hw_addr, "6i");
+
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev);
+static void pcnet_config(dev_link_t *link);
+static void pcnet_release(u_long arg);
+static int pcnet_event(event_t event, int priority,
+ event_callback_args_t *args);
+static int pcnet_open(struct net_device *dev);
+static int pcnet_close(struct net_device *dev);
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs);
+static void ei_watchdog(u_long arg);
+static void pcnet_reset_8390(struct net_device *dev);
+static int set_config(struct net_device *dev, struct ifmap *map);
+static int setup_shmem_window(dev_link_t *link, int start_pg,
+ int stop_pg, int cm_offset);
+static int setup_dma_config(dev_link_t *link, int start_pg,
+ int stop_pg);
+
+static dev_link_t *pcnet_attach(void);
+static void pcnet_detach(dev_link_t *);
+
+static dev_info_t dev_info = "pcnet_cs";
+static dev_link_t *dev_list;
+
+/*====================================================================*/
+
+typedef struct hw_info_t {
+ u_int offset;
+ u_char a0, a1, a2;
+ u_int flags;
+} hw_info_t;
+
+#define DELAY_OUTPUT 0x01
+#define HAS_MISC_REG 0x02
+#define USE_BIG_BUF 0x04
+#define HAS_IBM_MISC 0x08
+#define IS_DL10019 0x10
+#define IS_DL10022 0x20
+#define HAS_MII 0x40
+#define USE_SHMEM 0x80 /* autodetected */
+
+#define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */
+#define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */
+#define MII_PHYID_REV_MASK 0xfffffff0
+#define MII_PHYID_REG1 0x02
+#define MII_PHYID_REG2 0x03
+
+static hw_info_t hw_info[] = {
+ { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
+ { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
+ { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
+ { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
+ DELAY_OUTPUT | HAS_IBM_MISC },
+ { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 },
+ { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 },
+ { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 },
+ { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 },
+ { /* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 },
+ { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 },
+ { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* ELECOM Laneed LD-CDWA */ 0xb8, 0x08, 0x00, 0x42, 0 },
+ { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 },
+ { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 },
+ { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 },
+ { /* Maxtech PCN2000 */ 0x5000, 0x00, 0x00, 0xe8, 0 },
+ { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 },
+ { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
+ { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
+ HAS_MISC_REG | HAS_IBM_MISC },
+ { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
+ { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
+ { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 },
+ { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b,
+ DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF },
+ { /* Socket LP-E CF+ */ 0x01c0, 0x00, 0xc0, 0x1b, 0 },
+ { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 },
+ { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 },
+ { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 },
+ { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 }
+};
+
+#define NR_INFO (sizeof(hw_info)/sizeof(hw_info_t))
+
+static hw_info_t default_info = { 0, 0, 0, 0, 0 };
+static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII };
+static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
+
+typedef struct pcnet_dev_t {
+ struct net_device dev; /* so &dev == &pcnet_dev_t */
+ dev_link_t link;
+ dev_node_t node;
+ u_int flags;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_char phy_id;
+ u_char eth_phy, pna_phy;
+ u_short link_status;
+ u_long mii_reset;
+} pcnet_dev_t;
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ pcnet_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ We never need to do anything when a pcnet device is "initialized"
+ by the net software, because we only register already-found cards.
+
+======================================================================*/
+
+static int pcnet_init(struct net_device *dev)
+{
+ return 0;
+}
+
+/*======================================================================
+
+ pcnet_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *pcnet_attach(void)
+{
+ pcnet_dev_t *info;
+ dev_link_t *link;
+ struct net_device *dev;
+ client_reg_t client_reg;
+ int i, ret;
+
+ DEBUG(0, "pcnet_attach()\n");
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) return NULL;
+ memset(info, 0, sizeof(*info));
+ link = &info->link; dev = &info->dev;
+ link->priv = info;
+
+ init_timer(&link->release);
+ link->release.function = &pcnet_release;
+ link->release.data = (u_long)link;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ ethdev_init(dev);
+ init_dev_name(dev, info->node);
+ dev->init = &pcnet_init;
+ dev->open = &pcnet_open;
+ dev->stop = &pcnet_close;
+ dev->set_config = &set_config;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &pcnet_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(link->handle, RegisterClient, ret);
+ pcnet_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* pcnet_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void pcnet_detach(dev_link_t *link)
+{
+ pcnet_dev_t *info = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "pcnet_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ pcnet_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&info->dev);
+ kfree(info);
+
+} /* pcnet_detach */
+
+/*======================================================================
+
+ This probes for a card's hardware address, for card types that
+ encode this information in their CIS.
+
+======================================================================*/
+
+static hw_info_t *get_hwinfo(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ win_req_t req;
+ memreq_t mem;
+ u_char *base, *virt;
+ int i, j;
+
+ /* Allocate a small memory window */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = 0; req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS) {
+ cs_error(link->handle, RequestWindow, i);
+ return NULL;
+ }
+
+ virt = ioremap(req.Base, req.Size);
+ mem.Page = 0;
+ for (i = 0; i < NR_INFO; i++) {
+ mem.CardOffset = hw_info[i].offset & ~(req.Size-1);
+ CardServices(MapMemPage, link->win, &mem);
+ base = &virt[hw_info[i].offset & (req.Size-1)];
+ if ((readb(base+0) == hw_info[i].a0) &&
+ (readb(base+2) == hw_info[i].a1) &&
+ (readb(base+4) == hw_info[i].a2))
+ break;
+ }
+ if (i < NR_INFO) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = readb(base + (j<<1));
+ }
+
+ iounmap(virt);
+ j = CardServices(ReleaseWindow, link->win);
+ if (j != CS_SUCCESS)
+ cs_error(link->handle, ReleaseWindow, j);
+ return (i < NR_INFO) ? hw_info+i : NULL;
+} /* get_hwinfo */
+
+/*======================================================================
+
+ This probes for a card's hardware address by reading the PROM.
+ It checks the address against a list of known types, then falls
+ back to a simple NE2000 clone signature check.
+
+======================================================================*/
+
+static hw_info_t *get_prom(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_char prom[32];
+ int i, j;
+
+ /* This is lifted straight from drivers/net/ne.c */
+ struct {
+ u_char value, offset;
+ } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+
+ pcnet_reset_8390(dev);
+ mdelay(10);
+
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ for (i = 0; i < 32; i++)
+ prom[i] = inb(ioaddr + PCNET_DATAPORT);
+ for (i = 0; i < NR_INFO; i++) {
+ if ((prom[0] == hw_info[i].a0) &&
+ (prom[2] == hw_info[i].a1) &&
+ (prom[4] == hw_info[i].a2))
+ break;
+ }
+ if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = prom[j<<1];
+ return (i < NR_INFO) ? hw_info+i : &default_info;
+ }
+ return NULL;
+} /* get_prom */
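A minimal sketch of the PROM layout get_prom() relies on: the station address is doubled into the even offsets (byte-wide reads of a word-wide PROM), and the 0x57,0x57 ('W','W') NE2000 clone signature sits at offsets 28/30. The sample contents below are invented:

    #include <stdio.h>

    int main(void)
    {
        /* Invented PROM dump for a hypothetical 00:80:C8:12:34:56 card. */
        unsigned char prom[32] = {
            0x00, 0x00, 0x80, 0x80, 0xc8, 0xc8, 0x12, 0x12,
            0x34, 0x34, 0x56, 0x56, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x57, 0x00, 0x57, 0x00,
        };
        int j;

        if (prom[28] == 0x57 && prom[30] == 0x57) {
            printf("NE2000 clone signature, hw_addr =");
            for (j = 0; j < 6; j++)
                printf(" %02X", prom[j << 1]);   /* address lives at the even offsets */
            printf("\n");
        }
        return 0;
    }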
+
+/*======================================================================
+
+ For DL10019 based cards, like the Linksys EtherFast
+
+======================================================================*/
+
+static hw_info_t *get_dl10019(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+ u_char sum;
+
+ for (sum = 0, i = 0x14; i < 0x1c; i++)
+ sum += inb_p(dev->base_addr + i);
+ if (sum != 0xff)
+ return NULL;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ i = inb(dev->base_addr + 0x1f);
+ return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
+}
+
+/*======================================================================
+
+ For Asix AX88190 based cards
+
+======================================================================*/
+
+static hw_info_t *get_ax88190(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i, j;
+
+ /* Not much of a test, but the alternatives are messy */
+ if (link->conf.ConfigBase != 0x03c0)
+ return NULL;
+
+ outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */
+ outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */
+ outb_p(0x04, ioaddr + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD);
+
+ for (i = 0; i < 6; i += 2) {
+ j = inw(ioaddr + PCNET_DATAPORT);
+ dev->dev_addr[i] = j & 0xff;
+ dev->dev_addr[i+1] = j >> 8;
+ }
+ printk(KERN_NOTICE "pcnet_cs: this is an AX88190 card!\n");
+ printk(KERN_NOTICE "pcnet_cs: use axnet_cs instead.\n");
+ return NULL;
+}
+
+/*======================================================================
+
+ This should be totally unnecessary... but when we can't figure
+ out the hardware address any other way, we'll let the user hard
+ wire it when the module is initialized.
+
+======================================================================*/
+
+static hw_info_t *get_hwired(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (hw_addr[i] != 0) break;
+ if (i == 6)
+ return NULL;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = hw_addr[i];
+
+ return &default_info;
+} /* get_hwired */
+
+/*======================================================================
+
+ pcnet_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn), args))!=0) goto cs_failed
+
+#define CFG_CHECK(fn, args...) \
+if (CardServices(fn, args) != 0) goto next_entry
+
+static int try_io_port(dev_link_t *link)
+{
+ int j, ret;
+ if (link->io.NumPorts1 == 32) {
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (link->io.NumPorts2 > 0) {
+ /* for master/slave multifunction cards */
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ }
+ } else {
+ /* This should be two 16-port windows */
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
+ }
+ if (link->io.BasePort1 == 0) {
+ link->io.IOAddrLines = 16;
+ for (j = 0; j < 0x400; j += 0x20) {
+ link->io.BasePort1 = j ^ 0x300;
+ link->io.BasePort2 = (j ^ 0x300) + 0x10;
+ ret = CardServices(RequestIO, link->handle, &link->io);
+ if (ret == CS_SUCCESS) return ret;
+ }
+ return ret;
+ } else {
+ return CardServices(RequestIO, link->handle, &link->io);
+ }
+}
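The "j ^ 0x300" above only reorders the scan so that the traditional 0x300-0x3ff range is tried before lower addresses; a standalone sketch of the resulting probe order (illustration only):

    #include <stdio.h>

    int main(void)
    {
        int j;

        /* Same loop as try_io_port(): 0x300-0x3e0 first, then 0x200-, 0x100-, 0x000- */
        for (j = 0; j < 0x400; j += 0x20)
            printf("try I/O base 0x%03x\n", j ^ 0x300);
        return 0;
    }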
+
+static void pcnet_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ pcnet_dev_t *info = link->priv;
+ struct net_device *dev = &info->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
+ int manfid = 0, prodid = 0, has_shmem = 0;
+ u_short buf[64];
+ config_info_t conf;
+ hw_info_t *hw_info;
+
+ DEBUG(0, "pcnet_config(0x%p)\n", link);
+
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up current Vcc */
+ CS_CHECK(GetConfigurationInfo, handle, &conf);
+ link->conf.Vcc = conf.Vcc;
+
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if ((CardServices(GetFirstTuple, handle, &tuple) == CS_SUCCESS) &&
+ (CardServices(GetTupleData, handle, &tuple) == CS_SUCCESS)) {
+ manfid = le16_to_cpu(buf[0]);
+ prodid = le16_to_cpu(buf[1]);
+ }
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Attributes = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ while (last_ret == CS_SUCCESS) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_io_t *io = &(parse.cftable_entry.io);
+
+ CFG_CHECK(GetTupleData, handle, &tuple);
+ CFG_CHECK(ParseTuple, handle, &tuple, &parse);
+ if ((cfg->index == 0) || (cfg->io.nwin == 0))
+ goto next_entry;
+
+ link->conf.ConfigIndex = cfg->index;
+ /* For multifunction cards, by convention, we configure the
+ network function with window 0, and serial with window 1 */
+ if (io->nwin > 1) {
+ i = (io->win[1].len > io->win[0].len);
+ link->io.BasePort2 = io->win[1-i].base;
+ link->io.NumPorts2 = io->win[1-i].len;
+ } else {
+ i = link->io.NumPorts2 = 0;
+ }
+ has_shmem = ((cfg->mem.nwin == 1) &&
+ (cfg->mem.win[0].len >= 0x4000));
+ link->io.BasePort1 = io->win[i].base;
+ link->io.NumPorts1 = io->win[i].len;
+ link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ if (link->io.NumPorts1 + link->io.NumPorts2 >= 32) {
+ last_ret = try_io_port(link);
+ if (last_ret == CS_SUCCESS) break;
+ }
+ next_entry:
+ last_ret = CardServices(GetNextTuple, handle, &tuple);
+ }
+ if (last_ret != CS_SUCCESS) {
+ cs_error(handle, RequestIO, last_ret);
+ goto failed;
+ }
+
+ CS_CHECK(RequestIRQ, handle, &link->irq);
+
+ if (link->io.NumPorts2 == 8) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+ if ((manfid == MANFID_IBM) &&
+ (prodid == PRODID_IBM_HOME_AND_AWAY))
+ link->conf.ConfigIndex |= 0x10;
+
+ CS_CHECK(RequestConfiguration, handle, &link->conf);
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if (info->flags & HAS_MISC_REG) {
+ if ((if_port == 1) || (if_port == 2))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
+ } else {
+ dev->if_port = 0;
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ hw_info = get_hwinfo(link);
+ if (hw_info == NULL)
+ hw_info = get_prom(link);
+ if (hw_info == NULL)
+ hw_info = get_dl10019(link);
+ if (hw_info == NULL)
+ hw_info = get_ax88190(link);
+ if (hw_info == NULL)
+ hw_info = get_hwired(link);
+
+ if (hw_info == NULL) {
+ printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
+ " address for io base %#3lx\n", dev->base_addr);
+ unregister_netdev(dev);
+ goto failed;
+ }
+
+ info->flags = hw_info->flags;
+ /* Check for user overrides */
+ info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
+ if ((manfid == MANFID_SOCKET) &&
+ ((prodid == PRODID_SOCKET_LPE) ||
+ (prodid == PRODID_SOCKET_LPE_CF) ||
+ (prodid == PRODID_SOCKET_EIO)))
+ info->flags &= ~USE_BIG_BUF;
+ if (!use_big_buf)
+ info->flags &= ~USE_BIG_BUF;
+
+ if (info->flags & USE_BIG_BUF) {
+ start_pg = SOCKET_START_PG;
+ stop_pg = SOCKET_STOP_PG;
+ cm_offset = 0x10000;
+ } else {
+ start_pg = PCNET_START_PG;
+ stop_pg = PCNET_STOP_PG;
+ cm_offset = 0;
+ }
+
+ /* has_shmem is ignored if use_shmem != -1 */
+ if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) ||
+ (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0))
+ setup_dma_config(link, start_pg, stop_pg);
+
+ ei_status.name = "NE2000";
+ ei_status.word16 = 1;
+ ei_status.reset_8390 = &pcnet_reset_8390;
+
+ copy_dev_name(info->node, dev);
+ link->dev = &info->node;
+
+ if (info->flags & (IS_DL10019|IS_DL10022)) {
+ u_char id = inb(dev->base_addr + 0x1a);
+ dev->do_ioctl = &ei_ioctl;
+ mii_phy_probe(dev);
+ if ((id == 0x30) && !info->pna_phy && (info->eth_phy == 4))
+ info->eth_phy = 0;
+ printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
+ dev->name, ((info->flags & IS_DL10022) ? 22 : 19), id);
+ if (info->pna_phy)
+ printk("PNA, ");
+ } else
+ printk(KERN_INFO "%s: NE2000 Compatible: ", dev->name);
+ printk("io %#3lx, irq %d,", dev->base_addr, dev->irq);
+ if (info->flags & USE_SHMEM)
+ printk (" mem %#5lx,", dev->mem_start);
+ if (info->flags & HAS_MISC_REG)
+ printk(" %s xcvr,", if_names[dev->if_port]);
+ printk(" hw_addr ");
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ pcnet_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+} /* pcnet_config */
+
+/*======================================================================
+
+ After a card is removed, pcnet_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void pcnet_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+ pcnet_dev_t *info = link->priv;
+
+ DEBUG(0, "pcnet_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "pcnet_cs: release postponed, '%s' still open\n",
+ info->node.dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ if (info->flags & USE_SHMEM) {
+ iounmap(info->base);
+ CardServices(ReleaseWindow, link->win);
+ }
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+
+ link->state &= ~DEV_CONFIG;
+
+} /* pcnet_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int pcnet_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ pcnet_dev_t *info = link->priv;
+
+ DEBUG(2, "pcnet_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(&info->dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ pcnet_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(&info->dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ pcnet_reset_8390(&info->dev);
+ NS8390_init(&info->dev, 1);
+ netif_device_attach(&info->dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* pcnet_event */
+
+/*======================================================================
+
+ MII interface support for DL10019 and DL10022 based cards
+
+ On the DL10019, the MII IO direction bit is 0x10; on the DL10022
+ it is 0x20. Setting both bits seems to work on both card types.
+
+======================================================================*/
+
+#define DLINK_GPIO 0x1c
+#define DLINK_DIAG 0x1d
+#define DLINK_EEPROM 0x1e
+
+#define MDIO_SHIFT_CLK 0x80
+#define MDIO_DATA_OUT 0x40
+#define MDIO_DIR_WRITE 0x30
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x10
+#define MDIO_MASK 0x0f
+
+static void mdio_sync(ioaddr_t addr)
+{
+ int bits, mask = inb(addr) & MDIO_MASK;
+ for (bits = 0; bits < 32; bits++) {
+ outb(mask | MDIO_DATA_WRITE1, addr);
+ outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
+static int mdio_read(ioaddr_t addr, int phy_id, int loc)
+{
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb(mask, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(ioaddr_t addr, int phy_id, int loc, int value)
+{
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i, mask = inb(addr) & MDIO_MASK;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(mask | dat, addr);
+ outb(mask | dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb(mask, addr);
+ outb(mask | MDIO_SHIFT_CLK, addr);
+ }
+}
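The helpers above bit-bang standard (clause 22) MII management frames; here is a small sketch of the field layout implied by the shifts in mdio_read() and mdio_write(), with made-up PHY/register/data values (the 32-bit preamble is what mdio_sync() supplies):

    #include <stdio.h>

    int main(void)
    {
        unsigned int phy = 3, reg = 1, val = 0x1200;   /* made-up values */

        /* read:  14 bits = 01 10 PPPPP RRRRR, then 16 data bits are clocked back in */
        unsigned int rd = (0x06u << 10) | (phy << 5) | reg;

        /* write: 32 bits = 01 01 PPPPP RRRRR 10 DDDDDDDDDDDDDDDD */
        unsigned int wr = (0x05u << 28) | (phy << 23) | (reg << 18) | (1u << 17) | val;

        printf("read frame  = 0x%04x\n", rd);
        printf("write frame = 0x%08x\n", wr);
        return 0;
    }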
+
+static void mdio_reset(ioaddr_t addr, int phy_id)
+{
+ outb_p(0x08, addr);
+ outb_p(0x0c, addr);
+ outb_p(0x08, addr);
+ outb_p(0x0c, addr);
+ outb_p(0x00, addr);
+}
+
+/*======================================================================
+
+ EEPROM access routines for DL10019 and DL10022 based cards
+
+======================================================================*/
+
+#define EE_EEP 0x40
+#define EE_ASIC 0x10
+#define EE_CS 0x08
+#define EE_CK 0x04
+#define EE_DO 0x02
+#define EE_DI 0x01
+#define EE_ADOT 0x01 /* DataOut for ASIC */
+#define EE_READ_CMD 0x06
+
+#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
+
+static int read_eeprom(ioaddr_t ioaddr, int location)
+{
+ int i, retval = 0;
+ ioaddr_t ee_addr = ioaddr + DLINK_EEPROM;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ outb(0, ee_addr);
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_EEP|EE_CS|dataval, ee_addr);
+ outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
+ }
+ outb(EE_EEP|EE_CS, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
+ outb_p(EE_EEP|EE_CS, ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(0, ee_addr);
+ return retval;
+}
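read_eeprom() shifts out a 93Cx6-style serial EEPROM READ command; a tiny sketch of how that command word is put together, with a made-up word address:

    #include <stdio.h>

    #define EE_READ_CMD 0x06   /* same value as above: start bit '1' + READ opcode '10' */

    int main(void)
    {
        int location = 0x04;   /* made-up word address */
        int read_cmd = location | (EE_READ_CMD << 8);

        /* Bits 10..8 carry the start bit and opcode, bits 7..0 the word address;
           read_eeprom() then clocks 16 data bits back in, MSB first. */
        printf("shift out 0x%03x (11 bits, bit 10 first)\n", read_cmd);
        return 0;
    }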
+
+/*
+ The internal ASIC registers can be changed by EEPROM READ access
+ with EE_ASIC bit set.
+ In ASIC mode, EE_ADOT is used to output the data to the ASIC.
+*/
+
+static void write_asic(ioaddr_t ioaddr, int location, short asic_data)
+{
+ int i;
+ ioaddr_t ee_addr = ioaddr + DLINK_EEPROM;
+ short dataval;
+ int read_cmd = location | (EE_READ_CMD << 8);
+
+ asic_data |= read_eeprom(ioaddr, location);
+
+ outb(0, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_DI, ee_addr);
+
+ read_cmd = read_cmd >> 1;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+ }
+ // sync
+ outb(EE_ASIC|EE_CS, ee_addr);
+ outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
+ outb(EE_ASIC|EE_CS, ee_addr);
+
+ for (i = 15; i >= 0; i--) {
+ dataval = (asic_data & (1 << i)) ? EE_ADOT : 0;
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr);
+ outb_p(EE_ASIC|EE_CS|dataval, ee_addr);
+ }
+
+ /* Terminate the ASIC access. */
+ outb(EE_ASIC|EE_DI, ee_addr);
+ outb(EE_ASIC|EE_DI| EE_CK, ee_addr);
+ outb(EE_ASIC|EE_DI, ee_addr);
+
+ outb(0, ee_addr);
+}
+
+/*====================================================================*/
+
+static void set_misc_reg(struct net_device *dev)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ u_char tmp;
+
+ if (info->flags & HAS_MISC_REG) {
+ tmp = inb_p(nic_base + PCNET_MISC) & ~3;
+ if (dev->if_port == 2)
+ tmp |= 1;
+ if (info->flags & USE_BIG_BUF)
+ tmp |= 2;
+ if (info->flags & HAS_IBM_MISC)
+ tmp |= 8;
+ outb_p(tmp, nic_base + PCNET_MISC);
+ }
+ if (info->flags & IS_DL10022) {
+ if (info->flags & HAS_MII) {
+ mdio_reset(nic_base + DLINK_GPIO, info->eth_phy);
+ /* Restart MII autonegotiation */
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
+ mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
+ info->mii_reset = jiffies;
+ } else {
+ outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
+ }
+ }
+}
+
+/*====================================================================*/
+
+static void mii_phy_probe(struct net_device *dev)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ ioaddr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ int i;
+ u_int tmp, phyid;
+
+ for (i = 31; i >= 0; i--) {
+ tmp = mdio_read(mii_addr, i, 1);
+ if ((tmp == 0) || (tmp == 0xffff))
+ continue;
+ tmp = mdio_read(mii_addr, i, MII_PHYID_REG1);
+ phyid = tmp << 16;
+ phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
+ phyid &= MII_PHYID_REV_MASK;
+ DEBUG(0, "%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
+ if (phyid == AM79C9XX_HOME_PHY) {
+ info->pna_phy = i;
+ } else if (phyid != AM79C9XX_ETH_PHY) {
+ info->eth_phy = i;
+ }
+ }
+}
+
+static int pcnet_open(struct net_device *dev)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "pcnet_open('%s')\n", dev->name);
+
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ set_misc_reg(dev);
+ request_irq(dev->irq, ei_irq_wrapper, SA_SHIRQ, dev_info, dev);
+
+ info->phy_id = info->eth_phy;
+ info->link_status = 0x00;
+ info->watchdog.function = &ei_watchdog;
+ info->watchdog.data = (u_long)info;
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+
+ return ei_open(dev);
+} /* pcnet_open */
+
+/*====================================================================*/
+
+static int pcnet_close(struct net_device *dev)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ dev_link_t *link = &info->link;
+
+ DEBUG(2, "pcnet_close('%s')\n", dev->name);
+
+ ei_close(dev);
+ free_irq(dev->irq, dev);
+
+ link->open--;
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+ del_timer(&info->watchdog);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* pcnet_close */
+
+/*======================================================================
+
+ Hard reset the card. This used to pause for the same period that
+ an 8390 reset command required, but that shouldn't be necessary.
+
+======================================================================*/
+
+static void pcnet_reset_8390(struct net_device *dev)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int i;
+
+ ei_status.txing = ei_status.dmaing = 0;
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD);
+
+ outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET);
+
+ for (i = 0; i < 100; i++) {
+ if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0)
+ break;
+ udelay(100);
+ }
+ outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
+
+ if (i == 100)
+ printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
+ dev->name);
+ set_misc_reg(dev);
+
+} /* pcnet_reset_8390 */
+
+/*====================================================================*/
+
+static int set_config(struct net_device *dev, struct ifmap *map)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (!(info->flags & HAS_MISC_REG))
+ return -EOPNOTSUPP;
+ else if ((map->port < 1) || (map->port > 2))
+ return -EINVAL;
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ NS8390_init(dev, 1);
+ }
+ return 0;
+}
+
+/*====================================================================*/
+
+static void ei_irq_wrapper(int irq, void *dev_id, struct pt_regs *regs)
+{
+ pcnet_dev_t *info = dev_id;
+ info->stale = 0;
+ ei_interrupt(irq, dev_id, regs);
+}
+
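+ /* Once-a-second watchdog: poll for dropped interrupts, track the MII
+    link status, and flip between the ethernet and HomePNA transceivers. */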
+static void ei_watchdog(u_long arg)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)(arg);
+ struct net_device *dev = &info->dev;
+ ioaddr_t nic_base = dev->base_addr;
+ ioaddr_t mii_addr = nic_base + DLINK_GPIO;
+ u_short link;
+
+ if (!netif_device_present(dev)) goto reschedule;
+
+ /* Check for a pending interrupt with an expired latency timer: with
+    this, we can limp along even if the interrupt is blocked. */
+ outb_p(E8390_NODMA+E8390_PAGE0, nic_base + E8390_CMD);
+ if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
+ if (!info->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ ei_irq_wrapper(dev->irq, dev, NULL);
+ info->fast_poll = HZ;
+ }
+ if (info->fast_poll) {
+ info->fast_poll--;
+ info->watchdog.expires = jiffies + 1;
+ add_timer(&info->watchdog);
+ return;
+ }
+
+ if (!(info->flags & HAS_MII))
+ goto reschedule;
+
+ mdio_read(mii_addr, info->phy_id, 1);
+ link = mdio_read(mii_addr, info->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ if (info->eth_phy) {
+ info->phy_id = info->eth_phy = 0;
+ } else {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ info->flags &= ~HAS_MII;
+ }
+ goto reschedule;
+ }
+
+ link &= 0x0004;
+ if (link != info->link_status) {
+ u_short p = mdio_read(mii_addr, info->phy_id, 5);
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ if (link && (info->flags & IS_DL10022)) {
+ /* Disable collision detection on full duplex links */
+ outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
+ } else if (link && (info->flags & IS_DL10019)) {
+ /* Disable collision detection on full duplex links */
+ write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0);
+ }
+ if (link) {
+ if (info->phy_id == info->eth_phy) {
+ if (p)
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ ((p & 0x0140) ? 'F' : 'H'));
+ else
+ printk(KERN_INFO "%s: link partner did not "
+ "autonegotiate\n", dev->name);
+ }
+ NS8390_init(dev, 1);
+ }
+ info->link_status = link;
+ }
+ if (info->pna_phy && (jiffies - info->mii_reset > 6*HZ)) {
+ link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004;
+ if (((info->phy_id == info->pna_phy) && link) ||
+ ((info->phy_id != info->pna_phy) && !link)) {
+ /* isolate this MII and try flipping to the other one */
+ mdio_write(mii_addr, info->phy_id, 0, 0x0400);
+ info->phy_id ^= info->pna_phy ^ info->eth_phy;
+ printk(KERN_INFO "%s: switched to %s transceiver\n", dev->name,
+ (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
+ mdio_write(mii_addr, info->phy_id, 0,
+ (info->phy_id == info->eth_phy) ? 0x1000 : 0);
+ info->link_status = 0;
+ info->mii_reset = jiffies;
+ }
+ }
+
+reschedule:
+ info->watchdog.expires = jiffies + HZ;
+ add_timer(&info->watchdog);
+}
+
+/*====================================================================*/
+
+static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+ u16 *data = (u16 *)&rq->ifr_data;
+ ioaddr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ data[0] = info->phy_id;
+ case SIOCDEVPRIVATE+1:
+ data[3] = mdio_read(mii_addr, data[0], data[1] & 0x1f);
+ return 0;
+ case SIOCDEVPRIVATE+2:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mdio_write(mii_addr, data[0], data[1] & 0x1f, data[2]);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+/*====================================================================*/
+
+static void dma_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr)>>1);
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+static void dma_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ int xfer_count = count;
+ char *buf = skb->data;
+
+#ifdef PCMCIA_DEBUG
+ if ((ei_debug > 4) && (count != 4))
+ printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4);
+#endif
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
+
+ insw(nic_base + PCNET_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+
+ /* This was for the ALPHA version only, but enough people have been
+ encountering problems that it is still here. */
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff))
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk(KERN_NOTICE "%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+} /* dma_block_input */
+
+/*====================================================================*/
+
+static void dma_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ ioaddr_t nic_base = dev->base_addr;
+ pcnet_dev_t *info = (pcnet_dev_t *)dev;
+#ifdef PCMCIA_DEBUG
+ int retries = 0;
+#endif
+ u_long dma_start;
+
+#ifdef PCMCIA_DEBUG
+ if (ei_debug > 4)
+ printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (count & 0x01)
+ count++;
+ if (ei_status.dmaing) {
+ printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output."
+ "[DMAstat:%1x][irqlock:%1x]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD);
+
+#ifdef PCMCIA_DEBUG
+ retry:
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD);
+ outsw(nic_base + PCNET_DATAPORT, buf, count>>1);
+
+ dma_start = jiffies;
+
+#ifdef PCMCIA_DEBUG
+ /* This was for the ALPHA version only, but enough people have been
+ encountering problems that it is still here. */
+ if (ei_debug > 4) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk(KERN_NOTICE "%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > PCNET_RDC_TIMEOUT) {
+ printk(KERN_NOTICE "%s: timeout waiting for Tx RDC.\n",
+ dev->name);
+ pcnet_reset_8390(dev);
+ NS8390_init(dev, 1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ if (info->flags & DELAY_OUTPUT)
+ udelay((long)delay_time);
+ ei_status.dmaing &= ~0x01;
+}
+
+/*====================================================================*/
+
+static int setup_dma_config(dev_link_t *link, int start_pg,
+ int stop_pg)
+{
+ struct net_device *dev = link->priv;
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = stop_pg;
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = &dma_get_8390_hdr;
+ ei_status.block_input = &dma_block_input;
+ ei_status.block_output = &dma_block_output;
+
+ return 0;
+}
+
+/*====================================================================*/
+
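+ /* 16-bit copies between the card's shared-memory window and host
+    buffers, handling a trailing odd byte. */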
+static void copyin(u_char *dest, u_char *src, int c)
+{
+ u_short *d = (u_short *)dest, *s = (u_short *)src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { *d++ = readw_ns(s++); } while (--c);
+ }
+ /* get last byte by fetching a word and masking */
+ if (odd)
+ *((u_char *)d) = readw(s) & 0xff;
+}
+
+static void copyout(u_char *dest, const u_char *src, int c)
+{
+ u_short *d = (u_short *)dest, *s = (u_short *)src;
+ int odd;
+
+ if (c <= 0)
+ return;
+ odd = (c & 1); c >>= 1;
+
+ if (c) {
+ do { writew_ns(*s++, d++); } while (--c);
+ }
+ /* copy last byte doing a read-modify-write */
+ if (odd)
+ writew((readw(d) & 0xff00) | *(u_char *)s, d);
+}
+
+/*====================================================================*/
+
+static void shmem_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ void *xfer_start = (void *)(dev->rmem_start + (ring_page << 8)
+ - (ei_status.rx_start_page << 8));
+
+ copyin((void *)hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
+ /* Fix for big endian systems */
+ hdr->count = le16_to_cpu(hdr->count);
+}
+
+/*====================================================================*/
+
+static void shmem_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ void *xfer_start = (void *)(dev->rmem_start + ring_offset
+ - (ei_status.rx_start_page << 8));
+ char *buf = skb->data;
+
+ if (xfer_start + count > (void *)dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = (void*)dev->rmem_end - xfer_start;
+ copyin(buf, xfer_start, semi_count);
+ buf += semi_count;
+ ring_offset = ei_status.rx_start_page << 8;
+ xfer_start = (void *)dev->rmem_start;
+ count -= semi_count;
+ }
+ copyin(buf, xfer_start, count);
+}
+
+/*====================================================================*/
+
+static void shmem_block_output(struct net_device *dev, int count,
+ const u_char *buf, const int start_page)
+{
+ void *shmem = (void *)dev->mem_start + (start_page << 8);
+ shmem -= ei_status.tx_start_page << 8;
+ copyout(shmem, buf, count);
+}
+
+/*====================================================================*/
+
+static int setup_shmem_window(dev_link_t *link, int start_pg,
+ int stop_pg, int cm_offset)
+{
+ struct net_device *dev = link->priv;
+ pcnet_dev_t *info = link->priv;
+ win_req_t req;
+ memreq_t mem;
+ int i, window_size, offset, last_ret, last_fn;
+
+ window_size = (stop_pg - start_pg) << 8;
+ if (window_size > 32 * 1024)
+ window_size = 32 * 1024;
+
+ /* Make sure it's a power of two. */
+ while ((window_size & (window_size - 1)) != 0)
+ window_size += window_size & ~(window_size - 1);
+
+ /* Allocate a memory window */
+ req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
+ req.Attributes |= WIN_USE_WAIT;
+ req.Base = 0; req.Size = window_size;
+ req.AccessSpeed = mem_speed;
+ link->win = (window_handle_t)link->handle;
+ CS_CHECK(RequestWindow, &link->win, &req);
+
+ mem.CardOffset = (start_pg << 8) + cm_offset;
+ offset = mem.CardOffset % window_size;
+ mem.CardOffset -= offset;
+ mem.Page = 0;
+ CS_CHECK(MapMemPage, link->win, &mem);
+
+ /* Try scribbling on the buffer */
+ info->base = ioremap(req.Base, window_size);
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ writew_ns((i>>1), info->base+offset+i);
+ udelay(100);
+ for (i = 0; i < (TX_PAGES<<8); i += 2)
+ if (readw_ns(info->base+offset+i) != (i>>1)) break;
+ pcnet_reset_8390(dev);
+ if (i != (TX_PAGES<<8)) {
+ iounmap(info->base);
+ CardServices(ReleaseWindow, link->win);
+ info->base = NULL; link->win = NULL;
+ goto failed;
+ }
+
+ dev->mem_start = (u_long)info->base + offset;
+ dev->rmem_start = dev->mem_start + (TX_PAGES<<8);
+ dev->mem_end = dev->rmem_end = (u_long)info->base + req.Size;
+
+ ei_status.tx_start_page = start_pg;
+ ei_status.rx_start_page = start_pg + TX_PAGES;
+ ei_status.stop_page = start_pg + ((req.Size - offset) >> 8);
+
+ /* set up block i/o functions */
+ ei_status.get_8390_hdr = &shmem_get_8390_hdr;
+ ei_status.block_input = &shmem_block_input;
+ ei_status.block_output = &shmem_block_output;
+
+ info->flags |= USE_SHMEM;
+ return 0;
+
+cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+failed:
+ return 1;
+}
+
+/*====================================================================*/
+
+static int __init init_pcnet_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "pcnet_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &pcnet_attach, &pcnet_detach);
+ return 0;
+}
+
+static void __exit exit_pcnet_cs(void)
+{
+ DEBUG(0, "pcnet_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ pcnet_detach(dev_list);
+}
+
+module_init(init_pcnet_cs);
+module_exit(exit_pcnet_cs);
diff --git a/linux/pcmcia-cs/clients/smc91c92_cs.c b/linux/pcmcia-cs/clients/smc91c92_cs.c
new file mode 100644
index 0000000..6921515
--- /dev/null
+++ b/linux/pcmcia-cs/clients/smc91c92_cs.c
@@ -0,0 +1,2135 @@
+/*======================================================================
+
+ A PCMCIA ethernet driver for SMC91c92-based cards.
+
+ This driver supports Megahertz PCMCIA ethernet cards; and
+ Megahertz, Motorola, Ositech, and Psion Dacom ethernet/modem
+ multifunction cards.
+
+ Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+
+ smc91c92_cs.c 1.123 2003/08/25 15:57:41
+
+ This driver contains code written by Donald Becker
+ (becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au),
+ David Hinds (dahinds@users.sourceforge.net), and Erik Stahlman
+ (erik@vt.edu). Donald wrote the SMC 91c92 code using parts of
+ Erik's SMC 91c94 driver. Rowan wrote a similar driver, and I've
+ incorporated some parts of his driver here. I (Dave) wrote most
+ of the PCMCIA glue code, and the Ositech support code. Kelly
+ Stephens (kstephen@holli.com) added support for the Motorola
+ Mariner, with help from Allen Brost.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+/* Ositech Seven of Diamonds firmware */
+#include "ositech.h"
+
+/*====================================================================*/
+
+static char *if_names[] = { "auto", "10baseT", "10base2"};
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+/*
+ Transceiver/media type.
+ 0 = auto
+ 1 = 10baseT (and autoselect if #define AUTOSELECT),
+ 2 = AUI/10base2,
+*/
+INT_MODULE_PARM(if_port, 0);
+
+/* Bit map of interrupts to choose from. */
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+static const char *version =
+"smc91c92_cs.c 0.09 1996/8/4 Donald Becker, becker@scyld.com.\n";
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+#define INTR_WORK 4
+
+/* Times to check the chip before concluding that it doesn't
+ currently have room for another Tx packet. */
+#define MEMORY_WAIT_TIME 8
+
+static dev_info_t dev_info = "smc91c92_cs";
+
+static dev_link_t *dev_list;
+
+struct smc_private {
+ dev_link_t link;
+ struct net_device dev;
+ u_short manfid;
+ u_short cardid;
+ struct net_device_stats stats;
+ dev_node_t node;
+ struct sk_buff *saved_skb;
+ int packets_waiting;
+ caddr_t base;
+ u_short cfg;
+ struct timer_list media;
+ int watchdog, tx_err;
+ u_short media_status;
+ u_short fast_poll;
+ u_short link_status;
+ int phy_id;
+ int duplex;
+ int rx_ovrn;
+};
+
+/* Special definitions for Megahertz multifunction cards */
+#define MEGAHERTZ_ISR 0x0380
+
+/* Special function registers for Motorola Mariner */
+#define MOT_LAN 0x0000
+#define MOT_UART 0x0020
+#define MOT_EEPROM 0x20
+
+#define MOT_NORMAL \
+(COR_LEVEL_REQ | COR_FUNC_ENA | COR_ADDR_DECODE | COR_IREQ_ENA)
+
+/* Special function registers for Ositech cards */
+#define OSITECH_AUI_CTL 0x0c
+#define OSITECH_PWRDOWN 0x0d
+#define OSITECH_RESET 0x0e
+#define OSITECH_ISR 0x0f
+#define OSITECH_AUI_PWR 0x0c
+#define OSITECH_RESET_ISR 0x0e
+
+#define OSI_AUI_PWR 0x40
+#define OSI_LAN_PWRDOWN 0x02
+#define OSI_MODEM_PWRDOWN 0x01
+#define OSI_LAN_RESET 0x02
+#define OSI_MODEM_RESET 0x01
+
+/* Symbolic constants for the SMC91c9* series chips, from Erik Stahlman. */
+#define BANK_SELECT 14 /* Window select register. */
+#define SMC_SELECT_BANK(x) { outw(x, ioaddr + BANK_SELECT); }
+
+/* Bank 0 registers. */
+#define TCR 0 /* transmit control register */
+#define TCR_CLEAR 0 /* do NOTHING */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_PAD_EN 0x0080 /* pads short packets to 64 bytes */
+#define TCR_MONCSN 0x0400 /* Monitor Carrier. */
+#define TCR_FDUPLX 0x0800 /* Full duplex mode. */
+#define TCR_NORMAL TCR_ENABLE | TCR_PAD_EN
+
+#define EPH 2 /* Ethernet Protocol Handler report. */
+#define EPH_TX_SUC 0x0001
+#define EPH_SNGLCOL 0x0002
+#define EPH_MULCOL 0x0004
+#define EPH_LTX_MULT 0x0008
+#define EPH_16COL 0x0010
+#define EPH_SQET 0x0020
+#define EPH_LTX_BRD 0x0040
+#define EPH_TX_DEFR 0x0080
+#define EPH_LAT_COL 0x0200
+#define EPH_LOST_CAR 0x0400
+#define EPH_EXC_DEF 0x0800
+#define EPH_CTR_ROL 0x1000
+#define EPH_RX_OVRN 0x2000
+#define EPH_LINK_OK 0x4000
+#define EPH_TX_UNRN 0x8000
+#define MEMINFO 8 /* Memory Information Register */
+#define MEMCFG 10 /* Memory Configuration Register */
+
+/* Bank 1 registers. */
+#define CONFIG 0
+#define CFG_MII_SELECT 0x8000 /* 91C100 only */
+#define CFG_NO_WAIT 0x1000
+#define CFG_FULL_STEP 0x0400
+#define CFG_SET_SQLCH 0x0200
+#define CFG_AUI_SELECT 0x0100
+#define CFG_16BIT 0x0080
+#define CFG_DIS_LINK 0x0040
+#define CFG_STATIC 0x0030
+#define CFG_IRQ_SEL_1 0x0004
+#define CFG_IRQ_SEL_0 0x0002
+#define BASE_ADDR 2
+#define ADDR0 4
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_STORE 0x0001
+#define CTL_RELOAD 0x0002
+#define CTL_EE_SELECT 0x0004
+#define CTL_TE_ENABLE 0x0020
+#define CTL_CR_ENABLE 0x0040
+#define CTL_LE_ENABLE 0x0080
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_POWERDOWN 0x2000
+
+/* Bank 2 registers. */
+#define MMU_CMD 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+#define FP_RXEMPTY 0x8000
+#define POINTER 6
+#define PTR_AUTO_INC 0x0040
+#define PTR_READ 0x2000
+#define PTR_AUTOINC 0x4000
+#define PTR_RCV 0x8000
+#define DATA_1 8
+#define INTERRUPT 12
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+
+#define RCR 4
+enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002,
+ RxEnable = 0x0100, RxStripCRC = 0x0200};
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+#define COUNTER 6
+
+/* BANK 3 -- not the same values as in smc9194! */
+#define MULTICAST0 0
+#define MULTICAST2 2
+#define MULTICAST4 4
+#define MULTICAST6 6
+#define MGMT 8
+#define REVISION 0x0a
+
+/* Transmit status bits. */
+#define TS_SUCCESS 0x0001
+#define TS_16COL 0x0010
+#define TS_LATCOL 0x0200
+#define TS_LOSTCAR 0x0400
+
+/* Receive status bits. */
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+#define set_bits(v, p) outw(inw(p)|(v), (p))
+#define mask_bits(v, p) outw(inw(p)&(v), (p))
+
+/*====================================================================*/
+
+static dev_link_t *smc91c92_attach(void);
+static void smc91c92_detach(dev_link_t *);
+static void smc91c92_config(dev_link_t *link);
+static void smc91c92_release(u_long arg);
+static int smc91c92_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+static int smc_open(struct net_device *dev);
+static int smc_close(struct net_device *dev);
+static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void smc_tx_timeout(struct net_device *dev);
+static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void smc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void smc_rx(struct net_device *dev);
+static struct net_device_stats *smc_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static int s9k_config(struct net_device *dev, struct ifmap *map);
+static void smc_set_xcvr(struct net_device *dev, int if_port);
+static void smc_reset(struct net_device *dev);
+static void media_check(u_long arg);
+static void mdio_sync(ioaddr_t addr);
+static int mdio_read(struct net_device *dev, int phy_id, int loc);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value);
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ smc91c92_detach(link);
+ }
+}
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ smc91c92_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+======================================================================*/
+
+static dev_link_t *smc91c92_attach(void)
+{
+ client_reg_t client_reg;
+ struct smc_private *smc;
+ dev_link_t *link;
+ struct net_device *dev;
+ int i, ret;
+
+ DEBUG(0, "smc91c92_attach()\n");
+ flush_stale_links();
+
+ /* Create new ethernet device */
+ smc = kmalloc(sizeof(struct smc_private), GFP_KERNEL);
+ if (!smc) return NULL;
+ memset(smc, 0, sizeof(struct smc_private));
+ link = &smc->link; dev = &smc->dev;
+
+ init_timer(&link->release);
+ link->release.function = &smc91c92_release;
+ link->release.data = (u_long)link;
+ link->io.NumPorts1 = 16;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.IOAddrLines = 4;
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = &smc_interrupt;
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* The SMC91c92-specific entries in the device structure. */
+ dev->hard_start_xmit = &smc_start_xmit;
+ dev->get_stats = &smc_get_stats;
+ dev->set_config = &s9k_config;
+ dev->set_multicast_list = &set_rx_mode;
+ ether_setup(dev);
+ init_dev_name(dev, smc->node);
+ dev->open = &smc_open;
+ dev->stop = &smc_close;
+ dev->do_ioctl = &smc_ioctl;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = smc_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+ dev->priv = link->priv = link->irq.Instance = smc;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask = CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &smc91c92_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ smc91c92_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* smc91c92_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+======================================================================*/
+
+static void smc91c92_detach(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "smc91c92_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ smc91c92_release((u_long)link);
+ if (link->state & DEV_STALE_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free bits */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&smc->dev);
+ kfree(smc);
+
+} /* smc91c92_detach */
+
+/*====================================================================*/
+
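+ /* Convert a 12-character hex string from the CIS into a 6-byte
+    station address. */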
+static int cvt_ascii_address(struct net_device *dev, char *s)
+{
+ int i, j, da, c;
+
+ if (strlen(s) != 12)
+ return -1;
+ for (i = 0; i < 6; i++) {
+ da = 0;
+ for (j = 0; j < 2; j++) {
+ c = *s++;
+ da <<= 4;
+ da += ((c >= '0') && (c <= '9')) ?
+ (c - '0') : ((c & 0x0f) + 9);
+ }
+ dev->dev_addr[i] = da;
+ }
+ return 0;
+}
+
+/*====================================================================*/
+
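+ /* Fetch a CIS tuple, read its data, and parse it in one call. */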
+static int get_tuple(int fn, client_handle_t handle, tuple_t *tuple,
+ cisparse_t *parse)
+{
+ int i;
+ i = CardServices(fn, handle, tuple);
+ if (i != CS_SUCCESS) return i;
+ i = CardServices(GetTupleData, handle, tuple);
+ if (i != CS_SUCCESS) return i;
+ return CardServices(ParseTuple, handle, tuple, parse);
+}
+
+#define first_tuple(a, b, c) get_tuple(GetFirstTuple, a, b, c)
+#define next_tuple(a, b, c) get_tuple(GetNextTuple, a, b, c)
+
+/*======================================================================
+
+ Configuration stuff for Megahertz cards
+
+ mhz_3288_power() is used to power up a 3288's ethernet chip.
+ mhz_mfc_config() handles socket setup for multifunction (1144
+ and 3288) cards. mhz_setup() gets a card's hardware ethernet
+ address.
+
+======================================================================*/
+
+static int mhz_3288_power(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ u_char tmp;
+
+ /* Read the ISR twice... */
+ readb(smc->base+MEGAHERTZ_ISR);
+ udelay(5);
+ readb(smc->base+MEGAHERTZ_ISR);
+
+ /* Pause 200ms... */
+ mdelay(200);
+
+ /* Now read and write the COR... */
+ tmp = readb(smc->base + link->conf.ConfigBase + CISREG_COR);
+ udelay(5);
+ writeb(tmp, smc->base + link->conf.ConfigBase + CISREG_COR);
+
+ return 0;
+}
+
+static int mhz_mfc_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255];
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+ win_req_t req;
+ memreq_t mem;
+ int i, k;
+
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ link->io.IOAddrLines = 16;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts2 = 8;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ i = first_tuple(link->handle, &tuple, &parse);
+ /* The Megahertz combo cards have modem-like CIS entries, so
+ we have to explicitly try a bunch of port combinations. */
+ while (i == CS_SUCCESS) {
+ link->conf.ConfigIndex = cf->index;
+ link->io.BasePort2 = cf->io.win[0].base;
+ for (k = 0; k < 0x400; k += 0x10) {
+ if (k & 0x80) continue;
+ link->io.BasePort1 = k ^ 0x300;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i == CS_SUCCESS) break;
+ i = next_tuple(link->handle, &tuple, &parse);
+ }
+ if (i != CS_SUCCESS)
+ return i;
+ dev->base_addr = link->io.BasePort1;
+
+ /* Allocate a memory window, for accessing the ISR */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ i = CardServices(RequestWindow, &link->win, &req);
+ if (i != CS_SUCCESS)
+ return i;
+ smc->base = ioremap(req.Base, req.Size);
+ mem.CardOffset = mem.Page = 0;
+ if (smc->manfid == MANFID_MOTOROLA)
+ mem.CardOffset = link->conf.ConfigBase;
+ i = CardServices(MapMemPage, link->win, &mem);
+
+ if ((i == CS_SUCCESS)
+ && (smc->manfid == MANFID_MEGAHERTZ)
+ && (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+
+ return i;
+}
+
+static int mhz_setup(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255], *station_addr;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ /* Read the station address from the CIS. It is stored as the last
+ (fourth) string in the Version 1 Version/ID tuple. */
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (first_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ return -1;
+ /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
+ if (next_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ first_tuple(handle, &tuple, &parse);
+ if (parse.version_1.ns > 3) {
+ station_addr = parse.version_1.str + parse.version_1.ofs[3];
+ if (cvt_ascii_address(dev, station_addr) == 0)
+ return 0;
+ }
+
+ /* Another possibility: for the EM3288, in a special tuple */
+ tuple.DesiredTuple = 0x81;
+ if (CardServices(GetFirstTuple, handle, &tuple) != CS_SUCCESS)
+ return -1;
+ if (CardServices(GetTupleData, handle, &tuple) != CS_SUCCESS)
+ return -1;
+ buf[12] = '\0';
+ if (cvt_ascii_address(dev, buf) == 0)
+ return 0;
+
+ return -1;
+}
+
+/*======================================================================
+
+ Configuration stuff for the Motorola Mariner
+
+ mot_config() writes directly to the Mariner configuration
+ registers because the CIS is just bogus.
+
+======================================================================*/
+
+static void mot_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ ioaddr_t iouart = link->io.BasePort2;
+
+ /* Set UART base address and force map with COR bit 1 */
+ writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
+ writeb((iouart >> 8) & 0xff, smc->base + MOT_UART + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_UART + CISREG_COR);
+
+ /* Set SMC base address and force map with COR bit 1 */
+ writeb(ioaddr & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_0);
+ writeb((ioaddr >> 8) & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_1);
+ writeb(MOT_NORMAL, smc->base + MOT_LAN + CISREG_COR);
+
+ /* Wait for things to settle down */
+ mdelay(100);
+}
+
+static int mot_setup(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ int i, wait, loop;
+ u_int addr;
+
+ /* Read Ethernet address from Serial EEPROM */
+
+ for (i = 0; i < 3; i++) {
+ SMC_SELECT_BANK(2);
+ outw(MOT_EEPROM + i, ioaddr + POINTER);
+ SMC_SELECT_BANK(1);
+ outw((CTL_RELOAD | CTL_EE_SELECT), ioaddr + CONTROL);
+
+ for (loop = wait = 0; loop < 200; loop++) {
+ udelay(10);
+ wait = ((CTL_RELOAD | CTL_STORE) & inw(ioaddr + CONTROL));
+ if (wait == 0) break;
+ }
+
+ if (wait)
+ return -1;
+
+ addr = inw(ioaddr + GENERAL);
+ dev->dev_addr[2*i] = addr & 0xff;
+ dev->dev_addr[2*i+1] = (addr >> 8) & 0xff;
+ }
+
+ return 0;
+}
+
+/*====================================================================*/
+
+static int smc_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255];
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+ int i;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ link->io.NumPorts1 = 16;
+ i = first_tuple(link->handle, &tuple, &parse);
+ while (i != CS_NO_MORE_ITEMS) {
+ if (i == CS_SUCCESS) {
+ link->conf.ConfigIndex = cf->index;
+ link->io.BasePort1 = cf->io.win[0].base;
+ link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ i = next_tuple(link->handle, &tuple, &parse);
+ }
+ if (i == CS_SUCCESS)
+ dev->base_addr = link->io.BasePort1;
+ return i;
+}
+
+static int smc_setup(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ cistpl_lan_node_id_t *node_id;
+ u_char buf[255], *station_addr;
+ int i;
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ /* Check for a LAN function extension tuple */
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ i = first_tuple(handle, &tuple, &parse);
+ while (i == CS_SUCCESS) {
+ if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID)
+ break;
+ i = next_tuple(handle, &tuple, &parse);
+ }
+ if (i == CS_SUCCESS) {
+ node_id = (cistpl_lan_node_id_t *)parse.funce.data;
+ if (node_id->nb == 6) {
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = node_id->id[i];
+ return 0;
+ }
+ }
+
+ /* Try the third string in the Version 1 Version/ID tuple. */
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (first_tuple(handle, &tuple, &parse) != CS_SUCCESS)
+ return -1;
+ station_addr = parse.version_1.str + parse.version_1.ofs[2];
+ if (cvt_ascii_address(dev, station_addr) == 0)
+ return 0;
+
+ return -1;
+}
+
+/*====================================================================*/
+
+static int osi_config(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ static ioaddr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
+ int i, j;
+
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ link->irq.Attributes =
+ IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT;
+ link->io.NumPorts1 = 64;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ link->io.NumPorts2 = 8;
+ link->io.IOAddrLines = 16;
+
+ /* Enable Hard Decode, LAN, Modem */
+ link->conf.ConfigIndex = 0x23;
+
+ for (i = j = 0; j < 4; j++) {
+ link->io.BasePort2 = com[j];
+ i = CardServices(RequestIO, link->handle, &link->io);
+ if (i == CS_SUCCESS) break;
+ }
+ if (i != CS_SUCCESS) {
+ /* Fallback: turn off hard decode */
+ link->conf.ConfigIndex = 0x03;
+ link->io.NumPorts2 = 0;
+ i = CardServices(RequestIO, link->handle, &link->io);
+ }
+ dev->base_addr = link->io.BasePort1 + 0x10;
+ return i;
+}
+
+static int osi_setup(dev_link_t *link, u_short manfid, u_short cardid)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ u_char buf[255];
+ int i;
+
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+
+ /* Read the station address from tuple 0x90, subtuple 0x04 */
+ tuple.DesiredTuple = 0x90;
+ i = CardServices(GetFirstTuple, handle, &tuple);
+ while (i == CS_SUCCESS) {
+ i = CardServices(GetTupleData, handle, &tuple);
+ if ((i != CS_SUCCESS) || (buf[0] == 0x04))
+ break;
+ i = CardServices(GetNextTuple, handle, &tuple);
+ }
+ if (i != CS_SUCCESS)
+ return -1;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = buf[i+2];
+
+ if (((manfid == MANFID_OSITECH) &&
+ (cardid == PRODID_OSITECH_SEVEN)) ||
+ ((manfid == MANFID_PSION) &&
+ (cardid == PRODID_PSION_NET100))) {
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < sizeof(__Xilinx7OD); i++) {
+ outb(__Xilinx7OD[i], link->io.BasePort1+2);
+ udelay(50);
+ }
+ } else if (manfid == MANFID_OSITECH) {
+ /* Make sure both functions are powered up */
+ set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
+ /* Now, turn on the interrupt for both card functions */
+ set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR);
+ DEBUG(2, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
+ inw(link->io.BasePort1 + OSITECH_AUI_PWR),
+ inw(link->io.BasePort1 + OSITECH_RESET_ISR));
+ }
+
+ return 0;
+}
+
+/*======================================================================
+
+ This verifies that the chip is some SMC91cXX variant, and returns
+ the revision code if successful. Otherwise, it returns -ENODEV.
+
+======================================================================*/
+
+static int check_sig(dev_link_t *link)
+{
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ int width;
+ u_short s;
+
+ SMC_SELECT_BANK(1);
+ if (inw(ioaddr + BANK_SELECT) >> 8 != 0x33) {
+ /* Try powering up the chip */
+ outw(0, ioaddr + CONTROL);
+ mdelay(55);
+ }
+
+ /* Try setting bus width */
+ width = (link->io.Attributes1 == IO_DATA_PATH_WIDTH_AUTO);
+ s = inb(ioaddr + CONFIG);
+ if (width)
+ s |= CFG_16BIT;
+ else
+ s &= ~CFG_16BIT;
+ outb(s, ioaddr + CONFIG);
+
+ /* Check Base Address Register to make sure bus width is OK */
+ s = inw(ioaddr + BASE_ADDR);
+ if ((inw(ioaddr + BANK_SELECT) >> 8 == 0x33) &&
+ ((s >> 8) != (s & 0xff))) {
+ SMC_SELECT_BANK(3);
+ s = inw(ioaddr + REVISION);
+ return (s & 0xff);
+ }
+
+ if (width) {
+ event_callback_args_t args;
+ printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n");
+ args.client_data = link;
+ smc91c92_event(CS_EVENT_RESET_PHYSICAL, 0, &args);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ CardServices(RequestIO, link->handle, &link->io);
+ smc91c92_event(CS_EVENT_CARD_RESET, 0, &args);
+ return check_sig(link);
+ }
+ return -ENODEV;
+}
+
+/*======================================================================
+
+ smc91c92_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ ethernet device available to the system.
+
+======================================================================*/
+
+#define CS_EXIT_TEST(ret, svc, label) \
+if (ret != CS_SUCCESS) { cs_error(link->handle, svc, ret); goto label; }
+
+static void smc91c92_config(dev_link_t *link)
+{
+ client_handle_t handle = link->handle;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_short buf[32];
+ char *name;
+ int i, j, rev;
+ ioaddr_t ioaddr;
+
+ DEBUG(0, "smc91c92_config(0x%p)\n", link);
+
+ tuple.Attributes = tuple.TupleOffset = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ i = first_tuple(handle, &tuple, &parse);
+ CS_EXIT_TEST(i, ParseTuple, config_failed);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ tuple.DesiredTuple = CISTPL_MANFID;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ if (first_tuple(handle, &tuple, &parse) == CS_SUCCESS) {
+ smc->manfid = parse.manfid.manf;
+ smc->cardid = parse.manfid.card;
+ }
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ i = osi_config(link);
+ } else if ((smc->manfid == MANFID_MOTOROLA) ||
+ ((smc->manfid == MANFID_MEGAHERTZ) &&
+ ((smc->cardid == PRODID_MEGAHERTZ_VARIOUS) ||
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288)))) {
+ i = mhz_mfc_config(link);
+ } else {
+ i = smc_config(link);
+ }
+ CS_EXIT_TEST(i, RequestIO, config_failed);
+
+ i = CardServices(RequestIRQ, link->handle, &link->irq);
+ CS_EXIT_TEST(i, RequestIRQ, config_failed);
+ i = CardServices(RequestConfiguration, link->handle, &link->conf);
+ CS_EXIT_TEST(i, RequestConfiguration, config_failed);
+
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+
+ dev->irq = link->irq.AssignedIRQ;
+
+ if ((if_port >= 0) && (if_port <= 2))
+ dev->if_port = if_port;
+ else
+ printk(KERN_NOTICE "smc91c92_cs: invalid if_port requested\n");
+
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
+ goto config_undo;
+ }
+
+ switch (smc->manfid) {
+ case MANFID_OSITECH:
+ case MANFID_PSION:
+ i = osi_setup(link, smc->manfid, smc->cardid); break;
+ case MANFID_SMC:
+ case MANFID_NEW_MEDIA:
+ i = smc_setup(link); break;
+ case 0x128: /* For broken Megahertz cards */
+ case MANFID_MEGAHERTZ:
+ i = mhz_setup(link); break;
+ case MANFID_MOTOROLA:
+ default: /* get the hw address from EEPROM */
+ i = mot_setup(link); break;
+ }
+
+ if (i != 0) {
+ printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
+ goto config_undo;
+ }
+
+ copy_dev_name(smc->node, dev);
+ link->dev = &smc->node;
+ smc->duplex = 0;
+ smc->rx_ovrn = 0;
+
+ rev = check_sig(link);
+ name = "???";
+ if (rev > 0)
+ switch (rev >> 4) {
+ case 3: name = "92"; break;
+ case 4: name = ((rev & 15) >= 6) ? "96" : "94"; break;
+ case 5: name = "95"; break;
+ case 7: name = "100"; break;
+ case 8: name = "100-FD"; break;
+ case 9: name = "110"; break;
+ }
+ printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
+ "hw_addr ", dev->name, name, (rev & 0x0f), dev->base_addr,
+ dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n"));
+
+ ioaddr = dev->base_addr;
+ if (rev > 0) {
+ u_long mir, mcr;
+ SMC_SELECT_BANK(0);
+ mir = inw(ioaddr + MEMINFO) & 0xff;
+ if (mir == 0xff) mir++;
+ /* Get scale factor for memory size */
+ mcr = ((rev >> 4) > 3) ? inw(ioaddr + MEMCFG) : 0x0200;
+ mir *= 128 * (1<<((mcr >> 9) & 7));
+ if (mir & 0x3ff)
+ printk(KERN_INFO " %lu byte", mir);
+ else
+ printk(KERN_INFO " %lu kb", mir>>10);
+ SMC_SELECT_BANK(1);
+ smc->cfg = inw(ioaddr + CONFIG) & ~CFG_AUI_SELECT;
+ smc->cfg |= CFG_NO_WAIT | CFG_16BIT | CFG_STATIC;
+ if (smc->manfid == MANFID_OSITECH)
+ smc->cfg |= CFG_IRQ_SEL_1 | CFG_IRQ_SEL_0;
+ if ((rev >> 4) >= 7)
+ smc->cfg |= CFG_MII_SELECT;
+ printk(" buffer, %s xcvr\n", (smc->cfg & CFG_MII_SELECT) ?
+ "MII" : if_names[dev->if_port]);
+ }
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ for (i = 0; i < 32; i++) {
+ j = mdio_read(dev, i, 1);
+ if ((j != 0) && (j != 0xffff)) break;
+ }
+ smc->phy_id = (i < 32) ? i : -1;
+ if (i < 32) {
+ DEBUG(0, " MII transceiver at index %d, status %x.\n", i, j);
+ } else {
+ printk(KERN_NOTICE " No MII transceivers found!\n");
+ }
+
+ SMC_SELECT_BANK(0);
+ }
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+config_undo:
+ unregister_netdev(dev);
+config_failed: /* CS_EXIT_TEST() calls jump to here... */
+ smc91c92_release((u_long)link);
+ link->state &= ~DEV_CONFIG_PENDING;
+
+} /* smc91c92_config */
+
+/*======================================================================
+
+ After a card is removed, smc91c92_release() will unregister the net
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+======================================================================*/
+
+static void smc91c92_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+ struct smc_private *smc = link->priv;
+
+ DEBUG(0, "smc91c92_release(0x%p)\n", link);
+
+ if (link->open) {
+ DEBUG(1, "smc91c92_cs: release postponed, '%s' still open\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+ if (link->win) {
+ iounmap(smc->base);
+ CardServices(ReleaseWindow, link->win);
+ }
+
+ link->state &= ~DEV_CONFIG;
+
+} /* smc91c92_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received. A CARD_REMOVAL event
+ also sets some flags to discourage the net drivers from trying
+ to talk to the card any more.
+
+======================================================================*/
+
+static int smc91c92_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ struct smc_private *smc = link->priv;
+ struct net_device *dev = &smc->dev;
+ int i;
+
+ DEBUG(1, "smc91c92_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ smc91c92_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open)
+ netif_device_detach(dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ if ((smc->manfid == MANFID_MEGAHERTZ) &&
+ (smc->cardid == PRODID_MEGAHERTZ_EM3288))
+ mhz_3288_power(link);
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (smc->manfid == MANFID_MOTOROLA)
+ mot_config(link);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Power up the card and enable interrupts */
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR);
+ set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR);
+ }
+ if (((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid == PRODID_OSITECH_SEVEN)) ||
+ ((smc->manfid == MANFID_PSION) &&
+ (smc->cardid == PRODID_PSION_NET100))) {
+ /* Download the Seven of Diamonds firmware */
+ for (i = 0; i < sizeof(__Xilinx7OD); i++) {
+ outb(__Xilinx7OD[i], link->io.BasePort1+2);
+ udelay(50);
+ }
+ }
+ if (link->open) {
+ smc_reset(dev);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* smc91c92_event */
+
+/*======================================================================
+
+ MII interface support for SMC91cXX based cards
+======================================================================*/
+
+#define MDIO_SHIFT_CLK 0x04
+#define MDIO_DATA_OUT 0x01
+#define MDIO_DIR_WRITE 0x08
+#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ 0x02
+
+static void mdio_sync(ioaddr_t addr)
+{
+ int bits;
+ for (bits = 0; bits < 32; bits++) {
+ outb(MDIO_DATA_WRITE1, addr);
+ outb(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+ }
+}
+
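+ /* Bit-banged MII read: sync preamble, 14-bit read command, then
+    clock in the 16 data bits. */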
+static int mdio_read(struct net_device *dev, int phy_id, int loc)
+{
+ ioaddr_t addr = dev->base_addr + MGMT;
+ u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+ int i, retval = 0;
+
+ mdio_sync(addr);
+ for (i = 13; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 19; i > 0; i--) {
+ outb(0, addr);
+ retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
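+ /* Bit-banged MII write: sync preamble, 32-bit write frame, then
+    two idle clocks. */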
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+{
+ ioaddr_t addr = dev->base_addr + MGMT;
+ u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+ int i;
+
+ mdio_sync(addr);
+ for (i = 31; i >= 0; i--) {
+ int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outb(dat, addr);
+ outb(dat | MDIO_SHIFT_CLK, addr);
+ }
+ for (i = 1; i >= 0; i--) {
+ outb(0, addr);
+ outb(MDIO_SHIFT_CLK, addr);
+ }
+}
+
+/*======================================================================
+
+ The driver core code, most of which should be common with a
+ non-PCMCIA implementation.
+
+======================================================================*/
+
+#ifdef PCMCIA_DEBUG
+static void smc_dump(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short i, w, save;
+ save = inw(ioaddr + BANK_SELECT);
+ for (w = 0; w < 4; w++) {
+ SMC_SELECT_BANK(w);
+ printk(KERN_DEBUG "bank %d: ", w);
+ for (i = 0; i < 14; i += 2)
+ printk(" %04x", inw(ioaddr + i));
+ printk("\n");
+ }
+ outw(save, ioaddr + BANK_SELECT);
+}
+#endif
+
+static int smc_open(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ dev_link_t *link = &smc->link;
+
+#ifdef PCMCIA_DEBUG
+ DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n",
+ dev->name, dev, inw(dev->base_addr + BANK_SELECT));
+ if (pc_debug > 1) smc_dump(dev);
+#endif
+
+ /* Check that the PCMCIA card is still here. */
+ if (!DEV_OK(link))
+ return -ENODEV;
+ /* Physical device present signature. */
+ if (check_sig(link) < 0) {
+ printk("smc91c92_cs: Yikes! Bad chip signature!\n");
+ return -ENODEV;
+ }
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+ smc->saved_skb = 0;
+ smc->packets_waiting = 0;
+
+ smc_reset(dev);
+ smc->media.function = &media_check;
+ smc->media.data = (u_long)smc;
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+
+ return 0;
+} /* smc_open */
+
+/*====================================================================*/
+
+static int smc_close(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ dev_link_t *link = &smc->link;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(0, "%s: smc_close(), status %4.4x.\n",
+ dev->name, inw(ioaddr + BANK_SELECT));
+
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ /* Shut off all interrupts, and turn off the Tx and Rx sections.
+ Don't bother to check for chip present. */
+ SMC_SELECT_BANK(2); /* Nominally paranoia, but do not assume... */
+ outw(0, ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ mask_bits(0xff00, ioaddr + RCR);
+ mask_bits(0xff00, ioaddr + TCR);
+
+ /* Put the chip into power-down mode. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_POWERDOWN, ioaddr + CONTROL );
+
+ link->open--;
+ del_timer(&smc->media);
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+} /* smc_close */
+
+static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct smc_private *smc = dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ ushort saved_bank;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ if (!(smc->cfg & CFG_MII_SELECT))
+ return -EOPNOTSUPP;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(3);
+
+ switch (cmd) {
+ case SIOCDEVPRIVATE:
+ data[0] = smc->phy_id;
+ case SIOCDEVPRIVATE+1:
+ data[3] = mdio_read(dev, data[0], data[1] & 0x1f);
+ SMC_SELECT_BANK(saved_bank);
+ return 0;
+ case SIOCDEVPRIVATE+2:
+ if (!capable(CAP_NET_ADMIN)) {
+ SMC_SELECT_BANK(saved_bank);
+ return -EPERM;
+ }
+ mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
+ SMC_SELECT_BANK(saved_bank);
+ return 0;
+ }
+ SMC_SELECT_BANK(saved_bank);
+ return -EOPNOTSUPP;
+}
+/*======================================================================
+
+ Transfer a packet to the hardware and trigger the packet send.
+ This may be called from either the Tx queue code
+ or the interrupt handler.
+
+======================================================================*/
+
+static void smc_hardware_send_packet(struct net_device * dev)
+{
+ struct smc_private *smc = dev->priv;
+ struct sk_buff *skb = smc->saved_skb;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_char packet_no;
+
+ if (!skb) {
+ printk(KERN_ERR "%s: In XMIT with no packet to send.\n", dev->name);
+ return;
+ }
+
+ /* There should be a packet slot waiting. */
+ packet_no = inw(ioaddr + PNR_ARR) >> 8;
+ if (packet_no & 0x80) {
+ /* If not, there is a hardware problem! Likely an ejected card. */
+ printk(KERN_WARNING "%s: 91c92 hardware Tx buffer allocation"
+ " failed, status %#2.2x.\n", dev->name, packet_no);
+ dev_kfree_skb_irq(skb);
+ smc->saved_skb = NULL;
+ netif_start_queue(dev);
+ return;
+ }
+
+ add_tx_bytes(&smc->stats, skb->len);
+ /* The card should use the just-allocated buffer. */
+ outw(packet_no, ioaddr + PNR_ARR);
+ /* point to the beginning of the packet */
+ outw(PTR_AUTOINC , ioaddr + POINTER);
+
+ /* Send the packet length (+6 for status, length and ctl byte)
+ and the status word (set to zeros). */
+ {
+ u_char *buf = skb->data;
+ u_int length = skb->len; /* The chip will pad to ethernet min. */
+
+ DEBUG(2, "%s: Trying to xmit packet of length %d.\n",
+ dev->name, length);
+
+ /* send the packet length: +6 for status word, length, and ctl */
+ outw(0, ioaddr + DATA_1);
+ outw(length + 6, ioaddr + DATA_1);
+ outsw(ioaddr + DATA_1, buf, length >> 1);
+
+ /* The odd last byte, if there is one, goes in the control word. */
+ outw((length & 1) ? 0x2000 | buf[length-1] : 0, ioaddr + DATA_1);
+ }
+
+ /* Enable the Tx interrupts, both Tx (TxErr) and TxEmpty. */
+ outw(((IM_TX_INT|IM_TX_EMPTY_INT)<<8) |
+ (inw(ioaddr + INTERRUPT) & 0xff00),
+ ioaddr + INTERRUPT);
+
+ /* The chip does the rest of the work. */
+ outw(MC_ENQUEUE , ioaddr + MMU_CMD);
+
+ smc->saved_skb = NULL;
+ dev_kfree_skb_irq(skb);
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+ return;
+}
+
+/*====================================================================*/
+
+static void smc_tx_timeout(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, "
+ "Tx_status %2.2x status %4.4x.\n",
+ dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
+ smc->stats.tx_errors++;
+ smc_reset(dev);
+ dev->trans_start = jiffies;
+ smc->saved_skb = NULL;
+ netif_wake_queue(dev);
+}
+
+static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short num_pages;
+ short time_out, ir;
+
+ tx_timeout_check(dev, smc_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ DEBUG(2, "%s: smc_start_xmit(length = %ld) called,"
+ " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2));
+
+ if (smc->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ smc->stats.tx_aborted_errors++;
+ printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n",
+ dev->name);
+ return 1;
+ }
+ smc->saved_skb = skb;
+
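+ /* The chip's MMU allocates Tx memory in 256-byte pages, hence the shift. */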
+ num_pages = skb->len >> 8;
+
+ if (num_pages > 7) {
+ printk(KERN_ERR "%s: Far too big packet error.\n", dev->name);
+ DEV_KFREE_SKB (skb);
+ smc->saved_skb = NULL;
+ smc->stats.tx_dropped++;
+ return 0; /* Do not re-queue this packet. */
+ }
+ /* A packet is now waiting. */
+ smc->packets_waiting++;
+
+ SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+
+ /* Allocate the memory; send the packet now if we win. */
+ outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD);
+ for (time_out = MEMORY_WAIT_TIME; time_out >= 0; time_out--) {
+ ir = inw(ioaddr+INTERRUPT);
+ if (ir & IM_ALLOC_INT) {
+ /* Acknowledge the interrupt, send the packet. */
+ outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT);
+ smc_hardware_send_packet(dev); /* Send the packet now.. */
+ return 0;
+ }
+ }
+
+ /* Otherwise defer until the Tx-space-allocated interrupt. */
+ DEBUG(2, "%s: memory allocation deferred.\n", dev->name);
+ outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
+
+ return 0;
+}
+
+/*======================================================================
+
+ Handle an anomalous Tx event. Entered while in Window 2.
+
+======================================================================*/
+
+static void smc_tx_err(struct net_device * dev)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int saved_packet = inw(ioaddr + PNR_ARR) & 0xff;
+ int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f;
+ int tx_status;
+
+ /* select this as the packet to read from */
+ outw(packet_no, ioaddr + PNR_ARR);
+
+ /* read the first word from this packet */
+ outw(PTR_AUTOINC | PTR_READ | 0, ioaddr + POINTER);
+
+ tx_status = inw(ioaddr + DATA_1);
+
+ smc->stats.tx_errors++;
+ if (tx_status & TS_LOSTCAR) smc->stats.tx_carrier_errors++;
+ if (tx_status & TS_LATCOL) smc->stats.tx_window_errors++;
+ if (tx_status & TS_16COL) {
+ smc->stats.tx_aborted_errors++;
+ smc->tx_err++;
+ }
+
+ if (tx_status & TS_SUCCESS) {
+ printk(KERN_NOTICE "%s: Successful packet caused error "
+ "interrupt?\n", dev->name);
+ }
+ /* re-enable transmit */
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+ SMC_SELECT_BANK(2);
+
+ outw(MC_FREEPKT, ioaddr + MMU_CMD); /* Free the packet memory. */
+
+ /* one less packet waiting for me */
+ smc->packets_waiting--;
+
+ outw(saved_packet, ioaddr + PNR_ARR);
+ return;
+}
+
+/*====================================================================*/
+
+static void smc_eph_irq(struct net_device *dev)
+{
+ struct smc_private *smc = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short card_stats, ephs;
+
+ SMC_SELECT_BANK(0);
+ ephs = inw(ioaddr + EPH);
+ DEBUG(2, "%s: Ethernet protocol handler interrupt, status"
+ " %4.4x.\n", dev->name, ephs);
+ /* Could be a counter roll-over warning: update stats. */
+ card_stats = inw(ioaddr + COUNTER);
+ /* single collisions */
+ smc->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ smc->stats.collisions += card_stats & 0xF;
+#if 0 /* These are for when linux supports these statistics */
+ card_stats >>= 4; /* deferred */
+ card_stats >>= 4; /* excess deferred */
+#endif
+ /* If we had a transmit error we must re-enable the transmitter. */
+ outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR);
+
+ /* Clear a link error interrupt. */
+ SMC_SELECT_BANK(1);
+ outw(CTL_AUTO_RELEASE | 0x0000, ioaddr + CONTROL);
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ SMC_SELECT_BANK(2);
+}
+
+/*====================================================================*/
+
+static void smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct smc_private *smc = dev_id;
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr;
+ u_short saved_bank, saved_pointer, mask, status;
+ char bogus_cnt = INTR_WORK; /* Work we are willing to do. */
+
+ if (!netif_device_present(dev))
+ return;
+ ioaddr = dev->base_addr;
+
+ DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
+ irq, ioaddr);
+
+ smc->watchdog = 0;
+ saved_bank = inw(ioaddr + BANK_SELECT);
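+ /* The high byte of the bank select register reads 0x33 whenever the
+ chip is present; anything else means the card is gone. */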
+ if ((saved_bank & 0xff00) != 0x3300) {
+ /* The device does not exist -- the card could be off-line, or
+ maybe it has been ejected. */
+ DEBUG(1, "%s: SMC91c92 interrupt %d for non-existent"
+ "/ejected device.\n", dev->name, irq);
+ goto irq_done;
+ }
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw(ioaddr + POINTER);
+ mask = inw(ioaddr + INTERRUPT) >> 8;
+ /* clear all interrupts */
+ outw(0, ioaddr + INTERRUPT);
+
+ do { /* read the status flag, and mask it */
+ status = inw(ioaddr + INTERRUPT) & 0xff;
+ DEBUG(3, "%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
+ status, mask);
+ if ((status & mask) == 0)
+ break;
+
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ smc_rx(dev);
+ }
+ if (status & IM_TX_INT) {
+ smc_tx_err(dev);
+ outw(IM_TX_INT, ioaddr + INTERRUPT);
+ }
+ status &= mask;
+ if (status & IM_TX_EMPTY_INT) {
+ outw(IM_TX_EMPTY_INT, ioaddr + INTERRUPT);
+ mask &= ~IM_TX_EMPTY_INT;
+ smc->stats.tx_packets += smc->packets_waiting;
+ smc->packets_waiting = 0;
+ }
+ if (status & IM_ALLOC_INT) {
+ /* Clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ smc_hardware_send_packet(dev);
+
+ /* enable xmit interrupts based on this */
+ mask |= (IM_TX_EMPTY_INT | IM_TX_INT);
+
+ /* and let the card send more packets to me */
+ netif_wake_queue(dev);
+ }
+ if (status & IM_RX_OVRN_INT) {
+ smc->stats.rx_errors++;
+ smc->stats.rx_fifo_errors++;
+ if (smc->duplex)
+ smc->rx_ovrn = 1; /* need MC_RESET outside smc_interrupt */
+ outw(IM_RX_OVRN_INT, ioaddr + INTERRUPT);
+ }
+ if (status & IM_EPH_INT)
+ smc_eph_irq(dev);
+ } while (--bogus_cnt);
+
+ DEBUG(3, " Restoring saved registers mask %2.2x bank %4.4x"
+ " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
+
+ /* restore state register */
+ outw((mask<<8), ioaddr + INTERRUPT);
+ outw(saved_pointer, ioaddr + POINTER);
+ SMC_SELECT_BANK(saved_bank);
+
+ DEBUG(3, "%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
+
+irq_done:
+
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN)) {
+ /* Retrigger interrupt if needed */
+ mask_bits(0x00ff, ioaddr-0x10+OSITECH_RESET_ISR);
+ set_bits(0x0300, ioaddr-0x10+OSITECH_RESET_ISR);
+ }
+ if (smc->manfid == MANFID_MOTOROLA) {
+ u_char cor;
+ cor = readb(smc->base + MOT_UART + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_UART + CISREG_COR);
+ writeb(cor, smc->base + MOT_UART + CISREG_COR);
+ cor = readb(smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR);
+ writeb(cor, smc->base + MOT_LAN + CISREG_COR);
+ }
+#ifdef DOES_NOT_WORK
+ if (smc->base != NULL) { /* Megahertz MFC's */
+ readb(smc->base+MEGAHERTZ_ISR);
+ readb(smc->base+MEGAHERTZ_ISR);
+ }
+#endif
+}
+
+/*====================================================================*/
+
+static void smc_rx(struct net_device *dev)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int rx_status;
+ int packet_length; /* Caution: not the frame length; this is the
+ chip's byte count, including its header. */
+
+ /* Assertion: we are in Window 2. */
+
+ if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
+ printk(KERN_ERR "%s: smc_rx() with nothing on Rx FIFO.\n",
+ dev->name);
+ return;
+ }
+
+ /* Reset the read pointer, and read the status and packet length. */
+ outw(PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER);
+ rx_status = inw(ioaddr + DATA_1);
+ packet_length = inw(ioaddr + DATA_1) & 0x07ff;
+
+ DEBUG(2, "%s: Receive status %4.4x length %d.\n",
+ dev->name, rx_status, packet_length);
+
+ if (!(rx_status & RS_ERRORS)) {
+ /* do stuff to make a new packet */
+ struct sk_buff *skb;
+
+ /* Note: packet_length still includes 5 or 6 bytes of chip header here! */
+ skb = dev_alloc_skb(packet_length+2);
+
+ if (skb == NULL) {
+ DEBUG(1, "%s: Low memory, packet dropped.\n", dev->name);
+ smc->stats.rx_dropped++;
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+ return;
+ }
+
+ packet_length -= (rx_status & RS_ODDFRAME ? 5 : 6);
+ skb_reserve(skb, 2);
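+ /* Copy in the data, rounded up to a whole number of 16-bit words. */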
+ insw(ioaddr+DATA_1, skb_put(skb, packet_length),
+ (packet_length+1)>>1);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ skb->dev = dev;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ smc->stats.rx_packets++;
+ add_rx_bytes(&smc->stats, packet_length);
+ if (rx_status & RS_MULTICAST)
+ smc->stats.multicast++;
+ } else {
+ /* error ... */
+ smc->stats.rx_errors++;
+
+ if (rx_status & RS_ALGNERR) smc->stats.rx_frame_errors++;
+ if (rx_status & (RS_TOOSHORT | RS_TOOLONG))
+ smc->stats.rx_length_errors++;
+ if (rx_status & RS_BADCRC) smc->stats.rx_crc_errors++;
+ }
+ /* Let the MMU free the memory of this packet. */
+ outw(MC_RELEASE, ioaddr + MMU_CMD);
+
+ return;
+}
+
+/*====================================================================*/
+
+static struct net_device_stats *smc_get_stats(struct net_device *dev)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ /* Nothing to update - the 91c92 is a pretty primitive chip. */
+ return &smc->stats;
+}
+
+/*======================================================================
+
+ Calculate values for the hardware multicast filter hash table.
+
+======================================================================*/
+
+static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
+ u_char *multicast_table)
+{
+ struct dev_mc_list *mc_addr;
+
+ for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) {
+ u_int position = ether_crc(6, mc_addr->dmi_addr);
+#ifndef final_version /* Verify multicast address. */
+ if ((mc_addr->dmi_addr[0] & 1) == 0)
+ continue;
+#endif
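+ /* The top 3 bits of the CRC select the table byte, the next
+ 3 bits select the bit within that byte. */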
+ multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
+ }
+}
+
+/*======================================================================
+
+ Set the receive mode.
+
+ This routine is used by both the protocol level to notify us of
+ promiscuous/multicast mode changes, and by the open/reset code to
+ initialize the Rx registers. We always set the multicast list and
+ leave the receiver running.
+
+======================================================================*/
+
+static void set_rx_mode(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ u_int multicast_table[ 2 ] = { 0, };
+ unsigned long flags;
+ u_short rx_cfg_setting;
+
+ if (dev->flags & IFF_PROMISC) {
+ printk(KERN_NOTICE "%s: setting Rx mode to promiscuous.\n", dev->name);
+ rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
+ } else if (dev->flags & IFF_ALLMULTI)
+ rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
+ else {
+ if (dev->mc_count) {
+ fill_multicast_tbl(dev->mc_count, dev->mc_list,
+ (u_char *)multicast_table);
+ }
+ rx_cfg_setting = RxStripCRC | RxEnable;
+ }
+
+ /* Load MC table and Rx setting into the chip without interrupts. */
+ save_flags(flags);
+ cli();
+ SMC_SELECT_BANK(3);
+ outl(multicast_table[0], ioaddr + MULTICAST0);
+ outl(multicast_table[1], ioaddr + MULTICAST4);
+ SMC_SELECT_BANK(0);
+ outw(rx_cfg_setting, ioaddr + RCR);
+ SMC_SELECT_BANK(2);
+ restore_flags(flags);
+
+ return;
+}
+
+/*======================================================================
+
+ Senses when a card's config changes. Here, it's coax or TP.
+
+======================================================================*/
+
+static int s9k_config(struct net_device *dev, struct ifmap *map)
+{
+ struct smc_private *smc = dev->priv;
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (smc->cfg & CFG_MII_SELECT)
+ return -EOPNOTSUPP;
+ else if (map->port > 2)
+ return -EINVAL;
+ dev->if_port = map->port;
+ printk(KERN_INFO "%s: switched to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ smc_reset(dev);
+ }
+ return 0;
+}
+
+/*======================================================================
+
+ Reset the chip, reloading every register that might be corrupted.
+
+======================================================================*/
+
+/*
+ Set transceiver type, perhaps to something other than what the user
+ specified in dev->if_port.
+*/
+static void smc_set_xcvr(struct net_device *dev, int if_port)
+{
+ struct smc_private *smc = (struct smc_private *)dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short saved_bank;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+ SMC_SELECT_BANK(1);
+ if (if_port == 2) {
+ outw(smc->cfg | CFG_AUI_SELECT, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ set_bits(OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ smc->media_status = ((dev->if_port == 0) ? 0x0001 : 0x0002);
+ } else {
+ outw(smc->cfg, ioaddr + CONFIG);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ mask_bits(~OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR);
+ smc->media_status = ((dev->if_port == 0) ? 0x0012 : 0x4001);
+ }
+ SMC_SELECT_BANK(saved_bank);
+}
+
+static void smc_reset(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ struct smc_private *smc = dev->priv;
+ int i;
+
+ DEBUG(0, "%s: smc91c92 reset called.\n", dev->name);
+
+ /* The first interaction must be a write to bring the chip out
+ of sleep mode. */
+ SMC_SELECT_BANK(0);
+ /* Reset the chip. */
+ outw(RCR_SOFTRESET, ioaddr + RCR);
+ udelay(10);
+
+ /* Clear the transmit and receive configuration registers. */
+ outw(RCR_CLEAR, ioaddr + RCR);
+ outw(TCR_CLEAR, ioaddr + TCR);
+
+ /* Set the Window 1 control, configuration and station addr registers.
+ No point in writing the I/O base register ;-> */
+ SMC_SELECT_BANK(1);
+ /* Automatically release successfully transmitted packets;
+ accept link error, counter, and Tx error interrupts. */
+ outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE,
+ ioaddr + CONTROL);
+ smc_set_xcvr(dev, dev->if_port);
+ if ((smc->manfid == MANFID_OSITECH) &&
+ (smc->cardid != PRODID_OSITECH_SEVEN))
+ outw((dev->if_port == 2 ? OSI_AUI_PWR : 0) |
+ (inw(ioaddr-0x10+OSITECH_AUI_PWR) & 0xff00),
+ ioaddr - 0x10 + OSITECH_AUI_PWR);
+
+ /* Fill in the physical address. The databook is wrong about the order! */
+ for (i = 0; i < 6; i += 2)
+ outw((dev->dev_addr[i+1]<<8)+dev->dev_addr[i],
+ ioaddr + ADDR0 + i);
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK(2);
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ outw(0, ioaddr + INTERRUPT);
+
+ /* Re-enable the chip. */
+ SMC_SELECT_BANK(0);
+ outw(((smc->cfg & CFG_MII_SELECT) ? 0 : TCR_MONCSN) |
+ TCR_ENABLE | TCR_PAD_EN | smc->duplex, ioaddr + TCR);
+ set_rx_mode(dev);
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ SMC_SELECT_BANK(3);
+
+ /* Reset MII */
+ mdio_write(dev, smc->phy_id, 0, 0x8000);
+
+ /* Advertise 100F, 100H, 10F, 10H */
+ mdio_write(dev, smc->phy_id, 4, 0x01e1);
+
+ /* Restart MII autonegotiation */
+ mdio_write(dev, smc->phy_id, 0, 0x0000);
+ mdio_write(dev, smc->phy_id, 0, 0x1200);
+ }
+
+ /* Enable interrupts. */
+ SMC_SELECT_BANK(2);
+ outw((IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) << 8,
+ ioaddr + INTERRUPT);
+}
+
+/*======================================================================
+
+ Media selection timer routine
+
+======================================================================*/
+
+static void media_check(u_long arg)
+{
+ struct smc_private *smc = (struct smc_private *)(arg);
+ struct net_device *dev = &smc->dev;
+ ioaddr_t ioaddr = dev->base_addr;
+ u_short i, media, saved_bank;
+ u_short link;
+
+ saved_bank = inw(ioaddr + BANK_SELECT);
+
+ if (!netif_device_present(dev))
+ goto reschedule;
+
+ SMC_SELECT_BANK(2);
+
+ /* need MC_RESET to keep the memory consistent. errata? */
+ if (smc->rx_ovrn) {
+ outw(MC_RESET, ioaddr + MMU_CMD);
+ smc->rx_ovrn = 0;
+ }
+ i = inw(ioaddr + INTERRUPT);
+ SMC_SELECT_BANK(0);
+ media = inw(ioaddr + EPH) & EPH_LINK_OK;
+ SMC_SELECT_BANK(1);
+ media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
+
+ /* Check for pending interrupt with watchdog flag set: with
+ this, we can limp along even if the interrupt is blocked */
+ if (smc->watchdog++ && ((i>>8) & i)) {
+ if (!smc->fast_poll)
+ printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+ smc_interrupt(dev->irq, smc, NULL);
+ smc->fast_poll = HZ;
+ }
+ if (smc->fast_poll) {
+ smc->fast_poll--;
+ smc->media.expires = jiffies + 1;
+ add_timer(&smc->media);
+ SMC_SELECT_BANK(saved_bank);
+ return;
+ }
+
+ if (smc->cfg & CFG_MII_SELECT) {
+ if (smc->phy_id < 0)
+ goto reschedule;
+
+ SMC_SELECT_BANK(3);
+ link = mdio_read(dev, smc->phy_id, 1);
+ if (!link || (link == 0xffff)) {
+ printk(KERN_INFO "%s: MII is missing!\n", dev->name);
+ smc->phy_id = -1;
+ goto reschedule;
+ }
+
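+ /* Bit 2 of MII register 1 (BMSR) is the link-status bit. */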
+ link &= 0x0004;
+ if (link != smc->link_status) {
+ u_short p = mdio_read(dev, smc->phy_id, 5);
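+ /* Register 5 is the link partner ability: full duplex if it
+ offers 100baseTX-FD (0x0100), or if 10baseT-FD (0x0040) is
+ the only ability set in bits 6-8. */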
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (link) ? "found" : "lost");
+ smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
+ ? TCR_FDUPLX : 0);
+ if (link) {
+ printk(KERN_INFO "%s: autonegotiation complete: "
+ "%sbaseT-%cD selected\n", dev->name,
+ ((p & 0x0180) ? "100" : "10"),
+ (smc->duplex ? 'F' : 'H'));
+ }
+ SMC_SELECT_BANK(0);
+ outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
+ smc->link_status = link;
+ }
+ goto reschedule;
+ }
+
+ /* Ignore collisions unless we've had no rx's recently */
+ if (jiffies - dev->last_rx > HZ) {
+ if (smc->tx_err || (smc->media_status & EPH_16COL))
+ media |= EPH_16COL;
+ }
+ smc->tx_err = 0;
+
+ if (media != smc->media_status) {
+ if ((media & smc->media_status & 1) &&
+ ((smc->media_status ^ media) & EPH_LINK_OK))
+ printk(KERN_INFO "%s: %s link beat\n", dev->name,
+ (smc->media_status & EPH_LINK_OK ? "lost" : "found"));
+ else if ((media & smc->media_status & 2) &&
+ ((smc->media_status ^ media) & EPH_16COL))
+ printk(KERN_INFO "%s: coax cable %s\n", dev->name,
+ (media & EPH_16COL ? "problem" : "ok"));
+ if (dev->if_port == 0) {
+ if (media & 1) {
+ if (media & EPH_LINK_OK)
+ printk(KERN_INFO "%s: flipped to 10baseT\n",
+ dev->name);
+ else
+ smc_set_xcvr(dev, 2);
+ } else {
+ if (media & EPH_16COL)
+ smc_set_xcvr(dev, 1);
+ else
+ printk(KERN_INFO "%s: flipped to 10base2\n",
+ dev->name);
+ }
+ }
+ smc->media_status = media;
+ }
+
+reschedule:
+ smc->media.expires = jiffies + HZ;
+ add_timer(&smc->media);
+ SMC_SELECT_BANK(saved_bank);
+}
+
+
+/*====================================================================*/
+
+static int __init init_smc91c92_cs(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_ERR
+ "smc91c92_cs: Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ register_pccard_driver(&dev_info, &smc91c92_attach, &smc91c92_detach);
+ return 0;
+}
+
+static void __exit exit_smc91c92_cs(void)
+{
+ DEBUG(0, "smc91c92_cs: unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list != NULL)
+ smc91c92_detach(dev_list);
+}
+
+module_init(init_smc91c92_cs);
+module_exit(exit_smc91c92_cs);
diff --git a/linux/pcmcia-cs/clients/xirc2ps_cs.c b/linux/pcmcia-cs/clients/xirc2ps_cs.c
new file mode 100644
index 0000000..9db947d
--- /dev/null
+++ b/linux/pcmcia-cs/clients/xirc2ps_cs.c
@@ -0,0 +1,2091 @@
+/* [xirc2ps_cs.c wk 03.11.99] (1.40 1999/11/18 00:06:03)
+ * Xircom CreditCard Ethernet Adapter IIps driver
+ * Xircom Realport 10/100 (RE-100) driver
+ *
+ * This driver supports various Xircom CreditCard Ethernet adapters
+ * including the CE2, CE IIps, RE-10, CEM28, CEM33, CE33, CEM56,
+ * CE3-100, CE3B, RE-100, REM10BT, and REM56G-100.
+ *
+ * 2000-09-24 <psheer@icon.co.za> The Xircom CE3B-100 may not
+ * autodetect the media properly. In this case use the
+ * if_port=1 (for 10BaseT) or if_port=4 (for 100BaseT) options
+ * to force the media type.
+ *
+ * Written originally by Werner Koch based on David Hinds' skeleton of the
+ * PCMCIA driver.
+ *
+ * Copyright (c) 1997,1998 Werner Koch (dd9jn)
+ *
+ * This driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * It is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ *
+ * ALTERNATIVELY, this driver may be distributed under the terms of
+ * the following license, in which case the provisions of this license
+ * are required INSTEAD OF the GNU General Public License. (This clause
+ * is necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+#ifndef MANFID_COMPAQ
+ #define MANFID_COMPAQ 0x0138
+ #define MANFID_COMPAQ2 0x0183 /* is this correct? */
+#endif
+
+#include <pcmcia/ds.h>
+
+/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/****************
+ * Some constants used to access the hardware
+ */
+
+/* Register offsets and value constants */
+#define XIRCREG_CR 0 /* Command register (wr) */
+enum xirc_cr {
+ TransmitPacket = 0x01,
+ SoftReset = 0x02,
+ EnableIntr = 0x04,
+ ForceIntr = 0x08,
+ ClearTxFIFO = 0x10,
+ ClearRxOvrun = 0x20,
+ RestartTx = 0x40
+};
+#define XIRCREG_ESR 0 /* Ethernet status register (rd) */
+enum xirc_esr {
+ FullPktRcvd = 0x01, /* full packet in receive buffer */
+ PktRejected = 0x04, /* a packet has been rejected */
+ TxPktPend = 0x08, /* TX Packet Pending */
+ IncorPolarity = 0x10,
+ MediaSelect = 0x20 /* set if TP, clear if AUI */
+};
+#define XIRCREG_PR 1 /* Page Register select */
+#define XIRCREG_EDP 4 /* Ethernet Data Port Register */
+#define XIRCREG_ISR 6 /* Ethernet Interrupt Status Register */
+enum xirc_isr {
+ TxBufOvr = 0x01, /* TX Buffer Overflow */
+ PktTxed = 0x02, /* Packet Transmitted */
+ MACIntr = 0x04, /* MAC Interrupt occurred */
+ TxResGrant = 0x08, /* Tx Reservation Granted */
+ RxFullPkt = 0x20, /* Rx Full Packet */
+ RxPktRej = 0x40, /* Rx Packet Rejected */
+ ForcedIntr= 0x80 /* Forced Interrupt */
+};
+#define XIRCREG1_IMR0 12 /* Ethernet Interrupt Mask Register (on page 1)*/
+#define XIRCREG1_IMR1 13
+#define XIRCREG0_TSO 8 /* Transmit Space Open Register (on page 0)*/
+#define XIRCREG0_TRS 10 /* Transmit reservation Size Register (page 0)*/
+#define XIRCREG0_DO 12 /* Data Offset Register (page 0) (wr) */
+#define XIRCREG0_RSR 12 /* Receive Status Register (page 0) (rd) */
+enum xirc_rsr {
+ PhyPkt = 0x01, /* set:physical packet, clear: multicast packet */
+ BrdcstPkt = 0x02, /* set if it is a broadcast packet */
+ PktTooLong = 0x04, /* set if packet length > 1518 */
+ AlignErr = 0x10, /* incorrect CRC and last octet not complete */
+ CRCErr = 0x20, /* incorrect CRC and last octet is complete */
+ PktRxOk = 0x80 /* received ok */
+};
+#define XIRCREG0_PTR 13 /* packets transmitted register (rd) */
+#define XIRCREG0_RBC 14 /* receive byte count register (rd) */
+#define XIRCREG1_ECR 14 /* ethernet configuration register */
+enum xirc_ecr {
+ FullDuplex = 0x04, /* enable full duplex mode */
+ LongTPMode = 0x08, /* adjust for longer lengths of TP cable */
+ DisablePolCor = 0x10,/* disable auto polarity correction */
+ DisableLinkPulse = 0x20, /* disable link pulse generation */
+ DisableAutoTx = 0x40, /* disable auto-transmit */
+};
+#define XIRCREG2_RBS 8 /* receive buffer start register */
+#define XIRCREG2_LED 10 /* LED Configuration register */
+/* Values for the LED configuration register:
+ * bits 2-0 select the function of LED 1, bits 5-3 that of LED 2.
+ * 0 disabled
+ * 1 collision
+ * 2 noncollision
+ * 3 link_detected
+ * 4 incor_polarity
+ * 5 jabber
+ * 6 auto_assertion
+ * 7 rx_tx_activity
+ */
+#define XIRCREG2_MSR 12 /* Mohawk specific register */
+
+#define XIRCREG4_GPR0 8 /* General Purpose Register 0 */
+#define XIRCREG4_GPR1 9 /* General Purpose Register 1 */
+#define XIRCREG2_GPR2 13 /* General Purpose Register 2 (page2!)*/
+#define XIRCREG4_BOV 10 /* Bonding Version Register */
+#define XIRCREG4_LMA 12 /* Local Memory Address Register */
+#define XIRCREG4_LMD 14 /* Local Memory Data Port */
+/* MAC registers can only be accessed with 8-bit operations */
+#define XIRCREG40_CMD0 8 /* Command Register (wr) */
+enum xirc_cmd { /* Commands */
+ Transmit = 0x01,
+ EnableRecv = 0x04,
+ DisableRecv = 0x08,
+ Abort = 0x10,
+ Online = 0x20,
+ IntrAck = 0x40,
+ Offline = 0x80
+};
+#define XIRCREG5_RHSA0 10 /* Rx Host Start Address */
+#define XIRCREG40_RXST0 9 /* Receive Status Register */
+#define XIRCREG40_TXST0 11 /* Transmit Status Register 0 */
+#define XIRCREG40_TXST1 12 /* Transmit Status Register 1 */
+#define XIRCREG40_RMASK0 13 /* Receive Mask Register */
+#define XIRCREG40_TMASK0 14 /* Transmit Mask Register 0 */
+#define XIRCREG40_TMASK1 15 /* Transmit Mask Register 1 */
+#define XIRCREG42_SWC0 8 /* Software Configuration 0 */
+#define XIRCREG42_SWC1 9 /* Software Configuration 1 */
+#define XIRCREG42_BOC 10 /* Back-Off Configuration */
+#define XIRCREG44_TDR0 8 /* Time Domain Reflectometry 0 */
+#define XIRCREG44_TDR1 9 /* Time Domain Reflectometry 1 */
+#define XIRCREG44_RXBC_LO 10 /* Rx Byte Count 0 (rd) */
+#define XIRCREG44_RXBC_HI 11 /* Rx Byte Count 1 (rd) */
+#define XIRCREG45_REV 15 /* Revision Register (rd) */
+#define XIRCREG50_IA 8 /* Individual Address (8-13) */
+
+static char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
+
+/****************
+ * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ * you do not define PCMCIA_DEBUG at all, all the debug code will be
+ * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ * be present but disabled -- but it can then be enabled for specific
+ * modules at load time with a 'pc_debug=#' option to insmod.
+ */
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+MODULE_PARM(pc_debug, "i");
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KDBG_XIRC args)
+#else
+#define DEBUG(n, args...)
+#endif
+static char *version =
+"xirc2ps_cs.c 1.31 1998/12/09 19:32:55 (dd9jn+kvh)";
+ /* !--- CVS revision */
+#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
+#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
+#define KWRN_XIRC KERN_WARNING "xirc2ps_cs: "
+#define KNOT_XIRC KERN_NOTICE "xirc2ps_cs: "
+#define KINF_XIRC KERN_INFO "xirc2ps_cs: "
+
+/* card types */
+#define XIR_UNKNOWN 0 /* unknown: not supported */
+#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
+#define XIR_CE2 2 /* (prodid 2) */
+#define XIR_CE3 3 /* (prodid 3) */
+#define XIR_CEM 4 /* (prodid 1) different hardware: not supported */
+#define XIR_CEM2 5 /* (prodid 2) */
+#define XIR_CEM3 6 /* (prodid 3) */
+#define XIR_CEM33 7 /* (prodid 4) */
+#define XIR_CEM56M 8 /* (prodid 5) */
+#define XIR_CEM56 9 /* (prodid 6) */
+#define XIR_CM28 10 /* (prodid 3) modem only: not supported here */
+#define XIR_CM33 11 /* (prodid 4) modem only: not supported here */
+#define XIR_CM56 12 /* (prodid 5) modem only: not supported here */
+#define XIR_CG 13 /* (prodid 1) GSM modem only: not supported */
+#define XIR_CBE 14 /* (prodid 1) cardbus ethernet: not supported */
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_DESCRIPTION("Xircom PCMCIA ethernet driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+static int irq_list[4] = { -1 };
+MODULE_PARM(irq_list, "1-4i");
+INT_MODULE_PARM(irq_mask, 0xdeb8);
+INT_MODULE_PARM(if_port, 0);
+INT_MODULE_PARM(full_duplex, 0);
+INT_MODULE_PARM(do_sound, 1);
+INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */
+
+/*====================================================================*/
+
+/* We do not process more than this number of bytes during one
+ * interrupt. (Of course we receive complete packets, so this is not
+ * an exact limit.)
+ * Anything between 2000 and 22000 is reasonable; the low end gives the
+ * best interrupt latency, the high end allows the complete on-chip
+ * buffer to be used. We use the high value as the initial value.
+ */
+static unsigned maxrx_bytes = 22000;
+
+/* MII management prototypes */
+static void mii_idle(ioaddr_t ioaddr);
+static void mii_putbit(ioaddr_t ioaddr, unsigned data);
+static int mii_getbit(ioaddr_t ioaddr);
+static void mii_wbits(ioaddr_t ioaddr, unsigned data, int len);
+static unsigned mii_rd(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg);
+static void mii_wr(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg,
+ unsigned data, int len);
+
+/*
+ * The event() function is this driver's Card Services event handler.
+ * It will be called by Card Services when an appropriate card status
+ * event is received. The config() and release() entry points are
+ * used to configure or release a socket, in response to card insertion
+ * and ejection events. They are invoked from the event handler.
+ */
+
+static int has_ce2_string(dev_link_t * link);
+static void xirc2ps_config(dev_link_t * link);
+static void xirc2ps_release(u_long arg);
+static int xirc2ps_event(event_t event, int priority,
+ event_callback_args_t * args);
+
+/****************
+ * The attach() and detach() entry points are used to create and destroy
+ * "instances" of the driver, where each instance represents everything
+ * needed to manage one actual PCMCIA card.
+ */
+
+static dev_link_t *xirc2ps_attach(void);
+static void xirc2ps_detach(dev_link_t *);
+
+/****************
+ * You'll also need to prototype all the functions that will actually
+ * be used to talk to your device. See 'pcmem_cs' for a good example
+ * of a fully self-sufficient driver; the other drivers rely more or
+ * less on other parts of the kernel.
+ */
+
+static void xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+/*
+ * The dev_info variable is the "key" that is used to match up this
+ * device driver with appropriate cards, through the card configuration
+ * database.
+ */
+
+static dev_info_t dev_info = "xirc2ps_cs";
+
+/****************
+ * A linked list of "instances" of the device. Each actual
+ * PCMCIA card corresponds to one device instance, and is described
+ * by one dev_link_t structure (defined in ds.h).
+ *
+ * You may not want to use a linked list for this -- for example, the
+ * memory card driver uses an array of dev_link_t pointers, where minor
+ * device numbers are used to derive the corresponding array index.
+ */
+
+static dev_link_t *dev_list = NULL;
+
+/****************
+ * A dev_link_t structure has fields for most things that are needed
+ * to keep track of a socket, but there will usually be some device
+ * specific information that also needs to be kept track of. The
+ * 'priv' pointer in a dev_link_t structure can be used to point to
+ * a device-specific private data structure, like this.
+ *
+ * A driver needs to provide a dev_node_t structure for each device
+ * on a card. In some cases, there is only one device per card (for
+ * example, ethernet cards, modems). In other cases, there may be
+ * many actual or logical devices (SCSI adapters, memory cards with
+ * multiple partitions). The dev_node_t structures need to be kept
+ * in a linked list starting at the 'dev' field of a dev_link_t
+ * structure. We allocate them in the card's private data structure,
+ * because they generally can't be allocated dynamically.
+ */
+
+typedef struct local_info_t {
+ dev_link_t link;
+ struct net_device dev;
+ dev_node_t node;
+ struct net_device_stats stats;
+ int card_type;
+ int probe_port;
+ int silicon; /* silicon revision. 0=old CE2, 1=Scipper, 4=Mohawk */
+ int mohawk; /* a CE3 type card */
+ int dingo; /* a CEM56 type card */
+ int new_mii; /* has full 10baseT/100baseT MII */
+ int modem; /* is a multi function card (i.e with a modem) */
+ caddr_t dingo_ccr; /* only used for CEM56 cards */
+ unsigned last_ptr_value; /* last packets transmitted value */
+ const char *manf_str;
+} local_info_t;
+
+/****************
+ * Some more prototypes
+ */
+static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void do_tx_timeout(struct net_device *dev);
+static struct net_device_stats *do_get_stats(struct net_device *dev);
+static void set_addresses(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static int set_card_type(dev_link_t *link, const void *s);
+static int do_config(struct net_device *dev, struct ifmap *map);
+static int do_open(struct net_device *dev);
+static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void hardreset(struct net_device *dev);
+static void do_reset(struct net_device *dev, int full);
+static int init_mii(struct net_device *dev);
+static void do_powerdown(struct net_device *dev);
+static int do_stop(struct net_device *dev);
+
+
+/*=============== Helper functions =========================*/
+static void
+flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ xirc2ps_detach(link);
+ }
+}
+
+static void
+cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+static int
+get_tuple_data(int fn, client_handle_t handle, tuple_t *tuple)
+{
+ int err;
+
+ if ((err=CardServices(fn, handle, tuple)))
+ return err;
+ return CardServices(GetTupleData, handle, tuple);
+}
+
+static int
+get_tuple(int fn, client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+{
+ int err;
+
+ if ((err=get_tuple_data(fn, handle, tuple)))
+ return err;
+ return CardServices(ParseTuple, handle, tuple, parse);
+}
+
+#define first_tuple(a, b, c) get_tuple(GetFirstTuple, a, b, c)
+#define next_tuple(a, b, c) get_tuple(GetNextTuple, a, b, c)
+
+#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR)
+#define GetByte(reg) ((unsigned)inb(ioaddr + (reg)))
+#define GetWord(reg) ((unsigned)inw(ioaddr + (reg)))
+#define PutByte(reg,value) outb((value), ioaddr+(reg))
+#define PutWord(reg,value) outw((value), ioaddr+(reg))
+
+static void
+busy_loop(u_long len)
+{
+#ifdef MACH
+ /* TODO: Is this really what we want? */
+ __udelay(1000000 / HZ * len);
+#else
+ if (in_interrupt()) {
+ u_long timeout = jiffies + len;
+ u_long flags;
+ save_flags(flags);
+ sti();
+ while (timeout >= jiffies)
+ ;
+ restore_flags(flags);
+ } else {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(len);
+ }
+#endif
+}
+
+
+/*====== Functions used for debugging =================================*/
+#if defined(PCMCIA_DEBUG) && 0 /* reading regs may change system status */
+static void
+PrintRegisters(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ if (pc_debug > 1) {
+ int i, page;
+
+ printk(KDBG_XIRC "Register common: ");
+ for (i = 0; i < 8; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ for (page = 0; page <= 8; page++) {
+ printk(KDBG_XIRC "Register page %2x: ", page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ }
+ for (page=0x40 ; page <= 0x5f; page++) {
+ if (page == 0x43 || (page >= 0x46 && page <= 0x4f)
+ || (page >= 0x51 && page <=0x5e))
+ continue;
+ printk(KDBG_XIRC "Register page %2x: ", page);
+ SelectPage(page);
+ for (i = 8; i < 16; i++)
+ printk(" %2.2x", GetByte(i));
+ printk("\n");
+ }
+ }
+}
+#endif /* PCMCIA_DEBUG */
+
+/*============== MII Management functions ===============*/
+
+/****************
+ * Turn around for read
+ */
+static void
+mii_idle(ioaddr_t ioaddr)
+{
+ PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x04|1); /* and drive MDCK high */
+ udelay(1);
+}
+
+/****************
+ * Write a bit to MDI/O
+ */
+static void
+mii_putbit(ioaddr_t ioaddr, unsigned data)
+{
+ #if 1
+ if (data) {
+ PutByte(XIRCREG2_GPR2, 0x0c|2|0); /* set MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|2|1); /* and drive MDCK high */
+ udelay(1);
+ } else {
+ PutByte(XIRCREG2_GPR2, 0x0c|0|0); /* clear MDIO */
+ udelay(1);
+ PutByte(XIRCREG2_GPR2, 0x0c|0|1); /* and drive MDCK high */
+ udelay(1);
+ }
+ #else
+ if (data) {
+ PutWord(XIRCREG2_GPR2-1, 0x0e0e);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0f0f);
+ udelay(1);
+ } else {
+ PutWord(XIRCREG2_GPR2-1, 0x0c0c);
+ udelay(1);
+ PutWord(XIRCREG2_GPR2-1, 0x0d0d);
+ udelay(1);
+ }
+ #endif
+}
+
+/****************
+ * Get a bit from MDI/O
+ */
+static int
+mii_getbit(ioaddr_t ioaddr)
+{
+ unsigned d;
+
+ PutByte(XIRCREG2_GPR2, 4|0); /* drive MDCK low */
+ udelay(1);
+ d = GetByte(XIRCREG2_GPR2); /* read MDIO */
+ PutByte(XIRCREG2_GPR2, 4|1); /* drive MDCK high again */
+ udelay(1);
+ return d & 0x20; /* read MDIO */
+}
+
+static void
+mii_wbits(ioaddr_t ioaddr, unsigned data, int len)
+{
+ unsigned m = 1 << (len-1);
+ for (; m; m >>= 1)
+ mii_putbit(ioaddr, data & m);
+}
+
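+/****************
+ * Read a 16-bit PHY register by bit-banging a standard MII management
+ * frame: 32-bit preamble, start/read opcode, 5-bit PHY address, 5-bit
+ * register number, a turnaround, then 16 data bits (MSB first).
+ */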
+static unsigned
+mii_rd(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg)
+{
+ int i;
+ unsigned data=0, m;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x06, 4); /* Start and opcode for read */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY register to read */
+ mii_idle(ioaddr); /* turn around */
+ mii_getbit(ioaddr);
+
+ for (m = 1<<15; m; m >>= 1)
+ if (mii_getbit(ioaddr))
+ data |= m;
+ mii_idle(ioaddr);
+ return data;
+}
+
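+/****************
+ * Write a PHY register with the same bit-banged frame, using the
+ * write opcode and an explicitly driven turnaround before the data.
+ */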
+static void
+mii_wr(ioaddr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len)
+{
+ int i;
+
+ SelectPage(2);
+ for (i=0; i < 32; i++) /* 32 bit preamble */
+ mii_putbit(ioaddr, 1);
+ mii_wbits(ioaddr, 0x05, 4); /* Start and opcode for write */
+ mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */
+ mii_wbits(ioaddr, phyreg, 5); /* PHY Register to write */
+ mii_putbit(ioaddr, 1); /* turn around */
+ mii_putbit(ioaddr, 0);
+ mii_wbits(ioaddr, data, len); /* And write the data */
+ mii_idle(ioaddr);
+}
+
+/*============= Main bulk of functions =========================*/
+
+/****************
+ * xirc2ps_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device. The device is registered
+ * with Card Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
+ */
+
+static dev_link_t *
+xirc2ps_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ struct net_device *dev;
+ local_info_t *local;
+ int err;
+
+ DEBUG(0, "attach()\n");
+ flush_stale_links();
+
+ /* Allocate the device structure */
+ local = kmalloc(sizeof(*local), GFP_KERNEL);
+ if (!local) return NULL;
+ memset(local, 0, sizeof(*local));
+ link = &local->link; dev = &local->dev;
+ link->priv = dev->priv = local;
+
+ init_timer(&link->release);
+ link->release.function = &xirc2ps_release;
+ link->release.data = (u_long) link;
+
+ /* General socket configuration */
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+ link->conf.ConfigIndex = 1;
+ link->conf.Present = PRESENT_OPTION;
+ link->irq.Handler = xirc2ps_interrupt;
+ link->irq.Instance = dev;
+
+ /* Fill in card specific entries */
+ dev->hard_start_xmit = &do_start_xmit;
+ dev->set_config = &do_config;
+ dev->get_stats = &do_get_stats;
+ dev->do_ioctl = &do_ioctl;
+ dev->set_multicast_list = &set_multicast_list;
+ ether_setup(dev);
+ init_dev_name(dev, local->node);
+ dev->open = &do_open;
+ dev->stop = &do_stop;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = do_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#endif
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &xirc2ps_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ if ((err = CardServices(RegisterClient, &link->handle, &client_reg))) {
+ cs_error(link->handle, RegisterClient, err);
+ xirc2ps_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* xirc2ps_attach */
+
+/****************
+ * This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
+ */
+
+static void
+xirc2ps_detach(dev_link_t * link)
+{
+ local_info_t *local = link->priv;
+ dev_link_t **linkp;
+
+ DEBUG(0, "detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link)
+ break;
+ if (!*linkp) {
+ DEBUG(0, "detach(0x%p): dev_link lost\n", link);
+ return;
+ }
+
+ /*
+ * If the device is currently configured and active, we won't
+ * actually delete it yet. Instead, it is marked so that when
+ * the release() function is called, that will trigger a proper
+ * detach().
+ */
+ del_timer(&link->release);
+ if (link->state & DEV_CONFIG) {
+ DEBUG(0, "detach postponed, '%s' still locked\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free it */
+ *linkp = link->next;
+ if (link->dev)
+ unregister_netdev(&local->dev);
+ kfree(local);
+
+} /* xirc2ps_detach */
+
+/****************
+ * Detect the type of the card. s is the buffer with the data of tuple 0x20
+ * Returns: 0 := not supported, nonzero := supported
+ * mediaid=11 and prodid=47
+ * Media-Id bits:
+ * Ethernet 0x01
+ * Tokenring 0x02
+ * Arcnet 0x04
+ * Wireless 0x08
+ * Modem 0x10
+ * GSM only 0x20
+ * Prod-Id bits:
+ * Pocket 0x10
+ * External 0x20
+ * Creditcard 0x40
+ * Cardbus 0x80
+ *
+ */
+static int
+set_card_type(dev_link_t *link, const void *s)
+{
+ local_info_t *local = link->priv;
+ #ifdef PCMCIA_DEBUG
+ unsigned cisrev = ((const unsigned char *)s)[2];
+ #endif
+ unsigned mediaid= ((const unsigned char *)s)[3];
+ unsigned prodid = ((const unsigned char *)s)[4];
+
+ DEBUG(0, "cisrev=%02x mediaid=%02x prodid=%02x\n",
+ cisrev, mediaid, prodid);
+
+ local->mohawk = 0;
+ local->dingo = 0;
+ local->modem = 0;
+ local->card_type = XIR_UNKNOWN;
+ if (!(prodid & 0x40)) {
+ printk(KNOT_XIRC "Ooops: Not a creditcard\n");
+ return 0;
+ }
+ if (!(mediaid & 0x01)) {
+ printk(KNOT_XIRC "Not an Ethernet card\n");
+ return 0;
+ }
+ if (mediaid & 0x10) {
+ local->modem = 1;
+ switch(prodid & 15) {
+ case 1: local->card_type = XIR_CEM ; break;
+ case 2: local->card_type = XIR_CEM2 ; break;
+ case 3: local->card_type = XIR_CEM3 ; break;
+ case 4: local->card_type = XIR_CEM33 ; break;
+ case 5: local->card_type = XIR_CEM56M;
+ local->mohawk = 1;
+ break;
+ case 6:
+ case 7: /* 7 is the RealPort 10/56 */
+ local->card_type = XIR_CEM56 ;
+ local->mohawk = 1;
+ local->dingo = 1;
+ break;
+ }
+ } else {
+ switch(prodid & 15) {
+ case 1: local->card_type = has_ce2_string(link)? XIR_CE2 : XIR_CE ;
+ break;
+ case 15:
+ case 2: local->card_type = XIR_CE2; break;
+ case 3: local->card_type = XIR_CE3;
+ local->mohawk = 1;
+ break;
+ }
+ }
+ if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
+ printk(KNOT_XIRC "Sorry, this is an old CE card\n");
+ return 0;
+ }
+ if (local->card_type == XIR_UNKNOWN)
+ printk(KNOT_XIRC "unknown card (mediaid=%02x prodid=%02x)\n",
+ mediaid, prodid);
+
+ return 1;
+}
+
+/****************
+ * There are some CE2 cards out there which claim to be a CE card.
+ * This function looks for a "CE2" in the 3rd version field.
+ * Returns: true if this is a CE2
+ */
+static int
+has_ce2_string(dev_link_t * link)
+{
+ client_handle_t handle = link->handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[256];
+
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 254;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = CISTPL_VERS_1;
+ if (!first_tuple(handle, &tuple, &parse) && parse.version_1.ns > 2) {
+ if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
+ return 1;
+ }
+ return 0;
+}
+
+/****************
+ * xirc2ps_config() is scheduled to run after a CARD_INSERTION event
+ * is received, to configure the PCMCIA socket, and to make the
+ * ethernet device available to the system.
+ */
+static void
+xirc2ps_config(dev_link_t * link)
+{
+ client_handle_t handle = link->handle;
+ local_info_t *local = link->priv;
+ struct net_device *dev = &local->dev;
+ tuple_t tuple;
+ cisparse_t parse;
+ ioaddr_t ioaddr;
+ int err, i;
+ u_char buf[64];
+ cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
+ cistpl_cftable_entry_t *cf = &parse.cftable_entry;
+
+ local->dingo_ccr = 0;
+
+ DEBUG(0, "config(0x%p)\n", link);
+
+ /*
+ * This reads the card's CONFIG tuple to find its configuration
+ * registers.
+ */
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 64;
+ tuple.TupleOffset = 0;
+
+ /* Is this a valid card */
+ tuple.DesiredTuple = CISTPL_MANFID;
+ if ((err=first_tuple(handle, &tuple, &parse))) {
+ printk(KNOT_XIRC "manfid not found in CIS\n");
+ goto failure;
+ }
+
+ switch(parse.manfid.manf) {
+ case MANFID_XIRCOM:
+ local->manf_str = "Xircom";
+ break;
+ case MANFID_ACCTON:
+ local->manf_str = "Accton";
+ break;
+ case MANFID_COMPAQ:
+ case MANFID_COMPAQ2:
+ local->manf_str = "Compaq";
+ break;
+ case MANFID_INTEL:
+ local->manf_str = "Intel";
+ break;
+ case MANFID_TOSHIBA:
+ local->manf_str = "Toshiba";
+ break;
+ default:
+ printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n",
+ (unsigned)parse.manfid.manf);
+ goto failure;
+ }
+ DEBUG(0, "found %s card\n", local->manf_str);
+
+ if (!set_card_type(link, buf)) {
+ printk(KNOT_XIRC "this card is not supported\n");
+ goto failure;
+ }
+
+ /* get configuration stuff */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ if ((err=first_tuple(handle, &tuple, &parse)))
+ goto cis_error;
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* get the ethernet address from the CIS */
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)) {
+ /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries: the first one
+ * with a length of zero and the second correct, so skip all
+ * entries with length 0 */
+ if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID
+ && ((cistpl_lan_node_id_t *)parse.funce.data)->nb)
+ break;
+ }
+ if (err) { /* not found: try to get the node-id from tuple 0x89 */
+ tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */
+ if (!(err = get_tuple_data(GetFirstTuple, handle, &tuple))) {
+ if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID)
+ memcpy(&parse, buf, 8);
+ else
+ err = -1;
+ }
+ }
+ if (err) { /* another try (James Lehmer's CE2 version 4.1)*/
+ tuple.DesiredTuple = CISTPL_FUNCE;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)) {
+ if (parse.funce.type == 0x02 && parse.funce.data[0] == 1
+ && parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) {
+ buf[1] = 4;
+ memcpy(&parse, buf+1, 8);
+ break;
+ }
+ }
+ }
+ if (err) {
+ printk(KNOT_XIRC "node-id not found in CIS\n");
+ goto failure;
+ }
+ node_id = (cistpl_lan_node_id_t *)parse.funce.data;
+ if (node_id->nb != 6) {
+ printk(KNOT_XIRC "malformed node-id in CIS\n");
+ goto failure;
+ }
+ for (i=0; i < 6; i++)
+ dev->dev_addr[i] = node_id->id[i];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ link->io.IOAddrLines =10;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ link->irq.Attributes = IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else {
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ }
+ if (local->modem) {
+ int pass;
+
+ if (do_sound) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status |= CCSR_AUDIO_ENA;
+ }
+ link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ;
+ link->io.NumPorts2 = 8;
+ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ if (local->dingo) {
+ /* Take the Modem IO port from the CIS and scan for a free
+ * Ethernet port */
+ link->io.NumPorts1 = 16; /* no Mako stuff anymore */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)) {
+ if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) {
+ for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
+ link->conf.ConfigIndex = cf->index ;
+ link->io.BasePort2 = cf->io.win[0].base;
+ link->io.BasePort1 = ioaddr;
+ if (!(err=CardServices(RequestIO, link->handle,
+ &link->io)))
+ goto port_found;
+ }
+ }
+ }
+ } else {
+ link->io.NumPorts1 = 18;
+ /* We do 2 passes here: the first one uses the regular mapping and
+ * the second tries again, taking into account that the 32 ports
+ * are mirrored every 32 bytes. Actually we use a mirrored port for
+ * the Mako if (on the first pass) the COR bit 5 is set.
+ */
+ for (pass=0; pass < 2; pass++) {
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ for (err = first_tuple(handle, &tuple, &parse); !err;
+ err = next_tuple(handle, &tuple, &parse)){
+ if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8){
+ link->conf.ConfigIndex = cf->index ;
+ link->io.BasePort2 = cf->io.win[0].base;
+ link->io.BasePort1 = link->io.BasePort2
+ + (pass ? (cf->index & 0x20 ? -24:8)
+ : (cf->index & 0x20 ? 8:-24));
+ if (!(err=CardServices(RequestIO, link->handle,
+ &link->io)))
+ goto port_found;
+ }
+ }
+ }
+ /* if special option:
+ * try to configure as Ethernet only.
+ * .... */
+ }
+ printk(KNOT_XIRC "no ports available\n");
+ } else {
+ link->irq.Attributes |= IRQ_TYPE_EXCLUSIVE;
+ link->io.NumPorts1 = 16;
+ for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
+ link->io.BasePort1 = ioaddr;
+ if (!(err=CardServices(RequestIO, link->handle, &link->io)))
+ goto port_found;
+ }
+ link->io.BasePort1 = 0; /* let CS decide */
+ if ((err=CardServices(RequestIO, link->handle, &link->io))) {
+ cs_error(link->handle, RequestIO, err);
+ goto config_error;
+ }
+ }
+ port_found:
+ if (err)
+ goto config_error;
+
+ /****************
+ * Now allocate an interrupt line. Note that this does not
+ * actually assign a handler to the interrupt.
+ */
+ if ((err=CardServices(RequestIRQ, link->handle, &link->irq))) {
+ cs_error(link->handle, RequestIRQ, err);
+ goto config_error;
+ }
+
+ /****************
+ * This actually configures the PCMCIA socket -- setting up
+ * the I/O windows and the interrupt mapping.
+ */
+ if ((err=CardServices(RequestConfiguration,
+ link->handle, &link->conf))) {
+ cs_error(link->handle, RequestConfiguration, err);
+ goto config_error;
+ }
+
+ if (local->dingo) {
+ conf_reg_t reg;
+ win_req_t req;
+ memreq_t mem;
+
+ /* Reset the modem's BAR to the correct value
+ * This is necessary because in the RequestConfiguration call,
+ * the base address of the ethernet port (BasePort1) is written
+ * to the BAR registers of the modem.
+ */
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_IOBASE_0;
+ reg.Value = link->io.BasePort2 & 0xff;
+ if ((err = CardServices(AccessConfigurationRegister, link->handle,
+ &reg))) {
+ cs_error(link->handle, AccessConfigurationRegister, err);
+ goto config_error;
+ }
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_IOBASE_1;
+ reg.Value = (link->io.BasePort2 >> 8) & 0xff;
+ if ((err = CardServices(AccessConfigurationRegister, link->handle,
+ &reg))) {
+ cs_error(link->handle, AccessConfigurationRegister, err);
+ goto config_error;
+ }
+
+ /* There is no config entry for the Ethernet part, which
+ * is at 0x0800. So we allocate a window into the attribute
+ * memory and write directly to the CIS registers.
+ */
+ req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
+ req.Base = req.Size = 0;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ if ((err = CardServices(RequestWindow, &link->win, &req))) {
+ cs_error(link->handle, RequestWindow, err);
+ goto config_error;
+ }
+ local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
+ mem.CardOffset = 0x0;
+ mem.Page = 0;
+ if ((err = CardServices(MapMemPage, link->win, &mem))) {
+ cs_error(link->handle, MapMemPage, err);
+ goto config_error;
+ }
+
+ /* Set up the CCRs; there is no information in the CIS about the
+ * Ethernet part.
+ */
+ writeb(0x47, local->dingo_ccr + CISREG_COR);
+ ioaddr = link->io.BasePort1;
+ writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0);
+ writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1);
+
+ #if 0
+ {
+ u_char tmp;
+ printk(KERN_INFO "ECOR:");
+ for (i=0; i < 7; i++) {
+ tmp = readb(local->dingo_ccr + i*2);
+ printk(" %02x", tmp);
+ }
+ printk("\n");
+ printk(KERN_INFO "DCOR:");
+ for (i=0; i < 4; i++) {
+ tmp = readb(local->dingo_ccr + 0x20 + i*2);
+ printk(" %02x", tmp);
+ }
+ printk("\n");
+ printk(KERN_INFO "SCOR:");
+ for (i=0; i < 10; i++) {
+ tmp = readb(local->dingo_ccr + 0x40 + i*2);
+ printk(" %02x", tmp);
+ }
+ printk("\n");
+ }
+ #endif
+
+ writeb(0x01, local->dingo_ccr + 0x20);
+ writeb(0x0c, local->dingo_ccr + 0x22);
+ writeb(0x00, local->dingo_ccr + 0x24);
+ writeb(0x00, local->dingo_ccr + 0x26);
+ writeb(0x00, local->dingo_ccr + 0x28);
+ }
+
+ /* The if_port symbol can be set when the module is loaded */
+ local->probe_port=0;
+ if (!if_port) {
+ local->probe_port = dev->if_port = 1;
+ } else if ((if_port >= 1 && if_port <= 2) ||
+ (local->mohawk && if_port==4))
+ dev->if_port = if_port;
+ else
+ printk(KNOT_XIRC "invalid if_port requested\n");
+
+ /* we can now register the device with the net subsystem */
+ dev->irq = link->irq.AssignedIRQ;
+ dev->base_addr = link->io.BasePort1;
+ if ((err=register_netdev(dev))) {
+ printk(KNOT_XIRC "register_netdev() failed\n");
+ goto config_error;
+ }
+
+ copy_dev_name(local->node, dev);
+ link->dev = &local->node;
+ link->state &= ~DEV_CONFIG_PENDING;
+
+ if (local->dingo)
+ do_reset(dev, 1); /* a kludge to make the cem56 work */
+
+ /* print some information about the hardware */
+ printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr",
+ dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq);
+ for (i = 0; i < 6; i++)
+ printk("%c%02X", i?':':' ', dev->dev_addr[i]);
+ printk("\n");
+
+ return;
+
+ config_error:
+ link->state &= ~DEV_CONFIG_PENDING;
+ xirc2ps_release((u_long)link);
+ return;
+
+ cis_error:
+ printk(KNOT_XIRC "unable to parse CIS\n");
+ failure:
+ link->state &= ~DEV_CONFIG_PENDING;
+} /* xirc2ps_config */
+
+/****************
+ * After a card is removed, xirc2ps_release() will unregister the net
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void
+xirc2ps_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *) arg;
+ local_info_t *local = link->priv;
+ struct net_device *dev = &local->dev;
+
+ DEBUG(0, "release(0x%p)\n", link);
+
+ /*
+ * If the device is currently in use, we won't release until it
+ * is actually closed.
+ */
+ if (link->open) {
+ DEBUG(0, "release postponed, '%s' "
+ "still open\n", link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ if (link->win) {
+ local_info_t *local = dev->priv;
+ if (local->dingo)
+ iounmap(local->dingo_ccr - 0x0800);
+ CardServices(ReleaseWindow, link->win);
+ }
+ CardServices(ReleaseConfiguration, link->handle);
+ CardServices(ReleaseIO, link->handle, &link->io);
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+
+} /* xirc2ps_release */
+
+/*====================================================================*/
+
+/****************
+ * The card status event handler. Mostly, this schedules other
+ * stuff to run after an event is received. A CARD_REMOVAL event
+ * also sets some flags to discourage the net drivers from trying
+ * to talk to the card any more.
+ *
+ * When a CARD_REMOVAL event is received, we immediately set a flag
+ * to block future accesses to this device. All the functions that
+ * actually access the device should check this flag to make sure
+ * the card is still present.
+ */
+
+static int
+xirc2ps_event(event_t event, int priority,
+ event_callback_args_t * args)
+{
+ dev_link_t *link = args->client_data;
+ local_info_t *lp = link->priv;
+ struct net_device *dev = &lp->dev;
+
+ DEBUG(0, "event(%d)\n", (int)event);
+
+ switch (event) {
+ case CS_EVENT_REGISTRATION_COMPLETE:
+ DEBUG(0, "registration complete\n");
+ break;
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ xirc2ps_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ if (link->open) {
+ netif_device_detach(dev);
+ do_powerdown(dev);
+ }
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ if (link->open) {
+ do_reset(dev,1);
+ netif_device_attach(dev);
+ }
+ }
+ break;
+ }
+ return 0;
+} /* xirc2ps_event */
+
+/*====================================================================*/
+
+/****************
+ * This is the interrupt service routine.
+ */
+static void
+xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ local_info_t *lp = dev->priv;
+ ioaddr_t ioaddr;
+ u_char saved_page;
+ unsigned bytes_rcvd;
+ unsigned int_status, eth_status, rx_status, tx_status;
+ unsigned rsr, pktlen;
+ ulong start_ticks = jiffies; /* fixme: jiffies rollover every 497 days
+ * is this something to worry about?
+ * -- on a laptop?
+ */
+
+ if (!netif_device_present(dev))
+ return;
+
+ ioaddr = dev->base_addr;
+ if (lp->mohawk) { /* must disable the interrupt */
+ PutByte(XIRCREG_CR, 0);
+ }
+
+ DEBUG(6, "%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr);
+
+ saved_page = GetByte(XIRCREG_PR);
+ /* Read the ISR to see what caused the interrupt.
+ * This also clears the interrupt flags on CE2 cards.
+ */
+ int_status = GetByte(XIRCREG_ISR);
+ bytes_rcvd = 0;
+ loop_entry:
+ if (int_status == 0xff) { /* card may be ejected */
+ DEBUG(3, "%s: interrupt %d for dead card\n", dev->name, irq);
+ goto leave;
+ }
+ eth_status = GetByte(XIRCREG_ESR);
+
+ SelectPage(0x40);
+ rx_status = GetByte(XIRCREG40_RXST0);
+ PutByte(XIRCREG40_RXST0, (~rx_status & 0xff));
+ tx_status = GetByte(XIRCREG40_TXST0);
+ tx_status |= GetByte(XIRCREG40_TXST1) << 8;
+ PutByte(XIRCREG40_TXST0, 0);
+ PutByte(XIRCREG40_TXST1, 0);
+
+ DEBUG(3, "%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n",
+ dev->name, int_status, eth_status, rx_status, tx_status);
+
+ /***** receive section ******/
+ SelectPage(0);
+ while (eth_status & FullPktRcvd) {
+ rsr = GetByte(XIRCREG0_RSR);
+ if (bytes_rcvd > maxrx_bytes && (rsr & PktRxOk)) {
+ /* too many bytes received during this interrupt; drop the rest
+ * of the packets */
+ lp->stats.rx_dropped++;
+ DEBUG(2, "%s: RX drop, too much done\n", dev->name);
+ } else if (rsr & PktRxOk) {
+ struct sk_buff *skb;
+
+ pktlen = GetWord(XIRCREG0_RBC);
+ bytes_rcvd += pktlen;
+
+ DEBUG(5, "rsr=%#02x packet_length=%u\n", rsr, pktlen);
+
+ skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
+ if (!skb) {
+ printk(KNOT_XIRC "low memory, packet dropped (size=%u)\n",
+ pktlen);
+ lp->stats.rx_dropped++;
+ } else { /* okay get the packet */
+ skb_reserve(skb, 2);
+ if (lp->silicon == 0 ) { /* work around a hardware bug */
+ unsigned rhsa; /* receive start address */
+
+ SelectPage(5);
+ rhsa = GetWord(XIRCREG5_RHSA0);
+ SelectPage(0);
+ rhsa += 3; /* skip control infos */
+ if (rhsa >= 0x8000)
+ rhsa = 0;
+ if (rhsa + pktlen > 0x8000) {
+ unsigned i;
+ u_char *buf = skb_put(skb, pktlen);
+ for (i=0; i < pktlen ; i++, rhsa++) {
+ buf[i] = GetByte(XIRCREG_EDP);
+ if (rhsa == 0x8000) {
+ rhsa = 0;
+ i--;
+ }
+ }
+ } else {
+ insw(ioaddr+XIRCREG_EDP,
+ skb_put(skb, pktlen), (pktlen+1)>>1);
+ }
+ }
+ #if 0
+ else if (lp->mohawk) {
+ /* To use this 32-bit access we should use a manually
+ * optimized loop. Also the words are swapped; we can get
+ * more performance by using 32-bit access and swapping
+ * the words in a register. Will need this for CardBus.
+ *
+ * Note: don't forget to change the ALLOC_SKB to .. +3
+ */
+ unsigned i;
+ u_long *p = skb_put(skb, pktlen);
+ register u_long a;
+ ioaddr_t edpreg = ioaddr+XIRCREG_EDP-2;
+ for (i=0; i < pktlen ; i += 4, p++) {
+ a = inl(edpreg);
+ __asm__("rorl $16,%0\n\t"
+ :"=q" (a)
+ : "0" (a));
+ *p = a;
+ }
+ }
+ #endif
+ else {
+ insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen),
+ (pktlen+1)>>1);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->dev = dev;
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ add_rx_bytes(&lp->stats, pktlen);
+ if (!(rsr & PhyPkt))
+ lp->stats.multicast++;
+ }
+ } else { /* bad packet */
+ DEBUG(5, "rsr=%#02x\n", rsr);
+ }
+ if (rsr & PktTooLong) {
+ lp->stats.rx_frame_errors++;
+ DEBUG(3, "%s: Packet too long\n", dev->name);
+ }
+ if (rsr & CRCErr) {
+ lp->stats.rx_crc_errors++;
+ DEBUG(3, "%s: CRC error\n", dev->name);
+ }
+ if (rsr & AlignErr) {
+ lp->stats.rx_fifo_errors++; /* okay ? */
+ DEBUG(3, "%s: Alignment error\n", dev->name);
+ }
+
+ /* clear the received/dropped/error packet */
+ PutWord(XIRCREG0_DO, 0x8000); /* issue cmd: skip_rx_packet */
+
+ /* get the new ethernet status */
+ eth_status = GetByte(XIRCREG_ESR);
+ }
+ if (rx_status & 0x10) { /* Receive overrun */
+ lp->stats.rx_over_errors++;
+ PutByte(XIRCREG_CR, ClearRxOvrun);
+ DEBUG(3, "receive overrun cleared\n");
+ }
+
+ /***** transmit section ******/
+ if (int_status & PktTxed) {
+ unsigned n, nn;
+
+ n = lp->last_ptr_value;
+ nn = GetByte(XIRCREG0_PTR);
+ lp->last_ptr_value = nn;
+ if (nn < n) /* rollover */
+ lp->stats.tx_packets += 256 - n;
+ else if (n == nn) { /* happens sometimes - don't know why */
+ DEBUG(0, "PTR not changed?\n");
+ } else
+ lp->stats.tx_packets += lp->last_ptr_value - n;
+ netif_wake_queue(dev);
+ }
+ if (tx_status & 0x0002) { /* Excessive collisions */
+ DEBUG(0, "tx restarted due to excessive collisions\n");
+ PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
+ }
+ if (tx_status & 0x0040)
+ lp->stats.tx_aborted_errors++;
+
+ /* recalculate our work chunk so that we limit the duration of this
+ * ISR to about 1/10 of a second.
+ * Calculate only if we received a reasonable amount of bytes.
+ */
+ if (bytes_rcvd > 1000) {
+ u_long duration = jiffies - start_ticks;
+
+ if (duration >= HZ/10) { /* if more than about 1/10 second */
+ maxrx_bytes = (bytes_rcvd * (HZ/10)) / duration;
+ if (maxrx_bytes < 2000)
+ maxrx_bytes = 2000;
+ else if (maxrx_bytes > 22000)
+ maxrx_bytes = 22000;
+ DEBUG(1, "set maxrx=%u (rcvd=%u ticks=%lu)\n",
+ maxrx_bytes, bytes_rcvd, duration);
+ } else if (!duration && maxrx_bytes < 22000) {
+ /* finished well under the time budget; raise the limit */
+ maxrx_bytes += 2000;
+ if (maxrx_bytes > 22000)
+ maxrx_bytes = 22000;
+ DEBUG(1, "set maxrx=%u\n", maxrx_bytes);
+ }
+ }
+
+ leave:
+ if (lockup_hack) {
+ if (int_status != 0xff && (int_status = GetByte(XIRCREG_ISR)) != 0)
+ goto loop_entry;
+ }
+ SelectPage(saved_page);
+ PutByte(XIRCREG_CR, EnableIntr); /* re-enable interrupts */
+ /* Instead of dropping packets during a receive, we could
+ * force an interrupt with this command:
+ * PutByte(XIRCREG_CR, EnableIntr|ForceIntr);
+ */
+} /* xirc2ps_interrupt */
+
+/*====================================================================*/
+
+static void
+do_tx_timeout(struct net_device *dev)
+{
+ local_info_t *lp = dev->priv;
+ printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
+ lp->stats.tx_errors++;
+ /* reset the card */
+ do_reset(dev,1);
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static int
+do_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ local_info_t *lp = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ int okay;
+ unsigned freespace;
+ unsigned pktlen = skb? skb->len : 0;
+
+ DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n",
+ skb, dev, pktlen);
+
+ tx_timeout_check(dev, do_tx_timeout);
+ skb_tx_check(dev, skb);
+
+ /* adjust the packet length to the minimum required
+ * and hope that the buffer is large enough
+ * to provide some random data.
+ * fixme: For Mohawk we can change this by sending
+ * a larger packetlen than we actually have; the chip will
+ * pad this in its buffer with random bytes
+ */
+ if (pktlen < ETH_ZLEN)
+ pktlen = ETH_ZLEN;
+
+ SelectPage(0);
+ PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
+ freespace = GetWord(XIRCREG0_TSO);
+ okay = freespace & 0x8000;
+ freespace &= 0x7fff;
+ /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
+ okay = pktlen +2 < freespace;
+ DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n",
+ dev->name, freespace, okay ? " (okay)":" (not enough)");
+ if (!okay) { /* not enough space */
+ return 1; /* upper layer may decide to requeue this packet */
+ }
+ /* send the packet */
+ PutWord(XIRCREG_EDP, (u_short)pktlen);
+ outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1);
+ if (pktlen & 1)
+ PutByte(XIRCREG_EDP, skb->data[pktlen-1]);
+
+ if (lp->mohawk)
+ PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
+
+ DEV_KFREE_SKB (skb);
+ dev->trans_start = jiffies;
+ add_tx_bytes(&lp->stats, pktlen);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static struct net_device_stats *
+do_get_stats(struct net_device *dev)
+{
+ local_info_t *lp = dev->priv;
+
+ /* lp->stats.rx_missed_errors = GetByte(?) */
+ return &lp->stats;
+}
+
+/****************
+ * Set all addresses: the first one is the individual address,
+ * the next 9 are taken from the multicast list, and
+ * the rest are filled with the individual address.
+ */
+static void
+set_addresses(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ local_info_t *lp = dev->priv;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addr;
+ int i,j,k,n;
+
+ SelectPage(k=0x50);
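+ /* Loop bookkeeping: i is the byte index within one station address
+ * (0..5), j is the register offset within the current page (8..15),
+ * k is the register page (starting at 0x50) and n counts which of
+ * the ten address slots is being written.
+ */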
+ for (i=0,j=8,n=0; ; i++, j++) {
+ if (i > 5) {
+ if (++n > 9)
+ break;
+ i = 0;
+ }
+ if (j > 15) {
+ j = 8;
+ k++;
+ SelectPage(k);
+ }
+
+ if (n && n <= dev->mc_count && dmi) {
+ addr = dmi->dmi_addr;
+ dmi = dmi->next;
+ } else
+ addr = dev->dev_addr;
+
+ if (lp->mohawk)
+ PutByte(j, addr[5-i]);
+ else
+ PutByte(j, addr[i]);
+ }
+ SelectPage(0);
+}
+
+/****************
+ * Set or clear the multicast filter for this adaptor.
+ * We can filter up to 9 addresses, if more are requested we set
+ * multicast promiscuous mode.
+ */
+
+static void
+set_multicast_list(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+
+ SelectPage(0x42);
+ if (dev->flags & IFF_PROMISC) { /* snoop */
+ PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
+ } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
+ PutByte(XIRCREG42_SWC1, 0x06); /* set MPE */
+ } else if (dev->mc_count) {
+ /* the chip can filter 9 addresses perfectly */
+ PutByte(XIRCREG42_SWC1, 0x00);
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, Offline);
+ set_addresses(dev);
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, EnableRecv | Online);
+ } else { /* standard usage */
+ PutByte(XIRCREG42_SWC1, 0x00);
+ }
+ SelectPage(0);
+}
+
+static int
+do_config(struct net_device *dev, struct ifmap *map)
+{
+ local_info_t *local = dev->priv;
+
+ DEBUG(0, "do_config(%p)\n", dev);
+ if (map->port != 255 && map->port != dev->if_port) {
+ if (map->port > 4)
+ return -EINVAL;
+ if (!map->port) {
+ local->probe_port = 1;
+ dev->if_port = 1;
+ } else {
+ local->probe_port = 0;
+ dev->if_port = map->port;
+ }
+ printk(KERN_INFO "%s: switching to %s port\n",
+ dev->name, if_names[dev->if_port]);
+ do_reset(dev,1); /* not the fine way :-) */
+ }
+ return 0;
+}
+
+/****************
+ * Open the driver
+ */
+static int
+do_open(struct net_device *dev)
+{
+ local_info_t *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(0, "do_open(%p)\n", dev);
+
+ /* Check that the PCMCIA card is still here. */
+ /* Physical device present signature. */
+ if (!DEV_OK(link))
+ return -ENODEV;
+
+ /* okay */
+ link->open++;
+ MOD_INC_USE_COUNT;
+
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+ do_reset(dev,1);
+
+ return 0;
+}
+
+static int
+do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ local_info_t *local = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+
+ DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
+ dev->name, rq->ifr_ifrn.ifrn_name, cmd,
+ data[0], data[1], data[2], data[3]);
+
+ if (!local->mohawk)
+ return -EOPNOTSUPP;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+ data[0] = 0; /* we have only this address */
+ /* fall through */
+ case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+ data[3] = mii_rd(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ break;
+ case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mii_wr(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2], 16);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void
+hardreset(struct net_device *dev)
+{
+ local_info_t *local = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+
+ SelectPage(4);
+ udelay(1);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ busy_loop(HZ/25); /* wait 40 msec */
+ if (local->mohawk)
+ PutByte(XIRCREG4_GPR1, 1); /* set bit 0: power up */
+ else
+ PutByte(XIRCREG4_GPR1, 1 | 4); /* set bit 0: power up, bit 2: AIC */
+ busy_loop(HZ/50); /* wait 20 msec */
+}
+
+static void
+do_reset(struct net_device *dev, int full)
+{
+ local_info_t *local = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ unsigned value;
+
+ DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
+
+ hardreset(dev);
+ PutByte(XIRCREG_CR, SoftReset); /* set */
+ busy_loop(HZ/50); /* wait 20 msec */
+ PutByte(XIRCREG_CR, 0); /* clear */
+ busy_loop(HZ/25); /* wait 40 msec */
+ if (local->mohawk) {
+ SelectPage(4);
+ /* set pins GP1 and GP2 to output (0x0c)
+ * set GP1 low to power up the ML6692 (0x00)
+ * set GP2 high to power up the 10MHz chip (0x02)
+ */
+ PutByte(XIRCREG4_GPR0, 0x0e);
+ }
+
+ /* give the circuits some time to power up */
+ busy_loop(HZ/2); /* about 500ms */
+
+ local->last_ptr_value = 0;
+ local->silicon = local->mohawk ? (GetByte(XIRCREG4_BOV) & 0x70) >> 4
+ : (GetByte(XIRCREG4_BOV) & 0x30) >> 4;
+
+ if (local->probe_port) {
+ if (!local->mohawk) {
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR0, 4);
+ local->probe_port = 0;
+ }
+ } else if (dev->if_port == 2) { /* enable 10Base2 */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ } else { /* enable 10BaseT */
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC1, 0x80);
+ }
+ busy_loop(HZ/25); /* wait 40 msec to let it complete */
+
+ #ifdef PCMCIA_DEBUG
+ if (pc_debug) {
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value);
+ }
+ #endif
+
+ /* setup the ECR */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff); /* allow all ints */
+ PutByte(XIRCREG1_IMR1, 1 ); /* and Set TxUnderrunDetect */
+ value = GetByte(XIRCREG1_ECR);
+ #if 0
+ if (local->mohawk)
+ value |= DisableLinkPulse;
+ PutByte(XIRCREG1_ECR, value);
+ #endif
+ DEBUG(0, "%s: ECR is: %#02x\n", dev->name, value);
+
+ SelectPage(0x42);
+ PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */
+
+ if (local->silicon != 1) {
+ /* set the local memory dividing line.
+ * The comments in the sample code say that this is only
+ * settable with the scipper version 2, which is revision 0.
+ * This is always done for CE3 cards.
+ */
+ SelectPage(2);
+ PutWord(XIRCREG2_RBS, 0x2000);
+ }
+
+ if (full)
+ set_addresses(dev);
+
+ /* Hardware workaround:
+ * The receive byte pointer after reset is off by 1 so we need
+ * to move the offset pointer back to 0.
+ */
+ SelectPage(0);
+ PutWord(XIRCREG0_DO, 0x2000); /* change offset command, off=0 */
+
+ /* setup MAC IMRs and clear status registers */
+ SelectPage(0x40); /* Bit 7 ... bit 0 */
+ PutByte(XIRCREG40_RMASK0, 0xff); /* ROK, RAB, rsv, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TMASK0, 0xff); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TMASK1, 0xb0); /* rsv, rsv, PTD, EXT, rsv,rsv,rsv, rsv*/
+ PutByte(XIRCREG40_RXST0, 0x00); /* ROK, RAB, REN, RO, CRC, AE, PTL, MP */
+ PutByte(XIRCREG40_TXST0, 0x00); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */
+ PutByte(XIRCREG40_TXST1, 0x00); /* TEN, rsv, PTD, EXT, retry_counter:4 */
+
+ if (full && local->mohawk && init_mii(dev)) {
+ if (dev->if_port == 4 || local->dingo || local->new_mii) {
+ printk(KERN_INFO "%s: MII selected\n", dev->name);
+ SelectPage(2);
+ PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
+ busy_loop(HZ/50);
+ } else {
+ printk(KERN_INFO "%s: MII detected; using 10mbs\n",
+ dev->name);
+ SelectPage(0x42);
+ if (dev->if_port == 2) /* enable 10Base2 */
+ PutByte(XIRCREG42_SWC1, 0xC0);
+ else /* enable 10BaseT */
+ PutByte(XIRCREG42_SWC1, 0x80);
+ busy_loop(HZ/25); /* wait 40 msec to let it complete */
+ }
+ if (full_duplex)
+ PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex);
+ } else { /* No MII */
+ SelectPage(0);
+ value = GetByte(XIRCREG_ESR); /* read the ESR */
+ dev->if_port = (value & MediaSelect) ? 1 : 2;
+ }
+
+ /* configure the LEDs */
+ SelectPage(2);
+ if (dev->if_port == 1 || dev->if_port == 4) /* TP: Link and Activity */
+ PutByte(XIRCREG2_LED, 0x3b);
+ else /* Coax: Not-Collision and Activity */
+ PutByte(XIRCREG2_LED, 0x3a);
+
+ if (local->dingo)
+ PutByte(0x0b, 0x04); /* 100 Mbit LED */
+
+ /* enable receiver and put the mac online */
+ if (full) {
+ SelectPage(0x40);
+ PutByte(XIRCREG40_CMD0, EnableRecv | Online);
+ }
+
+ /* setup Ethernet IMR and enable interrupts */
+ SelectPage(1);
+ PutByte(XIRCREG1_IMR0, 0xff);
+ udelay(1);
+ SelectPage(0);
+ PutByte(XIRCREG_CR, EnableIntr);
+ if (local->modem && !local->dingo) { /* do some magic */
+ if (!(GetByte(0x10) & 0x01))
+ PutByte(0x10, 0x11); /* unmask master-int bit */
+ }
+
+ if (full)
+ printk(KERN_INFO "%s: media %s, silicon revision %d\n",
+ dev->name, if_names[dev->if_port], local->silicon);
+ /* We should switch back to page 0 to avoid a bug in revision 0
+ * where regs with offset below 8 can't be read after an access
+ * to the MAC registers */
+ SelectPage(0);
+}
+
+/****************
+ * Initialize the Media-Independent-Interface
+ * Returns: True if we have a good MII
+ */
+static int
+init_mii(struct net_device *dev)
+{
+ local_info_t *local = dev->priv;
+ ioaddr_t ioaddr = dev->base_addr;
+ unsigned control, status, linkpartner;
+ int i;
+
+ if (if_port == 4 || if_port == 1) { /* force 100BaseT or 10BaseT */
+ dev->if_port = if_port;
+ local->probe_port = 0;
+ return 1;
+ }
+
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0xff00) != 0x7800)
+ return 0; /* No MII */
+
+ local->new_mii = (mii_rd(ioaddr, 0, 2) != 0xffff);
+
+ if (local->probe_port)
+ control = 0x1000; /* auto neg */
+ else if (dev->if_port == 4)
+ control = 0x2000; /* no auto neg, 100mbs mode */
+ else
+ control = 0x0000; /* no auto neg, 10mbs mode */
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ control = mii_rd(ioaddr, 0, 0);
+
+ if (control & 0x0400) {
+ printk(KERN_NOTICE "%s can't take PHY out of isolation mode\n",
+ dev->name);
+ local->probe_port = 0;
+ return 0;
+ }
+
+ if (local->probe_port) {
+ /* according to the DP83840A specs the auto negotiation process
+ * may take up to 3.5 sec, so we use this also for our ML6692
+ * Fixme: Better to use a timer here!
+ */
+ for (i=0; i < 35; i++) {
+ busy_loop(HZ/10); /* wait 100 msec */
+ status = mii_rd(ioaddr, 0, 1);
+ if ((status & 0x0020) && (status & 0x0004))
+ break;
+ }
+
+ if (!(status & 0x0020)) {
+ printk(KERN_INFO "%s: autonegotiation failed;"
+ " using 10mbs\n", dev->name);
+ if (!local->new_mii) {
+ control = 0x0000;
+ mii_wr(ioaddr, 0, 0, control, 16);
+ udelay(100);
+ SelectPage(0);
+ dev->if_port = (GetByte(XIRCREG_ESR) & MediaSelect) ? 1 : 2;
+ }
+ } else {
+ linkpartner = mii_rd(ioaddr, 0, 5);
+ printk(KERN_INFO "%s: MII link partner: %04x\n",
+ dev->name, linkpartner);
+ if (linkpartner & 0x0080) {
+ dev->if_port = 4;
+ } else
+ dev->if_port = 1;
+ }
+ }
+
+ return 1;
+}
+
+static void
+do_powerdown(struct net_device *dev)
+{
+
+ ioaddr_t ioaddr = dev->base_addr;
+
+ DEBUG(0, "do_powerdown(%p)\n", dev);
+
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+}
+
+static int
+do_stop(struct net_device *dev)
+{
+ ioaddr_t ioaddr = dev->base_addr;
+ local_info_t *lp = dev->priv;
+ dev_link_t *link = &lp->link;
+
+ DEBUG(0, "do_stop(%p)\n", dev);
+
+ if (!link)
+ return -ENODEV;
+
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ SelectPage(0);
+ PutByte(XIRCREG_CR, 0); /* disable interrupts */
+ SelectPage(0x01);
+ PutByte(XIRCREG1_IMR0, 0x00); /* forbid all ints */
+ SelectPage(4);
+ PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
+ SelectPage(0);
+
+ link->open--;
+ if (link->state & DEV_STALE_CONFIG)
+ mod_timer(&link->release, jiffies + HZ/20);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int __init
+init_xirc2ps_cs(void)
+{
+ servinfo_t serv;
+
+ printk(KERN_INFO "%s\n", version);
+ if (lockup_hack)
+ printk(KINF_XIRC "lockup hack is enabled\n");
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KNOT_XIRC "Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ DEBUG(0, "pc_debug=%d\n", pc_debug);
+ register_pccard_driver(&dev_info, &xirc2ps_attach, &xirc2ps_detach);
+ return 0;
+}
+
+static void __exit
+exit_xirc2ps_cs(void)
+{
+ DEBUG(0, "unloading\n");
+ unregister_pccard_driver(&dev_info);
+ while (dev_list) {
+ if (dev_list->state & DEV_CONFIG)
+ xirc2ps_release((u_long)dev_list);
+ if (dev_list) /* xirc2ps_release() might already have detached... */
+ xirc2ps_detach(dev_list);
+ }
+}
+
+module_init(init_xirc2ps_cs);
+module_exit(exit_xirc2ps_cs);
+
diff --git a/linux/pcmcia-cs/glue/ds.c b/linux/pcmcia-cs/glue/ds.c
new file mode 100644
index 0000000..cc4b92b
--- /dev/null
+++ b/linux/pcmcia-cs/glue/ds.c
@@ -0,0 +1,454 @@
+/*
+ * pcmcia-socket `device' driver
+ *
+ * Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* This file is included from linux/pcmcia-cs/modules/ds.c. */
+
+/*
+ * This is really ugly. But this is glue code, so... It's about the `kfree'
+ * symbols in <linux/malloc.h> and <kern/kalloc.h>.
+ */
+#undef kfree
+
+/*
+ * <kern/sched_prim.h> defines another event_t which is not used in this
+ * file, so name it mach_event_t to avoid a clash.
+ */
+#define event_t mach_event_t
+#include <kern/sched_prim.h>
+#undef event_t
+
+#include <mach/port.h>
+#include <mach/notify.h>
+#include <mach/mig_errors.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <device/device_types.h>
+#include <device/device_port.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+
+#include <device/device_emul.h>
+
+#include <device/device_reply.user.h>
+
+/* Eliminate the queue_empty macro from Mach header files. */
+#undef queue_empty
+
+struct device_emulation_ops linux_pcmcia_emulation_ops;
+
+/*
+ * We have our very own device emulation stack because we need to carry a
+ * pointer from the open call via read until the final close call: a
+ * pointer to the user's event queue.
+ */
+struct mach_socket_device {
+ /*
+ * Pointer to the mach_device we have allocated. This must be the
+ * first entry in this struct, in order to be able to cast to
+ * mach_device.
+ */
+ struct mach_device mach_dev;
+
+ /*
+ * Pointer to the user info of pcmcia data services.
+ */
+ user_info_t *user;
+
+ /*
+ * Cache for carrying data from set_status to get_status calls. This
+ * is needed for write ioctls.
+ */
+ ds_ioctl_arg_t carry;
+};
+
+
+static void
+ds_device_deallocate(void *p)
+{
+ mach_device_t device = (mach_device_t) p;
+
+ simple_lock(&device->ref_lock);
+ if (--device->ref_count > 0)
+ {
+ simple_unlock(&device->ref_lock);
+ return;
+ }
+
+ simple_unlock(&device->ref_lock);
+
+ /*
+ * do what the original ds_release would do, ...
+ */
+ socket_t i = device->dev_number;
+ socket_info_t *s;
+ user_info_t *user, **link;
+
+ s = &socket_table[i];
+ user = ((struct mach_socket_device *) device)->user;
+
+ /* allow the device to be accessed again ... */
+ if(device->flag & D_WRITE)
+ s->state &= ~SOCKET_BUSY;
+
+ /* Unlink user data structure */
+ for (link = &s->user; *link; link = &(*link)->next)
+ if (*link == user) break;
+
+ if(link)
+ {
+ *link = user->next;
+ user->user_magic = 0;
+ linux_kfree(user);
+ }
+
+ /* now finally reap the device */
+ linux_kfree(device);
+}
+
+/*
+ * Return the send right associated with this socket device incarnation.
+ */
+static ipc_port_t
+dev_to_port(void *d)
+{
+ struct mach_device *dev = d;
+
+ if(! dev)
+ return IP_NULL;
+
+ ipc_port_t port = ipc_port_make_send(dev->port);
+
+ ds_device_deallocate(dev);
+ return port;
+}
+
+
+static inline int
+atoi(const char *ptr)
+{
+ if(! ptr)
+ return 0;
+
+ int i = 0;
+ while(*ptr >= '0' && *ptr <= '9')
+ i = i * 10 + *(ptr ++) - '0';
+
+ return i;
+}
+
+
+/*
+ * Try to open the per-socket pseudo device `socket%d'.
+ */
+static io_return_t
+device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode, char *name, device_t *devp /* out */)
+{
+ if(! socket_table)
+ return D_NO_SUCH_DEVICE;
+
+ if(strlen(name) < 7 || strncmp(name, "socket", 6))
+ return D_NO_SUCH_DEVICE;
+
+ socket_t i = atoi(name + 6);
+ if(i >= MAX_SOCKS || i >= sockets)
+ return D_NO_SUCH_DEVICE;
+
+ io_return_t err = D_SUCCESS;
+
+ struct mach_device *dev;
+ dev = linux_kmalloc(sizeof(struct mach_socket_device), GFP_KERNEL);
+ if(! dev)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+
+ memset(dev, 0, sizeof(struct mach_socket_device));
+ mach_device_reference(dev);
+
+ /* now do what ds_open would do if it were in charge */
+ socket_info_t *s = &socket_table[i];
+
+ if(mode & D_WRITE)
+ {
+ if(s->state & SOCKET_BUSY)
+ {
+ err = D_ALREADY_OPEN;
+ goto out;
+ }
+ else
+ s->state |= SOCKET_BUSY;
+ }
+
+ user_info_t *user = linux_kmalloc(sizeof(user_info_t), GFP_KERNEL);
+ if(! user)
+ {
+ err = D_NO_MEMORY;
+ goto out;
+ }
+
+ user->event_tail = user->event_head = 0;
+ user->next = s->user;
+ user->user_magic = USER_MAGIC;
+ s->user = user;
+
+ ((struct mach_socket_device *) dev)->user = user;
+
+ if(s->state & SOCKET_PRESENT)
+ queue_event(user, CS_EVENT_CARD_INSERTION);
+
+ /* just set up the rest of our mach_device now ... */
+ dev->dev.emul_ops = &linux_pcmcia_emulation_ops;
+ dev->dev.emul_data = dev;
+
+ dev->dev_number = i;
+ dev->flag = mode;
+
+ dev->port = ipc_port_alloc_kernel();
+ if(dev->port == IP_NULL)
+ {
+ err = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+
+ mach_device_reference(dev);
+ ipc_kobject_set(dev->port, (ipc_kobject_t) &dev->dev, IKOT_DEVICE);
+
+ /* request no-senders notifications on device port */
+ ipc_port_t notify = ipc_port_make_sonce(dev->port);
+ ip_lock(dev->port);
+ ipc_port_nsrequest(dev->port, 1, notify, &notify);
+ assert (notify == IP_NULL);
+
+ out:
+ if(err)
+ {
+ if(dev)
+ {
+ if(dev->port != IP_NULL)
+ {
+ ipc_kobject_set(dev->port, IKO_NULL, IKOT_NONE);
+ ipc_port_dealloc_kernel(dev->port);
+ }
+
+ linux_kfree(dev);
+ dev = NULL;
+ }
+ }
+ else
+ dev->state = DEV_STATE_OPEN;
+
+ *devp = &dev->dev;
+
+ if (IP_VALID (reply_port))
+ ds_device_open_reply(reply_port, reply_port_type,
+ err, dev_to_port(dev));
+ return MIG_NO_REPLY;
+}
+
+
+/*
+ * Close the device DEV.
+ */
+static int
+device_close (void *devp)
+{
+ struct mach_device *dev = (struct mach_device *) devp;
+
+ dev->state = DEV_STATE_CLOSING;
+
+ /* check whether there is a blocked read request pending,
+ * in that case, abort that one before closing
+ */
+ while(dev->ref_count > 2)
+ {
+ socket_t i = dev->dev_number;
+ socket_info_t *s = &socket_table[i];
+ wake_up_interruptible(&s->queue);
+
+ /* wait for device_read to exit */
+ return D_INVALID_OPERATION;
+ }
+
+ dev_port_remove(dev);
+ ipc_port_dealloc_kernel(dev->port);
+
+ return 0;
+}
+
+static io_return_t
+device_read(void *d, ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+ recnum_t recnum, int bytes_wanted,
+ io_buf_ptr_t *data, unsigned int *data_count)
+{
+ struct mach_device *dev = (struct mach_device *) d;
+
+ if(dev->state != DEV_STATE_OPEN)
+ return D_NO_SUCH_DEVICE;
+
+ if(! IP_VALID(reply_port)) {
+ printk(KERN_INFO "ds: device_read: invalid reply port.\n");
+ return (MIG_NO_REPLY); /* no sense in doing anything */
+ }
+
+ /* prepare an io request structure */
+ io_req_t ior;
+ io_req_alloc(ior, 0);
+
+ ior->io_device = dev;
+ ior->io_unit = dev->dev_number;
+ ior->io_op = IO_READ | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_recnum = recnum;
+ ior->io_data = 0;
+ ior->io_count = bytes_wanted;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = ds_read_done;
+ ior->io_reply_port = reply_port;
+ ior->io_reply_port_type = reply_port_type;
+
+ /*
+ * The ior keeps an extra reference for the device.
+ */
+ mach_device_reference(dev);
+
+ /* do the read finally */
+ io_return_t result = D_SUCCESS;
+
+ result = device_read_alloc(ior, ior->io_count);
+ if(result != KERN_SUCCESS)
+ goto out;
+
+ socket_t i = dev->dev_number;
+ socket_info_t *s = &socket_table[i];
+ user_info_t *user = ((struct mach_socket_device *) dev)->user;
+
+ if(ior->io_count < 4)
+ return D_INVALID_SIZE;
+
+ if(CHECK_USER(user))
+ {
+ result = D_IO_ERROR;
+ goto out;
+ }
+
+ while(queue_empty(user))
+ {
+ if(ior->io_mode & D_NOWAIT)
+ {
+ result = D_WOULD_BLOCK;
+ goto out;
+ }
+ else
+ interruptible_sleep_on(&s->queue);
+
+ if(dev->state == DEV_STATE_CLOSING)
+ {
+ result = D_DEVICE_DOWN;
+ goto out;
+ }
+ }
+
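+ /* Deliver exactly one queued Card Services event code to the caller;
+ * whatever remains of the requested buffer is reported as residual.
+ */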
+ event_t ev = get_queued_event(user);
+ memcpy(ior->io_data, &ev, sizeof(event_t));
+
+ ior->io_residual = ior->io_count - sizeof(event_t);
+
+ out:
+ /*
+ * Return result via ds_read_done.
+ */
+ ior->io_error = result;
+ (void) ds_read_done(ior);
+ io_req_free(ior);
+
+ return (MIG_NO_REPLY); /* reply has already been sent. */
+}
+
+
+static io_return_t
+device_set_status(void *d, dev_flavor_t req, dev_status_t arg,
+ mach_msg_type_number_t sz)
+{
+ struct mach_socket_device *dev = (struct mach_socket_device *) d;
+
+ if(sz * sizeof(int) > sizeof(ds_ioctl_arg_t))
+ return D_INVALID_OPERATION;
+
+ if(dev->mach_dev.state != DEV_STATE_OPEN)
+ return D_NO_SUCH_DEVICE;
+
+ unsigned int ioctl_sz = (req & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+ memcpy(&dev->carry, arg, ioctl_sz);
+
+ return D_SUCCESS;
+}
+
+static io_return_t
+device_get_status(void *d, dev_flavor_t req, dev_status_t arg,
+ mach_msg_type_number_t *sz)
+{
+ struct mach_socket_device *dev = (struct mach_socket_device *) d;
+
+ if(dev->mach_dev.state != DEV_STATE_OPEN)
+ return D_NO_SUCH_DEVICE;
+
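+ /* For ioctls with input data the argument was stashed in dev->carry by
+ * a preceding device_set_status call; ds_ioctl works on that copy and,
+ * for IOC_OUT ioctls, the result is copied back out below.
+ */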
+ struct inode inode;
+ inode.i_rdev = dev->mach_dev.dev_number;
+ int ret = ds_ioctl(&inode, NULL, req, (u_long) &dev->carry);
+
+ if(ret)
+ return D_IO_ERROR;
+
+ unsigned int ioctl_sz = (req & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+ if(req & IOC_OUT) memcpy(arg, &dev->carry, ioctl_sz);
+
+ return D_SUCCESS;
+}
+
+
+struct device_emulation_ops linux_pcmcia_emulation_ops =
+ {
+ (void*) mach_device_reference,
+ ds_device_deallocate,
+ dev_to_port,
+ device_open,
+ device_close,
+ NULL, /* device_write */
+ NULL, /* write_inband */
+ device_read,
+ NULL, /* read_inband */
+ device_set_status,
+ device_get_status,
+ NULL, /* set_filter */
+ NULL, /* map */
+ NULL, /* no_senders */
+ NULL, /* write_trap */
+ NULL /* writev_trap */
+ };
diff --git a/linux/pcmcia-cs/glue/pcmcia.c b/linux/pcmcia-cs/glue/pcmcia.c
new file mode 100644
index 0000000..3beebe3
--- /dev/null
+++ b/linux/pcmcia-cs/glue/pcmcia.c
@@ -0,0 +1,121 @@
+/*
+ * pcmcia bridge initialization
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/pci.h>
+
+#include <asm/spinlock.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+
+extern int init_pcmcia_cs(void);
+extern int init_i82365(void);
+extern int init_pcmcia_ds(void);
+
+extern int pcmcia_modinit_pcnet_cs(void);
+extern int pcmcia_modinit_3c589_cs(void);
+extern int pcmcia_modinit_3c574_cs(void);
+extern int pcmcia_modinit_3c575_cb(void);
+extern int pcmcia_modinit_axnet_cs(void);
+extern int pcmcia_modinit_eepro100_cb(void);
+extern int pcmcia_modinit_epic_cb(void);
+extern int pcmcia_modinit_fmvj18x_cs(void);
+extern int pcmcia_modinit_nmclan_cs(void);
+extern int pcmcia_modinit_smc91c92_cs(void);
+extern int pcmcia_modinit_tulip_cb(void);
+extern int pcmcia_modinit_xirc2ps_cs(void);
+
+extern int pcmcia_modinit_orinoco_cs(void);
+
+/*
+ * pcmcia bridge initialisation.
+ */
+void
+pcmcia_init(void)
+{
+ init_pcmcia_cs();
+
+#ifdef CONFIG_I82365
+ init_i82365();
+#endif
+
+ init_pcmcia_ds();
+
+ /*
+ * Call the initialization routines of each driver.
+ */
+#ifdef CONFIG_PCNET_CS
+ pcmcia_modinit_pcnet_cs();
+#endif
+
+#ifdef CONFIG_3C589_CS
+ pcmcia_modinit_3c589_cs();
+#endif
+
+#ifdef CONFIG_3C574_CS
+ pcmcia_modinit_3c574_cs();
+#endif
+
+#ifdef CONFIG_3C575_CB
+ pcmcia_modinit_3c575_cb();
+#endif
+
+#ifdef CONFIG_AXNET_CS
+ pcmcia_modinit_axnet_cs();
+#endif
+
+#ifdef CONFIG_EEPRO100_CB
+ pcmcia_modinit_eepro100_cb();
+#endif
+
+#ifdef CONFIG_EPIC_CB
+ pcmcia_modinit_epic_cb();
+#endif
+
+#ifdef CONFIG_FMVJ18X_CS
+ pcmcia_modinit_fmvj18x_cs();
+#endif
+
+#ifdef CONFIG_NMCLAN_CS
+ pcmcia_modinit_nmclan_cs();
+#endif
+
+#ifdef CONFIG_SMC91C92_CS
+ pcmcia_modinit_smc91c92_cs();
+#endif
+
+#ifdef CONFIG_TULIP_CB
+ pcmcia_modinit_tulip_cb();
+#endif
+
+#ifdef CONFIG_XIRC2PS_CS
+ pcmcia_modinit_xirc2ps_cs();
+#endif
+
+#ifdef CONFIG_ORINOCO_CS
+ pcmcia_modinit_orinoco_cs();
+#endif
+}
diff --git a/linux/pcmcia-cs/glue/pcmcia_glue.h b/linux/pcmcia-cs/glue/pcmcia_glue.h
new file mode 100644
index 0000000..691c1b9
--- /dev/null
+++ b/linux/pcmcia-cs/glue/pcmcia_glue.h
@@ -0,0 +1,264 @@
+/*
+ * pcmcia card services glue code
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PCMCIA_GLUE_H
+#define _PCMCIA_GLUE_H
+
+/*
+ * pcmcia glue configuration
+ */
+#define PCMCIA_DEBUG 4
+/* Maximum number of sockets supported by the glue code. */
+#define MAX_SOCKS 8
+
+
+/*
+ * Linux kernel version handling.
+ */
+#include <linux/version.h>
+#define UTS_VERSION "" /* Hm. */
+#define KERNEL_VERSION(v,p,s) (((v)<<16)+((p)<<8)+(s))
+
+
+/*
+ * Some cardbus drivers want `CARDBUS' to be defined.
+ */
+#ifdef CONFIG_CARDBUS
+#define CARDBUS 1
+#endif
+
+
+/*
+ * Some includes.
+ */
+#include <linux/malloc.h>
+#include <pcmcia/driver_ops.h>
+
+
+/*
+ * ioremap and iounmap
+ */
+#include <linux/pci.h>
+#include <linux/compatmac.h>
+#define iounmap(x) (((long)x<0x100000)?0:vfree ((void*)x))
+
+
+/*
+ * These are implemented in rsrc_mgr.c.
+ */
+extern int check_mem_region(u_long base, u_long num);
+extern void request_mem_region(u_long base, u_long num, char *name);
+extern void release_mem_region(u_long base, u_long num);
+
+
+/*
+ * Timer and delaying functions.
+ */
+#include <linux/delay.h>
+#define mod_timer(a, b) \
+ do { del_timer(a); (a)->expires = (b); add_timer(a); } while (0)
+#define mdelay(x) \
+ do { int i; for (i=0;i<x;i++) __udelay(1000); } while (0)
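+/*
+ * Note: this mdelay busy-waits in 1 ms steps via __udelay, so it is only
+ * suitable for short delays.
+ */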
+
+
+/*
+ * GNU Mach's Linux glue code doesn't have
+ * `interruptible_sleep_on_timeout'. For the moment let's use the
+ * non-timeout variant. :-/
+ */
+#define interruptible_sleep_on_timeout(w,t) \
+ interruptible_sleep_on(w)
+
+/*
+ * The macro implementation relies on the current_set symbol, which doesn't
+ * appear to be available on GNU Mach. TODO: How to fix this properly?
+ */
+#undef signal_pending
+#define signal_pending(c) \
+ 0
+
+
+/*
+ * Byte order stuff. TODO: This does not work on big endian systems,
+ * does it? Move to asm-i386?
+ */
+#include <asm/byteorder.h>
+#ifndef le16_to_cpu
+#define le16_to_cpu(x) (x)
+#define le32_to_cpu(x) (x)
+#endif
+#ifndef cpu_to_le16
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#endif
+
+
+/*
+ * There is no `wake_up_interruptible' on GNU Mach. Use plain `wake_up'
+ * for the moment. TODO.
+ */
+#define wake_up_interruptible wake_up
+
+
+/* Eliminate the 4-arg versions from <linux/compatmac.h>. */
+#undef pci_read_config_word
+#undef pci_read_config_dword
+
+#define bus_number(pci_dev) ((pci_dev)->bus->number)
+#define devfn_number(pci_dev) ((pci_dev)->devfn)
+
+#define pci_read_config_byte(pdev, where, valp) \
+ pcibios_read_config_byte(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_word(pdev, where, valp) \
+ pcibios_read_config_word(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_dword(pdev, where, valp) \
+ pcibios_read_config_dword(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_write_config_byte(pdev, where, val) \
+ pcibios_write_config_byte(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_word(pdev, where, val) \
+ pcibios_write_config_word(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_dword(pdev, where, val) \
+ pcibios_write_config_dword(bus_number(pdev), devfn_number(pdev), where, val)
+
+
+/*
+ * From pcmcia-cs/include/linux/pci.h.
+ */
+#define pci_for_each_dev(p) for (p = pci_devices; p; p = p->next)
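+/*
+ * Hypothetical usage sketch (the vendor ID check is just an example):
+ *
+ *   struct pci_dev *p;
+ *   pci_for_each_dev(p)
+ *       if (p->vendor == 0x104c)
+ *           printk("TI bridge at %02x:%02x\n",
+ *                  bus_number(p), devfn_number(p));
+ */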
+
+
+
+/*
+ * These are defined in pci_fixup.c.
+ */
+extern struct pci_dev *pci_find_slot(u_int bus, u_int devfn);
+extern struct pci_dev *pci_find_class(u_int class, struct pci_dev *from);
+extern int pci_set_power_state(struct pci_dev *dev, int state);
+extern int pci_enable_device(struct pci_dev *dev);
+
+extern u32 pci_irq_mask;
+
+
+#ifdef PCMCIA_CLIENT
+/*
+ * Annoyingly, we also need `mach_device' (at least in ds.c), and that
+ * one is typedef'd to `device', so we cannot simply include
+ * `netdevice.h' when we're compiling the core.
+ *
+ * For compilation of the clients `PCMCIA_CLIENT' is defined through the
+ * Makefile.
+ */
+#include <linux/netdevice.h>
+#include <linux/kcomp.h>
+
+
+/*
+ * init_dev_name and copy_dev_name glue (for `PCMCIA_CLIENT's only).
+ */
+static inline void
+init_dev_name(struct net_device *dev, dev_node_t node)
+{
+ /* just allocate some space for the device name;
+ * register_netdev will happily provide one for us
+ */
+ dev->name = kmalloc(8, GFP_KERNEL);
+ dev->name[0] = 0;
+
+ /*
+ * dev->init needs to be initialized in order for register_netdev to work
+ */
+ int stub(struct device *dev)
+ {
+ (void) dev;
+ return 0;
+ }
+ dev->init = stub;
+}
+
+#define copy_dev_name(node, dev) do { } while (0)
+#endif /* PCMCIA_CLIENT */
+
+
+/*
+ * Some network interface glue, additional to the one from
+ * <linux/kcomp.h>.
+ */
+#define netif_mark_up(dev) do { (dev)->start = 1; } while (0)
+#define netif_mark_down(dev) do { (dev)->start = 0; } while (0)
+#define netif_carrier_on(dev) do { dev->flags |= IFF_RUNNING; } while (0)
+#define netif_carrier_off(dev) do { dev->flags &= ~IFF_RUNNING; } while (0)
+#define tx_timeout_check(dev, tx_timeout) \
+ do { if (test_and_set_bit(0, (void *)&(dev)->tbusy) != 0) { \
+ if (jiffies - (dev)->trans_start < TX_TIMEOUT) return 1; \
+ tx_timeout(dev); \
+ } } while (0)
+
+
+/*
+ * Some `struct netdevice' interface glue (from the pcmcia-cs package).
+ */
+#define skb_tx_check(dev, skb) \
+ do { if (skb == NULL) { dev_tint(dev); return 0; } \
+ if (skb->len <= 0) return 0; } while (0)
+#define tx_timeout_check(dev, tx_timeout) \
+ do { if (test_and_set_bit(0, (void *)&(dev)->tbusy) != 0) { \
+ if (jiffies - (dev)->trans_start < TX_TIMEOUT) return 1; \
+ tx_timeout(dev); \
+ } } while (0)
+#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE)
+#define net_device_stats enet_statistics
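+/* The enet_statistics structure used here has no rx_bytes/tx_bytes
+ * fields, so these byte-count helpers just evaluate their argument and
+ * discard it. */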
+#define add_rx_bytes(stats, n) do { int x; x = (n); } while (0)
+#define add_tx_bytes(stats, n) do { int x; x = (n); } while (0)
+
+
+
+/*
+ * TODO: This is i386 dependent.
+ */
+#define readw_ns(p) readw(p)
+#define writew_ns(v,p) writew(v,p)
+
+
+
+
+/*
+ * We compile everything directly into the GNU Mach kernel; there are no
+ * modules.
+ */
+#define MODULE_PARM(a,b)
+#define MODULE_AUTHOR(a)
+#define MODULE_DESCRIPTION(a)
+#define MODULE_LICENSE(a)
+
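+/*
+ * module_init(init_foo_cs) token-pastes into a wrapper function named
+ * pcmcia_modinit_foo_cs(); these are the symbols that pcmcia_init() in
+ * glue/pcmcia.c calls at boot ("init_foo_cs" is just an illustrative
+ * name). module_exit handlers are simply discarded, since nothing is
+ * ever unloaded.
+ */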
+#define module_init(a) \
+ void pcmcia_mod ## a (void) { a(); return; }
+#define module_exit(a)
+
+/*
+ * TODO: We don't have `disable_irq_nosync', do we need it? This is used
+ * by the axnet_cs client driver only.
+ */
+#define disable_irq_nosync(irq) disable_irq(irq)
+
+
+#endif /* _PCMCIA_GLUE_H */
diff --git a/linux/pcmcia-cs/glue/wireless_glue.h b/linux/pcmcia-cs/glue/wireless_glue.h
new file mode 100644
index 0000000..61006b4
--- /dev/null
+++ b/linux/pcmcia-cs/glue/wireless_glue.h
@@ -0,0 +1,158 @@
+/*
+ * wireless network glue code
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ * Written by Stefan Siegl <stesie@brokenpipe.de>.
+ *
+ * This file is part of GNU Mach.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _WIRELESS_GLUE_H
+#define _WIRELESS_GLUE_H
+
+/*
+ * Wireless glue configuration.
+ */
+
+/*
+ * Include the pcmcia glue as well, in case the kernel is configured for
+ * it.
+ */
+#ifdef CONFIG_PCMCIA
+#define PCMCIA_CLIENT
+#include "pcmcia_glue.h"
+#endif
+
+
+/*
+ * Definition of a `BUG' function: print a message and force a crash.
+ */
+#ifndef BUG
+#define BUG() \
+ do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ *(int *)0=0; } while (0)
+#endif
+
+
+#include <kern/debug.h>
+
+/*
+ * We need some `schedule_task' replacement. This is defined in
+ * kernel/context.c in the Linux kernel.
+ */
+static inline int
+schedule_task(struct tq_struct *task)
+{
+ printk(KERN_INFO "schedule_task: not implemented, task=%p\n", task);
+ Debugger("schedule_task");
+ return 0; /* fail */
+}
+
+
+/*
+ * min() and max() macros that also do strict type-checking. See the
+ * "unnecessary" pointer comparison.
+ */
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+/*
+ * ... and if you can't take the strict types, you can specify one
+ * yourself.
+ */
+#define min_t(type,x,y) \
+ ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
+#define max_t(type,x,y) \
+ ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
+
+
+#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE)
+
+
+/*
+ * TODO: this is i386 specific.
+ */
+#define le16_to_cpus(x) do { } while(0)
+
+
+/*
+ * Some wireless drivers check for a return value from `copy_to_user',
+ * however the `memcpy_tofs' implementation does return void.
+ */
+#undef copy_to_user
+#define copy_to_user(a,b,c) ((memcpy_tofs(a,b,c)), 0)
+
+
+/*
+ * Some more macros that are available on 2.2 and 2.4 Linux kernels.
+ */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+
+/*
+ * TQUEUE glue.
+ */
+#define PREPARE_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ (_tq)->next = 0; \
+ (_tq)->sync = 0; \
+ PREPARE_TQUEUE((_tq), (_routine), (_data)); \
+ } while (0)
+
+
+/*
+ * `etherdev' allocator.
+ */
+static inline struct net_device *
+alloc_etherdev(int sz)
+{
+ struct net_device *dev;
+ sz += sizeof(*dev) + 31;
+
+ if (!(dev = kmalloc(sz, GFP_KERNEL)))
+ return NULL;
+ memset(dev, 0, sz);
+
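+ /* Point dev->priv just past the net_device itself, rounded up to the
+ * next 32-byte boundary (hence the "+ 31 ... & ~31" arithmetic and the
+ * extra 31 bytes added to sz above).
+ */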
+ if (sz)
+ dev->priv = (void *)(((long)dev + sizeof(*dev) + 31) & ~31);
+
+ /* just allocate some space for the device name;
+ * register_netdev will happily provide one for us
+ */
+ dev->name = kmalloc(8, GFP_KERNEL);
+ dev->name[0] = 0;
+
+ ether_setup(dev);
+ return dev;
+}
+
+
+#endif /* _WIRELESS_GLUE_H */
diff --git a/linux/pcmcia-cs/include/linux/crc32.h b/linux/pcmcia-cs/include/linux/crc32.h
new file mode 100644
index 0000000..008a2da
--- /dev/null
+++ b/linux/pcmcia-cs/include/linux/crc32.h
@@ -0,0 +1,49 @@
+#ifndef _COMPAT_CRC32_H
+#define _COMPAT_CRC32_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18))
+
+#include_next <linux/crc32.h>
+
+#else
+
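+/* Bit-serial Ethernet CRC-32 (polynomial 0x04c11db7), computed MSB
+ * first; drivers typically use a few high-order bits of the result to
+ * index their multicast hash filter. */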
+static inline u_int ether_crc(int length, u_char *data)
+{
+ static const u_int ethernet_polynomial = 0x04c11db7U;
+ int crc = 0xffffffff; /* Initial value. */
+
+ while (--length >= 0) {
+ u_char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ }
+ return crc;
+}
+
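+/* The same CRC-32, but computed LSB first with the reflected polynomial
+ * 0xedb88320. */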
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ static unsigned const ethernet_polynomial_le = 0xedb88320U;
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+
+#endif
+
+#endif /* _COMPAT_CRC32_H */
+
diff --git a/linux/pcmcia-cs/include/linux/slab.h b/linux/pcmcia-cs/include/linux/slab.h
new file mode 100644
index 0000000..634084d
--- /dev/null
+++ b/linux/pcmcia-cs/include/linux/slab.h
@@ -0,0 +1,12 @@
+#ifndef _COMPAT_SLAB_H
+#define _COMPAT_SLAB_H
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0))
+#include <linux/malloc.h>
+#else
+#include_next <linux/slab.h>
+#endif
+
+#endif /* _COMPAT_SLAB_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/bulkmem.h b/linux/pcmcia-cs/include/pcmcia/bulkmem.h
new file mode 100644
index 0000000..7748d44
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/bulkmem.h
@@ -0,0 +1,195 @@
+/*
+ * Definitions for bulk memory services
+ *
+ * bulkmem.h 1.13 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ * bulkmem.h 1.3 1995/05/27 04:49:49
+ */
+
+#ifndef _LINUX_BULKMEM_H
+#define _LINUX_BULKMEM_H
+
+/* For GetFirstRegion and GetNextRegion */
+typedef struct region_info_t {
+ u_int Attributes;
+ u_int CardOffset;
+ u_int RegionSize;
+ u_int AccessSpeed;
+ u_int BlockSize;
+ u_int PartMultiple;
+ u_char JedecMfr, JedecInfo;
+ memory_handle_t next;
+} region_info_t;
+
+#define REGION_TYPE 0x0001
+#define REGION_TYPE_CM 0x0000
+#define REGION_TYPE_AM 0x0001
+#define REGION_PREFETCH 0x0008
+#define REGION_CACHEABLE 0x0010
+#define REGION_BAR_MASK 0xe000
+#define REGION_BAR_SHIFT 13
+
+/* For OpenMemory */
+typedef struct open_mem_t {
+ u_int Attributes;
+ u_int Offset;
+} open_mem_t;
+
+/* Attributes for OpenMemory */
+#define MEMORY_TYPE 0x0001
+#define MEMORY_TYPE_CM 0x0000
+#define MEMORY_TYPE_AM 0x0001
+#define MEMORY_EXCLUSIVE 0x0002
+#define MEMORY_PREFETCH 0x0008
+#define MEMORY_CACHEABLE 0x0010
+#define MEMORY_BAR_MASK 0xe000
+#define MEMORY_BAR_SHIFT 13
+
+typedef struct eraseq_entry_t {
+ memory_handle_t Handle;
+ u_char State;
+ u_int Size;
+ u_int Offset;
+ void *Optional;
+} eraseq_entry_t;
+
+typedef struct eraseq_hdr_t {
+ int QueueEntryCnt;
+ eraseq_entry_t *QueueEntryArray;
+} eraseq_hdr_t;
+
+#define ERASE_QUEUED 0x00
+#define ERASE_IN_PROGRESS(n) (((n) > 0) && ((n) < 0x80))
+#define ERASE_IDLE 0xff
+#define ERASE_PASSED 0xe0
+#define ERASE_FAILED 0xe1
+
+#define ERASE_MISSING 0x80
+#define ERASE_MEDIA_WRPROT 0x84
+#define ERASE_NOT_ERASABLE 0x85
+#define ERASE_BAD_OFFSET 0xc1
+#define ERASE_BAD_TECH 0xc2
+#define ERASE_BAD_SOCKET 0xc3
+#define ERASE_BAD_VCC 0xc4
+#define ERASE_BAD_VPP 0xc5
+#define ERASE_BAD_SIZE 0xc6
+
+/* For CopyMemory */
+typedef struct copy_op_t {
+ u_int Attributes;
+ u_int SourceOffset;
+ u_int DestOffset;
+ u_int Count;
+} copy_op_t;
+
+/* For ReadMemory and WriteMemory */
+typedef struct mem_op_t {
+ u_int Attributes;
+ u_int Offset;
+ u_int Count;
+} mem_op_t;
+
+#define MEM_OP_BUFFER 0x01
+#define MEM_OP_BUFFER_USER 0x00
+#define MEM_OP_BUFFER_KERNEL 0x01
+#define MEM_OP_DISABLE_ERASE 0x02
+#define MEM_OP_VERIFY 0x04
+
+/* For RegisterMTD */
+typedef struct mtd_reg_t {
+ u_int Attributes;
+ u_int Offset;
+ u_long MediaID;
+} mtd_reg_t;
+
+/*
+ * Definitions for MTD requests
+ */
+
+typedef struct mtd_request_t {
+ u_int SrcCardOffset;
+ u_int DestCardOffset;
+ u_int TransferLength;
+ u_int Function;
+ u_long MediaID;
+ u_int Status;
+ u_int Timeout;
+} mtd_request_t;
+
+/* Fields in MTD Function */
+#define MTD_REQ_ACTION 0x003
+#define MTD_REQ_ERASE 0x000
+#define MTD_REQ_READ 0x001
+#define MTD_REQ_WRITE 0x002
+#define MTD_REQ_COPY 0x003
+#define MTD_REQ_NOERASE 0x004
+#define MTD_REQ_VERIFY 0x008
+#define MTD_REQ_READY 0x010
+#define MTD_REQ_TIMEOUT 0x020
+#define MTD_REQ_LAST 0x040
+#define MTD_REQ_FIRST 0x080
+#define MTD_REQ_KERNEL 0x100
+
+/* Status codes */
+#define MTD_WAITREQ 0x00
+#define MTD_WAITTIMER 0x01
+#define MTD_WAITRDY 0x02
+#define MTD_WAITPOWER 0x03
+
+/*
+ * Definitions for MTD helper functions
+ */
+
+/* For MTDModifyWindow */
+typedef struct mtd_mod_win_t {
+ u_int Attributes;
+ u_int AccessSpeed;
+ u_int CardOffset;
+} mtd_mod_win_t;
+
+/* For MTDSetVpp */
+typedef struct mtd_vpp_req_t {
+ u_char Vpp1, Vpp2;
+} mtd_vpp_req_t;
+
+/* For MTDRDYMask */
+typedef struct mtd_rdy_req_t {
+ u_int Mask;
+} mtd_rdy_req_t;
+
+enum mtd_helper {
+ MTDRequestWindow, MTDModifyWindow, MTDReleaseWindow,
+ MTDSetVpp, MTDRDYMask
+};
+
+#ifdef IN_CARD_SERVICES
+extern int MTDHelperEntry(int func, void *a1, void *a2);
+#else
+extern int MTDHelperEntry(int func, ...);
+#endif
+
+#endif /* _LINUX_BULKMEM_H */
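How the erase-queue structures above fit together is easiest to see in a short sketch. The following is illustrative only and not part of the pcmcia-cs sources; the region handle and block size are hypothetical, and a real client would go on to hand the header to Card Services via RegisterEraseQueue/CheckEraseQueue (declared in cs.h further down in this patch).

#include <pcmcia/cs_types.h>
#include <pcmcia/bulkmem.h>

static eraseq_entry_t my_entries[2];
static eraseq_hdr_t my_queue = { 2, my_entries };   /* registered later */

/* Queue two consecutive erase blocks of 'block' bytes on 'region'. */
static void init_erase_queue(memory_handle_t region, u_int block)
{
    int i;

    for (i = 0; i < 2; i++) {
        my_entries[i].Handle   = region;
        my_entries[i].State    = ERASE_QUEUED;
        my_entries[i].Size     = block;
        my_entries[i].Offset   = i * block;
        my_entries[i].Optional = NULL;
    }
}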
diff --git a/linux/pcmcia-cs/include/pcmcia/bus_ops.h b/linux/pcmcia-cs/include/pcmcia/bus_ops.h
new file mode 100644
index 0000000..d5f362a
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/bus_ops.h
@@ -0,0 +1,157 @@
+/*
+ * bus_ops.h 1.12 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_BUS_OPS_H
+#define _LINUX_BUS_OPS_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_VIRTUAL_BUS
+
+typedef struct bus_operations {
+ void *priv;
+ u32 (*b_in)(void *bus, u32 port, s32 sz);
+ void (*b_ins)(void *bus, u32 port, void *buf,
+ u32 count, s32 sz);
+ void (*b_out)(void *bus, u32 val, u32 port, s32 sz);
+ void (*b_outs)(void *bus, u32 port, void *buf,
+ u32 count, s32 sz);
+ void *(*b_ioremap)(void *bus, u_long ofs, u_long sz);
+ void (*b_iounmap)(void *bus, void *addr);
+ u32 (*b_read)(void *bus, void *addr, s32 sz);
+ void (*b_write)(void *bus, u32 val, void *addr, s32 sz);
+ void (*b_copy_from)(void *bus, void *d, void *s, u32 count);
+ void (*b_copy_to)(void *bus, void *d, void *s, u32 count);
+ int (*b_request_irq)(void *bus, u_int irq,
+ void (*handler)(int, void *,
+ struct pt_regs *),
+ u_long flags, const char *device,
+ void *dev_id);
+ void (*b_free_irq)(void *bus, u_int irq, void *dev_id);
+} bus_operations;
+
+#define bus_inb(b,p) (b)->b_in((b),(p),0)
+#define bus_inw(b,p) (b)->b_in((b),(p),1)
+#define bus_inl(b,p) (b)->b_in((b),(p),2)
+#define bus_inw_ns(b,p) (b)->b_in((b),(p),-1)
+#define bus_inl_ns(b,p) (b)->b_in((b),(p),-2)
+
+#define bus_insb(b,p,a,c) (b)->b_ins((b),(p),(a),(c),0)
+#define bus_insw(b,p,a,c) (b)->b_ins((b),(p),(a),(c),1)
+#define bus_insl(b,p,a,c) (b)->b_ins((b),(p),(a),(c),2)
+#define bus_insw_ns(b,p,a,c) (b)->b_ins((b),(p),(a),(c),-1)
+#define bus_insl_ns(b,p,a,c) (b)->b_ins((b),(p),(a),(c),-2)
+
+#define bus_outb(b,v,p) (b)->b_out((b),(v),(p),0)
+#define bus_outw(b,v,p) (b)->b_out((b),(v),(p),1)
+#define bus_outl(b,v,p) (b)->b_out((b),(v),(p),2)
+#define bus_outw_ns(b,v,p) (b)->b_out((b),(v),(p),-1)
+#define bus_outl_ns(b,v,p) (b)->b_out((b),(v),(p),-2)
+
+#define bus_outsb(b,p,a,c) (b)->b_outs((b),(p),(a),(c),0)
+#define bus_outsw(b,p,a,c) (b)->b_outs((b),(p),(a),(c),1)
+#define bus_outsl(b,p,a,c) (b)->b_outs((b),(p),(a),(c),2)
+#define bus_outsw_ns(b,p,a,c) (b)->b_outs((b),(p),(a),(c),-1)
+#define bus_outsl_ns(b,p,a,c) (b)->b_outs((b),(p),(a),(c),-2)
+
+#define bus_readb(b,a) (b)->b_read((b),(a),0)
+#define bus_readw(b,a) (b)->b_read((b),(a),1)
+#define bus_readl(b,a) (b)->b_read((b),(a),2)
+#define bus_readw_ns(b,a) (b)->b_read((b),(a),-1)
+#define bus_readl_ns(b,a) (b)->b_read((b),(a),-2)
+
+#define bus_writeb(b,v,a) (b)->b_write((b),(v),(a),0)
+#define bus_writew(b,v,a) (b)->b_write((b),(v),(a),1)
+#define bus_writel(b,v,a) (b)->b_write((b),(v),(a),2)
+#define bus_writew_ns(b,v,a) (b)->b_write((b),(v),(a),-1)
+#define bus_writel_ns(b,v,a) (b)->b_write((b),(v),(a),-2)
+
+#define bus_ioremap(b,s,n) (b)->b_ioremap((b),(s),(n))
+#define bus_iounmap(b,a) (b)->b_iounmap((b),(a))
+#define bus_memcpy_fromio(b,d,s,n) (b)->b_copy_from((b),(d),(s),(n))
+#define bus_memcpy_toio(b,d,s,n) (b)->b_copy_to((b),(d),(s),(n))
+
+#define bus_request_irq(b,i,h,f,n,d) \
+ (b)->b_request_irq((b),(i),(h),(f),(n),(d))
+#define bus_free_irq(b,i,d) (b)->b_free_irq((b),(i),(d))
+
+#else
+
+#define bus_inb(b,p) inb(p)
+#define bus_inw(b,p) inw(p)
+#define bus_inl(b,p) inl(p)
+#define bus_inw_ns(b,p) inw_ns(p)
+#define bus_inl_ns(b,p) inl_ns(p)
+
+#define bus_insb(b,p,a,c) insb(p,a,c)
+#define bus_insw(b,p,a,c) insw(p,a,c)
+#define bus_insl(b,p,a,c) insl(p,a,c)
+#define bus_insw_ns(b,p,a,c) insw_ns(p,a,c)
+#define bus_insl_ns(b,p,a,c) insl_ns(p,a,c)
+
+#define bus_outb(b,v,p) outb(v,p)
+#define bus_outw(b,v,p) outw(v,p)
+#define bus_outl(b,v,p) outl(v,p)
+#define bus_outw_ns(b,v,p) outw_ns(v,p)
+#define bus_outl_ns(b,v,p) outl_ns(v,p)
+
+#define bus_outsb(b,p,a,c) outsb(p,a,c)
+#define bus_outsw(b,p,a,c) outsw(p,a,c)
+#define bus_outsl(b,p,a,c) outsl(p,a,c)
+#define bus_outsw_ns(b,p,a,c) outsw_ns(p,a,c)
+#define bus_outsl_ns(b,p,a,c) outsl_ns(p,a,c)
+
+#define bus_readb(b,a) readb(a)
+#define bus_readw(b,a) readw(a)
+#define bus_readl(b,a) readl(a)
+#define bus_readw_ns(b,a) readw_ns(a)
+#define bus_readl_ns(b,a) readl_ns(a)
+
+#define bus_writeb(b,v,a) writeb(v,a)
+#define bus_writew(b,v,a) writew(v,a)
+#define bus_writel(b,v,a) writel(v,a)
+#define bus_writew_ns(b,v,a) writew_ns(v,a)
+#define bus_writel_ns(b,v,a) writel_ns(v,a)
+
+#define bus_ioremap(b,s,n) ioremap(s,n)
+#define bus_iounmap(b,a) iounmap(a)
+#define bus_memcpy_fromio(b,d,s,n) memcpy_fromio(d,s,n)
+#define bus_memcpy_toio(b,d,s,n) memcpy_toio(d,s,n)
+
+#ifdef CONFIG_8xx
+#define bus_request_irq(b,i,h,f,n,d) request_8xxirq((i),(h),(f),(n),(d))
+#else
+#define bus_request_irq(b,i,h,f,n,d) request_irq((i),(h),(f),(n),(d))
+#endif
+
+#define bus_free_irq(b,i,d) free_irq((i),(d))
+
+#endif /* CONFIG_VIRTUAL_BUS */
+
+#endif /* _LINUX_BUS_OPS_H */
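The wrappers above let a client driver be written once and run either against a virtual bus (with CONFIG_VIRTUAL_BUS every access is dispatched through the bus_operations vector) or against the host's ordinary I/O primitives (without it the bus argument is simply ignored). A purely illustrative sketch with made-up register offsets; the bus pointer normally comes from the socket's socket_cap_t (see ss.h at the end of this patch):

struct bus_operations;

/* Read a hypothetical status register one port above the base. */
static u_char card_status(struct bus_operations *bus, u_int port)
{
    return bus_inb(bus, port + 1);
}

/* Issue a command: value first, then port, matching the macros above. */
static void card_command(struct bus_operations *bus, u_int port, u_char cmd)
{
    bus_outb(bus, cmd, port);
}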
diff --git a/linux/pcmcia-cs/include/pcmcia/ciscode.h b/linux/pcmcia-cs/include/pcmcia/ciscode.h
new file mode 100644
index 0000000..e6bacef
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/ciscode.h
@@ -0,0 +1,138 @@
+/*
+ * ciscode.h 1.57 2002/11/03 20:38:14
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CISCODE_H
+#define _LINUX_CISCODE_H
+
+/* Manufacturer and Product ID codes */
+
+#define MANFID_3COM 0x0101
+#define PRODID_3COM_3CXEM556 0x0035
+#define PRODID_3COM_3CCFEM556 0x0556
+#define PRODID_3COM_3C562 0x0562
+
+#define MANFID_ACCTON 0x01bf
+#define PRODID_ACCTON_EN2226 0x010a
+
+#define MANFID_ADAPTEC 0x012f
+#define PRODID_ADAPTEC_SCSI 0x0001
+
+#define MANFID_ATT 0xffff
+#define PRODID_ATT_KIT 0x0100
+
+#define MANFID_CONTEC 0xc001
+
+#define MANFID_FUJITSU 0x0004
+#define PRODID_FUJITSU_MBH10302 0x0004
+#define PRODID_FUJITSU_MBH10304 0x1003
+#define PRODID_FUJITSU_LA501 0x2000
+
+#define MANFID_IBM 0x00a4
+#define PRODID_IBM_HOME_AND_AWAY 0x002e
+
+#define MANFID_INTEL 0x0089
+#define PRODID_INTEL_DUAL_RS232 0x0301
+#define PRODID_INTEL_2PLUS 0x8422
+
+#define MANFID_KME 0x0032
+#define PRODID_KME_KXLC005_A 0x0704
+#define PRODID_KME_KXLC005_B 0x2904
+
+#define MANFID_LINKSYS 0x0143
+#define PRODID_LINKSYS_PCMLM28 0xc0ab
+#define PRODID_LINKSYS_3400 0x3341
+
+#define MANFID_MEGAHERTZ 0x0102
+#define PRODID_MEGAHERTZ_VARIOUS 0x0000
+#define PRODID_MEGAHERTZ_EM3288 0x0006
+
+#define MANFID_MACNICA 0xc00b
+
+#define MANFID_MOTOROLA 0x0109
+#define PRODID_MOTOROLA_MARINER 0x0501
+
+#define MANFID_NATINST 0x010b
+#define PRODID_NATINST_QUAD_RS232 0xd180
+
+#define MANFID_NEW_MEDIA 0x0057
+
+#define MANFID_NOKIA 0x0124
+#define PRODID_NOKIA_CARDPHONE 0x0900
+
+#define MANFID_OLICOM 0x0121
+#define PRODID_OLICOM_OC2231 0x3122
+#define PRODID_OLICOM_OC2232 0x3222
+
+#define MANFID_OMEGA 0x0137
+#define PRODID_OMEGA_QSP_100 0x0025
+
+#define MANFID_OSITECH 0x0140
+#define PRODID_OSITECH_JACK_144 0x0001
+#define PRODID_OSITECH_JACK_288 0x0002
+#define PRODID_OSITECH_JACK_336 0x0007
+#define PRODID_OSITECH_SEVEN 0x0008
+
+#define MANFID_OXSEMI 0x0279
+
+#define MANFID_PIONEER 0x000b
+
+#define MANFID_PSION 0x016c
+#define PRODID_PSION_NET100 0x0023
+
+#define MANFID_QUATECH 0x0137
+#define PRODID_QUATECH_SPP100 0x0003
+#define PRODID_QUATECH_DUAL_RS232 0x0012
+#define PRODID_QUATECH_DUAL_RS232_D1 0x0007
+#define PRODID_QUATECH_DUAL_RS232_D2 0x0052
+#define PRODID_QUATECH_QUAD_RS232 0x001b
+#define PRODID_QUATECH_DUAL_RS422 0x000e
+#define PRODID_QUATECH_QUAD_RS422 0x0045
+
+#define MANFID_SMC 0x0108
+#define PRODID_SMC_ETHER 0x0105
+
+#define MANFID_SOCKET 0x0104
+#define PRODID_SOCKET_DUAL_RS232 0x0006
+#define PRODID_SOCKET_EIO 0x000a
+#define PRODID_SOCKET_LPE 0x000d
+#define PRODID_SOCKET_LPE_CF 0x0075
+
+#define MANFID_SUNDISK 0x0045
+
+#define MANFID_TDK 0x0105
+#define PRODID_TDK_CF010 0x0900
+#define PRODID_TDK_GN3410 0x4815
+
+#define MANFID_TOSHIBA 0x0098
+
+#define MANFID_UNGERMANN 0x02c0
+
+#define MANFID_XIRCOM 0x0105
+
+#endif /* _LINUX_CISCODE_H */
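Client drivers compare these codes against the parsed CISTPL_MANFID tuple to special-case particular cards. A small illustrative check, not taken from the sources; cistpl_manfid_t is declared in cistpl.h below:

/* True if the parsed manufacturer ID identifies a 3Com 3c562. */
static int is_3com_3c562(cistpl_manfid_t *id)
{
    return id->manf == MANFID_3COM && id->card == PRODID_3COM_3C562;
}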
diff --git a/linux/pcmcia-cs/include/pcmcia/cisreg.h b/linux/pcmcia-cs/include/pcmcia/cisreg.h
new file mode 100644
index 0000000..cb9fe39
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cisreg.h
@@ -0,0 +1,135 @@
+/*
+ * cisreg.h 1.18 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CISREG_H
+#define _LINUX_CISREG_H
+
+/*
+ * Offsets from ConfigBase for CIS registers
+ */
+#define CISREG_COR 0x00
+#define CISREG_CCSR 0x02
+#define CISREG_PRR 0x04
+#define CISREG_SCR 0x06
+#define CISREG_ESR 0x08
+#define CISREG_IOBASE_0 0x0a
+#define CISREG_IOBASE_1 0x0c
+#define CISREG_IOBASE_2 0x0e
+#define CISREG_IOBASE_3 0x10
+#define CISREG_IOSIZE 0x12
+
+/*
+ * Configuration Option Register
+ */
+#define COR_CONFIG_MASK 0x3f
+#define COR_MFC_CONFIG_MASK 0x38
+#define COR_FUNC_ENA 0x01
+#define COR_ADDR_DECODE 0x02
+#define COR_IREQ_ENA 0x04
+#define COR_LEVEL_REQ 0x40
+#define COR_SOFT_RESET 0x80
+
+/*
+ * Card Configuration and Status Register
+ */
+#define CCSR_INTR_ACK 0x01
+#define CCSR_INTR_PENDING 0x02
+#define CCSR_POWER_DOWN 0x04
+#define CCSR_AUDIO_ENA 0x08
+#define CCSR_IOIS8 0x20
+#define CCSR_SIGCHG_ENA 0x40
+#define CCSR_CHANGED 0x80
+
+/*
+ * Pin Replacement Register
+ */
+#define PRR_WP_STATUS 0x01
+#define PRR_READY_STATUS 0x02
+#define PRR_BVD2_STATUS 0x04
+#define PRR_BVD1_STATUS 0x08
+#define PRR_WP_EVENT 0x10
+#define PRR_READY_EVENT 0x20
+#define PRR_BVD2_EVENT 0x40
+#define PRR_BVD1_EVENT 0x80
+
+/*
+ * Socket and Copy Register
+ */
+#define SCR_SOCKET_NUM 0x0f
+#define SCR_COPY_NUM 0x70
+
+/*
+ * Extended Status Register
+ */
+#define ESR_REQ_ATTN_ENA 0x01
+#define ESR_REQ_ATTN 0x10
+
+/*
+ * CardBus Function Status Registers
+ */
+#define CBFN_EVENT 0x00
+#define CBFN_MASK 0x04
+#define CBFN_STATE 0x08
+#define CBFN_FORCE 0x0c
+
+/*
+ * These apply to all the CardBus function registers
+ */
+#define CBFN_WP 0x0001
+#define CBFN_READY 0x0002
+#define CBFN_BVD2 0x0004
+#define CBFN_BVD1 0x0008
+#define CBFN_GWAKE 0x0010
+#define CBFN_INTR 0x8000
+
+/*
+ * Extra bits in the Function Event Mask Register
+ */
+#define FEMR_BAM_ENA 0x0020
+#define FEMR_PWM_ENA 0x0040
+#define FEMR_WKUP_MASK 0x4000
+
+/*
+ * Indirect Addressing Registers for Zoomed Video: these are addresses
+ * in common memory space
+ */
+#define CISREG_ICTRL0 0x02 /* control registers */
+#define CISREG_ICTRL1 0x03
+#define CISREG_IADDR0 0x04 /* address registers */
+#define CISREG_IADDR1 0x05
+#define CISREG_IADDR2 0x06
+#define CISREG_IADDR3 0x07
+#define CISREG_IDATA0 0x08 /* data registers */
+#define CISREG_IDATA1 0x09
+
+#define ICTRL0_COMMON 0x01
+#define ICTRL0_AUTOINC 0x02
+#define ICTRL0_BYTEGRAN 0x04
+
+#endif /* _LINUX_CISREG_H */
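As an example of how these registers are used, a client driver can reset one card function by writing COR_SOFT_RESET to the Configuration Option Register through AccessConfigurationRegister. The sketch below is illustrative only; conf_reg_t, CS_WRITE, CardServices() and client_handle_t are declared in cs.h and cs_types.h later in this patch, and the handle is assumed to come from a prior RegisterClient call.

static int cor_soft_reset(client_handle_t handle)
{
    conf_reg_t reg;

    reg.Function = 0;                 /* first (or only) card function */
    reg.Action   = CS_WRITE;
    reg.Offset   = CISREG_COR;
    reg.Value    = COR_SOFT_RESET;
    return CardServices(AccessConfigurationRegister, handle, &reg);
}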
diff --git a/linux/pcmcia-cs/include/pcmcia/cistpl.h b/linux/pcmcia-cs/include/pcmcia/cistpl.h
new file mode 100644
index 0000000..1d4cac2
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cistpl.h
@@ -0,0 +1,604 @@
+/*
+ * cistpl.h 1.35 2001/08/24 12:16:12
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CISTPL_H
+#define _LINUX_CISTPL_H
+
+#define CISTPL_NULL 0x00
+#define CISTPL_DEVICE 0x01
+#define CISTPL_LONGLINK_CB 0x02
+#define CISTPL_INDIRECT 0x03
+#define CISTPL_CONFIG_CB 0x04
+#define CISTPL_CFTABLE_ENTRY_CB 0x05
+#define CISTPL_LONGLINK_MFC 0x06
+#define CISTPL_BAR 0x07
+#define CISTPL_PWR_MGMNT 0x08
+#define CISTPL_EXTDEVICE 0x09
+#define CISTPL_CHECKSUM 0x10
+#define CISTPL_LONGLINK_A 0x11
+#define CISTPL_LONGLINK_C 0x12
+#define CISTPL_LINKTARGET 0x13
+#define CISTPL_NO_LINK 0x14
+#define CISTPL_VERS_1 0x15
+#define CISTPL_ALTSTR 0x16
+#define CISTPL_DEVICE_A 0x17
+#define CISTPL_JEDEC_C 0x18
+#define CISTPL_JEDEC_A 0x19
+#define CISTPL_CONFIG 0x1a
+#define CISTPL_CFTABLE_ENTRY 0x1b
+#define CISTPL_DEVICE_OC 0x1c
+#define CISTPL_DEVICE_OA 0x1d
+#define CISTPL_DEVICE_GEO 0x1e
+#define CISTPL_DEVICE_GEO_A 0x1f
+#define CISTPL_MANFID 0x20
+#define CISTPL_FUNCID 0x21
+#define CISTPL_FUNCE 0x22
+#define CISTPL_SWIL 0x23
+#define CISTPL_END 0xff
+/* Layer 2 tuples */
+#define CISTPL_VERS_2 0x40
+#define CISTPL_FORMAT 0x41
+#define CISTPL_GEOMETRY 0x42
+#define CISTPL_BYTEORDER 0x43
+#define CISTPL_DATE 0x44
+#define CISTPL_BATTERY 0x45
+#define CISTPL_FORMAT_A 0x47
+/* Layer 3 tuples */
+#define CISTPL_ORG 0x46
+#define CISTPL_SPCL 0x90
+
+typedef struct cistpl_longlink_t {
+ u_int addr;
+} cistpl_longlink_t;
+
+typedef struct cistpl_checksum_t {
+ u_short addr;
+ u_short len;
+ u_char sum;
+} cistpl_checksum_t;
+
+#define CISTPL_MAX_FUNCTIONS 8
+#define CISTPL_MFC_ATTR 0x00
+#define CISTPL_MFC_COMMON 0x01
+
+typedef struct cistpl_longlink_mfc_t {
+ u_char nfn;
+ struct {
+ u_char space;
+ u_int addr;
+ } fn[CISTPL_MAX_FUNCTIONS];
+} cistpl_longlink_mfc_t;
+
+#define CISTPL_MAX_ALTSTR_STRINGS 4
+
+typedef struct cistpl_altstr_t {
+ u_char ns;
+ u_char ofs[CISTPL_MAX_ALTSTR_STRINGS];
+ char str[254];
+} cistpl_altstr_t;
+
+#define CISTPL_DTYPE_NULL 0x00
+#define CISTPL_DTYPE_ROM 0x01
+#define CISTPL_DTYPE_OTPROM 0x02
+#define CISTPL_DTYPE_EPROM 0x03
+#define CISTPL_DTYPE_EEPROM 0x04
+#define CISTPL_DTYPE_FLASH 0x05
+#define CISTPL_DTYPE_SRAM 0x06
+#define CISTPL_DTYPE_DRAM 0x07
+#define CISTPL_DTYPE_FUNCSPEC 0x0d
+#define CISTPL_DTYPE_EXTEND 0x0e
+
+#define CISTPL_MAX_DEVICES 4
+
+typedef struct cistpl_device_t {
+ u_char ndev;
+ struct {
+ u_char type;
+ u_char wp;
+ u_int speed;
+ u_int size;
+ } dev[CISTPL_MAX_DEVICES];
+} cistpl_device_t;
+
+#define CISTPL_DEVICE_MWAIT 0x01
+#define CISTPL_DEVICE_3VCC 0x02
+
+typedef struct cistpl_device_o_t {
+ u_char flags;
+ cistpl_device_t device;
+} cistpl_device_o_t;
+
+#define CISTPL_VERS_1_MAX_PROD_STRINGS 4
+
+typedef struct cistpl_vers_1_t {
+ u_char major;
+ u_char minor;
+ u_char ns;
+ u_char ofs[CISTPL_VERS_1_MAX_PROD_STRINGS];
+ char str[254];
+} cistpl_vers_1_t;
+
+typedef struct cistpl_jedec_t {
+ u_char nid;
+ struct {
+ u_char mfr;
+ u_char info;
+ } id[CISTPL_MAX_DEVICES];
+} cistpl_jedec_t;
+
+typedef struct cistpl_manfid_t {
+ u_short manf;
+ u_short card;
+} cistpl_manfid_t;
+
+#define CISTPL_FUNCID_MULTI 0x00
+#define CISTPL_FUNCID_MEMORY 0x01
+#define CISTPL_FUNCID_SERIAL 0x02
+#define CISTPL_FUNCID_PARALLEL 0x03
+#define CISTPL_FUNCID_FIXED 0x04
+#define CISTPL_FUNCID_VIDEO 0x05
+#define CISTPL_FUNCID_NETWORK 0x06
+#define CISTPL_FUNCID_AIMS 0x07
+#define CISTPL_FUNCID_SCSI 0x08
+
+#define CISTPL_SYSINIT_POST 0x01
+#define CISTPL_SYSINIT_ROM 0x02
+
+typedef struct cistpl_funcid_t {
+ u_char func;
+ u_char sysinit;
+} cistpl_funcid_t;
+
+typedef struct cistpl_funce_t {
+ u_char type;
+ u_char data[0];
+} cistpl_funce_t;
+
+/*======================================================================
+
+ Modem Function Extension Tuples
+
+======================================================================*/
+
+#define CISTPL_FUNCE_SERIAL_IF 0x00
+#define CISTPL_FUNCE_SERIAL_CAP 0x01
+#define CISTPL_FUNCE_SERIAL_SERV_DATA 0x02
+#define CISTPL_FUNCE_SERIAL_SERV_FAX 0x03
+#define CISTPL_FUNCE_SERIAL_SERV_VOICE 0x04
+#define CISTPL_FUNCE_SERIAL_CAP_DATA 0x05
+#define CISTPL_FUNCE_SERIAL_CAP_FAX 0x06
+#define CISTPL_FUNCE_SERIAL_CAP_VOICE 0x07
+#define CISTPL_FUNCE_SERIAL_IF_DATA 0x08
+#define CISTPL_FUNCE_SERIAL_IF_FAX 0x09
+#define CISTPL_FUNCE_SERIAL_IF_VOICE 0x0a
+
+/* UART identification */
+#define CISTPL_SERIAL_UART_8250 0x00
+#define CISTPL_SERIAL_UART_16450 0x01
+#define CISTPL_SERIAL_UART_16550 0x02
+#define CISTPL_SERIAL_UART_8251 0x03
+#define CISTPL_SERIAL_UART_8530 0x04
+#define CISTPL_SERIAL_UART_85230 0x05
+
+/* UART capabilities */
+#define CISTPL_SERIAL_UART_SPACE 0x01
+#define CISTPL_SERIAL_UART_MARK 0x02
+#define CISTPL_SERIAL_UART_ODD 0x04
+#define CISTPL_SERIAL_UART_EVEN 0x08
+#define CISTPL_SERIAL_UART_5BIT 0x01
+#define CISTPL_SERIAL_UART_6BIT 0x02
+#define CISTPL_SERIAL_UART_7BIT 0x04
+#define CISTPL_SERIAL_UART_8BIT 0x08
+#define CISTPL_SERIAL_UART_1STOP 0x10
+#define CISTPL_SERIAL_UART_MSTOP 0x20
+#define CISTPL_SERIAL_UART_2STOP 0x40
+
+typedef struct cistpl_serial_t {
+ u_char uart_type;
+ u_char uart_cap_0;
+ u_char uart_cap_1;
+} cistpl_serial_t;
+
+typedef struct cistpl_modem_cap_t {
+ u_char flow;
+ u_char cmd_buf;
+ u_char rcv_buf_0, rcv_buf_1, rcv_buf_2;
+ u_char xmit_buf_0, xmit_buf_1, xmit_buf_2;
+} cistpl_modem_cap_t;
+
+#define CISTPL_SERIAL_MOD_103 0x01
+#define CISTPL_SERIAL_MOD_V21 0x02
+#define CISTPL_SERIAL_MOD_V23 0x04
+#define CISTPL_SERIAL_MOD_V22 0x08
+#define CISTPL_SERIAL_MOD_212A 0x10
+#define CISTPL_SERIAL_MOD_V22BIS 0x20
+#define CISTPL_SERIAL_MOD_V26 0x40
+#define CISTPL_SERIAL_MOD_V26BIS 0x80
+#define CISTPL_SERIAL_MOD_V27BIS 0x01
+#define CISTPL_SERIAL_MOD_V29 0x02
+#define CISTPL_SERIAL_MOD_V32 0x04
+#define CISTPL_SERIAL_MOD_V32BIS 0x08
+#define CISTPL_SERIAL_MOD_V34 0x10
+
+#define CISTPL_SERIAL_ERR_MNP2_4 0x01
+#define CISTPL_SERIAL_ERR_V42_LAPM 0x02
+
+#define CISTPL_SERIAL_CMPR_V42BIS 0x01
+#define CISTPL_SERIAL_CMPR_MNP5 0x02
+
+#define CISTPL_SERIAL_CMD_AT1 0x01
+#define CISTPL_SERIAL_CMD_AT2 0x02
+#define CISTPL_SERIAL_CMD_AT3 0x04
+#define CISTPL_SERIAL_CMD_MNP_AT 0x08
+#define CISTPL_SERIAL_CMD_V25BIS 0x10
+#define CISTPL_SERIAL_CMD_V25A 0x20
+#define CISTPL_SERIAL_CMD_DMCL 0x40
+
+typedef struct cistpl_data_serv_t {
+ u_char max_data_0;
+ u_char max_data_1;
+ u_char modulation_0;
+ u_char modulation_1;
+ u_char error_control;
+ u_char compression;
+ u_char cmd_protocol;
+ u_char escape;
+ u_char encrypt;
+ u_char misc_features;
+ u_char ccitt_code[0];
+} cistpl_data_serv_t;
+
+typedef struct cistpl_fax_serv_t {
+ u_char max_data_0;
+ u_char max_data_1;
+ u_char modulation;
+ u_char encrypt;
+ u_char features_0;
+ u_char features_1;
+ u_char ccitt_code[0];
+} cistpl_fax_serv_t;
+
+typedef struct cistpl_voice_serv_t {
+ u_char max_data_0;
+ u_char max_data_1;
+} cistpl_voice_serv_t;
+
+/*======================================================================
+
+ LAN Function Extension Tuples
+
+======================================================================*/
+
+#define CISTPL_FUNCE_LAN_TECH 0x01
+#define CISTPL_FUNCE_LAN_SPEED 0x02
+#define CISTPL_FUNCE_LAN_MEDIA 0x03
+#define CISTPL_FUNCE_LAN_NODE_ID 0x04
+#define CISTPL_FUNCE_LAN_CONNECTOR 0x05
+
+/* LAN technologies */
+#define CISTPL_LAN_TECH_ARCNET 0x01
+#define CISTPL_LAN_TECH_ETHERNET 0x02
+#define CISTPL_LAN_TECH_TOKENRING 0x03
+#define CISTPL_LAN_TECH_LOCALTALK 0x04
+#define CISTPL_LAN_TECH_FDDI 0x05
+#define CISTPL_LAN_TECH_ATM 0x06
+#define CISTPL_LAN_TECH_WIRELESS 0x07
+
+typedef struct cistpl_lan_tech_t {
+ u_char tech;
+} cistpl_lan_tech_t;
+
+typedef struct cistpl_lan_speed_t {
+ u_int speed;
+} cistpl_lan_speed_t;
+
+/* LAN media definitions */
+#define CISTPL_LAN_MEDIA_UTP 0x01
+#define CISTPL_LAN_MEDIA_STP 0x02
+#define CISTPL_LAN_MEDIA_THIN_COAX 0x03
+#define CISTPL_LAN_MEDIA_THICK_COAX 0x04
+#define CISTPL_LAN_MEDIA_FIBER 0x05
+#define CISTPL_LAN_MEDIA_900MHZ 0x06
+#define CISTPL_LAN_MEDIA_2GHZ 0x07
+#define CISTPL_LAN_MEDIA_5GHZ 0x08
+#define CISTPL_LAN_MEDIA_DIFF_IR 0x09
+#define CISTPL_LAN_MEDIA_PTP_IR 0x0a
+
+typedef struct cistpl_lan_media_t {
+ u_char media;
+} cistpl_lan_media_t;
+
+typedef struct cistpl_lan_node_id_t {
+ u_char nb;
+ u_char id[16];
+} cistpl_lan_node_id_t;
+
+typedef struct cistpl_lan_connector_t {
+ u_char code;
+} cistpl_lan_connector_t;
+
+/*======================================================================
+
+ IDE Function Extension Tuples
+
+======================================================================*/
+
+#define CISTPL_IDE_INTERFACE 0x01
+
+typedef struct cistpl_ide_interface_t {
+ u_char interface;
+} cistpl_ide_interface_t;
+
+/* First feature byte */
+#define CISTPL_IDE_SILICON 0x04
+#define CISTPL_IDE_UNIQUE 0x08
+#define CISTPL_IDE_DUAL 0x10
+
+/* Second feature byte */
+#define CISTPL_IDE_HAS_SLEEP 0x01
+#define CISTPL_IDE_HAS_STANDBY 0x02
+#define CISTPL_IDE_HAS_IDLE 0x04
+#define CISTPL_IDE_LOW_POWER 0x08
+#define CISTPL_IDE_REG_INHIBIT 0x10
+#define CISTPL_IDE_HAS_INDEX 0x20
+#define CISTPL_IDE_IOIS16 0x40
+
+typedef struct cistpl_ide_feature_t {
+ u_char feature1;
+ u_char feature2;
+} cistpl_ide_feature_t;
+
+#define CISTPL_FUNCE_IDE_IFACE 0x01
+#define CISTPL_FUNCE_IDE_MASTER 0x02
+#define CISTPL_FUNCE_IDE_SLAVE 0x03
+
+/*======================================================================
+
+ Configuration Table Entries
+
+======================================================================*/
+
+#define CISTPL_BAR_SPACE 0x07
+#define CISTPL_BAR_SPACE_IO 0x10
+#define CISTPL_BAR_PREFETCH 0x20
+#define CISTPL_BAR_CACHEABLE 0x40
+#define CISTPL_BAR_1MEG_MAP 0x80
+
+typedef struct cistpl_bar_t {
+ u_char attr;
+ u_int size;
+} cistpl_bar_t;
+
+typedef struct cistpl_config_t {
+ u_char last_idx;
+ u_int base;
+ u_int rmask[4];
+ u_char subtuples;
+} cistpl_config_t;
+
+/* These are bits in the 'present' field, and indices in 'param' */
+#define CISTPL_POWER_VNOM 0
+#define CISTPL_POWER_VMIN 1
+#define CISTPL_POWER_VMAX 2
+#define CISTPL_POWER_ISTATIC 3
+#define CISTPL_POWER_IAVG 4
+#define CISTPL_POWER_IPEAK 5
+#define CISTPL_POWER_IDOWN 6
+
+#define CISTPL_POWER_HIGHZ_OK 0x01
+#define CISTPL_POWER_HIGHZ_REQ 0x02
+
+typedef struct cistpl_power_t {
+ u_char present;
+ u_char flags;
+ u_int param[7];
+} cistpl_power_t;
+
+typedef struct cistpl_timing_t {
+ u_int wait, waitscale;
+ u_int ready, rdyscale;
+ u_int reserved, rsvscale;
+} cistpl_timing_t;
+
+#define CISTPL_IO_LINES_MASK 0x1f
+#define CISTPL_IO_8BIT 0x20
+#define CISTPL_IO_16BIT 0x40
+#define CISTPL_IO_RANGE 0x80
+
+#define CISTPL_IO_MAX_WIN 16
+
+typedef struct cistpl_io_t {
+ u_char flags;
+ u_char nwin;
+ struct {
+ u_int base;
+ u_int len;
+ } win[CISTPL_IO_MAX_WIN];
+} cistpl_io_t;
+
+typedef struct cistpl_irq_t {
+ u_int IRQInfo1;
+ u_int IRQInfo2;
+} cistpl_irq_t;
+
+#define CISTPL_MEM_MAX_WIN 8
+
+typedef struct cistpl_mem_t {
+ u_char flags;
+ u_char nwin;
+ struct {
+ u_int len;
+ u_int card_addr;
+ u_int host_addr;
+ } win[CISTPL_MEM_MAX_WIN];
+} cistpl_mem_t;
+
+#define CISTPL_CFTABLE_DEFAULT 0x0001
+#define CISTPL_CFTABLE_BVDS 0x0002
+#define CISTPL_CFTABLE_WP 0x0004
+#define CISTPL_CFTABLE_RDYBSY 0x0008
+#define CISTPL_CFTABLE_MWAIT 0x0010
+#define CISTPL_CFTABLE_AUDIO 0x0800
+#define CISTPL_CFTABLE_READONLY 0x1000
+#define CISTPL_CFTABLE_PWRDOWN 0x2000
+
+typedef struct cistpl_cftable_entry_t {
+ u_char index;
+ u_short flags;
+ u_char interface;
+ cistpl_power_t vcc, vpp1, vpp2;
+ cistpl_timing_t timing;
+ cistpl_io_t io;
+ cistpl_irq_t irq;
+ cistpl_mem_t mem;
+ u_char subtuples;
+} cistpl_cftable_entry_t;
+
+#define CISTPL_CFTABLE_MASTER 0x000100
+#define CISTPL_CFTABLE_INVALIDATE 0x000200
+#define CISTPL_CFTABLE_VGA_PALETTE 0x000400
+#define CISTPL_CFTABLE_PARITY 0x000800
+#define CISTPL_CFTABLE_WAIT 0x001000
+#define CISTPL_CFTABLE_SERR 0x002000
+#define CISTPL_CFTABLE_FAST_BACK 0x004000
+#define CISTPL_CFTABLE_BINARY_AUDIO 0x010000
+#define CISTPL_CFTABLE_PWM_AUDIO 0x020000
+
+typedef struct cistpl_cftable_entry_cb_t {
+ u_char index;
+ u_int flags;
+ cistpl_power_t vcc, vpp1, vpp2;
+ u_char io;
+ cistpl_irq_t irq;
+ u_char mem;
+ u_char subtuples;
+} cistpl_cftable_entry_cb_t;
+
+typedef struct cistpl_device_geo_t {
+ u_char ngeo;
+ struct {
+ u_char buswidth;
+ u_int erase_block;
+ u_int read_block;
+ u_int write_block;
+ u_int partition;
+ u_int interleave;
+ } geo[CISTPL_MAX_DEVICES];
+} cistpl_device_geo_t;
+
+typedef struct cistpl_vers_2_t {
+ u_char vers;
+ u_char comply;
+ u_short dindex;
+ u_char vspec8, vspec9;
+ u_char nhdr;
+ u_char vendor, info;
+ char str[244];
+} cistpl_vers_2_t;
+
+typedef struct cistpl_org_t {
+ u_char data_org;
+ char desc[30];
+} cistpl_org_t;
+
+#define CISTPL_ORG_FS 0x00
+#define CISTPL_ORG_APPSPEC 0x01
+#define CISTPL_ORG_XIP 0x02
+
+typedef struct cistpl_format_t {
+ u_char type;
+ u_char edc;
+ u_int offset;
+ u_int length;
+} cistpl_format_t;
+
+#define CISTPL_FORMAT_DISK 0x00
+#define CISTPL_FORMAT_MEM 0x01
+
+#define CISTPL_EDC_NONE 0x00
+#define CISTPL_EDC_CKSUM 0x01
+#define CISTPL_EDC_CRC 0x02
+#define CISTPL_EDC_PCC 0x03
+
+typedef union cisparse_t {
+ cistpl_device_t device;
+ cistpl_checksum_t checksum;
+ cistpl_longlink_t longlink;
+ cistpl_longlink_mfc_t longlink_mfc;
+ cistpl_vers_1_t version_1;
+ cistpl_altstr_t altstr;
+ cistpl_jedec_t jedec;
+ cistpl_manfid_t manfid;
+ cistpl_funcid_t funcid;
+ cistpl_funce_t funce;
+ cistpl_bar_t bar;
+ cistpl_config_t config;
+ cistpl_cftable_entry_t cftable_entry;
+ cistpl_cftable_entry_cb_t cftable_entry_cb;
+ cistpl_device_geo_t device_geo;
+ cistpl_vers_2_t vers_2;
+ cistpl_org_t org;
+ cistpl_format_t format;
+} cisparse_t;
+
+typedef struct tuple_t {
+ u_int Attributes;
+ cisdata_t DesiredTuple;
+ u_int Flags; /* internal use */
+ u_int LinkOffset; /* internal use */
+ u_int CISOffset; /* internal use */
+ cisdata_t TupleCode;
+ cisdata_t TupleLink;
+ cisdata_t TupleOffset;
+ cisdata_t TupleDataMax;
+ cisdata_t TupleDataLen;
+ cisdata_t *TupleData;
+} tuple_t;
+
+/* Special cisdata_t value */
+#define RETURN_FIRST_TUPLE 0xff
+
+/* Attributes for tuple calls */
+#define TUPLE_RETURN_LINK 0x01
+#define TUPLE_RETURN_COMMON 0x02
+
+/* For ValidateCIS */
+typedef struct cisinfo_t {
+ u_int Chains;
+} cisinfo_t;
+
+#define CISTPL_MAX_CIS_SIZE 0x200
+
+/* For ReplaceCIS */
+typedef struct cisdump_t {
+ u_int Length;
+ cisdata_t Data[CISTPL_MAX_CIS_SIZE];
+} cisdump_t;
+
+#endif /* _LINUX_CISTPL_H */
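Tuples are normally consumed with a first/next, get-data, parse sequence built from the types above. A minimal illustrative sketch following the idiom used by the client drivers in this tree; CardServices(), CS_SUCCESS and client_handle_t come from cs.h and cs_types.h below, and the handle is hypothetical.

/* Return the card's function ID (e.g. CISTPL_FUNCID_NETWORK), or -1. */
static int get_funcid(client_handle_t handle)
{
    tuple_t tuple;
    cisparse_t parse;
    u_char buf[64];

    tuple.DesiredTuple = CISTPL_FUNCID;
    tuple.Attributes   = 0;
    tuple.TupleData    = buf;
    tuple.TupleDataMax = sizeof(buf);
    tuple.TupleOffset  = 0;

    if (CardServices(GetFirstTuple, handle, &tuple) != CS_SUCCESS)
        return -1;
    if (CardServices(GetTupleData, handle, &tuple) != CS_SUCCESS)
        return -1;
    if (CardServices(ParseTuple, handle, &tuple, &parse) != CS_SUCCESS)
        return -1;
    return parse.funcid.func;
}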
diff --git a/linux/pcmcia-cs/include/pcmcia/cs.h b/linux/pcmcia-cs/include/pcmcia/cs.h
new file mode 100644
index 0000000..8e202c6
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cs.h
@@ -0,0 +1,441 @@
+/*
+ * cs.h 1.74 2001/10/04 03:15:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CS_H
+#define _LINUX_CS_H
+
+/* For AccessConfigurationRegister */
+typedef struct conf_reg_t {
+ u_char Function;
+ u_int Action;
+ off_t Offset;
+ u_int Value;
+} conf_reg_t;
+
+/* Actions */
+#define CS_READ 1
+#define CS_WRITE 2
+
+/* for AdjustResourceInfo */
+typedef struct adjust_t {
+ u_int Action;
+ u_int Resource;
+ u_int Attributes;
+ union {
+ struct memory {
+ u_long Base;
+ u_long Size;
+ } memory;
+ struct io {
+ ioaddr_t BasePort;
+ ioaddr_t NumPorts;
+ u_int IOAddrLines;
+ } io;
+ struct irq {
+ u_int IRQ;
+ } irq;
+ } resource;
+} adjust_t;
+
+/* Action field */
+#define REMOVE_MANAGED_RESOURCE 1
+#define ADD_MANAGED_RESOURCE 2
+#define GET_FIRST_MANAGED_RESOURCE 3
+#define GET_NEXT_MANAGED_RESOURCE 4
+/* Resource field */
+#define RES_MEMORY_RANGE 1
+#define RES_IO_RANGE 2
+#define RES_IRQ 3
+/* Attribute field */
+#define RES_IRQ_TYPE 0x03
+#define RES_IRQ_TYPE_EXCLUSIVE 0
+#define RES_IRQ_TYPE_TIME 1
+#define RES_IRQ_TYPE_DYNAMIC 2
+#define RES_IRQ_CSC 0x04
+#define RES_SHARED 0x08
+#define RES_RESERVED 0x10
+#define RES_ALLOCATED 0x20
+#define RES_REMOVED 0x40
+
+typedef struct servinfo_t {
+ char Signature[2];
+ u_int Count;
+ u_int Revision;
+ u_int CSLevel;
+ char *VendorString;
+} servinfo_t;
+
+typedef struct event_callback_args_t {
+ client_handle_t client_handle;
+ void *info;
+ void *mtdrequest;
+ void *buffer;
+ void *misc;
+ void *client_data;
+ struct bus_operations *bus;
+} event_callback_args_t;
+
+/* for GetConfigurationInfo */
+typedef struct config_info_t {
+ u_char Function;
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+ u_int IntType;
+ u_int ConfigBase;
+ u_char Status, Pin, Copy, Option, ExtStatus;
+ u_int Present;
+ u_int CardValues;
+ u_int AssignedIRQ;
+ u_int IRQAttributes;
+ ioaddr_t BasePort1;
+ ioaddr_t NumPorts1;
+ u_int Attributes1;
+ ioaddr_t BasePort2;
+ ioaddr_t NumPorts2;
+ u_int Attributes2;
+ u_int IOAddrLines;
+} config_info_t;
+
+/* For CardValues field */
+#define CV_OPTION_VALUE 0x01
+#define CV_STATUS_VALUE 0x02
+#define CV_PIN_REPLACEMENT 0x04
+#define CV_COPY_VALUE 0x08
+#define CV_EXT_STATUS 0x10
+
+/* For GetFirst/NextClient */
+typedef struct client_req_t {
+ socket_t Socket;
+ u_int Attributes;
+} client_req_t;
+
+#define CLIENT_THIS_SOCKET 0x01
+
+/* For RegisterClient */
+typedef struct client_reg_t {
+ dev_info_t *dev_info;
+ u_int Attributes;
+ u_int EventMask;
+ int (*event_handler)(event_t event, int priority,
+ event_callback_args_t *);
+ event_callback_args_t event_callback_args;
+ u_int Version;
+} client_reg_t;
+
+/* ModifyConfiguration */
+typedef struct modconf_t {
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+} modconf_t;
+
+/* Attributes for ModifyConfiguration */
+#define CONF_IRQ_CHANGE_VALID 0x100
+#define CONF_VCC_CHANGE_VALID 0x200
+#define CONF_VPP1_CHANGE_VALID 0x400
+#define CONF_VPP2_CHANGE_VALID 0x800
+
+/* For RequestConfiguration */
+typedef struct config_req_t {
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+ u_int IntType;
+ u_int ConfigBase;
+ u_char Status, Pin, Copy, ExtStatus;
+ u_char ConfigIndex;
+ u_int Present;
+} config_req_t;
+
+/* Attributes for RequestConfiguration */
+#define CONF_ENABLE_IRQ 0x01
+#define CONF_ENABLE_DMA 0x02
+#define CONF_ENABLE_SPKR 0x04
+#define CONF_VALID_CLIENT 0x100
+
+/* IntType field */
+#define INT_MEMORY 0x01
+#define INT_MEMORY_AND_IO 0x02
+#define INT_CARDBUS 0x04
+#define INT_ZOOMED_VIDEO 0x08
+
+/* For RequestIO and ReleaseIO */
+typedef struct io_req_t {
+ ioaddr_t BasePort1;
+ ioaddr_t NumPorts1;
+ u_int Attributes1;
+ ioaddr_t BasePort2;
+ ioaddr_t NumPorts2;
+ u_int Attributes2;
+ u_int IOAddrLines;
+} io_req_t;
+
+/* Attributes for RequestIO and ReleaseIO */
+#define IO_SHARED 0x01
+#define IO_FIRST_SHARED 0x02
+#define IO_FORCE_ALIAS_ACCESS 0x04
+#define IO_DATA_PATH_WIDTH 0x18
+#define IO_DATA_PATH_WIDTH_8 0x00
+#define IO_DATA_PATH_WIDTH_16 0x08
+#define IO_DATA_PATH_WIDTH_AUTO 0x10
+
+/* For RequestIRQ and ReleaseIRQ */
+typedef struct irq_req_t {
+ u_int Attributes;
+ u_int AssignedIRQ;
+ u_int IRQInfo1, IRQInfo2;
+ void *Handler;
+ void *Instance;
+} irq_req_t;
+
+/* Attributes for RequestIRQ and ReleaseIRQ */
+#define IRQ_TYPE 0x03
+#define IRQ_TYPE_EXCLUSIVE 0x00
+#define IRQ_TYPE_TIME 0x01
+#define IRQ_TYPE_DYNAMIC_SHARING 0x02
+#define IRQ_FORCED_PULSE 0x04
+#define IRQ_FIRST_SHARED 0x08
+#define IRQ_HANDLE_PRESENT 0x10
+#define IRQ_PULSE_ALLOCATED 0x100
+
+/* Bits in IRQInfo1 field */
+#define IRQ_MASK 0x0f
+#define IRQ_NMI_ID 0x01
+#define IRQ_IOCK_ID 0x02
+#define IRQ_BERR_ID 0x04
+#define IRQ_VEND_ID 0x08
+#define IRQ_INFO2_VALID 0x10
+#define IRQ_LEVEL_ID 0x20
+#define IRQ_PULSE_ID 0x40
+#define IRQ_SHARE_ID 0x80
+
+typedef struct eventmask_t {
+ u_int Attributes;
+ u_int EventMask;
+} eventmask_t;
+
+#define CONF_EVENT_MASK_VALID 0x01
+
+/* Configuration registers present */
+#define PRESENT_OPTION 0x001
+#define PRESENT_STATUS 0x002
+#define PRESENT_PIN_REPLACE 0x004
+#define PRESENT_COPY 0x008
+#define PRESENT_EXT_STATUS 0x010
+#define PRESENT_IOBASE_0 0x020
+#define PRESENT_IOBASE_1 0x040
+#define PRESENT_IOBASE_2 0x080
+#define PRESENT_IOBASE_3 0x100
+#define PRESENT_IOSIZE 0x200
+
+/* For GetMemPage, MapMemPage */
+typedef struct memreq_t {
+ u_int CardOffset;
+ page_t Page;
+} memreq_t;
+
+/* For ModifyWindow */
+typedef struct modwin_t {
+ u_int Attributes;
+ u_int AccessSpeed;
+} modwin_t;
+
+/* For RequestWindow */
+typedef struct win_req_t {
+ u_int Attributes;
+ u_long Base;
+ u_int Size;
+ u_int AccessSpeed;
+} win_req_t;
+
+/* Attributes for RequestWindow */
+#define WIN_ADDR_SPACE 0x0001
+#define WIN_ADDR_SPACE_MEM 0x0000
+#define WIN_ADDR_SPACE_IO 0x0001
+#define WIN_MEMORY_TYPE 0x0002
+#define WIN_MEMORY_TYPE_CM 0x0000
+#define WIN_MEMORY_TYPE_AM 0x0002
+#define WIN_ENABLE 0x0004
+#define WIN_DATA_WIDTH 0x0018
+#define WIN_DATA_WIDTH_8 0x0000
+#define WIN_DATA_WIDTH_16 0x0008
+#define WIN_DATA_WIDTH_32 0x0010
+#define WIN_PAGED 0x0020
+#define WIN_SHARED 0x0040
+#define WIN_FIRST_SHARED 0x0080
+#define WIN_USE_WAIT 0x0100
+#define WIN_STRICT_ALIGN 0x0200
+#define WIN_MAP_BELOW_1MB 0x0400
+#define WIN_PREFETCH 0x0800
+#define WIN_CACHEABLE 0x1000
+#define WIN_BAR_MASK 0xe000
+#define WIN_BAR_SHIFT 13
+
+/* Attributes for RegisterClient */
+#define INFO_MASTER_CLIENT 0x01
+#define INFO_IO_CLIENT 0x02
+#define INFO_MTD_CLIENT 0x04
+#define INFO_MEM_CLIENT 0x08
+#define MAX_NUM_CLIENTS 3
+
+#define INFO_CARD_SHARE 0x10
+#define INFO_CARD_EXCL 0x20
+
+typedef struct cs_status_t {
+ u_char Function;
+ event_t CardState;
+ event_t SocketState;
+} cs_status_t;
+
+typedef struct error_info_t {
+ int func;
+ int retcode;
+} error_info_t;
+
+/* Special stuff for binding drivers to sockets */
+typedef struct bind_req_t {
+ socket_t Socket;
+ u_char Function;
+ dev_info_t *dev_info;
+} bind_req_t;
+
+/* Flag to bind to all functions */
+#define BIND_FN_ALL 0xff
+
+typedef struct mtd_bind_t {
+ socket_t Socket;
+ u_int Attributes;
+ u_int CardOffset;
+ dev_info_t *dev_info;
+} mtd_bind_t;
+
+/* Events */
+#define CS_EVENT_PRI_LOW 0
+#define CS_EVENT_PRI_HIGH 1
+
+#define CS_EVENT_WRITE_PROTECT 0x000001
+#define CS_EVENT_CARD_LOCK 0x000002
+#define CS_EVENT_CARD_INSERTION 0x000004
+#define CS_EVENT_CARD_REMOVAL 0x000008
+#define CS_EVENT_BATTERY_DEAD 0x000010
+#define CS_EVENT_BATTERY_LOW 0x000020
+#define CS_EVENT_READY_CHANGE 0x000040
+#define CS_EVENT_CARD_DETECT 0x000080
+#define CS_EVENT_RESET_REQUEST 0x000100
+#define CS_EVENT_RESET_PHYSICAL 0x000200
+#define CS_EVENT_CARD_RESET 0x000400
+#define CS_EVENT_REGISTRATION_COMPLETE 0x000800
+#define CS_EVENT_RESET_COMPLETE 0x001000
+#define CS_EVENT_PM_SUSPEND 0x002000
+#define CS_EVENT_PM_RESUME 0x004000
+#define CS_EVENT_INSERTION_REQUEST 0x008000
+#define CS_EVENT_EJECTION_REQUEST 0x010000
+#define CS_EVENT_MTD_REQUEST 0x020000
+#define CS_EVENT_ERASE_COMPLETE 0x040000
+#define CS_EVENT_REQUEST_ATTENTION 0x080000
+#define CS_EVENT_CB_DETECT 0x100000
+#define CS_EVENT_3VCARD 0x200000
+#define CS_EVENT_XVCARD 0x400000
+
+/* Return codes */
+#define CS_SUCCESS 0x00
+#define CS_BAD_ADAPTER 0x01
+#define CS_BAD_ATTRIBUTE 0x02
+#define CS_BAD_BASE 0x03
+#define CS_BAD_EDC 0x04
+#define CS_BAD_IRQ 0x06
+#define CS_BAD_OFFSET 0x07
+#define CS_BAD_PAGE 0x08
+#define CS_READ_FAILURE 0x09
+#define CS_BAD_SIZE 0x0a
+#define CS_BAD_SOCKET 0x0b
+#define CS_BAD_TYPE 0x0d
+#define CS_BAD_VCC 0x0e
+#define CS_BAD_VPP 0x0f
+#define CS_BAD_WINDOW 0x11
+#define CS_WRITE_FAILURE 0x12
+#define CS_NO_CARD 0x14
+#define CS_UNSUPPORTED_FUNCTION 0x15
+#define CS_UNSUPPORTED_MODE 0x16
+#define CS_BAD_SPEED 0x17
+#define CS_BUSY 0x18
+#define CS_GENERAL_FAILURE 0x19
+#define CS_WRITE_PROTECTED 0x1a
+#define CS_BAD_ARG_LENGTH 0x1b
+#define CS_BAD_ARGS 0x1c
+#define CS_CONFIGURATION_LOCKED 0x1d
+#define CS_IN_USE 0x1e
+#define CS_NO_MORE_ITEMS 0x1f
+#define CS_OUT_OF_RESOURCE 0x20
+#define CS_BAD_HANDLE 0x21
+
+#define CS_BAD_TUPLE 0x40
+
+#ifdef __KERNEL__
+
+/*
+ * Calls to set up low-level "Socket Services" drivers
+ */
+
+typedef int (*ss_entry_t)(u_int sock, u_int cmd, void *arg);
+extern int register_ss_entry(int nsock, ss_entry_t entry);
+extern void unregister_ss_entry(ss_entry_t entry);
+
+/*
+ * The main Card Services entry point
+ */
+
+enum service {
+ AccessConfigurationRegister, AddSocketServices,
+ AdjustResourceInfo, CheckEraseQueue, CloseMemory, CopyMemory,
+ DeregisterClient, DeregisterEraseQueue, GetCardServicesInfo,
+ GetClientInfo, GetConfigurationInfo, GetEventMask,
+ GetFirstClient, GetFirstPartion, GetFirstRegion, GetFirstTuple,
+ GetNextClient, GetNextPartition, GetNextRegion, GetNextTuple,
+ GetStatus, GetTupleData, MapLogSocket, MapLogWindow, MapMemPage,
+ MapPhySocket, MapPhyWindow, ModifyConfiguration, ModifyWindow,
+ OpenMemory, ParseTuple, ReadMemory, RegisterClient,
+ RegisterEraseQueue, RegisterMTD, RegisterTimer,
+ ReleaseConfiguration, ReleaseExclusive, ReleaseIO, ReleaseIRQ,
+ ReleaseSocketMask, ReleaseWindow, ReplaceSocketServices,
+ RequestConfiguration, RequestExclusive, RequestIO, RequestIRQ,
+ RequestSocketMask, RequestWindow, ResetCard, ReturnSSEntry,
+ SetEventMask, SetRegion, ValidateCIS, VendorSpecific,
+ WriteMemory, BindDevice, BindMTD, ReportError,
+ SuspendCard, ResumeCard, EjectCard, InsertCard, ReplaceCIS,
+ GetFirstWindow, GetNextWindow, GetMemPage
+};
+
+#ifdef IN_CARD_SERVICES
+extern int CardServices(int func, void *a1, void *a2, void *a3);
+#else
+extern int CardServices(int func, ...);
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_CS_H */
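Once a card is inserted, a client driver typically claims resources with the RequestIO, RequestIRQ and RequestConfiguration services using the request structures above. The sketch below is illustrative only: the port base, IRQ attributes and config index are made up, error handling is elided, and the dev_link_t fields used here are declared in ds.h further down.

static void configure_card(dev_link_t *link)
{
    link->io.BasePort1   = 0x300;               /* hypothetical values */
    link->io.NumPorts1   = 16;
    link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
    CardServices(RequestIO, link->handle, &link->io);

    link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
    CardServices(RequestIRQ, link->handle, &link->irq);

    link->conf.Attributes  = CONF_ENABLE_IRQ;
    link->conf.IntType     = INT_MEMORY_AND_IO;
    link->conf.ConfigIndex = 1;
    CardServices(RequestConfiguration, link->handle, &link->conf);

    link->state |= DEV_CONFIG;
}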
diff --git a/linux/pcmcia-cs/include/pcmcia/cs_types.h b/linux/pcmcia-cs/include/pcmcia/cs_types.h
new file mode 100644
index 0000000..88471f9
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/cs_types.h
@@ -0,0 +1,70 @@
+/*
+ * cs_types.h 1.20 2002/04/17 02:52:39
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CS_TYPES_H
+#define _LINUX_CS_TYPES_H
+
+#ifdef __linux__
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <sys/types.h>
+#endif
+#endif
+
+#ifdef __arm__
+typedef u_int ioaddr_t;
+#else
+typedef u_short ioaddr_t;
+#endif
+
+typedef u_short socket_t;
+typedef u_int event_t;
+typedef u_char cisdata_t;
+typedef u_short page_t;
+
+struct client_t;
+typedef struct client_t *client_handle_t;
+
+struct window_t;
+typedef struct window_t *window_handle_t;
+
+struct region_t;
+typedef struct region_t *memory_handle_t;
+
+struct eraseq_t;
+typedef struct eraseq_t *eraseq_handle_t;
+
+#ifndef DEV_NAME_LEN
+#define DEV_NAME_LEN 32
+#endif
+
+typedef char dev_info_t[DEV_NAME_LEN];
+
+#endif /* _LINUX_CS_TYPES_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/driver_ops.h b/linux/pcmcia-cs/include/pcmcia/driver_ops.h
new file mode 100644
index 0000000..9903e5b
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/driver_ops.h
@@ -0,0 +1,73 @@
+/*
+ * driver_ops.h 1.17 2001/10/04 03:15:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_DRIVER_OPS_H
+#define _LINUX_DRIVER_OPS_H
+
+#ifndef DEV_NAME_LEN
+#define DEV_NAME_LEN 32
+#endif
+
+#ifdef __KERNEL__
+
+typedef struct dev_node_t {
+ char dev_name[DEV_NAME_LEN];
+ u_short major, minor;
+ struct dev_node_t *next;
+} dev_node_t;
+
+typedef struct dev_locator_t {
+ enum { LOC_ISA, LOC_PCI } bus;
+ union {
+ struct {
+ u_short io_base_1, io_base_2;
+ u_long mem_base;
+ u_char irq, dma;
+ } isa;
+ struct {
+ u_char bus;
+ u_char devfn;
+ } pci;
+ } b;
+} dev_locator_t;
+
+typedef struct driver_operations {
+ char *name;
+ dev_node_t *(*attach) (dev_locator_t *loc);
+ void (*suspend) (dev_node_t *dev);
+ void (*resume) (dev_node_t *dev);
+ void (*detach) (dev_node_t *dev);
+} driver_operations;
+
+int register_driver(struct driver_operations *ops);
+void unregister_driver(struct driver_operations *ops);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DRIVER_OPS_H */
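A driver hooks into this interface by filling a driver_operations table and registering it. The attach/detach functions below are hypothetical stubs, shown only to make the calling convention concrete.

static dev_node_t *my_attach(dev_locator_t *loc)
{
    /* Probe the location described by 'loc'; return a dev_node_t chain
       on success.  This stub claims nothing. */
    return NULL;
}

static void my_detach(dev_node_t *dev)
{
}

static struct driver_operations my_ops = {
    "my_driver", my_attach, NULL, NULL, my_detach
};

/* Typically called from the module's init and cleanup routines:
 *   register_driver(&my_ops);
 *   unregister_driver(&my_ops);
 */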
diff --git a/linux/pcmcia-cs/include/pcmcia/ds.h b/linux/pcmcia-cs/include/pcmcia/ds.h
new file mode 100644
index 0000000..b372e59
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/ds.h
@@ -0,0 +1,148 @@
+/*
+ * ds.h 1.58 2001/10/04 03:15:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_DS_H
+#define _LINUX_DS_H
+
+#include <pcmcia/driver_ops.h>
+#include <pcmcia/bulkmem.h>
+
+typedef struct tuple_parse_t {
+ tuple_t tuple;
+ cisdata_t data[255];
+ cisparse_t parse;
+} tuple_parse_t;
+
+typedef struct win_info_t {
+ window_handle_t handle;
+ win_req_t window;
+ memreq_t map;
+} win_info_t;
+
+typedef struct bind_info_t {
+ dev_info_t dev_info;
+ u_char function;
+ struct dev_link_t *instance;
+ char name[DEV_NAME_LEN];
+ u_short major, minor;
+ void *next;
+} bind_info_t;
+
+typedef struct mtd_info_t {
+ dev_info_t dev_info;
+ u_int Attributes;
+ u_int CardOffset;
+} mtd_info_t;
+
+typedef union ds_ioctl_arg_t {
+ servinfo_t servinfo;
+ adjust_t adjust;
+ config_info_t config;
+ tuple_t tuple;
+ tuple_parse_t tuple_parse;
+ client_req_t client_req;
+ cs_status_t status;
+ conf_reg_t conf_reg;
+ cisinfo_t cisinfo;
+ region_info_t region;
+ bind_info_t bind_info;
+ mtd_info_t mtd_info;
+ win_info_t win_info;
+ cisdump_t cisdump;
+} ds_ioctl_arg_t;
+
+#define DS_GET_CARD_SERVICES_INFO _IOR ('d', 1, servinfo_t)
+#define DS_ADJUST_RESOURCE_INFO _IOWR('d', 2, adjust_t)
+#define DS_GET_CONFIGURATION_INFO _IOWR('d', 3, config_info_t)
+#define DS_GET_FIRST_TUPLE _IOWR('d', 4, tuple_t)
+#define DS_GET_NEXT_TUPLE _IOWR('d', 5, tuple_t)
+#define DS_GET_TUPLE_DATA _IOWR('d', 6, tuple_parse_t)
+#define DS_PARSE_TUPLE _IOWR('d', 7, tuple_parse_t)
+#define DS_RESET_CARD _IO ('d', 8)
+#define DS_GET_STATUS _IOWR('d', 9, cs_status_t)
+#define DS_ACCESS_CONFIGURATION_REGISTER _IOWR('d', 10, conf_reg_t)
+#define DS_VALIDATE_CIS _IOR ('d', 11, cisinfo_t)
+#define DS_SUSPEND_CARD _IO ('d', 12)
+#define DS_RESUME_CARD _IO ('d', 13)
+#define DS_EJECT_CARD _IO ('d', 14)
+#define DS_INSERT_CARD _IO ('d', 15)
+#define DS_GET_FIRST_REGION _IOWR('d', 16, region_info_t)
+#define DS_GET_NEXT_REGION _IOWR('d', 17, region_info_t)
+#define DS_REPLACE_CIS _IOWR('d', 18, cisdump_t)
+#define DS_GET_FIRST_WINDOW _IOR ('d', 19, win_info_t)
+#define DS_GET_NEXT_WINDOW _IOWR('d', 20, win_info_t)
+#define DS_GET_MEM_PAGE _IOWR('d', 21, win_info_t)
+
+#define DS_BIND_REQUEST _IOWR('d', 60, bind_info_t)
+#define DS_GET_DEVICE_INFO _IOWR('d', 61, bind_info_t)
+#define DS_GET_NEXT_DEVICE _IOWR('d', 62, bind_info_t)
+#define DS_UNBIND_REQUEST _IOW ('d', 63, bind_info_t)
+#define DS_BIND_MTD _IOWR('d', 64, mtd_info_t)
+
+#ifdef __KERNEL__
+
+typedef struct dev_link_t {
+ dev_node_t *dev;
+ u_int state, open;
+ wait_queue_head_t pending;
+ struct timer_list release;
+ client_handle_t handle;
+ io_req_t io;
+ irq_req_t irq;
+ config_req_t conf;
+ window_handle_t win;
+ void *priv;
+ struct dev_link_t *next;
+} dev_link_t;
+
+/* Flags for device state */
+#define DEV_PRESENT 0x01
+#define DEV_CONFIG 0x02
+#define DEV_STALE_CONFIG 0x04 /* release on close */
+#define DEV_STALE_LINK 0x08 /* detach on release */
+#define DEV_CONFIG_PENDING 0x10
+#define DEV_RELEASE_PENDING 0x20
+#define DEV_SUSPEND 0x40
+#define DEV_BUSY 0x80
+
+#define DEV_OK(l) \
+ ((l) && ((l->state & ~DEV_BUSY) == (DEV_CONFIG|DEV_PRESENT)))
+
+int register_pccard_driver(dev_info_t *dev_info,
+ dev_link_t *(*attach)(void),
+ void (*detach)(dev_link_t *));
+
+int unregister_pccard_driver(dev_info_t *dev_info);
+
+#define register_pcmcia_driver register_pccard_driver
+#define unregister_pcmcia_driver unregister_pccard_driver
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DS_H */
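Driver Services clients register an attach/detach pair against a dev_info_t name. The sketch below is a hypothetical stub pair, not taken from any driver in this tree; kmalloc/kfree come from linux/malloc.h.

static dev_info_t example_info = "example_cs";

static dev_link_t *example_attach(void)
{
    dev_link_t *link = kmalloc(sizeof(*link), GFP_KERNEL);

    if (link != NULL) {
        memset(link, 0, sizeof(*link));
        link->state = DEV_PRESENT | DEV_CONFIG_PENDING;
    }
    return link;
}

static void example_detach(dev_link_t *link)
{
    kfree(link);
}

/* Module initialization would then do:
 *   register_pccard_driver(&example_info, example_attach, example_detach);
 */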
diff --git a/linux/pcmcia-cs/include/pcmcia/mem_op.h b/linux/pcmcia-cs/include/pcmcia/mem_op.h
new file mode 100644
index 0000000..9230faa
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/mem_op.h
@@ -0,0 +1,133 @@
+/*
+ * mem_op.h 1.14 2001/08/24 12:16:13
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_MEM_OP_H
+#define _LINUX_MEM_OP_H
+
+#include <asm/uaccess.h>
+
+/*
+ If UNSAFE_MEMCPY is defined, we use the (optimized) system routines
+ to copy between a card and kernel memory. These routines do 32-bit
+ operations which may not work with all PCMCIA controllers. The
+ safe versions defined here will do only 8-bit and 16-bit accesses.
+*/
+
+#ifdef UNSAFE_MEMCPY
+
+#define copy_from_pc memcpy_fromio
+#define copy_to_pc memcpy_toio
+
+static inline void copy_pc_to_user(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 3);
+ n -= odd;
+ while (n) {
+ put_user(readl_ns(from), (int *)to);
+ from += 4; to += 4; n -= 4;
+ }
+ while (odd--)
+ put_user(readb((char *)from++), (char *)to++);
+}
+
+static inline void copy_user_to_pc(void *to, const void *from, size_t n)
+{
+ int l;
+ char c;
+ size_t odd = (n & 3);
+ n -= odd;
+ while (n) {
+ l = get_user((int *)from);
+ writel_ns(l, to);
+ to += 4; from += 4; n -= 4;
+ }
+ while (odd--) {
+ c = get_user((char *)from++);
+ writeb(c, (char *)to++);
+ }
+}
+
+#else /* UNSAFE_MEMCPY */
+
+static inline void copy_from_pc(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ *(u_short *)to = readw_ns(from);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd)
+ *(u_char *)to = readb(from);
+}
+
+static inline void copy_to_pc(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ writew_ns(*(u_short *)from, to);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd)
+ writeb(*(u_char *)from, to);
+}
+
+static inline void copy_pc_to_user(void *to, const void *from, size_t n)
+{
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ put_user(readw_ns(from), (short *)to);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd)
+ put_user(readb(from), (char *)to);
+}
+
+static inline void copy_user_to_pc(void *to, const void *from, size_t n)
+{
+ short s;
+ char c;
+ size_t odd = (n & 1);
+ n -= odd;
+ while (n) {
+ s = get_user((short *)from);
+ writew_ns(s, to);
+ to += 2; from += 2; n -= 2;
+ }
+ if (odd) {
+ c = get_user((char *)from);
+ writeb(c, to);
+ }
+}
+
+#endif /* UNSAFE_MEMCPY */
+
+#endif /* _LINUX_MEM_OP_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/ss.h b/linux/pcmcia-cs/include/pcmcia/ss.h
new file mode 100644
index 0000000..d197e42
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/ss.h
@@ -0,0 +1,133 @@
+/*
+ * ss.h 1.31 2001/08/24 12:16:13
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_SS_H
+#define _LINUX_SS_H
+
+/* For RegisterCallback */
+typedef struct ss_callback_t {
+ void (*handler)(void *info, u_int events);
+ void *info;
+} ss_callback_t;
+
+/* Definitions for card status flags for GetStatus */
+#define SS_WRPROT 0x0001
+#define SS_CARDLOCK 0x0002
+#define SS_EJECTION 0x0004
+#define SS_INSERTION 0x0008
+#define SS_BATDEAD 0x0010
+#define SS_BATWARN 0x0020
+#define SS_READY 0x0040
+#define SS_DETECT 0x0080
+#define SS_POWERON 0x0100
+#define SS_GPI 0x0200
+#define SS_STSCHG 0x0400
+#define SS_CARDBUS 0x0800
+#define SS_3VCARD 0x1000
+#define SS_XVCARD 0x2000
+#define SS_PENDING 0x4000
+
+/* for InquireSocket */
+typedef struct socket_cap_t {
+ u_int features;
+ u_int irq_mask;
+ u_int map_size;
+ u_char pci_irq;
+ u_char cardbus;
+ struct pci_bus *cb_bus;
+ struct bus_operations *bus;
+} socket_cap_t;
+
+/* InquireSocket capabilities */
+#define SS_CAP_PAGE_REGS 0x0001
+#define SS_CAP_VIRTUAL_BUS 0x0002
+#define SS_CAP_MEM_ALIGN 0x0004
+#define SS_CAP_STATIC_MAP 0x0008
+#define SS_CAP_PCCARD 0x4000
+#define SS_CAP_CARDBUS 0x8000
+
+/* for GetSocket, SetSocket */
+typedef struct socket_state_t {
+ u_int flags;
+ u_int csc_mask;
+ u_char Vcc, Vpp;
+ u_char io_irq;
+} socket_state_t;
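+/* Vcc and Vpp are given in tenths of a volt (e.g. 50 = 5 V, 33 = 3.3 V). */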
+
+/* Socket configuration flags */
+#define SS_PWR_AUTO 0x0010
+#define SS_IOCARD 0x0020
+#define SS_RESET 0x0040
+#define SS_DMA_MODE 0x0080
+#define SS_SPKR_ENA 0x0100
+#define SS_OUTPUT_ENA 0x0200
+#define SS_ZVCARD 0x0400
+
+/* Flags for I/O port and memory windows */
+#define MAP_ACTIVE 0x01
+#define MAP_16BIT 0x02
+#define MAP_AUTOSZ 0x04
+#define MAP_0WS 0x08
+#define MAP_WRPROT 0x10
+#define MAP_ATTRIB 0x20
+#define MAP_USE_WAIT 0x40
+#define MAP_PREFETCH 0x80
+
+/* Use this just for bridge windows */
+#define MAP_IOSPACE 0x20
+
+typedef struct pccard_io_map {
+ u_char map;
+ u_char flags;
+ u_short speed;
+ u_short start, stop;
+} pccard_io_map;
+
+typedef struct pccard_mem_map {
+ u_char map;
+ u_char flags;
+ u_short speed;
+ u_long sys_start, sys_stop;
+ u_int card_start;
+} pccard_mem_map;
+
+typedef struct cb_bridge_map {
+ u_char map;
+ u_char flags;
+ u_int start, stop;
+} cb_bridge_map;
+
+enum ss_service {
+ SS_RegisterCallback, SS_InquireSocket,
+ SS_GetStatus, SS_GetSocket, SS_SetSocket,
+ SS_GetIOMap, SS_SetIOMap, SS_GetMemMap, SS_SetMemMap,
+ SS_GetBridge, SS_SetBridge, SS_ProcSetup
+};
+
+#endif /* _LINUX_SS_H */
diff --git a/linux/pcmcia-cs/include/pcmcia/version.h b/linux/pcmcia-cs/include/pcmcia/version.h
new file mode 100644
index 0000000..bc2aef3
--- /dev/null
+++ b/linux/pcmcia-cs/include/pcmcia/version.h
@@ -0,0 +1,9 @@
+/* version.h 1.118 2003/12/20 07:16:36 (David Hinds) */
+
+#define CS_PKG_RELEASE "3.2.8"
+#define CS_PKG_RELEASE_CODE 0x3208
+
+#define VERSION(v,p,s) (((v)<<16)+((p)<<8)+(s))
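+/* For example, VERSION(3,2,8) evaluates to 0x030208. */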
+
+#define CS_RELEASE CS_PKG_RELEASE
+#define CS_RELEASE_CODE CS_PKG_RELEASE_CODE
diff --git a/linux/pcmcia-cs/modules/bulkmem.c b/linux/pcmcia-cs/modules/bulkmem.c
new file mode 100644
index 0000000..558e6d9
--- /dev/null
+++ b/linux/pcmcia-cs/modules/bulkmem.c
@@ -0,0 +1,626 @@
+/*======================================================================
+
+ PCMCIA Bulk Memory Services
+
+ bulkmem.c 1.44 2002/06/29 06:23:09
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+#define IN_CARD_SERVICES
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+/*======================================================================
+
+ This function handles submitting an MTD request, and retrying
+ requests when an MTD is busy.
+
+ An MTD request should never block.
+
+======================================================================*/
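+/* When the MTD reports CS_BUSY, req->Status says how to wait before
+   resubmitting: a short interruptible sleep for MTD_WAITREQ, a timed sleep
+   for MTD_WAITTIMER and MTD_WAITRDY (the retry is flagged with
+   MTD_REQ_TIMEOUT), and an open-ended sleep for MTD_WAITPOWER. */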
+
+static int do_mtd_request(memory_handle_t handle, mtd_request_t *req,
+ caddr_t buf)
+{
+ int ret, tries;
+ client_t *mtd;
+ socket_info_t *s;
+
+ mtd = handle->mtd;
+ if (mtd == NULL)
+ return CS_GENERAL_FAILURE;
+ s = SOCKET(mtd);
+ for (ret = tries = 0; tries < 100; tries++) {
+ mtd->event_callback_args.mtdrequest = req;
+ mtd->event_callback_args.buffer = buf;
+ ret = EVENT(mtd, CS_EVENT_MTD_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret != CS_BUSY)
+ break;
+ switch (req->Status) {
+ case MTD_WAITREQ:
+ /* Not that we should ever need this... */
+ interruptible_sleep_on_timeout(&mtd->mtd_req, HZ);
+ break;
+ case MTD_WAITTIMER:
+ case MTD_WAITRDY:
+ interruptible_sleep_on_timeout(&mtd->mtd_req,
+ req->Timeout*HZ/1000);
+ req->Function |= MTD_REQ_TIMEOUT;
+ break;
+ case MTD_WAITPOWER:
+ interruptible_sleep_on(&mtd->mtd_req);
+ break;
+ }
+ if (signal_pending(current))
+ printk(KERN_NOTICE "cs: do_mtd_request interrupted!\n");
+ }
+ if (tries == 100) {
+ printk(KERN_NOTICE "cs: MTD request timed out!\n");
+ ret = CS_GENERAL_FAILURE;
+ }
+ wake_up_interruptible(&mtd->mtd_req);
+ retry_erase_list(&mtd->erase_busy, 0);
+ return ret;
+} /* do_mtd_request */
+
+/*======================================================================
+
+ This stuff is all for handling asynchronous erase requests. It
+ is complicated because all the retry stuff has to be dealt with
+ in timer interrupts or in the card status event handler.
+
+======================================================================*/
+
+static void insert_queue(erase_busy_t *head, erase_busy_t *entry)
+{
+ DEBUG(2, "cs: adding 0x%p to queue 0x%p\n", entry, head);
+ entry->next = head;
+ entry->prev = head->prev;
+ head->prev->next = entry;
+ head->prev = entry;
+}
+
+static void remove_queue(erase_busy_t *entry)
+{
+ DEBUG(2, "cs: unqueueing 0x%p\n", entry);
+ entry->next->prev = entry->prev;
+ entry->prev->next = entry->next;
+}
+
+static void retry_erase(erase_busy_t *busy, u_int cause)
+{
+ eraseq_entry_t *erase = busy->erase;
+ mtd_request_t req;
+ client_t *mtd;
+ socket_info_t *s;
+ int ret;
+
+ DEBUG(2, "cs: trying erase request 0x%p...\n", busy);
+ if (busy->next)
+ remove_queue(busy);
+ req.Function = MTD_REQ_ERASE | cause;
+ req.TransferLength = erase->Size;
+ req.DestCardOffset = erase->Offset + erase->Handle->info.CardOffset;
+ req.MediaID = erase->Handle->MediaID;
+ mtd = erase->Handle->mtd;
+ s = SOCKET(mtd);
+ mtd->event_callback_args.mtdrequest = &req;
+ ret = EVENT(mtd, CS_EVENT_MTD_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret == CS_BUSY) {
+ DEBUG(2, " Status = %d, requeueing.\n", req.Status);
+ switch (req.Status) {
+ case MTD_WAITREQ:
+ case MTD_WAITPOWER:
+ insert_queue(&mtd->erase_busy, busy);
+ break;
+ case MTD_WAITTIMER:
+ case MTD_WAITRDY:
+ if (req.Status == MTD_WAITRDY)
+ insert_queue(&s->erase_busy, busy);
+ mod_timer(&busy->timeout, jiffies + req.Timeout*HZ/1000);
+ break;
+ }
+ } else {
+ /* update erase queue status */
+ DEBUG(2, " Ret = %d\n", ret);
+ switch (ret) {
+ case CS_SUCCESS:
+ erase->State = ERASE_PASSED; break;
+ case CS_WRITE_PROTECTED:
+ erase->State = ERASE_MEDIA_WRPROT; break;
+ case CS_BAD_OFFSET:
+ erase->State = ERASE_BAD_OFFSET; break;
+ case CS_BAD_SIZE:
+ erase->State = ERASE_BAD_SIZE; break;
+ case CS_NO_CARD:
+ erase->State = ERASE_BAD_SOCKET; break;
+ default:
+ erase->State = ERASE_FAILED; break;
+ }
+ busy->client->event_callback_args.info = erase;
+ EVENT(busy->client, CS_EVENT_ERASE_COMPLETE, CS_EVENT_PRI_LOW);
+ kfree(busy);
+ /* Resubmit anything waiting for a request to finish */
+ wake_up_interruptible(&mtd->mtd_req);
+ retry_erase_list(&mtd->erase_busy, 0);
+ }
+} /* retry_erase */
+
+void retry_erase_list(erase_busy_t *list, u_int cause)
+{
+ erase_busy_t tmp = *list;
+
+ DEBUG(2, "cs: rescanning erase queue list 0x%p\n", list);
+ if (list->next == list)
+ return;
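+ /* retry_erase() may requeue entries on the original list head, or free
+ them, so splice everything onto a local head and walk that copy instead. */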
+ /* First, truncate the original list */
+ list->prev->next = &tmp;
+ list->next->prev = &tmp;
+ list->prev = list->next = list;
+ tmp.prev->next = &tmp;
+ tmp.next->prev = &tmp;
+
+ /* Now, retry each request, in order. */
+ while (tmp.next != &tmp)
+ retry_erase(tmp.next, cause);
+} /* retry_erase_list */
+
+static void handle_erase_timeout(u_long arg)
+{
+ DEBUG(0, "cs: erase timeout for entry 0x%lx\n", arg);
+ retry_erase((erase_busy_t *)arg, MTD_REQ_TIMEOUT);
+}
+
+static void setup_erase_request(client_handle_t handle, eraseq_entry_t *erase)
+{
+ erase_busy_t *busy;
+ region_info_t *info;
+
+ if (CHECK_REGION(erase->Handle))
+ erase->State = ERASE_BAD_SOCKET;
+ else {
+ info = &erase->Handle->info;
+ if ((erase->Offset >= info->RegionSize) ||
+ (erase->Offset & (info->BlockSize-1)))
+ erase->State = ERASE_BAD_OFFSET;
+ else if ((erase->Offset+erase->Size > info->RegionSize) ||
+ (erase->Size & (info->BlockSize-1)))
+ erase->State = ERASE_BAD_SIZE;
+ else {
+ erase->State = 1;
+ busy = kmalloc(sizeof(erase_busy_t), GFP_KERNEL);
+ busy->erase = erase;
+ busy->client = handle;
+ init_timer(&busy->timeout);
+ busy->timeout.data = (u_long)busy;
+ busy->timeout.function = &handle_erase_timeout;
+ busy->prev = busy->next = NULL;
+ retry_erase(busy, 0);
+ }
+ }
+} /* setup_erase_request */
+
+/*======================================================================
+
+ MTD helper functions
+
+======================================================================*/
+
+static int mtd_modify_window(window_handle_t win, mtd_mod_win_t *req)
+{
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ win->ctl.flags = MAP_16BIT | MAP_ACTIVE;
+ if (req->Attributes & WIN_USE_WAIT)
+ win->ctl.flags |= MAP_USE_WAIT;
+ if (req->Attributes & WIN_MEMORY_TYPE)
+ win->ctl.flags |= MAP_ATTRIB;
+ win->ctl.speed = req->AccessSpeed;
+ win->ctl.card_start = req->CardOffset;
+ win->sock->ss_entry(win->sock->sock, SS_SetMemMap, &win->ctl);
+ return CS_SUCCESS;
+}
+
+static int mtd_set_vpp(client_handle_t handle, mtd_vpp_req_t *req)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ if (req->Vpp1 != req->Vpp2)
+ return CS_BAD_VPP;
+ s = SOCKET(handle);
+ s->socket.Vpp = req->Vpp1;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_BAD_VPP;
+ return CS_SUCCESS;
+}
+
+static int mtd_rdy_mask(client_handle_t handle, mtd_rdy_req_t *req)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (req->Mask & CS_EVENT_READY_CHANGE)
+ s->socket.csc_mask |= SS_READY;
+ else
+ s->socket.csc_mask &= ~SS_READY;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_GENERAL_FAILURE;
+ return CS_SUCCESS;
+}
+
+int MTDHelperEntry(int func, void *a1, void *a2)
+{
+ switch (func) {
+ case MTDRequestWindow:
+ return CardServices(RequestWindow, a1, a2, NULL);
+ case MTDReleaseWindow:
+ return CardServices(ReleaseWindow, a1, NULL, NULL);
+ case MTDModifyWindow:
+ return mtd_modify_window(a1, a2);
+ case MTDSetVpp:
+ return mtd_set_vpp(a1, a2);
+ case MTDRDYMask:
+ return mtd_rdy_mask(a1, a2);
+ default:
+ return CS_UNSUPPORTED_FUNCTION;
+ }
+} /* MTDHelperEntry */
+
+/*======================================================================
+
+ This stuff is used by Card Services to initialize the table of
+ region info used for subsequent calls to GetFirstRegion and
+ GetNextRegion.
+
+======================================================================*/
+
+static void setup_regions(client_handle_t handle, int attr,
+ memory_handle_t *list)
+{
+ int i, code, has_jedec, has_geo;
+ u_int offset;
+ cistpl_device_t device;
+ cistpl_jedec_t jedec;
+ cistpl_device_geo_t geo;
+ memory_handle_t r;
+
+ DEBUG(1, "cs: setup_regions(0x%p, %d, 0x%p)\n",
+ handle, attr, list);
+
+ code = (attr) ? CISTPL_DEVICE_A : CISTPL_DEVICE;
+ if (read_tuple(handle, code, &device) != CS_SUCCESS)
+ return;
+ code = (attr) ? CISTPL_JEDEC_A : CISTPL_JEDEC_C;
+ has_jedec = (read_tuple(handle, code, &jedec) == CS_SUCCESS);
+ if (has_jedec && (device.ndev != jedec.nid)) {
+#ifdef PCMCIA_DEBUG
+ printk(KERN_DEBUG "cs: Device info does not match JEDEC info.\n");
+#endif
+ has_jedec = 0;
+ }
+ code = (attr) ? CISTPL_DEVICE_GEO_A : CISTPL_DEVICE_GEO;
+ has_geo = (read_tuple(handle, code, &geo) == CS_SUCCESS);
+ if (has_geo && (device.ndev != geo.ngeo)) {
+#ifdef PCMCIA_DEBUG
+ printk(KERN_DEBUG "cs: Device info does not match geometry tuple.\n");
+#endif
+ has_geo = 0;
+ }
+
+ offset = 0;
+ for (i = 0; i < device.ndev; i++) {
+ if ((device.dev[i].type != CISTPL_DTYPE_NULL) &&
+ (device.dev[i].size != 0)) {
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ r->region_magic = REGION_MAGIC;
+ r->state = 0;
+ r->dev_info[0] = '\0';
+ r->mtd = NULL;
+ r->info.Attributes = (attr) ? REGION_TYPE_AM : 0;
+ r->info.CardOffset = offset;
+ r->info.RegionSize = device.dev[i].size;
+ r->info.AccessSpeed = device.dev[i].speed;
+ if (has_jedec) {
+ r->info.JedecMfr = jedec.id[i].mfr;
+ r->info.JedecInfo = jedec.id[i].info;
+ } else
+ r->info.JedecMfr = r->info.JedecInfo = 0;
+ if (has_geo) {
+ r->info.BlockSize = geo.geo[i].buswidth *
+ geo.geo[i].erase_block * geo.geo[i].interleave;
+ r->info.PartMultiple =
+ r->info.BlockSize * geo.geo[i].partition;
+ } else
+ r->info.BlockSize = r->info.PartMultiple = 1;
+ r->info.next = *list; *list = r;
+ }
+ offset += device.dev[i].size;
+ }
+} /* setup_regions */
+
+/*======================================================================
+
+ This is tricky. When get_first_region() is called by Driver
+ Services, we initialize the region info table in the socket
+ structure. When it is called by an MTD, we can just scan the
+ table for matching entries.
+
+======================================================================*/
+
+static int match_region(client_handle_t handle, memory_handle_t list,
+ region_info_t *match)
+{
+ while (list != NULL) {
+ if (!(handle->Attributes & INFO_MTD_CLIENT) ||
+ (strcmp(handle->dev_info, list->dev_info) == 0)) {
+ *match = list->info;
+ return CS_SUCCESS;
+ }
+ list = list->info.next;
+ }
+ return CS_NO_MORE_ITEMS;
+} /* match_region */
+
+int get_first_region(client_handle_t handle, region_info_t *rgn)
+{
+ socket_info_t *s = SOCKET(handle);
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+
+ if ((handle->Attributes & INFO_MASTER_CLIENT) &&
+ (!(s->state & SOCKET_REGION_INFO))) {
+ setup_regions(handle, 0, &s->c_region);
+ setup_regions(handle, 1, &s->a_region);
+ s->state |= SOCKET_REGION_INFO;
+ }
+
+ if (rgn->Attributes & REGION_TYPE_AM)
+ return match_region(handle, s->a_region, rgn);
+ else
+ return match_region(handle, s->c_region, rgn);
+} /* get_first_region */
+
+int get_next_region(client_handle_t handle, region_info_t *rgn)
+{
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ return match_region(handle, rgn->next, rgn);
+} /* get_next_region */
+
+/*======================================================================
+
+ Connect an MTD with a memory region.
+
+======================================================================*/
+
+int register_mtd(client_handle_t handle, mtd_reg_t *reg)
+{
+ memory_handle_t list;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (reg->Attributes & REGION_TYPE_AM)
+ list = s->a_region;
+ else
+ list = s->c_region;
+ DEBUG(1, "cs: register_mtd(0x%p, '%s', 0x%x)\n",
+ handle, handle->dev_info, reg->Offset);
+ while (list) {
+ if (list->info.CardOffset == reg->Offset) break;
+ list = list->info.next;
+ }
+ if (list && (list->mtd == NULL) &&
+ (strcmp(handle->dev_info, list->dev_info) == 0)) {
+ list->info.Attributes = reg->Attributes;
+ list->MediaID = reg->MediaID;
+ list->mtd = handle;
+ handle->mtd_count++;
+ return CS_SUCCESS;
+ } else
+ return CS_BAD_OFFSET;
+} /* register_mtd */
+
+/*======================================================================
+
+ Erase queue management functions
+
+======================================================================*/
+
+int register_erase_queue(client_handle_t *handle, eraseq_hdr_t *header)
+{
+ eraseq_t *queue;
+
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ queue = kmalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) return CS_OUT_OF_RESOURCE;
+ queue->eraseq_magic = ERASEQ_MAGIC;
+ queue->handle = *handle;
+ queue->count = header->QueueEntryCnt;
+ queue->entry = header->QueueEntryArray;
+ *handle = (client_handle_t)queue;
+ return CS_SUCCESS;
+} /* register_erase_queue */
+
+int deregister_erase_queue(eraseq_handle_t eraseq)
+{
+ int i;
+ if (CHECK_ERASEQ(eraseq))
+ return CS_BAD_HANDLE;
+ for (i = 0; i < eraseq->count; i++)
+ if (ERASE_IN_PROGRESS(eraseq->entry[i].State)) break;
+ if (i < eraseq->count)
+ return CS_BUSY;
+ eraseq->eraseq_magic = 0;
+ kfree(eraseq);
+ return CS_SUCCESS;
+} /* deregister_erase_queue */
+
+int check_erase_queue(eraseq_handle_t eraseq)
+{
+ int i;
+ if (CHECK_ERASEQ(eraseq))
+ return CS_BAD_HANDLE;
+ for (i = 0; i < eraseq->count; i++)
+ if (eraseq->entry[i].State == ERASE_QUEUED)
+ setup_erase_request(eraseq->handle, &eraseq->entry[i]);
+ return CS_SUCCESS;
+} /* check_erase_queue */
+
+/*======================================================================
+
+ Look up the memory region matching the request, and return a
+ memory handle.
+
+======================================================================*/
+
+int open_memory(client_handle_t *handle, open_mem_t *open)
+{
+ socket_info_t *s;
+ memory_handle_t region;
+
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(*handle);
+ if (open->Attributes & MEMORY_TYPE_AM)
+ region = s->a_region;
+ else
+ region = s->c_region;
+ while (region) {
+ if (region->info.CardOffset == open->Offset) break;
+ region = region->info.next;
+ }
+ if (region && region->mtd) {
+ *handle = (client_handle_t)region;
+ DEBUG(1, "cs: open_memory(0x%p, 0x%x) = 0x%p\n",
+ handle, open->Offset, region);
+ return CS_SUCCESS;
+ } else
+ return CS_BAD_OFFSET;
+} /* open_memory */
+
+/*======================================================================
+
+ Close a memory handle from an earlier call to OpenMemory.
+
+ For the moment, I don't think this needs to do anything.
+
+======================================================================*/
+
+int close_memory(memory_handle_t handle)
+{
+ DEBUG(1, "cs: close_memory(0x%p)\n", handle);
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ return CS_SUCCESS;
+} /* close_memory */
+
+/*======================================================================
+
+ Read from a memory device, using a handle previously returned
+ by a call to OpenMemory.
+
+======================================================================*/
+
+int read_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf)
+{
+ mtd_request_t mtd;
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ if (req->Offset >= handle->info.RegionSize)
+ return CS_BAD_OFFSET;
+ if (req->Offset+req->Count > handle->info.RegionSize)
+ return CS_BAD_SIZE;
+
+ mtd.SrcCardOffset = req->Offset + handle->info.CardOffset;
+ mtd.TransferLength = req->Count;
+ mtd.MediaID = handle->MediaID;
+ mtd.Function = MTD_REQ_READ;
+ if (req->Attributes & MEM_OP_BUFFER_KERNEL)
+ mtd.Function |= MTD_REQ_KERNEL;
+ return do_mtd_request(handle, &mtd, buf);
+} /* read_memory */
+
+/*======================================================================
+
+ Write to a memory device, using a handle previously returned by
+ a call to OpenMemory.
+
+======================================================================*/
+
+int write_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf)
+{
+ mtd_request_t mtd;
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ if (req->Offset >= handle->info.RegionSize)
+ return CS_BAD_OFFSET;
+ if (req->Offset+req->Count > handle->info.RegionSize)
+ return CS_BAD_SIZE;
+
+ mtd.DestCardOffset = req->Offset + handle->info.CardOffset;
+ mtd.TransferLength = req->Count;
+ mtd.MediaID = handle->MediaID;
+ mtd.Function = MTD_REQ_WRITE;
+ if (req->Attributes & MEM_OP_BUFFER_KERNEL)
+ mtd.Function |= MTD_REQ_KERNEL;
+ return do_mtd_request(handle, &mtd, buf);
+} /* write_memory */
+
+/*======================================================================
+
+ This isn't needed for anything I could think of.
+
+======================================================================*/
+
+int copy_memory(memory_handle_t handle, copy_op_t *req)
+{
+ if (CHECK_REGION(handle))
+ return CS_BAD_HANDLE;
+ return CS_UNSUPPORTED_FUNCTION;
+}
+
diff --git a/linux/pcmcia-cs/modules/cirrus.h b/linux/pcmcia-cs/modules/cirrus.h
new file mode 100644
index 0000000..e3bb255
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cirrus.h
@@ -0,0 +1,188 @@
+/*
+ * cirrus.h 1.11 2003/09/09 07:05:40
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CIRRUS_H
+#define _LINUX_CIRRUS_H
+
+#ifndef PCI_VENDOR_ID_CIRRUS
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#endif
+#ifndef PCI_DEVICE_ID_CIRRUS_6729
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#endif
+#ifndef PCI_DEVICE_ID_CIRRUS_6832
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#endif
+
+#define PD67_MISC_CTL_1 0x16 /* Misc control 1 */
+#define PD67_FIFO_CTL 0x17 /* FIFO control */
+#define PD67_MISC_CTL_2 0x1E /* Misc control 2 */
+#define PD67_CHIP_INFO 0x1f /* Chip information */
+#define PD67_ATA_CTL 0x026 /* 6730: ATA control */
+#define PD67_EXT_INDEX 0x2e /* Extension index */
+#define PD67_EXT_DATA 0x2f /* Extension data */
+
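+/* The extension registers below are reached indirectly: write the register
+   index to PD67_EXT_INDEX, then read or write PD67_EXT_DATA. */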
+#define pd67_ext_get(s, r) \
+ (i365_set(s, PD67_EXT_INDEX, r), i365_get(s, PD67_EXT_DATA))
+#define pd67_ext_set(s, r, v) \
+ (i365_set(s, PD67_EXT_INDEX, r), i365_set(s, PD67_EXT_DATA, v))
+
+/* PD6722 extension registers -- indexed in PD67_EXT_INDEX */
+#define PD67_DATA_MASK0 0x01 /* Data mask 0 */
+#define PD67_DATA_MASK1 0x02 /* Data mask 1 */
+#define PD67_DMA_CTL 0x03 /* DMA control */
+
+/* PD6730 extension registers -- indexed in PD67_EXT_INDEX */
+#define PD67_EXT_CTL_1 0x03 /* Extension control 1 */
+#define PD67_MEM_PAGE(n) ((n)+5) /* PCI window bits 31:24 */
+#define PD67_EXTERN_DATA 0x0a
+#define PD67_EXT_CTL_2 0x0b
+#define PD67_MISC_CTL_3 0x25
+#define PD67_SMB_PWR_CTL 0x26
+
+/* I/O window address offset */
+#define PD67_IO_OFF(w) (0x36+((w)<<1))
+
+/* Timing register sets */
+#define PD67_TIME_SETUP(n) (0x3a + 3*(n))
+#define PD67_TIME_CMD(n) (0x3b + 3*(n))
+#define PD67_TIME_RECOV(n) (0x3c + 3*(n))
+
+/* Flags for PD67_MISC_CTL_1 */
+#define PD67_MC1_5V_DET 0x01 /* 5v detect */
+#define PD67_MC1_MEDIA_ENA 0x01 /* 6730: Multimedia enable */
+#define PD67_MC1_VCC_3V 0x02 /* 3.3v Vcc */
+#define PD67_MC1_PULSE_MGMT 0x04
+#define PD67_MC1_PULSE_IRQ 0x08
+#define PD67_MC1_SPKR_ENA 0x10
+#define PD67_MC1_INPACK_ENA 0x80
+
+/* Flags for PD67_FIFO_CTL */
+#define PD67_FIFO_EMPTY 0x80
+
+/* Flags for PD67_MISC_CTL_2 */
+#define PD67_MC2_FREQ_BYPASS 0x01
+#define PD67_MC2_DYNAMIC_MODE 0x02
+#define PD67_MC2_SUSPEND 0x04
+#define PD67_MC2_5V_CORE 0x08
+#define PD67_MC2_LED_ENA 0x10 /* IRQ 12 is LED enable */
+#define PD67_MC2_FAST_PCI 0x10 /* 6729: PCI bus > 25 MHz */
+#define PD67_MC2_3STATE_BIT7 0x20 /* Floppy change bit */
+#define PD67_MC2_DMA_MODE 0x40
+#define PD67_MC2_IRQ15_RI 0x80 /* IRQ 15 is ring enable */
+
+/* Flags for PD67_CHIP_INFO */
+#define PD67_INFO_SLOTS 0x20 /* 0 = 1 slot, 1 = 2 slots */
+#define PD67_INFO_CHIP_ID 0xc0
+#define PD67_INFO_REV 0x1c
+
+/* Fields in PD67_TIME_* registers */
+#define PD67_TIME_SCALE 0xc0
+#define PD67_TIME_SCALE_1 0x00
+#define PD67_TIME_SCALE_16 0x40
+#define PD67_TIME_SCALE_256 0x80
+#define PD67_TIME_SCALE_4096 0xc0
+#define PD67_TIME_MULT 0x3f
+
+/* Fields in PD67_DMA_CTL */
+#define PD67_DMA_MODE 0xc0
+#define PD67_DMA_OFF 0x00
+#define PD67_DMA_DREQ_INPACK 0x40
+#define PD67_DMA_DREQ_WP 0x80
+#define PD67_DMA_DREQ_BVD2 0xc0
+#define PD67_DMA_PULLUP 0x20 /* Disable socket pullups? */
+
+/* Fields in PD67_EXT_CTL_1 */
+#define PD67_EC1_VCC_PWR_LOCK 0x01
+#define PD67_EC1_AUTO_PWR_CLEAR 0x02
+#define PD67_EC1_LED_ENA 0x04
+#define PD67_EC1_INV_CARD_IRQ 0x08
+#define PD67_EC1_INV_MGMT_IRQ 0x10
+#define PD67_EC1_PULLUP_CTL 0x20
+
+/* Fields in PD67_EXTERN_DATA */
+#define PD67_EXD_VS1(s) (0x01 << ((s)<<1))
+#define PD67_EXD_VS2(s) (0x02 << ((s)<<1))
+
+/* Fields in PD67_EXT_CTL_2 */
+#define PD67_EC2_GPSTB_TOTEM 0x04
+#define PD67_EC2_GPSTB_IOR 0x08
+#define PD67_EC2_GPSTB_IOW 0x10
+#define PD67_EC2_GPSTB_HIGH 0x20
+
+/* Fields in PD67_MISC_CTL_3 */
+#define PD67_MC3_IRQ_MASK 0x03
+#define PD67_MC3_IRQ_PCPCI 0x00
+#define PD67_MC3_IRQ_EXTERN 0x01
+#define PD67_MC3_IRQ_PCIWAY 0x02
+#define PD67_MC3_IRQ_PCI 0x03
+#define PD67_MC3_PWR_MASK 0x0c
+#define PD67_MC3_PWR_SERIAL 0x00
+#define PD67_MC3_PWR_TI2202 0x08
+#define PD67_MC3_PWR_SMB 0x0c
+
+/* Register definitions for Cirrus PD6832 PCI-to-CardBus bridge */
+
+/* PD6832 extension registers -- indexed in PD67_EXT_INDEX */
+#define PD68_PCI_SPACE 0x22
+#define PD68_PCCARD_SPACE 0x23
+#define PD68_WINDOW_TYPE 0x24
+#define PD68_EXT_CSC 0x2e
+#define PD68_MISC_CTL_4 0x2f
+#define PD68_MISC_CTL_5 0x30
+#define PD68_MISC_CTL_6 0x31
+
+/* Extra flags in PD67_MISC_CTL_3 */
+#define PD68_MC3_HW_SUSP 0x10
+#define PD68_MC3_MM_EXPAND 0x40
+#define PD68_MC3_MM_ARM 0x80
+
+/* Bridge Control Register */
+#define PD6832_BCR_MGMT_IRQ_ENA 0x0800
+
+/* Socket Number Register */
+#define PD6832_SOCKET_NUMBER 0x004c /* 8 bit */
+
+/* Data structure for tracking vendor-specific state */
+typedef struct cirrus_state_t {
+ u_char misc1; /* PD67_MISC_CTL_1 */
+ u_char misc2; /* PD67_MISC_CTL_2 */
+ u_char ectl1; /* PD67_EXT_CTL_1 */
+ u_char timer[6]; /* PD67_TIME_* */
+} cirrus_state_t;
+
+#define CIRRUS_PCIC_ID \
+ IS_PD6729, IS_PD6730, IS_PD6832
+
+#define CIRRUS_PCIC_INFO \
+ { "Cirrus PD6729", IS_CIRRUS|IS_PCI, ID(CIRRUS, 6729) }, \
+ { "Cirrus PD6730", IS_CIRRUS|IS_PCI, PCI_VENDOR_ID_CIRRUS, -1 }, \
+ { "Cirrus PD6832", IS_CIRRUS|IS_CARDBUS, ID(CIRRUS, 6832) }
+
+#endif /* _LINUX_CIRRUS_H */
diff --git a/linux/pcmcia-cs/modules/cistpl.c b/linux/pcmcia-cs/modules/cistpl.c
new file mode 100644
index 0000000..404b8e4
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cistpl.c
@@ -0,0 +1,1502 @@
+/*======================================================================
+
+ PCMCIA Card Information Structure parser
+
+ cistpl.c 1.101 2003/12/15 03:58:03
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#define __NO_VERSION__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/bus_ops.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+static const u_char mantissa[] = {
+ 10, 12, 13, 15, 20, 25, 30, 35,
+ 40, 45, 50, 55, 60, 70, 80, 90
+};
+
+static const u_int exponent[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
+};
+
+/* Convert an extended speed byte to a time in nanoseconds */
+#define SPEED_CVT(v) \
+ (mantissa[(((v)>>3)&15)-1] * exponent[(v)&7] / 10)
+/* Convert a power byte to a current in 0.1 microamps */
+#define POWER_CVT(v) \
+ (mantissa[((v)>>3)&15] * exponent[(v)&7] / 10)
+#define POWER_SCALE(v) (exponent[(v)&7])
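+/* Illustrative example: for an extended speed byte of 0x32 the mantissa
+   field is 6 (table entry 25, i.e. 2.5) and the exponent field is 2 (x100),
+   so SPEED_CVT(0x32) == 25 * 100 / 10 == 250 ns. */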
+
+/* Upper limit on reasonable # of tuples */
+#define MAX_TUPLES 200
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+INT_MODULE_PARM(cis_width, 0); /* 16-bit CIS? */
+
+/*======================================================================
+
+ Low-level functions to read and write CIS memory. I think the
+ write routine is only useful for writing one-byte registers.
+
+======================================================================*/
+
+/* Bits in attr field */
+#define IS_ATTR 1
+#define IS_INDIRECT 8
+
+static int setup_cis_mem(socket_info_t *s);
+
+static void set_cis_map(socket_info_t *s, pccard_mem_map *mem)
+{
+ s->ss_entry(s->sock, SS_SetMemMap, mem);
+ if (s->cap.features & SS_CAP_STATIC_MAP) {
+ if (s->cis_virt)
+ bus_iounmap(s->cap.bus, s->cis_virt);
+ s->cis_virt = bus_ioremap(s->cap.bus, mem->sys_start,
+ s->cap.map_size);
+ }
+}
+
+int read_cis_mem(socket_info_t *s, int attr, u_int addr,
+ u_int len, void *ptr)
+{
+ pccard_mem_map *mem = &s->cis_mem;
+ u_char *sys, *buf = ptr;
+
+ DEBUG(3, "cs: read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
+ if (setup_cis_mem(s) != 0) {
+ memset(ptr, 0xff, len);
+ return -1;
+ }
+ mem->flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) { addr *= 2; flags = ICTRL0_AUTOINC; }
+ mem->card_start = 0; mem->flags = MAP_ACTIVE;
+ set_cis_map(s, mem);
+ sys = s->cis_virt;
+ bus_writeb(s->cap.bus, flags, sys+CISREG_ICTRL0);
+ bus_writeb(s->cap.bus, addr & 0xff, sys+CISREG_IADDR0);
+ bus_writeb(s->cap.bus, (addr>>8) & 0xff, sys+CISREG_IADDR1);
+ bus_writeb(s->cap.bus, (addr>>16) & 0xff, sys+CISREG_IADDR2);
+ bus_writeb(s->cap.bus, (addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ *buf = bus_readb(s->cap.bus, sys+CISREG_IDATA0);
+ } else {
+ u_int inc = 1;
+ if (attr) { mem->flags |= MAP_ATTRIB; inc++; addr *= 2; }
+ mem->card_start = addr & ~(s->cap.map_size-1);
+ while (len) {
+ set_cis_map(s, mem);
+ sys = s->cis_virt + (addr & (s->cap.map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == s->cis_virt+s->cap.map_size) break;
+ *buf = bus_readb(s->cap.bus, sys);
+ }
+ mem->card_start += s->cap.map_size;
+ addr = 0;
+ }
+ }
+ DEBUG(3, "cs: %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
+ *(u_char *)(ptr+0), *(u_char *)(ptr+1),
+ *(u_char *)(ptr+2), *(u_char *)(ptr+3));
+ return 0;
+}
+
+void write_cis_mem(socket_info_t *s, int attr, u_int addr,
+ u_int len, void *ptr)
+{
+ pccard_mem_map *mem = &s->cis_mem;
+ u_char *sys, *buf = ptr;
+
+ DEBUG(3, "cs: write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
+ if (setup_cis_mem(s) != 0) return;
+ mem->flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) { addr *= 2; flags = ICTRL0_AUTOINC; }
+ mem->card_start = 0; mem->flags = MAP_ACTIVE;
+ set_cis_map(s, mem);
+ sys = s->cis_virt;
+ bus_writeb(s->cap.bus, flags, sys+CISREG_ICTRL0);
+ bus_writeb(s->cap.bus, addr & 0xff, sys+CISREG_IADDR0);
+ bus_writeb(s->cap.bus, (addr>>8) & 0xff, sys+CISREG_IADDR1);
+ bus_writeb(s->cap.bus, (addr>>16) & 0xff, sys+CISREG_IADDR2);
+ bus_writeb(s->cap.bus, (addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ bus_writeb(s->cap.bus, *buf, sys+CISREG_IDATA0);
+ } else {
+ int inc = 1;
+ if (attr & IS_ATTR) { mem->flags |= MAP_ATTRIB; inc++; addr *= 2; }
+ mem->card_start = addr & ~(s->cap.map_size-1);
+ while (len) {
+ set_cis_map(s, mem);
+ sys = s->cis_virt + (addr & (s->cap.map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == s->cis_virt+s->cap.map_size) break;
+ bus_writeb(s->cap.bus, *buf, sys);
+ }
+ mem->card_start += s->cap.map_size;
+ addr = 0;
+ }
+ }
+}
+
+/*======================================================================
+
+ This is tricky... when we set up CIS memory, we try to validate
+ the memory window space allocations.
+
+======================================================================*/
+
+/* Scratch pointer to the socket we use for validation */
+static socket_info_t *vs = NULL;
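+/* These helpers are handed to validate_mem() to decide whether a candidate
+   host window really reaches the card: cis_readable() requires a CIS with
+   the same chain count when mapped at base and at base+map_size, and
+   checksum_match() requires matching checksums over the same two windows
+   that are not the all-ones pattern of a floating bus. */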
+
+/* Validation function for cards with a valid CIS */
+static int cis_readable(u_long base)
+{
+ cisinfo_t info1, info2;
+ int ret;
+ vs->cis_mem.sys_start = base;
+ vs->cis_mem.sys_stop = base+vs->cap.map_size-1;
+ vs->cis_virt = bus_ioremap(vs->cap.bus, base, vs->cap.map_size);
+ ret = validate_cis(vs->clients, &info1);
+ /* invalidate mapping and CIS cache */
+ bus_iounmap(vs->cap.bus, vs->cis_virt); vs->cis_used = 0;
+ if ((ret != 0) || (info1.Chains == 0))
+ return 0;
+ vs->cis_mem.sys_start = base+vs->cap.map_size;
+ vs->cis_mem.sys_stop = base+2*vs->cap.map_size-1;
+ vs->cis_virt = bus_ioremap(vs->cap.bus, base+vs->cap.map_size,
+ vs->cap.map_size);
+ ret = validate_cis(vs->clients, &info2);
+ bus_iounmap(vs->cap.bus, vs->cis_virt); vs->cis_used = 0;
+ return ((ret == 0) && (info1.Chains == info2.Chains));
+}
+
+/* Validation function for simple memory cards */
+static int checksum(u_long base)
+{
+ int i, a, b, d;
+ vs->cis_mem.sys_start = base;
+ vs->cis_mem.sys_stop = base+vs->cap.map_size-1;
+ vs->cis_virt = bus_ioremap(vs->cap.bus, base, vs->cap.map_size);
+ vs->cis_mem.card_start = 0;
+ vs->cis_mem.flags = MAP_ACTIVE;
+ vs->ss_entry(vs->sock, SS_SetMemMap, &vs->cis_mem);
+ /* Don't bother checking every word... */
+ a = 0; b = -1;
+ for (i = 0; i < vs->cap.map_size; i += 44) {
+ d = bus_readl(vs->cap.bus, vs->cis_virt+i);
+ a += d; b &= d;
+ }
+ bus_iounmap(vs->cap.bus, vs->cis_virt);
+ return (b == -1) ? -1 : (a>>1);
+}
+
+static int checksum_match(u_long base)
+{
+ int a = checksum(base), b = checksum(base+vs->cap.map_size);
+ return ((a == b) && (a >= 0));
+}
+
+static int setup_cis_mem(socket_info_t *s)
+{
+ if (!(s->cap.features & SS_CAP_STATIC_MAP) &&
+ (s->cis_mem.sys_start == 0)) {
+ int low = !(s->cap.features & SS_CAP_PAGE_REGS);
+ vs = s;
+ validate_mem(cis_readable, checksum_match, low);
+ s->cis_mem.sys_start = 0;
+ vs = NULL;
+ if (find_mem_region(&s->cis_mem.sys_start, s->cap.map_size,
+ s->cap.map_size, low, "card services")) {
+ printk(KERN_NOTICE "cs: unable to map card memory!\n");
+ return -1;
+ }
+ s->cis_mem.sys_stop = s->cis_mem.sys_start+s->cap.map_size-1;
+ s->cis_virt = bus_ioremap(s->cap.bus, s->cis_mem.sys_start,
+ s->cap.map_size);
+ }
+ return 0;
+}
+
+void release_cis_mem(socket_info_t *s)
+{
+ if (s->cis_mem.sys_start != 0) {
+ s->cis_mem.flags &= ~MAP_ACTIVE;
+ s->ss_entry(s->sock, SS_SetMemMap, &s->cis_mem);
+ if (!(s->cap.features & SS_CAP_STATIC_MAP))
+ release_mem_region(s->cis_mem.sys_start, s->cap.map_size);
+ bus_iounmap(s->cap.bus, s->cis_virt);
+ s->cis_mem.sys_start = 0;
+ s->cis_virt = NULL;
+ }
+}
+
+/*======================================================================
+
+ This is a wrapper around read_cis_mem, with the same interface,
+ but which caches information, for cards whose CIS may not be
+ readable all the time.
+
+======================================================================*/
+
+static void read_cis_cache(socket_info_t *s, int attr, u_int addr,
+ u_int len, void *ptr)
+{
+ int i, ret;
+ char *caddr;
+
+ if (s->fake_cis) {
+ if (s->fake_cis_len >= addr+len)
+ memcpy(ptr, s->fake_cis+addr, len);
+ else
+ memset(ptr, 0xff, len);
+ return;
+ }
+ caddr = s->cis_cache;
+ for (i = 0; i < s->cis_used; i++) {
+ if ((s->cis_table[i].addr == addr) &&
+ (s->cis_table[i].len == len) &&
+ (s->cis_table[i].attr == attr)) break;
+ caddr += s->cis_table[i].len;
+ }
+ if (i < s->cis_used) {
+ memcpy(ptr, caddr, len);
+ return;
+ }
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS)
+ ret = read_cb_mem(s, 0, attr, addr, len, ptr);
+ else
+#endif
+ ret = read_cis_mem(s, attr, addr, len, ptr);
+ /* Copy data into the cache, if there is room */
+ if ((ret == 0) && (i < MAX_CIS_TABLE) &&
+ (caddr+len < s->cis_cache+MAX_CIS_DATA)) {
+ s->cis_table[i].addr = addr;
+ s->cis_table[i].len = len;
+ s->cis_table[i].attr = attr;
+ s->cis_used++;
+ memcpy(caddr, ptr, len);
+ }
+}
+
+/*======================================================================
+
+ This verifies if the CIS of a card matches what is in the CIS
+ cache.
+
+======================================================================*/
+
+int verify_cis_cache(socket_info_t *s)
+{
+ char *buf, *caddr;
+ int i;
+
+ buf = kmalloc(256, GFP_KERNEL);
+ if (buf == NULL)
+ return -1;
+ caddr = s->cis_cache;
+ for (i = 0; i < s->cis_used; i++) {
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS)
+ read_cb_mem(s, 0, s->cis_table[i].attr, s->cis_table[i].addr,
+ s->cis_table[i].len, buf);
+ else
+#endif
+ read_cis_mem(s, s->cis_table[i].attr, s->cis_table[i].addr,
+ s->cis_table[i].len, buf);
+ if (memcmp(buf, caddr, s->cis_table[i].len) != 0)
+ break;
+ caddr += s->cis_table[i].len;
+ }
+ kfree(buf);
+ return (i < s->cis_used);
+}
+
+/*======================================================================
+
+ For really bad cards, we provide a facility for uploading a
+ replacement CIS.
+
+======================================================================*/
+
+int replace_cis(client_handle_t handle, cisdump_t *cis)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (s->fake_cis != NULL) {
+ kfree(s->fake_cis);
+ s->fake_cis = NULL;
+ }
+ if (cis->Length > CISTPL_MAX_CIS_SIZE)
+ return CS_BAD_SIZE;
+ s->fake_cis = kmalloc(cis->Length, GFP_KERNEL);
+ if (s->fake_cis == NULL)
+ return CS_OUT_OF_RESOURCE;
+ s->fake_cis_len = cis->Length;
+ memcpy(s->fake_cis, cis->Data, cis->Length);
+ return CS_SUCCESS;
+}
+
+/*======================================================================
+
+ The high-level CIS tuple services
+
+======================================================================*/
+
+typedef struct tuple_flags {
+ u_int link_space:4;
+ u_int has_link:1;
+ u_int mfc_fn:3;
+ u_int space:4;
+} tuple_flags;
+
+#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space)
+#define HAS_LINK(f) (((tuple_flags *)(&(f)))->has_link)
+#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
+#define SPACE(f) (((tuple_flags *)(&(f)))->space)
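+/* tuple->Flags is interpreted as a tuple_flags bitfield: the address space
+   being walked, the space and presence of a pending long link, and how many
+   multifunction (MFC) links remain to be followed. */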
+
+int get_next_tuple(client_handle_t handle, tuple_t *tuple);
+
+int get_first_tuple(client_handle_t handle, tuple_t *tuple)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ tuple->TupleLink = tuple->Flags = 0;
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS) {
+ u_int ptr;
+ pcibios_read_config_dword(s->cap.cardbus, 0, 0x28, &ptr);
+ tuple->CISOffset = ptr & ~7;
+ SPACE(tuple->Flags) = (ptr & 7);
+ } else
+#endif
+ {
+ /* Assume presence of a LONGLINK_C to address 0 */
+ tuple->CISOffset = tuple->LinkOffset = 0;
+ SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
+ }
+ if (!(s->state & SOCKET_CARDBUS) && (s->functions > 1) &&
+ !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
+ cisdata_t req = tuple->DesiredTuple;
+ tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
+ if (get_next_tuple(handle, tuple) == CS_SUCCESS) {
+ tuple->DesiredTuple = CISTPL_LINKTARGET;
+ if (get_next_tuple(handle, tuple) != CS_SUCCESS)
+ return CS_NO_MORE_ITEMS;
+ } else
+ tuple->CISOffset = tuple->TupleLink = 0;
+ tuple->DesiredTuple = req;
+ }
+ return get_next_tuple(handle, tuple);
+}
+
+static int follow_link(socket_info_t *s, tuple_t *tuple)
+{
+ u_char link[5];
+ u_int ofs;
+
+ if (MFC_FN(tuple->Flags)) {
+ /* Get indirect link from the MFC tuple */
+ read_cis_cache(s, LINK_SPACE(tuple->Flags),
+ tuple->LinkOffset, 5, link);
+ ofs = le32_to_cpu(*(u_int *)(link+1));
+ SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
+ /* Move to the next indirect link */
+ tuple->LinkOffset += 5;
+ MFC_FN(tuple->Flags)--;
+ } else if (HAS_LINK(tuple->Flags)) {
+ ofs = tuple->LinkOffset;
+ SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
+ HAS_LINK(tuple->Flags) = 0;
+ } else {
+ return -1;
+ }
+ if (!(s->state & SOCKET_CARDBUS) && SPACE(tuple->Flags)) {
+ /* This is ugly, but a common CIS error is to code the long
+ link offset incorrectly, so we check the right spot... */
+ read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
+ if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
+ (strncmp(link+2, "CIS", 3) == 0))
+ return ofs;
+ /* Then, we try the wrong spot... */
+ ofs = ofs >> 1;
+ }
+ read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
+ if ((link[0] != CISTPL_LINKTARGET) || (link[1] < 3) ||
+ (strncmp(link+2, "CIS", 3) != 0))
+ return -1;
+ return ofs;
+}
+
+int get_next_tuple(client_handle_t handle, tuple_t *tuple)
+{
+ socket_info_t *s;
+ u_char link[2], tmp;
+ int ofs, i, attr;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ link[1] = tuple->TupleLink;
+ ofs = tuple->CISOffset + tuple->TupleLink;
+ attr = SPACE(tuple->Flags);
+
+ for (i = 0; i < MAX_TUPLES; i++) {
+ if (link[1] == 0xff) {
+ link[0] = CISTPL_END;
+ } else {
+ read_cis_cache(s, attr, ofs, 2, link);
+ if (link[0] == CISTPL_NULL) {
+ ofs++; continue;
+ }
+ }
+
+ /* End of chain? Follow long link if possible */
+ if (link[0] == CISTPL_END) {
+ if ((ofs = follow_link(s, tuple)) < 0)
+ return CS_NO_MORE_ITEMS;
+ attr = SPACE(tuple->Flags);
+ read_cis_cache(s, attr, ofs, 2, link);
+ }
+
+ /* Is this a link tuple? Make a note of it */
+ if ((link[0] == CISTPL_LONGLINK_A) ||
+ (link[0] == CISTPL_LONGLINK_C) ||
+ (link[0] == CISTPL_LONGLINK_MFC) ||
+ (link[0] == CISTPL_LINKTARGET) ||
+ (link[0] == CISTPL_INDIRECT) ||
+ (link[0] == CISTPL_NO_LINK)) {
+ switch (link[0]) {
+ case CISTPL_LONGLINK_A:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
+ read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
+ break;
+ case CISTPL_LONGLINK_C:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
+ read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
+ break;
+ case CISTPL_INDIRECT:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = IS_ATTR | IS_INDIRECT;
+ tuple->LinkOffset = 0;
+ break;
+ case CISTPL_LONGLINK_MFC:
+ tuple->LinkOffset = ofs + 3;
+ LINK_SPACE(tuple->Flags) = attr;
+ if (handle->Function == BIND_FN_ALL) {
+ /* Follow all the MFC links */
+ read_cis_cache(s, attr, ofs+2, 1, &tmp);
+ MFC_FN(tuple->Flags) = tmp;
+ } else {
+ /* Follow exactly one of the links */
+ MFC_FN(tuple->Flags) = 1;
+ tuple->LinkOffset += handle->Function * 5;
+ }
+ break;
+ case CISTPL_NO_LINK:
+ HAS_LINK(tuple->Flags) = 0;
+ break;
+ }
+ if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
+ (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
+ break;
+ } else
+ if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
+ break;
+
+ if (link[0] == tuple->DesiredTuple)
+ break;
+ ofs += link[1] + 2;
+ }
+ if (i == MAX_TUPLES) {
+ DEBUG(1, "cs: overrun in get_next_tuple for socket %d\n",
+ handle->Socket);
+ return CS_NO_MORE_ITEMS;
+ }
+
+ tuple->TupleCode = link[0];
+ tuple->TupleLink = link[1];
+ tuple->CISOffset = ofs + 2;
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+#define _MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+int get_tuple_data(client_handle_t handle, tuple_t *tuple)
+{
+ socket_info_t *s;
+ u_int len;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+
+ s = SOCKET(handle);
+
+ if (tuple->TupleLink < tuple->TupleOffset)
+ return CS_NO_MORE_ITEMS;
+ len = tuple->TupleLink - tuple->TupleOffset;
+ tuple->TupleDataLen = tuple->TupleLink;
+ if (len == 0)
+ return CS_SUCCESS;
+ read_cis_cache(s, SPACE(tuple->Flags),
+ tuple->CISOffset + tuple->TupleOffset,
+ _MIN(len, tuple->TupleDataMax), tuple->TupleData);
+ return CS_SUCCESS;
+}
+
+/*======================================================================
+
+ Parsing routines for individual tuples
+
+======================================================================*/
+
+static int parse_device(tuple_t *tuple, cistpl_device_t *device)
+{
+ int i;
+ u_char scale;
+ u_char *p, *q;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ device->ndev = 0;
+ for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
+
+ if (*p == 0xff) break;
+ device->dev[i].type = (*p >> 4);
+ device->dev[i].wp = (*p & 0x08) ? 1 : 0;
+ switch (*p & 0x07) {
+ case 0: device->dev[i].speed = 0; break;
+ case 1: device->dev[i].speed = 250; break;
+ case 2: device->dev[i].speed = 200; break;
+ case 3: device->dev[i].speed = 150; break;
+ case 4: device->dev[i].speed = 100; break;
+ case 7:
+ if (++p == q) return CS_BAD_TUPLE;
+ device->dev[i].speed = SPEED_CVT(*p);
+ while (*p & 0x80)
+ if (++p == q) return CS_BAD_TUPLE;
+ break;
+ default:
+ return CS_BAD_TUPLE;
+ }
+
+ if (++p == q) return CS_BAD_TUPLE;
+ if (*p == 0xff) break;
+ scale = *p & 7;
+ if (scale == 7) return CS_BAD_TUPLE;
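+ /* Size byte: bits 7-3 hold the unit count minus one, bits 2-0 the unit
+ scale (512 << (2*scale) bytes); e.g. 0x0a means 2 units of 8 KB = 16 KB. */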
+ device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
+ device->ndev++;
+ if (++p == q) break;
+ }
+
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
+{
+ u_char *p;
+ if (tuple->TupleDataLen < 5)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ csum->addr = tuple->CISOffset+(short)le16_to_cpu(*(u_short *)p)-2;
+ csum->len = le16_to_cpu(*(u_short *)(p + 2));
+ csum->sum = *(p+4);
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
+{
+ if (tuple->TupleDataLen < 4)
+ return CS_BAD_TUPLE;
+ link->addr = le32_to_cpu(*(u_int *)tuple->TupleData);
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_longlink_mfc(tuple_t *tuple,
+ cistpl_longlink_mfc_t *link)
+{
+ u_char *p;
+ int i;
+
+ p = (u_char *)tuple->TupleData;
+
+ link->nfn = *p; p++;
+ if (tuple->TupleDataLen <= link->nfn*5)
+ return CS_BAD_TUPLE;
+ for (i = 0; i < link->nfn; i++) {
+ link->fn[i].space = *p; p++;
+ link->fn[i].addr = le32_to_cpu(*(u_int *)p); p += 4;
+ }
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_strings(u_char *p, u_char *q, int max,
+ char *s, u_char *ofs, u_char *found)
+{
+ int i, j, ns;
+
+ if (p == q) return CS_BAD_TUPLE;
+ ns = 0; j = 0;
+ for (i = 0; i < max; i++) {
+ if (*p == 0xff) break;
+ ofs[i] = j;
+ ns++;
+ for (;;) {
+ s[j++] = (*p == 0xff) ? '\0' : *p;
+ if ((*p == '\0') || (*p == 0xff)) break;
+ if (++p == q) return CS_BAD_TUPLE;
+ }
+ if ((*p == 0xff) || (++p == q)) break;
+ }
+ if (found) {
+ *found = ns;
+ return CS_SUCCESS;
+ } else {
+ return (ns == max) ? CS_SUCCESS : CS_BAD_TUPLE;
+ }
+}
+
+/*====================================================================*/
+
+static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
+{
+ u_char *p, *q;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ vers_1->major = *p; p++;
+ vers_1->minor = *p; p++;
+ if (p >= q) return CS_BAD_TUPLE;
+
+ return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
+ vers_1->str, vers_1->ofs, &vers_1->ns);
+}
+
+/*====================================================================*/
+
+static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
+{
+ u_char *p, *q;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
+ altstr->str, altstr->ofs, &altstr->ns);
+}
+
+/*====================================================================*/
+
+static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
+{
+ u_char *p, *q;
+ int nid;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
+ if (p > q-2) break;
+ jedec->id[nid].mfr = p[0];
+ jedec->id[nid].info = p[1];
+ p += 2;
+ }
+ jedec->nid = nid;
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
+{
+ u_short *p;
+ if (tuple->TupleDataLen < 4)
+ return CS_BAD_TUPLE;
+ p = (u_short *)tuple->TupleData;
+ m->manf = le16_to_cpu(p[0]);
+ m->card = le16_to_cpu(p[1]);
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f)
+{
+ u_char *p;
+ if (tuple->TupleDataLen < 2)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ f->func = p[0];
+ f->sysinit = p[1];
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_funce(tuple_t *tuple, cistpl_funce_t *f)
+{
+ u_char *p;
+ int i;
+ if (tuple->TupleDataLen < 1)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ f->type = p[0];
+ for (i = 1; i < tuple->TupleDataLen; i++)
+ f->data[i-1] = p[i];
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_config(tuple_t *tuple, cistpl_config_t *config)
+{
+ int rasz, rmsz, i;
+ u_char *p;
+
+ p = (u_char *)tuple->TupleData;
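+ /* The first byte packs field sizes: bits 1-0 are the size in bytes of the
+ configuration base address minus one, bits 5-2 the size of the register
+ presence mask minus one. */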
+ rasz = *p & 0x03;
+ rmsz = (*p & 0x3c) >> 2;
+ if (tuple->TupleDataLen < rasz+rmsz+4)
+ return CS_BAD_TUPLE;
+ config->last_idx = *(++p);
+ p++;
+ config->base = 0;
+ for (i = 0; i <= rasz; i++)
+ config->base += p[i] << (8*i);
+ p += rasz+1;
+ for (i = 0; i < 4; i++)
+ config->rmask[i] = 0;
+ for (i = 0; i <= rmsz; i++)
+ config->rmask[i>>2] += p[i] << (8*(i%4));
+ config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
+ return CS_SUCCESS;
+}
+
+/*======================================================================
+
+ The following routines are all used to parse the nightmarish
+ config table entries.
+
+======================================================================*/
+
+static u_char *parse_power(u_char *p, u_char *q,
+ cistpl_power_t *pwr)
+{
+ int i;
+ u_int scale;
+
+ if (p == q) return NULL;
+ pwr->present = *p;
+ pwr->flags = 0;
+ p++;
+ for (i = 0; i < 7; i++)
+ if (pwr->present & (1<<i)) {
+ if (p == q) return NULL;
+ pwr->param[i] = POWER_CVT(*p);
+ scale = POWER_SCALE(*p);
+ while (*p & 0x80) {
+ if (++p == q) return NULL;
+ if ((*p & 0x7f) < 100)
+ pwr->param[i] += (*p & 0x7f) * scale / 100;
+ else if (*p == 0x7d)
+ pwr->flags |= CISTPL_POWER_HIGHZ_OK;
+ else if (*p == 0x7e)
+ pwr->param[i] = 0;
+ else if (*p == 0x7f)
+ pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
+ else
+ return NULL;
+ }
+ p++;
+ }
+ return p;
+}
+
+/*====================================================================*/
+
+static u_char *parse_timing(u_char *p, u_char *q,
+ cistpl_timing_t *timing)
+{
+ u_char scale;
+
+ if (p == q) return NULL;
+ scale = *p;
+ if ((scale & 3) != 3) {
+ if (++p == q) return NULL;
+ timing->wait = SPEED_CVT(*p);
+ timing->waitscale = exponent[scale & 3];
+ } else
+ timing->wait = 0;
+ scale >>= 2;
+ if ((scale & 7) != 7) {
+ if (++p == q) return NULL;
+ timing->ready = SPEED_CVT(*p);
+ timing->rdyscale = exponent[scale & 7];
+ } else
+ timing->ready = 0;
+ scale >>= 3;
+ if (scale != 7) {
+ if (++p == q) return NULL;
+ timing->reserved = SPEED_CVT(*p);
+ timing->rsvscale = exponent[scale];
+ } else
+ timing->reserved = 0;
+ p++;
+ return p;
+}
+
+/*====================================================================*/
+
+static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
+{
+ int i, j, bsz, lsz;
+
+ if (p == q) return NULL;
+ io->flags = *p;
+
+ if (!(*p & 0x80)) {
+ io->nwin = 1;
+ io->win[0].base = 0;
+ io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
+ return p+1;
+ }
+
+ if (++p == q) return NULL;
+ io->nwin = (*p & 0x0f) + 1;
+ bsz = (*p & 0x30) >> 4;
+ if (bsz == 3) bsz++;
+ lsz = (*p & 0xc0) >> 6;
+ if (lsz == 3) lsz++;
+ p++;
+
+ for (i = 0; i < io->nwin; i++) {
+ io->win[i].base = 0;
+ io->win[i].len = 1;
+ for (j = 0; j < bsz; j++, p++) {
+ if (p == q) return NULL;
+ io->win[i].base += *p << (j*8);
+ }
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q) return NULL;
+ io->win[i].len += *p << (j*8);
+ }
+ }
+ return p;
+}
+
+/*====================================================================*/
+
+static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
+{
+ int i, j, asz, lsz, has_ha;
+ u_int len, ca, ha;
+
+ if (p == q) return NULL;
+
+ mem->nwin = (*p & 0x07) + 1;
+ lsz = (*p & 0x18) >> 3;
+ asz = (*p & 0x60) >> 5;
+ has_ha = (*p & 0x80);
+ if (++p == q) return NULL;
+
+ for (i = 0; i < mem->nwin; i++) {
+ len = ca = ha = 0;
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q) return NULL;
+ len += *p << (j*8);
+ }
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q) return NULL;
+ ca += *p << (j*8);
+ }
+ if (has_ha)
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q) return NULL;
+ ha += *p << (j*8);
+ }
+ mem->win[i].len = len << 8;
+ mem->win[i].card_addr = ca << 8;
+ mem->win[i].host_addr = ha << 8;
+ }
+ return p;
+}
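+
+/*======================================================================
+
+ Note that the length, card address and host address fields decoded
+ above are stored in the CIS in units of 256 bytes, which is why
+ each one is shifted left by 8 on the way out; an encoded length of
+ 0x10, for example, describes a 4 kilobyte window.
+
+======================================================================*/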
+
+/*====================================================================*/
+
+static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
+{
+ if (p == q) return NULL;
+ irq->IRQInfo1 = *p; p++;
+ if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
+ if (p+2 > q) return NULL;
+ irq->IRQInfo2 = (p[1]<<8) + p[0];
+ p += 2;
+ }
+ return p;
+}
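+
+/*======================================================================
+
+ Roughly speaking, IRQInfo1 carries the interrupt flag bits (and,
+ when no mask follows, the interrupt line itself), while a set
+ IRQ_INFO2_VALID bit means a 16-bit little-endian mask follows in
+ IRQInfo2, bit n being set when interrupt line n is acceptable to
+ the card.
+
+======================================================================*/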
+
+/*====================================================================*/
+
+static int parse_cftable_entry(tuple_t *tuple,
+ cistpl_cftable_entry_t *entry)
+{
+ u_char *p, *q, features;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ entry->index = *p & 0x3f;
+ entry->flags = 0;
+ if (*p & 0x40)
+ entry->flags |= CISTPL_CFTABLE_DEFAULT;
+ if (*p & 0x80) {
+ if (++p == q) return CS_BAD_TUPLE;
+ if (*p & 0x10)
+ entry->flags |= CISTPL_CFTABLE_BVDS;
+ if (*p & 0x20)
+ entry->flags |= CISTPL_CFTABLE_WP;
+ if (*p & 0x40)
+ entry->flags |= CISTPL_CFTABLE_RDYBSY;
+ if (*p & 0x80)
+ entry->flags |= CISTPL_CFTABLE_MWAIT;
+ entry->interface = *p & 0x0f;
+ } else
+ entry->interface = 0;
+
+ /* Process optional features */
+ if (++p == q) return CS_BAD_TUPLE;
+ features = *p; p++;
+
+ /* Power options */
+ if ((features & 3) > 0) {
+ p = parse_power(p, q, &entry->vcc);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vcc.present = 0;
+ if ((features & 3) > 1) {
+ p = parse_power(p, q, &entry->vpp1);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vpp1.present = 0;
+ if ((features & 3) > 2) {
+ p = parse_power(p, q, &entry->vpp2);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vpp2.present = 0;
+
+ /* Timing options */
+ if (features & 0x04) {
+ p = parse_timing(p, q, &entry->timing);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else {
+ entry->timing.wait = 0;
+ entry->timing.ready = 0;
+ entry->timing.reserved = 0;
+ }
+
+ /* I/O window options */
+ if (features & 0x08) {
+ p = parse_io(p, q, &entry->io);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->io.nwin = 0;
+
+ /* Interrupt options */
+ if (features & 0x10) {
+ p = parse_irq(p, q, &entry->irq);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->irq.IRQInfo1 = 0;
+
+ switch (features & 0x60) {
+ case 0x00:
+ entry->mem.nwin = 0;
+ break;
+ case 0x20:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = le16_to_cpu(*(u_short *)p) << 8;
+ entry->mem.win[0].card_addr = 0;
+ entry->mem.win[0].host_addr = 0;
+ p += 2;
+ if (p > q) return CS_BAD_TUPLE;
+ break;
+ case 0x40:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = le16_to_cpu(*(u_short *)p) << 8;
+ entry->mem.win[0].card_addr =
+ le16_to_cpu(*(u_short *)(p+2)) << 8;
+ entry->mem.win[0].host_addr = 0;
+ p += 4;
+ if (p > q) return CS_BAD_TUPLE;
+ break;
+ case 0x60:
+ p = parse_mem(p, q, &entry->mem);
+ if (p == NULL) return CS_BAD_TUPLE;
+ break;
+ }
+
+ /* Misc features */
+ if (features & 0x80) {
+ if (p == q) return CS_BAD_TUPLE;
+ entry->flags |= (*p << 8);
+ while (*p & 0x80)
+ if (++p == q) return CS_BAD_TUPLE;
+ p++;
+ }
+
+ entry->subtuples = q-p;
+
+ return CS_SUCCESS;
+}
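+
+/*======================================================================
+
+ A typical client walks the configuration table through the tuple
+ services, along these lines (a rough sketch only; handle, tuple,
+ parse and buf are the caller's own variables):
+
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[255];
+ int ret;
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ ret = CardServices(GetFirstTuple, handle, &tuple, NULL);
+ while (ret == CS_SUCCESS) {
+ if ((CardServices(GetTupleData, handle, &tuple, NULL) == CS_SUCCESS) &&
+ (CardServices(ParseTuple, handle, &tuple, &parse) == CS_SUCCESS)) {
+ ... try parse.cftable_entry.io, .irq, .vcc and so on ...
+ }
+ ret = CardServices(GetNextTuple, handle, &tuple, NULL);
+ }
+
+======================================================================*/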
+
+/*====================================================================*/
+
+#ifdef CONFIG_CARDBUS
+
+static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
+{
+ u_char *p;
+ if (tuple->TupleDataLen < 6)
+ return CS_BAD_TUPLE;
+ p = (u_char *)tuple->TupleData;
+ bar->attr = *p;
+ p += 2;
+ bar->size = le32_to_cpu(*(u_int *)p);
+ return CS_SUCCESS;
+}
+
+static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
+{
+ u_char *p;
+
+ p = (u_char *)tuple->TupleData;
+ if ((*p != 3) || (tuple->TupleDataLen < 6))
+ return CS_BAD_TUPLE;
+ config->last_idx = *(++p);
+ p++;
+ config->base = le32_to_cpu(*(u_int *)p);
+ config->subtuples = tuple->TupleDataLen - 6;
+ return CS_SUCCESS;
+}
+
+static int parse_cftable_entry_cb(tuple_t *tuple,
+ cistpl_cftable_entry_cb_t *entry)
+{
+ u_char *p, *q, features;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ entry->index = *p & 0x3f;
+ entry->flags = 0;
+ if (*p & 0x40)
+ entry->flags |= CISTPL_CFTABLE_DEFAULT;
+
+ /* Process optional features */
+ if (++p == q) return CS_BAD_TUPLE;
+ features = *p; p++;
+
+ /* Power options */
+ if ((features & 3) > 0) {
+ p = parse_power(p, q, &entry->vcc);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vcc.present = 0;
+ if ((features & 3) > 1) {
+ p = parse_power(p, q, &entry->vpp1);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vpp1.present = 0;
+ if ((features & 3) > 2) {
+ p = parse_power(p, q, &entry->vpp2);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->vpp2.present = 0;
+
+ /* I/O window options */
+ if (features & 0x08) {
+ if (p == q) return CS_BAD_TUPLE;
+ entry->io = *p; p++;
+ } else
+ entry->io = 0;
+
+ /* Interrupt options */
+ if (features & 0x10) {
+ p = parse_irq(p, q, &entry->irq);
+ if (p == NULL) return CS_BAD_TUPLE;
+ } else
+ entry->irq.IRQInfo1 = 0;
+
+ if (features & 0x20) {
+ if (p == q) return CS_BAD_TUPLE;
+ entry->mem = *p; p++;
+ } else
+ entry->mem = 0;
+
+ /* Misc features */
+ if (features & 0x80) {
+ if (p == q) return CS_BAD_TUPLE;
+ entry->flags |= (*p << 8);
+ if (*p & 0x80) {
+ if (++p == q) return CS_BAD_TUPLE;
+ entry->flags |= (*p << 16);
+ }
+ while (*p & 0x80)
+ if (++p == q) return CS_BAD_TUPLE;
+ p++;
+ }
+
+ entry->subtuples = q-p;
+
+ return CS_SUCCESS;
+}
+
+#endif
+
+/*====================================================================*/
+
+static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
+{
+ u_char *p, *q;
+ int n;
+
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
+ if (p > q-6) break;
+ geo->geo[n].buswidth = p[0];
+ geo->geo[n].erase_block = 1 << (p[1]-1);
+ geo->geo[n].read_block = 1 << (p[2]-1);
+ geo->geo[n].write_block = 1 << (p[3]-1);
+ geo->geo[n].partition = 1 << (p[4]-1);
+ geo->geo[n].interleave = 1 << (p[5]-1);
+ p += 6;
+ }
+ geo->ngeo = n;
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
+{
+ u_char *p, *q;
+
+ if (tuple->TupleDataLen < 10)
+ return CS_BAD_TUPLE;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ v2->vers = p[0];
+ v2->comply = p[1];
+ v2->dindex = le16_to_cpu(*(u_short *)(p+2));
+ v2->vspec8 = p[6];
+ v2->vspec9 = p[7];
+ v2->nhdr = p[8];
+ p += 9;
+ return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
+}
+
+/*====================================================================*/
+
+static int parse_org(tuple_t *tuple, cistpl_org_t *org)
+{
+ u_char *p, *q;
+ int i;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ if (p == q) return CS_BAD_TUPLE;
+ org->data_org = *p;
+ if (++p == q) return CS_BAD_TUPLE;
+ for (i = 0; i < 30; i++) {
+ org->desc[i] = *p;
+ if (*p == '\0') break;
+ if (++p == q) return CS_BAD_TUPLE;
+ }
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
+{
+ u_char *p;
+
+ if (tuple->TupleDataLen < 10)
+ return CS_BAD_TUPLE;
+
+ p = tuple->TupleData;
+
+ fmt->type = p[0];
+ fmt->edc = p[1];
+ fmt->offset = le32_to_cpu(*(u_int *)(p+2));
+ fmt->length = le32_to_cpu(*(u_int *)(p+6));
+
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+int parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse)
+{
+ int ret = CS_SUCCESS;
+
+ if (tuple->TupleDataLen > tuple->TupleDataMax)
+ return CS_BAD_TUPLE;
+ switch (tuple->TupleCode) {
+ case CISTPL_DEVICE:
+ case CISTPL_DEVICE_A:
+ ret = parse_device(tuple, &parse->device);
+ break;
+#ifdef CONFIG_CARDBUS
+ case CISTPL_BAR:
+ ret = parse_bar(tuple, &parse->bar);
+ break;
+ case CISTPL_CONFIG_CB:
+ ret = parse_config_cb(tuple, &parse->config);
+ break;
+ case CISTPL_CFTABLE_ENTRY_CB:
+ ret = parse_cftable_entry_cb(tuple, &parse->cftable_entry_cb);
+ break;
+#endif
+ case CISTPL_CHECKSUM:
+ ret = parse_checksum(tuple, &parse->checksum);
+ break;
+ case CISTPL_LONGLINK_A:
+ case CISTPL_LONGLINK_C:
+ ret = parse_longlink(tuple, &parse->longlink);
+ break;
+ case CISTPL_LONGLINK_MFC:
+ ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
+ break;
+ case CISTPL_VERS_1:
+ ret = parse_vers_1(tuple, &parse->version_1);
+ break;
+ case CISTPL_ALTSTR:
+ ret = parse_altstr(tuple, &parse->altstr);
+ break;
+ case CISTPL_JEDEC_A:
+ case CISTPL_JEDEC_C:
+ ret = parse_jedec(tuple, &parse->jedec);
+ break;
+ case CISTPL_MANFID:
+ ret = parse_manfid(tuple, &parse->manfid);
+ break;
+ case CISTPL_FUNCID:
+ ret = parse_funcid(tuple, &parse->funcid);
+ break;
+ case CISTPL_FUNCE:
+ ret = parse_funce(tuple, &parse->funce);
+ break;
+ case CISTPL_CONFIG:
+ ret = parse_config(tuple, &parse->config);
+ break;
+ case CISTPL_CFTABLE_ENTRY:
+ ret = parse_cftable_entry(tuple, &parse->cftable_entry);
+ break;
+ case CISTPL_DEVICE_GEO:
+ case CISTPL_DEVICE_GEO_A:
+ ret = parse_device_geo(tuple, &parse->device_geo);
+ break;
+ case CISTPL_VERS_2:
+ ret = parse_vers_2(tuple, &parse->vers_2);
+ break;
+ case CISTPL_ORG:
+ ret = parse_org(tuple, &parse->org);
+ break;
+ case CISTPL_FORMAT:
+ case CISTPL_FORMAT_A:
+ ret = parse_format(tuple, &parse->format);
+ break;
+ case CISTPL_NO_LINK:
+ case CISTPL_LINKTARGET:
+ ret = CS_SUCCESS;
+ break;
+ default:
+ ret = CS_UNSUPPORTED_FUNCTION;
+ break;
+ }
+ return ret;
+}
+
+/*======================================================================
+
+ This is used internally by Card Services to look up CIS stuff.
+
+======================================================================*/
+
+int read_tuple(client_handle_t handle, cisdata_t code, void *parse)
+{
+ tuple_t tuple;
+ cisdata_t *buf;
+ int ret;
+
+ buf = kmalloc(255, GFP_KERNEL);
+ if (buf == NULL)
+ return CS_OUT_OF_RESOURCE;
+ tuple.DesiredTuple = code;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ ret = CardServices(GetFirstTuple, handle, &tuple, NULL);
+ if (ret != CS_SUCCESS) goto done;
+ tuple.TupleData = buf;
+ tuple.TupleOffset = 0;
+ tuple.TupleDataMax = 255;
+ ret = CardServices(GetTupleData, handle, &tuple, NULL);
+ if (ret != CS_SUCCESS) goto done;
+ ret = CardServices(ParseTuple, handle, &tuple, parse);
+done:
+ kfree(buf);
+ return ret;
+}
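+
+/*======================================================================
+
+ For instance, read_tuple(handle, CISTPL_MANFID, &parse), with parse
+ a cisparse_t, leaves the manufacturer and card IDs in
+ parse.manfid.manf and parse.manfid.card when the card has a MANFID
+ tuple; validate_cis() below uses it in exactly this way.
+
+======================================================================*/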
+
+/*======================================================================
+
+ This tries to determine if a card has a sensible CIS. It returns
+ the number of tuples in the CIS, or 0 if the CIS looks bad. The
+ checks include making sure several critical tuples are present and
+ valid; seeing if the total number of tuples is reasonable; and
+ looking for tuples that use reserved codes.
+
+======================================================================*/
+
+int validate_cis(client_handle_t handle, cisinfo_t *info)
+{
+ tuple_t tuple;
+ cisparse_t *p;
+ int ret, reserved, dev_ok = 0, ident_ok = 0;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL)
+ return CS_OUT_OF_RESOURCE;
+
+ info->Chains = reserved = 0;
+ tuple.DesiredTuple = RETURN_FIRST_TUPLE;
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ ret = get_first_tuple(handle, &tuple);
+ if (ret != CS_SUCCESS)
+ goto done;
+
+ /* First tuple should be DEVICE; we should really have either that
+ or a CFTABLE_ENTRY of some sort */
+ if ((tuple.TupleCode == CISTPL_DEVICE) ||
+ (read_tuple(handle, CISTPL_CFTABLE_ENTRY, p) == CS_SUCCESS) ||
+ (read_tuple(handle, CISTPL_CFTABLE_ENTRY_CB, p) == CS_SUCCESS))
+ dev_ok++;
+
+ /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2
+ tuple, for card identification. Certain old D-Link and Linksys
+ cards have only a broken VERS_2 tuple; hence the bogus test. */
+ if ((read_tuple(handle, CISTPL_MANFID, p) == CS_SUCCESS) ||
+ (read_tuple(handle, CISTPL_VERS_1, p) == CS_SUCCESS) ||
+ (read_tuple(handle, CISTPL_VERS_2, p) != CS_NO_MORE_ITEMS))
+ ident_ok++;
+
+ if (!dev_ok && !ident_ok)
+ goto done;
+
+ for (info->Chains = 1; info->Chains < MAX_TUPLES; info->Chains++) {
+ ret = get_next_tuple(handle, &tuple);
+ if (ret != CS_SUCCESS) break;
+ if (((tuple.TupleCode > 0x23) && (tuple.TupleCode < 0x40)) ||
+ ((tuple.TupleCode > 0x47) && (tuple.TupleCode < 0x80)) ||
+ ((tuple.TupleCode > 0x90) && (tuple.TupleCode < 0xff)))
+ reserved++;
+ }
+ if ((info->Chains == MAX_TUPLES) || (reserved > 5) ||
+ ((!dev_ok || !ident_ok) && (info->Chains > 10)))
+ info->Chains = 0;
+
+done:
+ kfree(p);
+ return CS_SUCCESS;
+}
+
diff --git a/linux/pcmcia-cs/modules/cs.c b/linux/pcmcia-cs/modules/cs.c
new file mode 100644
index 0000000..949b190
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cs.c
@@ -0,0 +1,2399 @@
+/*======================================================================
+
+ PCMCIA Card Services -- core services
+
+ cs.c 1.287 2004/04/09 03:54:25
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+
+#define IN_CARD_SERVICES
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/bus_ops.h>
+#include "cs_internal.h"
+
+#ifdef CONFIG_PCI
+#define PCI_OPT " [pci]"
+#else
+#define PCI_OPT ""
+#endif
+#ifdef CONFIG_CARDBUS
+#define CB_OPT " [cardbus]"
+#else
+#define CB_OPT ""
+#endif
+#ifdef CONFIG_PM
+#define PM_OPT " [apm]"
+#else
+#define PM_OPT ""
+#endif
+#ifdef CONFIG_PNP_BIOS
+#define PNP_OPT " [pnp]"
+#else
+#define PNP_OPT ""
+#endif
+#if !defined(CONFIG_CARDBUS) && !defined(CONFIG_PCI) && \
+ !defined(CONFIG_PM) && !defined(CONFIG_PNP_BIOS)
+#define OPTIONS " none"
+#else
+#define OPTIONS PCI_OPT CB_OPT PM_OPT PNP_OPT
+#endif
+
+static const char *release = "Linux PCMCIA Card Services " CS_RELEASE;
+#ifdef UTS_RELEASE
+static const char *kernel = "kernel build: " UTS_RELEASE " " UTS_VERSION;
+#endif
+static const char *options = "options: " OPTIONS;
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Linux PCMCIA Card Services " CS_RELEASE
+ "\n options:" OPTIONS);
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+INT_MODULE_PARM(setup_delay, HZ/20); /* ticks */
+INT_MODULE_PARM(resume_delay, HZ/5); /* ticks */
+INT_MODULE_PARM(shutdown_delay, HZ/40); /* ticks */
+INT_MODULE_PARM(vcc_settle, HZ*4/10); /* ticks */
+INT_MODULE_PARM(reset_time, 10); /* usecs */
+INT_MODULE_PARM(unreset_delay, HZ/10); /* ticks */
+INT_MODULE_PARM(unreset_check, HZ/10); /* ticks */
+INT_MODULE_PARM(unreset_limit, 50); /* unreset_check's */
+
+/* Access speed for attribute memory windows */
+INT_MODULE_PARM(cis_speed, 300); /* ns */
+
+/* Access speed for IO windows */
+INT_MODULE_PARM(io_speed, 0); /* ns */
+
+/* Optional features */
+#ifdef CONFIG_PM
+INT_MODULE_PARM(do_apm, 1);
+#endif
+#ifdef CONFIG_PNP_BIOS
+INT_MODULE_PARM(do_pnp, 1);
+#endif
+
+#ifdef PCMCIA_DEBUG
+int pc_debug=PCMCIA_DEBUG;
+MODULE_PARM(pc_debug, "i");
+static const char *version =
+"cs.c 1.287 2004/04/09 03:54:25 (David Hinds)";
+#endif
+
+/*====================================================================*/
+
+static socket_state_t dead_socket = {
+ 0, SS_DETECT, 0, 0, 0
+};
+
+/* Table of sockets */
+socket_t sockets = 0;
+socket_info_t *socket_table[MAX_SOCK];
+
+#ifdef HAS_PROC_BUS
+struct proc_dir_entry *proc_pccard = NULL;
+#endif
+
+/*====================================================================*/
+
+/* String tables for error messages */
+
+typedef struct lookup_t {
+ int key;
+ char *msg;
+} lookup_t;
+
+static const lookup_t error_table[] = {
+ { CS_SUCCESS, "Operation succeeded" },
+ { CS_BAD_ADAPTER, "Bad adapter" },
+ { CS_BAD_ATTRIBUTE, "Bad attribute" },
+ { CS_BAD_BASE, "Bad base address" },
+ { CS_BAD_EDC, "Bad EDC" },
+ { CS_BAD_IRQ, "Bad IRQ" },
+ { CS_BAD_OFFSET, "Bad offset" },
+ { CS_BAD_PAGE, "Bad page number" },
+ { CS_READ_FAILURE, "Read failure" },
+ { CS_BAD_SIZE, "Bad size" },
+ { CS_BAD_SOCKET, "Bad socket" },
+ { CS_BAD_TYPE, "Bad type" },
+ { CS_BAD_VCC, "Bad Vcc" },
+ { CS_BAD_VPP, "Bad Vpp" },
+ { CS_BAD_WINDOW, "Bad window" },
+ { CS_WRITE_FAILURE, "Write failure" },
+ { CS_NO_CARD, "No card present" },
+ { CS_UNSUPPORTED_FUNCTION, "Unsupported function" },
+ { CS_UNSUPPORTED_MODE, "Unsupported mode" },
+ { CS_BAD_SPEED, "Bad speed" },
+ { CS_BUSY, "Resource busy" },
+ { CS_GENERAL_FAILURE, "General failure" },
+ { CS_WRITE_PROTECTED, "Write protected" },
+ { CS_BAD_ARG_LENGTH, "Bad argument length" },
+ { CS_BAD_ARGS, "Bad arguments" },
+ { CS_CONFIGURATION_LOCKED, "Configuration locked" },
+ { CS_IN_USE, "Resource in use" },
+ { CS_NO_MORE_ITEMS, "No more items" },
+ { CS_OUT_OF_RESOURCE, "Out of resource" },
+ { CS_BAD_HANDLE, "Bad handle" },
+ { CS_BAD_TUPLE, "Bad CIS tuple" }
+};
+#define ERROR_COUNT (sizeof(error_table)/sizeof(lookup_t))
+
+static const lookup_t service_table[] = {
+ { AccessConfigurationRegister, "AccessConfigurationRegister" },
+ { AddSocketServices, "AddSocketServices" },
+ { AdjustResourceInfo, "AdjustResourceInfo" },
+ { CheckEraseQueue, "CheckEraseQueue" },
+ { CloseMemory, "CloseMemory" },
+ { DeregisterClient, "DeregisterClient" },
+ { DeregisterEraseQueue, "DeregisterEraseQueue" },
+ { GetCardServicesInfo, "GetCardServicesInfo" },
+ { GetClientInfo, "GetClientInfo" },
+ { GetConfigurationInfo, "GetConfigurationInfo" },
+ { GetEventMask, "GetEventMask" },
+ { GetFirstClient, "GetFirstClient" },
+ { GetFirstRegion, "GetFirstRegion" },
+ { GetFirstTuple, "GetFirstTuple" },
+ { GetNextClient, "GetNextClient" },
+ { GetNextRegion, "GetNextRegion" },
+ { GetNextTuple, "GetNextTuple" },
+ { GetStatus, "GetStatus" },
+ { GetTupleData, "GetTupleData" },
+ { MapMemPage, "MapMemPage" },
+ { ModifyConfiguration, "ModifyConfiguration" },
+ { ModifyWindow, "ModifyWindow" },
+ { OpenMemory, "OpenMemory" },
+ { ParseTuple, "ParseTuple" },
+ { ReadMemory, "ReadMemory" },
+ { RegisterClient, "RegisterClient" },
+ { RegisterEraseQueue, "RegisterEraseQueue" },
+ { RegisterMTD, "RegisterMTD" },
+ { ReleaseConfiguration, "ReleaseConfiguration" },
+ { ReleaseIO, "ReleaseIO" },
+ { ReleaseIRQ, "ReleaseIRQ" },
+ { ReleaseWindow, "ReleaseWindow" },
+ { RequestConfiguration, "RequestConfiguration" },
+ { RequestIO, "RequestIO" },
+ { RequestIRQ, "RequestIRQ" },
+ { RequestSocketMask, "RequestSocketMask" },
+ { RequestWindow, "RequestWindow" },
+ { ResetCard, "ResetCard" },
+ { SetEventMask, "SetEventMask" },
+ { ValidateCIS, "ValidateCIS" },
+ { WriteMemory, "WriteMemory" },
+ { BindDevice, "BindDevice" },
+ { BindMTD, "BindMTD" },
+ { ReportError, "ReportError" },
+ { SuspendCard, "SuspendCard" },
+ { ResumeCard, "ResumeCard" },
+ { EjectCard, "EjectCard" },
+ { InsertCard, "InsertCard" },
+ { ReplaceCIS, "ReplaceCIS" }
+};
+#define SERVICE_COUNT (sizeof(service_table)/sizeof(lookup_t))
+
+/*======================================================================
+
+ Reset a socket to the default state
+
+======================================================================*/
+
+static void init_socket(socket_info_t *s)
+{
+ int i;
+ pccard_io_map io = { 0, 0, 0, 0, 1 };
+ pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
+
+ mem.sys_stop = s->cap.map_size;
+ s->socket = dead_socket;
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ for (i = 0; i < 2; i++) {
+ io.map = i;
+ s->ss_entry(s->sock, SS_SetIOMap, &io);
+ }
+ for (i = 0; i < 5; i++) {
+ mem.map = i;
+ s->ss_entry(s->sock, SS_SetMemMap, &mem);
+ }
+}
+
+/*====================================================================*/
+
+#if defined(HAS_PROC_BUS) && defined(PCMCIA_DEBUG)
+static int proc_read_clients(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ client_handle_t c;
+ char *p = buf;
+
+ for (c = s->clients; c; c = c->next)
+ p += sprintf(p, "fn %x: '%s' [attr 0x%04x] [state 0x%04x]\n",
+ c->Function, c->dev_info, c->Attributes, c->state);
+ return (p - buf);
+}
+#endif
+
+/*======================================================================
+
+ Low-level PC Card interface drivers need to register with Card
+ Services using these calls.
+
+======================================================================*/
+
+static void setup_socket(u_long i);
+static void shutdown_socket(u_long i);
+static void reset_socket(u_long i);
+static void unreset_socket(u_long i);
+static void parse_events(void *info, u_int events);
+
+int register_ss_entry(int nsock, ss_entry_t ss_entry)
+{
+ int i, ns;
+ socket_info_t *s;
+
+ DEBUG(0, "cs: register_ss_entry(%d, 0x%p)\n", nsock, ss_entry);
+
+ for (ns = 0; ns < nsock; ns++) {
+ s = kmalloc(sizeof(struct socket_info_t), GFP_KERNEL);
+ if (!s) {
+ printk(KERN_NOTICE "cs: memory allocation failure!\n");
+ return (!ns);
+ }
+ memset(s, 0, sizeof(socket_info_t));
+
+ s->ss_entry = ss_entry;
+ s->sock = ns;
+ s->setup.data = sockets;
+ s->setup.function = &setup_socket;
+ s->shutdown.data = sockets;
+ s->shutdown.function = &shutdown_socket;
+ /* base address = 0, map = 0 */
+ s->cis_mem.flags = 0;
+ s->cis_mem.speed = cis_speed;
+ s->erase_busy.next = s->erase_busy.prev = &s->erase_busy;
+ spin_lock_init(&s->lock);
+
+ for (i = 0; i < sockets; i++)
+ if (socket_table[i] == NULL) break;
+ socket_table[i] = s;
+ if (i == sockets) sockets++;
+
+ init_socket(s);
+ ss_entry(ns, SS_InquireSocket, &s->cap);
+#ifdef HAS_PROC_BUS
+ if (proc_pccard) {
+ char name[3];
+ sprintf(name, "%02d", i);
+ s->proc = proc_mkdir(name, proc_pccard);
+ if (s->proc)
+ ss_entry(ns, SS_ProcSetup, s->proc);
+#ifdef PCMCIA_DEBUG
+ if (s->proc)
+ create_proc_read_entry("clients", 0, s->proc,
+ proc_read_clients, s);
+#endif
+ }
+#endif
+ }
+
+ return 0;
+} /* register_ss_entry */
+
+/*====================================================================*/
+
+void unregister_ss_entry(ss_entry_t ss_entry)
+{
+ int i, j;
+ socket_info_t *s = NULL;
+ client_t *client;
+
+#ifdef HAS_PROC_BUS
+ for (i = 0; i < sockets; i++) {
+ s = socket_table[i];
+ if (s->ss_entry != ss_entry) continue;
+ if (proc_pccard) {
+ char name[3];
+ sprintf(name, "%02d", i);
+#ifdef PCMCIA_DEBUG
+ remove_proc_entry("clients", s->proc);
+#endif
+ remove_proc_entry(name, proc_pccard);
+ }
+ }
+#endif
+
+ for (;;) {
+ for (i = 0; i < sockets; i++) {
+ s = socket_table[i];
+ if (s->ss_entry == ss_entry) break;
+ }
+ if (i == sockets)
+ break;
+ shutdown_socket(i);
+ release_cis_mem(s);
+ while (s->clients) {
+ client = s->clients;
+ s->clients = s->clients->next;
+ kfree(client);
+ }
+ s->ss_entry = NULL;
+ kfree(s);
+ socket_table[i] = NULL;
+ for (j = i; j < sockets-1; j++)
+ socket_table[j] = socket_table[j+1];
+ sockets--;
+ }
+
+} /* unregister_ss_entry */
+
+/*======================================================================
+
+ Shutdown_socket() and setup_socket() are scheduled using add_timer
+ calls by the main event handler when card insertion and removal
+ events are received. Shutdown_socket() unconfigures a socket and
+ turns off socket power. Setup_socket() turns on socket power
+ and resets the socket, in two stages.
+
+======================================================================*/
+
+static void free_regions(memory_handle_t *list)
+{
+ memory_handle_t tmp;
+ while (*list != NULL) {
+ tmp = *list;
+ *list = tmp->info.next;
+ tmp->region_magic = 0;
+ kfree(tmp);
+ }
+}
+
+static int send_event(socket_info_t *s, event_t event, int priority);
+
+static void shutdown_socket(u_long i)
+{
+ socket_info_t *s = socket_table[i];
+ client_t **c;
+
+ DEBUG(1, "cs: shutdown_socket(%ld)\n", i);
+
+ /* Blank out the socket state */
+ s->state &= SOCKET_PRESENT|SOCKET_SETUP_PENDING;
+ init_socket(s);
+ s->irq.AssignedIRQ = s->irq.Config = 0;
+ s->lock_count = 0;
+ s->cis_used = 0;
+ if (s->fake_cis) {
+ kfree(s->fake_cis);
+ s->fake_cis = NULL;
+ }
+#ifdef CONFIG_CARDBUS
+ cb_release_cis_mem(s);
+ cb_free(s);
+#endif
+ s->functions = 0;
+ if (s->config) {
+ kfree(s->config);
+ s->config = NULL;
+ }
+ for (c = &s->clients; *c; ) {
+ if ((*c)->state & CLIENT_UNBOUND) {
+ client_t *d = *c;
+ *c = (*c)->next;
+ kfree(d);
+ } else {
+ c = &((*c)->next);
+ }
+ }
+ free_regions(&s->a_region);
+ free_regions(&s->c_region);
+} /* shutdown_socket */
+
+static void setup_socket(u_long i)
+{
+ int val;
+ socket_info_t *s = socket_table[i];
+
+ s->ss_entry(s->sock, SS_GetStatus, &val);
+ if (val & SS_PENDING) {
+ /* Does the socket need more time? */
+ DEBUG(2, "cs: setup_socket(%ld): status pending\n", i);
+ if (++s->setup_timeout > 100) {
+ printk(KERN_NOTICE "cs: socket %ld voltage interrogation"
+ " timed out\n", i);
+ } else {
+ mod_timer(&s->setup, jiffies + HZ/10);
+ }
+ } else if (val & SS_DETECT) {
+ DEBUG(1, "cs: setup_socket(%ld): applying power\n", i);
+ s->state |= SOCKET_PRESENT;
+ s->socket.flags = 0;
+ if (val & SS_3VCARD)
+ s->socket.Vcc = s->socket.Vpp = 33;
+ else if (!(val & SS_XVCARD))
+ s->socket.Vcc = s->socket.Vpp = 50;
+ else {
+ printk(KERN_NOTICE "cs: socket %ld: unsupported "
+ "voltage key\n", i);
+ s->socket.Vcc = 0;
+ }
+ if (val & SS_CARDBUS) {
+ s->state |= SOCKET_CARDBUS;
+#ifndef CONFIG_CARDBUS
+ printk(KERN_NOTICE "cs: unsupported card type detected!\n");
+#endif
+ }
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ s->setup.function = &reset_socket;
+ mod_timer(&s->setup, jiffies + vcc_settle);
+ } else
+ DEBUG(0, "cs: setup_socket(%ld): no card!\n", i);
+} /* setup_socket */
+
+/*======================================================================
+
+ Reset_socket() and unreset_socket() handle hard resets. Resets
+ have several causes: card insertion, a call to reset_socket, or
+ recovery from a suspend/resume cycle. Unreset_socket() sends
+ a CS event that matches the cause of the reset.
+
+======================================================================*/
+
+static void reset_socket(u_long i)
+{
+ socket_info_t *s = socket_table[i];
+
+ DEBUG(1, "cs: resetting socket %ld\n", i);
+ s->socket.flags |= SS_OUTPUT_ENA | SS_RESET;
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ udelay((long)reset_time);
+ s->socket.flags &= ~SS_RESET;
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ s->setup_timeout = 0;
+ s->setup.function = &unreset_socket;
+ mod_timer(&s->setup, jiffies + unreset_delay);
+} /* reset_socket */
+
+#define EVENT_MASK \
+(SOCKET_SETUP_PENDING|SOCKET_SUSPEND|SOCKET_RESET_PENDING)
+
+static void unreset_socket(u_long i)
+{
+ socket_info_t *s = socket_table[i];
+ int val;
+
+ s->ss_entry(s->sock, SS_GetStatus, &val);
+ if (val & SS_READY) {
+ DEBUG(1, "cs: reset done on socket %ld\n", i);
+ if (s->state & SOCKET_SUSPEND) {
+ s->state &= ~EVENT_MASK;
+ if (verify_cis_cache(s) != 0)
+ parse_events(s, SS_DETECT);
+ else
+ send_event(s, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
+ } else if (s->state & SOCKET_SETUP_PENDING) {
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS)
+ cb_alloc(s);
+#endif
+ send_event(s, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
+ s->state &= ~SOCKET_SETUP_PENDING;
+ } else {
+ send_event(s, CS_EVENT_CARD_RESET, CS_EVENT_PRI_LOW);
+ if (s->reset_handle) {
+ s->reset_handle->event_callback_args.info = NULL;
+ EVENT(s->reset_handle, CS_EVENT_RESET_COMPLETE,
+ CS_EVENT_PRI_LOW);
+ s->state &= ~EVENT_MASK;
+ }
+ }
+ } else {
+ DEBUG(2, "cs: socket %ld not ready yet\n", i);
+ if (++s->setup_timeout > unreset_limit) {
+ printk(KERN_NOTICE "cs: socket %ld timed out during"
+ " reset\n", i);
+ s->state &= ~EVENT_MASK;
+ } else {
+ mod_timer(&s->setup, jiffies + unreset_check);
+ }
+ }
+} /* unreset_socket */
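+
+/*======================================================================
+
+ Putting the pieces together, a card insertion runs through a chain
+ of timer callbacks: parse_events() (below) marks the socket
+ SOCKET_SETUP_PENDING and schedules setup_socket() after
+ setup_delay; setup_socket() applies power and re-arms the same
+ timer as reset_socket() after vcc_settle; reset_socket() pulses
+ SS_RESET for reset_time microseconds and schedules unreset_socket()
+ after unreset_delay; unreset_socket() then polls SS_READY every
+ unreset_check ticks, up to unreset_limit tries, before finally
+ sending CS_EVENT_CARD_INSERTION to the clients.
+
+======================================================================*/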
+
+/*======================================================================
+
+ The central event handler. Send_event() sends an event to all
+ valid clients. Parse_events() interprets the event bits from
+ a card status change report. Do_shutdown() handles the high
+ priority stuff associated with a card removal.
+
+======================================================================*/
+
+static int send_event(socket_info_t *s, event_t event, int priority)
+{
+ client_t *client = s->clients;
+ int ret;
+ DEBUG(1, "cs: send_event(sock %d, event %d, pri %d)\n",
+ s->sock, event, priority);
+ ret = 0;
+ for (; client; client = client->next) {
+ if (client->state & (CLIENT_UNBOUND|CLIENT_STALE))
+ continue;
+ if (client->EventMask & event) {
+ ret = EVENT(client, event, priority);
+ if (ret != 0)
+ return ret;
+ }
+ }
+ return ret;
+} /* send_event */
+
+static void do_shutdown(socket_info_t *s)
+{
+ client_t *client;
+ if (s->state & SOCKET_SHUTDOWN_PENDING)
+ return;
+ s->state |= SOCKET_SHUTDOWN_PENDING;
+ send_event(s, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
+ for (client = s->clients; client; client = client->next)
+ if (!(client->Attributes & INFO_MASTER_CLIENT))
+ client->state |= CLIENT_STALE;
+ if (s->state & (SOCKET_SETUP_PENDING|SOCKET_RESET_PENDING)) {
+ DEBUG(0, "cs: flushing pending setup\n");
+ del_timer(&s->setup);
+ s->state &= ~EVENT_MASK;
+ }
+ mod_timer(&s->shutdown, jiffies + shutdown_delay);
+ s->state &= ~SOCKET_PRESENT;
+}
+
+static void parse_events(void *info, u_int events)
+{
+ socket_info_t *s = info;
+ if (events & SS_DETECT) {
+ int status;
+ u_long flags;
+ spin_lock_irqsave(&s->lock, flags);
+ s->ss_entry(s->sock, SS_GetStatus, &status);
+ if ((s->state & SOCKET_PRESENT) &&
+ (!(s->state & SOCKET_SUSPEND) ||
+ !(status & SS_DETECT)))
+ do_shutdown(s);
+ if (status & SS_DETECT) {
+ if (s->state & SOCKET_SETUP_PENDING) {
+ del_timer(&s->setup);
+ DEBUG(1, "cs: delaying pending setup\n");
+ }
+ s->state |= SOCKET_SETUP_PENDING;
+ s->setup.function = &setup_socket;
+ s->setup_timeout = 0;
+ if (s->state & SOCKET_SUSPEND)
+ s->setup.expires = jiffies + resume_delay;
+ else
+ s->setup.expires = jiffies + setup_delay;
+ add_timer(&s->setup);
+ }
+ spin_unlock_irqrestore(&s->lock, flags);
+ }
+ if (events & SS_BATDEAD)
+ send_event(s, CS_EVENT_BATTERY_DEAD, CS_EVENT_PRI_LOW);
+ if (events & SS_BATWARN)
+ send_event(s, CS_EVENT_BATTERY_LOW, CS_EVENT_PRI_LOW);
+ if (events & SS_READY) {
+ if (!(s->state & SOCKET_RESET_PENDING))
+ send_event(s, CS_EVENT_READY_CHANGE, CS_EVENT_PRI_LOW);
+ else DEBUG(1, "cs: ready change during reset\n");
+ }
+} /* parse_events */
+
+/*======================================================================
+
+ Another event handler, for power management events.
+
+ This does not comply with the latest PC Card spec for handling
+ power management events.
+
+======================================================================*/
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE < VERSION(2,3,43))
+static int handle_pm_event(apm_event_t rqst)
+#else
+static int handle_pm_event(struct pm_dev *dev, pm_request_t rqst,
+ void *data)
+#endif
+{
+ int i, stat;
+ socket_info_t *s;
+ static int down = 0;
+
+ /* <linux/pm.h> hides a hack so this works with old APM support */
+ switch (rqst) {
+ case PM_SUSPEND:
+ DEBUG(1, "cs: received suspend notification\n");
+ if (down) {
+ printk(KERN_DEBUG "cs: received extra suspend event\n");
+ break;
+ }
+ down = 1;
+ for (i = 0; i < sockets; i++) {
+ s = socket_table[i];
+ if ((s->state & SOCKET_PRESENT) &&
+ !(s->state & SOCKET_SUSPEND)){
+ send_event(s, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW);
+ s->ss_entry(s->sock, SS_SetSocket, &dead_socket);
+ s->state |= SOCKET_SUSPEND;
+ }
+ }
+ break;
+ case PM_RESUME:
+ DEBUG(1, "cs: received resume notification\n");
+ if (!down) {
+ printk(KERN_DEBUG "cs: received bogus resume event\n");
+ break;
+ }
+ down = 0;
+ for (i = 0; i < sockets; i++) {
+ s = socket_table[i];
+ /* Do this just to reinitialize the socket */
+ init_socket(s);
+ s->ss_entry(s->sock, SS_GetStatus, &stat);
+ /* If there was or is a card here, we need to do something
+ about it... but parse_events will sort it all out. */
+ if ((s->state & SOCKET_PRESENT) || (stat & SS_DETECT))
+ parse_events(s, SS_DETECT);
+ }
+ break;
+ }
+ return 0;
+} /* handle_pm_event */
+#endif
+
+/*======================================================================
+
+ Special stuff for managing IO windows, because they are scarce.
+
+======================================================================*/
+
+static int alloc_io_space(socket_info_t *s, u_int attr, ioaddr_t *base,
+ ioaddr_t num, u_int lines, char *name)
+{
+ int i;
+ ioaddr_t try, align;
+
+ align = (*base) ? (lines ? 1<<lines : 0) : 1;
+ if (align && (align < num)) {
+ if (*base) {
+ DEBUG(0, "odd IO request: num %04x align %04x\n",
+ num, align);
+ align = 0;
+ } else
+ while (align && (align < num)) align <<= 1;
+ }
+ if (*base & ~(align-1)) {
+ DEBUG(0, "odd IO request: base %04x align %04x\n",
+ *base, align);
+ while (*base & ~(align-1)) align <<= 1;
+ }
+ /* Check for an already-allocated window that must conflict with
+ what was asked for. It is a hack because it does not catch all
+ potential conflicts, just the most obvious ones. */
+ for (i = 0; i < MAX_IO_WIN; i++)
+ if ((s->io[i].NumPorts != 0) &&
+ ((s->io[i].BasePort & (align-1)) == *base))
+ return 1;
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (s->io[i].NumPorts == 0) {
+ if (find_io_region(base, num, align, name) == 0) {
+ s->io[i].Attributes = attr;
+ s->io[i].BasePort = *base;
+ s->io[i].NumPorts = s->io[i].InUse = num;
+ break;
+ } else
+ return 1;
+ } else if (s->io[i].Attributes != attr)
+ continue;
+ /* Try to extend top of window */
+ try = s->io[i].BasePort + s->io[i].NumPorts;
+ if ((*base == 0) || (*base == try))
+ if (find_io_region(&try, num, 0, name) == 0) {
+ *base = try;
+ s->io[i].NumPorts += num;
+ s->io[i].InUse += num;
+ break;
+ }
+ /* Try to extend bottom of window */
+ try = s->io[i].BasePort - num;
+ if ((*base == 0) || (*base == try))
+ if (find_io_region(&try, num, 0, name) == 0) {
+ s->io[i].BasePort = *base = try;
+ s->io[i].NumPorts += num;
+ s->io[i].InUse += num;
+ break;
+ }
+ }
+ return (i == MAX_IO_WIN);
+} /* alloc_io_space */
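+
+/*======================================================================
+
+ As an example of the alignment logic above: a request for 16 ports
+ anywhere (*base == 0) has align grown from 1 up to 16, so
+ find_io_region() hunts for a free 16-port range on a 16-byte
+ boundary. With a fixed nonzero base, align starts at 1 << lines
+ (the IOAddrLines of the request); asking for more ports than that,
+ or for a base with address bits above the decoded lines, earns an
+ "odd IO request" debug message and the alignment is adjusted to
+ cope.
+
+======================================================================*/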
+
+static void release_io_space(socket_info_t *s, ioaddr_t base,
+ ioaddr_t num)
+{
+ int i;
+ release_region(base, num);
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if ((s->io[i].BasePort <= base) &&
+ (s->io[i].BasePort+s->io[i].NumPorts >= base+num)) {
+ s->io[i].InUse -= num;
+ /* Free the window if no one else is using it */
+ if (s->io[i].InUse == 0)
+ s->io[i].NumPorts = 0;
+ }
+ }
+}
+
+/*======================================================================
+
+ Access_configuration_register() reads and writes configuration
+ registers in attribute memory. Memory window 0 is reserved for
+ this and the tuple reading services.
+
+======================================================================*/
+
+static int access_configuration_register(client_handle_t handle,
+ conf_reg_t *reg)
+{
+ socket_info_t *s;
+ config_t *c;
+ int addr;
+ u_char val;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (handle->Function == BIND_FN_ALL) {
+ if (reg->Function >= s->functions)
+ return CS_BAD_ARGS;
+ c = &s->config[reg->Function];
+ } else
+ c = CONFIG(handle);
+ if (!(c->state & CONFIG_LOCKED))
+ return CS_CONFIGURATION_LOCKED;
+
+ addr = (c->ConfigBase + reg->Offset) >> 1;
+
+ switch (reg->Action) {
+ case CS_READ:
+ read_cis_mem(s, 1, addr, 1, &val);
+ reg->Value = val;
+ break;
+ case CS_WRITE:
+ val = reg->Value;
+ write_cis_mem(s, 1, addr, 1, &val);
+ break;
+ default:
+ return CS_BAD_ARGS;
+ break;
+ }
+ return CS_SUCCESS;
+} /* access_configuration_register */
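+
+/*======================================================================
+
+ Once its configuration is locked, a client reads or writes its
+ configuration registers through this service; very roughly (the
+ handle is the client's own, and the call goes through the usual
+ CardServices dispatch):
+
+ conf_reg_t reg;
+ reg.Function = 0;
+ reg.Action = CS_READ;
+ reg.Offset = CISREG_PRR;
+ CardServices(AccessConfigurationRegister, handle, &reg, NULL);
+ ... reg.Value now holds the pin replacement register ...
+
+======================================================================*/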
+
+/*======================================================================
+
+ Bind_device() associates a device driver with a particular socket.
+ It is normally called by Driver Services after it has identified
+ a newly inserted card. An instance of that driver will then be
+ eligible to register as a client of this socket.
+
+======================================================================*/
+
+static int bind_device(bind_req_t *req)
+{
+ client_t *client;
+ socket_info_t *s;
+
+ if (CHECK_SOCKET(req->Socket))
+ return CS_BAD_SOCKET;
+ s = SOCKET(req);
+
+ client = (client_t *)kmalloc(sizeof(client_t), GFP_KERNEL);
+ if (!client) return CS_OUT_OF_RESOURCE;
+ memset(client, '\0', sizeof(client_t));
+ client->client_magic = CLIENT_MAGIC;
+ strncpy(client->dev_info, (char *)req->dev_info, DEV_NAME_LEN);
+ client->Socket = req->Socket;
+ client->Function = req->Function;
+ client->state = CLIENT_UNBOUND;
+ client->erase_busy.next = &client->erase_busy;
+ client->erase_busy.prev = &client->erase_busy;
+ init_waitqueue_head(&client->mtd_req);
+ client->next = s->clients;
+ s->clients = client;
+ DEBUG(1, "cs: bind_device(): client 0x%p, sock %d, dev %s\n",
+ client, client->Socket, client->dev_info);
+ return CS_SUCCESS;
+} /* bind_device */
+
+/*======================================================================
+
+ Bind_mtd() associates a device driver with a particular memory
+ region. It is normally called by Driver Services after it has
+ identified a memory device type. An instance of the corresponding
+ driver will then be able to register to control this region.
+
+======================================================================*/
+
+static int bind_mtd(mtd_bind_t *req)
+{
+ socket_info_t *s;
+ memory_handle_t region;
+
+ if (CHECK_SOCKET(req->Socket))
+ return CS_BAD_SOCKET;
+ s = SOCKET(req);
+
+ if (req->Attributes & REGION_TYPE_AM)
+ region = s->a_region;
+ else
+ region = s->c_region;
+
+ while (region) {
+ if (region->info.CardOffset == req->CardOffset) break;
+ region = region->info.next;
+ }
+ if (!region || (region->mtd != NULL))
+ return CS_BAD_OFFSET;
+ strncpy(region->dev_info, (char *)req->dev_info, DEV_NAME_LEN);
+
+ DEBUG(1, "cs: bind_mtd(): attr 0x%x, offset 0x%x, dev %s\n",
+ req->Attributes, req->CardOffset, (char *)req->dev_info);
+ return CS_SUCCESS;
+} /* bind_mtd */
+
+/*====================================================================*/
+
+static int deregister_client(client_handle_t handle)
+{
+ client_t **client;
+ socket_info_t *s;
+ memory_handle_t region;
+ u_long flags;
+ int i, sn;
+
+ DEBUG(1, "cs: deregister_client(%p)\n", handle);
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ if (handle->state &
+ (CLIENT_IRQ_REQ|CLIENT_IO_REQ|CLIENT_CONFIG_LOCKED))
+ return CS_IN_USE;
+ for (i = 0; i < MAX_WIN; i++)
+ if (handle->state & CLIENT_WIN_REQ(i))
+ return CS_IN_USE;
+
+ /* Disconnect all MTD links */
+ s = SOCKET(handle);
+ if (handle->mtd_count) {
+ for (region = s->a_region; region; region = region->info.next)
+ if (region->mtd == handle) region->mtd = NULL;
+ for (region = s->c_region; region; region = region->info.next)
+ if (region->mtd == handle) region->mtd = NULL;
+ }
+
+ sn = handle->Socket; s = socket_table[sn];
+
+ if ((handle->state & CLIENT_STALE) ||
+ (handle->Attributes & INFO_MASTER_CLIENT)) {
+ spin_lock_irqsave(&s->lock, flags);
+ client = &s->clients;
+ while ((*client) && ((*client) != handle))
+ client = &(*client)->next;
+ if (*client == NULL) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return CS_BAD_HANDLE;
+ }
+ *client = handle->next;
+ handle->client_magic = 0;
+ kfree(handle);
+ spin_unlock_irqrestore(&s->lock, flags);
+ } else {
+ handle->state = CLIENT_UNBOUND;
+ handle->mtd_count = 0;
+ handle->event_handler = NULL;
+ }
+
+ if (--s->real_clients == 0)
+ s->ss_entry(sn, SS_RegisterCallback, NULL);
+
+ return CS_SUCCESS;
+} /* deregister_client */
+
+/*====================================================================*/
+
+static int get_configuration_info(client_handle_t handle,
+ config_info_t *config)
+{
+ socket_info_t *s;
+ config_t *c;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ if (handle->Function == BIND_FN_ALL) {
+ if (config->Function && (config->Function >= s->functions))
+ return CS_BAD_ARGS;
+ } else
+ config->Function = handle->Function;
+
+#ifdef CONFIG_CARDBUS
+ if (s->state & SOCKET_CARDBUS) {
+ u_char fn = config->Function;
+ memset(config, 0, sizeof(config_info_t));
+ config->Function = fn;
+ config->Vcc = s->socket.Vcc;
+ config->Vpp1 = config->Vpp2 = s->socket.Vpp;
+ config->Option = s->cap.cardbus;
+ config->IntType = INT_CARDBUS;
+ /* This is a nasty hack */
+ pcibios_read_config_dword(s->cap.cardbus, 0, 0, &config->ConfigBase);
+ if (s->cb_config) {
+ config->Attributes = CONF_VALID_CLIENT;
+ config->AssignedIRQ = s->irq.AssignedIRQ;
+ if (config->AssignedIRQ)
+ config->Attributes |= CONF_ENABLE_IRQ;
+ config->BasePort1 = s->io[0].BasePort;
+ config->NumPorts1 = s->io[0].NumPorts;
+ }
+ return CS_SUCCESS;
+ }
+#endif
+
+ c = (s->config != NULL) ? &s->config[config->Function] : NULL;
+
+ if ((c == NULL) || !(c->state & CONFIG_LOCKED)) {
+ config->Attributes = 0;
+ config->Vcc = s->socket.Vcc;
+ config->Vpp1 = config->Vpp2 = s->socket.Vpp;
+ return CS_SUCCESS;
+ }
+
+ /* !!! This is a hack !!! */
+ memcpy(&config->Attributes, &c->Attributes, sizeof(config_t));
+ config->Attributes |= CONF_VALID_CLIENT;
+ config->CardValues = c->CardValues;
+ config->IRQAttributes = c->irq.Attributes;
+ config->AssignedIRQ = s->irq.AssignedIRQ;
+ config->BasePort1 = c->io.BasePort1;
+ config->NumPorts1 = c->io.NumPorts1;
+ config->Attributes1 = c->io.Attributes1;
+ config->BasePort2 = c->io.BasePort2;
+ config->NumPorts2 = c->io.NumPorts2;
+ config->Attributes2 = c->io.Attributes2;
+ config->IOAddrLines = c->io.IOAddrLines;
+
+ return CS_SUCCESS;
+} /* get_configuration_info */
+
+/*======================================================================
+
+ Return information about this version of Card Services.
+
+======================================================================*/
+
+static int get_card_services_info(servinfo_t *info)
+{
+ info->Signature[0] = 'C';
+ info->Signature[1] = 'S';
+ info->Count = sockets;
+ info->Revision = CS_RELEASE_CODE;
+ info->CSLevel = 0x0210;
+ info->VendorString = (char *)release;
+ return CS_SUCCESS;
+} /* get_card_services_info */
+
+/*======================================================================
+
+ Note that get_first_client() *does* recognize the Socket field
+ in the request structure.
+
+======================================================================*/
+
+static int get_first_client(client_handle_t *handle, client_req_t *req)
+{
+ socket_t s;
+ if (req->Attributes & CLIENT_THIS_SOCKET)
+ s = req->Socket;
+ else
+ s = 0;
+ if (CHECK_SOCKET(req->Socket))
+ return CS_BAD_SOCKET;
+ if (socket_table[s]->clients == NULL)
+ return CS_NO_MORE_ITEMS;
+ *handle = socket_table[s]->clients;
+ return CS_SUCCESS;
+} /* get_first_client */
+
+/*====================================================================*/
+
+static int get_next_client(client_handle_t *handle, client_req_t *req)
+{
+ socket_info_t *s;
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ if ((*handle)->next == NULL) {
+ if (req->Attributes & CLIENT_THIS_SOCKET)
+ return CS_NO_MORE_ITEMS;
+ s = SOCKET(*handle);
+ if (s->clients == NULL)
+ return CS_NO_MORE_ITEMS;
+ *handle = s->clients;
+ } else
+ *handle = (*handle)->next;
+ return CS_SUCCESS;
+} /* get_next_client */
+
+/*====================================================================*/
+
+static int get_window(window_handle_t *handle, int idx, win_req_t *req)
+{
+ socket_info_t *s;
+ window_t *win;
+ int w;
+
+ if (idx == 0)
+ s = SOCKET((client_handle_t)*handle);
+ else
+ s = (*handle)->sock;
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ for (w = idx; w < MAX_WIN; w++)
+ if (s->state & SOCKET_WIN_REQ(w)) break;
+ if (w == MAX_WIN)
+ return CS_NO_MORE_ITEMS;
+ win = &s->win[w];
+ req->Base = win->ctl.sys_start;
+ req->Size = win->ctl.sys_stop - win->ctl.sys_start + 1;
+ req->AccessSpeed = win->ctl.speed;
+ req->Attributes = 0;
+ if (win->ctl.flags & MAP_ATTRIB)
+ req->Attributes |= WIN_MEMORY_TYPE_AM;
+ if (win->ctl.flags & MAP_ACTIVE)
+ req->Attributes |= WIN_ENABLE;
+ if (win->ctl.flags & MAP_16BIT)
+ req->Attributes |= WIN_DATA_WIDTH_16;
+ if (win->ctl.flags & MAP_USE_WAIT)
+ req->Attributes |= WIN_USE_WAIT;
+ *handle = win;
+ return CS_SUCCESS;
+} /* get_window */
+
+static int get_first_window(client_handle_t *handle, win_req_t *req)
+{
+ if ((handle == NULL) || CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ return get_window((window_handle_t *)handle, 0, req);
+}
+
+static int get_next_window(window_handle_t *win, win_req_t *req)
+{
+ if ((win == NULL) || ((*win)->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ return get_window(win, (*win)->index+1, req);
+}
+
+/*======================================================================
+
+ Get the current socket state bits. We don't support the latched
+ SocketState yet: I haven't seen any point for it.
+
+======================================================================*/
+
+static int cs_get_status(client_handle_t handle, cs_status_t *status)
+{
+ socket_info_t *s;
+ config_t *c;
+ int val;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ s->ss_entry(s->sock, SS_GetStatus, &val);
+ status->CardState = status->SocketState = 0;
+ status->CardState |= (val & SS_DETECT) ? CS_EVENT_CARD_DETECT : 0;
+ status->CardState |= (val & SS_CARDBUS) ? CS_EVENT_CB_DETECT : 0;
+ status->CardState |= (val & SS_3VCARD) ? CS_EVENT_3VCARD : 0;
+ status->CardState |= (val & SS_XVCARD) ? CS_EVENT_XVCARD : 0;
+ if (s->state & SOCKET_SUSPEND)
+ status->CardState |= CS_EVENT_PM_SUSPEND;
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (s->state & SOCKET_SETUP_PENDING)
+ status->CardState |= CS_EVENT_CARD_INSERTION;
+
+ /* Get info from the PRR, if necessary */
+ if (handle->Function == BIND_FN_ALL) {
+ if (status->Function && (status->Function >= s->functions))
+ return CS_BAD_ARGS;
+ c = (s->config != NULL) ? &s->config[status->Function] : NULL;
+ } else
+ c = CONFIG(handle);
+ if ((c != NULL) && (c->state & CONFIG_LOCKED) &&
+ (c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
+ u_char reg;
+ if (c->Present & PRESENT_PIN_REPLACE) {
+ read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
+ status->CardState |=
+ (reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
+ status->CardState |=
+ (reg & PRR_READY_STATUS) ? CS_EVENT_READY_CHANGE : 0;
+ status->CardState |=
+ (reg & PRR_BVD2_STATUS) ? CS_EVENT_BATTERY_LOW : 0;
+ status->CardState |=
+ (reg & PRR_BVD1_STATUS) ? CS_EVENT_BATTERY_DEAD : 0;
+ } else {
+ /* No PRR? Then assume we're always ready */
+ status->CardState |= CS_EVENT_READY_CHANGE;
+ }
+ if (c->Present & PRESENT_EXT_STATUS) {
+ read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
+ status->CardState |=
+ (reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
+ }
+ return CS_SUCCESS;
+ }
+ status->CardState |=
+ (val & SS_WRPROT) ? CS_EVENT_WRITE_PROTECT : 0;
+ status->CardState |=
+ (val & SS_BATDEAD) ? CS_EVENT_BATTERY_DEAD : 0;
+ status->CardState |=
+ (val & SS_BATWARN) ? CS_EVENT_BATTERY_LOW : 0;
+ status->CardState |=
+ (val & SS_READY) ? CS_EVENT_READY_CHANGE : 0;
+ return CS_SUCCESS;
+} /* cs_get_status */
+
+/*======================================================================
+
+ Change the card address of an already open memory window.
+
+======================================================================*/
+
+static int get_mem_page(window_handle_t win, memreq_t *req)
+{
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ req->Page = 0;
+ req->CardOffset = win->ctl.card_start;
+ return CS_SUCCESS;
+} /* get_mem_page */
+
+static int map_mem_page(window_handle_t win, memreq_t *req)
+{
+ socket_info_t *s;
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ if (req->Page != 0)
+ return CS_BAD_PAGE;
+ s = win->sock;
+ win->ctl.card_start = req->CardOffset;
+ if (s->ss_entry(s->sock, SS_SetMemMap, &win->ctl) != 0)
+ return CS_BAD_OFFSET;
+ return CS_SUCCESS;
+} /* map_mem_page */
+
+/*======================================================================
+
+ Modify a locked socket configuration
+
+======================================================================*/
+
+static int modify_configuration(client_handle_t handle,
+ modconf_t *mod)
+{
+ socket_info_t *s;
+ config_t *c;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle); c = CONFIG(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (!(c->state & CONFIG_LOCKED))
+ return CS_CONFIGURATION_LOCKED;
+
+ if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
+ if (mod->Attributes & CONF_ENABLE_IRQ) {
+ c->Attributes |= CONF_ENABLE_IRQ;
+ s->socket.io_irq = s->irq.AssignedIRQ;
+ } else {
+ c->Attributes &= ~CONF_ENABLE_IRQ;
+ s->socket.io_irq = 0;
+ }
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ }
+
+ if (mod->Attributes & CONF_VCC_CHANGE_VALID)
+ return CS_BAD_VCC;
+
+ /* We only allow changing Vpp1 and Vpp2 to the same value */
+ if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
+ (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
+ if (mod->Vpp1 != mod->Vpp2)
+ return CS_BAD_VPP;
+ c->Vpp1 = c->Vpp2 = s->socket.Vpp = mod->Vpp1;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_BAD_VPP;
+ } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
+ (mod->Attributes & CONF_VPP2_CHANGE_VALID))
+ return CS_BAD_VPP;
+
+ return CS_SUCCESS;
+} /* modify_configuration */
+
+/*======================================================================
+
+ Modify the attributes of a window returned by RequestWindow.
+
+======================================================================*/
+
+static int modify_window(window_handle_t win, modwin_t *req)
+{
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+
+ win->ctl.flags &= ~(MAP_ATTRIB|MAP_ACTIVE);
+ if (req->Attributes & WIN_MEMORY_TYPE)
+ win->ctl.flags |= MAP_ATTRIB;
+ if (req->Attributes & WIN_ENABLE)
+ win->ctl.flags |= MAP_ACTIVE;
+ if (req->Attributes & WIN_DATA_WIDTH_16)
+ win->ctl.flags |= MAP_16BIT;
+ if (req->Attributes & WIN_USE_WAIT)
+ win->ctl.flags |= MAP_USE_WAIT;
+ win->ctl.speed = req->AccessSpeed;
+ win->sock->ss_entry(win->sock->sock, SS_SetMemMap, &win->ctl);
+
+ return CS_SUCCESS;
+} /* modify_window */
+
+/*======================================================================
+
+ Register_client() uses the dev_info_t handle to match the
+ caller with a socket. The driver must have already been bound
+ to a socket with bind_device() -- in fact, bind_device()
+ allocates the client structure that will be used.
+
+======================================================================*/
+
+static int register_client(client_handle_t *handle, client_reg_t *req)
+{
+ client_t *client;
+ socket_info_t *s;
+ socket_t ns;
+
+ /* Look for unbound client with matching dev_info */
+ client = NULL;
+ for (ns = 0; ns < sockets; ns++) {
+ client = socket_table[ns]->clients;
+ while (client != NULL) {
+ if ((strcmp(client->dev_info, (char *)req->dev_info) == 0)
+ && (client->state & CLIENT_UNBOUND)) break;
+ client = client->next;
+ }
+ if (client != NULL) break;
+ }
+ if (client == NULL)
+ return CS_OUT_OF_RESOURCE;
+
+ s = socket_table[ns];
+ if (++s->real_clients == 1) {
+ ss_callback_t call;
+ int status;
+ call.handler = &parse_events;
+ call.info = s;
+ s->ss_entry(ns, SS_RegisterCallback, &call);
+ s->ss_entry(ns, SS_GetStatus, &status);
+ if ((status & SS_DETECT) &&
+ !(s->state & SOCKET_SETUP_PENDING)) {
+ s->state |= SOCKET_SETUP_PENDING;
+ setup_socket(ns);
+ }
+ }
+
+ *handle = client;
+ client->state &= ~CLIENT_UNBOUND;
+ client->Socket = ns;
+ client->Attributes = req->Attributes;
+ client->EventMask = req->EventMask;
+ client->event_handler = req->event_handler;
+ client->event_callback_args = req->event_callback_args;
+ client->event_callback_args.client_handle = client;
+ client->event_callback_args.bus = s->cap.bus;
+
+ if (s->state & SOCKET_CARDBUS)
+ client->state |= CLIENT_CARDBUS;
+
+ if ((!(s->state & SOCKET_CARDBUS)) && (s->functions == 0) &&
+ (client->Function != BIND_FN_ALL)) {
+ cistpl_longlink_mfc_t mfc;
+ if (read_tuple(client, CISTPL_LONGLINK_MFC, &mfc)
+ == CS_SUCCESS)
+ s->functions = mfc.nfn;
+ else
+ s->functions = 1;
+ s->config = kmalloc(sizeof(config_t) * s->functions,
+ GFP_KERNEL);
+ memset(s->config, 0, sizeof(config_t) * s->functions);
+ }
+
+ DEBUG(1, "cs: register_client(): client 0x%p, sock %d, dev %s\n",
+ client, client->Socket, client->dev_info);
+ if (client->EventMask & CS_EVENT_REGISTRATION_COMPLETE)
+ EVENT(client, CS_EVENT_REGISTRATION_COMPLETE, CS_EVENT_PRI_LOW);
+ if ((socket_table[ns]->state & SOCKET_PRESENT) &&
+ !(socket_table[ns]->state & SOCKET_SETUP_PENDING)) {
+ if (client->EventMask & CS_EVENT_CARD_INSERTION)
+ EVENT(client, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
+ else
+ client->PendingEvents |= CS_EVENT_CARD_INSERTION;
+ }
+ return CS_SUCCESS;
+} /* register_client */
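+
+/*======================================================================
+
+ From a driver's point of view the sequence is: Driver Services
+ binds the driver to a socket with BindDevice (creating the unbound
+ client above), then the driver fills in a client_reg_t (dev_info,
+ Attributes, EventMask, event_handler) and calls RegisterClient. A
+ rough sketch, with dev_info and my_event_handler standing in for
+ the driver's own names:
+
+ client_reg_t client_reg;
+ memset(&client_reg, 0, sizeof(client_reg));
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT;
+ client_reg.EventMask = CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL;
+ client_reg.event_handler = &my_event_handler;
+ CardServices(RegisterClient, &handle, &client_reg, NULL);
+
+ If a card is already present, the client then gets
+ CS_EVENT_CARD_INSERTION right away or as a pending event, as the
+ code above shows.
+
+======================================================================*/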
+
+/*====================================================================*/
+
+static int release_configuration(client_handle_t handle,
+ config_req_t *req)
+{
+ socket_info_t *s;
+ pccard_io_map io = { 0, 0, 0, 0, 1 };
+ int i;
+
+ if (CHECK_HANDLE(handle) ||
+ !(handle->state & CLIENT_CONFIG_LOCKED))
+ return CS_BAD_HANDLE;
+ handle->state &= ~CLIENT_CONFIG_LOCKED;
+ s = SOCKET(handle);
+
+#ifdef CONFIG_CARDBUS
+ if (handle->state & CLIENT_CARDBUS) {
+ cb_disable(s);
+ s->lock_count = 0;
+ return CS_SUCCESS;
+ }
+#endif
+
+ if (!(handle->state & CLIENT_STALE)) {
+ config_t *c = CONFIG(handle);
+ if (--(s->lock_count) == 0) {
+ s->socket.flags = SS_OUTPUT_ENA;
+ s->socket.Vpp = 0;
+ s->socket.io_irq = 0;
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ }
+ if (c->state & CONFIG_IO_REQ)
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (s->io[i].NumPorts == 0)
+ continue;
+ s->io[i].Config--;
+ if (s->io[i].Config != 0)
+ continue;
+ io.map = i;
+ s->ss_entry(s->sock, SS_SetIOMap, &io);
+ }
+ c->state &= ~CONFIG_LOCKED;
+ }
+
+ return CS_SUCCESS;
+} /* release_configuration */
+
+/*======================================================================
+
+ Release_io() releases the I/O ranges allocated by a client. This
+ may be invoked some time after a card ejection has already dumped
+ the actual socket configuration, so if the client is "stale", we
+ don't bother checking the port ranges against the current socket
+ values.
+
+======================================================================*/
+
+static int release_io(client_handle_t handle, io_req_t *req)
+{
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle) || !(handle->state & CLIENT_IO_REQ))
+ return CS_BAD_HANDLE;
+ handle->state &= ~CLIENT_IO_REQ;
+ s = SOCKET(handle);
+
+#ifdef CONFIG_CARDBUS
+ if (handle->state & CLIENT_CARDBUS) {
+ cb_release(s);
+ return CS_SUCCESS;
+ }
+#endif
+
+ if (!(handle->state & CLIENT_STALE)) {
+ config_t *c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if ((c->io.BasePort1 != req->BasePort1) ||
+ (c->io.NumPorts1 != req->NumPorts1) ||
+ (c->io.BasePort2 != req->BasePort2) ||
+ (c->io.NumPorts2 != req->NumPorts2))
+ return CS_BAD_ARGS;
+ c->state &= ~CONFIG_IO_REQ;
+ }
+
+ release_io_space(s, req->BasePort1, req->NumPorts1);
+ if (req->NumPorts2)
+ release_io_space(s, req->BasePort2, req->NumPorts2);
+
+ return CS_SUCCESS;
+} /* release_io */
+
+/*====================================================================*/
+
+static int cs_release_irq(client_handle_t handle, irq_req_t *req)
+{
+ socket_info_t *s;
+ if (CHECK_HANDLE(handle) || !(handle->state & CLIENT_IRQ_REQ))
+ return CS_BAD_HANDLE;
+ handle->state &= ~CLIENT_IRQ_REQ;
+ s = SOCKET(handle);
+
+ if (!(handle->state & CLIENT_STALE)) {
+ config_t *c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if (c->irq.Attributes != req->Attributes)
+ return CS_BAD_ATTRIBUTE;
+ if (s->irq.AssignedIRQ != req->AssignedIRQ)
+ return CS_BAD_IRQ;
+ if (--s->irq.Config == 0) {
+ c->state &= ~CONFIG_IRQ_REQ;
+ s->irq.AssignedIRQ = 0;
+ }
+ }
+
+ if (req->Attributes & IRQ_HANDLE_PRESENT) {
+ bus_free_irq(s->cap.bus, req->AssignedIRQ, req->Instance);
+ }
+
+#ifdef CONFIG_ISA
+ if (req->AssignedIRQ != s->cap.pci_irq)
+ undo_irq(req->Attributes, req->AssignedIRQ);
+#endif
+
+ return CS_SUCCESS;
+} /* cs_release_irq */
+
+/*====================================================================*/
+
+static int release_window(window_handle_t win)
+{
+ socket_info_t *s;
+
+ if ((win == NULL) || (win->magic != WINDOW_MAGIC))
+ return CS_BAD_HANDLE;
+ s = win->sock;
+ if (!(win->handle->state & CLIENT_WIN_REQ(win->index)))
+ return CS_BAD_HANDLE;
+
+ /* Shut down memory window */
+ win->ctl.flags &= ~MAP_ACTIVE;
+ s->ss_entry(s->sock, SS_SetMemMap, &win->ctl);
+ s->state &= ~SOCKET_WIN_REQ(win->index);
+
+ /* Release system memory */
+ release_mem_region(win->base, win->size);
+ win->handle->state &= ~CLIENT_WIN_REQ(win->index);
+
+ win->magic = 0;
+
+ return CS_SUCCESS;
+} /* release_window */
+
+/*====================================================================*/
+
+static int request_configuration(client_handle_t handle,
+ config_req_t *req)
+{
+ int i;
+ u_int base;
+ socket_info_t *s;
+ config_t *c;
+ pccard_io_map iomap;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+#ifdef CONFIG_CARDBUS
+ if (handle->state & CLIENT_CARDBUS) {
+ if (!(req->IntType & INT_CARDBUS))
+ return CS_UNSUPPORTED_MODE;
+ if (s->lock_count != 0)
+ return CS_CONFIGURATION_LOCKED;
+ cb_enable(s);
+ handle->state |= CLIENT_CONFIG_LOCKED;
+ s->lock_count++;
+ return CS_SUCCESS;
+ }
+#endif
+
+ if (req->IntType & INT_CARDBUS)
+ return CS_UNSUPPORTED_MODE;
+ c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+
+ /* Do power control. We don't allow changes in Vcc. */
+ if (s->socket.Vcc != req->Vcc)
+ printk(KERN_DEBUG "cs: ignoring requested Vcc\n");
+ if (req->Vpp1 != req->Vpp2)
+ return CS_BAD_VPP;
+ s->socket.Vpp = req->Vpp1;
+ if (s->ss_entry(s->sock, SS_SetSocket, &s->socket))
+ return CS_BAD_VPP;
+
+ c->Vcc = req->Vcc; c->Vpp1 = c->Vpp2 = req->Vpp1;
+
+ /* Pick memory or I/O card, DMA mode, interrupt */
+ c->IntType = req->IntType;
+ c->Attributes = req->Attributes;
+ if (req->IntType & INT_MEMORY_AND_IO)
+ s->socket.flags |= SS_IOCARD;
+ if (req->IntType & INT_ZOOMED_VIDEO)
+ s->socket.flags |= SS_ZVCARD;
+ if (req->Attributes & CONF_ENABLE_DMA)
+ s->socket.flags |= SS_DMA_MODE;
+ if (req->Attributes & CONF_ENABLE_SPKR)
+ s->socket.flags |= SS_SPKR_ENA;
+ if (req->Attributes & CONF_ENABLE_IRQ)
+ s->socket.io_irq = s->irq.AssignedIRQ;
+ else
+ s->socket.io_irq = 0;
+ s->ss_entry(s->sock, SS_SetSocket, &s->socket);
+ s->lock_count++;
+
+ /* Set up CIS configuration registers */
+ base = c->ConfigBase = req->ConfigBase;
+ c->Present = c->CardValues = req->Present;
+ if (req->Present & PRESENT_COPY) {
+ c->Copy = req->Copy;
+ write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &c->Copy);
+ }
+ if (req->Present & PRESENT_OPTION) {
+ if (s->functions == 1) {
+ c->Option = req->ConfigIndex & COR_CONFIG_MASK;
+ } else {
+ c->Option = req->ConfigIndex & COR_MFC_CONFIG_MASK;
+ c->Option |= COR_FUNC_ENA|COR_IREQ_ENA;
+ if (req->Present & PRESENT_IOBASE_0)
+ c->Option |= COR_ADDR_DECODE;
+ }
+ if (c->state & CONFIG_IRQ_REQ)
+ if (!(c->irq.Attributes & IRQ_FORCED_PULSE))
+ c->Option |= COR_LEVEL_REQ;
+ write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &c->Option);
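+ /* give the card time to settle after the COR write */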
+ mdelay(40);
+ }
+ if (req->Present & PRESENT_STATUS) {
+ c->Status = req->Status;
+ write_cis_mem(s, 1, (base + CISREG_CCSR)>>1, 1, &c->Status);
+ }
+ if (req->Present & PRESENT_PIN_REPLACE) {
+ c->Pin = req->Pin;
+ write_cis_mem(s, 1, (base + CISREG_PRR)>>1, 1, &c->Pin);
+ }
+ if (req->Present & PRESENT_EXT_STATUS) {
+ c->ExtStatus = req->ExtStatus;
+ write_cis_mem(s, 1, (base + CISREG_ESR)>>1, 1, &c->ExtStatus);
+ }
+ if (req->Present & PRESENT_IOBASE_0) {
+ u_char b = c->io.BasePort1 & 0xff;
+ write_cis_mem(s, 1, (base + CISREG_IOBASE_0)>>1, 1, &b);
+ b = (c->io.BasePort1 >> 8) & 0xff;
+ write_cis_mem(s, 1, (base + CISREG_IOBASE_1)>>1, 1, &b);
+ }
+ if (req->Present & PRESENT_IOSIZE) {
+ u_char b = c->io.NumPorts1 + c->io.NumPorts2 - 1;
+ write_cis_mem(s, 1, (base + CISREG_IOSIZE)>>1, 1, &b);
+ }
+
+ /* Configure I/O windows */
+ if (c->state & CONFIG_IO_REQ) {
+ iomap.speed = io_speed;
+ for (i = 0; i < MAX_IO_WIN; i++)
+ if (s->io[i].NumPorts != 0) {
+ iomap.map = i;
+ iomap.flags = MAP_ACTIVE;
+ switch (s->io[i].Attributes & IO_DATA_PATH_WIDTH) {
+ case IO_DATA_PATH_WIDTH_16:
+ iomap.flags |= MAP_16BIT; break;
+ case IO_DATA_PATH_WIDTH_AUTO:
+ iomap.flags |= MAP_AUTOSZ; break;
+ default:
+ break;
+ }
+ iomap.start = s->io[i].BasePort;
+ iomap.stop = iomap.start + s->io[i].NumPorts - 1;
+ s->ss_entry(s->sock, SS_SetIOMap, &iomap);
+ s->io[i].Config++;
+ }
+ }
+
+ c->state |= CONFIG_LOCKED;
+ handle->state |= CLIENT_CONFIG_LOCKED;
+ return CS_SUCCESS;
+} /* request_configuration */
+
+/*======================================================================
+
+ Request_io() reserves ranges of port addresses for a socket.
+ I have not implemented range sharing or alias addressing.
+
+======================================================================*/
+
+static int request_io(client_handle_t handle, io_req_t *req)
+{
+ socket_info_t *s;
+ config_t *c;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ if (handle->state & CLIENT_CARDBUS) {
+#ifdef CONFIG_CARDBUS
+ int ret = cb_config(s);
+ if (ret == CS_SUCCESS)
+ handle->state |= CLIENT_IO_REQ;
+ return ret;
+#else
+ return CS_UNSUPPORTED_FUNCTION;
+#endif
+ }
+
+ if (!req)
+ return CS_UNSUPPORTED_MODE;
+ c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if (c->state & CONFIG_IO_REQ)
+ return CS_IN_USE;
+ if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))
+ return CS_BAD_ATTRIBUTE;
+ if ((req->NumPorts2 > 0) &&
+ (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)))
+ return CS_BAD_ATTRIBUTE;
+
+ if (alloc_io_space(s, req->Attributes1, &req->BasePort1,
+ req->NumPorts1, req->IOAddrLines,
+ handle->dev_info))
+ return CS_IN_USE;
+
+ if (req->NumPorts2) {
+ if (alloc_io_space(s, req->Attributes2, &req->BasePort2,
+ req->NumPorts2, req->IOAddrLines,
+ handle->dev_info)) {
+ release_io_space(s, req->BasePort1, req->NumPorts1);
+ return CS_IN_USE;
+ }
+ }
+
+ c->io = *req;
+ c->state |= CONFIG_IO_REQ;
+ handle->state |= CLIENT_IO_REQ;
+ return CS_SUCCESS;
+} /* request_io */
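+
+/* Illustrative sketch (not part of the original file): a hypothetical
+ client asking for one 16-port, 16-bit wide I/O window. A BasePort1
+ of zero leaves the base address up to the resource manager; the
+ window size and IOAddrLines value are assumptions. */
+#if 0
+static int example_request_io(client_handle_t handle)
+{
+ io_req_t io;
+
+ io.BasePort1 = 0;
+ io.NumPorts1 = 16;
+ io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ io.BasePort2 = 0;
+ io.NumPorts2 = 0;
+ io.Attributes2 = 0;
+ io.IOAddrLines = 4;
+ return CardServices(RequestIO, handle, &io);
+}
+#endif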
+
+/*======================================================================
+
+ Request_irq() reserves an irq for this client.
+
+ Also, since Linux only reserves irq's when they are actually
+ hooked, we don't guarantee that an irq will still be available
+ when the configuration is locked. Now that I think about it,
+ there might be a way to fix this using a dummy handler.
+
+======================================================================*/
+
+static int cs_request_irq(client_handle_t handle, irq_req_t *req)
+{
+ socket_info_t *s;
+ config_t *c;
+ int ret = CS_IN_USE, irq = 0;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ c = CONFIG(handle);
+ if (c->state & CONFIG_LOCKED)
+ return CS_CONFIGURATION_LOCKED;
+ if (c->state & CONFIG_IRQ_REQ)
+ return CS_IN_USE;
+
+#ifdef CONFIG_ISA
+ if (s->irq.AssignedIRQ != 0) {
+ /* If the interrupt is already assigned, it must match */
+ irq = s->irq.AssignedIRQ;
+ if (req->IRQInfo1 & IRQ_INFO2_VALID) {
+ u_int mask = req->IRQInfo2 & s->cap.irq_mask;
+ ret = ((mask >> irq) & 1) ? 0 : CS_BAD_ARGS;
+ } else
+ ret = ((req->IRQInfo1&IRQ_MASK) == irq) ? 0 : CS_BAD_ARGS;
+ } else {
+ if (req->IRQInfo1 & IRQ_INFO2_VALID) {
+ u_int try, mask = req->IRQInfo2 & s->cap.irq_mask;
+ for (try = 0; try < 2; try++) {
+ for (irq = 0; irq < 16; irq++)
+ if ((mask >> irq) & 1) {
+ ret = try_irq(req->Attributes, irq, try);
+ if (ret == 0) break;
+ }
+ if (ret == 0) break;
+ }
+ } else {
+ irq = req->IRQInfo1 & IRQ_MASK;
+ ret = try_irq(req->Attributes, irq, 1);
+ }
+ }
+#endif
+ if (ret != 0) {
+ if (!s->cap.pci_irq)
+ return ret;
+ irq = s->cap.pci_irq;
+ }
+
+ if (req->Attributes & IRQ_HANDLE_PRESENT) {
+ if (bus_request_irq(s->cap.bus, irq, req->Handler,
+ ((req->Attributes & IRQ_TYPE_DYNAMIC_SHARING) ||
+ (s->functions > 1) ||
+ (irq == s->cap.pci_irq)) ? SA_SHIRQ : 0,
+ handle->dev_info, req->Instance))
+ return CS_IN_USE;
+ }
+
+ c->irq.Attributes = req->Attributes;
+ s->irq.AssignedIRQ = req->AssignedIRQ = irq;
+ s->irq.Config++;
+
+ c->state |= CONFIG_IRQ_REQ;
+ handle->state |= CLIENT_IRQ_REQ;
+ return CS_SUCCESS;
+} /* cs_request_irq */
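+
+/* Illustrative sketch (not part of the original file): requesting an
+ interrupt with a handler attached. The handler, its dev_id, and the
+ 0x0cf8 mask of acceptable IRQ lines are assumptions. */
+#if 0
+static void example_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+static int example_request_irq(client_handle_t handle, void *dev_id)
+{
+ irq_req_t irq;
+ int ret;
+
+ irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
+ irq.IRQInfo1 = IRQ_INFO2_VALID;
+ irq.IRQInfo2 = 0x0cf8;
+ irq.Handler = &example_interrupt;
+ irq.Instance = dev_id;
+ ret = CardServices(RequestIRQ, handle, &irq);
+ if (ret == CS_SUCCESS)
+ printk(KERN_DEBUG "example_cs: assigned irq %u\n",
+ irq.AssignedIRQ);
+ return ret;
+}
+#endif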
+
+/*======================================================================
+
+ Request_window() establishes a mapping between card memory space
+ and system memory space.
+
+======================================================================*/
+
+static int request_window(client_handle_t *handle, win_req_t *req)
+{
+ socket_info_t *s;
+ window_t *win;
+ u_long align;
+ int w;
+
+ if (CHECK_HANDLE(*handle))
+ return CS_BAD_HANDLE;
+ s = SOCKET(*handle);
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (req->Attributes & (WIN_PAGED | WIN_SHARED))
+ return CS_BAD_ATTRIBUTE;
+
+ /* Window size defaults to smallest available */
+ if (req->Size == 0)
+ req->Size = s->cap.map_size;
+ align = (((s->cap.features & SS_CAP_MEM_ALIGN) ||
+ (req->Attributes & WIN_STRICT_ALIGN)) ?
+ req->Size : s->cap.map_size);
+ if (req->Size & (s->cap.map_size-1))
+ return CS_BAD_SIZE;
+ if ((req->Base && (s->cap.features & SS_CAP_STATIC_MAP)) ||
+ (req->Base & (align-1)))
+ return CS_BAD_BASE;
+ if (req->Base)
+ align = 0;
+
+ /* Allocate system memory window */
+ for (w = 0; w < MAX_WIN; w++)
+ if (!(s->state & SOCKET_WIN_REQ(w))) break;
+ if (w == MAX_WIN)
+ return CS_OUT_OF_RESOURCE;
+
+ win = &s->win[w];
+ win->magic = WINDOW_MAGIC;
+ win->index = w;
+ win->handle = *handle;
+ win->sock = s;
+ win->base = req->Base;
+ win->size = req->Size;
+
+ if (!(s->cap.features & SS_CAP_STATIC_MAP) &&
+ find_mem_region(&win->base, win->size, align,
+ (req->Attributes & WIN_MAP_BELOW_1MB) ||
+ !(s->cap.features & SS_CAP_PAGE_REGS),
+ (*handle)->dev_info))
+ return CS_IN_USE;
+ (*handle)->state |= CLIENT_WIN_REQ(w);
+
+ /* Configure the socket controller */
+ win->ctl.map = w+1;
+ win->ctl.flags = 0;
+ win->ctl.speed = req->AccessSpeed;
+ if (req->Attributes & WIN_MEMORY_TYPE)
+ win->ctl.flags |= MAP_ATTRIB;
+ if (req->Attributes & WIN_ENABLE)
+ win->ctl.flags |= MAP_ACTIVE;
+ if (req->Attributes & WIN_DATA_WIDTH_16)
+ win->ctl.flags |= MAP_16BIT;
+ if (req->Attributes & WIN_USE_WAIT)
+ win->ctl.flags |= MAP_USE_WAIT;
+ win->ctl.sys_start = win->base;
+ win->ctl.sys_stop = win->base + win->size-1;
+ win->ctl.card_start = 0;
+ if (s->ss_entry(s->sock, SS_SetMemMap, &win->ctl) != 0)
+ return CS_BAD_ARGS;
+ s->state |= SOCKET_WIN_REQ(w);
+
+ /* Return window handle */
+ req->Base = win->ctl.sys_start;
+ *handle = (client_handle_t)win;
+
+ return CS_SUCCESS;
+} /* request_window */
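+
+/* Illustrative sketch (not part of the original file): mapping 4K of
+ attribute memory. On success the handle passed in is overwritten
+ with the window handle, so a copy of the client handle is used.
+ The example_* name and the 4K size are assumptions. */
+#if 0
+static int example_request_window(client_handle_t handle,
+ window_handle_t *win)
+{
+ win_req_t req;
+
+ req.Attributes = WIN_MEMORY_TYPE | WIN_ENABLE;
+ req.Base = 0; /* let Card Services find a host address */
+ req.Size = 0x1000;
+ req.AccessSpeed = 0;
+ *win = (window_handle_t)handle;
+ return CardServices(RequestWindow, win, &req);
+}
+#endif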
+
+/*======================================================================
+
+ I'm not sure which "reset" function this is supposed to use,
+ but for now, it uses the low-level interface's reset, not the
+ CIS register.
+
+======================================================================*/
+
+static int reset_card(client_handle_t handle, client_req_t *req)
+{
+ int i, ret;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (s->state & SOCKET_RESET_PENDING)
+ return CS_IN_USE;
+ s->state |= SOCKET_RESET_PENDING;
+
+ ret = send_event(s, CS_EVENT_RESET_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret != 0) {
+ s->state &= ~SOCKET_RESET_PENDING;
+ handle->event_callback_args.info = (void *)(u_long)ret;
+ EVENT(handle, CS_EVENT_RESET_COMPLETE, CS_EVENT_PRI_LOW);
+ } else {
+ DEBUG(1, "cs: resetting socket %d\n", i);
+ send_event(s, CS_EVENT_RESET_PHYSICAL, CS_EVENT_PRI_LOW);
+ s->reset_handle = handle;
+ reset_socket(i);
+ }
+ return CS_SUCCESS;
+} /* reset_card */
+
+/*======================================================================
+
+ These shut down or wake up a socket. They are sort of
+ user-initiated versions of the APM suspend and resume actions.
+
+======================================================================*/
+
+static int suspend_card(client_handle_t handle, client_req_t *req)
+{
+ int i;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (s->state & SOCKET_SUSPEND)
+ return CS_IN_USE;
+
+ DEBUG(1, "cs: suspending socket %d\n", i);
+ send_event(s, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW);
+ s->ss_entry(s->sock, SS_SetSocket, &dead_socket);
+ s->state |= SOCKET_SUSPEND;
+
+ return CS_SUCCESS;
+} /* suspend_card */
+
+static int resume_card(client_handle_t handle, client_req_t *req)
+{
+ int i;
+ socket_info_t *s;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+ if (!(s->state & SOCKET_SUSPEND))
+ return CS_IN_USE;
+
+ DEBUG(1, "cs: waking up socket %d\n", i);
+ setup_socket(i);
+
+ return CS_SUCCESS;
+} /* resume_card */
+
+/*======================================================================
+
+ These handle user requests to eject or insert a card.
+
+======================================================================*/
+
+static int eject_card(client_handle_t handle, client_req_t *req)
+{
+ int i, ret;
+ socket_info_t *s;
+ u_long flags;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (!(s->state & SOCKET_PRESENT))
+ return CS_NO_CARD;
+
+ DEBUG(1, "cs: user eject request on socket %d\n", i);
+
+ ret = send_event(s, CS_EVENT_EJECTION_REQUEST, CS_EVENT_PRI_LOW);
+ if (ret != 0)
+ return ret;
+
+ spin_lock_irqsave(&s->lock, flags);
+ do_shutdown(s);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return CS_SUCCESS;
+
+} /* eject_card */
+
+static int insert_card(client_handle_t handle, client_req_t *req)
+{
+ int i, status;
+ socket_info_t *s;
+ u_long flags;
+
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ i = handle->Socket; s = socket_table[i];
+ if (s->state & SOCKET_PRESENT)
+ return CS_IN_USE;
+
+ DEBUG(1, "cs: user insert request on socket %d\n", i);
+
+ spin_lock_irqsave(&s->lock, flags);
+ if (!(s->state & SOCKET_SETUP_PENDING)) {
+ s->state |= SOCKET_SETUP_PENDING;
+ spin_unlock_irqrestore(&s->lock, flags);
+ s->ss_entry(i, SS_GetStatus, &status);
+ if (status & SS_DETECT)
+ setup_socket(i);
+ else {
+ s->state &= ~SOCKET_SETUP_PENDING;
+ return CS_NO_CARD;
+ }
+ } else
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return CS_SUCCESS;
+} /* insert_card */
+
+/*======================================================================
+
+ Maybe this should send a CS_EVENT_CARD_INSERTION event if we
+ haven't sent one to this client yet?
+
+======================================================================*/
+
+static int set_event_mask(client_handle_t handle, eventmask_t *mask)
+{
+ u_int events, bit;
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+ if (handle->Attributes & CONF_EVENT_MASK_VALID)
+ return CS_BAD_SOCKET;
+ handle->EventMask = mask->EventMask;
+ events = handle->PendingEvents & handle->EventMask;
+ handle->PendingEvents -= events;
+ while (events != 0) {
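+ /* extract the lowest set bit (equivalent to events & -events) */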
+ bit = ((events ^ (events-1)) + 1) >> 1;
+ EVENT(handle, bit, CS_EVENT_PRI_LOW);
+ events -= bit;
+ }
+ return CS_SUCCESS;
+} /* set_event_mask */
+
+/*====================================================================*/
+
+static int report_error(client_handle_t handle, error_info_t *err)
+{
+ int i;
+ char *serv;
+
+ if (CHECK_HANDLE(handle))
+ printk(KERN_NOTICE);
+ else
+ printk(KERN_NOTICE "%s: ", handle->dev_info);
+
+ for (i = 0; i < SERVICE_COUNT; i++)
+ if (service_table[i].key == err->func) break;
+ if (i < SERVICE_COUNT)
+ serv = service_table[i].msg;
+ else
+ serv = "Unknown service number";
+
+ for (i = 0; i < ERROR_COUNT; i++)
+ if (error_table[i].key == err->retcode) break;
+ if (i < ERROR_COUNT)
+ printk("%s: %s\n", serv, error_table[i].msg);
+ else
+ printk("%s: Unknown error code %#x\n", serv, err->retcode);
+
+ return CS_SUCCESS;
+} /* report_error */
+
+/*====================================================================*/
+
+int CardServices(int func, void *a1, void *a2, void *a3)
+{
+
+#ifdef PCMCIA_DEBUG
+ if (pc_debug > 2) {
+ int i;
+ for (i = 0; i < SERVICE_COUNT; i++)
+ if (service_table[i].key == func) break;
+ if (i < SERVICE_COUNT)
+ printk(KERN_DEBUG "cs: CardServices(%s, 0x%p, 0x%p)\n",
+ service_table[i].msg, a1, a2);
+ else
+ printk(KERN_DEBUG "cs: CardServices(Unknown func %d, "
+ "0x%p, 0x%p)\n", func, a1, a2);
+ }
+#endif
+ switch (func) {
+ case AccessConfigurationRegister:
+ return access_configuration_register(a1, a2); break;
+ case AdjustResourceInfo:
+ return adjust_resource_info(a1, a2); break;
+ case CheckEraseQueue:
+ return check_erase_queue(a1); break;
+ case CloseMemory:
+ return close_memory(a1); break;
+ case CopyMemory:
+ return copy_memory(a1, a2); break;
+ case DeregisterClient:
+ return deregister_client(a1); break;
+ case DeregisterEraseQueue:
+ return deregister_erase_queue(a1); break;
+ case GetFirstClient:
+ return get_first_client(a1, a2); break;
+ case GetCardServicesInfo:
+ return get_card_services_info(a1); break;
+ case GetConfigurationInfo:
+ return get_configuration_info(a1, a2); break;
+ case GetNextClient:
+ return get_next_client(a1, a2); break;
+ case GetFirstRegion:
+ return get_first_region(a1, a2); break;
+ case GetFirstTuple:
+ return get_first_tuple(a1, a2); break;
+ case GetNextRegion:
+ return get_next_region(a1, a2); break;
+ case GetNextTuple:
+ return get_next_tuple(a1, a2); break;
+ case GetStatus:
+ return cs_get_status(a1, a2); break;
+ case GetTupleData:
+ return get_tuple_data(a1, a2); break;
+ case MapMemPage:
+ return map_mem_page(a1, a2); break;
+ case ModifyConfiguration:
+ return modify_configuration(a1, a2); break;
+ case ModifyWindow:
+ return modify_window(a1, a2); break;
+ case OpenMemory:
+ return open_memory(a1, a2);
+ case ParseTuple:
+ return parse_tuple(a1, a2, a3); break;
+ case ReadMemory:
+ return read_memory(a1, a2, a3); break;
+ case RegisterClient:
+ return register_client(a1, a2); break;
+ case RegisterEraseQueue:
+ return register_erase_queue(a1, a2); break;
+ case RegisterMTD:
+ return register_mtd(a1, a2); break;
+ case ReleaseConfiguration:
+ return release_configuration(a1, a2); break;
+ case ReleaseIO:
+ return release_io(a1, a2); break;
+ case ReleaseIRQ:
+ return cs_release_irq(a1, a2); break;
+ case ReleaseWindow:
+ return release_window(a1); break;
+ case RequestConfiguration:
+ return request_configuration(a1, a2); break;
+ case RequestIO:
+ return request_io(a1, a2); break;
+ case RequestIRQ:
+ return cs_request_irq(a1, a2); break;
+ case RequestWindow:
+ return request_window(a1, a2); break;
+ case ResetCard:
+ return reset_card(a1, a2); break;
+ case SetEventMask:
+ return set_event_mask(a1, a2); break;
+ case ValidateCIS:
+ return validate_cis(a1, a2); break;
+ case WriteMemory:
+ return write_memory(a1, a2, a3); break;
+ case BindDevice:
+ return bind_device(a1); break;
+ case BindMTD:
+ return bind_mtd(a1); break;
+ case ReportError:
+ return report_error(a1, a2); break;
+ case SuspendCard:
+ return suspend_card(a1, a2); break;
+ case ResumeCard:
+ return resume_card(a1, a2); break;
+ case EjectCard:
+ return eject_card(a1, a2); break;
+ case InsertCard:
+ return insert_card(a1, a2); break;
+ case ReplaceCIS:
+ return replace_cis(a1, a2); break;
+ case GetFirstWindow:
+ return get_first_window(a1, a2); break;
+ case GetNextWindow:
+ return get_next_window(a1, a2); break;
+ case GetMemPage:
+ return get_mem_page(a1, a2); break;
+ default:
+ return CS_UNSUPPORTED_FUNCTION; break;
+ }
+
+} /* CardServices */
+
+/*======================================================================
+
+ OS-specific module glue goes here
+
+======================================================================*/
+
+#include <linux/pci.h>
+
+#if (LINUX_VERSION_CODE <= VERSION(2,1,17))
+
+#undef CONFIG_MODVERSIONS
+static struct symbol_table cs_symtab = {
+#include <linux/symtab_begin.h>
+#undef X
+#define X(sym) { (void *)&sym, SYMBOL_NAME_STR(sym) }
+ X(register_ss_entry),
+ X(unregister_ss_entry),
+ X(CardServices),
+ X(MTDHelperEntry),
+#ifdef HAS_PROC_BUS
+ X(proc_pccard),
+#endif
+#ifndef HAVE_MEMRESERVE
+ X(request_mem_region),
+ X(release_mem_region),
+#endif
+#ifdef CONFIG_PNP_BIOS
+ X(check_pnp_irq),
+#endif
+#ifdef CONFIG_PCI
+ X(pci_irq_mask),
+ X(pci_devices),
+ X(pci_root),
+ X(pci_find_slot),
+ X(pci_find_class),
+ X(pci_enable_device),
+ X(pci_set_power_state),
+#endif
+#include <linux/symtab_end.h>
+};
+
+#else
+
+EXPORT_SYMBOL(register_ss_entry);
+EXPORT_SYMBOL(unregister_ss_entry);
+EXPORT_SYMBOL(CardServices);
+EXPORT_SYMBOL(MTDHelperEntry);
+#ifdef HAS_PROC_BUS
+EXPORT_SYMBOL(proc_pccard);
+#endif
+#ifndef HAVE_MEMRESERVE
+EXPORT_SYMBOL(request_mem_region);
+EXPORT_SYMBOL(release_mem_region);
+#endif
+#ifdef CONFIG_PNP_BIOS
+EXPORT_SYMBOL(check_pnp_irq);
+#endif
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_irq_mask);
+#if (LINUX_VERSION_CODE < VERSION(2,3,24))
+EXPORT_SYMBOL(pci_enable_device);
+EXPORT_SYMBOL(pci_set_power_state);
+#endif
+#endif
+
+#endif
+
+int __init init_pcmcia_cs(void)
+{
+ printk(KERN_INFO "%s\n", release);
+#ifdef UTS_RELEASE
+ printk(KERN_INFO " %s\n", kernel);
+#endif
+ printk(KERN_INFO " %s\n", options);
+ DEBUG(0, "%s\n", version);
+#ifdef CONFIG_PM
+ if (do_apm)
+ pm_register(PM_SYS_DEV, PM_SYS_PCMCIA, handle_pm_event);
+#endif
+#ifdef CONFIG_PCI
+ pci_fixup_init();
+#endif
+#ifdef CONFIG_PNP_BIOS
+ if (do_pnp) {
+ pnp_bios_init();
+ pnp_proc_init();
+ pnp_rsrc_init();
+ }
+#endif
+#if (LINUX_VERSION_CODE <= VERSION(2,1,17))
+ register_symtab(&cs_symtab);
+#endif
+#ifdef HAS_PROC_BUS
+ proc_pccard = proc_mkdir("pccard", proc_bus);
+#ifdef CONFIG_PNP_BIOS
+ if (proc_pccard) {
+ create_proc_read_entry("ioport", 0, proc_pccard,
+ proc_read_io, NULL);
+ create_proc_read_entry("irq", 0, proc_pccard,
+ proc_read_irq, NULL);
+ }
+#endif
+#ifndef HAVE_MEMRESERVE
+ if (proc_pccard)
+ create_proc_read_entry("memory", 0, proc_pccard,
+ proc_read_mem, NULL);
+#endif
+#endif
+ return 0;
+}
+
+static void __exit exit_pcmcia_cs(void)
+{
+ printk(KERN_INFO "unloading PCMCIA Card Services\n");
+#ifdef HAS_PROC_BUS
+ if (proc_pccard) {
+#ifdef CONFIG_PNP_BIOS
+ remove_proc_entry("ioport", proc_pccard);
+ remove_proc_entry("irq", proc_pccard);
+#endif
+#ifndef HAVE_MEMRESERVE
+ remove_proc_entry("memory", proc_pccard);
+#endif
+ remove_proc_entry("pccard", proc_bus);
+ }
+#endif
+#ifdef CONFIG_PM
+ if (do_apm)
+ pm_unregister_all(handle_pm_event);
+#endif
+#ifdef CONFIG_PCI
+ pci_fixup_done();
+#endif
+#ifdef CONFIG_PNP_BIOS
+ if (do_pnp) {
+ pnp_proc_done();
+ pnp_rsrc_done();
+ }
+#endif
+ release_resource_db();
+}
+
+module_init(init_pcmcia_cs);
+module_exit(exit_pcmcia_cs);
diff --git a/linux/pcmcia-cs/modules/cs_internal.h b/linux/pcmcia-cs/modules/cs_internal.h
new file mode 100644
index 0000000..c9f98f8
--- /dev/null
+++ b/linux/pcmcia-cs/modules/cs_internal.h
@@ -0,0 +1,300 @@
+/*
+ * cs_internal.h 1.58 2004/04/25 17:58:22
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_CS_INTERNAL_H
+#define _LINUX_CS_INTERNAL_H
+
+#include <linux/config.h>
+#include <linux/spinlock.h>
+
+typedef struct erase_busy_t {
+ eraseq_entry_t *erase;
+ client_handle_t client;
+ struct timer_list timeout;
+ struct erase_busy_t *prev, *next;
+} erase_busy_t;
+
+#define ERASEQ_MAGIC 0xFA67
+typedef struct eraseq_t {
+ u_short eraseq_magic;
+ client_handle_t handle;
+ int count;
+ eraseq_entry_t *entry;
+} eraseq_t;
+
+#define CLIENT_MAGIC 0x51E6
+typedef struct client_t {
+ u_short client_magic;
+ socket_t Socket;
+ u_char Function;
+ dev_info_t dev_info;
+ u_int Attributes;
+ u_int state;
+ event_t EventMask, PendingEvents;
+ int (*event_handler)(event_t event, int priority,
+ event_callback_args_t *);
+ event_callback_args_t event_callback_args;
+ struct client_t *next;
+ u_int mtd_count;
+ wait_queue_head_t mtd_req;
+ erase_busy_t erase_busy;
+} client_t;
+
+/* Flags in client state */
+#define CLIENT_CONFIG_LOCKED 0x0001
+#define CLIENT_IRQ_REQ 0x0002
+#define CLIENT_IO_REQ 0x0004
+#define CLIENT_UNBOUND 0x0008
+#define CLIENT_STALE 0x0010
+#define CLIENT_WIN_REQ(i) (0x20<<(i))
+#define CLIENT_CARDBUS 0x8000
+
+typedef struct io_window_t {
+ u_int Attributes;
+ ioaddr_t BasePort, NumPorts;
+ ioaddr_t InUse, Config;
+} io_window_t;
+
+#define WINDOW_MAGIC 0xB35C
+typedef struct window_t {
+ u_short magic;
+ u_short index;
+ client_handle_t handle;
+ struct socket_info_t *sock;
+ u_long base;
+ u_long size;
+ pccard_mem_map ctl;
+} window_t;
+
+#define REGION_MAGIC 0xE3C9
+typedef struct region_t {
+ u_short region_magic;
+ u_short state;
+ dev_info_t dev_info;
+ client_handle_t mtd;
+ u_int MediaID;
+ region_info_t info;
+} region_t;
+
+#define REGION_STALE 0x01
+
+/* Each card function gets one of these guys */
+typedef struct config_t {
+ u_int state;
+ u_int Attributes;
+ u_int Vcc, Vpp1, Vpp2;
+ u_int IntType;
+ u_int ConfigBase;
+ u_char Status, Pin, Copy, Option, ExtStatus;
+ u_int Present;
+ u_int CardValues;
+ io_req_t io;
+ struct {
+ u_int Attributes;
+ } irq;
+} config_t;
+
+/* Maximum number of IO windows per socket */
+#define MAX_IO_WIN 2
+
+/* Maximum number of memory windows per socket */
+#define MAX_WIN 4
+
+/* The size of the CIS cache */
+#define MAX_CIS_TABLE 64
+#define MAX_CIS_DATA 512
+
+typedef struct socket_info_t {
+#ifdef __SMP__
+ spinlock_t lock;
+#endif
+ ss_entry_t ss_entry;
+ u_int sock;
+ socket_state_t socket;
+ socket_cap_t cap;
+ u_int state;
+ u_short functions;
+ u_short lock_count;
+ client_handle_t clients;
+ u_int real_clients;
+ client_handle_t reset_handle;
+ struct timer_list setup, shutdown;
+ u_long setup_timeout;
+ pccard_mem_map cis_mem;
+ u_char *cis_virt;
+ config_t *config;
+#ifdef CONFIG_CARDBUS
+ u_int cb_cis_space;
+ cb_bridge_map cb_cis_map;
+ u_char *cb_cis_virt;
+ struct cb_config_t *cb_config;
+#endif
+ struct {
+ u_int AssignedIRQ;
+ u_int Config;
+ } irq;
+ io_window_t io[MAX_IO_WIN];
+ window_t win[MAX_WIN];
+ region_t *c_region, *a_region;
+ erase_busy_t erase_busy;
+ int cis_used;
+ struct {
+ u_int addr;
+ u_short len;
+ u_short attr;
+ } cis_table[MAX_CIS_TABLE];
+ char cis_cache[MAX_CIS_DATA];
+ u_int fake_cis_len;
+ char *fake_cis;
+#ifdef HAS_PROC_BUS
+ struct proc_dir_entry *proc;
+#endif
+} socket_info_t;
+
+/* Flags in config state */
+#define CONFIG_LOCKED 0x01
+#define CONFIG_IRQ_REQ 0x02
+#define CONFIG_IO_REQ 0x04
+
+/* Flags in socket state */
+#define SOCKET_PRESENT 0x0008
+#define SOCKET_SETUP_PENDING 0x0010
+#define SOCKET_SHUTDOWN_PENDING 0x0020
+#define SOCKET_RESET_PENDING 0x0040
+#define SOCKET_SUSPEND 0x0080
+#define SOCKET_WIN_REQ(i) (0x0100<<(i))
+#define SOCKET_IO_REQ(i) (0x1000<<(i))
+#define SOCKET_REGION_INFO 0x4000
+#define SOCKET_CARDBUS 0x8000
+
+#define CHECK_HANDLE(h) \
+ (((h) == NULL) || ((h)->client_magic != CLIENT_MAGIC))
+
+#define CHECK_SOCKET(s) \
+ (((s) >= sockets) || (socket_table[s]->ss_entry == NULL))
+
+#define SOCKET(h) (socket_table[(h)->Socket])
+#define CONFIG(h) (&SOCKET(h)->config[(h)->Function])
+
+#define CHECK_REGION(r) \
+ (((r) == NULL) || ((r)->region_magic != REGION_MAGIC))
+
+#define CHECK_ERASEQ(q) \
+ (((q) == NULL) || ((q)->eraseq_magic != ERASEQ_MAGIC))
+
+#define EVENT(h, e, p) \
+ ((h)->event_handler((e), (p), &(h)->event_callback_args))
+
+/* In cardbus.c */
+int cb_alloc(socket_info_t *s);
+void cb_free(socket_info_t *s);
+int cb_config(socket_info_t *s);
+void cb_release(socket_info_t *s);
+void cb_enable(socket_info_t *s);
+void cb_disable(socket_info_t *s);
+int read_cb_mem(socket_info_t *s, u_char fn, int space,
+ u_int addr, u_int len, void *ptr);
+void cb_release_cis_mem(socket_info_t *s);
+
+/* In cistpl.c */
+int read_cis_mem(socket_info_t *s, int attr,
+ u_int addr, u_int len, void *ptr);
+void write_cis_mem(socket_info_t *s, int attr,
+ u_int addr, u_int len, void *ptr);
+void release_cis_mem(socket_info_t *s);
+int verify_cis_cache(socket_info_t *s);
+void preload_cis_cache(socket_info_t *s);
+int get_first_tuple(client_handle_t handle, tuple_t *tuple);
+int get_next_tuple(client_handle_t handle, tuple_t *tuple);
+int get_tuple_data(client_handle_t handle, tuple_t *tuple);
+int parse_tuple(client_handle_t handle, tuple_t *tuple, cisparse_t *parse);
+int validate_cis(client_handle_t handle, cisinfo_t *info);
+int replace_cis(client_handle_t handle, cisdump_t *cis);
+int read_tuple(client_handle_t handle, cisdata_t code, void *parse);
+
+/* In bulkmem.c */
+void retry_erase_list(struct erase_busy_t *list, u_int cause);
+int get_first_region(client_handle_t handle, region_info_t *rgn);
+int get_next_region(client_handle_t handle, region_info_t *rgn);
+int register_mtd(client_handle_t handle, mtd_reg_t *reg);
+int register_erase_queue(client_handle_t *handle, eraseq_hdr_t *header);
+int deregister_erase_queue(eraseq_handle_t eraseq);
+int check_erase_queue(eraseq_handle_t eraseq);
+int open_memory(client_handle_t *handle, open_mem_t *open);
+int close_memory(memory_handle_t handle);
+int read_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf);
+int write_memory(memory_handle_t handle, mem_op_t *req, caddr_t buf);
+int copy_memory(memory_handle_t handle, copy_op_t *req);
+
+/* In rsrc_mgr */
+void validate_mem(int (*is_valid)(u_long), int (*do_cksum)(u_long),
+ int force_low);
+int find_io_region(ioaddr_t *base, ioaddr_t num, ioaddr_t align,
+ char *name);
+int find_mem_region(u_long *base, u_long num, u_long align,
+ int force_low, char *name);
+int try_irq(u_int Attributes, int irq, int specific);
+void undo_irq(u_int Attributes, int irq);
+int adjust_resource_info(client_handle_t handle, adjust_t *adj);
+void release_resource_db(void);
+int proc_read_io(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data);
+int proc_read_mem(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data);
+
+/* in pnp components */
+int proc_read_irq(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data);
+int check_pnp_irq(int n);
+void pnp_bios_init(void);
+void pnp_proc_init(void);
+void pnp_proc_done(void);
+void pnp_rsrc_init(void);
+void pnp_rsrc_done(void);
+
+/* in pci_fixup */
+void pci_fixup_init(void);
+void pci_fixup_done(void);
+
+#define MAX_SOCK 8
+extern socket_t sockets;
+extern socket_info_t *socket_table[MAX_SOCK];
+
+#ifdef HAS_PROC_BUS
+extern struct proc_dir_entry *proc_pccard;
+#endif
+
+#ifdef PCMCIA_DEBUG
+extern int pc_debug;
+#define DEBUG(n, args...) do { if (pc_debug>(n)) printk(KERN_DEBUG args); } while (0)
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif
+
+#endif /* _LINUX_CS_INTERNAL_H */
diff --git a/linux/pcmcia-cs/modules/ds.c b/linux/pcmcia-cs/modules/ds.c
new file mode 100644
index 0000000..f2f3341
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ds.c
@@ -0,0 +1,1039 @@
+/*======================================================================
+
+ PC Card Driver Services
+
+ ds.c 1.115 2002/10/12 19:03:44
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/ioctl.h>
+#include <linux/proc_fs.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,23))
+#include <linux/poll.h>
+#endif
+
+/*
+ * <pcmcia/cs.h> defines io_req_t which is not used in this file, but
+ * which clashes with the io_req_t needed for the Mach devices. Rename
+ * it to cardservice_io_req_t to avoid this clash.
+ */
+#define io_req_t cardservice_io_req_t
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#undef io_req_t
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("PCMCIA Driver Services " CS_RELEASE);
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) do { if (pc_debug>(n)) printk(KERN_DEBUG args); } while (0)
+static const char *version =
+"ds.c 1.115 2002/10/12 19:03:44 (David Hinds)";
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif
+
+/*====================================================================*/
+
+typedef struct driver_info_t {
+ dev_info_t dev_info;
+ int use_count, status;
+ dev_link_t *(*attach)(void);
+ void (*detach)(dev_link_t *);
+ struct driver_info_t *next;
+} driver_info_t;
+
+typedef struct socket_bind_t {
+ driver_info_t *driver;
+ u_char function;
+ dev_link_t *instance;
+ struct socket_bind_t *next;
+} socket_bind_t;
+
+/* Device user information */
+#define MAX_EVENTS 32
+#define USER_MAGIC 0x7ea4
+#define CHECK_USER(u) \
+ (((u) == NULL) || ((u)->user_magic != USER_MAGIC))
+typedef struct user_info_t {
+ u_int user_magic;
+ int event_head, event_tail;
+ event_t event[MAX_EVENTS];
+ struct user_info_t *next;
+} user_info_t;
+
+/* Socket state information */
+typedef struct socket_info_t {
+ client_handle_t handle;
+ int state;
+ user_info_t *user;
+ int req_pending, req_result;
+ wait_queue_head_t queue, request;
+ struct timer_list removal;
+ socket_bind_t *bind;
+} socket_info_t;
+
+#define SOCKET_PRESENT 0x01
+#define SOCKET_BUSY 0x02
+#define SOCKET_REMOVAL_PENDING 0x10
+
+/*====================================================================*/
+
+/* Device driver ID passed to Card Services */
+static dev_info_t dev_info = "Driver Services";
+
+/* Linked list of all registered device drivers */
+static driver_info_t *root_driver = NULL;
+
+static int sockets = 0, major_dev = -1;
+static socket_info_t *socket_table = NULL;
+
+extern struct proc_dir_entry *proc_pccard;
+
+/* We use this to distinguish in-kernel from modular drivers */
+static int init_status = 1;
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ Register_pccard_driver() and unregister_pccard_driver() are used
+ to tell Driver Services that a PC Card client driver is available
+ to be bound to sockets.
+
+======================================================================*/
+
+int register_pccard_driver(dev_info_t *dev_info,
+ dev_link_t *(*attach)(void),
+ void (*detach)(dev_link_t *))
+{
+ driver_info_t *driver;
+ socket_bind_t *b;
+ int i;
+
+ DEBUG(0, "ds: register_pccard_driver('%s')\n", (char *)dev_info);
+ for (driver = root_driver; driver; driver = driver->next)
+ if (strncmp((char *)dev_info, (char *)driver->dev_info,
+ DEV_NAME_LEN) == 0)
+ break;
+ if (!driver) {
+ driver = kmalloc(sizeof(driver_info_t), GFP_KERNEL);
+ if (!driver) return -ENOMEM;
+ strncpy(driver->dev_info, (char *)dev_info, DEV_NAME_LEN);
+ driver->use_count = 0;
+ driver->status = init_status;
+ driver->next = root_driver;
+ root_driver = driver;
+ }
+
+ driver->attach = attach;
+ driver->detach = detach;
+ if (driver->use_count == 0) return 0;
+
+ /* Instantiate any already-bound devices */
+ for (i = 0; i < sockets; i++)
+ for (b = socket_table[i].bind; b; b = b->next) {
+ if (b->driver != driver) continue;
+ b->instance = driver->attach();
+ if (b->instance == NULL)
+ printk(KERN_NOTICE "ds: unable to create instance "
+ "of '%s'!\n", driver->dev_info);
+ }
+
+ return 0;
+} /* register_pccard_driver */
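+
+/* Illustrative sketch (not part of the original file): how a client
+ module announces itself to Driver Services. The example_* names
+ stand in for a real driver's attach/detach entry points. */
+#if 0
+static dev_info_t example_info = "example_cs";
+
+static dev_link_t *example_attach(void);
+static void example_detach(dev_link_t *);
+
+static int __init init_example_cs(void)
+{
+ return register_pccard_driver(&example_info, &example_attach,
+ &example_detach);
+}
+
+static void __exit exit_example_cs(void)
+{
+ unregister_pccard_driver(&example_info);
+}
+#endif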
+
+/*====================================================================*/
+
+int unregister_pccard_driver(dev_info_t *dev_info)
+{
+ driver_info_t *target, **d = &root_driver;
+ socket_bind_t *b;
+ int i;
+
+ DEBUG(0, "ds: unregister_pccard_driver('%s')\n",
+ (char *)dev_info);
+ while ((*d) && (strncmp((*d)->dev_info, (char *)dev_info,
+ DEV_NAME_LEN) != 0))
+ d = &(*d)->next;
+ if (*d == NULL)
+ return -ENODEV;
+
+ target = *d;
+ if (target->use_count == 0) {
+ *d = target->next;
+ kfree(target);
+ } else {
+ /* Blank out any left-over device instances */
+ target->attach = NULL; target->detach = NULL;
+ for (i = 0; i < sockets; i++)
+ for (b = socket_table[i].bind; b; b = b->next)
+ if (b->driver == target) b->instance = NULL;
+ }
+ return 0;
+} /* unregister_pccard_driver */
+
+/*====================================================================*/
+
+#ifdef HAS_PROC_BUS
+static int proc_read_drivers(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ driver_info_t *d;
+ char *p = buf;
+ for (d = root_driver; d; d = d->next)
+ p += sprintf(p, "%-24.24s %d %d\n", d->dev_info,
+ d->status, d->use_count);
+ return (p - buf);
+}
+#endif
+
+/*======================================================================
+
+ These manage a ring buffer of events pending for one user process
+
+======================================================================*/
+
+static int queue_empty(user_info_t *user)
+{
+ return (user->event_head == user->event_tail);
+}
+
+static event_t get_queued_event(user_info_t *user)
+{
+ user->event_tail = (user->event_tail+1) % MAX_EVENTS;
+ return user->event[user->event_tail];
+}
+
+static void queue_event(user_info_t *user, event_t event)
+{
+ user->event_head = (user->event_head+1) % MAX_EVENTS;
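+ /* ring full: advance the tail so the oldest event is dropped */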
+ if (user->event_head == user->event_tail)
+ user->event_tail = (user->event_tail+1) % MAX_EVENTS;
+ user->event[user->event_head] = event;
+}
+
+static void handle_event(socket_info_t *s, event_t event)
+{
+ user_info_t *user;
+ for (user = s->user; user; user = user->next)
+ queue_event(user, event);
+ wake_up_interruptible(&s->queue);
+}
+
+static int handle_request(socket_info_t *s, event_t event)
+{
+ if (s->req_pending != 0)
+ return CS_IN_USE;
+ if (s->state & SOCKET_BUSY)
+ s->req_pending = 1;
+ handle_event(s, event);
+ if (s->req_pending > 0) {
+ interruptible_sleep_on(&s->request);
+ if (signal_pending(current))
+ return CS_IN_USE;
+ else
+ return s->req_result;
+ }
+ return CS_SUCCESS;
+}
+
+static void handle_removal(u_long sn)
+{
+ socket_info_t *s = &socket_table[sn];
+ handle_event(s, CS_EVENT_CARD_REMOVAL);
+ s->state &= ~SOCKET_REMOVAL_PENDING;
+}
+
+/*======================================================================
+
+ The card status event handler.
+
+======================================================================*/
+
+static int ds_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ socket_info_t *s;
+ int i;
+
+ DEBUG(1, "ds: ds_event(0x%06x, %d, 0x%p)\n",
+ event, priority, args->client_handle);
+ s = args->client_data;
+ i = s - socket_table;
+
+ switch (event) {
+
+ case CS_EVENT_CARD_REMOVAL:
+ s->state &= ~SOCKET_PRESENT;
+ if (!(s->state & SOCKET_REMOVAL_PENDING)) {
+ s->state |= SOCKET_REMOVAL_PENDING;
+ s->removal.expires = jiffies + HZ/10;
+ add_timer(&s->removal);
+ }
+ break;
+
+ case CS_EVENT_CARD_INSERTION:
+ s->state |= SOCKET_PRESENT;
+ handle_event(s, event);
+ break;
+
+ case CS_EVENT_EJECTION_REQUEST:
+ return handle_request(s, event);
+ break;
+
+ default:
+ handle_event(s, event);
+ break;
+ }
+
+ return 0;
+} /* ds_event */
+
+/*======================================================================
+
+ bind_mtd() connects a memory region with an MTD client.
+
+======================================================================*/
+
+static int bind_mtd(int i, mtd_info_t *mtd_info)
+{
+ mtd_bind_t bind_req;
+ int ret;
+
+ bind_req.dev_info = &mtd_info->dev_info;
+ bind_req.Attributes = mtd_info->Attributes;
+ bind_req.Socket = i;
+ bind_req.CardOffset = mtd_info->CardOffset;
+ ret = CardServices(BindMTD, &bind_req);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, BindMTD, ret);
+ printk(KERN_NOTICE "ds: unable to bind MTD '%s' to socket %d"
+ " offset 0x%x\n",
+ (char *)bind_req.dev_info, i, bind_req.CardOffset);
+ return -ENODEV;
+ }
+ return 0;
+} /* bind_mtd */
+
+/*======================================================================
+
+ bind_request() connects a socket to a particular client driver.
+ It looks up the specified device ID in the list of registered
+ drivers, binds it to the socket, and tries to create an instance
+ of the device. unbind_request() deletes a driver instance.
+
+======================================================================*/
+
+static int bind_request(int i, bind_info_t *bind_info)
+{
+ struct driver_info_t *driver;
+ socket_bind_t *b;
+ bind_req_t bind_req;
+ socket_info_t *s = &socket_table[i];
+ int ret;
+
+ DEBUG(2, "bind_request(%d, '%s')\n", i,
+ (char *)bind_info->dev_info);
+ for (driver = root_driver; driver; driver = driver->next)
+ if (strcmp((char *)driver->dev_info,
+ (char *)bind_info->dev_info) == 0)
+ break;
+ if (driver == NULL) {
+ driver = kmalloc(sizeof(driver_info_t), GFP_KERNEL);
+ if (!driver) return -ENOMEM;
+ strncpy(driver->dev_info, bind_info->dev_info, DEV_NAME_LEN);
+ driver->use_count = 0;
+ driver->next = root_driver;
+ driver->attach = NULL; driver->detach = NULL;
+ root_driver = driver;
+ }
+
+ for (b = s->bind; b; b = b->next)
+ if ((driver == b->driver) &&
+ (bind_info->function == b->function))
+ break;
+ if (b != NULL) {
+ bind_info->instance = b->instance;
+ return -EBUSY;
+ }
+ b = kmalloc(sizeof(socket_bind_t), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ bind_req.Socket = i;
+ bind_req.Function = bind_info->function;
+ bind_req.dev_info = &driver->dev_info;
+ ret = CardServices(BindDevice, &bind_req);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, BindDevice, ret);
+ printk(KERN_NOTICE "ds: unable to bind '%s' to socket %d\n",
+ (char *)bind_info->dev_info, i);
+ kfree(b);
+ return -ENODEV;
+ }
+
+ /* Add binding to list for this socket */
+ driver->use_count++;
+ b->driver = driver;
+ b->function = bind_info->function;
+ b->instance = NULL;
+ b->next = s->bind;
+ s->bind = b;
+
+ if (driver->attach) {
+ b->instance = driver->attach();
+ if (b->instance == NULL) {
+ printk(KERN_NOTICE "ds: unable to create instance "
+ "of '%s'!\n", (char *)bind_info->dev_info);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+} /* bind_request */
+
+/*====================================================================*/
+
+static int get_device_info(int i, bind_info_t *bind_info, int first)
+{
+ socket_info_t *s = &socket_table[i];
+ socket_bind_t *b;
+ dev_node_t *node;
+
+ for (b = s->bind; b; b = b->next)
+ if ((strcmp((char *)b->driver->dev_info,
+ (char *)bind_info->dev_info) == 0) &&
+ (b->function == bind_info->function))
+ break;
+ if (b == NULL) return -ENODEV;
+ if ((b->instance == NULL) ||
+ (b->instance->state & DEV_CONFIG_PENDING))
+ return -EAGAIN;
+ if (first)
+ node = b->instance->dev;
+ else
+ for (node = b->instance->dev; node; node = node->next)
+ if (node == bind_info->next) break;
+ if (node == NULL) return -ENODEV;
+
+ strncpy(bind_info->name, node->dev_name, DEV_NAME_LEN);
+ bind_info->name[DEV_NAME_LEN-1] = '\0';
+ bind_info->major = node->major;
+ bind_info->minor = node->minor;
+ bind_info->next = node->next;
+
+ return 0;
+} /* get_device_info */
+
+/*====================================================================*/
+
+static int unbind_request(int i, bind_info_t *bind_info)
+{
+ socket_info_t *s = &socket_table[i];
+ socket_bind_t **b, *c;
+
+ DEBUG(2, "unbind_request(%d, '%s')\n", i,
+ (char *)bind_info->dev_info);
+ for (b = &s->bind; *b; b = &(*b)->next)
+ if ((strcmp((char *)(*b)->driver->dev_info,
+ (char *)bind_info->dev_info) == 0) &&
+ ((*b)->function == bind_info->function))
+ break;
+ if (*b == NULL)
+ return -ENODEV;
+
+ c = *b;
+ c->driver->use_count--;
+ if (c->driver->detach) {
+ if (c->instance)
+ c->driver->detach(c->instance);
+ } else {
+ if (c->driver->use_count == 0) {
+ driver_info_t **d;
+ for (d = &root_driver; *d; d = &((*d)->next))
+ if (c->driver == *d) break;
+ *d = (*d)->next;
+ kfree(c->driver);
+ }
+ }
+ *b = c->next;
+ kfree(c);
+
+ return 0;
+} /* unbind_request */
+
+/*======================================================================
+
+ The user-mode PC Card device interface
+
+======================================================================*/
+
+/* Disable all the ds filesystem operations. */
+#ifndef MACH
+
+static int ds_open(struct inode *inode, struct file *file)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(0, "ds_open(socket %d)\n", i);
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ s = &socket_table[i];
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ if (s->state & SOCKET_BUSY)
+ return -EBUSY;
+ else
+ s->state |= SOCKET_BUSY;
+ }
+
+ MOD_INC_USE_COUNT;
+ user = kmalloc(sizeof(user_info_t), GFP_KERNEL);
+ if (!user) {
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+ user->event_tail = user->event_head = 0;
+ user->next = s->user;
+ user->user_magic = USER_MAGIC;
+ s->user = user;
+ file->private_data = user;
+
+ if (s->state & SOCKET_PRESENT)
+ queue_event(user, CS_EVENT_CARD_INSERTION);
+ return 0;
+} /* ds_open */
+
+/*====================================================================*/
+
+static FS_RELEASE_T ds_release(struct inode *inode, struct file *file)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ user_info_t *user, **link;
+
+ DEBUG(0, "ds_release(socket %d)\n", i);
+ if ((i >= sockets) || (sockets == 0))
+ return (FS_RELEASE_T)0;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return (FS_RELEASE_T)0;
+
+ /* Unlink user data structure */
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+ s->state &= ~SOCKET_BUSY;
+ file->private_data = NULL;
+ for (link = &s->user; *link; link = &(*link)->next)
+ if (*link == user) break;
+ if (*link == NULL)
+ return (FS_RELEASE_T)0;
+ *link = user->next;
+ user->user_magic = 0;
+ kfree(user);
+
+ MOD_DEC_USE_COUNT;
+ return (FS_RELEASE_T)0;
+} /* ds_release */
+
+/*====================================================================*/
+
+static ssize_t ds_read FOPS(struct inode *inode,
+ struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ socket_t i = MINOR(F_INODE(file)->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_read(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ if (count < 4)
+ return -EINVAL;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return -EIO;
+
+ if (queue_empty(user)) {
+ interruptible_sleep_on(&s->queue);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ put_user(get_queued_event(user), (int *)buf);
+ return 4;
+} /* ds_read */
+
+/*====================================================================*/
+
+static ssize_t ds_write FOPS(struct inode *inode,
+ struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ socket_t i = MINOR(F_INODE(file)->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_write(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ if (count != 4)
+ return -EINVAL;
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EBADF;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return -EIO;
+
+ if (s->req_pending) {
+ s->req_pending--;
+ get_user(s->req_result, (int *)buf);
+ if ((s->req_result != 0) || (s->req_pending == 0))
+ wake_up_interruptible(&s->request);
+ } else
+ return -EIO;
+
+ return 4;
+} /* ds_write */
+
+/*====================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,1,23))
+
+static int ds_select(struct inode *inode, struct file *file,
+ int sel_type, select_table *wait)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_select(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return -EIO;
+ if (sel_type != SEL_IN)
+ return 0;
+ if (!queue_empty(user))
+ return 1;
+ select_wait(&s->queue, wait);
+ return 0;
+} /* ds_select */
+
+#else
+
+static u_int ds_poll(struct file *file, poll_table *wait)
+{
+ socket_t i = MINOR(F_INODE(file)->i_rdev);
+ socket_info_t *s;
+ user_info_t *user;
+
+ DEBUG(2, "ds_poll(socket %d)\n", i);
+
+ if ((i >= sockets) || (sockets == 0))
+ return POLLERR;
+ s = &socket_table[i];
+ user = file->private_data;
+ if (CHECK_USER(user))
+ return POLLERR;
+ POLL_WAIT(file, &s->queue, wait);
+ if (!queue_empty(user))
+ return POLLIN | POLLRDNORM;
+ return 0;
+} /* ds_poll */
+
+#endif
+
+/*====================================================================*/
+
+#endif /* !defined(MACH) */
+
+static int ds_ioctl(struct inode * inode, struct file * file,
+ u_int cmd, u_long arg)
+{
+ socket_t i = MINOR(inode->i_rdev);
+ socket_info_t *s;
+ u_int size;
+ int ret, err;
+ ds_ioctl_arg_t buf;
+
+ DEBUG(2, "ds_ioctl(socket %d, %#x, %#lx)\n", i, cmd, arg);
+
+ if ((i >= sockets) || (sockets == 0))
+ return -ENODEV;
+ s = &socket_table[i];
+
+ size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
+ if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL;
+
+ /* Permission check */
+ if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+#ifndef MACH
+ if (cmd & IOC_IN) {
+ err = verify_area(VERIFY_READ, (char *)arg, size);
+ if (err) {
+ DEBUG(3, "ds_ioctl(): verify_read = %d\n", err);
+ return err;
+ }
+ }
+ if (cmd & IOC_OUT) {
+ err = verify_area(VERIFY_WRITE, (char *)arg, size);
+ if (err) {
+ DEBUG(3, "ds_ioctl(): verify_write = %d\n", err);
+ return err;
+ }
+ }
+#endif
+
+ err = ret = 0;
+
+#ifndef MACH
+ if (cmd & IOC_IN) copy_from_user((char *)&buf, (char *)arg, size);
+#else
+ if (cmd & IOC_IN) memcpy((char *) &buf, (char *) arg, size);
+#endif
+
+ switch (cmd) {
+ case DS_ADJUST_RESOURCE_INFO:
+ ret = CardServices(AdjustResourceInfo, s->handle, &buf.adjust);
+ break;
+ case DS_GET_CARD_SERVICES_INFO:
+ ret = CardServices(GetCardServicesInfo, &buf.servinfo);
+ break;
+ case DS_GET_CONFIGURATION_INFO:
+ ret = CardServices(GetConfigurationInfo, s->handle, &buf.config);
+ break;
+ case DS_GET_FIRST_TUPLE:
+ ret = CardServices(GetFirstTuple, s->handle, &buf.tuple);
+ break;
+ case DS_GET_NEXT_TUPLE:
+ ret = CardServices(GetNextTuple, s->handle, &buf.tuple);
+ break;
+ case DS_GET_TUPLE_DATA:
+ buf.tuple.TupleData = buf.tuple_parse.data;
+ buf.tuple.TupleDataMax = sizeof(buf.tuple_parse.data);
+ ret = CardServices(GetTupleData, s->handle, &buf.tuple);
+ break;
+ case DS_PARSE_TUPLE:
+ buf.tuple.TupleData = buf.tuple_parse.data;
+ ret = CardServices(ParseTuple, s->handle, &buf.tuple,
+ &buf.tuple_parse.parse);
+ break;
+ case DS_RESET_CARD:
+ ret = CardServices(ResetCard, s->handle, NULL);
+ break;
+ case DS_GET_STATUS:
+ ret = CardServices(GetStatus, s->handle, &buf.status);
+ break;
+ case DS_VALIDATE_CIS:
+ ret = CardServices(ValidateCIS, s->handle, &buf.cisinfo);
+ break;
+ case DS_SUSPEND_CARD:
+ ret = CardServices(SuspendCard, s->handle, NULL);
+ break;
+ case DS_RESUME_CARD:
+ ret = CardServices(ResumeCard, s->handle, NULL);
+ break;
+ case DS_EJECT_CARD:
+ ret = CardServices(EjectCard, s->handle, NULL);
+ break;
+ case DS_INSERT_CARD:
+ ret = CardServices(InsertCard, s->handle, NULL);
+ break;
+ case DS_ACCESS_CONFIGURATION_REGISTER:
+ if ((buf.conf_reg.Action == CS_WRITE) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = CardServices(AccessConfigurationRegister, s->handle,
+ &buf.conf_reg);
+ break;
+ case DS_GET_FIRST_REGION:
+ ret = CardServices(GetFirstRegion, s->handle, &buf.region);
+ break;
+ case DS_GET_NEXT_REGION:
+ ret = CardServices(GetNextRegion, s->handle, &buf.region);
+ break;
+ case DS_GET_FIRST_WINDOW:
+ buf.win_info.handle = (window_handle_t)s->handle;
+ ret = CardServices(GetFirstWindow, &buf.win_info.handle,
+ &buf.win_info.window);
+ break;
+ case DS_GET_NEXT_WINDOW:
+ ret = CardServices(GetNextWindow, &buf.win_info.handle,
+ &buf.win_info.window);
+ break;
+ case DS_GET_MEM_PAGE:
+ ret = CardServices(GetMemPage, buf.win_info.handle,
+ &buf.win_info.map);
+ break;
+ case DS_REPLACE_CIS:
+ ret = CardServices(ReplaceCIS, s->handle, &buf.cisdump);
+ break;
+ case DS_BIND_REQUEST:
+ if (!capable(CAP_SYS_ADMIN)) return -EPERM;
+ err = bind_request(i, &buf.bind_info);
+ break;
+ case DS_GET_DEVICE_INFO:
+ err = get_device_info(i, &buf.bind_info, 1);
+ break;
+ case DS_GET_NEXT_DEVICE:
+ err = get_device_info(i, &buf.bind_info, 0);
+ break;
+ case DS_UNBIND_REQUEST:
+ err = unbind_request(i, &buf.bind_info);
+ break;
+ case DS_BIND_MTD:
+ if (!capable(CAP_SYS_ADMIN)) return -EPERM;
+ err = bind_mtd(i, &buf.mtd_info);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if ((err == 0) && (ret != CS_SUCCESS)) {
+ DEBUG(2, "ds_ioctl: ret = %d\n", ret);
+ switch (ret) {
+ case CS_BAD_SOCKET: case CS_NO_CARD:
+ err = -ENODEV; break;
+ case CS_BAD_ARGS: case CS_BAD_ATTRIBUTE: case CS_BAD_IRQ:
+ case CS_BAD_TUPLE:
+ err = -EINVAL; break;
+ case CS_IN_USE:
+ err = -EBUSY; break;
+ case CS_OUT_OF_RESOURCE:
+ err = -ENOSPC; break;
+ case CS_NO_MORE_ITEMS:
+ err = -ENODATA; break;
+ case CS_UNSUPPORTED_FUNCTION:
+ err = -ENOSYS; break;
+ default:
+ err = -EIO; break;
+ }
+ }
+
+#ifndef MACH
+ if (cmd & IOC_OUT) copy_to_user((char *)arg, (char *)&buf, size);
+#else
+ if (cmd & IOC_OUT) memcpy((char *) arg, (char *) &buf, size);
+#endif
+
+ return err;
+} /* ds_ioctl */
+
+/*====================================================================*/
+
+#ifndef MACH
+
+static struct file_operations ds_fops = {
+ open: ds_open,
+ release: ds_release,
+ ioctl: ds_ioctl,
+ read: ds_read,
+ write: ds_write,
+#if (LINUX_VERSION_CODE < VERSION(2,1,23))
+ select: ds_select
+#else
+ poll: ds_poll
+#endif
+};
+
+#if (LINUX_VERSION_CODE <= VERSION(2,1,17))
+
+#undef CONFIG_MODVERSIONS
+static struct symbol_table ds_symtab = {
+#include <linux/symtab_begin.h>
+#undef X
+#define X(sym) { (void *)&sym, SYMBOL_NAME_STR(sym) }
+ X(register_pccard_driver),
+ X(unregister_pccard_driver),
+#include <linux/symtab_end.h>
+};
+
+#else
+
+EXPORT_SYMBOL(register_pccard_driver);
+EXPORT_SYMBOL(unregister_pccard_driver);
+
+#endif
+
+#endif /* !defined(MACH) */
+
+/*====================================================================*/
+
+int __init init_pcmcia_ds(void)
+{
+ client_reg_t client_reg;
+ servinfo_t serv;
+ bind_req_t bind;
+ socket_info_t *s;
+ int i, ret;
+
+ DEBUG(0, "%s\n", version);
+
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "ds: Card Services release does not match!\n");
+ return -EINVAL;
+ }
+ if (serv.Count == 0) {
+ printk(KERN_NOTICE "ds: no socket drivers\n");
+ return -1;
+ }
+
+ sockets = serv.Count;
+ socket_table = kmalloc(sockets*sizeof(socket_info_t), GFP_KERNEL);
+ if (!socket_table) return -1;
+ for (i = 0, s = socket_table; i < sockets; i++, s++) {
+ s->state = 0;
+ s->user = NULL;
+ s->req_pending = 0;
+ init_waitqueue_head(&s->queue);
+ init_waitqueue_head(&s->request);
+ s->handle = NULL;
+ init_timer(&s->removal);
+ s->removal.data = i;
+ s->removal.function = &handle_removal;
+ s->bind = NULL;
+ }
+
+ /* Set up hotline to Card Services */
+ client_reg.dev_info = bind.dev_info = &dev_info;
+ client_reg.Attributes = INFO_MASTER_CLIENT;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_EJECTION_REQUEST | CS_EVENT_INSERTION_REQUEST |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &ds_event;
+ client_reg.Version = 0x0210;
+ for (i = 0; i < sockets; i++) {
+ bind.Socket = i;
+ bind.Function = BIND_FN_ALL;
+ ret = CardServices(BindDevice, &bind);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, BindDevice, ret);
+ break;
+ }
+ client_reg.event_callback_args.client_data = &socket_table[i];
+ ret = CardServices(RegisterClient, &socket_table[i].handle,
+ &client_reg);
+ if (ret != CS_SUCCESS) {
+ cs_error(NULL, RegisterClient, ret);
+ break;
+ }
+ }
+
+#ifndef MACH
+ /* Set up character device for user mode clients */
+ i = register_chrdev(0, "pcmcia", &ds_fops);
+ if (i == -EBUSY)
+ printk(KERN_NOTICE "unable to find a free device # for "
+ "Driver Services\n");
+ else
+ major_dev = i;
+ register_symtab(&ds_symtab);
+#endif
+
+#ifdef HAS_PROC_BUS
+ if (proc_pccard)
+ create_proc_read_entry("drivers", 0, proc_pccard,
+ proc_read_drivers, NULL);
+ init_status = 0;
+#endif
+ return 0;
+}
+
+#ifdef MODULE
+
+int __init init_module(void)
+{
+ return init_pcmcia_ds();
+}
+
+void __exit cleanup_module(void)
+{
+ int i;
+#ifdef HAS_PROC_BUS
+ if (proc_pccard)
+ remove_proc_entry("drivers", proc_pccard);
+#endif
+ if (major_dev != -1)
+ unregister_chrdev(major_dev, "pcmcia");
+ for (i = 0; i < sockets; i++)
+ CardServices(DeregisterClient, socket_table[i].handle);
+ sockets = 0;
+ kfree(socket_table);
+}
+
+#endif
+
+/*====================================================================*/
+
+/* Include the interface glue code to GNU Mach. */
+#include "../glue/ds.c"
+
+/*====================================================================*/
diff --git a/linux/pcmcia-cs/modules/ene.h b/linux/pcmcia-cs/modules/ene.h
new file mode 100644
index 0000000..6b9b18b
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ene.h
@@ -0,0 +1,59 @@
+/*
+ * ene.h 1.2 2001/08/24 12:15:33
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_ENE_H
+#define _LINUX_ENE_H
+
+#ifndef PCI_VENDOR_ID_ENE
+#define PCI_VENDOR_ID_ENE 0x1524
+#endif
+
+#ifndef PCI_DEVICE_ID_ENE_1211
+#define PCI_DEVICE_ID_ENE_1211 0x1211
+#endif
+#ifndef PCI_DEVICE_ID_ENE_1225
+#define PCI_DEVICE_ID_ENE_1225 0x1225
+#endif
+#ifndef PCI_DEVICE_ID_ENE_1410
+#define PCI_DEVICE_ID_ENE_1410 0x1410
+#endif
+#ifndef PCI_DEVICE_ID_ENE_1420
+#define PCI_DEVICE_ID_ENE_1420 0x1420
+#endif
+
+#define ENE_PCIC_ID \
+ IS_ENE1211, IS_ENE1225, IS_ENE1410, IS_ENE1420
+
+#define ENE_PCIC_INFO \
+ { "ENE 1211", IS_TI|IS_CARDBUS, ID(ENE, 1211) }, \
+ { "ENE 1225", IS_TI|IS_CARDBUS, ID(ENE, 1225) }, \
+ { "ENE 1410", IS_TI|IS_CARDBUS, ID(ENE, 1410) }, \
+ { "ENE 1420", IS_TI|IS_CARDBUS, ID(ENE, 1420) }
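+
+/* These macros are spliced into the pcic_id enum and the pcic[] table in
+   i82365.c; with the ID() helper defined there, the first ENE_PCIC_INFO
+   entry expands to
+   { "ENE 1211", IS_TI|IS_CARDBUS, PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211 }. */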
+
+#endif /* _LINUX_ENE_H */
diff --git a/linux/pcmcia-cs/modules/i82365.c b/linux/pcmcia-cs/modules/i82365.c
new file mode 100644
index 0000000..17ddf66
--- /dev/null
+++ b/linux/pcmcia-cs/modules/i82365.c
@@ -0,0 +1,2588 @@
+/*======================================================================
+
+ Device driver for Intel 82365 and compatible PC Card controllers,
+ and Yenta-compatible PCI-to-CardBus controllers.
+
+ i82365.c 1.358 2003/09/13 17:34:01
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+
+/* ISA-bus controllers */
+#include "i82365.h"
+#include "cirrus.h"
+#include "vg468.h"
+#include "ricoh.h"
+#include "o2micro.h"
+
+/* PCI-bus controllers */
+#include "yenta.h"
+#include "ti113x.h"
+#include "smc34c90.h"
+#include "topic.h"
+#include "ene.h"
+
+/*====================================================================*/
+
+/* Module parameters */
+
+MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
+MODULE_DESCRIPTION("Intel ExCA/Yenta PCMCIA socket driver");
+MODULE_LICENSE("Dual MPL/GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
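+/* e.g. INT_MODULE_PARM(do_scan, 1) expands to:
+   static int do_scan = 1; MODULE_PARM(do_scan, "i") */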
+
+/* General options */
+INT_MODULE_PARM(poll_interval, 0); /* in ticks, 0 means never */
+INT_MODULE_PARM(cycle_time, 120); /* in ns, 120 ns = 8.33 MHz */
+INT_MODULE_PARM(do_scan, 1); /* Probe free interrupts? */
+
+/* Cirrus options */
+INT_MODULE_PARM(has_dma, -1);
+INT_MODULE_PARM(has_led, -1);
+INT_MODULE_PARM(has_ring, -1);
+INT_MODULE_PARM(has_vsense, 0);
+INT_MODULE_PARM(dynamic_mode, 0);
+INT_MODULE_PARM(freq_bypass, -1);
+INT_MODULE_PARM(setup_time, -1);
+INT_MODULE_PARM(cmd_time, -1);
+INT_MODULE_PARM(recov_time, -1);
+
+#ifdef CONFIG_ISA
+INT_MODULE_PARM(i365_base, 0x3e0); /* IO address for probes */
+INT_MODULE_PARM(extra_sockets, 0); /* Probe at i365_base+2? */
+INT_MODULE_PARM(ignore, -1); /* Ignore this socket # */
+INT_MODULE_PARM(cs_irq, 0); /* card status irq */
+INT_MODULE_PARM(irq_mask, 0xffff); /* bit map of irq's to use */
+static int irq_list[16] = { -1 };
+MODULE_PARM(irq_list, "1-16i");
+INT_MODULE_PARM(async_clock, -1); /* Vadem specific */
+INT_MODULE_PARM(cable_mode, -1);
+INT_MODULE_PARM(wakeup, 0); /* Cirrus specific */
+#endif
+
+#ifdef CONFIG_PCI
+static int pci_irq_list[8] = { 0 }; /* PCI interrupt assignments */
+MODULE_PARM(pci_irq_list, "1-8i");
+INT_MODULE_PARM(do_pci_probe, 1); /* Scan for PCI bridges? */
+INT_MODULE_PARM(fast_pci, -1);
+INT_MODULE_PARM(cb_write_post, -1);
+INT_MODULE_PARM(irq_mode, -1); /* Override BIOS routing? */
+INT_MODULE_PARM(hold_time, -1); /* Ricoh specific */
+INT_MODULE_PARM(p2cclk, -1); /* TI specific */
+#endif
+
+#if defined(CONFIG_ISA) && defined(CONFIG_PCI)
+INT_MODULE_PARM(pci_csc, 1); /* PCI card status irqs? */
+INT_MODULE_PARM(pci_int, 1); /* PCI IO card irqs? */
+#elif defined(CONFIG_ISA) && !defined(CONFIG_PCI)
+#define pci_csc 0
+#define pci_int 0
+#elif !defined(CONFIG_ISA) && defined(CONFIG_PCI)
+#define pci_csc 0
+#define pci_int 1 /* We must use PCI irq's */
+#else
+#error "No bus architectures defined!"
+#endif
+
+#ifdef PCMCIA_DEBUG
+INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static const char *version =
+"i82365.c 1.358 2003/09/13 17:34:01 (David Hinds)";
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif
+
+/*====================================================================*/
+
+typedef struct socket_info_t {
+ u_short type, flags;
+ socket_cap_t cap;
+ ioaddr_t ioaddr;
+ u_short psock;
+ u_char cs_irq, intr;
+ void (*handler)(void *info, u_int events);
+ void *info;
+#ifdef HAS_PROC_BUS
+ struct proc_dir_entry *proc;
+#endif
+ u_char pci_irq_code;
+#ifdef CONFIG_PCI
+ u_short vendor, device;
+ u_char revision, bus, devfn;
+ u_short bcr;
+ u_char pci_lat, cb_lat, sub_bus, cache;
+ u_int cb_phys;
+ char *cb_virt;
+#endif
+ union {
+ cirrus_state_t cirrus;
+ vg46x_state_t vg46x;
+ o2micro_state_t o2micro;
+ ti113x_state_t ti113x;
+ ricoh_state_t ricoh;
+ topic_state_t topic;
+ } state;
+} socket_info_t;
+
+/* Where we keep track of our sockets... */
+static int sockets = 0;
+static socket_info_t socket[8] = {
+ { 0, }, /* ... */
+};
+
+#ifdef CONFIG_ISA
+static int grab_irq;
+#ifdef __SMP__
+static spinlock_t isa_lock = SPIN_LOCK_UNLOCKED;
+#endif
+#define ISA_LOCK(s, f) \
+ if (!((s)->flags & IS_CARDBUS)) spin_lock_irqsave(&isa_lock, f)
+#define ISA_UNLOCK(s, f) \
+ if (!((s)->flags & IS_CARDBUS)) spin_unlock_irqrestore(&isa_lock, f)
+#else
+#define ISA_LOCK(n, f) do { } while (0)
+#define ISA_UNLOCK(n, f) do { } while (0)
+#endif
+
+static void pcic_interrupt_wrapper(u_long data);
+static struct timer_list poll_timer = {
+ function: pcic_interrupt_wrapper
+};
+
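+/* flip(v,b,f): leave v unchanged if f < 0, set bit(s) b in v if f > 0,
+   clear them if f == 0; used below to apply the -1/0/1 module options */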
+#define flip(v,b,f) (v = ((f)<0) ? v : ((f) ? ((v)|(b)) : ((v)&(~b))))
+
+/*====================================================================*/
+
+/* Some PCI shortcuts */
+
+#ifdef CONFIG_PCI
+static int pci_readb(socket_info_t *s, int r, u_char *v)
+{ return pcibios_read_config_byte(s->bus, s->devfn, r, v); }
+static int pci_writeb(socket_info_t *s, int r, u_char v)
+{ return pcibios_write_config_byte(s->bus, s->devfn, r, v); }
+static int pci_readw(socket_info_t *s, int r, u_short *v)
+{ return pcibios_read_config_word(s->bus, s->devfn, r, v); }
+static int pci_writew(socket_info_t *s, int r, u_short v)
+{ return pcibios_write_config_word(s->bus, s->devfn, r, v); }
+static int pci_readl(socket_info_t *s, int r, u_int *v)
+{ return pcibios_read_config_dword(s->bus, s->devfn, r, v); }
+static int pci_writel(socket_info_t *s, int r, u_int v)
+{ return pcibios_write_config_dword(s->bus, s->devfn, r, v); }
+#endif
+
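+/* CardBus bridges expose their socket registers through the memory-mapped
+   window at s->cb_virt; these helpers read and write that window directly */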
+#define cb_readb(s, r) readb((s)->cb_virt + (r))
+#define cb_readl(s, r) readl((s)->cb_virt + (r))
+#define cb_writeb(s, r, v) writeb(v, (s)->cb_virt + (r))
+#define cb_writel(s, r, v) writel(v, (s)->cb_virt + (r))
+
+/*====================================================================*/
+
+/* These definitions must match the pcic table! */
+typedef enum pcic_id {
+#ifdef CONFIG_ISA
+ IS_I82365A, IS_I82365B, IS_I82365DF, IS_IBM, IS_RF5Cx96,
+ IS_VLSI, IS_VG468, IS_VG469, IS_PD6710, IS_PD672X, IS_VT83C469,
+#endif
+#ifdef CONFIG_PCI
+ IS_I82092AA, IS_OM82C092G, CIRRUS_PCIC_ID, O2MICRO_PCIC_ID,
+ RICOH_PCIC_ID, SMC_PCIC_ID, TI_PCIC_ID, ENE_PCIC_ID,
+ TOPIC_PCIC_ID, IS_UNK_PCI, IS_UNK_CARDBUS
+#endif
+} pcic_id;
+
+/* Flags for classifying groups of controllers */
+#define IS_VADEM 0x0001
+#define IS_CIRRUS 0x0002
+#define IS_TI 0x0004
+#define IS_O2MICRO 0x0008
+#define IS_TOPIC 0x0020
+#define IS_RICOH 0x0040
+#define IS_UNKNOWN 0x0400
+#define IS_VG_PWR 0x0800
+#define IS_DF_PWR 0x1000
+#define IS_PCI 0x2000
+#define IS_CARDBUS 0x4000
+#define IS_ALIVE 0x8000
+
+typedef struct pcic_t {
+ char *name;
+ u_short flags;
+#ifdef CONFIG_PCI
+ u_short vendor, device;
+#endif
+} pcic_t;
+
+#define ID(a,b) PCI_VENDOR_ID_##a,PCI_DEVICE_ID_##a##_##b
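+/* e.g. ID(INTEL, 82092AA_0) expands to
+   PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82092AA_0 */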
+
+static pcic_t pcic[] = {
+#ifdef CONFIG_ISA
+ { "Intel i82365sl A step", 0 },
+ { "Intel i82365sl B step", 0 },
+ { "Intel i82365sl DF", IS_DF_PWR },
+ { "IBM Clone", 0 },
+ { "Ricoh RF5C296/396", 0 },
+ { "VLSI 82C146", 0 },
+ { "Vadem VG-468", IS_VADEM },
+ { "Vadem VG-469", IS_VADEM|IS_VG_PWR },
+ { "Cirrus PD6710", IS_CIRRUS },
+ { "Cirrus PD672x", IS_CIRRUS },
+ { "VIA VT83C469", IS_CIRRUS },
+#endif
+#ifdef CONFIG_PCI
+ { "Intel 82092AA", IS_PCI, ID(INTEL, 82092AA_0) },
+ { "Omega Micro 82C092G", IS_PCI, ID(OMEGA, 82C092G) },
+ CIRRUS_PCIC_INFO, O2MICRO_PCIC_INFO, RICOH_PCIC_INFO,
+ SMC_PCIC_INFO, TI_PCIC_INFO, ENE_PCIC_INFO, TOPIC_PCIC_INFO,
+ { "Unknown", IS_PCI|IS_UNKNOWN, 0, 0 },
+ { "Unknown", IS_CARDBUS|IS_UNKNOWN, 0, 0 }
+#endif
+};
+
+#define PCIC_COUNT (sizeof(pcic)/sizeof(pcic_t))
+
+/*====================================================================*/
+
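+/* ExCA registers are reached through an index/data port pair: write the
+   register index for the selected socket to ioaddr, then access the value
+   at ioaddr+1.  CardBus bridges alias the same registers at offset 0x800
+   of their register window, so these accessors serve both bridge types. */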
+static u_char i365_get(socket_info_t *s, u_short reg)
+{
+#ifdef CONFIG_PCI
+ if (s->cb_virt)
+ return cb_readb(s, 0x0800 + reg);
+#endif
+ outb(I365_REG(s->psock, reg), s->ioaddr);
+ return inb(s->ioaddr+1);
+}
+
+static void i365_set(socket_info_t *s, u_short reg, u_char data)
+{
+#ifdef CONFIG_PCI
+ if (s->cb_virt) {
+ cb_writeb(s, 0x0800 + reg, data);
+ return;
+ }
+#endif
+ outb(I365_REG(s->psock, reg), s->ioaddr);
+ outb(data, s->ioaddr+1);
+}
+
+static void i365_bset(socket_info_t *s, u_short reg, u_char mask)
+{
+ i365_set(s, reg, i365_get(s, reg) | mask);
+}
+
+static void i365_bclr(socket_info_t *s, u_short reg, u_char mask)
+{
+ i365_set(s, reg, i365_get(s, reg) & ~mask);
+}
+
+static void i365_bflip(socket_info_t *s, u_short reg, u_char mask, int b)
+{
+ u_char d = i365_get(s, reg);
+ i365_set(s, reg, (b) ? (d | mask) : (d & ~mask));
+}
+
+static u_short i365_get_pair(socket_info_t *s, u_short reg)
+{
+ return (i365_get(s, reg) + (i365_get(s, reg+1) << 8));
+}
+
+static void i365_set_pair(socket_info_t *s, u_short reg, u_short data)
+{
+ i365_set(s, reg, data & 0xff);
+ i365_set(s, reg+1, data >> 8);
+}
+
+/*======================================================================
+
+ Code to save and restore global state information for Cirrus
+ PD67xx controllers, and to set and report global configuration
+ options.
+
+ The VIA controllers also use these routines, as they are mostly
+ Cirrus lookalikes, without the timing registers.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+static int __init get_pci_irq(socket_info_t *s)
+{
+ u8 irq = pci_irq_list[s - socket];
+ if (!irq)
+ irq = pci_find_slot(s->bus, s->devfn)->irq;
+ if (irq >= NR_IRQS) irq = 0;
+ s->cap.pci_irq = irq;
+ return irq;
+}
+
+#endif
+
+static void __init cirrus_get_state(socket_info_t *s)
+{
+ cirrus_state_t *p = &s->state.cirrus;
+ int i;
+
+ p->misc1 = i365_get(s, PD67_MISC_CTL_1);
+ p->misc1 &= (PD67_MC1_MEDIA_ENA | PD67_MC1_INPACK_ENA);
+ p->misc2 = i365_get(s, PD67_MISC_CTL_2);
+ if (s->flags & IS_PCI)
+ p->ectl1 = pd67_ext_get(s, PD67_EXT_CTL_1);
+ for (i = 0; i < 6; i++)
+ p->timer[i] = i365_get(s, PD67_TIME_SETUP(0)+i);
+}
+
+static void cirrus_set_state(socket_info_t *s)
+{
+ cirrus_state_t *p = &s->state.cirrus;
+ u_char misc;
+ int i;
+
+ misc = i365_get(s, PD67_MISC_CTL_2);
+ i365_set(s, PD67_MISC_CTL_2, p->misc2);
+ if (misc & PD67_MC2_SUSPEND) mdelay(50);
+ misc = i365_get(s, PD67_MISC_CTL_1);
+ misc &= ~(PD67_MC1_MEDIA_ENA | PD67_MC1_INPACK_ENA);
+ i365_set(s, PD67_MISC_CTL_1, misc | p->misc1);
+ if (s->flags & IS_PCI)
+ pd67_ext_set(s, PD67_EXT_CTL_1, p->ectl1);
+ else if (has_vsense) {
+ socket_info_t *t = (s->psock) ? s : s+1;
+ pd67_ext_set(t, PD67_EXT_CTL_2, PD67_EC2_GPSTB_IOR);
+ }
+ for (i = 0; i < 6; i++)
+ i365_set(s, PD67_TIME_SETUP(0)+i, p->timer[i]);
+}
+
+#ifdef CONFIG_PCI
+static int cirrus_set_irq_mode(socket_info_t *s, int pcsc, int pint)
+{
+ flip(s->bcr, PD6832_BCR_MGMT_IRQ_ENA, !pcsc);
+ return 0;
+}
+#endif
+
+static u_int __init cirrus_set_opts(socket_info_t *s, char *buf)
+{
+ cirrus_state_t *p = &s->state.cirrus;
+ u_int mask = 0xffff;
+
+ p->misc1 |= PD67_MC1_SPKR_ENA;
+ if (has_ring == -1) has_ring = 1;
+ flip(p->misc2, PD67_MC2_IRQ15_RI, has_ring);
+ flip(p->misc2, PD67_MC2_DYNAMIC_MODE, dynamic_mode);
+ if (p->misc2 & PD67_MC2_IRQ15_RI)
+ strcat(buf, " [ring]");
+ if (p->misc2 & PD67_MC2_DYNAMIC_MODE)
+ strcat(buf, " [dyn mode]");
+ if (p->misc1 & PD67_MC1_INPACK_ENA)
+ strcat(buf, " [inpack]");
+ if (!(s->flags & (IS_PCI|IS_CARDBUS))) {
+ flip(p->misc2, PD67_MC2_FREQ_BYPASS, freq_bypass);
+ if (p->misc2 & PD67_MC2_FREQ_BYPASS)
+ strcat(buf, " [freq bypass]");
+ if (p->misc2 & PD67_MC2_IRQ15_RI)
+ mask &= ~0x8000;
+ if (has_led > 0) {
+ strcat(buf, " [led]");
+ mask &= ~0x1000;
+ }
+ if (has_dma > 0) {
+ strcat(buf, " [dma]");
+ mask &= ~0x0600;
+ }
+#ifdef CONFIG_PCI
+ } else {
+ p->misc1 &= ~PD67_MC1_MEDIA_ENA;
+ p->misc1 &= ~(PD67_MC1_PULSE_MGMT | PD67_MC1_PULSE_IRQ);
+ p->ectl1 &= ~(PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ);
+ flip(p->misc2, PD67_MC2_FAST_PCI, fast_pci);
+ if (p->misc2 & PD67_MC2_IRQ15_RI)
+ mask &= (s->type == IS_PD6730) ? ~0x0400 : ~0x8000;
+ if ((s->flags & IS_PCI) && (irq_mode == 1) && get_pci_irq(s)) {
+ /* Configure PD6729 bridge for PCI interrupts */
+ p->ectl1 |= PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ;
+ s->pci_irq_code = 3; /* PCI INTA = "irq 3" */
+ buf += strlen(buf);
+ sprintf(buf, " [pci irq %d]", s->cap.pci_irq);
+ mask = 0;
+ }
+#endif
+ }
+#ifdef CONFIG_ISA
+ if (s->type != IS_VT83C469)
+#endif
+ {
+ if (setup_time >= 0)
+ p->timer[0] = p->timer[3] = setup_time;
+ if (cmd_time > 0) {
+ p->timer[1] = cmd_time;
+ p->timer[4] = cmd_time*2+4;
+ }
+ if (p->timer[1] == 0) {
+ p->timer[1] = 6; p->timer[4] = 16;
+ if (p->timer[0] == 0)
+ p->timer[0] = p->timer[3] = 1;
+ }
+ if (recov_time >= 0)
+ p->timer[2] = p->timer[5] = recov_time;
+ buf += strlen(buf);
+ sprintf(buf, " [%d/%d/%d] [%d/%d/%d]", p->timer[0], p->timer[1],
+ p->timer[2], p->timer[3], p->timer[4], p->timer[5]);
+ }
+ return mask;
+}
+
+/*======================================================================
+
+ Code to save and restore global state information for Vadem VG468
+ and VG469 controllers, and to set and report global configuration
+ options.
+
+======================================================================*/
+
+#ifdef CONFIG_ISA
+
+static void __init vg46x_get_state(socket_info_t *s)
+{
+ vg46x_state_t *p = &s->state.vg46x;
+ p->ctl = i365_get(s, VG468_CTL);
+ if (s->type == IS_VG469)
+ p->ema = i365_get(s, VG469_EXT_MODE);
+}
+
+static void vg46x_set_state(socket_info_t *s)
+{
+ vg46x_state_t *p = &s->state.vg46x;
+ i365_set(s, VG468_CTL, p->ctl);
+ if (s->type == IS_VG469)
+ i365_set(s, VG469_EXT_MODE, p->ema);
+}
+
+static u_int __init vg46x_set_opts(socket_info_t *s, char *buf)
+{
+ vg46x_state_t *p = &s->state.vg46x;
+
+ flip(p->ctl, VG468_CTL_ASYNC, async_clock);
+ flip(p->ema, VG469_MODE_CABLE, cable_mode);
+ if (p->ctl & VG468_CTL_ASYNC)
+ strcat(buf, " [async]");
+ if (p->ctl & VG468_CTL_INPACK)
+ strcat(buf, " [inpack]");
+ if (s->type == IS_VG469) {
+ u_char vsel = i365_get(s, VG469_VSELECT);
+ if (vsel & VG469_VSEL_EXT_STAT) {
+ strcat(buf, " [ext mode]");
+ if (vsel & VG469_VSEL_EXT_BUS)
+ strcat(buf, " [isa buf]");
+ }
+ if (p->ema & VG469_MODE_CABLE)
+ strcat(buf, " [cable]");
+ if (p->ema & VG469_MODE_COMPAT)
+ strcat(buf, " [c step]");
+ }
+ return 0xffff;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for TI 1130 and
+ TI 1131 controllers, and to set and report global configuration
+ options.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+static void __init ti113x_get_state(socket_info_t *s)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ pci_readl(s, TI113X_SYSTEM_CONTROL, &p->sysctl);
+ pci_readb(s, TI113X_CARD_CONTROL, &p->cardctl);
+ pci_readb(s, TI113X_DEVICE_CONTROL, &p->devctl);
+ pci_readb(s, TI1250_DIAGNOSTIC, &p->diag);
+ pci_readl(s, TI12XX_IRQMUX, &p->irqmux);
+}
+
+static void ti113x_set_state(socket_info_t *s)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ pci_writel(s, TI113X_SYSTEM_CONTROL, p->sysctl);
+ pci_writeb(s, TI113X_CARD_CONTROL, p->cardctl);
+ pci_writeb(s, TI113X_DEVICE_CONTROL, p->devctl);
+ pci_writeb(s, TI1250_MULTIMEDIA_CTL, 0);
+ pci_writeb(s, TI1250_DIAGNOSTIC, p->diag);
+ pci_writel(s, TI12XX_IRQMUX, p->irqmux);
+ i365_set_pair(s, TI113X_IO_OFFSET(0), 0);
+ i365_set_pair(s, TI113X_IO_OFFSET(1), 0);
+}
+
+static int ti113x_set_irq_mode(socket_info_t *s, int pcsc, int pint)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ s->intr = (pcsc) ? I365_INTR_ENA : 0;
+ if (s->type <= IS_TI1131) {
+ p->cardctl &= ~(TI113X_CCR_PCI_IRQ_ENA |
+ TI113X_CCR_PCI_IREQ | TI113X_CCR_PCI_CSC);
+ if (pcsc)
+ p->cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_CSC;
+ if (pint)
+ p->cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_IREQ;
+ } else if (s->type == IS_TI1250A) {
+ p->diag &= TI1250_DIAG_PCI_CSC | TI1250_DIAG_PCI_IREQ;
+ if (pcsc)
+ p->diag |= TI1250_DIAG_PCI_CSC;
+ if (pint)
+ p->diag |= TI1250_DIAG_PCI_IREQ;
+ }
+ return 0;
+}
+
+static u_int __init ti113x_set_opts(socket_info_t *s, char *buf)
+{
+ ti113x_state_t *p = &s->state.ti113x;
+ u_int mask = 0xffff;
+ int old = (s->type <= IS_TI1131);
+
+ flip(p->cardctl, TI113X_CCR_RIENB, has_ring);
+ p->cardctl &= ~TI113X_CCR_ZVENABLE;
+ p->cardctl |= TI113X_CCR_SPKROUTEN;
+ if (!old) flip(p->sysctl, TI122X_SCR_P2CCLK, p2cclk);
+ switch (irq_mode) {
+ case 0:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->irqmux = (p->irqmux & ~0x0f) | 0x02; /* route INTA */
+ if (!(p->sysctl & TI122X_SCR_INTRTIE))
+ p->irqmux = (p->irqmux & ~0xf0) | 0x20; /* route INTB */
+ break;
+ case 1:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->devctl |= TI113X_DCR_IMODE_ISA;
+ break;
+ case 2:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->devctl |= TI113X_DCR_IMODE_SERIAL;
+ break;
+ case 3:
+ p->devctl &= ~TI113X_DCR_IMODE_MASK;
+ p->devctl |= TI12XX_DCR_IMODE_ALL_SERIAL;
+ break;
+ default:
+ /* Feeble fallback: if PCI-only but no PCI irq, try ISA */
+ if (((p->devctl & TI113X_DCR_IMODE_MASK) == 0) &&
+ (s->cap.pci_irq == 0))
+ p->devctl |= TI113X_DCR_IMODE_ISA;
+ }
+ if (p->cardctl & TI113X_CCR_RIENB) {
+ strcat(buf, " [ring]");
+ if (old) mask &= ~0x8000;
+ }
+ if (old && (p->sysctl & TI113X_SCR_CLKRUN_ENA)) {
+ if (p->sysctl & TI113X_SCR_CLKRUN_SEL) {
+ strcat(buf, " [clkrun irq 12]");
+ mask &= ~0x1000;
+ } else {
+ strcat(buf, " [clkrun irq 10]");
+ mask &= ~0x0400;
+ }
+ }
+ switch (p->devctl & TI113X_DCR_IMODE_MASK) {
+ case TI12XX_DCR_IMODE_PCI_ONLY:
+ strcat(buf, " [pci only]");
+ mask = 0;
+ break;
+ case TI113X_DCR_IMODE_ISA:
+ strcat(buf, " [isa irq]");
+ if (old) mask &= ~0x0018;
+ break;
+ case TI113X_DCR_IMODE_SERIAL:
+ strcat(buf, " [pci + serial irq]");
+ mask = 0xffff;
+ break;
+ case TI12XX_DCR_IMODE_ALL_SERIAL:
+ strcat(buf, " [serial pci & irq]");
+ mask = 0xffff;
+ break;
+ }
+ return mask;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for the Ricoh
+ RL5C4XX controllers, and to set and report global configuration
+ options.
+
+ The interrupt test doesn't seem to be reliable with Ricoh
+ bridges. It seems to depend on what type of card is in the
+ socket, and on the history of that socket, in some way that
+ doesn't show up in the current socket state.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+static void __init ricoh_get_state(socket_info_t *s)
+{
+ ricoh_state_t *p = &s->state.ricoh;
+ pci_readw(s, RL5C4XX_CONFIG, &p->config);
+ pci_readw(s, RL5C4XX_MISC, &p->misc);
+ pci_readw(s, RL5C4XX_16BIT_CTL, &p->ctl);
+ pci_readw(s, RL5C4XX_16BIT_IO_0, &p->io);
+ pci_readw(s, RL5C4XX_16BIT_MEM_0, &p->mem);
+}
+
+static void ricoh_set_state(socket_info_t *s)
+{
+ ricoh_state_t *p = &s->state.ricoh;
+ pci_writew(s, RL5C4XX_CONFIG, p->config);
+ pci_writew(s, RL5C4XX_MISC, p->misc);
+ pci_writew(s, RL5C4XX_16BIT_CTL, p->ctl);
+ pci_writew(s, RL5C4XX_16BIT_IO_0, p->io);
+ pci_writew(s, RL5C4XX_16BIT_MEM_0, p->mem);
+}
+
+static u_int __init ricoh_set_opts(socket_info_t *s, char *buf)
+{
+ ricoh_state_t *p = &s->state.ricoh;
+ u_int mask = 0xffff;
+ int old = (s->type < IS_RL5C475);
+
+ p->ctl = RL5C4XX_16CTL_IO_TIMING | RL5C4XX_16CTL_MEM_TIMING;
+ if (old)
+ p->ctl |= RL5C46X_16CTL_LEVEL_1 | RL5C46X_16CTL_LEVEL_2;
+ else
+ p->config |= RL5C4XX_CONFIG_PREFETCH;
+
+ if (setup_time >= 0) {
+ p->io = (p->io & ~RL5C4XX_SETUP_MASK) +
+ ((setup_time+1) << RL5C4XX_SETUP_SHIFT);
+ p->mem = (p->mem & ~RL5C4XX_SETUP_MASK) +
+ (setup_time << RL5C4XX_SETUP_SHIFT);
+ }
+ if (cmd_time >= 0) {
+ p->io = (p->io & ~RL5C4XX_CMD_MASK) +
+ (cmd_time << RL5C4XX_CMD_SHIFT);
+ p->mem = (p->mem & ~RL5C4XX_CMD_MASK) +
+ (cmd_time << RL5C4XX_CMD_SHIFT);
+ }
+ if (hold_time >= 0) {
+ p->io = (p->io & ~RL5C4XX_HOLD_MASK) +
+ (hold_time << RL5C4XX_HOLD_SHIFT);
+ p->mem = (p->mem & ~RL5C4XX_HOLD_MASK) +
+ (hold_time << RL5C4XX_HOLD_SHIFT);
+ }
+ if (irq_mode == 0) {
+ mask = 0;
+ p->misc &= ~RL5C47X_MISC_SRIRQ_ENA;
+ sprintf(buf, " [pci only]");
+ buf += strlen(buf);
+ } else if (!old) {
+ switch (irq_mode) {
+ case 1:
+ p->misc &= ~RL5C47X_MISC_SRIRQ_ENA; break;
+ case 2:
+ p->misc |= RL5C47X_MISC_SRIRQ_ENA; break;
+ }
+ if (p->misc & RL5C47X_MISC_SRIRQ_ENA)
+ sprintf(buf, " [serial irq]");
+ else
+ sprintf(buf, " [isa irq]");
+ buf += strlen(buf);
+ }
+ sprintf(buf, " [io %d/%d/%d] [mem %d/%d/%d]",
+ (p->io & RL5C4XX_SETUP_MASK) >> RL5C4XX_SETUP_SHIFT,
+ (p->io & RL5C4XX_CMD_MASK) >> RL5C4XX_CMD_SHIFT,
+ (p->io & RL5C4XX_HOLD_MASK) >> RL5C4XX_HOLD_SHIFT,
+ (p->mem & RL5C4XX_SETUP_MASK) >> RL5C4XX_SETUP_SHIFT,
+ (p->mem & RL5C4XX_CMD_MASK) >> RL5C4XX_CMD_SHIFT,
+ (p->mem & RL5C4XX_HOLD_MASK) >> RL5C4XX_HOLD_SHIFT);
+ return mask;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for O2Micro
+ controllers, and to set and report global configuration options.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+static void __init o2micro_get_state(socket_info_t *s)
+{
+ o2micro_state_t *p = &s->state.o2micro;
+ if ((s->revision == 0x34) || (s->revision == 0x62) ||
+ (s->type == IS_OZ6812)) {
+ p->mode_a = i365_get(s, O2_MODE_A_2);
+ p->mode_b = i365_get(s, O2_MODE_B_2);
+ } else {
+ p->mode_a = i365_get(s, O2_MODE_A);
+ p->mode_b = i365_get(s, O2_MODE_B);
+ }
+ p->mode_c = i365_get(s, O2_MODE_C);
+ p->mode_d = i365_get(s, O2_MODE_D);
+ if (s->flags & IS_CARDBUS) {
+ p->mhpg = i365_get(s, O2_MHPG_DMA);
+ p->fifo = i365_get(s, O2_FIFO_ENA);
+ p->mode_e = i365_get(s, O2_MODE_E);
+ }
+}
+
+static void o2micro_set_state(socket_info_t *s)
+{
+ o2micro_state_t *p = &s->state.o2micro;
+ if ((s->revision == 0x34) || (s->revision == 0x62) ||
+ (s->type == IS_OZ6812)) {
+ i365_set(s, O2_MODE_A_2, p->mode_a);
+ i365_set(s, O2_MODE_B_2, p->mode_b);
+ } else {
+ i365_set(s, O2_MODE_A, p->mode_a);
+ i365_set(s, O2_MODE_B, p->mode_b);
+ }
+ i365_set(s, O2_MODE_C, p->mode_c);
+ i365_set(s, O2_MODE_D, p->mode_d);
+ if (s->flags & IS_CARDBUS) {
+ i365_set(s, O2_MHPG_DMA, p->mhpg);
+ i365_set(s, O2_FIFO_ENA, p->fifo);
+ i365_set(s, O2_MODE_E, p->mode_e);
+ }
+}
+
+static u_int __init o2micro_set_opts(socket_info_t *s, char *buf)
+{
+ o2micro_state_t *p = &s->state.o2micro;
+ u_int mask = 0xffff;
+
+ p->mode_b = (p->mode_b & ~O2_MODE_B_IDENT) | O2_MODE_B_ID_CSTEP;
+ flip(p->mode_b, O2_MODE_B_IRQ15_RI, has_ring);
+ p->mode_c &= ~(O2_MODE_C_ZVIDEO | O2_MODE_C_DREQ_MASK);
+ if (s->flags & IS_CARDBUS) {
+ p->mode_d &= ~O2_MODE_D_W97_IRQ;
+ p->mode_e &= ~O2_MODE_E_MHPG_DMA;
+ p->mhpg = O2_MHPG_CINT_ENA | O2_MHPG_CSC_ENA;
+ if (s->revision == 0x34)
+ p->mode_c = 0x20;
+ } else {
+ if (p->mode_b & O2_MODE_B_IRQ15_RI) mask &= ~0x8000;
+ }
+ if (p->mode_b & O2_MODE_B_IRQ15_RI)
+ strcat(buf, " [ring]");
+ if (irq_mode != -1)
+ p->mode_d = irq_mode;
+ if (p->mode_d & O2_MODE_D_ISA_IRQ) {
+ strcat(buf, " [pci+isa]");
+ } else {
+ switch (p->mode_d & O2_MODE_D_IRQ_MODE) {
+ case O2_MODE_D_IRQ_PCPCI:
+ strcat(buf, " [pc/pci]"); break;
+ case O2_MODE_D_IRQ_PCIWAY:
+ strcat(buf, " [pci/way]"); break;
+ case O2_MODE_D_IRQ_PCI:
+ strcat(buf, " [pci only]"); mask = 0; break;
+ }
+ }
+ if (s->flags & IS_CARDBUS) {
+ if (p->mode_d & O2_MODE_D_W97_IRQ)
+ strcat(buf, " [win97]");
+ }
+ return mask;
+}
+
+#endif
+
+/*======================================================================
+
+ Code to save and restore global state information for the Toshiba
+ ToPIC 95 and 97 controllers, and to set and report global
+ configuration options.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+static void __init topic_get_state(socket_info_t *s)
+{
+ topic_state_t *p = &s->state.topic;
+ pci_readb(s, TOPIC_SLOT_CONTROL, &p->slot);
+ pci_readb(s, TOPIC_CARD_CONTROL, &p->ccr);
+ pci_readb(s, TOPIC_CARD_DETECT, &p->cdr);
+ pci_readl(s, TOPIC_REGISTER_CONTROL, &p->rcr);
+ p->fcr = i365_get(s, TOPIC_FUNCTION_CONTROL);
+}
+
+static void topic_set_state(socket_info_t *s)
+{
+ topic_state_t *p = &s->state.topic;
+ u_int state;
+ pci_writeb(s, TOPIC_SLOT_CONTROL, p->slot);
+ pci_writeb(s, TOPIC_CARD_CONTROL, p->ccr);
+ pci_writeb(s, TOPIC_CARD_DETECT, p->cdr);
+ pci_writel(s, TOPIC_REGISTER_CONTROL, p->rcr);
+ i365_set(s, TOPIC_FUNCTION_CONTROL, p->fcr);
+ state = cb_readl(s, CB_SOCKET_STATE);
+ if (!(state & CB_SS_32BIT))
+ cb_writel(s, CB_SOCKET_CONTROL, 0);
+ if (!(state & CB_SS_VSENSE))
+ cb_writel(s, CB_SOCKET_FORCE, CB_SF_CVSTEST);
+}
+
+static u_int __init topic_set_opts(socket_info_t *s, char *buf)
+{
+ topic_state_t *p = &s->state.topic;
+
+ p->slot |= TOPIC_SLOT_SLOTON|TOPIC_SLOT_SLOTEN;
+ p->slot &= ~TOPIC_SLOT_ID_LOCK;
+ p->cdr |= TOPIC_CDR_MODE_PC32;
+ p->cdr &= ~(TOPIC_CDR_SW_DETECT);
+ p->ccr |= TOPIC97_ICR_IRQSEL;
+ p->fcr |= TOPIC_FCR_3V_ENA;
+ sprintf(buf, " [slot 0x%02x] [ccr 0x%02x] [cdr 0x%02x] [rcr 0x%02x]",
+ p->slot, p->ccr, p->cdr, p->rcr);
+ return 0xffff;
+}
+
+#endif
+
+/*======================================================================
+
+ Routines to handle common CardBus options
+
+======================================================================*/
+
+/* Default settings for PCI command configuration register */
+#define CMD_DFLT (PCI_COMMAND_IO|PCI_COMMAND_MEMORY| \
+ PCI_COMMAND_MASTER|PCI_COMMAND_WAIT)
+
+#ifdef CONFIG_PCI
+
+static void __init cb_get_state(socket_info_t *s)
+{
+ pci_readb(s, PCI_CACHE_LINE_SIZE, &s->cache);
+ pci_readb(s, PCI_LATENCY_TIMER, &s->pci_lat);
+ pci_readb(s, CB_LATENCY_TIMER, &s->cb_lat);
+ pci_readb(s, CB_CARDBUS_BUS, &s->cap.cardbus);
+ pci_readb(s, CB_SUBORD_BUS, &s->sub_bus);
+ pci_readw(s, CB_BRIDGE_CONTROL, &s->bcr);
+ get_pci_irq(s);
+}
+
+static void cb_set_state(socket_info_t *s)
+{
+ pci_set_power_state(pci_find_slot(s->bus, s->devfn), 0);
+ pci_writel(s, CB_LEGACY_MODE_BASE, 0);
+ pci_writel(s, PCI_BASE_ADDRESS_0, s->cb_phys);
+ pci_writew(s, PCI_COMMAND, CMD_DFLT);
+ pci_writeb(s, PCI_CACHE_LINE_SIZE, s->cache);
+ pci_writeb(s, PCI_LATENCY_TIMER, s->pci_lat);
+ pci_writeb(s, CB_LATENCY_TIMER, s->cb_lat);
+ pci_writeb(s, CB_CARDBUS_BUS, s->cap.cardbus);
+ pci_writeb(s, CB_SUBORD_BUS, s->sub_bus);
+ pci_writew(s, CB_BRIDGE_CONTROL, s->bcr);
+}
+
+static int cb_get_irq_mode(socket_info_t *s)
+{
+ return (!(s->bcr & CB_BCR_ISA_IRQ));
+}
+
+static int cb_set_irq_mode(socket_info_t *s, int pcsc, int pint)
+{
+ flip(s->bcr, CB_BCR_ISA_IRQ, !(pint));
+ if (s->flags & IS_CIRRUS)
+ return cirrus_set_irq_mode(s, pcsc, pint);
+ else if (s->flags & IS_TI)
+ return ti113x_set_irq_mode(s, pcsc, pint);
+ /* By default, assume that we can't do ISA status irqs */
+ return (!pcsc);
+}
+
+static void __init cb_set_opts(socket_info_t *s, char *buf)
+{
+ s->bcr |= CB_BCR_WRITE_POST;
+ /* some TI1130's seem to exhibit problems with write posting */
+ if (((s->type == IS_TI1130) && (s->revision == 4) &&
+ (cb_write_post < 0)) || (cb_write_post == 0))
+ s->bcr &= ~CB_BCR_WRITE_POST;
+ if (s->cache == 0) s->cache = 8;
+ if (s->pci_lat == 0) s->pci_lat = 0xa8;
+ if (s->cb_lat == 0) s->cb_lat = 0xb0;
+ if (s->cap.pci_irq == 0)
+ strcat(buf, " [no pci irq]");
+ else
+ sprintf(buf, " [pci irq %d]", s->cap.pci_irq);
+ buf += strlen(buf);
+ if (!(s->flags & IS_TOPIC))
+ s->cap.features |= SS_CAP_PAGE_REGS;
+ sprintf(buf, " [lat %d/%d] [bus %d/%d]",
+ s->pci_lat, s->cb_lat, s->cap.cardbus, s->sub_bus);
+}
+
+#endif
+
+/*======================================================================
+
+ Power control for Cardbus controllers: used both for 16-bit and
+ Cardbus cards.
+
+======================================================================*/
+
+#ifdef CONFIG_PCI
+
+static void cb_get_power(socket_info_t *s, socket_state_t *state)
+{
+ u_int reg = cb_readl(s, CB_SOCKET_CONTROL);
+ state->Vcc = state->Vpp = 0;
+ switch (reg & CB_SC_VCC_MASK) {
+ case CB_SC_VCC_3V: state->Vcc = 33; break;
+ case CB_SC_VCC_5V: state->Vcc = 50; break;
+ }
+ switch (reg & CB_SC_VPP_MASK) {
+ case CB_SC_VPP_3V: state->Vpp = 33; break;
+ case CB_SC_VPP_5V: state->Vpp = 50; break;
+ case CB_SC_VPP_12V: state->Vpp = 120; break;
+ }
+}
+
+static int cb_set_power(socket_info_t *s, socket_state_t *state)
+{
+ u_int reg = 0;
+ /* restart card voltage detection if it seems appropriate */
+ if ((state->Vcc == 0) && (state->Vpp == 0) &&
+ !(cb_readl(s, CB_SOCKET_STATE) & CB_SS_VSENSE))
+ cb_writel(s, CB_SOCKET_FORCE, CB_SF_CVSTEST);
+ switch (state->Vcc) {
+ case 0: reg = 0; break;
+ case 33: reg = CB_SC_VCC_3V; break;
+ case 50: reg = CB_SC_VCC_5V; break;
+ default: return -EINVAL;
+ }
+ switch (state->Vpp) {
+ case 0: break;
+ case 33: reg |= CB_SC_VPP_3V; break;
+ case 50: reg |= CB_SC_VPP_5V; break;
+ case 120: reg |= CB_SC_VPP_12V; break;
+ default: return -EINVAL;
+ }
+ if (reg != cb_readl(s, CB_SOCKET_CONTROL))
+ cb_writel(s, CB_SOCKET_CONTROL, reg);
+ return 0;
+}
+
+#endif
+
+/*======================================================================
+
+ Generic routines to get and set controller options
+
+======================================================================*/
+
+static void __init get_bridge_state(socket_info_t *s)
+{
+ if (s->flags & IS_CIRRUS)
+ cirrus_get_state(s);
+#ifdef CONFIG_ISA
+ else if (s->flags & IS_VADEM)
+ vg46x_get_state(s);
+#endif
+#ifdef CONFIG_PCI
+ else if (s->flags & IS_O2MICRO)
+ o2micro_get_state(s);
+ else if (s->flags & IS_TI)
+ ti113x_get_state(s);
+ else if (s->flags & IS_RICOH)
+ ricoh_get_state(s);
+ else if (s->flags & IS_TOPIC)
+ topic_get_state(s);
+ if (s->flags & IS_CARDBUS)
+ cb_get_state(s);
+#endif
+}
+
+static void set_bridge_state(socket_info_t *s)
+{
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS)
+ cb_set_state(s);
+#endif
+ if (s->flags & IS_CIRRUS) {
+ cirrus_set_state(s);
+ } else {
+ i365_set(s, I365_GBLCTL, 0x00);
+ i365_set(s, I365_GENCTL, 0x00);
+ /* Trouble: changes timing of memory operations */
+ /* i365_bset(s, I365_ADDRWIN, I365_ADDR_MEMCS16); */
+ }
+ i365_bflip(s, I365_INTCTL, I365_INTR_ENA, s->intr);
+#ifdef CONFIG_ISA
+ if (s->flags & IS_VADEM)
+ vg46x_set_state(s);
+#endif
+#ifdef CONFIG_PCI
+ if (s->flags & IS_O2MICRO)
+ o2micro_set_state(s);
+ else if (s->flags & IS_TI)
+ ti113x_set_state(s);
+ else if (s->flags & IS_RICOH)
+ ricoh_set_state(s);
+ else if (s->flags & IS_TOPIC)
+ topic_set_state(s);
+#endif
+}
+
+static u_int __init set_bridge_opts(socket_info_t *s, u_short ns)
+{
+ u_short i;
+ u_int m = 0xffff;
+ char buf[128];
+
+ for (i = 0; i < ns; i++) {
+ if (s[i].flags & IS_ALIVE) {
+ printk(KERN_INFO " host opts [%d]: already alive!\n", i);
+ continue;
+ }
+ buf[0] = '\0';
+ get_bridge_state(s+i);
+ if (s[i].flags & IS_CIRRUS)
+ m = cirrus_set_opts(s+i, buf);
+#ifdef CONFIG_ISA
+ else if (s[i].flags & IS_VADEM)
+ m = vg46x_set_opts(s+i, buf);
+#endif
+#ifdef CONFIG_PCI
+ else if (s[i].flags & IS_O2MICRO)
+ m = o2micro_set_opts(s+i, buf);
+ else if (s[i].flags & IS_TI)
+ m = ti113x_set_opts(s+i, buf);
+ else if (s[i].flags & IS_RICOH)
+ m = ricoh_set_opts(s+i, buf);
+ else if (s[i].flags & IS_TOPIC)
+ m = topic_set_opts(s+i, buf);
+ if (s[i].flags & IS_CARDBUS)
+ cb_set_opts(s+i, buf+strlen(buf));
+#endif
+ set_bridge_state(s+i);
+ printk(KERN_INFO " host opts [%d]:%s\n", i,
+ (*buf) ? buf : " none");
+ }
+#ifdef CONFIG_PCI
+ m &= ~pci_irq_mask;
+#endif
+ return m;
+}
+
+/*======================================================================
+
+ Interrupt testing code, for ISA and PCI interrupts
+
+======================================================================*/
+
+static volatile u_int irq_hits, irq_shared;
+static volatile socket_info_t *irq_sock;
+
+static void irq_count(int irq, void *dev, struct pt_regs *regs)
+{
+ irq_hits++;
+ DEBUG(2, "-> hit on irq %d\n", irq);
+ if (!irq_shared && (irq_hits > 100)) {
+ printk(KERN_INFO " PCI irq %d seems to be wedged!\n", irq);
+ disable_irq(irq);
+ return;
+ }
+#ifdef CONFIG_PCI
+ if (irq_sock->flags & IS_CARDBUS) {
+ cb_writel(irq_sock, CB_SOCKET_EVENT, -1);
+ } else
+#endif
+ i365_get((socket_info_t *)irq_sock, I365_CSC);
+ return;
+}
+
+static u_int __init test_irq(socket_info_t *s, int irq, int pci)
+{
+ u_char csc = (pci) ? 0 : irq;
+
+#ifdef CONFIG_PNP_BIOS
+ extern int check_pnp_irq(int);
+ if (!pci && check_pnp_irq(irq)) return 1;
+#endif
+
+ DEBUG(2, " testing %s irq %d\n", pci ? "PCI" : "ISA", irq);
+ irq_sock = s; irq_shared = irq_hits = 0;
+ if (request_irq(irq, irq_count, 0, "scan", socket)) {
+ irq_shared++;
+ if (!pci || request_irq(irq, irq_count, SA_SHIRQ, "scan", socket))
+ return 1;
+ }
+ irq_hits = 0;
+
+#ifndef MACH
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/100);
+#else
+ /* TODO: Is this really what we want? */
+ {
+ unsigned long flags;
+
+ save_flags(flags);
+
+ mdelay(1);
+#endif
+
+ if (irq_hits && !irq_shared) {
+ free_irq(irq, socket);
+ DEBUG(2, " spurious hit!\n");
+ return 1;
+ }
+
+ /* Generate one interrupt */
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS) {
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ i365_set(s, I365_CSCINT, I365_CSC_STSCHG | (csc << 4));
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ cb_writel(s, CB_SOCKET_MASK, CB_SM_CSTSCHG);
+ cb_writel(s, CB_SOCKET_FORCE, CB_SE_CSTSCHG);
+ mdelay(1);
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ cb_writel(s, CB_SOCKET_MASK, 0);
+ } else
+#endif
+ {
+ i365_set(s, I365_CSCINT, I365_CSC_DETECT | (csc << 4));
+ i365_bset(s, I365_GENCTL, I365_CTL_SW_IRQ);
+ mdelay(1);
+ }
+
+#ifdef MACH
+ restore_flags(flags);
+ }
+
+#endif
+
+ free_irq(irq, socket);
+
+ /* mask all interrupts */
+ i365_set(s, I365_CSCINT, 0);
+ DEBUG(2, " hits = %d\n", irq_hits);
+
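+    /* a nonzero return means the test failed: a PCI irq should have fired
+       at least once, an ISA irq exactly once */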
+ return pci ? (irq_hits == 0) : (irq_hits != 1);
+}
+
+#ifdef CONFIG_ISA
+static int _check_irq(int irq, int flags)
+{
+#ifdef CONFIG_PNP_BIOS
+ extern int check_pnp_irq(int);
+ if ((flags != SA_SHIRQ) && check_pnp_irq(irq))
+ return -1;
+#endif
+ if (request_irq(irq, irq_count, flags, "x", irq_count) != 0)
+ return -1;
+ free_irq(irq, irq_count);
+ return 0;
+}
+
+static u_int __init isa_scan(socket_info_t *s, u_int mask0)
+{
+ u_int mask1 = 0;
+ int i;
+
+#ifdef CONFIG_PCI
+ /* Only scan if we can select ISA csc irq's */
+ if (!(s->flags & IS_CARDBUS) || (cb_set_irq_mode(s, 0, 0) == 0))
+#endif
+ if (do_scan) {
+ set_bridge_state(s);
+ i365_set(s, I365_CSCINT, 0);
+ for (i = 0; i < 16; i++)
+ if ((mask0 & (1 << i)) && (test_irq(s, i, 0) == 0))
+ mask1 |= (1 << i);
+ for (i = 0; i < 16; i++)
+ if ((mask1 & (1 << i)) && (test_irq(s, i, 0) != 0))
+ mask1 ^= (1 << i);
+ }
+
+ printk(KERN_INFO " ISA irqs (");
+ /* we trust TI bridges to do this right */
+ if (mask1 || (s->flags & IS_TI)) {
+ printk("scanned");
+ } else {
+ /* Fallback: just find interrupts that aren't in use */
+ for (i = 0; i < 16; i++)
+ if ((mask0 & (1 << i)) && (_check_irq(i, 0) == 0))
+ mask1 |= (1 << i);
+ printk("default");
+ /* If scan failed, default to polled status */
+ if (!cs_irq && (poll_interval == 0)) poll_interval = HZ;
+ }
+ printk(") = ");
+
+ for (i = 0; i < 16; i++)
+ if (mask1 & (1<<i))
+ printk("%s%d", ((mask1 & ((1<<i)-1)) ? "," : ""), i);
+ if (mask1 == 0) printk("none!");
+
+ return mask1;
+}
+#endif /* CONFIG_ISA */
+
+#ifdef CONFIG_PCI
+static int __init pci_scan(socket_info_t *s)
+{
+ int ret;
+ if ((s->flags & IS_RICOH) || !(s->flags & IS_CARDBUS) || !do_scan) {
+ /* for PCI-to-PCMCIA bridges, just check for wedged irq */
+ irq_sock = s; irq_hits = 0;
+ if (request_irq(s->cap.pci_irq, irq_count, 0, "scan", socket))
+ return 1;
+ udelay(50);
+ free_irq(s->cap.pci_irq, socket);
+ return (!irq_hits);
+ }
+ cb_set_irq_mode(s, 1, 0);
+ set_bridge_state(s);
+ i365_set(s, I365_CSCINT, 0);
+ ret = ((test_irq(s, s->cap.pci_irq, 1) == 0) &&
+ (test_irq(s, s->cap.pci_irq, 1) == 0));
+ if (!ret)
+ printk(KERN_INFO " PCI irq %d test failed\n",
+ s->cap.pci_irq);
+ return ret;
+}
+#endif /* CONFIG_PCI */
+
+/*====================================================================*/
+
+#ifdef CONFIG_ISA
+
+static int __init isa_identify(u_short port, u_short sock)
+{
+ socket_info_t *s = socket+sockets;
+ u_char val;
+ int type = -1;
+
+ /* Use the next free entry in the socket table */
+ s->ioaddr = port;
+ s->psock = sock;
+
+ /* Wake up a sleepy Cirrus controller */
+ if (wakeup) {
+ i365_bclr(s, PD67_MISC_CTL_2, PD67_MC2_SUSPEND);
+ /* Pause at least 50 ms */
+ mdelay(50);
+ }
+
+ if ((val = i365_get(s, I365_IDENT)) & 0x70)
+ return -1;
+ switch (val) {
+ case 0x82:
+ type = IS_I82365A; break;
+ case 0x83:
+ type = IS_I82365B; break;
+ case 0x84:
+ type = IS_I82365DF; break;
+ case 0x88: case 0x89: case 0x8a:
+ type = IS_IBM; break;
+ }
+
+ /* Check for Vadem VG-468 chips */
+ outb(0x0e, port);
+ outb(0x37, port);
+ i365_bset(s, VG468_MISC, VG468_MISC_VADEMREV);
+ val = i365_get(s, I365_IDENT);
+ if (val & I365_IDENT_VADEM) {
+ i365_bclr(s, VG468_MISC, VG468_MISC_VADEMREV);
+ type = ((val & 7) >= 4) ? IS_VG469 : IS_VG468;
+ }
+
+ /* Check for Ricoh chips */
+ val = i365_get(s, RF5C_CHIP_ID);
+ if ((val == RF5C_CHIP_RF5C296) || (val == RF5C_CHIP_RF5C396))
+ type = IS_RF5Cx96;
+
+ /* Check for Cirrus CL-PD67xx chips */
+ i365_set(s, PD67_CHIP_INFO, 0);
+ val = i365_get(s, PD67_CHIP_INFO);
+ if ((val & PD67_INFO_CHIP_ID) == PD67_INFO_CHIP_ID) {
+ val = i365_get(s, PD67_CHIP_INFO);
+ if ((val & PD67_INFO_CHIP_ID) == 0) {
+ type = (val & PD67_INFO_SLOTS) ? IS_PD672X : IS_PD6710;
+ i365_set(s, PD67_EXT_INDEX, 0xe5);
+ if (i365_get(s, PD67_EXT_INDEX) != 0xe5)
+ type = IS_VT83C469;
+ }
+ }
+ return type;
+} /* isa_identify */
+
+#endif
+
+/*======================================================================
+
+ See if a card is present, powered up, in IO mode, and already
+ bound to a (non PC Card) Linux driver. We leave these alone.
+
+ We make an exception for cards that seem to be serial devices.
+
+======================================================================*/
+
+static int __init is_alive(socket_info_t *s)
+{
+ u_char stat;
+ u_short start, stop;
+
+ stat = i365_get(s, I365_STATUS);
+ start = i365_get_pair(s, I365_IO(0)+I365_W_START);
+ stop = i365_get_pair(s, I365_IO(0)+I365_W_STOP);
+ if ((stop - start < 0x40) && (stop - start >= 0x07) &&
+ ((start & 0xfeef) != 0x02e8) && (start >= 0x100) &&
+ (stat & I365_CS_DETECT) && (stat & I365_CS_POWERON) &&
+ (i365_get(s, I365_INTCTL) & I365_PC_IOCARD) &&
+ (i365_get(s, I365_ADDRWIN) & I365_ENA_IO(0)) &&
+ (check_region(start, stop-start+1) != 0))
+ return 1;
+ else
+ return 0;
+}
+
+/*====================================================================*/
+
+static void __init add_socket(u_int port, int psock, int type)
+{
+ socket_info_t *s = socket+sockets;
+ s->ioaddr = port;
+ s->psock = psock;
+ s->type = type;
+ s->flags = pcic[type].flags;
+ if (is_alive(s))
+ s->flags |= IS_ALIVE;
+ sockets++;
+}
+
+static void __init add_pcic(int ns, int type)
+{
+ u_int mask = 0, i;
+ int use_pci = 0, isa_irq = 0;
+ socket_info_t *s = &socket[sockets-ns];
+
+ if (s->ioaddr > 0) request_region(s->ioaddr, 2, "i82365");
+
+ printk(KERN_INFO " %s", pcic[type].name);
+#ifdef CONFIG_PCI
+ if (s->flags & IS_UNKNOWN)
+ printk(" [%04x %04x]", s->vendor, s->device);
+ printk(" rev %02x", s->revision);
+ if (s->flags & IS_CARDBUS)
+ printk(" PCI-to-CardBus at slot %02x:%02x, mem %#08x\n",
+ s->bus, PCI_SLOT(s->devfn), s->cb_phys);
+ else if (s->flags & IS_PCI)
+ printk(" PCI-to-PCMCIA at slot %02x:%02x, port %#x\n",
+ s->bus, PCI_SLOT(s->devfn), s->ioaddr);
+ else
+#endif
+ printk(" ISA-to-PCMCIA at port %#x ofs 0x%02x\n",
+ s->ioaddr, s->psock*0x40);
+
+#ifdef CONFIG_ISA
+ if (irq_list[0] == -1)
+ mask = irq_mask;
+ else
+ for (i = mask = 0; i < 16; i++)
+ mask |= (1<<irq_list[i]);
+#endif
+ /* Set host options, build basic interrupt mask */
+ mask &= I365_ISA_IRQ_MASK & set_bridge_opts(s, ns);
+
+#ifdef CONFIG_PCI
+ /* Can we use PCI interrupts for card status changes? */
+ if (pci_csc || pci_int) {
+ for (i = 0; i < ns; i++)
+ if (!s[i].cap.pci_irq || !pci_scan(&s[i])) break;
+ use_pci = (i == ns);
+ }
+#endif
+#ifdef CONFIG_ISA
+ /* Scan, report ISA card interrupts */
+ if (mask)
+ mask = isa_scan(s, mask);
+#endif
+
+#ifdef CONFIG_PCI
+ if (!mask)
+ printk(KERN_INFO " %s card interrupts,",
+ (use_pci && pci_int) ? "PCI" : "*NO*");
+ if (use_pci && pci_csc)
+ printk(" PCI status changes\n");
+#endif
+
+#ifdef CONFIG_ISA
+ /* Poll if only two sensible interrupts available */
+ if (!(use_pci && pci_csc) && !poll_interval) {
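+	/* tmp &= tmp-1 clears the lowest set bit, so the test below is
+	   true only if (mask & 0xff20) has at most two bits set */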
+ u_int tmp = (mask & 0xff20);
+ tmp = tmp & (tmp-1);
+ if ((tmp & (tmp-1)) == 0)
+ poll_interval = HZ;
+ }
+ /* Only try an ISA cs_irq if this is the first controller */
+ if (!(use_pci && pci_csc) && !grab_irq &&
+ (cs_irq || !poll_interval)) {
+ /* Avoid irq 12 unless it is explicitly requested */
+ u_int cs_mask = mask & ((cs_irq) ? (1<<cs_irq) : ~(1<<12));
+ for (isa_irq = 15; isa_irq > 0; isa_irq--)
+ if (cs_mask & (1 << isa_irq)) break;
+ if (isa_irq) {
+ grab_irq = 1;
+ cs_irq = isa_irq;
+ printk(" status change on irq %d\n", isa_irq);
+ }
+ }
+#endif
+
+ if (!(use_pci && pci_csc) && !isa_irq) {
+ if (poll_interval == 0)
+ poll_interval = HZ;
+ printk(" polling interval = %d ms\n", poll_interval*1000/HZ);
+ }
+
+ /* Update socket interrupt information, capabilities */
+ for (i = 0; i < ns; i++) {
+ s[i].cap.features |= SS_CAP_PCCARD;
+ s[i].cap.map_size = 0x1000;
+ s[i].cap.irq_mask = mask;
+ if (!use_pci)
+ s[i].cap.pci_irq = 0;
+ s[i].cs_irq = isa_irq;
+#ifdef CONFIG_PCI
+ if (s[i].flags & IS_CARDBUS) {
+ s[i].cap.features |= SS_CAP_CARDBUS;
+ cb_set_irq_mode(s+i, pci_csc && s[i].cap.pci_irq, 0);
+ }
+#endif
+ }
+
+} /* add_pcic */
+
+/*====================================================================*/
+
+#ifdef CONFIG_PCI
+
+static int __init pci_lookup(u_int class, struct pci_dev **id,
+ u_char *bus, u_char *devfn)
+{
+ if ((*id = pci_find_class(class<<8, *id)) != NULL) {
+ *bus = (*id)->bus->number;
+ *devfn = (*id)->devfn;
+ return 0;
+ } else return -1;
+}
+
+static void __init add_pci_bridge(int type, u_short v, u_short d)
+{
+ socket_info_t *s = &socket[sockets];
+ u_int addr, ns;
+
+ pci_enable_device(pci_find_slot(s->bus, s->devfn));
+ pci_writew(s, PCI_COMMAND, CMD_DFLT);
+
+ if (type == PCIC_COUNT) type = IS_UNK_PCI;
+ pci_readl(s, PCI_BASE_ADDRESS_0, &addr);
+ addr &= ~0x1;
+ for (ns = 0; ns < ((type == IS_I82092AA) ? 4 : 2); ns++) {
+ s[ns].bus = s->bus; s[ns].devfn = s->devfn;
+ s[ns].vendor = v; s[ns].device = d;
+ add_socket(addr, ns, type);
+ }
+ add_pcic(ns, type);
+}
+
+static int check_cb_mapping(socket_info_t *s)
+{
+ u_int state = cb_readl(s, CB_SOCKET_STATE) >> 16;
+ /* A few sanity checks to validate the bridge mapping */
+ if ((cb_readb(s, 0x800+I365_IDENT) & 0x70) ||
+ (cb_readb(s, 0x800+I365_CSC) && cb_readb(s, 0x800+I365_CSC) &&
+ cb_readb(s, 0x800+I365_CSC)) || cb_readl(s, CB_SOCKET_FORCE) ||
+ ((state & ~0x3000) || !(state & 0x3000)))
+ return 1;
+ return 0;
+}
+
+static void __init add_cb_bridge(int type, u_short v, u_short d0)
+{
+ socket_info_t *s = &socket[sockets];
+ u_char bus = s->bus, devfn = s->devfn;
+ u_short d, ns;
+ u_char a, r, max;
+
+ /* PCI bus enumeration is broken on some systems */
+ for (ns = 0; ns < sockets; ns++)
+ if ((socket[ns].bus == bus) &&
+ (socket[ns].devfn == devfn))
+ return;
+
+ if (type == PCIC_COUNT) type = IS_UNK_CARDBUS;
+ pci_readb(s, PCI_HEADER_TYPE, &a);
+ pci_readb(s, PCI_CLASS_REVISION, &r);
+ max = (a & 0x80) ? 8 : 1;
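+	/* bit 7 of the PCI header type marks a multi-function device,
+	   so scan up to 8 functions for additional sockets */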
+ for (ns = 0; ns < max; ns++, s++, devfn++) {
+ s->bus = bus; s->devfn = devfn;
+ if (pci_readw(s, PCI_DEVICE_ID, &d) || (d != d0))
+ break;
+ s->vendor = v; s->device = d; s->revision = r;
+
+ pci_enable_device(pci_find_slot(bus, devfn));
+ pci_set_power_state(pci_find_slot(bus, devfn), 0);
+ pci_writew(s, PCI_COMMAND, CMD_DFLT);
+
+ /* Set up CardBus register mapping */
+ pci_writel(s, CB_LEGACY_MODE_BASE, 0);
+ pci_readl(s, PCI_BASE_ADDRESS_0, &s->cb_phys);
+ if (s->cb_phys == 0) {
+ printk("\n" KERN_NOTICE " Bridge register mapping failed:"
+ " check cb_mem_base setting\n");
+ break;
+ }
+ s->cb_virt = ioremap(s->cb_phys, 0x1000);
+ if (check_cb_mapping(s) != 0) {
+ printk("\n" KERN_NOTICE " Bad bridge mapping at "
+ "0x%08x!\n", s->cb_phys);
+ break;
+ }
+
+ request_mem_region(s->cb_phys, 0x1000, "i82365");
+ add_socket(0, 0, type);
+ }
+ if (ns == 0) return;
+
+ add_pcic(ns, type);
+
+ /* Look up PCI bus bridge structures if needed */
+ s -= ns;
+ for (a = 0; a < ns; a++) {
+ struct pci_dev *self = pci_find_slot(bus, s[a].devfn);
+#if (LINUX_VERSION_CODE >= VERSION(2,3,40))
+ s[a].cap.cb_bus = self->subordinate;
+#else
+ struct pci_bus *child;
+ for (child = self->bus->children; child; child = child->next)
+ if (child->number == s[a].cap.cardbus) break;
+ s[a].cap.cb_bus = child;
+#endif
+ }
+}
+
+static void __init pci_probe(u_int class)
+{
+ socket_info_t *s = &socket[sockets];
+ u_short i, v, d;
+ struct pci_dev *id;
+
+ id = 0;
+ while (pci_lookup(class, &id, &s->bus, &s->devfn) == 0) {
+ if (PCI_FUNC(s->devfn) != 0) continue;
+ pci_readw(s, PCI_VENDOR_ID, &v);
+ pci_readw(s, PCI_DEVICE_ID, &d);
+ for (i = 0; i < PCIC_COUNT; i++)
+ if ((pcic[i].vendor == v) && (pcic[i].device == d)) break;
+ /* The "ToPIC95-A" is unusable as a CardBus bridge */
+ if (i == IS_TOPIC95_A)
+ continue;
+ if (((i < PCIC_COUNT) && (pcic[i].flags & IS_CARDBUS)) ||
+ (class == PCI_CLASS_BRIDGE_CARDBUS))
+ add_cb_bridge(i, v, d);
+ else
+ add_pci_bridge(i, v, d);
+ s = &socket[sockets];
+ }
+}
+
+#endif
+
+/*====================================================================*/
+
+#ifdef CONFIG_ISA
+
+static void __init isa_probe(ioaddr_t base)
+{
+ int i, j, sock, k, ns, id;
+ ioaddr_t port;
+
+ if (check_region(base, 2) != 0) {
+ if (sockets == 0)
+ printk("port conflict at %#x\n", base);
+ return;
+ }
+
+ id = isa_identify(base, 0);
+ if ((id == IS_I82365DF) && (isa_identify(base, 1) != id)) {
+ for (i = 0; i < 4; i++) {
+ if (i == ignore) continue;
+ port = base + ((i & 1) << 2) + ((i & 2) << 1);
+ sock = (i & 1) << 1;
+ if (isa_identify(port, sock) == IS_I82365DF) {
+ add_socket(port, sock, IS_VLSI);
+ add_pcic(1, IS_VLSI);
+ }
+ }
+ } else {
+ for (i = 0; i < 4; i += 2) {
+ port = base + 2*(i>>2);
+ sock = (i & 3);
+ id = isa_identify(port, sock);
+ if (id < 0) continue;
+
+ for (j = ns = 0; j < 2; j++) {
+ /* Does the socket exist? */
+ if ((ignore == i+j) || (isa_identify(port, sock+j) < 0))
+ continue;
+ /* Check for bad socket decode */
+ for (k = 0; k <= sockets; k++)
+ i365_set(socket+k, I365_MEM(0)+I365_W_OFF, k);
+ for (k = 0; k <= sockets; k++)
+ if (i365_get(socket+k, I365_MEM(0)+I365_W_OFF) != k)
+ break;
+ if (k <= sockets) break;
+ add_socket(port, sock+j, id); ns++;
+ }
+ if (ns != 0) add_pcic(ns, id);
+ }
+ }
+}
+
+#endif
+
+/*======================================================================
+
+ The card status event handler. This may either be interrupt
+ driven or polled. It monitors mainly for card insert and eject
+ events; there are various other kinds of events that can be
+ monitored (ready/busy, status change, etc), but they are almost
+ never used.
+
+======================================================================*/
+
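+/* Called both as the hardware interrupt handler and, with irq == 0, from
+   the poll timer (pcic_interrupt_wrapper below) when poll_interval is set. */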
+static void pcic_interrupt(int irq, void *dev, struct pt_regs *regs)
+{
+ int i, j, csc;
+ u_int events, active;
+#ifdef CONFIG_ISA
+ u_long flags = 0;
+#endif
+
+ DEBUG(2, "i82365: pcic_interrupt(%d)\n", irq);
+
+ for (j = 0; j < 20; j++) {
+ active = 0;
+ for (i = 0; i < sockets; i++) {
+ socket_info_t *s = &socket[i];
+ if ((s->cs_irq != irq) && (s->cap.pci_irq != irq))
+ continue;
+ ISA_LOCK(s, flags);
+ csc = i365_get(s, I365_CSC);
+#ifdef CONFIG_PCI
+ if ((s->flags & IS_CARDBUS) &&
+ (cb_readl(s, CB_SOCKET_EVENT) & CB_SE_CCD)) {
+ cb_writel(s, CB_SOCKET_EVENT, CB_SE_CCD);
+ csc |= I365_CSC_DETECT;
+ }
+#endif
+ if ((csc == 0) || (!s->handler) ||
+ (i365_get(s, I365_IDENT) & 0x70)) {
+ ISA_UNLOCK(s, flags);
+ continue;
+ }
+ events = (csc & I365_CSC_DETECT) ? SS_DETECT : 0;
+ if (i365_get(s, I365_INTCTL) & I365_PC_IOCARD) {
+ events |= (csc & I365_CSC_STSCHG) ? SS_STSCHG : 0;
+ } else {
+ events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0;
+ events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0;
+ events |= (csc & I365_CSC_READY) ? SS_READY : 0;
+ }
+ ISA_UNLOCK(s, flags);
+ DEBUG(1, "i82365: socket %d event 0x%04x\n", i, events);
+ if (events)
+ s->handler(s->info, events);
+ active |= events;
+ }
+ if (!active) break;
+ }
+ if (j == 20)
+ printk(KERN_NOTICE "i82365: infinite loop in interrupt "
+ "handler: active = 0x%04x\n", active);
+
+ DEBUG(2, "i82365: interrupt done\n");
+} /* pcic_interrupt */
+
+static void pcic_interrupt_wrapper(u_long data)
+{
+ pcic_interrupt(0, NULL, NULL);
+ poll_timer.expires = jiffies + poll_interval;
+ add_timer(&poll_timer);
+}
+
+/*====================================================================*/
+
+static int pcic_register_callback(socket_info_t *s, ss_callback_t *call)
+{
+ if (call == NULL) {
+ s->handler = NULL;
+ MOD_DEC_USE_COUNT;
+ } else {
+ MOD_INC_USE_COUNT;
+ s->handler = call->handler;
+ s->info = call->info;
+ }
+ return 0;
+} /* pcic_register_callback */
+
+/*====================================================================*/
+
+static int pcic_inquire_socket(socket_info_t *s, socket_cap_t *cap)
+{
+ *cap = s->cap;
+ return 0;
+}
+
+/*====================================================================*/
+
+static int i365_get_status(socket_info_t *s, u_int *value)
+{
+ u_int status;
+
+ status = i365_get(s, I365_STATUS);
+ *value = ((status & I365_CS_DETECT) == I365_CS_DETECT)
+ ? SS_DETECT : 0;
+ if (i365_get(s, I365_INTCTL) & I365_PC_IOCARD) {
+ *value |= (status & I365_CS_STSCHG) ? 0 : SS_STSCHG;
+ } else {
+ *value |= (status & I365_CS_BVD1) ? 0 : SS_BATDEAD;
+ *value |= (status & I365_CS_BVD2) ? 0 : SS_BATWARN;
+ }
+ *value |= (status & I365_CS_WRPROT) ? SS_WRPROT : 0;
+ *value |= (status & I365_CS_READY) ? SS_READY : 0;
+ *value |= (status & I365_CS_POWERON) ? SS_POWERON : 0;
+
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS) {
+ status = cb_readl(s, CB_SOCKET_STATE);
+ *value |= (status & CB_SS_32BIT) ? SS_CARDBUS : 0;
+ *value |= (status & CB_SS_3VCARD) ? SS_3VCARD : 0;
+ *value |= (status & CB_SS_XVCARD) ? SS_XVCARD : 0;
+ *value |= (status & CB_SS_VSENSE) ? 0 : SS_PENDING;
+ } else if (s->flags & IS_O2MICRO) {
+ status = i365_get(s, O2_MODE_B);
+ *value |= (status & O2_MODE_B_VS1) ? 0 : SS_3VCARD;
+ *value |= (status & O2_MODE_B_VS2) ? 0 : SS_XVCARD;
+ }
+#endif
+ if ((s->flags & IS_CIRRUS) &&
+ ((s->flags & IS_PCI) || has_vsense)) {
+ socket_info_t *t = (s->psock) ? s : s+1;
+ status = pd67_ext_get(t, PD67_EXTERN_DATA);
+ *value |= (status & PD67_EXD_VS1(s->psock)) ? 0 : SS_3VCARD;
+ *value |= (status & PD67_EXD_VS2(s->psock)) ? 0 : SS_XVCARD;
+ }
+#ifdef CONFIG_ISA
+ if (s->type == IS_VG469) {
+ status = i365_get(s, VG469_VSENSE);
+ if (s->psock & 1) {
+ *value |= (status & VG469_VSENSE_B_VS1) ? 0 : SS_3VCARD;
+ *value |= (status & VG469_VSENSE_B_VS2) ? 0 : SS_XVCARD;
+ } else {
+ *value |= (status & VG469_VSENSE_A_VS1) ? 0 : SS_3VCARD;
+ *value |= (status & VG469_VSENSE_A_VS2) ? 0 : SS_XVCARD;
+ }
+ }
+#endif
+ /* For now, ignore cards with unsupported voltage keys */
+ if (*value & SS_XVCARD)
+ *value &= ~(SS_DETECT|SS_3VCARD|SS_XVCARD);
+ DEBUG(1, "i82365: GetStatus(%d) = %#4.4x\n", s-socket, *value);
+ return 0;
+} /* i365_get_status */
+
+/*====================================================================*/
+
+static int i365_get_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_char reg, vcc, vpp;
+
+ reg = i365_get(s, I365_POWER);
+ state->flags = (reg & I365_PWR_AUTO) ? SS_PWR_AUTO : 0;
+ state->flags |= (reg & I365_PWR_OUT) ? SS_OUTPUT_ENA : 0;
+ vcc = reg & I365_VCC_MASK; vpp = reg & I365_VPP1_MASK;
+ state->Vcc = state->Vpp = 0;
+#ifdef CONFIG_PCI
+ if ((s->flags & IS_CARDBUS) && !(s->flags & IS_TOPIC)) {
+ cb_get_power(s, state);
+ } else
+#endif
+ {
+ if ((s->flags & IS_CIRRUS) && (reg & I365_VCC_5V)) {
+ state->Vcc = (i365_get(s, PD67_MISC_CTL_1) &
+ PD67_MC1_VCC_3V) ? 33 : 50;
+ } else if ((s->flags & IS_VG_PWR) && (reg & I365_VCC_5V)) {
+ state->Vcc = (i365_get(s, VG469_VSELECT) &
+ VG469_VSEL_VCC) ? 33 : 50;
+ } else if ((s->flags & IS_DF_PWR) || (s->flags & IS_TOPIC)) {
+ if (vcc == I365_VCC_3V) state->Vcc = 33;
+ if (vcc == I365_VCC_5V) state->Vcc = 50;
+ } else {
+ if (reg & I365_VCC_5V) state->Vcc = 50;
+ }
+ if (vpp == I365_VPP1_5V)
+ state->Vpp = (s->flags & IS_DF_PWR) ? 50 : state->Vcc;
+ if (vpp == I365_VPP1_12V) state->Vpp = 120;
+ }
+
+ /* IO card, RESET flags, IO interrupt */
+ reg = i365_get(s, I365_INTCTL);
+ state->flags |= (reg & I365_PC_RESET) ? 0 : SS_RESET;
+ state->flags |= (reg & I365_PC_IOCARD) ? SS_IOCARD : 0;
+#ifdef CONFIG_PCI
+ if (cb_get_irq_mode(s) != 0)
+ state->io_irq = s->cap.pci_irq;
+ else
+#endif
+ state->io_irq = reg & I365_IRQ_MASK;
+
+ /* Card status change mask */
+ reg = i365_get(s, I365_CSCINT);
+ state->csc_mask = (reg & I365_CSC_DETECT) ? SS_DETECT : 0;
+ if (state->flags & SS_IOCARD) {
+ state->csc_mask |= (reg & I365_CSC_STSCHG) ? SS_STSCHG : 0;
+ } else {
+ state->csc_mask |= (reg & I365_CSC_BVD1) ? SS_BATDEAD : 0;
+ state->csc_mask |= (reg & I365_CSC_BVD2) ? SS_BATWARN : 0;
+ state->csc_mask |= (reg & I365_CSC_READY) ? SS_READY : 0;
+ }
+
+ DEBUG(2, "i82365: GetSocket(%d) = flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+ return 0;
+} /* i365_get_socket */
+
+/*====================================================================*/
+
+static int i365_set_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_char reg;
+
+ DEBUG(2, "i82365: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x)\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+
+ /* First set global controller options */
+#ifdef CONFIG_PCI
+ if (s->cap.pci_irq)
+ cb_set_irq_mode(s, pci_csc, (s->cap.pci_irq == state->io_irq));
+ s->bcr &= ~CB_BCR_CB_RESET;
+#endif
+ set_bridge_state(s);
+
+ /* IO card, RESET flag, IO interrupt */
+ reg = s->intr | ((state->io_irq == s->cap.pci_irq) ?
+ s->pci_irq_code : state->io_irq);
+ reg |= (state->flags & SS_RESET) ? 0 : I365_PC_RESET;
+ reg |= (state->flags & SS_IOCARD) ? I365_PC_IOCARD : 0;
+ i365_set(s, I365_INTCTL, reg);
+
+ reg = I365_PWR_NORESET;
+ if (state->flags & SS_PWR_AUTO) reg |= I365_PWR_AUTO;
+ if (state->flags & SS_OUTPUT_ENA) reg |= I365_PWR_OUT;
+
+#ifdef CONFIG_PCI
+ if ((s->flags & IS_CARDBUS) && !(s->flags & IS_TOPIC)) {
+ cb_set_power(s, state);
+ reg |= i365_get(s, I365_POWER) & (I365_VCC_MASK|I365_VPP1_MASK);
+ } else
+#endif
+ {
+ int new = s->flags & (IS_TOPIC|IS_CIRRUS|IS_VG_PWR|IS_DF_PWR);
+ int vcc3 = (state->Vcc == 33), df = (s->flags & IS_DF_PWR);
+
+ if (state->Vcc == 50) {
+ reg |= I365_VCC_5V;
+ } else if (new && vcc3) {
+ reg |= ((s->flags & (IS_TOPIC|IS_DF_PWR)) ?
+ I365_VCC_3V : I365_VCC_5V);
+ } else if (state->Vcc)
+ return -EINVAL;
+ if (s->flags & IS_CIRRUS)
+ i365_bflip(s, PD67_MISC_CTL_1, PD67_MC1_VCC_3V, vcc3);
+ if (s->flags & IS_VG_PWR)
+ i365_bflip(s, VG469_VSELECT, VG469_VSEL_VCC, vcc3);
+
+ if (state->Vpp == 120) {
+ reg |= I365_VPP1_12V | (new ? 0 : I365_VPP2_12V);
+ } else if (state->Vpp == (df ? 50 : state->Vcc)) {
+ reg |= I365_VPP1_5V | (new ? 0 : I365_VPP2_5V);
+ } else if (state->Vpp)
+ return -EINVAL;
+ }
+
+ if (reg != i365_get(s, I365_POWER))
+ i365_set(s, I365_POWER, reg);
+
+ /* Card status change interrupt mask */
+ reg = (s->cap.pci_irq ? s->pci_irq_code : s->cs_irq) << 4;
+ if (state->csc_mask & SS_DETECT) reg |= I365_CSC_DETECT;
+ if (state->flags & SS_IOCARD) {
+ if (state->csc_mask & SS_STSCHG) reg |= I365_CSC_STSCHG;
+ } else {
+ if (state->csc_mask & SS_BATDEAD) reg |= I365_CSC_BVD1;
+ if (state->csc_mask & SS_BATWARN) reg |= I365_CSC_BVD2;
+ if (state->csc_mask & SS_READY) reg |= I365_CSC_READY;
+ }
+ i365_set(s, I365_CSCINT, reg);
+ i365_get(s, I365_CSC);
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS) {
+ if (s->cs_irq || (pci_csc && s->cap.pci_irq))
+ cb_writel(s, CB_SOCKET_MASK, CB_SM_CCD);
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+ }
+#endif
+
+ return 0;
+} /* i365_set_socket */
+
+/*====================================================================*/
+
+static int i365_get_io_map(socket_info_t *s, struct pccard_io_map *io)
+{
+ u_char map, ioctl, addr;
+
+ map = io->map;
+ if (map > 1) return -EINVAL;
+ io->start = i365_get_pair(s, I365_IO(map)+I365_W_START);
+ io->stop = i365_get_pair(s, I365_IO(map)+I365_W_STOP);
+ ioctl = i365_get(s, I365_IOCTL);
+ addr = i365_get(s, I365_ADDRWIN);
+ io->speed = (ioctl & I365_IOCTL_WAIT(map)) ? cycle_time : 0;
+ io->flags = (addr & I365_ENA_IO(map)) ? MAP_ACTIVE : 0;
+ io->flags |= (ioctl & I365_IOCTL_0WS(map)) ? MAP_0WS : 0;
+ io->flags |= (ioctl & I365_IOCTL_16BIT(map)) ? MAP_16BIT : 0;
+ io->flags |= (ioctl & I365_IOCTL_IOCS16(map)) ? MAP_AUTOSZ : 0;
+ DEBUG(3, "i82365: GetIOMap(%d, %d) = %#2.2x, %d ns, %#4.4x-%#4.4x\n",
+ s-socket, map, io->flags, io->speed, io->start, io->stop);
+ return 0;
+} /* i365_get_io_map */
+
+/*====================================================================*/
+
+static int i365_set_io_map(socket_info_t *s, struct pccard_io_map *io)
+{
+ u_char map, ioctl;
+
+ DEBUG(3, "i82365: SetIOMap(%d, %d, %#2.2x, %d ns, %#4.4x-%#4.4x)\n",
+ s-socket, io->map, io->flags, io->speed, io->start, io->stop);
+ map = io->map;
+ if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
+ (io->stop < io->start)) return -EINVAL;
+ /* Turn off the window before changing anything */
+ if (i365_get(s, I365_ADDRWIN) & I365_ENA_IO(map))
+ i365_bclr(s, I365_ADDRWIN, I365_ENA_IO(map));
+ i365_set_pair(s, I365_IO(map)+I365_W_START, io->start);
+ i365_set_pair(s, I365_IO(map)+I365_W_STOP, io->stop);
+ ioctl = i365_get(s, I365_IOCTL) & ~I365_IOCTL_MASK(map);
+ if (io->speed) ioctl |= I365_IOCTL_WAIT(map);
+ if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map);
+ if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map);
+ if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map);
+ i365_set(s, I365_IOCTL, ioctl);
+ /* Turn on the window if necessary */
+ if (io->flags & MAP_ACTIVE)
+ i365_bset(s, I365_ADDRWIN, I365_ENA_IO(map));
+ return 0;
+} /* i365_set_io_map */
+
+/*====================================================================*/
+
+static int i365_get_mem_map(socket_info_t *s, struct pccard_mem_map *mem)
+{
+ u_short base, i;
+ u_char map, addr;
+
+ map = mem->map;
+ if (map > 4) return -EINVAL;
+ addr = i365_get(s, I365_ADDRWIN);
+ mem->flags = (addr & I365_ENA_MEM(map)) ? MAP_ACTIVE : 0;
+ base = I365_MEM(map);
+
+ i = i365_get_pair(s, base+I365_W_START);
+ mem->flags |= (i & I365_MEM_16BIT) ? MAP_16BIT : 0;
+ mem->flags |= (i & I365_MEM_0WS) ? MAP_0WS : 0;
+ mem->sys_start = ((u_long)(i & 0x0fff) << 12);
+
+ i = i365_get_pair(s, base+I365_W_STOP);
+ mem->speed = (i & I365_MEM_WS0) ? 1 : 0;
+ mem->speed += (i & I365_MEM_WS1) ? 2 : 0;
+ mem->speed *= cycle_time;
+ mem->sys_stop = ((u_long)(i & 0x0fff) << 12) + 0x0fff;
+
+ i = i365_get_pair(s, base+I365_W_OFF);
+ mem->flags |= (i & I365_MEM_WRPROT) ? MAP_WRPROT : 0;
+ mem->flags |= (i & I365_MEM_REG) ? MAP_ATTRIB : 0;
+ mem->card_start = ((u_int)(i & 0x3fff) << 12) + mem->sys_start;
+ mem->card_start &= 0x3ffffff;
+
+#ifdef CONFIG_PCI
+ /* Take care of high byte, for PCI controllers */
+ if (s->type == IS_PD6729) {
+ addr = pd67_ext_get(s, PD67_MEM_PAGE(map)) << 24;
+ mem->sys_stop += addr; mem->sys_start += addr;
+ } else if (s->flags & IS_CARDBUS) {
+ addr = i365_get(s, CB_MEM_PAGE(map)) << 24;
+ mem->sys_stop += addr; mem->sys_start += addr;
+ }
+#endif
+
+ DEBUG(3, "i82365: GetMemMap(%d, %d) = %#2.2x, %d ns, %#5.5lx-%#5."
+ "5lx, %#5.5x\n", s-socket, mem->map, mem->flags, mem->speed,
+ mem->sys_start, mem->sys_stop, mem->card_start);
+ return 0;
+} /* i365_get_mem_map */
+
+/*====================================================================*/
+
+static int i365_set_mem_map(socket_info_t *s, struct pccard_mem_map *mem)
+{
+ u_short base, i;
+ u_char map;
+
+ DEBUG(3, "i82365: SetMemMap(%d, %d, %#2.2x, %d ns, %#5.5lx-%#5.5"
+ "lx, %#5.5x)\n", s-socket, mem->map, mem->flags, mem->speed,
+ mem->sys_start, mem->sys_stop, mem->card_start);
+
+ map = mem->map;
+ if ((map > 4) || (mem->card_start > 0x3ffffff) ||
+ (mem->sys_start > mem->sys_stop) || (mem->speed > 1000))
+ return -EINVAL;
+ if (!(s->flags & (IS_PCI|IS_CARDBUS)) &&
+ ((mem->sys_start > 0xffffff) || (mem->sys_stop > 0xffffff)))
+ return -EINVAL;
+
+ /* Turn off the window before changing anything */
+ if (i365_get(s, I365_ADDRWIN) & I365_ENA_MEM(map))
+ i365_bclr(s, I365_ADDRWIN, I365_ENA_MEM(map));
+
+#ifdef CONFIG_PCI
+ /* Take care of high byte, for PCI controllers */
+ if (s->type == IS_PD6729) {
+ pd67_ext_set(s, PD67_MEM_PAGE(map), (mem->sys_start >> 24));
+ } else if (s->flags & IS_CARDBUS)
+ i365_set(s, CB_MEM_PAGE(map), mem->sys_start >> 24);
+#endif
+
+ base = I365_MEM(map);
+ i = (mem->sys_start >> 12) & 0x0fff;
+ if (mem->flags & MAP_16BIT) i |= I365_MEM_16BIT;
+ if (mem->flags & MAP_0WS) i |= I365_MEM_0WS;
+ i365_set_pair(s, base+I365_W_START, i);
+
+ i = (mem->sys_stop >> 12) & 0x0fff;
+ switch (mem->speed / cycle_time) {
+ case 0: break;
+ case 1: i |= I365_MEM_WS0; break;
+ case 2: i |= I365_MEM_WS1; break;
+ default: i |= I365_MEM_WS1 | I365_MEM_WS0; break;
+ }
+ i365_set_pair(s, base+I365_W_STOP, i);
+
+ i = ((mem->card_start - mem->sys_start) >> 12) & 0x3fff;
+ if (mem->flags & MAP_WRPROT) i |= I365_MEM_WRPROT;
+ if (mem->flags & MAP_ATTRIB) i |= I365_MEM_REG;
+ i365_set_pair(s, base+I365_W_OFF, i);
+
+ /* Turn on the window if necessary */
+ if (mem->flags & MAP_ACTIVE)
+ i365_bset(s, I365_ADDRWIN, I365_ENA_MEM(map));
+ return 0;
+} /* i365_set_mem_map */
+
+/*======================================================================
+
+ The few things that are strictly for CardBus cards go here.
+
+======================================================================*/
+
+#ifdef CONFIG_CARDBUS
+
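+/* These CardBus-specific replacements for the status and socket calls are
+   substituted at dispatch time in pcic_service() whenever the socket state
+   register reports a 32-bit card (CB_SS_32BIT). */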
+static int cb_get_status(socket_info_t *s, u_int *value)
+{
+ u_int state = cb_readl(s, CB_SOCKET_STATE);
+ *value = (state & CB_SS_32BIT) ? SS_CARDBUS : 0;
+ *value |= (state & CB_SS_CCD) ? 0 : SS_DETECT;
+ *value |= (state & CB_SS_CSTSCHG) ? SS_STSCHG : 0;
+ *value |= (state & CB_SS_PWRCYCLE) ? (SS_POWERON|SS_READY) : 0;
+ *value |= (state & CB_SS_3VCARD) ? SS_3VCARD : 0;
+ *value |= (state & CB_SS_XVCARD) ? SS_XVCARD : 0;
+ *value |= (state & CB_SS_VSENSE) ? 0 : SS_PENDING;
+ DEBUG(1, "yenta: GetStatus(%d) = %#4.4x\n", s-socket, *value);
+ return 0;
+} /* cb_get_status */
+
+static int cb_get_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_short bcr;
+
+ cb_get_power(s, state);
+ pci_readw(s, CB_BRIDGE_CONTROL, &bcr);
+ state->flags |= (bcr & CB_BCR_CB_RESET) ? SS_RESET : 0;
+ if (cb_get_irq_mode(s) != 0)
+ state->io_irq = s->cap.pci_irq;
+ else
+ state->io_irq = i365_get(s, I365_INTCTL) & I365_IRQ_MASK;
+ DEBUG(2, "yenta: GetSocket(%d) = flags %#3.3x, Vcc %d, Vpp %d"
+ ", io_irq %d, csc_mask %#2.2x\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+ return 0;
+} /* cb_get_socket */
+
+static int cb_set_socket(socket_info_t *s, socket_state_t *state)
+{
+ u_int reg;
+
+ DEBUG(2, "yenta: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x)\n", s-socket, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+
+ /* First set global controller options */
+ if (s->cap.pci_irq)
+ cb_set_irq_mode(s, pci_csc, (s->cap.pci_irq == state->io_irq));
+ s->bcr &= ~CB_BCR_CB_RESET;
+ s->bcr |= (state->flags & SS_RESET) ? CB_BCR_CB_RESET : 0;
+ set_bridge_state(s);
+
+ cb_set_power(s, state);
+
+ /* Handle IO interrupt using ISA routing */
+ reg = s->intr;
+ if (state->io_irq != s->cap.pci_irq) reg |= state->io_irq;
+ i365_set(s, I365_INTCTL, reg);
+
+ /* Handle CSC mask */
+ if (!s->cs_irq && (!pci_csc || !s->cap.pci_irq))
+ return 0;
+ reg = (s->cs_irq << 4);
+ if (state->csc_mask & SS_DETECT) reg |= I365_CSC_DETECT;
+ i365_set(s, I365_CSCINT, reg);
+ i365_get(s, I365_CSC);
+ cb_writel(s, CB_SOCKET_MASK, CB_SM_CCD);
+ cb_writel(s, CB_SOCKET_EVENT, -1);
+
+ return 0;
+} /* cb_set_socket */
+
+static int cb_get_bridge(socket_info_t *s, struct cb_bridge_map *m)
+{
+ u_char map = m->map;
+
+ if (map > 1) return -EINVAL;
+ m->flags &= MAP_IOSPACE;
+ map += (m->flags & MAP_IOSPACE) ? 2 : 0;
+ pci_readl(s, CB_MEM_BASE(map), &m->start);
+ pci_readl(s, CB_MEM_LIMIT(map), &m->stop);
+ if (m->start || m->stop) {
+ m->flags |= MAP_ACTIVE;
+ m->stop |= (map > 1) ? 3 : 0x0fff;
+ }
+ if (map > 1) {
+ u_short bcr;
+ pci_readw(s, CB_BRIDGE_CONTROL, &bcr);
+ m->flags |= (bcr & CB_BCR_PREFETCH(map)) ? MAP_PREFETCH : 0;
+ }
+ DEBUG(3, "yenta: GetBridge(%d, %d) = %#2.2x, %#4.4x-%#4.4x\n",
+ s-socket, map, m->flags, m->start, m->stop);
+ return 0;
+}
+
+static int cb_set_bridge(socket_info_t *s, struct cb_bridge_map *m)
+{
+ u_char map;
+
+ DEBUG(3, "yenta: SetBridge(%d, %d, %#2.2x, %#4.4x-%#4.4x)\n",
+ s-socket, m->map, m->flags, m->start, m->stop);
+ map = m->map;
+ if (!(s->flags & IS_CARDBUS) || (map > 1) || (m->stop < m->start))
+ return -EINVAL;
+ if (m->flags & MAP_IOSPACE) {
+ if ((m->stop > 0xffff) || (m->start & 3) ||
+ ((m->stop & 3) != 3))
+ return -EINVAL;
+ map += 2;
+ } else {
+ if ((m->start & 0x0fff) || ((m->stop & 0x0fff) != 0x0fff))
+ return -EINVAL;
+ s->bcr &= ~CB_BCR_PREFETCH(map);
+ s->bcr |= (m->flags & MAP_PREFETCH) ? CB_BCR_PREFETCH(map) : 0;
+ pci_writew(s, CB_BRIDGE_CONTROL, s->bcr);
+ }
+ if (m->flags & MAP_ACTIVE) {
+ pci_writel(s, CB_MEM_BASE(map), m->start);
+ pci_writel(s, CB_MEM_LIMIT(map), m->stop);
+ } else {
+ pci_writel(s, CB_MEM_LIMIT(map), 0);
+ pci_writel(s, CB_MEM_BASE(map), 0);
+ }
+ return 0;
+}
+
+#endif /* CONFIG_CARDBUS */
+
+/*======================================================================
+
+ Routines for accessing socket information and register dumps via
+ /proc/bus/pccard/...
+
+======================================================================*/
+
+#ifdef HAS_PROC_BUS
+
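+/* pcic_proc_setup() registers these read-only entries for each socket:
+   "info" and "exca" always, plus "pci" and "cardbus" register dumps for
+   PCI and CardBus bridges. */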
+static int proc_read_info(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ p += sprintf(p, "type: %s\npsock: %d\n",
+ pcic[s->type].name, s->psock);
+#ifdef CONFIG_PCI
+ if (s->flags & (IS_PCI|IS_CARDBUS))
+ p += sprintf(p, "bus: %02x\ndevfn: %02x.%1x\n",
+ s->bus, PCI_SLOT(s->devfn), PCI_FUNC(s->devfn));
+ if (s->flags & IS_CARDBUS)
+ p += sprintf(p, "cardbus: %02x\n", s->cap.cardbus);
+#endif
+ return (p - buf);
+}
+
+static int proc_read_exca(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ int i, top;
+
+#ifdef CONFIG_ISA
+ u_long flags = 0;
+#endif
+ ISA_LOCK(s, flags);
+ top = 0x40;
+ if (s->flags & IS_CARDBUS)
+ top = (s->flags & IS_CIRRUS) ? 0x140 : 0x50;
+ for (i = 0; i < top; i += 4) {
+ if (i == 0x50) {
+ p += sprintf(p, "\n");
+ i = 0x100;
+ }
+ p += sprintf(p, "%02x %02x %02x %02x%s",
+ i365_get(s,i), i365_get(s,i+1),
+ i365_get(s,i+2), i365_get(s,i+3),
+ ((i % 16) == 12) ? "\n" : " ");
+ }
+ ISA_UNLOCK(s, flags);
+ return (p - buf);
+}
+
+#ifdef CONFIG_PCI
+static int proc_read_pci(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ u_int a, b, c, d;
+ int i;
+
+ for (i = 0; i < 0xc0; i += 0x10) {
+ pci_readl(s, i, &a);
+ pci_readl(s, i+4, &b);
+ pci_readl(s, i+8, &c);
+ pci_readl(s, i+12, &d);
+ p += sprintf(p, "%08x %08x %08x %08x\n", a, b, c, d);
+ }
+ return (p - buf);
+}
+
+static int proc_read_cardbus(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ socket_info_t *s = data;
+ char *p = buf;
+ int i, top;
+
+ top = (s->flags & IS_O2MICRO) ? 0x30 : 0x20;
+ for (i = 0; i < top; i += 0x10)
+ p += sprintf(p, "%08x %08x %08x %08x\n",
+ cb_readl(s,i+0x00), cb_readl(s,i+0x04),
+ cb_readl(s,i+0x08), cb_readl(s,i+0x0c));
+ return (p - buf);
+}
+#endif
+
+static void pcic_proc_setup(socket_info_t *s, struct proc_dir_entry *base)
+{
+ create_proc_read_entry("info", 0, base, proc_read_info, s);
+ create_proc_read_entry("exca", 0, base, proc_read_exca, s);
+#ifdef CONFIG_PCI
+ if (s->flags & (IS_PCI|IS_CARDBUS))
+ create_proc_read_entry("pci", 0, base, proc_read_pci, s);
+ if (s->flags & IS_CARDBUS)
+ create_proc_read_entry("cardbus", 0, base, proc_read_cardbus, s);
+#endif
+ s->proc = base;
+}
+
+static void pcic_proc_remove(socket_info_t *s)
+{
+ struct proc_dir_entry *base = s->proc;
+ if (base == NULL) return;
+ remove_proc_entry("info", base);
+ remove_proc_entry("exca", base);
+#ifdef CONFIG_PCI
+ if (s->flags & (IS_PCI|IS_CARDBUS))
+ remove_proc_entry("pci", base);
+ if (s->flags & IS_CARDBUS)
+ remove_proc_entry("cardbus", base);
+#endif
+}
+
+#endif /* HAS_PROC_BUS */
+
+/*====================================================================*/
+
+typedef int (*subfn_t)(socket_info_t *, void *);
+
+static subfn_t pcic_service_table[] = {
+ (subfn_t)&pcic_register_callback,
+ (subfn_t)&pcic_inquire_socket,
+ (subfn_t)&i365_get_status,
+ (subfn_t)&i365_get_socket,
+ (subfn_t)&i365_set_socket,
+ (subfn_t)&i365_get_io_map,
+ (subfn_t)&i365_set_io_map,
+ (subfn_t)&i365_get_mem_map,
+ (subfn_t)&i365_set_mem_map,
+#ifdef CONFIG_CARDBUS
+ (subfn_t)&cb_get_bridge,
+ (subfn_t)&cb_set_bridge,
+#else
+ NULL, NULL,
+#endif
+#ifdef HAS_PROC_BUS
+ (subfn_t)&pcic_proc_setup
+#endif
+};
+
+#define NFUNC (sizeof(pcic_service_table)/sizeof(subfn_t))
+
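+/* The table is indexed by the socket services command code passed to
+   pcic_service(); the two CardBus bridge-window entries are stubbed out
+   with NULL when CONFIG_CARDBUS is not configured. */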
+static int pcic_service(u_int sock, u_int cmd, void *arg)
+{
+ socket_info_t *s = &socket[sock];
+ subfn_t fn;
+ int ret;
+#ifdef CONFIG_ISA
+ u_long flags = 0;
+#endif
+
+ if (cmd >= NFUNC)
+ return -EINVAL;
+
+ if (s->flags & IS_ALIVE) {
+ if (cmd == SS_GetStatus)
+ *(u_int *)arg = 0;
+ return -EINVAL;
+ }
+
+ fn = pcic_service_table[cmd];
+#ifdef CONFIG_CARDBUS
+ if ((s->flags & IS_CARDBUS) &&
+ (cb_readl(s, CB_SOCKET_STATE) & CB_SS_32BIT)) {
+ if (cmd == SS_GetStatus)
+ fn = (subfn_t)&cb_get_status;
+ else if (cmd == SS_GetSocket)
+ fn = (subfn_t)&cb_get_socket;
+ else if (cmd == SS_SetSocket)
+ fn = (subfn_t)&cb_set_socket;
+ }
+#endif
+
+ ISA_LOCK(s, flags);
+ ret = (fn == NULL) ? -EINVAL : fn(s, arg);
+ ISA_UNLOCK(s, flags);
+ return ret;
+} /* pcic_service */
+
+/*====================================================================*/
+
+int __init init_i82365(void)
+{
+ servinfo_t serv;
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "i82365: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+ DEBUG(0, "%s\n", version);
+
+#ifdef CONFIG_PCI
+ if (pcic[IS_UNK_CARDBUS].flags != (IS_CARDBUS|IS_UNKNOWN)) {
+ printk(KERN_NOTICE "i82365: bad pcic_id enumeration!\n");
+ return -EINVAL;
+ }
+#endif
+
+ printk(KERN_INFO "Intel ISA/PCI/CardBus PCIC probe:\n");
+ sockets = 0;
+
+#ifdef CONFIG_PCI
+ if (do_pci_probe && pcibios_present()) {
+ pci_probe(PCI_CLASS_BRIDGE_CARDBUS);
+ pci_probe(PCI_CLASS_BRIDGE_PCMCIA);
+ }
+#endif
+
+#ifdef CONFIG_ISA
+ isa_probe(i365_base);
+ if (!sockets || extra_sockets)
+ isa_probe(i365_base+2);
+#endif
+
+ if (sockets == 0) {
+ printk(KERN_INFO " no bridges found.\n");
+ return -ENODEV;
+ }
+
+ /* Set up interrupt handler(s) */
+#ifdef CONFIG_ISA
+ if (grab_irq != 0)
+ request_irq(cs_irq, pcic_interrupt, 0, "i82365", socket);
+#endif
+#ifdef CONFIG_PCI
+ if (pci_csc) {
+ u_int i, irq, mask = 0;
+ for (i = 0; i < sockets; i++) {
+ irq = socket[i].cap.pci_irq;
+ if (irq && !(mask & (1<<irq)))
+ request_irq(irq, pcic_interrupt, SA_SHIRQ, "i82365", socket);
+ mask |= (1<<irq);
+ }
+ }
+#endif
+
+ if (register_ss_entry(sockets, &pcic_service) != 0)
+ printk(KERN_NOTICE "i82365: register_ss_entry() failed\n");
+
+ /* Finally, schedule a polling interrupt */
+ if (poll_interval != 0) {
+ poll_timer.expires = jiffies + poll_interval;
+ add_timer(&poll_timer);
+ }
+
+ return 0;
+
+} /* init_i82365 */
+
+static void __exit exit_i82365(void)
+{
+ int i;
+#ifdef HAS_PROC_BUS
+ for (i = 0; i < sockets; i++)
+ pcic_proc_remove(&socket[i]);
+#endif
+ unregister_ss_entry(&pcic_service);
+ if (poll_interval != 0)
+ del_timer(&poll_timer);
+#ifdef CONFIG_ISA
+ if (grab_irq != 0)
+ free_irq(cs_irq, socket);
+#endif
+#ifdef CONFIG_PCI
+ if (pci_csc) {
+ u_int irq, mask = 0;
+ for (i = 0; i < sockets; i++) {
+ irq = socket[i].cap.pci_irq;
+ if (irq && !(mask & (1<<irq)))
+ free_irq(irq, socket);
+ mask |= (1<<irq);
+ }
+ }
+#endif
+ for (i = 0; i < sockets; i++) {
+ socket_info_t *s = &socket[i];
+ /* Turn off all interrupt sources! */
+ i365_set(s, I365_CSCINT, 0);
+#ifdef CONFIG_PCI
+ if (s->flags & IS_CARDBUS)
+ cb_writel(s, CB_SOCKET_MASK, 0);
+ if (s->cb_virt) {
+ iounmap(s->cb_virt);
+ release_mem_region(s->cb_phys, 0x1000);
+ } else
+#endif
+ release_region(s->ioaddr, 2);
+ }
+} /* exit_i82365 */
+
+module_init(init_i82365);
+module_exit(exit_i82365);
diff --git a/linux/pcmcia-cs/modules/i82365.h b/linux/pcmcia-cs/modules/i82365.h
new file mode 100644
index 0000000..27ee583
--- /dev/null
+++ b/linux/pcmcia-cs/modules/i82365.h
@@ -0,0 +1,154 @@
+/*
+ * i82365.h 1.21 2001/08/24 12:15:33
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_I82365_H
+#define _LINUX_I82365_H
+
+/* register definitions for the Intel 82365SL PCMCIA controller */
+
+/* Offsets for PCIC registers */
+#define I365_IDENT 0x00 /* Identification and revision */
+#define I365_STATUS 0x01 /* Interface status */
+#define I365_POWER 0x02 /* Power and RESETDRV control */
+#define I365_INTCTL 0x03 /* Interrupt and general control */
+#define I365_CSC 0x04 /* Card status change */
+#define I365_CSCINT 0x05 /* Card status change interrupt control */
+#define I365_ADDRWIN 0x06 /* Address window enable */
+#define I365_IOCTL 0x07 /* I/O control */
+#define I365_GENCTL 0x16 /* Card detect and general control */
+#define I365_GBLCTL 0x1E /* Global control register */
+
+/* Offsets for I/O and memory window registers */
+#define I365_IO(map) (0x08+((map)<<2))
+#define I365_MEM(map) (0x10+((map)<<3))
+#define I365_W_START 0
+#define I365_W_STOP 2
+#define I365_W_OFF 4
+
+/* Flags for I365_STATUS */
+#define I365_CS_BVD1 0x01
+#define I365_CS_STSCHG 0x01
+#define I365_CS_BVD2 0x02
+#define I365_CS_SPKR 0x02
+#define I365_CS_DETECT 0x0C
+#define I365_CS_WRPROT 0x10
+#define I365_CS_READY 0x20 /* Inverted */
+#define I365_CS_POWERON 0x40
+#define I365_CS_GPI 0x80
+
+/* Flags for I365_POWER */
+#define I365_PWR_OFF 0x00 /* Turn off the socket */
+#define I365_PWR_OUT 0x80 /* Output enable */
+#define I365_PWR_NORESET 0x40 /* Disable RESETDRV on resume */
+#define I365_PWR_AUTO 0x20 /* Auto pwr switch enable */
+#define I365_VCC_MASK 0x18 /* Mask for turning off Vcc */
+/* There are different layouts for B-step and DF-step chips: the B
+ step has independent Vpp1/Vpp2 control, and the DF step has only
+ Vpp1 control, plus 3V control */
+#define I365_VCC_5V 0x10 /* Vcc = 5.0v */
+#define I365_VCC_3V 0x18 /* Vcc = 3.3v */
+#define I365_VPP2_MASK 0x0c /* Mask for turning off Vpp2 */
+#define I365_VPP2_5V 0x04 /* Vpp2 = 5.0v */
+#define I365_VPP2_12V 0x08 /* Vpp2 = 12.0v */
+#define I365_VPP1_MASK 0x03 /* Mask for turning off Vpp1 */
+#define I365_VPP1_5V 0x01 /* Vpp1 = 5.0v */
+#define I365_VPP1_12V 0x02 /* Vpp1 = 12.0v */
+
+/* Flags for I365_INTCTL */
+#define I365_RING_ENA 0x80
+#define I365_PC_RESET 0x40
+#define I365_PC_IOCARD 0x20
+#define I365_INTR_ENA 0x10
+#define I365_IRQ_MASK 0x0F
+
+/* Flags for I365_CSC and I365_CSCINT */
+#define I365_CSC_BVD1 0x01
+#define I365_CSC_STSCHG 0x01
+#define I365_CSC_BVD2 0x02
+#define I365_CSC_READY 0x04
+#define I365_CSC_DETECT 0x08
+#define I365_CSC_ANY 0x0F
+#define I365_CSC_GPI 0x10
+
+/* Flags for I365_ADDRWIN */
+#define I365_ADDR_MEMCS16 0x20
+#define I365_ENA_IO(map) (0x40 << (map))
+#define I365_ENA_MEM(map) (0x01 << (map))
+
+/* Flags for I365_IOCTL */
+#define I365_IOCTL_MASK(map) (0x0F << (map<<2))
+#define I365_IOCTL_WAIT(map) (0x08 << (map<<2))
+#define I365_IOCTL_0WS(map) (0x04 << (map<<2))
+#define I365_IOCTL_IOCS16(map) (0x02 << (map<<2))
+#define I365_IOCTL_16BIT(map) (0x01 << (map<<2))
+
+/* Flags for I365_GENCTL */
+#define I365_CTL_16DELAY 0x01
+#define I365_CTL_RESET 0x02
+#define I365_CTL_GPI_ENA 0x04
+#define I365_CTL_GPI_CTL 0x08
+#define I365_CTL_RESUME 0x10
+#define I365_CTL_SW_IRQ 0x20
+
+/* Flags for I365_GBLCTL */
+#define I365_GBL_PWRDOWN 0x01
+#define I365_GBL_CSC_LEV 0x02
+#define I365_GBL_WRBACK 0x04
+#define I365_GBL_IRQ_0_LEV 0x08
+#define I365_GBL_IRQ_1_LEV 0x10
+
+/* Flags for memory window registers */
+#define I365_MEM_16BIT 0x8000 /* In memory start high byte */
+#define I365_MEM_0WS 0x4000
+#define I365_MEM_WS1 0x8000 /* In memory stop high byte */
+#define I365_MEM_WS0 0x4000
+#define I365_MEM_WRPROT 0x8000 /* In offset high byte */
+#define I365_MEM_REG 0x4000
+
+#define I365_REG(slot, reg) (((slot) << 6) | (reg))
+
+/* Default ISA interrupt mask */
+#define I365_ISA_IRQ_MASK 0xdeb8 /* irq's 3-5,7,9-12,14,15 */
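+/* (0xdeb8 = 1101 1110 1011 1000b, i.e. bits 3-5, 7, 9-12, 14 and 15) */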
+
+/* Device ID's for PCI-to-PCMCIA bridges */
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82092AA_0
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#endif
+#ifndef PCI_VENDOR_ID_OMEGA
+#define PCI_VENDOR_ID_OMEGA 0x119b
+#endif
+#ifndef PCI_DEVICE_ID_OMEGA_82C092G
+#define PCI_DEVICE_ID_OMEGA_82C092G 0x1221
+#endif
+
+#endif /* _LINUX_I82365_H */
diff --git a/linux/pcmcia-cs/modules/o2micro.h b/linux/pcmcia-cs/modules/o2micro.h
new file mode 100644
index 0000000..fd15234
--- /dev/null
+++ b/linux/pcmcia-cs/modules/o2micro.h
@@ -0,0 +1,160 @@
+/*
+ * o2micro.h 1.20 2002/03/03 14:16:57
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_O2MICRO_H
+#define _LINUX_O2MICRO_H
+
+#ifndef PCI_VENDOR_ID_O2
+#define PCI_VENDOR_ID_O2 0x1217
+#endif
+#ifndef PCI_DEVICE_ID_O2_6729
+#define PCI_DEVICE_ID_O2_6729 0x6729
+#endif
+#ifndef PCI_DEVICE_ID_O2_6730
+#define PCI_DEVICE_ID_O2_6730 0x673a
+#endif
+#ifndef PCI_DEVICE_ID_O2_6832
+#define PCI_DEVICE_ID_O2_6832 0x6832
+#endif
+#ifndef PCI_DEVICE_ID_O2_6836
+#define PCI_DEVICE_ID_O2_6836 0x6836
+#endif
+#ifndef PCI_DEVICE_ID_O2_6812
+#define PCI_DEVICE_ID_O2_6812 0x6872
+#endif
+#ifndef PCI_DEVICE_ID_O2_6922
+#define PCI_DEVICE_ID_O2_6922 0x6825
+#endif
+#ifndef PCI_DEVICE_ID_O2_6933
+#define PCI_DEVICE_ID_O2_6933 0x6933
+#endif
+#ifndef PCI_DEVICE_ID_O2_6912
+#define PCI_DEVICE_ID_O2_6912 0x6972
+#endif
+
+/* Additional PCI configuration registers */
+
+#define O2_MUX_CONTROL 0x90 /* 32 bit */
+#define O2_MUX_RING_OUT 0x0000000f
+#define O2_MUX_SKTB_ACTV 0x000000f0
+#define O2_MUX_SCTA_ACTV_ENA 0x00000100
+#define O2_MUX_SCTB_ACTV_ENA 0x00000200
+#define O2_MUX_SER_IRQ_ROUTE 0x0000e000
+#define O2_MUX_SER_PCI 0x00010000
+
+#define O2_MUX_SKTA_TURBO 0x000c0000 /* for 6833, 6860 */
+#define O2_MUX_SKTB_TURBO 0x00300000
+#define O2_MUX_AUX_VCC_3V 0x00400000
+#define O2_MUX_PCI_VCC_5V 0x00800000
+#define O2_MUX_PME_MUX 0x0f000000
+
+/* Additional ExCA registers */
+
+#define O2_MODE_A 0x38
+#define O2_MODE_A_2 0x26 /* for 6833B, 6860C */
+#define O2_MODE_A_CD_PULSE 0x04
+#define O2_MODE_A_SUSP_EDGE 0x08
+#define O2_MODE_A_HOST_SUSP 0x10
+#define O2_MODE_A_PWR_MASK 0x60
+#define O2_MODE_A_QUIET 0x80
+
+#define O2_MODE_B 0x39
+#define O2_MODE_B_2 0x2e /* for 6833B, 6860C */
+#define O2_MODE_B_IDENT 0x03
+#define O2_MODE_B_ID_BSTEP 0x00
+#define O2_MODE_B_ID_CSTEP 0x01
+#define O2_MODE_B_ID_O2 0x02
+#define O2_MODE_B_VS1 0x04
+#define O2_MODE_B_VS2 0x08
+#define O2_MODE_B_IRQ15_RI 0x80
+
+#define O2_MODE_C 0x3a
+#define O2_MODE_C_DREQ_MASK 0x03
+#define O2_MODE_C_DREQ_INPACK 0x01
+#define O2_MODE_C_DREQ_WP 0x02
+#define O2_MODE_C_DREQ_BVD2 0x03
+#define O2_MODE_C_ZVIDEO 0x08
+#define O2_MODE_C_IREQ_SEL 0x30
+#define O2_MODE_C_MGMT_SEL 0xc0
+
+#define O2_MODE_D 0x3b
+#define O2_MODE_D_IRQ_MODE 0x03
+#define O2_MODE_D_IRQ_PCPCI 0x00
+#define O2_MODE_D_IRQ_PCIWAY 0x02
+#define O2_MODE_D_IRQ_PCI 0x03
+#define O2_MODE_D_PCI_CLKRUN 0x04
+#define O2_MODE_D_CB_CLKRUN 0x08
+#define O2_MODE_D_SKT_ACTV 0x20
+#define O2_MODE_D_PCI_FIFO 0x40 /* for OZ6729, OZ6730 */
+#define O2_MODE_D_W97_IRQ 0x40
+#define O2_MODE_D_ISA_IRQ 0x80
+
+#define O2_MHPG_DMA 0x3c
+#define O2_MHPG_CHANNEL 0x07
+#define O2_MHPG_CINT_ENA 0x08
+#define O2_MHPG_CSC_ENA 0x10
+
+#define O2_FIFO_ENA 0x3d
+#define O2_FIFO_ZVIDEO_3 0x08
+#define O2_FIFO_PCI_FIFO 0x10
+#define O2_FIFO_POSTWR 0x40
+#define O2_FIFO_BUFFER 0x80
+
+#define O2_MODE_E 0x3e
+#define O2_MODE_E_MHPG_DMA 0x01
+#define O2_MODE_E_SPKR_OUT 0x02
+#define O2_MODE_E_LED_OUT 0x08
+#define O2_MODE_E_SKTA_ACTV 0x10
+
+/* Data structure for tracking vendor-specific state */
+typedef struct o2micro_state_t {
+ u_char mode_a; /* O2_MODE_A */
+ u_char mode_b; /* O2_MODE_B */
+ u_char mode_c; /* O2_MODE_C */
+ u_char mode_d; /* O2_MODE_D */
+ u_char mhpg; /* O2_MHPG_DMA */
+ u_char fifo; /* O2_FIFO_ENA */
+ u_char mode_e; /* O2_MODE_E */
+} o2micro_state_t;
+
+#define O2MICRO_PCIC_ID \
+ IS_OZ6729, IS_OZ6730, IS_OZ6832, IS_OZ6836, IS_OZ6812, \
+ IS_OZ6922, IS_OZ6933, IS_OZ6912
+
+#define O2MICRO_PCIC_INFO \
+ { "O2Micro OZ6729", IS_O2MICRO|IS_PCI|IS_VG_PWR, ID(O2, 6729) }, \
+ { "O2Micro OZ6730", IS_O2MICRO|IS_PCI|IS_VG_PWR, ID(O2, 6730) }, \
+ { "O2Micro OZ6832/33", IS_O2MICRO|IS_CARDBUS, ID(O2, 6832) }, \
+ { "O2Micro OZ6836/60", IS_O2MICRO|IS_CARDBUS, ID(O2, 6836) }, \
+ { "O2Micro OZ6812", IS_O2MICRO|IS_CARDBUS, ID(O2, 6812) }, \
+ { "O2Micro OZ6922", IS_O2MICRO|IS_CARDBUS, ID(O2, 6922) }, \
+ { "O2Micro OZ6933", IS_O2MICRO|IS_CARDBUS, ID(O2, 6933) }, \
+ { "O2Micro OZ6912", IS_O2MICRO|IS_CARDBUS, ID(O2, 6912) }
+
+#endif /* _LINUX_O2MICRO_H */
diff --git a/linux/pcmcia-cs/modules/pci_fixup.c b/linux/pcmcia-cs/modules/pci_fixup.c
new file mode 100644
index 0000000..6cbcd03
--- /dev/null
+++ b/linux/pcmcia-cs/modules/pci_fixup.c
@@ -0,0 +1,677 @@
+/*======================================================================
+
+ Kernel fixups for PCI device support
+
+ pci_fixup.c 1.33 2002/10/12 19:02:59
+
+ PCI bus fixups: various bits of code that don't really belong in
+ the PCMCIA subsystem, but may or may not be available from the
+ kernel, depending on kernel version. The basic idea is to make
+ 2.0.* and 2.2.* kernels look like they have the 2.3.* features.
+
+======================================================================*/
+
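+/* Provided below when the running kernel lacks them: pci_find_slot() and
+   pci_find_class() lookups, a "$PIR" interrupt routing table parser, and
+   minimal pci_enable_device()/pci_set_power_state() substitutes geared
+   toward CardBus bridges. */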
+#define __NO_VERSION__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/* We use these for setting up CardBus bridges */
+#include "yenta.h"
+#include "i82365.h"
+
+#define VERSION KERNEL_VERSION
+#if (LINUX_VERSION_CODE < VERSION(2,3,24))
+
+/* Default memory base addresses for CardBus controllers */
+static u_int cb_mem_base[] = { 0x0, 0x68000000, 0xf8000000 };
+MODULE_PARM(cb_mem_base, "i");
+
+/* PCI bus number overrides for CardBus controllers */
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+INT_MODULE_PARM(cb_bus_base, 0);
+INT_MODULE_PARM(cb_bus_step, 2);
+INT_MODULE_PARM(cb_pci_irq, 0);
+
+#endif
+
+/* (exported) mask of interrupts reserved for PCI devices */
+u32 pci_irq_mask = 0;
+
+/*======================================================================
+
+ Basic PCI services missing from older kernels: device lookup, etc
+
+======================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,1,0))
+#ifndef MACH
+/* Already defined in drivers/pci/pci.c. */
+struct pci_dev *pci_devices = NULL;
+struct pci_bus pci_root = {
+ parent: NULL,
+ children: NULL,
+ next: NULL,
+ self: NULL,
+ devices: NULL,
+ number: 0
+};
+#endif
+#endif
+
+#if (LINUX_VERSION_CODE < VERSION(2,1,93))
+
+struct pci_dev *pci_find_slot(u_int bus, u_int devfn)
+{
+ struct pci_dev *dev;
+ for (dev = pci_devices; dev; dev = dev->next)
+ if ((dev->devfn == devfn) && (bus == dev->bus->number))
+ return dev;
+#if (LINUX_VERSION_CODE > VERSION(2,1,0))
+ return NULL;
+#else
+ {
+ struct pci_bus *b;
+ u8 hdr;
+ u32 id, class;
+
+ if (pcibios_read_config_byte(bus, devfn & ~7, PCI_HEADER_TYPE, &hdr))
+ return NULL;
+ if (PCI_FUNC(devfn) && !(hdr & 0x80))
+ return NULL;
+ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id);
+ if ((id == 0) || (id == 0xffffffff))
+ return NULL;
+ dev = kmalloc(sizeof *dev, GFP_ATOMIC);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, sizeof *dev);
+ dev->devfn = devfn;
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &dev->irq);
+ dev->vendor = id & 0xffff;
+ dev->device = id >> 16;
+ pcibios_read_config_dword(bus, devfn, PCI_CLASS_REVISION, &class);
+ if (dev->irq == 255)
+ dev->irq = 0;
+ dev->class = class >> 8;
+ for (b = &pci_root; b; b = b->next)
+ if (b->number == bus) break;
+ if (!b) {
+ b = kmalloc(sizeof *b, GFP_ATOMIC);
+ if (!b) {
+ kfree(dev);
+ return NULL;
+ }
+ memset(b, 0, sizeof *b);
+ b->number = bus;
+ b->next = pci_root.next;
+ pci_root.next = b;
+ }
+ dev->bus = b;
+ return dev;
+ }
+#endif
+}
+
+struct pci_dev *pci_find_class(u_int class, struct pci_dev *from)
+{
+ static u16 index = 0;
+ u8 bus, devfn;
+ if (from == NULL)
+ index = 0;
+ if (pcibios_find_class(class, index++, &bus, &devfn) == 0)
+ return pci_find_slot(bus, devfn);
+ else
+ return NULL;
+}
+
+#endif /* (LINUX_VERSION_CODE < VERSION(2,1,93)) */
+
+/*======================================================================
+
+ PCI Interrupt Routing Table parser
+
+ This only needs to be done once per boot: we scan the BIOS for
+ the routing table, and then look for devices that have interrupt
+ assignments that the kernel doesn't know about. If we find any,
+ we update their pci_dev structures and write the PCI interrupt
+ line registers.
+
+======================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,3,24)) && defined(__i386__)
+
+#pragma pack(1)
+
+struct slot_entry {
+ u8 bus, devfn;
+ struct pirq_pin {
+ u8 link;
+ u16 irq_map;
+ } pin[4];
+ u8 slot;
+ u8 reserved;
+};
+
+struct routing_table {
+ u32 signature;
+ u8 minor, major;
+ u16 size;
+ u8 bus, devfn;
+ u16 pci_mask;
+ u32 compat;
+ u32 miniport;
+ u8 reserved[11];
+ u8 checksum;
+ struct slot_entry entry[0];
+};
+
+#pragma pack()
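+/* The packed structures above mirror the BIOS "$PIR" PCI IRQ Routing Table
+   byte for byte, so scan_pirq_table() can overlay them directly on the
+   table found in the 0xf0000-0xfffff BIOS area. */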
+
+/*
+ The meaning of the link bytes in the routing table is vendor
+ specific. We need code to get and set the routing information.
+*/
+
+static u8 pIIx_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq;
+ /* link should be 0x60, 0x61, 0x62, 0x63 */
+ pci_read_config_byte(router, link, &pirq);
+ return (pirq < 16) ? pirq : 0;
+}
+
+static void pIIx_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ pci_write_config_byte(router, link, irq);
+}
+
+static u8 via_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq = 0;
+ /* link should be 1, 2, 3, 5 */
+ if (link < 6)
+ pci_read_config_byte(router, 0x55 + (link>>1), &pirq);
+ return (link & 1) ? (pirq >> 4) : (pirq & 15);
+}
+
+static void via_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ u8 pirq;
+ pci_read_config_byte(router, 0x55 + (link>>1), &pirq);
+ pirq &= (link & 1) ? 0x0f : 0xf0;
+ pirq |= (link & 1) ? (irq << 4) : (irq & 15);
+ pci_write_config_byte(router, 0x55 + (link>>1), pirq);
+}
+
+static u8 opti_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq = 0;
+ /* link should be 0x02, 0x12, 0x22, 0x32 */
+ if ((link & 0xcf) == 0x02)
+ pci_read_config_byte(router, 0xb8 + (link >> 5), &pirq);
+ return (link & 0x10) ? (pirq >> 4) : (pirq & 15);
+}
+
+static void opti_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ u8 pirq;
+ pci_read_config_byte(router, 0xb8 + (link >> 5), &pirq);
+ pirq &= (link & 0x10) ? 0x0f : 0xf0;
+ pirq |= (link & 0x10) ? (irq << 4) : (irq & 15);
+ pci_write_config_byte(router, 0xb8 + (link >> 5), pirq);
+}
+
+static u8 ali_link(struct pci_dev *router, u8 link)
+{
+ /* No, you're not dreaming */
+ static const u8 map[] =
+ { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
+ u8 pirq;
+ /* link should be 0x01..0x08 */
+ pci_read_config_byte(router, 0x48 + ((link-1)>>1), &pirq);
+ return (link & 1) ? map[pirq&15] : map[pirq>>4];
+}
+
+static void ali_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ /* Inverse of map in ali_link */
+ static const u8 map[] =
+ { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
+ u8 pirq;
+ pci_read_config_byte(router, 0x48 + ((link-1)>>1), &pirq);
+ pirq &= (link & 1) ? 0x0f : 0xf0;
+ pirq |= (link & 1) ? (map[irq] << 4) : (map[irq] & 15);
+ pci_write_config_byte(router, 0x48 + ((link-1)>>1), pirq);
+}
+
+static u8 cyrix_link(struct pci_dev *router, u8 link)
+{
+ u8 pirq;
+ /* link should be 1, 2, 3, 4 */
+ link--;
+ pci_read_config_byte(router, 0x5c + (link>>1), &pirq);
+ return ((link & 1) ? pirq >> 4 : pirq & 15);
+}
+
+static void cyrix_init(struct pci_dev *router, u8 link, u8 irq)
+{
+ u8 pirq;
+ link--;
+ pci_read_config_byte(router, 0x5c + (link>>1), &pirq);
+ pirq &= (link & 1) ? 0x0f : 0xf0;
+ pirq |= (link & 1) ? (irq << 4) : (irq & 15);
+ pci_write_config_byte(router, 0x5c + (link>>1), pirq);
+}
+
+/*
+ A table of all the PCI interrupt routers for which we know how to
+ interpret the link bytes.
+*/
+
+#ifndef PCI_DEVICE_ID_INTEL_82371FB_0
+#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82371SB_0
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82371AB_0
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82443MX_1
+#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7198
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801AA_0
+#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801AB_0
+#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801BA_0
+#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_82801BAM_0
+#define PCI_DEVICE_ID_INTEL_82801BAM_0 0x244c
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C586_0
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C596
+#define PCI_DEVICE_ID_VIA_82C596 0x0596
+#endif
+#ifndef PCI_DEVICE_ID_VIA_82C686
+#define PCI_DEVICE_ID_VIA_82C686 0x0686
+#endif
+#ifndef PCI_DEVICE_ID_SI
+#define PCI_DEVICE_ID_SI 0x1039
+#endif
+#ifndef PCI_DEVICE_ID_SI_503
+#define PCI_DEVICE_ID_SI_503 0x0008
+#endif
+#ifndef PCI_DEVICE_ID_SI_496
+#define PCI_DEVICE_ID_SI_496 0x0496
+#endif
+
+#define ID(a,b) PCI_VENDOR_ID_##a,PCI_DEVICE_ID_##a##_##b
+
+struct router {
+ u16 vendor, device;
+ u8 (*xlate)(struct pci_dev *, u8);
+ void (*init)(struct pci_dev *, u8, u8);
+} router_table[] = {
+ { ID(INTEL, 82371FB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82371SB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82371AB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82443MX_1), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801AA_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801AB_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801BA_0), &pIIx_link, &pIIx_init },
+ { ID(INTEL, 82801BAM_0), &pIIx_link, &pIIx_init },
+ { ID(VIA, 82C586_0), &via_link, &via_init },
+ { ID(VIA, 82C596), &via_link, &via_init },
+ { ID(VIA, 82C686), &via_link, &via_init },
+ { ID(OPTI, 82C700), &opti_link, &opti_init },
+ { ID(AL, M1533), &ali_link, &ali_init },
+ { ID(SI, 503), &pIIx_link, &pIIx_init },
+ { ID(SI, 496), &pIIx_link, &pIIx_init },
+ { ID(CYRIX, 5530_LEGACY), &cyrix_link, &cyrix_init }
+};
+#define ROUTER_COUNT (sizeof(router_table)/sizeof(router_table[0]))
+
+/* Global variables for current interrupt routing table */
+static struct routing_table *pirq = NULL;
+static struct pci_dev *router_dev = NULL;
+static struct router *router_info = NULL;
+
+#ifndef __va
+#define __va(x) (x)
+#endif
+
+static void scan_pirq_table(void)
+{
+ struct routing_table *r;
+ struct pci_dev *router, *dev;
+ u8 pin, fn, *p;
+ int i, j;
+ struct slot_entry *e;
+
+ /* Scan the BIOS for the routing table signature */
+ for (p = (u8 *)__va(0xf0000); p < (u8 *)__va(0xfffff); p += 16)
+ if ((p[0] == '$') && (p[1] == 'P') &&
+ (p[2] == 'I') && (p[3] == 'R')) break;
+ if (p >= (u8 *)__va(0xfffff))
+ return;
+
+ pirq = r = (struct routing_table *)p;
+ printk(KERN_INFO "PCI routing table version %d.%d at %#06x\n",
+ r->major, r->minor, (u32)r & 0xfffff);
+ for (i = j = 0; i < 16; i++)
+ j += (r->pci_mask >> i) & 1;
+ if (j > 4)
+ printk(KERN_NOTICE " bogus PCI irq mask %#04x!\n",
+ r->pci_mask);
+ else
+ pci_irq_mask |= r->pci_mask;
+
+ router_dev = router = pci_find_slot(r->bus, r->devfn);
+ if (router) {
+ for (i = 0; i < ROUTER_COUNT; i++) {
+ if ((router->vendor == router_table[i].vendor) &&
+ (router->device == router_table[i].device))
+ break;
+ if (((r->compat & 0xffff) == router_table[i].vendor) &&
+ ((r->compat >> 16) == router_table[i].device))
+ break;
+ }
+ if (i == ROUTER_COUNT)
+ printk(KERN_INFO " unknown PCI interrupt router %04x:%04x\n",
+ router->vendor, router->device);
+ else
+ router_info = &router_table[i];
+ }
+
+ for (e = r->entry; (u8 *)e < p+r->size; e++) {
+ for (fn = 0; fn < 8; fn++) {
+ dev = pci_find_slot(e->bus, e->devfn | fn);
+ if ((dev == NULL) || (dev->irq != 0)) continue;
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if ((pin == 0) || (pin == 255)) continue;
+ if (router_info) {
+ dev->irq = router_info->xlate(router, e->pin[pin-1].link);
+ } else {
+ /* Fallback: see if only one irq possible */
+ int map = e->pin[pin-1].irq_map;
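+ /* (map & (map-1)) clears the lowest set bit, so the test below is
+ true only when exactly one IRQ bit is set; ffs(map)-1 then gives
+ that IRQ number. */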
+ if (map && (!(map & (map-1))))
+ dev->irq = ffs(map)-1;
+ }
+ if (dev->irq) {
+ printk(KERN_INFO " %02x:%02x.%1x -> irq %d\n",
+ e->bus, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), dev->irq);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ dev->irq);
+ }
+ }
+ }
+}
+
+#endif /* (LINUX_VERSION_CODE < VERSION(2,3,24)) && defined(__i386__) */
+
+/*======================================================================
+
+ PCI device enabler
+
+ This is not at all generic... it is mostly a hack to correctly
+ configure CardBus bridges.
+
+======================================================================*/
+
+#if (LINUX_VERSION_CODE < VERSION(2,3,24))
+
+static int check_cb_mapping(u_int phys)
+{
+ /* A few sanity checks to validate the bridge mapping */
+ char *virt = ioremap(phys, 0x1000);
+ int ret = ((readb(virt+0x800+I365_IDENT) & 0x70) ||
+ (readb(virt+0x800+I365_CSC) &&
+ readb(virt+0x800+I365_CSC) &&
+ readb(virt+0x800+I365_CSC)));
+ int state = readl(virt+CB_SOCKET_STATE) >> 16;
+ ret |= (state & ~0x3000) || !(state & 0x3000);
+ ret |= readl(virt+CB_SOCKET_FORCE);
+ iounmap(virt);
+ return ret;
+}
+
+static void setup_cb_bridge(struct pci_dev *dev)
+{
+ u8 bus, sub;
+ u32 phys;
+ int i;
+
+ /* This is nasty, but where else can we put it? */
+ if (PCI_FUNC(dev->devfn) == 0) {
+ struct pci_dev *sib;
+ sib = pci_find_slot(dev->bus->number, dev->devfn+1);
+ if (sib) {
+ u8 a, b;
+ u32 c, d;
+ /* Check for bad PCI bus numbering */
+ pci_read_config_byte(dev, CB_CARDBUS_BUS, &a);
+ pci_read_config_byte(sib, CB_CARDBUS_BUS, &b);
+ if (a == b) {
+ pci_write_config_byte(dev, CB_CARDBUS_BUS, 0);
+ pci_write_config_byte(sib, CB_CARDBUS_BUS, 0);
+ }
+ /* check for bad register mapping */
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &c);
+ pci_read_config_dword(sib, PCI_BASE_ADDRESS_0, &d);
+ if ((c != 0) && (c == d)) {
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
+ pci_write_config_dword(sib, PCI_BASE_ADDRESS_0, 0);
+ }
+ }
+ }
+
+ /* Assign PCI bus numbers, if needed */
+ pci_read_config_byte(dev, CB_CARDBUS_BUS, &bus);
+ pci_read_config_byte(dev, CB_SUBORD_BUS, &sub);
+ if ((cb_bus_base > 0) || (bus == 0)) {
+ if (cb_bus_base <= 0) cb_bus_base = 0x20;
+ bus = cb_bus_base;
+ sub = cb_bus_base+cb_bus_step;
+ cb_bus_base += cb_bus_step+1;
+ pci_write_config_byte(dev, CB_CARDBUS_BUS, bus);
+ pci_write_config_byte(dev, CB_SUBORD_BUS, sub);
+ }
+
+ /* Create pci_bus structure for the CardBus, if needed */
+ {
+ struct pci_bus *child, *parent = dev->bus;
+ for (child = parent->children; child; child = child->next)
+ if (child->number == bus) break;
+ if (!child) {
+ child = kmalloc(sizeof(struct pci_bus), GFP_KERNEL);
+ memset(child, 0, sizeof(struct pci_bus));
+ child->self = dev;
+ child->primary = bus;
+ child->number = child->secondary = bus;
+ child->subordinate = sub;
+ child->parent = parent;
+#if (LINUX_VERSION_CODE >= VERSION(2,3,15))
+ child->ops = parent->ops;
+#endif
+ child->next = parent->children;
+ parent->children = child;
+ }
+ }
+
+ /* Map the CardBus bridge registers, if needed */
+ pci_write_config_dword(dev, CB_LEGACY_MODE_BASE, 0);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &phys);
+ if ((phys == 0) || (cb_mem_base[0] != 0)) {
+ /* Make sure the bridge is awake so we can test it */
+ pci_set_power_state(dev, 0);
+ for (i = 0; i < sizeof(cb_mem_base)/sizeof(u_int); i++) {
+ phys = cb_mem_base[i];
+ if (phys == 0) continue;
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, phys);
+ if ((i == 0) || (check_cb_mapping(phys) == 0)) break;
+ }
+ if (i == sizeof(cb_mem_base)/sizeof(u_int)) {
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0);
+ } else {
+ cb_mem_base[0] = cb_mem_base[i] + 0x1000;
+ }
+ }
+}
+
+#ifdef __i386__
+
+static u8 pirq_init(struct pci_dev *router, struct pirq_pin *pin)
+{
+ u16 map = pin->irq_map;
+ u8 irq = 0;
+ if (pirq->pci_mask)
+ map &= pirq->pci_mask;
+ if (cb_pci_irq)
+ map = 1<<cb_pci_irq;
+ /* Be conservative: only init irq if the mask is unambiguous */
+ if (map && (!(map & (map-1)))) {
+ irq = ffs(map)-1;
+ router_info->init(router, pin->link, irq);
+ pci_irq_mask |= (1<<irq);
+ }
+ return irq;
+}
+
+static void setup_cb_bridge_irq(struct pci_dev *dev)
+{
+ struct slot_entry *e;
+ u8 pin;
+ u32 phys;
+ char *virt;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &phys);
+ if (!pin || !phys)
+ return;
+ virt = ioremap(phys, 0x1000);
+ if (virt) {
+ /* Disable any pending interrupt sources */
+ writel(0, virt+CB_SOCKET_MASK);
+ writel(-1, virt+CB_SOCKET_EVENT);
+ iounmap(virt);
+ }
+ for (e = pirq->entry; (u8 *)e < (u8 *)pirq + pirq->size; e++) {
+ if ((e->bus != dev->bus->number) ||
+ (e->devfn != (dev->devfn & ~7)))
+ continue;
+ dev->irq = pirq_init(router_dev, &e->pin[pin-1]);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ break;
+ }
+}
+
+#endif
+
+int pci_enable_device(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
+ setup_cb_bridge(dev);
+ }
+#ifdef __i386__
+ /* In certain cases, if the interrupt can be deduced, but was
+ unrouted when the pirq table was scanned, we'll try to set it
+ up now. */
+ if (!dev->irq && pirq && (router_info) &&
+ ((dev->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS)) {
+ setup_cb_bridge_irq(dev);
+ }
+#endif
+ return 0;
+}
+
+int pci_set_power_state(struct pci_dev *dev, int state)
+{
+ u16 tmp, cmd;
+ u32 base, bus;
+ u8 a, b, pmcs;
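+ /* Walk the capability list for the power management capability; if the
+ bridge is not already in D0, force it there, saving and restoring
+ BAR0, the CardBus bus numbers, and the command register, since those
+ registers may be reset by the power state change. */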
+ pci_read_config_byte(dev, PCI_STATUS, &a);
+ if (a & PCI_STATUS_CAPLIST) {
+ pci_read_config_byte(dev, PCI_CB_CAPABILITY_POINTER, &b);
+ while (b != 0) {
+ pci_read_config_byte(dev, b+PCI_CAPABILITY_ID, &a);
+ if (a == PCI_CAPABILITY_PM) {
+ pmcs = b + PCI_PM_CONTROL_STATUS;
+ /* Make sure we're in D0 state */
+ pci_read_config_word(dev, pmcs, &tmp);
+ if (!(tmp & PCI_PMCS_PWR_STATE_MASK)) break;
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &base);
+ pci_read_config_dword(dev, CB_PRIMARY_BUS, &bus);
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_write_config_word(dev, pmcs, PCI_PMCS_PWR_STATE_D0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, base);
+ pci_write_config_dword(dev, CB_PRIMARY_BUS, bus);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ break;
+ }
+ pci_read_config_byte(dev, b+PCI_NEXT_CAPABILITY, &b);
+ }
+ }
+ return 0;
+}
+
+#endif /* (LINUX_VERSION_CODE < VERSION(2,3,24)) */
+
+/*======================================================================
+
+ General setup and cleanup entry points
+
+======================================================================*/
+
+void pci_fixup_init(void)
+{
+ struct pci_dev *p;
+
+#if (LINUX_VERSION_CODE < VERSION(2,3,24)) && defined(__i386__)
+ scan_pirq_table();
+ pci_for_each_dev(p)
+ if (((p->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) &&
+ (p->irq == 0)) break;
+ if (p && !pirq)
+ printk(KERN_INFO "No PCI interrupt routing table!\n");
+ if (!pirq && cb_pci_irq)
+ printk(KERN_INFO "cb_pci_irq will be ignored.\n");
+#endif
+
+ pci_for_each_dev(p)
+ pci_irq_mask |= (1<<p->irq);
+
+#ifdef __alpha__
+#define PIC 0x4d0
+ pci_irq_mask |= inb(PIC) | (inb(PIC+1) << 8);
+#endif
+}
+
+void pci_fixup_done(void)
+{
+#if (LINUX_VERSION_CODE < VERSION(2,1,0))
+ struct pci_dev *d, *dn;
+ struct pci_bus *b, *bn;
+ for (d = pci_devices; d; d = dn) {
+ dn = d->next;
+ kfree(d);
+ }
+ for (b = pci_root.next; b; b = bn) {
+ bn = b->next;
+ kfree(b);
+ }
+#endif
+}
diff --git a/linux/pcmcia-cs/modules/ricoh.h b/linux/pcmcia-cs/modules/ricoh.h
new file mode 100644
index 0000000..de62f8b
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ricoh.h
@@ -0,0 +1,161 @@
+/*
+ * ricoh.h 1.16 2002/08/13 15:17:14
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_RICOH_H
+#define _LINUX_RICOH_H
+
+#define RF5C_MODE_CTL 0x1f /* Mode control */
+#define RF5C_PWR_CTL 0x2f /* Mixed voltage control */
+#define RF5C_CHIP_ID 0x3a /* Chip identification */
+#define RF5C_MODE_CTL_3 0x3b /* Mode control 3 */
+
+/* I/O window address offset */
+#define RF5C_IO_OFF(w) (0x36+((w)<<1))
+
+/* Flags for RF5C_MODE_CTL */
+#define RF5C_MODE_ATA 0x01 /* ATA mode */
+#define RF5C_MODE_LED_ENA 0x02 /* IRQ 12 is LED */
+#define RF5C_MODE_CA21 0x04
+#define RF5C_MODE_CA22 0x08
+#define RF5C_MODE_CA23 0x10
+#define RF5C_MODE_CA24 0x20
+#define RF5C_MODE_CA25 0x40
+#define RF5C_MODE_3STATE_BIT7 0x80
+
+/* Flags for RF5C_PWR_CTL */
+#define RF5C_PWR_VCC_3V 0x01
+#define RF5C_PWR_IREQ_HIGH 0x02
+#define RF5C_PWR_INPACK_ENA 0x04
+#define RF5C_PWR_5V_DET 0x08
+#define RF5C_PWR_TC_SEL 0x10 /* Terminal Count: irq 11 or 15 */
+#define RF5C_PWR_DREQ_LOW 0x20
+#define RF5C_PWR_DREQ_OFF 0x00 /* DREQ steering control */
+#define RF5C_PWR_DREQ_INPACK 0x40
+#define RF5C_PWR_DREQ_SPKR 0x80
+#define RF5C_PWR_DREQ_IOIS16 0xc0
+
+/* Values for RF5C_CHIP_ID */
+#define RF5C_CHIP_RF5C296 0x32
+#define RF5C_CHIP_RF5C396 0xb2
+
+/* Flags for RF5C_MODE_CTL_3 */
+#define RF5C_MCTL3_DISABLE 0x01 /* Disable PCMCIA interface */
+#define RF5C_MCTL3_DMA_ENA 0x02
+
+/* Register definitions for Ricoh PCI-to-CardBus bridges */
+
+#ifndef PCI_VENDOR_ID_RICOH
+#define PCI_VENDOR_ID_RICOH 0x1180
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C465
+#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C466
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C475
+#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C476
+#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C477
+#define PCI_DEVICE_ID_RICOH_RL5C477 0x0477
+#endif
+#ifndef PCI_DEVICE_ID_RICOH_RL5C478
+#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+#endif
+
+/* Extra bits in CB_BRIDGE_CONTROL */
+#define RL5C46X_BCR_3E0_ENA 0x0800
+#define RL5C46X_BCR_3E2_ENA 0x1000
+
+/* Bridge Configuration Register */
+#define RL5C4XX_CONFIG 0x80 /* 16 bit */
+#define RL5C4XX_CONFIG_IO_1_MODE 0x0200
+#define RL5C4XX_CONFIG_IO_0_MODE 0x0100
+#define RL5C4XX_CONFIG_PREFETCH 0x0001
+
+/* Misc Control Register */
+#define RL5C4XX_MISC 0x82 /* 16 bit */
+#define RL5C4XX_MISC_HW_SUSPEND_ENA 0x0002
+#define RL5C4XX_MISC_VCCEN_POL 0x0100
+#define RL5C4XX_MISC_VPPEN_POL 0x0200
+#define RL5C46X_MISC_SUSPEND 0x0001
+#define RL5C46X_MISC_PWR_SAVE_2 0x0004
+#define RL5C46X_MISC_IFACE_BUSY 0x0008
+#define RL5C46X_MISC_B_LOCK 0x0010
+#define RL5C46X_MISC_A_LOCK 0x0020
+#define RL5C46X_MISC_PCI_LOCK 0x0040
+#define RL5C47X_MISC_IFACE_BUSY 0x0004
+#define RL5C47X_MISC_PCI_INT_MASK 0x0018
+#define RL5C47X_MISC_PCI_INT_DIS 0x0020
+#define RL5C47X_MISC_SUBSYS_WR 0x0040
+#define RL5C47X_MISC_SRIRQ_ENA 0x0080
+#define RL5C47X_MISC_5V_DISABLE 0x0400
+#define RL5C47X_MISC_LED_POL 0x0800
+
+/* 16-bit Interface Control Register */
+#define RL5C4XX_16BIT_CTL 0x84 /* 16 bit */
+#define RL5C4XX_16CTL_IO_TIMING 0x0100
+#define RL5C4XX_16CTL_MEM_TIMING 0x0200
+#define RL5C46X_16CTL_LEVEL_1 0x0010
+#define RL5C46X_16CTL_LEVEL_2 0x0020
+
+/* 16-bit IO and memory timing registers */
+#define RL5C4XX_16BIT_IO_0 0x88 /* 16 bit */
+#define RL5C4XX_16BIT_MEM_0 0x8a /* 16 bit */
+#define RL5C4XX_SETUP_MASK 0x0007
+#define RL5C4XX_SETUP_SHIFT 0
+#define RL5C4XX_CMD_MASK 0x01f0
+#define RL5C4XX_CMD_SHIFT 4
+#define RL5C4XX_HOLD_MASK 0x1c00
+#define RL5C4XX_HOLD_SHIFT 10
+
+/* Data structure for tracking vendor-specific state */
+typedef struct ricoh_state_t {
+ u_short config; /* RL5C4XX_CONFIG */
+ u_short misc; /* RL5C4XX_MISC */
+ u_short ctl; /* RL5C4XX_16BIT_CTL */
+ u_short io; /* RL5C4XX_16BIT_IO_0 */
+ u_short mem; /* RL5C4XX_16BIT_MEM_0 */
+} ricoh_state_t;
+
+#define RICOH_PCIC_ID \
+ IS_RL5C465, IS_RL5C466, IS_RL5C475, IS_RL5C476, IS_RL5C477, IS_RL5C478
+
+#define RICOH_PCIC_INFO \
+ { "Ricoh RL5C465", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C465) }, \
+ { "Ricoh RL5C466", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C466) }, \
+ { "Ricoh RL5C475", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C475) }, \
+ { "Ricoh RL5C476", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C476) }, \
+ { "Ricoh RL5C477", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C477) }, \
+ { "Ricoh RL5C478", IS_RICOH|IS_CARDBUS, ID(RICOH, RL5C478) }
+
+#endif /* _LINUX_RICOH_H */
diff --git a/linux/pcmcia-cs/modules/rsrc_mgr.c b/linux/pcmcia-cs/modules/rsrc_mgr.c
new file mode 100644
index 0000000..a94926a
--- /dev/null
+++ b/linux/pcmcia-cs/modules/rsrc_mgr.c
@@ -0,0 +1,877 @@
+/*======================================================================
+
+ Resource management routines
+
+ rsrc_mgr.c 1.94 2003/12/12 17:12:53
+
+ The contents of this file are subject to the Mozilla Public
+ License Version 1.1 (the "License"); you may not use this file
+ except in compliance with the License. You may obtain a copy of
+ the License at http://www.mozilla.org/MPL/
+
+ Software distributed under the License is distributed on an "AS
+ IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ implied. See the License for the specific language governing
+ rights and limitations under the License.
+
+ The initial developer of the original code is David A. Hinds
+ <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+ Alternatively, the contents of this file may be used under the
+ terms of the GNU General Public License version 2 (the "GPL"), in
+ which case the provisions of the GPL are applicable instead of the
+ above. If you wish to allow the use of your version of this file
+ only under the terms of the GPL and not to allow others to use
+ your version of this file under the MPL, indicate your decision
+ by deleting the provisions above and replace them with the notice
+ and other provisions required by the GPL. If you do not delete
+ the provisions above, a recipient may use your version of this
+ file under either the MPL or the GPL.
+
+======================================================================*/
+
+#define __NO_VERSION__
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/bulkmem.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
+
+INT_MODULE_PARM(probe_mem, 1); /* memory probe? */
+#ifdef CONFIG_ISA
+INT_MODULE_PARM(probe_io, 1); /* IO port probe? */
+INT_MODULE_PARM(mem_limit, 0x10000);
+#endif
+
+/*======================================================================
+
+ The resource_map_t structures are used to track what resources are
+ available for allocation for PC Card devices.
+
+======================================================================*/
+
+typedef struct resource_map_t {
+ u_long base, num;
+ struct resource_map_t *next;
+} resource_map_t;
+
+/* Memory resource database */
+static resource_map_t mem_db = { 0, 0, &mem_db };
+
+/* IO port resource database */
+static resource_map_t io_db = { 0, 0, &io_db };
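+
+/* Both databases are circular lists: the static head node (base ==
+   num == 0) acts as a sentinel, so traversals run from db.next until
+   the pointer wraps back to &db.  Entries are kept ordered by base
+   address. */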
+
+#ifdef CONFIG_ISA
+
+typedef struct irq_info_t {
+ u_int Attributes;
+ int time_share, dyn_share;
+ struct socket_info_t *Socket;
+} irq_info_t;
+
+/* Table of ISA IRQ assignments */
+static irq_info_t irq_table[16] = { { 0, 0, 0 }, /* etc */ };
+
+#endif
+
+/*======================================================================
+
+ Linux resource management extensions
+
+======================================================================*/
+
+#ifndef CONFIG_PNP_BIOS
+#define check_io_region(b,n) (0)
+#endif
+
+#if defined(CONFIG_PNP_BIOS) || !defined(HAVE_MEMRESERVE)
+
+#ifdef __SMP__
+static spinlock_t rsrc_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
+typedef struct resource_entry_t {
+ u_long base, num;
+ char *name;
+ struct resource_entry_t *next;
+} resource_entry_t;
+
+/* Ordered linked lists of allocated IO and memory blocks */
+#ifdef CONFIG_PNP_BIOS
+static resource_entry_t io_list = { 0, 0, NULL, NULL };
+#endif
+#ifndef HAVE_MEMRESERVE
+static resource_entry_t mem_list = { 0, 0, NULL, NULL };
+#endif
+
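+/* find_gap() walks the ordered list and returns the entry after which
+   [entry->base, entry->base+entry->num) can be linked without
+   overlapping anything already registered, or NULL if the region
+   overlaps an existing entry (or wraps around). */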
+static resource_entry_t *find_gap(resource_entry_t *root,
+ resource_entry_t *entry)
+{
+ resource_entry_t *p;
+
+ if (entry->base > entry->base+entry->num-1)
+ return NULL;
+ for (p = root; ; p = p->next) {
+ if ((p != root) && (p->base+p->num-1 >= entry->base)) {
+ p = NULL;
+ break;
+ }
+ if ((p->next == NULL) ||
+ (p->next->base > entry->base+entry->num-1))
+ break;
+ }
+ return p;
+}
+
+static int register_my_resource(resource_entry_t *list,
+ u_long base, u_long num, char *name)
+{
+ u_long flags;
+ resource_entry_t *p, *entry;
+
+ entry = kmalloc(sizeof(resource_entry_t), GFP_ATOMIC);
+ if (!entry) return -ENOMEM;
+ entry->base = base;
+ entry->num = num;
+ entry->name = name;
+
+ spin_lock_irqsave(&rsrc_lock, flags);
+ p = find_gap(list, entry);
+ if (p == NULL) {
+ spin_unlock_irqrestore(&rsrc_lock, flags);
+ kfree(entry);
+ return -EBUSY;
+ }
+ entry->next = p->next;
+ p->next = entry;
+ spin_unlock_irqrestore(&rsrc_lock, flags);
+ return 0;
+}
+
+static void release_my_resource(resource_entry_t *list,
+ u_long base, u_long num)
+{
+ u_long flags;
+ resource_entry_t *p, *q;
+
+ spin_lock_irqsave(&rsrc_lock, flags);
+ for (p = list; ; p = q) {
+ q = p->next;
+ if (q == NULL) break;
+ if ((q->base == base) && (q->num == num)) {
+ p->next = q->next;
+ kfree(q);
+ spin_unlock_irqrestore(&rsrc_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&rsrc_lock, flags);
+ return;
+}
+
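+/* Availability is checked with a trial registration: if the region can
+   be registered it conflicts with nothing, and it is released again
+   immediately. */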
+static int check_my_resource(resource_entry_t *list,
+ u_long base, u_long num)
+{
+ if (register_my_resource(list, base, num, NULL) != 0)
+ return -EBUSY;
+ release_my_resource(list, base, num);
+ return 0;
+}
+
+#ifdef CONFIG_PNP_BIOS
+int check_io_region(u_long base, u_long num)
+{
+ return check_my_resource(&io_list, base, num);
+}
+void request_io_region(u_long base, u_long num, char *name)
+{
+ register_my_resource(&io_list, base, num, name);
+}
+void release_io_region(u_long base, u_long num)
+{
+ release_my_resource(&io_list, base, num);
+}
+#ifdef HAS_PROC_BUS
+int proc_read_io(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ resource_entry_t *r;
+ u_long flags;
+ char *p = buf;
+
+ spin_lock_irqsave(&rsrc_lock, flags);
+ for (r = io_list.next; r; r = r->next)
+ p += sprintf(p, "%04lx-%04lx : %s\n", r->base,
+ r->base+r->num-1, r->name);
+ spin_unlock_irqrestore(&rsrc_lock, flags);
+ return (p - buf);
+}
+#endif
+#endif
+
+#ifndef HAVE_MEMRESERVE
+int check_mem_region(u_long base, u_long num)
+{
+ return check_my_resource(&mem_list, base, num);
+}
+void request_mem_region(u_long base, u_long num, char *name)
+{
+ register_my_resource(&mem_list, base, num, name);
+}
+void release_mem_region(u_long base, u_long num)
+{
+ release_my_resource(&mem_list, base, num);
+}
+#ifdef HAS_PROC_BUS
+int proc_read_mem(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ resource_entry_t *r;
+ u_long flags;
+ char *p = buf;
+
+ spin_lock_irqsave(&rsrc_lock, flags);
+ for (r = mem_list.next; r; r = r->next)
+ p += sprintf(p, "%08lx-%08lx : %s\n", r->base,
+ r->base+r->num-1, r->name);
+ spin_unlock_irqrestore(&rsrc_lock, flags);
+ return (p - buf);
+}
+#endif
+#endif
+
+#endif /* defined(CONFIG_PNP_BIOS) || !defined(HAVE_MEMRESERVE) */
+
+/*======================================================================
+
+ These manage the internal databases of available resources.
+
+======================================================================*/
+
+static int add_interval(resource_map_t *map, u_long base, u_long num)
+{
+ resource_map_t *p, *q;
+
+ for (p = map; ; p = p->next) {
+ if ((p != map) && (p->base+p->num-1 >= base))
+ return -1;
+ if ((p->next == map) || (p->next->base > base+num-1))
+ break;
+ }
+ q = kmalloc(sizeof(resource_map_t), GFP_KERNEL);
+ if (!q) return CS_OUT_OF_RESOURCE;
+ q->base = base; q->num = num;
+ q->next = p->next; p->next = q;
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int sub_interval(resource_map_t *map, u_long base, u_long num)
+{
+ resource_map_t *p, *q;
+
+ for (p = map; ; p = q) {
+ q = p->next;
+ if (q == map)
+ break;
+ if ((q->base+q->num > base) && (base+num > q->base)) {
+ if (q->base >= base) {
+ if (q->base+q->num <= base+num) {
+ /* Delete whole block */
+ p->next = q->next;
+ kfree(q);
+ /* don't advance the pointer yet */
+ q = p;
+ } else {
+ /* Cut off bit from the front */
+ q->num = q->base + q->num - base - num;
+ q->base = base + num;
+ }
+ } else if (q->base+q->num <= base+num) {
+ /* Cut off bit from the end */
+ q->num = base - q->base;
+ } else {
+ /* Split the block into two pieces */
+ p = kmalloc(sizeof(resource_map_t), GFP_KERNEL);
+ if (!p) return CS_OUT_OF_RESOURCE;
+ p->base = base+num;
+ p->num = q->base+q->num - p->base;
+ q->num = base - q->base;
+ p->next = q->next ; q->next = p;
+ }
+ }
+ }
+ return CS_SUCCESS;
+}
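+
+/* Illustrative example (not from the original source): if mem_db holds
+   a single window 0xc0000-0xfffff, then
+
+       sub_interval(&mem_db, 0xd0000, 0x10000);
+
+   splits it into 0xc0000-0xcffff and 0xe0000-0xfffff, while
+   add_interval() refuses a range that overlaps an existing window and
+   returns -1. */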
+
+/*======================================================================
+
+ These routines examine a region of IO or memory addresses to
+ determine what ranges might be genuinely available.
+
+======================================================================*/
+
+#ifdef CONFIG_ISA
+static void do_io_probe(ioaddr_t base, ioaddr_t num)
+{
+
+ ioaddr_t i, j, bad, any;
+ u_char *b, hole, most;
+
+ printk(KERN_INFO "cs: IO port probe 0x%04x-0x%04x:",
+ base, base+num-1);
+
+ /* First, what does a floating port look like? */
+ b = kmalloc(256, GFP_KERNEL);
+ if (!b) {
+ printk(KERN_INFO " kmalloc failed!\n");
+ return;
+ }
+ memset(b, 0, 256);
+ for (i = base, most = 0; i < base+num; i += 8) {
+ if (check_region(i, 8) || check_io_region(i, 8))
+ continue;
+ hole = inb(i);
+ for (j = 1; j < 8; j++)
+ if (inb(i+j) != hole) break;
+ if ((j == 8) && (++b[hole] > b[most]))
+ most = hole;
+ if (b[most] == 127) break;
+ }
+ kfree(b);
+
+ bad = any = 0;
+ for (i = base; i < base+num; i += 8) {
+ if (check_region(i, 8) || check_io_region(i, 8))
+ continue;
+ for (j = 0; j < 8; j++)
+ if (inb(i+j) != most) break;
+ if (j < 8) {
+ if (!any)
+ printk(" excluding");
+ if (!bad)
+ bad = any = i;
+ } else {
+ if (bad) {
+ sub_interval(&io_db, bad, i-bad);
+ printk(" %#04x-%#04x", bad, i-1);
+ bad = 0;
+ }
+ }
+ }
+ if (bad) {
+ if ((num > 16) && (bad == base) && (i == base+num)) {
+ printk(" nothing: probe failed.\n");
+ return;
+ } else {
+ sub_interval(&io_db, bad, i-bad);
+ printk(" %#04x-%#04x", bad, i-1);
+ }
+ }
+
+ printk(any ? "\n" : " clean.\n");
+}
+
+static int io_scan; /* = 0 */
+
+static void invalidate_io(void)
+{
+ io_scan = 0;
+}
+
+static void validate_io(void)
+{
+ resource_map_t *m, *n;
+ if (!probe_io || io_scan++)
+ return;
+ for (m = io_db.next; m != &io_db; m = n) {
+ n = m->next;
+ do_io_probe(m->base, m->num);
+ }
+}
+
+#else /* CONFIG_ISA */
+
+#define validate_io() do { } while (0)
+#define invalidate_io() do { } while (0)
+
+#endif /* CONFIG_ISA */
+
+/*======================================================================
+
+ The memory probe. If the memory list includes a 64K-aligned block
+ below 1MB, we probe in 64K chunks, and as soon as we accumulate at
+ least mem_limit free space, we quit.
+
+======================================================================*/
+
+static int do_mem_probe(u_long base, u_long num,
+ int (*is_valid)(u_long), int (*do_cksum)(u_long))
+{
+ u_long i, j, bad, fail, step;
+
+ printk(KERN_INFO "cs: memory probe 0x%06lx-0x%06lx:",
+ base, base+num-1);
+ bad = fail = 0;
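+ /* Probe granularity: 8 KB steps for windows smaller than 128 KB,
+    otherwise roughly one sixteenth of the window, rounded down to an
+    8 KB multiple. */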
+ step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff);
+ for (i = j = base; i < base+num; i = j + step) {
+ if (!fail) {
+ for (j = i; j < base+num; j += step)
+ if ((check_mem_region(j, step) == 0) && is_valid(j))
+ break;
+ fail = ((i == base) && (j == base+num));
+ }
+ if (fail) {
+ for (j = i; j < base+num; j += 2*step)
+ if ((check_mem_region(j, 2*step) == 0) &&
+ do_cksum(j) && do_cksum(j+step))
+ break;
+ }
+ if (i != j) {
+ if (!bad) printk(" excluding");
+ printk(" %#05lx-%#05lx", i, j-1);
+ sub_interval(&mem_db, i, j-i);
+ bad += j-i;
+ }
+ }
+ printk(bad ? "\n" : " clean.\n");
+ return (num - bad);
+}
+
+#ifdef CONFIG_ISA
+
+static u_long inv_probe(int (*is_valid)(u_long),
+ int (*do_cksum)(u_long),
+ resource_map_t *m)
+{
+ u_long ok;
+ if (m == &mem_db)
+ return 0;
+ ok = inv_probe(is_valid, do_cksum, m->next);
+ if (ok) {
+ if (m->base >= 0x100000)
+ sub_interval(&mem_db, m->base, m->num);
+ return ok;
+ }
+ if (m->base < 0x100000)
+ return 0;
+ return do_mem_probe(m->base, m->num, is_valid, do_cksum);
+}
+
+static int hi_scan, lo_scan; /* = 0 */
+
+static void invalidate_mem(void)
+{
+ hi_scan = lo_scan = 0;
+}
+
+void validate_mem(int (*is_valid)(u_long), int (*do_cksum)(u_long),
+ int force_low)
+{
+ resource_map_t *m, mm;
+ static u_char order[] = { 0xd0, 0xe0, 0xc0, 0xf0 };
+ u_long b, i, ok = 0;
+
+ if (!probe_mem) return;
+ /* We do up to four passes through the list */
+ if (!force_low) {
+ if (hi_scan++ || (inv_probe(is_valid, do_cksum, mem_db.next) > 0))
+ return;
+ printk(KERN_NOTICE "cs: warning: no high memory space "
+ "available!\n");
+ }
+ if (lo_scan++) return;
+ for (m = mem_db.next; m != &mem_db; m = mm.next) {
+ mm = *m;
+ /* Only probe < 1 MB */
+ if (mm.base >= 0x100000) continue;
+ if ((mm.base | mm.num) & 0xffff) {
+ ok += do_mem_probe(mm.base, mm.num, is_valid, do_cksum);
+ continue;
+ }
+ /* Special probe for 64K-aligned block */
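+ /* order[] gives the high byte of each candidate 64 KB window, so the
+    probe tries 0xd0000, 0xe0000, 0xc0000, 0xf0000; the order presumably
+    reflects which ranges are most likely to be free. */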
+ for (i = 0; i < 4; i++) {
+ b = order[i] << 12;
+ if ((b >= mm.base) && (b+0x10000 <= mm.base+mm.num)) {
+ if (ok >= mem_limit)
+ sub_interval(&mem_db, b, 0x10000);
+ else
+ ok += do_mem_probe(b, 0x10000, is_valid, do_cksum);
+ }
+ }
+ }
+}
+
+#else /* CONFIG_ISA */
+
+#define invalidate_mem() do { } while (0)
+
+void validate_mem(int (*is_valid)(u_long), int (*do_cksum)(u_long),
+ int force_low)
+{
+ resource_map_t *m, *n;
+ static int done = 0;
+
+ if (!probe_mem || done++)
+ return;
+ for (m = mem_db.next; m != &mem_db; m = n) {
+ n = m->next;
+ if (do_mem_probe(m->base, m->num, is_valid, do_cksum))
+ return;
+ }
+}
+
+#endif /* CONFIG_ISA */
+
+/*======================================================================
+
+ These find ranges of I/O ports or memory addresses that are not
+ currently allocated by other devices.
+
+ The 'align' field should reflect the number of bits of address
+ that need to be preserved from the initial value of *base. It
+ should be a power of two, greater than or equal to 'num'. A value
+ of 0 means that all bits of *base are significant. *base should
+ also be strictly less than 'align'.
+
+======================================================================*/
+
+int find_io_region(ioaddr_t *base, ioaddr_t num, ioaddr_t align,
+ char *name)
+{
+ ioaddr_t try;
+ resource_map_t *m;
+
+ validate_io();
+ for (m = io_db.next; m != &io_db; m = m->next) {
+ try = (m->base & ~(align-1)) + *base;
+ for (try = (try >= m->base) ? try : try+align;
+ (try >= m->base) && (try+num <= m->base+m->num);
+ try += align) {
+ if ((check_region(try, num) == 0) &&
+ (check_io_region(try, num) == 0)) {
+ *base = try;
+ if (name) request_region(try, num, name);
+ return 0;
+ }
+ if (!align) break;
+ }
+ }
+ return -1;
+}
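+
+/* Illustrative usage sketch, not part of the original driver: how a
+   caller would honour the 'align' contract described above.  The
+   function name below is hypothetical and the block is compiled out. */
+#if 0
+static int example_grab_ports(void)
+{
+    /* Request 16 ports at offset 0x300 within some 0x400-aligned block,
+       i.e. one of 0x300, 0x700, 0xb00, ...  Note that *base (0x300) is
+       strictly less than align (0x400) and align >= num (16). */
+    ioaddr_t base = 0x300;
+    if (find_io_region(&base, 16, 0x400, "example") != 0)
+	return -1;	/* no suitable range was free */
+    /* on success, base holds the window that was found and claimed */
+    return 0;
+}
+#endif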
+
+int find_mem_region(u_long *base, u_long num, u_long align,
+ int force_low, char *name)
+{
+ u_long try;
+ resource_map_t *m;
+
+ while (1) {
+ for (m = mem_db.next; m != &mem_db; m = m->next) {
+ /* first pass >1MB, second pass <1MB */
+ if ((force_low != 0) ^ (m->base < 0x100000)) continue;
+ try = (m->base & ~(align-1)) + *base;
+ for (try = (try >= m->base) ? try : try+align;
+ (try >= m->base) && (try+num <= m->base+m->num);
+ try += align) {
+ if (check_mem_region(try, num) == 0) {
+ if (name) request_mem_region(try, num, name);
+ *base = try;
+ return 0;
+ }
+ if (!align) break;
+ }
+ }
+ if (force_low) break;
+ force_low++;
+ }
+ return -1;
+}
+
+/*======================================================================
+
+ This checks to see if an interrupt is available, with support
+ for interrupt sharing. We don't support reserving interrupts
+ yet. If the interrupt is available, we allocate it.
+
+======================================================================*/
+
+#ifdef CONFIG_ISA
+
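+/* An IRQ is judged available by briefly installing a throwaway handler:
+   if request_irq() succeeds the line is free, and the handler (which is
+   never expected to run) is released again at once. */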
+static void fake_irq(int i, void *d, struct pt_regs *r) { }
+static inline int check_irq(int irq)
+{
+ if (request_irq(irq, fake_irq, 0, "bogus", NULL) != 0)
+ return -1;
+ free_irq(irq, NULL);
+ return 0;
+}
+
+int try_irq(u_int Attributes, int irq, int specific)
+{
+ irq_info_t *info = &irq_table[irq];
+ if (info->Attributes & RES_ALLOCATED) {
+ switch (Attributes & IRQ_TYPE) {
+ case IRQ_TYPE_EXCLUSIVE:
+ return CS_IN_USE;
+ case IRQ_TYPE_TIME:
+ if ((info->Attributes & RES_IRQ_TYPE)
+ != RES_IRQ_TYPE_TIME)
+ return CS_IN_USE;
+ if (Attributes & IRQ_FIRST_SHARED)
+ return CS_BAD_ATTRIBUTE;
+ info->Attributes |= RES_IRQ_TYPE_TIME | RES_ALLOCATED;
+ info->time_share++;
+ break;
+ case IRQ_TYPE_DYNAMIC_SHARING:
+ if ((info->Attributes & RES_IRQ_TYPE)
+ != RES_IRQ_TYPE_DYNAMIC)
+ return CS_IN_USE;
+ if (Attributes & IRQ_FIRST_SHARED)
+ return CS_BAD_ATTRIBUTE;
+ info->Attributes |= RES_IRQ_TYPE_DYNAMIC | RES_ALLOCATED;
+ info->dyn_share++;
+ break;
+ }
+ } else {
+ if ((info->Attributes & RES_RESERVED) && !specific)
+ return CS_IN_USE;
+ if (check_irq(irq) != 0)
+ return CS_IN_USE;
+ switch (Attributes & IRQ_TYPE) {
+ case IRQ_TYPE_EXCLUSIVE:
+ info->Attributes |= RES_ALLOCATED;
+ break;
+ case IRQ_TYPE_TIME:
+ if (!(Attributes & IRQ_FIRST_SHARED))
+ return CS_BAD_ATTRIBUTE;
+ info->Attributes |= RES_IRQ_TYPE_TIME | RES_ALLOCATED;
+ info->time_share = 1;
+ break;
+ case IRQ_TYPE_DYNAMIC_SHARING:
+ if (!(Attributes & IRQ_FIRST_SHARED))
+ return CS_BAD_ATTRIBUTE;
+ info->Attributes |= RES_IRQ_TYPE_DYNAMIC | RES_ALLOCATED;
+ info->dyn_share = 1;
+ break;
+ }
+ }
+ return 0;
+}
+
+#endif
+
+/*====================================================================*/
+
+#ifdef CONFIG_ISA
+
+void undo_irq(u_int Attributes, int irq)
+{
+ irq_info_t *info;
+
+ info = &irq_table[irq];
+ switch (Attributes & IRQ_TYPE) {
+ case IRQ_TYPE_EXCLUSIVE:
+ info->Attributes &= RES_RESERVED;
+ break;
+ case IRQ_TYPE_TIME:
+ info->time_share--;
+ if (info->time_share == 0)
+ info->Attributes &= RES_RESERVED;
+ break;
+ case IRQ_TYPE_DYNAMIC_SHARING:
+ info->dyn_share--;
+ if (info->dyn_share == 0)
+ info->Attributes &= RES_RESERVED;
+ break;
+ }
+}
+
+#endif
+
+/*======================================================================
+
+ The various adjust_* calls form the external interface to the
+ resource database.
+
+======================================================================*/
+
+static int adjust_memory(adjust_t *adj)
+{
+ u_long base, num;
+ int i, ret;
+
+ base = adj->resource.memory.Base;
+ num = adj->resource.memory.Size;
+ if ((num == 0) || (base+num-1 < base))
+ return CS_BAD_SIZE;
+
+ ret = CS_SUCCESS;
+ switch (adj->Action) {
+ case ADD_MANAGED_RESOURCE:
+ ret = add_interval(&mem_db, base, num);
+ break;
+ case REMOVE_MANAGED_RESOURCE:
+ ret = sub_interval(&mem_db, base, num);
+ if (ret == CS_SUCCESS) {
+ invalidate_mem();
+ for (i = 0; i < sockets; i++) {
+ release_cis_mem(socket_table[i]);
+#ifdef CONFIG_CARDBUS
+ cb_release_cis_mem(socket_table[i]);
+#endif
+ }
+ }
+ break;
+ default:
+ ret = CS_UNSUPPORTED_FUNCTION;
+ }
+
+ return ret;
+}
+
+/*====================================================================*/
+
+static int adjust_io(adjust_t *adj)
+{
+ int base, num;
+
+ base = adj->resource.io.BasePort;
+ num = adj->resource.io.NumPorts;
+ if ((base < 0) || (base > 0xffff))
+ return CS_BAD_BASE;
+ if ((num <= 0) || (base+num > 0x10000) || (base+num <= base))
+ return CS_BAD_SIZE;
+
+ switch (adj->Action) {
+ case ADD_MANAGED_RESOURCE:
+ if (add_interval(&io_db, base, num) != 0)
+ return CS_IN_USE;
+ break;
+ case REMOVE_MANAGED_RESOURCE:
+ sub_interval(&io_db, base, num);
+ invalidate_io();
+ break;
+ default:
+ return CS_UNSUPPORTED_FUNCTION;
+ break;
+ }
+
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+static int adjust_irq(adjust_t *adj)
+{
+#ifdef CONFIG_ISA
+ int irq;
+ irq_info_t *info;
+
+ irq = adj->resource.irq.IRQ;
+ if ((irq < 0) || (irq > 15))
+ return CS_BAD_IRQ;
+ info = &irq_table[irq];
+
+ switch (adj->Action) {
+ case ADD_MANAGED_RESOURCE:
+ if (info->Attributes & RES_REMOVED)
+ info->Attributes &= ~(RES_REMOVED|RES_ALLOCATED);
+ else
+ if (adj->Attributes & RES_ALLOCATED)
+ return CS_IN_USE;
+ if (adj->Attributes & RES_RESERVED)
+ info->Attributes |= RES_RESERVED;
+ else
+ info->Attributes &= ~RES_RESERVED;
+ break;
+ case REMOVE_MANAGED_RESOURCE:
+ if (info->Attributes & RES_REMOVED)
+ return 0;
+ if (info->Attributes & RES_ALLOCATED)
+ return CS_IN_USE;
+ info->Attributes |= RES_ALLOCATED|RES_REMOVED;
+ info->Attributes &= ~RES_RESERVED;
+ break;
+ default:
+ return CS_UNSUPPORTED_FUNCTION;
+ break;
+ }
+#endif
+ return CS_SUCCESS;
+}
+
+/*====================================================================*/
+
+int adjust_resource_info(client_handle_t handle, adjust_t *adj)
+{
+ if (CHECK_HANDLE(handle))
+ return CS_BAD_HANDLE;
+
+ switch (adj->Resource) {
+ case RES_MEMORY_RANGE:
+ return adjust_memory(adj);
+ break;
+ case RES_IO_RANGE:
+ return adjust_io(adj);
+ break;
+ case RES_IRQ:
+ return adjust_irq(adj);
+ break;
+ }
+ return CS_UNSUPPORTED_FUNCTION;
+}
+
+/*====================================================================*/
+
+void release_resource_db(void)
+{
+ resource_map_t *p, *q;
+#if defined(CONFIG_PNP_BIOS) || !defined(HAVE_MEMRESERVE)
+ resource_entry_t *u, *v;
+#endif
+
+ for (p = mem_db.next; p != &mem_db; p = q) {
+ q = p->next;
+ kfree(p);
+ }
+ for (p = io_db.next; p != &io_db; p = q) {
+ q = p->next;
+ kfree(p);
+ }
+#ifdef CONFIG_PNP_BIOS
+ for (u = io_list.next; u; u = v) {
+ v = u->next;
+ kfree(u);
+ }
+#endif
+#ifndef HAVE_MEMRESERVE
+ for (u = mem_list.next; u; u = v) {
+ v = u->next;
+ kfree(u);
+ }
+#endif
+}
diff --git a/linux/pcmcia-cs/modules/smc34c90.h b/linux/pcmcia-cs/modules/smc34c90.h
new file mode 100644
index 0000000..0f3ddc0
--- /dev/null
+++ b/linux/pcmcia-cs/modules/smc34c90.h
@@ -0,0 +1,58 @@
+/*
+ * smc34c90.h 1.10 2001/08/24 12:15:34
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_SMC34C90_H
+#define _LINUX_SMC34C90_H
+
+#ifndef PCI_VENDOR_ID_SMC
+#define PCI_VENDOR_ID_SMC 0x10b3
+#endif
+
+#ifndef PCI_DEVICE_ID_SMC_34C90
+#define PCI_DEVICE_ID_SMC_34C90 0xb106
+#endif
+
+/* Register definitions for SMC 34C90 PCI-to-CardBus bridge */
+
+/* EEPROM Information Register */
+#define SMC34C90_EEINFO 0x0088
+#define SMC34C90_EEINFO_ONE_SOCKET 0x0001
+#define SMC34C90_EEINFO_5V_ONLY 0x0002
+#define SMC34C90_EEINFO_ISA_IRQ 0x0004
+#define SMC34C90_EEINFO_ZV_PORT 0x0008
+#define SMC34C90_EEINFO_RING 0x0010
+#define SMC34C90_EEINFO_LED 0x0020
+
+#define SMC_PCIC_ID \
+ IS_SMC34C90
+
+#define SMC_PCIC_INFO \
+ { "SMC 34C90", IS_CARDBUS, ID(SMC, 34C90) }
+
+#endif /* _LINUX_SMC34C90_H */
diff --git a/linux/pcmcia-cs/modules/ti113x.h b/linux/pcmcia-cs/modules/ti113x.h
new file mode 100644
index 0000000..c224d7a
--- /dev/null
+++ b/linux/pcmcia-cs/modules/ti113x.h
@@ -0,0 +1,264 @@
+/*
+ * ti113x.h 1.32 2003/02/13 06:28:09
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_TI113X_H
+#define _LINUX_TI113X_H
+
+#ifndef PCI_VENDOR_ID_TI
+#define PCI_VENDOR_ID_TI 0x104c
+#endif
+
+#ifndef PCI_DEVICE_ID_TI_1130
+#define PCI_DEVICE_ID_TI_1130 0xac12
+#endif
+#ifndef PCI_DEVICE_ID_TI_1031
+#define PCI_DEVICE_ID_TI_1031 0xac13
+#endif
+#ifndef PCI_DEVICE_ID_TI_1131
+#define PCI_DEVICE_ID_TI_1131 0xac15
+#endif
+#ifndef PCI_DEVICE_ID_TI_1210
+#define PCI_DEVICE_ID_TI_1210 0xac1a
+#endif
+#ifndef PCI_DEVICE_ID_TI_1211
+#define PCI_DEVICE_ID_TI_1211 0xac1e
+#endif
+#ifndef PCI_DEVICE_ID_TI_1220
+#define PCI_DEVICE_ID_TI_1220 0xac17
+#endif
+#ifndef PCI_DEVICE_ID_TI_1221
+#define PCI_DEVICE_ID_TI_1221 0xac19
+#endif
+#ifndef PCI_DEVICE_ID_TI_1250A
+#define PCI_DEVICE_ID_TI_1250A 0xac16
+#endif
+#ifndef PCI_DEVICE_ID_TI_1225
+#define PCI_DEVICE_ID_TI_1225 0xac1c
+#endif
+#ifndef PCI_DEVICE_ID_TI_1251A
+#define PCI_DEVICE_ID_TI_1251A 0xac1d
+#endif
+#ifndef PCI_DEVICE_ID_TI_1251B
+#define PCI_DEVICE_ID_TI_1251B 0xac1f
+#endif
+#ifndef PCI_DEVICE_ID_TI_1410
+#define PCI_DEVICE_ID_TI_1410 0xac50
+#endif
+#ifndef PCI_DEVICE_ID_TI_1420
+#define PCI_DEVICE_ID_TI_1420 0xac51
+#endif
+#ifndef PCI_DEVICE_ID_TI_1450
+#define PCI_DEVICE_ID_TI_1450 0xac1b
+#endif
+#ifndef PCI_DEVICE_ID_TI_1451
+#define PCI_DEVICE_ID_TI_1451 0xac52
+#endif
+#ifndef PCI_DEVICE_ID_TI_1510
+#define PCI_DEVICE_ID_TI_1510 0xac56
+#endif
+#ifndef PCI_DEVICE_ID_TI_1520
+#define PCI_DEVICE_ID_TI_1520 0xac55
+#endif
+#ifndef PCI_DEVICE_ID_TI_1620
+#define PCI_DEVICE_ID_TI_1620 0xac54
+#endif
+#ifndef PCI_DEVICE_ID_TI_4410
+#define PCI_DEVICE_ID_TI_4410 0xac41
+#endif
+#ifndef PCI_DEVICE_ID_TI_4450
+#define PCI_DEVICE_ID_TI_4450 0xac40
+#endif
+#ifndef PCI_DEVICE_ID_TI_4451
+#define PCI_DEVICE_ID_TI_4451 0xac42
+#endif
+#ifndef PCI_DEVICE_ID_TI_4510
+#define PCI_DEVICE_ID_TI_4510 0xac44
+#endif
+#ifndef PCI_DEVICE_ID_TI_4520
+#define PCI_DEVICE_ID_TI_4520 0xac46
+#endif
+#ifndef PCI_DEVICE_ID_TI_7410
+#define PCI_DEVICE_ID_TI_7410 0xac49
+#endif
+#ifndef PCI_DEVICE_ID_TI_7510
+#define PCI_DEVICE_ID_TI_7510 0xac47
+#endif
+#ifndef PCI_DEVICE_ID_TI_7610
+#define PCI_DEVICE_ID_TI_7610 0xac48
+#endif
+
+/* Register definitions for TI 113X PCI-to-CardBus bridges */
+
+/* System Control Register */
+#define TI113X_SYSTEM_CONTROL 0x80 /* 32 bit */
+#define TI113X_SCR_SMIROUTE 0x04000000
+#define TI113X_SCR_SMISTATUS 0x02000000
+#define TI113X_SCR_SMIENB 0x01000000
+#define TI113X_SCR_VCCPROT 0x00200000
+#define TI113X_SCR_REDUCEZV 0x00100000
+#define TI113X_SCR_CDREQEN 0x00080000
+#define TI113X_SCR_CDMACHAN 0x00070000
+#define TI113X_SCR_SOCACTIVE 0x00002000
+#define TI113X_SCR_PWRSTREAM 0x00000800
+#define TI113X_SCR_DELAYUP 0x00000400
+#define TI113X_SCR_DELAYDOWN 0x00000200
+#define TI113X_SCR_INTERROGATE 0x00000100
+#define TI113X_SCR_CLKRUN_SEL 0x00000080
+#define TI113X_SCR_PWRSAVINGS 0x00000040
+#define TI113X_SCR_SUBSYSRW 0x00000020
+#define TI113X_SCR_CB_DPAR 0x00000010
+#define TI113X_SCR_CDMA_EN 0x00000008
+#define TI113X_SCR_ASYNC_IRQ 0x00000004
+#define TI113X_SCR_KEEPCLK 0x00000002
+#define TI113X_SCR_CLKRUN_ENA 0x00000001
+
+#define TI122X_SCR_SER_STEP 0xc0000000
+#define TI122X_SCR_INTRTIE 0x20000000
+#define TI122X_SCR_P2CCLK 0x08000000
+#define TI122X_SCR_CBRSVD 0x00400000
+#define TI122X_SCR_MRBURSTDN 0x00008000
+#define TI122X_SCR_MRBURSTUP 0x00004000
+#define TI122X_SCR_RIMUX 0x00000001
+
+/* Multimedia Control Register */
+#define TI1250_MULTIMEDIA_CTL 0x84 /* 8 bit */
+#define TI1250_MMC_ZVOUTEN 0x80
+#define TI1250_MMC_PORTSEL 0x40
+#define TI1250_MMC_ZVEN1 0x02
+#define TI1250_MMC_ZVEN0 0x01
+
+#define TI1250_GENERAL_STATUS 0x85 /* 8 bit */
+#define TI1250_GPIO0_CONTROL 0x88 /* 8 bit */
+#define TI1250_GPIO1_CONTROL 0x89 /* 8 bit */
+#define TI1250_GPIO2_CONTROL 0x8a /* 8 bit */
+#define TI1250_GPIO3_CONTROL 0x8b /* 8 bit */
+#define TI12XX_IRQMUX 0x8c /* 32 bit */
+
+/* Retry Status Register */
+#define TI113X_RETRY_STATUS 0x90 /* 8 bit */
+#define TI113X_RSR_PCIRETRY 0x80
+#define TI113X_RSR_CBRETRY 0x40
+#define TI113X_RSR_TEXP_CBB 0x20
+#define TI113X_RSR_MEXP_CBB 0x10
+#define TI113X_RSR_TEXP_CBA 0x08
+#define TI113X_RSR_MEXP_CBA 0x04
+#define TI113X_RSR_TEXP_PCI 0x02
+#define TI113X_RSR_MEXP_PCI 0x01
+
+/* Card Control Register */
+#define TI113X_CARD_CONTROL 0x91 /* 8 bit */
+#define TI113X_CCR_RIENB 0x80
+#define TI113X_CCR_ZVENABLE 0x40
+#define TI113X_CCR_PCI_IRQ_ENA 0x20
+#define TI113X_CCR_PCI_IREQ 0x10
+#define TI113X_CCR_PCI_CSC 0x08
+#define TI113X_CCR_SPKROUTEN 0x02
+#define TI113X_CCR_IFG 0x01
+
+#define TI1220_CCR_PORT_SEL 0x20
+#define TI122X_CCR_AUD2MUX 0x04
+
+/* Device Control Register */
+#define TI113X_DEVICE_CONTROL 0x92 /* 8 bit */
+#define TI113X_DCR_5V_FORCE 0x40
+#define TI113X_DCR_3V_FORCE 0x20
+#define TI113X_DCR_IMODE_MASK 0x06
+#define TI113X_DCR_IMODE_ISA 0x02
+#define TI113X_DCR_IMODE_SERIAL 0x04
+
+#define TI12XX_DCR_IMODE_PCI_ONLY 0x00
+#define TI12XX_DCR_IMODE_ALL_SERIAL 0x06
+
+/* Buffer Control Register */
+#define TI113X_BUFFER_CONTROL 0x93 /* 8 bit */
+#define TI113X_BCR_CB_READ_DEPTH 0x08
+#define TI113X_BCR_CB_WRITE_DEPTH 0x04
+#define TI113X_BCR_PCI_READ_DEPTH 0x02
+#define TI113X_BCR_PCI_WRITE_DEPTH 0x01
+
+/* Diagnostic Register */
+#define TI1250_DIAGNOSTIC 0x93 /* 8 bit */
+#define TI1250_DIAG_TRUE_VALUE 0x80
+#define TI1250_DIAG_PCI_IREQ 0x40
+#define TI1250_DIAG_PCI_CSC 0x20
+#define TI1250_DIAG_ASYNC_CSC 0x01
+
+/* DMA Registers */
+#define TI113X_DMA_0 0x94 /* 32 bit */
+#define TI113X_DMA_1 0x98 /* 32 bit */
+
+/* ExCA IO offset registers */
+#define TI113X_IO_OFFSET(map) (0x36+((map)<<1))
+
+/* Data structure for tracking vendor-specific state */
+typedef struct ti113x_state_t {
+ u32 sysctl; /* TI113X_SYSTEM_CONTROL */
+ u8 cardctl; /* TI113X_CARD_CONTROL */
+ u8 devctl; /* TI113X_DEVICE_CONTROL */
+ u8 diag; /* TI1250_DIAGNOSTIC */
+ u32 irqmux; /* TI12XX_IRQMUX */
+} ti113x_state_t;
+
+#define TI_PCIC_ID \
+ IS_TI1130, IS_TI1131, IS_TI1031, IS_TI1210, IS_TI1211, \
+ IS_TI1220, IS_TI1221, IS_TI1225, IS_TI1250A, IS_TI1251A, \
+ IS_TI1251B, IS_TI1410, IS_TI1420, IS_TI1450, IS_TI1451, \
+ IS_TI1510, IS_TI1520, IS_TI1620, IS_TI4410, IS_TI4450, \
+ IS_TI4451, IS_TI4510, IS_TI4520, IS_TI7410, IS_TI7510, \
+ IS_TI7610
+
+#define TI_PCIC_INFO \
+ { "TI 1130", IS_TI|IS_CARDBUS, ID(TI, 1130) }, \
+ { "TI 1131", IS_TI|IS_CARDBUS, ID(TI, 1131) }, \
+ { "TI 1031", IS_TI|IS_CARDBUS, ID(TI, 1031) }, \
+ { "TI 1210", IS_TI|IS_CARDBUS, ID(TI, 1210) }, \
+ { "TI 1211", IS_TI|IS_CARDBUS, ID(TI, 1211) }, \
+ { "TI 1220", IS_TI|IS_CARDBUS, ID(TI, 1220) }, \
+ { "TI 1221", IS_TI|IS_CARDBUS, ID(TI, 1221) }, \
+ { "TI 1225", IS_TI|IS_CARDBUS, ID(TI, 1225) }, \
+ { "TI 1250A", IS_TI|IS_CARDBUS, ID(TI, 1250A) }, \
+ { "TI 1251A", IS_TI|IS_CARDBUS, ID(TI, 1251A) }, \
+ { "TI 1251B", IS_TI|IS_CARDBUS, ID(TI, 1251B) }, \
+ { "TI 1410", IS_TI|IS_CARDBUS, ID(TI, 1410) }, \
+ { "TI 1420", IS_TI|IS_CARDBUS, ID(TI, 1420) }, \
+ { "TI 1450", IS_TI|IS_CARDBUS, ID(TI, 1450) }, \
+ { "TI 1451", IS_TI|IS_CARDBUS, ID(TI, 1451) }, \
+ { "TI 1510", IS_TI|IS_CARDBUS, ID(TI, 1510) }, \
+ { "TI 1520", IS_TI|IS_CARDBUS, ID(TI, 1520) }, \
+ { "TI 1620", IS_TI|IS_CARDBUS, ID(TI, 1620) }, \
+ { "TI 4410", IS_TI|IS_CARDBUS, ID(TI, 4410) }, \
+ { "TI 4450", IS_TI|IS_CARDBUS, ID(TI, 4450) }, \
+ { "TI 4451", IS_TI|IS_CARDBUS, ID(TI, 4451) }, \
+ { "TI 4510", IS_TI|IS_CARDBUS, ID(TI, 4510) }, \
+ { "TI 4520", IS_TI|IS_CARDBUS, ID(TI, 4520) }, \
+ { "TI 7410", IS_TI|IS_CARDBUS, ID(TI, 7410) }, \
+ { "TI 7510", IS_TI|IS_CARDBUS, ID(TI, 7510) }, \
+ { "TI 7610", IS_TI|IS_CARDBUS, ID(TI, 7610) }
+
+#endif /* _LINUX_TI113X_H */
diff --git a/linux/pcmcia-cs/modules/topic.h b/linux/pcmcia-cs/modules/topic.h
new file mode 100644
index 0000000..88662c4
--- /dev/null
+++ b/linux/pcmcia-cs/modules/topic.h
@@ -0,0 +1,123 @@
+/*
+ * topic.h 1.15 2002/02/27 01:21:09
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ * topic.h $Release$ 2002/02/27 01:21:09
+ */
+
+#ifndef _LINUX_TOPIC_H
+#define _LINUX_TOPIC_H
+
+#ifndef PCI_VENDOR_ID_TOSHIBA
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC95_A
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95_A 0x0603
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC95_B
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95_B 0x060a
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC97
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+#endif
+#ifndef PCI_DEVICE_ID_TOSHIBA_TOPIC100
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617
+#endif
+
+/* Register definitions for Toshiba ToPIC95 controllers */
+
+#define TOPIC_SOCKET_CONTROL 0x0090 /* 32 bit */
+#define TOPIC_SCR_IRQSEL 0x00000001
+
+#define TOPIC_SLOT_CONTROL 0x00a0 /* 8 bit */
+#define TOPIC_SLOT_SLOTON 0x80
+#define TOPIC_SLOT_SLOTEN 0x40
+#define TOPIC_SLOT_ID_LOCK 0x20
+#define TOPIC_SLOT_ID_WP 0x10
+#define TOPIC_SLOT_PORT_MASK 0x0c
+#define TOPIC_SLOT_PORT_SHIFT 2
+#define TOPIC_SLOT_OFS_MASK 0x03
+
+#define TOPIC_CARD_CONTROL 0x00a1 /* 8 bit */
+#define TOPIC_CCR_INTB 0x20
+#define TOPIC_CCR_INTA 0x10
+#define TOPIC_CCR_CLOCK 0x0c
+#define TOPIC_CCR_PCICLK 0x0c
+#define TOPIC_CCR_PCICLK_2 0x08
+#define TOPIC_CCR_CCLK 0x04
+
+#define TOPIC97_INT_CONTROL 0x00a1 /* 8 bit */
+#define TOPIC97_ICR_INTB 0x20
+#define TOPIC97_ICR_INTA 0x10
+#define TOPIC97_ICR_STSIRQNP 0x04
+#define TOPIC97_ICR_IRQNP 0x02
+#define TOPIC97_ICR_IRQSEL 0x01
+
+#define TOPIC_CARD_DETECT 0x00a3 /* 8 bit */
+#define TOPIC_CDR_MODE_PC32 0x80
+#define TOPIC_CDR_VS1 0x04
+#define TOPIC_CDR_VS2 0x02
+#define TOPIC_CDR_SW_DETECT 0x01
+
+#define TOPIC_REGISTER_CONTROL 0x00a4 /* 32 bit */
+#define TOPIC_RCR_RESUME_RESET 0x80000000
+#define TOPIC_RCR_REMOVE_RESET 0x40000000
+#define TOPIC97_RCR_CLKRUN_ENA 0x20000000
+#define TOPIC97_RCR_TESTMODE 0x10000000
+#define TOPIC97_RCR_IOPLUP 0x08000000
+#define TOPIC_RCR_BUFOFF_PWROFF 0x02000000
+#define TOPIC_RCR_BUFOFF_SIGOFF 0x01000000
+#define TOPIC97_RCR_CB_DEV_MASK 0x0000f800
+#define TOPIC97_RCR_CB_DEV_SHIFT 11
+#define TOPIC97_RCR_RI_DISABLE 0x00000004
+#define TOPIC97_RCR_CAUDIO_OFF 0x00000002
+#define TOPIC_RCR_CAUDIO_INVERT 0x00000001
+
+#define TOPIC_FUNCTION_CONTROL 0x3e
+#define TOPIC_FCR_PWR_BUF_ENA 0x40
+#define TOPIC_FCR_CTR_ENA 0x08
+#define TOPIC_FCR_VS_ENA 0x02
+#define TOPIC_FCR_3V_ENA 0x01
+
+/* Data structure for tracking vendor-specific state */
+typedef struct topic_state_t {
+ u_char slot; /* TOPIC_SLOT_CONTROL */
+ u_char ccr; /* TOPIC_CARD_CONTROL */
+ u_char cdr; /* TOPIC_CARD_DETECT */
+ u_int rcr; /* TOPIC_REGISTER_CONTROL */
+ u_char fcr; /* TOPIC_FUNCTION_CONTROL */
+} topic_state_t;
+
+#define TOPIC_PCIC_ID \
+ IS_TOPIC95_A, IS_TOPIC95_B, IS_TOPIC97, IS_TOPIC100
+
+#define TOPIC_PCIC_INFO \
+ { "Toshiba ToPIC95-A", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC95_A) }, \
+ { "Toshiba ToPIC95-B", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC95_B) }, \
+ { "Toshiba ToPIC97", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC97) }, \
+ { "Toshiba ToPIC100", IS_CARDBUS|IS_TOPIC, ID(TOSHIBA, TOPIC100) }
+
+#endif /* _LINUX_TOPIC_H */
diff --git a/linux/pcmcia-cs/modules/vg468.h b/linux/pcmcia-cs/modules/vg468.h
new file mode 100644
index 0000000..93dc00b
--- /dev/null
+++ b/linux/pcmcia-cs/modules/vg468.h
@@ -0,0 +1,112 @@
+/*
+ * vg468.h 1.14 2001/08/24 12:15:34
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_VG468_H
+#define _LINUX_VG468_H
+
+/* Special bit in I365_IDENT used for Vadem chip detection */
+#define I365_IDENT_VADEM 0x08
+
+/* Special definitions in I365_POWER */
+#define VG468_VPP2_MASK 0x0c
+#define VG468_VPP2_5V 0x04
+#define VG468_VPP2_12V 0x08
+
+/* Unique Vadem registers */
+#define VG469_VSENSE 0x1f /* Card voltage sense */
+#define VG469_VSELECT 0x2f /* Card voltage select */
+#define VG468_CTL 0x38 /* Control register */
+#define VG468_TIMER 0x39 /* Timer control */
+#define VG468_MISC 0x3a /* Miscellaneous */
+#define VG468_GPIO_CFG 0x3b /* GPIO configuration */
+#define VG469_EXT_MODE 0x3c /* Extended mode register */
+#define VG468_SELECT 0x3d /* Programmable chip select */
+#define VG468_SELECT_CFG 0x3e /* Chip select configuration */
+#define VG468_ATA 0x3f /* ATA control */
+
+/* Flags for VG469_VSENSE */
+#define VG469_VSENSE_A_VS1 0x01
+#define VG469_VSENSE_A_VS2 0x02
+#define VG469_VSENSE_B_VS1 0x04
+#define VG469_VSENSE_B_VS2 0x08
+
+/* Flags for VG469_VSELECT */
+#define VG469_VSEL_VCC 0x03
+#define VG469_VSEL_5V 0x00
+#define VG469_VSEL_3V 0x03
+#define VG469_VSEL_MAX 0x0c
+#define VG469_VSEL_EXT_STAT 0x10
+#define VG469_VSEL_EXT_BUS 0x20
+#define VG469_VSEL_MIXED 0x40
+#define VG469_VSEL_ISA 0x80
+
+/* Flags for VG468_CTL */
+#define VG468_CTL_SLOW 0x01 /* 600ns memory timing */
+#define VG468_CTL_ASYNC 0x02 /* Asynchronous bus clocking */
+#define VG468_CTL_TSSI 0x08 /* Tri-state some outputs */
+#define VG468_CTL_DELAY 0x10 /* Card detect debounce */
+#define VG468_CTL_INPACK 0x20 /* Obey INPACK signal? */
+#define VG468_CTL_POLARITY 0x40 /* VCCEN polarity */
+#define VG468_CTL_COMPAT 0x80 /* Compatibility stuff */
+
+#define VG469_CTL_WS_COMPAT 0x04 /* Wait state compatibility */
+#define VG469_CTL_STRETCH 0x10 /* LED stretch */
+
+/* Flags for VG468_TIMER */
+#define VG468_TIMER_ZEROPWR 0x10 /* Zero power control */
+#define VG468_TIMER_SIGEN 0x20 /* Power up */
+#define VG468_TIMER_STATUS 0x40 /* Activity timer status */
+#define VG468_TIMER_RES 0x80 /* Timer resolution */
+#define VG468_TIMER_MASK 0x0f /* Activity timer timeout */
+
+/* Flags for VG468_MISC */
+#define VG468_MISC_GPIO 0x04 /* General-purpose IO */
+#define VG468_MISC_DMAWSB 0x08 /* DMA wait state control */
+#define VG469_MISC_LEDENA 0x10 /* LED enable */
+#define VG468_MISC_VADEMREV 0x40 /* Vadem revision control */
+#define VG468_MISC_UNLOCK 0x80 /* Unique register lock */
+
+/* Flags for VG469_EXT_MODE_A */
+#define VG469_MODE_VPPST 0x03 /* Vpp steering control */
+#define VG469_MODE_INT_SENSE 0x04 /* Internal voltage sense */
+#define VG469_MODE_CABLE 0x08
+#define VG469_MODE_COMPAT 0x10 /* i82365sl B or DF step */
+#define VG469_MODE_TEST 0x20
+#define VG469_MODE_RIO 0x40 /* Steer RIO to INTR? */
+
+/* Flags for VG469_EXT_MODE_B */
+#define VG469_MODE_B_3V 0x01 /* 3.3v for socket B */
+
+/* Data structure for tracking vendor-specific state */
+typedef struct vg46x_state_t {
+ u_char ctl; /* VG468_CTL */
+ u_char ema; /* VG468_EXT_MODE_A */
+} vg46x_state_t;
+
+#endif /* _LINUX_VG468_H */
diff --git a/linux/pcmcia-cs/modules/yenta.h b/linux/pcmcia-cs/modules/yenta.h
new file mode 100644
index 0000000..525d8ec
--- /dev/null
+++ b/linux/pcmcia-cs/modules/yenta.h
@@ -0,0 +1,156 @@
+/*
+ * yenta.h 1.20 2001/08/24 12:15:34
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_YENTA_H
+#define _LINUX_YENTA_H
+
+/* PCI Configuration Registers */
+
+#define PCI_STATUS_CAPLIST 0x10
+#define PCI_CB_CAPABILITY_POINTER 0x14 /* 8 bit */
+#define PCI_CAPABILITY_ID 0x00 /* 8 bit */
+#define PCI_CAPABILITY_PM 0x01
+#define PCI_NEXT_CAPABILITY 0x01 /* 8 bit */
+#define PCI_PM_CAPABILITIES 0x02 /* 16 bit */
+#define PCI_PMCAP_PME_D3COLD 0x8000
+#define PCI_PMCAP_PME_D3HOT 0x4000
+#define PCI_PMCAP_PME_D2 0x2000
+#define PCI_PMCAP_PME_D1 0x1000
+#define PCI_PMCAP_PME_D0 0x0800
+#define PCI_PMCAP_D2_CAP 0x0400
+#define PCI_PMCAP_D1_CAP 0x0200
+#define PCI_PMCAP_DYN_DATA 0x0100
+#define PCI_PMCAP_DSI 0x0020
+#define PCI_PMCAP_AUX_PWR 0x0010
+#define PCI_PMCAP_PMECLK 0x0008
+#define PCI_PMCAP_VERSION_MASK 0x0007
+#define PCI_PM_CONTROL_STATUS 0x04 /* 16 bit */
+#define PCI_PMCS_PME_STATUS 0x8000
+#define PCI_PMCS_DATASCALE_MASK 0x6000
+#define PCI_PMCS_DATASCALE_SHIFT 13
+#define PCI_PMCS_DATASEL_MASK 0x1e00
+#define PCI_PMCS_DATASEL_SHIFT 9
+#define PCI_PMCS_PME_ENABLE 0x0100
+#define PCI_PMCS_PWR_STATE_MASK 0x0003
+#define PCI_PMCS_PWR_STATE_D0 0x0000
+#define PCI_PMCS_PWR_STATE_D1 0x0001
+#define PCI_PMCS_PWR_STATE_D2 0x0002
+#define PCI_PMCS_PWR_STATE_D3 0x0003
+#define PCI_PM_BRIDGE_EXT 0x06 /* 8 bit */
+#define PCI_PM_DATA 0x07 /* 8 bit */
+
+#define CB_PRIMARY_BUS 0x18 /* 8 bit */
+#define CB_CARDBUS_BUS 0x19 /* 8 bit */
+#define CB_SUBORD_BUS 0x1a /* 8 bit */
+#define CB_LATENCY_TIMER 0x1b /* 8 bit */
+
+#define CB_MEM_BASE(m) (0x1c + 8*(m))
+#define CB_MEM_LIMIT(m) (0x20 + 8*(m))
+#define CB_IO_BASE(m) (0x2c + 8*(m))
+#define CB_IO_LIMIT(m) (0x30 + 8*(m))
+
+#define CB_BRIDGE_CONTROL 0x3e /* 16 bit */
+#define CB_BCR_PARITY_ENA 0x0001
+#define CB_BCR_SERR_ENA 0x0002
+#define CB_BCR_ISA_ENA 0x0004
+#define CB_BCR_VGA_ENA 0x0008
+#define CB_BCR_MABORT 0x0020
+#define CB_BCR_CB_RESET 0x0040
+#define CB_BCR_ISA_IRQ 0x0080
+#define CB_BCR_PREFETCH(m) (0x0100 << (m))
+#define CB_BCR_WRITE_POST 0x0400
+
+#define CB_LEGACY_MODE_BASE 0x44
+
+/* Memory mapped registers */
+
+#define CB_SOCKET_EVENT 0x0000
+#define CB_SE_CSTSCHG 0x00000001
+#define CB_SE_CCD 0x00000006
+#define CB_SE_CCD1 0x00000002
+#define CB_SE_CCD2 0x00000004
+#define CB_SE_PWRCYCLE 0x00000008
+
+#define CB_SOCKET_MASK 0x0004
+#define CB_SM_CSTSCHG 0x00000001
+#define CB_SM_CCD 0x00000006
+#define CB_SM_PWRCYCLE 0x00000008
+
+#define CB_SOCKET_STATE 0x0008
+#define CB_SS_CSTSCHG 0x00000001
+#define CB_SS_CCD 0x00000006
+#define CB_SS_CCD1 0x00000002
+#define CB_SS_CCD2 0x00000004
+#define CB_SS_PWRCYCLE 0x00000008
+#define CB_SS_16BIT 0x00000010
+#define CB_SS_32BIT 0x00000020
+#define CB_SS_CINT 0x00000040
+#define CB_SS_BADCARD 0x00000080
+#define CB_SS_DATALOST 0x00000100
+#define CB_SS_BADVCC 0x00000200
+#define CB_SS_5VCARD 0x00000400
+#define CB_SS_3VCARD 0x00000800
+#define CB_SS_XVCARD 0x00001000
+#define CB_SS_YVCARD 0x00002000
+#define CB_SS_VSENSE 0x00003c86
+#define CB_SS_5VSOCKET 0x10000000
+#define CB_SS_3VSOCKET 0x20000000
+#define CB_SS_XVSOCKET 0x40000000
+#define CB_SS_YVSOCKET 0x80000000
+
+#define CB_SOCKET_FORCE 0x000c
+#define CB_SF_CVSTEST 0x00004000
+
+#define CB_SOCKET_CONTROL 0x0010
+#define CB_SC_VPP_MASK 0x00000007
+#define CB_SC_VPP_OFF 0x00000000
+#define CB_SC_VPP_12V 0x00000001
+#define CB_SC_VPP_5V 0x00000002
+#define CB_SC_VPP_3V 0x00000003
+#define CB_SC_VPP_XV 0x00000004
+#define CB_SC_VPP_YV 0x00000005
+#define CB_SC_VCC_MASK 0x00000070
+#define CB_SC_VCC_OFF 0x00000000
+#define CB_SC_VCC_5V 0x00000020
+#define CB_SC_VCC_3V 0x00000030
+#define CB_SC_VCC_XV 0x00000040
+#define CB_SC_VCC_YV 0x00000050
+#define CB_SC_CCLK_STOP 0x00000080
+
+#define CB_SOCKET_POWER 0x0020
+#define CB_SP_CLK_CTRL 0x00000001
+#define CB_SP_CLK_CTRL_ENA 0x00010000
+#define CB_SP_CLK_MODE 0x01000000
+#define CB_SP_ACCESS 0x02000000
+
+/* Address bits 31..24 for memory windows for 16-bit cards,
+ accessible only by memory mapping the 16-bit register set */
+#define CB_MEM_PAGE(map) (0x40 + (map))
+
+#endif /* _LINUX_YENTA_H */
diff --git a/linux/pcmcia-cs/wireless/hermes.c b/linux/pcmcia-cs/wireless/hermes.c
new file mode 100644
index 0000000..d5ec3de
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/hermes.c
@@ -0,0 +1,552 @@
+/* hermes.c
+ *
+ * Driver core for the "Hermes" wireless MAC controller, as used in
+ * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
+ * work on the hfa3841 and hfa3842 MAC controller chips used in the
+ * Prism II chipsets.
+ *
+ * This is not a complete driver, just low-level access routines for
+ * the MAC controller itself.
+ *
+ * Based on the prism2 driver from Absolute Value Systems' linux-wlan
+ * project, the Linux wvlan_cs driver, Lucent's HCF-Light
+ * (wvlan_hcf.c) library, and the NetBSD wireless driver (in no
+ * particular order).
+ *
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ * Copyright (C) 2001, David Gibson, IBM <hermes@gibson.dropbear.id.au>
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/errno.h>
+
+#include "hermes.h"
+
+MODULE_DESCRIPTION("Low-level driver helper for Lucent Hermes chipset and Prism II HFA384x wireless MAC controller");
+MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual MPL/GPL");
+#endif
+
+/* These are maximum timeouts. Most often, the card will react much faster */
+#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */
+#define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */
+#define CMD_COMPL_TIMEOUT (20000) /* in iterations of ~10us */
+#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
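+
+/* For reference, the worst-case busy-waits these values imply (using the
+ * nominal per-iteration delays noted above) are roughly:
+ *   CMD_BUSY_TIMEOUT:      100 x ~1us  ~= 0.1 ms
+ *   CMD_INIT_TIMEOUT:    50000 x ~10us ~= 500 ms
+ *   CMD_COMPL_TIMEOUT:   20000 x ~10us ~= 200 ms
+ *   ALLOC_COMPL_TIMEOUT:  1000 x ~10us ~=  10 ms
+ */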
+
+/*
+ * Debugging helpers
+ */
+
+#define IO_TYPE(hw) ((hw)->io_space ? "IO " : "MEM ")
+#define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %s0x%lx: ", IO_TYPE(hw), hw->iobase); \
+ printk(stuff);} while (0)
+
+#undef HERMES_DEBUG
+#ifdef HERMES_DEBUG
+#include <stdarg.h>
+
+#define DEBUG(lvl, stuff...) if ( (lvl) <= HERMES_DEBUG) DMSG(stuff)
+
+#else /* ! HERMES_DEBUG */
+
+#define DEBUG(lvl, stuff...) do { } while (0)
+
+#endif /* ! HERMES_DEBUG */
+
+
+/*
+ * Internal functions
+ */
+
+/* Issue a command to the chip. Waiting for it to complete is the caller's
+ problem.
+
+ Returns -EBUSY if the command register is busy, 0 on success.
+
+ Callable from any context.
+*/
+static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
+{
+ int k = CMD_BUSY_TIMEOUT;
+ u16 reg;
+
+ /* First wait for the command register to unbusy */
+ reg = hermes_read_regn(hw, CMD);
+ while ( (reg & HERMES_CMD_BUSY) && k ) {
+ k--;
+ udelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+ if (reg & HERMES_CMD_BUSY) {
+ return -EBUSY;
+ }
+
+ hermes_write_regn(hw, PARAM2, 0);
+ hermes_write_regn(hw, PARAM1, 0);
+ hermes_write_regn(hw, PARAM0, param0);
+ hermes_write_regn(hw, CMD, cmd);
+
+ return 0;
+}
+
+/*
+ * Function definitions
+ */
+
+void hermes_struct_init(hermes_t *hw, ulong address,
+ int io_space, int reg_spacing)
+{
+ hw->iobase = address;
+ hw->io_space = io_space;
+ hw->reg_spacing = reg_spacing;
+ hw->inten = 0x0;
+
+#ifdef HERMES_DEBUG_BUFFER
+ hw->dbufp = 0;
+ memset(&hw->dbuf, 0xff, sizeof(hw->dbuf));
+ memset(&hw->profile, 0, sizeof(hw->profile));
+#endif
+}
+
+int hermes_init(hermes_t *hw)
+{
+ u16 status, reg;
+ int err = 0;
+ int k;
+
+ /* We don't want to be interrupted while resetting the chipset */
+ hw->inten = 0x0;
+ hermes_write_regn(hw, INTEN, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+
+ /* Normally it's a "can't happen" for the command register to
+ be busy when we go to issue a command because we are
+ serializing all commands. However we want to have some
+ chance of resetting the card even if it gets into a stupid
+ state, so we actually wait to see if the command register
+ will unbusy itself here. */
+ k = CMD_BUSY_TIMEOUT;
+ reg = hermes_read_regn(hw, CMD);
+ while (k && (reg & HERMES_CMD_BUSY)) {
+ if (reg == 0xffff) /* Special case - the card has probably been removed,
+ so don't wait for the timeout */
+ return -ENODEV;
+
+ k--;
+ udelay(1);
+ reg = hermes_read_regn(hw, CMD);
+ }
+
+ /* No need to explicitly handle the timeout - if we've timed
+ out hermes_issue_cmd() will probably return -EBUSY below */
+
+ /* According to the documentation, EVSTAT may contain
+ obsolete event occurrence information. We have to acknowledge
+ it by writing EVACK. */
+ reg = hermes_read_regn(hw, EVSTAT);
+ hermes_write_regn(hw, EVACK, reg);
+
+ /* We don't use hermes_docmd_wait here, because the reset wipes
+ the magic constant in SWSUPPORT0 away, and it gets confused */
+ err = hermes_issue_cmd(hw, HERMES_CMD_INIT, 0);
+ if (err)
+ return err;
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = CMD_INIT_TIMEOUT;
+ while ( (! (reg & HERMES_EV_CMD)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
+
+ if (! hermes_present(hw)) {
+ DEBUG(0, "hermes @ 0x%lx: Card removed during reset.\n",
+ hw->iobase);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (! (reg & HERMES_EV_CMD)) {
+ printk(KERN_ERR "hermes @ %s0x%lx: "
+ "Timeout waiting for card to reset (reg=0x%04x)!\n",
+ IO_TYPE(hw), hw->iobase, reg);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = hermes_read_regn(hw, STATUS);
+
+ hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
+
+ if (status & HERMES_STATUS_RESULT)
+ err = -EIO;
+
+ out:
+ return err;
+}
+
+/* Issue a command to the chip, and (busy!) wait for it to
+ * complete.
+ *
+ * Returns: < 0 on internal error, 0 on success, > 0 on error returned by the firmware
+ *
+ * Callable from any context, but locking is your problem. */
+int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ hermes_response_t *resp)
+{
+ int err;
+ int k;
+ u16 reg;
+ u16 status;
+
+ err = hermes_issue_cmd(hw, cmd, parm0);
+ if (err) {
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Card removed while issuing command.\n",
+ IO_TYPE(hw), hw->iobase);
+ err = -ENODEV;
+ } else
+ printk(KERN_ERR "hermes @ %s0x%lx: Error %d issuing command.\n",
+ IO_TYPE(hw), hw->iobase, err);
+ goto out;
+ }
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = CMD_COMPL_TIMEOUT;
+ while ( (! (reg & HERMES_EV_CMD)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Card removed while waiting for command completion.\n",
+ IO_TYPE(hw), hw->iobase);
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (! (reg & HERMES_EV_CMD)) {
+ printk(KERN_ERR "hermes @ %s0x%lx: "
+ "Timeout waiting for command completion.\n",
+ IO_TYPE(hw), hw->iobase);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = hermes_read_regn(hw, STATUS);
+ if (resp) {
+ resp->status = status;
+ resp->resp0 = hermes_read_regn(hw, RESP0);
+ resp->resp1 = hermes_read_regn(hw, RESP1);
+ resp->resp2 = hermes_read_regn(hw, RESP2);
+ }
+
+ hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
+
+ if (status & HERMES_STATUS_RESULT)
+ err = -EIO;
+
+ out:
+ return err;
+}
+
+int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
+{
+ int err = 0;
+ int k;
+ u16 reg;
+
+ if ( (size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX) )
+ return -EINVAL;
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL);
+ if (err) {
+ return err;
+ }
+
+ reg = hermes_read_regn(hw, EVSTAT);
+ k = ALLOC_COMPL_TIMEOUT;
+ while ( (! (reg & HERMES_EV_ALLOC)) && k) {
+ k--;
+ udelay(10);
+ reg = hermes_read_regn(hw, EVSTAT);
+ }
+
+ if (! hermes_present(hw)) {
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Card removed waiting for frame allocation.\n",
+ IO_TYPE(hw), hw->iobase);
+ return -ENODEV;
+ }
+
+ if (! (reg & HERMES_EV_ALLOC)) {
+ printk(KERN_ERR "hermes @ %s0x%lx: "
+ "Timeout waiting for frame allocation\n",
+ IO_TYPE(hw), hw->iobase);
+ return -ETIMEDOUT;
+ }
+
+ *fid = hermes_read_regn(hw, ALLOCFID);
+ hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC);
+
+ return 0;
+}
+
+
+/* Set up a BAP to read a particular chunk of data from card's internal buffer.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, >0 on error
+ * from firmware
+ *
+ * Callable from any context */
+static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
+{
+ int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0;
+ int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0;
+ int k;
+ u16 reg;
+
+ /* Paranoia.. */
+ if ( (offset > HERMES_BAP_OFFSET_MAX) || (offset % 2) )
+ return -EINVAL;
+
+ k = HERMES_BAP_BUSY_TIMEOUT;
+ reg = hermes_read_reg(hw, oreg);
+ while ((reg & HERMES_OFFSET_BUSY) && k) {
+ k--;
+ udelay(1);
+ reg = hermes_read_reg(hw, oreg);
+ }
+
+#ifdef HERMES_DEBUG_BUFFER
+ hw->profile[HERMES_BAP_BUSY_TIMEOUT - k]++;
+
+ if (k < HERMES_BAP_BUSY_TIMEOUT) {
+ struct hermes_debug_entry *e =
+ &hw->dbuf[(hw->dbufp++) % HERMES_DEBUG_BUFSIZE];
+ e->bap = bap;
+ e->id = id;
+ e->offset = offset;
+ e->cycles = HERMES_BAP_BUSY_TIMEOUT - k;
+ }
+#endif
+
+ if (reg & HERMES_OFFSET_BUSY)
+ return -ETIMEDOUT;
+
+ /* Now we actually set up the transfer */
+ hermes_write_reg(hw, sreg, id);
+ hermes_write_reg(hw, oreg, offset);
+
+ /* Wait for the BAP to be ready */
+ k = HERMES_BAP_BUSY_TIMEOUT;
+ reg = hermes_read_reg(hw, oreg);
+ while ( (reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) {
+ k--;
+ udelay(1);
+ reg = hermes_read_reg(hw, oreg);
+ }
+
+ if (reg & HERMES_OFFSET_BUSY) {
+ return -ETIMEDOUT;
+ }
+
+ if (reg & HERMES_OFFSET_ERR) {
+ return -EIO;
+ }
+
+
+ return 0;
+}
+
+/* Read a block of data from the chip's buffer, via the
+ * BAP. Synchronization/serialization is the caller's problem. len
+ * must be even.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
+ */
+int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
+ u16 id, u16 offset)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+
+ if ( (len < 0) || (len % 2) )
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, id, offset);
+ if (err)
+ goto out;
+
+ /* Actually do the transfer */
+ hermes_read_words(hw, dreg, buf, len/2);
+
+ out:
+ return err;
+}
+
+/* Write a block of data to the chip's buffer, via the
+ * BAP. Synchronization/serialization is the caller's problem. len
+ * must be even.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
+ */
+int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
+ u16 id, u16 offset)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+
+ if ( (len < 0) || (len % 2) )
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, id, offset);
+ if (err)
+ goto out;
+
+ /* Actually do the transfer */
+ hermes_write_words(hw, dreg, buf, len/2);
+
+ out:
+ return err;
+}
+
+/* Read a Length-Type-Value record from the card.
+ *
+ * If length is NULL, we ignore the length read from the card, and
+ * read the entire buffer regardless. This is useful because some of
+ * the configuration records appear to have incorrect lengths in
+ * practice.
+ *
+ * Callable from user or bh context. */
+int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
+ u16 *length, void *buf)
+{
+ int err = 0;
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ u16 rlength, rtype;
+ unsigned nwords;
+
+ if ( (bufsize < 0) || (bufsize % 2) )
+ return -EINVAL;
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL);
+ if (err)
+ return err;
+
+ err = hermes_bap_seek(hw, bap, rid, 0);
+ if (err)
+ return err;
+
+ rlength = hermes_read_reg(hw, dreg);
+
+ if (! rlength)
+ return -ENOENT;
+
+ rtype = hermes_read_reg(hw, dreg);
+
+ if (length)
+ *length = rlength;
+
+ if (rtype != rid)
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "hermes_read_ltv(): rid (0x%04x) does not match type (0x%04x)\n",
+ IO_TYPE(hw), hw->iobase, rid, rtype);
+ if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
+ printk(KERN_WARNING "hermes @ %s0x%lx: "
+ "Truncating LTV record from %d to %d bytes. "
+ "(rid=0x%04x, len=0x%04x)\n",
+ IO_TYPE(hw), hw->iobase,
+ HERMES_RECLEN_TO_BYTES(rlength), bufsize, rid, rlength);
+
+ nwords = min((unsigned)rlength - 1, bufsize / 2);
+ hermes_read_words(hw, dreg, buf, nwords);
+
+ return 0;
+}
+
+int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value)
+{
+ int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+ int err = 0;
+ unsigned count;
+
+ if (length == 0)
+ return -EINVAL;
+
+ err = hermes_bap_seek(hw, bap, rid, 0);
+ if (err)
+ return err;
+
+ hermes_write_reg(hw, dreg, length);
+ hermes_write_reg(hw, dreg, rid);
+
+ count = length - 1;
+
+ hermes_write_words(hw, dreg, value, count);
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
+ rid, NULL);
+
+ return err;
+}
+
+EXPORT_SYMBOL(hermes_struct_init);
+EXPORT_SYMBOL(hermes_init);
+EXPORT_SYMBOL(hermes_docmd_wait);
+EXPORT_SYMBOL(hermes_allocate);
+
+EXPORT_SYMBOL(hermes_bap_pread);
+EXPORT_SYMBOL(hermes_bap_pwrite);
+EXPORT_SYMBOL(hermes_read_ltv);
+EXPORT_SYMBOL(hermes_write_ltv);
+
+static int __init init_hermes(void)
+{
+ return 0;
+}
+
+static void __exit exit_hermes(void)
+{
+}
+
+module_init(init_hermes);
+module_exit(exit_hermes);
diff --git a/linux/pcmcia-cs/wireless/hermes.h b/linux/pcmcia-cs/wireless/hermes.h
new file mode 100644
index 0000000..b43fa0c
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/hermes.h
@@ -0,0 +1,456 @@
+/* hermes.h
+ *
+ * Driver core for the "Hermes" wireless MAC controller, as used in
+ * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
+ * work on the hfa3841 and hfa3842 MAC controller chips used in the
+ * Prism I & II chipsets.
+ *
+ * This is not a complete driver, just low-level access routines for
+ * the MAC controller itself.
+ *
+ * Based on the prism2 driver from Absolute Value Systems' linux-wlan
+ * project, the Linux wvlan_cs driver, Lucent's HCF-Light
+ * (wvlan_hcf.c) library, and the NetBSD wireless driver.
+ *
+ * Copyright (C) 2000, David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ *
+ * Portions taken from hfa384x.h, Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ *
+ * This file distributed under the GPL, version 2.
+ */
+
+#ifndef _HERMES_H
+#define _HERMES_H
+
+/* Notes on locking:
+ *
+ * As a module of low level hardware access routines, there is no
+ * locking. Users of this module should ensure that they serialize
+ * access to the hermes_t structure, and to the hardware
+*/
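+
+/* Illustrative sketch only (not part of this header): a typical caller
+ * serializes each group of hermes_* calls with its own lock, along the
+ * lines of the following (the names `priv' and `priv->lock' belong to
+ * the caller, not to anything defined here):
+ *
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&priv->lock, flags);
+ *	err = hermes_docmd_wait(hw, HERMES_CMD_ENABLE, 0, NULL);
+ *	spin_unlock_irqrestore(&priv->lock, flags);
+ */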
+
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/*
+ * Limits and constants
+ */
+#define HERMES_ALLOC_LEN_MIN (4)
+#define HERMES_ALLOC_LEN_MAX (2400)
+#define HERMES_LTV_LEN_MAX (34)
+#define HERMES_BAP_DATALEN_MAX (4096)
+#define HERMES_BAP_OFFSET_MAX (4096)
+#define HERMES_PORTID_MAX (7)
+#define HERMES_NUMPORTS_MAX (HERMES_PORTID_MAX+1)
+#define HERMES_PDR_LEN_MAX (260) /* in bytes, from EK */
+#define HERMES_PDA_RECS_MAX (200) /* a guess */
+#define HERMES_PDA_LEN_MAX (1024) /* in bytes, from EK */
+#define HERMES_SCANRESULT_MAX (35)
+#define HERMES_CHINFORESULT_MAX (8)
+#define HERMES_MAX_MULTICAST (16)
+#define HERMES_MAGIC (0x7d1f)
+
+/*
+ * Hermes register offsets
+ */
+#define HERMES_CMD (0x00)
+#define HERMES_PARAM0 (0x02)
+#define HERMES_PARAM1 (0x04)
+#define HERMES_PARAM2 (0x06)
+#define HERMES_STATUS (0x08)
+#define HERMES_RESP0 (0x0A)
+#define HERMES_RESP1 (0x0C)
+#define HERMES_RESP2 (0x0E)
+#define HERMES_INFOFID (0x10)
+#define HERMES_RXFID (0x20)
+#define HERMES_ALLOCFID (0x22)
+#define HERMES_TXCOMPLFID (0x24)
+#define HERMES_SELECT0 (0x18)
+#define HERMES_OFFSET0 (0x1C)
+#define HERMES_DATA0 (0x36)
+#define HERMES_SELECT1 (0x1A)
+#define HERMES_OFFSET1 (0x1E)
+#define HERMES_DATA1 (0x38)
+#define HERMES_EVSTAT (0x30)
+#define HERMES_INTEN (0x32)
+#define HERMES_EVACK (0x34)
+#define HERMES_CONTROL (0x14)
+#define HERMES_SWSUPPORT0 (0x28)
+#define HERMES_SWSUPPORT1 (0x2A)
+#define HERMES_SWSUPPORT2 (0x2C)
+#define HERMES_AUXPAGE (0x3A)
+#define HERMES_AUXOFFSET (0x3C)
+#define HERMES_AUXDATA (0x3E)
+
+/*
+ * CMD register bitmasks
+ */
+#define HERMES_CMD_BUSY (0x8000)
+#define HERMES_CMD_AINFO (0x7f00)
+#define HERMES_CMD_MACPORT (0x0700)
+#define HERMES_CMD_RECL (0x0100)
+#define HERMES_CMD_WRITE (0x0100)
+#define HERMES_CMD_PROGMODE (0x0300)
+#define HERMES_CMD_CMDCODE (0x003f)
+
+/*
+ * STATUS register bitmasks
+ */
+#define HERMES_STATUS_RESULT (0x7f00)
+#define HERMES_STATUS_CMDCODE (0x003f)
+
+/*
+ * OFFSET register bitmasks
+ */
+#define HERMES_OFFSET_BUSY (0x8000)
+#define HERMES_OFFSET_ERR (0x4000)
+#define HERMES_OFFSET_DATAOFF (0x0ffe)
+
+/*
+ * Event register bitmasks (INTEN, EVSTAT, EVACK)
+ */
+#define HERMES_EV_TICK (0x8000)
+#define HERMES_EV_WTERR (0x4000)
+#define HERMES_EV_INFDROP (0x2000)
+#define HERMES_EV_INFO (0x0080)
+#define HERMES_EV_DTIM (0x0020)
+#define HERMES_EV_CMD (0x0010)
+#define HERMES_EV_ALLOC (0x0008)
+#define HERMES_EV_TXEXC (0x0004)
+#define HERMES_EV_TX (0x0002)
+#define HERMES_EV_RX (0x0001)
+
+/*
+ * Command codes
+ */
+/*--- Controller Commands --------------------------*/
+#define HERMES_CMD_INIT (0x0000)
+#define HERMES_CMD_ENABLE (0x0001)
+#define HERMES_CMD_DISABLE (0x0002)
+#define HERMES_CMD_DIAG (0x0003)
+
+/*--- Buffer Mgmt Commands --------------------------*/
+#define HERMES_CMD_ALLOC (0x000A)
+#define HERMES_CMD_TX (0x000B)
+#define HERMES_CMD_CLRPRST (0x0012)
+
+/*--- Regulate Commands --------------------------*/
+#define HERMES_CMD_NOTIFY (0x0010)
+#define HERMES_CMD_INQUIRE (0x0011)
+
+/*--- Configure Commands --------------------------*/
+#define HERMES_CMD_ACCESS (0x0021)
+#define HERMES_CMD_DOWNLD (0x0022)
+
+/*--- Debugging Commands -----------------------------*/
+#define HERMES_CMD_MONITOR (0x0038)
+#define HERMES_MONITOR_ENABLE (0x000b)
+#define HERMES_MONITOR_DISABLE (0x000f)
+
+/*
+ * Frame structures and constants
+ */
+
+#define HERMES_DESCRIPTOR_OFFSET 0
+#define HERMES_802_11_OFFSET (14)
+#define HERMES_802_3_OFFSET (14+32)
+#define HERMES_802_2_OFFSET (14+32+14)
+
+struct hermes_rx_descriptor {
+ u16 status;
+ u32 time;
+ u8 silence;
+ u8 signal;
+ u8 rate;
+ u8 rxflow;
+ u32 reserved;
+} __attribute__ ((packed));
+
+#define HERMES_RXSTAT_ERR (0x0003)
+#define HERMES_RXSTAT_BADCRC (0x0001)
+#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
+#define HERMES_RXSTAT_MACPORT (0x0700)
+#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */
+#define HERMES_RXSTAT_MSGTYPE (0xE000)
+#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */
+#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */
+#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
+
+struct hermes_tx_descriptor {
+ u16 status;
+ u16 reserved1;
+ u16 reserved2;
+ u32 sw_support;
+ u8 retry_count;
+ u8 tx_rate;
+ u16 tx_control;
+} __attribute__ ((packed));
+
+#define HERMES_TXSTAT_RETRYERR (0x0001)
+#define HERMES_TXSTAT_AGEDERR (0x0002)
+#define HERMES_TXSTAT_DISCON (0x0004)
+#define HERMES_TXSTAT_FORMERR (0x0008)
+
+#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */
+#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */
+#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */
+#define HERMES_TXCTRL_ALT_RTRY (0x0020)
+
+/* Inquiry constants and data types */
+
+#define HERMES_INQ_TALLIES (0xF100)
+#define HERMES_INQ_SCAN (0xF101)
+#define HERMES_INQ_LINKSTATUS (0xF200)
+
+struct hermes_tallies_frame {
+ u16 TxUnicastFrames;
+ u16 TxMulticastFrames;
+ u16 TxFragments;
+ u16 TxUnicastOctets;
+ u16 TxMulticastOctets;
+ u16 TxDeferredTransmissions;
+ u16 TxSingleRetryFrames;
+ u16 TxMultipleRetryFrames;
+ u16 TxRetryLimitExceeded;
+ u16 TxDiscards;
+ u16 RxUnicastFrames;
+ u16 RxMulticastFrames;
+ u16 RxFragments;
+ u16 RxUnicastOctets;
+ u16 RxMulticastOctets;
+ u16 RxFCSErrors;
+ u16 RxDiscards_NoBuffer;
+ u16 TxDiscardsWrongSA;
+ u16 RxWEPUndecryptable;
+ u16 RxMsgInMsgFragments;
+ u16 RxMsgInBadMsgFragments;
+ /* These last fields are probably not available in very old firmwares */
+ u16 RxDiscards_WEPICVError;
+ u16 RxDiscards_WEPExcluded;
+} __attribute__ ((packed));
+
+/* Grabbed from wlan-ng - Thanks Mark... - Jean II
+ * This is the result of a scan inquiry command */
+/* Structure describing info about an Access Point */
+struct hermes_scan_apinfo {
+ u16 channel; /* Channel where the AP sits */
+ u16 noise; /* Noise level */
+ u16 level; /* Signal level */
+ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
+ u16 beacon_interv; /* Beacon interval ? */
+ u16 capabilities; /* Capabilities ? */
+ u8 essid[32]; /* ESSID of the network */
+ u8 rates[10]; /* Bit rate supported */
+ u16 proberesp_rate; /* ???? */
+} __attribute__ ((packed));
+/* Container */
+struct hermes_scan_frame {
+ u16 rsvd; /* ??? */
+ u16 scanreason; /* ??? */
+ struct hermes_scan_apinfo aps[35]; /* Scan result */
+} __attribute__ ((packed));
+#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
+#define HERMES_LINKSTATUS_CONNECTED (0x0001)
+#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
+#define HERMES_LINKSTATUS_AP_CHANGE (0x0003)
+#define HERMES_LINKSTATUS_AP_OUT_OF_RANGE (0x0004)
+#define HERMES_LINKSTATUS_AP_IN_RANGE (0x0005)
+#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006)
+
+struct hermes_linkstatus {
+ u16 linkstatus; /* Link status */
+} __attribute__ ((packed));
+
+// #define HERMES_DEBUG_BUFFER 1
+#define HERMES_DEBUG_BUFSIZE 4096
+struct hermes_debug_entry {
+ int bap;
+ u16 id, offset;
+ int cycles;
+};
+
+#ifdef __KERNEL__
+
+/* Timeouts */
+#define HERMES_BAP_BUSY_TIMEOUT (500) /* In iterations of ~1us */
+
+/* Basic control structure */
+typedef struct hermes {
+ unsigned long iobase;
+ int io_space; /* 1 for I/O-mapped access, 0 for memory-mapped access */
+#define HERMES_IO 1
+#define HERMES_MEM 0
+ int reg_spacing;
+#define HERMES_16BIT_REGSPACING 0
+#define HERMES_32BIT_REGSPACING 1
+
+ u16 inten; /* Which interrupts should be enabled? */
+
+#ifdef HERMES_DEBUG_BUFFER
+ struct hermes_debug_entry dbuf[HERMES_DEBUG_BUFSIZE];
+ unsigned long dbufp;
+ unsigned long profile[HERMES_BAP_BUSY_TIMEOUT+1];
+#endif
+} hermes_t;
+
+typedef struct hermes_response {
+ u16 status, resp0, resp1, resp2;
+} hermes_response_t;
+
+/* Register access convenience macros */
+#define hermes_read_reg(hw, off) ((hw)->io_space ? \
+ inw((hw)->iobase + ( (off) << (hw)->reg_spacing )) : \
+ readw((hw)->iobase + ( (off) << (hw)->reg_spacing )))
+#define hermes_write_reg(hw, off, val) ((hw)->io_space ? \
+ outw_p((val), (hw)->iobase + ( (off) << (hw)->reg_spacing )) : \
+ writew((val), (hw)->iobase + ( (off) << (hw)->reg_spacing )))
+
+#define hermes_read_regn(hw, name) (hermes_read_reg((hw), HERMES_##name))
+#define hermes_write_regn(hw, name, val) (hermes_write_reg((hw), HERMES_##name, (val)))
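+
+/* For example, hermes_read_regn(hw, CMD) expands to
+ * hermes_read_reg(hw, HERMES_CMD), i.e. a read of the register at offset
+ * 0x00, scaled by reg_spacing and dispatched to inw() or readw() as
+ * appropriate. */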
+
+/* Function prototypes */
+void hermes_struct_init(hermes_t *hw, ulong address, int io_space, int reg_spacing);
+int hermes_init(hermes_t *hw);
+int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, hermes_response_t *resp);
+int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
+
+int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
+ u16 id, u16 offset);
+int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
+ u16 id, u16 offset);
+int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
+ u16 *length, void *buf);
+int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value);
+
+/* Inline functions */
+
+static inline int hermes_present(hermes_t *hw)
+{
+ return hermes_read_regn(hw, SWSUPPORT0) == HERMES_MAGIC;
+}
+
+static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
+{
+ hw->inten = events;
+ hermes_write_regn(hw, INTEN, events);
+}
+
+static inline int hermes_enable_port(hermes_t *hw, int port)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
+ 0, NULL);
+}
+
+static inline int hermes_disable_port(hermes_t *hw, int port)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
+ 0, NULL);
+}
+
+/* Initiate an INQUIRE command (tallies or scan). The result will come as an
+ * information frame in __orinoco_ev_info() */
+static inline int hermes_inquire(hermes_t *hw, u16 rid)
+{
+ return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
+}
+
+#define HERMES_BYTES_TO_RECLEN(n) ( (((n)+1)/2) + 1 )
+#define HERMES_RECLEN_TO_BYTES(n) ( ((n)-1) * 2 )
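+
+/* Worked example: a 6-byte MAC address record gives
+ * HERMES_BYTES_TO_RECLEN(6) = ((6+1)/2) + 1 = 4 words (the data plus the
+ * type word), and HERMES_RECLEN_TO_BYTES(4) = (4-1)*2 = 6 bytes again. */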
+
+/* Note that for the next two, the count is in 16-bit words, not bytes */
+static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsigned count)
+{
+ off = off << hw->reg_spacing;
+
+ if (hw->io_space) {
+ insw(hw->iobase + off, buf, count);
+ } else {
+ unsigned i;
+ u16 *p;
+
+ /* This needs to *not* byteswap (like insw()) but
+ * readw() does byteswap hence the conversion. I hope
+ * gcc is smart enough to fold away the two swaps on
+ * big-endian platforms. */
+ for (i = 0, p = buf; i < count; i++) {
+ *p++ = cpu_to_le16(readw(hw->iobase + off));
+ }
+ }
+}
+
+static inline void hermes_write_words(struct hermes *hw, int off, const void *buf, unsigned count)
+{
+ off = off << hw->reg_spacing;
+
+ if (hw->io_space) {
+ outsw(hw->iobase + off, buf, count);
+ } else {
+ unsigned i;
+ const u16 *p;
+
+ /* This needs to *not* byteswap (like outsw()) but
+ * writew() does byteswap hence the conversion. I
+ * hope gcc is smart enough to fold away the two swaps
+ * on big-endian platforms. */
+ for (i = 0, p = buf; i < count; i++) {
+ writew(le16_to_cpu(*p++), hw->iobase + off);
+ }
+ }
+}
+
+static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count)
+{
+ unsigned i;
+
+ off = off << hw->reg_spacing;
+
+ if (hw->io_space) {
+ for (i = 0; i < count; i++)
+ outw(0, hw->iobase + off);
+ } else {
+ for (i = 0; i < count; i++)
+ writew(0, hw->iobase + off);
+ }
+}
+
+#define HERMES_READ_RECORD(hw, bap, rid, buf) \
+ (hermes_read_ltv((hw),(bap),(rid), sizeof(*buf), NULL, (buf)))
+#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
+ (hermes_write_ltv((hw),(bap),(rid),HERMES_BYTES_TO_RECLEN(sizeof(*buf)),(buf)))
+
+static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
+{
+ u16 rec;
+ int err;
+
+ err = HERMES_READ_RECORD(hw, bap, rid, &rec);
+ *word = le16_to_cpu(rec);
+ return err;
+}
+
+static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
+{
+ u16 rec = cpu_to_le16(word);
+ return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
+}
+
+#else /* ! __KERNEL__ */
+
+/* These are provided for the benefit of userspace drivers and testing programs
+ which use ioperm() or iopl() */
+
+#define hermes_read_reg(base, off) (inw((base) + (off)))
+#define hermes_write_reg(base, off, val) (outw((val), (base) + (off)))
+
+#define hermes_read_regn(base, name) (hermes_read_reg((base), HERMES_##name))
+#define hermes_write_regn(base, name, val) (hermes_write_reg((base), HERMES_##name, (val)))
+
+/* Note that for the next two, the count is in 16-bit words, not bytes */
+#define hermes_read_data(base, off, buf, count) (insw((base) + (off), (buf), (count)))
+#define hermes_write_data(base, off, buf, count) (outsw((base) + (off), (buf), (count)))
+
+#endif /* ! __KERNEL__ */
+
+#endif /* _HERMES_H */
diff --git a/linux/pcmcia-cs/wireless/hermes_rid.h b/linux/pcmcia-cs/wireless/hermes_rid.h
new file mode 100644
index 0000000..761c542
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/hermes_rid.h
@@ -0,0 +1,153 @@
+#ifndef _HERMES_RID_H
+#define _HERMES_RID_H
+
+/*
+ * Configuration RIDs
+ */
+#define HERMES_RID_CNFPORTTYPE 0xFC00 /* used */
+#define HERMES_RID_CNFOWNMACADDR 0xFC01 /* used */
+#define HERMES_RID_CNFDESIREDSSID 0xFC02 /* used */
+#define HERMES_RID_CNFOWNCHANNEL 0xFC03 /* used */
+#define HERMES_RID_CNFOWNSSID 0xFC04 /* used */
+#define HERMES_RID_CNFOWNATIMWINDOW 0xFC05
+#define HERMES_RID_CNFSYSTEMSCALE 0xFC06 /* used */
+#define HERMES_RID_CNFMAXDATALEN 0xFC07
+#define HERMES_RID_CNFWDSADDRESS 0xFC08
+#define HERMES_RID_CNFPMENABLED 0xFC09 /* used */
+#define HERMES_RID_CNFPMEPS 0xFC0A
+#define HERMES_RID_CNFMULTICASTRECEIVE 0xFC0B /* used */
+#define HERMES_RID_CNFMAXSLEEPDURATION 0xFC0C /* used */
+#define HERMES_RID_CNFPMHOLDOVERDURATION 0xFC0D /* used */
+#define HERMES_RID_CNFOWNNAME 0xFC0E /* used */
+#define HERMES_RID_CNFOWNDTIMPERIOD 0xFC10
+#define HERMES_RID_CNFWDSADDRESS1 0xFC11
+#define HERMES_RID_CNFWDSADDRESS2 0xFC12
+#define HERMES_RID_CNFWDSADDRESS3 0xFC13
+#define HERMES_RID_CNFWDSADDRESS4 0xFC14
+#define HERMES_RID_CNFWDSADDRESS5 0xFC15
+#define HERMES_RID_CNFWDSADDRESS6 0xFC16
+#define HERMES_RID_CNFMULTICASTPMBUFFERING 0xFC17
+#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20 /* used */
+#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21
+#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23 /* used */
+#define HERMES_RID_CNFDEFAULTKEY0 0xFC24 /* used */
+#define HERMES_RID_CNFDEFAULTKEY1 0xFC25 /* used */
+#define HERMES_RID_CNFMWOROBUST_AGERE 0xFC25 /* used */
+#define HERMES_RID_CNFDEFAULTKEY2 0xFC26 /* used */
+#define HERMES_RID_CNFDEFAULTKEY3 0xFC27 /* used */
+#define HERMES_RID_CNFWEPFLAGS_INTERSIL 0xFC28 /* used */
+#define HERMES_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
+#define HERMES_RID_CNFAUTHENTICATION 0xFC2A /* used */
+#define HERMES_RID_CNFMAXASSOCSTA 0xFC2B
+#define HERMES_RID_CNFKEYLENGTH_SYMBOL 0xFC2B
+#define HERMES_RID_CNFTXCONTROL 0xFC2C
+#define HERMES_RID_CNFROAMINGMODE 0xFC2D
+#define HERMES_RID_CNFHOSTAUTHENTICATION 0xFC2E
+#define HERMES_RID_CNFRCVCRCERROR 0xFC30
+#define HERMES_RID_CNFMMLIFE 0xFC31
+#define HERMES_RID_CNFALTRETRYCOUNT 0xFC32
+#define HERMES_RID_CNFBEACONINT 0xFC33
+#define HERMES_RID_CNFAPPCFINFO 0xFC34
+#define HERMES_RID_CNFSTAPCFINFO 0xFC35
+#define HERMES_RID_CNFPRIORITYQUSAGE 0xFC37
+#define HERMES_RID_CNFTIMCTRL 0xFC40
+#define HERMES_RID_CNFTHIRTY2TALLY 0xFC42
+#define HERMES_RID_CNFENHSECURITY 0xFC43
+#define HERMES_RID_CNFGROUPADDRESSES 0xFC80 /* used */
+#define HERMES_RID_CNFCREATEIBSS 0xFC81 /* used */
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD 0xFC82 /* used */
+#define HERMES_RID_CNFRTSTHRESHOLD 0xFC83 /* used */
+#define HERMES_RID_CNFTXRATECONTROL 0xFC84 /* used */
+#define HERMES_RID_CNFPROMISCUOUSMODE 0xFC85 /* used */
+#define HERMES_RID_CNFBASICRATES_SYMBOL 0xFC8A
+#define HERMES_RID_CNFPREAMBLE_SYMBOL 0xFC8C /* used */
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD0 0xFC90
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD1 0xFC91
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD2 0xFC92
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD3 0xFC93
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD4 0xFC94
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD5 0xFC95
+#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD6 0xFC96
+#define HERMES_RID_CNFRTSTHRESHOLD0 0xFC97
+#define HERMES_RID_CNFRTSTHRESHOLD1 0xFC98
+#define HERMES_RID_CNFRTSTHRESHOLD2 0xFC99
+#define HERMES_RID_CNFRTSTHRESHOLD3 0xFC9A
+#define HERMES_RID_CNFRTSTHRESHOLD4 0xFC9B
+#define HERMES_RID_CNFRTSTHRESHOLD5 0xFC9C
+#define HERMES_RID_CNFRTSTHRESHOLD6 0xFC9D
+#define HERMES_RID_CNFSHORTPREAMBLE 0xFCB0
+#define HERMES_RID_CNFWEPKEYS_AGERE 0xFCB0 /* used */
+#define HERMES_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
+#define HERMES_RID_CNFTXKEY_AGERE 0xFCB1 /* used */
+#define HERMES_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
+#define HERMES_RID_CNFBASICRATES 0xFCB3
+#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4
+#define HERMES_RID_CNFTICKTIME 0xFCE0 /* used */
+#define HERMES_RID_CNFSCANREQUEST 0xFCE1
+#define HERMES_RID_CNFJOINREQUEST 0xFCE2
+#define HERMES_RID_CNFAUTHENTICATESTATION 0xFCE3
+#define HERMES_RID_CNFCHANNELINFOREQUEST 0xFCE4
+
+/*
+ * Information RIDs
+ */
+#define HERMES_RID_MAXLOADTIME 0xFD00
+#define HERMES_RID_DOWNLOADBUFFER 0xFD01
+#define HERMES_RID_PRIID 0xFD02
+#define HERMES_RID_PRISUPRANGE 0xFD03
+#define HERMES_RID_CFIACTRANGES 0xFD04
+#define HERMES_RID_NICSERNUM 0xFD0A
+#define HERMES_RID_NICID 0xFD0B
+#define HERMES_RID_MFISUPRANGE 0xFD0C
+#define HERMES_RID_CFISUPRANGE 0xFD0D
+#define HERMES_RID_CHANNELLIST 0xFD10 /* used */
+#define HERMES_RID_REGULATORYDOMAINS 0xFD11
+#define HERMES_RID_TEMPTYPE 0xFD12
+#define HERMES_RID_CIS 0xFD13
+#define HERMES_RID_STAID 0xFD20 /* used */
+#define HERMES_RID_STASUPRANGE 0xFD21
+#define HERMES_RID_MFIACTRANGES 0xFD22
+#define HERMES_RID_CFIACTRANGES2 0xFD23
+#define HERMES_RID_SECONDARYVERSION_SYMBOL 0xFD24 /* used */
+#define HERMES_RID_PORTSTATUS 0xFD40
+#define HERMES_RID_CURRENTSSID 0xFD41 /* used */
+#define HERMES_RID_CURRENTBSSID 0xFD42 /* used */
+#define HERMES_RID_COMMSQUALITY 0xFD43 /* used */
+#define HERMES_RID_CURRENTTXRATE 0xFD44 /* used */
+#define HERMES_RID_CURRENTBEACONINTERVAL 0xFD45
+#define HERMES_RID_CURRENTSCALETHRESHOLDS 0xFD46
+#define HERMES_RID_PROTOCOLRSPTIME 0xFD47
+#define HERMES_RID_SHORTRETRYLIMIT 0xFD48 /* used */
+#define HERMES_RID_LONGRETRYLIMIT 0xFD49 /* used */
+#define HERMES_RID_MAXTRANSMITLIFETIME 0xFD4A /* used */
+#define HERMES_RID_MAXRECEIVELIFETIME 0xFD4B
+#define HERMES_RID_CFPOLLABLE 0xFD4C
+#define HERMES_RID_AUTHENTICATIONALGORITHMS 0xFD4D
+#define HERMES_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
+#define HERMES_RID_CURRENTTXRATE1 0xFD80
+#define HERMES_RID_CURRENTTXRATE2 0xFD81
+#define HERMES_RID_CURRENTTXRATE3 0xFD82
+#define HERMES_RID_CURRENTTXRATE4 0xFD83
+#define HERMES_RID_CURRENTTXRATE5 0xFD84
+#define HERMES_RID_CURRENTTXRATE6 0xFD85
+#define HERMES_RID_OWNMACADDR 0xFD86
+#define HERMES_RID_SCANRESULTSTABLE 0xFD88
+#define HERMES_RID_PHYTYPE 0xFDC0
+#define HERMES_RID_CURRENTCHANNEL 0xFDC1 /* used */
+#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2
+#define HERMES_RID_CCAMODE 0xFDC3
+#define HERMES_RID_SUPPORTEDDATARATES 0xFDC6 /* used */
+#define HERMES_RID_BUILDSEQ 0xFFFE
+#define HERMES_RID_FWID 0xFFFF
+
+/* "ID" structure - used for ESSID and station nickname */
+struct hermes_idstring {
+ u16 len;
+ u16 val[16];
+} __attribute__ ((packed));
+
+typedef struct hermes_multicast {
+ u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
+} __attribute__ ((packed)) hermes_multicast_t;
+
+#endif /* _HERMES_RID_H */
diff --git a/linux/pcmcia-cs/wireless/ieee802_11.h b/linux/pcmcia-cs/wireless/ieee802_11.h
new file mode 100644
index 0000000..07d626e
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/ieee802_11.h
@@ -0,0 +1,79 @@
+#ifndef _IEEE802_11_H
+#define _IEEE802_11_H
+
+#define IEEE802_11_DATA_LEN 2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+ 6.2.1.1.2.
+
+ The figure in section 7.1.2 suggests a body size of up to 2312
+ bytes is allowed, which is a bit confusing, I suspect this
+ represents the 2304 bytes of real data, plus a possible 8 bytes of
+ WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
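+
+/* In other words: 2304 bytes of data + 4-byte WEP IV + 4-byte WEP ICV
+   = 2312 bytes, matching the figure in section 7.1.2. */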
+
+
+#define IEEE802_11_HLEN 30
+#define IEEE802_11_FRAME_LEN (IEEE802_11_DATA_LEN + IEEE802_11_HLEN)
+
+struct ieee802_11_hdr {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+} __attribute__ ((packed));
+
+/* Frame control field constants */
+#define IEEE802_11_FCTL_VERS 0x0002
+#define IEEE802_11_FCTL_FTYPE 0x000c
+#define IEEE802_11_FCTL_STYPE 0x00f0
+#define IEEE802_11_FCTL_TODS 0x0100
+#define IEEE802_11_FCTL_FROMDS 0x0200
+#define IEEE802_11_FCTL_MOREFRAGS 0x0400
+#define IEEE802_11_FCTL_RETRY 0x0800
+#define IEEE802_11_FCTL_PM 0x1000
+#define IEEE802_11_FCTL_MOREDATA 0x2000
+#define IEEE802_11_FCTL_WEP 0x4000
+#define IEEE802_11_FCTL_ORDER 0x8000
+
+#define IEEE802_11_FTYPE_MGMT 0x0000
+#define IEEE802_11_FTYPE_CTL 0x0004
+#define IEEE802_11_FTYPE_DATA 0x0008
+
+/* management */
+#define IEEE802_11_STYPE_ASSOC_REQ 0x0000
+#define IEEE802_11_STYPE_ASSOC_RESP 0x0010
+#define IEEE802_11_STYPE_REASSOC_REQ 0x0020
+#define IEEE802_11_STYPE_REASSOC_RESP 0x0030
+#define IEEE802_11_STYPE_PROBE_REQ 0x0040
+#define IEEE802_11_STYPE_PROBE_RESP 0x0050
+#define IEEE802_11_STYPE_BEACON 0x0080
+#define IEEE802_11_STYPE_ATIM 0x0090
+#define IEEE802_11_STYPE_DISASSOC 0x00A0
+#define IEEE802_11_STYPE_AUTH 0x00B0
+#define IEEE802_11_STYPE_DEAUTH 0x00C0
+
+/* control */
+#define IEEE802_11_STYPE_PSPOLL 0x00A0
+#define IEEE802_11_STYPE_RTS 0x00B0
+#define IEEE802_11_STYPE_CTS 0x00C0
+#define IEEE802_11_STYPE_ACK 0x00D0
+#define IEEE802_11_STYPE_CFEND 0x00E0
+#define IEEE802_11_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define IEEE802_11_STYPE_DATA 0x0000
+#define IEEE802_11_STYPE_DATA_CFACK 0x0010
+#define IEEE802_11_STYPE_DATA_CFPOLL 0x0020
+#define IEEE802_11_STYPE_DATA_CFACKPOLL 0x0030
+#define IEEE802_11_STYPE_NULLFUNC 0x0040
+#define IEEE802_11_STYPE_CFACK 0x0050
+#define IEEE802_11_STYPE_CFPOLL 0x0060
+#define IEEE802_11_STYPE_CFACKPOLL 0x0070
+
+#define IEEE802_11_SCTL_FRAG 0x000F
+#define IEEE802_11_SCTL_SEQ 0xFFF0
+
+#endif /* _IEEE802_11_H */
+
diff --git a/linux/pcmcia-cs/wireless/orinoco.c b/linux/pcmcia-cs/wireless/orinoco.c
new file mode 100644
index 0000000..1f70b6e
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/orinoco.c
@@ -0,0 +1,4230 @@
+/* orinoco.c 0.13e - (formerly known as dldwd_cs.c and orinoco_cs.c)
+ *
+ * A driver for Hermes or Prism 2 chipset based PCMCIA wireless
+ * adaptors, with Lucent/Agere, Intersil or Symbol firmware.
+ *
+ * Copyright (C) 2000 David Gibson, Linuxcare Australia <hermes@gibson.dropbear.id.au>
+ * With some help from :
+ * Copyright (C) 2001 Jean Tourrilhes, HP Labs <jt@hpl.hp.com>
+ * Copyright (C) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *
+ * Based on dummy_cs.c 1.27 2000/06/12 21:27:25
+ *
+ * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy@fasta.fh-dortmund.de>
+ * http://www.fasta.fh-dortmund.de/users/andy/wvlan/
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David
+ * A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
+ * Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL. */
+
+/*
+ * v0.01 -> v0.02 - 21/3/2001 - Jean II
+ * o Allow to use regular ethX device name instead of dldwdX
+ * o Warning on IBSS with ESSID=any for firmware 6.06
+ * o Put proper range.throughput values (optimistic)
+ * o IWSPY support (IOCTL and stat gather in Rx path)
+ * o Allow setting frequency in Ad-Hoc mode
+ * o Disable WEP setting if !has_wep to work on old firmware
+ * o Fix txpower range
+ * o Start adding support for Samsung/Compaq firmware
+ *
+ * v0.02 -> v0.03 - 23/3/2001 - Jean II
+ * o Start adding Symbol support - need to check all that
+ * o Fix Prism2/Symbol WEP to accept 128 bits keys
+ * o Add Symbol WEP (add authentication type)
+ * o Add Prism2/Symbol rate
+ * o Add PM timeout (holdover duration)
+ * o Enable "iwconfig eth0 key off" and friends (toggle flags)
+ * o Enable "iwconfig eth0 power unicast/all" (toggle flags)
+ * o Tried with an Intel card. It reports firmware 1.01 and behaves like
+ * an antiquated firmware; however, on Windows it says 2.00. Yuck !
+ * o Workaround firmware bug in allocate buffer (Intel 1.01)
+ * o Finish external renaming to orinoco...
+ * o Testing with various Wavelan firmwares
+ *
+ * v0.03 -> v0.04 - 30/3/2001 - Jean II
+ * o Update to Wireless 11 -> add retry limit/lifetime support
+ * o Tested with a D-Link DWL 650 card, fill in firmware support
+ * o Warning on Vcc mismatch (D-Link 3.3v card in Lucent 5v only slot)
+ * o Fixed the Prism2 WEP bugs that I introduced in v0.03 :-(
+ * It works on the D-Link *only* after a tcpdump. Weird...
+ * And it still doesn't work on the Intel card. Grrrr...
+ * o Update the mode after a setport3
+ * o Add preamble setting for Symbol cards (not yet enabled)
+ * o Don't complain as much about Symbol cards...
+ *
+ * v0.04 -> v0.04b - 22/4/2001 - David Gibson
+ * o Removed the 'eth' parameter - always use ethXX as the
+ * interface name instead of dldwdXX. The other was racy
+ * anyway.
+ * o Clean up RID definitions in hermes.h, other cleanups
+ *
+ * v0.04b -> v0.04c - 24/4/2001 - Jean II
+ * o Tim Hurley <timster@seiki.bliztech.com> reported a D-Link card
+ * with vendor 02 and firmware 0.08. Added in the capabilities...
+ * o Tested Lucent firmware 7.28, everything works...
+ *
+ * v0.04c -> v0.05 - 3/5/2001 - Benjamin Herrenschmidt
+ * o Spin-off Pcmcia code. This file is renamed orinoco.c,
+ * and orinoco_cs.c now contains only the Pcmcia specific stuff
+ * o Add Airport driver support on top of orinoco.c (see airport.c)
+ *
+ * v0.05 -> v0.05a - 4/5/2001 - Jean II
+ * o Revert to old Pcmcia code to fix breakage of Ben's changes...
+ *
+ * v0.05a -> v0.05b - 4/5/2001 - Jean II
+ * o add module parameter 'ignore_cis_vcc' for D-Link @ 5V
+ * o D-Link firmware doesn't support multicast. We just print a few
+ * error messages, but otherwise everything works...
+ * o For David : set/getport3 works fine, just upgrade iwpriv...
+ *
+ * v0.05b -> v0.05c - 5/5/2001 - Benjamin Herrenschmidt
+ * o Adapt airport.c to latest changes in orinoco.c
+ * o Remove deferred power enabling code
+ *
+ * v0.05c -> v0.05d - 5/5/2001 - Jean II
+ * o Workaround to SNAP decapsulate frame from LinkSys AP
+ * original patch from : Dong Liu <dliu@research.bell-labs.com>
+ * (note : the memcmp bug was mine - fixed)
+ * o Remove set_retry stuff, no firmware support it (bloat--).
+ *
+ * v0.05d -> v0.06 - 25/5/2001 - Jean II
+ * Original patch from "Hong Lin" <alin@redhat.com>,
+ * "Ian Kinner" <ikinner@redhat.com>
+ * and "David Smith" <dsmith@redhat.com>
+ * o Init of priv->tx_rate_ctrl in firmware specific section.
+ * o Prism2/Symbol rate, upto should be 0xF and not 0x15. Doh !
+ * o Spectrum card always need cor_reset (for every reset)
+ * o Fix cor_reset to not lose bit 7 in the register
+ * o flush_stale_links to remove zombie Pcmcia instances
+ * o Ack previous hermes event before reset
+ * Me (with my little hands)
+ * o Allow orinoco.c to call cor_reset via priv->card_reset_handler
+ * o Add priv->need_card_reset to toggle this feature
+ * o Fix various buglets when setting WEP in Symbol firmware
+ * Now, encryption is fully functional on Symbol cards. Youpi !
+ *
+ * v0.06 -> v0.06b - 25/5/2001 - Jean II
+ * o IBSS on Symbol use port_mode = 4. Please don't ask...
+ *
+ * v0.06b -> v0.06c - 29/5/2001 - Jean II
+ * o Show first spy address in /proc/net/wireless for IBSS mode as well
+ *
+ * v0.06c -> v0.06d - 6/7/2001 - David Gibson
+ * o Change a bunch of KERN_INFO messages to KERN_DEBUG, as per Linus'
+ * wishes to reduce the number of unnecessary messages.
+ * o Removed bogus message on CRC error.
+ * o Merged fixes for v0.08 Prism 2 firmware from William Waghorn
+ * <willwaghorn@yahoo.co.uk>
+ * o Slight cleanup/re-arrangement of firmware detection code.
+ *
+ * v0.06d -> v0.06e - 1/8/2001 - David Gibson
+ * o Removed some redundant global initializers (orinoco_cs.c).
+ * o Added some module metadata
+ *
+ * v0.06e -> v0.06f - 14/8/2001 - David Gibson
+ * o Wording fix to license
+ * o Added a 'use_alternate_encaps' module parameter for APs which need an
+ * oui of 00:00:00. We really need a better way of handling this, but
+ * the module flag is better than nothing for now.
+ *
+ * v0.06f -> v0.07 - 20/8/2001 - David Gibson
+ * o Removed BAP error retries from hermes_bap_seek(). For Tx we now
+ * let the upper layers handle the retry; we retry explicitly in the
+ * Rx path, but don't make as much noise about it.
+ * o Firmware detection cleanups.
+ *
+ * v0.07 -> v0.07a - 1/10/2001 - Jean II
+ * o Add code to read Symbol firmware revision, inspired by latest code
+ * in Spectrum24 by Lee John Keyser-Allen - Thanks Lee !
+ * o Thanks to Jared Valentine <hidden@xmission.com> for "providing" me
+ * a 3Com card with a recent firmware, fill out Symbol firmware
+ * capabilities of latest rev (2.20), as well as older Symbol cards.
+ * o Disable Power Management in newer Symbol firmware, the API
+ * has changed (documentation needed).
+ *
+ * v0.07a -> v0.08 - 3/10/2001 - David Gibson
+ * o Fixed a possible buffer overrun found by the Stanford checker (in
+ * dldwd_ioctl_setiwencode()). Can only be called by root anyway, so not
+ * a big problem.
+ * o Turned has_big_wep on for Intersil cards. That's not true for all of
+ * them but we should at least let the capable ones try.
+ * o Wait for BUSY to clear at the beginning of hermes_bap_seek(). I
+ * realised that my assumption that the driver's serialization
+ * would prevent the BAP being busy on entry was possibly false, because
+ * things other than seeks may make the BAP busy.
+ * o Use "alternate" (oui 00:00:00) encapsulation by default.
+ * Setting use_old_encaps will mimic the old behaviour, but I think we
+ * will be able to eliminate this.
+ * o Don't try to make __initdata const (the version string). This can't
+ * work because of the way the __initdata sectioning works.
+ * o Added MODULE_LICENSE tags.
+ * o Support for PLX (transparent PCMCIA->PCI bridge) cards.
+ * o Changed to using the new type-fascist min/max.
+ *
+ * v0.08 -> v0.08a - 9/10/2001 - David Gibson
+ * o Inserted some missing acknowledgements/info into the Changelog.
+ * o Fixed some bugs in the normalisation of signal level reporting.
+ * o Fixed bad bug in WEP key handling on Intersil and Symbol firmware,
+ * which led to an instant crash on big-endian machines.
+ *
+ * v0.08a -> v0.08b - 20/11/2001 - David Gibson
+ * o Lots of cleanup and bugfixes in orinoco_plx.c
+ * o Cleanup to handling of Tx rate setting.
+ * o Removed support for old encapsulation method.
+ * o Removed old "dldwd" names.
+ * o Split RID constants into a new file hermes_rid.h
+ * o Renamed RID constants to match linux-wlan-ng and prism2.o
+ * o Bugfixes in hermes.c
+ * o Poke the PLX's INTCSR register, so it actually starts
+ * generating interrupts. These cards might actually work now.
+ * o Update to wireless extensions v12 (Jean II)
+ * o Support for tallies and inquire command (Jean II)
+ * o Airport updates for newer PPC kernels (BenH)
+ *
+ * v0.08b -> v0.09 - 21/12/2001 - David Gibson
+ * o Some new PCI IDs for PLX cards.
+ * o Removed broken attempt to do ALLMULTI reception. Just use
+ * promiscuous mode instead
+ * o Preliminary work for list-AP (Jean II)
+ * o Airport updates from (BenH)
+ * o Eliminated racy hw_ready stuff
+ * o Fixed generation of fake events in irq handler. This should
+ * finally kill the EIO problems (Jean II & dgibson)
+ * o Fixed breakage of bitrate set/get on Agere firmware (Jean II)
+ *
+ * v0.09 -> v0.09a - 2/1/2002 - David Gibson
+ * o Fixed stupid mistake in multicast list handling, triggering
+ * a BUG()
+ *
+ * v0.09a -> v0.09b - 16/1/2002 - David Gibson
+ * o Fixed even stupider mistake in new interrupt handling, which
+ * seriously broke things on big-endian machines.
+ * o Removed a bunch of redundant includes and exports.
+ * o Removed a redundant MOD_{INC,DEC}_USE_COUNT pair in airport.c
+ * o Don't attempt to do hardware level multicast reception on
+ * Intersil firmware, just go promisc instead.
+ * o Typo fixed in hermes_issue_cmd()
+ * o Eliminated WIRELESS_SPY #ifdefs
+ * o Status code reported on Tx exceptions
+ * o Moved netif_wake_queue() from ALLOC interrupts to TX and TXEXC
+ * interrupts, which should fix the timeouts we're seeing.
+ *
+ * v0.09b -> v0.10 - 25 Feb 2002 - David Gibson
+ * o Removed nested structures used for header parsing, so the
+ * driver should now work without hackery on ARM
+ * o Fix for WEP handling on Intersil (Hawk Newton)
+ * o Eliminated the /proc/hermes/ethXX/regs debugging file. It
+ * was never very useful.
+ * o Make Rx errors less noisy.
+ *
+ * v0.10 -> v0.11 - 5 Apr 2002 - David Gibson
+ * o Laid the groundwork in hermes.[ch] for devices which map
+ * into PCI memory space rather than IO space.
+ * o Fixed bug in multicast handling (cleared multicast list when
+ * leaving promiscuous mode).
+ * o Relegated Tx error messages to debug.
+ * o Cleaned up / corrected handling of allocation lengths.
+ * o Set OWNSSID in IBSS mode for WinXP interoperability (jimc).
+ * o Change to using alloc_etherdev() for structure allocations.
+ * o Check for and drop undersized packets.
+ * o Fixed a race in stopping/waking the queue. This should fix
+ * the timeout problems (Pavel Roskin)
+ * o Reverted to netif_wake_queue() on the ALLOC event.
+ * o Fixes for recent Symbol firmwares which lack AP density
+ * (Pavel Roskin).
+ *
+ * v0.11 -> v0.11a - 29 Apr 2002 - David Gibson
+ * o Handle different register spacing, necessary for Prism 2.5
+ * PCI adaptors (Steve Hill).
+ * o Cleaned up initialization of card structures in orinoco_cs
+ * and airport. Removed card->priv field.
+ * o Make response structure optional for hermes_docmd_wait()
+ * (Pavel Roskin)
+ * o Added PCI id for Nortel emobility to orinoco_plx.c.
+ * o Cleanup to handling of Symbol's allocation bug. (Pavel Roskin)
+ * o Cleanups to firmware capability detection.
+ * o Arrange for orinoco_pci.c to override firmware detection.
+ * We should be able to support the PCI Intersil cards now.
+ * o Cleanup handling of reset_cor and hard_reset (Pavel Roskin).
+ * o Remove erroneous use of USER_BAP in the TxExc handler (Jouni
+ * Malinen).
+ * o Makefile changes for better integration into David Hinds
+ * pcmcia-cs package.
+ *
+ * v0.11a -> v0.11b - 1 May 2002 - David Gibson
+ * o Better error reporting in orinoco_plx_init_one()
+ * o Fixed multiple bad kfree() bugs introduced by the
+ * alloc_orinocodev() changes.
+ *
+ * v0.11b -> v0.12 - 19 Jun 2002 - David Gibson
+ * o Support changing the MAC address.
+ * o Correct display of Intersil firmware revision numbers.
+ * o Entirely revised locking scheme. Should be both simpler and
+ * better.
+ * o Merged some common code in orinoco_plx, orinoco_pci and
+ * airport by creating orinoco_default_{open,stop,reset}()
+ * which are used as the dev->open, dev->stop, priv->reset
+ * callbacks if none are specified when alloc_orinocodev() is
+ * called.
+ * o Removed orinoco_plx_interrupt() and orinoco_pci_interrupt().
+ * They didn't do anything.
+ *
+ * v0.12 -> v0.12a - 4 Jul 2002 - David Gibson
+ * o Some rearrangement of code.
+ * o Numerous fixups to locking and rest handling, particularly
+ * for PCMCIA.
+ * o This allows open and stop net_device methods to be in
+ * orinoco.c now, rather than in the init modules.
+ * o In orinoco_cs.c link->priv now points to the struct
+ * net_device not to the struct orinoco_private.
+ * o Added a check for undersized SNAP frames, which could cause
+ * crashes.
+ *
+ * v0.12a -> v0.12b - 11 Jul 2002 - David Gibson
+ * o Fix hw->num_init testing code, so num_init is actually
+ * incremented.
+ * o Fix very stupid bug in orinoco_cs which broke compile with
+ * CONFIG_SMP.
+ * o Squashed a warning.
+ *
+ * v0.12b -> v0.12c - 26 Jul 2002 - David Gibson
+ * o Change to C9X style designated initializers.
+ * o Add support for 3Com AirConnect PCI.
+ * o No longer ignore the hard_reset argument to
+ * alloc_orinocodev(). Oops.
+ *
+ * v0.12c -> v0.13beta1 - 13 Sep 2002 - David Gibson
+ * o Revert the broken 0.12* locking scheme and go to a new yet
+ * simpler scheme.
+ * o Do firmware resets only in orinoco_init() and when waking
+ * the card from hard sleep.
+ *
+ * v0.13beta1 -> v0.13 - 27 Sep 2002 - David Gibson
+ * o Re-introduced full resets (via schedule_task()) on Tx
+ * timeout.
+ *
+ * v0.13 -> v0.13a - 30 Sep 2002 - David Gibson
+ * o Minor cleanups to info frame handling. Add basic support
+ * for linkstatus info frames.
+ * o Include required kernel headers in orinoco.h, to avoid
+ * compile problems.
+ *
+ * v0.13a -> v0.13b - 10 Feb 2003 - David Gibson
+ * o Implemented hard reset for Airport cards
+ * o Experimental suspend/resume implementation for orinoco_pci
+ * o Abolished /proc debugging support, replaced with a debugging
+ * iwpriv. Now it's ugly and simple instead of ugly and complex.
+ * o Bugfix in hermes.c if the firmware returned a record length
+ * of 0, we could go clobbering memory.
+ * o Bugfix in orinoco_stop() - it used to fail if hw_unavailable
+ * was set, which was usually true on PCMCIA hot removes.
+ * o Track LINKSTATUS messages, silently drop Tx packets before
+ * we are connected (avoids confusing the firmware), and only
+ * give LINKSTATUS printk()s if the status has changed.
+ *
+ * v0.13b -> v0.13c - 11 Mar 2003 - David Gibson
+ * o Cleanup: use dev instead of priv in various places.
+ * o Bug fix: Don't ReleaseConfiguration on RESET_PHYSICAL event
+ * if we're in the middle of a (driver initiated) hard reset.
+ * o Bug fix: ETH_ZLEN is supposed to include the header
+ * (Dionysus Blazakis & Manish Karir)
+ * o Convert to using workqueues instead of taskqueues (and
+ * backwards compatibility macros for pre 2.5.41 kernels).
+ * o Drop redundant (I think...) MOD_{INC,DEC}_USE_COUNT in
+ * airport.c
+ * o New orinoco_tmd.c init module from Joerg Dorchain for
+ * TMD7160 based PCI to PCMCIA bridges (similar to
+ * orinoco_plx.c).
+ *
+ * v0.13c -> v0.13d - 22 Apr 2003 - David Gibson
+ * o Make hw_unavailable a counter, rather than just a flag, this
+ * is necessary to avoid some races (such as a card being
+ * removed in the middle of orinoco_reset()).
+ * o Restore Release/RequestConfiguration in the PCMCIA event handler
+ * when dealing with a driver initiated hard reset. This is
+ * necessary to prevent hangs due to a spurious interrupt while
+ * the reset is in progress.
+ * o Clear the 802.11 header when transmitting, even though we
+ * don't use it. This fixes a long standing bug on some
+ * firmwares, which seem to get confused if that isn't done.
+ * o Be less eager to de-encapsulate SNAP frames, only do so if
+ * the OUI is 00:00:00 or 00:00:f8, leave others alone. The old
+ * behaviour broke CDP (Cisco Discovery Protocol).
+ * o Use dev instead of priv for free_irq() as well as
+ * request_irq() (oops).
+ * o Attempt to reset rather than giving up if we get too many
+ * IRQs.
+ * o Changed semantics of __orinoco_down() so it can be called
+ * safely with hw_unavailable set. It also now clears the
+ * linkstatus (since we're going to have to reassociate).
+ *
+ * v0.13d -> v0.13e - 12 May 2003 - David Gibson
+ * o Support for post-2.5.68 return values from irq handler.
+ * o Fixed bug where underlength packets would be double counted
+ * in the rx_dropped statistics.
+ * o Provided a module parameter to suppress linkstatus messages.
+ *
+ * TODO
+ * o New wireless extensions API (patch from Moustafa
+ * Youssef, updated by Jim Carter and Pavel Roskin).
+ * o Handle de-encapsulation within network layer, provide 802.11
+ * headers (patch from Thomas 'Dent' Mirlacher)
+ * o RF monitor mode support
+ * o Fix possible races in SPY handling.
+ * o Disconnect wireless extensions from fundamental configuration.
+ * o (maybe) Software WEP support (patch from Stano Meduna).
+ * o (maybe) Use multiple Tx buffers - driver handling queue
+ * rather than firmware. */
+
+/* Locking and synchronization:
+ *
+ * The basic principle is that everything is serialized through a
+ * single spinlock, priv->lock. The lock is used in user, bh and irq
+ * context, so when taken outside hardirq context it should always be
+ * taken with interrupts disabled. The lock protects both the
+ * hardware and the struct orinoco_private.
+ *
+ * Another flag, priv->hw_unavailable indicates that the hardware is
+ * unavailable for an extended period of time (e.g. suspended, or in
+ * the middle of a hard reset). This flag is protected by the
+ * spinlock. All code which touches the hardware should check the
+ * flag after taking the lock, and if it is set, give up on whatever
+ * they are doing and drop the lock again. The orinoco_lock()
+ * function handles this (it unlocks and returns -EBUSY if
+ * hw_unavailable is non-zero). */
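+
+/* Illustrative sketch only: the calling pattern described above.  The
+ * orinoco_lock()/orinoco_unlock() helpers live in orinoco.h; the exact
+ * shape below is an assumption based on that description, not a copy of
+ * a real caller.
+ *
+ *	unsigned long flags;
+ *
+ *	if (orinoco_lock(priv, &flags) != 0)
+ *		return -EBUSY;		(hw_unavailable was non-zero)
+ *	... touch the hardware and struct orinoco_private ...
+ *	orinoco_unlock(priv, &flags);
+ */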
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "hermes.h"
+#include "hermes_rid.h"
+#include "orinoco.h"
+#include "ieee802_11.h"
+
+/********************************************************************/
+/* Module information */
+/********************************************************************/
+
+MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based and similar wireless cards");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual MPL/GPL");
+#endif
+
+/* Level of debugging. Used in the macros in orinoco.h */
+#ifdef ORINOCO_DEBUG
+int orinoco_debug = ORINOCO_DEBUG;
+MODULE_PARM(orinoco_debug, "i");
+EXPORT_SYMBOL(orinoco_debug);
+#endif
+
+static int suppress_linkstatus; /* = 0 */
+MODULE_PARM(suppress_linkstatus, "i");
+
+/********************************************************************/
+/* Compile time configuration and compatibility stuff */
+/********************************************************************/
+
+/* Wireless extensions backwards compatibility */
+#ifndef SIOCIWFIRSTPRIV
+#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
+#endif /* SIOCIWFIRSTPRIV */
+#ifndef SIOCIWLASTPRIV
+#define SIOCIWLASTPRIV	(SIOCDEVPRIVATE+0xF)
+#endif /* SIOCIWLASTPRIV */
+
+/* We do it this way to avoid ifdefs in the actual code */
+#ifdef WIRELESS_SPY
+#define SPY_NUMBER(priv) (priv->spy_number)
+#else
+#define SPY_NUMBER(priv) 0
+#endif /* WIRELESS_SPY */
+
+/********************************************************************/
+/* Internal constants */
+/********************************************************************/
+
+#define ORINOCO_MIN_MTU 256
+#define ORINOCO_MAX_MTU (IEEE802_11_DATA_LEN - ENCAPS_OVERHEAD)
+
+#define SYMBOL_MAX_VER_LEN (14)
+#define USER_BAP 0
+#define IRQ_BAP 1
+#define MAX_IRQLOOPS_PER_IRQ 10
+#define MAX_IRQLOOPS_PER_JIFFY (20000/HZ) /* Based on a guesstimate of
+ * how many events the
+ * device could
+ * legitimately generate */
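+/* With HZ=100, for example, MAX_IRQLOOPS_PER_JIFFY works out to 200. */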
+#define SMALL_KEY_SIZE 5
+#define LARGE_KEY_SIZE 13
+#define TX_NICBUF_SIZE_BUG 1585 /* Bug in Symbol firmware */
+
+#define DUMMY_FID 0xFFFF
+
+#define RUP_EVEN(a) (((a) + 1) & (~1))
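+/* e.g. RUP_EVEN(5) == 6: the Hermes BAPs are accessed with 16-bit I/O,
+ * so odd transfer lengths are rounded up to the next even byte count */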
+
+/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
+ HERMES_MAX_MULTICAST : 0)*/
+#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
+
+/*
+ * MACH related stuff...
+ */
+
+#ifdef MACH
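+/* Under the GNU Mach glue the driver only ever sees kernel-addressable
+ * buffers, so these user-copy helpers can collapse to plain memcpy() and
+ * the access check to a no-op - that, at least, is the assumption this
+ * port makes. */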
+
+#undef copy_to_user
+#define copy_to_user(a,b,c) (memcpy(a,b,c), 0)
+
+#define verify_area(a,b,c) (0)
+#define copy_from_user(a,b,c) (memcpy(a,b,c), 0)
+
+#endif
+
+/********************************************************************/
+/* Data tables */
+/********************************************************************/
+
+/* The frequency of each channel in MHz */
+const long channel_frequency[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
+#define NUM_CHANNELS ( sizeof(channel_frequency) / sizeof(channel_frequency[0]) )
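+/* i.e. the 14 802.11b channels of the 2.4 GHz band, channel 14 being the
+ * 2484 MHz entry at the end of the table */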
+
+/* This table gives the actual meanings of the bitrate IDs returned by the firmware. */
+struct {
+ int bitrate; /* in 100s of kilobits */
+ int automatic;
+ u16 agere_txratectrl;
+ u16 intersil_txratectrl;
+} bitrate_table[] = {
+ {110, 1, 3, 15}, /* Entry 0 is the default */
+ {10, 0, 1, 1},
+ {10, 1, 1, 1},
+ {20, 0, 2, 2},
+ {20, 1, 6, 3},
+ {55, 0, 4, 4},
+ {55, 1, 7, 7},
+ {110, 0, 5, 8},
+};
+#define BITRATE_TABLE_SIZE (sizeof(bitrate_table) / sizeof(bitrate_table[0]))
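+/* bitrate_table is indexed by priv->bitratemode - see
+ * __orinoco_hw_set_bitrate() below */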
+
+/********************************************************************/
+/* Data types */
+/********************************************************************/
+
+struct header_struct {
+ /* 802.3 */
+ u8 dest[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u16 len;
+ /* 802.2 */
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl;
+ /* SNAP */
+ u8 oui[3];
+ u16 ethertype;
+} __attribute__ ((packed));
+
+/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
+u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+
+#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
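+/* i.e. 8 bytes: the 6-byte LLC/SNAP prefix above (DSAP, SSAP, CTRL, OUI)
+ * plus the 2-byte ethertype that replaces the 802.3 length field */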
+
+/********************************************************************/
+/* Function prototypes */
+/********************************************************************/
+
+static void orinoco_stat_gather(struct net_device *dev,
+ struct sk_buff *skb,
+ struct hermes_rx_descriptor *desc);
+
+static struct net_device_stats *orinoco_get_stats(struct net_device *dev);
+static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev);
+
+/* Hardware control routines */
+
+static int __orinoco_program_rids(struct net_device *dev);
+
+static int __orinoco_hw_set_bitrate(struct orinoco_private *priv);
+static int __orinoco_hw_setup_wep(struct orinoco_private *priv);
+static int orinoco_hw_get_bssid(struct orinoco_private *priv, char buf[ETH_ALEN]);
+static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
+ char buf[IW_ESSID_MAX_SIZE+1]);
+static long orinoco_hw_get_freq(struct orinoco_private *priv);
+static int orinoco_hw_get_bitratelist(struct orinoco_private *priv, int *numrates,
+ s32 *rates, int max);
+static void __orinoco_set_multicast_list(struct net_device *dev);
+
+/* Interrupt handling routines */
+static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw);
+static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw);
+
+/* ioctl() routines */
+static int orinoco_debug_dump_recs(struct net_device *dev);
+
+/********************************************************************/
+/* Device control functions */
+/********************************************************************/
+
+int __orinoco_up(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = __orinoco_program_rids(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d configuring card\n",
+ dev->name, err);
+ return err;
+ }
+
+ /* Fire things up again */
+ hermes_set_irqmask(hw, ORINOCO_INTEN);
+ err = hermes_enable_port(hw, 0);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d enabling MAC port\n",
+ dev->name, err);
+ return err;
+ }
+
+ netif_start_queue(dev);
+ netif_mark_up(dev);
+
+ return 0;
+}
+
+int __orinoco_down(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ netif_stop_queue(dev);
+ netif_mark_down(dev);
+
+ if (! priv->hw_unavailable) {
+ if (! priv->broken_disableport) {
+ err = hermes_disable_port(hw, 0);
+ if (err) {
+ /* Some firmwares (e.g. Intersil 1.3.x) seem
+ * to have problems disabling the port, oh
+ * well, too bad. */
+ printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
+ dev->name, err);
+ priv->broken_disableport = 1;
+ }
+ }
+ hermes_set_irqmask(hw, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+ }
+
+ /* firmware will have to reassociate */
+ priv->last_linkstatus = 0xffff;
+ priv->connected = 0;
+
+ return 0;
+}
+
+int orinoco_reinit_firmware(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = hermes_init(hw);
+ if (err)
+ return err;
+
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err == -EIO) {
+ /* Try workaround for old Symbol firmware bug */
+ printk(KERN_WARNING "%s: firmware ALLOC bug detected "
+ "(old Symbol firmware?). Trying to work around... ",
+ dev->name);
+
+ priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err)
+ printk("failed!\n");
+ else
+ printk("ok.\n");
+ }
+
+ return err;
+}
+
+static int orinoco_open(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ unsigned long flags;
+ int err;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = __orinoco_up(dev);
+
+ if (! err)
+ priv->open = 1;
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+int orinoco_stop(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+
+ /* We mustn't use orinoco_lock() here, because we need to be
+ able to close the interface even if hw_unavailable is set
+ (e.g. as we're released after a PC Card removal) */
+ spin_lock_irq(&priv->lock);
+
+ priv->open = 0;
+
+ err = __orinoco_down(dev);
+
+ spin_unlock_irq(&priv->lock);
+
+ return err;
+}
+
+static int __orinoco_program_rids(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err;
+ struct hermes_idstring idbuf;
+
+ /* Set the MAC address */
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting MAC address\n", dev->name, err);
+ return err;
+ }
+
+ /* Set up the link mode */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE, priv->port_type);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting port type\n", dev->name, err);
+ return err;
+ }
+ /* Set the channel/frequency */
+ if (priv->channel == 0) {
+ printk(KERN_DEBUG "%s: Channel is 0 in __orinoco_program_rids()\n", dev->name);
+ if (priv->createibss)
+ priv->channel = 10;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL, priv->channel);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting channel\n", dev->name, err);
+ return err;
+ }
+
+ if (priv->has_ibss) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFCREATEIBSS,
+ priv->createibss);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n", dev->name, err);
+ return err;
+ }
+
+ if ((strlen(priv->desired_essid) == 0) && (priv->createibss)
+ && (!priv->has_ibss_any)) {
+ printk(KERN_WARNING "%s: This firmware requires an \
+ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
+ /* With wvlan_cs, in this case, we would crash.
+ * hopefully, this driver will behave better...
+ * Jean II */
+ }
+ }
+
+ /* Set the desired ESSID */
+ idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
+ memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
+ /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting OWNSSID\n", dev->name, err);
+ return err;
+ }
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n", dev->name, err);
+ return err;
+ }
+
+ /* Set the station name */
+ idbuf.len = cpu_to_le16(strlen(priv->nick));
+ memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
+ &idbuf);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting nickname\n", dev->name, err);
+ return err;
+ }
+
+ /* Set AP density */
+ if (priv->has_sensitivity) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
+ priv->ap_density);
+ if (err) {
+ printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
+ "Disabling sensitivity control\n", dev->name, err);
+
+ priv->has_sensitivity = 0;
+ }
+ }
+
+ /* Set RTS threshold */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, priv->rts_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting RTS threshold\n", dev->name, err);
+ return err;
+ }
+
+ /* Set fragmentation threshold or MWO robustness */
+ if (priv->has_mwo)
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ priv->mwo_robust);
+ else
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ priv->frag_thresh);
+ if (err) {
+		printk(KERN_ERR "%s: Error %d setting fragmentation\n", dev->name, err);
+ return err;
+ }
+
+ /* Set bitrate */
+ err = __orinoco_hw_set_bitrate(priv);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting bitrate\n", dev->name, err);
+ return err;
+ }
+
+ /* Set power management */
+ if (priv->has_pm) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED,
+ priv->pm_on);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMULTICASTRECEIVE,
+ priv->pm_mcast);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION,
+ priv->pm_period);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMHOLDOVERDURATION,
+ priv->pm_timeout);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting up PM\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set preamble - only for Symbol so far... */
+ if (priv->has_preamble) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPREAMBLE_SYMBOL,
+ priv->preamble);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d setting preamble\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set up encryption */
+ if (priv->has_wep) {
+ err = __orinoco_hw_setup_wep(priv);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d activating WEP\n",
+ dev->name, err);
+ return err;
+ }
+ }
+
+ /* Set promiscuity / multicast*/
+ priv->promiscuous = 0;
+ priv->mc_count = 0;
+ __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
+
+ return 0;
+}
+
+/* Push the current configuration out to a running card: disable the MAC
+ * port, re-program the RIDs and re-enable it, falling back to a full
+ * reset if any step fails. */
+static int orinoco_reconfigure(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ unsigned long flags;
+ int err = 0;
+
+ if (priv->broken_disableport) {
+ schedule_work(&priv->reset_work);
+ return 0;
+ }
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+
+ err = hermes_disable_port(hw, 0);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to disable port while reconfiguring card\n",
+ dev->name);
+ priv->broken_disableport = 1;
+ goto out;
+ }
+
+ err = __orinoco_program_rids(dev);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to reconfigure card\n",
+ dev->name);
+ goto out;
+ }
+
+ err = hermes_enable_port(hw, 0);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
+ dev->name);
+ goto out;
+ }
+
+ out:
+ if (err) {
+ printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
+ schedule_work(&priv->reset_work);
+ err = 0;
+ }
+
+ orinoco_unlock(priv, &flags);
+ return err;
+
+}
+
+/* This must be called from user context, without locks held - use
+ * schedule_work() */
+static void orinoco_reset(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct hermes *hw = &priv->hw;
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ /* When the hardware becomes available again, whatever
+ * detects that is responsible for re-initializing
+ * it. So no need for anything further*/
+ return;
+
+ netif_stop_queue(dev);
+
+ /* Shut off interrupts. Depending on what state the hardware
+ * is in, this might not work, but we'll try anyway */
+ hermes_set_irqmask(hw, 0);
+ hermes_write_regn(hw, EVACK, 0xffff);
+
+ priv->hw_unavailable++;
+ priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */
+ priv->connected = 0;
+
+ orinoco_unlock(priv, &flags);
+
+ if (priv->hard_reset)
+ err = (*priv->hard_reset)(priv);
+ if (err) {
+ printk(KERN_ERR "%s: orinoco_reset: Error %d performing hard reset\n",
+ dev->name, err);
+ /* FIXME: shutdown of some sort */
+ return;
+ }
+
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
+ dev->name, err);
+ return;
+ }
+
+ spin_lock_irq(&priv->lock); /* This has to be called from user context */
+
+ priv->hw_unavailable--;
+
+ /* priv->open or priv->hw_unavailable might have changed while
+ * we dropped the lock */
+ if (priv->open && (! priv->hw_unavailable)) {
+ err = __orinoco_up(dev);
+ if (err) {
+ printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
+ dev->name, err);
+ } else
+ dev->trans_start = jiffies;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return;
+}
+
+/********************************************************************/
+/* Internal helper functions */
+/********************************************************************/
+
+static inline void
+set_port_type(struct orinoco_private *priv)
+{
+ switch (priv->iw_mode) {
+ case IW_MODE_INFRA:
+ priv->port_type = 1;
+ priv->createibss = 0;
+ break;
+ case IW_MODE_ADHOC:
+ if (priv->prefer_port3) {
+ priv->port_type = 3;
+ priv->createibss = 0;
+ } else {
+ priv->port_type = priv->ibss_port;
+ priv->createibss = 1;
+ }
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
+ priv->ndev->name);
+ }
+}
+
+/* Does the frame have a SNAP header indicating it should be
+ * de-encapsulated to Ethernet-II? */
+static inline int
+is_ethersnap(struct header_struct *hdr)
+{
+	/* We de-encapsulate all packets which (a) have SNAP headers
+	 * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header)
+	 * and (b) have an OUI of 00:00:00 or 00:00:f8 in the SNAP
+	 * header - we need both because different APs appear to use
+	 * different OUIs for some reason */
+ return (memcmp(&hdr->dsap, &encaps_hdr, 5) == 0)
+ && ( (hdr->oui[2] == 0x00) || (hdr->oui[2] == 0xf8) );
+}
+
+static void
+orinoco_set_multicast_list(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
+ "called when hw_unavailable\n", dev->name);
+ return;
+ }
+
+ __orinoco_set_multicast_list(dev);
+ orinoco_unlock(priv, &flags);
+}
+
+/********************************************************************/
+/* Hardware control functions */
+/********************************************************************/
+
+
+static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+
+ if (priv->bitratemode >= BITRATE_TABLE_SIZE) {
+ printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
+ priv->ndev->name, priv->bitratemode);
+ return -EINVAL;
+ }
+
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE:
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXRATECONTROL,
+ bitrate_table[priv->bitratemode].agere_txratectrl);
+ break;
+ case FIRMWARE_TYPE_INTERSIL:
+ case FIRMWARE_TYPE_SYMBOL:
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXRATECONTROL,
+ bitrate_table[priv->bitratemode].intersil_txratectrl);
+ break;
+ default:
+ BUG();
+ }
+
+ return err;
+}
+
+
+static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int master_wep_flag;
+ int auth_flag;
+
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
+ if (priv->wep_on) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFTXKEY_AGERE,
+ priv->tx_key);
+ if (err)
+ return err;
+
+ err = HERMES_WRITE_RECORD(hw, USER_BAP,
+ HERMES_RID_CNFWEPKEYS_AGERE,
+ &priv->keys);
+ if (err)
+ return err;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPENABLED_AGERE,
+ priv->wep_on);
+ if (err)
+ return err;
+ break;
+
+ case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
+ case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
+ master_wep_flag = 0; /* Off */
+ if (priv->wep_on) {
+ int keylen;
+ int i;
+
+ /* Fudge around firmware weirdness */
+ keylen = le16_to_cpu(priv->keys[priv->tx_key].len);
+
+ /* Write all 4 keys */
+ for(i = 0; i < ORINOCO_MAX_KEYS; i++) {
+/* int keylen = le16_to_cpu(priv->keys[i].len); */
+
+ if (keylen > LARGE_KEY_SIZE) {
+ printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
+ priv->ndev->name, i, keylen);
+ return -E2BIG;
+ }
+
+ err = hermes_write_ltv(hw, USER_BAP,
+ HERMES_RID_CNFDEFAULTKEY0 + i,
+ HERMES_BYTES_TO_RECLEN(keylen),
+ priv->keys[i].data);
+ if (err)
+ return err;
+ }
+
+ /* Write the index of the key used in transmission */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPDEFAULTKEYID,
+ priv->tx_key);
+ if (err)
+ return err;
+
+ if (priv->wep_restrict) {
+ auth_flag = 2;
+ master_wep_flag = 3;
+ } else {
+ /* Authentication is where Intersil and Symbol
+ * firmware differ... */
+ auth_flag = 1;
+ if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)
+ master_wep_flag = 3; /* Symbol */
+ else
+ master_wep_flag = 1; /* Intersil */
+ }
+
+
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFAUTHENTICATION, auth_flag);
+ if (err)
+ return err;
+ }
+
+ /* Master WEP setting : on/off */
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFWEPFLAGS_INTERSIL,
+ master_wep_flag);
+ if (err)
+ return err;
+
+ break;
+
+ default:
+ if (priv->wep_on) {
+ printk(KERN_ERR "%s: WEP enabled, although not supported!\n",
+ priv->ndev->name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int orinoco_hw_get_bssid(struct orinoco_private *priv,
+ char buf[ETH_ALEN])
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, buf);
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
+ char buf[IW_ESSID_MAX_SIZE+1])
+{
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ struct hermes_idstring essidbuf;
+ char *p = (char *)(&essidbuf.val);
+ int len;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if (strlen(priv->desired_essid) > 0) {
+ /* We read the desired SSID from the hardware rather
+ than from priv->desired_essid, just in case the
+ firmware is allowed to change it on us. I'm not
+ sure about this */
+ /* My guess is that the OWNSSID should always be whatever
+ * we set to the card, whereas CURRENT_SSID is the one that
+ * may change... - Jean II */
+ u16 rid;
+
+ *active = 1;
+
+ rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
+ HERMES_RID_CNFDESIREDSSID;
+
+ err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
+ NULL, &essidbuf);
+ if (err)
+ goto fail_unlock;
+ } else {
+ *active = 0;
+
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
+ sizeof(essidbuf), NULL, &essidbuf);
+ if (err)
+ goto fail_unlock;
+ }
+
+ len = le16_to_cpu(essidbuf.len);
+
+ memset(buf, 0, IW_ESSID_MAX_SIZE+1);
+ memcpy(buf, p, len);
+ buf[len] = '\0';
+
+ fail_unlock:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static long orinoco_hw_get_freq(struct orinoco_private *priv)
+{
+
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 channel;
+ long freq = 0;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel);
+ if (err)
+ goto out;
+
+ /* Intersil firmware 1.3.5 returns 0 when the interface is down */
+ if (channel == 0) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if ( (channel < 1) || (channel > NUM_CHANNELS) ) {
+ printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
+ priv->ndev->name, channel);
+ err = -EBUSY;
+ goto out;
+
+ }
+ freq = channel_frequency[channel-1] * 100000;
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ if (err > 0)
+ err = -EBUSY;
+ return err ? err : freq;
+}
+
+static int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
+ int *numrates, s32 *rates, int max)
+{
+ hermes_t *hw = &priv->hw;
+ struct hermes_idstring list;
+ unsigned char *p = (unsigned char *)&list.val;
+ int err = 0;
+ int num;
+ int i;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
+ sizeof(list), NULL, &list);
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return err;
+
+ num = le16_to_cpu(list.len);
+ *numrates = num;
+ num = min(num, max);
+
+ for (i = 0; i < num; i++) {
+ rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
+ }
+
+ return 0;
+}
+
+#if 0
+static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
+{
+ printk(KERN_DEBUG "RX descriptor:\n");
+ printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status);
+ printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time);
+ printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence);
+ printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal);
+ printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate);
+ printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow);
+ printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved);
+
+ printk(KERN_DEBUG "IEEE 802.11 header:\n");
+ printk(KERN_DEBUG " frame_ctl = 0x%04x\n",
+ frame->p80211.frame_ctl);
+ printk(KERN_DEBUG " duration_id = 0x%04x\n",
+ frame->p80211.duration_id);
+ printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr1[0], frame->p80211.addr1[1],
+ frame->p80211.addr1[2], frame->p80211.addr1[3],
+ frame->p80211.addr1[4], frame->p80211.addr1[5]);
+ printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr2[0], frame->p80211.addr2[1],
+ frame->p80211.addr2[2], frame->p80211.addr2[3],
+ frame->p80211.addr2[4], frame->p80211.addr2[5]);
+ printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr3[0], frame->p80211.addr3[1],
+ frame->p80211.addr3[2], frame->p80211.addr3[3],
+ frame->p80211.addr3[4], frame->p80211.addr3[5]);
+ printk(KERN_DEBUG " seq_ctl = 0x%04x\n",
+ frame->p80211.seq_ctl);
+ printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p80211.addr4[0], frame->p80211.addr4[1],
+ frame->p80211.addr4[2], frame->p80211.addr4[3],
+ frame->p80211.addr4[4], frame->p80211.addr4[5]);
+ printk(KERN_DEBUG " data_len = 0x%04x\n",
+ frame->p80211.data_len);
+
+ printk(KERN_DEBUG "IEEE 802.3 header:\n");
+ printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p8023.h_dest[0], frame->p8023.h_dest[1],
+ frame->p8023.h_dest[2], frame->p8023.h_dest[3],
+ frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
+ printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ frame->p8023.h_source[0], frame->p8023.h_source[1],
+ frame->p8023.h_source[2], frame->p8023.h_source[3],
+ frame->p8023.h_source[4], frame->p8023.h_source[5]);
+ printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto);
+
+ printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
+ printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap);
+ printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap);
+ printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl);
+ printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n",
+ frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
+ printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype);
+}
+#endif /* 0 */
+
+/*
+ * Interrupt handler
+ */
+irqreturn_t orinoco_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int count = MAX_IRQLOOPS_PER_IRQ;
+ u16 evstat, events;
+ /* These are used to detect a runaway interrupt situation */
+	/* If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
+	 * we assume the interrupt is stuck, mask it and schedule a reset */
+ static int last_irq_jiffy = 0; /* jiffies value the last time we were called */
+ static int loops_this_jiffy = 0;
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ /* If hw is unavailable - we don't know if the irq was
+ * for us or not */
+ return IRQ_HANDLED;
+ }
+
+ evstat = hermes_read_regn(hw, EVSTAT);
+ events = evstat & hw->inten;
+ if (! events) {
+ orinoco_unlock(priv, &flags);
+ return IRQ_NONE;
+ }
+
+ if (jiffies != last_irq_jiffy)
+ loops_this_jiffy = 0;
+ last_irq_jiffy = jiffies;
+
+ while (events && count--) {
+ if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) {
+ printk(KERN_WARNING "%s: IRQ handler is looping too "
+ "much! Resetting.\n", dev->name);
+ /* Disable interrupts for now */
+ hermes_set_irqmask(hw, 0);
+ schedule_work(&priv->reset_work);
+ break;
+ }
+
+ /* Check the card hasn't been removed */
+ if (! hermes_present(hw)) {
+ DEBUG(0, "orinoco_interrupt(): card removed\n");
+ break;
+ }
+
+ if (events & HERMES_EV_TICK)
+ __orinoco_ev_tick(dev, hw);
+ if (events & HERMES_EV_WTERR)
+ __orinoco_ev_wterr(dev, hw);
+ if (events & HERMES_EV_INFDROP)
+ __orinoco_ev_infdrop(dev, hw);
+ if (events & HERMES_EV_INFO)
+ __orinoco_ev_info(dev, hw);
+ if (events & HERMES_EV_RX)
+ __orinoco_ev_rx(dev, hw);
+ if (events & HERMES_EV_TXEXC)
+ __orinoco_ev_txexc(dev, hw);
+ if (events & HERMES_EV_TX)
+ __orinoco_ev_tx(dev, hw);
+ if (events & HERMES_EV_ALLOC)
+ __orinoco_ev_alloc(dev, hw);
+
+ hermes_write_regn(hw, EVACK, events);
+
+ evstat = hermes_read_regn(hw, EVSTAT);
+ events = evstat & hw->inten;
+	}
+
+ orinoco_unlock(priv, &flags);
+ return IRQ_HANDLED;
+}
+
+static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
+{
+ printk(KERN_DEBUG "%s: TICK\n", dev->name);
+}
+
+static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
+{
+ /* This seems to happen a fair bit under load, but ignoring it
+ seems to work fine...*/
+ printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
+ dev->name);
+}
+
+static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
+{
+ printk(KERN_WARNING "%s: Information frame lost.\n", dev->name);
+}
+
+static void print_linkstatus(struct net_device *dev, u16 status)
+{
+ char * s;
+
+ if (suppress_linkstatus)
+ return;
+
+ switch (status) {
+ case HERMES_LINKSTATUS_NOT_CONNECTED:
+ s = "Not Connected";
+ break;
+ case HERMES_LINKSTATUS_CONNECTED:
+ s = "Connected";
+ break;
+ case HERMES_LINKSTATUS_DISCONNECTED:
+ s = "Disconnected";
+ break;
+ case HERMES_LINKSTATUS_AP_CHANGE:
+ s = "AP Changed";
+ break;
+ case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
+ s = "AP Out of Range";
+ break;
+ case HERMES_LINKSTATUS_AP_IN_RANGE:
+ s = "AP In Range";
+ break;
+ case HERMES_LINKSTATUS_ASSOC_FAILED:
+ s = "Association Failed";
+ break;
+ default:
+ s = "UNKNOWN";
+ }
+
+ printk(KERN_INFO "%s: New link status: %s (%04x)\n",
+ dev->name, s, status);
+}
+
+static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = dev->priv;
+ u16 infofid;
+ struct {
+ u16 len;
+ u16 type;
+ } __attribute__ ((packed)) info;
+ int len, type;
+ int err;
+
+ /* This is an answer to an INQUIRE command that we did earlier,
+	 * or an information "event" generated by the card.
+	 * The controller returns a pseudo-frame to us containing
+	 * the information in question - Jean II */
+ infofid = hermes_read_regn(hw, INFOFID);
+
+ /* Read the info frame header - don't try too hard */
+ err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
+ infofid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading info frame. "
+ "Frame dropped.\n", dev->name, err);
+ return;
+ }
+
+ len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
+ type = le16_to_cpu(info.type);
+
+ switch (type) {
+ case HERMES_INQ_TALLIES: {
+ struct hermes_tallies_frame tallies;
+ struct iw_statistics *wstats = &priv->wstats;
+
+ if (len > sizeof(tallies)) {
+ printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
+ dev->name, len);
+ len = sizeof(tallies);
+ }
+
+ /* Read directly the data (no seek) */
+ hermes_read_words(hw, HERMES_DATA1, (void *) &tallies,
+ len / 2); /* FIXME: blech! */
+
+ /* Increment our various counters */
+ /* wstats->discard.nwid - no wrong BSSID stuff */
+ wstats->discard.code +=
+ le16_to_cpu(tallies.RxWEPUndecryptable);
+ if (len == sizeof(tallies))
+ wstats->discard.code +=
+ le16_to_cpu(tallies.RxDiscards_WEPICVError) +
+ le16_to_cpu(tallies.RxDiscards_WEPExcluded);
+ wstats->discard.misc +=
+ le16_to_cpu(tallies.TxDiscardsWrongSA);
+#if WIRELESS_EXT > 11
+ wstats->discard.fragment +=
+ le16_to_cpu(tallies.RxMsgInBadMsgFragments);
+ wstats->discard.retries +=
+ le16_to_cpu(tallies.TxRetryLimitExceeded);
+ /* wstats->miss.beacon - no match */
+#endif /* WIRELESS_EXT > 11 */
+ }
+ break;
+ case HERMES_INQ_LINKSTATUS: {
+ struct hermes_linkstatus linkstatus;
+ u16 newstatus;
+
+ if (len != sizeof(linkstatus)) {
+ printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
+ dev->name, len);
+ break;
+ }
+
+ hermes_read_words(hw, HERMES_DATA1, (void *) &linkstatus,
+ len / 2);
+ newstatus = le16_to_cpu(linkstatus.linkstatus);
+
+ if ( (newstatus == HERMES_LINKSTATUS_CONNECTED)
+ || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
+ || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE) )
+ priv->connected = 1;
+ else if ( (newstatus == HERMES_LINKSTATUS_NOT_CONNECTED)
+ || (newstatus == HERMES_LINKSTATUS_DISCONNECTED)
+ || (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE)
+ || (newstatus == HERMES_LINKSTATUS_ASSOC_FAILED) )
+ priv->connected = 0;
+
+ if (newstatus != priv->last_linkstatus)
+ print_linkstatus(dev, newstatus);
+
+ priv->last_linkstatus = newstatus;
+ }
+ break;
+ default:
+ printk(KERN_DEBUG "%s: Unknown information frame received (type %04x).\n",
+ dev->name, type);
+ /* We don't actually do anything about it */
+ break;
+ }
+}
+
+static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct net_device_stats *stats = &priv->stats;
+ struct iw_statistics *wstats = &priv->wstats;
+ struct sk_buff *skb = NULL;
+ u16 rxfid, status;
+ int length, data_len, data_off;
+ char *p;
+ struct hermes_rx_descriptor desc;
+ struct header_struct hdr;
+ struct ethhdr *eh;
+ int err;
+
+ rxfid = hermes_read_regn(hw, RXFID);
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc),
+ rxfid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading Rx descriptor. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ status = le16_to_cpu(desc.status);
+
+ if (status & HERMES_RXSTAT_ERR) {
+ if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
+ wstats->discard.code++;
+ DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
+ dev->name);
+ } else {
+ stats->rx_crc_errors++;
+ DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", dev->name);
+ }
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ /* For now we ignore the 802.11 header completely, assuming
+ that the card's firmware has handled anything vital */
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &hdr, sizeof(hdr),
+ rxfid, HERMES_802_3_OFFSET);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading frame header. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ length = ntohs(hdr.len);
+
+ /* Sanity checks */
+	if (length < 3) { /* Not enough room for even an 802.2 LLC header */
+ /* At least on Symbol firmware with PCF we get quite a
+ lot of these legitimately - Poll frames with no
+ data. */
+ stats->rx_dropped++;
+ goto drop;
+ }
+ if (length > IEEE802_11_DATA_LEN) {
+ printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
+ dev->name, length);
+ stats->rx_length_errors++;
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ /* We need space for the packet data itself, plus an ethernet
+ header, plus 2 bytes so we can align the IP header on a
+ 32bit boundary, plus 1 byte so we can read in odd length
+ packets from the card, which has an IO granularity of 16
+ bits */
+ skb = dev_alloc_skb(length+ETH_HLEN+2+1);
+ if (!skb) {
+ printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
+ dev->name);
+ goto drop;
+ }
+
+ skb_reserve(skb, 2); /* This way the IP header is aligned */
+
+ /* Handle decapsulation
+	 * In most cases, the firmware tells us about SNAP frames.
+	 * For some reason, the SNAP frames sent by LinkSys APs
+	 * are not properly recognised by most firmwares, so
+	 * check for them ourselves */
+ if(((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
+ ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
+ is_ethersnap(&hdr)) {
+ /* These indicate a SNAP within 802.2 LLC within
+ 802.11 frame which we'll need to de-encapsulate to
+ the original EthernetII frame. */
+
+ if (length < ENCAPS_OVERHEAD) { /* No room for full LLC+SNAP */
+ stats->rx_length_errors++;
+ goto drop;
+ }
+
+ /* Remove SNAP header, reconstruct EthernetII frame */
+ data_len = length - ENCAPS_OVERHEAD;
+ data_off = HERMES_802_3_OFFSET + sizeof(hdr);
+
+ eh = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+
+ memcpy(eh, &hdr, 2 * ETH_ALEN);
+ eh->h_proto = hdr.ethertype;
+ } else {
+ /* All other cases indicate a genuine 802.3 frame. No
+ decapsulation needed. We just throw the whole
+ thing in, and hope the protocol layer can deal with
+ it as 802.3 */
+ data_len = length;
+ data_off = HERMES_802_3_OFFSET;
+ /* FIXME: we re-read from the card data we already read here */
+ }
+
+ p = skb_put(skb, data_len);
+ err = hermes_bap_pread(hw, IRQ_BAP, p, RUP_EVEN(data_len),
+ rxfid, data_off);
+ if (err) {
+ printk(KERN_ERR "%s: error %d reading frame. "
+ "Frame dropped.\n", dev->name, err);
+ stats->rx_errors++;
+ goto drop;
+ }
+
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Process the wireless stats if needed */
+ orinoco_stat_gather(dev, skb, &desc);
+
+ /* Pass the packet to the networking stack */
+ netif_rx(skb);
+ stats->rx_packets++;
+
+ return;
+
+ drop:
+ stats->rx_dropped++;
+
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ return;
+}
+
+static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct net_device_stats *stats = &priv->stats;
+ u16 fid = hermes_read_regn(hw, TXCOMPLFID);
+ struct hermes_tx_descriptor desc;
+ int err = 0;
+
+ if (fid == DUMMY_FID)
+ return; /* Nothing's really happened */
+
+ err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc), fid, 0);
+ if (err) {
+ printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
+ "(FID=%04X error %d)\n",
+ dev->name, fid, err);
+ } else {
+ DEBUG(1, "%s: Tx error, status %d\n",
+ dev->name, le16_to_cpu(desc.status));
+ }
+
+ stats->tx_errors++;
+
+ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct net_device_stats *stats = &priv->stats;
+
+ stats->tx_packets++;
+
+ hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
+}
+
+static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
+{
+ struct orinoco_private *priv = dev->priv;
+
+ u16 fid = hermes_read_regn(hw, ALLOCFID);
+
+ if (fid != priv->txfid) {
+ if (fid != DUMMY_FID)
+ printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
+ dev->name, fid);
+ return;
+ } else {
+ netif_wake_queue(dev);
+ }
+
+ hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
+}
+
+struct sta_id {
+ u16 id, variant, major, minor;
+} __attribute__ ((packed));
+
+static int determine_firmware_type(struct net_device *dev, struct sta_id *sta_id)
+{
+ /* FIXME: this is fundamentally broken */
+ unsigned int firmver = ((u32)sta_id->major << 16) | sta_id->minor;
+
+ if (sta_id->variant == 1)
+ return FIRMWARE_TYPE_AGERE;
+ else if ((sta_id->variant == 2) &&
+ ((firmver == 0x10001) || (firmver == 0x20001)))
+ return FIRMWARE_TYPE_SYMBOL;
+ else
+ return FIRMWARE_TYPE_INTERSIL;
+}
+
+static void determine_firmware(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err;
+ struct sta_id sta_id;
+ unsigned int firmver;
+ char tmp[SYMBOL_MAX_VER_LEN+1];
+
+ /* Get the firmware version */
+ err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
+ if (err) {
+ printk(KERN_WARNING "%s: Error %d reading firmware info. Wildly guessing capabilities...\n",
+ dev->name, err);
+ memset(&sta_id, 0, sizeof(sta_id));
+ }
+ le16_to_cpus(&sta_id.id);
+ le16_to_cpus(&sta_id.variant);
+ le16_to_cpus(&sta_id.major);
+ le16_to_cpus(&sta_id.minor);
+
+ printk(KERN_DEBUG "%s: Station identity %04x:%04x:%04x:%04x\n",
+ dev->name, sta_id.id, sta_id.variant,
+ sta_id.major, sta_id.minor);
+
+ if (! priv->firmware_type)
+ priv->firmware_type = determine_firmware_type(dev, &sta_id);
+
+ /* Default capabilities */
+ priv->has_sensitivity = 1;
+ priv->has_mwo = 0;
+ priv->has_preamble = 0;
+ priv->has_port3 = 1;
+ priv->has_ibss = 1;
+ priv->has_ibss_any = 0;
+ priv->has_wep = 0;
+ priv->has_big_wep = 0;
+
+ /* Determine capabilities from the firmware version */
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE:
+ /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
+ ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
+ printk(KERN_DEBUG "%s: Looks like a Lucent/Agere firmware "
+ "version %d.%02d\n", dev->name,
+ sta_id.major, sta_id.minor);
+
+ firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
+
+ priv->has_ibss = (firmver >= 0x60006);
+ priv->has_ibss_any = (firmver >= 0x60010);
+ priv->has_wep = (firmver >= 0x40020);
+ priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell
+ Gold cards from the others? */
+ priv->has_mwo = (firmver >= 0x60000);
+ priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */
+ priv->ibss_port = 1;
+
+ /* Tested with Agere firmware :
+ * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
+ * Tested CableTron firmware : 4.32 => Anton */
+ break;
+ case FIRMWARE_TYPE_SYMBOL:
+ /* Symbol , 3Com AirConnect, Intel, Ericsson WLAN */
+ /* Intel MAC : 00:02:B3:* */
+ /* 3Com MAC : 00:50:DA:* */
+ memset(tmp, 0, sizeof(tmp));
+ /* Get the Symbol firmware version */
+ err = hermes_read_ltv(hw, USER_BAP,
+ HERMES_RID_SECONDARYVERSION_SYMBOL,
+ SYMBOL_MAX_VER_LEN, NULL, &tmp);
+ if (err) {
+ printk(KERN_WARNING
+ "%s: Error %d reading Symbol firmware info. Wildly guessing capabilities...\n",
+ dev->name, err);
+ firmver = 0;
+ tmp[0] = '\0';
+ } else {
+ /* The firmware revision is a string, the format is
+ * something like : "V2.20-01".
+ * Quick and dirty parsing... - Jean II
+ */
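+			/* e.g. "V2.20-01" parses to 0x22001 below */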
+ firmver = ((tmp[1] - '0') << 16) | ((tmp[3] - '0') << 12)
+ | ((tmp[4] - '0') << 8) | ((tmp[6] - '0') << 4)
+ | (tmp[7] - '0');
+
+ tmp[SYMBOL_MAX_VER_LEN] = '\0';
+ }
+
+ printk(KERN_DEBUG "%s: Looks like a Symbol firmware "
+ "version [%s] (parsing to %X)\n", dev->name,
+ tmp, firmver);
+
+ priv->has_ibss = (firmver >= 0x20000);
+ priv->has_wep = (firmver >= 0x15012);
+ priv->has_big_wep = (firmver >= 0x20000);
+ priv->has_pm = (firmver >= 0x20000) && (firmver < 0x22000);
+ priv->has_preamble = (firmver >= 0x20000);
+ priv->ibss_port = 4;
+ /* Tested with Intel firmware : 0x20015 => Jean II */
+ /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
+ break;
+ case FIRMWARE_TYPE_INTERSIL:
+ /* D-Link, Linksys, Adtron, ZoomAir, and many others...
+ * Samsung, Compaq 100/200 and Proxim are slightly
+ * different and less well tested */
+ /* D-Link MAC : 00:40:05:* */
+ /* Addtron MAC : 00:90:D1:* */
+ printk(KERN_DEBUG "%s: Looks like an Intersil firmware "
+ "version %d.%d.%d\n", dev->name,
+ sta_id.major, sta_id.minor, sta_id.variant);
+
+ firmver = ((unsigned long)sta_id.major << 16) |
+ ((unsigned long)sta_id.minor << 8) | sta_id.variant;
+
+ priv->has_ibss = (firmver >= 0x000700); /* FIXME */
+ priv->has_big_wep = priv->has_wep = (firmver >= 0x000800);
+ priv->has_pm = (firmver >= 0x000700);
+
+ if (firmver >= 0x000800)
+ priv->ibss_port = 0;
+ else {
+ printk(KERN_NOTICE "%s: Intersil firmware earlier "
+ "than v0.8.x - several features not supported\n",
+ dev->name);
+ priv->ibss_port = 1;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * struct net_device methods
+ */
+
+static int
+orinoco_init(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ struct hermes_idstring nickbuf;
+ u16 reclen;
+ int len;
+
+ TRACE_ENTER(dev->name);
+
+ /* No need to lock, the hw_unavailable flag is already set in
+ * alloc_orinocodev() */
+ priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN;
+
+ /* Initialize the firmware */
+ err = hermes_init(hw);
+ if (err != 0) {
+ printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
+ dev->name, err);
+ goto out;
+ }
+
+ determine_firmware(dev);
+
+ if (priv->has_port3)
+ printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name);
+ if (priv->has_ibss)
+ printk(KERN_DEBUG "%s: IEEE standard IBSS ad-hoc mode supported\n",
+ dev->name);
+ if (priv->has_wep) {
+ printk(KERN_DEBUG "%s: WEP supported, ", dev->name);
+ if (priv->has_big_wep)
+ printk("104-bit key\n");
+ else
+ printk("40-bit key\n");
+ }
+
+ /* Get the MAC address */
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ ETH_ALEN, NULL, dev->dev_addr);
+ if (err) {
+ printk(KERN_WARNING "%s: failed to read MAC address!\n",
+ dev->name);
+ goto out;
+ }
+
+ printk(KERN_DEBUG "%s: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev->name, dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ /* Get the station name */
+ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ sizeof(nickbuf), &reclen, &nickbuf);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read station name\n",
+ dev->name);
+ goto out;
+ }
+ if (nickbuf.len)
+ len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len));
+ else
+ len = min(IW_ESSID_MAX_SIZE, 2 * reclen);
+ memcpy(priv->nick, &nickbuf.val, len);
+ priv->nick[len] = '\0';
+
+ printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
+
+ /* Get allowed channels */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
+ &priv->channel_mask);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read channel list!\n",
+ dev->name);
+ goto out;
+ }
+
+ /* Get initial AP density */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
+ &priv->ap_density);
+ if (err || priv->ap_density < 1 || priv->ap_density > 3) {
+ priv->has_sensitivity = 0;
+ }
+
+ /* Get initial RTS threshold */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
+ &priv->rts_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read RTS threshold!\n", dev->name);
+ goto out;
+ }
+
+ /* Get initial fragmentation settings */
+ if (priv->has_mwo)
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ &priv->mwo_robust);
+ else
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ &priv->frag_thresh);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read fragmentation settings!\n", dev->name);
+ goto out;
+ }
+
+ /* Power management setup */
+ if (priv->has_pm) {
+ priv->pm_on = 0;
+ priv->pm_mcast = 1;
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION,
+ &priv->pm_period);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read power management period!\n",
+ dev->name);
+ goto out;
+ }
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPMHOLDOVERDURATION,
+ &priv->pm_timeout);
+ if (err) {
+ printk(KERN_ERR "%s: failed to read power management timeout!\n",
+ dev->name);
+ goto out;
+ }
+ }
+
+ /* Preamble setup */
+ if (priv->has_preamble) {
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL,
+ &priv->preamble);
+ if (err)
+ goto out;
+ }
+
+ /* Set up the default configuration */
+ priv->iw_mode = IW_MODE_INFRA;
+ /* By default use IEEE/IBSS ad-hoc mode if we have it */
+ priv->prefer_port3 = priv->has_port3 && (! priv->has_ibss);
+ set_port_type(priv);
+ priv->channel = 10; /* default channel, more-or-less arbitrary */
+
+ priv->promiscuous = 0;
+ priv->wep_on = 0;
+ priv->tx_key = 0;
+
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err == -EIO) {
+ /* Try workaround for old Symbol firmware bug */
+ printk(KERN_WARNING "%s: firmware ALLOC bug detected "
+ "(old Symbol firmware?). Trying to work around... ",
+ dev->name);
+
+ priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
+ err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ if (err)
+ printk("failed!\n");
+ else
+ printk("ok.\n");
+ }
+ if (err) {
+ printk("%s: Error %d allocating Tx buffer\n", dev->name, err);
+ goto out;
+ }
+
+ /* Make the hardware available, as long as it hasn't been
+ * removed elsewhere (e.g. by PCMCIA hot unplug) */
+ spin_lock_irq(&priv->lock);
+ priv->hw_unavailable--;
+ spin_unlock_irq(&priv->lock);
+
+ printk(KERN_DEBUG "%s: ready\n", dev->name);
+
+ out:
+ TRACE_EXIT(dev->name);
+ return err;
+}
+
+struct net_device_stats *
+orinoco_get_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+
+ return &priv->stats;
+}
+
+struct iw_statistics *
+orinoco_get_wireless_stats(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ struct iw_statistics *wstats = &priv->wstats;
+ int err = 0;
+ unsigned long flags;
+
+ if (! netif_device_present(dev)) {
+ printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
+ dev->name);
+ return NULL; /* FIXME: Can we do better than this? */
+ }
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return NULL; /* FIXME: Erg, we've been signalled, how
+ * do we propagate this back up? */
+
+ if (priv->iw_mode == IW_MODE_ADHOC) {
+ memset(&wstats->qual, 0, sizeof(wstats->qual));
+ /* If a spy address is defined, we report stats of the
+ * first spy address - Jean II */
+ if (SPY_NUMBER(priv)) {
+ wstats->qual.qual = priv->spy_stat[0].qual;
+ wstats->qual.level = priv->spy_stat[0].level;
+ wstats->qual.noise = priv->spy_stat[0].noise;
+ wstats->qual.updated = priv->spy_stat[0].updated;
+ }
+ } else {
+ struct {
+ u16 qual, signal, noise;
+ } __attribute__ ((packed)) cq;
+
+ err = HERMES_READ_RECORD(hw, USER_BAP,
+ HERMES_RID_COMMSQUALITY, &cq);
+
+ wstats->qual.qual = (int)le16_to_cpu(cq.qual);
+ wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
+ wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
+ wstats->qual.updated = 7;
+ }
+
+ /* We can't really wait for the tallies inquiry command to
+ * complete, so we just use the previous results and trigger
+ * a new tallies inquiry command for next time - Jean II */
+ /* FIXME: We're in user context (I think?), so we should just
+ wait for the tallies to come through */
+ err = hermes_inquire(hw, HERMES_INQ_TALLIES);
+
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return NULL;
+
+ return wstats;
+}
+
+static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
+ int level, int noise)
+{
+ struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
+ int i;
+
+	/* Gather wireless spy statistics: for each packet, compare the
+	 * source address with our list, and if it matches, update the stats */
+ for (i = 0; i < priv->spy_number; i++)
+ if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) {
+ priv->spy_stat[i].level = level - 0x95;
+ priv->spy_stat[i].noise = noise - 0x95;
+ priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
+ priv->spy_stat[i].updated = 7;
+ }
+}
+
+void
+orinoco_stat_gather(struct net_device *dev,
+ struct sk_buff *skb,
+ struct hermes_rx_descriptor *desc)
+{
+ struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
+
+ /* Using spy support with lots of Rx packets, like in an
+ * infrastructure (AP), will really slow down everything, because
+ * the MAC address must be compared to each entry of the spy list.
+ * If the user really asks for it (set some address in the
+ * spy list), we do it, but he will pay the price.
+ * Note that to get here, you need both WIRELESS_SPY
+ * compiled in AND some addresses in the list !!!
+ */
+ /* Note : gcc will optimise the whole section away if
+ * WIRELESS_SPY is not defined... - Jean II */
+ if (SPY_NUMBER(priv)) {
+ orinoco_spy_gather(dev, skb->mac.raw + ETH_ALEN,
+ desc->signal, desc->silence);
+ }
+}
+
+static int
+orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
+ struct net_device_stats *stats = &priv->stats;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 txfid = priv->txfid;
+ char *p;
+ struct ethhdr *eh;
+ int len, data_len, data_off;
+ struct hermes_tx_descriptor desc;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ if (! netif_running(dev)) {
+ printk(KERN_ERR "%s: Tx on stopped device!\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+ return 1;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
+ dev->name);
+ TRACE_EXIT(dev->name);
+/* BUG(); */
+ return 1;
+ }
+
+ if (! priv->connected) {
+ /* Oops, the firmware hasn't established a connection,
+ silently drop the packet (this seems to be the
+ safest approach). */
+ stats->tx_errors++;
+ orinoco_unlock(priv, &flags);
+ dev_kfree_skb(skb, FREE_WRITE);
+ TRACE_EXIT(dev->name);
+ return 0;
+ }
+
+ /* Length of the packet body */
+ /* FIXME: what if the skb is smaller than this? */
+ len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);
+
+ eh = (struct ethhdr *)skb->data;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX);
+ err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d writing Tx descriptor to BAP\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ /* Clear the 802.11 header and data length fields - some
+ * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
+ * if this isn't done. */
+ hermes_clear_words(hw, HERMES_DATA0,
+ HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
+
+ /* Encapsulate Ethernet-II frames */
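+	/* (a length/type field above 1500 cannot be an 802.3 length, so it
+	 * must be an ethertype and the frame needs LLC/SNAP encapsulation
+	 * before it goes out over 802.11) */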
+ if (ntohs(eh->h_proto) > 1500) { /* Ethernet-II frame */
+ struct header_struct hdr;
+ data_len = len;
+ data_off = HERMES_802_3_OFFSET + sizeof(hdr);
+ p = skb->data + ETH_HLEN;
+
+ /* 802.3 header */
+ memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
+ memcpy(hdr.src, eh->h_source, ETH_ALEN);
+ hdr.len = htons(data_len + ENCAPS_OVERHEAD);
+
+ /* 802.2 header */
+ memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
+
+ hdr.ethertype = eh->h_proto;
+ err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
+ txfid, HERMES_802_3_OFFSET);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d writing packet header to BAP\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+ } else { /* IEEE 802.3 frame */
+ data_len = len + ETH_HLEN;
+ data_off = HERMES_802_3_OFFSET;
+ p = skb->data;
+ }
+
+ /* Round up for odd length packets */
+ err = hermes_bap_pwrite(hw, USER_BAP, p, RUP_EVEN(data_len), txfid, data_off);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
+ dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ /* Finally, we actually initiate the send */
+ netif_stop_queue(dev);
+
+ err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL, txfid, NULL);
+ if (err) {
+ netif_start_queue(dev);
+ printk(KERN_ERR "%s: Error %d transmitting packet\n", dev->name, err);
+ stats->tx_errors++;
+ goto fail;
+ }
+
+ dev->trans_start = jiffies;
+
+ orinoco_unlock(priv, &flags);
+
+ DEV_KFREE_SKB(skb);
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+ fail:
+ TRACE_EXIT(dev->name);
+
+ orinoco_unlock(priv, &flags);
+ return err;
+}
+
+#ifdef HAVE_TX_TIMEOUT
+static void
+orinoco_tx_timeout(struct net_device *dev)
+{
+ struct orinoco_private *priv = (struct orinoco_private *)dev->priv;
+ struct net_device_stats *stats = &priv->stats;
+ struct hermes *hw = &priv->hw;
+
+ printk(KERN_WARNING "%s: Tx timeout! "
+ "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
+ dev->name, hermes_read_regn(hw, ALLOCFID),
+ hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
+
+ stats->tx_errors++;
+
+ schedule_work(&priv->reset_work);
+}
+#endif
+
+static int
+orinoco_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct orinoco_private *priv = dev->priv;
+
+ if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
+ return -EINVAL;
+
+ if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) >
+ (priv->nicbuf_size - ETH_HLEN) )
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/* FIXME: return int? */
+static void
+__orinoco_set_multicast_list(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int promisc, mc_count;
+
+ /* The Hermes doesn't seem to have an allmulti mode, so we go
+ * into promiscuous mode and let the upper levels deal. */
+ if ( (dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > MAX_MULTICAST(priv)) ) {
+ promisc = 1;
+ mc_count = 0;
+ } else {
+ promisc = 0;
+ mc_count = dev->mc_count;
+ }
+
+ if (promisc != priv->promiscuous) {
+ err = hermes_write_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFPROMISCUOUSMODE,
+ promisc);
+ if (err) {
+			printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to %d.\n",
+			       dev->name, err, promisc);
+ } else
+ priv->promiscuous = promisc;
+ }
+
+ if (! promisc && (mc_count || priv->mc_count) ) {
+ struct dev_mc_list *p = dev->mc_list;
+ hermes_multicast_t mclist;
+ int i;
+
+ for (i = 0; i < mc_count; i++) {
+ /* Paranoia: */
+ if (! p)
+ BUG(); /* Multicast list shorter than mc_count */
+ if (p->dmi_addrlen != ETH_ALEN)
+ BUG(); /* Bad address size in multicast list */
+
+ memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
+ p = p->next;
+ }
+
+ if (p)
+ printk(KERN_WARNING "Multicast list is longer than mc_count\n");
+
+ err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES,
+				       HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
+ &mclist);
+ if (err)
+ printk(KERN_ERR "%s: Error %d setting multicast list.\n",
+ dev->name, err);
+ else
+ priv->mc_count = mc_count;
+ }
+
+ /* Since we can set the promiscuous flag when it wasn't asked
+ for, make sure the net_device knows about it. */
+ if (priv->promiscuous)
+ dev->flags |= IFF_PROMISC;
+ else
+ dev->flags &= ~IFF_PROMISC;
+}
+
+/********************************************************************/
+/* Wireless extensions support */
+/********************************************************************/
+
+static int orinoco_ioctl_getiwrange(struct net_device *dev, struct iw_point *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+ int mode;
+ struct iw_range range;
+ int numrates;
+ int i, k;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ err = verify_area(VERIFY_WRITE, rrq->pointer, sizeof(range));
+ if (err)
+ return err;
+
+ rrq->length = sizeof(range);
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ mode = priv->iw_mode;
+ orinoco_unlock(priv, &flags);
+
+ memset(&range, 0, sizeof(range));
+
+	/* Much of this is shamelessly taken from wvlan_cs.c. No idea
+	 * what it all means -dgibson */
+#if WIRELESS_EXT > 10
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 11;
+#endif /* WIRELESS_EXT > 10 */
+
+ range.min_nwid = range.max_nwid = 0; /* We don't use nwids */
+
+ /* Set available channels/frequencies */
+ range.num_channels = NUM_CHANNELS;
+ k = 0;
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (priv->channel_mask & (1 << i)) {
+ range.freq[k].i = i + 1;
+ range.freq[k].m = channel_frequency[i] * 100000;
+ range.freq[k].e = 1;
+ k++;
+ }
+
+ if (k >= IW_MAX_FREQUENCIES)
+ break;
+ }
+ range.num_frequency = k;
+
+ range.sensitivity = 3;
+
+ if ((mode == IW_MODE_ADHOC) && (priv->spy_number == 0)){
+ /* Quality stats meaningless in ad-hoc mode */
+ range.max_qual.qual = 0;
+ range.max_qual.level = 0;
+ range.max_qual.noise = 0;
+#if WIRELESS_EXT > 11
+ range.avg_qual.qual = 0;
+ range.avg_qual.level = 0;
+ range.avg_qual.noise = 0;
+#endif /* WIRELESS_EXT > 11 */
+
+ } else {
+ range.max_qual.qual = 0x8b - 0x2f;
+ range.max_qual.level = 0x2f - 0x95 - 1;
+ range.max_qual.noise = 0x2f - 0x95 - 1;
+#if WIRELESS_EXT > 11
+ /* Need to get better values */
+ range.avg_qual.qual = 0x24;
+ range.avg_qual.level = 0xC2;
+ range.avg_qual.noise = 0x9E;
+#endif /* WIRELESS_EXT > 11 */
+ }
+
+ err = orinoco_hw_get_bitratelist(priv, &numrates,
+ range.bitrate, IW_MAX_BITRATES);
+ if (err)
+ return err;
+ range.num_bitrates = numrates;
+
+ /* Set an indication of the max TCP throughput in bit/s that we can
+	 * expect using this interface. May be of use for QoS stuff...
+ * Jean II */
+ if(numrates > 2)
+ range.throughput = 5 * 1000 * 1000; /* ~5 Mb/s */
+ else
+ range.throughput = 1.5 * 1000 * 1000; /* ~1.5 Mb/s */
+
+ range.min_rts = 0;
+ range.max_rts = 2347;
+ range.min_frag = 256;
+ range.max_frag = 2346;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ if (priv->has_wep) {
+ range.max_encoding_tokens = ORINOCO_MAX_KEYS;
+
+ range.encoding_size[0] = SMALL_KEY_SIZE;
+ range.num_encoding_sizes = 1;
+
+ if (priv->has_big_wep) {
+ range.encoding_size[1] = LARGE_KEY_SIZE;
+ range.num_encoding_sizes = 2;
+ }
+ } else {
+ range.num_encoding_sizes = 0;
+ range.max_encoding_tokens = 0;
+ }
+ orinoco_unlock(priv, &flags);
+
+ range.min_pmp = 0;
+ range.max_pmp = 65535000;
+ range.min_pmt = 0;
+ range.max_pmt = 65535 * 1000; /* ??? */
+ range.pmp_flags = IW_POWER_PERIOD;
+ range.pmt_flags = IW_POWER_TIMEOUT;
+ range.pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_UNICAST_R;
+
+ range.num_txpower = 1;
+ range.txpower[0] = 15; /* 15dBm */
+ range.txpower_capa = IW_TXPOW_DBM;
+
+#if WIRELESS_EXT > 10
+ range.retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
+ range.retry_flags = IW_RETRY_LIMIT;
+ range.r_time_flags = IW_RETRY_LIFETIME;
+ range.min_retry = 0;
+ range.max_retry = 65535; /* ??? */
+ range.min_r_time = 0;
+ range.max_r_time = 65535 * 1000; /* ??? */
+#endif /* WIRELESS_EXT > 10 */
+
+ if (copy_to_user(rrq->pointer, &range, sizeof(range)))
+ return -EFAULT;
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int index = (erq->flags & IW_ENCODE_INDEX) - 1;
+ int setindex = priv->tx_key;
+ int enable = priv->wep_on;
+ int restricted = priv->wep_restrict;
+ u16 xlen = 0;
+ int err = 0;
+ char keybuf[ORINOCO_MAX_KEY_SIZE];
+ unsigned long flags;
+
+ if (erq->pointer) {
+ /* We actually have a key to set */
+ if ( (erq->length < SMALL_KEY_SIZE) || (erq->length > ORINOCO_MAX_KEY_SIZE) )
+ return -EINVAL;
+
+ if (copy_from_user(keybuf, erq->pointer, erq->length))
+ return -EFAULT;
+ }
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if (erq->pointer) {
+ if (erq->length > ORINOCO_MAX_KEY_SIZE) {
+ err = -E2BIG;
+ goto out;
+ }
+
+ if ( (erq->length > LARGE_KEY_SIZE)
+ || ( ! priv->has_big_wep && (erq->length > SMALL_KEY_SIZE)) ) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
+ index = priv->tx_key;
+
+ if (erq->length > SMALL_KEY_SIZE) {
+ xlen = LARGE_KEY_SIZE;
+ } else if (erq->length > 0) {
+ xlen = SMALL_KEY_SIZE;
+ } else
+ xlen = 0;
+
+ /* Switch on WEP if off */
+ if ((!enable) && (xlen > 0)) {
+ setindex = index;
+ enable = 1;
+ }
+ } else {
+		/* Important note : if the user does "iwconfig eth0 enc off",
+		 * we will arrive here with an index of -1. This is valid
+		 * but needs to be taken care of... Jean II */
+ if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) {
+ if((index != -1) || (erq->flags == 0)) {
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ /* Set the index : Check that the key is valid */
+ if(priv->keys[index].len == 0) {
+ err = -EINVAL;
+ goto out;
+ }
+ setindex = index;
+ }
+ }
+
+ if (erq->flags & IW_ENCODE_DISABLED)
+ enable = 0;
+ /* Only for Prism2 & Symbol cards (so far) - Jean II */
+ if (erq->flags & IW_ENCODE_OPEN)
+ restricted = 0;
+ if (erq->flags & IW_ENCODE_RESTRICTED)
+ restricted = 1;
+
+ if (erq->pointer) {
+ priv->keys[index].len = cpu_to_le16(xlen);
+ memset(priv->keys[index].data, 0, sizeof(priv->keys[index].data));
+ memcpy(priv->keys[index].data, keybuf, erq->length);
+ }
+ priv->tx_key = setindex;
+ priv->wep_on = enable;
+ priv->wep_restrict = restricted;
+
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
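+
+/* Summary sketch (comment only, not compiled) of how the key length given
+ * by the user maps onto what orinoco_ioctl_setiwencode() above stores.
+ * SMALL_KEY_SIZE and LARGE_KEY_SIZE are defined elsewhere in the driver:
+ *
+ *	length <  SMALL_KEY_SIZE                  -> -EINVAL (rejected early)
+ *	length == SMALL_KEY_SIZE                  -> stored, xlen = SMALL_KEY_SIZE
+ *	SMALL_KEY_SIZE < length <= LARGE_KEY_SIZE -> needs priv->has_big_wep;
+ *	                                             zero-padded to LARGE_KEY_SIZE
+ *	length >  LARGE_KEY_SIZE                  -> -EINVAL
+ *
+ * Passing no key (erq->pointer == NULL) only changes the active key index
+ * and the enable/open/restricted flags.
+ */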
+
+static int orinoco_ioctl_getiwencode(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int index = (erq->flags & IW_ENCODE_INDEX) - 1;
+ u16 xlen = 0;
+ char keybuf[ORINOCO_MAX_KEY_SIZE];
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
+ index = priv->tx_key;
+
+ erq->flags = 0;
+ if (! priv->wep_on)
+ erq->flags |= IW_ENCODE_DISABLED;
+ erq->flags |= index + 1;
+
+ /* Only for symbol cards - Jean II */
+ if (priv->firmware_type != FIRMWARE_TYPE_AGERE) {
+ if(priv->wep_restrict)
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ else
+ erq->flags |= IW_ENCODE_OPEN;
+ }
+
+ xlen = le16_to_cpu(priv->keys[index].len);
+
+ erq->length = xlen;
+
+ if (erq->pointer) {
+ memcpy(keybuf, priv->keys[index].data, ORINOCO_MAX_KEY_SIZE);
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ if (erq->pointer) {
+ if (copy_to_user(erq->pointer, keybuf, xlen))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int orinoco_ioctl_setessid(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char essidbuf[IW_ESSID_MAX_SIZE+1];
+ int err;
+ unsigned long flags;
+
+ /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it
+ * anyway... - Jean II */
+
+ memset(&essidbuf, 0, sizeof(essidbuf));
+
+ if (erq->flags) {
+ if (erq->length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ if (copy_from_user(&essidbuf, erq->pointer, erq->length))
+ return -EFAULT;
+
+ essidbuf[erq->length] = '\0';
+ }
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ memcpy(priv->desired_essid, essidbuf, sizeof(priv->desired_essid));
+
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getessid(struct net_device *dev, struct iw_point *erq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char essidbuf[IW_ESSID_MAX_SIZE+1];
+ int active;
+ int err = 0;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+ if (netif_running(dev)) {
+ err = orinoco_hw_get_essid(priv, &active, essidbuf);
+ if (err)
+ return err;
+ } else {
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ memcpy(essidbuf, priv->desired_essid, sizeof(essidbuf));
+ orinoco_unlock(priv, &flags);
+ }
+
+ erq->flags = 1;
+ erq->length = strlen(essidbuf) + 1;
+ if (erq->pointer)
+ if (copy_to_user(erq->pointer, essidbuf, erq->length))
+ return -EFAULT;
+
+ TRACE_EXIT(dev->name);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setnick(struct net_device *dev, struct iw_point *nrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char nickbuf[IW_ESSID_MAX_SIZE+1];
+ int err;
+ unsigned long flags;
+
+ if (nrq->length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ memset(nickbuf, 0, sizeof(nickbuf));
+
+ if (copy_from_user(nickbuf, nrq->pointer, nrq->length))
+ return -EFAULT;
+
+ nickbuf[nrq->length] = '\0';
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ memcpy(priv->nick, nickbuf, sizeof(priv->nick));
+
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_getnick(struct net_device *dev, struct iw_point *nrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ char nickbuf[IW_ESSID_MAX_SIZE+1];
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ memcpy(nickbuf, priv->nick, IW_ESSID_MAX_SIZE+1);
+ orinoco_unlock(priv, &flags);
+
+ nrq->length = strlen(nickbuf)+1;
+
+#ifdef MACH
+ if(! nrq->pointer) {
+ printk(KERN_INFO "orinoco_ioctl_getnick: no nrq pointer.\n");
+ return -EFAULT;
+ }
+#endif
+
+ if (copy_to_user(nrq->pointer, nickbuf, sizeof(nickbuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int orinoco_ioctl_setfreq(struct net_device *dev, struct iw_freq *frq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int chan = -1;
+ int err;
+ unsigned long flags;
+
+ /* We can only use this in Ad-Hoc demo mode to set the operating
+ * frequency, or in IBSS mode to set the frequency where the IBSS
+ * will be created - Jean II */
+ if (priv->iw_mode != IW_MODE_ADHOC)
+ return -EOPNOTSUPP;
+
+ if ( (frq->e == 0) && (frq->m <= 1000) ) {
+ /* Setting by channel number */
+ chan = frq->m;
+ } else {
+ /* Setting by frequency - search the table */
+ int mult = 1;
+ int i;
+
+ for (i = 0; i < (6 - frq->e); i++)
+ mult *= 10;
+
+ for (i = 0; i < NUM_CHANNELS; i++)
+ if (frq->m == (channel_frequency[i] * mult))
+ chan = i+1;
+ }
+
+ if ( (chan < 1) || (chan > NUM_CHANNELS) ||
+ ! (priv->channel_mask & (1 << (chan-1)) ) )
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ priv->channel = chan;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
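+
+/* Worked example (illustration only) of the iw_freq encodings accepted by
+ * orinoco_ioctl_setfreq() above.  channel_frequency[] holds the per-channel
+ * frequency in MHz, so a frequency request matches channel "chan" when
+ *	frq->m * 10^frq->e == channel_frequency[chan-1] * 10^6   (in Hz).
+ * Assuming the usual 802.11b channel plan (channel 3 = 2422 MHz), all of
+ * the following select channel 3:
+ *	{ .m = 3,       .e = 0 }	channel number (m <= 1000)
+ *	{ .m = 2422,    .e = 6 }	frequency in MHz
+ *	{ .m = 2422000, .e = 3 }	frequency in kHz
+ */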
+
+static int orinoco_ioctl_getsens(struct net_device *dev, struct iw_param *srq)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ u16 val;
+ int err;
+ unsigned long flags;
+
+ if (!priv->has_sensitivity)
+ return -EOPNOTSUPP;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &val);
+ orinoco_unlock(priv, &flags);
+
+ if (err)
+ return err;
+
+ srq->value = val;
+ srq->fixed = 0; /* auto */
+
+ return 0;
+}
+
+static int orinoco_ioctl_setsens(struct net_device *dev, struct iw_param *srq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = srq->value;
+ int err;
+ unsigned long flags;
+
+ if (!priv->has_sensitivity)
+ return -EOPNOTSUPP;
+
+ if ((val < 1) || (val > 3))
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ priv->ap_density = val;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setrts(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = rrq->value;
+ int err;
+ unsigned long flags;
+
+ if (rrq->disabled)
+ val = 2347;
+
+ if ( (val < 0) || (val > 2347) )
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ priv->rts_thresh = val;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setfrag(struct net_device *dev, struct iw_param *frq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if (priv->has_mwo) {
+ if (frq->disabled)
+ priv->mwo_robust = 0;
+ else {
+ if (frq->fixed)
+				printk(KERN_WARNING "%s: Fixed fragmentation not "
+				       "supported on this firmware. Using MWO robust instead.\n",
+				       dev->name);
+ priv->mwo_robust = 1;
+ }
+ } else {
+ if (frq->disabled)
+ priv->frag_thresh = 2346;
+ else {
+ if ( (frq->value < 256) || (frq->value > 2346) )
+ err = -EINVAL;
+ else
+ priv->frag_thresh = frq->value & ~0x1; /* must be even */
+ }
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getfrag(struct net_device *dev, struct iw_param *frq)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 val;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if (priv->has_mwo) {
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMWOROBUST_AGERE,
+ &val);
+ if (err)
+ val = 0;
+
+ frq->value = val ? 2347 : 0;
+ frq->disabled = ! val;
+ frq->fixed = 0;
+ } else {
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
+ &val);
+ if (err)
+ val = 0;
+
+ frq->value = val;
+ frq->disabled = (val >= 2346);
+ frq->fixed = 1;
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_setrate(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+ int ratemode = -1;
+ int bitrate; /* 100s of kilobits */
+ int i;
+ unsigned long flags;
+
+ /* As the user space doesn't know our highest rate, it uses -1
+ * to ask us to set the highest rate. Test it using "iwconfig
+ * ethX rate auto" - Jean II */
+ if (rrq->value == -1)
+ bitrate = 110;
+ else {
+ if (rrq->value % 100000)
+ return -EINVAL;
+ bitrate = rrq->value / 100000;
+ }
+
+ if ( (bitrate != 10) && (bitrate != 20) &&
+ (bitrate != 55) && (bitrate != 110) )
+ return -EINVAL;
+
+ for (i = 0; i < BITRATE_TABLE_SIZE; i++)
+ if ( (bitrate_table[i].bitrate == bitrate) &&
+ (bitrate_table[i].automatic == ! rrq->fixed) ) {
+ ratemode = i;
+ break;
+ }
+
+ if (ratemode == -1)
+ return -EINVAL;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ priv->bitratemode = ratemode;
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getrate(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ int ratemode;
+ int i;
+ u16 val;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ ratemode = priv->bitratemode;
+
+ if ( (ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE) )
+ BUG();
+
+ rrq->value = bitrate_table[ratemode].bitrate * 100000;
+ rrq->fixed = ! bitrate_table[ratemode].automatic;
+ rrq->disabled = 0;
+
+	/* If the interface is running, we try to find out more about
+	   the current mode */
+ if (netif_running(dev)) {
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CURRENTTXRATE, &val);
+ if (err)
+ goto out;
+
+ switch (priv->firmware_type) {
+ case FIRMWARE_TYPE_AGERE: /* Lucent style rate */
+ /* Note : in Lucent firmware, the return value of
+ * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s,
+ * and therefore is totally different from the
+ * encoding of HERMES_RID_CNFTXRATECONTROL.
+ * Don't forget that 6Mb/s is really 5.5Mb/s */
+ if (val == 6)
+ rrq->value = 5500000;
+ else
+ rrq->value = val * 1000000;
+ break;
+ case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
+ case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
+ for (i = 0; i < BITRATE_TABLE_SIZE; i++)
+ if (bitrate_table[i].intersil_txratectrl == val) {
+ ratemode = i;
+ break;
+ }
+ if (i >= BITRATE_TABLE_SIZE)
+ printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
+ dev->name, val);
+
+ rrq->value = bitrate_table[ratemode].bitrate * 100000;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_setpower(struct net_device *dev, struct iw_param *prq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int err = 0;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ if (prq->disabled) {
+ priv->pm_on = 0;
+ } else {
+ switch (prq->flags & IW_POWER_MODE) {
+ case IW_POWER_UNICAST_R:
+ priv->pm_mcast = 0;
+ priv->pm_on = 1;
+ break;
+ case IW_POWER_ALL_R:
+ priv->pm_mcast = 1;
+ priv->pm_on = 1;
+ break;
+ case IW_POWER_ON:
+ /* No flags : but we may have a value - Jean II */
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto out;
+
+ if (prq->flags & IW_POWER_TIMEOUT) {
+ priv->pm_on = 1;
+ priv->pm_timeout = prq->value / 1000;
+ }
+ if (prq->flags & IW_POWER_PERIOD) {
+ priv->pm_on = 1;
+ priv->pm_period = prq->value / 1000;
+ }
+ /* It's valid to not have a value if we are just toggling
+ * the flags... Jean II */
+ if(!priv->pm_on) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getpower(struct net_device *dev, struct iw_param *prq)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 enable, period, timeout, mcast;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, &enable);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP,
+ HERMES_RID_CNFMAXSLEEPDURATION, &period);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &timeout);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, &mcast);
+ if (err)
+ goto out;
+
+ prq->disabled = !enable;
+ /* Note : by default, display the period */
+ if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ prq->flags = IW_POWER_TIMEOUT;
+ prq->value = timeout * 1000;
+ } else {
+ prq->flags = IW_POWER_PERIOD;
+ prq->value = period * 1000;
+ }
+ if (mcast)
+ prq->flags |= IW_POWER_ALL_R;
+ else
+ prq->flags |= IW_POWER_UNICAST_R;
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+#if WIRELESS_EXT > 10
+static int orinoco_ioctl_getretry(struct net_device *dev, struct iw_param *rrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ u16 short_limit, long_limit, lifetime;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
+ &short_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
+ &long_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
+ &lifetime);
+ if (err)
+ goto out;
+
+ rrq->disabled = 0; /* Can't be disabled */
+
+ /* Note : by default, display the retry number */
+ if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ rrq->flags = IW_RETRY_LIFETIME;
+ rrq->value = lifetime * 1000; /* ??? */
+ } else {
+ /* By default, display the min number */
+ if ((rrq->flags & IW_RETRY_MAX)) {
+ rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ rrq->value = long_limit;
+ } else {
+ rrq->flags = IW_RETRY_LIMIT;
+ rrq->value = short_limit;
+ if(short_limit != long_limit)
+ rrq->flags |= IW_RETRY_MIN;
+ }
+ }
+
+ out:
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+#endif /* WIRELESS_EXT > 10 */
+
+static int orinoco_ioctl_setibssport(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = *( (int *) wrq->u.name );
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ priv->ibss_port = val ;
+
+ /* Actually update the mode we are using */
+ set_port_type(priv);
+
+ orinoco_unlock(priv, &flags);
+ return 0;
+}
+
+static int orinoco_ioctl_getibssport(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int *val = (int *)wrq->u.name;
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ *val = priv->ibss_port;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+static int orinoco_ioctl_setport3(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int val = *( (int *) wrq->u.name );
+ int err = 0;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ switch (val) {
+ case 0: /* Try to do IEEE ad-hoc mode */
+ if (! priv->has_ibss) {
+ err = -EINVAL;
+ break;
+ }
+ priv->prefer_port3 = 0;
+
+ break;
+
+ case 1: /* Try to do Lucent proprietary ad-hoc mode */
+ if (! priv->has_port3) {
+ err = -EINVAL;
+ break;
+ }
+ priv->prefer_port3 = 1;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (! err)
+ /* Actually update the mode we are using */
+ set_port_type(priv);
+
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getport3(struct net_device *dev, struct iwreq *wrq)
+{
+ struct orinoco_private *priv = dev->priv;
+ int *val = (int *)wrq->u.name;
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ *val = priv->prefer_port3;
+ orinoco_unlock(priv, &flags);
+
+ return 0;
+}
+
+/* Spy is used for link quality/strength measurements in Ad-Hoc mode
+ * Jean II */
+static int orinoco_ioctl_setspy(struct net_device *dev, struct iw_point *srq)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct sockaddr address[IW_MAX_SPY];
+ int number = srq->length;
+ int i;
+ int err = 0;
+ unsigned long flags;
+
+ /* Check the number of addresses */
+ if (number > IW_MAX_SPY)
+ return -E2BIG;
+
+ /* Get the data in the driver */
+ if (srq->pointer) {
+ if (copy_from_user(address, srq->pointer,
+ sizeof(struct sockaddr) * number))
+ return -EFAULT;
+ }
+
+	/* Make sure nobody messes with the structure while we do */
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+	/* orinoco_lock() doesn't disable interrupts, so make sure the
+	 * interrupt rx path doesn't get confused while we copy */
+ priv->spy_number = 0;
+
+ if (number > 0) {
+ /* Extract the addresses */
+ for (i = 0; i < number; i++)
+ memcpy(priv->spy_address[i], address[i].sa_data,
+ ETH_ALEN);
+ /* Reset stats */
+ memset(priv->spy_stat, 0,
+ sizeof(struct iw_quality) * IW_MAX_SPY);
+ /* Set number of addresses */
+ priv->spy_number = number;
+ }
+
+ /* Now, let the others play */
+ orinoco_unlock(priv, &flags);
+
+ return err;
+}
+
+static int orinoco_ioctl_getspy(struct net_device *dev, struct iw_point *srq)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct sockaddr address[IW_MAX_SPY];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+ int number;
+ int i;
+ int err;
+ unsigned long flags;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+
+ number = priv->spy_number;
+ if ((number > 0) && (srq->pointer)) {
+ /* Create address struct */
+ for (i = 0; i < number; i++) {
+ memcpy(address[i].sa_data, priv->spy_address[i],
+ ETH_ALEN);
+ address[i].sa_family = AF_UNIX;
+ }
+ /* Copy stats */
+		/* In theory, we should disable irqs while copying the stats
+		 * because the rx path might update them in the middle...
+		 * Bah, who cares? - Jean II */
+ memcpy(&spy_stat, priv->spy_stat,
+ sizeof(struct iw_quality) * IW_MAX_SPY);
+ for (i=0; i < number; i++)
+ priv->spy_stat[i].updated = 0;
+ }
+
+ orinoco_unlock(priv, &flags);
+
+ /* Push stuff to user space */
+ srq->length = number;
+ if(copy_to_user(srq->pointer, address,
+ sizeof(struct sockaddr) * number))
+ return -EFAULT;
+ if(copy_to_user(srq->pointer + (sizeof(struct sockaddr)*number),
+ &spy_stat, sizeof(struct iw_quality) * number))
+ return -EFAULT;
+
+ return 0;
+}
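+
+/* For reference: the buffer returned by orinoco_ioctl_getspy() above is
+ * laid out as "number" struct sockaddr entries followed immediately by
+ * "number" struct iw_quality entries (the usual SIOCGIWSPY layout), with
+ * srq->length set to "number".
+ */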
+
+static int
+orinoco_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct orinoco_private *priv = dev->priv;
+ struct iwreq *wrq = (struct iwreq *)rq;
+ int err = 0;
+ int tmp;
+ int changed = 0;
+ unsigned long flags;
+
+ TRACE_ENTER(dev->name);
+
+	/* In theory, we could allow most of the SET stuff to be
+	 * done. In practice, the lapse of time at startup when the
+	 * card is not ready is very short, so why bother...  Note
+	 * that netif_device_present is different from up/down
+	 * (ifconfig); when the device is not yet up, it is usually
+	 * already ready...  Jean II */
+ if (! netif_device_present(dev))
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGIWNAME:
+ strcpy(wrq->u.name, "IEEE 802.11-DS");
+ break;
+
+ case SIOCGIWAP:
+ wrq->u.ap_addr.sa_family = ARPHRD_ETHER;
+ err = orinoco_hw_get_bssid(priv, wrq->u.ap_addr.sa_data);
+ break;
+
+ case SIOCGIWRANGE:
+ err = orinoco_ioctl_getiwrange(dev, &wrq->u.data);
+ break;
+
+ case SIOCSIWMODE:
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ switch (wrq->u.mode) {
+ case IW_MODE_ADHOC:
+ if (! (priv->has_ibss || priv->has_port3) )
+ err = -EINVAL;
+ else {
+ priv->iw_mode = IW_MODE_ADHOC;
+ changed = 1;
+ }
+ break;
+
+ case IW_MODE_INFRA:
+ priv->iw_mode = IW_MODE_INFRA;
+ changed = 1;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ set_port_type(priv);
+ orinoco_unlock(priv, &flags);
+ break;
+
+ case SIOCGIWMODE:
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ wrq->u.mode = priv->iw_mode;
+ orinoco_unlock(priv, &flags);
+ break;
+
+ case SIOCSIWENCODE:
+ if (! priv->has_wep) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ err = orinoco_ioctl_setiwencode(dev, &wrq->u.encoding);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWENCODE:
+ if (! priv->has_wep) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_getiwencode(dev, &wrq->u.encoding);
+ break;
+
+ case SIOCSIWESSID:
+ err = orinoco_ioctl_setessid(dev, &wrq->u.essid);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWESSID:
+ err = orinoco_ioctl_getessid(dev, &wrq->u.essid);
+ break;
+
+ case SIOCSIWNICKN:
+ err = orinoco_ioctl_setnick(dev, &wrq->u.data);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWNICKN:
+ err = orinoco_ioctl_getnick(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWFREQ:
+ tmp = orinoco_hw_get_freq(priv);
+ if (tmp < 0) {
+ err = tmp;
+ } else {
+ wrq->u.freq.m = tmp;
+ wrq->u.freq.e = 1;
+ }
+ break;
+
+ case SIOCSIWFREQ:
+ err = orinoco_ioctl_setfreq(dev, &wrq->u.freq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWSENS:
+ err = orinoco_ioctl_getsens(dev, &wrq->u.sens);
+ break;
+
+ case SIOCSIWSENS:
+ err = orinoco_ioctl_setsens(dev, &wrq->u.sens);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWRTS:
+ wrq->u.rts.value = priv->rts_thresh;
+ wrq->u.rts.disabled = (wrq->u.rts.value == 2347);
+ wrq->u.rts.fixed = 1;
+ break;
+
+ case SIOCSIWRTS:
+ err = orinoco_ioctl_setrts(dev, &wrq->u.rts);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCSIWFRAG:
+ err = orinoco_ioctl_setfrag(dev, &wrq->u.frag);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWFRAG:
+ err = orinoco_ioctl_getfrag(dev, &wrq->u.frag);
+ break;
+
+ case SIOCSIWRATE:
+ err = orinoco_ioctl_setrate(dev, &wrq->u.bitrate);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWRATE:
+ err = orinoco_ioctl_getrate(dev, &wrq->u.bitrate);
+ break;
+
+ case SIOCSIWPOWER:
+ err = orinoco_ioctl_setpower(dev, &wrq->u.power);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWPOWER:
+ err = orinoco_ioctl_getpower(dev, &wrq->u.power);
+ break;
+
+ case SIOCGIWTXPOW:
+ /* The card only supports one tx power, so this is easy */
+ wrq->u.txpower.value = 15; /* dBm */
+ wrq->u.txpower.fixed = 1;
+ wrq->u.txpower.disabled = 0;
+ wrq->u.txpower.flags = IW_TXPOW_DBM;
+ break;
+
+#if WIRELESS_EXT > 10
+ case SIOCSIWRETRY:
+ err = -EOPNOTSUPP;
+ break;
+
+ case SIOCGIWRETRY:
+ err = orinoco_ioctl_getretry(dev, &wrq->u.retry);
+ break;
+#endif /* WIRELESS_EXT > 10 */
+
+ case SIOCSIWSPY:
+ err = orinoco_ioctl_setspy(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWSPY:
+ err = orinoco_ioctl_getspy(dev, &wrq->u.data);
+ break;
+
+ case SIOCGIWPRIV:
+ if (wrq->u.data.pointer) {
+ struct iw_priv_args privtab[] = {
+ { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" },
+ { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
+ { SIOCIWFIRSTPRIV + 0x2,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_port3" },
+ { SIOCIWFIRSTPRIV + 0x3, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_port3" },
+ { SIOCIWFIRSTPRIV + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_preamble" },
+ { SIOCIWFIRSTPRIV + 0x5, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_preamble" },
+ { SIOCIWFIRSTPRIV + 0x6,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_ibssport" },
+ { SIOCIWFIRSTPRIV + 0x7, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_ibssport" },
+ { SIOCIWLASTPRIV, 0, 0, "dump_recs" },
+ };
+
+ err = verify_area(VERIFY_WRITE, wrq->u.data.pointer, sizeof(privtab));
+ if (err)
+ break;
+
+ wrq->u.data.length = sizeof(privtab) / sizeof(privtab[0]);
+ if (copy_to_user(wrq->u.data.pointer, privtab, sizeof(privtab)))
+ err = -EFAULT;
+ }
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x0: /* force_reset */
+ case SIOCIWFIRSTPRIV + 0x1: /* card_reset */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
+
+ schedule_work(&priv->reset_work);
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x2: /* set_port3 */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_setport3(dev, wrq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x3: /* get_port3 */
+ err = orinoco_ioctl_getport3(dev, wrq);
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x4: /* set_preamble */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+		/* 802.11b has recently defined a short preamble.
+		 * Basically, the PHY header has been reduced in size.
+		 * This increases performance, especially at high rates
+		 * (the preamble is transmitted at 1Mb/s); unfortunately
+		 * it causes compatibility troubles... - Jean II */
+ if(priv->has_preamble) {
+ int val = *( (int *) wrq->u.name );
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ if (val)
+ priv->preamble = 1;
+ else
+ priv->preamble = 0;
+ orinoco_unlock(priv, &flags);
+ changed = 1;
+ } else
+ err = -EOPNOTSUPP;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x5: /* get_preamble */
+ if(priv->has_preamble) {
+ int *val = (int *)wrq->u.name;
+
+ err = orinoco_lock(priv, &flags);
+ if (err)
+ return err;
+ *val = priv->preamble;
+ orinoco_unlock(priv, &flags);
+ } else
+ err = -EOPNOTSUPP;
+ break;
+ case SIOCIWFIRSTPRIV + 0x6: /* set_ibssport */
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+ err = orinoco_ioctl_setibssport(dev, wrq);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCIWFIRSTPRIV + 0x7: /* get_ibssport */
+ err = orinoco_ioctl_getibssport(dev, wrq);
+ break;
+
+ case SIOCIWLASTPRIV:
+ err = orinoco_debug_dump_recs(dev);
+ if (err)
+ printk(KERN_ERR "%s: Unable to dump records (%d)\n",
+ dev->name, err);
+ break;
+
+
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (! err && changed && netif_running(dev)) {
+ err = orinoco_reconfigure(dev);
+ }
+
+ TRACE_EXIT(dev->name);
+
+ return err;
+}
+
+struct {
+ u16 rid;
+ char *name;
+ int displaytype;
+#define DISPLAY_WORDS 0
+#define DISPLAY_BYTES 1
+#define DISPLAY_STRING 2
+#define DISPLAY_XSTRING 3
+} record_table[] = {
+#define DEBUG_REC(name,type) { HERMES_RID_##name, #name, DISPLAY_##type }
+ DEBUG_REC(CNFPORTTYPE,WORDS),
+ DEBUG_REC(CNFOWNMACADDR,BYTES),
+ DEBUG_REC(CNFDESIREDSSID,STRING),
+ DEBUG_REC(CNFOWNCHANNEL,WORDS),
+ DEBUG_REC(CNFOWNSSID,STRING),
+ DEBUG_REC(CNFOWNATIMWINDOW,WORDS),
+ DEBUG_REC(CNFSYSTEMSCALE,WORDS),
+ DEBUG_REC(CNFMAXDATALEN,WORDS),
+ DEBUG_REC(CNFPMENABLED,WORDS),
+ DEBUG_REC(CNFPMEPS,WORDS),
+ DEBUG_REC(CNFMULTICASTRECEIVE,WORDS),
+ DEBUG_REC(CNFMAXSLEEPDURATION,WORDS),
+ DEBUG_REC(CNFPMHOLDOVERDURATION,WORDS),
+ DEBUG_REC(CNFOWNNAME,STRING),
+ DEBUG_REC(CNFOWNDTIMPERIOD,WORDS),
+ DEBUG_REC(CNFMULTICASTPMBUFFERING,WORDS),
+ DEBUG_REC(CNFWEPENABLED_AGERE,WORDS),
+ DEBUG_REC(CNFMANDATORYBSSID_SYMBOL,WORDS),
+ DEBUG_REC(CNFWEPDEFAULTKEYID,WORDS),
+ DEBUG_REC(CNFDEFAULTKEY0,BYTES),
+ DEBUG_REC(CNFDEFAULTKEY1,BYTES),
+ DEBUG_REC(CNFMWOROBUST_AGERE,WORDS),
+ DEBUG_REC(CNFDEFAULTKEY2,BYTES),
+ DEBUG_REC(CNFDEFAULTKEY3,BYTES),
+ DEBUG_REC(CNFWEPFLAGS_INTERSIL,WORDS),
+ DEBUG_REC(CNFWEPKEYMAPPINGTABLE,WORDS),
+ DEBUG_REC(CNFAUTHENTICATION,WORDS),
+ DEBUG_REC(CNFMAXASSOCSTA,WORDS),
+ DEBUG_REC(CNFKEYLENGTH_SYMBOL,WORDS),
+ DEBUG_REC(CNFTXCONTROL,WORDS),
+ DEBUG_REC(CNFROAMINGMODE,WORDS),
+ DEBUG_REC(CNFHOSTAUTHENTICATION,WORDS),
+ DEBUG_REC(CNFRCVCRCERROR,WORDS),
+ DEBUG_REC(CNFMMLIFE,WORDS),
+ DEBUG_REC(CNFALTRETRYCOUNT,WORDS),
+ DEBUG_REC(CNFBEACONINT,WORDS),
+ DEBUG_REC(CNFAPPCFINFO,WORDS),
+ DEBUG_REC(CNFSTAPCFINFO,WORDS),
+ DEBUG_REC(CNFPRIORITYQUSAGE,WORDS),
+ DEBUG_REC(CNFTIMCTRL,WORDS),
+ DEBUG_REC(CNFTHIRTY2TALLY,WORDS),
+ DEBUG_REC(CNFENHSECURITY,WORDS),
+ DEBUG_REC(CNFGROUPADDRESSES,BYTES),
+ DEBUG_REC(CNFCREATEIBSS,WORDS),
+ DEBUG_REC(CNFFRAGMENTATIONTHRESHOLD,WORDS),
+ DEBUG_REC(CNFRTSTHRESHOLD,WORDS),
+ DEBUG_REC(CNFTXRATECONTROL,WORDS),
+ DEBUG_REC(CNFPROMISCUOUSMODE,WORDS),
+ DEBUG_REC(CNFBASICRATES_SYMBOL,WORDS),
+ DEBUG_REC(CNFPREAMBLE_SYMBOL,WORDS),
+ DEBUG_REC(CNFSHORTPREAMBLE,WORDS),
+ DEBUG_REC(CNFWEPKEYS_AGERE,BYTES),
+ DEBUG_REC(CNFEXCLUDELONGPREAMBLE,WORDS),
+ DEBUG_REC(CNFTXKEY_AGERE,WORDS),
+ DEBUG_REC(CNFAUTHENTICATIONRSPTO,WORDS),
+ DEBUG_REC(CNFBASICRATES,WORDS),
+ DEBUG_REC(CNFSUPPORTEDRATES,WORDS),
+ DEBUG_REC(CNFTICKTIME,WORDS),
+ DEBUG_REC(CNFSCANREQUEST,WORDS),
+ DEBUG_REC(CNFJOINREQUEST,WORDS),
+ DEBUG_REC(CNFAUTHENTICATESTATION,WORDS),
+ DEBUG_REC(CNFCHANNELINFOREQUEST,WORDS),
+ DEBUG_REC(MAXLOADTIME,WORDS),
+ DEBUG_REC(DOWNLOADBUFFER,WORDS),
+ DEBUG_REC(PRIID,WORDS),
+ DEBUG_REC(PRISUPRANGE,WORDS),
+ DEBUG_REC(CFIACTRANGES,WORDS),
+ DEBUG_REC(NICSERNUM,XSTRING),
+ DEBUG_REC(NICID,WORDS),
+ DEBUG_REC(MFISUPRANGE,WORDS),
+ DEBUG_REC(CFISUPRANGE,WORDS),
+ DEBUG_REC(CHANNELLIST,WORDS),
+ DEBUG_REC(REGULATORYDOMAINS,WORDS),
+ DEBUG_REC(TEMPTYPE,WORDS),
+/* DEBUG_REC(CIS,BYTES), */
+ DEBUG_REC(STAID,WORDS),
+ DEBUG_REC(CURRENTSSID,STRING),
+ DEBUG_REC(CURRENTBSSID,BYTES),
+ DEBUG_REC(COMMSQUALITY,WORDS),
+ DEBUG_REC(CURRENTTXRATE,WORDS),
+ DEBUG_REC(CURRENTBEACONINTERVAL,WORDS),
+ DEBUG_REC(CURRENTSCALETHRESHOLDS,WORDS),
+ DEBUG_REC(PROTOCOLRSPTIME,WORDS),
+ DEBUG_REC(SHORTRETRYLIMIT,WORDS),
+ DEBUG_REC(LONGRETRYLIMIT,WORDS),
+ DEBUG_REC(MAXTRANSMITLIFETIME,WORDS),
+ DEBUG_REC(MAXRECEIVELIFETIME,WORDS),
+ DEBUG_REC(CFPOLLABLE,WORDS),
+ DEBUG_REC(AUTHENTICATIONALGORITHMS,WORDS),
+ DEBUG_REC(PRIVACYOPTIONIMPLEMENTED,WORDS),
+ DEBUG_REC(OWNMACADDR,BYTES),
+ DEBUG_REC(SCANRESULTSTABLE,WORDS),
+ DEBUG_REC(PHYTYPE,WORDS),
+ DEBUG_REC(CURRENTCHANNEL,WORDS),
+ DEBUG_REC(CURRENTPOWERSTATE,WORDS),
+ DEBUG_REC(CCAMODE,WORDS),
+ DEBUG_REC(SUPPORTEDDATARATES,WORDS),
+ DEBUG_REC(BUILDSEQ,BYTES),
+ DEBUG_REC(FWID,XSTRING)
+#undef DEBUG_REC
+};
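+
+/* For reference: each DEBUG_REC(name,type) entry above expands, via token
+ * pasting and stringizing, to an initializer such as
+ *
+ *	{ HERMES_RID_CNFPORTTYPE, "CNFPORTTYPE", DISPLAY_WORDS }
+ *
+ * (first entry shown), which is what orinoco_debug_dump_recs() below walks
+ * when dumping the card's records.
+ */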
+
+#define DEBUG_LTV_SIZE 128
+
+static int orinoco_debug_dump_recs(struct net_device *dev)
+{
+ struct orinoco_private *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ u8 *val8;
+ u16 *val16;
+ int i,j;
+ u16 length;
+ int err;
+
+ /* I'm not sure: we might have a lock here, so we'd better go
+ atomic, just in case. */
+ val8 = kmalloc(DEBUG_LTV_SIZE + 2, GFP_ATOMIC);
+ if (! val8)
+ return -ENOMEM;
+ val16 = (u16 *)val8;
+
+ for (i = 0; i < ARRAY_SIZE(record_table); i++) {
+ u16 rid = record_table[i].rid;
+ int len;
+
+ memset(val8, 0, DEBUG_LTV_SIZE + 2);
+
+ err = hermes_read_ltv(hw, USER_BAP, rid, DEBUG_LTV_SIZE,
+ &length, val8);
+ if (err) {
+ DEBUG(0, "Error %d reading RID 0x%04x\n", err, rid);
+ continue;
+ }
+ val16 = (u16 *)val8;
+ if (length == 0)
+ continue;
+
+ printk(KERN_DEBUG "%-15s (0x%04x): length=%d (%d bytes)\tvalue=",
+ record_table[i].name,
+ rid, length, (length-1)*2);
+ len = min(((int)length-1)*2, DEBUG_LTV_SIZE);
+
+ switch (record_table[i].displaytype) {
+ case DISPLAY_WORDS:
+ for (j = 0; j < len / 2; j++)
+ printk("%04X-", le16_to_cpu(val16[j]));
+ break;
+
+ case DISPLAY_BYTES:
+ default:
+ for (j = 0; j < len; j++)
+ printk("%02X:", val8[j]);
+ break;
+
+ case DISPLAY_STRING:
+ len = min(len, le16_to_cpu(val16[0])+2);
+ val8[len] = '\0';
+ printk("\"%s\"", (char *)&val16[1]);
+ break;
+
+ case DISPLAY_XSTRING:
+ printk("'%s'", (char *)val8);
+ }
+
+ printk("\n");
+ }
+
+ kfree(val8);
+
+ return 0;
+}
+
+struct net_device *alloc_orinocodev(int sizeof_card, int (*hard_reset)(struct orinoco_private *))
+{
+ struct net_device *dev;
+ struct orinoco_private *priv;
+
+ dev = alloc_etherdev(sizeof(struct orinoco_private) + sizeof_card);
+ priv = (struct orinoco_private *)dev->priv;
+ priv->ndev = dev;
+ if (sizeof_card)
+ priv->card = (void *)((unsigned long)dev->priv + sizeof(struct orinoco_private));
+ else
+ priv->card = NULL;
+
+ /* Setup / override net_device fields */
+ dev->init = orinoco_init;
+ dev->hard_start_xmit = orinoco_xmit;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = orinoco_tx_timeout;
+ dev->watchdog_timeo = HZ; /* 1 second timeout */
+#endif
+ dev->get_stats = orinoco_get_stats;
+ dev->get_wireless_stats = orinoco_get_wireless_stats;
+ dev->do_ioctl = orinoco_ioctl;
+ dev->change_mtu = orinoco_change_mtu;
+ dev->set_multicast_list = orinoco_set_multicast_list;
+ /* we use the default eth_mac_addr for setting the MAC addr */
+
+ /* Set up default callbacks */
+ dev->open = orinoco_open;
+ dev->stop = orinoco_stop;
+ priv->hard_reset = hard_reset;
+
+ spin_lock_init(&priv->lock);
+ priv->open = 0;
+ priv->hw_unavailable = 1; /* orinoco_init() must clear this
+ * before anything else touches the
+ * hardware */
+ INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
+
+ priv->last_linkstatus = 0xffff;
+ priv->connected = 0;
+
+ return dev;
+
+}
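+
+/* Minimal usage sketch for alloc_orinocodev() (illustration only; the real
+ * example is orinoco_cs_attach()/orinoco_cs_config() in orinoco_cs.c).
+ * A bus-attachment driver is expected to do roughly:
+ *
+ *	dev = alloc_orinocodev(sizeof(struct mybus_card), mybus_hard_reset);
+ *	priv = dev->priv;
+ *	card = priv->card;	(bus-specific state lives right after priv)
+ *	... set up I/O and IRQ resources, hermes_struct_init(&priv->hw, ...) ...
+ *	register_netdev(dev);
+ *
+ * "mybus_card" and "mybus_hard_reset" are placeholder names, not part of
+ * this driver.
+ */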
+
+/********************************************************************/
+/* Module initialization */
+/********************************************************************/
+
+EXPORT_SYMBOL(alloc_orinocodev);
+
+EXPORT_SYMBOL(__orinoco_up);
+EXPORT_SYMBOL(__orinoco_down);
+EXPORT_SYMBOL(orinoco_stop);
+EXPORT_SYMBOL(orinoco_reinit_firmware);
+
+EXPORT_SYMBOL(orinoco_interrupt);
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = "orinoco.c 0.13e (David Gibson <hermes@gibson.dropbear.id.au> and others)";
+
+static int __init init_orinoco(void)
+{
+ printk(KERN_DEBUG "%s\n", version);
+ return 0;
+}
+
+static void __exit exit_orinoco(void)
+{
+}
+
+module_init(init_orinoco);
+module_exit(exit_orinoco);
diff --git a/linux/pcmcia-cs/wireless/orinoco.h b/linux/pcmcia-cs/wireless/orinoco.h
new file mode 100644
index 0000000..6eb9e85
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/orinoco.h
@@ -0,0 +1,166 @@
+/* orinoco.h
+ *
+ * Common definitions to all pieces of the various orinoco
+ * drivers
+ */
+
+#ifndef _ORINOCO_H
+#define _ORINOCO_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/version.h>
+#include "hermes.h"
+
+/* Workqueue / task queue backwards compatibility stuff */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+#include <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#define INIT_WORK INIT_TQUEUE
+#define schedule_work schedule_task
+#endif
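+
+/* With the compatibility defines above the driver can use the 2.5-style
+ * workqueue API unconditionally; for example
+ *
+ *	INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
+ *	schedule_work(&priv->reset_work);
+ *
+ * (as done in alloc_orinocodev()) compiles into INIT_TQUEUE() and
+ * schedule_task() on older kernels, with "work_struct" aliased to
+ * tq_struct.
+ */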
+
+/* Interrupt handler backwards compatibility stuff */
+#ifndef IRQ_NONE
+
+#define IRQ_NONE
+#define IRQ_HANDLED
+typedef void irqreturn_t;
+
+#endif
+
+/* To enable debug messages */
+//#define ORINOCO_DEBUG 3
+
+#if (! defined (WIRELESS_EXT)) || (WIRELESS_EXT < 10)
+#error "orinoco driver requires Wireless extensions v10 or later."
+#endif /* (! defined (WIRELESS_EXT)) || (WIRELESS_EXT < 10) */
+#define WIRELESS_SPY // enable iwspy support
+
+#define ORINOCO_MAX_KEY_SIZE 14
+#define ORINOCO_MAX_KEYS 4
+
+struct orinoco_key {
+ u16 len; /* always stored as little-endian */
+ char data[ORINOCO_MAX_KEY_SIZE];
+} __attribute__ ((packed));
+
+#define ORINOCO_INTEN ( HERMES_EV_RX | HERMES_EV_ALLOC | HERMES_EV_TX | \
+ HERMES_EV_TXEXC | HERMES_EV_WTERR | HERMES_EV_INFO | \
+ HERMES_EV_INFDROP )
+
+
+struct orinoco_private {
+ void *card; /* Pointer to card dependent structure */
+ int (*hard_reset)(struct orinoco_private *);
+
+ /* Synchronisation stuff */
+ spinlock_t lock;
+ int hw_unavailable;
+ struct work_struct reset_work;
+
+ /* driver state */
+ int open;
+ u16 last_linkstatus;
+ int connected;
+
+ /* Net device stuff */
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ struct iw_statistics wstats;
+
+ /* Hardware control variables */
+ hermes_t hw;
+ u16 txfid;
+
+
+ /* Capabilities of the hardware/firmware */
+ int firmware_type;
+#define FIRMWARE_TYPE_AGERE 1
+#define FIRMWARE_TYPE_INTERSIL 2
+#define FIRMWARE_TYPE_SYMBOL 3
+ int has_ibss, has_port3, has_ibss_any, ibss_port;
+ int has_wep, has_big_wep;
+ int has_mwo;
+ int has_pm;
+ int has_preamble;
+ int has_sensitivity;
+ int nicbuf_size;
+ u16 channel_mask;
+ int broken_disableport;
+
+	/* Configuration parameters */
+ u32 iw_mode;
+ int prefer_port3;
+ u16 wep_on, wep_restrict, tx_key;
+ struct orinoco_key keys[ORINOCO_MAX_KEYS];
+ int bitratemode;
+ char nick[IW_ESSID_MAX_SIZE+1];
+ char desired_essid[IW_ESSID_MAX_SIZE+1];
+ u16 frag_thresh, mwo_robust;
+ u16 channel;
+ u16 ap_density, rts_thresh;
+ u16 pm_on, pm_mcast, pm_period, pm_timeout;
+ u16 preamble;
+#ifdef WIRELESS_SPY
+ int spy_number;
+ u_char spy_address[IW_MAX_SPY][ETH_ALEN];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+#endif
+
+ /* Configuration dependent variables */
+ int port_type, createibss;
+ int promiscuous, mc_count;
+};
+
+#ifdef ORINOCO_DEBUG
+extern int orinoco_debug;
+#define DEBUG(n, args...) do { if (orinoco_debug>(n)) printk(KERN_DEBUG args); } while(0)
+#else
+#define DEBUG(n, args...) do { } while (0)
+#endif /* ORINOCO_DEBUG */
+
+#define TRACE_ENTER(devname) DEBUG(2, "%s: -> " __FUNCTION__ "()\n", devname);
+#define TRACE_EXIT(devname) DEBUG(2, "%s: <- " __FUNCTION__ "()\n", devname);
+
+extern struct net_device *alloc_orinocodev(int sizeof_card,
+ int (*hard_reset)(struct orinoco_private *));
+extern int __orinoco_up(struct net_device *dev);
+extern int __orinoco_down(struct net_device *dev);
+extern int orinoco_stop(struct net_device *dev);
+extern int orinoco_reinit_firmware(struct net_device *dev);
+extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *regs);
+
+/********************************************************************/
+/* Locking and synchronization functions */
+/********************************************************************/
+
+/* These functions *must* be inline or they will break horribly on
+ * SPARC, due to its weird semantics for save/restore flags. extern
+ * inline should prevent the kernel from linking, or the module from
+ * loading, if they are not inlined. */
+extern inline int orinoco_lock(struct orinoco_private *priv,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&priv->lock, *flags);
+ if (priv->hw_unavailable) {
+ printk(KERN_DEBUG "orinoco_lock() called with hw_unavailable (dev=%p)\n",
+ priv->ndev);
+ spin_unlock_irqrestore(&priv->lock, *flags);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+extern inline void orinoco_unlock(struct orinoco_private *priv,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&priv->lock, *flags);
+}
+
+#endif /* _ORINOCO_H */
diff --git a/linux/pcmcia-cs/wireless/orinoco_cs.c b/linux/pcmcia-cs/wireless/orinoco_cs.c
new file mode 100644
index 0000000..a3f6357
--- /dev/null
+++ b/linux/pcmcia-cs/wireless/orinoco_cs.c
@@ -0,0 +1,705 @@
+/* orinoco_cs.c 0.13e - (formerly known as dldwd_cs.c)
+ *
+ * A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
+ * as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
+ * EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and others).
+ * It should also be usable on various Prism II based cards such as the
+ * Linksys, D-Link and Farallon Skyline. It should also work on Symbol
+ * cards such as the 3Com AirConnect and Ericsson WLAN.
+ *
+ * Copyright notice & release notes in file orinoco.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "orinoco.h"
+
+/********************************************************************/
+/* Module stuff */
+/********************************************************************/
+
+MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco, Prism II based and similar wireless cards");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual MPL/GPL");
+#endif
+
+/* Module parameters */
+
+/* The old way: bit map of interrupts to choose from */
+/* This means pick from 15, 14, 12, 11, 10, 9, 7, 5, 4, and 3 */
+static uint irq_mask = 0xdeb8;
+/* Newer, simpler way of listing specific interrupts */
+static int irq_list[4] = { -1 };
+
+/* Some D-Link cards have a buggy CIS. They do work properly at 5V, but
+ * don't have any CIS entry for it. This works around it... */
+static int ignore_cis_vcc; /* = 0 */
+
+MODULE_PARM(irq_mask, "i");
+MODULE_PARM(irq_list, "1-4i");
+MODULE_PARM(ignore_cis_vcc, "i");
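+
+/* Example (illustration only) of overriding these parameters when loading
+ * the module by hand on a 2.4-style system:
+ *
+ *	insmod orinoco_cs.o irq_list=5,9,10 ignore_cis_vcc=1
+ *
+ * With pcmcia-cs, the same options would typically go in the "opts" string
+ * of /etc/pcmcia/config.opts.  If irq_list is not given, the irq_mask
+ * bitmap is used instead.
+ */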
+
+/********************************************************************/
+/* Magic constants */
+/********************************************************************/
+
+/*
+ * The dev_info variable is the "key" that is used to match up this
+ * device driver with appropriate cards, through the card
+ * configuration database.
+ */
+static dev_info_t dev_info = "orinoco_cs";
+
+/********************************************************************/
+/* Data structures */
+/********************************************************************/
+
+/* PCMCIA specific device information (goes in the card field of
+ * struct orinoco_private) */
+struct orinoco_pccard {
+ dev_link_t link;
+ dev_node_t node;
+
+ /* Used to handle hard reset */
+ /* yuck, we need this hack to work around the insanity of the
+ * PCMCIA layer */
+ unsigned long hard_reset_in_progress;
+};
+
+/*
+ * A linked list of "instances" of the device. Each actual PCMCIA
+ * card corresponds to one device instance, and is described by one
+ * dev_link_t structure (defined in ds.h).
+ */
+static dev_link_t *dev_list; /* = NULL */
+
+/********************************************************************/
+/* Function prototypes */
+/********************************************************************/
+
+/* device methods */
+static int orinoco_cs_hard_reset(struct orinoco_private *priv);
+
+/* PCMCIA gumpf */
+static void orinoco_cs_config(dev_link_t * link);
+static void orinoco_cs_release(u_long arg);
+static int orinoco_cs_event(event_t event, int priority,
+ event_callback_args_t * args);
+
+static dev_link_t *orinoco_cs_attach(void);
+static void orinoco_cs_detach(dev_link_t *);
+
+/********************************************************************/
+/* Device methods */
+/********************************************************************/
+
+static int
+orinoco_cs_hard_reset(struct orinoco_private *priv)
+{
+ struct orinoco_pccard *card = priv->card;
+ dev_link_t *link = &card->link;
+ int err;
+
+ /* We need atomic ops here, because we're not holding the lock */
+ set_bit(0, &card->hard_reset_in_progress);
+
+ err = CardServices(ResetCard, link->handle, NULL);
+ if (err)
+ return err;
+
+ clear_bit(0, &card->hard_reset_in_progress);
+
+ return 0;
+}
+
+/********************************************************************/
+/* PCMCIA stuff */
+/********************************************************************/
+
+/* In 2.5 (as of 2.5.69 at least) there is a cs_error exported which
+ * does this, but it's not in 2.4 so we do our own for now. */
+static void
+orinoco_cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+
+/* Remove zombie instances (card removed, detach pending) */
+static void
+flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+
+ TRACE_ENTER("");
+
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK) {
+ orinoco_cs_detach(link);
+ }
+ }
+ TRACE_EXIT("");
+}
+
+/*
+ * This creates an "instance" of the driver, allocating local data
+ * structures for one device. The device is registered with Card
+ * Services.
+ *
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a card
+ * insertion event. */
+static dev_link_t *
+orinoco_cs_attach(void)
+{
+ struct net_device *dev;
+ struct orinoco_private *priv;
+ struct orinoco_pccard *card;
+ dev_link_t *link;
+ client_reg_t client_reg;
+ int ret, i;
+
+ /* A bit of cleanup */
+ flush_stale_links();
+
+ dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset);
+ if (! dev)
+ return NULL;
+ priv = dev->priv;
+ card = priv->card;
+
+ /* Link both structures together */
+ link = &card->link;
+ link->priv = dev;
+
+ /* Initialize the dev_link_t structure */
+ init_timer(&link->release);
+ link->release.function = &orinoco_cs_release;
+ link->release.data = (u_long) link;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = NULL;
+
+ /* General socket configuration defaults can go here. In this
+ * client, we assume very little, and rely on the CIS for
+ * almost everything. In most clients, many details (i.e.,
+ * number, sizes, and attributes of IO windows) are fixed by
+ * the nature of the device, and can be hard-wired here. */
+ link->conf.Attributes = 0;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* Register with Card Services */
+ /* FIXME: need a lock? */
+ link->next = dev_list;
+ dev_list = link;
+
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &orinoco_cs_event;
+ client_reg.Version = 0x0210; /* FIXME: what does this mean? */
+ client_reg.event_callback_args.client_data = link;
+
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != CS_SUCCESS) {
+ orinoco_cs_error(link->handle, RegisterClient, ret);
+ orinoco_cs_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* orinoco_cs_attach */
+
+/*
+ * This deletes a driver "instance". The device is de-registered with
+ * Card Services. If it has been released, all local data structures
+ * are freed. Otherwise, the structures will be freed when the device
+ * is released.
+ */
+static void
+orinoco_cs_detach(dev_link_t * link)
+{
+ dev_link_t **linkp;
+ struct net_device *dev = link->priv;
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link)
+ break;
+ if (*linkp == NULL) {
+ BUG();
+ return;
+ }
+
+ if (link->state & DEV_CONFIG) {
+ orinoco_cs_release((u_long)link);
+ if (link->state & DEV_CONFIG) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, and free it */
+ *linkp = link->next;
+ DEBUG(0, "orinoco_cs: detach: link=%p link->dev=%p\n", link, link->dev);
+ if (link->dev) {
+ DEBUG(0, "orinoco_cs: About to unregister net device %p\n",
+ dev);
+ unregister_netdev(dev);
+ }
+ kfree(dev);
+} /* orinoco_cs_detach */
+
+/*
+ * orinoco_cs_config() is scheduled to run after a CARD_INSERTION
+ * event is received, to configure the PCMCIA socket, and to make the
+ * device available to the system.
+ */
+
+#define CS_CHECK(fn, args...) \
+ while ((last_ret=CardServices(last_fn=(fn),args))!=0) goto cs_failed
+
+#define CFG_CHECK(fn, args...) \
+ if (CardServices(fn, args) != 0) goto next_entry
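+
+/* For clarity: a call such as
+ *
+ *	CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ *
+ * expands to
+ *
+ *	while ((last_ret = CardServices(last_fn = (RequestIRQ),
+ *					link->handle, &link->irq)) != 0)
+ *		goto cs_failed;
+ *
+ * so any Card Services failure records which call failed and jumps to the
+ * common error path.  CFG_CHECK() instead jumps to next_entry so the next
+ * CIS configuration table entry can be tried.
+ */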
+
+static void
+orinoco_cs_config(dev_link_t *link)
+{
+ struct net_device *dev = link->priv;
+ client_handle_t handle = link->handle;
+ struct orinoco_private *priv = dev->priv;
+ struct orinoco_pccard *card = priv->card;
+ hermes_t *hw = &priv->hw;
+ int last_fn, last_ret;
+ u_char buf[64];
+ config_info_t conf;
+ cisinfo_t info;
+ tuple_t tuple;
+ cisparse_t parse;
+
+ CS_CHECK(ValidateCIS, handle, &info);
+
+ /*
+ * This reads the card's CONFIG tuple to find its
+ * configuration registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /* Look up the current Vcc */
+ CS_CHECK(GetConfigurationInfo, handle, &conf);
+ link->conf.Vcc = conf.Vcc;
+
+ /*
+ * In this loop, we scan the CIS for configuration table
+ * entries, each of which describes a valid card
+ * configuration, including voltage, IO window, memory window,
+ * and interrupt settings.
+ *
+ * We make no assumptions about the card to be configured: we
+ * use just the information available in the CIS. In an ideal
+ * world, this would work for any PCMCIA card, but it requires
+ * a complete and accurate CIS. In practice, a driver usually
+ * "knows" most of these things without consulting the CIS,
+ * and most client drivers will only use the CIS to fill in
+ * implementation-defined details.
+ */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ while (1) {
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ cistpl_cftable_entry_t dflt = { .index = 0 };
+
+ CFG_CHECK(GetTupleData, handle, &tuple);
+ CFG_CHECK(ParseTuple, handle, &tuple, &parse);
+
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
+ dflt = *cfg;
+ if (cfg->index == 0)
+ goto next_entry;
+ link->conf.ConfigIndex = cfg->index;
+
+ /* Does this card need audio output? */
+ if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ /* Use power settings for Vcc and Vpp if present */
+ /* Note that the CIS values need to be rescaled */
+ if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
+ DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
+ if (!ignore_cis_vcc)
+ goto next_entry;
+ }
+ } else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) {
+ DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
+ if(!ignore_cis_vcc)
+ goto next_entry;
+ }
+ }
+
+ if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+ else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
+
+ /* Do we need to allocate an interrupt? */
+ if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
+ link->conf.Attributes |= CONF_ENABLE_IRQ;
+
+ /* IO window settings */
+ link->io.NumPorts1 = link->io.NumPorts2 = 0;
+ if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
+ cistpl_io_t *io =
+ (cfg->io.nwin) ? &cfg->io : &dflt.io;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (!(io->flags & CISTPL_IO_8BIT))
+ link->io.Attributes1 =
+ IO_DATA_PATH_WIDTH_16;
+ if (!(io->flags & CISTPL_IO_16BIT))
+ link->io.Attributes1 =
+ IO_DATA_PATH_WIDTH_8;
+ link->io.IOAddrLines =
+ io->flags & CISTPL_IO_LINES_MASK;
+ link->io.BasePort1 = io->win[0].base;
+ link->io.NumPorts1 = io->win[0].len;
+ if (io->nwin > 1) {
+ link->io.Attributes2 =
+ link->io.Attributes1;
+ link->io.BasePort2 = io->win[1].base;
+ link->io.NumPorts2 = io->win[1].len;
+ }
+
+ /* This reserves IO space but doesn't actually enable it */
+ CFG_CHECK(RequestIO, link->handle, &link->io);
+ }
+
+
+ /* If we got this far, we're cool! */
+
+ break;
+
+ next_entry:
+ if (link->io.NumPorts1)
+ CardServices(ReleaseIO, link->handle, &link->io);
+ last_ret = CardServices(GetNextTuple, handle, &tuple);
+ if (last_ret == CS_NO_MORE_ITEMS) {
+ printk(KERN_ERR "GetNextTuple(). No matching CIS configuration, "
+ "maybe you need the ignore_cis_vcc=1 parameter.\n");
+ goto cs_failed;
+ }
+ }
+
+ /*
+ * Allocate an interrupt line. Note that this does not assign
+ * a handler to the interrupt, unless the 'Handler' member of
+ * the irq structure is initialized.
+ */
+ if (link->conf.Attributes & CONF_ENABLE_IRQ) {
+ int i;
+
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
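+ /* Build the bitmap of acceptable IRQ lines in IRQInfo2: use the
+ * module's irq_mask parameter unless an explicit irq_list was given,
+ * in which case each of the (up to four) listed IRQs sets its bit. */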
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i=0; i<4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+
+ link->irq.Handler = orinoco_interrupt;
+ link->irq.Instance = dev;
+
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+ }
+
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
+ hermes_struct_init(hw, link->io.BasePort1,
+ HERMES_IO, HERMES_16BIT_REGSPACING);
+
+ /*
+ * This actually configures the PCMCIA socket -- setting up
+ * the I/O windows and the interrupt mapping, and putting the
+ * card and host interface into "Memory and IO" mode.
+ */
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+
+ /* Ok, we have the configuration, prepare to register the netdev */
+ dev->base_addr = link->io.BasePort1;
+ dev->irq = link->irq.AssignedIRQ;
+ SET_MODULE_OWNER(dev);
+ card->node.major = card->node.minor = 0;
+
+ /* register_netdev will give us an ethX name */
+ dev->name[0] = '\0';
+ /* Tell the stack we exist */
+ if (register_netdev(dev) != 0) {
+ printk(KERN_ERR "orinoco_cs: register_netdev() failed\n");
+ goto failed;
+ }
+
+ /* At this point, the dev_node_t structure(s) needs to be
+ * initialized and arranged in a linked list at link->dev. */
+ strcpy(card->node.dev_name, dev->name);
+ link->dev = &card->node; /* link->dev being non-NULL is also
+ used to indicate that the
+ net_device has been registered */
+ link->state &= ~DEV_CONFIG_PENDING;
+
+ /* Finally, report what we've done */
+ printk(KERN_DEBUG "%s: index 0x%02x: Vcc %d.%d",
+ dev->name, link->conf.ConfigIndex,
+ link->conf.Vcc / 10, link->conf.Vcc % 10);
+ if (link->conf.Vpp1)
+ printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
+ link->conf.Vpp1 % 10);
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ printk(", irq %d", link->irq.AssignedIRQ);
+ if (link->io.NumPorts1)
+ printk(", io 0x%04x-0x%04x", link->io.BasePort1,
+ link->io.BasePort1 + link->io.NumPorts1 - 1);
+ if (link->io.NumPorts2)
+ printk(" & 0x%04x-0x%04x", link->io.BasePort2,
+ link->io.BasePort2 + link->io.NumPorts2 - 1);
+ printk("\n");
+
+ return;
+
+ cs_failed:
+ orinoco_cs_error(link->handle, last_fn, last_ret);
+
+ failed:
+ orinoco_cs_release((u_long) link);
+} /* orinoco_cs_config */
+
+/*
+ * After a card is removed, orinoco_cs_release() will unregister the
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+static void
+orinoco_cs_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *) arg;
+ struct net_device *dev = link->priv;
+ struct orinoco_private *priv = dev->priv;
+ unsigned long flags;
+
+ /* We're committed to taking the device away now, so mark the
+ * hardware as unavailable */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->hw_unavailable++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Don't bother checking to see if these succeed or not */
+ CardServices(ReleaseConfiguration, link->handle);
+ if (link->io.NumPorts1)
+ CardServices(ReleaseIO, link->handle, &link->io);
+ if (link->irq.AssignedIRQ)
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+} /* orinoco_cs_release */
+
+/*
+ * The card status event handler. Mostly, this schedules other stuff
+ * to run after an event is received.
+ */
+static int
+orinoco_cs_event(event_t event, int priority,
+ event_callback_args_t * args)
+{
+ dev_link_t *link = args->client_data;
+ struct net_device *dev = link->priv;
+ struct orinoco_private *priv = dev->priv;
+ struct orinoco_pccard *card = priv->card;
+ int err = 0;
+ unsigned long flags;
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ orinoco_lock(priv, &flags);
+
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
+
+ orinoco_unlock(priv, &flags);
+ }
+ break;
+
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ orinoco_cs_config(link);
+ break;
+
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ /* Mark the device as stopped, to block IO until later */
+ if (link->state & DEV_CONFIG) {
+ /* This is probably racy, but I can't think of
+ a better way, short of rewriting the PCMCIA
+ layer to not suck :-( */
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: %s: Error %d downing interface\n",
+ dev->name,
+ event == CS_EVENT_PM_SUSPEND ? "SUSPEND" : "RESET_PHYSICAL",
+ err);
+
+ netif_device_detach(dev);
+ priv->hw_unavailable++;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ /* FIXME: should we double check that this is
+ * the same card as we had before */
+ CardServices(RequestConfiguration, link->handle,
+ &link->conf);
+
+ if (! test_bit(0, &card->hard_reset_in_progress)) {
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
+ dev->name, err);
+ break;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netif_device_attach(dev);
+ priv->hw_unavailable--;
+
+ if (priv->open && ! priv->hw_unavailable) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card\n",
+ dev->name, err);
+
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+ break;
+ }
+
+ return err;
+} /* orinoco_cs_event */
+
+/********************************************************************/
+/* Module initialization */
+/********************************************************************/
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = "orinoco_cs.c 0.13e (David Gibson <hermes@gibson.dropbear.id.au> and others)";
+
+static int __init
+init_orinoco_cs(void)
+{
+ servinfo_t serv;
+
+ printk(KERN_DEBUG "%s\n", version);
+
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "orinoco_cs: Card Services release "
+ "does not match!\n");
+ return -EINVAL;
+ }
+
+ register_pccard_driver(&dev_info, &orinoco_cs_attach, &orinoco_cs_detach);
+
+ return 0;
+}
+
+static void __exit
+exit_orinoco_cs(void)
+{
+ unregister_pccard_driver(&dev_info);
+
+ if (dev_list)
+ DEBUG(0, "orinoco_cs: Removing leftover devices.\n");
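+ /* orinoco_cs_detach() unlinks the entry from dev_list, so this
+ * loop terminates once every leftover instance has been released
+ * and detached. */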
+ while (dev_list != NULL) {
+ if (dev_list->state & DEV_CONFIG)
+ orinoco_cs_release((u_long) dev_list);
+ orinoco_cs_detach(dev_list);
+ }
+}
+
+module_init(init_orinoco_cs);
+module_exit(exit_orinoco_cs);
+
diff --git a/linux/src/COPYING b/linux/src/COPYING
new file mode 100644
index 0000000..6dc77dc
--- /dev/null
+++ b/linux/src/COPYING
@@ -0,0 +1,351 @@
+
+ NOTE! This copyright does *not* cover user programs that use kernel
+ services by normal system calls - this is merely considered normal use
+ of the kernel, and does *not* fall under the heading of "derived work".
+ Also note that the GPL below is copyrighted by the Free Software
+ Foundation, but the instance of code that it refers to (the Linux
+ kernel) is copyrighted by me and others who actually wrote it.
+
+ Linus Torvalds
+
+----------------------------------------
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 675 Mass Ave, Cambridge, MA 02139, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ Appendix: How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) 19yy <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) 19yy name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/linux/src/arch/i386/kernel/bios32.c b/linux/src/arch/i386/kernel/bios32.c
new file mode 100644
index 0000000..bb0e89c
--- /dev/null
+++ b/linux/src/arch/i386/kernel/bios32.c
@@ -0,0 +1,916 @@
+/*
+ * bios32.c - BIOS32, PCI BIOS functions.
+ *
+ * $Id: bios32.c,v 1.1 1999/04/26 05:50:57 tb Exp $
+ *
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * Drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ *
+ *
+ * CHANGELOG :
+ * Jun 17, 1994 : Modified to accommodate the broken pre-PCI BIOS SPECIFICATION
+ * Revision 2.0 present on <thys@dennis.ee.up.ac.za>'s ASUS mainboard.
+ *
+ * Jan 5, 1995 : Modified to probe PCI hardware at boot time by Frederic
+ * Potter, potter@cao-vlsi.ibp.fr
+ *
+ * Jan 10, 1995 : Modified to store the information about configured pci
+ * devices into a list, which can be accessed via /proc/pci by
+ * Curtis Varner, cvarner@cs.ucr.edu
+ *
+ * Jan 12, 1995 : CPU-PCI bridge optimization support by Frederic Potter.
+ * Alpha version. Intel & UMC chipset support only.
+ *
+ * Apr 16, 1995 : Source merge with the DEC Alpha PCI support. Most of the code
+ * moved to drivers/pci/pci.c.
+ *
+ * Dec 7, 1996 : Added support for direct configuration access of boards
+ * with Intel compatible access schemes (tsbogend@alpha.franken.de)
+ *
+ * Feb 3, 1997 : Set internal functions to static, save/restore flags
+ * avoid dead locks reading broken PCI BIOS, werner@suse.de
+ *
+ * Apr 26, 1997 : Fixed case when there is BIOS32, but not PCI BIOS
+ * (mj@atrey.karlin.mff.cuni.cz)
+ *
+ * May 7, 1997 : Added some missing cli()'s. [mj]
+ *
+ * Jun 20, 1997 : Corrected problems in "conf1" type accesses.
+ * (paubert@iram.es)
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX
+#define PCIBIOS_PCI_BIOS_PRESENT 0xb101
+#define PCIBIOS_FIND_PCI_DEVICE 0xb102
+#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103
+#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
+#define PCIBIOS_READ_CONFIG_BYTE 0xb108
+#define PCIBIOS_READ_CONFIG_WORD 0xb109
+#define PCIBIOS_READ_CONFIG_DWORD 0xb10a
+#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b
+#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c
+#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d
+
+
+/* BIOS32 signature: "_32_" */
+#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
+
+/* PCI signature: "PCI " */
+#define PCI_SIGNATURE (('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24))
+
+/* PCI service signature: "$PCI" */
+#define PCI_SERVICE (('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24))
+
+/*
+ * This is the standard structure used to identify the entry point
+ * to the BIOS32 Service Directory, as documented in
+ * Standard BIOS 32-bit Service Directory Proposal
+ * Revision 0.4 May 24, 1993
+ * Phoenix Technologies Ltd.
+ * Norwood, MA
+ * and the PCI BIOS specification.
+ */
+
+union bios32 {
+ struct {
+ unsigned long signature; /* _32_ */
+ unsigned long entry; /* 32 bit physical address */
+ unsigned char revision; /* Revision level, 0 */
+ unsigned char length; /* Length in paragraphs should be 01 */
+ unsigned char checksum; /* All bytes must add up to zero */
+ unsigned char reserved[5]; /* Must be zero */
+ } fields;
+ char chars[16];
+};
+
+#ifdef CONFIG_PCI
+/*
+ * Physical address of the service directory. I don't know if we're
+ * allowed to have more than one of these or not, so just in case
+ * we'll make pcibios_present() take a memory start parameter and store
+ * the array there.
+ */
+
+static unsigned long bios32_entry = 0;
+static struct {
+ unsigned long address;
+ unsigned short segment;
+} bios32_indirect = { 0, KERNEL_CS };
+
+
+/*
+ * function table for accessing PCI configuration space
+ */
+struct pci_access {
+ int (*find_device)(unsigned short, unsigned short, unsigned short, unsigned char *, unsigned char *);
+ int (*find_class)(unsigned int, unsigned short, unsigned char *, unsigned char *);
+ int (*read_config_byte)(unsigned char, unsigned char, unsigned char, unsigned char *);
+ int (*read_config_word)(unsigned char, unsigned char, unsigned char, unsigned short *);
+ int (*read_config_dword)(unsigned char, unsigned char, unsigned char, unsigned int *);
+ int (*write_config_byte)(unsigned char, unsigned char, unsigned char, unsigned char);
+ int (*write_config_word)(unsigned char, unsigned char, unsigned char, unsigned short);
+ int (*write_config_dword)(unsigned char, unsigned char, unsigned char, unsigned int);
+};
+
+/*
+ * pointer to selected PCI access function table
+ */
+static struct pci_access *access_pci = NULL;
+
+
+
+/*
+ * Returns the entry point for the given service, or 0 on error
+ */
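+
+/*
+ * BIOS32 calling convention: the service identifier is passed in %eax and
+ * %ebx = 0 selects "return service entry"; the directory returns a status
+ * code in %al, the service's physical base address in %ebx, its length in
+ * %ecx and the entry offset within that range in %edx, hence the
+ * "address + entry" result below.
+ */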
+
+static unsigned long bios32_service(unsigned long service)
+{
+ unsigned char return_code; /* %al */
+ unsigned long address; /* %ebx */
+ unsigned long length; /* %ecx */
+ unsigned long entry; /* %edx */
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%edi); cld"
+ : "=a" (return_code),
+ "=b" (address),
+ "=c" (length),
+ "=d" (entry)
+ : "0" (service),
+ "1" (0),
+ "D" (&bios32_indirect));
+ restore_flags(flags);
+
+ switch (return_code) {
+ case 0:
+ return address + entry;
+ case 0x80: /* Not present */
+ printk("bios32_service(0x%lx) : not present\n", service);
+ return 0;
+ default: /* Shouldn't happen */
+ printk("bios32_service(0x%lx) : returned 0x%x, mail drew@colorado.edu\n",
+ service, return_code);
+ return 0;
+ }
+}
+
+static long pcibios_entry = 0;
+static struct {
+ unsigned long address;
+ unsigned short segment;
+} pci_indirect = { 0, KERNEL_CS };
+
+
+static int check_pcibios(void)
+{
+ unsigned long signature;
+ unsigned char present_status;
+ unsigned char major_revision;
+ unsigned char minor_revision;
+ unsigned long flags;
+ int pack;
+
+ if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
+ pci_indirect.address = phystokv(pcibios_entry);
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%edi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:\tshl $8, %%eax\n\t"
+ "movw %%bx, %%ax"
+ : "=d" (signature),
+ "=a" (pack)
+ : "1" (PCIBIOS_PCI_BIOS_PRESENT),
+ "D" (&pci_indirect)
+ : "bx", "cx");
+ restore_flags(flags);
+
+ present_status = (pack >> 16) & 0xff;
+ major_revision = (pack >> 8) & 0xff;
+ minor_revision = pack & 0xff;
+ if (present_status || (signature != PCI_SIGNATURE)) {
+ printk ("pcibios_init : %s : BIOS32 Service Directory says PCI BIOS is present,\n"
+ " but PCI_BIOS_PRESENT subfunction fails with present status of 0x%x\n"
+ " and signature of 0x%08lx (%c%c%c%c). mail drew@Colorado.EDU\n",
+ (signature == PCI_SIGNATURE) ? "WARNING" : "ERROR",
+ present_status, signature,
+ (char) (signature >> 0), (char) (signature >> 8),
+ (char) (signature >> 16), (char) (signature >> 24));
+
+ if (signature != PCI_SIGNATURE)
+ pcibios_entry = 0;
+ }
+ if (pcibios_entry) {
+ printk ("pcibios_init : PCI BIOS revision %x.%02x entry at 0x%lx\n",
+ major_revision, minor_revision, pcibios_entry);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+static int pci_bios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *device_fn)
+{
+ unsigned long bx;
+ unsigned long ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__ ("lcall *(%%edi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=b" (bx),
+ "=a" (ret)
+ : "1" (PCIBIOS_FIND_PCI_CLASS_CODE),
+ "c" (class_code),
+ "S" ((int) index),
+ "D" (&pci_indirect));
+ restore_flags(flags);
+ *bus = (bx >> 8) & 0xff;
+ *device_fn = bx & 0xff;
+ return (int) (ret & 0xff00) >> 8;
+}
+
+
+static int pci_bios_find_device (unsigned short vendor, unsigned short device_id,
+ unsigned short index, unsigned char *bus, unsigned char *device_fn)
+{
+ unsigned short bx;
+ unsigned short ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%edi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=b" (bx),
+ "=a" (ret)
+ : "1" (PCIBIOS_FIND_PCI_DEVICE),
+ "c" (device_id),
+ "d" (vendor),
+ "S" ((int) index),
+ "D" (&pci_indirect));
+ restore_flags(flags);
+ *bus = (bx >> 8) & 0xff;
+ *device_fn = bx & 0xff;
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_read_config_byte(unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_BYTE),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_read_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_WORD),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_read_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int *value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=c" (*value),
+ "=a" (ret)
+ : "1" (PCIBIOS_READ_CONFIG_DWORD),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_write_config_byte (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_BYTE),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_write_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_WORD),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+static int pci_bios_write_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int value)
+{
+ unsigned long ret;
+ unsigned long bx = (bus << 8) | device_fn;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ __asm__("lcall *(%%esi); cld\n\t"
+ "jc 1f\n\t"
+ "xor %%ah, %%ah\n"
+ "1:"
+ : "=a" (ret)
+ : "0" (PCIBIOS_WRITE_CONFIG_DWORD),
+ "c" (value),
+ "b" (bx),
+ "D" ((long) where),
+ "S" (&pci_indirect));
+ restore_flags(flags);
+ return (int) (ret & 0xff00) >> 8;
+}
+
+/*
+ * function table for BIOS32 access
+ */
+static struct pci_access pci_bios_access = {
+ pci_bios_find_device,
+ pci_bios_find_class,
+ pci_bios_read_config_byte,
+ pci_bios_read_config_word,
+ pci_bios_read_config_dword,
+ pci_bios_write_config_byte,
+ pci_bios_write_config_word,
+ pci_bios_write_config_dword
+};
+
+
+
+/*
+ * Given the vendor and device ids, find the n'th instance of that device
+ * in the system.
+ */
+static int pci_direct_find_device (unsigned short vendor, unsigned short device_id,
+ unsigned short index, unsigned char *bus,
+ unsigned char *devfn)
+{
+ unsigned int curr = 0;
+ struct pci_dev *dev;
+
+ for (dev = pci_devices; dev; dev = dev->next) {
+ if (dev->vendor == vendor && dev->device == device_id) {
+ if (curr == index) {
+ *devfn = dev->devfn;
+ *bus = dev->bus->number;
+ return PCIBIOS_SUCCESSFUL;
+ }
+ ++curr;
+ }
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+
+/*
+ * Given the class, find the n'th instance of that device
+ * in the system.
+ */
+static int pci_direct_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *devfn)
+{
+ unsigned int curr = 0;
+ struct pci_dev *dev;
+
+ for (dev = pci_devices; dev; dev = dev->next) {
+ if (dev->class == class_code) {
+ if (curr == index) {
+ *devfn = dev->devfn;
+ *bus = dev->bus->number;
+ return PCIBIOS_SUCCESSFUL;
+ }
+ ++curr;
+ }
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+/*
+ * Functions for accessing PCI configuration space with type 1 accesses
+ */
+#define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (bus << 16) | (device_fn << 8) | (where & ~3))
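+/*
+ * The address written to port 0xCF8 encodes bit 31 = enable, bits 16-23 =
+ * bus, bits 8-15 = device/function and bits 2-7 = the dword-aligned config
+ * register; the data is then transferred through port 0xCFC, offset by the
+ * low bits of 'where' for byte and word accesses.
+ */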
+
+static int pci_conf1_read_config_byte(unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char *value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ *value = inb(0xCFC + (where&3));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_read_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short *value)
+{
+ unsigned long flags;
+
+ if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ *value = inw(0xCFC + (where&2));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_read_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int *value)
+{
+ unsigned long flags;
+
+ if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ *value = inl(0xCFC);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config_byte (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ outb(value, 0xCFC + (where&3));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config_word (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned short value)
+{
+ unsigned long flags;
+
+ if (where&1) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ outw(value, 0xCFC + (where&2));
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int value)
+{
+ unsigned long flags;
+
+ if (where&3) return PCIBIOS_BAD_REGISTER_NUMBER;
+ save_flags(flags); cli();
+ outl(CONFIG_CMD(bus,device_fn,where), 0xCF8);
+ outl(value, 0xCFC);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+#undef CONFIG_CMD
+
+/*
+ * function table for type 1
+ */
+static struct pci_access pci_direct_conf1 = {
+ pci_direct_find_device,
+ pci_direct_find_class,
+ pci_conf1_read_config_byte,
+ pci_conf1_read_config_word,
+ pci_conf1_read_config_dword,
+ pci_conf1_write_config_byte,
+ pci_conf1_write_config_word,
+ pci_conf1_write_config_dword
+};
+
+/*
+ * Functions for accessing PCI configuration space with type 2 accesses
+ */
+#define IOADDR(devfn, where) ((0xC000 | ((devfn & 0x78) << 5)) + where)
+#define FUNC(devfn) (((devfn & 7) << 1) | 0xf0)
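+/*
+ * Type 2 access maps each device's configuration space into I/O ports
+ * 0xC000-0xCFFF: the function number is written to 0xCF8 and the bus
+ * number to 0xCFA, then the register is read or written at IOADDR().
+ * Only devices 0-15 are reachable, hence the (device_fn & 0x80) checks.
+ */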
+
+static int pci_conf2_read_config_byte(unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char *value)
+{
+ unsigned long flags;
+
+ if (device_fn & 0x80)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ *value = inb(IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_read_config_word (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned short *value)
+{
+ unsigned long flags;
+
+ if (device_fn & 0x80)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ *value = inw(IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_read_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int *value)
+{
+ unsigned long flags;
+
+ if (device_fn & 0x80)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ *value = inl (IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_write_config_byte (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned char value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ outb (value, IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_write_config_word (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned short value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ outw (value, IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf2_write_config_dword (unsigned char bus, unsigned char device_fn,
+ unsigned char where, unsigned int value)
+{
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ outb (FUNC(device_fn), 0xCF8);
+ outb (bus, 0xCFA);
+ outl (value, IOADDR(device_fn,where));
+ outb (0, 0xCF8);
+ restore_flags(flags);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+#undef IOADDR
+#undef FUNC
+
+/*
+ * function table for type 2
+ */
+static struct pci_access pci_direct_conf2 = {
+ pci_direct_find_device,
+ pci_direct_find_class,
+ pci_conf2_read_config_byte,
+ pci_conf2_read_config_word,
+ pci_conf2_read_config_dword,
+ pci_conf2_write_config_byte,
+ pci_conf2_write_config_word,
+ pci_conf2_write_config_dword
+};
+
+
+static struct pci_access *check_direct_pci(void)
+{
+ unsigned int tmp;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+
+ /*
+ * check if configuration type 1 works
+ */
+ outb (0x01, 0xCFB);
+ tmp = inl (0xCF8);
+ outl (0x80000000, 0xCF8);
+ if (inl (0xCF8) == 0x80000000) {
+ outl (tmp, 0xCF8);
+ restore_flags(flags);
+ printk("pcibios_init: Using configuration type 1\n");
+ return &pci_direct_conf1;
+ }
+ outl (tmp, 0xCF8);
+
+ /*
+ * check if configuration type 2 works
+ */
+ outb (0x00, 0xCFB);
+ outb (0x00, 0xCF8);
+ outb (0x00, 0xCFA);
+ if (inb (0xCF8) == 0x00 && inb (0xCFB) == 0x00) {
+ restore_flags(flags);
+ printk("pcibios_init: Using configuration type 2\n");
+ return &pci_direct_conf2;
+ }
+ restore_flags(flags);
+ printk("pcibios_init: Not supported chipset for direct PCI access !\n");
+ return NULL;
+}
+
+
+/*
+ * access defined pcibios functions via
+ * the function table
+ */
+
+int pcibios_present(void)
+{
+ return access_pci ? 1 : 0;
+}
+
+int pcibios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *device_fn)
+{
+ if (access_pci && access_pci->find_class)
+ return access_pci->find_class(class_code, index, bus, device_fn);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_find_device (unsigned short vendor, unsigned short device_id,
+ unsigned short index, unsigned char *bus, unsigned char *device_fn)
+{
+ if (access_pci && access_pci->find_device)
+ return access_pci->find_device(vendor, device_id, index, bus, device_fn);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_read_config_byte (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char *value)
+{
+ if (access_pci && access_pci->read_config_byte)
+ return access_pci->read_config_byte(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_read_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short *value)
+{
+ if (access_pci && access_pci->read_config_word)
+ return access_pci->read_config_word(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_read_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int *value)
+{
+ if (access_pci && access_pci->read_config_dword)
+ return access_pci->read_config_dword(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_write_config_byte (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned char value)
+{
+ if (access_pci && access_pci->write_config_byte)
+ return access_pci->write_config_byte(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_write_config_word (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned short value)
+{
+ if (access_pci && access_pci->write_config_word)
+ return access_pci->write_config_word(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+int pcibios_write_config_dword (unsigned char bus,
+ unsigned char device_fn, unsigned char where, unsigned int value)
+{
+ if (access_pci && access_pci->write_config_dword)
+ return access_pci->write_config_dword(bus, device_fn, where, value);
+
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+}
+
+const char *pcibios_strerror (int error)
+{
+ static char buf[80];
+
+ switch (error) {
+ case PCIBIOS_SUCCESSFUL:
+ return "SUCCESSFUL";
+
+ case PCIBIOS_FUNC_NOT_SUPPORTED:
+ return "FUNC_NOT_SUPPORTED";
+
+ case PCIBIOS_BAD_VENDOR_ID:
+ return "SUCCESSFUL";
+
+ case PCIBIOS_DEVICE_NOT_FOUND:
+ return "DEVICE_NOT_FOUND";
+
+ case PCIBIOS_BAD_REGISTER_NUMBER:
+ return "BAD_REGISTER_NUMBER";
+
+ case PCIBIOS_SET_FAILED:
+ return "SET_FAILED";
+
+ case PCIBIOS_BUFFER_TOO_SMALL:
+ return "BUFFER_TOO_SMALL";
+
+ default:
+ sprintf (buf, "UNKNOWN RETURN 0x%x", error);
+ return buf;
+ }
+}
+
+
+unsigned long pcibios_fixup(unsigned long mem_start, unsigned long mem_end)
+{
+ return mem_start;
+}
+
+#endif
+
+unsigned long pcibios_init(unsigned long memory_start, unsigned long memory_end)
+{
+#ifdef CONFIG_PCI
+ union bios32 *check;
+ unsigned char sum;
+ int i, length;
+
+ /*
+ * Follow the standard procedure for locating the BIOS32 Service
+ * directory by scanning the permissible address range from
+ * 0xe0000 through 0xfffff for a valid BIOS32 structure.
+ *
+ */
+
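+ /*
+ * A candidate header is accepted only if it carries the "_32_"
+ * signature, declares a non-zero length and its length*16 bytes
+ * checksum to zero (mod 256).
+ */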
+ for (check = (union bios32 *) phystokv(0xe0000);
+ check <= (union bios32 *) phystokv(0xffff0);
+ ++check) {
+ if (check->fields.signature != BIOS32_SIGNATURE)
+ continue;
+ length = check->fields.length * 16;
+ if (!length)
+ continue;
+ sum = 0;
+ for (i = 0; i < length ; ++i)
+ sum += check->chars[i];
+ if (sum != 0)
+ continue;
+ if (check->fields.revision != 0) {
+ printk("pcibios_init : unsupported revision %d at 0x%lx, mail drew@colorado.edu\n",
+ check->fields.revision, _kvtophys(check));
+ continue;
+ }
+ printk ("pcibios_init : BIOS32 Service Directory structure at 0x%lx\n", _kvtophys(check));
+ if (!bios32_entry) {
+ if (check->fields.entry >= 0x100000) {
+ printk("pcibios_init: entry in high memory, trying direct PCI access\n");
+ access_pci = check_direct_pci();
+ } else {
+ bios32_entry = check->fields.entry;
+ printk ("pcibios_init : BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
+ bios32_indirect.address = phystokv(bios32_entry);
+ }
+ }
+ }
+ if (bios32_entry && check_pcibios())
+ access_pci = &pci_bios_access;
+ else
+ access_pci = check_direct_pci();
+#endif
+ return memory_start;
+}
diff --git a/linux/src/arch/i386/kernel/irq.c b/linux/src/arch/i386/kernel/irq.c
new file mode 100644
index 0000000..6db6115
--- /dev/null
+++ b/linux/src/arch/i386/kernel/irq.c
@@ -0,0 +1,582 @@
+/*
+ * linux/arch/i386/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+/*
+ * IRQ's are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/smp.h>
+
+#define CR0_NE 32
+
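+/*
+ * Cached copies of the interrupt mask registers of the master and slave
+ * 8259A PICs (I/O ports 0x21 and 0xA1); mask_irq()/unmask_irq() update
+ * the cache and write it back instead of reading the hardware each time.
+ */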
+static unsigned char cache_21 = 0xff;
+static unsigned char cache_A1 = 0xff;
+
+#ifdef __SMP_PROF__
+static unsigned int int_count[NR_CPUS][NR_IRQS] = {{0},};
+#endif
+
+static inline void mask_irq(unsigned int irq_nr)
+{
+ unsigned char mask;
+
+ mask = 1 << (irq_nr & 7);
+ if (irq_nr < 8) {
+ cache_21 |= mask;
+ outb(cache_21,0x21);
+ } else {
+ cache_A1 |= mask;
+ outb(cache_A1,0xA1);
+ }
+}
+
+static inline void unmask_irq(unsigned int irq_nr)
+{
+ unsigned char mask;
+
+ mask = ~(1 << (irq_nr & 7));
+ if (irq_nr < 8) {
+ cache_21 &= mask;
+ outb(cache_21,0x21);
+ } else {
+ cache_A1 &= mask;
+ outb(cache_A1,0xA1);
+ }
+}
+
+void disable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ mask_irq(irq_nr);
+ restore_flags(flags);
+}
+
+void enable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ unmask_irq(irq_nr);
+ restore_flags(flags);
+}
+
+/*
+ * This builds up the IRQ handler stubs using some ugly macros in irq.h
+ *
+ * These macros create the low-level assembly IRQ routines that do all
+ * the operations that are needed to keep the AT interrupt-controller
+ * happy. They are also written to be fast - and to disable interrupts
+ * as little as humanly possible.
+ *
+ * NOTE! These macros expand to three different handlers for each line: one
+ * complete handler that does all the fancy stuff (including signal handling),
+ * and one fast handler that is meant for simple IRQ's that want to be
+ * atomic. The specific handler is chosen depending on the SA_INTERRUPT
+ * flag when installing a handler. Finally, one "bad interrupt" handler, that
+ * is used when no handler is present.
+ *
+ * The timer interrupt is handled specially to ensure that the jiffies
+ * variable is updated at all times. Specifically, the timer interrupt is
+ * just like the complete handlers except that it is invoked with interrupts
+ * disabled and should never re-enable them. If other interrupts were
+ * allowed to be processed while the timer interrupt is active, then the
+ * other interrupts would have to avoid using the jiffies variable for delay
+ * and interval timing operations to avoid hanging the system.
+ */
+BUILD_TIMER_IRQ(FIRST,0,0x01)
+BUILD_IRQ(FIRST,1,0x02)
+BUILD_IRQ(FIRST,2,0x04)
+BUILD_IRQ(FIRST,3,0x08)
+BUILD_IRQ(FIRST,4,0x10)
+BUILD_IRQ(FIRST,5,0x20)
+BUILD_IRQ(FIRST,6,0x40)
+BUILD_IRQ(FIRST,7,0x80)
+BUILD_IRQ(SECOND,8,0x01)
+BUILD_IRQ(SECOND,9,0x02)
+BUILD_IRQ(SECOND,10,0x04)
+BUILD_IRQ(SECOND,11,0x08)
+BUILD_IRQ(SECOND,12,0x10)
+#ifdef __SMP__
+BUILD_MSGIRQ(SECOND,13,0x20)
+#else
+BUILD_IRQ(SECOND,13,0x20)
+#endif
+BUILD_IRQ(SECOND,14,0x40)
+BUILD_IRQ(SECOND,15,0x80)
+#ifdef __SMP__
+BUILD_RESCHEDIRQ(16)
+#endif
+
+/*
+ * Pointers to the low-level handlers: first the general ones, then the
+ * fast ones, then the bad ones.
+ */
+static void (*interrupt[17])(void) = {
+ IRQ0_interrupt, IRQ1_interrupt, IRQ2_interrupt, IRQ3_interrupt,
+ IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
+ IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
+ IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt
+#ifdef __SMP__
+ ,IRQ16_interrupt
+#endif
+};
+
+static void (*fast_interrupt[16])(void) = {
+ fast_IRQ0_interrupt, fast_IRQ1_interrupt,
+ fast_IRQ2_interrupt, fast_IRQ3_interrupt,
+ fast_IRQ4_interrupt, fast_IRQ5_interrupt,
+ fast_IRQ6_interrupt, fast_IRQ7_interrupt,
+ fast_IRQ8_interrupt, fast_IRQ9_interrupt,
+ fast_IRQ10_interrupt, fast_IRQ11_interrupt,
+ fast_IRQ12_interrupt, fast_IRQ13_interrupt,
+ fast_IRQ14_interrupt, fast_IRQ15_interrupt
+};
+
+static void (*bad_interrupt[16])(void) = {
+ bad_IRQ0_interrupt, bad_IRQ1_interrupt,
+ bad_IRQ2_interrupt, bad_IRQ3_interrupt,
+ bad_IRQ4_interrupt, bad_IRQ5_interrupt,
+ bad_IRQ6_interrupt, bad_IRQ7_interrupt,
+ bad_IRQ8_interrupt, bad_IRQ9_interrupt,
+ bad_IRQ10_interrupt, bad_IRQ11_interrupt,
+ bad_IRQ12_interrupt, bad_IRQ13_interrupt,
+ bad_IRQ14_interrupt, bad_IRQ15_interrupt
+};
+
+/*
+ * Initial irq handlers.
+ */
+
+static void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+
+#ifdef __SMP__
+
+/*
+ * On SMP boards, irq13 is used for interprocessor interrupts (IPI's).
+ */
+static struct irqaction irq13 = { smp_message_irq, SA_INTERRUPT, 0, "IPI", NULL, NULL };
+
+#else
+
+/*
+ * Note that on a 486, we don't want to do a SIGFPE on an irq13
+ * as the irq is unreliable, and exception 16 works correctly
+ * (ie as explained in the intel literature). On a 386, you
+ * can't use exception 16 due to bad IBM design, so we have to
+ * rely on the less exact irq13.
+ *
+ * Careful.. Not only is IRQ13 unreliable, but it also
+ * leads to races. IBM designers who came up with it should
+ * be shot.
+ */
+
+
+static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
+{
+ outb(0,0xF0);
+ if (ignore_irq13 || !hard_math)
+ return;
+ math_error();
+}
+
+static struct irqaction irq13 = { math_error_irq, 0, 0, "math error", NULL, NULL };
+
+#endif
+
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
+
+static struct irqaction *irq_action[16] = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+int get_irq_list(char *buf)
+{
+ int i, len = 0;
+ struct irqaction * action;
+
+ for (i = 0 ; i < 16 ; i++) {
+ action = irq_action[i];
+ if (!action)
+ continue;
+ len += sprintf(buf+len, "%2d: %10u %c %s",
+ i, kstat.interrupts[i],
+ (action->flags & SA_INTERRUPT) ? '+' : ' ',
+ action->name);
+ for (action=action->next; action; action = action->next) {
+ len += sprintf(buf+len, ",%s %s",
+ (action->flags & SA_INTERRUPT) ? " +" : "",
+ action->name);
+ }
+ len += sprintf(buf+len, "\n");
+ }
+/*
+ * Linus - should you add NMI counts here ?????
+ */
+#ifdef __SMP_PROF__
+ len+=sprintf(buf+len, "IPI: %8lu received\n",
+ ipi_count);
+#endif
+ return len;
+}
+
+#ifdef __SMP_PROF__
+
+int get_smp_prof_list(char *buf) {
+ int i,j, len = 0;
+ struct irqaction * action;
+ unsigned long sum_spins = 0;
+ unsigned long sum_spins_syscall = 0;
+ unsigned long sum_spins_sys_idle = 0;
+ unsigned long sum_smp_idle_count = 0;
+
+ for (i=0;i<smp_num_cpus;i++) {
+ int cpunum = cpu_logical_map[i];
+ sum_spins+=smp_spins[cpunum];
+ sum_spins_syscall+=smp_spins_syscall[cpunum];
+ sum_spins_sys_idle+=smp_spins_sys_idle[cpunum];
+ sum_smp_idle_count+=smp_idle_count[cpunum];
+ }
+
+ len += sprintf(buf+len,"CPUS: %10i \n", smp_num_cpus);
+ len += sprintf(buf+len," SUM ");
+ for (i=0;i<smp_num_cpus;i++)
+ len += sprintf(buf+len," P%1d ",cpu_logical_map[i]);
+ len += sprintf(buf+len,"\n");
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = *(i + irq_action);
+ if (!action || !action->handler)
+ continue;
+ len += sprintf(buf+len, "%3d: %10d ",
+ i, kstat.interrupts[i]);
+ for (j=0;j<smp_num_cpus;j++)
+ len+=sprintf(buf+len, "%10d ",
+ int_count[cpu_logical_map[j]][i]);
+ len += sprintf(buf+len, "%c %s",
+ (action->flags & SA_INTERRUPT) ? '+' : ' ',
+ action->name);
+ for (action=action->next; action; action = action->next) {
+ len += sprintf(buf+len, ",%s %s",
+ (action->flags & SA_INTERRUPT) ? " +" : "",
+ action->name);
+ }
+ len += sprintf(buf+len, "\n");
+ }
+ len+=sprintf(buf+len, "LCK: %10lu",
+ sum_spins);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_spins[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," spins from int\n");
+
+ len+=sprintf(buf+len, "LCK: %10lu",
+ sum_spins_syscall);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_spins_syscall[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," spins from syscall\n");
+
+ len+=sprintf(buf+len, "LCK: %10lu",
+ sum_spins_sys_idle);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_spins_sys_idle[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," spins from sysidle\n");
+ len+=sprintf(buf+len,"IDLE %10lu",sum_smp_idle_count);
+
+ for (i=0;i<smp_num_cpus;i++)
+ len+=sprintf(buf+len," %10lu",smp_idle_count[cpu_logical_map[i]]);
+
+ len +=sprintf(buf+len," idle ticks\n");
+
+ len+=sprintf(buf+len, "IPI: %10lu received\n",
+ ipi_count);
+
+ return len;
+}
+#endif
+
+
+
+/*
+ * do_IRQ handles IRQ's that have been installed without the
+ * SA_INTERRUPT flag: it uses the full signal-handling return
+ * and runs with other interrupts enabled. All relatively slow
+ * IRQ's should use this format: notably the keyboard/timer
+ * routines.
+ */
+asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
+{
+ struct irqaction * action = *(irq + irq_action);
+ int do_random = 0;
+ int c,intm,mask;
+#ifdef IRQ_DEBUG
+ static int count;
+ if (smp_processor_id() != 0 && count++ < 1000)
+ printk("IRQ %d: done by CPU %d\n",irq,smp_processor_id());
+#endif
+ if (irq >= 8) {
+ c = cache_A1;
+ intm = inb(0xA1);
+ mask = 1 << (irq - 8);
+ } else {
+ c = cache_21;
+ intm = inb(0x21);
+ mask = 1 << irq;
+ }
+ if (!(c & mask) || !(intm & mask)) {
+#ifdef IRQ_DEBUG
+ printk("IRQ %d (proc %d):cache_x1=0x%x,INT mask=0x%x\n", irq, smp_processor_id(),c,intm);
+#endif
+ /* better to return because the interrupt may be asserted again;
+ the bad thing is that we may lose some interrupts */
+ return;
+ }
+#ifdef __SMP__
+ if(smp_threads_ready && active_kernel_processor!=smp_processor_id())
+ panic("IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
+#endif
+
+ kstat.interrupts[irq]++;
+#ifdef __SMP_PROF__
+ int_count[smp_processor_id()][irq]++;
+#endif
+ while (action) {
+ do_random |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ }
+ if (do_random & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+}
+
+/*
+ * do_fast_IRQ handles IRQ's that don't need the fancy interrupt return
+ * stuff - the handler is also running with interrupts disabled unless
+ * it explicitly enables them later.
+ */
+asmlinkage void do_fast_IRQ(int irq)
+{
+ struct irqaction * action = *(irq + irq_action);
+ int do_random = 0;
+
+#ifdef __SMP__
+ /* IRQ 13 is allowed - that's a flush tlb */
+ if(smp_threads_ready && active_kernel_processor!=smp_processor_id() && irq!=13)
+ panic("fast_IRQ %d: active processor set wrongly(%d not %d).\n", irq, active_kernel_processor, smp_processor_id());
+#endif
+
+ kstat.interrupts[irq]++;
+#ifdef __SMP_PROF__
+ int_count[smp_processor_id()][irq]++;
+#endif
+ while (action) {
+ do_random |= action->flags;
+ action->handler(irq, action->dev_id, NULL);
+ action = action->next;
+ }
+ if (do_random & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+}
+
+int setup_x86_irq(int irq, struct irqaction * new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+
+ p = irq_action + irq;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ))
+ return -EBUSY;
+
+ /* Can't share interrupts unless both are same type */
+ if ((old->flags ^ new->flags) & SA_INTERRUPT)
+ return -EBUSY;
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ if (new->flags & SA_SAMPLE_RANDOM)
+ rand_initialize_irq(irq);
+
+ save_flags(flags);
+ cli();
+ *p = new;
+
+ if (!shared) {
+ if (new->flags & SA_INTERRUPT)
+ set_intr_gate(0x20+irq,fast_interrupt[irq]);
+ else
+ set_intr_gate(0x20+irq,interrupt[irq]);
+ unmask_irq(irq);
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char * devname,
+ void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+ if (irq > 15)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_x86_irq(irq, action);
+
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction * action, **p;
+ unsigned long flags;
+
+ if (irq > 15) {
+ printk("Trying to free IRQ%d\n",irq);
+ return;
+ }
+ for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now free it */
+ save_flags(flags);
+ cli();
+ *p = action->next;
+ if (!irq[irq_action]) {
+ mask_irq(irq);
+ set_intr_gate(0x20+irq,bad_interrupt[irq]);
+ }
+ restore_flags(flags);
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+}
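
(For orientation, a driver typically uses the request_irq()/free_irq() pair above roughly as sketched here; the IRQ number, names and the cookie are made up for illustration, and the same dev_id pointer must be passed to both calls.)

	static int mycard_cookie;	/* any unique pointer works as dev_id */

	static void mycard_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* acknowledge the board and do the urgent work here */
	}

	static int mycard_open(void)
	{
		if (request_irq(11, mycard_interrupt, SA_SHIRQ, "mycard", &mycard_cookie))
			return -EBUSY;	/* line busy, incompatible flags, or no memory */
		return 0;
	}

	static void mycard_close(void)
	{
		free_irq(11, &mycard_cookie);
	}
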
+
+unsigned long probe_irq_on (void)
+{
+ unsigned int i, irqs = 0, irqmask;
+ unsigned long delay;
+
+ /* first, enable any unassigned irqs */
+ for (i = 15; i > 0; i--) {
+ if (!irq_action[i]) {
+ enable_irq(i);
+ irqs |= (1 << i);
+ }
+ }
+
+ /* wait for spurious interrupts to mask themselves out again */
+ for (delay = jiffies + HZ/10; delay > jiffies; )
+ /* about 100ms delay */;
+
+ /* now filter out any obviously spurious interrupts */
+ irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21;
+ return irqs & ~irqmask;
+}
+
+int probe_irq_off (unsigned long irqs)
+{
+ unsigned int i, irqmask;
+
+ irqmask = (((unsigned int)cache_A1)<<8) | (unsigned int)cache_21;
+#ifdef DEBUG
+ printk("probe_irq_off: irqs=0x%04lx irqmask=0x%04x\n", irqs, irqmask);
+#endif
+ irqs &= irqmask;
+ if (!irqs)
+ return 0;
+ i = ffz(~irqs);
+ if (irqs != (irqs & (1 << i)))
+ i = -i;
+ return i;
+}
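
(The intended calling sequence for the two probe routines above, as a sketch; how the device is provoked into interrupting, and how long to wait, are of course device-specific.)

	unsigned long mask;
	int irq;

	mask = probe_irq_on();
	/* ... poke the hardware so that it raises its interrupt ... */
	udelay(100);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* >0: line found, 0: none, <0: several fired */
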
+
+void init_IRQ(void)
+{
+ int i;
+ static unsigned char smptrap=0;
+ if(smptrap)
+ return;
+ smptrap=1;
+
+ /* set the clock to 100 Hz */
+ outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(LATCH & 0xff , 0x40); /* LSB */
+ outb(LATCH >> 8 , 0x40); /* MSB */
+ for (i = 0; i < 16 ; i++)
+ set_intr_gate(0x20+i,bad_interrupt[i]);
+ /* This bit is a hack because we don't send timer messages to all processors yet */
+ /* It has to be here .. it doesn't work if you put it down the bottom - assembler explodes 8) */
+#ifdef __SMP__
+ set_intr_gate(0x20+i, interrupt[i]); /* IRQ '16' - IPI for rescheduling */
+#endif
+ request_region(0x20,0x20,"pic1");
+ request_region(0xa0,0x20,"pic2");
+ setup_x86_irq(2, &irq2);
+ setup_x86_irq(13, &irq13);
+}
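
(Background on the three outb's in init_IRQ(): they program PIT channel 0 as a rate generator, per the "binary, mode 2, LSB/MSB, ch 0" comment. Assuming the usual definition of LATCH elsewhere in the tree, (1193180 + HZ/2) / HZ, the arithmetic for HZ = 100 works out as:)

	LATCH = (1193180 + 50) / 100 = 11932 = 0x2e9c
	outb_p(0x9c, 0x40);	/* LSB: counter reloads every 11932 input clocks */
	outb  (0x2e, 0x40);	/* MSB: 1193180 / 11932 gives ~100 ticks per second */
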
diff --git a/linux/src/arch/i386/lib/delay.c b/linux/src/arch/i386/lib/delay.c
new file mode 100644
index 0000000..04ccf16
--- /dev/null
+++ b/linux/src/arch/i386/lib/delay.c
@@ -0,0 +1,45 @@
+/*
+ * Precise Delay Loops for i386
+ *
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * The __delay function must _NOT_ be inlined as its execution time
+ * depends wildly on alignment on many x86 processors. The additional
+ * jump magic is needed to get the timing stable on all the CPU's
+ * we have to worry about.
+ */
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#ifdef __SMP__
+#include <asm/smp.h>
+#endif
+
+void __delay(unsigned long loops)
+{
+ int d0;
+ __asm__ __volatile__(
+ "\tjmp 1f\n"
+ ".align 16\n"
+ "1:\tjmp 2f\n"
+ ".align 16\n"
+ "2:\tdecl %0\n\tjns 2b"
+ :"=&a" (d0)
+ :"0" (loops));
+}
+
+inline void __const_udelay(unsigned long xloops)
+{
+ int d0;
+ __asm__("mull %0"
+ :"=d" (xloops), "=&a" (d0)
+ :"1" (xloops),"0" (loops_per_sec));
+ __delay(xloops);
+}
+
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
+}
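
(A note on the fixed-point trick above: 0x000010c6 is 2^32/1000000 rounded down, and the mull keeps only the high 32 bits of the 64-bit product, so the net effect is loops ~= usecs * loops_per_sec / 1000000 without any division. A worked example with a made-up calibration value:)

	loops_per_sec = 400000000			/* assumed: 400 M loops/s */
	__udelay(10):   xloops = 10 * 0x10c6 = 42940	/* ~ 10 * 2^32 / 10^6     */
	__const_udelay: loops  = (42940 * 400000000) >> 32 = 3999
							/* ~ the 4000 loops worth of 10 us */
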
diff --git a/linux/src/arch/i386/lib/semaphore.S b/linux/src/arch/i386/lib/semaphore.S
new file mode 100644
index 0000000..e09655c
--- /dev/null
+++ b/linux/src/arch/i386/lib/semaphore.S
@@ -0,0 +1,35 @@
+/*
+ * linux/arch/i386/lib/semaphore.S
+ *
+ * Copyright (C) 1996 Linus Torvalds
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * "down_failed" is called with the eventual return address
+ * in %eax, and the address of the semaphore in %ecx. We need
+ * to increment the number of waiters on the semaphore,
+ * call "__down()", and then eventually return to try again.
+ */
+ENTRY(down_failed)
+ pushl %eax
+ pushl %ecx
+ call SYMBOL_NAME(__down)
+ popl %ecx
+ ret
+
+ENTRY(up_wakeup)
+ pushl %eax
+ pushl %ecx
+ call SYMBOL_NAME(__up)
+ popl %ecx
+ ret
+
+ENTRY(down_failed_interruptible)
+ pushl %eax
+ pushl %ecx
+ call SYMBOL_NAME(__down_interruptible)
+ popl %ecx
+ ret
+
diff --git a/linux/src/drivers/block/cmd640.c b/linux/src/drivers/block/cmd640.c
new file mode 100644
index 0000000..b8132dc
--- /dev/null
+++ b/linux/src/drivers/block/cmd640.c
@@ -0,0 +1,850 @@
+/*
+ * linux/drivers/block/cmd640.c Version 1.02 Sep 01, 1996
+ *
+ * Copyright (C) 1995-1996 Linus Torvalds & authors (see below)
+ */
+
+/*
+ * Original author: abramov@cecmow.enet.dec.com (Igor Abramov)
+ *
+ * Maintained by: mlord@pobox.com (Mark Lord)
+ * with fanatical support from a legion of hackers!
+ *
+ * This file provides support for the advanced features and bugs
+ * of IDE interfaces using the CMD Technologies 0640 IDE interface chip.
+ *
+ * These chips are basically fucked by design, and getting this driver
+ * to work on every motherboard design that uses this screwed chip seems
+ * bloody well impossible. However, we're still trying.
+ *
+ * Version 0.97 worked for everybody.
+ *
+ * User feedback is essential. Many thanks to the beta test team:
+ *
+ * A.Hartgers@stud.tue.nl, JZDQC@CUNYVM.CUNY.edu, abramov@cecmow.enet.dec.com,
+ * bardj@utopia.ppp.sn.no, bart@gaga.tue.nl, bbol001@cs.auckland.ac.nz,
+ * chrisc@dbass.demon.co.uk, dalecki@namu26.Num.Math.Uni-Goettingen.de,
+ * derekn@vw.ece.cmu.edu, florian@btp2x3.phy.uni-bayreuth.de,
+ * flynn@dei.unipd.it, gadio@netvision.net.il, godzilla@futuris.net,
+ * j@pobox.com, jkemp1@mises.uni-paderborn.de, jtoppe@hiwaay.net,
+ * kerouac@ssnet.com, meskes@informatik.rwth-aachen.de, hzoli@cs.elte.hu,
+ * peter@udgaard.isgtec.com, phil@tazenda.demon.co.uk, roadcapw@cfw.com,
+ * s0033las@sun10.vsz.bme.hu, schaffer@tam.cornell.edu, sjd@slip.net,
+ * steve@ei.org, ulrpeg@bigcomm.gun.de, ism@tardis.ed.ac.uk, mack@cray.com
+ * liug@mama.indstate.edu, and others.
+ *
+ * Version 0.01 Initial version, hacked out of ide.c,
+ * and #include'd rather than compiled separately.
+ * This will get cleaned up in a subsequent release.
+ *
+ * Version 0.02 Fixes for vlb initialization code, enable prefetch
+ * for versions 'B' and 'C' of chip by default,
+ * some code cleanup.
+ *
+ * Version 0.03 Added reset of secondary interface,
+ * and black list for devices which are not compatible
+ * with prefetch mode. Separate function for setting
+ * prefetch is added, possibly it will be called some
+ * day from ioctl processing code.
+ *
+ * Version 0.04 Now configs/compiles separate from ide.c
+ *
+ * Version 0.05 Major rewrite of interface timing code.
+ * Added new function cmd640_set_mode to set PIO mode
+ * from ioctl call. New drives added to black list.
+ *
+ * Version 0.06 More code cleanup. Prefetch is enabled only for
+ * detected hard drives, not included in prefetch
+ * black list.
+ *
+ * Version 0.07 Changed to more conservative drive tuning policy.
+ * Unknown drives, which report PIO < 4 are set to
+ * (reported_PIO - 1) if it is supported, or to PIO0.
+ * List of known drives extended by info provided by
+ * CMD at their ftp site.
+ *
+ * Version 0.08 Added autotune/noautotune support.
+ *
+ * Version 0.09 Try to be smarter about 2nd port enabling.
+ * Version 0.10 Be nice and don't reset 2nd port.
+ * Version 0.11 Try to handle more weird situations.
+ *
+ * Version 0.12 Lots of bug fixes from Laszlo Peter
+ * irq unmasking disabled for reliability.
+ * try to be even smarter about the second port.
+ * tidy up source code formatting.
+ * Version 0.13 permit irq unmasking again.
+ * Version 0.90 massive code cleanup, some bugs fixed.
+ * defaults all drives to PIO mode0, prefetch off.
+ * autotune is OFF by default, with compile time flag.
+ * prefetch can be turned OFF/ON using "hdparm -p8/-p9"
+ * (requires hdparm-3.1 or newer)
+ * Version 0.91 first release to linux-kernel list.
+ * Version 0.92 move initial reg dump to separate callable function
+ * change "readahead" to "prefetch" to avoid confusion
+ * Version 0.95 respect original BIOS timings unless autotuning.
+ * tons of code cleanup and rearrangement.
+ * added CONFIG_BLK_DEV_CMD640_ENHANCED option
+ * prevent use of unmask when prefetch is on
+ * Version 0.96 prevent use of io_32bit when prefetch is off
+ * Version 0.97 fix VLB secondary interface for sjd@slip.net
+ * other minor tune-ups: 0.96 was very good.
+ * Version 0.98 ignore PCI version when disabled by BIOS
+ * Version 0.99 display setup/active/recovery clocks with PIO mode
+ * Version 1.00 Mmm.. cannot depend on PCMD_ENA in all systems
+ * Version 1.01 slow/fast devsel can be selected with "hdparm -p6/-p7"
+ * ("fast" is necessary for 32bit I/O in some systems)
+ * Version 1.02 fix bug that resulted in slow "setup times"
+ * (patch courtesy of Zoltan Hidvegi)
+ */
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+#define CMD640_PREFETCH_MASKS 1
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+#include "ide.h"
+#include "ide_modes.h"
+
+/*
+ * This flag is set in ide.c by the parameter: ide0=cmd640_vlb
+ */
+int cmd640_vlb = 0;
+
+/*
+ * CMD640 specific registers definition.
+ */
+
+#define VID 0x00
+#define DID 0x02
+#define PCMD 0x04
+#define PCMD_ENA 0x01
+#define PSTTS 0x06
+#define REVID 0x08
+#define PROGIF 0x09
+#define SUBCL 0x0a
+#define BASCL 0x0b
+#define BaseA0 0x10
+#define BaseA1 0x14
+#define BaseA2 0x18
+#define BaseA3 0x1c
+#define INTLINE 0x3c
+#define INPINE 0x3d
+
+#define CFR 0x50
+#define CFR_DEVREV 0x03
+#define CFR_IDE01INTR 0x04
+#define CFR_DEVID 0x18
+#define CFR_AT_VESA_078h 0x20
+#define CFR_DSA1 0x40
+#define CFR_DSA0 0x80
+
+#define CNTRL 0x51
+#define CNTRL_DIS_RA0 0x40
+#define CNTRL_DIS_RA1 0x80
+#define CNTRL_ENA_2ND 0x08
+
+#define CMDTIM 0x52
+#define ARTTIM0 0x53
+#define DRWTIM0 0x54
+#define ARTTIM1 0x55
+#define DRWTIM1 0x56
+#define ARTTIM23 0x57
+#define ARTTIM23_DIS_RA2 0x04
+#define ARTTIM23_DIS_RA3 0x08
+#define DRWTIM23 0x58
+#define BRST 0x59
+
+/*
+ * Registers and masks for easy access by drive index:
+ */
+static byte prefetch_regs[4] = {CNTRL, CNTRL, ARTTIM23, ARTTIM23};
+static byte prefetch_masks[4] = {CNTRL_DIS_RA0, CNTRL_DIS_RA1, ARTTIM23_DIS_RA2, ARTTIM23_DIS_RA3};
+
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+
+static byte arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
+static byte drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM23, DRWTIM23};
+
+/*
+ * Current cmd640 timing values for each drive.
+ * The defaults for each are the slowest possible timings.
+ */
+static byte setup_counts[4] = {4, 4, 4, 4}; /* Address setup count (in clocks) */
+static byte active_counts[4] = {16, 16, 16, 16}; /* Active count (encoded) */
+static byte recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
+
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+
+/*
+ * These are initialized to point at the devices we control
+ */
+static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
+static ide_drive_t *cmd_drives[4];
+
+/*
+ * Interface to access cmd640x registers
+ */
+static unsigned int cmd640_key;
+static void (*put_cmd640_reg)(unsigned short reg, byte val);
+static byte (*get_cmd640_reg)(unsigned short reg);
+
+/*
+ * This is read from the CFR reg, and is used in several places.
+ */
+static unsigned int cmd640_chip_version;
+
+/*
+ * The CMD640x chip does not support DWORD config write cycles, but some
+ * of the BIOSes use them to implement the config services.
+ * Therefore, we must use direct IO instead.
+ */
+
+/* PCI method 1 access */
+
+static void put_cmd640_reg_pci1 (unsigned short reg, byte val)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
+ outb_p(val, (reg & 3) | 0xcfc);
+ restore_flags(flags);
+}
+
+static byte get_cmd640_reg_pci1 (unsigned short reg)
+{
+ byte b;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outl_p((reg & 0xfc) | cmd640_key, 0xcf8);
+ b = inb_p((reg & 3) | 0xcfc);
+ restore_flags(flags);
+ return b;
+}
+
+/* PCI method 2 access (from CMD datasheet) */
+
+static void put_cmd640_reg_pci2 (unsigned short reg, byte val)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(0x10, 0xcf8);
+ outb_p(val, cmd640_key + reg);
+ outb_p(0, 0xcf8);
+ restore_flags(flags);
+}
+
+static byte get_cmd640_reg_pci2 (unsigned short reg)
+{
+ byte b;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(0x10, 0xcf8);
+ b = inb_p(cmd640_key + reg);
+ outb_p(0, 0xcf8);
+ restore_flags(flags);
+ return b;
+}
+
+/* VLB access */
+
+static void put_cmd640_reg_vlb (unsigned short reg, byte val)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(reg, cmd640_key);
+ outb_p(val, cmd640_key + 4);
+ restore_flags(flags);
+}
+
+static byte get_cmd640_reg_vlb (unsigned short reg)
+{
+ byte b;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outb_p(reg, cmd640_key);
+ b = inb_p(cmd640_key + 4);
+ restore_flags(flags);
+ return b;
+}
+
+static int match_pci_cmd640_device (void)
+{
+ const byte ven_dev[4] = {0x95, 0x10, 0x40, 0x06};
+ unsigned int i;
+ for (i = 0; i < 4; i++) {
+ if (get_cmd640_reg(i) != ven_dev[i])
+ return 0;
+ }
+#ifdef STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT
+ if ((get_cmd640_reg(PCMD) & PCMD_ENA) == 0) {
+ printk("ide: cmd640 on PCI disabled by BIOS\n");
+ return 0;
+ }
+#endif /* STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT */
+ return 1; /* success */
+}
+
+/*
+ * Probe for CMD640x -- pci method 1
+ */
+static int probe_for_cmd640_pci1 (void)
+{
+ get_cmd640_reg = get_cmd640_reg_pci1;
+ put_cmd640_reg = put_cmd640_reg_pci1;
+ for (cmd640_key = 0x80000000; cmd640_key <= 0x8000f800; cmd640_key += 0x800) {
+ if (match_pci_cmd640_device())
+ return 1; /* success */
+ }
+ return 0;
+}
+
+/*
+ * Probe for CMD640x -- pci method 2
+ */
+static int probe_for_cmd640_pci2 (void)
+{
+ get_cmd640_reg = get_cmd640_reg_pci2;
+ put_cmd640_reg = put_cmd640_reg_pci2;
+ for (cmd640_key = 0xc000; cmd640_key <= 0xcf00; cmd640_key += 0x100) {
+ if (match_pci_cmd640_device())
+ return 1; /* success */
+ }
+ return 0;
+}
+
+/*
+ * Probe for CMD640x -- vlb
+ */
+static int probe_for_cmd640_vlb (void)
+{
+ byte b;
+
+ get_cmd640_reg = get_cmd640_reg_vlb;
+ put_cmd640_reg = put_cmd640_reg_vlb;
+ cmd640_key = 0x178;
+ b = get_cmd640_reg(CFR);
+ if (b == 0xff || b == 0x00 || (b & CFR_AT_VESA_078h)) {
+ cmd640_key = 0x78;
+ b = get_cmd640_reg(CFR);
+ if (b == 0xff || b == 0x00 || !(b & CFR_AT_VESA_078h))
+ return 0;
+ }
+ return 1; /* success */
+}
+
+/*
+ * Returns 1 if an IDE interface/drive exists at 0x170,
+ * and 0 otherwise.
+ */
+static int secondary_port_responding (void)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET); /* select drive0 */
+ udelay(100);
+ if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) {
+ outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */
+ udelay(100);
+ if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) {
+ restore_flags(flags);
+ return 0; /* nothing responded */
+ }
+ }
+ restore_flags(flags);
+ return 1; /* success */
+}
+
+#ifdef CMD640_DUMP_REGS
+/*
+ * Dump out all cmd640 registers. May be called from ide.c
+ */
+void cmd640_dump_regs (void)
+{
+ unsigned int reg = cmd640_vlb ? 0x50 : 0x00;
+
+ /* Dump current state of chip registers */
+ printk("ide: cmd640 internal register dump:");
+ for (; reg <= 0x59; reg++) {
+ if (!(reg & 0x0f))
+ printk("\n%04x:", reg);
+ printk(" %02x", get_cmd640_reg(reg));
+ }
+ printk("\n");
+}
+#endif
+
+/*
+ * Check whether prefetch is on for a drive,
+ * and initialize the unmask flags for safe operation.
+ */
+static void check_prefetch (unsigned int index)
+{
+ ide_drive_t *drive = cmd_drives[index];
+ byte b = get_cmd640_reg(prefetch_regs[index]);
+
+ if (b & prefetch_masks[index]) { /* is prefetch off? */
+ drive->no_unmask = 0;
+ drive->no_io_32bit = 1;
+ drive->io_32bit = 0;
+ } else {
+#if CMD640_PREFETCH_MASKS
+ drive->no_unmask = 1;
+ drive->unmask = 0;
+#endif
+ drive->no_io_32bit = 0;
+ }
+}
+
+/*
+ * Figure out which devices we control
+ */
+static void setup_device_ptrs (void)
+{
+ unsigned int i;
+
+ cmd_hwif0 = &ide_hwifs[0]; /* default, if not found below */
+ cmd_hwif1 = &ide_hwifs[1]; /* default, if not found below */
+ for (i = 0; i < MAX_HWIFS; i++) {
+ ide_hwif_t *hwif = &ide_hwifs[i];
+ if (hwif->chipset == ide_unknown || hwif->chipset == ide_generic) {
+ if (hwif->io_base == 0x1f0)
+ cmd_hwif0 = hwif;
+ else if (hwif->io_base == 0x170)
+ cmd_hwif1 = hwif;
+ }
+ }
+ cmd_drives[0] = &cmd_hwif0->drives[0];
+ cmd_drives[1] = &cmd_hwif0->drives[1];
+ cmd_drives[2] = &cmd_hwif1->drives[0];
+ cmd_drives[3] = &cmd_hwif1->drives[1];
+}
+
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+
+/*
+ * Sets prefetch mode for a drive.
+ */
+static void set_prefetch_mode (unsigned int index, int mode)
+{
+ ide_drive_t *drive = cmd_drives[index];
+ int reg = prefetch_regs[index];
+ byte b;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ b = get_cmd640_reg(reg);
+ if (mode) { /* want prefetch on? */
+#if CMD640_PREFETCH_MASKS
+ drive->no_unmask = 1;
+ drive->unmask = 0;
+#endif
+ drive->no_io_32bit = 0;
+ b &= ~prefetch_masks[index]; /* enable prefetch */
+ } else {
+ drive->no_unmask = 0;
+ drive->no_io_32bit = 1;
+ drive->io_32bit = 0;
+ b |= prefetch_masks[index]; /* disable prefetch */
+ }
+ put_cmd640_reg(reg, b);
+ restore_flags(flags);
+}
+
+/*
+ * Dump out current drive clocks settings
+ */
+static void display_clocks (unsigned int index)
+{
+ byte active_count, recovery_count;
+
+ active_count = active_counts[index];
+ if (active_count == 1)
+ ++active_count;
+ recovery_count = recovery_counts[index];
+ if (active_count > 3 && recovery_count == 1)
+ ++recovery_count;
+ if (cmd640_chip_version > 1)
+ recovery_count += 1; /* cmd640b uses (count + 1)*/
+ printk(", clocks=%d/%d/%d\n", setup_counts[index], active_count, recovery_count);
+}
+
+/*
+ * Pack active and recovery counts into single byte representation
+ * used by controller
+ */
+inline static byte pack_nibbles (byte upper, byte lower)
+{
+ return ((upper & 0x0f) << 4) | (lower & 0x0f);
+}
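
(Quick illustration of the packing, with assumed counts: pack_nibbles(4, 16) yields 0x40 -- the active count lands in the upper nibble, and a recovery count of 16 wraps to the 0 encoding, which is how the chip expects "16" to be written; compare the "counts of 16 into counts of zero" remark in program_drive_counts() below.)
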
+
+/*
+ * This routine retrieves the initial drive timings from the chipset.
+ */
+static void retrieve_drive_counts (unsigned int index)
+{
+ byte b;
+
+ /*
+ * Get the internal setup timing, and convert to clock count
+ */
+ b = get_cmd640_reg(arttim_regs[index]) & ~0x3f;
+ switch (b) {
+ case 0x00: b = 4; break;
+ case 0x80: b = 3; break;
+ case 0x40: b = 2; break;
+ default: b = 5; break;
+ }
+ setup_counts[index] = b;
+
+ /*
+ * Get the active/recovery counts
+ */
+ b = get_cmd640_reg(drwtim_regs[index]);
+ active_counts[index] = (b >> 4) ? (b >> 4) : 0x10;
+ recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10;
+}
+
+
+/*
+ * This routine writes the prepared setup/active/recovery counts
+ * for a drive into the cmd640 chipset registers to activate them.
+ */
+static void program_drive_counts (unsigned int index)
+{
+ unsigned long flags;
+ byte setup_count = setup_counts[index];
+ byte active_count = active_counts[index];
+ byte recovery_count = recovery_counts[index];
+
+ /*
+ * Set up address setup count and drive read/write timing registers.
+ * Primary interface has individual count/timing registers for
+ * each drive. Secondary interface has one common set of registers,
+ * so we merge the timings, using the slowest value for each timing.
+ */
+ if (index > 1) {
+ unsigned int mate;
+ if (cmd_drives[mate = index ^ 1]->present) {
+ if (setup_count < setup_counts[mate])
+ setup_count = setup_counts[mate];
+ if (active_count < active_counts[mate])
+ active_count = active_counts[mate];
+ if (recovery_count < recovery_counts[mate])
+ recovery_count = recovery_counts[mate];
+ }
+ }
+
+ /*
+ * Convert setup_count to internal chipset representation
+ */
+ switch (setup_count) {
+ case 4: setup_count = 0x00; break;
+ case 3: setup_count = 0x80; break;
+ case 1:
+ case 2: setup_count = 0x40; break;
+ default: setup_count = 0xc0; /* case 5 */
+ }
+
+ /*
+ * Now that everything is ready, program the new timings
+ */
+ save_flags (flags);
+ cli();
+ /*
+ * Program the address_setup clocks into ARTTIM reg,
+ * and then the active/recovery counts into the DRWTIM reg
+ * (this converts counts of 16 into counts of zero -- okay).
+ */
+ setup_count |= get_cmd640_reg(arttim_regs[index]) & 0x3f;
+ put_cmd640_reg(arttim_regs[index], setup_count);
+ put_cmd640_reg(drwtim_regs[index], pack_nibbles(active_count, recovery_count));
+ restore_flags(flags);
+}
+
+/*
+ * Set a specific pio_mode for a drive
+ */
+static void cmd640_set_mode (unsigned int index, byte pio_mode, unsigned int cycle_time)
+{
+ int setup_time, active_time, recovery_time, clock_time;
+ byte setup_count, active_count, recovery_count, recovery_count2, cycle_count;
+ int bus_speed = ide_system_bus_speed();
+
+ if (pio_mode > 5)
+ pio_mode = 5;
+ setup_time = ide_pio_timings[pio_mode].setup_time;
+ active_time = ide_pio_timings[pio_mode].active_time;
+ recovery_time = cycle_time - (setup_time + active_time);
+ clock_time = 1000 / bus_speed;
+ cycle_count = (cycle_time + clock_time - 1) / clock_time;
+
+ setup_count = (setup_time + clock_time - 1) / clock_time;
+
+ active_count = (active_time + clock_time - 1) / clock_time;
+ if (active_count < 2)
+ active_count = 2; /* minimum allowed by cmd640 */
+
+ recovery_count = (recovery_time + clock_time - 1) / clock_time;
+ recovery_count2 = cycle_count - (setup_count + active_count);
+ if (recovery_count2 > recovery_count)
+ recovery_count = recovery_count2;
+ if (recovery_count < 2)
+ recovery_count = 2; /* minimum allowed by cmd640 */
+ if (recovery_count > 17) {
+ active_count += recovery_count - 17;
+ recovery_count = 17;
+ }
+ if (active_count > 16)
+ active_count = 16; /* maximum allowed by cmd640 */
+ if (cmd640_chip_version > 1)
+ recovery_count -= 1; /* cmd640b uses (count + 1)*/
+ if (recovery_count > 16)
+ recovery_count = 16; /* maximum allowed by cmd640 */
+
+ setup_counts[index] = setup_count;
+ active_counts[index] = active_count;
+ recovery_counts[index] = recovery_count;
+
+ /*
+ * In a perfect world, we might set the drive pio mode here
+ * (using WIN_SETFEATURE) before continuing.
+ *
+ * But we do not, because:
+ * 1) this is the wrong place to do it (proper is do_special() in ide.c)
+ * 2) in practice this is rarely, if ever, necessary
+ */
+ program_drive_counts (index);
+}
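
(A worked pass through the arithmetic above, using illustrative numbers rather than values from the real PIO tables: assume bus_speed = 40, so clock_time = 25 ns, and a requested timing of setup 30 ns, active 100 ns, cycle 240 ns.)

	setup_count    = (30  + 24) / 25 = 2
	active_count   = (100 + 24) / 25 = 4		/* >= 2, ok                    */
	recovery_time  = 240 - (30 + 100) = 110
	cycle_count    = (240 + 24) / 25 = 10
	recovery_count = max((110 + 24) / 25, 10 - (2 + 4)) = max(5, 4) = 5
	/* on a cmd640b (chip_version > 1) one clock is subtracted: 4            */
	/* program_drive_counts() then writes setup 0x40 into ARTTIM (ignoring   */
	/* the preserved low bits) and pack_nibbles(4, 4) = 0x44 into DRWTIM     */
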
+
+/*
+ * Drive PIO mode selection:
+ */
+static void cmd640_tune_drive (ide_drive_t *drive, byte mode_wanted)
+{
+ byte b;
+ ide_pio_data_t d;
+ unsigned int index = 0;
+
+ while (drive != cmd_drives[index]) {
+ if (++index > 3) {
+ printk("%s: bad news in cmd640_tune_drive\n", drive->name);
+ return;
+ }
+ }
+ switch (mode_wanted) {
+ case 6: /* set fast-devsel off */
+ case 7: /* set fast-devsel on */
+ mode_wanted &= 1;
+ b = get_cmd640_reg(CNTRL) & ~0x27;
+ if (mode_wanted)
+ b |= 0x27;
+ put_cmd640_reg(CNTRL, b);
+ printk("%s: %sabled cmd640 fast host timing (devsel)\n", drive->name, mode_wanted ? "en" : "dis");
+ return;
+
+ case 8: /* set prefetch off */
+ case 9: /* set prefetch on */
+ mode_wanted &= 1;
+ set_prefetch_mode(index, mode_wanted);
+ printk("%s: %sabled cmd640 prefetch\n", drive->name, mode_wanted ? "en" : "dis");
+ return;
+ }
+
+ (void) ide_get_best_pio_mode (drive, mode_wanted, 5, &d);
+ cmd640_set_mode (index, d.pio_mode, d.cycle_time);
+
+ printk ("%s: selected cmd640 PIO mode%d (%dns) %s/IORDY%s",
+ drive->name,
+ d.pio_mode,
+ d.cycle_time,
+ d.use_iordy ? "w" : "wo",
+ d.overridden ? " (overriding vendor mode)" : "");
+ display_clocks(index);
+}
+
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+
+/*
+ * Probe for a cmd640 chipset, and initialize it if found. Called from ide.c
+ */
+int ide_probe_for_cmd640x (void)
+{
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ int second_port_toggled = 0;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ int second_port_cmd640 = 0;
+ const char *bus_type, *port2;
+ unsigned int index;
+ byte b, cfr;
+
+ if (cmd640_vlb && probe_for_cmd640_vlb()) {
+ bus_type = "VLB";
+ } else {
+ cmd640_vlb = 0;
+ if (probe_for_cmd640_pci1())
+ bus_type = "PCI (type1)";
+ else if (probe_for_cmd640_pci2())
+ bus_type = "PCI (type2)";
+ else
+ return 0;
+ }
+ /*
+ * Undocumented magic (there is no 0x5b reg in specs)
+ */
+ put_cmd640_reg(0x5b, 0xbd);
+ if (get_cmd640_reg(0x5b) != 0xbd) {
+ printk("ide: cmd640 init failed: wrong value in reg 0x5b\n");
+ return 0;
+ }
+ put_cmd640_reg(0x5b, 0);
+
+#ifdef CMD640_DUMP_REGS
+ CMD640_DUMP_REGS;
+#endif
+
+ /*
+ * Documented magic begins here
+ */
+ cfr = get_cmd640_reg(CFR);
+ cmd640_chip_version = cfr & CFR_DEVREV;
+ if (cmd640_chip_version == 0) {
+ printk ("ide: bad cmd640 revision: %d\n", cmd640_chip_version);
+ return 0;
+ }
+
+ /*
+ * Initialize data for primary port
+ */
+ setup_device_ptrs ();
+ printk("%s: buggy cmd640%c interface on %s, config=0x%02x\n",
+ cmd_hwif0->name, 'a' + cmd640_chip_version - 1, bus_type, cfr);
+ cmd_hwif0->chipset = ide_cmd640;
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ cmd_hwif0->tuneproc = &cmd640_tune_drive;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+
+ /*
+ * Ensure compatibility by always using the slowest timings
+ * for access to the drive's command register block,
+ * and reset the prefetch burstsize to default (512 bytes).
+ *
+ * Maybe we need a way to NOT do these on *some* systems?
+ */
+ put_cmd640_reg(CMDTIM, 0);
+ put_cmd640_reg(BRST, 0x40);
+
+ /*
+ * Try to enable the secondary interface, if not already enabled
+ */
+ if (cmd_hwif1->noprobe) {
+ port2 = "not probed";
+ } else {
+ b = get_cmd640_reg(CNTRL);
+ if (secondary_port_responding()) {
+ if ((b & CNTRL_ENA_2ND)) {
+ second_port_cmd640 = 1;
+ port2 = "okay";
+ } else if (cmd640_vlb) {
+ second_port_cmd640 = 1;
+ port2 = "alive";
+ } else
+ port2 = "not cmd640";
+ } else {
+ put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */
+ if (secondary_port_responding()) {
+ second_port_cmd640 = 1;
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ second_port_toggled = 1;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ port2 = "enabled";
+ } else {
+ put_cmd640_reg(CNTRL, b); /* restore original setting */
+ port2 = "not responding";
+ }
+ }
+ }
+
+ /*
+ * Initialize data for secondary cmd640 port, if enabled
+ */
+ if (second_port_cmd640) {
+ cmd_hwif0->serialized = 1;
+ cmd_hwif1->serialized = 1;
+ cmd_hwif1->chipset = ide_cmd640;
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ cmd_hwif1->tuneproc = &cmd640_tune_drive;
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ }
+ printk("%s: %sserialized, secondary interface %s\n", cmd_hwif1->name,
+ cmd_hwif0->serialized ? "" : "not ", port2);
+
+ /*
+ * Establish initial timings/prefetch for all drives.
+ * Do not unnecessarily disturb any prior BIOS setup of these.
+ */
+ for (index = 0; index < (2 + (second_port_cmd640 << 1)); index++) {
+ ide_drive_t *drive = cmd_drives[index];
+#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ if (drive->autotune || ((index > 1) && second_port_toggled)) {
+ /*
+ * Reset timing to the slowest speed and turn off prefetch.
+ * This way, the drive identify code has a better chance.
+ */
+ setup_counts [index] = 4; /* max possible */
+ active_counts [index] = 16; /* max possible */
+ recovery_counts [index] = 16; /* max possible */
+ program_drive_counts (index);
+ set_prefetch_mode (index, 0);
+ printk("cmd640: drive%d timings/prefetch cleared\n", index);
+ } else {
+ /*
+ * Record timings/prefetch without changing them.
+ * This preserves any prior BIOS setup.
+ */
+ retrieve_drive_counts (index);
+ check_prefetch (index);
+ printk("cmd640: drive%d timings/prefetch(%s) preserved",
+ index, drive->no_io_32bit ? "off" : "on");
+ display_clocks(index);
+ }
+#else
+ /*
+ * Set the drive unmask flags to match the prefetch setting
+ */
+ check_prefetch (index);
+ printk("cmd640: drive%d timings/prefetch(%s) preserved\n",
+ index, drive->no_io_32bit ? "off" : "on");
+#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ }
+
+#ifdef CMD640_DUMP_REGS
+ CMD640_DUMP_REGS;
+#endif
+ return 1;
+}
+
diff --git a/linux/src/drivers/block/floppy.c b/linux/src/drivers/block/floppy.c
new file mode 100644
index 0000000..1b96c44
--- /dev/null
+++ b/linux/src/drivers/block/floppy.c
@@ -0,0 +1,4284 @@
+/*
+ * linux/kernel/floppy.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1993, 1994 Alain Knaff
+ */
+/*
+ * 02.12.91 - Changed to static variables to indicate need for reset
+ * and recalibrate. This makes some things easier (output_byte reset
+ * checking etc), and means less interrupt jumping in case of errors,
+ * so the code is hopefully easier to understand.
+ */
+
+/*
+ * This file is certainly a mess. I've tried my best to get it working,
+ * but I don't like programming floppies, and I have only one anyway.
+ * Urgel. I should check for more errors, and do more graceful error
+ * recovery. Seems there are problems with several drives. I've tried to
+ * correct them. No promises.
+ */
+
+/*
+ * As with hd.c, all routines within this file can (and will) be called
+ * by interrupts, so extreme caution is needed. A hardware interrupt
+ * handler may not sleep, or a kernel panic will happen. Thus I cannot
+ * call "floppy-on" directly, but have to set a special timer interrupt
+ * etc.
+ */
+
+/*
+ * 28.02.92 - made track-buffering routines, based on the routines written
+ * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
+ */
+
+/*
+ * Automatic floppy-detection and formatting written by Werner Almesberger
+ * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
+ * the floppy-change signal detection.
+ */
+
+/*
+ * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
+ * FDC data overrun bug, added some preliminary stuff for vertical
+ * recording support.
+ *
+ * 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
+ *
+ * TODO: Errors are still not counted properly.
+ */
+
+/* 1992/9/20
+ * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
+ * modeled after the freeware MS-DOS program fdformat/88 V1.8 by
+ * Christoph H. Hochst\"atter.
+ * I have fixed the shift values to the ones I always use. Maybe a new
+ * ioctl() should be created to be able to modify them.
+ * There is a bug in the driver that makes it impossible to format a
+ * floppy as the first thing after bootup.
+ */
+
+/*
+ * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
+ * this helped the floppy driver as well. Much cleaner, and still seems to
+ * work.
+ */
+
+/* 1994/6/24 --bbroad-- added the floppy table entries and made
+ * minor modifications to allow 2.88 floppies to be run.
+ */
+
+/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
+ * disk types.
+ */
+
+/*
+ * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
+ * format bug fixes, but unfortunately some new bugs too...
+ */
+
+/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
+ * errors to allow safe writing by specialized programs.
+ */
+
+/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
+ * by defining bit 1 of the "stretch" parameter to mean put sectors on the
+ * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
+ * drives are "upside-down").
+ */
+
+/*
+ * 1995/8/26 -- Andreas Busse -- added Mips support.
+ */
+
+/*
+ * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
+ * features to asm/floppy.h.
+ */
+
+
+#define FLOPPY_SANITY_CHECK
+#undef FLOPPY_SILENT_DCL_CLEAR
+
+#define REALLY_SLOW_IO
+
+#define DEBUGT 2
+#define DCL_DEBUG /* debug disk change line */
+
+/* do print messages for unexpected interrupts */
+static int print_unex=1;
+#include <linux/utsname.h>
+#include <linux/module.h>
+
+/* the following is the mask of allowed drives. By default units 2 and
+ * 3 of both floppy controllers are disabled, because switching on the
+ * motor of these drives causes system hangs on some PCI computers. drive
+ * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
+ * a drive is allowed. */
+static int FLOPPY_IRQ=6;
+static int FLOPPY_DMA=2;
+static int allowed_drive_mask = 0x33;
+
+static int irqdma_allocated = 0;
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/tqueue.h>
+#define FDPATCHES
+#include <linux/fdreg.h>
+
+
+#include <linux/fd.h>
+
+
+#define OLDFDRAWCMD 0x020d /* send a raw command to the FDC */
+
+struct old_floppy_raw_cmd {
+ void *data;
+ long length;
+
+ unsigned char rate;
+ unsigned char flags;
+ unsigned char cmd_count;
+ unsigned char cmd[9];
+ unsigned char reply_count;
+ unsigned char reply[7];
+ int track;
+};
+
+#include <linux/errno.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+
+static int use_virtual_dma=0; /* virtual DMA for Intel */
+static unsigned short virtual_dma_port=0x3f0;
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static int set_dor(int fdc, char mask, char data);
+static inline int __get_order(unsigned long size);
+#include <asm/floppy.h>
+
+
+#define MAJOR_NR FLOPPY_MAJOR
+
+#include <linux/blk.h>
+#include <linux/cdrom.h> /* for the compatibility eject ioctl */
+
+#include <linux/dev/glue/glue.h>
+
+
+#ifndef FLOPPY_MOTOR_MASK
+#define FLOPPY_MOTOR_MASK 0xf0
+#endif
+
+#ifndef fd_get_dma_residue
+#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
+#endif
+
+/* Dma Memory related stuff */
+
+/* Pure 2^n version of get_order */
+static inline int __get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
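
(Sanity check of the order computation, assuming the usual PAGE_SHIFT of 12, i.e. 4 KB pages:)

	__get_order(4096)  : (4095  >> 11) = 1  -> order 0	/* one page              */
	__get_order(4097)  : (4096  >> 11) = 2  -> order 1	/* rounds up to two pages */
	__get_order(65536) : (65535 >> 11) = 31 -> order 4	/* a 64 KB DMA buffer     */
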
+
+#ifndef fd_dma_mem_free
+#define fd_dma_mem_free(addr, size) free_pages(addr, __get_order(size))
+#endif
+
+#ifndef fd_dma_mem_alloc
+#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,__get_order(size))
+#endif
+
+/* End dma memory related stuff */
+
+static unsigned int fake_change = 0;
+static int initialising=1;
+
+static inline int TYPE(kdev_t x) {
+ return (MINOR(x)>>2) & 0x1f;
+}
+static inline int DRIVE(kdev_t x) {
+ return (MINOR(x)&0x03) | ((MINOR(x)&0x80) >> 5);
+}
+#define ITYPE(x) (((x)>>2) & 0x1f)
+#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
+#define UNIT(x) ((x) & 0x03) /* drive on fdc */
+#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
+#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
+ /* reverse mapping from unit and fdc to drive */
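
(Worked example of the minor-number encoding defined above: drive 5 is unit 1 on fdc 1, so TOMINOR(5) = (5 & 3) | ((5 & 4) << 5) = 0x81; with floppy type 2 folded into bits 2-6 as (2 << 2), the minor becomes 0x89, and indeed DRIVE(0x89) = 1 | (0x80 >> 5) = 5 while TYPE(0x89) = (0x89 >> 2) & 0x1f = 2.)
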
+#define DP (&drive_params[current_drive])
+#define DRS (&drive_state[current_drive])
+#define DRWE (&write_errors[current_drive])
+#define FDCS (&fdc_state[fdc])
+#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags))
+#define SETF(x) (set_bit(x##_BIT, &DRS->flags))
+#define TESTF(x) (test_bit(x##_BIT, &DRS->flags))
+
+#define UDP (&drive_params[drive])
+#define UDRS (&drive_state[drive])
+#define UDRWE (&write_errors[drive])
+#define UFDCS (&fdc_state[FDC(drive)])
+#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags))
+#define USETF(x) (set_bit(x##_BIT, &UDRS->flags))
+#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags))
+
+#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args)
+
+#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2)
+#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
+
+#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x)))
+
+#define INT_OFF save_flags(flags); cli()
+#define INT_ON restore_flags(flags)
+
+/* read/write */
+#define COMMAND raw_cmd->cmd[0]
+#define DR_SELECT raw_cmd->cmd[1]
+#define TRACK raw_cmd->cmd[2]
+#define HEAD raw_cmd->cmd[3]
+#define SECTOR raw_cmd->cmd[4]
+#define SIZECODE raw_cmd->cmd[5]
+#define SECT_PER_TRACK raw_cmd->cmd[6]
+#define GAP raw_cmd->cmd[7]
+#define SIZECODE2 raw_cmd->cmd[8]
+#define NR_RW 9
+
+/* format */
+#define F_SIZECODE raw_cmd->cmd[2]
+#define F_SECT_PER_TRACK raw_cmd->cmd[3]
+#define F_GAP raw_cmd->cmd[4]
+#define F_FILL raw_cmd->cmd[5]
+#define NR_F 6
+
+/*
+ * Maximum disk size (in kilobytes). This default is used whenever the
+ * current disk size is unknown.
+ * [Now it is rather a minimum]
+ */
+#define MAX_DISK_SIZE 4 /* 3984*/
+
+#define K_64 0x10000 /* 64KB */
+
+/*
+ * globals used by 'result()'
+ */
+#define MAX_REPLIES 16
+static unsigned char reply_buffer[MAX_REPLIES];
+static int inr; /* size of reply buffer, when called from interrupt */
+#define ST0 (reply_buffer[0])
+#define ST1 (reply_buffer[1])
+#define ST2 (reply_buffer[2])
+#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
+#define R_TRACK (reply_buffer[3])
+#define R_HEAD (reply_buffer[4])
+#define R_SECTOR (reply_buffer[5])
+#define R_SIZECODE (reply_buffer[6])
+
+#define SEL_DLY (2*HZ/100)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+/*
+ * this struct defines the different floppy drive types.
+ */
+static struct {
+ struct floppy_drive_params params;
+ const char *name; /* name printed while booting */
+} default_drive_params[]= {
+/* NOTE: the time values in jiffies should be in msec!
+ CMOS drive type
+ | Maximum data rate supported by drive type
+ | | Head load time, msec
+ | | | Head unload time, msec (not used)
+ | | | | Step rate interval, usec
+ | | | | | Time needed for spinup time (jiffies)
+ | | | | | | Timeout for spinning down (jiffies)
+ | | | | | | | Spindown offset (where disk stops)
+ | | | | | | | | Select delay
+ | | | | | | | | | RPS
+ | | | | | | | | | | Max number of tracks
+ | | | | | | | | | | | Interrupt timeout
+ | | | | | | | | | | | | Max nonintlv. sectors
+ | | | | | | | | | | | | | -Max Errors- flags */
+{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
+
+{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
+
+{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
+ 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
+
+{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
+
+{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
+ 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
+
+{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
+
+{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
+ 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
+/* | --autodetected formats--- | | |
+ * read_track | | Name printed when booting
+ * | Native format
+ * Frequency of disk change checks */
+};
+
+static struct floppy_drive_params drive_params[N_DRIVE];
+static struct floppy_drive_struct drive_state[N_DRIVE];
+static struct floppy_write_errors write_errors[N_DRIVE];
+static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
+
+/*
+ * This struct defines the different floppy types.
+ *
+ * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
+ * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
+ * tells if the disk is in Commodore 1581 format, which means side 0 sectors
+ * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
+ * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
+ * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
+ * side 0 is on physical side 0 (but with the misnamed sector IDs).
+ * 'stretch' should probably be renamed to something more general, like
+ * 'options'. Other parameters should be self-explanatory (see also
+ * setfdprm(8)).
+ */
+static struct floppy_struct floppy_type[32] = {
+ { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
+ { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
+ { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
+ { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
+ { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
+ { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
+ { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
+ { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
+ { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
+ { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120"}, /* 9 3.12MB 3.5" */
+
+ { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
+ { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
+ { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
+ { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
+ { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
+ { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
+ { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
+ { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
+ { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
+ { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
+
+ { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
+ { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
+ { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
+ { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
+ { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
+ { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
+ { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
+ { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
+
+ { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
+ { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
+ { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
+};
+
+#define NUMBER(x) (sizeof(x) / sizeof(*(x)))
+#define SECTSIZE (_FD_SECTSIZE(*floppy))
+
+/* Auto-detection: Disk type used until the next media change occurs. */
+static struct floppy_struct *current_type[N_DRIVE] = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+/*
+ * User-provided type information. current_type points to
+ * the respective entry of this array.
+ */
+static struct floppy_struct user_params[N_DRIVE];
+
+static int floppy_sizes[256];
+static int floppy_blocksizes[256] = { 0, };
+
+/*
+ * The driver is trying to determine the correct media format
+ * while probing is set. rw_interrupt() clears it after a
+ * successful access.
+ */
+static int probing = 0;
+
+/* Synchronization of FDC access. */
+#define FD_COMMAND_NONE -1
+#define FD_COMMAND_ERROR 2
+#define FD_COMMAND_OKAY 3
+
+static volatile int command_status = FD_COMMAND_NONE, fdc_busy = 0;
+static struct wait_queue *fdc_wait = NULL, *command_done = NULL;
+#define NO_SIGNAL (!(current->signal & ~current->blocked) || !interruptible)
+#define CALL(x) if ((x) == -EINTR) return -EINTR
+#define ECALL(x) if ((ret = (x))) return ret;
+#define _WAIT(x,i) CALL(ret=wait_til_done((x),i))
+#define WAIT(x) _WAIT((x),interruptible)
+#define IWAIT(x) _WAIT((x),1)
+
+/* Errors during formatting are counted here. */
+static int format_errors;
+
+/* Format request descriptor. */
+static struct format_descr format_req;
+
+/*
+ * Rate is 0 for 500 kbps, 1 for 300 kbps, 2 for 250 kbps
+ * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
+ * H is head unload time (1=16ms, 2=32ms, etc)
+ */
+
+/*
+ * Track buffer
+ * Because these are written to by the DMA controller, they must
+ * not contain a 64k byte boundary crossing, or data will be
+ * corrupted/lost.
+ */
+static char *floppy_track_buffer=0;
+static int max_buffer_sectors=0;
+
+static int *errors;
+typedef void (*done_f)(int);
+static struct cont_t {
+ void (*interrupt)(void); /* this is called after the interrupt of the
+ * main command */
+ void (*redo)(void); /* this is called to retry the operation */
+ void (*error)(void); /* this is called to tally an error */
+ done_f done; /* this is called to say if the operation has
+ * succeeded/failed */
+} *cont=NULL;
+
+static void floppy_ready(void);
+static void floppy_start(void);
+static void process_fd_request(void);
+static void recalibrate_floppy(void);
+static void floppy_shutdown(void);
+
+static int floppy_grab_irq_and_dma(void);
+static void floppy_release_irq_and_dma(void);
+
+/*
+ * The "reset" variable should be tested whenever an interrupt is scheduled,
+ * after the commands have been sent. This is to ensure that the driver doesn't
+ * get wedged when the interrupt doesn't come because of a failed command.
+ * reset doesn't need to be tested before sending commands, because
+ * output_byte is automatically disabled when reset is set.
+ */
+#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } }
+static void reset_fdc(void);
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+#define NO_TRACK -1
+#define NEED_1_RECAL -2
+#define NEED_2_RECAL -3
+
+/* */
+static int usage_count = 0;
+
+
+/* buffer related variables */
+static int buffer_track = -1;
+static int buffer_drive = -1;
+static int buffer_min = -1;
+static int buffer_max = -1;
+
+/* fdc related variables, should end up in a struct */
+static struct floppy_fdc_state fdc_state[N_FDC];
+static int fdc; /* current fdc */
+
+static struct floppy_struct *_floppy = floppy_type;
+static unsigned char current_drive = 0;
+static long current_count_sectors = 0;
+static unsigned char sector_t; /* sector in track */
+
+#ifndef fd_eject
+#define fd_eject(x) -EINVAL
+#endif
+
+
+#ifdef DEBUGT
+static long unsigned debugtimer;
+#endif
+
+/*
+ * Debugging
+ * =========
+ */
+static inline void set_debugt(void)
+{
+#ifdef DEBUGT
+ debugtimer = jiffies;
+#endif
+}
+
+static inline void debugt(const char *message)
+{
+#ifdef DEBUGT
+ if (DP->flags & DEBUGT)
+ printk("%s dtime=%lu\n", message, jiffies-debugtimer);
+#endif
+}
+
+typedef void (*timeout_fn)(unsigned long);
+static struct timer_list fd_timeout ={ NULL, NULL, 0, 0,
+ (timeout_fn) floppy_shutdown };
+
+static const char *timeout_message;
+
+#ifdef FLOPPY_SANITY_CHECK
+static void is_alive(const char *message)
+{
+ /* this routine checks whether the floppy driver is "alive" */
+ if (fdc_busy && command_status < 2 && !fd_timeout.prev){
+ DPRINT("timeout handler died: %s\n",message);
+ }
+}
+#endif
+
+#ifdef FLOPPY_SANITY_CHECK
+
+#define OLOGSIZE 20
+
+static void (*lasthandler)(void) = NULL;
+static int interruptjiffies=0;
+static int resultjiffies=0;
+static int resultsize=0;
+static int lastredo=0;
+
+static struct output_log {
+ unsigned char data;
+ unsigned char status;
+ unsigned long jiffies;
+} output_log[OLOGSIZE];
+
+static int output_log_pos=0;
+#endif
+
+#define CURRENTD -1
+#define MAXTIMEOUT -2
+
+static void reschedule_timeout(int drive, const char *message, int marg)
+{
+ if (drive == CURRENTD)
+ drive = current_drive;
+ del_timer(&fd_timeout);
+ if (drive < 0 || drive > N_DRIVE) {
+ fd_timeout.expires = jiffies + 20*HZ;
+ drive=0;
+ } else
+ fd_timeout.expires = jiffies + UDP->timeout;
+ add_timer(&fd_timeout);
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("reschedule timeout ");
+ printk(message, marg);
+ printk("\n");
+ }
+ timeout_message = message;
+}
+
+static int maximum(int a, int b)
+{
+ if(a > b)
+ return a;
+ else
+ return b;
+}
+#define INFBOUND(a,b) (a)=maximum((a),(b));
+
+static int minimum(int a, int b)
+{
+ if(a < b)
+ return a;
+ else
+ return b;
+}
+#define SUPBOUND(a,b) (a)=minimum((a),(b));
+
+
+/*
+ * Bottom half floppy driver.
+ * ==========================
+ *
+ * This part of the file contains the code talking directly to the hardware,
+ * and also the main service loop (seek-configure-spinup-command)
+ */
+
+/*
+ * disk change.
+ * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
+ * and the last_checked date.
+ *
+ * last_checked is the date of the last check which showed 'no disk change'
+ * FD_DISK_CHANGE is set under two conditions:
+ * 1. The floppy has been changed after some i/o to that floppy already
+ * took place.
+ * 2. No floppy disk is in the drive. This is done in order to ensure that
+ * requests are quickly flushed in case there is no disk in the drive. It
+ * follows that FD_DISK_CHANGE can only be cleared if there is a disk in
+ * the drive.
+ *
+ * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
+ * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
+ * each seek. If a disk is present, the disk change line should also be
+ * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
+ * change line is set, this means either that no disk is in the drive, or
+ * that it has been removed since the last seek.
+ *
+ * This means that we really have a third possibility too:
+ * The floppy has been changed after the last seek.
+ */
+
+static int disk_change(int drive)
+{
+ int fdc=FDC(drive);
+#ifdef FLOPPY_SANITY_CHECK
+ if (jiffies - UDRS->select_date < UDP->select_delay)
+ DPRINT("WARNING disk change called early\n");
+ if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
+ (FDCS->dor & 3) != UNIT(drive) ||
+ fdc != FDC(drive)){
+ DPRINT("probing disk change on unselected drive\n");
+ DPRINT("drive=%d fdc=%d dor=%x\n",drive, FDC(drive),
+ FDCS->dor);
+ }
+#endif
+
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("checking disk change line for drive %d\n",drive);
+ DPRINT("jiffies=%ld\n", jiffies);
+ DPRINT("disk change line=%x\n",fd_inb(FD_DIR)&0x80);
+ DPRINT("flags=%x\n",UDRS->flags);
+ }
+#endif
+ if (UDP->flags & FD_BROKEN_DCL)
+ return UTESTF(FD_DISK_CHANGED);
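+	/* Bit 7 of the DIR register is the disk-change line.  XOR-ing with
+	 * UDP->flags lets a per-drive flag in the same bit position
+	 * (presumably FD_INVERTED_DCL) invert the sense of that line for
+	 * drives that report it inverted. */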
+ if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80){
+ USETF(FD_VERIFY); /* verify write protection */
+ if (UDRS->maxblock){
+ /* mark it changed */
+ USETF(FD_DISK_CHANGED);
+ }
+
+ /* invalidate its geometry */
+ if (UDRS->keep_data >= 0) {
+ if ((UDP->flags & FTD_MSG) &&
+ current_type[drive] != NULL)
+ DPRINT("Disk type is undefined after "
+ "disk change\n");
+ current_type[drive] = NULL;
+ floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE;
+ }
+
+ /*USETF(FD_DISK_NEWCHANGE);*/
+ return 1;
+ } else {
+ UDRS->last_checked=jiffies;
+ UCLEARF(FD_DISK_NEWCHANGE);
+ }
+ return 0;
+}
+
+static inline int is_selected(int dor, int unit)
+{
+ return ((dor & (0x10 << unit)) && (dor &3) == unit);
+}
+
+static int set_dor(int fdc, char mask, char data)
+{
+ register unsigned char drive, unit, newdor,olddor;
+
+ if (FDCS->address == -1)
+ return -1;
+
+ olddor = FDCS->dor;
+ newdor = (olddor & mask) | data;
+ if (newdor != olddor){
+ unit = olddor & 0x3;
+ if (is_selected(olddor, unit) && !is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+#ifdef DCL_DEBUG
+ if (UDP->flags & FD_DEBUG){
+ DPRINT("calling disk change from set_dor\n");
+ }
+#endif
+ disk_change(drive);
+ }
+ FDCS->dor = newdor;
+ fd_outb(newdor, FD_DOR);
+
+ unit = newdor & 0x3;
+ if (!is_selected(olddor, unit) && is_selected(newdor,unit)){
+ drive = REVDRIVE(fdc,unit);
+ UDRS->select_date = jiffies;
+ }
+ }
+
+ /* FIXME: we should be more graceful here */
+
+ if (newdor & FLOPPY_MOTOR_MASK)
+ floppy_grab_irq_and_dma();
+ if (olddor & FLOPPY_MOTOR_MASK)
+ floppy_release_irq_and_dma();
+ return olddor;
+}
+
+static void twaddle(void)
+{
+ if (DP->select_delay)
+ return;
+ fd_outb(FDCS->dor & ~(0x10<<UNIT(current_drive)),FD_DOR);
+ fd_outb(FDCS->dor, FD_DOR);
+ DRS->select_date = jiffies;
+}
+
+/* reset all driver information about the current fdc. This is needed after
+ * a reset, and after a raw command. */
+static void reset_fdc_info(int mode)
+{
+ int drive;
+
+ FDCS->spec1 = FDCS->spec2 = -1;
+ FDCS->need_configure = 1;
+ FDCS->perp_mode = 1;
+ FDCS->rawcmd = 0;
+ for (drive = 0; drive < N_DRIVE; drive++)
+ if (FDC(drive) == fdc &&
+ (mode || UDRS->track != NEED_1_RECAL))
+ UDRS->track = NEED_2_RECAL;
+}
+
+/* selects the fdc and drive, and enables the fdc's input/dma. */
+static void set_fdc(int drive)
+{
+ if (drive >= 0 && drive < N_DRIVE){
+ fdc = FDC(drive);
+ current_drive = drive;
+ }
+ if (fdc != 1 && fdc != 0) {
+ printk("bad fdc value\n");
+ return;
+ }
+ set_dor(fdc,~0,8);
+#if N_FDC > 1
+ set_dor(1-fdc, ~8, 0);
+#endif
+ if (FDCS->rawcmd == 2)
+ reset_fdc_info(1);
+ if (fd_inb(FD_STATUS) != STATUS_READY)
+ FDCS->reset = 1;
+}
+
+/* locks the driver */
+static int lock_fdc(int drive, int interruptible)
+{
+ unsigned long flags;
+
+ if (!usage_count){
+ printk(KERN_ERR "trying to lock fdc while usage count=0\n");
+ return -1;
+ }
+ if(floppy_grab_irq_and_dma()==-1)
+ return -EBUSY;
+ INT_OFF;
+ while (fdc_busy && NO_SIGNAL)
+ interruptible_sleep_on(&fdc_wait);
+ if (fdc_busy){
+ INT_ON;
+ return -EINTR;
+ }
+ fdc_busy = 1;
+ INT_ON;
+ command_status = FD_COMMAND_NONE;
+ reschedule_timeout(drive, "lock fdc", 0);
+ set_fdc(drive);
+ return 0;
+}
+
+#define LOCK_FDC(drive,interruptible) \
+if (lock_fdc(drive,interruptible)) return -EINTR;
+
+
+/* unlocks the driver */
+static inline void unlock_fdc(void)
+{
+ raw_cmd = 0;
+ if (!fdc_busy)
+ DPRINT("FDC access conflict!\n");
+
+ if (DEVICE_INTR)
+ DPRINT("device interrupt still active at FDC release: %p!\n",
+ DEVICE_INTR);
+ command_status = FD_COMMAND_NONE;
+ del_timer(&fd_timeout);
+ cont = NULL;
+ fdc_busy = 0;
+ floppy_release_irq_and_dma();
+ wake_up(&fdc_wait);
+}
+
+/* switches the motor off after a given timeout */
+static void motor_off_callback(unsigned long nr)
+{
+ unsigned char mask = ~(0x10 << UNIT(nr));
+
+ set_dor(FDC(nr), mask, 0);
+}
+
+static struct timer_list motor_off_timer[N_DRIVE] = {
+ { NULL, NULL, 0, 0, motor_off_callback },
+ { NULL, NULL, 0, 1, motor_off_callback },
+ { NULL, NULL, 0, 2, motor_off_callback },
+ { NULL, NULL, 0, 3, motor_off_callback },
+ { NULL, NULL, 0, 4, motor_off_callback },
+ { NULL, NULL, 0, 5, motor_off_callback },
+ { NULL, NULL, 0, 6, motor_off_callback },
+ { NULL, NULL, 0, 7, motor_off_callback }
+};
+
+/* schedules motor off */
+static void floppy_off(unsigned int drive)
+{
+ unsigned long volatile delta;
+ register int fdc=FDC(drive);
+
+ if (!(FDCS->dor & (0x10 << UNIT(drive))))
+ return;
+
+ del_timer(motor_off_timer+drive);
+
+ /* make spindle stop in a position which minimizes spinup time
+ * next time */
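+	/* For illustration, assuming UDP->rps == 5 (300 rpm) and HZ == 100:
+	 * one revolution lasts HZ/rps == 20 jiffies.  ((delta * rps) % HZ) / rps
+	 * then reduces delta modulo 20, i.e. to the rotational phase reached
+	 * since first_read_date (adjusted by spindown_offset), so subtracting
+	 * it from the spindown delay cuts the motor at roughly the same
+	 * angular position every time. */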
+ if (UDP->rps){
+ delta = jiffies - UDRS->first_read_date + HZ -
+ UDP->spindown_offset;
+ delta = ((delta * UDP->rps) % HZ) / UDP->rps;
+ motor_off_timer[drive].expires = jiffies + UDP->spindown - delta;
+ }
+ add_timer(motor_off_timer+drive);
+}
+
+/*
+ * cycle through all N_DRIVE floppy drives for disk change testing,
+ * stopping at the current drive. This is done before any long operation,
+ * to be sure to have up-to-date disk change information.
+ */
+static void scandrives(void)
+{
+ int i, drive, saved_drive;
+
+ if (DP->select_delay)
+ return;
+
+ saved_drive = current_drive;
+ for (i=0; i < N_DRIVE; i++){
+ drive = (saved_drive + i + 1) % N_DRIVE;
+ if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
+ continue; /* skip closed drives */
+ set_fdc(drive);
+ if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
+ (0x10 << UNIT(drive))))
+ /* switch the motor off again, if it was off to
+ * begin with */
+ set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
+ }
+ set_fdc(saved_drive);
+}
+
+static void empty(void)
+{
+}
+
+static struct tq_struct floppy_tq =
+{ 0, 0, 0, 0 };
+
+static struct timer_list fd_timer ={ NULL, NULL, 0, 0, 0 };
+
+static void cancel_activity(void)
+{
+ CLEAR_INTR;
+ floppy_tq.routine = (void *)(void *) empty;
+ del_timer(&fd_timer);
+}
+
+/* this function makes sure that the disk stays in the drive during the
+ * transfer */
+static void fd_watchdog(void)
+{
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from watchdog\n");
+ }
+#endif
+
+ if (disk_change(current_drive)){
+ DPRINT("disk removed during i/o\n");
+ cancel_activity();
+ cont->done(0);
+ reset_fdc();
+ } else {
+ del_timer(&fd_timer);
+ fd_timer.function = (timeout_fn) fd_watchdog;
+ fd_timer.expires = jiffies + HZ / 10;
+ add_timer(&fd_timer);
+ }
+}
+
+static void main_command_interrupt(void)
+{
+ del_timer(&fd_timer);
+ cont->interrupt();
+}
+
+/* waits for a delay (spinup or select) to pass */
+static int wait_for_completion(int delay, timeout_fn function)
+{
+ if (FDCS->reset){
+		reset_fdc(); /* do the reset during sleep to win time;
+			      * if we don't need to sleep, it's a good
+			      * occasion anyway */
+ return 1;
+ }
+
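+	/* Note: despite its name, 'delay' is an absolute jiffies value; the
+	 * signed subtraction below stays correct across jiffies wraparound. */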
+ if ((signed) (jiffies - delay) < 0){
+ del_timer(&fd_timer);
+ fd_timer.function = function;
+ fd_timer.expires = delay;
+ add_timer(&fd_timer);
+ return 1;
+ }
+ return 0;
+}
+
+static int hlt_disabled=0;
+static void floppy_disable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (!hlt_disabled){
+ hlt_disabled=1;
+#ifdef HAVE_DISABLE_HLT
+ disable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+static void floppy_enable_hlt(void)
+{
+ unsigned long flags;
+
+ INT_OFF;
+ if (hlt_disabled){
+ hlt_disabled=0;
+#ifdef HAVE_DISABLE_HLT
+ enable_hlt();
+#endif
+ }
+ INT_ON;
+}
+
+
+static void setup_DMA(void)
+{
+ unsigned long flags;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (raw_cmd->length == 0){
+ int i;
+
+ printk("zero dma transfer size:");
+ for (i=0; i < raw_cmd->cmd_count; i++)
+ printk("%x,", raw_cmd->cmd[i]);
+ printk("\n");
+ cont->done(0);
+ FDCS->reset = 1;
+ return;
+ }
+ if ((long) raw_cmd->kernel_data % 512){
+ printk("non aligned address: %p\n", raw_cmd->kernel_data);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+ if (CROSS_64KB(raw_cmd->kernel_data, raw_cmd->length)) {
+ printk("DMA crossing 64-K boundary %p-%p\n",
+ raw_cmd->kernel_data,
+ raw_cmd->kernel_data + raw_cmd->length);
+ cont->done(0);
+ FDCS->reset=1;
+ return;
+ }
+#endif
+ INT_OFF;
+ fd_disable_dma();
+ fd_clear_dma_ff();
+ fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
+ fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ)?
+ DMA_MODE_READ : DMA_MODE_WRITE);
+ fd_set_dma_addr(virt_to_bus(raw_cmd->kernel_data));
+ fd_set_dma_count(raw_cmd->length);
+ virtual_dma_port = FDCS->address;
+ fd_enable_dma();
+ INT_ON;
+ floppy_disable_hlt();
+}
+
+void show_floppy(void);
+
+/* waits until the fdc becomes ready */
+static int wait_til_ready(void)
+{
+ int counter, status;
+ if(FDCS->reset)
+ return -1;
+ for (counter = 0; counter < 10000; counter++) {
+ status = fd_inb(FD_STATUS);
+ if (status & STATUS_READY)
+ return status;
+ }
+ if (!initialising) {
+ DPRINT("Getstatus times out (%x) on fdc %d\n",
+ status, fdc);
+ show_floppy();
+ }
+ FDCS->reset = 1;
+ return -1;
+}
+
+/* sends a command byte to the fdc */
+static int output_byte(char byte)
+{
+ int status;
+
+ if ((status = wait_til_ready()) < 0)
+ return -1;
+ if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY){
+ fd_outb(byte,FD_DATA);
+#ifdef FLOPPY_SANITY_CHECK
+ output_log[output_log_pos].data = byte;
+ output_log[output_log_pos].status = status;
+ output_log[output_log_pos].jiffies = jiffies;
+ output_log_pos = (output_log_pos + 1) % OLOGSIZE;
+#endif
+ return 0;
+ }
+ FDCS->reset = 1;
+ if (!initialising) {
+ DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
+ byte, fdc, status);
+ show_floppy();
+ }
+ return -1;
+}
+#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;}
+
+/* gets the response from the fdc */
+static int result(void)
+{
+ int i, status;
+
+ for(i=0; i < MAX_REPLIES; i++) {
+ if ((status = wait_til_ready()) < 0)
+ break;
+ status &= STATUS_DIR|STATUS_READY|STATUS_BUSY|STATUS_DMA;
+ if ((status & ~STATUS_BUSY) == STATUS_READY){
+#ifdef FLOPPY_SANITY_CHECK
+ resultjiffies = jiffies;
+ resultsize = i;
+#endif
+ return i;
+ }
+ if (status == (STATUS_DIR|STATUS_READY|STATUS_BUSY))
+ reply_buffer[i] = fd_inb(FD_DATA);
+ else
+ break;
+ }
+ if(!initialising) {
+ DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
+ fdc, status, i);
+ show_floppy();
+ }
+ FDCS->reset = 1;
+ return -1;
+}
+
+#define MORE_OUTPUT -2
+/* does the fdc need more output? */
+static int need_more_output(void)
+{
+ int status;
+ if( (status = wait_til_ready()) < 0)
+ return -1;
+ if ((status & (STATUS_READY|STATUS_DIR|STATUS_DMA)) == STATUS_READY)
+ return MORE_OUTPUT;
+ return result();
+}
+
+/* Set perpendicular mode as required, based on data rate, if supported.
+ * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
+ */
+static inline void perpendicular_mode(void)
+{
+ unsigned char perp_mode;
+
+ if (raw_cmd->rate & 0x40){
+ switch(raw_cmd->rate & 3){
+ case 0:
+ perp_mode=2;
+ break;
+ case 3:
+ perp_mode=3;
+ break;
+ default:
+ DPRINT("Invalid data rate for perpendicular mode!\n");
+ cont->done(0);
+ FDCS->reset = 1; /* convenient way to return to
+					  * redo without too much hassle (deep
+					  * stack et al.) */
+ return;
+ }
+ } else
+ perp_mode = 0;
+
+ if (FDCS->perp_mode == perp_mode)
+ return;
+ if (FDCS->version >= FDC_82077_ORIG) {
+ output_byte(FD_PERPENDICULAR);
+ output_byte(perp_mode);
+ FDCS->perp_mode = perp_mode;
+ } else if (perp_mode) {
+ DPRINT("perpendicular mode not supported by this FDC.\n");
+ }
+} /* perpendicular_mode */
+
+static int fifo_depth = 0xa;
+static int no_fifo = 0;
+
+static int fdc_configure(void)
+{
+ /* Turn on FIFO */
+ output_byte(FD_CONFIGURE);
+ if(need_more_output() != MORE_OUTPUT)
+ return 0;
+ output_byte(0);
+ output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
+ output_byte(0); /* pre-compensation from track
+ 0 upwards */
+ return 1;
+}
+
+#define NOMINAL_DTR 500
+
+/* Issue a "SPECIFY" command to set the step rate time, head unload time,
+ * head load time, and DMA disable flag to values needed by floppy.
+ *
+ * The value "dtr" is the data transfer rate in Kbps. It is needed
+ * to account for the data rate-based scaling done by the 82072 and 82077
+ * FDC types. This parameter is ignored for other types of FDCs (i.e.
+ * 8272a).
+ *
+ * Note that changing the data transfer rate has a (probably deleterious)
+ * effect on the parameters subject to scaling for 82072/82077 FDCs, so
+ * fdc_specify is called again after each data transfer rate
+ * change.
+ *
+ * srt: 1000 to 16000 in microseconds
+ * hut: 16 to 240 milliseconds
+ * hlt: 2 to 254 milliseconds
+ *
+ * These values are rounded up to the next highest available delay time.
+ */
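+/* A rough worked example of the encoding below (values assumed for
+ * illustration): with DP->srt == 6000 (a 6 ms step rate expressed in us)
+ * at the nominal 500 kbps rate, srt = 16 - (6000*500/1000 + 499)/500 = 10,
+ * which the FDC interprets as roughly (16 - 10) = 6 ms per step at 500 kbps.
+ * The results are then packed as spec1 = (srt << 4) | hut and
+ * spec2 = (hlt << 1) | nd for the SPECIFY command. */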
+static void fdc_specify(void)
+{
+ unsigned char spec1, spec2;
+ int srt, hlt, hut;
+ unsigned long dtr = NOMINAL_DTR;
+ unsigned long scale_dtr = NOMINAL_DTR;
+ int hlt_max_code = 0x7f;
+ int hut_max_code = 0xf;
+
+ if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
+ fdc_configure();
+ FDCS->need_configure = 0;
+ /*DPRINT("FIFO enabled\n");*/
+ }
+
+ switch (raw_cmd->rate & 0x03) {
+ case 3:
+ dtr = 1000;
+ break;
+ case 1:
+ dtr = 300;
+ if (FDCS->version >= FDC_82078) {
+ /* chose the default rate table, not the one
+ * where 1 = 2 Mbps */
+ output_byte(FD_DRIVESPEC);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(UNIT(current_drive));
+ output_byte(0xc0);
+ }
+ }
+ break;
+ case 2:
+ dtr = 250;
+ break;
+ }
+
+ if (FDCS->version >= FDC_82072) {
+ scale_dtr = dtr;
+ hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
+ hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
+ }
+
+ /* Convert step rate from microseconds to milliseconds and 4 bits */
+ srt = 16 - (DP->srt*scale_dtr/1000 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ SUPBOUND(srt, 0xf);
+ INFBOUND(srt, 0);
+
+ hlt = (DP->hlt*scale_dtr/2 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hlt < 0x01)
+ hlt = 0x01;
+ else if (hlt > 0x7f)
+ hlt = hlt_max_code;
+
+ hut = (DP->hut*scale_dtr/16 + NOMINAL_DTR - 1)/NOMINAL_DTR;
+ if (hut < 0x1)
+ hut = 0x1;
+ else if (hut > 0xf)
+ hut = hut_max_code;
+
+ spec1 = (srt << 4) | hut;
+ spec2 = (hlt << 1) | (use_virtual_dma & 1);
+
+	/* Issue the SPECIFY command only if these parameters changed;
+	 * otherwise keep the FDC's current settings. */
+ if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
+ /* Go ahead and set spec1 and spec2 */
+ output_byte(FD_SPECIFY);
+ output_byte(FDCS->spec1 = spec1);
+ output_byte(FDCS->spec2 = spec2);
+ }
+} /* fdc_specify */
+
+/* Set the FDC's data transfer rate on behalf of the specified drive.
+ * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
+ * of the specify command (i.e. using the fdc_specify function).
+ */
+static int fdc_dtr(void)
+{
+ /* If data rate not already set to desired value, set it. */
+ if ((raw_cmd->rate & 3) == FDCS->dtr)
+ return 0;
+
+ /* Set dtr */
+ fd_outb(raw_cmd->rate & 3, FD_DCR);
+
+ /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
+ * need a stabilization period of several milliseconds to be
+ * enforced after data rate changes before R/W operations.
+ * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
+ */
+ FDCS->dtr = raw_cmd->rate & 3;
+ return(wait_for_completion(jiffies+2*HZ/100,
+ (timeout_fn) floppy_ready));
+} /* fdc_dtr */
+
+static void tell_sector(void)
+{
+ printk(": track %d, head %d, sector %d, size %d",
+ R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
+} /* tell_sector */
+
+
+/*
+ * OK, this error interpreting routine is called after a
+ * DMA read/write has succeeded
+ * or failed, so we check the results, and copy any buffers.
+ * hhb: Added better error reporting.
+ * ak: Made this into a separate routine.
+ */
+static int interpret_errors(void)
+{
+ char bad;
+
+ if (inr!=7) {
+ DPRINT("-- FDC reply error");
+ FDCS->reset = 1;
+ return 1;
+ }
+
+ /* check IC to find cause of interrupt */
+ switch (ST0 & ST0_INTR) {
+ case 0x40: /* error occurred during command execution */
+ if (ST1 & ST1_EOC)
+ return 0; /* occurs with pseudo-DMA */
+ bad = 1;
+ if (ST1 & ST1_WP) {
+ DPRINT("Drive is write protected\n");
+ CLEARF(FD_DISK_WRITABLE);
+ cont->done(0);
+ bad = 2;
+ } else if (ST1 & ST1_ND) {
+ SETF(FD_NEED_TWADDLE);
+ } else if (ST1 & ST1_OR) {
+ if (DP->flags & FTD_MSG)
+ DPRINT("Over/Underrun - retrying\n");
+ bad = 0;
+ }else if (*errors >= DP->max_errors.reporting){
+ DPRINT("");
+ if (ST0 & ST0_ECE) {
+ printk("Recalibrate failed!");
+ } else if (ST2 & ST2_CRC) {
+ printk("data CRC error");
+ tell_sector();
+ } else if (ST1 & ST1_CRC) {
+ printk("CRC error");
+ tell_sector();
+ } else if ((ST1 & (ST1_MAM|ST1_ND)) || (ST2 & ST2_MAM)) {
+ if (!probing) {
+ printk("sector not found");
+ tell_sector();
+ } else
+ printk("probe failed...");
+ } else if (ST2 & ST2_WC) { /* seek error */
+ printk("wrong cylinder");
+ } else if (ST2 & ST2_BC) { /* cylinder marked as bad */
+ printk("bad cylinder");
+ } else {
+ printk("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x", ST0, ST1, ST2);
+ tell_sector();
+ }
+ printk("\n");
+
+ }
+ if (ST2 & ST2_WC || ST2 & ST2_BC)
+ /* wrong cylinder => recal */
+ DRS->track = NEED_2_RECAL;
+ return bad;
+ case 0x80: /* invalid command given */
+ DPRINT("Invalid FDC command given!\n");
+ cont->done(0);
+ return 2;
+ case 0xc0:
+ DPRINT("Abnormal termination caused by polling\n");
+ cont->error();
+ return 2;
+ default: /* (0) Normal command termination */
+ return 0;
+ }
+}
+
+/*
+ * This routine is called when everything should be correctly set up
+ * for the transfer (i.e. floppy motor is on, the correct floppy is
+ * selected, and the head is sitting on the right track).
+ */
+static void setup_rw_floppy(void)
+{
+ int i,ready_date,r, flags,dflags;
+ timeout_fn function;
+
+ flags = raw_cmd->flags;
+ if (flags & (FD_RAW_READ | FD_RAW_WRITE))
+ flags |= FD_RAW_INTR;
+
+ if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)){
+ ready_date = DRS->spinup_date + DP->spinup;
+ /* If spinup will take a long time, rerun scandrives
+ * again just before spinup completion. Beware that
+ * after scandrives, we must again wait for selection.
+ */
+ if ((signed) (ready_date - jiffies) > DP->select_delay){
+ ready_date -= DP->select_delay;
+ function = (timeout_fn) floppy_start;
+ } else
+ function = (timeout_fn) setup_rw_floppy;
+
+ /* wait until the floppy is spinning fast enough */
+ if (wait_for_completion(ready_date,function))
+ return;
+ }
+ dflags = DRS->flags;
+
+ if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
+ setup_DMA();
+
+ if (flags & FD_RAW_INTR)
+ SET_INTR(main_command_interrupt);
+
+ r=0;
+ for (i=0; i< raw_cmd->cmd_count; i++)
+ r|=output_byte(raw_cmd->cmd[i]);
+
+#ifdef DEBUGT
+ debugt("rw_command: ");
+#endif
+ if (r){
+ cont->error();
+ reset_fdc();
+ return;
+ }
+
+ if (!(flags & FD_RAW_INTR)){
+ inr = result();
+ cont->interrupt();
+ } else if (flags & FD_RAW_NEED_DISK)
+ fd_watchdog();
+}
+
+static int blind_seek;
+
+/*
+ * This is the routine called after every seek (or recalibrate) interrupt
+ * from the floppy controller.
+ */
+static void seek_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("seek interrupt:");
+#endif
+ if (inr != 2 || (ST0 & 0xF8) != 0x20) {
+ DPRINT("seek failed\n");
+ DRS->track = NEED_2_RECAL;
+ cont->error();
+ cont->redo();
+ return;
+ }
+ if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek){
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of effective seek\n");
+ DPRINT("jiffies=%ld\n", jiffies);
+ }
+#endif
+ CLEARF(FD_DISK_NEWCHANGE); /* effective seek */
+ DRS->select_date = jiffies;
+ }
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+static void check_wp(void)
+{
+ if (TESTF(FD_VERIFY)) {
+ /* check write protection */
+ output_byte(FD_GETSTATUS);
+ output_byte(UNIT(current_drive));
+ if (result() != 1){
+ FDCS->reset = 1;
+ return;
+ }
+ CLEARF(FD_VERIFY);
+ CLEARF(FD_NEED_TWADDLE);
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("checking whether disk is write protected\n");
+ DPRINT("wp=%x\n",ST3 & 0x40);
+ }
+#endif
+ if (!(ST3 & 0x40))
+ SETF(FD_DISK_WRITABLE);
+ else
+ CLEARF(FD_DISK_WRITABLE);
+ }
+}
+
+static void seek_floppy(void)
+{
+ int track;
+
+ blind_seek=0;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from seek\n");
+ }
+#endif
+
+ if (!TESTF(FD_DISK_NEWCHANGE) &&
+ disk_change(current_drive) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK)){
+ /* the media changed flag should be cleared after the seek.
+ * If it isn't, this means that there is really no disk in
+ * the drive.
+ */
+ SETF(FD_DISK_CHANGED);
+ cont->done(0);
+ cont->redo();
+ return;
+ }
+ if (DRS->track <= NEED_1_RECAL){
+ recalibrate_floppy();
+ return;
+ } else if (TESTF(FD_DISK_NEWCHANGE) &&
+ (raw_cmd->flags & FD_RAW_NEED_DISK) &&
+ (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
+ /* we seek to clear the media-changed condition. Does anybody
+ * know a more elegant way, which works on all drives? */
+ if (raw_cmd->track)
+ track = raw_cmd->track - 1;
+ else {
+ if (DP->flags & FD_SILENT_DCL_CLEAR){
+ set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
+ blind_seek = 1;
+ raw_cmd->flags |= FD_RAW_NEED_SEEK;
+ }
+ track = 1;
+ }
+ } else {
+ check_wp();
+ if (raw_cmd->track != DRS->track &&
+ (raw_cmd->flags & FD_RAW_NEED_SEEK))
+ track = raw_cmd->track;
+ else {
+ setup_rw_floppy();
+ return;
+ }
+ }
+
+ SET_INTR(seek_interrupt);
+ output_byte(FD_SEEK);
+ output_byte(UNIT(current_drive));
+ LAST_OUT(track);
+#ifdef DEBUGT
+ debugt("seek command:");
+#endif
+}
+
+static void recal_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("recal interrupt:");
+#endif
+ if (inr !=2)
+ FDCS->reset = 1;
+ else if (ST0 & ST0_ECE) {
+ switch(DRS->track){
+ case NEED_1_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 1 recal:");
+#endif
+ /* after a second recalibrate, we still haven't
+ * reached track 0. Probably no drive. Raise an
+ * error, as failing immediately might upset
+ * computers possessed by the Devil :-) */
+ cont->error();
+ cont->redo();
+ return;
+ case NEED_2_RECAL:
+#ifdef DEBUGT
+ debugt("recal interrupt need 2 recal:");
+#endif
+ /* If we already did a recalibrate,
+ * and we are not at track 0, this
+ * means we have moved. (The only way
+ * not to move at recalibration is to
+ * be already at track 0.) Clear the
+ * new change flag */
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("clearing NEWCHANGE flag because of second recalibrate\n");
+ }
+#endif
+
+ CLEARF(FD_DISK_NEWCHANGE);
+ DRS->select_date = jiffies;
+ /* fall through */
+ default:
+#ifdef DEBUGT
+ debugt("recal interrupt default:");
+#endif
+ /* Recalibrate moves the head by at
+			 * recalibrate we still haven't reached
+			 * track 0, this might mean that we
+ * track 0, this might mean that we
+ * started beyond track 80. Try
+ * again. */
+ DRS->track = NEED_1_RECAL;
+ break;
+ }
+ } else
+ DRS->track = ST1;
+ floppy_ready();
+}
+
+static void print_result(char *message, int inr)
+{
+ int i;
+
+ DPRINT("%s ", message);
+ if (inr >= 0)
+ for (i=0; i<inr; i++)
+ printk("repl[%d]=%x ", i, reply_buffer[i]);
+ printk("\n");
+}
+
+/* interrupt handler */
+void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ void (*handler)(void) = DEVICE_INTR;
+ int do_print;
+
+ lasthandler = handler;
+ interruptjiffies = jiffies;
+
+ fd_disable_dma();
+ floppy_enable_hlt();
+ CLEAR_INTR;
+ if (fdc >= N_FDC || FDCS->address == -1){
+ /* we don't even know which FDC is the culprit */
+ printk("DOR0=%x\n", fdc_state[0].dor);
+ printk("floppy interrupt on bizarre fdc %d\n",fdc);
+ printk("handler=%p\n", handler);
+ is_alive("bizarre fdc");
+ return;
+ }
+
+ FDCS->reset = 0;
+ /* We have to clear the reset flag here, because apparently on boxes
+ * with level triggered interrupts (PS/2, Sparc, ...), it is needed to
+ * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
+ * emission of the SENSEI's.
+ * It is OK to emit floppy commands because we are in an interrupt
+ * handler here, and thus we have to fear no interference of other
+ * activity.
+ */
+
+ do_print = !handler && print_unex && !initialising;
+
+ inr = result();
+ if(do_print)
+ print_result("unexpected interrupt", inr);
+ if (inr == 0){
+ int max_sensei = 4;
+ do {
+ output_byte(FD_SENSEI);
+ inr = result();
+ if(do_print)
+ print_result("sensei", inr);
+ max_sensei--;
+ } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 && max_sensei);
+ }
+ if (handler) {
+ if(intr_count >= 2) {
+ /* expected interrupt */
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task_irq(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ } else
+ handler();
+ } else
+ FDCS->reset = 1;
+ is_alive("normal interrupt end");
+}
+
+static void recalibrate_floppy(void)
+{
+#ifdef DEBUGT
+ debugt("recalibrate floppy:");
+#endif
+ SET_INTR(recal_interrupt);
+ output_byte(FD_RECALIBRATE);
+ LAST_OUT(UNIT(current_drive));
+}
+
+/*
+ * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
+ */
+static void reset_interrupt(void)
+{
+#ifdef DEBUGT
+ debugt("reset interrupt:");
+#endif
+ result(); /* get the status ready for set_fdc */
+ if (FDCS->reset) {
+ printk("reset set in interrupt, calling %p\n", cont->error);
+ cont->error(); /* a reset just after a reset. BAD! */
+ }
+ cont->redo();
+}
+
+/*
+ * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
+ * or by setting the self clearing bit 7 of STATUS (newer FDCs)
+ */
+static void reset_fdc(void)
+{
+ SET_INTR(reset_interrupt);
+ FDCS->reset = 0;
+ reset_fdc_info(0);
+
+ /* Pseudo-DMA may intercept 'reset finished' interrupt. */
+ /* Irrelevant for systems with true DMA (i386). */
+ fd_disable_dma();
+
+ if (FDCS->version >= FDC_82072A)
+ fd_outb(0x80 | (FDCS->dtr &3), FD_STATUS);
+ else {
+ fd_outb(FDCS->dor & ~0x04, FD_DOR);
+ udelay(FD_RESET_DELAY);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+}
+
+void show_floppy(void)
+{
+ int i;
+
+ printk("\n");
+ printk("floppy driver state\n");
+ printk("-------------------\n");
+ printk("now=%ld last interrupt=%d last called handler=%p\n",
+ jiffies, interruptjiffies, lasthandler);
+
+
+#ifdef FLOPPY_SANITY_CHECK
+ printk("timeout_message=%s\n", timeout_message);
+ printk("last output bytes:\n");
+ for (i=0; i < OLOGSIZE; i++)
+ printk("%2x %2x %ld\n",
+ output_log[(i+output_log_pos) % OLOGSIZE].data,
+ output_log[(i+output_log_pos) % OLOGSIZE].status,
+ output_log[(i+output_log_pos) % OLOGSIZE].jiffies);
+ printk("last result at %d\n", resultjiffies);
+ printk("last redo_fd_request at %d\n", lastredo);
+ for (i=0; i<resultsize; i++){
+ printk("%2x ", reply_buffer[i]);
+ }
+ printk("\n");
+#endif
+
+ printk("status=%x\n", fd_inb(FD_STATUS));
+ printk("fdc_busy=%d\n", fdc_busy);
+ if (DEVICE_INTR)
+ printk("DEVICE_INTR=%p\n", DEVICE_INTR);
+ if (floppy_tq.sync)
+ printk("floppy_tq.routine=%p\n", floppy_tq.routine);
+ if (fd_timer.prev)
+ printk("fd_timer.function=%p\n", fd_timer.function);
+ if (fd_timeout.prev){
+ printk("timer_table=%p\n",fd_timeout.function);
+ printk("expires=%ld\n",fd_timeout.expires-jiffies);
+ printk("now=%ld\n",jiffies);
+ }
+ printk("cont=%p\n", cont);
+ printk("CURRENT=%p\n", CURRENT);
+ printk("command_status=%d\n", command_status);
+ printk("\n");
+}
+
+static void floppy_shutdown(void)
+{
+ if (!initialising)
+ show_floppy();
+ cancel_activity();
+ sti();
+
+ floppy_enable_hlt();
+ fd_disable_dma();
+ /* avoid dma going to a random drive after shutdown */
+
+ if (!initialising)
+ DPRINT("floppy timeout called\n");
+ FDCS->reset = 1;
+ if (cont){
+ cont->done(0);
+ cont->redo(); /* this will recall reset when needed */
+ } else {
+ printk("no cont in shutdown!\n");
+ process_fd_request();
+ }
+ is_alive("floppy shutdown");
+}
+/*typedef void (*timeout_fn)(unsigned long);*/
+
+/* start motor, check media-changed condition and write protection */
+static int start_motor(void (*function)(void) )
+{
+ int mask, data;
+
+ mask = 0xfc;
+ data = UNIT(current_drive);
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)){
+ if (!(FDCS->dor & (0x10 << UNIT(current_drive)))){
+ set_debugt();
+ /* no read since this drive is running */
+ DRS->first_read_date = 0;
+ /* note motor start time if motor is not yet running */
+ DRS->spinup_date = jiffies;
+ data |= (0x10 << UNIT(current_drive));
+ }
+ } else
+ if (FDCS->dor & (0x10 << UNIT(current_drive)))
+ mask &= ~(0x10 << UNIT(current_drive));
+
+ /* starts motor and selects floppy */
+ del_timer(motor_off_timer + current_drive);
+ set_dor(fdc, mask, data);
+
+ /* wait_for_completion also schedules reset if needed. */
+ return(wait_for_completion(DRS->select_date+DP->select_delay,
+ (timeout_fn) function));
+}
+
+static void floppy_ready(void)
+{
+ CHECK_RESET;
+ if (start_motor(floppy_ready)) return;
+ if (fdc_dtr()) return;
+
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from floppy_ready\n");
+ }
+#endif
+
+ if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
+ disk_change(current_drive) &&
+ !DP->select_delay)
+ twaddle(); /* this clears the dcl on certain drive/controller
+ * combinations */
+
+ if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)){
+ perpendicular_mode();
+ fdc_specify(); /* must be done here because of hut, hlt ... */
+ seek_floppy();
+ } else
+ setup_rw_floppy();
+}
+
+static void floppy_start(void)
+{
+ reschedule_timeout(CURRENTD, "floppy start", 0);
+
+ scandrives();
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("setting NEWCHANGE in floppy_start\n");
+ }
+#endif
+ SETF(FD_DISK_NEWCHANGE);
+ floppy_ready();
+}
+
+/*
+ * ========================================================================
+ * here ends the bottom half. Exported routines are:
+ * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
+ * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
+ * Initialization also uses output_byte, result, set_dor and
+ * floppy_interrupt.
+ * ========================================================================
+ */
+/*
+ * General purpose continuations.
+ * ==============================
+ */
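+/* The four entries of each cont_t initializer below are, in order, the
+ * interrupt handler (run when the FDC command completes), the redo handler
+ * (used to (re)issue the operation), the error handler (used to tally an
+ * error) and the done handler (called with the final success/failure flag).
+ */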
+
+static void do_wakeup(void)
+{
+ reschedule_timeout(MAXTIMEOUT, "do wakeup", 0);
+ cont = 0;
+ command_status += 2;
+ wake_up(&command_done);
+}
+
+static struct cont_t wakeup_cont={
+ empty,
+ do_wakeup,
+ empty,
+ (done_f)empty
+};
+
+
+static struct cont_t intr_cont={
+ empty,
+ process_fd_request,
+ empty,
+ (done_f) empty
+};
+
+static int wait_til_done(void (*handler)(void), int interruptible)
+{
+ int ret;
+ unsigned long flags;
+
+ floppy_tq.routine = (void *)(void *) handler;
+ queue_task(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+ INT_OFF;
+ while(command_status < 2 && NO_SIGNAL){
+ is_alive("wait_til_done");
+ if (interruptible)
+ interruptible_sleep_on(&command_done);
+ else
+ sleep_on(&command_done);
+ }
+ if (command_status < 2){
+ cancel_activity();
+ cont = &intr_cont;
+ reset_fdc();
+ INT_ON;
+ return -EINTR;
+ }
+ INT_ON;
+
+ if (FDCS->reset)
+ command_status = FD_COMMAND_ERROR;
+ if (command_status == FD_COMMAND_OKAY)
+ ret=0;
+ else
+ ret=-EIO;
+ command_status = FD_COMMAND_NONE;
+ return ret;
+}
+
+static void generic_done(int result)
+{
+ command_status = result;
+ cont = &wakeup_cont;
+}
+
+static void generic_success(void)
+{
+ cont->done(1);
+}
+
+static void generic_failure(void)
+{
+ cont->done(0);
+}
+
+static void success_and_wakeup(void)
+{
+ generic_success();
+ cont->redo();
+}
+
+
+/*
+ * formatting and rw support.
+ * ==========================
+ */
+
+static int next_valid_format(void)
+{
+ int probed_format;
+
+ probed_format = DRS->probed_format;
+ while(1){
+ if (probed_format >= 8 ||
+ !DP->autodetect[probed_format]){
+ DRS->probed_format = 0;
+ return 1;
+ }
+ if (floppy_type[DP->autodetect[probed_format]].sect){
+ DRS->probed_format = probed_format;
+ return 0;
+ }
+ probed_format++;
+ }
+}
+
+static void bad_flp_intr(void)
+{
+ if (probing){
+ DRS->probed_format++;
+ if (!next_valid_format())
+ return;
+ }
+ (*errors)++;
+ INFBOUND(DRWE->badness, *errors);
+ if (*errors > DP->max_errors.abort)
+ cont->done(0);
+ if (*errors > DP->max_errors.reset)
+ FDCS->reset = 1;
+ else if (*errors > DP->max_errors.recal)
+ DRS->track = NEED_2_RECAL;
+}
+
+static void set_floppy(kdev_t device)
+{
+ if (TYPE(device))
+ _floppy = TYPE(device) + floppy_type;
+ else
+ _floppy = current_type[ DRIVE(device) ];
+}
+
+/*
+ * formatting support.
+ * ===================
+ */
+static void format_interrupt(void)
+{
+ switch (interpret_errors()){
+ case 1:
+ cont->error();
+ case 2:
+ break;
+ case 0:
+ cont->done(1);
+ }
+ cont->redo();
+}
+
+#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2)
+#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1))
+#define CT(x) ((x) | 0x40)
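+/* CODE2SIZE turns the FDC size code (sector size == 128 << SIZECODE bytes)
+ * into ssize, the sector size in 512-byte units, never less than 1:
+ * SIZECODE 2 (512 B) -> 1, SIZECODE 3 (1 kB) -> 2, SIZECODE 0/1 -> 1.
+ * FM_MODE clears the MFM bit (0x40) of a command opcode when bit 0x80 of
+ * the format's rate field is set (apparently marking FM/single-density
+ * media), and CT() or's that bit back in so command-type comparisons such
+ * as CT(COMMAND) == FD_READ hold for both recording modes. */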
+static void setup_format_params(int track)
+{
+ struct fparm {
+ unsigned char track,head,sect,size;
+ } *here = (struct fparm *)floppy_track_buffer;
+ int il,n;
+ int count,head_shift,track_shift;
+
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->track = track;
+
+ raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
+ FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+ raw_cmd->rate = _floppy->rate & 0x43;
+ raw_cmd->cmd_count = NR_F;
+ COMMAND = FM_MODE(_floppy,FD_FORMAT);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,format_req.head);
+ F_SIZECODE = FD_SIZECODE(_floppy);
+ F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
+ F_GAP = _floppy->fmt_gap;
+ F_FILL = FD_FILL_BYTE;
+
+ raw_cmd->kernel_data = floppy_track_buffer;
+ raw_cmd->length = 4 * F_SECT_PER_TRACK;
+
+ /* allow for about 30ms for data transport per track */
+ head_shift = (F_SECT_PER_TRACK + 5) / 6;
+
+ /* a ``cylinder'' is two tracks plus a little stepping time */
+ track_shift = 2 * head_shift + 3;
+
+ /* position of logical sector 1 on this track */
+ n = (track_shift * format_req.track + head_shift * format_req.head)
+ % F_SECT_PER_TRACK;
+
+ /* determine interleave */
+ il = 1;
+ if (_floppy->fmt_gap < 0x22)
+ il++;
+
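+	/* Example, assuming the standard 1.44M layout (18 sectors/track):
+	 * head_shift = (18+5)/6 = 3 sectors (~33 ms of a 200 ms revolution),
+	 * track_shift = 9, so logical sector 1 of cylinder c, head h lands in
+	 * slot (9*c + 3*h) % 18; only tightly packed formats with
+	 * fmt_gap < 0x22 get an interleave of 2. */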
+ /* initialize field */
+ for (count = 0; count < F_SECT_PER_TRACK; ++count) {
+ here[count].track = format_req.track;
+ here[count].head = format_req.head;
+ here[count].sect = 0;
+ here[count].size = F_SIZECODE;
+ }
+ /* place logical sectors */
+ for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
+ here[n].sect = count;
+ n = (n+il) % F_SECT_PER_TRACK;
+ if (here[n].sect) { /* sector busy, find next free sector */
+ ++n;
+ if (n>= F_SECT_PER_TRACK) {
+ n-=F_SECT_PER_TRACK;
+ while (here[n].sect) ++n;
+ }
+ }
+ }
+}
+
+static void redo_format(void)
+{
+ buffer_track = -1;
+ setup_format_params(format_req.track << STRETCH(_floppy));
+ floppy_start();
+#ifdef DEBUGT
+ debugt("queue format request");
+#endif
+}
+
+static struct cont_t format_cont={
+ format_interrupt,
+ redo_format,
+ bad_flp_intr,
+ generic_done };
+
+static int do_format(kdev_t device, struct format_descr *tmp_format_req)
+{
+ int ret;
+ int drive=DRIVE(device);
+
+ LOCK_FDC(drive,1);
+ set_floppy(device);
+ if (!_floppy ||
+ _floppy->track > DP->tracks ||
+ tmp_format_req->track >= _floppy->track ||
+ tmp_format_req->head >= _floppy->head ||
+ (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
+ !_floppy->fmt_gap) {
+ process_fd_request();
+ return -EINVAL;
+ }
+ format_req = *tmp_format_req;
+ format_errors = 0;
+ cont = &format_cont;
+ errors = &format_errors;
+ IWAIT(redo_format);
+ process_fd_request();
+ return ret;
+}
+
+/*
+ * Buffer read/write and support
+ * =============================
+ */
+
+/* new request_done. Can handle physical sectors which are smaller than a
+ * logical buffer */
+static void request_done(int uptodate)
+{
+ int block;
+
+ probing = 0;
+ reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
+
+ if (!CURRENT){
+ DPRINT("request list destroyed in floppy request done\n");
+ return;
+ }
+
+ if (uptodate){
+ /* maintain values for invalidation on geometry
+ * change */
+ block = current_count_sectors + CURRENT->sector;
+ INFBOUND(DRS->maxblock, block);
+ if (block > _floppy->sect)
+ DRS->maxtrack = 1;
+
+ /* unlock chained buffers */
+ while (current_count_sectors && CURRENT &&
+ current_count_sectors >= CURRENT->current_nr_sectors){
+ current_count_sectors -= CURRENT->current_nr_sectors;
+ CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+ CURRENT->sector += CURRENT->current_nr_sectors;
+ end_request(1);
+ }
+ if (current_count_sectors && CURRENT){
+ /* "unlock" last subsector */
+ CURRENT->buffer += current_count_sectors <<9;
+ CURRENT->current_nr_sectors -= current_count_sectors;
+ CURRENT->nr_sectors -= current_count_sectors;
+ CURRENT->sector += current_count_sectors;
+ return;
+ }
+
+ if (current_count_sectors && !CURRENT)
+ DPRINT("request list destroyed in floppy request done\n");
+
+ } else {
+ if (CURRENT->cmd == WRITE) {
+ /* record write error information */
+ DRWE->write_errors++;
+ if (DRWE->write_errors == 1) {
+ DRWE->first_error_sector = CURRENT->sector;
+ DRWE->first_error_generation = DRS->generation;
+ }
+ DRWE->last_error_sector = CURRENT->sector;
+ DRWE->last_error_generation = DRS->generation;
+ }
+ end_request(0);
+ }
+}
+
+/* Interrupt handler evaluating the result of the r/w operation */
+static void rw_interrupt(void)
+{
+ int nr_sectors, ssize, eoc;
+
+ if (!DRS->first_read_date)
+ DRS->first_read_date = jiffies;
+
+ nr_sectors = 0;
+ CODE2SIZE;
+
+ if(ST1 & ST1_EOC)
+ eoc = 1;
+ else
+ eoc = 0;
+ nr_sectors = ((R_TRACK-TRACK)*_floppy->head+R_HEAD-HEAD) *
+ _floppy->sect + ((R_SECTOR-SECTOR+eoc) << SIZECODE >> 2) -
+ (sector_t % _floppy->sect) % ssize;
+
+#ifdef FLOPPY_SANITY_CHECK
+ if (nr_sectors > current_count_sectors + ssize -
+ (current_count_sectors + sector_t) % ssize +
+ sector_t % ssize){
+ DPRINT("long rw: %x instead of %lx\n",
+ nr_sectors, current_count_sectors);
+ printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
+ printk("rh=%d h=%d\n", R_HEAD, HEAD);
+ printk("rt=%d t=%d\n", R_TRACK, TRACK);
+ printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
+ sector_t, ssize);
+ }
+#endif
+ INFBOUND(nr_sectors,0);
+ SUPBOUND(current_count_sectors, nr_sectors);
+
+ switch (interpret_errors()){
+ case 2:
+ cont->redo();
+ return;
+ case 1:
+ if (!current_count_sectors){
+ cont->error();
+ cont->redo();
+ return;
+ }
+ break;
+ case 0:
+ if (!current_count_sectors){
+ cont->redo();
+ return;
+ }
+ current_type[current_drive] = _floppy;
+ floppy_sizes[TOMINOR(current_drive) ]= _floppy->size>>1;
+ break;
+ }
+
+ if (probing) {
+ if (DP->flags & FTD_MSG)
+ DPRINT("Auto-detected floppy type %s in fd%d\n",
+ _floppy->name,current_drive);
+ current_type[current_drive] = _floppy;
+ floppy_sizes[TOMINOR(current_drive)] = _floppy->size >> 1;
+ probing = 0;
+ }
+
+ if (CT(COMMAND) != FD_READ ||
+ raw_cmd->kernel_data == CURRENT->buffer){
+ /* transfer directly from buffer */
+ cont->done(1);
+ } else if (CT(COMMAND) == FD_READ){
+ buffer_track = raw_cmd->track;
+ buffer_drive = current_drive;
+ INFBOUND(buffer_max, nr_sectors + sector_t);
+ }
+ cont->redo();
+}
+
+/* Compute maximal contiguous buffer size. */
+static int buffer_chain_size(void)
+{
+ struct buffer_head *bh;
+ int size;
+ char *base;
+
+ base = CURRENT->buffer;
+ size = CURRENT->current_nr_sectors << 9;
+ bh = CURRENT->bh;
+
+ if (bh){
+ bh = bh->b_reqnext;
+ while (bh && bh->b_data == base + size){
+ size += bh->b_size;
+ bh = bh->b_reqnext;
+ }
+ }
+ return size >> 9;
+}
+
+/* Compute the maximal transfer size */
+static int transfer_size(int ssize, int max_sector, int max_size)
+{
+ SUPBOUND(max_sector, sector_t + max_size);
+
+ /* alignment */
+ max_sector -= (max_sector % _floppy->sect) % ssize;
+
+ /* transfer size, beginning not aligned */
+ current_count_sectors = max_sector - sector_t ;
+
+ return max_sector;
+}
+
+/*
+ * Move data from/to the track buffer to/from the buffer cache.
+ */
+static void copy_buffer(int ssize, int max_sector, int max_sector_2)
+{
+ int remaining; /* number of transferred 512-byte sectors */
+ struct buffer_head *bh;
+ char *buffer, *dma_buffer;
+ int size;
+
+ max_sector = transfer_size(ssize,
+ minimum(max_sector, max_sector_2),
+ CURRENT->nr_sectors);
+
+ if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
+ buffer_max > sector_t + CURRENT->nr_sectors)
+ current_count_sectors = minimum(buffer_max - sector_t,
+ CURRENT->nr_sectors);
+
+ remaining = current_count_sectors << 9;
+#ifdef FLOPPY_SANITY_CHECK
+ if ((remaining >> 9) > CURRENT->nr_sectors &&
+ CT(COMMAND) == FD_WRITE){
+ DPRINT("in copy buffer\n");
+ printk("current_count_sectors=%ld\n", current_count_sectors);
+ printk("remaining=%d\n", remaining >> 9);
+ printk("CURRENT->nr_sectors=%ld\n",CURRENT->nr_sectors);
+ printk("CURRENT->current_nr_sectors=%ld\n",
+ CURRENT->current_nr_sectors);
+ printk("max_sector=%d\n", max_sector);
+ printk("ssize=%d\n", ssize);
+ }
+#endif
+
+ buffer_max = maximum(max_sector, buffer_max);
+
+ dma_buffer = floppy_track_buffer + ((sector_t - buffer_min) << 9);
+
+ bh = CURRENT->bh;
+ size = CURRENT->current_nr_sectors << 9;
+ buffer = CURRENT->buffer;
+
+ while (remaining > 0){
+ SUPBOUND(size, remaining);
+#ifdef FLOPPY_SANITY_CHECK
+ if (dma_buffer + size >
+ floppy_track_buffer + (max_buffer_sectors << 10) ||
+ dma_buffer < floppy_track_buffer){
+ DPRINT("buffer overrun in copy buffer %d\n",
+ (int) ((floppy_track_buffer - dma_buffer) >>9));
+ printk("sector_t=%d buffer_min=%d\n",
+ sector_t, buffer_min);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
+ if (CT(COMMAND) == FD_READ)
+ printk("read\n");
+			if (CT(COMMAND) == FD_WRITE)
+ printk("write\n");
+ break;
+ }
+ if (((unsigned long)buffer) % 512)
+ DPRINT("%p buffer not aligned\n", buffer);
+#endif
+ if (CT(COMMAND) == FD_READ)
+ memcpy(buffer, dma_buffer, size);
+ else
+ memcpy(dma_buffer, buffer, size);
+ remaining -= size;
+ if (!remaining)
+ break;
+
+ dma_buffer += size;
+ bh = bh->b_reqnext;
+#ifdef FLOPPY_SANITY_CHECK
+ if (!bh){
+ DPRINT("bh=null in copy buffer after copy\n");
+ break;
+ }
+#endif
+ size = bh->b_size;
+ buffer = bh->b_data;
+ }
+#ifdef FLOPPY_SANITY_CHECK
+ if (remaining){
+ if (remaining > 0)
+ max_sector -= remaining >> 9;
+ DPRINT("weirdness: remaining %d\n", remaining>>9);
+ }
+#endif
+}
+
+/*
+ * Formulate a read/write request.
+ * This routine decides where to load the data (directly into the buffer, or
+ * into the tmp floppy area), and how much data to load (the size of the
+ * buffer, the whole track, or a single sector).
+ * All floppy_track_buffer handling goes in here. If we ever add track buffer
+ * allocation on the fly, it should be done here. No other part should need
+ * modification.
+ */
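+/* Return value convention (as used by redo_fd_request below): 0 means the
+ * request cannot be carried out and is failed, 1 means it was satisfied
+ * without touching the hardware (e.g. a read served from the track buffer),
+ * and 2 means a raw command has been set up and the FDC transfer should now
+ * be started. */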
+
+static int make_raw_rw_request(void)
+{
+ int aligned_sector_t;
+ int max_sector, max_size, tracksize, ssize;
+
+ set_fdc(DRIVE(CURRENT->rq_dev));
+
+ raw_cmd = &default_raw_cmd;
+	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK |
+ FD_RAW_NEED_SEEK;
+ raw_cmd->cmd_count = NR_RW;
+ if (CURRENT->cmd == READ){
+ raw_cmd->flags |= FD_RAW_READ;
+ COMMAND = FM_MODE(_floppy,FD_READ);
+ } else if (CURRENT->cmd == WRITE){
+ raw_cmd->flags |= FD_RAW_WRITE;
+ COMMAND = FM_MODE(_floppy,FD_WRITE);
+ } else {
+ DPRINT("make_raw_rw_request: unknown command\n");
+ return 0;
+ }
+
+ max_sector = _floppy->sect * _floppy->head;
+
+ TRACK = CURRENT->sector / max_sector;
+ sector_t = CURRENT->sector % max_sector;
+ if (_floppy->track && TRACK >= _floppy->track)
+ return 0;
+ HEAD = sector_t / _floppy->sect;
+
+ if (((_floppy->stretch & FD_SWAPSIDES) || TESTF(FD_NEED_TWADDLE)) &&
+ sector_t < _floppy->sect)
+ max_sector = _floppy->sect;
+
+ /* 2M disks have phantom sectors on the first track */
+ if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)){
+ max_sector = 2 * _floppy->sect / 3;
+ if (sector_t >= max_sector){
+ current_count_sectors = minimum(_floppy->sect - sector_t,
+ CURRENT->nr_sectors);
+ return 1;
+ }
+ SIZECODE = 2;
+ } else
+ SIZECODE = FD_SIZECODE(_floppy);
+ raw_cmd->rate = _floppy->rate & 0x43;
+ if ((_floppy->rate & FD_2M) &&
+ (TRACK || HEAD) &&
+ raw_cmd->rate == 2)
+ raw_cmd->rate = 1;
+
+ if (SIZECODE)
+ SIZECODE2 = 0xff;
+ else
+ SIZECODE2 = 0x80;
+ raw_cmd->track = TRACK << STRETCH(_floppy);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy,HEAD);
+ GAP = _floppy->gap;
+ CODE2SIZE;
+ SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
+ SECTOR = ((sector_t % _floppy->sect) << 2 >> SIZECODE) + 1;
+ tracksize = _floppy->sect - _floppy->sect % ssize;
+ if (tracksize < _floppy->sect){
+ SECT_PER_TRACK ++;
+ if (tracksize <= sector_t % _floppy->sect)
+ SECTOR--;
+ while (tracksize <= sector_t % _floppy->sect){
+ while(tracksize + ssize > _floppy->sect){
+ SIZECODE--;
+ ssize >>= 1;
+ }
+ SECTOR++; SECT_PER_TRACK ++;
+ tracksize += ssize;
+ }
+ max_sector = HEAD * _floppy->sect + tracksize;
+ } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing)
+ max_sector = _floppy->sect;
+
+ aligned_sector_t = sector_t - (sector_t % _floppy->sect) % ssize;
+ max_size = CURRENT->nr_sectors;
+ if ((raw_cmd->track == buffer_track) &&
+ (current_drive == buffer_drive) &&
+ (sector_t >= buffer_min) && (sector_t < buffer_max)) {
+ /* data already in track buffer */
+ if (CT(COMMAND) == FD_READ) {
+ copy_buffer(1, max_sector, buffer_max);
+ return 1;
+ }
+ } else if (aligned_sector_t != sector_t || CURRENT->nr_sectors < ssize){
+ if (CT(COMMAND) == FD_WRITE){
+ if (sector_t + CURRENT->nr_sectors > ssize &&
+ sector_t + CURRENT->nr_sectors < ssize + ssize)
+ max_size = ssize + ssize;
+ else
+ max_size = ssize;
+ }
+ raw_cmd->flags &= ~FD_RAW_WRITE;
+ raw_cmd->flags |= FD_RAW_READ;
+ COMMAND = FM_MODE(_floppy,FD_READ);
+ } else if ((unsigned long)CURRENT->buffer < MAX_DMA_ADDRESS) {
+ unsigned long dma_limit;
+ int direct, indirect;
+
+ indirect= transfer_size(ssize,max_sector,max_buffer_sectors*2) -
+ sector_t;
+
+ /*
+ * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
+ * on a 64 bit machine!
+ */
+ max_size = buffer_chain_size();
+ dma_limit = (MAX_DMA_ADDRESS - ((unsigned long) CURRENT->buffer)) >> 9;
+ if ((unsigned long) max_size > dma_limit) {
+ max_size = dma_limit;
+ }
+ /* 64 kb boundaries */
+ if (CROSS_64KB(CURRENT->buffer, max_size << 9))
+ max_size = (K_64 - ((long) CURRENT->buffer) % K_64)>>9;
+ direct = transfer_size(ssize,max_sector,max_size) - sector_t;
+ /*
+ * We try to read tracks, but if we get too many errors, we
+ * go back to reading just one sector at a time.
+ *
+ * This means we should be able to read a sector even if there
+ * are other bad sectors on this track.
+ */
+ if (!direct ||
+ (indirect * 2 > direct * 3 &&
+ *errors < DP->max_errors.read_track &&
+ /*!TESTF(FD_NEED_TWADDLE) &&*/
+ ((!probing || (DP->read_track&(1<<DRS->probed_format)))))){
+ max_size = CURRENT->nr_sectors;
+ } else {
+ raw_cmd->kernel_data = CURRENT->buffer;
+ raw_cmd->length = current_count_sectors << 9;
+ if (raw_cmd->length == 0){
+ DPRINT("zero dma transfer attempted from make_raw_request\n");
+ DPRINT("indirect=%d direct=%d sector_t=%d",
+ indirect, direct, sector_t);
+ return 0;
+ }
+ return 2;
+ }
+ }
+
+ if (CT(COMMAND) == FD_READ)
+ max_size = max_sector; /* unbounded */
+
+ /* claim buffer track if needed */
+ if (buffer_track != raw_cmd->track || /* bad track */
+ buffer_drive !=current_drive || /* bad drive */
+ sector_t > buffer_max ||
+ sector_t < buffer_min ||
+ ((CT(COMMAND) == FD_READ ||
+ (aligned_sector_t == sector_t && CURRENT->nr_sectors >= ssize))&&
+ max_sector > 2 * max_buffer_sectors + buffer_min &&
+ max_size + sector_t > 2 * max_buffer_sectors + buffer_min)
+ /* not enough space */){
+ buffer_track = -1;
+ buffer_drive = current_drive;
+ buffer_max = buffer_min = aligned_sector_t;
+ }
+ raw_cmd->kernel_data = floppy_track_buffer +
+ ((aligned_sector_t-buffer_min)<<9);
+
+ if (CT(COMMAND) == FD_WRITE){
+ /* copy write buffer to track buffer.
+ * if we get here, we know that the write
+ * is either aligned or the data already in the buffer
+ * (buffer will be overwritten) */
+#ifdef FLOPPY_SANITY_CHECK
+ if (sector_t != aligned_sector_t && buffer_track == -1)
+ DPRINT("internal error offset !=0 on write\n");
+#endif
+ buffer_track = raw_cmd->track;
+ buffer_drive = current_drive;
+ copy_buffer(ssize, max_sector, 2*max_buffer_sectors+buffer_min);
+ } else
+ transfer_size(ssize, max_sector,
+ 2*max_buffer_sectors+buffer_min-aligned_sector_t);
+
+ /* round up current_count_sectors to get dma xfer size */
+ raw_cmd->length = sector_t+current_count_sectors-aligned_sector_t;
+ raw_cmd->length = ((raw_cmd->length -1)|(ssize-1))+1;
+ raw_cmd->length <<= 9;
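+	/* The '((length-1)|(ssize-1))+1' step above rounds the sector count
+	 * up to the next multiple of ssize (a power of two); e.g. with
+	 * ssize == 2, a 7-sector span becomes 8 sectors before being
+	 * converted to bytes. */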
+#ifdef FLOPPY_SANITY_CHECK
+ if ((raw_cmd->length < current_count_sectors << 9) ||
+ (raw_cmd->kernel_data != CURRENT->buffer &&
+ CT(COMMAND) == FD_WRITE &&
+ (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
+ aligned_sector_t < buffer_min)) ||
+ raw_cmd->length % (128 << SIZECODE) ||
+ raw_cmd->length <= 0 || current_count_sectors <= 0){
+ DPRINT("fractionary current count b=%lx s=%lx\n",
+ raw_cmd->length, current_count_sectors);
+ if (raw_cmd->kernel_data != CURRENT->buffer)
+ printk("addr=%d, length=%ld\n",
+ (int) ((raw_cmd->kernel_data -
+ floppy_track_buffer) >> 9),
+ current_count_sectors);
+ printk("st=%d ast=%d mse=%d msi=%d\n",
+ sector_t, aligned_sector_t, max_sector, max_size);
+ printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
+ printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
+ COMMAND, SECTOR, HEAD, TRACK);
+ printk("buffer drive=%d\n", buffer_drive);
+ printk("buffer track=%d\n", buffer_track);
+ printk("buffer_min=%d\n", buffer_min);
+ printk("buffer_max=%d\n", buffer_max);
+ return 0;
+ }
+
+ if (raw_cmd->kernel_data != CURRENT->buffer){
+ if (raw_cmd->kernel_data < floppy_track_buffer ||
+ current_count_sectors < 0 ||
+ raw_cmd->length < 0 ||
+ raw_cmd->kernel_data + raw_cmd->length >
+ floppy_track_buffer + (max_buffer_sectors << 10)){
+ DPRINT("buffer overrun in schedule dma\n");
+ printk("sector_t=%d buffer_min=%d current_count=%ld\n",
+ sector_t, buffer_min,
+ raw_cmd->length >> 9);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
+ if (CT(COMMAND) == FD_READ)
+ printk("read\n");
+			if (CT(COMMAND) == FD_WRITE)
+ printk("write\n");
+ return 0;
+ }
+ } else if (raw_cmd->length > CURRENT->nr_sectors << 9 ||
+ current_count_sectors > CURRENT->nr_sectors){
+ DPRINT("buffer overrun in direct transfer\n");
+ return 0;
+ } else if (raw_cmd->length < current_count_sectors << 9){
+ DPRINT("more sectors than bytes\n");
+ printk("bytes=%ld\n", raw_cmd->length >> 9);
+ printk("sectors=%ld\n", current_count_sectors);
+ }
+ if (raw_cmd->length == 0){
+ DPRINT("zero dma transfer attempted from make_raw_request\n");
+ return 0;
+ }
+#endif
+ return 2;
+}
+
+static void redo_fd_request(void)
+{
+#define REPEAT {request_done(0); continue; }
+ kdev_t device;
+ int tmp;
+
+ lastredo = jiffies;
+ if (current_drive < N_DRIVE)
+ floppy_off(current_drive);
+
+ if (CURRENT && CURRENT->rq_status == RQ_INACTIVE){
+ CLEAR_INTR;
+ unlock_fdc();
+ return;
+ }
+
+ while(1){
+ if (!CURRENT) {
+ CLEAR_INTR;
+ unlock_fdc();
+ return;
+ }
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR)
+ panic(DEVICE_NAME ": request list destroyed");
+ if (CURRENT->bh && !buffer_locked(CURRENT->bh))
+ panic(DEVICE_NAME ": block not locked");
+
+ device = CURRENT->rq_dev;
+ set_fdc(DRIVE(device));
+ reschedule_timeout(CURRENTD, "redo fd request", 0);
+
+ set_floppy(device);
+ raw_cmd = & default_raw_cmd;
+ raw_cmd->flags = 0;
+ if (start_motor(redo_fd_request)) return;
+ disk_change(current_drive);
+ if (test_bit(current_drive, &fake_change) ||
+ TESTF(FD_DISK_CHANGED)){
+ DPRINT("disk absent or changed during operation\n");
+ REPEAT;
+ }
+ if (!_floppy) { /* Autodetection */
+ if (!probing){
+ DRS->probed_format = 0;
+ if (next_valid_format()){
+ DPRINT("no autodetectable formats\n");
+ _floppy = NULL;
+ REPEAT;
+ }
+ }
+ probing = 1;
+ _floppy = floppy_type+DP->autodetect[DRS->probed_format];
+ } else
+ probing = 0;
+ errors = & (CURRENT->errors);
+ tmp = make_raw_rw_request();
+ if (tmp < 2){
+ request_done(tmp);
+ continue;
+ }
+
+ if (TESTF(FD_NEED_TWADDLE))
+ twaddle();
+ floppy_tq.routine = (void *)(void *) floppy_start;
+ queue_task(&floppy_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+#ifdef DEBUGT
+ debugt("queue fd request");
+#endif
+ return;
+ }
+#undef REPEAT
+}
+
+static struct cont_t rw_cont={
+ rw_interrupt,
+ redo_fd_request,
+ bad_flp_intr,
+ request_done };
+
+static struct tq_struct request_tq =
+{ 0, 0, (void *) (void *) redo_fd_request, 0 };
+
+static void process_fd_request(void)
+{
+ cont = &rw_cont;
+ queue_task(&request_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+static void do_fd_request(void)
+{
+ sti();
+ if (fdc_busy){
+ /* fdc busy, this new request will be treated when the
+ current one is done */
+ is_alive("do fd request, old request running");
+ return;
+ }
+ lock_fdc(MAXTIMEOUT,0);
+ process_fd_request();
+ is_alive("do fd request");
+}
+
+static struct cont_t poll_cont={
+ success_and_wakeup,
+ floppy_ready,
+ generic_failure,
+ generic_done };
+
+static int poll_drive(int interruptible, int flag)
+{
+ int ret;
+ /* no auto-sense, just clear dcl */
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->flags= flag;
+ raw_cmd->track=0;
+ raw_cmd->cmd_count=0;
+ cont = &poll_cont;
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("setting NEWCHANGE in poll_drive\n");
+ }
+#endif
+ SETF(FD_DISK_NEWCHANGE);
+ WAIT(floppy_ready);
+ return ret;
+}
+
+/*
+ * User triggered reset
+ * ====================
+ */
+
+static void reset_intr(void)
+{
+ printk("weird, reset interrupt called\n");
+}
+
+static struct cont_t reset_cont={
+ reset_intr,
+ success_and_wakeup,
+ generic_failure,
+ generic_done };
+
+static int user_reset_fdc(int drive, int arg, int interruptible)
+{
+ int ret;
+
+ ret=0;
+ LOCK_FDC(drive,interruptible);
+ if (arg == FD_RESET_ALWAYS)
+ FDCS->reset=1;
+ if (FDCS->reset){
+ cont = &reset_cont;
+ WAIT(reset_fdc);
+ }
+ process_fd_request();
+ return ret;
+}
+
+/*
+ * Misc Ioctl's and support
+ * ========================
+ */
+static int fd_copyout(void *param, const void *address, int size)
+{
+ int ret;
+
+ ECALL(verify_area(VERIFY_WRITE,param,size));
+ memcpy_tofs(param,(void *) address, size);
+ return 0;
+}
+
+static int fd_copyin(void *param, void *address, int size)
+{
+ int ret;
+
+ ECALL(verify_area(VERIFY_READ,param,size));
+ memcpy_fromfs((void *) address, param, size);
+ return 0;
+}
+
+#define COPYOUT(x) ECALL(fd_copyout((void *)param, &(x), sizeof(x)))
+#define COPYIN(x) ECALL(fd_copyin((void *)param, &(x), sizeof(x)))
+
+static inline const char *drive_name(int type, int drive)
+{
+ struct floppy_struct *floppy;
+
+ if (type)
+ floppy = floppy_type + type;
+ else {
+ if (UDP->native_format)
+ floppy = floppy_type + UDP->native_format;
+ else
+ return "(null)";
+ }
+ if (floppy->name)
+ return floppy->name;
+ else
+ return "(null)";
+}
+
+
+/* raw commands */
+static void raw_cmd_done(int flag)
+{
+ int i;
+
+ if (!flag) {
+ raw_cmd->flags |= FD_RAW_FAILURE;
+ raw_cmd->flags |= FD_RAW_HARDFAILURE;
+ } else {
+ raw_cmd->reply_count = inr;
+ for (i=0; i< raw_cmd->reply_count; i++)
+ raw_cmd->reply[i] = reply_buffer[i];
+
+ if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE))
+ raw_cmd->length = fd_get_dma_residue();
+
+ if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
+ (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
+ raw_cmd->flags |= FD_RAW_FAILURE;
+
+ if (disk_change(current_drive))
+ raw_cmd->flags |= FD_RAW_DISK_CHANGE;
+ else
+ raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
+ if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
+ motor_off_callback(current_drive);
+
+ if (raw_cmd->next &&
+ (!(raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
+ ((raw_cmd->flags & FD_RAW_FAILURE) ||
+ !(raw_cmd->flags &FD_RAW_STOP_IF_SUCCESS))) {
+ raw_cmd = raw_cmd->next;
+ return;
+ }
+ }
+ generic_done(flag);
+}
+
+
+static struct cont_t raw_cmd_cont={
+ success_and_wakeup,
+ floppy_start,
+ generic_failure,
+ raw_cmd_done
+};
+
+static inline int raw_cmd_copyout(int cmd, char *param,
+ struct floppy_raw_cmd *ptr)
+{
+ struct old_floppy_raw_cmd old_raw_cmd;
+ int ret;
+
+ while(ptr) {
+ if (cmd == OLDFDRAWCMD) {
+ old_raw_cmd.flags = ptr->flags;
+ old_raw_cmd.data = ptr->data;
+ old_raw_cmd.length = ptr->length;
+ old_raw_cmd.rate = ptr->rate;
+ old_raw_cmd.reply_count = ptr->reply_count;
+ memcpy(old_raw_cmd.reply, ptr->reply, 7);
+ COPYOUT(old_raw_cmd);
+ param += sizeof(old_raw_cmd);
+ } else {
+ COPYOUT(*ptr);
+ param += sizeof(struct floppy_raw_cmd);
+ }
+
+ if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length){
+ if (ptr->length>=0 && ptr->length<=ptr->buffer_length)
+ ECALL(fd_copyout(ptr->data,
+ ptr->kernel_data,
+ ptr->buffer_length -
+ ptr->length));
+ }
+ ptr = ptr->next;
+ }
+ return 0;
+}
+
+
+static void raw_cmd_free(struct floppy_raw_cmd **ptr)
+{
+ struct floppy_raw_cmd *next,*this;
+
+ this = *ptr;
+ *ptr = 0;
+ while(this) {
+ if (this->buffer_length) {
+ fd_dma_mem_free((unsigned long)this->kernel_data,
+ this->buffer_length);
+ this->buffer_length = 0;
+ }
+ next = this->next;
+ kfree(this);
+ this = next;
+ }
+}
+
+
+static inline int raw_cmd_copyin(int cmd, char *param,
+ struct floppy_raw_cmd **rcmd)
+{
+ struct floppy_raw_cmd *ptr;
+ struct old_floppy_raw_cmd old_raw_cmd;
+ int ret;
+ int i;
+
+ *rcmd = 0;
+ while(1) {
+ ptr = (struct floppy_raw_cmd *)
+ kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+ if (!ptr)
+ return -ENOMEM;
+ *rcmd = ptr;
+ if (cmd == OLDFDRAWCMD){
+ COPYIN(old_raw_cmd);
+ ptr->flags = old_raw_cmd.flags;
+ ptr->data = old_raw_cmd.data;
+ ptr->length = old_raw_cmd.length;
+ ptr->rate = old_raw_cmd.rate;
+ ptr->cmd_count = old_raw_cmd.cmd_count;
+ ptr->track = old_raw_cmd.track;
+ ptr->phys_length = 0;
+ ptr->next = 0;
+ ptr->buffer_length = 0;
+ memcpy(ptr->cmd, old_raw_cmd.cmd, 9);
+ param += sizeof(struct old_floppy_raw_cmd);
+ if (ptr->cmd_count > 9)
+ return -EINVAL;
+ } else {
+ COPYIN(*ptr);
+ ptr->next = 0;
+ ptr->buffer_length = 0;
+ param += sizeof(struct floppy_raw_cmd);
+ if (ptr->cmd_count > 33)
+ /* the command may now also take up the space
+ * initially intended for the reply & the
+ * reply count. Needed for long 82078 commands
+ * such as RESTORE, which takes ... 17 command
+ * bytes. Murphy's law #137: When you reserve
+ * 16 bytes for a structure, you'll one day
+ * discover that you really need 17...
+ */
+ return -EINVAL;
+ }
+
+ for (i=0; i< 16; i++)
+ ptr->reply[i] = 0;
+ ptr->resultcode = 0;
+ ptr->kernel_data = 0;
+
+ if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+ if (ptr->length <= 0)
+ return -EINVAL;
+ ptr->kernel_data =(char*)fd_dma_mem_alloc(ptr->length);
+ if (!ptr->kernel_data)
+ return -ENOMEM;
+ ptr->buffer_length = ptr->length;
+ }
+ if ( ptr->flags & FD_RAW_READ )
+ ECALL( verify_area( VERIFY_WRITE, ptr->data,
+ ptr->length ));
+ if (ptr->flags & FD_RAW_WRITE)
+ ECALL(fd_copyin(ptr->data, ptr->kernel_data,
+ ptr->length));
+ rcmd = & (ptr->next);
+ if (!(ptr->flags & FD_RAW_MORE))
+ return 0;
+ ptr->rate &= 0x43;
+ }
+}
+
+
+static int raw_cmd_ioctl(int cmd, void *param)
+{
+ int drive, ret, ret2;
+ struct floppy_raw_cmd *my_raw_cmd;
+
+ if (FDCS->rawcmd <= 1)
+ FDCS->rawcmd = 1;
+ for (drive= 0; drive < N_DRIVE; drive++){
+ if (FDC(drive) != fdc)
+ continue;
+ if (drive == current_drive){
+ if (UDRS->fd_ref > 1){
+ FDCS->rawcmd = 2;
+ break;
+ }
+ } else if (UDRS->fd_ref){
+ FDCS->rawcmd = 2;
+ break;
+ }
+ }
+
+ if (FDCS->reset)
+ return -EIO;
+
+ ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
+ if (ret) {
+ raw_cmd_free(&my_raw_cmd);
+ return ret;
+ }
+
+ raw_cmd = my_raw_cmd;
+ cont = &raw_cmd_cont;
+ ret=wait_til_done(floppy_start,1);
+#ifdef DCL_DEBUG
+ if (DP->flags & FD_DEBUG){
+ DPRINT("calling disk change from raw_cmd ioctl\n");
+ }
+#endif
+
+ if (ret != -EINTR && FDCS->reset)
+ ret = -EIO;
+
+ DRS->track = NO_TRACK;
+
+ ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
+ if (!ret)
+ ret = ret2;
+ raw_cmd_free(&my_raw_cmd);
+ return ret;
+}
+
+static int invalidate_drive(kdev_t rdev)
+{
+ /* invalidate the buffer track to force a reread */
+ set_bit(DRIVE(rdev), &fake_change);
+ process_fd_request();
+ check_disk_change(rdev);
+ return 0;
+}
+
+
+static inline void clear_write_error(int drive)
+{
+ CLEARSTRUCT(UDRWE);
+}
+
+static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+ int drive, int type, kdev_t device)
+{
+ int cnt;
+
+ /* sanity checking for parameters.*/
+ if (g->sect <= 0 ||
+ g->head <= 0 ||
+ g->track <= 0 ||
+ g->track > UDP->tracks>>STRETCH(g) ||
+ /* check if reserved bits are set */
+ (g->stretch&~(FD_STRETCH|FD_SWAPSIDES)) != 0)
+ return -EINVAL;
+ if (type){
+ if (!suser())
+ return -EPERM;
+ LOCK_FDC(drive,1);
+ for (cnt = 0; cnt < N_DRIVE; cnt++){
+ if (ITYPE(drive_state[cnt].fd_device) == type &&
+ drive_state[cnt].fd_ref)
+ set_bit(drive, &fake_change);
+ }
+ floppy_type[type] = *g;
+ floppy_type[type].name="user format";
+ for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
+ floppy_sizes[cnt]= floppy_sizes[cnt+0x80]=
+ floppy_type[type].size>>1;
+ process_fd_request();
+ for (cnt = 0; cnt < N_DRIVE; cnt++){
+ if (ITYPE(drive_state[cnt].fd_device) == type &&
+ drive_state[cnt].fd_ref)
+ check_disk_change(
+ MKDEV(FLOPPY_MAJOR,
+ drive_state[cnt].fd_device));
+ }
+ } else {
+ LOCK_FDC(drive,1);
+ if (cmd != FDDEFPRM)
+ /* notice a disk change immediately, else
+ * we lose our settings immediately */
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ user_params[drive] = *g;
+ if (buffer_drive == drive)
+ SUPBOUND(buffer_max, user_params[drive].sect);
+ current_type[drive] = &user_params[drive];
+ floppy_sizes[drive] = user_params[drive].size >> 1;
+ if (cmd == FDDEFPRM)
+ DRS->keep_data = -1;
+ else
+ DRS->keep_data = 1;
+ /* invalidation. Invalidate only when needed, i.e.
+ * when there are already sectors in the buffer cache
+ * whose number will change. This is useful, because
+ * mtools often changes the geometry of the disk after
+ * looking at the boot block */
+ if (DRS->maxblock > user_params[drive].sect || DRS->maxtrack)
+ invalidate_drive(device);
+ else
+ process_fd_request();
+ }
+ return 0;
+}
+
+/* handle obsolete ioctls */
+static struct translation_entry {
+ int newcmd;
+ int oldcmd;
+ int oldsize; /* size of the 0x00xx-style ioctl. Reflects the old structures,
+ * hence the hard-coded numeric values; do not use sizeof here */
+} translation_table[]= {
+ {FDCLRPRM, 0, 0},
+ {FDSETPRM, 1, 28},
+ {FDDEFPRM, 2, 28},
+ {FDGETPRM, 3, 28},
+ {FDMSGON, 4, 0},
+ {FDMSGOFF, 5, 0},
+ {FDFMTBEG, 6, 0},
+ {FDFMTTRK, 7, 12},
+ {FDFMTEND, 8, 0},
+ {FDSETEMSGTRESH, 10, 0},
+ {FDFLUSH, 11, 0},
+ {FDSETMAXERRS, 12, 20},
+ {OLDFDRAWCMD, 30, 0},
+ {FDGETMAXERRS, 14, 20},
+ {FDGETDRVTYP, 16, 16},
+ {FDSETDRVPRM, 20, 88},
+ {FDGETDRVPRM, 21, 88},
+ {FDGETDRVSTAT, 22, 52},
+ {FDPOLLDRVSTAT, 23, 52},
+ {FDRESET, 24, 0},
+ {FDGETFDCSTAT, 25, 40},
+ {FDWERRORCLR, 27, 0},
+ {FDWERRORGET, 28, 24},
+ {FDRAWCMD, 0, 0},
+ {FDEJECT, 0, 0},
+ {FDTWADDLE, 40, 0} };
+
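+/* map a 0x02xx-style ioctl onto its canonical command; reject requests whose
+ * user-supplied structure is larger than the one this driver knows about */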
+static inline int normalize_0x02xx_ioctl(int *cmd, int *size)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+ if ((*cmd & 0xffff) == (translation_table[i].newcmd & 0xffff)){
+ *size = _IOC_SIZE(*cmd);
+ *cmd = translation_table[i].newcmd;
+ if (*size > _IOC_SIZE(*cmd)) {
+ printk("ioctl not yet supported\n");
+ return -EFAULT;
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static inline int xlate_0x00xx_ioctl(int *cmd, int *size)
+{
+ int i;
+ /* old ioctls for kernels <= 1.3.33 */
+ /* When the next even release comes around, we'll start
+ * warning against these.
+ * When the next odd release comes around, we'll fail with
+ * -EINVAL */
+ if(strcmp(system_utsname.version, "1.4.0") >= 0)
+ printk("obsolete floppy ioctl %x\n", *cmd);
+ if((system_utsname.version[0] == '1' &&
+ strcmp(system_utsname.version, "1.5.0") >= 0) ||
+ (system_utsname.version[0] >= '2' &&
+ strcmp(system_utsname.version, "2.1.0") >= 0))
+ return -EINVAL;
+ for (i=0; i < ARRAY_SIZE(translation_table); i++) {
+ if (*cmd == translation_table[i].oldcmd) {
+ *size = translation_table[i].oldsize;
+ *cmd = translation_table[i].newcmd;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long param)
+{
+#define IOCTL_MODE_BIT 8
+#define OPEN_WRITE_BIT 16
+#define IOCTL_ALLOWED (filp && (filp->f_mode & IOCTL_MODE_BIT))
+#define OUT(c,x) case c: outparam = (const char *) (x); break
+#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
+
+ int i,drive,type;
+ kdev_t device;
+ int ret;
+ int size;
+ union inparam {
+ struct floppy_struct g; /* geometry */
+ struct format_descr f;
+ struct floppy_max_errors max_errors;
+ struct floppy_drive_params dp;
+ } inparam; /* parameters coming from user space */
+ const char *outparam; /* parameters passed back to user space */
+
+ device = inode->i_rdev;
+ switch (cmd) {
+ RO_IOCTLS(device,param);
+ }
+ type = TYPE(device);
+ drive = DRIVE(device);
+
+ /* convert compatibility eject ioctls into floppy eject ioctl.
+ * We do this in order to provide a means to eject floppy disks before
+ * installing the new fdutils package */
+ if(cmd == CDROMEJECT || /* CD-ROM eject */
+ cmd == 0x6470 /* SunOS floppy eject */) {
+ DPRINT("obsolete eject ioctl\n");
+ DPRINT("please use floppycontrol --eject\n");
+ cmd = FDEJECT;
+ }
+
+ /* convert the old style command into a new style command */
+ if ((cmd & 0xff00) == 0x0200) {
+ ECALL(normalize_0x02xx_ioctl(&cmd, &size));
+ } else if ((cmd & 0xff00) == 0x0000) {
+ ECALL(xlate_0x00xx_ioctl(&cmd, &size));
+ } else
+ return -EINVAL;
+
+ /* permission checks */
+ if (((cmd & 0x80) && !suser()) ||
+ ((cmd & 0x40) && !IOCTL_ALLOWED))
+ return -EPERM;
+
+ /* verify writability of result, and fail early */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ ECALL(verify_area(VERIFY_WRITE,(void *) param, size));
+
+ /* copyin */
+ CLEARSTRUCT(&inparam);
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ ECALL(fd_copyin((void *)param, &inparam, size))
+
+ switch (cmd) {
+ case FDEJECT:
+ if(UDRS->fd_ref != 1)
+ /* somebody else has this drive open */
+ return -EBUSY;
+ LOCK_FDC(drive,1);
+
+ /* do the actual eject. Fails on
+ * non-Sparc architectures */
+ ret=fd_eject(UNIT(drive));
+
+ USETF(FD_DISK_CHANGED);
+ USETF(FD_VERIFY);
+ process_fd_request();
+ return ret;
+ case FDCLRPRM:
+ LOCK_FDC(drive,1);
+ current_type[drive] = NULL;
+ floppy_sizes[drive] = MAX_DISK_SIZE;
+ UDRS->keep_data = 0;
+ return invalidate_drive(device);
+ case FDSETPRM:
+ case FDDEFPRM:
+ return set_geometry(cmd, & inparam.g,
+ drive, type, device);
+ case FDGETPRM:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1,0));
+ process_fd_request();
+ if (type)
+ outparam = (char *) &floppy_type[type];
+ else
+ outparam = (char *) current_type[drive];
+ if(!outparam)
+ return -ENODEV;
+ break;
+
+ case FDMSGON:
+ UDP->flags |= FTD_MSG;
+ return 0;
+ case FDMSGOFF:
+ UDP->flags &= ~FTD_MSG;
+ return 0;
+
+ case FDFMTBEG:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ ret = UDRS->flags;
+ process_fd_request();
+ if(ret & FD_VERIFY)
+ return -ENODEV;
+ if(!(ret & FD_DISK_WRITABLE))
+ return -EROFS;
+ return 0;
+ case FDFMTTRK:
+ if (UDRS->fd_ref != 1)
+ return -EBUSY;
+ return do_format(device, &inparam.f);
+ case FDFMTEND:
+ case FDFLUSH:
+ LOCK_FDC(drive,1);
+ return invalidate_drive(device);
+
+ case FDSETEMSGTRESH:
+ UDP->max_errors.reporting =
+ (unsigned short) (param & 0x0f);
+ return 0;
+ OUT(FDGETMAXERRS, &UDP->max_errors);
+ IN(FDSETMAXERRS, &UDP->max_errors, max_errors);
+
+ case FDGETDRVTYP:
+ outparam = drive_name(type,drive);
+ SUPBOUND(size,strlen(outparam)+1);
+ break;
+
+ IN(FDSETDRVPRM, UDP, dp);
+ OUT(FDGETDRVPRM, UDP);
+
+ case FDPOLLDRVSTAT:
+ LOCK_FDC(drive,1);
+ CALL(poll_drive(1, FD_RAW_NEED_DISK));
+ process_fd_request();
+ /* fall through */
+ OUT(FDGETDRVSTAT, UDRS);
+
+ case FDRESET:
+ return user_reset_fdc(drive, (int)param, 1);
+
+ OUT(FDGETFDCSTAT,UFDCS);
+
+ case FDWERRORCLR:
+ CLEARSTRUCT(UDRWE);
+ return 0;
+ OUT(FDWERRORGET,UDRWE);
+
+ case OLDFDRAWCMD:
+ case FDRAWCMD:
+ if (type)
+ return -EINVAL;
+ LOCK_FDC(drive,1);
+ set_floppy(device);
+ CALL(i = raw_cmd_ioctl(cmd,(void *) param));
+ process_fd_request();
+ return i;
+
+ case FDTWADDLE:
+ LOCK_FDC(drive,1);
+ twaddle();
+ process_fd_request();
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ return fd_copyout((void *)param, outparam, size);
+ else
+ return 0;
+#undef IOCTL_ALLOWED
+#undef OUT
+#undef IN
+}
+
+static void config_types(void)
+{
+ int first=1;
+ int drive;
+
+ /* read drive info out of physical CMOS */
+ drive=0;
+ if (!UDP->cmos)
+ UDP->cmos= FLOPPY0_TYPE;
+ drive=1;
+ if (!UDP->cmos && FLOPPY1_TYPE)
+ UDP->cmos = FLOPPY1_TYPE;
+
+ /* XXX */
+ /* additional physical CMOS drive detection should go here */
+
+ for (drive=0; drive < N_DRIVE; drive++){
+ if (UDP->cmos >= 16)
+ UDP->cmos = 0;
+ if (UDP->cmos >= 0 && UDP->cmos < NUMBER(default_drive_params))
+ memcpy((char *) UDP,
+ (char *) (&default_drive_params[(int)UDP->cmos].params),
+ sizeof(struct floppy_drive_params));
+ if (UDP->cmos){
+ if (first)
+ printk(KERN_INFO "Floppy drive(s): ");
+ else
+ printk(", ");
+ first=0;
+ if (UDP->cmos > 0){
+ allowed_drive_mask |= 1 << drive;
+ printk("fd%d is %s", drive,
+ default_drive_params[(int)UDP->cmos].name);
+ } else
+ printk("fd%d is unknown type %d",drive,
+ UDP->cmos);
+ }
+ }
+ if (!first)
+ printk("\n");
+}
+
+static int floppy_read(struct inode * inode, struct file * filp,
+ char * buf, int count)
+{
+ int drive = DRIVE(inode->i_rdev);
+
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ return block_read(inode, filp, buf, count);
+}
+
+static int floppy_write(struct inode * inode, struct file * filp,
+ const char * buf, int count)
+{
+ int block;
+ int ret;
+ int drive = DRIVE(inode->i_rdev);
+
+ if (!UDRS->maxblock)
+ UDRS->maxblock=1;/* make change detectable */
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ return -ENXIO;
+ if (!UTESTF(FD_DISK_WRITABLE))
+ return -EROFS;
+ block = (filp->f_pos + count) >> 9;
+ INFBOUND(UDRS->maxblock, block);
+ ret= block_write(inode, filp, buf, count);
+ return ret;
+}
+
+static void floppy_release(struct inode * inode, struct file * filp)
+{
+ int drive;
+
+ drive = DRIVE(inode->i_rdev);
+
+ if (!filp || (filp->f_mode & (2 | OPEN_WRITE_BIT)))
+ /* if the file is mounted OR (writable now AND writable at
+ * open time). Linus: Does this cover all cases? */
+ block_fsync(inode,filp);
+
+ if (UDRS->fd_ref < 0)
+ UDRS->fd_ref=0;
+ else if (!UDRS->fd_ref--) {
+ DPRINT("floppy_release with fd_ref == 0");
+ UDRS->fd_ref = 0;
+ }
+ floppy_release_irq_and_dma();
+}
+
+/*
+ * floppy_open checks for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc), and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+#define RETERR(x) do{floppy_release(inode,filp); return -(x);}while(0)
+
+static int floppy_open(struct inode * inode, struct file * filp)
+{
+ int drive;
+ int old_dev;
+ int try;
+ char *tmp;
+
+ if (!filp) {
+ DPRINT("Weird, open called with filp=0\n");
+ return -EIO;
+ }
+
+ drive = DRIVE(inode->i_rdev);
+ if (drive >= N_DRIVE ||
+ !(allowed_drive_mask & (1 << drive)) ||
+ fdc_state[FDC(drive)].version == FDC_NONE)
+ return -ENXIO;
+
+ if (TYPE(inode->i_rdev) >= NUMBER(floppy_type))
+ return -ENXIO;
+ old_dev = UDRS->fd_device;
+ if (UDRS->fd_ref && old_dev != MINOR(inode->i_rdev))
+ return -EBUSY;
+
+ if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)){
+ USETF(FD_DISK_CHANGED);
+ USETF(FD_VERIFY);
+ }
+
+ if (UDRS->fd_ref == -1 ||
+ (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
+ return -EBUSY;
+
+ if (floppy_grab_irq_and_dma())
+ return -EBUSY;
+
+ if (filp->f_flags & O_EXCL)
+ UDRS->fd_ref = -1;
+ else
+ UDRS->fd_ref++;
+
+ if (!floppy_track_buffer){
+ /* if opening an ED drive, reserve a big buffer,
+ * else reserve a small one */
+ if ((UDP->cmos == 6) || (UDP->cmos == 5))
+ try = 64; /* Only 48 actually useful */
+ else
+ try = 32; /* Only 24 actually useful */
+
+ tmp=(char *)fd_dma_mem_alloc(1024 * try);
+ if (!tmp) {
+ try >>= 1; /* buffer only one side */
+ INFBOUND(try, 16);
+ tmp= (char *)fd_dma_mem_alloc(1024*try);
+ }
+ if (!tmp) {
+ DPRINT("Unable to allocate DMA memory\n");
+ RETERR(ENXIO);
+ }
+ if (floppy_track_buffer)
+ fd_dma_mem_free((unsigned long)tmp,try*1024);
+ else {
+ buffer_min = buffer_max = -1;
+ floppy_track_buffer = tmp;
+ max_buffer_sectors = try;
+ }
+ }
+
+ UDRS->fd_device = MINOR(inode->i_rdev);
+ if (old_dev != -1 && old_dev != MINOR(inode->i_rdev)) {
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ invalidate_buffers(MKDEV(FLOPPY_MAJOR,old_dev));
+ }
+
+ /* Allow ioctls if we have write permissions, even on a read-only open */
+ if ((filp->f_mode & 2) || (permission(inode,2) == 0))
+ filp->f_mode |= IOCTL_MODE_BIT;
+ if (filp->f_mode & 2)
+ filp->f_mode |= OPEN_WRITE_BIT;
+
+ if (UFDCS->rawcmd == 1)
+ UFDCS->rawcmd = 2;
+
+ if (filp->f_flags & O_NDELAY)
+ return 0;
+ if (filp->f_mode & 3) {
+ UDRS->last_checked = 0;
+ check_disk_change(inode->i_rdev);
+ if (UTESTF(FD_DISK_CHANGED))
+ RETERR(ENXIO);
+ }
+ if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
+ RETERR(EROFS);
+ return 0;
+#undef RETERR
+}
+
+/*
+ * Check if the disk has been changed or if a change has been faked.
+ */
+static int check_floppy_change(kdev_t dev)
+{
+ int drive = DRIVE(dev);
+
+ if (MAJOR(dev) != MAJOR_NR) {
+ DPRINT("check_floppy_change: not a floppy\n");
+ return 0;
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
+ return 1;
+
+ if (UDP->checkfreq < jiffies - UDRS->last_checked){
+ lock_fdc(drive,0);
+ poll_drive(0,0);
+ process_fd_request();
+ }
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ (!TYPE(dev) && !current_type[drive]))
+ return 1;
+ return 0;
+}
+
+/* revalidate the floppy disk, i.e. trigger format autodetection by reading
+ * the bootblock (block 0). "Autodetection" is also needed to check whether
+ * there is a disk in the drive at all... Thus we also do it for fixed
+ * geometry formats */
+static int floppy_revalidate(kdev_t dev)
+{
+#define NO_GEOM (!current_type[drive] && !TYPE(dev))
+ struct buffer_head * bh;
+ int drive=DRIVE(dev);
+ int cf;
+
+ if (UTESTF(FD_DISK_CHANGED) ||
+ UTESTF(FD_VERIFY) ||
+ test_bit(drive, &fake_change) ||
+ NO_GEOM){
+ lock_fdc(drive,0);
+ cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY);
+ if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)){
+ process_fd_request(); /*already done by another thread*/
+ return 0;
+ }
+ UDRS->maxblock = 0;
+ UDRS->maxtrack = 0;
+ if (buffer_drive == drive)
+ buffer_track = -1;
+ clear_bit(drive, &fake_change);
+ UCLEARF(FD_DISK_CHANGED);
+ if (cf)
+ UDRS->generation++;
+ if (NO_GEOM){
+ /* auto-sensing */
+ int size = floppy_blocksizes[MINOR(dev)];
+ if (!size)
+ size = 1024;
+ if (!(bh = getblk(dev,0,size))){
+ process_fd_request();
+ return 1;
+ }
+ if (bh && !buffer_uptodate(bh))
+ ll_rw_block(READ, 1, &bh);
+ process_fd_request();
+ wait_on_buffer(bh);
+ brelse(bh);
+ return 0;
+ }
+ if (cf)
+ poll_drive(0, FD_RAW_NEED_DISK);
+ process_fd_request();
+ }
+ return 0;
+}
+
+static struct file_operations floppy_fops = {
+ NULL, /* lseek - default */
+ floppy_read, /* read - general block-dev read */
+ floppy_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ fd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ floppy_open, /* open */
+ floppy_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_floppy_change, /* media_change */
+ floppy_revalidate, /* revalidate */
+};
+
+/*
+ * Floppy Driver initialization
+ * =============================
+ */
+
+/* Determine the floppy disk controller type */
+/* This routine was written by David C. Niemi */
+static char get_fdc_version(void)
+{
+ int r;
+
+ output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
+ if (FDCS->reset)
+ return FDC_NONE;
+ if ((r = result()) <= 0x00)
+ return FDC_NONE; /* No FDC present ??? */
+ if ((r==1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is an 8272A\n",fdc);
+ return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
+ }
+ if (r != 10) {
+ printk("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+
+ if(!fdc_configure()) {
+ printk(KERN_INFO "FDC %d is an 82072\n",fdc);
+ return FDC_82072; /* 82072 doesn't know CONFIGURE */
+ }
+
+ output_byte(FD_PERPENDICULAR);
+ if(need_more_output() == MORE_OUTPUT) {
+ output_byte(0);
+ } else {
+ printk(KERN_INFO "FDC %d is an 82072A\n", fdc);
+ return FDC_82072A; /* 82072A as found on Sparcs. */
+ }
+
+ output_byte(FD_UNLOCK);
+ r = result();
+ if ((r == 1) && (reply_buffer[0] == 0x80)){
+ printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc);
+ return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
+ * LOCK/UNLOCK */
+ }
+ if ((r != 1) || (reply_buffer[0] != 0x00)) {
+ printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_PARTID);
+ r = result();
+ if (r != 1) {
+ printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ if (reply_buffer[0] == 0x80) {
+ printk(KERN_INFO "FDC %d is a post-1991 82077\n",fdc);
+ return FDC_82077; /* Revised 82077AA passes all the tests */
+ }
+ switch (reply_buffer[0] >> 5) {
+ case 0x0:
+ /* Either a 82078-1 or a 82078SL running at 5Volt */
+ printk(KERN_INFO "FDC %d is an 82078.\n",fdc);
+ return FDC_82078;
+ case 0x1:
+ printk(KERN_INFO "FDC %d is a 44pin 82078\n",fdc);
+ return FDC_82078;
+ case 0x2:
+ printk(KERN_INFO "FDC %d is a S82078B\n", fdc);
+ return FDC_S82078B;
+ case 0x3:
+ printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n", fdc);
+ return FDC_87306;
+ default:
+ printk(KERN_INFO "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
+ fdc, reply_buffer[0] >> 5);
+ return FDC_82078_UNKN;
+ }
+} /* get_fdc_version */
+
+/* lilo configuration */
+
+/* we make the invert_dcl function global. One day, somebody might
+ * want to centralize all thinkpad-related options into one lilo option;
+ * there are just so many thinkpad-related quirks! */
+void floppy_invert_dcl(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param)
+ default_drive_params[i].params.flags |= 0x80;
+ else
+ default_drive_params[i].params.flags &= ~0x80;
+ }
+ DPRINT("Configuring drives for inverted dcl\n");
+}
+
+static void daring(int *ints,int param)
+{
+ int i;
+
+ for (i=0; i < ARRAY_SIZE(default_drive_params); i++){
+ if (param){
+ default_drive_params[i].params.select_delay = 0;
+ default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR;
+ } else {
+ default_drive_params[i].params.select_delay = 2*HZ/100;
+ default_drive_params[i].params.flags &= ~FD_SILENT_DCL_CLEAR;
+ }
+ }
+ DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
+}
+
+static void set_cmos(int *ints, int dummy)
+{
+ int current_drive=0;
+
+ if (ints[0] != 2){
+ DPRINT("wrong number of parameter for cmos\n");
+ return;
+ }
+ current_drive = ints[1];
+ if (current_drive < 0 || current_drive >= 8){
+ DPRINT("bad drive for set_cmos\n");
+ return;
+ }
+ if (current_drive >= 4 && !FDC2)
+ FDC2 = 0x370;
+ if (ints[2] <= 0 ||
+ (ints[2] >= NUMBER(default_drive_params) && ints[2] != 16)){
+ DPRINT("bad cmos code %d\n", ints[2]);
+ return;
+ }
+ DP->cmos = ints[2];
+ DPRINT("setting cmos code to %d\n", ints[2]);
+}
+
+static struct param_table {
+ const char *name;
+ void (*fn)(int *ints, int param);
+ int *var;
+ int def_param;
+} config_params[]={
+ { "allowed_drive_mask", 0, &allowed_drive_mask, 0xff },
+ { "all_drives", 0, &allowed_drive_mask, 0xff },
+ { "asus_pci", 0, &allowed_drive_mask, 0x33 },
+
+ { "daring", daring, 0, 1},
+
+ { "two_fdc", 0, &FDC2, 0x370 },
+ { "one_fdc", 0, &FDC2, 0 },
+
+ { "thinkpad", floppy_invert_dcl, 0, 1 },
+
+ { "nodma", 0, &use_virtual_dma, 1 },
+ { "omnibook", 0, &use_virtual_dma, 1 },
+ { "dma", 0, &use_virtual_dma, 0 },
+
+ { "fifo_depth", 0, &fifo_depth, 0xa },
+ { "nofifo", 0, &no_fifo, 0x20 },
+ { "usefifo", 0, &no_fifo, 0 },
+
+ { "cmos", set_cmos, 0, 0 },
+
+ { "unexpected_interrupts", 0, &print_unex, 1 },
+ { "no_unexpected_interrupts", 0, &print_unex, 0 },
+ { "L40SX", 0, &print_unex, 0 } };
+
+#define FLOPPY_SETUP
+void floppy_setup(char *str, int *ints)
+{
+ int i;
+ int param;
+ if (str)
+ for (i=0; i< ARRAY_SIZE(config_params); i++){
+ if (strcmp(str,config_params[i].name) == 0){
+ if (ints[0])
+ param = ints[1];
+ else
+ param = config_params[i].def_param;
+ if(config_params[i].fn)
+ config_params[i].fn(ints,param);
+ if(config_params[i].var) {
+ DPRINT("%s=%d\n", str, param);
+ *config_params[i].var = param;
+ }
+ return;
+ }
+ }
+ if (str) {
+ DPRINT("unknown floppy option [%s]\n", str);
+
+ DPRINT("allowed options are:");
+ for (i=0; i< ARRAY_SIZE(config_params); i++)
+ printk(" %s",config_params[i].name);
+ printk("\n");
+ } else
+ DPRINT("botched floppy option\n");
+ DPRINT("Read linux/drivers/block/README.fd\n");
+}
+
+int floppy_init(void)
+{
+ int i,unit,drive;
+ int have_no_fdc= -EIO;
+
+ raw_cmd = 0;
+
+ if (register_blkdev(MAJOR_NR,"fd",&floppy_fops)) {
+ printk("Unable to get major %d for floppy\n",MAJOR_NR);
+ return -EBUSY;
+ }
+
+ for (i=0; i<256; i++)
+ if (ITYPE(i))
+ floppy_sizes[i] = floppy_type[ITYPE(i)].size >> 1;
+ else
+ floppy_sizes[i] = MAX_DISK_SIZE;
+
+ blk_size[MAJOR_NR] = floppy_sizes;
+ blksize_size[MAJOR_NR] = floppy_blocksizes;
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
+ config_types();
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ CLEARSTRUCT(FDCS);
+ FDCS->dtr = -1;
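+ /* 0x4 keeps the controller out of reset, with the DMA/IRQ gate and all
+ * motor bits off */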
+ FDCS->dor = 0x4;
+#ifdef __sparc__
+ /* Sparcs don't have a DOR reset which we can fall back on */
+ FDCS->version = FDC_82072A;
+#endif
+ }
+
+ fdc_state[0].address = FDC1;
+#if N_FDC > 1
+ fdc_state[1].address = FDC2;
+#endif
+
+ if (floppy_grab_irq_and_dma()){
+ del_timer(&fd_timeout);
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ return -EBUSY;
+ }
+
+ /* initialise drive state */
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ CLEARSTRUCT(UDRS);
+ CLEARSTRUCT(UDRWE);
+ UDRS->flags = FD_VERIFY | FD_DISK_NEWCHANGE | FD_DISK_CHANGED;
+ UDRS->fd_device = -1;
+ floppy_track_buffer = NULL;
+ max_buffer_sectors = 0;
+ }
+
+ for (i = 0; i < N_FDC; i++) {
+ fdc = i;
+ FDCS->driver_version = FD_DRIVER_VERSION;
+ for (unit=0; unit<4; unit++)
+ FDCS->track[unit] = 0;
+ if (FDCS->address == -1)
+ continue;
+ FDCS->rawcmd = 2;
+ if (user_reset_fdc(-1,FD_RESET_ALWAYS,0)){
+ FDCS->address = -1;
+ FDCS->version = FDC_NONE;
+ continue;
+ }
+ /* Try to determine the floppy controller type */
+ FDCS->version = get_fdc_version();
+ if (FDCS->version == FDC_NONE){
+ FDCS->address = -1;
+ continue;
+ }
+
+ request_region(FDCS->address, 6, "floppy");
+ request_region(FDCS->address+7, 1, "floppy DIR");
+ /* address + 6 is reserved, and may be taken by IDE.
+ * Unfortunately, Adaptec doesn't know this :-( */
+
+ have_no_fdc = 0;
+ /* Not all FDCs seem to be able to handle the version command
+ * properly, so force a reset for the standard FDC clones,
+ * to avoid interrupt garbage.
+ */
+ user_reset_fdc(-1,FD_RESET_ALWAYS,0);
+ }
+ fdc=0;
+ del_timer(&fd_timeout);
+ current_drive = 0;
+ floppy_release_irq_and_dma();
+ initialising=0;
+ if (have_no_fdc) {
+ DPRINT("no floppy controllers found\n");
+ request_tq.routine = (void *)(void *) empty;
+ /*
+ * When we return we may be unloaded. This little
+ * trick forces the immediate_bh handler to have run
+ * before we unload it, lest we cause bad things.
+ */
+ mark_bh(IMMEDIATE_BH);
+ schedule();
+ if (usage_count)
+ floppy_release_irq_and_dma();
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ unregister_blkdev(MAJOR_NR,"fd");
+ }
+ return have_no_fdc;
+}
+
+static int floppy_grab_irq_and_dma(void)
+{
+ int i;
+ unsigned long flags;
+
+ INT_OFF;
+ if (usage_count++){
+ INT_ON;
+ return 0;
+ }
+ INT_ON;
+ MOD_INC_USE_COUNT;
+ for (i=0; i< N_FDC; i++){
+ if (fdc_state[i].address != -1){
+ fdc = i;
+ reset_fdc_info(1);
+ fd_outb(FDCS->dor, FD_DOR);
+ }
+ }
+ fdc = 0;
+ set_dor(0, ~0, 8); /* avoid immediate interrupt */
+
+ if (fd_request_irq()) {
+ DPRINT("Unable to grab IRQ%d for the floppy driver\n",
+ FLOPPY_IRQ);
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ if (fd_request_dma()) {
+ DPRINT("Unable to grab DMA%d for the floppy driver\n",
+ FLOPPY_DMA);
+ fd_free_irq();
+ MOD_DEC_USE_COUNT;
+ usage_count--;
+ return -1;
+ }
+ for (fdc = 0; fdc < N_FDC; fdc++)
+ if (FDCS->address != -1)
+ fd_outb(FDCS->dor, FD_DOR);
+ fdc = 0;
+ fd_enable_irq();
+ irqdma_allocated=1;
+ return 0;
+}
+
+static void floppy_release_irq_and_dma(void)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ int drive;
+#endif
+ long tmpsize;
+ unsigned long tmpaddr;
+ unsigned long flags;
+
+ INT_OFF;
+ if (--usage_count){
+ INT_ON;
+ return;
+ }
+ INT_ON;
+ if(irqdma_allocated)
+ {
+ fd_disable_dma();
+ fd_free_dma();
+ fd_disable_irq();
+ fd_free_irq();
+ irqdma_allocated=0;
+ }
+
+ set_dor(0, ~0, 8);
+#if N_FDC > 1
+ set_dor(1, ~8, 0);
+#endif
+ floppy_enable_hlt();
+
+ if (floppy_track_buffer && max_buffer_sectors) {
+ tmpsize = max_buffer_sectors*1024;
+ tmpaddr = (unsigned long)floppy_track_buffer;
+ floppy_track_buffer = 0;
+ max_buffer_sectors = 0;
+ buffer_min = buffer_max = -1;
+ fd_dma_mem_free(tmpaddr, tmpsize);
+ }
+
+#ifdef FLOPPY_SANITY_CHECK
+#ifndef __sparc__
+ for (drive=0; drive < N_FDC * 4; drive++)
+ if (motor_off_timer[drive].next)
+ printk("motor off timer %d still active\n", drive);
+#endif
+
+ if (fd_timeout.next)
+ printk("floppy timer still active:%s\n", timeout_message);
+ if (fd_timer.next)
+ printk("auxiliary floppy timer still active\n");
+ if (floppy_tq.sync)
+ printk("task queue still active\n");
+#endif
+ MOD_DEC_USE_COUNT;
+}
+
+
+#ifdef MODULE
+
+char *floppy=NULL;
+
+static void parse_floppy_cfg_string(char *cfg)
+{
+ char *ptr;
+ int ints[11];
+
+ while(*cfg) {
+ for(ptr = cfg;*cfg && *cfg != ' ' && *cfg != '\t'; cfg++);
+ if(*cfg) {
+ *cfg = '\0';
+ cfg++;
+ }
+ if(*ptr)
+ floppy_setup(get_options(ptr,ints),ints);
+ }
+}
+
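+/* scan the environment strings of the current (insmod) process for the given
+ * pattern (e.g. "floppy=") and hand whatever follows it to the setup routine */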
+static void mod_setup(char *pattern, void (*setup)(char *, int *))
+{
+ unsigned long i;
+ char c;
+ int j;
+ int match;
+ char buffer[100];
+ int ints[11];
+ int length = strlen(pattern)+1;
+
+ match=0;
+ j=1;
+
+ for (i=current->mm->env_start; i< current->mm->env_end; i ++){
+ c= get_fs_byte(i);
+ if (match){
+ if (j==99)
+ c='\0';
+ buffer[j] = c;
+ if (!c || c == ' ' || c == '\t'){
+ if (j){
+ buffer[j] = '\0';
+ setup(get_options(buffer,ints),ints);
+ }
+ j=0;
+ } else
+ j++;
+ if (!c)
+ break;
+ continue;
+ }
+ if ((!j && !c) || (j && c == pattern[j-1]))
+ j++;
+ else
+ j=0;
+ if (j==length){
+ match=1;
+ j=0;
+ }
+ }
+}
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+int init_module(void)
+{
+ printk(KERN_INFO "inserting floppy driver for %s\n", kernel_version);
+
+ if(floppy)
+ parse_floppy_cfg_string(floppy);
+ else
+ mod_setup("floppy=", floppy_setup);
+
+ return floppy_init();
+}
+
+void cleanup_module(void)
+{
+ int fdc, dummy;
+
+ for (fdc=0; fdc<2; fdc++)
+ if (FDCS->address != -1){
+ release_region(FDCS->address, 6);
+ release_region(FDCS->address+7, 1);
+ }
+
+ unregister_blkdev(MAJOR_NR, "fd");
+
+ blk_dev[MAJOR_NR].request_fn = 0;
+ /* eject disk, if any */
+ dummy = fd_eject(0);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#else
+/* eject the boot floppy (if we need the drive for a different root floppy) */
+/* This should only be called at boot time when we're sure that there's no
+ * resource contention. */
+void floppy_eject(void)
+{
+ int dummy;
+ if(floppy_grab_irq_and_dma()==0)
+ {
+ lock_fdc(MAXTIMEOUT,0);
+ dummy=fd_eject(0);
+ process_fd_request();
+ floppy_release_irq_and_dma();
+ }
+}
+#endif
diff --git a/linux/src/drivers/block/genhd.c b/linux/src/drivers/block/genhd.c
new file mode 100644
index 0000000..ebee7ff
--- /dev/null
+++ b/linux/src/drivers/block/genhd.c
@@ -0,0 +1,761 @@
+/*
+ * Code extracted from
+ * linux/kernel/hd.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ *
+ * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * in the early extended-partition checks and added DM partitions
+ *
+ * Support for DiskManager v6.0x added by Mark Lord,
+ * with information provided by OnTrack. This now works for linux fdisk
+ * and LILO, as well as loadlin and bootln. Note that disks other than
+ * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ * More flexible handling of extended partitions - aeb, 950831
+ *
+ * Check partition table on IDE disks for common CHS translations
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+
+#include <asm/system.h>
+
+/*
+ * Many architectures don't like unaligned accesses, which is
+ * frequently the case with the nr_sects and start_sect partition
+ * table entries.
+ */
+#include <asm/unaligned.h>
+
+#define SYS_IND(p) get_unaligned(&p->sys_ind)
+#define NR_SECTS(p) get_unaligned(&p->nr_sects)
+#define START_SECT(p) get_unaligned(&p->start_sect)
+
+
+struct gendisk *gendisk_head = NULL;
+
+static int current_minor = 0;
+extern int *blk_size[];
+extern void rd_load(void);
+extern void initrd_load(void);
+
+extern int chr_dev_init(void);
+extern int blk_dev_init(void);
+extern int scsi_dev_init(void);
+extern int net_dev_init(void);
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf)
+{
+ unsigned int part;
+ const char *maj = hd->major_name;
+ char unit = (minor >> hd->minor_shift) + 'a';
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /*
+ * IDE devices use multiple major numbers, but the drives
+ * are named as: {hda,hdb}, {hdc,hdd}, {hde,hdf}, {hdg,hdh}..
+ * This requires special handling here.
+ */
+ switch (hd->major) {
+ case IDE3_MAJOR:
+ unit += 2;
+ case IDE2_MAJOR:
+ unit += 2;
+ case IDE1_MAJOR:
+ unit += 2;
+ case IDE0_MAJOR:
+ maj = "hd";
+ }
+#endif
+ part = minor & ((1 << hd->minor_shift) - 1);
+ if (part)
+ sprintf(buf, "%s%c%d", maj, unit, part);
+ else
+ sprintf(buf, "%s%c", maj, unit);
+ return buf;
+}
+
+static void add_partition (struct gendisk *hd, int minor, int start, int size)
+{
+ char buf[8];
+ hd->part[minor].start_sect = start;
+ hd->part[minor].nr_sects = size;
+ printk(" %s", disk_name(hd, minor, buf));
+}
+
+static inline int is_extended_partition(struct partition *p)
+{
+ return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+ SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+ SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+}
+
+#ifdef CONFIG_MSDOS_PARTITION
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries. The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start). The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+static void extended_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned long first_sector, first_size, this_sector, this_size;
+ int mask = (1 << hd->minor_shift) - 1;
+ int i;
+
+ first_sector = hd->part[MINOR(dev)].start_sect;
+ first_size = hd->part[MINOR(dev)].nr_sects;
+ this_sector = first_sector;
+
+ while (1) {
+ if ((current_minor & mask) == 0)
+ return;
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ /*
+ * This block is from a device that we're about to stomp on.
+ * So make sure nobody thinks this block is usable.
+ */
+ bh->b_state = 0;
+
+ if (*(unsigned short *) (bh->b_data+510) != 0xAA55)
+ goto done;
+
+ p = (struct partition *) (0x1BE + bh->b_data);
+
+ this_size = hd->part[MINOR(dev)].nr_sects;
+
+ /*
+ * Usually, the first entry is the real data partition,
+ * the 2nd entry is the next extended partition, or empty,
+ * and the 3rd and 4th entries are unused.
+ * However, DRDOS sometimes has the extended partition as
+ * the first entry (when the data partition is empty),
+ * and OS/2 seems to use all four entries.
+ */
+
+ /*
+ * First process the data partition(s)
+ */
+ for (i=0; i<4; i++, p++) {
+ if (!NR_SECTS(p) || is_extended_partition(p))
+ continue;
+
+ /* Check the 3rd and 4th entries -
+ these sometimes contain random garbage */
+ if (i >= 2
+ && START_SECT(p) + NR_SECTS(p) > this_size
+ && (this_sector + START_SECT(p) < first_sector ||
+ this_sector + START_SECT(p) + NR_SECTS(p) >
+ first_sector + first_size))
+ continue;
+
+ add_partition(hd, current_minor, this_sector+START_SECT(p), NR_SECTS(p));
+ current_minor++;
+ if ((current_minor & mask) == 0)
+ goto done;
+ }
+ /*
+ * Next, process the (first) extended partition, if present.
+ * (So far, there seems to be no reason to make
+ * extended_partition() recursive and allow a tree
+ * of extended partitions.)
+ * It should be a link to the next logical partition.
+ * Create a minor for this just long enough to get the next
+ * partition table. The minor will be reused for the next
+ * data partition.
+ */
+ p -= 4;
+ for (i=0; i<4; i++, p++)
+ if(NR_SECTS(p) && is_extended_partition(p))
+ break;
+ if (i == 4)
+ goto done; /* nothing left to do */
+
+ hd->part[current_minor].nr_sects = NR_SECTS(p);
+ hd->part[current_minor].start_sect = first_sector + START_SECT(p);
+ this_sector = first_sector + START_SECT(p);
+ dev = MKDEV(hd->major, current_minor);
+ brelse(bh);
+ }
+done:
+ brelse(bh);
+}
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See extended_partition() for more information.
+ */
+static void bsd_disklabel_partition(struct gendisk *hd, kdev_t dev)
+{
+ struct buffer_head *bh;
+ struct bsd_disklabel *l;
+ struct bsd_partition *p;
+ int mask = (1 << hd->minor_shift) - 1;
+
+ if (!(bh = bread(dev,0,1024)))
+ return;
+ bh->b_state = 0;
+ l = (struct bsd_disklabel *) (bh->b_data+512);
+ if (l->d_magic != BSD_DISKMAGIC) {
+ brelse(bh);
+ return;
+ }
+
+ p = &l->d_partitions[0];
+ while (p - &l->d_partitions[0] <= BSD_MAXPARTITIONS) {
+ if ((current_minor & mask) >= (4 + hd->max_p))
+ break;
+
+ if (p->p_fstype != BSD_FS_UNUSED) {
+ add_partition(hd, current_minor, p->p_offset, p->p_size);
+ current_minor++;
+ }
+ p++;
+ }
+ brelse(bh);
+
+}
+#endif
+
+static int msdos_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, minor = current_minor;
+ struct buffer_head *bh;
+ struct partition *p;
+ unsigned char *data;
+ int mask = (1 << hd->minor_shift) - 1;
+#ifdef CONFIG_BLK_DEV_IDE
+ int tested_for_xlate = 0;
+
+read_mbr:
+#endif
+ if (!(bh = bread(dev,0,1024))) {
+ printk(" unable to read partition table\n");
+ return -1;
+ }
+ data = bh->b_data;
+ /* In some cases we modify the geometry */
+ /* of the drive (below), so ensure that */
+ /* nobody else tries to re-use this data. */
+ bh->b_state = 0;
+#ifdef CONFIG_BLK_DEV_IDE
+check_table:
+#endif
+ if (*(unsigned short *) (0x1fe + data) != 0xAA55) {
+ brelse(bh);
+ return 0;
+ }
+ p = (struct partition *) (0x1be + data);
+
+#ifdef CONFIG_BLK_DEV_IDE
+ if (!tested_for_xlate++) { /* Do this only once per disk */
+ /*
+ * Look for various forms of IDE disk geometry translation
+ */
+ extern int ide_xlate_1024(kdev_t, int, const char *);
+ unsigned int sig = *(unsigned short *)(data + 2);
+ if (SYS_IND(p) == EZD_PARTITION) {
+ /*
+ * The remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, -1, " [EZD]")) {
+ data += 512;
+ goto check_table;
+ }
+ } else if (SYS_IND(p) == DM6_PARTITION) {
+
+ /*
+ * Everything on the disk is offset by 63 sectors,
+ * including a "new" MBR with its own partition table,
+ * and the remainder of the disk must be accessed using
+ * a translated geometry that reduces the number of
+ * apparent cylinders to less than 1024 if possible.
+ *
+ * ide_xlate_1024() will take care of the necessary
+ * adjustments to fool fdisk/LILO and partition check.
+ */
+ if (ide_xlate_1024(dev, 1, " [DM6:DDO]")) {
+ brelse(bh);
+ goto read_mbr; /* start over with new MBR */
+ }
+ } else if (sig <= 0x1ae && *(unsigned short *)(data + sig) == 0x55AA
+ && (1 & *(unsigned char *)(data + sig + 2)) )
+ {
+ /*
+ * DM6 signature in MBR, courtesy of OnTrack
+ */
+ (void) ide_xlate_1024 (dev, 0, " [DM6:MBR]");
+ } else if (SYS_IND(p) == DM6_AUX1PARTITION || SYS_IND(p) == DM6_AUX3PARTITION) {
+ /*
+ * DM6 on other than the first (boot) drive
+ */
+ (void) ide_xlate_1024(dev, 0, " [DM6:AUX]");
+ } else {
+ /*
+ * Examine the partition table for common translations.
+ * This is necessary for drives for situations where
+ * the translated geometry is unavailable from the BIOS.
+ */
+ for (i = 0; i < 4 ; i++) {
+ struct partition *q = &p[i];
+ if (NR_SECTS(q)
+ && (q->sector & 63) == 1
+ && (q->end_sector & 63) == 63) {
+ unsigned int heads = q->end_head + 1;
+ if (heads == 32 || heads == 64 || heads == 128 || heads == 255) {
+
+ (void) ide_xlate_1024(dev, heads, " [PTBL]");
+ break;
+ }
+ }
+ }
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDE */
+
+ current_minor += 4; /* first "extra" minor (for extended partitions) */
+ for (i=1 ; i<=4 ; minor++,i++,p++) {
+ if (!NR_SECTS(p))
+ continue;
+ add_partition(hd, minor, first_sector+START_SECT(p), NR_SECTS(p));
+ if (is_extended_partition(p)) {
+ printk(" <");
+ /*
+ * If we are rereading the partition table, we need
+ * to set the size of the partition so that we will
+ * be able to bread the block containing the extended
+ * partition info.
+ */
+ hd->sizes[minor] = hd->part[minor].nr_sects
+ >> (BLOCK_SIZE_BITS - 9);
+ extended_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ /* prevent someone doing mkfs or mkswap on an
+ extended partition, but leave room for LILO */
+ if (hd->part[minor].nr_sects > 2)
+ hd->part[minor].nr_sects = 2;
+ }
+#ifdef CONFIG_BSD_DISKLABEL
+ if (SYS_IND(p) == BSD_PARTITION) {
+ printk(" <");
+ bsd_disklabel_partition(hd, MKDEV(hd->major, minor));
+ printk(" >");
+ }
+#endif
+ }
+ /*
+ * Check for old-style Disk Manager partition table
+ */
+ if (*(unsigned short *) (data+0xfc) == 0x55AA) {
+ p = (struct partition *) (0x1be + data);
+ for (i = 4 ; i < 16 ; i++, current_minor++) {
+ p--;
+ if ((current_minor & mask) == 0)
+ break;
+ if (!(START_SECT(p) && NR_SECTS(p)))
+ continue;
+ add_partition(hd, current_minor, START_SECT(p), NR_SECTS(p));
+ }
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_MSDOS_PARTITION */
+
+#ifdef CONFIG_OSF_PARTITION
+
+static int osf_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ int i;
+ int mask = (1 << hd->minor_shift) - 1;
+ struct buffer_head *bh;
+ struct disklabel {
+ u32 d_magic;
+ u16 d_type,d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ u32 d_secsize;
+ u32 d_nsectors;
+ u32 d_ntracks;
+ u32 d_ncylinders;
+ u32 d_secpercyl;
+ u32 d_secprtunit;
+ u16 d_sparespertrack;
+ u16 d_sparespercyl;
+ u32 d_acylinders;
+ u16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ u32 d_headswitch, d_trkseek, d_flags;
+ u32 d_drivedata[5];
+ u32 d_spare[5];
+ u32 d_magic2;
+ u16 d_checksum;
+ u16 d_npartitions;
+ u32 d_bbsize, d_sbsize;
+ struct d_partition {
+ u32 p_size;
+ u32 p_offset;
+ u32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ u16 p_cpg;
+ } d_partitions[8];
+ } * label;
+ struct d_partition * partition;
+#define DISKLABELMAGIC (0x82564557UL)
+
+ if (!(bh = bread(dev,0,1024))) {
+ printk("unable to read partition table\n");
+ return -1;
+ }
+ label = (struct disklabel *) (bh->b_data+64);
+ partition = label->d_partitions;
+ if (label->d_magic != DISKLABELMAGIC) {
+ printk("magic: %08x\n", label->d_magic);
+ brelse(bh);
+ return 0;
+ }
+ if (label->d_magic2 != DISKLABELMAGIC) {
+ printk("magic2: %08x\n", label->d_magic2);
+ brelse(bh);
+ return 0;
+ }
+ for (i = 0 ; i < label->d_npartitions; i++, partition++) {
+ if ((current_minor & mask) == 0)
+ break;
+ if (partition->p_size)
+ add_partition(hd, current_minor,
+ first_sector+partition->p_offset,
+ partition->p_size);
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+}
+
+#endif /* CONFIG_OSF_PARTITION */
+
+#ifdef CONFIG_SUN_PARTITION
+
+static int sun_partition(struct gendisk *hd, kdev_t dev, unsigned long first_sector)
+{
+ int i, csum;
+ unsigned short *ush;
+ struct buffer_head *bh;
+ struct sun_disklabel {
+ unsigned char info[128]; /* Informative text string */
+ unsigned char spare[292]; /* Boot information etc. */
+ unsigned short rspeed; /* Disk rotational speed */
+ unsigned short pcylcount; /* Physical cylinder count */
+ unsigned short sparecyl; /* extra sects per cylinder */
+ unsigned char spare2[4]; /* More magic... */
+ unsigned short ilfact; /* Interleave factor */
+ unsigned short ncyl; /* Data cylinder count */
+ unsigned short nacyl; /* Alt. cylinder count */
+ unsigned short ntrks; /* Tracks per cylinder */
+ unsigned short nsect; /* Sectors per track */
+ unsigned char spare3[4]; /* Even more magic... */
+ struct sun_partition {
+ __u32 start_cylinder;
+ __u32 num_sectors;
+ } partitions[8];
+ unsigned short magic; /* Magic number */
+ unsigned short csum; /* Label xor'd checksum */
+ } * label;
+ struct sun_partition *p;
+ int other_endian;
+ unsigned long spc;
+#define SUN_LABEL_MAGIC 0xDABE
+#define SUN_LABEL_MAGIC_SWAPPED 0xBEDA
+/* No need to optimize these macros since they are called only when reading
+ * the partition table. This occurs only at each disk change. */
+#define SWAP16(x) (other_endian ? (((__u16)(x) & 0xFF) << 8) \
+ | (((__u16)(x) & 0xFF00) >> 8) \
+ : (__u16)(x))
+#define SWAP32(x) (other_endian ? (((__u32)(x) & 0xFF) << 24) \
+ | (((__u32)(x) & 0xFF00) << 8) \
+ | (((__u32)(x) & 0xFF0000) >> 8) \
+ | (((__u32)(x) & 0xFF000000) >> 24) \
+ : (__u32)(x))
+
+ if(!(bh = bread(dev, 0, 1024))) {
+ printk("Dev %s: unable to read partition table\n",
+ kdevname(dev));
+ return -1;
+ }
+ label = (struct sun_disklabel *) bh->b_data;
+ p = label->partitions;
+ if (label->magic != SUN_LABEL_MAGIC && label->magic != SUN_LABEL_MAGIC_SWAPPED) {
+ printk("Dev %s Sun disklabel: bad magic %04x\n",
+ kdevname(dev), label->magic);
+ brelse(bh);
+ return 0;
+ }
+ other_endian = (label->magic == SUN_LABEL_MAGIC_SWAPPED);
+ /* Look at the checksum */
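+ /* (the xor of all 16-bit words in the label, including the stored
+ * csum field, must come out to zero) */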
+ ush = ((unsigned short *) (label+1)) - 1;
+ for(csum = 0; ush >= ((unsigned short *) label);)
+ csum ^= *ush--;
+ if(csum) {
+ printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
+ kdevname(dev));
+ brelse(bh);
+ return 0;
+ }
+ /* All Sun disks have 8 partition entries */
+ spc = SWAP16(label->ntrks) * SWAP16(label->nsect);
+ for(i=0; i < 8; i++, p++) {
+ unsigned long st_sector;
+
+ /* We register all partitions, even if zero size, so that
+ * the minor numbers end up ok as per SunOS interpretation.
+ */
+ st_sector = first_sector + SWAP32(p->start_cylinder) * spc;
+ add_partition(hd, current_minor, st_sector, SWAP32(p->num_sectors));
+ current_minor++;
+ }
+ printk("\n");
+ brelse(bh);
+ return 1;
+#undef SWAP16
+#undef SWAP32
+}
+
+#endif /* CONFIG_SUN_PARTITION */
+
+#ifdef CONFIG_AMIGA_PARTITION
+#include <asm/byteorder.h>
+#include <linux/affs_hardblocks.h>
+
+static __inline__ __u32
+checksum_block(__u32 *m, int size)
+{
+ __u32 sum = 0;
+
+ while (size--)
+ sum += htonl(*m++);
+ return sum;
+}
+
+static int
+amiga_partition(struct gendisk *hd, unsigned int dev, unsigned long first_sector)
+{
+ struct buffer_head *bh;
+ struct RigidDiskBlock *rdb;
+ struct PartitionBlock *pb;
+ int start_sect;
+ int nr_sects;
+ int blk;
+ int part, res;
+
+ set_blocksize(dev,512);
+ res = 0;
+
+ for (blk = 0; blk < RDB_ALLOCATION_LIMIT; blk++) {
+ if(!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read RDB block %d\n",dev,blk);
+ goto rdb_done;
+ }
+ if (*(__u32 *)bh->b_data == htonl(IDNAME_RIGIDDISK)) {
+ rdb = (struct RigidDiskBlock *)bh->b_data;
+ if (checksum_block((__u32 *)bh->b_data,htonl(rdb->rdb_SummedLongs) & 0x7F)) {
+ printk("Dev %d: RDB in block %d has bad checksum\n",dev,blk);
+ brelse(bh);
+ continue;
+ }
+ printk(" RDSK");
+ blk = htonl(rdb->rdb_PartitionList);
+ brelse(bh);
+ for (part = 1; blk > 0 && part <= 16; part++) {
+ if (!(bh = bread(dev,blk,512))) {
+ printk("Dev %d: unable to read partition block %d\n",
+ dev,blk);
+ goto rdb_done;
+ }
+ pb = (struct PartitionBlock *)bh->b_data;
+ blk = htonl(pb->pb_Next);
+ if (pb->pb_ID == htonl(IDNAME_PARTITION) && checksum_block(
+ (__u32 *)pb,htonl(pb->pb_SummedLongs) & 0x7F) == 0 ) {
+
+ /* Tell Kernel about it */
+
+ if (!(nr_sects = (htonl(pb->pb_Environment[10]) + 1 -
+ htonl(pb->pb_Environment[9])) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]))) {
+ continue;
+ }
+ start_sect = htonl(pb->pb_Environment[9]) *
+ htonl(pb->pb_Environment[3]) *
+ htonl(pb->pb_Environment[5]);
+ add_partition(hd,current_minor,start_sect,nr_sects);
+ current_minor++;
+ res = 1;
+ }
+ brelse(bh);
+ }
+ printk("\n");
+ break;
+ }
+ }
+
+rdb_done:
+ set_blocksize(dev,BLOCK_SIZE);
+ return res;
+}
+#endif /* CONFIG_AMIGA_PARTITION */
+
+static void check_partition(struct gendisk *hd, kdev_t dev)
+{
+ static int first_time = 1;
+ unsigned long first_sector;
+ char buf[8];
+
+ if (first_time)
+ printk("Partition check:\n");
+ first_time = 0;
+ first_sector = hd->part[MINOR(dev)].start_sect;
+
+ /*
+ * This is a kludge to allow the partition check to be
+ * skipped for specific drives (e.g. IDE cd-rom drives)
+ */
+ if ((int)first_sector == -1) {
+ hd->part[MINOR(dev)].start_sect = 0;
+ return;
+ }
+
+ printk(" %s:", disk_name(hd, MINOR(dev), buf));
+#ifdef CONFIG_MSDOS_PARTITION
+ if (msdos_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ if (osf_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ if(sun_partition(hd, dev, first_sector))
+ return;
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+ if(amiga_partition(hd, dev, first_sector))
+ return;
+#endif
+ printk(" unknown partition table\n");
+}
+
+/* This function is used to re-read partition tables for removable disks.
+   Much of the cleanup from the old partition tables should have already been
+   done. */
+
+/* This function will re-read the partition tables for a given device,
+and set things back up again. There are some important caveats,
+however. You must ensure that no one is using the device, and no one
+can start using the device while this function is being executed. */
+
+void resetup_one_dev(struct gendisk *dev, int drive)
+{
+ int i;
+ int first_minor = drive << dev->minor_shift;
+ int end_minor = first_minor + dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+
+ /*
+ * We need to set the sizes array before we will be able to access
+ * any of the partitions on this device.
+ */
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = first_minor; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+static void setup_dev(struct gendisk *dev)
+{
+ int i, drive;
+ int end_minor = dev->max_nr * dev->max_p;
+
+ blk_size[dev->major] = NULL;
+ for (i = 0 ; i < end_minor; i++) {
+ dev->part[i].start_sect = 0;
+ dev->part[i].nr_sects = 0;
+ }
+ dev->init(dev);
+ for (drive = 0 ; drive < dev->nr_real ; drive++) {
+ int first_minor = drive << dev->minor_shift;
+ current_minor = 1 + first_minor;
+ check_partition(dev, MKDEV(dev->major, first_minor));
+ }
+ if (dev->sizes != NULL) { /* optional safeguard in ll_rw_blk.c */
+ for (i = 0; i < end_minor; i++)
+ dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
+ blk_size[dev->major] = dev->sizes;
+ }
+}
+
+void device_setup(void)
+{
+ extern void console_map_init(void);
+ struct gendisk *p;
+ int nr=0;
+
+ chr_dev_init();
+ blk_dev_init();
+ sti();
+#ifdef CONFIG_SCSI
+ scsi_dev_init();
+#endif
+#ifdef CONFIG_INET
+ net_dev_init();
+#endif
+ console_map_init();
+
+ for (p = gendisk_head ; p ; p=p->next) {
+ setup_dev(p);
+ nr += p->nr_real;
+ }
+#ifdef CONFIG_BLK_DEV_RAM
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start && mount_initrd) initrd_load();
+ else
+#endif
+ rd_load();
+#endif
+}
diff --git a/linux/src/drivers/block/ide-cd.c b/linux/src/drivers/block/ide-cd.c
new file mode 100644
index 0000000..020a831
--- /dev/null
+++ b/linux/src/drivers/block/ide-cd.c
@@ -0,0 +1,2802 @@
+/* #define VERBOSE_IDE_CD_ERRORS 1 */
+/*
+ * linux/drivers/block/ide-cd.c
+ * ATAPI cd-rom driver. To be used with ide.c.
+ * See Documentation/cdrom/ide-cd for usage information.
+ *
+ * Copyright (C) 1994, 1995, 1996 scott snyder <snyder@fnald0.fnal.gov>
+ * Copyright (C) 1996, 1997 Erik Andersen <andersee@debian.org>
+ * Copyright (C) 1998 Jens Axboe and Chris Zwilling
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ * see linux/COPYING for more information.
+ *
+ * 1.00 Oct 31, 1994 -- Initial version.
+ * 1.01 Nov 2, 1994 -- Fixed problem with starting request in
+ * cdrom_check_status.
+ * 1.03 Nov 25, 1994 -- leaving unmask_intr[] as a user-setting (as for disks)
+ * (from mlord) -- minor changes to cdrom_setup()
+ * -- renamed ide_dev_s to ide_drive_t, enable irq on command
+ * 2.00 Nov 27, 1994 -- Generalize packet command interface;
+ * add audio ioctls.
+ * 2.01 Dec 3, 1994 -- Rework packet command interface to handle devices
+ * which send an interrupt when ready for a command.
+ * 2.02 Dec 11, 1994 -- Cache the TOC in the driver.
+ * Don't use SCMD_PLAYAUDIO_TI; it's not included
+ * in the current version of ATAPI.
+ * Try to use LBA instead of track or MSF addressing
+ * when possible.
+ * Don't wait for READY_STAT.
+ * 2.03 Jan 10, 1995 -- Rewrite block read routines to handle block sizes
+ * other than 2k and to move multiple sectors in a
+ * single transaction.
+ * 2.04 Apr 21, 1995 -- Add work-around for Creative Labs CD220E drives.
+ * Thanks to Nick Saw <cwsaw@pts7.pts.mot.com> for
+ * help in figuring this out. Ditto for Acer and
+ * Aztech drives, which seem to have the same problem.
+ * 2.04b May 30, 1995 -- Fix to match changes in ide.c version 3.16 -ml
+ * 2.05 Jun 8, 1995 -- Don't attempt to retry after an illegal request
+ * or data protect error.
+ * Use HWIF and DEV_HWIF macros as in ide.c.
+ * Always try to do a request_sense after
+ * a failed command.
+ * Include an option to give textual descriptions
+ * of ATAPI errors.
+ * Fix a bug in handling the sector cache which
+ * showed up if the drive returned data in 512 byte
+ * blocks (like Pioneer drives). Thanks to
+ * Richard Hirst <srh@gpt.co.uk> for diagnosing this.
+ * Properly supply the page number field in the
+ * MODE_SELECT command.
+ * PLAYAUDIO12 is broken on the Aztech; work around it.
+ * 2.05x Aug 11, 1995 -- lots of data structure renaming/restructuring in ide.c
+ * (my apologies to Scott, but now ide-cd.c is independent)
+ * 3.00 Aug 22, 1995 -- Implement CDROMMULTISESSION ioctl.
+ * Implement CDROMREADAUDIO ioctl (UNTESTED).
+ * Use input_ide_data() and output_ide_data().
+ * Add door locking.
+ * Fix usage count leak in cdrom_open, which happened
+ * when a read-write mount was attempted.
+ * Try to load the disk on open.
+ * Implement CDROMEJECT_SW ioctl (off by default).
+ * Read total cdrom capacity during open.
+ * Rearrange logic in cdrom_decode_status. Issue
+ * request sense commands for failed packet commands
+ * from here instead of from cdrom_queue_packet_command.
+ * Fix a race condition in retrieving error information.
+ * Suppress printing normal unit attention errors and
+ * some drive not ready errors.
+ * Implement CDROMVOLREAD ioctl.
+ * Implement CDROMREADMODE1/2 ioctls.
+ * Fix race condition in setting up interrupt handlers
+ * when the `serialize' option is used.
+ * 3.01 Sep 2, 1995 -- Fix ordering of reenabling interrupts in
+ * cdrom_queue_request.
+ * Another try at using ide_[input,output]_data.
+ * 3.02 Sep 16, 1995 -- Stick total disk capacity in partition table as well.
+ * Make VERBOSE_IDE_CD_ERRORS dump failed command again.
+ * Dump out more information for ILLEGAL REQUEST errs.
+ * Fix handling of errors occurring before the
+ * packet command is transferred.
+ * Fix transfers with odd bytelengths.
+ * 3.03 Oct 27, 1995 -- Some Creative drives have an id of just `CD'.
+ * `DCI-2S10' drives are broken too.
+ * 3.04 Nov 20, 1995 -- So are Vertos drives.
+ * 3.05 Dec 1, 1995 -- Changes to go with overhaul of ide.c and ide-tape.c
+ * 3.06 Dec 16, 1995 -- Add support needed for partitions.
+ * More workarounds for Vertos bugs (based on patches
+ * from Holger Dietze <dietze@aix520.informatik.uni-leipzig.de>).
+ * Try to eliminate byteorder assumptions.
+ * Use atapi_cdrom_subchnl struct definition.
+ * Add STANDARD_ATAPI compilation option.
+ * 3.07 Jan 29, 1996 -- More twiddling for broken drives: Sony 55D,
+ * Vertos 300.
+ * Add NO_DOOR_LOCKING configuration option.
+ * Handle drive_cmd requests w/NULL args (for hdparm -t).
+ * Work around sporadic Sony55e audio play problem.
+ * 3.07a Feb 11, 1996 -- check drive->id for NULL before dereferencing, to fix
+ * problem with "hde=cdrom" with no drive present. -ml
+ * 3.08 Mar 6, 1996 -- More Vertos workarounds.
+ * 3.09 Apr 5, 1996 -- Add CDROMCLOSETRAY ioctl.
+ * Switch to using MSF addressing for audio commands.
+ * Reformat to match kernel tabbing style.
+ * Add CDROM_GET_UPC ioctl.
+ * 3.10 Apr 10, 1996 -- Fix compilation error with STANDARD_ATAPI.
+ * 3.11 Apr 29, 1996 -- Patch from Heiko Eissfeldt <heiko@colossus.escape.de>
+ * to remove redundant verify_area calls.
+ * 3.12 May 7, 1996 -- Rudimentary changer support. Based on patches
+ * from Gerhard Zuber <zuber@berlin.snafu.de>.
+ * Let open succeed even if there's no loaded disc.
+ * 3.13 May 19, 1996 -- Fixes for changer code.
+ * 3.14 May 29, 1996 -- Add work-around for Vertos 600.
+ * (From Hennus Bergman <hennus@sky.ow.nl>.)
+ * 3.15 July 2, 1996 -- Added support for Sanyo 3 CD changers
+ * from Ben Galliart <bgallia@luc.edu> with
+ * special help from Jeff Lightfoot
+ * <jeffml@netcom.com>
+ * 3.15a July 9, 1996 -- Improved Sanyo 3 CD changer identification
+ * 3.16 Jul 28, 1996 -- Fix from Gadi to reduce kernel stack usage for ioctl.
+ * 3.17 Sep 17, 1996 -- Tweak audio reads for some drives.
+ * Start changing CDROMLOADFROMSLOT to CDROM_SELECT_DISC.
+ *
+ * 3.19 Nov 5, 1996 -- New ide-cd maintainer:
+ * Erik B. Andersen <andersee@debian.org>
+ * 3.20 Jan 13,1997 -- Bug Fixes:
+ * Fix errors on CDROMSTOP (If you have a "Dolphin",
+ * you must define IHAVEADOLPHIN)
+ * Added identifier so new Sanyo CD-changer works
+ * Better detection if door locking isn't supported
+ * 3.21 Jun 16,1997 -- Add work-around for GCD-R580B
+ *
+ * 3.22 Nov 13, 1998 -- New ide-cd maintainers:
+ * Jens Axboe <axboe@image.dk>
+ * Chris Zwilling <chris@cloudnet.com>
+ *
+ * NOTE: Direct audio reads will only work on some types of drive.
+ * So far, I've received reports of success for Sony and Toshiba drives.
+ *
+ * ALSO NOTE:
+ *
+ * The ide cdrom driver has undergone extensive changes for the
+ * latest development kernel. If you wish to add new features to
+ * this driver, make your changes to the latest version in the
+ * development kernel.  Only bug fixes will be accepted for this
+ * version.
+ *
+ * For those wishing to work on this driver, please be sure you download
+ * and comply with the latest ATAPI standard. This document can be
+ * obtained by anonymous ftp from fission.dt.wdc.com in directory:
+ * /pub/standards/atapi/spec/SFF8020-r2.6/PDF/8020r26.pdf
+ *
+ */
+
+
+/***************************************************************************/
+
+#ifdef MACH
+#include <kern/sched_prim.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/cdrom.h>
+#include <linux/ucdrom.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/segment.h>
+#include <asm/unaligned.h>
+
+#include "ide.h"
+
+
+
+/* Turn this on to have the driver print out the meanings of the
+ ATAPI error codes. This will use up additional kernel-space
+ memory, though. */
+
+#ifndef VERBOSE_IDE_CD_ERRORS
+#define VERBOSE_IDE_CD_ERRORS 0
+#endif
+
+
+/* Turning this on will remove code to work around various nonstandard
+ ATAPI implementations. If you know your drive follows the standard,
+ this will give you a slightly smaller kernel. */
+
+#ifndef STANDARD_ATAPI
+#define STANDARD_ATAPI 0
+#endif
+
+
+/* Turning this on will disable the door-locking functionality.
+ This is apparently needed for supermount. */
+
+#ifndef NO_DOOR_LOCKING
+#define NO_DOOR_LOCKING 0
+#endif
+
+
+/* Size of buffer to allocate, in blocks, for audio reads. */
+
+#ifndef CDROM_NBLOCKS_BUFFER
+#define CDROM_NBLOCKS_BUFFER 8
+#endif
+
+
+/************************************************************************/
+
+#define SECTOR_SIZE 512
+#define SECTOR_BITS 9
+#define SECTORS_PER_FRAME (CD_FRAMESIZE / SECTOR_SIZE)
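+/* With the usual 2048-byte CD_FRAMESIZE this works out to 4 sectors per frame. */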
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+
+/* special command codes for strategy routine. */
+#define PACKET_COMMAND 4315
+#define REQUEST_SENSE_COMMAND 4316
+#define RESET_DRIVE_COMMAND 4317
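+/* (The values themselves are arbitrary; they only need to stay clear of the
+   ordinary READ/WRITE codes used for regular block requests -- see
+   ide_do_rw_cdrom below.) */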
+
+/* Some ATAPI command opcodes (just like SCSI).
+ (Some other cdrom-specific codes are in cdrom.h.) */
+#define TEST_UNIT_READY 0x00
+#define REQUEST_SENSE 0x03
+#define START_STOP 0x1b
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define MODE_SENSE_10 0x5a
+#define MODE_SELECT_10 0x55
+#define READ_CD 0xbe
+
+#define LOAD_UNLOAD 0xa6
+
+
+/* ATAPI sense keys (mostly copied from scsi.h). */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define ABORTED_COMMAND 0x0b
+#define MISCOMPARE 0x0e
+
+/* We want some additional flags for cd-rom drives.
+ To save space in the ide_drive_t struct, use some fields which
+   don't make sense for cd-roms -- `bios_sect' and `bios_head'. */
+
+/* Configuration flags. These describe the capabilities of the drive.
+ They generally do not change after initialization, unless we learn
+ more about the drive from stuff failing. */
+struct ide_cd_config_flags {
+ __u8 drq_interrupt : 1; /* Device sends an interrupt when ready
+ for a packet command. */
+ __u8 no_doorlock : 1; /* Drive cannot lock the door. */
+#if ! STANDARD_ATAPI
+ __u8 old_readcd : 1; /* Drive uses old READ CD opcode. */
+ __u8 playmsf_as_bcd : 1; /* PLAYMSF command takes BCD args. */
+ __u8 tocaddr_as_bcd : 1; /* TOC addresses are in BCD. */
+ __u8 toctracks_as_bcd : 1; /* TOC track numbers are in BCD. */
+ __u8 subchan_as_bcd : 1; /* Subchannel info is in BCD. */
+#endif /* not STANDARD_ATAPI */
+ __u8 reserved : 1;
+};
+#define CDROM_CONFIG_FLAGS(drive) ((struct ide_cd_config_flags *)&((drive)->bios_sect))
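+/* (This flag structure, and ide_cd_state_flags below, is overlaid on a
+   byte-wide field of ide_drive_t -- assuming bios_sect and bios_head are
+   declared as bytes in ide.h -- so the bitfields in each must not add up
+   to more than 8 bits.) */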
+
+
+/* State flags. These give information about the current state of the
+ drive, and will change during normal operation. */
+struct ide_cd_state_flags {
+ __u8 media_changed : 1; /* Driver has noticed a media change. */
+ __u8 toc_valid : 1; /* Saved TOC information is current. */
+ __u8 door_locked : 1; /* We think that the drive door is locked. */
+ __u8 eject_on_close: 1; /* Drive should eject when device is closed. */
+ __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */
+ __u8 reserved : 2;
+};
+#define CDROM_STATE_FLAGS(drive) ((struct ide_cd_state_flags *)&((drive)->bios_head))
+
+
+#define SECTOR_BUFFER_SIZE CD_FRAMESIZE
+
+
+
+/****************************************************************************
+ * Routines to read and write data from/to the drive, using
+ * the routines ide_input_data() and ide_output_data() from ide.c.
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd bytecount is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+
+
+static inline
+void cdrom_in_bytes (ide_drive_t *drive, void *buffer, uint bytecount)
+{
+ ++bytecount;
+ ide_input_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2) {
+ insw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+ }
+}
+
+
+static inline
+void cdrom_out_bytes (ide_drive_t *drive, void *buffer, uint bytecount)
+{
+ ++bytecount;
+ ide_output_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2) {
+ outsw (IDE_DATA_REG,
+ ((byte *)buffer) + (bytecount & ~0x03), 1);
+ }
+}
+
+
+
+/****************************************************************************
+ * Descriptions of ATAPI error codes.
+ */
+
+#define ARY_LEN(a) ((sizeof(a) / sizeof(a[0])))
+
+#if VERBOSE_IDE_CD_ERRORS
+
+/* From Table 124 of the ATAPI 1.2 spec. */
+
+char *sense_key_texts[16] = {
+ "No sense data",
+ "Recovered error",
+ "Not ready",
+ "Medium error",
+ "Hardware error",
+ "Illegal request",
+ "Unit attention",
+ "Data protect",
+ "(reserved)",
+ "(reserved)",
+ "(reserved)",
+ "Aborted command",
+ "(reserved)",
+ "(reserved)",
+ "Miscompare",
+ "(reserved)",
+};
+
+
+/* From Table 125 of the ATAPI 1.2 spec. */
+
+struct {
+ short asc_ascq;
+ char *text;
+} sense_data_texts[] = {
+ { 0x0000, "No additional sense information" },
+ { 0x0011, "Audio play operation in progress" },
+ { 0x0012, "Audio play operation paused" },
+ { 0x0013, "Audio play operation successfully completed" },
+ { 0x0014, "Audio play operation stopped due to error" },
+ { 0x0015, "No current audio status to return" },
+
+ { 0x0200, "No seek complete" },
+
+ { 0x0400, "Logical unit not ready - cause not reportable" },
+ { 0x0401,
+ "Logical unit not ready - in progress (sic) of becoming ready" },
+ { 0x0402, "Logical unit not ready - initializing command required" },
+ { 0x0403, "Logical unit not ready - manual intervention required" },
+
+ { 0x0600, "No reference position found" },
+
+ { 0x0900, "Track following error" },
+ { 0x0901, "Tracking servo failure" },
+ { 0x0902, "Focus servo failure" },
+ { 0x0903, "Spindle servo failure" },
+
+ { 0x1100, "Unrecovered read error" },
+ { 0x1106, "CIRC unrecovered error" },
+
+ { 0x1500, "Random positioning error" },
+ { 0x1501, "Mechanical positioning error" },
+ { 0x1502, "Positioning error detected by read of medium" },
+
+ { 0x1700, "Recovered data with no error correction applied" },
+ { 0x1701, "Recovered data with retries" },
+ { 0x1702, "Recovered data with positive head offset" },
+ { 0x1703, "Recovered data with negative head offset" },
+ { 0x1704, "Recovered data with retries and/or CIRC applied" },
+ { 0x1705, "Recovered data using previous sector ID" },
+
+ { 0x1800, "Recovered data with error correction applied" },
+ { 0x1801, "Recovered data with error correction and retries applied" },
+ { 0x1802, "Recovered data - the data was auto-reallocated" },
+ { 0x1803, "Recovered data with CIRC" },
+ { 0x1804, "Recovered data with L-EC" },
+ { 0x1805, "Recovered data - recommend reassignment" },
+ { 0x1806, "Recovered data - recommend rewrite" },
+
+ { 0x1a00, "Parameter list length error" },
+
+ { 0x2000, "Invalid command operation code" },
+
+ { 0x2100, "Logical block address out of range" },
+
+ { 0x2400, "Invalid field in command packet" },
+
+ { 0x2600, "Invalid field in parameter list" },
+ { 0x2601, "Parameter not supported" },
+ { 0x2602, "Parameter value invalid" },
+ { 0x2603, "Threshold parameters not supported" },
+
+ { 0x2800, "Not ready to ready transition, medium may have changed" },
+
+ { 0x2900, "Power on, reset or bus device reset occurred" },
+
+ { 0x2a00, "Parameters changed" },
+ { 0x2a01, "Mode parameters changed" },
+
+ { 0x3000, "Incompatible medium installed" },
+ { 0x3001, "Cannot read medium - unknown format" },
+ { 0x3002, "Cannot read medium - incompatible format" },
+
+ { 0x3700, "Rounded parameter" },
+
+ { 0x3900, "Saving parameters not supported" },
+
+ { 0x3a00, "Medium not present" },
+
+ { 0x3f00, "ATAPI CD-ROM drive operating conditions have changed" },
+ { 0x3f01, "Microcode has been changed" },
+ { 0x3f02, "Changed operating definition" },
+ { 0x3f03, "Inquiry data has changed" },
+
+ { 0x4000, "Diagnostic failure on component (ASCQ)" },
+
+ { 0x4400, "Internal ATAPI CD-ROM drive failure" },
+
+ { 0x4e00, "Overlapped commands attempted" },
+
+ { 0x5300, "Media load or eject failed" },
+ { 0x5302, "Medium removal prevented" },
+
+ { 0x5700, "Unable to recover table of contents" },
+
+ { 0x5a00, "Operator request or state change input (unspecified)" },
+ { 0x5a01, "Operator medium removal request" },
+
+ { 0x5b00, "Threshold condition met" },
+
+ { 0x5c00, "Status change" },
+
+ { 0x6300, "End of user area encountered on this track" },
+
+ { 0x6400, "Illegal mode for this track" },
+
+ { 0xbf00, "Loss of streaming" },
+};
+#endif
+
+
+
+/****************************************************************************
+ * Generic packet command support and error handling routines.
+ */
+
+
+static
+void cdrom_analyze_sense_data (ide_drive_t *drive,
+ struct atapi_request_sense *reqbuf,
+ struct packet_command *failed_command)
+{
+ /* Don't print not ready or unit attention errors for READ_SUBCHANNEL.
+ Workman (and probably other programs) uses this command to poll
+ the drive, and we don't want to fill the syslog
+ with useless errors. */
+ if (failed_command &&
+ failed_command->c[0] == SCMD_READ_SUBCHANNEL &&
+ (reqbuf->sense_key == NOT_READY ||
+ reqbuf->sense_key == UNIT_ATTENTION))
+ return;
+
+#if VERBOSE_IDE_CD_ERRORS
+ {
+ int i;
+ char *s;
+ char buf[80];
+
+ printk ("ATAPI device %s:\n", drive->name);
+
+ printk (" Error code: 0x%02x\n", reqbuf->error_code);
+
+ if (reqbuf->sense_key >= 0 &&
+ reqbuf->sense_key < ARY_LEN (sense_key_texts))
+ s = sense_key_texts[reqbuf->sense_key];
+ else
+ s = "(bad sense key)";
+
+ printk (" Sense key: 0x%02x - %s\n", reqbuf->sense_key, s);
+
+ if (reqbuf->asc == 0x40) {
+ sprintf (buf, "Diagnostic failure on component 0x%02x",
+ reqbuf->ascq);
+ s = buf;
+ } else {
+ int lo, hi;
+ int key = (reqbuf->asc << 8);
+ if ( ! (reqbuf->ascq >= 0x80 && reqbuf->ascq <= 0xdd) )
+ key |= reqbuf->ascq;
+
+ lo = 0;
+ hi = ARY_LEN (sense_data_texts);
+ s = NULL;
+
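+			/* Binary search sense_data_texts, which is sorted
+			   by ascending asc_ascq. */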
+ while (hi > lo) {
+ int mid = (lo + hi) / 2;
+ if (sense_data_texts[mid].asc_ascq == key) {
+ s = sense_data_texts[mid].text;
+ break;
+ }
+ else if (sense_data_texts[mid].asc_ascq > key)
+ hi = mid;
+ else
+ lo = mid+1;
+ }
+ }
+
+ if (s == NULL) {
+ if (reqbuf->asc > 0x80)
+ s = "(vendor-specific error)";
+ else
+ s = "(reserved error code)";
+ }
+
+ printk (" Additional sense data: 0x%02x, 0x%02x - %s\n",
+ reqbuf->asc, reqbuf->ascq, s);
+
+ if (failed_command != NULL) {
+ printk (" Failed packet command: ");
+ for (i=0; i<sizeof (failed_command->c); i++)
+ printk ("%02x ", failed_command->c[i]);
+ printk ("\n");
+ }
+
+ if (reqbuf->sense_key == ILLEGAL_REQUEST &&
+ (reqbuf->sense_key_specific[0] & 0x80) != 0) {
+ printk (" Error in %s byte %d",
+ (reqbuf->sense_key_specific[0] & 0x40) != 0
+ ? "command packet"
+ : "command data",
+ (reqbuf->sense_key_specific[1] << 8) +
+ reqbuf->sense_key_specific[2]);
+
+ if ((reqbuf->sense_key_specific[0] & 0x40) != 0) {
+ printk (" bit %d",
+ reqbuf->sense_key_specific[0] & 0x07);
+ }
+
+ printk ("\n");
+ }
+ }
+
+#else /* not VERBOSE_IDE_CD_ERRORS */
+
+ /* Suppress printing unit attention and `in progress of becoming ready'
+ errors when we're not being verbose. */
+
+ if (reqbuf->sense_key == UNIT_ATTENTION ||
+ (reqbuf->sense_key == NOT_READY && (reqbuf->asc == 4 ||
+ reqbuf->asc == 0x3a)))
+ return;
+
+ printk ("%s: code: 0x%02x key: 0x%02x asc: 0x%02x ascq: 0x%02x\n",
+ drive->name,
+ reqbuf->error_code, reqbuf->sense_key,
+ reqbuf->asc, reqbuf->ascq);
+#endif /* not VERBOSE_IDE_CD_ERRORS */
+}
+
+
+/* Fix up a possibly partially-processed request so that we can
+ start it over entirely, or even put it back on the request queue. */
+static void restore_request (struct request *rq)
+{
+ if (rq->buffer != rq->bh->b_data) {
+ int n = (rq->buffer - rq->bh->b_data) / SECTOR_SIZE;
+ rq->buffer = rq->bh->b_data;
+ rq->nr_sectors += n;
+ rq->sector -= n;
+ }
+ rq->current_nr_sectors = rq->bh->b_size >> SECTOR_BITS;
+}
+
+
+static void cdrom_queue_request_sense (ide_drive_t *drive,
+ struct semaphore *sem,
+ struct atapi_request_sense *reqbuf,
+ struct packet_command *failed_command)
+{
+ struct request *rq;
+ struct packet_command *pc;
+ int len;
+
+ /* If the request didn't explicitly specify where
+ to put the sense data, use the statically allocated structure. */
+ if (reqbuf == NULL)
+ reqbuf = &drive->cdrom_info.sense_data;
+
+ /* Make up a new request to retrieve sense information. */
+
+ pc = &HWIF(drive)->request_sense_pc;
+ memset (pc, 0, sizeof (*pc));
+
+ /* The request_sense structure has an odd number of (16-bit) words,
+ which won't work well with 32-bit transfers. However, we don't care
+ about the last two bytes, so just truncate the structure down
+ to an even length. */
+ len = sizeof (*reqbuf) / 4;
+ len *= 4;
+
+ pc->c[0] = REQUEST_SENSE;
+ pc->c[4] = len;
+ pc->buffer = (unsigned char *)reqbuf;
+ pc->buflen = len;
+ pc->sense_data = (struct atapi_request_sense *)failed_command;
+
+ /* stuff the sense request in front of our current request */
+
+ rq = &HWIF(drive)->request_sense_request;
+ ide_init_drive_cmd (rq);
+ rq->cmd = REQUEST_SENSE_COMMAND;
+ rq->buffer = (char *)pc;
+ rq->sem = sem;
+ (void) ide_do_drive_cmd (drive, rq, ide_preempt);
+}
+
+
+static void cdrom_end_request (int uptodate, ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+
+ if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate && !rq->quiet) {
+ struct packet_command *pc = (struct packet_command *)
+ rq->buffer;
+ cdrom_analyze_sense_data (drive,
+ (struct atapi_request_sense *)
+ (pc->buffer - pc->c[4]),
+ (struct packet_command *)
+ pc->sense_data);
+ }
+
+ ide_end_request (uptodate, HWGROUP(drive));
+}
+
+
+/* Mark that we've seen a media change, and invalidate our internal
+ buffers. */
+static void cdrom_saw_media_change (ide_drive_t *drive)
+{
+ CDROM_STATE_FLAGS (drive)->media_changed = 1;
+ CDROM_STATE_FLAGS (drive)->toc_valid = 0;
+ drive->cdrom_info.nsectors_buffered = 0;
+}
+
+
+/* Returns 0 if the request should be continued.
+ Returns 1 if the request was ended. */
+static int cdrom_decode_status (ide_drive_t *drive, int good_stat,
+ int *stat_ret)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ int stat, err, sense_key, cmd;
+
+ /* Check for errors. */
+ stat = GET_STAT();
+ *stat_ret = stat;
+
+ if (OK_STAT (stat, good_stat, BAD_R_STAT))
+ return 0;
+
+ /* Got an error. */
+ err = IN_BYTE (IDE_ERROR_REG);
+ sense_key = err >> 4;
+
+ if (rq == NULL)
+ printk ("%s : missing request in cdrom_decode_status\n",
+ drive->name);
+ else {
+ cmd = rq->cmd;
+
+ if (cmd == REQUEST_SENSE_COMMAND) {
+ /* We got an error trying to get sense info
+ from the drive (probably while trying
+ to recover from a former error). Just give up. */
+
+ struct packet_command *pc = (struct packet_command *)
+ rq->buffer;
+ pc->stat = 1;
+ cdrom_end_request (1, drive);
+ ide_error (drive, "request sense failure", stat);
+ return 1;
+
+ } else if (cmd == PACKET_COMMAND) {
+ /* All other functions, except for READ. */
+
+ struct packet_command *pc = (struct packet_command *)
+ rq->buffer;
+ struct semaphore *sem = NULL;
+
+ /* Check for tray open. */
+ if (sense_key == NOT_READY) {
+ cdrom_saw_media_change (drive);
+
+ /* Print an error message to the syslog.
+ Exception: don't print anything if this
+ is a read subchannel command. This is
+ because workman constantly polls the drive
+ with this command, and we don't want
+ to uselessly fill up the syslog. */
+ if (pc->c[0] != SCMD_READ_SUBCHANNEL && !rq->quiet)
+ printk ("%s : tray open or drive not ready\n",
+ drive->name);
+ } else if (sense_key == UNIT_ATTENTION) {
+ /* Check for media change. */
+ cdrom_saw_media_change (drive);
+ if (!rq->quiet)
+ printk ("%s: media changed\n", drive->name);
+ } else {
+ /* Otherwise, print an error. */
+ if (!rq->quiet)
+ ide_dump_status (drive, "packet command error",
+ stat);
+ }
+
+ /* Set the error flag and complete the request.
+ Then, if we have a CHECK CONDITION status,
+ queue a request sense command. We must be careful,
+ though: we don't want the thread in
+ cdrom_queue_packet_command to wake up until
+ the request sense has completed. We do this
+ by transferring the semaphore from the packet
+ command request to the request sense request. */
+
+ if ((stat & ERR_STAT) != 0) {
+ sem = rq->sem;
+ rq->sem = NULL;
+ }
+
+ pc->stat = 1;
+ cdrom_end_request (1, drive);
+
+ if ((stat & ERR_STAT) != 0)
+ cdrom_queue_request_sense (drive, sem,
+ pc->sense_data, pc);
+ } else {
+ /* Handle errors from READ requests. */
+
+ if (sense_key == NOT_READY) {
+ /* Tray open. */
+ cdrom_saw_media_change (drive);
+
+ /* Fail the request. */
+ if (!rq->quiet)
+ printk ("%s : tray open\n", drive->name);
+ cdrom_end_request (0, drive);
+ } else if (sense_key == UNIT_ATTENTION) {
+ /* Media change. */
+ cdrom_saw_media_change (drive);
+
+ /* Arrange to retry the request.
+ But be sure to give up if we've retried
+ too many times. */
+ if (++rq->errors > ERROR_MAX)
+ cdrom_end_request (0, drive);
+ } else if (sense_key == ILLEGAL_REQUEST ||
+ sense_key == DATA_PROTECT) {
+ /* No point in retrying after an illegal
+ request or data protect error.*/
+ if (!rq->quiet)
+ ide_dump_status (drive, "command error", stat);
+ cdrom_end_request (0, drive);
+ } else if ((err & ~ABRT_ERR) != 0) {
+ /* Go to the default handler
+ for other errors. */
+ ide_error (drive, "cdrom_decode_status", stat);
+ return 1;
+ } else if ((++rq->errors > ERROR_MAX)) {
+ /* We've racked up too many retries. Abort. */
+ cdrom_end_request (0, drive);
+ }
+
+ /* If we got a CHECK_CONDITION status,
+ queue a request sense command. */
+ if ((stat & ERR_STAT) != 0)
+ cdrom_queue_request_sense (drive,
+ NULL, NULL, NULL);
+ }
+ }
+
+ /* Retry, or handle the next request. */
+ return 1;
+}
+
+
+/* Set up the device registers for transferring a packet command on DEV,
+ expecting to later transfer XFERLEN bytes. HANDLER is the routine
+ which actually transfers the command to the drive. If this is a
+ drq_interrupt device, this routine will arrange for HANDLER to be
+ called when the interrupt from the drive arrives. Otherwise, HANDLER
+ will be called immediately after the drive is prepared for the transfer. */
+
+static int cdrom_start_packet_command (ide_drive_t *drive, int xferlen,
+ ide_handler_t *handler)
+{
+ /* Wait for the controller to be idle. */
+ if (ide_wait_stat (drive, 0, BUSY_STAT, WAIT_READY)) return 1;
+
+ /* Set up the controller registers. */
+ OUT_BYTE (0, IDE_FEATURE_REG);
+ OUT_BYTE (0, IDE_NSECTOR_REG);
+ OUT_BYTE (0, IDE_SECTOR_REG);
+
+ OUT_BYTE (xferlen & 0xff, IDE_LCYL_REG);
+ OUT_BYTE (xferlen >> 8 , IDE_HCYL_REG);
+ OUT_BYTE (drive->ctl, IDE_CONTROL_REG);
+
+ if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
+ ide_set_handler (drive, handler, WAIT_CMD);
+ OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
+ } else {
+ OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
+ (*handler) (drive);
+ }
+
+ return 0;
+}
+
+
+/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN.
+ The device registers must have already been prepared
+ by cdrom_start_packet_command.
+ HANDLER is the interrupt handler to call when the command completes
+ or there's data ready. */
+static int cdrom_transfer_packet_command (ide_drive_t *drive,
+ unsigned char *cmd_buf, int cmd_len,
+ ide_handler_t *handler)
+{
+ if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
+ /* Here we should have been called after receiving an interrupt
+		   from the device.  DRQ should now be set. */
+ int stat_dum;
+
+ /* Check for errors. */
+ if (cdrom_decode_status (drive, DRQ_STAT, &stat_dum)) return 1;
+ } else {
+ /* Otherwise, we must wait for DRQ to get set. */
+ if (ide_wait_stat (drive, DRQ_STAT, BUSY_STAT, WAIT_READY))
+ return 1;
+ }
+
+ /* Arm the interrupt handler. */
+ ide_set_handler (drive, handler, WAIT_CMD);
+
+ /* Send the command to the device. */
+ cdrom_out_bytes (drive, cmd_buf, cmd_len);
+
+ return 0;
+}
+
+
+
+/****************************************************************************
+ * Block read functions.
+ */
+
+/*
+ * Buffer up to SECTORS_TO_TRANSFER sectors from the drive in our sector
+ * buffer. Once the first sector is added, any subsequent sectors are
+ * assumed to be continuous (until the buffer is cleared). For the first
+ * sector added, SECTOR is its sector number. (SECTOR is then ignored until
+ * the buffer is cleared.)
+ */
+static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
+ int sectors_to_transfer)
+{
+ struct cdrom_info *info = &drive->cdrom_info;
+
+ /* Number of sectors to read into the buffer. */
+ int sectors_to_buffer = MIN (sectors_to_transfer,
+ (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
+ info->nsectors_buffered);
+
+ char *dest;
+
+ /* If we don't yet have a sector buffer, try to allocate one.
+ If we can't get one atomically, it's not fatal -- we'll just throw
+ the data away rather than caching it. */
+ if (info->sector_buffer == NULL) {
+ info->sector_buffer = (char *) kmalloc (SECTOR_BUFFER_SIZE,
+ GFP_ATOMIC);
+
+ /* If we couldn't get a buffer,
+ don't try to buffer anything... */
+ if (info->sector_buffer == NULL)
+ sectors_to_buffer = 0;
+ }
+
+ /* If this is the first sector in the buffer, remember its number. */
+ if (info->nsectors_buffered == 0)
+ info->sector_buffered = sector;
+
+ /* Read the data into the buffer. */
+ dest = info->sector_buffer + info->nsectors_buffered * SECTOR_SIZE;
+ while (sectors_to_buffer > 0) {
+ cdrom_in_bytes (drive, dest, SECTOR_SIZE);
+ --sectors_to_buffer;
+ --sectors_to_transfer;
+ ++info->nsectors_buffered;
+ dest += SECTOR_SIZE;
+ }
+
+ /* Throw away any remaining data. */
+ while (sectors_to_transfer > 0) {
+ char dum[SECTOR_SIZE];
+ cdrom_in_bytes (drive, dum, sizeof (dum));
+ --sectors_to_transfer;
+ }
+}
+
+
+/*
+ * Check the contents of the interrupt reason register from the cdrom
+ * and attempt to recover if there are problems. Returns 0 if everything's
+ * ok; nonzero if the request has been terminated.
+ */
+static inline
+int cdrom_read_check_ireason (ide_drive_t *drive, int len, int ireason)
+{
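+	/* The two low bits of the interrupt reason register are CoD (bit 0)
+	   and IO (bit 1).  A value of 2 (IO set, CoD clear) means the drive
+	   is ready to send data to the host, which is what we expect here. */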
+ ireason &= 3;
+ if (ireason == 2) return 0;
+
+ if (ireason == 0) {
+ /* Whoops... The drive is expecting to receive data from us! */
+ printk ("%s: cdrom_read_intr: "
+ "Drive wants to transfer data the wrong way!\n",
+ drive->name);
+
+ /* Throw some data at the drive so it doesn't hang
+ and quit this request. */
+ while (len > 0) {
+ int dum = 0;
+ cdrom_out_bytes (drive, &dum, sizeof (dum));
+ len -= sizeof (dum);
+ }
+ } else {
+ /* Drive wants a command packet, or invalid ireason... */
+ printk ("%s: cdrom_read_intr: bad interrupt reason %d\n",
+ drive->name, ireason);
+ }
+
+ cdrom_end_request (0, drive);
+ return -1;
+}
+
+
+/*
+ * Interrupt routine. Called when a read request has completed.
+ */
+static void cdrom_read_intr (ide_drive_t *drive)
+{
+ int stat;
+ int ireason, len, sectors_to_transfer, nskip;
+
+ struct request *rq = HWGROUP(drive)->rq;
+
+ /* Check for errors. */
+ if (cdrom_decode_status (drive, 0, &stat)) return;
+
+ /* Read the interrupt reason and the transfer length. */
+ ireason = IN_BYTE (IDE_NSECTOR_REG);
+ len = IN_BYTE (IDE_LCYL_REG) + 256 * IN_BYTE (IDE_HCYL_REG);
+
+ /* If DRQ is clear, the command has completed. */
+ if ((stat & DRQ_STAT) == 0) {
+ /* If we're not done filling the current buffer, complain.
+ Otherwise, complete the command normally. */
+ if (rq->current_nr_sectors > 0) {
+ printk ("%s: cdrom_read_intr: data underrun (%ld blocks)\n",
+ drive->name, rq->current_nr_sectors);
+ cdrom_end_request (0, drive);
+ } else
+ cdrom_end_request (1, drive);
+
+ return;
+ }
+
+ /* Check that the drive is expecting to do the same thing we are. */
+ if (cdrom_read_check_ireason (drive, len, ireason)) return;
+
+ /* Assume that the drive will always provide data in multiples
+ of at least SECTOR_SIZE, as it gets hairy to keep track
+ of the transfers otherwise. */
+ if ((len % SECTOR_SIZE) != 0) {
+ printk ("%s: cdrom_read_intr: Bad transfer size %d\n",
+ drive->name, len);
+ printk (" This drive is not supported by this version of the driver\n");
+ cdrom_end_request (0, drive);
+ return;
+ }
+
+ /* The number of sectors we need to read from the drive. */
+ sectors_to_transfer = len / SECTOR_SIZE;
+
+ /* First, figure out if we need to bit-bucket
+ any of the leading sectors. */
+ nskip = MIN ((int)(rq->current_nr_sectors -
+ (rq->bh->b_size >> SECTOR_BITS)),
+ sectors_to_transfer);
+
+ while (nskip > 0) {
+ /* We need to throw away a sector. */
+ char dum[SECTOR_SIZE];
+ cdrom_in_bytes (drive, dum, sizeof (dum));
+
+ --rq->current_nr_sectors;
+ --nskip;
+ --sectors_to_transfer;
+ }
+
+ /* Now loop while we still have data to read from the drive. */
+ while (sectors_to_transfer > 0) {
+ int this_transfer;
+
+ /* If we've filled the present buffer but there's another
+ chained buffer after it, move on. */
+ if (rq->current_nr_sectors == 0 &&
+ rq->nr_sectors > 0)
+ cdrom_end_request (1, drive);
+
+ /* If the buffers are full, cache the rest of the data in our
+ internal buffer. */
+ if (rq->current_nr_sectors == 0) {
+ cdrom_buffer_sectors (drive,
+ rq->sector, sectors_to_transfer);
+ sectors_to_transfer = 0;
+ } else {
+ /* Transfer data to the buffers.
+ Figure out how many sectors we can transfer
+ to the current buffer. */
+ this_transfer = MIN (sectors_to_transfer,
+ rq->current_nr_sectors);
+
+ /* Read this_transfer sectors
+ into the current buffer. */
+ while (this_transfer > 0) {
+ cdrom_in_bytes (drive
+ , rq->buffer, SECTOR_SIZE);
+ rq->buffer += SECTOR_SIZE;
+ --rq->nr_sectors;
+ --rq->current_nr_sectors;
+ ++rq->sector;
+ --this_transfer;
+ --sectors_to_transfer;
+ }
+ }
+ }
+
+ /* Done moving data!
+ Wait for another interrupt. */
+ ide_set_handler (drive, &cdrom_read_intr, WAIT_CMD);
+}
+
+
+/*
+ * Try to satisfy some of the current read request from our cached data.
+ * Returns nonzero if the request has been completed, zero otherwise.
+ */
+static int cdrom_read_from_buffer (ide_drive_t *drive)
+{
+ struct cdrom_info *info = &drive->cdrom_info;
+ struct request *rq = HWGROUP(drive)->rq;
+
+ /* Can't do anything if there's no buffer. */
+ if (info->sector_buffer == NULL) return 0;
+
+ /* Loop while this request needs data and the next block is present
+ in our cache. */
+ while (rq->nr_sectors > 0 &&
+ rq->sector >= info->sector_buffered &&
+ rq->sector < info->sector_buffered + info->nsectors_buffered) {
+ if (rq->current_nr_sectors == 0)
+ cdrom_end_request (1, drive);
+
+ memcpy (rq->buffer,
+ info->sector_buffer +
+ (rq->sector - info->sector_buffered) * SECTOR_SIZE,
+ SECTOR_SIZE);
+ rq->buffer += SECTOR_SIZE;
+ --rq->current_nr_sectors;
+ --rq->nr_sectors;
+ ++rq->sector;
+ }
+
+ /* If we've satisfied the current request,
+ terminate it successfully. */
+ if (rq->nr_sectors == 0) {
+ cdrom_end_request (1, drive);
+ return -1;
+ }
+
+ /* Move on to the next buffer if needed. */
+ if (rq->current_nr_sectors == 0)
+ cdrom_end_request (1, drive);
+
+	/* If this condition does not hold, then the kluge I use to
+ represent the number of sectors to skip at the start of a transfer
+ will fail. I think that this will never happen, but let's be
+ paranoid and check. */
+ if (rq->current_nr_sectors < (rq->bh->b_size >> SECTOR_BITS) &&
+ (rq->sector % SECTORS_PER_FRAME) != 0) {
+ printk ("%s: cdrom_read_from_buffer: buffer botch (%ld)\n",
+ drive->name, rq->sector);
+ cdrom_end_request (0, drive);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+/*
+ * Routine to send a read packet command to the drive.
+ * This is usually called directly from cdrom_start_read.
+ * However, for drq_interrupt devices, it is called from an interrupt
+ * when the drive is ready to accept the command.
+ */
+static void cdrom_start_read_continuation (ide_drive_t *drive)
+{
+ struct packet_command pc;
+ struct request *rq = HWGROUP(drive)->rq;
+
+ int nsect, sector, nframes, frame, nskip;
+
+ /* Number of sectors to transfer. */
+ nsect = rq->nr_sectors;
+
+#if !STANDARD_ATAPI
+ if (nsect > drive->cdrom_info.max_sectors)
+ nsect = drive->cdrom_info.max_sectors;
+#endif /* not STANDARD_ATAPI */
+
+ /* Starting sector. */
+ sector = rq->sector;
+
+ /* If the requested sector doesn't start on a cdrom block boundary,
+ we must adjust the start of the transfer so that it does,
+ and remember to skip the first few sectors.
+ If the CURRENT_NR_SECTORS field is larger than the size
+ of the buffer, it will mean that we're to skip a number
+ of sectors equal to the amount by which CURRENT_NR_SECTORS
+ is larger than the buffer size. */
+ nskip = (sector % SECTORS_PER_FRAME);
+ if (nskip > 0) {
+ /* Sanity check... */
+ if (rq->current_nr_sectors !=
+ (rq->bh->b_size >> SECTOR_BITS)) {
+ printk ("%s: cdrom_start_read_continuation: buffer botch (%ld)\n",
+ drive->name, rq->current_nr_sectors);
+ cdrom_end_request (0, drive);
+ return;
+ }
+
+ sector -= nskip;
+ nsect += nskip;
+ rq->current_nr_sectors += nskip;
+ }
+
+ /* Convert from sectors to cdrom blocks, rounding up the transfer
+ length if needed. */
+ nframes = (nsect + SECTORS_PER_FRAME-1) / SECTORS_PER_FRAME;
+ frame = sector / SECTORS_PER_FRAME;
+
+	/* Largest number of frames we can transfer at once is 64k-1. */
+ nframes = MIN (nframes, 65535);
+
+ /* Set up the command */
+ memset (&pc.c, 0, sizeof (pc.c));
+ pc.c[0] = READ_10;
+ pc.c[7] = (nframes >> 8);
+ pc.c[8] = (nframes & 0xff);
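+	/* Bytes 2-5 of the READ(10) packet carry the starting frame as a
+	   big-endian 32-bit value, hence the htonl below. */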
+ put_unaligned(htonl (frame), (unsigned int *) &pc.c[2]);
+
+ /* Send the command to the drive and return. */
+ (void) cdrom_transfer_packet_command (drive, pc.c, sizeof (pc.c),
+ &cdrom_read_intr);
+}
+
+
+/*
+ * Start a read request from the CD-ROM.
+ */
+static void cdrom_start_read (ide_drive_t *drive, unsigned int block)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ int minor = MINOR (rq->rq_dev);
+
+ /* If the request is relative to a partition, fix it up to refer to the
+ absolute address. */
+ if ((minor & PARTN_MASK) != 0) {
+ rq->sector = block;
+ minor &= ~PARTN_MASK;
+ rq->rq_dev = MKDEV (MAJOR(rq->rq_dev), minor);
+ }
+
+ /* We may be retrying this request after an error. Fix up
+ any weirdness which might be present in the request packet. */
+ restore_request (rq);
+
+ /* Satisfy whatever we can of this request from our cached sector. */
+ if (cdrom_read_from_buffer (drive))
+ return;
+
+ /* Clear the local sector buffer. */
+ drive->cdrom_info.nsectors_buffered = 0;
+
+ /* Start sending the read request to the drive. */
+ cdrom_start_packet_command (drive, 32768,
+ cdrom_start_read_continuation);
+}
+
+
+
+
+/****************************************************************************
+ * Execute all other packet commands.
+ */
+
+/* Forward declarations. */
+static int
+cdrom_lockdoor (ide_drive_t *drive, int lockflag,
+ struct atapi_request_sense *reqbuf);
+
+
+
+/* Interrupt routine for packet command completion. */
+static void cdrom_pc_intr (ide_drive_t *drive)
+{
+ int ireason, len, stat, thislen;
+ struct request *rq = HWGROUP(drive)->rq;
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+ /* Check for errors. */
+ if (cdrom_decode_status (drive, 0, &stat)) return;
+
+ /* Read the interrupt reason and the transfer length. */
+ ireason = IN_BYTE (IDE_NSECTOR_REG);
+ len = IN_BYTE (IDE_LCYL_REG) + 256 * IN_BYTE (IDE_HCYL_REG);
+
+ /* If DRQ is clear, the command has completed.
+ Complain if we still have data left to transfer. */
+ if ((stat & DRQ_STAT) == 0) {
+ /* Some of the trailing request sense fields are optional, and
+ some drives don't send them. Sigh. */
+ if (pc->c[0] == REQUEST_SENSE &&
+ pc->buflen > 0 &&
+ pc->buflen <= 5) {
+ while (pc->buflen > 0) {
+ *pc->buffer++ = 0;
+ --pc->buflen;
+ }
+ }
+
+ if (pc->buflen == 0)
+ cdrom_end_request (1, drive);
+ else {
+ printk ("%s: cdrom_pc_intr: data underrun %d\n",
+ drive->name, pc->buflen);
+ pc->stat = 1;
+ cdrom_end_request (1, drive);
+ }
+ return;
+ }
+
+ /* Figure out how much data to transfer. */
+ thislen = pc->buflen;
+ if (thislen < 0) thislen = -thislen;
+ if (thislen > len) thislen = len;
+
+ /* The drive wants to be written to. */
+ if ((ireason & 3) == 0) {
+ /* Check that we want to write. */
+ if (pc->buflen > 0) {
+ printk ("%s: cdrom_pc_intr: Drive wants "
+ "to transfer data the wrong way!\n",
+ drive->name);
+ pc->stat = 1;
+ thislen = 0;
+ }
+
+ /* Transfer the data. */
+ cdrom_out_bytes (drive, pc->buffer, thislen);
+
+ /* If we haven't moved enough data to satisfy the drive,
+ add some padding. */
+ while (len > thislen) {
+ int dum = 0;
+ cdrom_out_bytes (drive, &dum, sizeof (dum));
+ len -= sizeof (dum);
+ }
+
+ /* Keep count of how much data we've moved. */
+ pc->buffer += thislen;
+ pc->buflen += thislen;
+ }
+
+ /* Same drill for reading. */
+ else if ((ireason & 3) == 2) {
+ /* Check that we want to read. */
+ if (pc->buflen < 0) {
+ printk ("%s: cdrom_pc_intr: Drive wants to "
+ "transfer data the wrong way!\n",
+ drive->name);
+ pc->stat = 1;
+ thislen = 0;
+ }
+
+ /* Transfer the data. */
+ cdrom_in_bytes (drive, pc->buffer, thislen);
+
+ /* If we haven't moved enough data to satisfy the drive,
+ add some padding. */
+ while (len > thislen) {
+ int dum = 0;
+ cdrom_in_bytes (drive, &dum, sizeof (dum));
+ len -= sizeof (dum);
+ }
+
+ /* Keep count of how much data we've moved. */
+ pc->buffer += thislen;
+ pc->buflen -= thislen;
+ } else {
+ printk ("%s: cdrom_pc_intr: The drive "
+ "appears confused (ireason = 0x%2x)\n",
+ drive->name, ireason);
+ pc->stat = 1;
+ }
+
+ /* Now we wait for another interrupt. */
+ ide_set_handler (drive, &cdrom_pc_intr, WAIT_CMD);
+}
+
+
+static void cdrom_do_pc_continuation (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+ /* Send the command to the drive and return. */
+ cdrom_transfer_packet_command (drive, pc->c,
+ sizeof (pc->c), &cdrom_pc_intr);
+}
+
+
+static void cdrom_do_packet_command (ide_drive_t *drive)
+{
+ int len;
+ struct request *rq = HWGROUP(drive)->rq;
+ struct packet_command *pc = (struct packet_command *)rq->buffer;
+
+ len = pc->buflen;
+ if (len < 0) len = -len;
+
+ pc->stat = 0;
+
+ /* Start sending the command to the drive. */
+ cdrom_start_packet_command (drive, len, cdrom_do_pc_continuation);
+}
+
+/* Sleep for TIME jiffies.
+ Not to be called from an interrupt handler. */
+#ifdef MACH
+static
+void cdrom_sleep (int time)
+{
+ int xxx;
+
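+	/* The address of the local variable above serves as a unique wait
+	   event; the thread blocks in schedule () until the timeout set
+	   below fires. */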
+ assert_wait ((event_t) &xxx, TRUE);
+ thread_set_timeout (time);
+ schedule ();
+}
+#else
+static
+void cdrom_sleep (int time)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ current->timeout = jiffies + time;
+ schedule ();
+}
+#endif
+
+static
+int cdrom_queue_packet_command (ide_drive_t *drive, struct packet_command *pc, int quiet)
+{
+ struct atapi_request_sense my_reqbuf;
+ int retries = 10;
+ struct request req;
+
+ /* If our caller has not provided a place to stick any sense data,
+ use our own area. */
+ if (pc->sense_data == NULL)
+ pc->sense_data = &my_reqbuf;
+ pc->sense_data->sense_key = 0;
+
+ /* Start of retry loop. */
+ do {
+ ide_init_drive_cmd (&req);
+ req.cmd = PACKET_COMMAND;
+ req.buffer = (char *)pc;
+ req.quiet = quiet;
+ (void) ide_do_drive_cmd (drive, &req, ide_wait);
+
+ if (pc->stat != 0) {
+ /* The request failed. Retry if it was due to a unit
+ attention status
+ (usually means media was changed). */
+ struct atapi_request_sense *reqbuf = pc->sense_data;
+
+ if (reqbuf->sense_key == UNIT_ATTENTION)
+ ;
+ else if (reqbuf->sense_key == NOT_READY &&
+ reqbuf->asc == 4) {
+ /* The drive is in the process of loading
+ a disk. Retry, but wait a little to give
+ the drive time to complete the load. */
+ cdrom_sleep (HZ);
+ } else
+ /* Otherwise, don't retry. */
+ retries = 0;
+
+ --retries;
+ }
+
+ /* End of retry loop. */
+ } while (pc->stat != 0 && retries >= 0);
+
+
+ /* Return an error if the command failed. */
+ if (pc->stat != 0)
+ return -EIO;
+ else {
+ /* The command succeeded. If it was anything other than
+ a request sense, eject, or door lock command,
+		   and we think that the door is presently unlocked, lock it again.
+ (The door was probably unlocked via an explicit
+ CDROMEJECT ioctl.) */
+ if (CDROM_STATE_FLAGS (drive)->door_locked == 0 &&
+ (pc->c[0] != REQUEST_SENSE &&
+ pc->c[0] != ALLOW_MEDIUM_REMOVAL &&
+ pc->c[0] != START_STOP)) {
+ (void) cdrom_lockdoor (drive, 1, NULL);
+ }
+ return 0;
+ }
+}
+
+
+/****************************************************************************
+ * cdrom driver request routine.
+ */
+
+void ide_do_rw_cdrom (ide_drive_t *drive, unsigned long block)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+
+ if (rq -> cmd == PACKET_COMMAND || rq -> cmd == REQUEST_SENSE_COMMAND)
+ cdrom_do_packet_command (drive);
+ else if (rq -> cmd == RESET_DRIVE_COMMAND) {
+ cdrom_end_request (1, drive);
+ ide_do_reset (drive);
+ return;
+ } else if (rq -> cmd != READ) {
+ printk ("ide-cd: bad cmd %d\n", rq -> cmd);
+ cdrom_end_request (0, drive);
+ } else
+ cdrom_start_read (drive, block);
+}
+
+
+
+/****************************************************************************
+ * Ioctl handling.
+ *
+ * Routines which queue packet commands take as a final argument a pointer
+ * to an atapi_request_sense struct. If execution of the command results
+ * in an error with a CHECK CONDITION status, this structure will be filled
+ * with the results of the subsequent request sense command. The pointer
+ * can also be NULL, in which case no sense information is returned.
+ */
+
+#if ! STANDARD_ATAPI
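+/* Binary <-> BCD helpers; for example, bin2bcd (25) yields 0x25 and
+   bcd2bin (0x25) yields 25. */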
+static inline
+int bin2bcd (int x)
+{
+ return (x%10) | ((x/10) << 4);
+}
+
+
+static inline
+int bcd2bin (int x)
+{
+ return (x >> 4) * 10 + (x & 0x0f);
+}
+
+static
+void msf_from_bcd (struct atapi_msf *msf)
+{
+ msf->minute = bcd2bin (msf->minute);
+ msf->second = bcd2bin (msf->second);
+ msf->frame = bcd2bin (msf->frame);
+}
+
+#endif /* not STANDARD_ATAPI */
+
+
+static inline
+void lba_to_msf (int lba, byte *m, byte *s, byte *f)
+{
+ lba += CD_BLOCK_OFFSET;
+ lba &= 0xffffff; /* negative lbas use only 24 bits */
+ *m = lba / (CD_SECS * CD_FRAMES);
+ lba %= (CD_SECS * CD_FRAMES);
+ *s = lba / CD_FRAMES;
+ *f = lba % CD_FRAMES;
+}
+
+
+static inline
+int msf_to_lba (byte m, byte s, byte f)
+{
+ return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_BLOCK_OFFSET;
+}
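+/* Worked example, assuming the usual CD_BLOCK_OFFSET of 150 frames
+   (2 seconds): lba_to_msf (0, ...) yields MSF 00:02:00, and
+   msf_to_lba (0, 2, 0) returns 0. */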
+
+
+static int
+cdrom_check_status (ide_drive_t *drive,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+
+ pc.sense_data = reqbuf;
+ pc.c[0] = TEST_UNIT_READY;
+
+ /* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
+ switch CDs instead of supporting the LOAD_UNLOAD opcode */
+
+ pc.c[7] = CDROM_STATE_FLAGS (drive)->sanyo_slot % 3;
+
+ return cdrom_queue_packet_command (drive, &pc, 1);
+}
+
+
+/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
+static int
+cdrom_lockdoor (ide_drive_t *drive, int lockflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct atapi_request_sense my_reqbuf;
+ int stat;
+ struct packet_command pc;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+ /* If the drive cannot lock the door, just pretend. */
+ if (CDROM_CONFIG_FLAGS (drive)->no_doorlock)
+ stat = 0;
+ else {
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = ALLOW_MEDIUM_REMOVAL;
+ pc.c[4] = (lockflag != 0);
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
+ }
+
+ if (stat == 0)
+ CDROM_STATE_FLAGS (drive)->door_locked = lockflag;
+ else {
+ /* If we got an illegal field error, the drive
+ probably cannot lock the door. */
+ if (reqbuf->sense_key == ILLEGAL_REQUEST &&
+ (reqbuf->asc == 0x24 || reqbuf->asc == 0x20)) {
+ printk ("%s: door locking not supported\n",
+ drive->name);
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 1;
+ stat = 0;
+ CDROM_STATE_FLAGS (drive)->door_locked = lockflag;
+ }
+ }
+ return stat;
+}
+
+
+/* Eject the disk if EJECTFLAG is 0.
+ If EJECTFLAG is 1, try to reload the disk. */
+static int
+cdrom_eject (ide_drive_t *drive, int ejectflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = START_STOP;
+ pc.c[4] = 2 + (ejectflag != 0);
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+static int
+cdrom_pause (ide_drive_t *drive, int pauseflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = SCMD_PAUSE_RESUME;
+ pc.c[8] = !pauseflag;
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+static int
+cdrom_startstop (ide_drive_t *drive, int startflag,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = START_STOP;
+ pc.c[1] = 1;
+ pc.c[4] = startflag;
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+static int
+cdrom_read_capacity (ide_drive_t *drive, unsigned *capacity,
+ struct atapi_request_sense *reqbuf)
+{
+ struct {
+ unsigned lba;
+ unsigned blocklen;
+ } capbuf;
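+	/* The drive returns both fields big-endian, hence the ntohl below. */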
+
+ int stat;
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = READ_CAPACITY;
+ pc.buffer = (unsigned char *)&capbuf;
+ pc.buflen = sizeof (capbuf);
+
+ stat = cdrom_queue_packet_command (drive, &pc, 1);
+ if (stat == 0)
+ *capacity = ntohl (capbuf.lba);
+
+ return stat;
+}
+
+
+static int
+cdrom_read_tocentry (ide_drive_t *drive, int trackno, int msf_flag,
+ int format, char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *)buf;
+ pc.buflen = buflen;
+ pc.c[0] = SCMD_READ_TOC;
+ pc.c[6] = trackno;
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ pc.c[9] = (format << 6);
+ if (msf_flag) pc.c[1] = 2;
+ return cdrom_queue_packet_command (drive, &pc, 1);
+}
+
+
+/* Try to read the entire TOC for the disk into our internal buffer. */
+static int
+cdrom_read_toc (ide_drive_t *drive,
+ struct atapi_request_sense *reqbuf)
+{
+ int stat, ntracks, i;
+ struct atapi_toc *toc = drive->cdrom_info.toc;
+ struct {
+ struct atapi_toc_header hdr;
+ struct atapi_toc_entry ent;
+ } ms_tmp;
+
+ if (toc == NULL) {
+ /* Try to allocate space. */
+ toc = (struct atapi_toc *) kmalloc (sizeof (struct atapi_toc),
+ GFP_KERNEL);
+ drive->cdrom_info.toc = toc;
+ }
+
+ if (toc == NULL) {
+ printk ("%s: No cdrom TOC buffer!\n", drive->name);
+ return -EIO;
+ }
+
+ /* Check to see if the existing data is still valid.
+ If it is, just return. */
+ if (CDROM_STATE_FLAGS (drive)->toc_valid)
+ (void) cdrom_check_status (drive, NULL);
+
+ if (CDROM_STATE_FLAGS (drive)->toc_valid) return 0;
+
+ /* First read just the header, so we know how long the TOC is. */
+ stat = cdrom_read_tocentry (drive, 0, 1, 0, (char *)&toc->hdr,
+ sizeof (struct atapi_toc_header) +
+ sizeof (struct atapi_toc_entry),
+ reqbuf);
+ if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd) {
+ toc->hdr.first_track = bcd2bin (toc->hdr.first_track);
+ toc->hdr.last_track = bcd2bin (toc->hdr.last_track);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+ if (ntracks <= 0) return -EIO;
+ if (ntracks > MAX_TRACKS) ntracks = MAX_TRACKS;
+
+ /* Now read the whole schmeer. */
+ stat = cdrom_read_tocentry (drive, 0, 1, 0, (char *)&toc->hdr,
+ sizeof (struct atapi_toc_header) +
+ (ntracks+1) *
+ sizeof (struct atapi_toc_entry),
+ reqbuf);
+ if (stat) return stat;
+ toc->hdr.toc_length = ntohs (toc->hdr.toc_length);
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd) {
+ toc->hdr.first_track = bcd2bin (toc->hdr.first_track);
+ toc->hdr.last_track = bcd2bin (toc->hdr.last_track);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ for (i=0; i<=ntracks; i++) {
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd) {
+ if (CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd)
+ toc->ent[i].track = bcd2bin (toc->ent[i].track);
+ msf_from_bcd (&toc->ent[i].addr.msf);
+ }
+#endif /* not STANDARD_ATAPI */
+ toc->ent[i].addr.lba = msf_to_lba (toc->ent[i].addr.msf.minute,
+ toc->ent[i].addr.msf.second,
+ toc->ent[i].addr.msf.frame);
+ }
+
+ /* Read the multisession information. */
+ stat = cdrom_read_tocentry (drive, 0, 1, 1,
+ (char *)&ms_tmp, sizeof (ms_tmp),
+ reqbuf);
+ if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd)
+ msf_from_bcd (&ms_tmp.ent.addr.msf);
+#endif /* not STANDARD_ATAPI */
+
+ toc->last_session_lba = msf_to_lba (ms_tmp.ent.addr.msf.minute,
+ ms_tmp.ent.addr.msf.second,
+ ms_tmp.ent.addr.msf.frame);
+
+ toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
+
+ /* Now try to get the total cdrom capacity. */
+ stat = cdrom_read_capacity (drive, &toc->capacity, reqbuf);
+ if (stat) toc->capacity = 0x1fffff;
+
+ HWIF(drive)->gd->sizes[drive->select.b.unit << PARTN_BITS]
+ = toc->capacity * SECTORS_PER_FRAME;
+ drive->part[0].nr_sects = toc->capacity * SECTORS_PER_FRAME;
+
+ /* Remember that we've read this stuff. */
+ CDROM_STATE_FLAGS (drive)->toc_valid = 1;
+
+ return 0;
+}
+
+
+static int
+cdrom_read_subchannel (ide_drive_t *drive, int format,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *) buf;
+ pc.buflen = buflen;
+ pc.c[0] = SCMD_READ_SUBCHANNEL;
+ pc.c[1] = 2; /* MSF addressing */
+ pc.c[2] = 0x40; /* request subQ data */
+	pc.c[3] = format;
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+/* modeflag: 0 = current, 1 = changeable mask, 2 = default, 3 = saved */
+static int
+cdrom_mode_sense (ide_drive_t *drive, int pageno, int modeflag,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *)buf;
+ pc.buflen = buflen;
+ pc.c[0] = MODE_SENSE_10;
+ pc.c[2] = pageno | (modeflag << 6);
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+static int
+cdrom_mode_select (ide_drive_t *drive, int pageno, char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *)buf;
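+	/* A negative buflen tells cdrom_pc_intr that the data is to be
+	   written to the drive rather than read from it. */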
+ pc.buflen = - buflen;
+ pc.c[0] = MODE_SELECT_10;
+ pc.c[1] = 0x10;
+ pc.c[2] = pageno;
+ pc.c[7] = (buflen >> 8);
+ pc.c[8] = (buflen & 0xff);
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+static int
+cdrom_play_lba_range_1 (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = SCMD_PLAYAUDIO_MSF;
+ lba_to_msf (lba_start, &pc.c[3], &pc.c[4], &pc.c[5]);
+ lba_to_msf (lba_end-1, &pc.c[6], &pc.c[7], &pc.c[8]);
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd) {
+ pc.c[3] = bin2bcd (pc.c[3]);
+ pc.c[4] = bin2bcd (pc.c[4]);
+ pc.c[5] = bin2bcd (pc.c[5]);
+ pc.c[6] = bin2bcd (pc.c[6]);
+ pc.c[7] = bin2bcd (pc.c[7]);
+ pc.c[8] = bin2bcd (pc.c[8]);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ return cdrom_queue_packet_command (drive, &pc, 0);
+}
+
+
+/* Play audio starting at LBA LBA_START and finishing with the
+ LBA before LBA_END. */
+static int
+cdrom_play_lba_range (ide_drive_t *drive, int lba_start, int lba_end,
+ struct atapi_request_sense *reqbuf)
+{
+ int i, stat;
+ struct atapi_request_sense my_reqbuf;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+	/* Some drives will, for certain audio cds,
+ give an error if you ask them to play the entire cd using the
+ values which are returned in the TOC. The play will succeed,
+ however, if the ending address is adjusted downwards
+ by a few frames. */
+ for (i=0; i<75; i++) {
+ stat = cdrom_play_lba_range_1 (drive, lba_start, lba_end,
+ reqbuf);
+
+ if (stat == 0 ||
+ !(reqbuf->sense_key == ILLEGAL_REQUEST &&
+ reqbuf->asc == 0x24))
+ return stat;
+
+ --lba_end;
+ if (lba_end <= lba_start) break;
+ }
+
+ return stat;
+}
+
+
+static
+int cdrom_get_toc_entry (ide_drive_t *drive, int track,
+ struct atapi_toc_entry **ent,
+ struct atapi_request_sense *reqbuf)
+{
+ int stat, ntracks;
+ struct atapi_toc *toc;
+
+ /* Make sure our saved TOC is valid. */
+ stat = cdrom_read_toc (drive, reqbuf);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ /* Check validity of requested track number. */
+ ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+ if (track == CDROM_LEADOUT)
+ *ent = &toc->ent[ntracks];
+ else if (track < toc->hdr.first_track ||
+ track > toc->hdr.last_track)
+ return -EINVAL;
+ else
+ *ent = &toc->ent[track - toc->hdr.first_track];
+
+ return 0;
+}
+
+
+static int
+cdrom_read_block (ide_drive_t *drive, int format, int lba, int nblocks,
+ char *buf, int buflen,
+ struct atapi_request_sense *reqbuf)
+{
+ struct packet_command pc;
+ struct atapi_request_sense my_reqbuf;
+ int stat;
+
+ if (reqbuf == NULL)
+ reqbuf = &my_reqbuf;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.buffer = (unsigned char *)buf;
+ pc.buflen = buflen;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->old_readcd)
+ pc.c[0] = 0xd4;
+ else
+#endif /* not STANDARD_ATAPI */
+ pc.c[0] = READ_CD;
+
+ pc.c[1] = (format << 2);
+ put_unaligned(htonl(lba), (unsigned int *) &pc.c[2]);
+ pc.c[8] = (nblocks & 0xff);
+ pc.c[7] = ((nblocks>>8) & 0xff);
+ pc.c[6] = ((nblocks>>16) & 0xff);
+ if (format <= 1)
+ pc.c[9] = 0xf8;
+ else
+ pc.c[9] = 0x10;
+
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
+
+#if ! STANDARD_ATAPI
+ /* If the drive doesn't recognize the READ CD opcode, retry the command
+ with an older opcode for that command. */
+ if (stat && reqbuf->sense_key == ILLEGAL_REQUEST &&
+ reqbuf->asc == 0x20 &&
+ CDROM_CONFIG_FLAGS (drive)->old_readcd == 0) {
+ printk ("%s: Drive does not recognize READ_CD;"
+ "trying opcode 0xd4\n",
+ drive->name);
+ CDROM_CONFIG_FLAGS (drive)->old_readcd = 1;
+ return cdrom_read_block (drive, format, lba, nblocks,
+ buf, buflen, reqbuf);
+ }
+#endif /* not STANDARD_ATAPI */
+
+ return stat;
+}
+
+
+/* If SLOT<0, unload the current slot. Otherwise, try to load SLOT. */
+static int
+cdrom_load_unload (ide_drive_t *drive, int slot,
+ struct atapi_request_sense *reqbuf)
+{
+ /* if the drive is a Sanyo 3 CD changer then TEST_UNIT_READY
+ (used in the cdrom_check_status function) is used to
+ switch CDs instead of LOAD_UNLOAD */
+
+ if (CDROM_STATE_FLAGS (drive)->sanyo_slot > 0) {
+
+ if ((slot == 1) || (slot == 2)) {
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = slot;
+ } else if (slot >= 0) {
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = 3;
+ } else {
+ return 0;
+ }
+
+ return cdrom_check_status (drive, NULL);
+
+ } else {
+
+ /* ATAPI Rev. 2.2+ standard for requesting switching of
+ CDs in a multiplatter device */
+
+ struct packet_command pc;
+
+ memset (&pc, 0, sizeof (pc));
+ pc.sense_data = reqbuf;
+
+ pc.c[0] = LOAD_UNLOAD;
+ pc.c[4] = 2 + (slot >= 0);
+ pc.c[8] = slot;
+ return cdrom_queue_packet_command (drive, &pc, 0);
+
+ }
+}
+
+
+int ide_cdrom_ioctl (ide_drive_t *drive, struct inode *inode,
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case CDROMEJECT: {
+ int stat;
+
+ if (drive->usage > 1)
+ return -EBUSY;
+
+ stat = cdrom_lockdoor (drive, 0, NULL);
+ if (stat) return stat;
+
+ return cdrom_eject (drive, 0, NULL);
+ }
+
+ case CDROMCLOSETRAY: {
+ int stat;
+ if (drive->usage > 1)
+ return -EBUSY;
+
+ stat = cdrom_eject (drive, 1, NULL);
+ if (stat) return stat;
+
+ return cdrom_lockdoor (drive, 1, NULL);
+ }
+
+ case CDROMEJECT_SW: {
+ CDROM_STATE_FLAGS (drive)->eject_on_close = arg;
+ return 0;
+ }
+
+ case CDROMPAUSE:
+ return cdrom_pause (drive, 1, NULL);
+
+ case CDROMRESUME:
+ return cdrom_pause (drive, 0, NULL);
+
+ case CDROMSTART:
+ return cdrom_startstop (drive, 1, NULL);
+
+ case CDROMSTOP: {
+#ifdef IHAVEADOLPHIN
+ /* Certain Drives require this. Most don't
+ and will produce errors upon CDROMSTOP
+ pit says the Dolphin needs this. If you
+ own a dolphin, just define IHAVEADOLPHIN somewhere */
+ int stat;
+ stat = cdrom_startstop (drive, 0, NULL);
+ if (stat) return stat;
+ return cdrom_eject (drive, 1, NULL);
+#endif /* end of IHAVEADOLPHIN */
+ return cdrom_startstop (drive, 0, NULL);
+ }
+
+ case CDROMPLAYMSF: {
+ struct cdrom_msf msf;
+ int stat, lba_start, lba_end;
+
+ stat = verify_area (VERIFY_READ, (void *)arg, sizeof (msf));
+ if (stat) return stat;
+
+ memcpy_fromfs (&msf, (void *) arg, sizeof(msf));
+
+ lba_start = msf_to_lba (msf.cdmsf_min0, msf.cdmsf_sec0,
+ msf.cdmsf_frame0);
+ lba_end = msf_to_lba (msf.cdmsf_min1, msf.cdmsf_sec1,
+ msf.cdmsf_frame1) + 1;
+
+ if (lba_end <= lba_start) return -EINVAL;
+
+ return cdrom_play_lba_range (drive, lba_start, lba_end, NULL);
+ }
+
+ /* Like just about every other Linux cdrom driver, we ignore the
+ index part of the request here. */
+ case CDROMPLAYTRKIND: {
+ int stat, lba_start, lba_end;
+ struct cdrom_ti ti;
+ struct atapi_toc_entry *first_toc, *last_toc;
+
+ stat = verify_area (VERIFY_READ, (void *)arg, sizeof (ti));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ti, (void *) arg, sizeof(ti));
+
+ stat = cdrom_get_toc_entry (drive, ti.cdti_trk0, &first_toc,
+ NULL);
+ if (stat) return stat;
+ stat = cdrom_get_toc_entry (drive, ti.cdti_trk1, &last_toc,
+ NULL);
+ if (stat) return stat;
+
+ if (ti.cdti_trk1 != CDROM_LEADOUT) ++last_toc;
+ lba_start = first_toc->addr.lba;
+ lba_end = last_toc->addr.lba;
+
+ if (lba_end <= lba_start) return -EINVAL;
+
+ return cdrom_play_lba_range (drive, lba_start, lba_end, NULL);
+ }
+
+ case CDROMREADTOCHDR: {
+ int stat;
+ struct cdrom_tochdr tochdr;
+ struct atapi_toc *toc;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (tochdr));
+ if (stat) return stat;
+
+ /* Make sure our saved TOC is valid. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+ tochdr.cdth_trk0 = toc->hdr.first_track;
+ tochdr.cdth_trk1 = toc->hdr.last_track;
+
+ memcpy_tofs ((void *) arg, &tochdr, sizeof (tochdr));
+
+ return stat;
+ }
+
+ case CDROMREADTOCENTRY: {
+ int stat;
+ struct cdrom_tocentry tocentry;
+ struct atapi_toc_entry *toce;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (tocentry));
+ if (stat) return stat;
+
+ memcpy_fromfs (&tocentry, (void *) arg, sizeof (tocentry));
+
+ stat = cdrom_get_toc_entry (drive, tocentry.cdte_track, &toce,
+ NULL);
+ if (stat) return stat;
+
+ tocentry.cdte_ctrl = toce->control;
+ tocentry.cdte_adr = toce->adr;
+
+ if (tocentry.cdte_format == CDROM_MSF) {
+ /* convert to MSF */
+ lba_to_msf (toce->addr.lba,
+ &tocentry.cdte_addr.msf.minute,
+ &tocentry.cdte_addr.msf.second,
+ &tocentry.cdte_addr.msf.frame);
+ } else
+ tocentry.cdte_addr.lba = toce->addr.lba;
+
+ memcpy_tofs ((void *) arg, &tocentry, sizeof (tocentry));
+
+ return stat;
+ }
+
+ case CDROMSUBCHNL: {
+ struct atapi_cdrom_subchnl scbuf;
+ int stat;
+ struct cdrom_subchnl subchnl;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (subchnl));
+ if (stat) return stat;
+
+ memcpy_fromfs (&subchnl, (void *) arg, sizeof (subchnl));
+
+ stat = cdrom_read_subchannel (drive, 1, /* current position */
+ (char *)&scbuf, sizeof (scbuf),
+ NULL);
+ if (stat) return stat;
+
+#if ! STANDARD_ATAPI
+ if (CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd) {
+ msf_from_bcd (&scbuf.acdsc_absaddr.msf);
+ msf_from_bcd (&scbuf.acdsc_reladdr.msf);
+ }
+ if (CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd)
+ scbuf.acdsc_trk = bcd2bin (scbuf.acdsc_trk);
+#endif /* not STANDARD_ATAPI */
+
+ if (subchnl.cdsc_format == CDROM_MSF) {
+ subchnl.cdsc_absaddr.msf.minute =
+ scbuf.acdsc_absaddr.msf.minute;
+ subchnl.cdsc_absaddr.msf.second =
+ scbuf.acdsc_absaddr.msf.second;
+ subchnl.cdsc_absaddr.msf.frame =
+ scbuf.acdsc_absaddr.msf.frame;
+
+ subchnl.cdsc_reladdr.msf.minute =
+ scbuf.acdsc_reladdr.msf.minute;
+ subchnl.cdsc_reladdr.msf.second =
+ scbuf.acdsc_reladdr.msf.second;
+ subchnl.cdsc_reladdr.msf.frame =
+ scbuf.acdsc_reladdr.msf.frame;
+ } else {
+ subchnl.cdsc_absaddr.lba =
+ msf_to_lba (scbuf.acdsc_absaddr.msf.minute,
+ scbuf.acdsc_absaddr.msf.second,
+ scbuf.acdsc_absaddr.msf.frame);
+ subchnl.cdsc_reladdr.lba =
+ msf_to_lba (scbuf.acdsc_reladdr.msf.minute,
+ scbuf.acdsc_reladdr.msf.second,
+ scbuf.acdsc_reladdr.msf.frame);
+ }
+
+ subchnl.cdsc_audiostatus = scbuf.acdsc_audiostatus;
+ subchnl.cdsc_ctrl = scbuf.acdsc_ctrl;
+ subchnl.cdsc_trk = scbuf.acdsc_trk;
+ subchnl.cdsc_ind = scbuf.acdsc_ind;
+
+ memcpy_tofs ((void *) arg, &subchnl, sizeof (subchnl));
+
+ return stat;
+ }
+
+ case CDROMVOLCTRL: {
+ struct cdrom_volctrl volctrl;
+ char buffer[24], mask[24];
+ int stat;
+
+ stat = verify_area (VERIFY_READ, (void *) arg,
+ sizeof (volctrl));
+ if (stat) return stat;
+ memcpy_fromfs (&volctrl, (void *) arg, sizeof (volctrl));
+
+ stat = cdrom_mode_sense (drive, 0x0e, 0, buffer,
+ sizeof (buffer), NULL);
+ if (stat) return stat;
+ stat = cdrom_mode_sense (drive, 0x0e, 1, mask,
+ sizeof (buffer), NULL);
+ if (stat) return stat;
+
+ buffer[1] = buffer[2] = 0;
+
+ buffer[17] = volctrl.channel0 & mask[17];
+ buffer[19] = volctrl.channel1 & mask[19];
+ buffer[21] = volctrl.channel2 & mask[21];
+ buffer[23] = volctrl.channel3 & mask[23];
+
+ return cdrom_mode_select (drive, 0x0e, buffer,
+ sizeof (buffer), NULL);
+ }
+
+ case CDROMVOLREAD: {
+ struct cdrom_volctrl volctrl;
+ char buffer[24];
+ int stat;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (volctrl));
+ if (stat) return stat;
+
+ stat = cdrom_mode_sense (drive, 0x0e, 0, buffer,
+ sizeof (buffer), NULL);
+ if (stat) return stat;
+
+ volctrl.channel0 = buffer[17];
+ volctrl.channel1 = buffer[19];
+ volctrl.channel2 = buffer[21];
+ volctrl.channel3 = buffer[23];
+
+ memcpy_tofs ((void *) arg, &volctrl, sizeof (volctrl));
+
+ return 0;
+ }
+
+ case CDROMMULTISESSION: {
+ struct cdrom_multisession ms_info;
+ struct atapi_toc *toc;
+ int stat;
+
+ stat = verify_area (VERIFY_WRITE, (void *)arg,
+ sizeof (ms_info));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ms_info, (void *)arg, sizeof (ms_info));
+
+ /* Make sure the TOC information is valid. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ if (ms_info.addr_format == CDROM_MSF)
+ lba_to_msf (toc->last_session_lba,
+ &ms_info.addr.msf.minute,
+ &ms_info.addr.msf.second,
+ &ms_info.addr.msf.frame);
+ else if (ms_info.addr_format == CDROM_LBA)
+ ms_info.addr.lba = toc->last_session_lba;
+ else
+ return -EINVAL;
+
+ ms_info.xa_flag = toc->xa_flag;
+
+ memcpy_tofs ((void *)arg, &ms_info, sizeof (ms_info));
+
+ return 0;
+ }
+
+ /* Read 2352 byte blocks from audio tracks. */
+ case CDROMREADAUDIO: {
+ int stat, lba;
+ struct atapi_toc *toc;
+ struct cdrom_read_audio ra;
+ char *buf;
+
+ /* Make sure the TOC is up to date. */
+ stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ stat = verify_area (VERIFY_READ, (char *)arg, sizeof (ra));
+ if (stat) return stat;
+
+ memcpy_fromfs (&ra, (void *)arg, sizeof (ra));
+
+ if (ra.nframes < 0 || ra.nframes > toc->capacity)
+ return -EINVAL;
+ else if (ra.nframes == 0)
+ return 0;
+
+ stat = verify_area (VERIFY_WRITE, (char *)ra.buf,
+ ra.nframes * CD_FRAMESIZE_RAW);
+ if (stat) return stat;
+
+ if (ra.addr_format == CDROM_MSF)
+ lba = msf_to_lba (ra.addr.msf.minute,
+ ra.addr.msf.second,
+ ra.addr.msf.frame);
+ else if (ra.addr_format == CDROM_LBA)
+ lba = ra.addr.lba;
+ else
+ return -EINVAL;
+
+ if (lba < 0 || lba >= toc->capacity)
+ return -EINVAL;
+
+ buf = (char *) kmalloc (CDROM_NBLOCKS_BUFFER*CD_FRAMESIZE_RAW,
+ GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ while (ra.nframes > 0) {
+ int this_nblocks = ra.nframes;
+ if (this_nblocks > CDROM_NBLOCKS_BUFFER)
+ this_nblocks = CDROM_NBLOCKS_BUFFER;
+ stat = cdrom_read_block
+ (drive, 1, lba, this_nblocks,
+ buf, this_nblocks * CD_FRAMESIZE_RAW, NULL);
+ if (stat) break;
+
+ memcpy_tofs (ra.buf, buf,
+ this_nblocks * CD_FRAMESIZE_RAW);
+ ra.buf += this_nblocks * CD_FRAMESIZE_RAW;
+ ra.nframes -= this_nblocks;
+ lba += this_nblocks;
+ }
+
+ kfree (buf);
+ return stat;
+ }
+ case CDROMREADRAW:
+ case CDROMREADMODE1:
+ case CDROMREADMODE2: {
+ struct cdrom_msf msf;
+ int blocksize, format, stat, lba;
+ char *buf;
+
+ if (cmd == CDROMREADMODE1) {
+ blocksize = CD_FRAMESIZE;
+ format = 2;
+ } else if (cmd == CDROMREADMODE2) {
+ blocksize = CD_FRAMESIZE_RAW0;
+ format = 3;
+ } else {
+ blocksize = CD_FRAMESIZE_RAW;
+ format = 0;
+ }
+
+ stat = verify_area (VERIFY_WRITE, (char *)arg, blocksize);
+ if (stat) return stat;
+
+ memcpy_fromfs (&msf, (void *)arg, sizeof (msf));
+
+ lba = msf_to_lba (msf.cdmsf_min0,
+ msf.cdmsf_sec0,
+ msf.cdmsf_frame0);
+
+ /* DON'T make sure the TOC is up to date. */
+ /* stat = cdrom_read_toc (drive, NULL);
+ if (stat) return stat;
+
+ toc = drive->cdrom_info.toc;
+
+ if (lba < 0 || lba >= toc->capacity)
+ return -EINVAL; */
+
+ buf = (char *) kmalloc (CD_FRAMESIZE_RAW, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ stat = cdrom_read_block (drive, format, lba, 1, buf, blocksize,
+ NULL);
+ if (stat == 0)
+ memcpy_tofs ((char *)arg, buf, blocksize);
+
+ kfree (buf);
+ return stat;
+ }
+
+ case CDROM_GET_UPC: {
+ int stat;
+ char mcnbuf[24];
+ struct cdrom_mcn mcn;
+
+ stat = verify_area (VERIFY_WRITE, (void *) arg,
+ sizeof (mcn));
+ if (stat) return stat;
+
+ stat = cdrom_read_subchannel (drive, 2, /* get MCN */
+ mcnbuf, sizeof (mcnbuf),
+ NULL);
+ if (stat) return stat;
+
+ memcpy (mcn.medium_catalog_number, mcnbuf+9,
+ sizeof (mcn.medium_catalog_number)-1);
+ mcn.medium_catalog_number[sizeof (mcn.medium_catalog_number)-1]
+ = '\0';
+
+ memcpy_tofs ((void *) arg, &mcn, sizeof (mcn));
+
+ return stat;
+ }
+
+ case CDROMLOADFROMSLOT:
+ printk ("%s: Use CDROM_SELECT_DISC "
+		"instead of CDROMLOADFROMSLOT.\n", drive->name);
+ /* Fall through. */
+
+ case CDROM_SELECT_DISC: {
+ struct atapi_request_sense my_reqbuf;
+ int stat;
+
+ if (drive->usage > 1)
+ return -EBUSY;
+
+ (void) cdrom_load_unload (drive, -1, NULL);
+
+ cdrom_saw_media_change (drive);
+ if (arg == -1) {
+ (void) cdrom_lockdoor (drive, 0, NULL);
+ return 0;
+ }
+ (void) cdrom_load_unload (drive, (int)arg, NULL);
+
+ stat = cdrom_check_status (drive, &my_reqbuf);
+ if (stat && my_reqbuf.sense_key == NOT_READY) {
+ return -ENOENT;
+ }
+
+ /* And try to read the TOC information now. */
+ return cdrom_read_toc (drive, &my_reqbuf);
+ }
+
+#if 0 /* Doesn't work reliably yet. */
+ case CDROMRESET: {
+ struct request req;
+ ide_init_drive_cmd (&req);
+ req.cmd = RESET_DRIVE_COMMAND;
+ return ide_do_drive_cmd (drive, &req, ide_wait);
+ }
+#endif
+
+
+#ifdef TEST
+ case 0x1234: {
+ int stat;
+ struct packet_command pc;
+ int len, lena;
+
+ memset (&pc, 0, sizeof (pc));
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (pc.c));
+ if (stat) return stat;
+ memcpy_fromfs (&pc.c, (void *) arg, sizeof (pc.c));
+ arg += sizeof (pc.c);
+
+ stat = verify_area (VERIFY_READ, (void *) arg, sizeof (len));
+ if (stat) return stat;
+ memcpy_fromfs (&len, (void *) arg , sizeof (len));
+ arg += sizeof (len);
+
+ if (len > 0) {
+ stat = verify_area (VERIFY_WRITE, (void *) arg, len);
+ if (stat) return stat;
+ }
+
+ lena = len;
+ if (lena < 0) lena = 0;
+
+ {
+ char buf[lena];
+ if (len > 0) {
+ pc.buflen = len;
+ pc.buffer = buf;
+ }
+
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
+
+ if (len > 0)
+ memcpy_tofs ((void *)arg, buf, len);
+ }
+
+ return stat;
+ }
+#endif
+
+ default:
+ return -EPERM;
+ }
+
+}
+
+
+
+/****************************************************************************
+ * Other driver requests (open, close, check media change).
+ */
+
+int ide_cdrom_check_media_change (ide_drive_t *drive)
+{
+ int retval;
+
+ (void) cdrom_check_status (drive, NULL);
+
+ retval = CDROM_STATE_FLAGS (drive)->media_changed;
+ CDROM_STATE_FLAGS (drive)->media_changed = 0;
+
+ return retval;
+}
+
+
+int ide_cdrom_open (struct inode *ip, struct file *fp, ide_drive_t *drive)
+{
+ /* no write access */
+ if (fp->f_mode & 2) {
+ --drive->usage;
+ return -EROFS;
+ }
+
+ /* If this is the first open, check the drive status. */
+ if (drive->usage == 1) {
+ int stat;
+ struct atapi_request_sense my_reqbuf;
+ my_reqbuf.sense_key = 0;
+
+ /* Get the drive status. */
+ stat = cdrom_check_status (drive, &my_reqbuf);
+
+ /* If the tray is open, try to close it. */
+ if (stat && my_reqbuf.sense_key == NOT_READY) {
+ cdrom_eject (drive, 1, &my_reqbuf);
+ stat = cdrom_check_status (drive, &my_reqbuf);
+ }
+
+ /* If things worked ok, lock the door and read the
+ TOC information. */
+ if (stat == 0 || my_reqbuf.sense_key == UNIT_ATTENTION) {
+ (void) cdrom_lockdoor (drive, 1, &my_reqbuf);
+ (void) cdrom_read_toc (drive, &my_reqbuf);
+ } else {
+ /* Otherwise return as missing */
+ --drive->usage;
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Close down the device. Invalidate all cached blocks.
+ */
+
+void ide_cdrom_release (struct inode *inode, struct file *file,
+ ide_drive_t *drive)
+{
+ if (drive->usage == 0) {
+ invalidate_buffers (inode->i_rdev);
+
+ /* Unlock the door. */
+ (void) cdrom_lockdoor (drive, 0, NULL);
+
+ /* Do an eject if we were requested to do so. */
+ if (CDROM_STATE_FLAGS (drive)->eject_on_close)
+ (void) cdrom_eject (drive, 0, NULL);
+ }
+}
+
+
+
+/****************************************************************************
+ * Device initialization.
+ */
+
+void ide_cdrom_setup (ide_drive_t *drive)
+{
+ blksize_size[HWIF(drive)->major][drive->select.b.unit << PARTN_BITS] =
+ CD_FRAMESIZE;
+
+ drive->special.all = 0;
+ drive->ready_stat = 0;
+
+ CDROM_STATE_FLAGS (drive)->media_changed = 0;
+ CDROM_STATE_FLAGS (drive)->toc_valid = 0;
+ CDROM_STATE_FLAGS (drive)->door_locked = 0;
+
+ /* Turn this off by default, since many people don't like it. */
+	CDROM_STATE_FLAGS (drive)->eject_on_close = 0;
+
+#if NO_DOOR_LOCKING
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 1;
+#else
+ CDROM_CONFIG_FLAGS (drive)->no_doorlock = 0;
+#endif
+
+ /* by default Sanyo 3 CD changer support is turned off and
+ ATAPI Rev 2.2+ standard support for CD changers is used */
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = 0;
+
+ if (drive->id != NULL)
+ CDROM_CONFIG_FLAGS (drive)->drq_interrupt =
+ ((drive->id->config & 0x0060) == 0x20);
+ else
+ CDROM_CONFIG_FLAGS (drive)->drq_interrupt = 0;
+
+#if ! STANDARD_ATAPI
+ drive->cdrom_info.max_sectors = 252;
+
+ CDROM_CONFIG_FLAGS (drive)->old_readcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 0;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 0;
+
+ if (drive->id != NULL) {
+ const char *model = (const char *)drive->id->model;
+ const char *fw_rev = (const char *)drive->id->fw_rev;
+
+ if (strcmp (model, "V003S0DS") == 0 &&
+ fw_rev[4] == '1' &&
+ fw_rev[6] <= '2') {
+ /* Vertos 300.
+ Some versions of this drive like to talk BCD. */
+ CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
+ }
+
+ else if (strcmp (model, "V006E0DS") == 0 &&
+ fw_rev[4] == '1' &&
+ fw_rev[6] <= '2') {
+ /* Vertos 600 ESD. */
+ CDROM_CONFIG_FLAGS (drive)->toctracks_as_bcd = 1;
+ }
+
+ else if (strcmp (model, "GCD-R580B") == 0)
+ drive->cdrom_info.max_sectors = 124;
+
+ else if (strcmp (model,
+ "NEC CD-ROM DRIVE:260") == 0 &&
+ strcmp (fw_rev, "1.01") == 0) {
+ /* Old NEC260 (not R). */
+ CDROM_CONFIG_FLAGS (drive)->tocaddr_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
+ }
+
+ else if (strcmp (model, "WEARNES CDD-120") == 0 &&
+ strcmp (fw_rev, "A1.1") == 0) {
+ /* Wearnes */
+ CDROM_CONFIG_FLAGS (drive)->playmsf_as_bcd = 1;
+ CDROM_CONFIG_FLAGS (drive)->subchan_as_bcd = 1;
+ }
+
+ /* Sanyo 3 CD changer uses a non-standard command
+ for CD changing */
+ else if ((strcmp(model, "CD-ROM CDR-C3 G") == 0) ||
+ (strcmp(model, "CD-ROM CDR-C3G") == 0) ||
+ (strcmp(model, "CD-ROM CDR_C36") == 0)) {
+ /* uses CD in slot 0 when value is set to 3 */
+ CDROM_STATE_FLAGS (drive)->sanyo_slot = 3;
+ }
+
+ }
+#endif /* not STANDARD_ATAPI */
+
+ drive->cdrom_info.toc = NULL;
+ drive->cdrom_info.sector_buffer = NULL;
+ drive->cdrom_info.sector_buffered = 0;
+ drive->cdrom_info.nsectors_buffered = 0;
+}
+
+
+
+/*
+ * TODO (for 2.1?):
+ * Avoid printing error messages for expected errors from the drive.
+ * Integrate with generic cdrom driver.
+ * Query the drive to find what features are available
+ * before trying to use them.
+ * Integrate spindown time adjustment patch.
+ * Modularize.
+ * CDROMRESET ioctl.
+ * Better support for changers.
+ */
+
+
+
+/*==========================================================================*/
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/linux/src/drivers/block/ide.c b/linux/src/drivers/block/ide.c
new file mode 100644
index 0000000..c8dee84
--- /dev/null
+++ b/linux/src/drivers/block/ide.c
@@ -0,0 +1,3926 @@
+/*
+ * linux/drivers/block/ide.c Version 5.53 Jun 24, 1997
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors (see below)
+ */
+#define _IDE_C /* needed by <linux/blk.h> */
+
+/*
+ * Maintained by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ *
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * Primary: ide0, port 0x1f0; major=3; hda is minor=0; hdb is minor=64
+ * Secondary: ide1, port 0x170; major=22; hdc is minor=0; hdd is minor=64
+ * Tertiary: ide2, port 0x???; major=33; hde is minor=0; hdf is minor=64
+ * Quaternary: ide3, port 0x???; major=34; hdg is minor=0; hdh is minor=64
+ *
+ * It is easy to extend ide.c to handle more than four interfaces:
+ *
+ * Change the MAX_HWIFS constant in ide.h.
+ *
+ * Define some new major numbers (in major.h), and insert them into
+ * the ide_hwif_to_major table in ide.c.
+ *
+ * Fill in the extra values for the new interfaces into the two tables
+ * inside ide.c: default_io_base[] and default_irqs[].
+ *
+ * Create the new request handlers by cloning "do_ide3_request()"
+ * for each new interface, and add them to the switch statement
+ * in the ide_init() function in ide.c.
+ *
+ * Recompile, create the new /dev/ entries, and it will probably work.
+ *
+ * From hd.c:
+ * |
+ * | It traverses the request-list, using interrupts to jump between functions.
+ * | As nearly all functions can be called within interrupts, we may not sleep.
+ * | Special care is recommended. Have Fun!
+ * |
+ * | modified by Drew Eckhardt to check nr of hd's from the CMOS.
+ * |
+ * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * | in the early extended-partition checks and added DM partitions.
+ * |
+ * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI).
+ * |
+ * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
+ * | and general streamlining by Mark Lord (mlord@pobox.com).
+ *
+ * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by:
+ *
+ * Mark Lord (mlord@pobox.com) (IDE Perf.Pkg)
+ * Delman Lee (delman@mipg.upenn.edu) ("Mr. atdisk2")
+ * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom)
+ *
+ * This was a rewrite of just about everything from hd.c, though some original
+ * code is still sprinkled about. Think of it as a major evolution, with
+ * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
+ *
+ * Version 1.0 ALPHA initial code, primary i/f working okay
+ * Version 1.3 BETA dual i/f on shared irq tested & working!
+ * Version 1.4 BETA added auto probing for irq(s)
+ * Version 1.5 BETA added ALPHA (untested) support for IDE cd-roms,
+ * ...
+ * Version 3.5 correct the bios_cyl field if it's too small
+ * (linux 1.1.76) (to help fdisk with brain-dead BIOSs)
+ * Version 3.6 cosmetic corrections to comments and stuff
+ * (linux 1.1.77) reorganise probing code to make it understandable
+ * added halfway retry to probing for drive identification
+ * added "hdx=noprobe" command line option
+ * allow setting multmode even when identification fails
+ * Version 3.7 move set_geometry=1 from do_identify() to ide_init()
+ * increase DRQ_WAIT to eliminate nuisance messages
+ * wait for DRQ_STAT instead of DATA_READY during probing
+ * (courtesy of Gary Thomas gary@efland.UU.NET)
+ * Version 3.8 fixed byte-swapping for confused Mitsumi cdrom drives
+ * update of ide-cd.c from Scott, allows blocksize=1024
+ * cdrom probe fixes, inspired by jprang@uni-duisburg.de
+ * Version 3.9 don't use LBA if lba_capacity looks funny
+ * correct the drive capacity calculations
+ * fix probing for old Seagates without IDE_ALTSTATUS_REG
+ * fix byte-ordering for some NEC cdrom drives
+ * Version 3.10 disable multiple mode by default; was causing trouble
+ * Version 3.11 fix mis-identification of old WD disks as cdroms
+ * Version 3.12 simplify logic for selecting initial mult_count
+ * (fixes problems with buggy WD drives)
+ * Version 3.13 remove excess "multiple mode disabled" messages
+ * Version 3.14 fix ide_error() handling of BUSY_STAT
+ * fix byte-swapped cdrom strings (again.. arghh!)
+ * ignore INDEX bit when checking the ALTSTATUS reg
+ * Version 3.15 add SINGLE_THREADED flag for use with dual-CMD i/f
+ * ignore WRERR_STAT for non-write operations
+ * added vlb_sync support for DC-2000A & others,
+ * (incl. some Promise chips), courtesy of Frank Gockel
+ * Version 3.16 convert vlb_32bit and vlb_sync into runtime flags
+ * add ioctls to get/set VLB flags (HDIO_[SG]ET_CHIPSET)
+ * rename SINGLE_THREADED to SUPPORT_SERIALIZE,
+ * add boot flag to "serialize" operation for CMD i/f
+ * add optional support for DTC2278 interfaces,
+ * courtesy of andy@cercle.cts.com (Dyan Wile).
+ * add boot flag to enable "dtc2278" probe
+ * add probe to avoid EATA (SCSI) interfaces,
+ * courtesy of neuffer@goofy.zdv.uni-mainz.de.
+ * Version 4.00 tidy up verify_area() calls - heiko@colossus.escape.de
+ * add flag to ignore WRERR_STAT for some drives
+ * courtesy of David.H.West@um.cc.umich.edu
+ * assembly syntax tweak to vlb_sync
+ * removable drive support from scuba@cs.tu-berlin.de
+ * add transparent support for DiskManager-6.0x "Dynamic
+ * Disk Overlay" (DDO), most of this is in genhd.c
+ * eliminate "multiple mode turned off" message at boot
+ * Version 4.10 fix bug in ioctl for "hdparm -c3"
+ * fix DM6:DDO support -- now works with LILO, fdisk, ...
+ * don't treat some naughty WD drives as removable
+ * Version 4.11 updated DM6 support using info provided by OnTrack
+ * Version 5.00 major overhaul, multmode setting fixed, vlb_sync fixed
+ * added support for 3rd/4th/alternative IDE ports
+ * created ide.h; ide-cd.c now compiles separate from ide.c
+ * hopefully fixed infinite "unexpected_intr" from cdroms
+ * zillions of other changes and restructuring
+ * somehow reduced overall memory usage by several kB
+ * probably slowed things down slightly, but worth it
+ * Version 5.01 AT LAST!! Finally understood why "unexpected_intr"
+ * was happening at various times/places: whenever the
+ * ide-interface's ctl_port was used to "mask" the irq,
+ * it also would trigger an edge in the process of masking
+ * which would result in a self-inflicted interrupt!!
+ * (such a stupid way to build a hardware interrupt mask).
+ * This is now fixed (after a year of head-scratching).
+ * Version 5.02 got rid of need for {enable,disable}_irq_list()
+ * Version 5.03 tune-ups, comments, remove "busy wait" from drive resets
+ * removed PROBE_FOR_IRQS option -- no longer needed
+ * OOOPS! fixed "bad access" bug for 2nd drive on an i/f
+ * Version 5.04 changed "ira %d" to "irq %d" in DEBUG message
+ * added more comments, cleaned up unexpected_intr()
+ * OOOPS! fixed null pointer problem in ide reset code
+ * added autodetect for Triton chipset -- no effect yet
+ * Version 5.05 OOOPS! fixed bug in revalidate_disk()
+ * OOOPS! fixed bug in ide_do_request()
+ * added ATAPI reset sequence for cdroms
+ * Version 5.10 added Bus-Mastered DMA support for Triton Chipset
+ * some (mostly) cosmetic changes
+ * Version 5.11 added ht6560b support by malafoss@snakemail.hut.fi
+ * reworked PCI scanning code
+ * added automatic RZ1000 detection/support
+ * added automatic PCI CMD640 detection/support
+ * added option for VLB CMD640 support
+ * tweaked probe to find cdrom on hdb with disks on hda,hdc
+ * Version 5.12 some performance tuning
+ * added message to alert user to bad /dev/hd[cd] entries
+ * OOOPS! fixed bug in atapi reset
+ * driver now forces "serialize" again for all cmd640 chips
+ * noticed REALLY_SLOW_IO had no effect, moved it to ide.c
+ * made do_drive_cmd() into public ide_do_drive_cmd()
+ * Version 5.13 fixed typo ('B'), thanks to houston@boyd.geog.mcgill.ca
+ * fixed ht6560b support
+ * Version 5.13b (sss) fix problem in calling ide_cdrom_setup()
+ * don't bother invalidating nonexistent partitions
+ * Version 5.14 fixes to cmd640 support.. maybe it works now(?)
+ * added & tested full EZ-DRIVE support -- don't use LILO!
+ * don't enable 2nd CMD640 PCI port during init - conflict
+ * Version 5.15 bug fix in init_cmd640_vlb()
+ * bug fix in interrupt sharing code
+ * Version 5.16 ugh.. fix "serialize" support, broken in 5.15
+ * remove "Huh?" from cmd640 code
+ * added qd6580 interface speed select from Colten Edwards
+ * Version 5.17 kludge around bug in BIOS32 on Intel triton motherboards
+ * Version 5.18 new CMD640 code, moved to cmd640.c, #include'd for now
+ * new UMC8672 code, moved to umc8672.c, #include'd for now
+ * disallow turning on DMA when h/w not capable of DMA
+ * Version 5.19 fix potential infinite timeout on resets
+ * extend reset poll into a general purpose polling scheme
+ * add atapi tape drive support from Gadi Oxman
+ * simplify exit from _intr routines -- no IDE_DO_REQUEST
+ * Version 5.20 leave current rq on blkdev request list during I/O
+ * generalized ide_do_drive_cmd() for tape/cdrom driver use
+ * Version 5.21 fix nasty cdrom/tape bug (ide_preempt was messed up)
+ * Version 5.22 fix ide_xlate_1024() to work with/without drive->id
+ * Version 5.23 miscellaneous touch-ups
+ * Version 5.24 fix #if's for SUPPORT_CMD640
+ * Version 5.25 more touch-ups, fix cdrom resets, ...
+ * cmd640.c now configs/compiles separate from ide.c
+ * Version 5.26 keep_settings now maintains the using_dma flag
+ * fix [EZD] remap message to only output at boot time
+ * fix "bad /dev/ entry" message to say hdc, not hdc0
+ * fix ide_xlate_1024() to respect user specified CHS
+ * use CHS from partn table if it looks translated
+ * re-merged flags chipset,vlb_32bit,vlb_sync into io_32bit
+ * keep track of interface chipset type, when known
+ * add generic PIO mode "tuneproc" mechanism
+ * fix cmd640_vlb option
+ * fix ht6560b support (was completely broken)
+ * umc8672.c now configures/compiles separate from ide.c
+ * move dtc2278 support to dtc2278.c
+ * move ht6560b support to ht6560b.c
+ * move qd6580 support to qd6580.c
+ * add ali14xx support in ali14xx.c
+ * Version 5.27 add [no]autotune parameters to help cmd640
+ * move rz1000 support to rz1000.c
+ * Version 5.28 #include "ide_modes.h"
+ * fix disallow_unmask: now per-interface "no_unmask" bit
+ * force io_32bit to be the same on drive pairs of dtc2278
+ * improved IDE tape error handling, and tape DMA support
+ * bugfix in ide_do_drive_cmd() for cdroms + serialize
+ * Version 5.29 fixed non-IDE check for too many physical heads
+ * don't use LBA if capacity is smaller than CHS
+ * Version 5.30 remove real_devices kludge, formerly used by genhd.c
+ * Version 5.32 change "KB" to "kB"
+ * fix serialize (was broken in kernel 1.3.72)
+ * add support for "hdparm -I"
+ * use common code for disk/tape/cdrom IDE_DRIVE_CMDs
+ * add support for Promise DC4030VL caching card
+ * improved serialize support
+ * put partition check back into alphabetical order
+ * add config option for PCMCIA baggage
+ * try to make PCMCIA support safer to use
+ * improve security on ioctls(): all are suser() only
+ * Version 5.33 improve handling of HDIO_DRIVE_CMDs that read data
+ * Version 5.34 fix irq-sharing problem from 5.33
+ * fix cdrom ioctl problem from 5.33
+ * Version 5.35 cosmetic changes
+ * fix cli() problem in try_to_identify()
+ * Version 5.36 fixes to optional PCMCIA support
+ * Version 5.37 don't use DMA when "noautotune" is specified
+ * Version 5.37a (go) fix shared irq probing (was broken in kernel 1.3.72)
+ * call unplug_device() from ide_do_drive_cmd()
+ * Version 5.38 add "hdx=none" option, courtesy of Joel Maslak
+ * mask drive irq after use, if sharing with another hwif
+ * add code to help debug weird cmd640 problems
+ * Version 5.39 fix horrible error in earlier irq sharing "fix"
+ * Version 5.40 fix serialization -- was broken in 5.39
+ * help sharing by masking device irq after probing
+ * Version 5.41 more fixes to irq sharing/serialize detection
+ * disable io_32bit by default on drive reset
+ * Version 5.42 simplify irq-masking after probe
+ * fix NULL pointer deref in save_match()
+ * Version 5.43 Ugh.. unexpected_intr is back: try to exterminate it
+ * Version 5.44 Fix for "irq probe failed" on cmd640
+ * change path on message regarding MAKEDEV.ide
+ * add a throttle to the unexpected_intr() messages
+ * Version 5.45 fix ugly parameter parsing bugs (thanks Derek)
+ * include Gadi's magic fix for cmd640 unexpected_intr
+ * include mc68000 patches from Geert Uytterhoeven
+ * add Gadi's fix for PCMCIA cdroms
+ * Version 5.46 remove the mc68000 #ifdefs for 2.0.x
+ * Version 5.47 fix set_tune race condition
+ * fix bug in earlier PCMCIA cdrom update
+ * Version 5.48 if def'd, invoke CMD640_DUMP_REGS when irq probe fails
+ * lengthen the do_reset1() pulse, for laptops
+ * add idebus=xx parameter for cmd640 and ali chipsets
+ * no_unmask flag now per-drive instead of per-hwif
+ * fix tune_req so that it gets done immediately
+ * fix missing restore_flags() in ide_ioctl
+ * prevent use of io_32bit on cmd640 with no prefetch
+ * Version 5.49 fix minor quirks in probing routines
+ * Version 5.50 allow values as small as 20 for idebus=
+ * Version 5.51 force non io_32bit in drive_cmd_intr()
+ * change delay_10ms() to delay_50ms() to fix problems
+ * Version 5.52 fix incorrect invalidation of removable devices
+ * add "hdx=slow" command line option
+ * Version 5.53 add ATAPI floppy drive support
+ * change default media for type 0 to floppy
+ * add support for Exabyte Nest
+ * add missing set_blocksize() in revalidate_disk()
+ * handle bad status bit sequencing in ide_wait_stat()
+ * support partition table translations with 255 heads
+ * probe all interfaces by default
+ * add probe for the i82371AB chipset
+ * acknowledge media change on removable drives
+ * add work-around for BMI drives
+ * remove "LBA" from boot messages
+ * Version 5.53.1 add UDMA "CRC retry" support
+ * Version 5.53.2 add Promise/33 auto-detection and DMA support
+ * fix MC_ERR handling
+ * fix mis-detection of NEC cdrom as floppy
+ * issue ATAPI reset and re-probe after "no response"
+ *
+ * Some additional driver compile-time options are in ide.h
+ *
+ * To do, in likely order of completion:
+ * - modify kernel to obtain BIOS geometry for drives on 2nd/3rd/4th i/f
+*/
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/malloc.h>
+
+#include <ahci.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_PCI
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#endif /* CONFIG_PCI */
+
+#include "ide.h"
+#include "ide_modes.h"
+
+#ifdef CONFIG_BLK_DEV_PROMISE
+#include "promise.h"
+#define IS_PROMISE_DRIVE (HWIF(drive)->chipset == ide_promise)
+#else
+#define IS_PROMISE_DRIVE (0) /* auto-NULLs out Promise code */
+#endif /* CONFIG_BLK_DEV_PROMISE */
+
+static const byte ide_hwif_to_major[MAX_HWIFS] = {IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR};
+static unsigned short default_io_base[MAX_HWIFS] = {0x1f0, 0x170, 0x1e8, 0x168};
+static const byte default_irqs[MAX_HWIFS] = {14, 15, 11, 10};
+static int idebus_parameter; /* holds the "idebus=" parameter */
+static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */
+
+/*
+ * This is declared extern in ide.h, for access by other IDE modules:
+ */
+ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
+
+#if (DISK_RECOVERY_TIME > 0)
+/*
+ * For really screwy hardware (hey, at least it *can* be used with Linux)
+ * we can enforce a minimum delay time between successive operations.
+ */
+static unsigned long read_timer(void)
+{
+ unsigned long t, flags;
+ int i;
+
+ save_flags(flags);
+ cli();
+ t = jiffies * 11932;
+ outb_p(0, 0x43);
+ i = inb_p(0x40);
+ i |= inb(0x40) << 8;
+ restore_flags(flags);
+ return (t - i);
+}
+
+static void set_recovery_timer (ide_hwif_t *hwif)
+{
+ hwif->last_time = read_timer();
+}
+#define SET_RECOVERY_TIMER(drive) set_recovery_timer (drive)
+
+#else
+
+#define SET_RECOVERY_TIMER(drive)
+
+#endif /* DISK_RECOVERY_TIME */
+
+/* Called by other drivers to disable the legacy IDE driver on a given IDE base. */
+void ide_disable_base(unsigned base)
+{
+ unsigned i;
+ for (i = 0; i < MAX_HWIFS; i++)
+ if (default_io_base[i] == base)
+ default_io_base[i] = 0;
+}
+
+
+/*
+ * Do not even *think* about calling this!
+ */
+static void init_hwif_data (unsigned int index)
+{
+ byte *p;
+ unsigned int unit;
+ ide_hwif_t *hwif = &ide_hwifs[index];
+
+ /* bulk initialize hwif & drive info with zeros */
+ p = ((byte *) hwif) + sizeof(ide_hwif_t);
+ do {
+ *--p = 0;
+ } while (p > (byte *) hwif);
+
+ /* fill in any non-zero initial values */
+ hwif->index = index;
+ hwif->io_base = default_io_base[index];
+ hwif->irq = default_irqs[index];
+ hwif->ctl_port = hwif->io_base ? hwif->io_base+0x206 : 0x000;
+#ifdef CONFIG_BLK_DEV_HD
+ if (hwif->io_base == HD_DATA)
+ hwif->noprobe = 1; /* may be overridden by ide_setup() */
+#endif /* CONFIG_BLK_DEV_HD */
+ hwif->major = ide_hwif_to_major[index];
+ hwif->name[0] = 'i';
+ hwif->name[1] = 'd';
+ hwif->name[2] = 'e';
+ hwif->name[3] = '0' + index;
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ hwif->tape_drive = NULL;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+
+ drive->select.all = (unit<<4)|0xa0;
+ drive->hwif = hwif;
+ drive->ctl = 0x08;
+ drive->ready_stat = READY_STAT;
+ drive->bad_wstat = BAD_W_STAT;
+ drive->special.b.recalibrate = 1;
+ drive->special.b.set_geometry = 1;
+ drive->name[0] = 'h';
+ drive->name[1] = 'd';
+#ifdef MACH
+ drive->name[2] = '0' + (index * MAX_DRIVES) + unit;
+#else
+ drive->name[2] = 'a' + (index * MAX_DRIVES) + unit;
+#endif
+ }
+}
+
+/*
+ * init_ide_data() sets reasonable default values into all fields
+ * of all instances of the hwifs and drives, but only on the first call.
+ * Subsequent calls have no effect (they don't wipe out anything).
+ *
+ * This routine is normally called at driver initialization time,
+ * but may also be called MUCH earlier during kernel "command-line"
+ * parameter processing. As such, we cannot depend on any other parts
+ * of the kernel (such as memory allocation) to be functioning yet.
+ *
+ * This is too bad, as otherwise we could dynamically allocate the
+ * ide_drive_t structs as needed, rather than always consuming memory
+ * for the max possible number (MAX_HWIFS * MAX_DRIVES) of them.
+ */
+#define MAGIC_COOKIE 0x12345678
+static void init_ide_data (void)
+{
+ unsigned int index;
+ static unsigned long magic_cookie = MAGIC_COOKIE;
+
+ if (magic_cookie != MAGIC_COOKIE)
+ return; /* already initialized */
+ magic_cookie = 0;
+
+ for (index = 0; index < MAX_HWIFS; ++index)
+ init_hwif_data(index);
+
+ idebus_parameter = 0;
+ system_bus_speed = 0;
+}
+
+/*
+ * ide_system_bus_speed() returns what we think is the system VESA/PCI
+ * bus speed (in MHz). This is used for calculating interface PIO timings.
+ * The default is 40 for known PCI systems, 50 otherwise.
+ * The "idebus=xx" parameter can be used to override this value.
+ * The actual value to be used is computed/displayed the first time through.
+ */
+int ide_system_bus_speed (void)
+{
+ if (!system_bus_speed) {
+ if (idebus_parameter)
+ system_bus_speed = idebus_parameter; /* user supplied value */
+#ifdef CONFIG_PCI
+ else if (pcibios_present())
+ system_bus_speed = 40; /* safe default value for PCI */
+#endif /* CONFIG_PCI */
+ else
+ system_bus_speed = 50; /* safe default value for VESA and PCI */
+		printk("ide: Assuming %dMHz system bus speed for PIO modes; override with idebus=xx\n", system_bus_speed);
+ }
+ return system_bus_speed;
+}
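+
+/* Example (illustrative, not part of the original source): a system whose
+   local bus runs at 33 MHz can tell the driver so by booting with the
+   kernel parameter
+
+	idebus=33
+
+   in which case ide_system_bus_speed() returns 33 instead of the 40/50
+   defaults above, and the PIO timing calculations use that value. */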
+
+#if SUPPORT_VLB_SYNC
+/*
+ * Some localbus EIDE interfaces require a special access sequence
+ * when using 32-bit I/O instructions to transfer data. We call this
+ * the "vlb_sync" sequence, which consists of three successive reads
+ * of the sector count register location, with interrupts disabled
+ * to ensure that the reads all happen together.
+ */
+static inline void do_vlb_sync (unsigned short port) {
+ (void) inb (port);
+ (void) inb (port);
+ (void) inb (port);
+}
+#endif /* SUPPORT_VLB_SYNC */
+
+/*
+ * This is used for most PIO data transfers *from* the IDE interface
+ */
+void ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+ unsigned short io_base = HWIF(drive)->io_base;
+ unsigned short data_reg = io_base+IDE_DATA_OFFSET;
+ byte io_32bit = drive->io_32bit;
+
+ if (io_32bit) {
+#if SUPPORT_VLB_SYNC
+ if (io_32bit & 2) {
+ cli();
+ do_vlb_sync(io_base+IDE_NSECTOR_OFFSET);
+ insl(data_reg, buffer, wcount);
+ if (drive->unmask)
+ sti();
+ } else
+#endif /* SUPPORT_VLB_SYNC */
+ insl(data_reg, buffer, wcount);
+ } else {
+#if SUPPORT_SLOW_DATA_PORTS
+ if (drive->slow) {
+ unsigned short *ptr = (unsigned short *) buffer;
+ while (wcount--) {
+ *ptr++ = inw_p(data_reg);
+ *ptr++ = inw_p(data_reg);
+ }
+ } else
+#endif /* SUPPORT_SLOW_DATA_PORTS */
+ insw(data_reg, buffer, wcount<<1);
+ }
+}
+
+/*
+ * This is used for most PIO data transfers *to* the IDE interface
+ */
+void ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+ unsigned short io_base = HWIF(drive)->io_base;
+ unsigned short data_reg = io_base+IDE_DATA_OFFSET;
+ byte io_32bit = drive->io_32bit;
+
+ if (io_32bit) {
+#if SUPPORT_VLB_SYNC
+ if (io_32bit & 2) {
+ cli();
+ do_vlb_sync(io_base+IDE_NSECTOR_OFFSET);
+ outsl(data_reg, buffer, wcount);
+ if (drive->unmask)
+ sti();
+ } else
+#endif /* SUPPORT_VLB_SYNC */
+ outsl(data_reg, buffer, wcount);
+ } else {
+#if SUPPORT_SLOW_DATA_PORTS
+ if (drive->slow) {
+ unsigned short *ptr = (unsigned short *) buffer;
+ while (wcount--) {
+ outw_p(*ptr++, data_reg);
+ outw_p(*ptr++, data_reg);
+ }
+ } else
+#endif /* SUPPORT_SLOW_DATA_PORTS */
+ outsw(data_reg, buffer, wcount<<1);
+ }
+}
+
+/*
+ * The following routines are mainly used by the ATAPI drivers.
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd bytecount is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
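+
+/* A worked example (illustrative): a transfer of 17 bytes is handled as
+   ++bytecount = 18, then 18/4 = 4 longwords (16 bytes) through
+   ide_input_data()/ide_output_data(), and since (18 & 0x03) == 2 one
+   final 16-bit word covers bytes 16-17.  The caller's buffer must
+   therefore be at least 18 bytes long, one byte more than requested. */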
+void atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
+{
+ ++bytecount;
+ ide_input_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2)
+ insw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+}
+
+void atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
+{
+ ++bytecount;
+ ide_output_data (drive, buffer, bytecount / 4);
+ if ((bytecount & 0x03) >= 2)
+ outsw (IDE_DATA_REG, ((byte *)buffer) + (bytecount & ~0x03), 1);
+}
+
+/*
+ * This should get invoked any time we exit the driver to
+ * wait for an interrupt response from a drive. handler() points
+ * at the appropriate code to handle the next interrupt, and a
+ * timer is started to prevent us from waiting forever in case
+ * something goes wrong (see the timer_expiry() handler later on).
+ */
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout)
+{
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+#ifdef DEBUG
+ if (hwgroup->handler != NULL) {
+ printk("%s: ide_set_handler: handler not null; old=%p, new=%p\n",
+ drive->name, hwgroup->handler, handler);
+ }
+#endif
+ hwgroup->handler = handler;
+ hwgroup->timer.expires = jiffies + timeout;
+ add_timer(&(hwgroup->timer));
+}
+
+/*
+ * lba_capacity_is_ok() performs a sanity check on the claimed "lba_capacity"
+ * value for this drive (from its reported identification information).
+ *
+ * Returns: 1 if lba_capacity looks sensible
+ * 0 otherwise
+ *
+ * Note: we must not change id->cyls here, otherwise a second call
+ * of this routine might no longer find lba_capacity ok.
+ */
+static int lba_capacity_is_ok (struct hd_driveid *id)
+{
+ unsigned long lba_sects = id->lba_capacity;
+ unsigned long chs_sects = id->cyls * id->heads * id->sectors;
+ unsigned long _10_percent = chs_sects / 10;
+
+ /*
+ * The ATA spec tells large drives to return
+ * C/H/S = 16383/16/63 independent of their size.
+ * Some drives can be jumpered to use 15 heads instead of 16.
+ */
+ if (id->cyls == 16383 && id->sectors == 63 &&
+ (id->heads == 15 || id->heads == 16) &&
+ id->lba_capacity >= 16383*63*id->heads)
+ return 1; /* lba_capacity is our only option */
+
+ /* perform a rough sanity check on lba_sects: within 10% is "okay" */
+ if ((lba_sects - chs_sects) < _10_percent)
+ return 1; /* lba_capacity is good */
+
+ /* some drives have the word order reversed */
+ lba_sects = (lba_sects << 16) | (lba_sects >> 16);
+ if ((lba_sects - chs_sects) < _10_percent) {
+ id->lba_capacity = lba_sects; /* fix it */
+ return 1; /* lba_capacity is (now) good */
+ }
+ return 0; /* lba_capacity value is bad */
+}
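+
+/* Worked example (illustrative numbers): for a drive reporting
+   cyls=1024, heads=16, sectors=63, chs_sects is 1032192.  An
+   lba_capacity of 1032192 (0x000FC000) passes the 10% check directly.
+   A drive that returns the same value with its 16-bit words swapped
+   reports 0xC000000F instead; the first comparison fails, but after the
+   word-order fixup above the value becomes 0x000FC000 again, so it is
+   accepted and id->lba_capacity is corrected in place. */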
+
+/*
+ * current_capacity() returns the capacity (in sectors) of a drive
+ * according to its current geometry/LBA settings.
+ *
+ * It also sets select.b.lba.
+ */
+static unsigned long current_capacity (ide_drive_t *drive)
+{
+ struct hd_driveid *id = drive->id;
+ unsigned long capacity;
+
+ if (!drive->present)
+ return 0;
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->media == ide_floppy)
+ return idefloppy_capacity(drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+ if (drive->media != ide_disk)
+ return 0x7fffffff; /* cdrom or tape */
+
+ drive->select.b.lba = 0;
+ /* Determine capacity, and use LBA if the drive properly supports it */
+ capacity = drive->cyl * drive->head * drive->sect;
+ if (id != NULL && (id->capability & 2) && lba_capacity_is_ok(id)) {
+ if (id->lba_capacity >= capacity) {
+ capacity = id->lba_capacity;
+ drive->select.b.lba = 1;
+ }
+ }
+ return (capacity - drive->sect0);
+}
+
+/*
+ * ide_geninit() is called exactly *once* for each major, from genhd.c,
+ * at the beginning of the initial partition check for the drives.
+ */
+static void ide_geninit (struct gendisk *gd)
+{
+ unsigned int unit;
+ ide_hwif_t *hwif = gd->real_devices;
+
+ for (unit = 0; unit < gd->nr_real; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->present && drive->media == ide_cdrom)
+ ide_cdrom_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->present && drive->media == ide_tape)
+ idetape_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->present && drive->media == ide_floppy)
+ idefloppy_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+ drive->part[0].nr_sects = current_capacity(drive);
+ if (!drive->present || (drive->media != ide_disk && drive->media != ide_floppy) ||
+ !drive->part[0].nr_sects) {
+ drive->part[0].start_sect = -1; /* skip partition check */
+ }
+ }
+}
+
+/*
+ * init_gendisk() (as opposed to ide_geninit) is called for each major device,
+ * after probing for drives, to allocate partition tables and other data
+ * structures needed for the routines in genhd.c. ide_geninit() gets called
+ * somewhat later, during the partition check.
+ */
+static void init_gendisk (ide_hwif_t *hwif)
+{
+ struct gendisk *gd, **gdp;
+ unsigned int unit, units, minors;
+ int *bs;
+
+ /* figure out maximum drive number on the interface */
+ for (units = MAX_DRIVES; units > 0; --units) {
+ if (hwif->drives[units-1].present)
+ break;
+ }
+ minors = units * (1<<PARTN_BITS);
+ gd = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
+ gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
+ gd->part = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
+ bs = kmalloc (minors*sizeof(int), GFP_KERNEL);
+
+ memset(gd->part, 0, minors * sizeof(struct hd_struct));
+
+ /* cdroms and msdos f/s are examples of non-1024 blocksizes */
+ blksize_size[hwif->major] = bs;
+ for (unit = 0; unit < minors; ++unit)
+ *bs++ = BLOCK_SIZE;
+
+ for (unit = 0; unit < units; ++unit)
+ hwif->drives[unit].part = &gd->part[unit << PARTN_BITS];
+
+ gd->major = hwif->major; /* our major device number */
+ gd->major_name = IDE_MAJOR_NAME; /* treated special in genhd.c */
+ gd->minor_shift = PARTN_BITS; /* num bits for partitions */
+ gd->max_p = 1<<PARTN_BITS; /* 1 + max partitions / drive */
+ gd->max_nr = units; /* max num real drives */
+ gd->nr_real = units; /* current num real drives */
+ gd->init = ide_geninit; /* initialization function */
+ gd->real_devices= hwif; /* ptr to internal data */
+ gd->next = NULL; /* linked list of major devs */
+
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next)) ;
+ hwif->gd = *gdp = gd; /* link onto tail of list */
+}
+
+static void do_reset1 (ide_drive_t *, int); /* needed below */
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+/*
+ * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an atapi drive reset operation. If the drive has not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static void atapi_reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ byte stat;
+
+ OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+ udelay (10);
+
+ if (OK_STAT(stat=GET_STAT(), 0, BUSY_STAT)) {
+ printk("%s: ATAPI reset complete\n", drive->name);
+ } else {
+ if (jiffies < hwgroup->poll_timeout) {
+ ide_set_handler (drive, &atapi_reset_pollfunc, HZ/20);
+ return; /* continue polling */
+ }
+ hwgroup->poll_timeout = 0; /* end of polling */
+ printk("%s: ATAPI reset timed-out, status=0x%02x\n", drive->name, stat);
+ do_reset1 (drive, 1); /* do it the old fashioned way */
+ return;
+ }
+ hwgroup->poll_timeout = 0; /* done polling */
+}
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+/*
+ * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an ide reset operation. If the drives have not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static void reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ ide_hwif_t *hwif = HWIF(drive);
+ byte tmp;
+
+ if (!OK_STAT(tmp=GET_STAT(), 0, BUSY_STAT)) {
+ if (jiffies < hwgroup->poll_timeout) {
+ ide_set_handler (drive, &reset_pollfunc, HZ/20);
+ return; /* continue polling */
+ }
+ printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
+ } else {
+ printk("%s: reset: ", hwif->name);
+ if ((tmp = GET_ERR()) == 1)
+ printk("success\n");
+ else {
+#if FANCY_STATUS_DUMPS
+ printk("master: ");
+ switch (tmp & 0x7f) {
+ case 1: printk("passed");
+ break;
+ case 2: printk("formatter device error");
+ break;
+ case 3: printk("sector buffer error");
+ break;
+ case 4: printk("ECC circuitry error");
+ break;
+ case 5: printk("controlling MPU error");
+ break;
+ default:printk("error (0x%02x?)", tmp);
+ }
+ if (tmp & 0x80)
+ printk("; slave: failed");
+ printk("\n");
+#else
+ printk("failed\n");
+#endif /* FANCY_STATUS_DUMPS */
+ }
+ }
+ hwgroup->poll_timeout = 0; /* done polling */
+}
+
+/*
+ * do_reset1() attempts to recover a confused drive by resetting it.
+ * Unfortunately, resetting a disk drive actually resets all devices on
+ * the same interface, so it can really be thought of as resetting the
+ * interface rather than resetting the drive.
+ *
+ * ATAPI devices have their own reset mechanism which allows them to be
+ * individually reset without clobbering other devices on the same interface.
+ *
+ * Unfortunately, the IDE interface does not generate an interrupt to let
+ * us know when the reset operation has finished, so we must poll for this.
+ * Equally poor, though, is the fact that this may take a very long time to
+ * complete (up to 30 seconds worst case). So, instead of busy-waiting here for it,
+ * we set a timer to poll at 50ms intervals.
+ */
+static void do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+{
+ unsigned int unit;
+ unsigned long flags;
+ ide_hwif_t *hwif = HWIF(drive);
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+
+ save_flags(flags);
+ cli(); /* Why ? */
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ /* For an ATAPI device, first try an ATAPI SRST. */
+ if (drive->media != ide_disk) {
+ if (!do_not_try_atapi) {
+ if (!drive->keep_settings) {
+ drive->unmask = 0;
+ drive->io_32bit = 0;
+ }
+ OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+ udelay (20);
+ OUT_BYTE (WIN_SRST, IDE_COMMAND_REG);
+ hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
+ ide_set_handler (drive, &atapi_reset_pollfunc, HZ/20);
+ restore_flags (flags);
+ return;
+ }
+ }
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+ /*
+ * First, reset any device state data we were maintaining
+ * for any of the drives on this interface.
+ */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *rdrive = &hwif->drives[unit];
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (rdrive->media == ide_tape)
+ rdrive->tape.reset_issued = 1;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ rdrive->special.all = 0;
+ rdrive->special.b.set_geometry = 1;
+ rdrive->special.b.recalibrate = 1;
+ if (OK_TO_RESET_CONTROLLER)
+ rdrive->mult_count = 0;
+ if (!rdrive->keep_settings) {
+ rdrive->mult_req = 0;
+ rdrive->unmask = 0;
+ rdrive->io_32bit = 0;
+ if (rdrive->using_dma) {
+ rdrive->using_dma = 0;
+ printk("%s: disabled DMA\n", rdrive->name);
+ }
+ }
+ if (rdrive->mult_req != rdrive->mult_count)
+ rdrive->special.b.set_multmode = 1;
+ }
+
+#if OK_TO_RESET_CONTROLLER
+ /*
+ * Note that we also set nIEN while resetting the device,
+ * to mask unwanted interrupts from the interface during the reset.
+ * However, due to the design of PC hardware, this will cause an
+ * immediate interrupt due to the edge transition it produces.
+ * This single interrupt gives us a "fast poll" for drives that
+ * recover from reset very quickly, saving us the first 50ms wait time.
+ */
+ OUT_BYTE(drive->ctl|6,IDE_CONTROL_REG); /* set SRST and nIEN */
+ udelay(10); /* more than enough time */
+ OUT_BYTE(drive->ctl|2,IDE_CONTROL_REG); /* clear SRST, leave nIEN */
+ udelay(10); /* more than enough time */
+ hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
+ ide_set_handler (drive, &reset_pollfunc, HZ/20);
+#endif /* OK_TO_RESET_CONTROLLER */
+
+ restore_flags (flags);
+}
+
+/*
+ * ide_do_reset() is the entry point to the drive/interface reset code.
+ */
+void ide_do_reset (ide_drive_t *drive)
+{
+ do_reset1 (drive, 0);
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ drive->tape.reset_issued=1;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+}
+
+/*
+ * Clean up after success/failure of an explicit drive cmd
+ */
+void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err)
+{
+ unsigned long flags;
+ struct request *rq = HWGROUP(drive)->rq;
+
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ byte *args = (byte *) rq->buffer;
+ rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+ if (args) {
+ args[0] = stat;
+ args[1] = err;
+ args[2] = IN_BYTE(IDE_NSECTOR_REG);
+ }
+ }
+ save_flags(flags);
+ cli();
+ blk_dev[MAJOR(rq->rq_dev)].current_request = rq->next;
+ HWGROUP(drive)->rq = NULL;
+ rq->rq_status = RQ_INACTIVE;
+ if (rq->sem != NULL)
+ up(rq->sem);
+ restore_flags(flags);
+}
+
+/*
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ */
+byte ide_dump_status (ide_drive_t *drive, const char *msg, byte stat)
+{
+ unsigned long flags;
+ byte err = 0;
+
+ save_flags (flags);
+ sti();
+ printk("%s: %s: status=0x%02x", drive->name, msg, stat);
+#if FANCY_STATUS_DUMPS
+ if (drive->media == ide_disk) {
+ printk(" { ");
+ if (stat & BUSY_STAT)
+ printk("Busy ");
+ else {
+ if (stat & READY_STAT) printk("DriveReady ");
+ if (stat & WRERR_STAT) printk("DeviceFault ");
+ if (stat & SEEK_STAT) printk("SeekComplete ");
+ if (stat & DRQ_STAT) printk("DataRequest ");
+ if (stat & ECC_STAT) printk("CorrectedError ");
+ if (stat & INDEX_STAT) printk("Index ");
+ if (stat & ERR_STAT) printk("Error ");
+ }
+ printk("}");
+ }
+#endif /* FANCY_STATUS_DUMPS */
+ printk("\n");
+ if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
+ err = GET_ERR();
+ printk("%s: %s: error=0x%02x", drive->name, msg, err);
+#if FANCY_STATUS_DUMPS
+ if (drive->media == ide_disk) {
+ printk(" { ");
+ if (err & ICRC_ERR) printk((err & ABRT_ERR) ? "BadCRC " : "BadSector ");
+ if (err & ECC_ERR) printk("UncorrectableError ");
+ if (err & ID_ERR) printk("SectorIdNotFound ");
+ if (err & ABRT_ERR) printk("DriveStatusError ");
+ if (err & TRK0_ERR) printk("TrackZeroNotFound ");
+ if (err & MARK_ERR) printk("AddrMarkNotFound ");
+ printk("}");
+ if (err & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
+ byte cur = IN_BYTE(IDE_SELECT_REG);
+ if (cur & 0x40) { /* using LBA? */
+ printk(", LBAsect=%ld", (unsigned long)
+ ((cur&0xf)<<24)
+ |(IN_BYTE(IDE_HCYL_REG)<<16)
+ |(IN_BYTE(IDE_LCYL_REG)<<8)
+ | IN_BYTE(IDE_SECTOR_REG));
+ } else {
+ printk(", CHS=%d/%d/%d",
+ (IN_BYTE(IDE_HCYL_REG)<<8) +
+ IN_BYTE(IDE_LCYL_REG),
+ cur & 0xf,
+ IN_BYTE(IDE_SECTOR_REG));
+ }
+ if (HWGROUP(drive)->rq)
+ printk(", sector=%ld", HWGROUP(drive)->rq->sector);
+ }
+ }
+#endif /* FANCY_STATUS_DUMPS */
+ printk("\n");
+ }
+ restore_flags (flags);
+ return err;
+}
+
+/*
+ * try_to_flush_leftover_data() is invoked in response to a drive
+ * unexpectedly having its DRQ_STAT bit set. As an alternative to
+ * resetting the drive, this routine tries to clear the condition
+ * by reading a sector's worth of data from the drive. Of course,
+ * this may not help if the drive is *waiting* for data from *us*.
+ */
+static void try_to_flush_leftover_data (ide_drive_t *drive)
+{
+ int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
+
+ while (i > 0) {
+ unsigned long buffer[16];
+ unsigned int wcount = (i > 16) ? 16 : i;
+ i -= wcount;
+ ide_input_data (drive, buffer, wcount);
+ }
+}
+
+/*
+ * ide_error() takes action based on the error returned by the controller.
+ */
+void ide_error (ide_drive_t *drive, const char *msg, byte stat)
+{
+ struct request *rq;
+ byte err;
+
+ err = ide_dump_status(drive, msg, stat);
+	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
+ return;
+ /* retry only "normal" I/O: */
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ rq->errors = 1;
+ ide_end_drive_cmd(drive, stat, err);
+ return;
+ }
+ if (stat & BUSY_STAT) { /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else {
+ if (drive->media == ide_disk && (stat & ERR_STAT)) {
+ /* err has different meaning on cdrom and tape */
+ if (err == ABRT_ERR) {
+ if (drive->select.b.lba && IN_BYTE(IDE_COMMAND_REG) == WIN_SPECIFY)
+ return; /* some newer drives don't support WIN_SPECIFY */
+ } else if ((err & (ABRT_ERR | ICRC_ERR)) == (ABRT_ERR | ICRC_ERR))
+ ; /* UDMA crc error -- just retry the operation */
+ else if (err & (BBD_ERR | ECC_ERR)) /* retries won't help these */
+ rq->errors = ERROR_MAX;
+ else if (err & TRK0_ERR) /* help it find track zero */
+ rq->errors |= ERROR_RECAL;
+ else if (err & MC_ERR)
+ drive->special.b.mc = 1;
+ }
+ if ((stat & DRQ_STAT) && rq->cmd != WRITE)
+ try_to_flush_leftover_data(drive);
+ }
+ if (GET_STAT() & (BUSY_STAT|DRQ_STAT))
+ rq->errors |= ERROR_RESET; /* Mmmm.. timing problem */
+
+ if (rq->errors >= ERROR_MAX) {
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape) {
+ rq->errors = 0;
+ idetape_end_request(0, HWGROUP(drive));
+ } else
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->media == ide_floppy) {
+ rq->errors = 0;
+ idefloppy_end_request(0, HWGROUP(drive));
+ } else
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ if (drive->media == ide_scsi) {
+ rq->errors = 0;
+ idescsi_end_request(0, HWGROUP(drive));
+ } else
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ ide_end_request(0, HWGROUP(drive));
+ }
+ else {
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ ide_do_reset(drive);
+ return;
+ } else if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
+ drive->special.b.recalibrate = 1;
+ ++rq->errors;
+ }
+}
+
+/*
+ * read_intr() is the handler for disk read/multread interrupts
+ */
+static void read_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ unsigned int msect, nsect;
+ struct request *rq;
+
+ if (!OK_STAT(stat=GET_STAT(),DATA_READY,BAD_R_STAT)) {
+ ide_error(drive, "read_intr", stat);
+ return;
+ }
+ msect = drive->mult_count;
+read_next:
+ rq = HWGROUP(drive)->rq;
+ if (msect) {
+ if ((nsect = rq->current_nr_sectors) > msect)
+ nsect = msect;
+ msect -= nsect;
+ } else
+ nsect = 1;
+ i = rq->nr_sectors - nsect;
+ if (i > 0 && !msect)
+ ide_set_handler (drive, &read_intr, WAIT_CMD);
+ ide_input_data(drive, rq->buffer, nsect * SECTOR_WORDS);
+#ifdef DEBUG
+ printk("%s: read: sectors(%ld-%ld), buffer=0x%08lx, remaining=%ld\n",
+ drive->name, rq->sector, rq->sector+nsect-1,
+ (unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect);
+#endif
+ rq->sector += nsect;
+ rq->buffer += nsect<<9;
+ rq->errors = 0;
+ rq->nr_sectors = i;
+ if ((rq->current_nr_sectors -= nsect) <= 0)
+ ide_end_request(1, HWGROUP(drive));
+ if (i > 0 && msect)
+ goto read_next;
+}
+
+/*
+ * write_intr() is the handler for disk write interrupts
+ */
+static void write_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ struct request *rq = hwgroup->rq;
+
+ if (OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
+#ifdef DEBUG
+ printk("%s: write: sector %ld, buffer=0x%08lx, remaining=%ld\n",
+ drive->name, rq->sector, (unsigned long) rq->buffer,
+ rq->nr_sectors-1);
+#endif
+ if ((rq->nr_sectors == 1) ^ ((stat & DRQ_STAT) != 0)) {
+ rq->sector++;
+ rq->buffer += 512;
+ rq->errors = 0;
+ i = --rq->nr_sectors;
+ --rq->current_nr_sectors;
+ if (rq->current_nr_sectors <= 0)
+ ide_end_request(1, hwgroup);
+ if (i > 0) {
+ ide_set_handler (drive, &write_intr, WAIT_CMD);
+ ide_output_data (drive, rq->buffer, SECTOR_WORDS);
+ }
+ return;
+ }
+ }
+ ide_error(drive, "write_intr", stat);
+}
+
+/*
+ * ide_multwrite() transfers a block of up to mcount sectors of data
+ * to a drive as part of a disk multiple-sector write operation.
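+ *
+ * A note on the transfer size used below (assuming SECTOR_WORDS is 512/4 = 128,
+ * as defined in ide.h): nsect<<7 equals nsect * SECTOR_WORDS, i.e. nsect
+ * 512-byte sectors expressed as the 32-bit word count ide_output_data() expects.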
+ */
+void ide_multwrite (ide_drive_t *drive, unsigned int mcount)
+{
+ struct request *rq = &HWGROUP(drive)->wrq;
+
+ do {
+ unsigned int nsect = rq->current_nr_sectors;
+ if (nsect > mcount)
+ nsect = mcount;
+ mcount -= nsect;
+
+ ide_output_data(drive, rq->buffer, nsect<<7);
+#ifdef DEBUG
+ printk("%s: multwrite: sector %ld, buffer=0x%08lx, count=%d, remaining=%ld\n",
+ drive->name, rq->sector, (unsigned long) rq->buffer,
+ nsect, rq->nr_sectors - nsect);
+#endif
+ if ((rq->nr_sectors -= nsect) <= 0)
+ break;
+ if ((rq->current_nr_sectors -= nsect) == 0) {
+ if ((rq->bh = rq->bh->b_reqnext) != NULL) {
+ rq->current_nr_sectors = rq->bh->b_size>>9;
+ rq->buffer = rq->bh->b_data;
+ } else {
+ panic("%s: buffer list corrupted\n", drive->name);
+ break;
+ }
+ } else {
+ rq->buffer += nsect << 9;
+ }
+ } while (mcount);
+}
+
+/*
+ * multwrite_intr() is the handler for disk multwrite interrupts
+ */
+static void multwrite_intr (ide_drive_t *drive)
+{
+ byte stat;
+ int i;
+ ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ struct request *rq = &hwgroup->wrq;
+
+ if (OK_STAT(stat=GET_STAT(),DRIVE_READY,drive->bad_wstat)) {
+ if (stat & DRQ_STAT) {
+ if (rq->nr_sectors) {
+ ide_set_handler (drive, &multwrite_intr, WAIT_CMD);
+ ide_multwrite(drive, drive->mult_count);
+ return;
+ }
+ } else {
+ if (!rq->nr_sectors) { /* all done? */
+ rq = hwgroup->rq;
+ for (i = rq->nr_sectors; i > 0;){
+ i -= rq->current_nr_sectors;
+ ide_end_request(1, hwgroup);
+ }
+ return;
+ }
+ }
+ }
+ ide_error(drive, "multwrite_intr", stat);
+}
+
+/*
+ * Issue a simple drive command
+ * The drive must be selected beforehand.
+ */
+static void ide_cmd(ide_drive_t *drive, byte cmd, byte nsect, ide_handler_t *handler)
+{
+ ide_set_handler (drive, handler, WAIT_CMD);
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(nsect,IDE_NSECTOR_REG);
+ OUT_BYTE(cmd,IDE_COMMAND_REG);
+}
+
+/*
+ * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
+ */
+static void set_multmode_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (OK_STAT(stat,READY_STAT,BAD_STAT)) {
+ drive->mult_count = drive->mult_req;
+ } else {
+ drive->mult_req = drive->mult_count = 0;
+ drive->special.b.recalibrate = 1;
+ (void) ide_dump_status(drive, "set_multmode", stat);
+ }
+}
+
+/*
+ * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
+ */
+static void set_geometry_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "set_geometry_intr", stat);
+}
+
+/*
+ * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
+ */
+static void recal_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "recal_intr", stat);
+}
+
+/*
+ * mc_intr() is invoked on completion of a WIN_ACKMC cmd.
+ */
+static void mc_intr (ide_drive_t *drive)
+{
+ byte stat = GET_STAT();
+
+ sti();
+ if (!OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_error(drive, "mc_intr", stat);
+ drive->special.b.mc = 0;
+}
+
+/*
+ * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
+ */
+static void drive_cmd_intr (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ byte *args = (byte *) rq->buffer;
+ byte stat = GET_STAT();
+
+ sti();
+ if ((stat & DRQ_STAT) && args && args[3]) {
+ byte io_32bit = drive->io_32bit;
+ drive->io_32bit = 0;
+ ide_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
+ drive->io_32bit = io_32bit;
+ stat = GET_STAT();
+ }
+ if (OK_STAT(stat,READY_STAT,BAD_STAT))
+ ide_end_drive_cmd (drive, stat, GET_ERR());
+ else
+ ide_error(drive, "drive_cmd", stat); /* calls ide_end_drive_cmd */
+}
+
+/*
+ * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
+ * commands to a drive. It used to do much more, but has been scaled back.
+ */
+static inline void do_special (ide_drive_t *drive)
+{
+ special_t *s = &drive->special;
+
+#ifdef DEBUG
+ printk("%s: do_special: 0x%02x\n", drive->name, s->all);
+#endif
+ if (s->b.set_geometry) {
+ s->b.set_geometry = 0;
+ if (drive->media == ide_disk && !drive->no_geom) {
+ OUT_BYTE(drive->sect,IDE_SECTOR_REG);
+ OUT_BYTE(drive->cyl,IDE_LCYL_REG);
+ OUT_BYTE(drive->cyl>>8,IDE_HCYL_REG);
+ OUT_BYTE(((drive->head-1)|drive->select.all)&0xBF,IDE_SELECT_REG);
+ if (!IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_SPECIFY, drive->sect, &set_geometry_intr);
+ }
+ } else if (s->b.recalibrate) {
+ s->b.recalibrate = 0;
+ if (drive->media == ide_disk && !IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_RESTORE, drive->sect, &recal_intr);
+ } else if (s->b.set_tune) {
+ ide_tuneproc_t *tuneproc = HWIF(drive)->tuneproc;
+ s->b.set_tune = 0;
+ if (tuneproc != NULL)
+ tuneproc(drive, drive->tune_req);
+ } else if (s->b.set_multmode) {
+ s->b.set_multmode = 0;
+ if (drive->media == ide_disk) {
+ if (drive->id && drive->mult_req > drive->id->max_multsect)
+ drive->mult_req = drive->id->max_multsect;
+ if (!IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_SETMULT, drive->mult_req, &set_multmode_intr);
+ } else
+ drive->mult_req = 0;
+ } else if (s->b.mc) {
+ s->b.mc = 0;
+ if (drive->media == ide_disk && !IS_PROMISE_DRIVE)
+ ide_cmd(drive, WIN_ACKMC, drive->sect, &mc_intr);
+ } else if (s->all) {
+ int special = s->all;
+ s->all = 0;
+ printk("%s: bad special flag: 0x%02x\n", drive->name, special);
+ }
+}
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return 1 after invoking ide_error() -- caller should just return.
+ *
+ * This routine should get fixed to not hog the cpu during extra long waits..
+ * That could be done by busy-waiting for the first jiffy or two, and then
+ * setting a timer to wake up at half second intervals thereafter,
+ * until the timeout expires.
+ */
+int ide_wait_stat (ide_drive_t *drive, byte good, byte bad, unsigned long timeout)
+{
+ byte stat;
+ unsigned long flags;
+
+ udelay(1); /* spec allows drive 400ns to assert "BUSY" */
+ if ((stat = GET_STAT()) & BUSY_STAT) {
+ save_flags(flags);
+ sti();
+ timeout += jiffies;
+ while ((stat = GET_STAT()) & BUSY_STAT) {
+ if (jiffies > timeout) {
+ restore_flags(flags);
+ ide_error(drive, "status timeout", stat);
+ return 1;
+ }
+ }
+ restore_flags(flags);
+ }
+ udelay(1); /* allow status to settle, then read it again */
+ if (OK_STAT((stat = GET_STAT()), good, bad))
+ return 0;
+ ide_error(drive, "status error", stat);
+ return 1;
+}
+
+/*
+ * do_rw_disk() issues READ and WRITE commands to a disk,
+ * using LBA if supported, or CHS otherwise, to address sectors.
+ * It also takes care of issuing special DRIVE_CMDs.
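+ *
+ * For LBA28 addressing (a sketch of the register writes done below), the
+ * 28-bit block number is split across the task-file registers:
+ *	bits  0- 7 -> sector register
+ *	bits  8-15 -> cylinder-low register
+ *	bits 16-23 -> cylinder-high register
+ *	bits 24-27 -> low nibble of the drive/select register (LBA bit set)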
+ */
+static inline void do_rw_disk (ide_drive_t *drive, struct request *rq, unsigned long block)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ unsigned short io_base = hwif->io_base;
+#ifdef CONFIG_BLK_DEV_PROMISE
+ int use_promise_io = 0;
+#endif /* CONFIG_BLK_DEV_PROMISE */
+
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
+ OUT_BYTE(rq->nr_sectors,io_base+IDE_NSECTOR_OFFSET);
+#ifdef CONFIG_BLK_DEV_PROMISE
+ if (IS_PROMISE_DRIVE) {
+ if (hwif->is_promise2 || rq->cmd == READ) {
+ use_promise_io = 1;
+ }
+ }
+ if (drive->select.b.lba || use_promise_io) {
+#else /* !CONFIG_BLK_DEV_PROMISE */
+ if (drive->select.b.lba) {
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ if (block >= 1UL << 28) {
+ printk("block %lu beyond LBA28\n", block);
+ ide_end_request(0, hwif->hwgroup);
+ return;
+ }
+#ifdef DEBUG
+ printk("%s: %sing: LBAsect=%ld, sectors=%ld, buffer=0x%08lx\n",
+ drive->name, (rq->cmd==READ)?"read":"writ",
+ block, rq->nr_sectors, (unsigned long) rq->buffer);
+#endif
+ OUT_BYTE(block,io_base+IDE_SECTOR_OFFSET);
+ OUT_BYTE(block>>=8,io_base+IDE_LCYL_OFFSET);
+ OUT_BYTE(block>>=8,io_base+IDE_HCYL_OFFSET);
+ OUT_BYTE(((block>>8)&0x0f)|drive->select.all,io_base+IDE_SELECT_OFFSET);
+ } else {
+ unsigned int sect,head,cyl,track;
+ track = block / drive->sect;
+ sect = block % drive->sect + 1;
+ OUT_BYTE(sect,io_base+IDE_SECTOR_OFFSET);
+ head = track % drive->head;
+ cyl = track / drive->head;
+
+ if (cyl >= 1 << 16) {
+ printk("block %lu cylinder %u beyond CHS\n", block, cyl);
+ ide_end_request(0, hwif->hwgroup);
+ return;
+ }
+
+ OUT_BYTE(cyl,io_base+IDE_LCYL_OFFSET);
+ OUT_BYTE(cyl>>8,io_base+IDE_HCYL_OFFSET);
+ OUT_BYTE(head|drive->select.all,io_base+IDE_SELECT_OFFSET);
+#ifdef DEBUG
+ printk("%s: %sing: CHS=%d/%d/%d, sectors=%ld, buffer=0x%08lx\n",
+ drive->name, (rq->cmd==READ)?"read":"writ", cyl,
+ head, sect, rq->nr_sectors, (unsigned long) rq->buffer);
+#endif
+ }
+#ifdef CONFIG_BLK_DEV_PROMISE
+ if (use_promise_io) {
+ do_promise_io (drive, rq);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ if (rq->cmd == READ) {
+#ifdef CONFIG_BLK_DEV_TRITON
+ if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_read, drive)))
+ return;
+#endif /* CONFIG_BLK_DEV_TRITON */
+ ide_set_handler(drive, &read_intr, WAIT_CMD);
+ OUT_BYTE(drive->mult_count ? WIN_MULTREAD : WIN_READ, io_base+IDE_COMMAND_OFFSET);
+ return;
+ }
+ if (rq->cmd == WRITE) {
+#ifdef CONFIG_BLK_DEV_TRITON
+ if (drive->using_dma && !(HWIF(drive)->dmaproc(ide_dma_write, drive)))
+ return;
+#endif /* CONFIG_BLK_DEV_TRITON */
+ if (drive->mult_count)
+ ide_set_handler (drive, &multwrite_intr, WAIT_CMD);
+ else
+ ide_set_handler (drive, &write_intr, WAIT_CMD);
+ OUT_BYTE(drive->mult_count ? WIN_MULTWRITE : WIN_WRITE, io_base+IDE_COMMAND_OFFSET);
+ if (ide_wait_stat(drive, DATA_READY, drive->bad_wstat, WAIT_DRQ)) {
+ printk("%s: no DRQ after issuing %s\n", drive->name,
+ drive->mult_count ? "MULTWRITE" : "WRITE");
+ return;
+ }
+ if (!drive->unmask)
+ cli();
+ if (drive->mult_count) {
+ HWGROUP(drive)->wrq = *rq; /* scratchpad */
+ ide_multwrite(drive, drive->mult_count);
+ } else {
+ ide_output_data(drive, rq->buffer, SECTOR_WORDS);
+ }
+ return;
+ }
+ printk("%s: bad command: %d\n", drive->name, rq->cmd);
+ ide_end_request(0, HWGROUP(drive));
+}
+
+/*
+ * execute_drive_cmd() issues a special drive command,
+ * usually initiated by ioctl() from the external hdparm program.
+ */
+static void execute_drive_cmd (ide_drive_t *drive, struct request *rq)
+{
+ byte *args = (byte *)rq->buffer;
+ if (args) {
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD cmd=0x%02x sc=0x%02x fr=0x%02x xx=0x%02x\n",
+ drive->name, args[0], args[1], args[2], args[3]);
+#endif
+ OUT_BYTE(args[2],IDE_FEATURE_REG);
+ ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
+ return;
+ } else {
+ /*
+ * NULL is actually a valid way of waiting for
+ * all current requests to be flushed from the queue.
+ */
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD (null)\n", drive->name);
+#endif
+ ide_end_drive_cmd(drive, GET_STAT(), GET_ERR());
+ return;
+ }
+}
+
+/*
+ * do_request() initiates handling of a new I/O request
+ */
+static inline void do_request (ide_hwif_t *hwif, struct request *rq)
+{
+ unsigned int minor, unit;
+ unsigned long block, blockend;
+ ide_drive_t *drive;
+
+ sti();
+#ifdef DEBUG
+ printk("%s: do_request: current=0x%08lx\n", hwif->name, (unsigned long) rq);
+#endif
+ minor = MINOR(rq->rq_dev);
+ unit = minor >> PARTN_BITS;
+ if (MAJOR(rq->rq_dev) != hwif->major || unit >= MAX_DRIVES) {
+ printk("%s: bad device number: %s\n",
+ hwif->name, kdevname(rq->rq_dev));
+ goto kill_rq;
+ }
+ drive = &hwif->drives[unit];
+#ifdef DEBUG
+ if (rq->bh && !buffer_locked(rq->bh)) {
+ printk("%s: block not locked\n", drive->name);
+ goto kill_rq;
+ }
+#endif
+ block = rq->sector;
+ blockend = block + rq->nr_sectors;
+ if ((blockend < block) || (blockend > drive->part[minor&PARTN_MASK].nr_sects)) {
+#ifdef MACH
+ printk ("%s%c: bad access: block=%ld, count=%ld, blockend=%ld, nr_sects%ld\n",
+ drive->name, (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ',
+ block, rq->nr_sectors, blockend, drive->part[minor&PARTN_MASK].nr_sects);
+#else
+ printk("%s%c: bad access: block=%ld, count=%ld\n", drive->name,
+ (minor&PARTN_MASK)?'0'+(minor&PARTN_MASK):' ', block, rq->nr_sectors);
+#endif
+ goto kill_rq;
+ }
+ block += drive->part[minor&PARTN_MASK].start_sect + drive->sect0;
+#if FAKE_FDISK_FOR_EZDRIVE
+ if (block == 0 && drive->remap_0_to_1)
+ block = 1; /* redirect MBR access to EZ-Drive partn table */
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ ((ide_hwgroup_t *)hwif->hwgroup)->drive = drive;
+#if (DISK_RECOVERY_TIME > 0)
+ while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
+#endif
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ POLL_HWIF_TAPE_DRIVE; /* macro from ide-tape.h */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ SELECT_DRIVE(hwif,drive);
+ if (ide_wait_stat(drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
+ printk("%s: drive not ready for command\n", drive->name);
+ return;
+ }
+
+ if (!drive->special.all) {
+ if (rq->cmd == IDE_DRIVE_CMD) {
+ execute_drive_cmd(drive, rq);
+ return;
+ }
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ switch (drive->media) {
+ case ide_disk:
+ do_rw_disk (drive, rq, block);
+ return;
+#ifdef CONFIG_BLK_DEV_IDECD
+ case ide_cdrom:
+ ide_do_rw_cdrom (drive, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ case ide_tape:
+ idetape_do_request (drive, rq, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ case ide_floppy:
+ idefloppy_do_request (drive, rq, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ case ide_scsi:
+ idescsi_do_request (drive, rq, block);
+ return;
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+
+ default:
+ printk("%s: media type %d not supported\n",
+ drive->name, drive->media);
+ goto kill_rq;
+ }
+#else
+ do_rw_disk (drive, rq, block); /* simpler and faster */
+ return;
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ }
+ do_special(drive);
+ return;
+kill_rq:
+ ide_end_request(0, hwif->hwgroup);
+}
+
+/*
+ * The driver enables interrupts as much as possible. In order to do this,
+ * (a) the device-interrupt is always masked before entry, and
+ * (b) the timeout-interrupt is always disabled before entry.
+ *
+ * If we enter here from, say irq14, and then start a new request for irq15,
+ * (possible with "serialize" option) then we cannot ensure that we exit
+ * before the irq15 hits us. So, we must be careful not to let this bother us.
+ *
+ * Interrupts are still masked (by default) whenever we are exchanging
+ * data/cmds with a drive, because some drives seem to have very poor
+ * tolerance for latency during I/O. For devices which don't suffer from
+ * this problem (most don't), the unmask flag can be set using the "hdparm"
+ * utility, to permit other interrupts during data/cmd transfers.
+ */
+void ide_do_request (ide_hwgroup_t *hwgroup)
+{
+ cli(); /* paranoia */
+ if (hwgroup->handler != NULL) {
+ printk("%s: EEeekk!! handler not NULL in ide_do_request()\n", hwgroup->hwif->name);
+ return;
+ }
+ do {
+ ide_hwif_t *hwif = hwgroup->hwif;
+ struct request *rq;
+ if ((rq = hwgroup->rq) == NULL) {
+ if (hwif->sharing_irq && hwgroup->drive) /* set nIEN */
+ OUT_BYTE(hwgroup->drive->ctl|2,hwif->ctl_port);
+ /*
+ * hwgroup->next_hwif is different from hwgroup->hwif
+ * only when a request is inserted using "ide_next".
+ * This saves wear and tear on IDE tapes.
+ */
+ hwif = hwgroup->next_hwif;
+ do {
+ rq = blk_dev[hwif->major].current_request;
+ if (rq != NULL && rq->rq_status != RQ_INACTIVE)
+ goto got_rq;
+ } while ((hwif = hwif->next) != hwgroup->next_hwif);
+ hwgroup->active = 0;
+ return; /* no work left for this hwgroup */
+ }
+ got_rq:
+ do_request(hwgroup->hwif = hwgroup->next_hwif = hwif, hwgroup->rq = rq);
+ cli();
+ } while (hwgroup->handler == NULL);
+}
+
+/*
+ * do_hwgroup_request() invokes ide_do_request() after first masking
+ * all possible interrupts for the current hwgroup. This prevents race
+ * conditions in the event that an unexpected interrupt occurs while
+ * we are in the driver.
+ *
+ * Note that when an interrupt is used to reenter the driver, the first level
+ * handler will already have masked the irq that triggered, but any other ones
+ * for the hwgroup will still be unmasked. The driver tries to be careful
+ * about such things.
+ */
+static void do_hwgroup_request (ide_hwgroup_t *hwgroup)
+{
+ if (hwgroup->handler == NULL) {
+ ide_hwif_t *hgif = hwgroup->hwif;
+ ide_hwif_t *hwif = hgif;
+ hwgroup->active = 1;
+ do {
+ disable_irq(hwif->irq);
+ } while ((hwif = hwif->next) != hgif);
+ ide_do_request (hwgroup);
+ do {
+ enable_irq(hwif->irq);
+ } while ((hwif = hwif->next) != hgif);
+ }
+}
+
+static void do_ide0_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[0].hwgroup);
+}
+
+#if MAX_HWIFS > 1
+static void do_ide1_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[1].hwgroup);
+}
+#endif
+
+#if MAX_HWIFS > 2
+static void do_ide2_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[2].hwgroup);
+}
+#endif
+
+#if MAX_HWIFS > 3
+static void do_ide3_request (void) /* invoked with cli() */
+{
+ do_hwgroup_request (ide_hwifs[3].hwgroup);
+}
+#endif
+
+static void timer_expiry (unsigned long data)
+{
+ ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
+ ide_drive_t *drive = hwgroup->drive;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ if (hwgroup->poll_timeout != 0) { /* polling in progress? */
+ ide_handler_t *handler = hwgroup->handler;
+ hwgroup->handler = NULL;
+ handler(drive);
+ } else if (hwgroup->handler == NULL) { /* not waiting for anything? */
+ sti(); /* drive must have responded just as the timer expired */
+ printk("%s: marginal timeout\n", drive->name);
+ } else {
+ hwgroup->handler = NULL; /* abort the operation */
+ if (hwgroup->hwif->dmaproc)
+ (void) hwgroup->hwif->dmaproc (ide_dma_abort, drive);
+ ide_error(drive, "irq timeout", GET_STAT());
+ }
+ if (hwgroup->handler == NULL)
+ do_hwgroup_request (hwgroup);
+ restore_flags(flags);
+}
+
+/*
+ * There's nothing really useful we can do with an unexpected interrupt,
+ * other than reading the status register (to clear it), and logging it.
+ * There should be no way that an irq can happen before we're ready for it,
+ * so we needn't worry much about losing an "important" interrupt here.
+ *
+ * On laptops (and "green" PCs), an unexpected interrupt occurs whenever the
+ * drive enters "idle", "standby", or "sleep" mode, so if the status looks
+ * "good", we just ignore the interrupt completely.
+ *
+ * This routine assumes cli() is in effect when called.
+ *
+ * If an unexpected interrupt happens on irq15 while we are handling irq14
+ * and if the two interfaces are "serialized" (CMD640), then it looks like
+ * we could screw up by interfering with a new request being set up for irq15.
+ *
+ * In reality, this is a non-issue. The new command is not sent unless the
+ * drive is ready to accept one, in which case we know the drive is not
+ * trying to interrupt us. And ide_set_handler() is always invoked before
+ * completing the issuance of any new drive command, so we will not be
+ * accidentally invoked as a result of any valid command completion interrupt.
+ *
+ */
+static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
+{
+ byte stat;
+ unsigned int unit;
+ ide_hwif_t *hwif = hwgroup->hwif;
+
+ /*
+ * handle the unexpected interrupt
+ */
+ do {
+ if (hwif->irq == irq) {
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (!drive->present)
+ continue;
+ SELECT_DRIVE(hwif,drive);
+ udelay(100); /* Ugly, but wait_stat() may not be safe here */
+ if (!OK_STAT(stat=GET_STAT(), drive->ready_stat, BAD_STAT)) {
+ /* Try to not flood the console with msgs */
+ static unsigned long last_msgtime = 0;
+ if ((last_msgtime + (HZ/2)) < jiffies) {
+ last_msgtime = jiffies;
+ (void) ide_dump_status(drive, "unexpected_intr", stat);
+ }
+ }
+ if ((stat & DRQ_STAT))
+ try_to_flush_leftover_data(drive);
+ }
+ }
+ } while ((hwif = hwif->next) != hwgroup->hwif);
+ SELECT_DRIVE(hwif,hwgroup->drive); /* Ugh.. probably interrupts current I/O */
+ udelay(100); /* Ugly, but wait_stat() may not be safe here */
+}
+
+/*
+ * entry point for all interrupts, caller does cli() for us
+ */
+void ide_intr (int irq, void *dev_id, struct pt_regs *regs)
+{
+ ide_hwgroup_t *hwgroup = dev_id;
+ ide_handler_t *handler;
+
+ if (irq == hwgroup->hwif->irq && (handler = hwgroup->handler) != NULL) {
+ ide_drive_t *drive = hwgroup->drive;
+ hwgroup->handler = NULL;
+ del_timer(&(hwgroup->timer));
+ if (drive->unmask)
+ sti();
+ handler(drive);
+ cli(); /* this is necessary, as next rq may be different irq */
+ if (hwgroup->handler == NULL) {
+ SET_RECOVERY_TIMER(HWIF(drive));
+ ide_do_request(hwgroup);
+ }
+ } else {
+ unexpected_intr(irq, hwgroup);
+ }
+ cli();
+}
+
+/*
+ * get_info_ptr() returns the (ide_drive_t *) for a given device number.
+ * It returns NULL if the given device number does not match any present drives.
+ */
+static ide_drive_t *get_info_ptr (kdev_t i_rdev)
+{
+ int major = MAJOR(i_rdev);
+ unsigned int h;
+
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ if (hwif->present && major == hwif->major) {
+ unsigned unit = DEVICE_NR(i_rdev);
+ if (unit < MAX_DRIVES) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (drive->present)
+ return drive;
+ } else if (major == IDE0_MAJOR && unit < 4) {
+ printk("ide: probable bad entry for /dev/hd%c\n", 'a'+unit);
+ printk("ide: to fix it, run: /usr/src/linux/scripts/MAKEDEV.ide\n");
+ }
+ break;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * This function is intended to be used prior to invoking ide_do_drive_cmd().
+ */
+void ide_init_drive_cmd (struct request *rq)
+{
+ rq->buffer = NULL;
+ rq->cmd = IDE_DRIVE_CMD;
+ rq->sector = 0;
+ rq->nr_sectors = 0;
+ rq->current_nr_sectors = 0;
+ rq->sem = NULL;
+ rq->bh = NULL;
+ rq->bhtail = NULL;
+ rq->next = NULL;
+
+#if 0 /* these are done each time through ide_do_drive_cmd() */
+ rq->errors = 0;
+ rq->rq_status = RQ_ACTIVE;
+ rq->rq_dev = ????;
+#endif
+ rq->quiet = 0;
+}
+
+/*
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * If action is ide_wait, then the rq is queued at the end of the
+ * request queue, and the function sleeps until it has been processed.
+ * This is for use when invoked from an ioctl handler.
+ *
+ * If action is ide_preempt, then the rq is queued at the head of
+ * the request queue, displacing the currently-being-processed
+ * request and this function returns immediately without waiting
+ * for the new rq to be completed. This is VERY DANGEROUS, and is
+ * intended for careful use by the ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_next, then the rq is queued immediately after
+ * the currently-being-processed-request (if any), and the function
+ * returns without waiting for the new rq to be completed. As above,
+ * This is VERY DANGEROUS, and is intended for careful use by the
+ * ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_end, then the rq is queued at the end of the
+ * request queue, and the function returns immediately without waiting
+ * for the new rq to be completed. This is again intended for careful
+ * use by the ATAPI tape/cdrom driver code. (Currently used by ide-tape.c,
+ * when operating in the pipelined operation mode).
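+ *
+ * Typical use, as in ide_open() below (illustrative):
+ *
+ *	byte args[4] = {WIN_DOORLOCK, 0, 0, 0};
+ *	struct request rq;
+ *	ide_init_drive_cmd(&rq);
+ *	rq.buffer = (char *)args;
+ *	err = ide_do_drive_cmd(drive, &rq, ide_wait);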
+ */
+int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
+{
+ unsigned long flags;
+ unsigned int major = HWIF(drive)->major;
+ struct request *cur_rq;
+ struct blk_dev_struct *bdev = &blk_dev[major];
+ struct semaphore sem = MUTEX_LOCKED;
+
+ if (IS_PROMISE_DRIVE && rq->buffer != NULL)
+ return -ENOSYS; /* special drive cmds not supported */
+ rq->errors = 0;
+ rq->rq_status = RQ_ACTIVE;
+ rq->rq_dev = MKDEV(major,(drive->select.b.unit)<<PARTN_BITS);
+ if (action == ide_wait)
+ rq->sem = &sem;
+ unplug_device(bdev);
+
+ save_flags(flags);
+ cli();
+ if (action == ide_next)
+ HWGROUP(drive)->next_hwif = HWIF(drive);
+ cur_rq = bdev->current_request;
+
+ if (cur_rq == NULL || action == ide_preempt) {
+ rq->next = cur_rq;
+ bdev->current_request = rq;
+ if (action == ide_preempt)
+ HWGROUP(drive)->rq = NULL;
+ } else {
+ if (action == ide_wait || action == ide_end) {
+ while (cur_rq->next != NULL) /* find end of list */
+ cur_rq = cur_rq->next;
+ }
+ rq->next = cur_rq->next;
+ cur_rq->next = rq;
+ }
+ if (!HWGROUP(drive)->active) {
+ do_hwgroup_request(HWGROUP(drive));
+ cli();
+ }
+ if (action == ide_wait && rq->rq_status != RQ_INACTIVE)
+ down(&sem); /* wait for it to be serviced */
+ restore_flags(flags);
+ return rq->errors ? -EIO : 0; /* return -EIO if errors */
+}
+
+static int ide_open(struct inode * inode, struct file * filp)
+{
+ ide_drive_t *drive;
+ unsigned long flags;
+
+ if ((drive = get_info_ptr(inode->i_rdev)) == NULL)
+ return -ENXIO;
+ save_flags(flags);
+ cli();
+ while (drive->busy)
+ sleep_on(&drive->wqueue);
+ drive->usage++;
+ restore_flags(flags);
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return ide_cdrom_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ return idetape_blkdev_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->media == ide_floppy)
+ return idefloppy_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ if (drive->media == ide_scsi)
+ return idescsi_open (inode, filp, drive);
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ if (drive->removable && drive->usage == 1) {
+ byte door_lock[] = {WIN_DOORLOCK,0,0,0};
+ struct request rq;
+ check_disk_change(inode->i_rdev);
+ ide_init_drive_cmd (&rq);
+ rq.buffer = (char *)door_lock;
+ /*
+ * Ignore the return code from door_lock,
+ * since the open() has already succeeded,
+ * and the door_lock is irrelevant at this point.
+ */
+ (void) ide_do_drive_cmd(drive, &rq, ide_wait);
+ }
+ return 0;
+}
+
+/*
+ * Releasing a block device means we sync() it, so that it can safely
+ * be forgotten about...
+ */
+static void ide_release(struct inode * inode, struct file * file)
+{
+ ide_drive_t *drive;
+
+ if ((drive = get_info_ptr(inode->i_rdev)) != NULL) {
+ fsync_dev(inode->i_rdev);
+ drive->usage--;
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom) {
+ ide_cdrom_release (inode, file, drive);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape) {
+ idetape_blkdev_release (inode, file, drive);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->media == ide_floppy) {
+ idefloppy_release (inode, file, drive);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ if (drive->media == ide_scsi) {
+ idescsi_ide_release (inode, file, drive);
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ if (drive->removable && !drive->usage) {
+ byte door_unlock[] = {WIN_DOORUNLOCK,0,0,0};
+ struct request rq;
+ invalidate_buffers(inode->i_rdev);
+ ide_init_drive_cmd (&rq);
+ rq.buffer = (char *)door_unlock;
+ (void) ide_do_drive_cmd(drive, &rq, ide_wait);
+ }
+ }
+}
+
+/*
+ * This routine is called to flush all partitions and partition tables
+ * for a changed disk, and then re-read the new partition table.
+ * If we are revalidating a disk because of a media change, then we
+ * enter with usage == 0. If we are using an ioctl, we automatically have
+ * usage == 1 (we need an open channel to use an ioctl :-), so this
+ * is our limit.
+ */
+static int revalidate_disk(kdev_t i_rdev)
+{
+ ide_drive_t *drive;
+ unsigned int p, major, minor;
+ long flags;
+
+ if ((drive = get_info_ptr(i_rdev)) == NULL)
+ return -ENODEV;
+
+ major = MAJOR(i_rdev);
+ minor = drive->select.b.unit << PARTN_BITS;
+ save_flags(flags);
+ cli();
+ if (drive->busy || (drive->usage > 1)) {
+ restore_flags(flags);
+ return -EBUSY;
+ };
+ drive->busy = 1;
+ restore_flags(flags);
+
+ for (p = 0; p < (1<<PARTN_BITS); ++p) {
+ if (drive->part[p].nr_sects > 0) {
+ kdev_t devp = MKDEV(major, minor+p);
+ fsync_dev (devp);
+ invalidate_inodes (devp);
+ invalidate_buffers (devp);
+ set_blocksize(devp, 1024);
+ }
+ drive->part[p].start_sect = 0;
+ drive->part[p].nr_sects = 0;
+ };
+
+ drive->part[0].nr_sects = current_capacity(drive);
+ if ((drive->media != ide_disk && drive->media != ide_floppy) || !drive->part[0].nr_sects)
+ drive->part[0].start_sect = -1;
+ resetup_one_dev(HWIF(drive)->gd, drive->select.b.unit);
+
+ drive->busy = 0;
+ wake_up(&drive->wqueue);
+ return 0;
+}
+
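+/*
+ * write_fs_long() copies a long value out to a user-space address,
+ * verifying the destination area first.  It is used by the HDIO_GET_*
+ * and BLK* "get" ioctls below.
+ */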
+static int write_fs_long (unsigned long useraddr, long value)
+{
+ int err;
+
+ if (NULL == (long *)useraddr)
+ return -EINVAL;
+ if ((err = verify_area(VERIFY_WRITE, (long *)useraddr, sizeof(long))))
+ return err;
+ put_user((unsigned)value, (long *) useraddr);
+ return 0;
+}
+
+static int ide_ioctl (struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err;
+ ide_drive_t *drive;
+ unsigned long flags;
+ struct request rq;
+
+ if (!inode || !(inode->i_rdev))
+ return -EINVAL;
+ if ((drive = get_info_ptr(inode->i_rdev)) == NULL)
+ return -ENODEV;
+ ide_init_drive_cmd (&rq);
+ switch (cmd) {
+ case HDIO_GETGEO:
+ {
+ struct hd_geometry *loc = (struct hd_geometry *) arg;
+ if (!loc || (drive->media != ide_disk && drive->media != ide_floppy)) return -EINVAL;
+#ifdef MACH
+ loc->heads = drive->bios_head;
+ loc->sectors = drive->bios_sect;
+ loc->cylinders = drive->bios_cyl;
+ loc->start
+ = (drive->part[MINOR(inode->i_rdev)&PARTN_MASK]
+ .start_sect);
+#else
+ err = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+ if (err) return err;
+ put_user(drive->bios_head, (byte *) &loc->heads);
+ put_user(drive->bios_sect, (byte *) &loc->sectors);
+ put_user(drive->bios_cyl, (unsigned short *) &loc->cylinders);
+ put_user((unsigned)drive->part[MINOR(inode->i_rdev)&PARTN_MASK].start_sect,
+ (unsigned long *) &loc->start);
+#endif
+ return 0;
+ }
+ case BLKFLSBUF:
+ if (!suser()) return -EACCES;
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case BLKRASET:
+ if (!suser()) return -EACCES;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ case BLKRAGET:
+ return write_fs_long(arg, read_ahead[MAJOR(inode->i_rdev)]);
+
+ case BLKGETSIZE: /* Return device size */
+ return write_fs_long(arg, drive->part[MINOR(inode->i_rdev)&PARTN_MASK].nr_sects);
+ case BLKRRPART: /* Re-read partition tables */
+ if (!suser()) return -EACCES;
+ return revalidate_disk(inode->i_rdev);
+
+ case HDIO_GET_KEEPSETTINGS:
+ return write_fs_long(arg, drive->keep_settings);
+
+ case HDIO_GET_UNMASKINTR:
+ return write_fs_long(arg, drive->unmask);
+
+ case HDIO_GET_DMA:
+ return write_fs_long(arg, drive->using_dma);
+
+ case HDIO_GET_32BIT:
+ return write_fs_long(arg, drive->io_32bit);
+
+ case HDIO_GET_MULTCOUNT:
+ return write_fs_long(arg, drive->mult_count);
+
+ case HDIO_GET_IDENTITY:
+ if (!arg || (MINOR(inode->i_rdev) & PARTN_MASK))
+ return -EINVAL;
+ if (drive->id == NULL)
+ return -ENOMSG;
+ err = verify_area(VERIFY_WRITE, (char *)arg, sizeof(*drive->id));
+ if (!err)
+ memcpy_tofs((char *)arg, (char *)drive->id, sizeof(*drive->id));
+ return err;
+
+ case HDIO_GET_NOWERR:
+ return write_fs_long(arg, drive->bad_wstat == BAD_R_STAT);
+
+ case HDIO_SET_DMA:
+ if (!suser()) return -EACCES;
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return -EPERM;
+#endif /* CONFIG_BLK_DEV_IDECD */
+ if (!drive->id || !(drive->id->capability & 1) || !HWIF(drive)->dmaproc)
+ return -EPERM;
+ case HDIO_SET_KEEPSETTINGS:
+ case HDIO_SET_UNMASKINTR:
+ case HDIO_SET_NOWERR:
+ if (arg > 1)
+ return -EINVAL;
+ case HDIO_SET_32BIT:
+ if (!suser()) return -EACCES;
+ if ((MINOR(inode->i_rdev) & PARTN_MASK))
+ return -EINVAL;
+ save_flags(flags);
+ cli();
+ switch (cmd) {
+ case HDIO_SET_DMA:
+ if (!(HWIF(drive)->dmaproc)) {
+ restore_flags(flags);
+ return -EPERM;
+ }
+ drive->using_dma = arg;
+ break;
+ case HDIO_SET_KEEPSETTINGS:
+ drive->keep_settings = arg;
+ break;
+ case HDIO_SET_UNMASKINTR:
+ if (arg && drive->no_unmask) {
+ restore_flags(flags);
+ return -EPERM;
+ }
+ drive->unmask = arg;
+ break;
+ case HDIO_SET_NOWERR:
+ drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
+ break;
+ case HDIO_SET_32BIT:
+ if (arg > (1 + (SUPPORT_VLB_SYNC<<1))) {
+ restore_flags(flags);
+ return -EINVAL;
+ }
+ if (arg && drive->no_io_32bit) {
+ restore_flags(flags);
+ return -EPERM;
+ }
+ drive->io_32bit = arg;
+#ifdef CONFIG_BLK_DEV_DTC2278
+ if (HWIF(drive)->chipset == ide_dtc2278)
+ HWIF(drive)->drives[!drive->select.b.unit].io_32bit = arg;
+#endif /* CONFIG_BLK_DEV_DTC2278 */
+ break;
+ }
+ restore_flags(flags);
+ return 0;
+
+ case HDIO_SET_MULTCOUNT:
+ if (!suser()) return -EACCES;
+ if (MINOR(inode->i_rdev) & PARTN_MASK)
+ return -EINVAL;
+ if (drive->id && arg > drive->id->max_multsect)
+ return -EINVAL;
+ save_flags(flags);
+ cli();
+ if (drive->special.b.set_multmode) {
+ restore_flags(flags);
+ return -EBUSY;
+ }
+ drive->mult_req = arg;
+ drive->special.b.set_multmode = 1;
+ restore_flags(flags);
+ (void) ide_do_drive_cmd (drive, &rq, ide_wait);
+ return (drive->mult_count == arg) ? 0 : -EIO;
+
+ case HDIO_DRIVE_CMD:
+ {
+ byte args[4], *argbuf = args;
+ int argsize = 4;
+ if (!suser() || securelevel > 0) return -EACCES;
+ if (NULL == (void *) arg) {
+ err = ide_do_drive_cmd(drive, &rq, ide_wait);
+ } else if (!(err = verify_area(VERIFY_READ,(void *)arg, 4))) {
+ memcpy_fromfs(args, (void *)arg, 4);
+ if (args[3]) {
+ argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
+ argbuf = kmalloc(argsize, GFP_KERNEL);
+ if (argbuf == NULL)
+ return -ENOMEM;
+ argbuf[0] = args[0];
+ argbuf[1] = args[1];
+ argbuf[2] = args[2];
+ argbuf[3] = args[3];
+ }
+ if (!(err = verify_area(VERIFY_WRITE,(void *)arg, argsize))) {
+ rq.buffer = (char *)argbuf;
+ err = ide_do_drive_cmd(drive, &rq, ide_wait);
+ memcpy_tofs((void *)arg, argbuf, argsize);
+ }
+ if (argsize > 4)
+ kfree(argbuf);
+ }
+ return err;
+ }
+ case HDIO_SET_PIO_MODE:
+ if (!suser()) return -EACCES;
+ if (MINOR(inode->i_rdev) & PARTN_MASK)
+ return -EINVAL;
+ if (!HWIF(drive)->tuneproc)
+ return -ENOSYS;
+ save_flags(flags);
+ cli();
+ if (drive->special.b.set_tune) {
+ restore_flags(flags);
+ return -EBUSY;
+ }
+ drive->tune_req = (byte) arg;
+ drive->special.b.set_tune = 1;
+ restore_flags(flags);
+ (void) ide_do_drive_cmd (drive, &rq, ide_wait);
+ return 0;
+
+ RO_IOCTLS(inode->i_rdev, arg);
+
+ default:
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return ide_cdrom_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ return idetape_blkdev_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->media == ide_floppy)
+ return idefloppy_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ if (drive->media == ide_scsi)
+ return idescsi_ioctl(drive, inode, file, cmd, arg);
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ return -EPERM;
+ }
+}
+
+static int ide_check_media_change (kdev_t i_rdev)
+{
+ ide_drive_t *drive;
+
+ if ((drive = get_info_ptr(i_rdev)) == NULL)
+ return -ENODEV;
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->media == ide_cdrom)
+ return ide_cdrom_check_media_change (drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ if (drive->media == ide_floppy)
+ return idefloppy_media_change (drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+ if (drive->removable) /* for disks */
+ return 1; /* always assume it was changed */
+ return 0;
+}
+
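+/*
+ * ide_fixstring() cleans up an ATA identify-info string in place:
+ * it optionally swaps each 16-bit word into host byte order, strips
+ * leading blanks, compresses internal runs of blanks, and pads the
+ * remainder with NUL bytes.  bytecount must be even.
+ */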
+void ide_fixstring (byte *s, const int bytecount, const int byteswap)
+{
+ byte *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */
+
+ if (byteswap) {
+ /* convert from big-endian to host byte order */
+ for (p = end ; p != s;) {
+ unsigned short *pp = (unsigned short *) (p -= 2);
+ *pp = ntohs(*pp);
+ }
+ }
+
+ /* strip leading blanks */
+ while (s != end && *s == ' ')
+ ++s;
+
+ /* compress internal blanks and strip trailing blanks */
+ while (s != end && *s) {
+ if (*s++ != ' ' || (s != end && *s && *s != ' '))
+ *p++ = *(s-1);
+ }
+
+ /* wipe out trailing garbage */
+ while (p != end)
+ *p++ = '\0';
+}
+
+static inline void do_identify (ide_drive_t *drive, byte cmd)
+{
+ int bswap;
+ struct hd_driveid *id;
+ unsigned long capacity, check;
+
+ id = drive->id = kmalloc (SECTOR_WORDS*4, GFP_KERNEL);
+ ide_input_data(drive, id, SECTOR_WORDS);/* read 512 bytes of id info */
+ sti();
+
+#if defined (CONFIG_SCSI_EATA_DMA) || defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA)
+ /*
+ * EATA SCSI controllers do a hardware ATA emulation:
+ * Ignore them if there is a driver for them available.
+ */
+ if ((id->model[0] == 'P' && id->model[1] == 'M')
+ || (id->model[0] == 'S' && id->model[1] == 'K')) {
+ printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model);
+ drive->present = 0;
+ return;
+ }
+#endif
+
+ /*
+ * WIN_IDENTIFY returns little-endian info,
+ * WIN_PIDENTIFY *usually* returns little-endian info.
+ */
+ bswap = 1;
+ if (cmd == WIN_PIDENTIFY) {
+ if ((id->model[0] == 'N' && id->model[1] == 'E') /* NEC */
+ || (id->model[0] == 'F' && id->model[1] == 'X') /* Mitsumi */
+ || (id->model[0] == 'P' && id->model[1] == 'i'))/* Pioneer */
+ bswap = 0; /* Vertos drives may still be weird */
+ }
+ ide_fixstring (id->model, sizeof(id->model), bswap);
+ ide_fixstring (id->fw_rev, sizeof(id->fw_rev), bswap);
+ ide_fixstring (id->serial_no, sizeof(id->serial_no), bswap);
+
+ if (strstr((char *)id->model, "E X A B Y T E N E S T"))
+ return;
+
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ /*
+ * Check for an ATAPI device
+ */
+ if (cmd == WIN_PIDENTIFY) {
+ byte type = (id->config >> 8) & 0x1f;
+ printk("%s: %s, ATAPI ", drive->name, id->model);
+#ifdef CONFIG_BLK_DEV_PROMISE
+ if (HWIF(drive)->is_promise2) {
+ printk(" -- not supported on 2nd Promise port\n");
+ drive->present = 0;
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ if (!drive->ide_scsi) switch (type) {
+ case 0:
+ if (!strstr((char *)id->model, "oppy") &&
+ !strstr((char *)id->model, "poyp") &&
+ !strstr((char *)id->model, "ZIP"))
+ printk("cdrom or floppy?, assuming ");
+ if (drive->media != ide_cdrom &&
+ !strstr((char *)id->model, "CD-ROM")) {
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ printk("FLOPPY drive\n");
+ drive->media = ide_floppy;
+ if (idefloppy_identify_device(drive, id))
+ drive->present = 1;
+ return;
+#else
+ printk("FLOPPY ");
+ break;
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+ }
+ /* Early cdrom models used zero */
+ case 5:
+#ifdef CONFIG_BLK_DEV_IDECD
+ printk ("CDROM drive\n");
+ drive->media = ide_cdrom;
+ drive->present = 1;
+ drive->removable = 1;
+ return;
+#else
+ printk ("CDROM ");
+ break;
+#endif /* CONFIG_BLK_DEV_IDECD */
+ case 1:
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ printk ("TAPE drive");
+ if (idetape_identify_device (drive,id)) {
+ drive->media = ide_tape;
+ drive->present = 1;
+ drive->removable = 1;
+ if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL && !drive->nodma) {
+ if (!HWIF(drive)->dmaproc(ide_dma_check, drive))
+ printk(", DMA");
+ }
+ printk("\n");
+ }
+ else {
+ drive->present = 0;
+ printk ("\nide-tape: the tape is not supported by this version of the driver\n");
+ }
+ return;
+#else
+ printk ("TAPE ");
+ break;
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ default:
+ drive->present = 0;
+ printk("Type %d - Unknown device\n", type);
+ return;
+ }
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ printk("drive - enabling SCSI emulation\n");
+ drive->media = ide_scsi;
+ drive->present = 1;
+ idescsi_setup(drive);
+#else
+ drive->present = 0;
+ printk("- not supported by this kernel\n");
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ return;
+ }
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+
+ /* check for removable disks (eg. SYQUEST), ignore 'WD' drives */
+ if (id->config & (1<<7)) { /* removable disk ? */
+ if (id->model[0] != 'W' || id->model[1] != 'D')
+ drive->removable = 1;
+ }
+
+ /* SunDisk drives: treat as non-removable, force one unit */
+ if (id->model[0] == 'S' && id->model[1] == 'u') {
+ drive->removable = 0;
+ if (drive->select.all & (1<<4)) {
+ drive->present = 0;
+ return;
+ }
+ }
+
+ drive->media = ide_disk;
+ /* Extract geometry if we did not already have one for the drive */
+ if (!drive->present) {
+ drive->present = 1;
+ drive->cyl = drive->bios_cyl = id->cyls;
+ drive->head = drive->bios_head = id->heads;
+ drive->sect = drive->bios_sect = id->sectors;
+ }
+ /* Handle logical geometry translation by the drive */
+ if ((id->field_valid & 1) && id->cur_cyls && id->cur_heads
+ && (id->cur_heads <= 16) && id->cur_sectors) {
+ /*
+ * Extract the physical drive geometry for our use.
+ * Note that we purposely do *not* update the bios info.
+ * This way, programs that use it (like fdisk) will
+ * still have the same logical view as the BIOS does,
+ * which keeps the partition table from being screwed.
+ *
+ * An exception to this is the cylinder count,
+ * which we reexamine later on to correct for 1024 limitations.
+ */
+ drive->cyl = id->cur_cyls;
+ drive->head = id->cur_heads;
+ drive->sect = id->cur_sectors;
+
+ /* check for word-swapped "capacity" field in id information */
+ capacity = drive->cyl * drive->head * drive->sect;
+ check = (id->cur_capacity0 << 16) | id->cur_capacity1;
+ if (check == capacity) { /* was it swapped? */
+ /* yes, bring it into little-endian order: */
+ id->cur_capacity0 = (capacity >> 0) & 0xffff;
+ id->cur_capacity1 = (capacity >> 16) & 0xffff;
+ }
+ }
+ /* Use physical geometry if what we have still makes no sense */
+ if ((!drive->head || drive->head > 16) &&
+ id->heads && id->heads <= 16) {
+ drive->cyl = id->cyls;
+ drive->head = id->heads;
+ drive->sect = id->sectors;
+ }
+
+ /* calculate drive capacity, and select LBA if possible */
+ capacity = current_capacity (drive);
+
+ /*
+ * if possible, give fdisk access to more of the drive,
+ * by correcting bios_cyls:
+ */
+ if (capacity > drive->bios_cyl * drive->bios_head * drive->bios_sect
+ && !drive->forced_geom && drive->bios_sect && drive->bios_head) {
+ int cyl = (capacity / drive->bios_sect) / drive->bios_head;
+ if (cyl <= 65535)
+ drive->bios_cyl = cyl;
+ else {
+ /* OK until 539 GB */
+ drive->bios_sect = 63;
+ drive->bios_head = 255;
+ drive->bios_cyl = capacity / (63*255);
+ }
+ }
+
+ if (!strncmp((char *)id->model, "BMI ", 4) &&
+ strstr((char *)id->model, " ENHANCED IDE ") &&
+ drive->select.b.lba)
+ drive->no_geom = 1;
+
+ printk ("%s: %.40s, %ldMB w/%dkB Cache, CHS=%d/%d/%d",
+ drive->name, id->model, current_capacity(drive)/2048L, id->buf_size/2,
+ drive->bios_cyl, drive->bios_head, drive->bios_sect);
+
+ drive->mult_count = 0;
+ if (id->max_multsect) {
+ drive->mult_req = INITIAL_MULT_COUNT;
+ if (drive->mult_req > id->max_multsect)
+ drive->mult_req = id->max_multsect;
+ if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect))
+ drive->special.b.set_multmode = 1;
+ }
+ if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL && !drive->nodma) {
+ if (!(HWIF(drive)->dmaproc(ide_dma_check, drive))) {
+ if ((id->field_valid & 4) && (id->dma_ultra & (id->dma_ultra >> 8) & 7))
+ printk(", UDMA");
+ else
+ printk(", DMA");
+ }
+ }
+ printk("\n");
+}
+
+/*
+ * Delay for *at least* 50ms. As we don't know how much time is left
+ * until the next tick occurs, we wait an extra tick to be safe.
+ * This is used only during the probing/polling for drives at boot time.
+ */
+static void delay_50ms (void)
+{
+ unsigned long timer = jiffies + ((HZ + 19)/20) + 1;
+ while (timer > jiffies);
+}
+
+/*
+ * try_to_identify() sends an ATA(PI) IDENTIFY request to a drive
+ * and waits for a response. It also monitors irqs while this is
+ * happening, in hope of automatically determining which one is
+ * being used by the interface.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ */
+static int try_to_identify (ide_drive_t *drive, byte cmd)
+{
+ int hd_status, rc;
+ unsigned long timeout;
+ unsigned long irqs_on = 0;
+ int irq_off;
+
+ if (!HWIF(drive)->irq) { /* no IRQ assigned yet? */
+ printk("%s: Not probing legacy IRQs)\n", drive->name);
+ return 2;
+ probe_irq_off(probe_irq_on()); /* clear dangling irqs */
+ irqs_on = probe_irq_on(); /* start monitoring irqs */
+ OUT_BYTE(drive->ctl,IDE_CONTROL_REG); /* enable device irq */
+ }
+
+ delay_50ms(); /* take a deep breath */
+ if ((IN_BYTE(IDE_ALTSTATUS_REG) ^ IN_BYTE(IDE_STATUS_REG)) & ~INDEX_STAT) {
+ printk("%s: probing with STATUS instead of ALTSTATUS\n", drive->name);
+ hd_status = IDE_STATUS_REG; /* ancient Seagate drives */
+ } else
+ hd_status = IDE_ALTSTATUS_REG; /* use non-intrusive polling */
+
+#if CONFIG_BLK_DEV_PROMISE
+ if (IS_PROMISE_DRIVE) {
+ if (promise_cmd(drive,PROMISE_IDENTIFY)) {
+ if (irqs_on)
+ (void) probe_irq_off(irqs_on);
+ return 1;
+ }
+ } else
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ OUT_BYTE(cmd,IDE_COMMAND_REG); /* ask drive for ID */
+ timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
+ timeout += jiffies;
+ do {
+ if (jiffies > timeout) {
+ if (irqs_on)
+ (void) probe_irq_off(irqs_on);
+ return 1; /* drive timed-out */
+ }
+ delay_50ms(); /* give drive a breather */
+ } while (IN_BYTE(hd_status) & BUSY_STAT);
+
+ delay_50ms(); /* wait for IRQ and DRQ_STAT */
+ if (OK_STAT(GET_STAT(),DRQ_STAT,BAD_R_STAT)) {
+ unsigned long flags;
+ save_flags(flags);
+ cli(); /* some systems need this */
+ do_identify(drive, cmd); /* drive returned ID */
+ rc = 0; /* drive responded with ID */
+ (void) GET_STAT(); /* clear drive IRQ */
+ restore_flags(flags);
+ } else
+ rc = 2; /* drive refused ID */
+ if (!HWIF(drive)->irq) {
+ irq_off = probe_irq_off(irqs_on); /* get our irq number */
+ if (irq_off > 0) {
+ HWIF(drive)->irq = irq_off; /* save it for later */
+ irqs_on = probe_irq_on();
+ OUT_BYTE(drive->ctl|2,IDE_CONTROL_REG); /* mask device irq */
+ udelay(5);
+ (void) probe_irq_off(irqs_on);
+ (void) probe_irq_off(probe_irq_on()); /* clear self-inflicted irq */
+ (void) GET_STAT(); /* clear drive IRQ */
+
+ } else { /* Mmmm.. multiple IRQs.. don't know which was ours */
+ printk("%s: IRQ probe failed (%d)\n", drive->name, irq_off);
+#ifdef CONFIG_BLK_DEV_CMD640
+#ifdef CMD640_DUMP_REGS
+ if (HWIF(drive)->chipset == ide_cmd640) {
+ printk("%s: Hmmm.. probably a driver problem.\n", drive->name);
+ CMD640_DUMP_REGS;
+ }
+#endif /* CMD640_DUMP_REGS */
+#endif /* CONFIG_BLK_DEV_CMD640 */
+ }
+ }
+ return rc;
+}
+
+/*
+ * do_probe() has the difficult job of finding a drive if it exists,
+ * without getting hung up if it doesn't exist, without trampling on
+ * ethernet cards, and without leaving any IRQs dangling to haunt us later.
+ *
+ * If a drive is "known" to exist (from CMOS or kernel parameters),
+ * but does not respond right away, the probe will "hang in there"
+ * for the maximum wait time (about 30 seconds), otherwise it will
+ * exit much more quickly.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ * 3 bad status from device (possible for ATAPI drives)
+ * 4 probe was not attempted because failure was obvious
+ */
+static int do_probe (ide_drive_t *drive, byte cmd)
+{
+ int rc;
+ ide_hwif_t *hwif = HWIF(drive);
+ unsigned long timeout;
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ if (drive->present) { /* avoid waiting for inappropriate probes */
+ if ((drive->media != ide_disk) && (cmd == WIN_IDENTIFY))
+ return 4;
+ }
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+#ifdef DEBUG
+ printk("probing for %s: present=%d, media=%d, probetype=%s\n",
+ drive->name, drive->present, drive->media,
+ (cmd == WIN_IDENTIFY) ? "ATA" : "ATAPI");
+#endif
+ SELECT_DRIVE(hwif,drive);
+ delay_50ms();
+ if (IN_BYTE(IDE_SELECT_REG) != drive->select.all && !drive->present) {
+ OUT_BYTE(0xa0,IDE_SELECT_REG); /* exit with drive0 selected */
+ delay_50ms(); /* allow BUSY_STAT to assert & clear */
+ return 3; /* no i/f present: avoid killing ethernet cards */
+ }
+
+ if (OK_STAT(GET_STAT(),READY_STAT,BUSY_STAT)
+ || drive->present || cmd == WIN_PIDENTIFY)
+ {
+ if ((rc = try_to_identify(drive,cmd))) /* send cmd and wait */
+ rc = try_to_identify(drive,cmd); /* failed: try again */
+ if (rc == 1 && cmd == WIN_PIDENTIFY && drive->autotune != 2) {
+ printk("%s: no response (status = 0x%02x), resetting drive\n", drive->name, GET_STAT());
+ delay_50ms();
+ OUT_BYTE (drive->select.all, IDE_SELECT_REG);
+ delay_50ms();
+ OUT_BYTE(WIN_SRST, IDE_COMMAND_REG);
+ timeout = jiffies;
+ while ((GET_STAT() & BUSY_STAT) && jiffies < timeout + WAIT_WORSTCASE)
+ delay_50ms();
+ rc = try_to_identify(drive, cmd);
+ }
+ if (rc == 1)
+ printk("%s: no response (status = 0x%02x)\n", drive->name, GET_STAT());
+ (void) GET_STAT(); /* ensure drive irq is clear */
+ } else {
+ rc = 3; /* not present or maybe ATAPI */
+ }
+ if (drive->select.b.unit != 0) {
+ OUT_BYTE(0xa0,IDE_SELECT_REG); /* exit with drive0 selected */
+ delay_50ms();
+ (void) GET_STAT(); /* ensure drive irq is clear */
+ }
+ return rc;
+}
+
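+/*
+ * enable_nest() issues EXABYTE_ENABLE_NEST to an Exabyte "NEST" device,
+ * waits for the command to complete, and then re-probes so that the
+ * device nested behind it can be identified.
+ */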
+static void enable_nest (ide_drive_t *drive)
+{
+ unsigned long timeout;
+
+ printk("%s: enabling %s -- ", HWIF(drive)->name, drive->id->model);
+ SELECT_DRIVE(HWIF(drive), drive);
+ delay_50ms();
+ OUT_BYTE(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG);
+ timeout = jiffies + WAIT_WORSTCASE;
+ do {
+ if (jiffies > timeout) {
+ printk("failed (timeout)\n");
+ return;
+ }
+ delay_50ms();
+ } while (GET_STAT() & BUSY_STAT);
+ delay_50ms();
+ if (!OK_STAT(GET_STAT(), 0, BAD_STAT))
+ printk("failed (status = 0x%02x)\n", GET_STAT());
+ else
+ printk("success\n");
+ if (do_probe(drive, WIN_IDENTIFY) >= 2) { /* if !(success||timed-out) */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ (void) do_probe(drive, WIN_PIDENTIFY); /* look for ATAPI device */
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ }
+}
+
+/*
+ * probe_for_drive() tests for existence of a given drive using do_probe().
+ *
+ * Returns: 0 no device was found
+ * 1 device was found (note: drive->present might still be 0)
+ */
+static inline byte probe_for_drive (ide_drive_t *drive)
+{
+ if (drive->noprobe) /* skip probing? */
+ return drive->present;
+ if (do_probe(drive, WIN_IDENTIFY) >= 2) { /* if !(success||timed-out) */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ (void) do_probe(drive, WIN_PIDENTIFY); /* look for ATAPI device */
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ }
+ if (drive->id && strstr((char *)drive->id->model, "E X A B Y T E N E S T"))
+ enable_nest(drive);
+ if (!drive->present)
+ return 0; /* drive not found */
+ if (drive->id == NULL) { /* identification failed? */
+ if (drive->media == ide_disk) {
+ printk ("%s: non-IDE drive, CHS=%d/%d/%d\n",
+ drive->name, drive->cyl, drive->head, drive->sect);
+ }
+#ifdef CONFIG_BLK_DEV_IDECD
+ else if (drive->media == ide_cdrom) {
+ printk("%s: ATAPI cdrom (?)\n", drive->name);
+ }
+#endif /* CONFIG_BLK_DEV_IDECD */
+ else {
+ drive->present = 0; /* nuke it */
+ }
+ }
+ return 1; /* drive was found */
+}
+
+/*
+ * We query CMOS about hard disks : it could be that we have a SCSI/ESDI/etc
+ * controller that is BIOS compatible with ST-506, and thus showing up in our
+ * BIOS table, but not register compatible, and therefore not present in CMOS.
+ *
+ * Furthermore, we will assume that our ST-506 drives <if any> are the primary
+ * drives in the system -- the ones reflected as drive 1 or 2. The first
+ * drive is stored in the high nibble of CMOS byte 0x12, the second in the low
+ * nibble. Each is either a 4 bit drive type, or 0xf indicating that the 8 bit
+ * type is stored in CMOS byte 0x19 (drive 1) or 0x1a (drive 2). A non-zero value
+ * means we have an AT controller hard disk for that drive.
+ *
+ * Of course, there is no guarantee that either drive is actually on the
+ * "primary" IDE interface, but we don't bother trying to sort that out here.
+ * If a drive is not actually on the primary interface, then these parameters
+ * will be ignored. This results in the user having to supply the logical
+ * drive geometry as a boot parameter for each drive not on the primary i/f.
+ *
+ * The only "perfect" way to handle this would be to modify the setup.[cS] code
+ * to do BIOS calls Int13h/Fn08h and Int13h/Fn48h to get all of the drive info
+ * for us during initialization. I have the necessary docs -- any takers? -ml
+ */
+static void probe_cmos_for_drives (ide_hwif_t *hwif)
+{
+#ifdef __i386__
+ extern struct drive_info_struct drive_info;
+ byte cmos_disks, *BIOS = (byte *) &drive_info;
+ int unit;
+
+#ifdef CONFIG_BLK_DEV_PROMISE
+ if (hwif->is_promise2)
+ return;
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ outb_p(0x12,0x70); /* specify CMOS address 0x12 */
+ cmos_disks = inb_p(0x71); /* read the data from 0x12 */
+ /* Extract drive geometry from CMOS+BIOS if not already setup */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if ((cmos_disks & (0xf0 >> (unit*4))) && !drive->present && !drive->nobios) {
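+ /*
+ * Layout of the BIOS drive_info entry used below (16 bytes per
+ * drive): cylinders at offset 0, heads at +2, control byte at +8,
+ * sectors at +14.
+ */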
+ unsigned short cyl = *(unsigned short *)BIOS;
+ unsigned char head = *(BIOS+2);
+ unsigned char sect = *(BIOS+14);
+ unsigned char ctl = *(BIOS+8);
+ if (cyl > 0 && head > 0 && sect > 0 && sect < 64 && head < 255) {
+ drive->cyl = drive->bios_cyl = cyl;
+ drive->head = drive->bios_head = head;
+ drive->sect = drive->bios_sect = sect;
+ drive->ctl = ctl;
+ drive->present = 1;
+ printk("hd%d: got CHS=%d/%d/%d CTL=%x from BIOS\n",
+ unit, cyl, head, sect, ctl);
+
+ } else {
+ printk("hd%d: CHS=%d/%d/%d CTL=%x from BIOS ignored\n",
+ unit, cyl, head, sect, ctl);
+ }
+ }
+ BIOS += 16;
+ }
+#endif
+}
+
+/*
+ * This routine only knows how to look for drive units 0 and 1
+ * on an interface, so any setting of MAX_DRIVES > 2 won't work here.
+ */
+static void probe_hwif (ide_hwif_t *hwif)
+{
+ unsigned int unit;
+
+ if (hwif->noprobe)
+ return;
+ if (hwif->io_base == HD_DATA)
+ probe_cmos_for_drives (hwif);
+#if CONFIG_BLK_DEV_PROMISE
+ if (!hwif->is_promise2 &&
+ (check_region(hwif->io_base,8) || check_region(hwif->ctl_port,1))) {
+#else
+ if (check_region(hwif->io_base,8) || check_region(hwif->ctl_port,1)) {
+#endif /* CONFIG_BLK_DEV_PROMISE */
+ int msgout = 0;
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (drive->present) {
+ drive->present = 0;
+ printk("%s: ERROR, PORTS ALREADY IN USE\n", drive->name);
+ msgout = 1;
+ }
+ }
+ if (!msgout)
+ printk("%s: ports already in use, skipping probe\n", hwif->name);
+ } else {
+ unsigned long flags;
+ save_flags(flags);
+
+ sti(); /* needed for jiffies and irq probing */
+ /*
+ * Second drive should only exist if first drive was found,
+ * but a lot of cdrom drives are configured as single slaves.
+ */
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ (void) probe_for_drive (drive);
+ if (drive->present && drive->media == ide_disk) {
+ if ((!drive->head || drive->head > 16) && !drive->select.b.lba) {
+ printk("%s: INVALID GEOMETRY: %d PHYSICAL HEADS?\n",
+ drive->name, drive->head);
+ drive->present = 0;
+ }
+ }
+ if (drive->present && !hwif->present) {
+ hwif->present = 1;
+ request_region(hwif->io_base, 8, hwif->name);
+ request_region(hwif->ctl_port, 1, hwif->name);
+ }
+ }
+ restore_flags(flags);
+ for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ if (drive->present && drive->media != ide_tape) {
+ ide_tuneproc_t *tuneproc = HWIF(drive)->tuneproc;
+ if (tuneproc != NULL && drive->autotune == 1)
+ tuneproc(drive, 255); /* auto-tune PIO mode */
+ }
+ }
+ }
+}
+
+/*
+ * stridx() returns the offset of c within s,
+ * or -1 if c is '\0' or not found within s.
+ */
+static int stridx (const char *s, char c)
+{
+ char *i = strchr(s, c);
+ return (i && c) ? i - s : -1;
+}
+
+/*
+ * match_parm() does parsing for ide_setup():
+ *
+ * 1. the first char of s must be '='.
+ * 2. if the remainder matches one of the supplied keywords,
+ * the index (1 based) of the keyword is negated and returned.
+ * 3. if the remainder is a series of no more than max_vals numbers
+ * separated by commas, the numbers are saved in vals[] and a
+ * count of how many were saved is returned. Base10 is assumed,
+ * and base16 is allowed when prefixed with "0x".
+ * 4. otherwise, zero is returned.
+ */
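+/*
+ * Example (illustrative): with keywords {"noprobe", "serialize", NULL},
+ * match_parm("=serialize", ...) returns -2, while
+ * match_parm("=0x1f0,0x3f6,14", ...) with max_vals >= 3 stores
+ * 0x1f0, 0x3f6 and 14 in vals[] and returns 3.  A string matching
+ * neither form returns 0.
+ */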
+static int match_parm (char *s, const char *keywords[], int vals[], int max_vals)
+{
+ static const char *decimal = "0123456789";
+ static const char *hex = "0123456789abcdef";
+ int i, n;
+
+ if (*s++ == '=') {
+ /*
+ * Try matching against the supplied keywords,
+ * and return -(index+1) if we match one
+ */
+ if (keywords != NULL) {
+ for (i = 0; *keywords != NULL; ++i) {
+ if (!strcmp(s, *keywords++))
+ return -(i+1);
+ }
+ }
+ /*
+ * Look for a series of no more than "max_vals"
+ * numeric values separated by commas, in base10,
+ * or base16 when prefixed with "0x".
+ * Return a count of how many were found.
+ */
+ for (n = 0; (i = stridx(decimal, *s)) >= 0;) {
+ vals[n] = i;
+ while ((i = stridx(decimal, *++s)) >= 0)
+ vals[n] = (vals[n] * 10) + i;
+ if (*s == 'x' && !vals[n]) {
+ while ((i = stridx(hex, *++s)) >= 0)
+ vals[n] = (vals[n] * 0x10) + i;
+ }
+ if (++n == max_vals)
+ break;
+ if (*s == ',')
+ ++s;
+ }
+ if (!*s)
+ return n;
+ }
+ return 0; /* zero = nothing matched */
+}
+
+/*
+ * ide_setup() gets called VERY EARLY during initialization,
+ * to handle kernel "command line" strings beginning with "hdx="
+ * or "ide". Here is the complete set currently supported:
+ *
+ * "hdx=" is recognized for all "x" from "a" to "h", such as "hdc".
+ * "idex=" is recognized for all "x" from "0" to "3", such as "ide1".
+ *
+ * "hdx=noprobe" : drive may be present, but do not probe for it
+ * "hdx=none" : drive is NOT present, ignore cmos and do not probe
+ * "hdx=nowerr" : ignore the WRERR_STAT bit on this drive
+ * "hdx=cdrom" : drive is present, and is a cdrom drive
+ * "hdx=cyl,head,sect" : disk drive is present, with specified geometry
+ * "hdx=autotune" : driver will attempt to tune interface speed
+ * to the fastest PIO mode supported,
+ * if possible for this drive only.
+ * Not fully supported by all chipset types,
+ * and quite likely to cause trouble with
+ * older/odd IDE drives.
+ * "hdx=nodma" : disallow DMA for the drive
+ *
+ * "idebus=xx" : inform IDE driver of VESA/PCI bus speed in Mhz,
+ * where "xx" is between 20 and 66 inclusive,
+ * used when tuning chipset PIO modes.
+ * For PCI bus, 25 is correct for a P75 system,
+ * 30 is correct for P90,P120,P180 systems,
+ * and 33 is used for P100,P133,P166 systems.
+ * If in doubt, use idebus=33 for PCI.
+ * As for VLB, it is safest to not specify it.
+ *
+ * "idex=noprobe" : do not attempt to access/use this interface
+ * "idex=base" : probe for an interface at the addr specified,
+ * where "base" is usually 0x1f0 or 0x170
+ * and "ctl" is assumed to be "base"+0x206
+ * "idex=base,ctl" : specify both base and ctl
+ * "idex=base,ctl,irq" : specify base, ctl, and irq number
+ * "idex=autotune" : driver will attempt to tune interface speed
+ * to the fastest PIO mode supported,
+ * for all drives on this interface.
+ * Not fully supported by all chipset types,
+ * and quite likely to cause trouble with
+ * older/odd IDE drives.
+ * "idex=noautotune" : driver will NOT attempt to tune interface speed
+ * This is the default for most chipsets,
+ * except the cmd640.
+ * "idex=serialize" : do not overlap operations on idex and ide(x^1)
+ *
+ * The following are valid ONLY on ide0,
+ * and the defaults for the base,ctl ports must not be altered.
+ *
+ * "ide0=dtc2278" : probe/support DTC2278 interface
+ * "ide0=ht6560b" : probe/support HT6560B interface
+ * "ide0=cmd640_vlb" : *REQUIRED* for VLB cards with the CMD640 chip
+ * (not for PCI -- automatically detected)
+ * "ide0=qd6580" : probe/support qd6580 interface
+ * "ide0=ali14xx" : probe/support ali14xx chipsets (ALI M1439, M1443, M1445)
+ * "ide0=umc8672" : probe/support umc8672 chipsets
+ */
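+/*
+ * Example (illustrative): with MACH defined, drive names on the command
+ * line are digit-based ("hd0" .. "hd7"), so "hd2=cdrom" selects unit 0 of
+ * ide1 and marks it as a present CD-ROM drive, while "ide0=0x1f0,0x3f6,14"
+ * forces base, ctl and irq for the first interface.
+ */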
+void ide_setup (char *s)
+{
+ int i, vals[3];
+ ide_hwif_t *hwif;
+ ide_drive_t *drive;
+ unsigned int hw, unit;
+#ifdef MACH
+ const char max_drive = '0' + ((MAX_HWIFS * MAX_DRIVES) - 1);
+#else
+ const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
+#endif
+ const char max_hwif = '0' + (MAX_HWIFS - 1);
+
+ printk("ide_setup: %s", s);
+ init_ide_data ();
+
+ /*
+ * Look for drive options: "hdx="
+ */
+#ifdef MACH
+ if (s[0] == 'h' && s[1] == 'd' && s[2] >= '0' && s[2] <= max_drive) {
+#else
+ if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
+#endif
+ const char *hd_words[] = {"none", "noprobe", "nowerr", "cdrom",
+ "serialize", "autotune", "noautotune",
+ "slow", "ide-scsi", "nodma", NULL};
+#ifdef MACH
+ unit = s[2] - '0';
+#else
+ unit = s[2] - 'a';
+#endif
+ hw = unit / MAX_DRIVES;
+ unit = unit % MAX_DRIVES;
+ hwif = &ide_hwifs[hw];
+ drive = &hwif->drives[unit];
+ switch (match_parm(&s[3], hd_words, vals, 3)) {
+ case -1: /* "none" */
+ drive->nobios = 1; /* drop into "noprobe" */
+ case -2: /* "noprobe" */
+ drive->noprobe = 1;
+ goto done;
+ case -3: /* "nowerr" */
+ drive->bad_wstat = BAD_R_STAT;
+ hwif->noprobe = 0;
+ goto done;
+ case -4: /* "cdrom" */
+ drive->present = 1;
+ drive->media = ide_cdrom;
+ hwif->noprobe = 0;
+ goto done;
+ case -5: /* "serialize" */
+ printk(" -- USE \"ide%d=serialize\" INSTEAD", hw);
+ goto do_serialize;
+ case -6: /* "autotune" */
+ drive->autotune = 1;
+ goto done;
+ case -7: /* "noautotune" */
+ drive->autotune = 2;
+ goto done;
+ case -8: /* "slow" */
+ drive->slow = 1;
+ goto done;
+ case -9: /* "ide-scsi" */
+ drive->ide_scsi = 1;
+ goto done;
+ case -10: /* "nodma" */
+ drive->nodma = 1;
+ goto done;
+ case 3: /* cyl,head,sect */
+ drive->media = ide_disk;
+ drive->cyl = drive->bios_cyl = vals[0];
+ drive->head = drive->bios_head = vals[1];
+ drive->sect = drive->bios_sect = vals[2];
+ drive->present = 1;
+ drive->forced_geom = 1;
+ hwif->noprobe = 0;
+ goto done;
+ default:
+ goto bad_option;
+ }
+ }
+
+ if (s[0] != 'i' || s[1] != 'd' || s[2] != 'e')
+ goto bad_option;
+ /*
+ * Look for bus speed option: "idebus="
+ */
+ if (s[3] == 'b' && s[4] == 'u' && s[5] == 's') {
+ if (match_parm(&s[6], NULL, vals, 1) != 1)
+ goto bad_option;
+ if (vals[0] >= 20 && vals[0] <= 66)
+ idebus_parameter = vals[0];
+ else
+ printk(" -- BAD BUS SPEED! Expected value from 20 to 66");
+ goto done;
+ }
+ /*
+ * Look for interface options: "idex="
+ */
+ if (s[3] >= '0' && s[3] <= max_hwif) {
+ /*
+ * Be VERY CAREFUL changing this: note hardcoded indexes below
+ */
+ const char *ide_words[] = {"noprobe", "serialize", "autotune", "noautotune",
+ "qd6580", "ht6560b", "cmd640_vlb", "dtc2278", "umc8672", "ali14xx", "dc4030", NULL};
+ hw = s[3] - '0';
+ hwif = &ide_hwifs[hw];
+ i = match_parm(&s[4], ide_words, vals, 3);
+
+ /*
+ * Cryptic check to ensure chipset not already set for hwif:
+ */
+ if (i > 0 || i <= -5) {
+ if (hwif->chipset != ide_unknown)
+ goto bad_option;
+ if (i <= -5) {
+ if (ide_hwifs[1].chipset != ide_unknown)
+ goto bad_option;
+ /*
+ * Interface keywords work only for ide0:
+ */
+ if (hw != 0)
+ goto bad_hwif;
+ }
+ }
+
+ switch (i) {
+#ifdef CONFIG_BLK_DEV_PROMISE
+ case -11: /* "dc4030" */
+ {
+ setup_dc4030(hwif);
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_PROMISE */
+#ifdef CONFIG_BLK_DEV_ALI14XX
+ case -10: /* "ali14xx" */
+ {
+ extern void init_ali14xx (void);
+ init_ali14xx();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_ALI14XX */
+#ifdef CONFIG_BLK_DEV_UMC8672
+ case -9: /* "umc8672" */
+ {
+ extern void init_umc8672 (void);
+ init_umc8672();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_UMC8672 */
+#ifdef CONFIG_BLK_DEV_DTC2278
+ case -8: /* "dtc2278" */
+ {
+ extern void init_dtc2278 (void);
+ init_dtc2278();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_DTC2278 */
+#ifdef CONFIG_BLK_DEV_CMD640
+ case -7: /* "cmd640_vlb" */
+ {
+ extern int cmd640_vlb; /* flag for cmd640.c */
+ cmd640_vlb = 1;
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_CMD640 */
+#ifdef CONFIG_BLK_DEV_HT6560B
+ case -6: /* "ht6560b" */
+ {
+ extern void init_ht6560b (void);
+ init_ht6560b();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_HT6560B */
+#if CONFIG_BLK_DEV_QD6580
+ case -5: /* "qd6580" (has secondary i/f) */
+ {
+ extern void init_qd6580 (void);
+ init_qd6580();
+ goto done;
+ }
+#endif /* CONFIG_BLK_DEV_QD6580 */
+ case -4: /* "noautotune" */
+ hwif->drives[0].autotune = 2;
+ hwif->drives[1].autotune = 2;
+ goto done;
+ case -3: /* "autotune" */
+ hwif->drives[0].autotune = 1;
+ hwif->drives[1].autotune = 1;
+ goto done;
+ case -2: /* "serialize" */
+ do_serialize:
+ ide_hwifs[hw].serialized = 1; /* serialize */
+ ide_hwifs[hw^1].serialized = 1; /* with mate */
+ goto done;
+
+ case -1: /* "noprobe" */
+ hwif->noprobe = 1;
+ goto done;
+
+ case 1: /* base */
+ vals[1] = vals[0] + 0x206; /* default ctl */
+ case 2: /* base,ctl */
+ vals[2] = 0; /* default irq = probe for it */
+ case 3: /* base,ctl,irq */
+ hwif->io_base = vals[0];
+ hwif->ctl_port = vals[1];
+ hwif->irq = vals[2];
+ hwif->noprobe = 0;
+ hwif->chipset = ide_generic;
+ goto done;
+
+ case 0: goto bad_option;
+ default:
+ printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
+ return;
+ }
+ }
+bad_option:
+ printk(" -- BAD OPTION\n");
+ return;
+bad_hwif:
+ printk("-- NOT SUPPORTED ON ide%d", hw);
+done:
+ printk("\n");
+}
+
+/*
+ * This routine is called from the partition-table code in genhd.c
+ * to "convert" a drive to a logical geometry with fewer than 1024 cyls.
+ *
+ * The second parameter, "xparm", determines exactly how the translation
+ * will be handled:
+ * 0 = convert to CHS with fewer than 1024 cyls
+ * using the same method as Ontrack DiskManager.
+ * 1 = same as "0", plus offset everything by 63 sectors.
+ * -1 = similar to "0", plus redirect sector 0 to sector 1.
+ * >1 = convert to a CHS geometry with "xparm" heads.
+ *
+ * Returns 0 if the translation was not possible, if the device was not
+ * an IDE disk drive, or if a geometry was "forced" on the commandline.
+ * Returns 1 if the geometry translation was successful.
+ */
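+/*
+ * Worked example (numbers are illustrative): a drive with a BIOS geometry
+ * of 2100/16/63 has 2100*16 = 33600 tracks of 63 sectors; calling this
+ * routine with xparm == 64 yields 33600/64 = 525 cylinders, 64 heads and
+ * 63 sectors, which fits below the 1024-cylinder limit.
+ */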
+
+int ide_xlate_1024 (kdev_t i_rdev, int xparm, const char *msg)
+{
+ ide_drive_t *drive;
+ static const byte head_vals[] = {4, 8, 16, 32, 64, 128, 255, 0};
+ const byte *heads = head_vals;
+ unsigned long tracks;
+
+ drive = get_info_ptr(i_rdev);
+ if (!drive)
+ return 0;
+
+ if (drive->forced_geom)
+ return 0;
+
+ if (xparm > 1 && xparm <= drive->bios_head && drive->bios_sect == 63)
+ return 0; /* we already have a translation */
+
+ printk("%s ", msg);
+
+ if (xparm < 0 && (drive->bios_cyl * drive->bios_head * drive->bios_sect) < (1024 * 16 * 63)) {
+ return 0; /* small disk: no translation needed */
+ }
+
+ if (drive->id) {
+ drive->cyl = drive->id->cyls;
+ drive->head = drive->id->heads;
+ drive->sect = drive->id->sectors;
+ }
+ drive->bios_cyl = drive->cyl;
+ drive->bios_head = drive->head;
+ drive->bios_sect = drive->sect;
+ drive->special.b.set_geometry = 1;
+
+ tracks = drive->bios_cyl * drive->bios_head * drive->bios_sect / 63;
+ drive->bios_sect = 63;
+ if (xparm > 1) {
+ drive->bios_head = xparm;
+ drive->bios_cyl = tracks / drive->bios_head;
+ } else {
+ while (drive->bios_cyl >= 1024) {
+ drive->bios_head = *heads;
+ drive->bios_cyl = tracks / drive->bios_head;
+ if (0 == *++heads)
+ break;
+ }
+#if FAKE_FDISK_FOR_EZDRIVE
+ if (xparm == -1) {
+ drive->remap_0_to_1 = 1;
+ msg = "0->1";
+ } else
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ if (xparm == 1) {
+ drive->sect0 = 63;
+ drive->bios_cyl = (tracks - 1) / drive->bios_head;
+ msg = "+63";
+ }
+ printk("[remap %s] ", msg);
+ }
+ drive->part[0].nr_sects = current_capacity(drive);
+ printk("[%d/%d/%d]", drive->bios_cyl, drive->bios_head, drive->bios_sect);
+ return 1;
+}
+
+#if MAX_HWIFS > 1
+/*
+ * save_match() is used to simplify logic in init_irq() below.
+ *
+ * A loophole here is that we may not know about a particular
+ * hwif's irq until after that hwif is actually probed/initialized..
+ * This could be a problem for the case where an hwif is on a
+ * dual interface that requires serialization (eg. cmd640) and another
+ * hwif using one of the same irqs is initialized beforehand.
+ *
+ * This routine detects and reports such situations, but does not fix them.
+ */
+static void save_match (ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
+{
+ ide_hwif_t *m = *match;
+
+ if (m && m->hwgroup && m->hwgroup != new->hwgroup) {
+ if (!new->hwgroup)
+ return;
+ printk("%s: potential irq problem with %s and %s\n", hwif->name, new->name, m->name);
+ }
+ if (!m || m->irq != hwif->irq) /* don't undo a prior perfect match */
+ *match = new;
+}
+#endif /* MAX_HWIFS > 1 */
+
+/*
+ * This routine sets up the irq for an ide interface, and creates a new
+ * hwgroup for the irq/hwif if none was previously assigned.
+ *
+ * Much of the code is for correctly detecting/handling irq sharing
+ * and irq serialization situations. This is somewhat complex because
+ * it handles static as well as dynamic (PCMCIA) IDE interfaces.
+ *
+ * The SA_INTERRUPT in sa_flags means ide_intr() is always entered with
+ * interrupts completely disabled. This can be bad for interrupt latency,
+ * but anything else has led to problems on some machines. We re-enable
+ * interrupts as much as we can safely do in most places.
+ */
+static int init_irq (ide_hwif_t *hwif)
+{
+ unsigned long flags;
+#if MAX_HWIFS > 1
+ unsigned int index;
+#endif /* MAX_HWIFS > 1 */
+ ide_hwgroup_t *hwgroup;
+ ide_hwif_t *match = NULL;
+
+ save_flags(flags);
+ cli();
+
+ hwif->hwgroup = NULL;
+#if MAX_HWIFS > 1
+ /*
+ * Group up with any other hwifs that share our irq(s).
+ */
+ for (index = 0; index < MAX_HWIFS; index++) {
+ ide_hwif_t *h = &ide_hwifs[index];
+ if (h->hwgroup) { /* scan only initialized hwif's */
+ if (hwif->irq == h->irq) {
+ hwif->sharing_irq = h->sharing_irq = 1;
+ save_match(hwif, h, &match);
+ }
+ if (hwif->serialized) {
+ ide_hwif_t *mate = &ide_hwifs[hwif->index^1];
+ if (index == mate->index || h->irq == mate->irq)
+ save_match(hwif, h, &match);
+ }
+ if (h->serialized) {
+ ide_hwif_t *mate = &ide_hwifs[h->index^1];
+ if (hwif->irq == mate->irq)
+ save_match(hwif, h, &match);
+ }
+ }
+ }
+#endif /* MAX_HWIFS > 1 */
+ /*
+ * If we are still without a hwgroup, then form a new one
+ */
+ if (match) {
+ hwgroup = match->hwgroup;
+ } else {
+ hwgroup = kmalloc(sizeof(ide_hwgroup_t), GFP_KERNEL);
+ hwgroup->hwif = hwgroup->next_hwif = hwif->next = hwif;
+ hwgroup->rq = NULL;
+ hwgroup->handler = NULL;
+ if (hwif->drives[0].present)
+ hwgroup->drive = &hwif->drives[0];
+ else
+ hwgroup->drive = &hwif->drives[1];
+ hwgroup->poll_timeout = 0;
+ hwgroup->active = 0;
+ init_timer(&hwgroup->timer);
+ hwgroup->timer.function = &timer_expiry;
+ hwgroup->timer.data = (unsigned long) hwgroup;
+ }
+
+ /*
+ * Allocate the irq, if not already obtained for another hwif
+ */
+ if (!match || match->irq != hwif->irq) {
+ if (request_irq(hwif->irq, ide_intr, SA_INTERRUPT, hwif->name, hwgroup)) {
+ if (!match)
+ kfree(hwgroup);
+ restore_flags(flags);
+ return 1;
+ }
+ }
+
+ /*
+ * Everything is okay, so link us into the hwgroup
+ */
+ hwif->hwgroup = hwgroup;
+ hwif->next = hwgroup->hwif->next;
+ hwgroup->hwif->next = hwif;
+
+ restore_flags(flags); /* safe now that hwif->hwgroup is set up */
+
+ printk("%s at 0x%03x-0x%03x,0x%03x on irq %d", hwif->name,
+ hwif->io_base, hwif->io_base+7, hwif->ctl_port, hwif->irq);
+ if (match)
+ printk(" (%sed with %s)", hwif->sharing_irq ? "shar" : "serializ", match->name);
+ printk("\n");
+ return 0;
+}
+
+static struct file_operations ide_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ ide_ioctl, /* ioctl */
+ NULL, /* mmap */
+ ide_open, /* open */
+ ide_release, /* release */
+ block_fsync /* fsync */
+ ,NULL, /* fasync */
+ ide_check_media_change, /* check_media_change */
+ revalidate_disk /* revalidate */
+};
+
+#ifdef CONFIG_PCI
+#if defined(CONFIG_BLK_DEV_RZ1000) || defined(CONFIG_BLK_DEV_TRITON)
+
+typedef void (ide_pci_init_proc_t)(byte, byte);
+
+/*
+ * ide_probe_pci() scans PCI for a specific vendor/device function,
+ * and invokes the supplied init routine for each instance detected.
+ */
+static void ide_probe_pci (unsigned short vendor, unsigned short device, ide_pci_init_proc_t *init, int func_adj)
+{
+ unsigned long flags;
+ unsigned index;
+ byte fn, bus;
+
+ save_flags(flags);
+ cli();
+ for (index = 0; !pcibios_find_device (vendor, device, index, &bus, &fn); ++index) {
+ init (bus, fn + func_adj);
+ }
+ restore_flags(flags);
+}
+
+#endif /* defined(CONFIG_BLK_DEV_RZ1000) || defined(CONFIG_BLK_DEV_TRITON) */
+
+static void ide_probe_promise_20246(void)
+{
+ byte fn, bus;
+ unsigned short io[6], count = 0;
+ unsigned int reg, tmp, i;
+ ide_hwif_t *hwif;
+
+ memset(io, 0, 6 * sizeof(unsigned short));
+ if (pcibios_find_device(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246, 0, &bus, &fn))
+ return;
+ printk("ide: Promise Technology IDE Ultra-DMA 33 on PCI bus %d function %d\n", bus, fn);
+ for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) {
+ pcibios_read_config_dword(bus, fn, reg, &tmp);
+ if (tmp & PCI_BASE_ADDRESS_SPACE_IO)
+ io[count++] = tmp & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ for (i = 2; i < 4; i++) {
+ hwif = ide_hwifs + i;
+ if (hwif->chipset == ide_generic) {
+ printk("ide%d: overridden with command line parameter\n", i);
+ return;
+ }
+ tmp = (i - 2) * 2;
+ if (!io[tmp] || !io[tmp + 1]) {
+ printk("ide%d: invalid port address %x, %x -- aborting\n", i, io[tmp], io[tmp + 1]);
+ return;
+ }
+ hwif->io_base = io[tmp];
+ hwif->ctl_port = io[tmp + 1] + 2;
+ hwif->noprobe = 0;
+ }
+}
+
+#endif /* CONFIG_PCI */
+
+/*
+ * probe_for_hwifs() finds/initializes "known" IDE interfaces (PCI and otherwise)
+ *
+ * This routine should ideally be using pcibios_find_class() to find
+ * all IDE interfaces, but that function causes some systems to "go weird".
+ */
+static void probe_for_hwifs (void)
+{
+ extern char *kernel_cmdline;
+
+#ifdef CONFIG_PCI
+ /*
+ * Find/initialize PCI IDE interfaces
+ */
+ if (pcibios_present()) {
+#ifdef CONFIG_BLK_DEV_RZ1000
+ ide_pci_init_proc_t init_rz1000;
+ ide_probe_pci (PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000, &init_rz1000, 0);
+ ide_probe_pci (PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001, &init_rz1000, 0);
+#endif /* CONFIG_BLK_DEV_RZ1000 */
+#ifdef CONFIG_BLK_DEV_TRITON
+ /*
+ * Apparently the BIOS32 services on Intel motherboards are
+ * buggy and won't find the PCI_DEVICE_ID_INTEL_82371_1 for us.
+ * So instead, we search for PCI_DEVICE_ID_INTEL_82371_0,
+ * and then add 1.
+ */
+ ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371_0, &ide_init_triton, 1);
+ ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_1, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, &ide_init_triton, 0);
+ ide_probe_pci (PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229, &ide_init_triton, 0);
+#endif /* CONFIG_BLK_DEV_TRITON */
+ ide_probe_promise_20246();
+ }
+#endif /* CONFIG_PCI */
+#ifdef CONFIG_BLK_DEV_CMD640
+ {
+ extern void ide_probe_for_cmd640x (void);
+ ide_probe_for_cmd640x();
+ }
+#endif
+#ifdef CONFIG_BLK_DEV_PROMISE
+ init_dc4030();
+#endif
+ if (strncmp(kernel_cmdline, "noahci", 6) &&
+ !strstr(kernel_cmdline, " noahci"))
+ ahci_probe_pci();
+}
+
+static int hwif_init (int h)
+{
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ void (*rfn)(void);
+
+ if (!hwif->present)
+ return 0;
+ if (!hwif->irq) {
+ if (!(hwif->irq = default_irqs[h])) {
+ printk("%s: DISABLED, NO IRQ\n", hwif->name);
+ return (hwif->present = 0);
+ }
+ }
+#ifdef CONFIG_BLK_DEV_HD
+ if (hwif->irq == HD_IRQ && hwif->io_base != HD_DATA) {
+ printk("%s: CANNOT SHARE IRQ WITH OLD HARDDISK DRIVER (hd.c)\n", hwif->name);
+ return (hwif->present = 0);
+ }
+#endif /* CONFIG_BLK_DEV_HD */
+
+ hwif->present = 0; /* we set it back to 1 if all is ok below */
+ switch (hwif->major) {
+ case IDE0_MAJOR: rfn = &do_ide0_request; break;
+#if MAX_HWIFS > 1
+ case IDE1_MAJOR: rfn = &do_ide1_request; break;
+#endif
+#if MAX_HWIFS > 2
+ case IDE2_MAJOR: rfn = &do_ide2_request; break;
+#endif
+#if MAX_HWIFS > 3
+ case IDE3_MAJOR: rfn = &do_ide3_request; break;
+#endif
+ default:
+ printk("%s: request_fn NOT DEFINED\n", hwif->name);
+ return (hwif->present = 0);
+ }
+ if (register_blkdev (hwif->major, hwif->name, &ide_fops)) {
+ printk("%s: UNABLE TO GET MAJOR NUMBER %d\n", hwif->name, hwif->major);
+ } else if (init_irq (hwif)) {
+ printk("%s: UNABLE TO GET IRQ %d\n", hwif->name, hwif->irq);
+ (void) unregister_blkdev (hwif->major, hwif->name);
+ } else {
+ init_gendisk(hwif);
+ blk_dev[hwif->major].request_fn = rfn;
+ read_ahead[hwif->major] = 8; /* (4kB) */
+ hwif->present = 1; /* success */
+ }
+ return hwif->present;
+}
+
+/*
+ * This gets invoked once during initialization, to set *everything* up
+ */
+int ide_init (void)
+{
+ int index;
+
+ init_ide_data ();
+ /*
+ * Probe for special "known" interface chipsets
+ */
+ probe_for_hwifs ();
+
+ /*
+ * Probe for drives in the usual way.. CMOS/BIOS, then poke at ports
+ */
+ for (index = 0; index < MAX_HWIFS; ++index)
+ probe_hwif (&ide_hwifs[index]);
+ for (index = 0; index < MAX_HWIFS; ++index)
+ hwif_init (index);
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ idetape_register_chrdev(); /* Register character device interface to the ide tape */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_IDE_PCMCIA
+int ide_register(int io_base, int ctl_port, int irq)
+{
+ int index, i, rc = -1;
+ ide_hwif_t *hwif;
+ ide_drive_t *drive;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ for (index = 0; index < MAX_HWIFS; ++index) {
+ hwif = &ide_hwifs[index];
+ if (hwif->present) {
+ if (hwif->io_base == io_base || hwif->ctl_port == ctl_port)
+ break; /* this ide port already exists */
+ } else {
+ hwif->io_base = io_base;
+ hwif->ctl_port = ctl_port;
+ hwif->irq = irq;
+ hwif->noprobe = 0;
+ probe_hwif(hwif);
+ if (!hwif_init(index))
+ break;
+ for (i = 0; i < hwif->gd->nr_real; i++) {
+ drive = &hwif->drives[i];
+ revalidate_disk(MKDEV(hwif->major, i<<PARTN_BITS));
+#ifdef CONFIG_BLK_DEV_IDECD
+ if (drive->present && drive->media == ide_cdrom)
+ ide_cdrom_setup(drive);
+#endif /* CONFIG_BLK_DEV_IDECD */
+ }
+ rc = index;
+ break;
+ }
+ }
+ restore_flags(flags);
+ return rc;
+}
+
+void ide_unregister (unsigned int index)
+{
+ struct gendisk *gd, **gdp;
+ ide_hwif_t *hwif, *g;
+ ide_hwgroup_t *hwgroup;
+ int irq_count = 0;
+ unsigned long flags;
+
+ if (index >= MAX_HWIFS)
+ return;
+ save_flags(flags);
+ cli();
+ hwif = &ide_hwifs[index];
+ if (!hwif->present || hwif->drives[0].busy || hwif->drives[1].busy) {
+ restore_flags(flags);
+ return;
+ }
+ hwif->present = 0;
+ hwgroup = hwif->hwgroup;
+
+ /*
+ * free the irq if we were the only hwif using it
+ */
+ g = hwgroup->hwif;
+ do {
+ if (g->irq == hwif->irq)
+ ++irq_count;
+ g = g->next;
+ } while (g != hwgroup->hwif);
+ if (irq_count == 1)
+ free_irq(hwif->irq, hwgroup);
+
+ /*
+ * Note that we only release the standard ports,
+ * and do not even try to handle any extra ports
+ * allocated for weird IDE interface chipsets.
+ */
+ release_region(hwif->io_base, 8);
+ release_region(hwif->ctl_port, 1);
+
+ /*
+ * Remove us from the hwgroup, and free
+ * the hwgroup if we were the only member
+ */
+ while (hwgroup->hwif->next != hwif)
+ hwgroup->hwif = hwgroup->hwif->next;
+ hwgroup->hwif->next = hwif->next;
+ if (hwgroup->hwif == hwif)
+ hwgroup->hwif = hwif->next;
+ if (hwgroup->next_hwif == hwif)
+ hwgroup->next_hwif = hwif->next;
+ if (hwgroup->hwif == hwif)
+ kfree(hwgroup);
+
+ /*
+ * Remove us from the kernel's knowledge
+ */
+ unregister_blkdev(hwif->major, hwif->name);
+ kfree(blksize_size[hwif->major]);
+ blk_dev[hwif->major].request_fn = NULL;
+ blksize_size[hwif->major] = NULL;
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
+ if (*gdp == hwif->gd)
+ break;
+ if (*gdp == NULL)
+ printk("gd not in disk chain!\n");
+ else {
+ gd = *gdp; *gdp = gd->next;
+ kfree(gd->sizes);
+ kfree(gd->part);
+ kfree(gd);
+ }
+ init_hwif_data (index); /* restore hwif data to pristine status */
+ restore_flags(flags);
+}
+#endif /* CONFIG_BLK_DEV_IDE_PCMCIA */
diff --git a/linux/src/drivers/block/ide.h b/linux/src/drivers/block/ide.h
new file mode 100644
index 0000000..28e371b
--- /dev/null
+++ b/linux/src/drivers/block/ide.h
@@ -0,0 +1,750 @@
+/*
+ * linux/drivers/block/ide.h
+ *
+ * Copyright (C) 1994, 1995 Linus Torvalds & authors
+ */
+
+#include <linux/config.h>
+
+/*
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to four IDE interfaces, on one or more IRQs (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * Primary i/f: ide0: major=3; (hda) minor=0; (hdb) minor=64
+ * Secondary i/f: ide1: major=22; (hdc or hd1a) minor=0; (hdd or hd1b) minor=64
+ * Tertiary i/f: ide2: major=33; (hde) minor=0; (hdf) minor=64
+ * Quaternary i/f: ide3: major=34; (hdg) minor=0; (hdh) minor=64
+ */
+
+/******************************************************************************
+ * IDE driver configuration options (play with these as desired):
+ *
+ * REALLY_SLOW_IO can be defined in ide.c and ide-cd.c, if necessary
+ */
+#undef REALLY_FAST_IO /* define if ide ports are perfect */
+#define INITIAL_MULT_COUNT 16 /* off=0; on=2,4,8,16,32, etc.. */
+
+#ifndef SUPPORT_SLOW_DATA_PORTS /* 1 to support slow data ports */
+#define SUPPORT_SLOW_DATA_PORTS 1 /* 0 to reduce kernel size */
+#endif
+#ifndef SUPPORT_VLB_SYNC /* 1 to support weird 32-bit chips */
+#define SUPPORT_VLB_SYNC 1 /* 0 to reduce kernel size */
+#endif
+#ifndef DISK_RECOVERY_TIME /* off=0; on=access_delay_time */
+#define DISK_RECOVERY_TIME 0 /* for hardware that needs it */
+#endif
+#ifndef OK_TO_RESET_CONTROLLER /* 1 needed for good error recovery */
+#define OK_TO_RESET_CONTROLLER 1 /* 0 for use with AH2372A/B interface */
+#endif
+#ifndef FAKE_FDISK_FOR_EZDRIVE /* 1 to help linux fdisk with EZDRIVE */
+#define FAKE_FDISK_FOR_EZDRIVE 1 /* 0 to reduce kernel size */
+#endif
+#ifndef FANCY_STATUS_DUMPS /* 1 for human-readable drive errors */
+#define FANCY_STATUS_DUMPS 1 /* 0 to reduce kernel size */
+#endif
+
+#ifdef CONFIG_BLK_DEV_CMD640
+#if 0 /* change to 1 when debugging cmd640 problems */
+void cmd640_dump_regs (void);
+#define CMD640_DUMP_REGS cmd640_dump_regs() /* for debugging cmd640 chipset */
+#endif
+#endif /* CONFIG_BLK_DEV_CMD640 */
+
+#if defined(CONFIG_BLK_DEV_IDECD) || defined(CONFIG_BLK_DEV_IDETAPE) || \
+ defined(CONFIG_BLK_DEV_IDEFLOPPY) || defined(CONFIG_BLK_DEV_IDESCSI)
+#define CONFIG_BLK_DEV_IDEATAPI 1
+#endif
+
+/*
+ * IDE_DRIVE_CMD is used to implement many features of the hdparm utility
+ */
+#define IDE_DRIVE_CMD 99 /* (magic) undef to reduce kernel size*/
+
+/*
+ * "No user-serviceable parts" beyond this point :)
+ *****************************************************************************/
+
+#if defined(CONFIG_BLK_DEV_IDESCSI) && !defined(CONFIG_SCSI)
+#error "SCSI must also be selected"
+#endif
+
+typedef unsigned char byte; /* used everywhere */
+
+/*
+ * Probably not wise to fiddle with these
+ */
+#define ERROR_MAX 8 /* Max read/write errors per sector */
+#define ERROR_RESET 3 /* Reset controller every 4th retry */
+#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
+
+/*
+ * Ensure that various configuration flags have compatible settings
+ */
+#ifdef REALLY_SLOW_IO
+#undef REALLY_FAST_IO
+#endif
+
+/*
+ * Definitions for accessing IDE controller registers
+ */
+
+#define HWIF(drive) ((ide_hwif_t *)((drive)->hwif))
+#define HWGROUP(drive) ((ide_hwgroup_t *)(HWIF(drive)->hwgroup))
+
+#define IDE_DATA_OFFSET (0)
+#define IDE_ERROR_OFFSET (1)
+#define IDE_NSECTOR_OFFSET (2)
+#define IDE_SECTOR_OFFSET (3)
+#define IDE_LCYL_OFFSET (4)
+#define IDE_HCYL_OFFSET (5)
+#define IDE_SELECT_OFFSET (6)
+#define IDE_STATUS_OFFSET (7)
+#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET
+#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET
+
+#define IDE_DATA_REG (HWIF(drive)->io_base+IDE_DATA_OFFSET)
+#define IDE_ERROR_REG (HWIF(drive)->io_base+IDE_ERROR_OFFSET)
+#define IDE_NSECTOR_REG (HWIF(drive)->io_base+IDE_NSECTOR_OFFSET)
+#define IDE_SECTOR_REG (HWIF(drive)->io_base+IDE_SECTOR_OFFSET)
+#define IDE_LCYL_REG (HWIF(drive)->io_base+IDE_LCYL_OFFSET)
+#define IDE_HCYL_REG (HWIF(drive)->io_base+IDE_HCYL_OFFSET)
+#define IDE_SELECT_REG (HWIF(drive)->io_base+IDE_SELECT_OFFSET)
+#define IDE_STATUS_REG (HWIF(drive)->io_base+IDE_STATUS_OFFSET)
+#define IDE_CONTROL_REG (HWIF(drive)->ctl_port)
+#define IDE_FEATURE_REG IDE_ERROR_REG
+#define IDE_COMMAND_REG IDE_STATUS_REG
+#define IDE_ALTSTATUS_REG IDE_CONTROL_REG
+#define IDE_IREASON_REG IDE_NSECTOR_REG
+#define IDE_BCOUNTL_REG IDE_LCYL_REG
+#define IDE_BCOUNTH_REG IDE_HCYL_REG
+
+#ifdef REALLY_FAST_IO
+#define OUT_BYTE(b,p) outb((b),(p))
+#define IN_BYTE(p) (byte)inb(p)
+#else
+#define OUT_BYTE(b,p) outb_p((b),(p))
+#define IN_BYTE(p) (byte)inb_p(p)
+#endif /* REALLY_FAST_IO */
+
+#define GET_ERR() IN_BYTE(IDE_ERROR_REG)
+#define GET_STAT() IN_BYTE(IDE_STATUS_REG)
+#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
+#define BAD_R_STAT (BUSY_STAT | ERR_STAT)
+#define BAD_W_STAT (BAD_R_STAT | WRERR_STAT)
+#define BAD_STAT (BAD_R_STAT | DRQ_STAT)
+#define DRIVE_READY (READY_STAT | SEEK_STAT)
+#define DATA_READY (DRQ_STAT)
+
+/*
+ * Some more useful definitions
+ */
+#define IDE_MAJOR_NAME "ide" /* the same for all i/f; see also genhd.c */
+#define MAJOR_NAME IDE_MAJOR_NAME
+#define PARTN_BITS 6 /* number of minor dev bits for partitions */
+#define PARTN_MASK ((1<<PARTN_BITS)-1) /* a useful bit mask */
+#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
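+/*
+ * Example (illustrative): with PARTN_BITS == 6 the minor number is
+ * (unit << PARTN_BITS) | partition, so partition 2 of the slave drive on
+ * ide0 ("hdb2") is major 3, minor (1 << 6) | 2 == 66.
+ */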
+#ifndef MAX_HWIFS
+#define MAX_HWIFS 4 /* an arbitrary, but realistic limit */
+#endif
+#define SECTOR_WORDS (512 / 4) /* number of 32bit words per sector */
+
+/*
+ * Timeouts for various operations:
+ */
+#define WAIT_DRQ (1*HZ) /* 1s - spec allows up to 20ms, but CF
+ * cards and SSD drives need more */
+#ifdef CONFIG_APM
+#define WAIT_READY (5*HZ) /* 5sec - some laptops are very slow */
+#else
+#define WAIT_READY (3*HZ/100) /* 30msec - should be instantaneous */
+#endif /* CONFIG_APM */
+#define WAIT_PIDENTIFY (1*HZ) /* 1sec - should be less than 3ms (?) */
+#define WAIT_WORSTCASE (30*HZ) /* 30sec - worst case when spinning up */
+#define WAIT_CMD (10*HZ) /* 10sec - maximum wait for an IRQ to happen */
+
+#if defined(CONFIG_BLK_DEV_HT6560B) || defined(CONFIG_BLK_DEV_PROMISE)
+#define SELECT_DRIVE(hwif,drive) \
+{ \
+ if (hwif->selectproc) \
+ hwif->selectproc(drive); \
+ else \
+ OUT_BYTE((drive)->select.all, hwif->io_base+IDE_SELECT_OFFSET); \
+}
+#else
+#define SELECT_DRIVE(hwif,drive) OUT_BYTE((drive)->select.all, hwif->io_base+IDE_SELECT_OFFSET);
+#endif /* CONFIG_BLK_DEV_HT6560B || CONFIG_BLK_DEV_PROMISE */
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+#include "ide-tape.h"
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+#ifdef CONFIG_BLK_DEV_IDECD
+
+struct atapi_request_sense {
+ unsigned char error_code : 7;
+ unsigned char valid : 1;
+ byte reserved1;
+ unsigned char sense_key : 4;
+ unsigned char reserved2 : 1;
+ unsigned char ili : 1;
+ unsigned char reserved3 : 2;
+ byte info[4];
+ byte sense_len;
+ byte command_info[4];
+ byte asc;
+ byte ascq;
+ byte fru;
+ byte sense_key_specific[3];
+};
+
+struct packet_command {
+ unsigned char *buffer;
+ int buflen;
+ int stat;
+ struct atapi_request_sense *sense_data;
+ unsigned char c[12];
+};
+
+
+/* Structure of a MSF cdrom address. */
+struct atapi_msf {
+ byte reserved;
+ byte minute;
+ byte second;
+ byte frame;
+};
+
+
+/* Space to hold the disk TOC. */
+
+#define MAX_TRACKS 99
+struct atapi_toc_header {
+ unsigned short toc_length;
+ byte first_track;
+ byte last_track;
+};
+
+struct atapi_toc_entry {
+ byte reserved1;
+ unsigned control : 4;
+ unsigned adr : 4;
+ byte track;
+ byte reserved2;
+ union {
+ unsigned lba;
+ struct atapi_msf msf;
+ } addr;
+};
+
+struct atapi_toc {
+ int last_session_lba;
+ int xa_flag;
+ unsigned capacity;
+ struct atapi_toc_header hdr;
+ struct atapi_toc_entry ent[MAX_TRACKS+1];
+ /* One extra for the leadout. */
+};
+
+
+/* This structure is annoyingly close to, but not identical with,
+ the cdrom_subchnl structure from cdrom.h. */
+struct atapi_cdrom_subchnl
+{
+ u_char acdsc_reserved;
+ u_char acdsc_audiostatus;
+ u_short acdsc_length;
+ u_char acdsc_format;
+
+ u_char acdsc_adr: 4;
+ u_char acdsc_ctrl: 4;
+ u_char acdsc_trk;
+ u_char acdsc_ind;
+ union {
+ struct atapi_msf msf;
+ int lba;
+ } acdsc_absaddr;
+ union {
+ struct atapi_msf msf;
+ int lba;
+ } acdsc_reladdr;
+};
+
+
+/* Extra per-device info for cdrom drives. */
+struct cdrom_info {
+
+ /* Buffer for table of contents. NULL if we haven't allocated
+ a TOC buffer for this device yet. */
+
+ struct atapi_toc *toc;
+
+ /* Sector buffer. If a read request wants only the first part
+ of a cdrom block, we cache the rest of the block here,
+ in the expectation that that data is going to be wanted soon.
+ SECTOR_BUFFERED is the number of the first buffered sector,
+ and NSECTORS_BUFFERED is the number of sectors in the buffer.
+ Before the buffer is allocated, we should have
+ SECTOR_BUFFER == NULL and NSECTORS_BUFFERED == 0. */
+
+ unsigned long sector_buffered;
+ unsigned long nsectors_buffered;
+ char *sector_buffer;
+
+ /* The result of the last successful request sense command
+ on this device. */
+ struct atapi_request_sense sense_data;
+
+ int max_sectors;
+};
+
+#endif /* CONFIG_BLK_DEV_IDECD */
+
+/*
+ * Now for the data we need to maintain per-drive: ide_drive_t
+ */
+
+typedef enum {ide_disk, ide_cdrom, ide_tape, ide_floppy, ide_scsi} ide_media_t;
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned set_geometry : 1; /* respecify drive geometry */
+ unsigned recalibrate : 1; /* seek to cyl 0 */
+ unsigned set_multmode : 1; /* set multmode count */
+ unsigned set_tune : 1; /* tune interface for drive */
+ unsigned mc : 1; /* acknowledge media change */
+ unsigned reserved : 3; /* unused */
+ } b;
+ } special_t;
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned head : 4; /* always zeros here */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit7 : 1; /* always 1 */
+ } b;
+ } select_t;
+
+typedef struct ide_drive_s {
+ special_t special; /* special action flags */
+ unsigned present : 1; /* drive is physically present */
+ unsigned noprobe : 1; /* from: hdx=noprobe */
+ unsigned keep_settings : 1; /* restore settings after drive reset */
+ unsigned busy : 1; /* currently doing revalidate_disk() */
+ unsigned removable : 1; /* 1 if need to do check_media_change */
+ unsigned using_dma : 1; /* disk is using dma for read/write */
+ unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
+ unsigned unmask : 1; /* flag: okay to unmask other irqs */
+ unsigned no_unmask : 1; /* disallow setting unmask bit */
+ unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
+ unsigned nobios : 1; /* flag: do not probe bios for drive */
+ unsigned slow : 1; /* flag: slow data port */
+ unsigned autotune : 2; /* 1=autotune, 2=noautotune, 0=default */
+ unsigned nodma : 1; /* disk should not use dma for read/write */
+#if FAKE_FDISK_FOR_EZDRIVE
+ unsigned remap_0_to_1 : 1; /* flag: partitioned with ezdrive */
+#endif /* FAKE_FDISK_FOR_EZDRIVE */
+ unsigned no_geom : 1; /* flag: do not set geometry */
+ ide_media_t media; /* disk, cdrom, tape, floppy */
+ select_t select; /* basic drive/head select reg value */
+ byte ctl; /* "normal" value for IDE_CONTROL_REG */
+ byte ready_stat; /* min status value for drive ready */
+ byte mult_count; /* current multiple sector setting */
+ byte mult_req; /* requested multiple sector setting */
+ byte tune_req; /* requested drive tuning setting */
+ byte io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
+ byte bad_wstat; /* used for ignoring WRERR_STAT */
+ byte sect0; /* offset of first sector for DM6:DDO */
+ byte usage; /* current "open()" count for drive */
+ byte head; /* "real" number of heads */
+ byte sect; /* "real" sectors per track */
+ byte bios_head; /* BIOS/fdisk/LILO number of heads */
+ byte bios_sect; /* BIOS/fdisk/LILO sectors per track */
+ unsigned short bios_cyl; /* BIOS/fdisk/LILO number of cyls */
+ unsigned short cyl; /* "real" number of cyls */
+ void *hwif; /* actually (ide_hwif_t *) */
+ struct wait_queue *wqueue; /* used to wait for drive in open() */
+ struct hd_driveid *id; /* drive model identification info */
+ struct hd_struct *part; /* drive partition table */
+ char name[4]; /* drive name, such as "hda" */
+#ifdef CONFIG_BLK_DEV_IDECD
+ struct cdrom_info cdrom_info; /* for ide-cd.c */
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ idetape_tape_t tape; /* for ide-tape.c */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+ void *floppy; /* for ide-floppy.c */
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ void *scsi; /* for ide-scsi.c */
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+ byte ide_scsi; /* use ide-scsi driver */
+ } ide_drive_t;
+
+/*
+ * An ide_dmaproc_t() initiates/aborts DMA read/write operations on a drive.
+ *
+ * The caller is assumed to have selected the drive and programmed the drive's
+ * sector address using CHS or LBA. All that remains is to prepare for DMA
+ * and then issue the actual read/write DMA/PIO command to the drive.
+ *
+ * Returns 0 if all went well.
+ * Returns 1 if DMA read/write could not be started, in which case the caller
+ * should either try again later, or revert to PIO for the current request.
+ */
+typedef enum { ide_dma_read = 0, ide_dma_write = 1,
+ ide_dma_abort = 2, ide_dma_check = 3,
+ ide_dma_status_bad = 4, ide_dma_transferred = 5,
+ ide_dma_begin = 6 }
+ ide_dma_action_t;
+
+typedef int (ide_dmaproc_t)(ide_dma_action_t, ide_drive_t *);
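+/*
+ * Typical call site (illustrative sketch, not a definition): a disk read
+ * path might attempt
+ *     if (drive->using_dma && !HWIF(drive)->dmaproc(ide_dma_read, drive))
+ *             return;
+ * and fall back to PIO when the dmaproc returns 1.
+ */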
+
+
+/*
+ * An ide_tuneproc_t() is used to set the speed of an IDE interface
+ * to a particular PIO mode. The "byte" parameter is used
+ * to select the PIO mode by number (0,1,2,3,4,5), and a value of 255
+ * indicates that the interface driver should "auto-tune" the PIO mode
+ * according to the drive capabilities in drive->id;
+ *
+ * Not all interface types support tuning, and not all of those
+ * support all possible PIO settings. They may silently ignore
+ * or round values as they see fit.
+ */
+typedef void (ide_tuneproc_t)(ide_drive_t *, byte);
+
+/*
+ * This is used to provide HT6560B & PROMISE interface support.
+ */
+typedef void (ide_selectproc_t) (ide_drive_t *);
+
+/*
+ * hwif_chipset_t is used to keep track of the specific hardware
+ * chipset used by each IDE interface, if known.
+ */
+typedef enum { ide_unknown, ide_generic, ide_triton,
+ ide_cmd640, ide_dtc2278, ide_ali14xx,
+ ide_qd6580, ide_umc8672, ide_ht6560b,
+ ide_promise, ide_hpt343, ide_udma,
+ ide_ultra66 }
+ hwif_chipset_t;
+
+typedef struct hwif_s {
+ struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
+ void *hwgroup; /* actually (ide_hwgroup_t *) */
+ unsigned short io_base; /* base io port addr */
+ unsigned short ctl_port; /* usually io_base+0x206 */
+ ide_drive_t drives[MAX_DRIVES]; /* drive info */
+ struct gendisk *gd; /* gendisk structure */
+ ide_tuneproc_t *tuneproc; /* routine to tune PIO mode for drives */
+#if defined(CONFIG_BLK_DEV_HT6560B) || defined(CONFIG_BLK_DEV_PROMISE)
+ ide_selectproc_t *selectproc; /* tweaks hardware to select drive */
+#endif
+ ide_dmaproc_t *dmaproc; /* dma read/write/abort routine */
+ unsigned long *dmatable; /* dma physical region descriptor table */
+ unsigned short dma_base; /* base addr for dma ports (triton) */
+ byte irq; /* our irq number */
+ byte major; /* our major number */
+ char name[5]; /* name of interface, eg. "ide0" */
+ byte index; /* 0 for ide0; 1 for ide1; ... */
+ hwif_chipset_t chipset; /* sub-module for tuning.. */
+ unsigned noprobe : 1; /* don't probe for this interface */
+ unsigned present : 1; /* this interface exists */
+ unsigned serialized : 1; /* serialized operation with mate hwif */
+ unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
+#ifdef CONFIG_BLK_DEV_PROMISE
+ unsigned is_promise2: 1; /* 2nd i/f on promise DC4030 */
+#endif /* CONFIG_BLK_DEV_PROMISE */
+#if (DISK_RECOVERY_TIME > 0)
+ unsigned long last_time; /* time when previous rq was done */
+#endif
+#ifdef CONFIG_BLK_DEV_IDECD
+ struct request request_sense_request; /* from ide-cd.c */
+ struct packet_command request_sense_pc; /* from ide-cd.c */
+#endif /* CONFIG_BLK_DEV_IDECD */
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ ide_drive_t *tape_drive; /* Pointer to the tape on this interface */
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ } ide_hwif_t;
+
+/*
+ * internal ide interrupt handler type
+ */
+typedef void (ide_handler_t)(ide_drive_t *);
+
+typedef struct hwgroup_s {
+ ide_handler_t *handler;/* irq handler, if active */
+ ide_drive_t *drive; /* current drive */
+ ide_hwif_t *hwif; /* ptr to current hwif in linked-list */
+ ide_hwif_t *next_hwif; /* next selected hwif (for tape) */
+ struct request *rq; /* current request */
+ struct timer_list timer; /* failsafe timer */
+ struct request wrq; /* local copy of current write rq */
+ unsigned long poll_timeout; /* timeout value during long polls */
+ int active; /* set when servicing requests */
+ } ide_hwgroup_t;
+
+/*
+ * ide_hwifs[] is the master data structure used to keep track
+ * of just about everything in ide.c. Whenever possible, routines
+ * should be using pointers to a drive (ide_drive_t *) or
+ * pointers to a hwif (ide_hwif_t *), rather than indexing this
+ * structure directly (the allocation/layout may change!).
+ *
+ */
+#ifndef _IDE_C
+extern ide_hwif_t ide_hwifs[]; /* master data repository */
+#endif
+
+/*
+ * One final include file, which references some of the data/defns from above
+ */
+#define IDE_DRIVER /* "parameter" for blk.h */
+#include <linux/blk.h>
+
+#if (DISK_RECOVERY_TIME > 0)
+void ide_set_recovery_timer (ide_hwif_t *);
+#define SET_RECOVERY_TIMER(drive) ide_set_recovery_timer (drive)
+#else
+#define SET_RECOVERY_TIMER(drive)
+#endif
+
+/*
+ * This is used for (nearly) all data transfers from the IDE interface
+ */
+void ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount);
+
+/*
+ * This is used for (nearly) all data transfers to the IDE interface
+ */
+void ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount);
+
+/*
+ * This is used for (nearly) all ATAPI data transfers from/to the IDE interface
+ */
+void atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount);
+void atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount);
+
+/*
+ * This is used on exit from the driver, to designate the next irq handler
+ * and also to start the safety timer.
+ */
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout);
+
+/*
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ */
+byte ide_dump_status (ide_drive_t *drive, const char *msg, byte stat);
+
+/*
+ * ide_error() takes action based on the error returned by the controller.
+ * The calling function must return afterwards, to restart the request.
+ */
+void ide_error (ide_drive_t *drive, const char *msg, byte stat);
+
+/*
+ * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
+ * removing leading/trailing blanks and compressing internal blanks.
+ * It is primarily used to tidy up the model name/number fields as
+ * returned by the WIN_[P]IDENTIFY commands.
+ */
+void ide_fixstring (byte *s, const int bytecount, const int byteswap);
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return 1 after invoking ide_error() -- caller should return.
+ *
+ */
+int ide_wait_stat (ide_drive_t *drive, byte good, byte bad, unsigned long timeout);
+
+/*
+ * This routine is called from the partition-table code in genhd.c
+ * to "convert" a drive to a logical geometry with fewer than 1024 cyls.
+ *
+ * The second parameter, "xparm", determines exactly how the translation
+ * will be handled:
+ * 0 = convert to CHS with fewer than 1024 cyls
+ * using the same method as Ontrack DiskManager.
+ * 1 = same as "0", plus offset everything by 63 sectors.
+ * -1 = similar to "0", plus redirect sector 0 to sector 1.
+ * >1 = convert to a CHS geometry with "xparm" heads.
+ *
+ * Returns 0 if the translation was not possible, if the device was not
+ * an IDE disk drive, or if a geometry was "forced" on the commandline.
+ * Returns 1 if the geometry translation was successful.
+ */
+int ide_xlate_1024 (kdev_t, int, const char *);
+
+/*
+ * Start a reset operation for an IDE interface.
+ * The caller should return immediately after invoking this.
+ */
+void ide_do_reset (ide_drive_t *);
+
+/*
+ * This function is intended to be used prior to invoking ide_do_drive_cmd().
+ */
+void ide_init_drive_cmd (struct request *rq);
+
+/*
+ * "action" parameter type for ide_do_drive_cmd() below.
+ */
+typedef enum
+ {ide_wait, /* insert rq at end of list, and wait for it */
+ ide_next, /* insert rq immediately after current request */
+ ide_preempt, /* insert rq in front of current request */
+ ide_end} /* insert rq at end of list, but don't wait for it */
+ ide_action_t;
+
+/*
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * If action is ide_wait, then the rq is queued at the end of the
+ * request queue, and the function sleeps until it has been processed.
+ * This is for use when invoked from an ioctl handler.
+ *
+ * If action is ide_preempt, then the rq is queued at the head of
+ * the request queue, displacing the currently-being-processed
+ * request and this function returns immediately without waiting
+ * for the new rq to be completed. This is VERY DANGEROUS, and is
+ * intended for careful use by the ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_next, then the rq is queued immediately after
+ * the currently-being-processed-request (if any), and the function
+ * returns without waiting for the new rq to be completed. As above,
+ * This is VERY DANGEROUS, and is intended for careful use by the
+ * ATAPI tape/cdrom driver code.
+ *
+ * If action is ide_end, then the rq is queued at the end of the
+ * request queue, and the function returns immediately without waiting
+ * for the new rq to be completed. This is again intended for careful
+ * use by the ATAPI tape/cdrom driver code. (Currently used by ide-tape.c,
+ * when operating in the pipelined operation mode).
+ */
+int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action);
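+/*
+ * Usage sketch (illustrative; field/flag details depend on the caller):
+ *
+ *     struct request rq;
+ *     ide_init_drive_cmd(&rq);
+ *     rq.buffer = (char *) args;
+ *     err = ide_do_drive_cmd(drive, &rq, ide_wait);
+ *
+ * where args points at the caller's command/result bytes.
+ */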
+
+/*
+ * Clean up after success/failure of an explicit drive cmd.
+ * stat/err are used only when (HWGROUP(drive)->rq->cmd == IDE_DRIVE_CMD).
+ */
+void ide_end_drive_cmd (ide_drive_t *drive, byte stat, byte err);
+
+/*
+ * ide_system_bus_speed() returns what we think is the system VESA/PCI
+ * bus speed (in MHz). This is used for calculating interface PIO timings.
+ * The default is 40 for known PCI systems, 50 otherwise.
+ * The "idebus=xx" parameter can be used to override this value.
+ */
+int ide_system_bus_speed (void);
+
+/*
+ * ide_multwrite() transfers a block of up to mcount sectors of data
+ * to a drive as part of a disk multwrite operation.
+ */
+void ide_multwrite (ide_drive_t *drive, unsigned int mcount);
+
+#ifdef CONFIG_BLK_DEV_IDECD
+/*
+ * These are routines in ide-cd.c invoked from ide.c
+ */
+void ide_do_rw_cdrom (ide_drive_t *, unsigned long);
+int ide_cdrom_ioctl (ide_drive_t *, struct inode *, struct file *, unsigned int, unsigned long);
+int ide_cdrom_check_media_change (ide_drive_t *);
+int ide_cdrom_open (struct inode *, struct file *, ide_drive_t *);
+void ide_cdrom_release (struct inode *, struct file *, ide_drive_t *);
+void ide_cdrom_setup (ide_drive_t *);
+#endif /* CONFIG_BLK_DEV_IDECD */
+
+#ifdef CONFIG_BLK_DEV_IDETAPE
+
+/*
+ * Functions in ide-tape.c which are invoked from ide.c:
+ */
+
+/*
+ * idetape_identify_device is called during device probing stage to
+ * probe for an ide atapi tape drive and to initialize global variables
+ * in ide-tape.c which provide the link between the character device
+ * and the corresponding block device.
+ *
+ * Returns 1 if an ide tape was detected and is supported.
+ * Returns 0 otherwise.
+ */
+
+int idetape_identify_device (ide_drive_t *drive,struct hd_driveid *id);
+
+/*
+ * idetape_setup is called a bit later than idetape_identify_device,
+ * during the search for disk partitions, to initialize various tape
+ * state variables in ide_drive_t *drive.
+ */
+
+void idetape_setup (ide_drive_t *drive);
+
+/*
+ * idetape_do_request is our request function. It is called by ide.c
+ * to process a new request.
+ */
+
+void idetape_do_request (ide_drive_t *drive, struct request *rq, unsigned long block);
+
+/*
+ * idetape_end_request is used to finish servicing a request, and to
+ * insert a pending pipeline request into the main device queue.
+ */
+
+void idetape_end_request (byte uptodate, ide_hwgroup_t *hwgroup);
+
+/*
+ * Block device interface functions.
+ */
+
+int idetape_blkdev_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+int idetape_blkdev_open (struct inode *inode, struct file *filp, ide_drive_t *drive);
+void idetape_blkdev_release (struct inode *inode, struct file *filp, ide_drive_t *drive);
+
+/*
+ * idetape_register_chrdev initializes the character device interface to
+ * the ide tape drive.
+ */
+
+void idetape_register_chrdev (void);
+
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+
+#ifdef CONFIG_BLK_DEV_IDEFLOPPY
+int idefloppy_identify_device (ide_drive_t *drive,struct hd_driveid *id);
+void idefloppy_setup (ide_drive_t *drive);
+void idefloppy_do_request (ide_drive_t *drive, struct request *rq, unsigned long block);
+void idefloppy_end_request (byte uptodate, ide_hwgroup_t *hwgroup);
+int idefloppy_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+int idefloppy_open (struct inode *inode, struct file *filp, ide_drive_t *drive);
+void idefloppy_release (struct inode *inode, struct file *filp, ide_drive_t *drive);
+int idefloppy_media_change (ide_drive_t *drive);
+unsigned long idefloppy_capacity (ide_drive_t *drive);
+#endif /* CONFIG_BLK_DEV_IDEFLOPPY */
+
+#ifdef CONFIG_BLK_DEV_IDESCSI
+void idescsi_setup (ide_drive_t *drive);
+void idescsi_do_request (ide_drive_t *drive, struct request *rq, unsigned long block);
+void idescsi_end_request (byte uptodate, ide_hwgroup_t *hwgroup);
+int idescsi_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+int idescsi_open (struct inode *inode, struct file *filp, ide_drive_t *drive);
+void idescsi_ide_release (struct inode *inode, struct file *filp, ide_drive_t *drive);
+#endif /* CONFIG_BLK_DEV_IDESCSI */
+
+#ifdef CONFIG_BLK_DEV_TRITON
+void ide_init_triton (byte, byte);
+void ide_init_promise (byte bus, byte fn, ide_hwif_t *hwif0, ide_hwif_t *hwif1, unsigned short dma);
+#endif /* CONFIG_BLK_DEV_TRITON */
diff --git a/linux/src/drivers/block/ide_modes.h b/linux/src/drivers/block/ide_modes.h
new file mode 100644
index 0000000..589fbfa
--- /dev/null
+++ b/linux/src/drivers/block/ide_modes.h
@@ -0,0 +1,226 @@
+#ifndef _IDE_MODES_H
+#define _IDE_MODES_H
+/*
+ * linux/drivers/block/ide_modes.h
+ *
+ * Copyright (C) 1996 Linus Torvalds, Igor Abramov, and Mark Lord
+ */
+
+#include <linux/config.h>
+
+/*
+ * Shared data/functions for determining best PIO mode for an IDE drive.
+ * Most of this stuff originally lived in cmd640.c, and changes to the
+ * ide_pio_blacklist[] table should be made with EXTREME CAUTION to avoid
+ * breaking the fragile cmd640.c support.
+ */
+
+#if defined(CONFIG_BLK_DEV_CMD640) || defined(CONFIG_IDE_CHIPSETS)
+
+/*
+ * Standard (generic) timings for PIO modes, from ATA2 specification.
+ * These timings are for access to the IDE data port register *only*.
+ * Some drives may specify a mode, while also specifying a different
+ * value for cycle_time (from drive identification data).
+ */
+typedef struct ide_pio_timings_s {
+ int setup_time; /* Address setup (ns) minimum */
+ int active_time; /* Active pulse (ns) minimum */
+ int cycle_time; /* Cycle time (ns) minimum = (setup + active + recovery) */
+} ide_pio_timings_t;
+
+typedef struct ide_pio_data_s {
+ byte pio_mode;
+ byte use_iordy;
+ byte overridden;
+ byte blacklisted;
+ unsigned int cycle_time;
+} ide_pio_data_t;
+
+#ifndef _IDE_C
+
+int ide_scan_pio_blacklist (char *model);
+byte ide_get_best_pio_mode (ide_drive_t *drive, byte mode_wanted, byte max_mode, ide_pio_data_t *d);
+extern const ide_pio_timings_t ide_pio_timings[6];
+
+#else /* _IDE_C */
+
+const ide_pio_timings_t ide_pio_timings[6] = {
+ { 70, 165, 600 }, /* PIO Mode 0 */
+ { 50, 125, 383 }, /* PIO Mode 1 */
+ { 30, 100, 240 }, /* PIO Mode 2 */
+ { 30, 80, 180 }, /* PIO Mode 3 with IORDY */
+ { 25, 70, 120 }, /* PIO Mode 4 with IORDY */
+ { 20, 50, 100 } /* PIO Mode 5 with IORDY (nonstandard) */
+};
+
+/*
+ * Black list. Some drives incorrectly report their maximal PIO mode,
+ * at least in respect to CMD640. Here we keep info on some known drives.
+ */
+static struct ide_pio_info {
+ const char *name;
+ int pio;
+} ide_pio_blacklist [] = {
+/* { "Conner Peripherals 1275MB - CFS1275A", 4 }, */
+ { "Conner Peripherals 540MB - CFS540A", 3 },
+
+ { "WDC AC2700", 3 },
+ { "WDC AC2540", 3 },
+ { "WDC AC2420", 3 },
+ { "WDC AC2340", 3 },
+ { "WDC AC2250", 0 },
+ { "WDC AC2200", 0 },
+ { "WDC AC21200", 4 },
+ { "WDC AC2120", 0 },
+ { "WDC AC2850", 3 },
+ { "WDC AC1270", 3 },
+ { "WDC AC1170", 1 },
+ { "WDC AC1210", 1 },
+ { "WDC AC280", 0 },
+/* { "WDC AC21000", 4 }, */
+ { "WDC AC31000", 3 },
+ { "WDC AC31200", 3 },
+/* { "WDC AC31600", 4 }, */
+
+ { "Maxtor 7131 AT", 1 },
+ { "Maxtor 7171 AT", 1 },
+ { "Maxtor 7213 AT", 1 },
+ { "Maxtor 7245 AT", 1 },
+ { "Maxtor 7345 AT", 1 },
+ { "Maxtor 7546 AT", 3 },
+ { "Maxtor 7540 AV", 3 },
+
+ { "SAMSUNG SHD-3121A", 1 },
+ { "SAMSUNG SHD-3122A", 1 },
+ { "SAMSUNG SHD-3172A", 1 },
+
+/* { "ST51080A", 4 },
+ * { "ST51270A", 4 },
+ * { "ST31220A", 4 },
+ * { "ST31640A", 4 },
+ * { "ST32140A", 4 },
+ * { "ST3780A", 4 },
+ */
+ { "ST5660A", 3 },
+ { "ST3660A", 3 },
+ { "ST3630A", 3 },
+ { "ST3655A", 3 },
+ { "ST3391A", 3 },
+ { "ST3390A", 1 },
+ { "ST3600A", 1 },
+ { "ST3290A", 0 },
+ { "ST3144A", 0 },
+
+ { "QUANTUM ELS127A", 0 },
+ { "QUANTUM ELS170A", 0 },
+ { "QUANTUM LPS240A", 0 },
+ { "QUANTUM LPS210A", 3 },
+ { "QUANTUM LPS270A", 3 },
+ { "QUANTUM LPS365A", 3 },
+ { "QUANTUM LPS540A", 3 },
+ { "QUANTUM LIGHTNING 540A", 3 },
+ { "QUANTUM LIGHTNING 730A", 3 },
+ { "QUANTUM FIREBALL", 3 }, /* For models 540/640/1080/1280 */
+ /* 1080A works fine in mode4 with triton */
+ { NULL, 0 }
+};
+
+/*
+ * This routine searches the ide_pio_blacklist for an entry
+ * matching the start/whole of the supplied model name.
+ *
+ * Returns -1 if no match found.
+ * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
+ */
+int ide_scan_pio_blacklist (char *model)
+{
+ struct ide_pio_info *p;
+
+ for (p = ide_pio_blacklist; p->name != NULL; p++) {
+ if (strncmp(p->name, model, strlen(p->name)) == 0)
+ return p->pio;
+ }
+ return -1;
+}
+
+/*
+ * This routine returns the recommended PIO settings for a given drive,
+ * based on the drive->id information and the ide_pio_blacklist[].
+ * This is used by most chipset support modules when "auto-tuning".
+ */
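+/*
+ * Example (illustrative): when auto-tuning (mode_wanted == 255), a drive
+ * whose model matches the "WDC AC2540" blacklist entry above is pinned to
+ * PIO mode 3 regardless of its identify data, while an unknown pre-ATA2
+ * drive reporting tPIO == 2 is conservatively downgraded to mode 1.
+ */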
+
+/*
+ * Drive PIO mode auto selection
+ */
+byte ide_get_best_pio_mode (ide_drive_t *drive, byte mode_wanted, byte max_mode, ide_pio_data_t *d)
+{
+ int pio_mode;
+ int cycle_time = 0;
+ int use_iordy = 0;
+ struct hd_driveid* id = drive->id;
+ int overridden = 0;
+ int blacklisted = 0;
+
+ if (mode_wanted != 255) {
+ pio_mode = mode_wanted;
+ } else if (!drive->id) {
+ pio_mode = 0;
+ } else if ((pio_mode = ide_scan_pio_blacklist(id->model)) != -1) {
+ overridden = 1;
+ blacklisted = 1;
+ use_iordy = (pio_mode > 2);
+ } else {
+ pio_mode = id->tPIO;
+ if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
+ pio_mode = 2;
+ overridden = 1;
+ }
+ if (id->field_valid & 2) { /* drive implements ATA2? */
+ if (id->capability & 8) { /* drive supports use_iordy? */
+ use_iordy = 1;
+ cycle_time = id->eide_pio_iordy;
+ if (id->eide_pio_modes & 7) {
+ overridden = 0;
+ if (id->eide_pio_modes & 4)
+ pio_mode = 5;
+ else if (id->eide_pio_modes & 2)
+ pio_mode = 4;
+ else
+ pio_mode = 3;
+ }
+ } else {
+ cycle_time = id->eide_pio;
+ }
+ }
+
+ /*
+ * Conservative "downgrade" for all pre-ATA2 drives
+ */
+ if (pio_mode && pio_mode < 4) {
+ pio_mode--;
+ overridden = 1;
+#if 0
+ use_iordy = (pio_mode > 2);
+#endif
+ if (cycle_time && cycle_time < ide_pio_timings[pio_mode].cycle_time)
+ cycle_time = 0; /* use standard timing */
+ }
+ }
+ if (pio_mode > max_mode) {
+ pio_mode = max_mode;
+ cycle_time = 0;
+ }
+ if (d) {
+ d->pio_mode = pio_mode;
+ d->cycle_time = cycle_time ? cycle_time : ide_pio_timings[pio_mode].cycle_time;
+ d->use_iordy = use_iordy;
+ d->overridden = overridden;
+ d->blacklisted = blacklisted;
+ }
+ return pio_mode;
+}
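
As a usage sketch only: a hypothetical chipset tuning routine that asks for the best mode up to PIO 4 and converts the returned cycle time into PCI clocks. The set_chipset_timing() helper and the 30 ns clock period are assumptions for illustration; only ide_get_best_pio_mode() and ide_pio_data_t come from this header. Passing 255 as mode_wanted selects automatic mode detection, as the code above shows.

static void example_tune_drive (ide_drive_t *drive, byte mode_wanted)
{
	ide_pio_data_t d;
	byte pio = ide_get_best_pio_mode (drive, mode_wanted, 4, &d);
	int clocks = (d.cycle_time + 29) / 30;	/* round the ns cycle time up to 30 ns PCI clocks */

	set_chipset_timing (drive, pio, clocks, d.use_iordy);	/* hypothetical chipset hook */
}
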
+
+#endif /* _IDE_C */
+#endif /* defined(CONFIG_BLK_DEV_CMD640) || defined(CONFIG_IDE_CHIPSETS) */
+#endif /* _IDE_MODES_H */
diff --git a/linux/src/drivers/block/rz1000.c b/linux/src/drivers/block/rz1000.c
new file mode 100644
index 0000000..41b26f2
--- /dev/null
+++ b/linux/src/drivers/block/rz1000.c
@@ -0,0 +1,59 @@
+/*
+ * linux/drivers/block/rz1000.c Version 0.03 Mar 20, 1996
+ *
+ * Copyright (C) 1995-1996 Linus Torvalds & author (see below)
+ */
+
+/*
+ * Principal Author/Maintainer: mlord@pobox.com (Mark Lord)
+ *
+ * This file provides support for disabling the buggy read-ahead
+ * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
+ */
+
+#undef REALLY_SLOW_IO /* most systems can safely undef this */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include "ide.h"
+
+static void ide_pci_access_error (int rc)
+{
+ printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
+}
+
+void init_rz1000 (byte bus, byte fn)
+{
+ int rc;
+ unsigned short reg;
+
+ printk("ide0: buggy RZ1000 interface: ");
+ if ((rc = pcibios_read_config_word (bus, fn, PCI_COMMAND, &reg))) {
+ ide_pci_access_error (rc);
+ } else if (!(reg & 1)) {
+ printk("not enabled\n");
+ } else {
+ if ((rc = pcibios_read_config_word(bus, fn, 0x40, &reg))
+ || (rc = pcibios_write_config_word(bus, fn, 0x40, reg & 0xdfff)))
+ {
+ ide_hwifs[0].drives[0].no_unmask = 1;
+ ide_hwifs[0].drives[1].no_unmask = 1;
+ ide_hwifs[1].drives[0].no_unmask = 1;
+ ide_hwifs[1].drives[1].no_unmask = 1;
+ ide_hwifs[0].serialized = 1;
+ ide_hwifs[1].serialized = 1;
+ ide_pci_access_error (rc);
+ printk("serialized, disabled unmasking\n");
+ } else
+ printk("disabled read-ahead\n");
+ }
+}
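
For context, a sketch of how a PCI scan could invoke this fix. The loop shape mirrors the pcibios_find_device() loops used elsewhere in these drivers and assumes the PC Tech vendor/device ID constants from <linux/pci.h>; this particular caller is illustrative only.

static void example_scan_for_rz1000 (void)
{
	byte bus, fn;
	int index;

	/* Walk every RZ1000 the PCI BIOS reports and disable its read-ahead. */
	for (index = 0;
	     !pcibios_find_device (PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000,
				   index, &bus, &fn);
	     ++index)
		init_rz1000 (bus, fn);
}
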
diff --git a/linux/src/drivers/block/triton.c b/linux/src/drivers/block/triton.c
new file mode 100644
index 0000000..f4633d2
--- /dev/null
+++ b/linux/src/drivers/block/triton.c
@@ -0,0 +1,996 @@
+/*
+ * linux/drivers/block/triton.c Version 1.13 Aug 12, 1996
+ * Version 1.13a June 1998 - new chipsets
+ * Version 1.13b July 1998 - DMA blacklist
+ * Version 1.14 June 22, 1999
+ *
+ * Copyright (c) 1998-1999 Andre Hedrick
+ * Copyright (c) 1995-1996 Mark Lord
+ * May be copied or modified under the terms of the GNU General Public License
+ */
+
+/*
+ * This module provides support for Bus Master IDE DMA functions in various
+ * motherboard chipsets and PCI controller cards.
+ * Please check /Documentation/ide.txt and /Documentation/udma.txt for details.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#include "ide.h"
+
+#undef DISPLAY_TRITON_TIMINGS /* define this to display timings */
+#undef DISPLAY_APOLLO_TIMINGS /* define this for extensive debugging information */
+#undef DISPLAY_ALI15X3_TIMINGS /* define this for extensive debugging information */
+
+#if defined(CONFIG_PROC_FS)
+#include <linux/stat.h>
+#include <linux/proc_fs.h>
+#ifdef DISPLAY_APOLLO_TIMINGS
+#include <linux/via_ide_dma.h>
+#endif
+#ifdef DISPLAY_ALI15X3_TIMINGS
+#include <linux/ali_ide_dma.h>
+#endif
+#endif
+
+/*
+ * good_dma_drives() lists the model names (from "hdparm -i")
+ * of drives which do not support mword2 DMA but which are
+ * known to work fine with this interface under Linux.
+ */
+const char *good_dma_drives[] = {"Micropolis 2112A",
+ "CONNER CTMA 4000",
+ "CONNER CTT8000-A",
+ "QEMU HARDDISK",
+ NULL};
+
+/*
+ * bad_dma_drives() lists the model names (from "hdparm -i")
+ * of drives which supposedly support (U)DMA but which are
+ * known to corrupt data with this interface under Linux.
+ *
+ * Note: the list was generated by statistical analysis of problem
+ * reports. It's not clear if there are problems with the drives,
+ * or with some combination of drive/controller or what.
+ *
+ * You can forcibly override this if you wish. This is the kernel
+ * 'Tread carefully' list.
+ *
+ * Finally see http://www.wdc.com/quality/err-rec.html if you have
+ * one of the listed drives.
+ */
+const char *bad_dma_drives[] = {"WDC AC11000H",
+ "WDC AC22100H",
+ "WDC AC32500H",
+ "WDC AC33100H",
+ NULL};
+
+/*
+ * Our Physical Region Descriptor (PRD) table should be large enough
+ * to handle the biggest I/O request we are likely to see. Since requests
+ * can have no more than 256 sectors, and since the typical blocksize is
+ * two sectors, we could get by with a limit of 128 entries here for the
+ * usual worst case. Most requests seem to include some contiguous blocks,
+ * further reducing the number of table entries required.
+ *
+ * The driver reverts to PIO mode for individual requests that exceed
+ * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
+ * 100% of all crazy scenarios here is not necessary.
+ *
+ * As it turns out though, we must allocate a full 4KB page for this,
+ * so the two PRD tables (ide0 & ide1) will each get half of that,
+ * allowing each to have about 256 entries (8 bytes each) from this.
+ */
+#define PRD_BYTES 8
+#define PRD_ENTRIES (PAGE_SIZE / (2 * PRD_BYTES))
+#define DEFAULT_BMIBA 0xe800 /* in case BIOS did not init it */
+#define DEFAULT_BMCRBA 0xcc00 /* VIA's default value */
+#define DEFAULT_BMALIBA 0xd400 /* ALI's default value */
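
To make the comment above concrete, here is a small illustrative helper (names hypothetical) showing the shape of one PRD entry and the 64 kB-boundary splitting rule; build_dmatable() below implements the same logic inline.

/* One PRD entry is two 32-bit words: a physical base address and a byte
   count, where a stored count of 0 means a full 64 kB.  No entry may span
   a 64 kB boundary, so larger areas are split. */
static unsigned long *prd_fill_area (unsigned long *table,
				     unsigned long addr, unsigned long size)
{
	while (size) {
		unsigned long bcount = 0x10000 - (addr & 0xffff);

		if (bcount > size)
			bcount = size;
		*table++ = addr;
		*table++ = bcount & 0xffff;
		addr += bcount;
		size -= bcount;
	}
	return table;
}
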
+
+/*
+ * dma_intr() is the handler for disk read/write DMA interrupts
+ */
+static void dma_intr (ide_drive_t *drive)
+{
+ byte stat, dma_stat;
+ int i;
+ struct request *rq = HWGROUP(drive)->rq;
+ unsigned short dma_base = HWIF(drive)->dma_base;
+
+ dma_stat = inb(dma_base+2); /* get DMA status */
+ outb(inb(dma_base)&~1, dma_base); /* stop DMA operation */
+ stat = GET_STAT(); /* get drive status */
+ if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
+ if ((dma_stat & 7) == 4) { /* verify good DMA status */
+ rq = HWGROUP(drive)->rq;
+ for (i = rq->nr_sectors; i > 0;) {
+ i -= rq->current_nr_sectors;
+ ide_end_request(1, HWGROUP(drive));
+ }
+ return;
+ }
+ printk("%s: bad DMA status: 0x%02x\n", drive->name, dma_stat);
+ }
+ sti();
+ ide_error(drive, "dma_intr", stat);
+}
+
+/*
+ * build_dmatable() prepares a dma request.
+ * Returns 0 if all went okay, returns 1 otherwise.
+ */
+static int build_dmatable (ide_drive_t *drive)
+{
+ struct request *rq = HWGROUP(drive)->rq;
+ struct buffer_head *bh = rq->bh;
+ unsigned long size, addr, *table = HWIF(drive)->dmatable;
+ unsigned int count = 0;
+
+ do {
+ /*
+ * Determine addr and size of next buffer area. We assume that
+ * individual virtual buffers are always composed linearly in
+ * physical memory. For example, we assume that any 8kB buffer
+ * is always composed of two adjacent physical 4kB pages rather
+ * than two possibly non-adjacent physical 4kB pages.
+ */
+ if (bh == NULL) { /* paging and tape requests have (rq->bh == NULL) */
+ addr = virt_to_bus (rq->buffer);
+#ifdef CONFIG_BLK_DEV_IDETAPE
+ if (drive->media == ide_tape)
+ size = drive->tape.pc->request_transfer;
+ else
+#endif /* CONFIG_BLK_DEV_IDETAPE */
+ size = rq->nr_sectors << 9;
+ } else {
+ /* group sequential buffers into one large buffer */
+ addr = virt_to_bus (bh->b_data);
+ size = bh->b_size;
+ while ((bh = bh->b_reqnext) != NULL) {
+ if ((addr + size) != virt_to_bus (bh->b_data))
+ break;
+ size += bh->b_size;
+ }
+ }
+
+ /*
+ * Fill in the dma table, without crossing any 64kB boundaries.
+ * We assume 16-bit alignment of all blocks.
+ */
+ while (size) {
+ if (++count >= PRD_ENTRIES) {
+ printk("%s: DMA table too small\n", drive->name);
+ return 1; /* revert to PIO for this request */
+ } else {
+ unsigned long bcount = 0x10000 - (addr & 0xffff);
+ if (bcount > size)
+ bcount = size;
+ *table++ = addr;
+ *table++ = bcount & 0xffff;
+ addr += bcount;
+ size -= bcount;
+ }
+ }
+ } while (bh != NULL);
+ if (count) {
+ *--table |= 0x80000000; /* set End-Of-Table (EOT) bit */
+ return 0;
+ }
+ printk("%s: empty DMA table?\n", drive->name);
+ return 1; /* let the PIO routines handle this weirdness */
+}
+
+/*
+ * We will only enable drives with multi-word (mode2) (U)DMA capabilities,
+ * and ignore the very rare cases of drives that can only do single-word
+ * (modes 0 & 1) (U)DMA transfers. We also discard "blacklisted" hard disks.
+ */
+static int config_drive_for_dma (ide_drive_t *drive)
+{
+#ifndef CONFIG_BLK_DEV_FORCE_DMA
+ const char **list;
+ struct hd_driveid *id = drive->id;
+#endif
+
+#ifdef CONFIG_BLK_DEV_FORCE_DMA
+ drive->using_dma = 1;
+ return 0;
+#else
+ if (HWIF(drive)->chipset == ide_hpt343) {
+ drive->using_dma = 0; /* no DMA */
+ return 1; /* DMA disabled */
+ }
+
+ if (id && (id->capability & 1)) {
+ /* Consult the list of known "bad" drives */
+ list = bad_dma_drives;
+ while (*list) {
+ if (!strcmp(*list++,id->model)) {
+ drive->using_dma = 0; /* no DMA */
+ printk("ide: Disabling DMA modes on %s drive (%s).\n", drive->name, id->model);
+ return 1; /* DMA disabled */
+ }
+ }
+
+ if (!strcmp("QEMU HARDDISK", id->model)) {
+ /* Virtual disks don't have issues with DMA :) */
+ drive->using_dma = 1;
+			/* And keep it enabled even if some requests time out due to emulation lag. */
+ drive->keep_settings = 1;
+ return 1; /* DMA enabled */
+ }
+ /* Enable DMA on any drive that has mode 4 or 2 UltraDMA enabled */
+ if (id->field_valid & 4) { /* UltraDMA */
+ /* Enable DMA on any drive that has mode 4 UltraDMA enabled */
+ if (((id->dma_ultra & 0x1010) == 0x1010) &&
+ (id->word93 & 0x2000) &&
+ (HWIF(drive)->chipset == ide_ultra66)) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ } else
+ /* Enable DMA on any drive that has mode 2 UltraDMA enabled */
+ if ((id->dma_ultra & 0x404) == 0x404) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ }
+ /* Enable DMA on any drive that has mode2 DMA enabled */
+ if (id->field_valid & 2) /* regular DMA */
+ if ((id->dma_mword & 0x404) == 0x404) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ /* Consult the list of known "good" drives */
+ list = good_dma_drives;
+ while (*list) {
+ if (!strcmp(*list++,id->model)) {
+ drive->using_dma = 1;
+ return 0; /* DMA enabled */
+ }
+ }
+ }
+ return 1; /* DMA not enabled */
+#endif
+}
+
+/*
+ * triton_dmaproc() initiates/aborts DMA read/write operations on a drive.
+ *
+ * The caller is assumed to have selected the drive and programmed the drive's
+ * sector address using CHS or LBA. All that remains is to prepare for DMA
+ * and then issue the actual read/write DMA/PIO command to the drive.
+ *
+ * For ATAPI devices, we just prepare for DMA and return. The caller should
+ * then issue the packet command to the drive and call us again with
+ * ide_dma_begin afterwards.
+ *
+ * Returns 0 if all went well.
+ * Returns 1 if DMA read/write could not be started, in which case
+ * the caller should revert to PIO for the current request.
+ */
+static int triton_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
+{
+ unsigned long dma_base = HWIF(drive)->dma_base;
+ unsigned int reading = (1 << 3);
+
+ switch (func) {
+ case ide_dma_abort:
+ outb(inb(dma_base)&~1, dma_base); /* stop DMA */
+ return 0;
+ case ide_dma_check:
+ return config_drive_for_dma (drive);
+ case ide_dma_write:
+ reading = 0;
+ case ide_dma_read:
+ break;
+ case ide_dma_status_bad:
+ return ((inb(dma_base+2) & 7) != 4); /* verify good DMA status */
+ case ide_dma_transferred:
+#if 0
+ return (number of bytes actually transferred);
+#else
+ return (0);
+#endif
+ case ide_dma_begin:
+ outb(inb(dma_base)|1, dma_base); /* begin DMA */
+ return 0;
+ default:
+ printk("triton_dmaproc: unsupported func: %d\n", func);
+ return 1;
+ }
+ if (build_dmatable (drive))
+ return 1;
+ outl(virt_to_bus (HWIF(drive)->dmatable), dma_base + 4); /* PRD table */
+ outb(reading, dma_base); /* specify r/w */
+ outb(inb(dma_base+2)|0x06, dma_base+2); /* clear status bits */
+#ifdef CONFIG_BLK_DEV_IDEATAPI
+ if (drive->media != ide_disk)
+ return 0;
+#endif /* CONFIG_BLK_DEV_IDEATAPI */
+ ide_set_handler(drive, &dma_intr, WAIT_CMD); /* issue cmd to drive */
+ OUT_BYTE(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
+ outb(inb(dma_base)|1, dma_base); /* begin DMA */
+ return 0;
+}
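
A sketch of the two-step ATAPI sequence described in the comment above triton_dmaproc(): prepare DMA, let the caller issue the packet command, then start the engine. send_packet_command() is a hypothetical placeholder for the caller's own command issue; HWIF() and the ide_dma_* actions are the ones used in this file.

static int example_atapi_dma (ide_drive_t *drive, int writing)
{
	ide_hwif_t *hwif = HWIF(drive);

	if (hwif->dmaproc (writing ? ide_dma_write : ide_dma_read, drive))
		return 1;			/* could not set up DMA; fall back to PIO */
	send_packet_command (drive);		/* hypothetical: issue the ATAPI packet */
	return hwif->dmaproc (ide_dma_begin, drive);
}
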
+
+#ifdef DISPLAY_TRITON_TIMINGS
+/*
+ * print_triton_drive_flags() displays the currently programmed options
+ * in the i82371 (Triton) for a given drive.
+ *
+ * If fastDMA is "no", then slow ISA timings are used for DMA data xfers.
+ * If fastPIO is "no", then slow ISA timings are used for PIO data xfers.
+ * If IORDY is "no", then IORDY is assumed to always be asserted.
+ * If PreFetch is "no", then data prefetch/posting is not used.
+ *
+ * When "fastPIO" and/or "fastDMA" are "yes", then faster PCI timings and
+ * back-to-back 16-bit data transfers are enabled, using the sample_CLKs
+ * and recovery_CLKs (PCI clock cycles) timing parameters for that interface.
+ */
+static void print_triton_drive_flags (unsigned int unit, byte flags)
+{
+ printk(" %s ", unit ? "slave :" : "master:");
+ printk( "fastDMA=%s", (flags&9) ? "on " : "off");
+ printk(" PreFetch=%s", (flags&4) ? "on " : "off");
+ printk(" IORDY=%s", (flags&2) ? "on " : "off");
+ printk(" fastPIO=%s\n", ((flags&9)==1) ? "on " : "off");
+}
+#endif /* DISPLAY_TRITON_TIMINGS */
+
+static void init_triton_dma (ide_hwif_t *hwif, unsigned short base)
+{
+ static unsigned long dmatable = 0;
+
+ printk(" %s: BM-DMA at 0x%04x-0x%04x", hwif->name, base, base+7);
+ if (check_region(base, 8)) {
+ printk(" -- ERROR, PORTS ALREADY IN USE");
+ } else {
+ request_region(base, 8, "IDE DMA");
+ hwif->dma_base = base;
+ if (!dmatable) {
+			 * The BM-DMA hardware uses full 32-bit addressing, so we can
+			 * safely use __get_free_pages() here instead
+			 * of __get_dma_pages() -- no ISA limitations.
+ * of __get_dma_pages() -- no ISA limitations.
+ */
+ dmatable = __get_free_pages(GFP_KERNEL, 1, 0);
+ }
+ if (dmatable) {
+ hwif->dmatable = (unsigned long *) dmatable;
+ dmatable += (PRD_ENTRIES * PRD_BYTES);
+ outl(virt_to_bus(hwif->dmatable), base + 4);
+ hwif->dmaproc = &triton_dmaproc;
+ }
+ }
+ printk("\n");
+}
+
+/*
+ * Set VIA Chipset Timings for (U)DMA modes enabled.
+ */
+static int set_via_timings (byte bus, byte fn, byte post, byte flush)
+{
+ byte via_config = 0;
+ int rc = 0;
+
+ /* setting IDE read prefetch buffer and IDE post write buffer */
+ if ((rc = pcibios_read_config_byte(bus, fn, 0x41, &via_config)))
+ return (1);
+ if ((rc = pcibios_write_config_byte(bus, fn, 0x41, via_config | post)))
+ return (1);
+
+ /* setting Channel read and End-of-sector FIFO flush: */
+ if ((rc = pcibios_read_config_byte(bus, fn, 0x46, &via_config)))
+ return (1);
+ if ((rc = pcibios_write_config_byte(bus, fn, 0x46, via_config | flush)))
+ return (1);
+
+ return (0);
+}
+
+static int setup_aladdin (byte bus, byte fn)
+{
+ byte confreg0 = 0, confreg1 = 0, progif = 0;
+ int errors = 0;
+
+ if (pcibios_read_config_byte(bus, fn, 0x50, &confreg1))
+ goto veryspecialsettingserror;
+ if (!(confreg1 & 0x02))
+ if (pcibios_write_config_byte(bus, fn, 0x50, confreg1 | 0x02))
+ goto veryspecialsettingserror;
+
+ if (pcibios_read_config_byte(bus, fn, 0x09, &progif))
+ goto veryspecialsettingserror;
+ if (!(progif & 0x40)) {
+ /*
+		 * The way to enable them is to make progif
+		 * writable via register 0x4D, and set bit 6
+ * of progif to 1:
+ */
+ if (pcibios_read_config_byte(bus, fn, 0x4d, &confreg0))
+ goto veryspecialsettingserror;
+ if (confreg0 & 0x80)
+ if (pcibios_write_config_byte(bus, fn, 0x4d, confreg0 & ~0x80))
+ goto veryspecialsettingserror;
+ if (pcibios_write_config_byte(bus, fn, 0x09, progif | 0x40))
+ goto veryspecialsettingserror;
+ if (confreg0 & 0x80)
+ if (pcibios_write_config_byte(bus, fn, 0x4d, confreg0))
+ errors++;
+ }
+
+ if ((pcibios_read_config_byte(bus, fn, 0x09, &progif)) || (!(progif & 0x40)))
+ goto veryspecialsettingserror;
+
+ printk("ide: ALI15X3: enabled read of IDE channels state (en/dis-abled) %s.\n",
+ errors ? "with Error(s)" : "Succeeded" );
+ return 1;
+veryspecialsettingserror:
+ printk("ide: ALI15X3: impossible to enable read of IDE channels state (en/dis-abled)!\n");
+ return 0;
+}
+
+void set_promise_hpt343_extra (unsigned short device, unsigned int bmiba)
+{
+ switch(device) {
+ case PCI_DEVICE_ID_PROMISE_20246:
+ if(!check_region((bmiba+16), 16))
+ request_region((bmiba+16), 16, "PDC20246");
+ break;
+ case PCI_DEVICE_ID_PROMISE_20262:
+ if (!check_region((bmiba+48), 48))
+ request_region((bmiba+48), 48, "PDC20262");
+ break;
+ case PCI_DEVICE_ID_TTI_HPT343:
+ if(!check_region((bmiba+16), 16))
+ request_region((bmiba+16), 16, "HPT343");
+ break;
+ default:
+ break;
+ }
+}
+
+#define HPT343_PCI_INIT_REG 0x80
+
+/*
+ * ide_init_triton() prepares the IDE driver for DMA operation.
+ * This routine is called once, from ide.c during driver initialization,
+ * for each BM-DMA chipset which is found (rarely more than one).
+ */
+void ide_init_triton (byte bus, byte fn)
+{
+ byte bridgebus, bridgefn, bridgeset = 0, hpt34x_flag = 0;
+ unsigned char irq = 0;
+ int dma_enabled = 0, rc = 0, h;
+ unsigned short io[6], count = 0, step_count = 0, pass_count = 0;
+ unsigned short pcicmd, vendor, device, class;
+ unsigned int bmiba, timings, reg, tmp;
+ unsigned int addressbios = 0;
+ unsigned long flags;
+ unsigned index;
+
+#if defined(DISPLAY_APOLLO_TIMINGS) || defined(DISPLAY_ALI15X3_TIMINGS)
+ bmide_bus = bus;
+ bmide_fn = fn;
+#endif /* DISPLAY_APOLLO_TIMINGS || DISPLAY_ALI15X3_TIMINGS */
+
+/*
+ * We pick up the vendor, device, and class info to select the correct
+ * supported controller. Since this routine can be entered more than
+ * once (for onboard and off-board EIDE controllers), a method
+ * of determining "who is who for what" is needed.
+ */
+
+ pcibios_read_config_word (bus, fn, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word (bus, fn, PCI_DEVICE_ID, &device);
+ pcibios_read_config_word (bus, fn, PCI_CLASS_DEVICE, &class);
+ pcibios_read_config_byte (bus, fn, PCI_INTERRUPT_LINE, &irq);
+
+ switch(vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ printk("ide: Intel 82371 ");
+ switch(device) {
+ case PCI_DEVICE_ID_INTEL_82371_0:
+ printk("PIIX (single FIFO) ");
+ break;
+ case PCI_DEVICE_ID_INTEL_82371SB_1:
+ printk("PIIX3 (dual FIFO) ");
+ break;
+ case PCI_DEVICE_ID_INTEL_82371AB:
+ printk("PIIX4 (dual FIFO) ");
+ break;
+ default:
+ printk(" (unknown) 0x%04x ", device);
+ break;
+ }
+ printk("DMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_SI:
+ printk("ide: SiS 5513 (dual FIFO) DMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_VIA:
+ printk("ide: VIA VT82C586B (split FIFO) UDMA Bus Mastering IDE ");
+ break;
+ case PCI_VENDOR_ID_TTI:
+ /*PCI_CLASS_STORAGE_UNKNOWN == class */
+ if (device == PCI_DEVICE_ID_TTI_HPT343) {
+ pcibios_write_config_byte(bus, fn, HPT343_PCI_INIT_REG, 0x00);
+ pcibios_read_config_word(bus, fn, PCI_COMMAND, &pcicmd);
+ hpt34x_flag = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
+#if 1
+ if (!hpt34x_flag) {
+ save_flags(flags);
+ cli();
+ pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd & ~PCI_COMMAND_IO);
+ pcibios_read_config_dword(bus, fn, PCI_BASE_ADDRESS_4, &bmiba);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_0, bmiba | 0x20);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_1, bmiba | 0x34);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_2, bmiba | 0x28);
+ pcibios_write_config_dword(bus, fn, PCI_BASE_ADDRESS_3, bmiba | 0x3c);
+ pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd);
+ bmiba = 0;
+ restore_flags(flags);
+ }
+#endif
+ pcibios_write_config_byte(bus, fn, PCI_LATENCY_TIMER, 0x20);
+ goto hpt343_jump_in;
+ } else {
+			printk("ide: HPTXXX did == 0x%04X unsupported chipset error.\n", device);
+ return;
+ }
+ case PCI_VENDOR_ID_PROMISE:
+ /*
+ * I have been able to make my Promise Ultra33 UDMA card change class.
+ * It has reported as both PCI_CLASS_STORAGE_RAID and PCI_CLASS_STORAGE_IDE.
+		 * The PCI_CLASS_STORAGE_RAID mode should automatically mirror the
+ * two halves of the PCI_CONFIG register data, but sometimes it forgets.
+ * Thus we guarantee that they are identical, with a quick check and
+ * correction if needed.
+ * PDC20246 (primary) PDC20247 (secondary) IDE hwif's.
+ *
+ * PDC20262 Promise Ultra66 UDMA.
+ *
+		 * Note that Promise tells "stories, fibs, ..." about this device not being
+		 * capable of handling ATAPI and AT devices.
+ */
+ if (class != PCI_CLASS_STORAGE_IDE) {
+ unsigned char irq_mirror = 0;
+
+ pcibios_read_config_byte(bus, fn, (PCI_INTERRUPT_LINE)|0x80, &irq_mirror);
+ if (irq != irq_mirror) {
+ pcibios_write_config_byte(bus, fn, (PCI_INTERRUPT_LINE)|0x80, irq);
+ }
+ }
+ case PCI_VENDOR_ID_ARTOP:
+ /* PCI_CLASS_STORAGE_SCSI == class */
+ /*
+ * I have found that by stroking rom_enable_bit on both the AEC6210U/UF and
+ * PDC20246 controller cards, the features desired are almost guaranteed
+ * to be enabled and compatible. This ROM may not be registered in the
+ * config data, but it can be turned on. Registration failure has only
+		 * been observed when Linux sets up the pci_io_address in the
+		 * 0x6000 range. If it is set up in the 0xef00 range, it is reported.
+ * WHY??? got me.........
+ */
+hpt343_jump_in:
+ printk("ide: %s UDMA Bus Mastering ",
+ (device == PCI_DEVICE_ID_ARTOP_ATP850UF) ? "AEC6210" :
+ (device == PCI_DEVICE_ID_PROMISE_20246) ? "PDC20246" :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? "PDC20262" :
+ (hpt34x_flag && (device == PCI_DEVICE_ID_TTI_HPT343)) ? "HPT345" :
+ (device == PCI_DEVICE_ID_TTI_HPT343) ? "HPT343" : "UNKNOWN");
+ pcibios_read_config_dword(bus, fn, PCI_ROM_ADDRESS, &addressbios);
+ if (addressbios) {
+ pcibios_write_config_byte(bus, fn, PCI_ROM_ADDRESS, addressbios | PCI_ROM_ADDRESS_ENABLE);
+ printk("with ROM enabled at 0x%08x", addressbios);
+ }
+ /*
+ * This was stripped out of 2.1.XXX kernel code and parts from a patch called
+ * promise_update. This finds the PCI_BASE_ADDRESS spaces and makes them
+ * available for configuration later.
+ * PCI_BASE_ADDRESS_0 hwif0->io_base
+ * PCI_BASE_ADDRESS_1 hwif0->ctl_port
+ * PCI_BASE_ADDRESS_2 hwif1->io_base
+ * PCI_BASE_ADDRESS_3 hwif1->ctl_port
+ * PCI_BASE_ADDRESS_4 bmiba
+ */
+ memset(io, 0, 6 * sizeof(unsigned short));
+ for (reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4) {
+ pcibios_read_config_dword(bus, fn, reg, &tmp);
+ if (tmp & PCI_BASE_ADDRESS_SPACE_IO)
+ io[count++] = tmp & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ break;
+ case PCI_VENDOR_ID_AL:
+ save_flags(flags);
+ cli();
+ for (index = 0; !pcibios_find_device (PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, index, &bridgebus, &bridgefn); ++index) {
+ bridgeset = setup_aladdin(bus, fn);
+ }
+ restore_flags(flags);
+ printk("ide: ALI15X3 (dual FIFO) DMA Bus Mastering IDE ");
+ break;
+ default:
+ return;
+ }
+
+ printk("\n Controller on PCI bus %d function %d\n", bus, fn);
+
+ /*
+ * See if IDE and BM-DMA features are enabled:
+ */
+ if ((rc = pcibios_read_config_word(bus, fn, PCI_COMMAND, &pcicmd)))
+ goto quit;
+ if ((pcicmd & 1) == 0) {
+ printk("ide: ports are not enabled (BIOS)\n");
+ goto quit;
+ }
+ if ((pcicmd & 4) == 0) {
+ printk("ide: BM-DMA feature is not enabled (BIOS), enabling\n");
+ pcicmd |= 4;
+ pcibios_write_config_word(bus, fn, 0x04, pcicmd);
+ if ((rc = pcibios_read_config_word(bus, fn, 0x04, &pcicmd))) {
+ printk("ide: Couldn't read back PCI command\n");
+ goto quit;
+ }
+ }
+
+ if ((pcicmd & 4) == 0) {
+ printk("ide: BM-DMA feature couldn't be enabled\n");
+ } else {
+ /*
+ * Get the bmiba base address
+ */
+ int try_again = 1;
+ do {
+ if ((rc = pcibios_read_config_dword(bus, fn, PCI_BASE_ADDRESS_4, &bmiba)))
+ goto quit;
+ bmiba &= 0xfff0; /* extract port base address */
+ if (bmiba) {
+ dma_enabled = 1;
+ break;
+ } else {
+ printk("ide: BM-DMA base register is invalid (0x%04x, PnP BIOS problem)\n", bmiba);
+ if (inb(((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA)) != 0xff || !try_again)
+ break;
+ printk("ide: setting BM-DMA base register to 0x%04x\n",
+ ((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA));
+ if ((rc = pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd&~1)))
+ goto quit;
+ rc = pcibios_write_config_dword(bus, fn, 0x20,
+ ((vendor == PCI_VENDOR_ID_AL) ? DEFAULT_BMALIBA :
+ (vendor == PCI_VENDOR_ID_VIA) ? DEFAULT_BMCRBA :
+ DEFAULT_BMIBA)|1);
+ if (pcibios_write_config_word(bus, fn, PCI_COMMAND, pcicmd|5) || rc)
+ goto quit;
+ }
+ } while (try_again--);
+ }
+
+ /*
+ * See if ide port(s) are enabled
+ */
+ if ((rc = pcibios_read_config_dword(bus, fn,
+ (vendor == PCI_VENDOR_ID_PROMISE) ? 0x50 :
+ (vendor == PCI_VENDOR_ID_ARTOP) ? 0x54 :
+ (vendor == PCI_VENDOR_ID_SI) ? 0x48 :
+ (vendor == PCI_VENDOR_ID_AL) ? 0x08 :
+ 0x40, &timings)))
+ goto quit;
+ /*
+ * We do a vendor check since the Ultra33/66 and AEC6210
+	 * hold their timings in a different location.
+ */
+#if 0
+ printk("ide: timings == %08x\n", timings);
+#endif
+ /*
+	 * The switch below preserves some of the original code.
+ */
+ switch(vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ if (!(timings & 0x80008000)) {
+ printk("ide: INTEL: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_VIA:
+ if(!(timings & 0x03)) {
+ printk("ide: VIA: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_AL:
+ timings <<= 16;
+ timings >>= 24;
+ if (!(timings & 0x30)) {
+ printk("ide: ALI15X3: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_SI:
+ timings <<= 8;
+ timings >>= 24;
+ if (!(timings & 0x06)) {
+ printk("ide: SIS5513: neither port is enabled\n");
+ goto quit;
+ }
+ break;
+ case PCI_VENDOR_ID_PROMISE:
+ printk(" (U)DMA Burst Bit %sABLED " \
+ "Primary %s Mode " \
+ "Secondary %s Mode.\n",
+ (inb(bmiba + 0x001f) & 1) ? "EN" : "DIS",
+ (inb(bmiba + 0x001a) & 1) ? "MASTER" : "PCI",
+ (inb(bmiba + 0x001b) & 1) ? "MASTER" : "PCI" );
+#if 0
+ if (!(inb(bmiba + 0x001f) & 1)) {
+ outb(inb(bmiba + 0x001f)|0x01, (bmiba + 0x001f));
+ printk(" (U)DMA Burst Bit Forced %sABLED.\n",
+ (inb(bmiba + 0x001f) & 1) ? "EN" : "DIS");
+ }
+#endif
+ break;
+ case PCI_VENDOR_ID_ARTOP:
+ case PCI_VENDOR_ID_TTI:
+ default:
+ break;
+ }
+
+ /*
+ * Save the dma_base port addr for each interface
+ */
+ for (h = 0; h < MAX_HWIFS; ++h) {
+ ide_hwif_t *hwif = &ide_hwifs[h];
+ byte channel = ((h == 1) || (h == 3) || (h == 5)) ? 1 : 0;
+
+ /*
+		 * This prevents the first controller from accidentally
+		 * initializing the hwifs that it does not use and blocking
+		 * an off-board ide-pci from getting in the game.
+ */
+ if ((step_count >= 2) || (pass_count >= 2)) {
+ goto quit;
+ }
+
+#if 0
+ if (hwif->chipset == ide_unknown)
+ printk("ide: index == %d channel(%d)\n", h, channel);
+#endif
+
+#ifdef CONFIG_BLK_DEV_OFFBOARD
+ /*
+ * This is a forced override for the onboard ide controller
+ * to be enabled, if one chooses to have an offboard ide-pci
+ * card as the primary booting device. This beasty is
+		 * for offboard UDMA upgrades with hard disks, while saving
+ * the onboard DMA2 controllers for CDROMS, TAPES, ZIPS, etc...
+ */
+ if (((vendor == PCI_VENDOR_ID_INTEL) ||
+ (vendor == PCI_VENDOR_ID_SI) ||
+ (vendor == PCI_VENDOR_ID_VIA) ||
+ (vendor == PCI_VENDOR_ID_AL)) && (h >= 2)) {
+ hwif->io_base = channel ? 0x170 : 0x1f0;
+ hwif->ctl_port = channel ? 0x376 : 0x3f6;
+ hwif->irq = channel ? 15 : 14;
+ hwif->noprobe = 0;
+ }
+#endif /* CONFIG_BLK_DEV_OFFBOARD */
+ /*
+		 * If the chipset is listed as "ide_unknown", let's get a
+ * hwif while they last. This does the first check on
+ * the current availability of the ide_hwifs[h] in question.
+ */
+ if (hwif->chipset != ide_unknown) {
+ continue;
+ } else if (vendor == PCI_VENDOR_ID_INTEL) {
+ unsigned short time;
+#ifdef DISPLAY_TRITON_TIMINGS
+ byte s_clks, r_clks;
+ unsigned short devid;
+#endif /* DISPLAY_TRITON_TIMINGS */
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ time = timings & 0xffff;
+ if ((time & 0x8000) == 0) /* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ time = timings >> 16;
+ if ((time & 0x8000) == 0) /* interface enabled? */
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
+ continue;
+ }
+#ifdef DISPLAY_TRITON_TIMINGS
+ s_clks = ((~time >> 12) & 3) + 2;
+ r_clks = ((~time >> 8) & 3) + 1;
+ printk(" %s timing: (0x%04x) sample_CLKs=%d, recovery_CLKs=%d\n",
+ hwif->name, time, s_clks, r_clks);
+ if ((time & 0x40) && !pcibios_read_config_word(bus, fn, PCI_DEVICE_ID, &devid)
+ && devid == PCI_DEVICE_ID_INTEL_82371SB_1) {
+ byte stime;
+ if (pcibios_read_config_byte(bus, fn, 0x44, &stime)) {
+ if (hwif->io_base == 0x1f0) {
+ s_clks = ~stime >> 6;
+ r_clks = ~stime >> 4;
+ } else {
+ s_clks = ~stime >> 2;
+ r_clks = ~stime;
+ }
+ s_clks = (s_clks & 3) + 2;
+ r_clks = (r_clks & 3) + 1;
+ printk(" slave: sample_CLKs=%d, recovery_CLKs=%d\n",
+ s_clks, r_clks);
+ }
+ }
+ print_triton_drive_flags (0, time & 0xf);
+ print_triton_drive_flags (1, (time >> 4) & 0xf);
+#endif /* DISPLAY_TRITON_TIMINGS */
+ } else if (vendor == PCI_VENDOR_ID_SI) {
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x02) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x04) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if (vendor == PCI_VENDOR_ID_VIA) {
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x02) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ if (set_via_timings(bus, fn, 0xc0, 0xa0))
+ goto quit;
+#ifdef DISPLAY_APOLLO_TIMINGS
+ proc_register_dynamic(&proc_root, &via_proc_entry);
+#endif /* DISPLAY_APOLLO_TIMINGS */
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x01) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ if (set_via_timings(bus, fn, 0x30, 0x50))
+ goto quit;
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if (vendor == PCI_VENDOR_ID_AL) {
+ byte ideic, inmir;
+ byte irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
+ 1, 11, 0, 12, 0, 14, 0, 15 };
+
+ if (bridgeset) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x58, &ideic);
+ ideic = ideic & 0x03;
+ if ((channel && ideic == 0x03) || (!channel && !ideic)) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x44, &inmir);
+ inmir = inmir & 0x0f;
+ hwif->irq = irq_routing_table[inmir];
+ } else if (channel && !(ideic & 0x01)) {
+ pcibios_read_config_byte(bridgebus, bridgefn, 0x75, &inmir);
+ inmir = inmir & 0x0f;
+ hwif->irq = irq_routing_table[inmir];
+ }
+ }
+ pass_count++;
+ if (hwif->io_base == 0x1f0) {
+ if ((timings & 0x20) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba);
+ outb(inb(bmiba+2) & 0x60, bmiba+2);
+ if (inb(bmiba+2) & 0x80)
+ printk("ALI15X3: simplex device: DMA forced\n");
+#ifdef DISPLAY_ALI15X3_TIMINGS
+ proc_register_dynamic(&proc_root, &ali_proc_entry);
+#endif /* DISPLAY_ALI15X3_TIMINGS */
+ step_count++;
+ } else if (hwif->io_base == 0x170) {
+ if ((timings & 0x10) == 0)
+ continue;
+ hwif->chipset = ide_triton;
+ if (dma_enabled)
+ init_triton_dma(hwif, bmiba + 8);
+ outb(inb(bmiba+10) & 0x60, bmiba+10);
+ if (inb(bmiba+10) & 0x80)
+ printk("ALI15X3: simplex device: DMA forced\n");
+ step_count++;
+ } else {
+ continue;
+ }
+ } else if ((vendor == PCI_VENDOR_ID_PROMISE) ||
+ (vendor == PCI_VENDOR_ID_ARTOP) ||
+ (vendor == PCI_VENDOR_ID_TTI)) {
+ pass_count++;
+ if (vendor == PCI_VENDOR_ID_TTI) {
+ if ((!hpt34x_flag) && (h < 2)) {
+ goto quit;
+ } else if (hpt34x_flag) {
+ hwif->io_base = channel ? (bmiba + 0x28) : (bmiba + 0x20);
+ hwif->ctl_port = channel ? (bmiba + 0x3e) : (bmiba + 0x36);
+ } else {
+ goto io_temps;
+ }
+ } else {
+io_temps:
+ tmp = channel ? 2 : 0;
+ hwif->io_base = io[tmp];
+ hwif->ctl_port = io[tmp + 1] + 2;
+ }
+ hwif->irq = irq;
+ hwif->noprobe = 0;
+
+ if (device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
+ hwif->serialized = 1;
+ }
+
+ if ((vendor == PCI_VENDOR_ID_PROMISE) ||
+ (vendor == PCI_VENDOR_ID_TTI)) {
+ set_promise_hpt343_extra(device, bmiba);
+ }
+
+ if (dma_enabled) {
+ if ((!check_region(bmiba, 8)) && (!channel)) {
+ hwif->chipset = ((vendor == PCI_VENDOR_ID_TTI) && !hpt34x_flag) ? ide_hpt343 :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? ide_ultra66 : ide_udma;
+ init_triton_dma(hwif, bmiba);
+ step_count++;
+ } else if ((!check_region((bmiba + 0x08), 8)) && (channel)) {
+ hwif->chipset = ((vendor == PCI_VENDOR_ID_TTI) && !hpt34x_flag) ? ide_hpt343 :
+ (device == PCI_DEVICE_ID_PROMISE_20262) ? ide_ultra66 : ide_udma;
+ init_triton_dma(hwif, bmiba + 8);
+ step_count++;
+ } else {
+ continue;
+ }
+ }
+ }
+ }
+
+ quit: if (rc) printk("ide: pcibios access failed - %s\n", pcibios_strerror(rc));
+}
diff --git a/linux/src/drivers/net/3c501.c b/linux/src/drivers/net/3c501.c
new file mode 100644
index 0000000..200b95c
--- /dev/null
+++ b/linux/src/drivers/net/3c501.c
@@ -0,0 +1,856 @@
+/* 3c501.c: A 3Com 3c501 ethernet driver for linux. */
+/*
+ Written 1992,1993,1994 Donald Becker
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This is a device driver for the 3Com Etherlink 3c501.
+    Do not purchase this card, even as a joke.  Its performance is horrible,
+ and it breaks in many ways.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Fixed (again!) the missing interrupt locking on TX/RX shifting.
+ Alan Cox <Alan.Cox@linux.org>
+
+ Removed calls to init_etherdev since they are no longer needed, and
+ cleaned up modularization just a bit. The driver still allows only
+ the default address for cards when loaded as a module, but that's
+ really less braindead than anyone using a 3c501 board. :)
+ 19950208 (invid@msen.com)
+
+ Added traps for interrupts hitting the window as we clear and TX load
+ the board. Now getting 150K/second FTP with a 3c501 card. Still playing
+    with a TX-TX optimisation to see if we can touch 180-200K/second, which
+    seems to be the theoretical maximum.
+ 19950402 Alan Cox <Alan.Cox@linux.org>
+
+ Some notes on this thing if you have to hack it. [Alan]
+
+    1] Some documentation is available from 3Com. Due to the board's age,
+       standard responses when you ask for this will range from 'be serious'
+ to 'give it to a museum'. The documentation is incomplete and mostly
+ of historical interest anyway.
+
+ 2] The basic system is a single buffer which can be used to receive or
+ transmit a packet. A third command mode exists when you are setting
+ things up.
+
+ 3] If it's transmitting it's not receiving and vice versa. In fact the
+       time to get the board back into a useful state after an operation is
+ quite large.
+
+ 4] The driver works by keeping the board in receive mode waiting for a
+ packet to arrive. When one arrives it is copied out of the buffer
+ and delivered to the kernel. The card is reloaded and off we go.
+
+ 5] When transmitting dev->tbusy is set and the card is reset (from
+ receive mode) [possibly losing a packet just received] to command
+ mode. A packet is loaded and transmit mode triggered. The interrupt
+ handler runs different code for transmit interrupts and can handle
+ returning to receive mode or retransmissions (yes you have to help
+ out with those too).
+
+ Problems:
+ There are a wide variety of undocumented error returns from the card
+ and you basically have to kick the board and pray if they turn up. Most
+ only occur under extreme load or if you do something the board doesn't
+ like (eg touching a register at the wrong time).
+
+ The driver is less efficient than it could be. It switches through
+ receive mode even if more transmits are queued. If this worries you buy
+ a real ethernet card.
+
+ The combination of slow receive restart and no real multicast
+ filter makes the board unusable with a kernel compiled for IP
+ multicasting in a real multicast environment. That's down to the board,
+    but even with no multicast programs running, a multicast IP kernel is
+ in group 224.0.0.1 and you will therefore be listening to all multicasts.
+ One nv conference running over that ethernet and you can give up.
+
+*/
+
+static const char *version =
+ "3c501.c: 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov).\n";
+
+/*
+ * Braindamage remaining:
+ * The 3c501 board.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/config.h> /* for CONFIG_IP_MULTICAST */
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#define BLOCKOUT_2
+
+/* A zero-terminated list of I/O addresses to be probed.
+ The 3c501 can be at many locations, but here are the popular ones. */
+static unsigned int netcard_portlist[] =
+ { 0x280, 0x300, 0};
+
+
+/*
+ * Index to functions.
+ */
+
+int el1_probe(struct device *dev);
+static int el1_probe1(struct device *dev, int ioaddr);
+static int el_open(struct device *dev);
+static int el_start_xmit(struct sk_buff *skb, struct device *dev);
+static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void el_receive(struct device *dev);
+static void el_reset(struct device *dev);
+static int el1_close(struct device *dev);
+static struct enet_statistics *el1_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+#define EL1_IO_EXTENT 16
+
+#ifndef EL_DEBUG
+#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
+#endif /* Anything above 5 is wordy death! */
+static int el_debug = EL_DEBUG;
+
+/*
+ * Board-specific info in dev->priv.
+ */
+
+struct net_local
+{
+ struct enet_statistics stats;
+ int tx_pkt_start; /* The length of the current Tx packet. */
+ int collisions; /* Tx collisions this packet */
+ int loading; /* Spot buffer load collisions */
+};
+
+
+#define RX_STATUS (ioaddr + 0x06)
+#define RX_CMD RX_STATUS
+#define TX_STATUS (ioaddr + 0x07)
+#define TX_CMD TX_STATUS
+#define GP_LOW (ioaddr + 0x08)
+#define GP_HIGH (ioaddr + 0x09)
+#define RX_BUF_CLR (ioaddr + 0x0A)
+#define RX_LOW (ioaddr + 0x0A)
+#define RX_HIGH (ioaddr + 0x0B)
+#define SAPROM (ioaddr + 0x0C)
+#define AX_STATUS (ioaddr + 0x0E)
+#define AX_CMD AX_STATUS
+#define DATAPORT (ioaddr + 0x0F)
+#define TX_RDY 0x08 /* In TX_STATUS */
+
+#define EL1_DATAPTR 0x08
+#define EL1_RXPTR 0x0A
+#define EL1_SAPROM 0x0C
+#define EL1_DATAPORT 0x0f
+
+/*
+ * Writes to the ax command register.
+ */
+
+#define AX_OFF 0x00 /* Irq off, buffer access on */
+#define AX_SYS 0x40 /* Load the buffer */
+#define AX_XMIT 0x44 /* Transmit a packet */
+#define AX_RX 0x48 /* Receive a packet */
+#define AX_LOOP 0x0C /* Loopback mode */
+#define AX_RESET 0x80
+
+/*
+ * Normal receive mode written to RX_STATUS. We must intr on short packets
+ * to avoid bogus rx lockups.
+ */
+
+#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
+#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
+#define RX_MULT 0xE8 /* Accept multicast packets. */
+#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
+
+/*
+ * TX_STATUS register.
+ */
+
+#define TX_COLLISION 0x02
+#define TX_16COLLISIONS 0x04
+#define TX_READY 0x08
+
+#define RX_RUNT 0x08
+#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
+#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
+
+
+/*
+ * The boilerplate probe code.
+ */
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry el1_drv = {"3c501", el1_probe1, EL1_IO_EXTENT, netcard_portlist};
+#else
+
+int el1_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el1_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++)
+ {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL1_IO_EXTENT))
+ continue;
+ if (el1_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/*
+ * The actual probe.
+ */
+
+static int el1_probe1(struct device *dev, int ioaddr)
+{
+ const char *mname; /* Vendor name */
+ unsigned char station_addr[6];
+ int autoirq = 0;
+ int i;
+
+ /*
+ * Read the station address PROM data from the special port.
+ */
+
+ for (i = 0; i < 6; i++)
+ {
+ outw(i, ioaddr + EL1_DATAPTR);
+ station_addr[i] = inb(ioaddr + EL1_SAPROM);
+ }
+ /*
+ * Check the first three octets of the S.A. for 3Com's prefix, or
+ * for the Sager NP943 prefix.
+ */
+
+ if (station_addr[0] == 0x02 && station_addr[1] == 0x60
+ && station_addr[2] == 0x8c)
+ {
+ mname = "3c501";
+ } else if (station_addr[0] == 0x00 && station_addr[1] == 0x80
+ && station_addr[2] == 0xC8)
+ {
+ mname = "NP943";
+ }
+ else
+ return ENODEV;
+
+ /*
+	 *	Grab the region so we can find another board if autoIRQ fails.
+ */
+
+ request_region(ioaddr, EL1_IO_EXTENT,"3c501");
+
+ /*
+ * We auto-IRQ by shutting off the interrupt line and letting it float
+ * high.
+ */
+
+ if (dev->irq < 2)
+ {
+ autoirq_setup(2);
+ inb(RX_STATUS); /* Clear pending interrupts. */
+ inb(TX_STATUS);
+ outb(AX_LOOP + 1, AX_CMD);
+
+ outb(0x00, AX_CMD);
+
+ autoirq = autoirq_report(1);
+
+ if (autoirq == 0)
+ {
+ printk("%s probe at %#x failed to detect IRQ line.\n",
+ mname, ioaddr);
+ return EAGAIN;
+ }
+ }
+
+ outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
+ dev->base_addr = ioaddr;
+ memcpy(dev->dev_addr, station_addr, ETH_ALEN);
+
+ if (dev->mem_start & 0xf)
+ el_debug = dev->mem_start & 0x7;
+ if (autoirq)
+ dev->irq = autoirq;
+
+ printk("%s: %s EtherLink at %#lx, using %sIRQ %d.\n", dev->name, mname, dev->base_addr,
+ autoirq ? "auto":"assigned ", dev->irq);
+
+#ifdef CONFIG_IP_MULTICAST
+ printk("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
+#endif
+
+ if (el_debug)
+ printk("%s", version);
+
+ /*
+ * Initialize the device structure.
+ */
+
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ /*
+ * The EL1-specific entries in the device structure.
+ */
+
+ dev->open = &el_open;
+ dev->hard_start_xmit = &el_start_xmit;
+ dev->stop = &el1_close;
+ dev->get_stats = &el1_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /*
+ * Setup the generic properties
+ */
+
+ ether_setup(dev);
+
+ return 0;
+}
+
+/*
+ * Open/initialize the board.
+ */
+
+static int el_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ printk("%s: Doing el_open()...", dev->name);
+
+ if (request_irq(dev->irq, &el_interrupt, 0, "3c501", NULL))
+ return -EAGAIN;
+
+ irq2dev_map[dev->irq] = dev;
+ el_reset(dev);
+
+ dev->start = 1;
+
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int el_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ if(dev->interrupt) /* May be unloading, don't stamp on */
+ return 1; /* the packet buffer this time */
+
+ if (dev->tbusy)
+ {
+ if (jiffies - dev->trans_start < 20)
+ {
+ if (el_debug > 2)
+ printk(" transmitter busy, deferred.\n");
+ return 1;
+ }
+ if (el_debug)
+ printk ("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
+ dev->name, inb(TX_STATUS), inb(AX_STATUS), inb(RX_STATUS));
+ lp->stats.tx_errors++;
+ outb(TX_NORM, TX_CMD);
+ outb(RX_NORM, RX_CMD);
+ outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
+ outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ }
+
+ if (skb == NULL)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ save_flags(flags);
+
+ /*
+ * Avoid incoming interrupts between us flipping tbusy and flipping
+ * mode as the driver assumes tbusy is a faithful indicator of card
+ * state
+ */
+
+ cli();
+
+ /*
+ * Avoid timer-based retransmission conflicts.
+ */
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ {
+ restore_flags(flags);
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ }
+ else
+ {
+ int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ unsigned char *buf = skb->data;
+
+load_it_again_sam:
+ lp->tx_pkt_start = gp_start;
+ lp->collisions = 0;
+
+ /*
+ * Command mode with status cleared should [in theory]
+ * mean no more interrupts can be pending on the card.
+ */
+
+#ifdef BLOCKOUT_1
+ disable_irq(dev->irq);
+#endif
+ outb_p(AX_SYS, AX_CMD);
+ inb_p(RX_STATUS);
+ inb_p(TX_STATUS);
+
+ lp->loading=1;
+
+ /*
+ * Turn interrupts back on while we spend a pleasant afternoon
+ * loading bytes into the board
+ */
+
+ restore_flags(flags);
+ outw(0x00, RX_BUF_CLR); /* Set rx packet area to 0. */
+ outw(gp_start, GP_LOW); /* aim - packet will be loaded into buffer start */
+ outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */
+ outw(gp_start, GP_LOW); /* the board reuses the same register */
+#ifndef BLOCKOUT_1
+ if(lp->loading==2) /* A receive upset our load, despite our best efforts */
+ {
+ if(el_debug>2)
+ printk("%s: burped during tx load.\n", dev->name);
+ goto load_it_again_sam; /* Sigh... */
+ }
+#endif
+ outb(AX_XMIT, AX_CMD); /* fire ... Trigger xmit. */
+ lp->loading=0;
+#ifdef BLOCKOUT_1
+ enable_irq(dev->irq);
+#endif
+ dev->trans_start = jiffies;
+ }
+
+ if (el_debug > 2)
+ printk(" queued xmit.\n");
+ dev_kfree_skb (skb, FREE_WRITE);
+ return 0;
+}
+
+
+/*
+ * The typical workload of the driver:
+ * Handle the ether interface interrupts.
+ */
+
+static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr;
+ int axsr; /* Aux. status reg. */
+
+ if (dev == NULL || dev->irq != irq)
+ {
+ printk ("3c501 driver: irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /*
+ * What happened ?
+ */
+
+ axsr = inb(AX_STATUS);
+
+ /*
+ * Log it
+ */
+
+ if (el_debug > 3)
+ printk("%s: el_interrupt() aux=%#02x", dev->name, axsr);
+ if (dev->interrupt)
+ printk("%s: Reentering the interrupt driver!\n", dev->name);
+ dev->interrupt = 1;
+#ifndef BLOCKOUT_1
+ if(lp->loading==1 && !dev->tbusy)
+ printk("%s: Inconsistent state loading while not in tx\n",
+ dev->name);
+#endif
+#ifdef BLOCKOUT_3
+ lp->loading=2; /* So we can spot loading interruptions */
+#endif
+
+ if (dev->tbusy)
+ {
+
+ /*
+ * Board in transmit mode. May be loading. If we are
+ * loading we shouldn't have got this.
+ */
+
+ int txsr = inb(TX_STATUS);
+#ifdef BLOCKOUT_2
+ if(lp->loading==1)
+ {
+ if(el_debug > 2)
+ {
+ printk("%s: Interrupt while loading [", dev->name);
+ printk(" txsr=%02x gp=%04x rp=%04x]\n", txsr, inw(GP_LOW),inw(RX_LOW));
+ }
+ lp->loading=2; /* Force a reload */
+ dev->interrupt = 0;
+ return;
+ }
+#endif
+ if (el_debug > 6)
+ printk(" txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
+
+ if ((axsr & 0x80) && (txsr & TX_READY) == 0)
+ {
+ /*
+ * FIXME: is there a logic to whether to keep on trying or
+ * reset immediately ?
+ */
+ if(el_debug>1)
+ printk("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x"
+ " gp=%03x rp=%03x.\n", dev->name, txsr, axsr,
+ inw(ioaddr + EL1_DATAPTR), inw(ioaddr + EL1_RXPTR));
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ else if (txsr & TX_16COLLISIONS)
+ {
+ /*
+ * Timed out
+ */
+ if (el_debug)
+ printk("%s: Transmit failed 16 times, ethernet jammed?\n",dev->name);
+ outb(AX_SYS, AX_CMD);
+ lp->stats.tx_aborted_errors++;
+ }
+ else if (txsr & TX_COLLISION)
+ {
+ /*
+ * Retrigger xmit.
+ */
+
+ if (el_debug > 6)
+ printk(" retransmitting after a collision.\n");
+ /*
+ * Poor little chip can't reset its own start pointer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ outw(lp->tx_pkt_start, GP_LOW);
+ outb(AX_XMIT, AX_CMD);
+ lp->stats.collisions++;
+ dev->interrupt = 0;
+ return;
+ }
+ else
+ {
+ /*
+ * It worked.. we will now fall through and receive
+ */
+ lp->stats.tx_packets++;
+ if (el_debug > 6)
+ printk(" Tx succeeded %s\n",
+ (txsr & TX_RDY) ? "." : "but tx is busy!");
+ /*
+			 *	This is safe; the interrupt is atomic WRT itself.
+ */
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* In case more to transmit */
+ }
+ }
+ else
+ {
+ /*
+ * In receive mode.
+ */
+
+ int rxsr = inb(RX_STATUS);
+ if (el_debug > 5)
+ printk(" rxsr=%02x txsr=%02x rp=%04x", rxsr, inb(TX_STATUS),inw(RX_LOW));
+ /*
+ * Just reading rx_status fixes most errors.
+ */
+ if (rxsr & RX_MISSED)
+ lp->stats.rx_missed_errors++;
+ else if (rxsr & RX_RUNT)
+ { /* Handled to avoid board lock-up. */
+ lp->stats.rx_length_errors++;
+ if (el_debug > 5)
+ printk(" runt.\n");
+ }
+ else if (rxsr & RX_GOOD)
+ {
+ /*
+ * Receive worked.
+ */
+ el_receive(dev);
+ }
+ else
+ {
+ /*
+ * Nothing? Something is broken!
+ */
+ if (el_debug > 2)
+ printk("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
+ dev->name, rxsr);
+ el_reset(dev);
+ }
+ if (el_debug > 3)
+ printk(".\n");
+ }
+
+ /*
+ * Move into receive mode
+ */
+
+ outb(AX_RX, AX_CMD);
+ outw(0x00, RX_BUF_CLR);
+ inb(RX_STATUS); /* Be certain that interrupts are cleared. */
+ inb(TX_STATUS);
+ dev->interrupt = 0;
+ return;
+}
+
+
+/*
+ * We have a good packet. Well, not really "good", just mostly not broken.
+ * We must check everything to see if it is good.
+ */
+
+static void el_receive(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int pkt_len;
+ struct sk_buff *skb;
+
+ pkt_len = inw(RX_LOW);
+
+ if (el_debug > 4)
+ printk(" el_receive %d.\n", pkt_len);
+
+ if ((pkt_len < 60) || (pkt_len > 1536))
+ {
+ if (el_debug)
+ printk("%s: bogus packet, length=%d\n", dev->name, pkt_len);
+ lp->stats.rx_over_errors++;
+ return;
+ }
+
+ /*
+ * Command mode so we can empty the buffer
+ */
+
+ outb(AX_SYS, AX_CMD);
+ skb = dev_alloc_skb(pkt_len+2);
+
+ /*
+ * Start of frame
+ */
+
+ outw(0x00, GP_LOW);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ return;
+ }
+ else
+ {
+ skb_reserve(skb,2); /* Force 16 byte alignment */
+ skb->dev = dev;
+ /*
+ * The read increments through the bytes. The interrupt
+ * handler will fix the pointer when it returns to
+ * receive mode.
+ */
+ insb(DATAPORT, skb_put(skb,pkt_len), pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ return;
+}
+
+static void el_reset(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug> 2)
+ printk("3c501 reset...");
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ outb(AX_LOOP, AX_CMD); /* Aux control, irq and loopback enabled */
+ {
+ int i;
+ for (i = 0; i < 6; i++) /* Set the station address. */
+ outb(dev->dev_addr[i], ioaddr + i);
+ }
+
+ outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
+ cli(); /* Avoid glitch on writes to CMD regs */
+ outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
+ outb(RX_NORM, RX_CMD); /* Set Rx commands. */
+ inb(RX_STATUS); /* Clear status. */
+ inb(TX_STATUS);
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ sti();
+}
+
+static int el1_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el_debug > 2)
+ printk("%s: Shutting down ethercard at %#x.\n", dev->name, ioaddr);
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /*
+ * Free and disable the IRQ.
+ */
+
+ free_irq(dev->irq, NULL);
+ outb(AX_RESET, AX_CMD); /* Reset the chip */
+ irq2dev_map[dev->irq] = 0;
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static struct enet_statistics *el1_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ *	Best-effort filtering.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ outb(RX_PROM, RX_CMD);
+ inb(RX_STATUS);
+ }
+ else if (dev->mc_list || dev->flags&IFF_ALLMULTI)
+ {
+ outb(RX_MULT, RX_CMD); /* Multicast or all multicast is the same */
+ inb(RX_STATUS); /* Clear status. */
+ }
+ else
+ {
+ outb(RX_NORM, RX_CMD);
+ inb(RX_STATUS);
+ }
+}
+
+#ifdef MODULE
+
+static char devicename[9] = { 0, };
+
+static struct device dev_3c501 =
+{
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x280, 5,
+ 0, 0, 0, NULL, el1_probe
+};
+
+static int io=0x280;
+static int irq=5;
+
+int init_module(void)
+{
+ dev_3c501.irq=irq;
+ dev_3c501.base_addr=io;
+ if (register_netdev(&dev_3c501) != 0)
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ /*
+ * No need to check MOD_IN_USE, as sys_delete_module() checks.
+ */
+
+ unregister_netdev(&dev_3c501);
+
+ /*
+ * Free up the private structure, or leak memory :-)
+ */
+
+ kfree(dev_3c501.priv);
+ dev_3c501.priv = NULL; /* gets re-allocated by el1_probe1 */
+
+ /*
+ * If we don't do this, we can't re-insmod it later.
+ */
+ release_region(dev_3c501.base_addr, EL1_IO_EXTENT);
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -m486 -c -o 3c501.o 3c501.c"
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c503.c b/linux/src/drivers/net/3c503.c
new file mode 100644
index 0000000..8ce488d
--- /dev/null
+++ b/linux/src/drivers/net/3c503.c
@@ -0,0 +1,690 @@
+/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver should work with the 3c503 and 3c503/16. It should be used
+ in shared memory mode for best performance, although it may also work
+ in programmed-I/O mode.
+
+ Sources:
+ EtherLink II Technical Reference Manual,
+ EtherLink II/16 Technical Reference Manual Supplement,
+ 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
+
+ The Crynwr 3c503 packet driver.
+
+ Changelog:
+
+ Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
+ Paul Gortmaker : multiple card support for module users.
+ rjohnson@analogic.com : Fix up PIO interface for efficient operation.
+
+*/
+
+static const char *version =
+ "3c503.c:v1.10 9/23/93 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+
+#include "8390.h"
+#include "3c503.h"
+#define WRD_COUNT 4	/* 16-bit words per FIFO burst in the PIO transfer loops */
+
+int el2_probe(struct device *dev);
+int el2_pio_probe(struct device *dev);
+int el2_probe1(struct device *dev, int ioaddr);
+
+/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
+static unsigned int netcard_portlist[] =
+ { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
+
+#define EL2_IO_EXTENT 16
+
+#ifdef HAVE_DEVLIST
+/* The 3c503 uses two entries, one for the safe memory-mapped probe and
+ the other for the typical I/O probe. */
+struct netdev_entry el2_drv =
+{"3c503", el2_probe, EL1_IO_EXTENT, 0};
+struct netdev_entry el2pio_drv =
+{"3c503pio", el2_pioprobe1, EL1_IO_EXTENT, netcard_portlist};
+#endif
+
+static int el2_open(struct device *dev);
+static int el2_close(struct device *dev);
+static void el2_reset_8390(struct device *dev);
+static void el2_init_card(struct device *dev);
+static void el2_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void el2_block_input(struct device *dev, int count, struct sk_buff *skb,
+ int ring_offset);
+static void el2_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* This routine probes for a memory-mapped 3c503 board by looking for
+ the "location register" at the end of the jumpered boot PROM space.
+ This works even if a PROM isn't there.
+
+ If the ethercard isn't found there is an optional probe for
+ ethercard jumpered to programmed-I/O mode.
+ */
+int
+el2_probe(struct device *dev)
+{
+ int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (addr = addrs; *addr; addr++) {
+ int i;
+ unsigned int base_bits = readb(*addr);
+ /* Find first set bit. */
+ for(i = 7; i >= 0; i--, base_bits >>= 1)
+ if (base_bits & 0x1)
+ break;
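+    /* The location register is expected to have exactly one bit set;
+       which bit it is picks the matching entry of netcard_portlist[].
+       Anything else (zero or several bits) is not a 3c503, so skip it. */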
+ if (base_bits != 1)
+ continue;
+ if (check_region(netcard_portlist[i], EL2_IO_EXTENT))
+ continue;
+ if (el2_probe1(dev, netcard_portlist[i]) == 0)
+ return 0;
+ }
+#if ! defined(no_probe_nonshared_memory) && ! defined (HAVE_DEVLIST)
+ return el2_pio_probe(dev);
+#else
+ return ENODEV;
+#endif
+}
+
+#ifndef HAVE_DEVLIST
+/* Try all of the locations that aren't obviously empty. This touches
+ a lot of locations, and is much riskier than the code above. */
+int
+el2_pio_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el2_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL2_IO_EXTENT))
+ continue;
+ if (el2_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* Probe for the Etherlink II card at I/O port base IOADDR,
+ returning non-zero on success. If found, set the station
+ address and memory parameters in DEVICE. */
+int
+el2_probe1(struct device *dev, int ioaddr)
+{
+ int i, iobase_reg, membase_reg, saved_406, wordlength;
+ static unsigned version_printed = 0;
+ unsigned long vendor_id;
+
+ /* Reset and/or avoid any lurking NE2000 */
+ if (inb(ioaddr + 0x408) == 0xff) {
+ udelay(1000);
+ return ENODEV;
+ }
+
+ /* We verify that it's a 3C503 board by checking the first three octets
+ of its ethernet address. */
+ iobase_reg = inb(ioaddr+0x403);
+ membase_reg = inb(ioaddr+0x404);
+ /* ASIC location registers should be 0 or have only a single bit set. */
+ if ( (iobase_reg & (iobase_reg - 1))
+ || (membase_reg & (membase_reg - 1))) {
+ return ENODEV;
+ }
+ saved_406 = inb_p(ioaddr + 0x406);
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
+ outb_p(ECNTRL_THIN, ioaddr + 0x406);
+ /* Map the station addr PROM into the lower I/O ports. We now check
+ for both the old and new 3Com prefix */
+ outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
+ vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
+ if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
+ /* Restore the register we frobbed. */
+ outb(saved_406, ioaddr + 0x406);
+ return ENODEV;
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("3c503.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ dev->base_addr = ioaddr;
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk ("3c503: unable to allocate memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ printk("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ /* Map the 8390 back into the window. */
+ outb(ECNTRL_THIN, ioaddr + 0x406);
+
+ /* Check for EL2/16 as described in tech. man. */
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+ outb_p(0, ioaddr + EN0_DCFG);
+ outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
+ wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
+ outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
+
+ /* Probe for, turn on and clear the board's shared memory. */
+ if (ei_debug > 2) printk(" memory jumpers %2.2x ", membase_reg);
+ outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
+
+ /* This should be probed for (or set via an ioctl()) at run-time.
+ Right now we use a sleazy hack to pass in the interface number
+ at boot-time via the low bits of the mem_end field. That value is
+ unused, and the low bits would be discarded even if it was used. */
+#if defined(EI8390_THICK) || defined(EL2_AUI)
+ ei_status.interface_num = 1;
+#else
+ ei_status.interface_num = dev->mem_end & 0xf;
+#endif
+ printk(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
+
+ if ((membase_reg & 0xf0) == 0) {
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ } else {
+ dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
+ ((membase_reg & 0xA0) ? 0x4000 : 0);
+
+#define EL2_MEMSIZE ((EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256)
+#ifdef EL2MEMTEST
+ /* This has never found an error, but someone might care.
+ Note that it only tests the 2nd 8kB on 16kB 3c503/16
+ cards between card addr. 0x2000 and 0x3fff. */
+ { /* Check the card's memory. */
+ unsigned long mem_base = dev->mem_start;
+ unsigned int test_val = 0xbbadf00d;
+ writel(0xba5eba5e, mem_base);
+ for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
+ writel(test_val, mem_base + i);
+ if (readl(mem_base) != 0xba5eba5e
+ || readl(mem_base + i) != test_val) {
+ printk("3c503: memory failure or memory address conflict.\n");
+ dev->mem_start = 0;
+ ei_status.name = "3c503-PIO";
+ break;
+ }
+ test_val += 0x55555555;
+ writel(0, mem_base + i);
+ }
+ }
+#endif /* EL2MEMTEST */
+
+ dev->mem_end = dev->rmem_end = dev->mem_start + EL2_MEMSIZE;
+
+ if (wordlength) { /* No Tx pages to skip over to get to Rx */
+ dev->rmem_start = dev->mem_start;
+ ei_status.name = "3c503/16";
+ } else {
+ dev->rmem_start = TX_PAGES*256 + dev->mem_start;
+ ei_status.name = "3c503";
+ }
+ }
+
+ /*
+ Divide up the memory on the card. This is the same regardless of
+ whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
+ we use the entire 8k of bank1 for an Rx ring. We only use 3k
+ of the bank0 for 2 full size Tx packet slots. For 8 bit cards,
+ (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining
+ 5kB for an Rx ring. */
+
+ if (wordlength) {
+ ei_status.tx_start_page = EL2_MB0_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG;
+ } else {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ }
+
+ /* Finish setting the board's parameters. */
+ ei_status.stop_page = EL2_MB1_STOP_PG;
+ ei_status.word16 = wordlength;
+ ei_status.reset_8390 = &el2_reset_8390;
+ ei_status.get_8390_hdr = &el2_get_8390_hdr;
+ ei_status.block_input = &el2_block_input;
+ ei_status.block_output = &el2_block_output;
+
+ request_region(ioaddr, EL2_IO_EXTENT, ei_status.name);
+
+ if (dev->irq == 2)
+ dev->irq = 9;
+ else if (dev->irq > 5 && dev->irq != 9) {
+ printk("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
+ dev->irq);
+ dev->irq = 0;
+ }
+
+ ei_status.saved_irq = dev->irq;
+
+ dev->start = 0;
+ dev->open = &el2_open;
+ dev->stop = &el2_close;
+
+ if (dev->mem_start)
+ printk("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
+ dev->name, ei_status.name, (wordlength+1)<<3,
+ dev->mem_start, dev->mem_end-1);
+
+ else
+ {
+ ei_status.tx_start_page = EL2_MB1_START_PG;
+ ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
+ printk("\n%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
+ dev->name, ei_status.name, (wordlength+1)<<3);
+ }
+ return 0;
+}
+
+static int
+el2_open(struct device *dev)
+{
+
+ if (dev->irq < 2) {
+ int irqlist[] = {5, 9, 3, 4, 0};
+ int *irqp = irqlist;
+
+ outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
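+	/* No IRQ was configured: try each candidate line in turn -- check
+	   that it is free, pulse it through the GA's IDCFR register, and keep
+	   the first line that autoirq_report() actually saw. */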
+ do {
+ if (request_irq (*irqp, NULL, 0, "bogus", NULL) != -EBUSY) {
+ /* Twinkle the interrupt, and check if it's seen. */
+ autoirq_setup(0);
+ outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
+ outb_p(0x00, E33G_IDCFR);
+ if (*irqp == autoirq_report(0) /* It's a good IRQ line! */
+ && request_irq (dev->irq = *irqp, &ei_interrupt, 0, ei_status.name, NULL) == 0)
+ break;
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+ return -EAGAIN;
+ }
+ } else {
+ if (request_irq(dev->irq, &ei_interrupt, 0, ei_status.name, NULL)) {
+ return -EAGAIN;
+ }
+ }
+
+ el2_init_card(dev);
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+el2_close(struct device *dev)
+{
+ free_irq(dev->irq, NULL);
+ dev->irq = ei_status.saved_irq;
+ irq2dev_map[dev->irq] = NULL;
+ outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
+
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* This is called whenever we have an unrecoverable failure:
+ transmit timeout
+ Bad ring buffer packet header
+ */
+static void
+el2_reset_8390(struct device *dev)
+{
+ if (ei_debug > 1) {
+ printk("%s: Resetting the 3c503 board...", dev->name);
+ printk("%#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
+ E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
+ }
+ outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
+ ei_status.txing = 0;
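+    /* Reselect the configured transceiver (BNC or AUI) after the reset. */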
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ el2_init_card(dev);
+ if (ei_debug > 1) printk("done\n");
+}
+
+/* Initialize the 3c503 GA registers after a reset. */
+static void
+el2_init_card(struct device *dev)
+{
+ /* Unmap the station PROM and select the DIX or BNC connector. */
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+
+ /* Set ASIC copy of rx's first and last+1 buffer pages */
+ /* These must be the same as in the 8390. */
+ outb(ei_status.rx_start_page, E33G_STARTPG);
+ outb(ei_status.stop_page, E33G_STOPPG);
+
+ /* Point the vector pointer registers somewhere ?harmless?. */
+ outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
+ outb(0xff, E33G_VP1);
+ outb(0x00, E33G_VP0);
+ /* Turn off all interrupts until we're opened. */
+ outb_p(0x00, dev->base_addr + EN0_IMR);
+ /* Enable IRQs iff started. */
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ /* Set the interrupt line. */
+ outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
+ outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */
+ outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
+ outb_p(0x00, E33G_DMAAL);
+ return; /* We always succeed */
+}
+
+/*
+ * Either use the shared memory (if enabled on the board) or put the packet
+ * out through the ASIC FIFO.
+ */
+static void
+el2_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ unsigned short int *wrd;
+ int boguscount; /* timeout counter */
+ unsigned short word; /* temporary for better machine code */
+
+ if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
+ outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
+ else
+ outb(EGACFR_NORM, E33G_GACFR);
+
+ if (dev->mem_start) { /* Shared memory transfer */
+ unsigned long dest_addr = dev->mem_start +
+ ((start_page - ei_status.tx_start_page) << 8);
+ memcpy_toio(dest_addr, buf, count);
+    outb(EGACFR_NORM, E33G_GACFR);	/* Back to bank1 in case we were on bank0 */
+ return;
+ }
+
+/*
+ * No shared memory, put the packet out the other way.
+ * Set up then start the internal memory transfer to Tx Start Page
+ */
+
+ word = (unsigned short)start_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I am going to write data to the FIFO as quickly as possible.
+ * Note that E33G_FIFOH is defined incorrectly. It is really
+ * E33G_FIFOL, the lowest port address for both the byte and
+ * word write. Variable 'count' is NOT checked. Caller must supply a
+ * valid count. Note that I may write a harmless extra byte to the
+ * 8390 if the byte-count was not even.
+ */
+ wrd = (unsigned short int *) buf;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_block_output.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ outsw(E33G_FIFOH, wrd, WRD_COUNT);
+ wrd += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ outsw(E33G_FIFOH, wrd, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ return;
+}
+
+/* Read the 4 byte, page aligned 8390 specific header. */
+static void
+el2_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int boguscount;
+ unsigned long hdr_start = dev->mem_start + ((ring_page - EL2_MB1_START_PG)<<8);
+ unsigned short word;
+
+ if (dev->mem_start) { /* Use the shared memory. */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+
+ word = (unsigned short)ring_page;
+ outb(word&0xFF, E33G_DMAAH);
+ outb(word>>8, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
+ memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+}
+
+
+static void
+el2_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int boguscount = 0;
+ unsigned short int *buf;
+ unsigned short word;
+
+ int end_of_ring = dev->rmem_end;
+
+    /* Maybe enable shared memory just to be safe... nahh. */
+ if (dev->mem_start) { /* Use the shared memory. */
+ ring_offset -= (EL2_MB1_START_PG<<8);
+ if (dev->mem_start + ring_offset + count > end_of_ring) {
+ /* We must wrap the input move. */
+ int semi_count = end_of_ring - (dev->mem_start + ring_offset);
+ memcpy_fromio(skb->data, dev->mem_start + ring_offset, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, dev->mem_start + ring_offset, count, 0);
+ }
+ return;
+ }
+
+/*
+ * No shared memory, use programmed I/O.
+ */
+ word = (unsigned short) ring_offset;
+ outb(word>>8, E33G_DMAAH);
+ outb(word&0xFF, E33G_DMAAL);
+
+ outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
+ | ECNTRL_START, E33G_CNTRL);
+
+/*
+ * Here I also try to get data as fast as possible. I am betting that I
+ * can read one extra byte without clobbering anything in the kernel because
+ * this would only occur on an odd byte-count and allocation of skb->data
+ * is word-aligned. Variable 'count' is NOT checked. Caller must check
+ * for a valid count.
+ * [This is currently quite safe.... but if one day the 3c503 explodes
+ * you know where to come looking ;)]
+ */
+
+ buf = (unsigned short int *) skb->data;
+ count = (count + 1) >> 1;
+ for(;;)
+ {
+ boguscount = 0x1000;
+ while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
+ {
+ if(!boguscount--)
+ {
+ printk("%s: FIFO blocked in el2_block_input.\n", dev->name);
+ el2_reset_8390(dev);
+ goto blocked;
+ }
+ }
+ if(count > WRD_COUNT)
+ {
+ insw(E33G_FIFOH, buf, WRD_COUNT);
+ buf += WRD_COUNT;
+ count -= WRD_COUNT;
+ }
+ else
+ {
+ insw(E33G_FIFOH, buf, count);
+ break;
+ }
+ }
+ blocked:;
+ outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
+ return;
+}
+#ifdef MODULE
+#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
+#define NAMELEN 8 /* #of chars for storing dev->name */
+
+static char namelist[NAMELEN * MAX_EL2_CARDS] = { 0, };
+static struct device dev_el2[MAX_EL2_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_EL2_CARDS] = { 0, };
+static int irq[MAX_EL2_CARDS] = { 0, };
+static int xcvr[MAX_EL2_CARDS] = { 0, }; /* choose int. or ext. xcvr */
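+/* These arrays can be filled in when the module is loaded, typically with
+   something like "insmod 3c503.o io=0x300,0x310 irq=9,5 xcvr=0,1"
+   (one entry per card). */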
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct device *dev = &dev_el2[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ dev->init = el2_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "3c503.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
+ struct device *dev = &dev_el2[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: el2_close() handles free_irq + irq2dev map */
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(dev->base_addr, EL2_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c503.h b/linux/src/drivers/net/3c503.h
new file mode 100644
index 0000000..b9f8a46
--- /dev/null
+++ b/linux/src/drivers/net/3c503.h
@@ -0,0 +1,91 @@
+/* Definitions for the 3Com 3c503 Etherlink 2. */
+/* This file is distributed under the GPL.
+ Many of these names and comments are directly from the Crynwr packet
+ drivers, which are released under the GPL. */
+
+#define EL2H (dev->base_addr + 0x400)
+#define EL2L (dev->base_addr)
+
+/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
+ out of available addresses on the first one... */
+
+#define OLD_3COM_ID 0x02608c
+#define NEW_3COM_ID 0x0020af
+
+/* Shared memory management parameters. NB: The 8 bit cards have only
+ one bank (MB1) which serves both Tx and Rx packet space. The 16bit
+ cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
+ You choose which bank appears in the sh. mem window with EGACFR_MBSn */
+
+#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
+#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
+#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
+
+/* 3Com 3c503 ASIC registers */
+#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
+#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
+#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
+#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
+ /* (non-useful, but it also appears at the end of EPROM space) */
+#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
+#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
+#define E33G_CNTRL (EL2H+6) /* Board's main control register */
+#define E33G_STATUS (EL2H+7) /* Status on completions. */
+#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
+ /* (Which IRQ to assert, DMA chan to use) */
+#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
+#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
+/* "Vector pointer" - if this address matches a read, the EPROM (rather than
+ shared RAM) is mapped into memory space. */
+#define E33G_VP2 (EL2H+11)
+#define E33G_VP1 (EL2H+12)
+#define E33G_VP0 (EL2H+13)
+#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
+#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
+
+/* Bits in E33G_CNTRL register: */
+
+#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
+#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
+#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
+#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
+#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
+#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
+#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
+#define ECNTRL_START (0x80) /* Start the DMA logic */
+
+/* Bits in E33G_STATUS register: */
+
+#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
+#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
+#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
+#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
+#define ESTAT_DIP (0x08) /* DMA In Progress */
+
+/* Bits in E33G_GACFR register: */
+
+#define EGACFR_NIM (0x80) /* NIC interrupt mask */
+#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
+#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
+#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
+#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
+#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
+
+#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
+#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
+
+/*
+ MBS2 MBS1 MBS0 Sh. mem windows card mem at:
+ ---- ---- ---- -----------------------------
+ 0 0 0 0x0000 -- bank 0
+ 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
+ 0 1 0 0x4000 -- bank 2, not used
+ 0 1 1 0x6000 -- bank 3, not used
+
+There was going to be a 32k card that used bank 2 and 3, but it
+never got produced.
+
+*/
+
+
+/* End of 3C503 parameter definitions */
diff --git a/linux/src/drivers/net/3c505.c b/linux/src/drivers/net/3c505.c
new file mode 100644
index 0000000..d78dad5
--- /dev/null
+++ b/linux/src/drivers/net/3c505.c
@@ -0,0 +1,1732 @@
+/*
+ * Linux ethernet device driver for the 3Com Etherlink Plus (3C505)
+ * By Craig Southeren, Juha Laiho and Philip Blundell
+ *
+ * 3c505.c This module implements an interface to the 3Com
+ * Etherlink Plus (3c505) ethernet card. Linux device
+ * driver interface reverse engineered from the Linux 3C509
+ * device drivers. Some 3C505 information gleaned from
+ * the Crynwr packet driver. Still this driver would not
+ * be here without 3C505 technical reference provided by
+ * 3Com.
+ *
+ * $Id: 3c505.c,v 1.1 1999/04/26 05:51:48 tb Exp $
+ *
+ * Authors: Linux 3c505 device driver by
+ * Craig Southeren, <craigs@ineluki.apana.org.au>
+ * Final debugging by
+ * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
+ * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
+ * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
+ * Linux 3C509 driver by
+ * Donald Becker, <becker@super.org>
+ * Crynwr packet driver by
+ * Krishnan Gopalan and Gregg Stefancik,
+ * Clemson University Engineering Computer Operations.
+ * Portions of the code have been adapted from the 3c505
+ * driver for NCSA Telnet by Bruce Orchard and later
+ * modified by Warren Van Houten and krus@diku.dk.
+ * 3C505 technical information provided by
+ * Terry Murphy, of 3Com Network Adapter Division
+ * Linux 1.3.0 changes by
+ * Alan Cox <Alan.Cox@linux.org>
+ * More debugging and DMA version by Philip Blundell
+ */
+
+/* Theory of operation:
+
+ * The 3c505 is quite an intelligent board. All communication with it is done
+ * by means of Primary Command Blocks (PCBs); these are transferred using PIO
+ * through the command register. The card has 256k of on-board RAM, which is
+ * used to buffer received packets. It might seem at first that more buffers
+ * are better, but in fact this isn't true. From my tests, it seems that
+ * more than about 10 buffers are unnecessary, and there is a noticeable
+ * performance hit in having more active on the card. So the majority of the
+ * card's memory isn't, in fact, used.
+ *
+ * We keep up to 4 "receive packet" commands active on the board at a time.
+ * When a packet comes in, so long as there is a receive command active, the
+ * board will send us a "packet received" PCB and then add the data for that
+ * packet to the DMA queue. If a DMA transfer is not already in progress, we
+ * set one up to start uploading the data. We have to maintain a list of
+ * backlogged receive packets, because the card may decide to tell us about
+ * a newly-arrived packet at any time, and we may not be able to start a DMA
+ * transfer immediately (ie one may already be going on). We can't NAK the
+ * PCB, because then it would throw the packet away.
+ *
+ * Trying to send a PCB to the card at the wrong moment seems to have bad
+ * effects. If we send it a transmit PCB while a receive DMA is happening,
+ * it will just NAK the PCB and so we will have wasted our time. Worse, it
+ * sometimes seems to interrupt the transfer. The majority of the low-level
+ * code is protected by one huge semaphore -- "busy" -- which is set whenever
+ * it probably isn't safe to do anything to the card. The receive routine
+ * must gain a lock on "busy" before it can start a DMA transfer, and the
+ * transmit routine must gain a lock before it sends the first PCB to the card.
+ * The send_pcb() routine also has an internal semaphore to protect it against
+ * being re-entered (which would be disastrous) -- this is needed because
+ * several things can happen asynchronously (re-priming the receiver and
+ * asking the card for statistics, for example). send_pcb() will also refuse
+ * to talk to the card at all if a DMA upload is happening. The higher-level
+ * networking code will reschedule a later retry if some part of the driver
+ * is blocked. In practice, this doesn't seem to happen very often.
+ */
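+
+/* A PCB, as used throughout this driver, is a small command block: a
+ * one-byte command code, a one-byte data length, and up to MAX_PCB_DATA
+ * bytes of payload (pcb_struct, declared in 3c505.h).  PCBs are shuffled
+ * to and from the adapter one byte at a time through the command register
+ * by send_pcb() and receive_pcb() below.
+ */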
+
+/* This driver will not work with revision 2 hardware, because the host
+ * control register is write-only. It should be fairly easy to arrange to
+ * keep our own soft-copy of the intended contents of this register, if
+ * somebody has the time. There may be firmware differences that cause
+ * other problems, though, and I don't have an old card to test.
+ */
+
+/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly
+ * to make it more reliable, and secondly to add DMA mode. Many things could
+ * probably be done better; the concurrency protection is particularly awful.
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "3c505.h"
+
+#define ELP_DMA 6 /* DMA channel to use */
+#define ELP_RX_PCBS 4
+
+/*********************************************************
+ *
+ * define debug messages here as common strings to reduce space
+ *
+ *********************************************************/
+
+static const char *filename = __FILE__;
+
+static const char *timeout_msg = "*** timeout at %s:%s (line %d) ***\n";
+#define TIMEOUT_MSG(lineno) \
+ printk(timeout_msg, filename,__FUNCTION__,(lineno))
+
+static const char *invalid_pcb_msg =
+"*** invalid pcb length %d at %s:%s (line %d) ***\n";
+#define INVALID_PCB_MSG(len) \
+ printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__)
+
+static const char *search_msg = "%s: Looking for 3c505 adapter at address %#x...";
+
+static const char *stilllooking_msg = "still looking...";
+
+static const char *found_msg = "found.\n";
+
+static const char *notfound_msg = "not found (reason = %d)\n";
+
+static const char *couldnot_msg = "%s: 3c505 not found\n";
+
+/*********************************************************
+ *
+ * various other debug stuff
+ *
+ *********************************************************/
+
+#ifdef ELP_DEBUG
+static const int elp_debug = ELP_DEBUG;
+#else
+static const int elp_debug = 0;
+#endif
+
+/*
+ * 0 = no messages (well, some)
+ * 1 = messages when high level commands performed
+ * 2 = messages when low level commands performed
+ * 3 = messages when interrupts received
+ */
+
+/*****************************************************************
+ *
+ * useful macros
+ *
+ *****************************************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+
+/*****************************************************************
+ *
+ * List of I/O-addresses we try to auto-sense
+ * Last element MUST BE 0!
+ *****************************************************************/
+
+const int addr_list[] = {0x300, 0x280, 0x310, 0};
+
+/* Dma Memory related stuff */
+
+/* Pure 2^n version of get_order */
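+/* For the DMA_BUFFER_SIZE used below (1600 bytes) this yields order 0 on
+   a 4 kB-page machine, i.e. a single page is allocated. */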
+static inline int __get_order(unsigned long size)
+{
+ int order;
+
+ size = (size - 1) >> (PAGE_SHIFT - 1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+static unsigned long dma_mem_alloc(int size)
+{
+ int order = __get_order(size);
+
+ return __get_dma_pages(GFP_KERNEL, order);
+}
+
+
+/*****************************************************************
+ *
+ * Functions for I/O (note the inline !)
+ *
+ *****************************************************************/
+
+static inline unsigned char inb_status(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_STATUS);
+}
+
+static inline unsigned char inb_control(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_CONTROL);
+}
+
+static inline int inb_command(unsigned int base_addr)
+{
+ return inb(base_addr + PORT_COMMAND);
+}
+
+static inline void outb_control(unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr + PORT_CONTROL);
+}
+
+static inline void outb_command(unsigned char val, unsigned int base_addr)
+{
+ outb(val, base_addr + PORT_COMMAND);
+}
+
+static inline unsigned int inw_data(unsigned int base_addr)
+{
+ return inw(base_addr + PORT_DATA);
+}
+
+static inline void outw_data(unsigned int val, unsigned int base_addr)
+{
+ outw(val, base_addr + PORT_DATA);
+}
+
+
+/*****************************************************************
+ *
+ * structure to hold context information for adapter
+ *
+ *****************************************************************/
+
+#define DMA_BUFFER_SIZE 1600
+#define BACKLOG_SIZE 4
+
+typedef struct {
+ volatile short got[NUM_TRANSMIT_CMDS]; /* flags for command completion */
+ pcb_struct tx_pcb; /* PCB for foreground sending */
+ pcb_struct rx_pcb; /* PCB for foreground receiving */
+ pcb_struct itx_pcb; /* PCB for background sending */
+ pcb_struct irx_pcb; /* PCB for background receiving */
+ struct enet_statistics stats;
+
+ void *dma_buffer;
+
+ struct {
+ unsigned int length[BACKLOG_SIZE];
+ unsigned int in;
+ unsigned int out;
+ } rx_backlog;
+
+ struct {
+ unsigned int direction;
+ unsigned int length;
+ unsigned int copy_flag;
+ struct sk_buff *skb;
+ long int start_time;
+ } current_dma;
+
+ /* flags */
+ unsigned long send_pcb_semaphore;
+ unsigned int dmaing;
+ unsigned long busy;
+
+ unsigned int rx_active; /* number of receive PCBs */
+} elp_device;
+
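+/* Advance an rx_backlog ring index, wrapping around at BACKLOG_SIZE. */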
+static inline unsigned int backlog_next(unsigned int n)
+{
+ return (n + 1) % BACKLOG_SIZE;
+}
+
+/*****************************************************************
+ *
+ * useful functions for accessing the adapter
+ *
+ *****************************************************************/
+
+/*
+ * use this routine when accessing the ASF bits as they are
+ * changed asynchronously by the adapter
+ */
+
+/* get adapter PCB status */
+#define GET_ASF(addr) \
+ (get_status(addr)&ASF_PCB_MASK)
+
+static inline int get_status(unsigned int base_addr)
+{
+ int timeout = jiffies + 10;
+ register int stat1;
+ do {
+ stat1 = inb_status(base_addr);
+ } while (stat1 != inb_status(base_addr) && jiffies < timeout);
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ return stat1;
+}
+
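+/* Set the host status flags (HSF1/HSF2) in the control register; these
+   are what ACK or NAK a PCB coming from the adapter. */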
+static inline void set_hsf(unsigned int base_addr, int hsf)
+{
+ cli();
+ outb_control((inb_control(base_addr) & ~HSF_PCB_MASK) | hsf, base_addr);
+ sti();
+}
+
+static int start_receive(struct device *, pcb_struct *);
+
+inline static void adapter_reset(struct device *dev)
+{
+ int timeout;
+ unsigned char orig_hcr = inb_control(dev->base_addr);
+
+ elp_device *adapter = dev->priv;
+
+ outb_control(0, dev->base_addr);
+
+ if (inb_status(dev->base_addr) & ACRF) {
+ do {
+ inb_command(dev->base_addr);
+ timeout = jiffies + 2;
+ while ((jiffies <= timeout) && !(inb_status(dev->base_addr) & ACRF));
+ } while (inb_status(dev->base_addr) & ACRF);
+ set_hsf(dev->base_addr, HSF_PCB_NAK);
+ }
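+	/* Pulse ATTN and then FLSH, pausing roughly one clock tick (about
+	   10 ms at HZ=100) between the steps, then restore the original HCR. */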
+ outb_control(inb_control(dev->base_addr) | ATTN | DIR, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+ outb_control(inb_control(dev->base_addr) & ~ATTN, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+ outb_control(inb_control(dev->base_addr) | FLSH, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+ outb_control(inb_control(dev->base_addr) & ~FLSH, dev->base_addr);
+ timeout = jiffies + 1;
+ while (jiffies <= timeout);
+
+ outb_control(orig_hcr, dev->base_addr);
+ if (!start_receive(dev, &adapter->tx_pcb))
+ printk("%s: start receive command failed \n", dev->name);
+}
+
+/* Check to make sure that a DMA transfer hasn't timed out. This should never happen
+ * in theory, but seems to occur occasionally if the card gets prodded at the wrong
+ * time.
+ */
+static inline void check_dma(struct device *dev)
+{
+ elp_device *adapter = dev->priv;
+ if (adapter->dmaing && (jiffies > (adapter->current_dma.start_time + 10))) {
+ unsigned long flags;
+ printk("%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? "download" : "upload", get_dma_residue(dev->dma));
+ save_flags(flags);
+ cli();
+ adapter->dmaing = 0;
+ adapter->busy = 0;
+ disable_dma(dev->dma);
+ if (adapter->rx_active)
+ adapter->rx_active--;
+ outb_control(inb_control(dev->base_addr) & ~(DMAE | TCEN | DIR), dev->base_addr);
+ restore_flags(flags);
+ }
+}
+
+/* Primitive functions used by send_pcb() */
+static inline unsigned int send_pcb_slow(unsigned int base_addr, unsigned char byte)
+{
+ unsigned int timeout;
+ outb_command(byte, base_addr);
+ for (timeout = jiffies + 5; jiffies < timeout;) {
+ if (inb_status(base_addr) & HCRE)
+ return FALSE;
+ }
+ printk("3c505: send_pcb_slow timed out\n");
+ return TRUE;
+}
+
+static inline unsigned int send_pcb_fast(unsigned int base_addr, unsigned char byte)
+{
+ unsigned int timeout;
+ outb_command(byte, base_addr);
+ for (timeout = 0; timeout < 40000; timeout++) {
+ if (inb_status(base_addr) & HCRE)
+ return FALSE;
+ }
+ printk("3c505: send_pcb_fast timed out\n");
+ return TRUE;
+}
+
+/* Check to see if the receiver needs restarting, and kick it if so */
+static inline void prime_rx(struct device *dev)
+{
+ elp_device *adapter = dev->priv;
+ while (adapter->rx_active < ELP_RX_PCBS && dev->start) {
+ if (!start_receive(dev, &adapter->itx_pcb))
+ break;
+ }
+}
+
+/*****************************************************************
+ *
+ * send_pcb
+ * Send a PCB to the adapter.
+ *
+ * output byte to command reg --<--+
+ * wait until HCRE is non zero |
+ * loop until all bytes sent -->--+
+ * set HSF1 and HSF2 to 1
+ * output pcb length
+ * wait until ASF give ACK or NAK
+ * set HSF1 and HSF2 to 0
+ *
+ *****************************************************************/
+
+/* This can be quite slow -- the adapter is allowed to take up to 40ms
+ * to respond to the initial interrupt.
+ *
+ * We run initially with interrupts turned on, but with a semaphore set
+ * so that nobody tries to re-enter this code. Once the first byte has
+ * gone through, we turn interrupts off and then send the others (the
+ * timeout is reduced to 500us).
+ */
+
+static int send_pcb(struct device *dev, pcb_struct * pcb)
+{
+ int i;
+ int timeout;
+ elp_device *adapter = dev->priv;
+
+ check_dma(dev);
+
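+	/* Refuse to touch the card at all while a receive (upload) DMA is in
+	   progress, as explained in the theory-of-operation comment above. */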
+ if (adapter->dmaing && adapter->current_dma.direction == 0)
+ return FALSE;
+
+ /* Avoid contention */
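+	/* Note that this vintage set_bit() is a test-and-set: it returns the
+	   previous value of the bit, so a non-zero result means someone else
+	   already holds the send_pcb semaphore. */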
+ if (set_bit(1, &adapter->send_pcb_semaphore)) {
+ if (elp_debug >= 3) {
+ printk("%s: send_pcb entered while threaded\n", dev->name);
+ }
+ return FALSE;
+ }
+ /*
+ * load each byte into the command register and
+ * wait for the HCRE bit to indicate the adapter
+ * had read the byte
+ */
+ set_hsf(dev->base_addr, 0);
+
+ if (send_pcb_slow(dev->base_addr, pcb->command))
+ goto abort;
+
+ cli();
+
+ if (send_pcb_fast(dev->base_addr, pcb->length))
+ goto sti_abort;
+
+ for (i = 0; i < pcb->length; i++) {
+ if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
+ goto sti_abort;
+ }
+
+ outb_control(inb_control(dev->base_addr) | 3, dev->base_addr); /* signal end of PCB */
+ outb_command(2 + pcb->length, dev->base_addr);
+
+ /* now wait for the acknowledgement */
+ sti();
+
+ for (timeout = jiffies + 5; jiffies < timeout;) {
+ switch (GET_ASF(dev->base_addr)) {
+ case ASF_PCB_ACK:
+ adapter->send_pcb_semaphore = 0;
+ return TRUE;
+ break;
+ case ASF_PCB_NAK:
+ printk("%s: send_pcb got NAK\n", dev->name);
+ goto abort;
+ break;
+ }
+ }
+
+ if (elp_debug >= 1)
+ printk("%s: timeout waiting for PCB acknowledge (status %02x)\n", dev->name, inb_status(dev->base_addr));
+
+ sti_abort:
+ sti();
+ abort:
+ adapter->send_pcb_semaphore = 0;
+ return FALSE;
+}
+
+
+/*****************************************************************
+ *
+ * receive_pcb
+ * Read a PCB from the adapter
+ *
+ * wait for ACRF to be non-zero ---<---+
+ * input a byte |
+ * if ASF1 and ASF2 were not both one |
+ * before byte was read, loop --->---+
+ * set HSF1 and HSF2 for ack
+ *
+ *****************************************************************/
+
+static int receive_pcb(struct device *dev, pcb_struct * pcb)
+{
+ int i, j;
+ int total_length;
+ int stat;
+ int timeout;
+
+ elp_device *adapter = dev->priv;
+
+ set_hsf(dev->base_addr, 0);
+
+ /* get the command code */
+ timeout = jiffies + 2;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && jiffies < timeout);
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+ pcb->command = inb_command(dev->base_addr);
+
+ /* read the data length */
+ timeout = jiffies + 3;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && jiffies < timeout);
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ printk("%s: status %02x\n", dev->name, stat);
+ return FALSE;
+ }
+ pcb->length = inb_command(dev->base_addr);
+
+ if (pcb->length > MAX_PCB_DATA) {
+ INVALID_PCB_MSG(pcb->length);
+ adapter_reset(dev);
+ return FALSE;
+ }
+ /* read the data */
+ cli();
+ i = 0;
+ do {
+ j = 0;
+ while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && j++ < 20000);
+ pcb->data.raw[i++] = inb_command(dev->base_addr);
+ if (i > MAX_PCB_DATA)
+ INVALID_PCB_MSG(i);
+ } while ((stat & ASF_PCB_MASK) != ASF_PCB_END && j < 20000);
+ sti();
+ if (j >= 20000) {
+ TIMEOUT_MSG(__LINE__);
+ return FALSE;
+ }
+ /* woops, the last "data" byte was really the length! */
+ total_length = pcb->data.raw[--i];
+
+ /* safety check total length vs data length */
+ if (total_length != (pcb->length + 2)) {
+ if (elp_debug >= 2)
+ printk("%s: mangled PCB received\n", dev->name);
+ set_hsf(dev->base_addr, HSF_PCB_NAK);
+ return FALSE;
+ }
+
+ if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
+ if (set_bit(0, (void *) &adapter->busy)) {
+ if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
+ set_hsf(dev->base_addr, HSF_PCB_NAK);
+ printk("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
+ pcb->command = 0;
+ return TRUE;
+ } else {
+ pcb->command = 0xff;
+ }
+ }
+ }
+ set_hsf(dev->base_addr, HSF_PCB_ACK);
+ return TRUE;
+}
+
+/******************************************************
+ *
+ * queue a receive command on the adapter so we will get an
+ * interrupt when a packet is received.
+ *
+ ******************************************************/
+
+static int start_receive(struct device *dev, pcb_struct * tx_pcb)
+{
+ int status;
+ elp_device *adapter = dev->priv;
+
+ if (elp_debug >= 3)
+ printk("%s: restarting receiver\n", dev->name);
+ tx_pcb->command = CMD_RECEIVE_PACKET;
+ tx_pcb->length = sizeof(struct Rcv_pkt);
+ tx_pcb->data.rcv_pkt.buf_seg
+ = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */
+ tx_pcb->data.rcv_pkt.buf_len = 1600;
+ tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */
+ status = send_pcb(dev, tx_pcb);
+ if (status)
+ adapter->rx_active++;
+ return status;
+}
+
+/******************************************************
+ *
+ * extract a packet from the adapter
+ * this routine is only called from within the interrupt
+ * service routine, so no cli/sti calls are needed
+ * note that the length is always assumed to be even
+ *
+ ******************************************************/
+
+static void receive_packet(struct device *dev, int len)
+{
+ int rlen;
+ elp_device *adapter = dev->priv;
+ unsigned long target;
+ struct sk_buff *skb;
+
+ rlen = (len + 1) & ~1;
+ skb = dev_alloc_skb(rlen + 2);
+
+ adapter->current_dma.copy_flag = 0;
+
+ if (!skb) {
+ printk("%s: memory squeeze, dropping packet\n", dev->name);
+ target = virt_to_bus(adapter->dma_buffer);
+ } else {
+ skb_reserve(skb, 2);
+ target = virt_to_bus(skb_put(skb, rlen));
+ if ((target + rlen) >= MAX_DMA_ADDRESS) {
+ target = virt_to_bus(adapter->dma_buffer);
+ adapter->current_dma.copy_flag = 1;
+ }
+ }
+ /* if this happens, we die */
+ if (set_bit(0, (void *) &adapter->dmaing))
+ printk("%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction);
+
+ adapter->current_dma.direction = 0;
+ adapter->current_dma.length = rlen;
+ adapter->current_dma.skb = skb;
+ adapter->current_dma.start_time = jiffies;
+
+ outb_control(inb_control(dev->base_addr) | DIR | TCEN | DMAE, dev->base_addr);
+
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, 0x04); /* dma read */
+ set_dma_addr(dev->dma, target);
+ set_dma_count(dev->dma, rlen);
+ enable_dma(dev->dma);
+
+ if (elp_debug >= 3) {
+ printk("%s: rx DMA transfer started\n", dev->name);
+ }
+ if (adapter->rx_active)
+ adapter->rx_active--;
+
+ if (!adapter->busy)
+ printk("%s: receive_packet called, busy not set.\n", dev->name);
+}
+
+/******************************************************
+ *
+ * interrupt handler
+ *
+ ******************************************************/
+
+static void elp_interrupt(int irq, void *dev_id, struct pt_regs *reg_ptr)
+{
+ int len;
+ int dlen;
+ int icount = 0;
+ struct device *dev;
+ elp_device *adapter;
+ int timeout;
+
+ if (irq < 0 || irq > 15) {
+ printk("elp_interrupt(): illegal IRQ number found in interrupt routine (%i)\n", irq);
+ return;
+ }
+ dev = irq2dev_map[irq];
+
+ if (dev == NULL) {
+ printk("elp_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ adapter = (elp_device *) dev->priv;
+
+ if (dev->interrupt) {
+ printk("%s: re-entering the interrupt handler!\n", dev->name);
+ return;
+ }
+ dev->interrupt = 1;
+
+ do {
+ /*
+ * has a DMA transfer finished?
+ */
+ if (inb_status(dev->base_addr) & DONE) {
+ if (!adapter->dmaing) {
+ printk("%s: phantom DMA completed\n", dev->name);
+ }
+ if (elp_debug >= 3) {
+ printk("%s: %s DMA complete, status %02x\n", dev->name, adapter->current_dma.direction ? "tx" : "rx", inb_status(dev->base_addr));
+ }
+
+ outb_control(inb_control(dev->base_addr) & ~(DMAE | TCEN | DIR), dev->base_addr);
+ if (adapter->current_dma.direction) {
+ dev_kfree_skb(adapter->current_dma.skb, FREE_WRITE);
+ } else {
+ struct sk_buff *skb = adapter->current_dma.skb;
+ if (skb) {
+ skb->dev = dev;
+ if (adapter->current_dma.copy_flag) {
+ memcpy(skb_put(skb, adapter->current_dma.length), adapter->dma_buffer, adapter->current_dma.length);
+ }
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+ }
+ adapter->dmaing = 0;
+ if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
+ int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
+ adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
+ if (elp_debug >= 2)
+ printk("%s: receiving backlogged packet (%d)\n", dev->name, t);
+ receive_packet(dev, t);
+ } else {
+ adapter->busy = 0;
+ }
+ } else {
+ /* has one timed out? */
+ check_dma(dev);
+ }
+
+ sti();
+
+ /*
+ * receive a PCB from the adapter
+ */
+ timeout = jiffies + 3;
+ while ((inb_status(dev->base_addr) & ACRF) != 0 && jiffies < timeout) {
+ if (receive_pcb(dev, &adapter->irx_pcb)) {
+ switch (adapter->irx_pcb.command) {
+ case 0:
+ break;
+ /*
+ * received a packet - this must be handled fast
+ */
+ case 0xff:
+ case CMD_RECEIVE_PACKET_COMPLETE:
+ /* if the device isn't open, don't pass packets up the stack */
+ if (dev->start == 0)
+ break;
+ cli();
+ len = adapter->irx_pcb.data.rcv_resp.pkt_len;
+ dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
+ if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
+ printk("%s: interrupt - packet not received correctly\n", dev->name);
+ sti();
+ } else {
+ if (elp_debug >= 3) {
+ sti();
+ printk("%s: interrupt - packet received of length %i (%i)\n", dev->name, len, dlen);
+ cli();
+ }
+ if (adapter->irx_pcb.command == 0xff) {
+ if (elp_debug >= 2)
+ printk("%s: adding packet to backlog (len = %d)\n", dev->name, dlen);
+ adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
+ adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
+ } else {
+ receive_packet(dev, dlen);
+ }
+ sti();
+ if (elp_debug >= 3)
+ printk("%s: packet received\n", dev->name);
+ }
+ break;
+
+ /*
+ * 82586 configured correctly
+ */
+ case CMD_CONFIGURE_82586_RESPONSE:
+ adapter->got[CMD_CONFIGURE_82586] = 1;
+ if (elp_debug >= 3)
+ printk("%s: interrupt - configure response received\n", dev->name);
+ break;
+
+ /*
+ * Adapter memory configuration
+ */
+ case CMD_CONFIGURE_ADAPTER_RESPONSE:
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
+ if (elp_debug >= 3)
+ printk("%s: Adapter memory configuration %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+ /*
+ * Multicast list loading
+ */
+ case CMD_LOAD_MULTICAST_RESPONSE:
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
+ if (elp_debug >= 3)
+ printk("%s: Multicast address list loading %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+ /*
+ * Station address setting
+ */
+ case CMD_SET_ADDRESS_RESPONSE:
+ adapter->got[CMD_SET_STATION_ADDRESS] = 1;
+ if (elp_debug >= 3)
+ printk("%s: Ethernet address setting %s.\n", dev->name,
+ adapter->irx_pcb.data.failed ? "failed" : "succeeded");
+ break;
+
+
+ /*
+ * received board statistics
+ */
+ case CMD_NETWORK_STATISTICS_RESPONSE:
+ adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
+ adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
+ adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
+ adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
+ adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
+ adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
+ adapter->got[CMD_NETWORK_STATISTICS] = 1;
+ if (elp_debug >= 3)
+ printk("%s: interrupt - statistics response received\n", dev->name);
+ break;
+
+ /*
+ * sent a packet
+ */
+ case CMD_TRANSMIT_PACKET_COMPLETE:
+ if (elp_debug >= 3)
+ printk("%s: interrupt - packet sent\n", dev->name);
+ if (dev->start == 0)
+ break;
+ switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
+ case 0xffff:
+ adapter->stats.tx_aborted_errors++;
+ printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
+ break;
+ case 0xfffe:
+ adapter->stats.tx_fifo_errors++;
+ printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
+ break;
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ break;
+
+ /*
+ * some unknown PCB
+ */
+ default:
+ printk(KERN_DEBUG "%s: unknown PCB received - %2.2x\n", dev->name, adapter->irx_pcb.command);
+ break;
+ }
+ } else {
+ printk("%s: failed to read PCB on interrupt\n", dev->name);
+ adapter_reset(dev);
+ }
+ }
+
+ } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));
+
+ prime_rx(dev);
+
+ /*
+ * indicate no longer in interrupt routine
+ */
+ dev->interrupt = 0;
+}
+
+
+/******************************************************
+ *
+ * open the board
+ *
+ ******************************************************/
+
+static int elp_open(struct device *dev)
+{
+ elp_device *adapter;
+
+ adapter = dev->priv;
+
+ if (elp_debug >= 3)
+ printk("%s: request to open device\n", dev->name);
+
+ /*
+ * make sure we actually found the device
+ */
+ if (adapter == NULL) {
+ printk("%s: Opening a non-existent physical device\n", dev->name);
+ return -EAGAIN;
+ }
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0x00, dev->base_addr);
+
+ /*
+ * clear any pending interrupts
+ */
+ inb_command(dev->base_addr);
+ adapter_reset(dev);
+
+ /*
+ * interrupt routine not entered
+ */
+ dev->interrupt = 0;
+
+ /*
+ * transmitter not busy
+ */
+ dev->tbusy = 0;
+
+ /*
+ * no receive PCBs active
+ */
+ adapter->rx_active = 0;
+
+ adapter->busy = 0;
+ adapter->send_pcb_semaphore = 0;
+ adapter->rx_backlog.in = 0;
+ adapter->rx_backlog.out = 0;
+
+ /*
+ * make sure we can find the device header given the interrupt number
+ */
+ irq2dev_map[dev->irq] = dev;
+
+ /*
+ * install our interrupt service routine
+ */
+ if (request_irq(dev->irq, &elp_interrupt, 0, "3c505", NULL)) {
+ irq2dev_map[dev->irq] = NULL;
+ return -EAGAIN;
+ }
+ if (request_dma(dev->dma, "3c505")) {
+ printk("%s: could not allocate DMA channel\n", dev->name);
+ return -EAGAIN;
+ }
+ adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
+ if (!adapter->dma_buffer) {
+ printk("Could not allocate DMA buffer\n");
+ }
+ adapter->dmaing = 0;
+
+ /*
+ * enable interrupts on the board
+ */
+ outb_control(CMDE, dev->base_addr);
+
+ /*
+ * device is now officially open!
+ */
+ dev->start = 1;
+
+ /*
+ * configure adapter memory: we need 10 multicast addresses, default==0
+ */
+ if (elp_debug >= 3)
+ printk("%s: sending 3c505 memory configuration command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+ adapter->tx_pcb.data.memconf.cmd_q = 10;
+ adapter->tx_pcb.data.memconf.rcv_q = 20;
+ adapter->tx_pcb.data.memconf.mcast = 10;
+ adapter->tx_pcb.data.memconf.frame = 20;
+ adapter->tx_pcb.data.memconf.rcv_b = 20;
+ adapter->tx_pcb.data.memconf.progs = 0;
+ adapter->tx_pcb.length = sizeof(struct Memconf);
+ adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send memory configuration command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && jiffies < timeout);
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ }
+
+
+ /*
+ * configure adapter to receive broadcast messages and wait for response
+ */
+ if (elp_debug >= 3)
+ printk("%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send 82586 configure command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && jiffies < timeout);
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ }
+
+ /* enable burst-mode DMA */
+ outb(0x1, dev->base_addr + PORT_AUXDMA);
+
+ /*
+ * queue receive commands to provide buffering
+ */
+ prime_rx(dev);
+ if (elp_debug >= 3)
+ printk("%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
+
+ MOD_INC_USE_COUNT;
+
+ return 0; /* Always succeed */
+}
+
+
+/******************************************************
+ *
+ * send a packet to the adapter
+ *
+ ******************************************************/
+
+static int send_packet(struct device *dev, struct sk_buff *skb)
+{
+ elp_device *adapter = dev->priv;
+ unsigned long target;
+
+ /*
+ * make sure the length is even and no shorter than 60 bytes
+ */
+ unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);
+
+ if (set_bit(0, (void *) &adapter->busy)) {
+ if (elp_debug >= 2)
+ printk("%s: transmit blocked\n", dev->name);
+ return FALSE;
+ }
+ adapter = dev->priv;
+
+ /*
+ * send the adapter a transmit packet command. Ignore segment and offset
+ * and make sure the length is even
+ */
+ adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
+ adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
+ adapter->tx_pcb.data.xmit_pkt.buf_ofs
+ = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */
+ adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
+
+ if (!send_pcb(dev, &adapter->tx_pcb)) {
+ adapter->busy = 0;
+ return FALSE;
+ }
+ /* if this happens, we die */
+ if (set_bit(0, (void *) &adapter->dmaing))
+ printk("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
+
+ adapter->current_dma.direction = 1;
+ adapter->current_dma.start_time = jiffies;
+
+ target = virt_to_bus(skb->data);
+ if ((target + nlen) >= MAX_DMA_ADDRESS) {
+ memcpy(adapter->dma_buffer, skb->data, nlen);
+ target = virt_to_bus(adapter->dma_buffer);
+ }
+ adapter->current_dma.skb = skb;
+ cli();
+ disable_dma(dev->dma);
+ clear_dma_ff(dev->dma);
+ set_dma_mode(dev->dma, 0x08); /* dma memory -> io */
+ set_dma_addr(dev->dma, target);
+ set_dma_count(dev->dma, nlen);
+ enable_dma(dev->dma);
+ outb_control(inb_control(dev->base_addr) | DMAE | TCEN, dev->base_addr);
+ if (elp_debug >= 3)
+ printk("%s: DMA transfer started\n", dev->name);
+
+ return TRUE;
+}
+
+/******************************************************
+ *
+ * start the transmitter
+ * return 0 if sent OK, else return 1
+ *
+ ******************************************************/
+
+static int elp_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ if (dev->interrupt) {
+ printk("%s: start_xmit aborted (in irq)\n", dev->name);
+ return 1;
+ }
+
+ check_dma(dev);
+
+ /*
+ * if the transmitter is still busy, we have a transmit timeout...
+ */
+ if (dev->tbusy) {
+ elp_device *adapter = dev->priv;
+ int tickssofar = jiffies - dev->trans_start;
+ int stat;
+
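+		/* Allow roughly ten seconds (1000 ticks at HZ=100) before
+		   treating this as a transmit timeout. */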
+ if (tickssofar < 1000)
+ return 1;
+
+ stat = inb_status(dev->base_addr);
+ printk("%s: transmit timed out, lost %s?\n", dev->name, (stat & ACRF) ? "interrupt" : "command");
+ if (elp_debug >= 1)
+ printk("%s: status %#02x\n", dev->name, stat);
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ adapter->stats.tx_dropped++;
+ }
+
+ /* Some upper layer thinks we've missed a tx-done interrupt */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (elp_debug >= 3)
+ printk("%s: request to send packet of length %d\n", dev->name, (int) skb->len);
+
+ if (set_bit(0, (void *) &dev->tbusy)) {
+ printk("%s: transmitter access conflict\n", dev->name);
+ return 1;
+ }
+ /*
+ * send the packet at skb->data for skb->len
+ */
+ if (!send_packet(dev, skb)) {
+ if (elp_debug >= 2) {
+ printk("%s: failed to transmit packet\n", dev->name);
+ }
+ dev->tbusy = 0;
+ return 1;
+ }
+ if (elp_debug >= 3)
+ printk("%s: packet of length %d sent\n", dev->name, (int) skb->len);
+
+ /*
+ * start the transmit timeout
+ */
+ dev->trans_start = jiffies;
+
+ prime_rx(dev);
+
+ return 0;
+}
+
+/******************************************************
+ *
+ * return statistics on the board
+ *
+ ******************************************************/
+
+static struct enet_statistics *elp_get_stats(struct device *dev)
+{
+ elp_device *adapter = (elp_device *) dev->priv;
+
+ if (elp_debug >= 3)
+ printk("%s: request for stats\n", dev->name);
+
+	/* If the device is closed, just return the latest stats we have
+	   - we cannot query the adapter without interrupts */
+ if (!dev->start)
+ return &adapter->stats;
+
+ /* send a get statistics command to the board */
+ adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
+ adapter->tx_pcb.length = 0;
+ adapter->got[CMD_NETWORK_STATISTICS] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send get statistics command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && jiffies < timeout);
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ return &adapter->stats;
+ }
+ }
+
+ /* statistics are now up to date */
+ return &adapter->stats;
+}
+
+/******************************************************
+ *
+ * close the board
+ *
+ ******************************************************/
+
+static int elp_close(struct device *dev)
+{
+ elp_device *adapter;
+
+ adapter = dev->priv;
+
+ if (elp_debug >= 3)
+ printk("%s: request to close device\n", dev->name);
+
+	/* Someone may request the device statistics even when the interface
+	 * is closed. The following call updates the statistics structure in
+	 * the driver, so we can still report current values.
+ */
+ (void) elp_get_stats(dev);
+
+ /*
+ * disable interrupts on the board
+ */
+ outb_control(0x00, dev->base_addr);
+
+ /*
+ * flag transmitter as busy (i.e. not available)
+ */
+ dev->tbusy = 1;
+
+ /*
+ * indicate device is closed
+ */
+ dev->start = 0;
+
+ /*
+ * release the IRQ
+ */
+ free_irq(dev->irq, NULL);
+
+ /*
+ * and we no longer have to map irq to dev either
+ */
+ irq2dev_map[dev->irq] = 0;
+
+ free_dma(dev->dma);
+ free_pages((unsigned long) adapter->dma_buffer, __get_order(DMA_BUFFER_SIZE));
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+/************************************************************
+ *
+ * Set multicast list
+ * dev->mc_count == 0: clear mc_list
+ * IFF_PROMISC or IFF_ALLMULTI set: receive promiscuously
+ * otherwise: load mc_list
+ *
+ ************************************************************/
+
+static void elp_set_mc_list(struct device *dev)
+{
+ elp_device *adapter = (elp_device *) dev->priv;
+ struct dev_mc_list *dmi = dev->mc_list;
+ int i;
+
+ if (elp_debug >= 3)
+ printk("%s: request to set multicast list\n", dev->name);
+
+ if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ /* send a "load multicast list" command to the board, max 10 addrs/cmd */
+ /* if num_addrs==0 the list will be cleared */
+ adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
+ adapter->tx_pcb.length = 6 * dev->mc_count;
+ for (i = 0; i < dev->mc_count; i++) {
+ memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
+ dmi = dmi->next;
+ }
+ adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send set_multicast command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && jiffies < timeout);
+ if (jiffies >= timeout) {
+ TIMEOUT_MSG(__LINE__);
+ }
+ }
+ if (dev->mc_count)
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
+ else /* num_addrs == 0 */
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
+ } else
+ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
+ /*
+ * configure adapter to receive messages (as specified above)
+ * and wait for response
+ */
+ if (elp_debug >= 3)
+ printk("%s: sending 82586 configure command\n", dev->name);
+ adapter->tx_pcb.command = CMD_CONFIGURE_82586;
+ adapter->tx_pcb.length = 2;
+ adapter->got[CMD_CONFIGURE_82586] = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb))
+ printk("%s: couldn't send 82586 configure command\n", dev->name);
+ else {
+ int timeout = jiffies + TIMEOUT;
+ while (adapter->got[CMD_CONFIGURE_82586] == 0 && jiffies < timeout);
+ if (jiffies >= timeout)
+ TIMEOUT_MSG(__LINE__);
+ }
+}
+
+/******************************************************
+ *
+ * initialise Etherlink Plus board
+ *
+ ******************************************************/
+
+static void elp_init(struct device *dev)
+{
+ elp_device *adapter = dev->priv;
+
+ /*
+ * set ptrs to various functions
+ */
+ dev->open = elp_open; /* local */
+ dev->stop = elp_close; /* local */
+ dev->get_stats = elp_get_stats; /* local */
+ dev->hard_start_xmit = elp_start_xmit; /* local */
+ dev->set_multicast_list = elp_set_mc_list; /* local */
+
+ /* Setup the generic properties */
+ ether_setup(dev);
+
+ /*
+ * setup ptr to adapter specific information
+ */
+ memset(&(adapter->stats), 0, sizeof(struct enet_statistics));
+
+ /*
+ * memory information
+ */
+ dev->mem_start = dev->mem_end = dev->rmem_end = dev->rmem_start = 0;
+}
+
+/************************************************************
+ *
+ * A couple of tests to see if there's a 3C505 or not
+ * Called only by elp_autodetect
+ ************************************************************/
+
+static int elp_sense(struct device *dev)
+{
+ int timeout;
+ int addr = dev->base_addr;
+ const char *name = dev->name;
+ long flags;
+ byte orig_HCR, orig_HSR;
+
+ if (check_region(addr, 0xf))
+ return -1;
+
+ orig_HCR = inb_control(addr);
+ orig_HSR = inb_status(addr);
+
+ if (elp_debug > 0)
+ printk(search_msg, name, addr);
+
+ if (((orig_HCR == 0xff) && (orig_HSR == 0xff)) ||
+ ((orig_HCR & DIR) != (orig_HSR & DIR))) {
+ if (elp_debug > 0)
+ printk(notfound_msg, 1);
+ return -1; /* It can't be 3c505 if HCR.DIR != HSR.DIR */
+ }
+ /* Enable interrupts - we need timers! */
+ save_flags(flags);
+ sti();
+
+ /* Wait for a while; the adapter may still be booting up */
+ if (elp_debug > 0)
+ printk("%s", stilllooking_msg);
+ if (orig_HCR & DIR) {
+ /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
+ outb_control(orig_HCR & ~DIR, addr);
+ timeout = jiffies + 30;
+ while (jiffies < timeout);
+ restore_flags(flags);
+ if (inb_status(addr) & DIR) {
+ outb_control(orig_HCR, addr);
+ if (elp_debug > 0)
+ printk(notfound_msg, 2);
+ return -1;
+ }
+ } else {
+ /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
+ outb_control(orig_HCR | DIR, addr);
+ timeout = jiffies + 30;
+ while (jiffies < timeout);
+ restore_flags(flags);
+ if (!(inb_status(addr) & DIR)) {
+ outb_control(orig_HCR, addr);
+ if (elp_debug > 0)
+ printk(notfound_msg, 3);
+ return -1;
+ }
+ }
+ /*
+ * It certainly looks like a 3c505. If it has DMA enabled, it needs
+ * a hard reset. Also, do a hard reset if selected at compile time.
+ */
+ if (elp_debug > 0)
+ printk("%s", found_msg);
+
+ return 0;
+}
+
+/*************************************************************
+ *
+ * Search through addr_list[] and try to find a 3C505
+ * Called only by eplus_probe
+ *************************************************************/
+
+static int elp_autodetect(struct device *dev)
+{
+ int idx = 0;
+
+	/* if a base address is set, then only check that address;
+	   otherwise, run through the table */
+ if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ } else
+ while ((dev->base_addr = addr_list[idx++])) {
+ if (elp_sense(dev) == 0)
+ return dev->base_addr;
+ }
+
+ /* could not find an adapter */
+ if (elp_debug > 0)
+ printk(couldnot_msg, dev->name);
+
+ return 0; /* Because of this, the layer above will return -ENODEV */
+}
+
+
+/******************************************************
+ *
+ * probe for an Etherlink Plus board at the specified address
+ *
+ ******************************************************/
+
+/* There are three situations we need to be able to detect here:
+
+ * a) the card is idle
+ * b) the card is still booting up
+ * c) the card is stuck in a strange state (some DOS drivers do this)
+ *
+ * In case (a), all is well. In case (b), we wait 10 seconds to see if the
+ * card finishes booting, and carry on if so. In case (c), we do a hard reset,
+ * loop round, and hope for the best.
+ *
+ * This is all very unpleasant, but hopefully avoids the problems with the old
+ * probe code (which had a 15-second delay if the card was idle, and didn't
+ * work at all if it was in a weird state).
+ */
+
+int elplus_probe(struct device *dev)
+{
+ elp_device *adapter;
+ int i, tries, tries1, timeout, okay;
+
+ /*
+ * setup adapter structure
+ */
+
+ dev->base_addr = elp_autodetect(dev);
+ if (!(dev->base_addr))
+ return -ENODEV;
+
+ /*
+ * setup ptr to adapter specific information
+ */
+ adapter = (elp_device *) (dev->priv = kmalloc(sizeof(elp_device), GFP_KERNEL));
+ if (adapter == NULL) {
+ printk("%s: out of memory\n", dev->name);
+ return -ENODEV;
+ }
+
+ for (tries1 = 0; tries1 < 3; tries1++) {
+ outb_control((inb_control(dev->base_addr) | CMDE) & ~DIR, dev->base_addr);
+ /* First try to write just one byte, to see if the card is
+ * responding at all normally.
+ */
+ timeout = jiffies + 5;
+ okay = 0;
+ while (jiffies < timeout && !(inb_status(dev->base_addr) & HCRE));
+ if ((inb_status(dev->base_addr) & HCRE)) {
+ outb_command(0, dev->base_addr); /* send a spurious byte */
+ timeout = jiffies + 5;
+ while (jiffies < timeout && !(inb_status(dev->base_addr) & HCRE));
+ if (inb_status(dev->base_addr) & HCRE)
+ okay = 1;
+ }
+ if (!okay) {
+ /* Nope, it's ignoring the command register. This means that
+ * either it's still booting up, or it's died.
+ */
+ printk("%s: command register wouldn't drain, ", dev->name);
+ if ((inb_status(dev->base_addr) & 7) == 3) {
+ /* If the adapter status is 3, it *could* still be booting.
+ * Give it the benefit of the doubt for 10 seconds.
+ */
+ printk("assuming 3c505 still starting\n");
+ timeout = jiffies + 10 * HZ;
+ while (jiffies < timeout && (inb_status(dev->base_addr) & 7));
+ if (inb_status(dev->base_addr) & 7) {
+ printk("%s: 3c505 failed to start\n", dev->name);
+ } else {
+ okay = 1; /* It started */
+ }
+ } else {
+ /* Otherwise, it must just be in a strange state. We probably
+ * need to kick it.
+ */
+ printk("3c505 is sulking\n");
+ }
+ }
+ for (tries = 0; tries < 5 && okay; tries++) {
+
+ /*
+ * Try to set the Ethernet address, to make sure that the board
+ * is working.
+ */
+ adapter->tx_pcb.command = CMD_STATION_ADDRESS;
+ adapter->tx_pcb.length = 0;
+ autoirq_setup(0);
+ if (!send_pcb(dev, &adapter->tx_pcb)) {
+ printk("%s: could not send first PCB\n", dev->name);
+ autoirq_report(0);
+ continue;
+ }
+ if (!receive_pcb(dev, &adapter->rx_pcb)) {
+ printk("%s: could not read first PCB\n", dev->name);
+ autoirq_report(0);
+ continue;
+ }
+ if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
+ (adapter->rx_pcb.length != 6)) {
+ printk("%s: first PCB wrong (%d, %d)\n", dev->name, adapter->rx_pcb.command, adapter->rx_pcb.length);
+ autoirq_report(0);
+ continue;
+ }
+ goto okay;
+ }
+ /* It's broken. Do a hard reset to re-initialise the board,
+ * and try again.
+ */
+ printk(KERN_INFO "%s: resetting adapter\n", dev->name);
+ outb_control(inb_control(dev->base_addr) | FLSH | ATTN, dev->base_addr);
+ outb_control(inb_control(dev->base_addr) & ~(FLSH | ATTN), dev->base_addr);
+ }
+ printk("%s: failed to initialise 3c505\n", dev->name);
+ return -ENODEV;
+
+ okay:
+ if (dev->irq) { /* Is there a preset IRQ? */
+ int rpt = autoirq_report(0);
+ if (dev->irq != rpt) {
+ printk("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
+ return -ENODEV;
+ }
+ /* if dev->irq == autoirq_report(0), all is well */
+ } else /* No preset IRQ; just use what we can detect */
+ dev->irq = autoirq_report(0);
+ switch (dev->irq) { /* Legal, sane? */
+ case 0:
+ printk("%s: No IRQ reported by autoirq_report().\n", dev->name);
+ printk("%s: Check the jumpers of your 3c505 board.\n", dev->name);
+ return -ENODEV;
+ case 1:
+ case 6:
+ case 8:
+ case 13:
+ printk("%s: Impossible IRQ %d reported by autoirq_report().\n",
+ dev->name, dev->irq);
+ return -ENODEV;
+ }
+ /*
+ * Now we have the IRQ number so we can disable the interrupts from
+ * the board until the board is opened.
+ */
+ outb_control(inb_control(dev->base_addr) & ~CMDE, dev->base_addr);
+
+ /*
+ * copy ethernet address into structure
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i];
+
+ /* set up the DMA channel */
+ dev->dma = ELP_DMA;
+
+ /*
+ * print remainder of startup message
+ */
+ printk("%s: 3c505 at %#lx, irq %d, dma %d, ",
+ dev->name, dev->base_addr, dev->irq, dev->dma);
+ printk("addr %02x:%02x:%02x:%02x:%02x:%02x, ",
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /*
+ * read more information from the adapter
+ */
+
+ adapter->tx_pcb.command = CMD_ADAPTER_INFO;
+ adapter->tx_pcb.length = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb) ||
+ !receive_pcb(dev, &adapter->rx_pcb) ||
+ (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
+ (adapter->rx_pcb.length != 10)) {
+ printk("%s: not responding to second PCB\n", dev->name);
+ }
+ printk("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
+
+ /*
+ * reconfigure the adapter memory to better suit our purposes
+ */
+ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
+ adapter->tx_pcb.length = 12;
+ adapter->tx_pcb.data.memconf.cmd_q = 8;
+ adapter->tx_pcb.data.memconf.rcv_q = 8;
+ adapter->tx_pcb.data.memconf.mcast = 10;
+ adapter->tx_pcb.data.memconf.frame = 10;
+ adapter->tx_pcb.data.memconf.rcv_b = 10;
+ adapter->tx_pcb.data.memconf.progs = 0;
+ if (!send_pcb(dev, &adapter->tx_pcb) ||
+ !receive_pcb(dev, &adapter->rx_pcb) ||
+ (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
+ (adapter->rx_pcb.length != 2)) {
+ printk("%s: could not configure adapter memory\n", dev->name);
+ }
+ if (adapter->rx_pcb.data.configure) {
+ printk("%s: adapter configuration failed\n", dev->name);
+ }
+ /*
+ * and reserve the address region
+ */
+ request_region(dev->base_addr, ELP_IO_EXTENT, "3c505");
+
+ /*
+ * initialise the device
+ */
+ elp_init(dev);
+
+ return 0;
+}
+
+#ifdef MODULE
+static char devicename[9] = {0,};
+static struct device dev_3c505 =
+{
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, elplus_probe};
+
+int io = 0x300;
+int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("3c505: You should not use auto-probing with insmod!\n");
+ dev_3c505.base_addr = io;
+ dev_3c505.irq = irq;
+ if (register_netdev(&dev_3c505) != 0) {
+ return -EIO;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_netdev(&dev_3c505);
+ kfree(dev_3c505.priv);
+ dev_3c505.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_3c505.base_addr, ELP_IO_EXTENT);
+}
+
+#endif /* MODULE */
+
+
+/*
+ * Local Variables:
+ * c-file-style: "linux"
+ * tab-width: 8
+ * compile-command: "gcc -D__KERNEL__ -I/discs/bibble/src/linux-1.3.69/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strength-reduce -pipe -m486 -DCPU=486 -DMODULE -c 3c505.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c505.h b/linux/src/drivers/net/3c505.h
new file mode 100644
index 0000000..0598ca2
--- /dev/null
+++ b/linux/src/drivers/net/3c505.h
@@ -0,0 +1,245 @@
+/*****************************************************************
+ *
+ * defines for 3Com Etherlink Plus adapter
+ *
+ *****************************************************************/
+
+/*
+ * I/O register offsets
+ */
+#define PORT_COMMAND 0x00 /* read/write, 8-bit */
+#define PORT_STATUS 0x02 /* read only, 8-bit */
+#define PORT_AUXDMA 0x02 /* write only, 8-bit */
+#define PORT_DATA 0x04 /* read/write, 16-bit */
+#define PORT_CONTROL 0x06 /* read/write, 8-bit */
+
+#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
+
+/*
+ * host control registers bits
+ */
+#define ATTN 0x80 /* attention */
+#define FLSH 0x40 /* flush data register */
+#define DMAE 0x20 /* DMA enable */
+#define DIR 0x10 /* direction */
+#define TCEN 0x08 /* terminal count interrupt enable */
+#define CMDE 0x04 /* command register interrupt enable */
+#define HSF2 0x02 /* host status flag 2 */
+#define HSF1 0x01 /* host status flag 1 */
+
+/*
+ * combinations of HSF flags used for PCB transmission
+ */
+#define HSF_PCB_ACK HSF1
+#define HSF_PCB_NAK HSF2
+#define HSF_PCB_END (HSF2|HSF1)
+#define HSF_PCB_MASK (HSF2|HSF1)
+
+/*
+ * host status register bits
+ */
+#define HRDY 0x80 /* data register ready */
+#define HCRE 0x40 /* command register empty */
+#define ACRF 0x20 /* adapter command register full */
+/* #define DIR 0x10 direction - same as in control register */
+#define DONE 0x08 /* DMA done */
+#define ASF3 0x04 /* adapter status flag 3 */
+#define ASF2 0x02 /* adapter status flag 2 */
+#define ASF1 0x01 /* adapter status flag 1 */
+
+/*
+ * combinations of ASF flags used for PCB reception
+ */
+#define ASF_PCB_ACK ASF1
+#define ASF_PCB_NAK ASF2
+#define ASF_PCB_END (ASF2|ASF1)
+#define ASF_PCB_MASK (ASF2|ASF1)
+
+/*
+ * host aux DMA register bits
+ */
+#define DMA_BRST 0x01 /* DMA burst */
+
+/*
+ * maximum amount of data allowed in a PCB
+ */
+#define MAX_PCB_DATA 62
+
+/*****************************************************************
+ *
+ * timeout value
+ * this is a rough value used for loops to stop them from
+ * locking up the whole machine in the case of failure or
+ * error conditions
+ *
+ *****************************************************************/
+
+#define TIMEOUT 300
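+
+/* used by 3c505.c in busy-wait loops of the form
+
+	int timeout = jiffies + TIMEOUT;
+	while (!adapter->got[cmd] && jiffies < timeout)
+		;
+ */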
+
+/*****************************************************************
+ *
+ * PCB commands
+ *
+ *****************************************************************/
+
+enum {
+ /*
+ * host PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
+ CMD_CONFIGURE_82586 = 0x02,
+ CMD_STATION_ADDRESS = 0x03,
+ CMD_DMA_DOWNLOAD = 0x04,
+ CMD_DMA_UPLOAD = 0x05,
+ CMD_PIO_DOWNLOAD = 0x06,
+ CMD_PIO_UPLOAD = 0x07,
+ CMD_RECEIVE_PACKET = 0x08,
+ CMD_TRANSMIT_PACKET = 0x09,
+ CMD_NETWORK_STATISTICS = 0x0a,
+ CMD_LOAD_MULTICAST_LIST = 0x0b,
+ CMD_CLEAR_PROGRAM = 0x0c,
+ CMD_DOWNLOAD_PROGRAM = 0x0d,
+ CMD_EXECUTE_PROGRAM = 0x0e,
+ CMD_SELF_TEST = 0x0f,
+ CMD_SET_STATION_ADDRESS = 0x10,
+ CMD_ADAPTER_INFO = 0x11,
+ NUM_TRANSMIT_CMDS,
+
+ /*
+ * adapter PCB commands
+ */
+ CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
+ CMD_CONFIGURE_82586_RESPONSE = 0x32,
+ CMD_ADDRESS_RESPONSE = 0x33,
+ CMD_DOWNLOAD_DATA_REQUEST = 0x34,
+ CMD_UPLOAD_DATA_REQUEST = 0x35,
+ CMD_RECEIVE_PACKET_COMPLETE = 0x38,
+ CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
+ CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
+ CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
+ CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
+ CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
+ CMD_EXECUTE_RESPONSE = 0x3e,
+ CMD_SELF_TEST_RESPONSE = 0x3f,
+ CMD_SET_ADDRESS_RESPONSE = 0x40,
+ CMD_ADAPTER_INFO_RESPONSE = 0x41
+};
+
+/* Definitions for the PCB data structure */
+
+/* Data units */
+typedef unsigned char byte;
+typedef unsigned short int word;
+typedef unsigned long int dword;
+
+/* Data structures */
+struct Memconf {
+ word cmd_q,
+ rcv_q,
+ mcast,
+ frame,
+ rcv_b,
+ progs;
+};
+
+struct Rcv_pkt {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ timeout;
+};
+
+struct Xmit_pkt {
+ word buf_ofs,
+ buf_seg,
+ pkt_len;
+};
+
+struct Rcv_resp {
+ word buf_ofs,
+ buf_seg,
+ buf_len,
+ pkt_len,
+ timeout,
+ status;
+ dword timetag;
+};
+
+struct Xmit_resp {
+ word buf_ofs,
+ buf_seg,
+ c_stat,
+ status;
+};
+
+
+struct Netstat {
+ dword tot_recv,
+ tot_xmit;
+ word err_CRC,
+ err_align,
+ err_res,
+ err_ovrrun;
+};
+
+
+struct Selftest {
+ word error;
+ union {
+ word ROM_cksum;
+ struct {
+ word ofs, seg;
+ } RAM;
+ word i82586;
+ } failure;
+};
+
+struct Info {
+ byte minor_vers,
+ major_vers;
+ word ROM_cksum,
+ RAM_sz,
+ free_ofs,
+ free_seg;
+};
+
+struct Memdump {
+ word size,
+ off,
+ seg;
+};
+
+/*
+Primary Command Block. The most important data structure. All communication
+between the host and the adapter is done with these. (Except for the actual
+ethernet data, which has different packaging.)
+*/
+typedef struct {
+ byte command;
+ byte length;
+ union {
+ struct Memconf memconf;
+ word configure;
+ struct Rcv_pkt rcv_pkt;
+ struct Xmit_pkt xmit_pkt;
+ byte multicast[10][6];
+ byte eth_addr[6];
+ byte failed;
+ struct Rcv_resp rcv_resp;
+ struct Xmit_resp xmit_resp;
+ struct Netstat netstat;
+ struct Selftest selftest;
+ struct Info info;
+ struct Memdump memdump;
+ byte raw[62];
+ } data;
+} pcb_struct;
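+
+/*
+ * A minimal sketch (mirroring the code in 3c505.c) of how a PCB exchange
+ * typically looks: fill in tx_pcb, send it with send_pcb(), then read the
+ * adapter's reply back into rx_pcb with receive_pcb():
+ *
+ *	adapter->tx_pcb.command = CMD_ADAPTER_INFO;
+ *	adapter->tx_pcb.length = 0;
+ *	if (send_pcb(dev, &adapter->tx_pcb) &&
+ *	    receive_pcb(dev, &adapter->rx_pcb) &&
+ *	    adapter->rx_pcb.command == CMD_ADAPTER_INFO_RESPONSE)
+ *		printk("rev %d.%d\n", adapter->rx_pcb.data.info.major_vers,
+ *		       adapter->rx_pcb.data.info.minor_vers);
+ */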
+
+/* These defines for 'configure' */
+#define RECV_STATION 0x00
+#define RECV_BROAD 0x01
+#define RECV_MULTI 0x02
+#define RECV_PROMISC 0x04
+#define NO_LOOPBACK 0x00
+#define INT_LOOPBACK 0x08
+#define EXT_LOOPBACK 0x10
diff --git a/linux/src/drivers/net/3c507.c b/linux/src/drivers/net/3c507.c
new file mode 100644
index 0000000..58ba2d7
--- /dev/null
+++ b/linux/src/drivers/net/3c507.c
@@ -0,0 +1,924 @@
+/* 3c507.c: An EtherLink16 device driver for Linux. */
+/*
+ Written 1993,1994 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings)
+ and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
+ Mark Salazar <leslie@access.digex.net> made the changes for cards with
+ only 16K packet buffers.
+
+ Things remaining to do:
+ Verify that the tx and rx buffers don't have fencepost errors.
+ Move the theory of operation and memory map documentation.
+ The statistics need to be updated correctly.
+*/
+
+static const char *version =
+ "3c507.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+
+/*
+ Sources:
+   This driver wouldn't have been written without the availability of the
+ Crynwr driver source code. It provided a known-working implementation
+ that filled in the gaping holes of the Intel documentation. Three cheers
+ for Russ Nelson.
+
+ Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
+ info that the casual reader might think that it documents the i82586 :-<.
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+
+
+/* use 0 for production, 1 for verification, 2..7 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* A zero-terminated list of common I/O addresses to be probed. */
+static unsigned int netcard_portlist[] =
+ { 0x300, 0x320, 0x340, 0x280, 0};
+
+static void init_rx_bufs(struct device *dev);
+
+/*
+ Details of the i82586.
+
+ You'll really need the databook to understand the details of this part,
+ but the outline is that the i82586 has two separate processing units.
+ Both are started from a list of three configuration tables, of which only
+ the last, the System Control Block (SCB), is used after reset-time. The SCB
+ has the following fields:
+ Status word
+ Command word
+ Tx/Command block addr.
+ Rx block addr.
+ The command word accepts the following controls for the Tx and Rx units:
+ */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
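+
+/* A rough sketch of how these are used later in this file: the command word
+   is written into the SCB command slot in shared memory and then the board's
+   channel-attention port is strobed, e.g.
+
+	shmem[iSCB_CMD>>1] = CUC_START | RX_START;
+	outb(0, ioaddr + SIGNAL_CA);
+
+   (iSCB_CMD and SIGNAL_CA are defined further down.) */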
+
+/* The Rx unit uses a list of frame descriptors and a list of data buffer
+ descriptors. We use full-sized (1518 byte) data buffers, so there is
+ a one-to-one pairing of frame descriptors to buffer descriptors.
+
+ The Tx ("command") unit executes a list of commands that look like:
+ Status word Written by the 82586 when the command is done.
+ Command word Command in lower 3 bits, post-command action in upper 3
+ Link word The address of the next command.
+ Parameters (as needed).
+
+ Some definitions related to the Command Word are:
+ */
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
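+
+/* Laid out as a C struct, a command block in adapter memory would look
+   roughly like the following (the driver actually pokes these fields as
+   raw ushorts through the shared-memory window):
+
+	struct cmd_block {
+		ushort status;		-- written by the 82586 when done
+		ushort command;		-- one of the commands above, plus CMD_EOL etc.
+		ushort link;		-- offset of the next command block
+					-- then any command-specific parameters
+	};
+ */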
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ int last_restart;
+ ushort rx_head;
+ ushort rx_tail;
+ ushort tx_head;
+ ushort tx_cmd_link;
+ ushort tx_reap;
+};
+
+/*
+ Details of the EtherLink16 Implementation
+ The 3c507 is a generic shared-memory i82586 implementation.
+ The host can map 16K, 32K, 48K, or 64K of the 64K memory into
+ 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
+ */
+
+/* Offsets from the base I/O address. */
+#define SA_DATA 0 /* Station address data, or 3Com signature. */
+#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
+#define RESET_IRQ 10 /* Reset the latched IRQ line. */
+#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
+#define ROM_CONFIG 13
+#define MEM_CONFIG 14
+#define IRQ_CONFIG 15
+#define EL16_IO_EXTENT 16
+
+/* The ID port is used at boot-time to locate the ethercard. */
+#define ID_PORT 0x100
+
+/* Offsets to registers in the mailbox (SCB). */
+#define iSCB_STATUS 0x8
+#define iSCB_CMD 0xA
+#define iSCB_CBL 0xC /* Command BLock offset. */
+#define iSCB_RFA 0xE /* Rx Frame Area offset. */
+
+/* Since the 3c507 maps the shared memory window so that the last byte is
+ at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
+ 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
+   We can account for this by setting the 'SCB Base' entry in the ISCP table
+   below for all the 16-bit offset addresses, and also adding the 'SCB Base'
+   value to all 24-bit physical addresses (in the SCP table and the TX and RX
+ Buffer Descriptors).
+ -Mark
+ */
+#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
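+
+/* e.g. with a 16K window (dev->mem_end - dev->mem_start == 0x4000) SCB_BASE
+   is 0xC000: host offset 0 in the window corresponds to 82586 address 0xC000,
+   and the last byte of the window lands at 82586 address 0xFFFF as required. */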
+
+/*
+ What follows in 'init_words[]' is the "program" that is downloaded to the
+ 82586 memory. It's mostly tables and command blocks, and starts at the
+ reset address 0xfffff6. This is designed to be similar to the EtherExpress,
+ thus the unusual location of the SCB at 0x0008.
+
+ Even with the additional "don't care" values, doing it this way takes less
+ program space than initializing the individual tables, and I feel it's much
+ cleaner.
+
+   The databook is particularly useless for the first two structures; I had
+   to use the Crynwr driver as an example.
+
+ The memory setup is as follows:
+ */
+
+#define CONFIG_CMD 0x0018
+#define SET_SA_CMD 0x0024
+#define SA_OFFSET 0x002A
+#define IDLELOOP 0x30
+#define TDR_CMD 0x38
+#define TDR_TIME 0x3C
+#define DUMP_CMD 0x40
+#define DIAG_CMD 0x48
+#define SET_MC_CMD 0x4E
+#define DUMP_DATA	0x56	/* A 170 byte buffer shared by the Dump and Set-MC commands. */
+
+#define TX_BUF_START 0x0100
+#define NUM_TX_BUFS 4
+#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
+
+#define RX_BUF_START 0x2000
+#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
+#define RX_BUF_END (dev->mem_end - dev->mem_start)
+
+/*
+ That's it: only 86 bytes to set up the beast, including every extra
+ command available. The 170 byte buffer at DUMP_DATA is shared between the
+ Dump command (called only by the diagnostic program) and the SetMulticastList
+ command.
+
+ To complete the memory setup you only have to write the station address at
+ SA_OFFSET and create the Tx & Rx buffer lists.
+
+   The Tx command chain and buffer list is set up as follows:
+ A Tx command table, with the data buffer pointing to...
+ A Tx data buffer descriptor. The packet is in a single buffer, rather than
+ chaining together several smaller buffers.
+ A NoOp command, which initially points to itself,
+ And the packet data.
+
+ A transmit is done by filling in the Tx command table and data buffer,
+ re-writing the NoOp command, and finally changing the offset of the last
+ command to point to the current Tx command. When the Tx command is finished,
+   it jumps to the NoOp, where it loops until the next Tx command changes the
+   "link offset" in the NoOp. This way the 82586 never has to go through the
+   slow restart sequence.
+
+   The Rx buffer list is set up in the obvious ring structure. We have enough
+   memory (and low enough interrupt latency) that we can avoid the complicated
+   Rx buffer linked lists by always associating a full-size Rx data buffer with
+   each Rx data frame.
+
+   I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
+   use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
+
+ */
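+
+/* A rough summary of the resulting 82586 memory layout (offsets as seen by
+   the 82586; see SCB_BASE above for the host mapping):
+
+	0x0008			SCB status/command mailbox
+	0x0018 .. DUMP_DATA	prebuilt command blocks and the 170 byte
+				dump/set-multicast buffer from init_words[]
+	TX_BUF_START (0x0100)	four Tx blocks of TX_BUF_SIZE bytes each
+	RX_BUF_START (0x2000)	ring of Rx frame + data buffers up to RX_BUF_END
+	top 10 bytes		SCP, copied to dev->mem_end - 10 (alias of the
+				82586 reset vector at 0xfffff6)
+ */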
+
+unsigned short init_words[] = {
+ /* System Configuration Pointer (SCP). */
+ 0x0000, /* Set bus size to 16 bits. */
+ 0,0, /* pad words. */
+ 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
+
+ /* Intermediate System Configuration Pointer (ISCP). */
+ 0x0001, /* Status word that's cleared when init is done. */
+ 0x0008,0,0, /* SCB offset, (skip, skip) */
+
+ /* System Control Block (SCB). */
+ 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
+ CONFIG_CMD, /* Command list pointer, points to Configure. */
+ RX_BUF_START, /* Rx block list. */
+ 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
+
+ /* 0x0018: Configure command. Change to put MAC data with packet. */
+ 0, CmdConfigure, /* Status, command. */
+ SET_SA_CMD, /* Next command is Set Station Addr. */
+ 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
+ 0x2e40, /* Magic values, including MAC data location. */
+ 0, /* Unused pad word. */
+
+ /* 0x0024: Setup station address command. */
+ 0, CmdSASetup,
+ SET_MC_CMD, /* Next command. */
+ 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
+
+ /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
+ 0, CmdNOp, IDLELOOP, 0 /* pad */,
+
+	/* 0x0038: An unused Time-Domain Reflectometer command. */
+ 0, CmdTDR, IDLELOOP, 0,
+
+ /* 0x0040: An unused Dump State command. */
+ 0, CmdDump, IDLELOOP, DUMP_DATA,
+
+ /* 0x0048: An unused Diagnose command. */
+ 0, CmdDiagnose, IDLELOOP,
+
+ /* 0x004E: An empty set-multicast-list command. */
+ 0, CmdMulticastList, IDLELOOP, 0,
+};
+
+/* Index to functions, as function prototypes. */
+
+extern int el16_probe(struct device *dev); /* Called from Space.c */
+
+static int el16_probe1(struct device *dev, int ioaddr);
+static int el16_open(struct device *dev);
+static int el16_send_packet(struct sk_buff *skb, struct device *dev);
+static void el16_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void el16_rx(struct device *dev);
+static int el16_close(struct device *dev);
+static struct enet_statistics *el16_get_stats(struct device *dev);
+
+static void hardware_send_packet(struct device *dev, void *buf, short length);
+void init_82586_mem(struct device *dev);
+
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"3c507", el16_probe1, EL16_IO_EXTENT, netcard_portlist};
+#endif
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, (detachable devices only) allocate space for the
+ device and return success.
+ */
+int
+el16_probe(struct device *dev)
+{
+ int base_addr = dev ? dev->base_addr : 0;
+ int i;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return el16_probe1(dev, base_addr);
+ else if (base_addr != 0)
+ return ENXIO; /* Don't probe at all. */
+
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, EL16_IO_EXTENT))
+ continue;
+ if (el16_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+int el16_probe1(struct device *dev, int ioaddr)
+{
+ static unsigned char init_ID_done = 0, version_printed = 0;
+ int i, irq, irqval;
+
+ if (init_ID_done == 0) {
+ ushort lrs_state = 0xff;
+ /* Send the ID sequence to the ID_PORT to enable the board(s). */
+ outb(0x00, ID_PORT);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, ID_PORT);
+ lrs_state <<= 1;
+ if (lrs_state & 0x100)
+ lrs_state ^= 0xe7;
+ }
+ outb(0x00, ID_PORT);
+ init_ID_done = 1;
+ }
+
+	if (inb(ioaddr) != '*' || inb(ioaddr+1) != '3'
+	    || inb(ioaddr+2) != 'C' || inb(ioaddr+3) != 'O')
+		return ENODEV;
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (net_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: 3c507 at %#x,", dev->name, ioaddr);
+
+ /* We should make a few more checks here, like the first three octets of
+ the S.A. for the manufacturer's code. */
+
+ irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ irqval = request_irq(irq, &el16_interrupt, 0, "3c507", NULL);
+ if (irqval) {
+ printk ("unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ return EAGAIN;
+ }
+
+ /* We've committed to using the board, and can start filling in *dev. */
+ request_region(ioaddr, EL16_IO_EXTENT, "3c507");
+ dev->base_addr = ioaddr;
+
+ outb(0x01, ioaddr + MISC_CTRL);
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = inb(ioaddr + i);
+ printk(" %02x", dev->dev_addr[i]);
+ }
+
+ if ((dev->mem_start & 0xf) > 0)
+ net_debug = dev->mem_start & 7;
+
+#ifdef MEM_BASE
+ dev->mem_start = MEM_BASE;
+ dev->mem_end = dev->mem_start + 0x10000;
+#else
+ {
+ int base;
+ int size;
+ char mem_config = inb(ioaddr + MEM_CONFIG);
+ if (mem_config & 0x20) {
+ size = 64*1024;
+ base = 0xf00000 + (mem_config & 0x08 ? 0x080000
+ : ((mem_config & 3) << 17));
+ } else {
+ size = ((mem_config & 3) + 1) << 14;
+ base = 0x0c0000 + ( (mem_config & 0x18) << 12);
+ }
+ dev->mem_start = base;
+ dev->mem_end = base + size;
+ }
+#endif
+
+ dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
+ dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+ printk(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
+ dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = el16_open;
+ dev->stop = el16_close;
+ dev->hard_start_xmit = el16_send_packet;
+ dev->get_stats = el16_get_stats;
+
+ ether_setup(dev); /* Generic ethernet behaviour */
+
+ dev->flags&=~IFF_MULTICAST; /* Multicast doesn't work */
+
+ return 0;
+}
+
+
+
+static int
+el16_open(struct device *dev)
+{
+ irq2dev_map[dev->irq] = dev;
+
+ /* Initialize the 82586 memory and start it. */
+ init_82586_mem(dev);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+el16_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ short *shmem = (short*)dev->mem_start;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ if (net_debug > 1)
+ printk("%s: transmit timed out, %s? ", dev->name,
+ shmem[iSCB_STATUS>>1] & 0x8000 ? "IRQ conflict" :
+ "network cable problem");
+ /* Try to restart the adaptor. */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ if (net_debug > 1) printk("Resetting board.\n");
+ /* Completely reset the adaptor. */
+ init_82586_mem(dev);
+ } else {
+ /* Issue the channel attention signal and hope it "gets better". */
+ if (net_debug > 1) printk("Kicking board.\n");
+ shmem[iSCB_CMD>>1] = 0xf000|CUC_START|RX_START;
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+	/* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ /* Enable the 82586 interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+el16_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+ ushort ack_cmd = 0;
+ ushort *shmem;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+ shmem = ((ushort*)dev->mem_start);
+
+ status = shmem[iSCB_STATUS>>1];
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
+ }
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* Reap the Tx packet buffers. */
+ while (lp->tx_reap != lp->tx_head) {
+ unsigned short tx_status = shmem[lp->tx_reap>>1];
+
+ if (tx_status == 0) {
+ if (net_debug > 5) printk("Couldn't reap %#x.\n", lp->tx_reap);
+ break;
+ }
+ if (tx_status & 0x2000) {
+ lp->stats.tx_packets++;
+ lp->stats.collisions += tx_status & 0xf;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->stats.tx_errors++;
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ }
+ if (net_debug > 5)
+ printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
+ lp->tx_reap += TX_BUF_SIZE;
+ if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_reap = TX_BUF_START;
+ if (++boguscount > 4)
+ break;
+ }
+
+ if (status & 0x4000) { /* Packet received. */
+ if (net_debug > 5)
+ printk("Received packet, rx_head %04x.\n", lp->rx_head);
+ el16_rx(dev);
+ }
+
+ /* Acknowledge the interrupt sources. */
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x0700) != 0x0200 && dev->start) {
+ if (net_debug)
+ printk("%s: Command unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ /* If this ever occurs we should really re-write the idle loop, reset
+ the Tx list, and do a complete restart of the command unit.
+ For now we rely on the Tx timeout if the resume doesn't work. */
+ ack_cmd |= CUC_RESUME;
+ }
+
+ if ((status & 0x0070) != 0x0040 && dev->start) {
+ /* The Rx unit is not ready, it must be hung. Restart the receiver by
+ initializing the rx buffers, and issuing an Rx start command. */
+ if (net_debug)
+ printk("%s: Rx unit stopped, status %04x, restarting.\n",
+ dev->name, status);
+ init_rx_bufs(dev);
+ shmem[iSCB_RFA >> 1] = RX_BUF_START;
+ ack_cmd |= RX_START;
+ }
+
+ shmem[iSCB_CMD>>1] = ack_cmd;
+ outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
+
+ /* Clear the latched interrupt. */
+ outb(0, ioaddr + RESET_IRQ);
+
+ /* Enable the 82586's interrupt input. */
+ outb(0x84, ioaddr + MISC_CTRL);
+
+ return;
+}
+
+static int
+el16_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ ushort *shmem = (short*)dev->mem_start;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Flush the Tx and disable Rx. */
+ shmem[iSCB_CMD >> 1] = RX_SUSPEND | CUC_SUSPEND;
+ outb(0, ioaddr + SIGNAL_CA);
+
+ /* Disable the 82586's input to the interrupt line. */
+ outb(0x80, ioaddr + MISC_CTRL);
+
+ /* We always physically use the IRQ line, so we don't do free_irq().
+ We do remove ourselves from the map. */
+
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+el16_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ /* ToDo: decide if there are any useful statistics from the SCB. */
+
+ return &lp->stats;
+}
+
+/* Initialize the Rx-block list. */
+static void
+init_rx_bufs(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short *write_ptr;
+ unsigned short SCB_base = SCB_BASE;
+
+ int cur_rxbuf = lp->rx_head = RX_BUF_START;
+
+ /* Initialize each Rx frame + data buffer. */
+ do { /* While there is room for one more. */
+
+ write_ptr = (unsigned short *)(dev->mem_start + cur_rxbuf);
+
+ *write_ptr++ = 0x0000; /* Status */
+ *write_ptr++ = 0x0000; /* Command */
+ *write_ptr++ = cur_rxbuf + RX_BUF_SIZE; /* Link */
+ *write_ptr++ = cur_rxbuf + 22; /* Buffer offset */
+ *write_ptr++ = 0x0000; /* Pad for dest addr. */
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000; /* Pad for source addr. */
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000;
+ *write_ptr++ = 0x0000; /* Pad for protocol. */
+
+ *write_ptr++ = 0x0000; /* Buffer: Actual count */
+ *write_ptr++ = -1; /* Buffer: Next (none). */
+ *write_ptr++ = cur_rxbuf + 0x20 + SCB_base; /* Buffer: Address low */
+ *write_ptr++ = 0x0000;
+ /* Finally, the number of bytes in the buffer. */
+ *write_ptr++ = 0x8000 + RX_BUF_SIZE-0x20;
+
+ lp->rx_tail = cur_rxbuf;
+ cur_rxbuf += RX_BUF_SIZE;
+ } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
+
+ /* Terminate the list by setting the EOL bit, and wrap the pointer to make
+ the list a ring. */
+ write_ptr = (unsigned short *)
+ (dev->mem_start + lp->rx_tail + 2);
+ *write_ptr++ = 0xC000; /* Command, mark as last. */
+ *write_ptr++ = lp->rx_head; /* Link */
+
+}
+
+void
+init_82586_mem(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ ushort *shmem = (short*)dev->mem_start;
+
+ /* Enable loopback to protect the wire while starting up,
+ and hold the 586 in reset during the memory initialization. */
+ outb(0x20, ioaddr + MISC_CTRL);
+
+ /* Fix the ISCP address and base. */
+ init_words[3] = SCB_BASE;
+ init_words[7] = SCB_BASE;
+
+ /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
+ memcpy((void*)dev->mem_end-10, init_words, 10);
+
+ /* Write the words at 0x0000. */
+ memcpy((char*)dev->mem_start, init_words + 5, sizeof(init_words) - 10);
+
+ /* Fill in the station address. */
+ memcpy((char*)dev->mem_start+SA_OFFSET, dev->dev_addr,
+ sizeof(dev->dev_addr));
+
+ /* The Tx-block list is written as needed. We just set up the values. */
+ lp->tx_cmd_link = IDLELOOP + 4;
+ lp->tx_head = lp->tx_reap = TX_BUF_START;
+
+ init_rx_bufs(dev);
+
+ /* Start the 586 by releasing the reset line, but leave loopback. */
+ outb(0xA0, ioaddr + MISC_CTRL);
+
+ /* This was time consuming to track down: you need to give two channel
+ attention signals to reliably start up the i82586. */
+ outb(0, ioaddr + SIGNAL_CA);
+
+ {
+ int boguscnt = 50;
+ while (shmem[iSCB_STATUS>>1] == 0)
+ if (--boguscnt == 0) {
+ printk("%s: i82586 initialization timed out with status %04x,"
+			   " cmd %04x.\n", dev->name,
+ shmem[iSCB_STATUS>>1], shmem[iSCB_CMD>>1]);
+ break;
+ }
+	/* Issue a second channel-attn -- without it the 82586 won't start. */
+ outb(0, ioaddr + SIGNAL_CA);
+ }
+
+ /* Disable loopback and enable interrupts. */
+ outb(0x84, ioaddr + MISC_CTRL);
+ if (net_debug > 4)
+ printk("%s: Initialized 82586, status %04x.\n", dev->name,
+ shmem[iSCB_STATUS>>1]);
+ return;
+}
+
+static void
+hardware_send_packet(struct device *dev, void *buf, short length)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ ushort tx_block = lp->tx_head;
+ ushort *write_ptr = (ushort *)(dev->mem_start + tx_block);
+
+ /* Set the write pointer to the Tx block, and put out the header. */
+ *write_ptr++ = 0x0000; /* Tx status */
+ *write_ptr++ = CMD_INTR|CmdTx; /* Tx command */
+ *write_ptr++ = tx_block+16; /* Next command is a NoOp. */
+ *write_ptr++ = tx_block+8; /* Data Buffer offset. */
+
+ /* Output the data buffer descriptor. */
+ *write_ptr++ = length | 0x8000; /* Byte count parameter. */
+ *write_ptr++ = -1; /* No next data buffer. */
+ *write_ptr++ = tx_block+22+SCB_BASE;/* Buffer follows the NoOp command. */
+ *write_ptr++ = 0x0000; /* Buffer address high bits (always zero). */
+
+ /* Output the Loop-back NoOp command. */
+ *write_ptr++ = 0x0000; /* Tx status */
+ *write_ptr++ = CmdNOp; /* Tx command */
+ *write_ptr++ = tx_block+16; /* Next is myself. */
+
+ /* Output the packet at the write pointer. */
+ memcpy(write_ptr, buf, length);
+
+ /* Set the old command link pointing to this send packet. */
+ *(ushort*)(dev->mem_start + lp->tx_cmd_link) = tx_block;
+ lp->tx_cmd_link = tx_block + 20;
+
+ /* Set the next free tx region. */
+ lp->tx_head = tx_block + TX_BUF_SIZE;
+ if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
+ lp->tx_head = TX_BUF_START;
+
+ if (net_debug > 4) {
+ printk("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
+ dev->name, ioaddr, length, tx_block, lp->tx_head);
+ }
+
+ if (lp->tx_head != lp->tx_reap)
+ dev->tbusy = 0;
+}
+
+static void
+el16_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short *shmem = (short*)dev->mem_start;
+ ushort rx_head = lp->rx_head;
+ ushort rx_tail = lp->rx_tail;
+ ushort boguscount = 10;
+ short frame_status;
+
+ while ((frame_status = shmem[rx_head>>1]) < 0) { /* Command complete */
+ ushort *read_frame = (short *)(dev->mem_start + rx_head);
+ ushort rfd_cmd = read_frame[1];
+ ushort next_rx_frame = read_frame[2];
+ ushort data_buffer_addr = read_frame[3];
+ ushort *data_frame = (short *)(dev->mem_start + data_buffer_addr);
+ ushort pkt_len = data_frame[0];
+
+ if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
+ || (pkt_len & 0xC000) != 0xC000) {
+ printk("%s: Rx frame at %#x corrupted, status %04x cmd %04x"
+			   " next %04x data-buf @%04x %04x.\n", dev->name, rx_head,
+ frame_status, rfd_cmd, next_rx_frame, data_buffer_addr,
+ pkt_len);
+ } else if ((frame_status & 0x2000) == 0) {
+ /* Frame Rxed, but with error. */
+ lp->stats.rx_errors++;
+ if (frame_status & 0x0800) lp->stats.rx_crc_errors++;
+ if (frame_status & 0x0400) lp->stats.rx_frame_errors++;
+ if (frame_status & 0x0200) lp->stats.rx_fifo_errors++;
+ if (frame_status & 0x0100) lp->stats.rx_over_errors++;
+ if (frame_status & 0x0080) lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb_reserve(skb,2);
+ skb->dev = dev;
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb,pkt_len), data_frame + 5, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+
+ /* Clear the status word and set End-of-List on the rx frame. */
+ read_frame[0] = 0;
+ read_frame[1] = 0xC000;
+ /* Clear the end-of-list on the prev. RFD. */
+ *(short*)(dev->mem_start + rx_tail + 2) = 0x0000;
+
+ rx_tail = rx_head;
+ rx_head = next_rx_frame;
+ if (--boguscount == 0)
+ break;
+ }
+
+ lp->rx_head = rx_head;
+ lp->rx_tail = rx_tail;
+}
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_3c507 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, el16_probe
+};
+
+static int io = 0x300;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("3c507: You should not use auto-probing with insmod!\n");
+ dev_3c507.base_addr = io;
+ dev_3c507.irq = irq;
+ if (register_netdev(&dev_3c507) != 0) {
+ printk("3c507: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_3c507);
+ kfree(dev_3c507.priv);
+ dev_3c507.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_3c507.irq, NULL);
+ release_region(dev_3c507.base_addr, EL16_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -I/usr/src/linux/drivers/net -Wall -Wstrict-prototypes -O6 -m486 -c 3c507.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c509.c b/linux/src/drivers/net/3c509.c
new file mode 100644
index 0000000..727595c
--- /dev/null
+++ b/linux/src/drivers/net/3c509.c
@@ -0,0 +1,842 @@
+/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
+/*
+ Written 1993-1998 by Donald Becker.
+
+ Copyright 1994-1998 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This driver is for the 3Com EtherLinkIII series.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Known limitations:
+ Because of the way 3c509 ISA detection works it's difficult to predict
+ a priori which of several ISA-mode cards will be detected first.
+
+ This driver does not use predictive interrupt mode, resulting in higher
+ packet latency but lower overhead. If interrupts are disabled for an
+ unusually long time it could also result in missed packets, but in
+ practice this rarely happens.
+
+
+ FIXES:
+ Alan Cox: Removed the 'Unexpected interrupt' bug.
+ Michael Meskes: Upgraded to Donald Becker's version 1.07.
+ Alan Cox: Increased the eeprom delay. Regardless of
+ what the docs say some people definitely
+ get problems with lower (but in card spec)
+ delays
+ v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
+ other cleanups. -djb
+ v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb
+ v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb
+ v1.15 1/31/98 Faster recovery for Tx errors. -djb
+ v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb
+*/
+
+static char *version = "3c509.c:1.16 2/3/98 becker@cesdis.gsfc.nasa.gov\n";
+/* A few values that may be tweaked. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (400*HZ/1000)
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 10;
+
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/config.h> /* for CONFIG_MCA */
+#include <linux/delay.h> /* for udelay() */
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef EL3_DEBUG
+int el3_debug = EL3_DEBUG;
+#else
+int el3_debug = 2;
+#endif
+
+/* To minimize the size of the driver source I only define operating
+ constants if they are used several times. You'll need the manual
+ anyway if you want to understand driver details. */
+/* Offsets from base I/O address. */
+#define EL3_DATA 0x00
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+#define EEPROM_READ 0x80
+
+#define EL3_IO_EXTENT 16
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable. */
+enum c509cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
+ TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,};
+
+enum c509status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, };
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
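+
+/* A small usage sketch: a command and its parameter bits are OR-ed together
+   and written to the command register, e.g.
+
+	outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+
+   much as the EL3WINDOW() macro above does for SelectWindow. */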
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO 0x00
+#define RX_FIFO 0x00
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
+#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
+#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
+
+/*
+ * Must be a power of two (we use a binary and in the
+ * circular queue)
+ */
+#define SKB_QUEUE_SIZE 64
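+
+/* i.e. queue indices can wrap with a bitwise AND instead of a modulo, e.g.
+
+	next = (head + 1) & (SKB_QUEUE_SIZE - 1);
+ */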
+
+struct el3_private {
+ struct enet_statistics stats;
+ struct device *next_dev;
+ /* skb send-queue */
+ int head, size;
+ struct sk_buff *queue[SKB_QUEUE_SIZE];
+};
+static int id_port = 0x110; /* Start with 0x110 to avoid new sound cards.*/
+static struct device *el3_root_dev = NULL;
+
+static ushort id_read_eeprom(int index);
+static ushort read_eeprom(int ioaddr, int index);
+static int el3_open(struct device *dev);
+static int el3_start_xmit(struct sk_buff *skb, struct device *dev);
+static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void update_stats(int addr, struct device *dev);
+static struct enet_statistics *el3_get_stats(struct device *dev);
+static int el3_rx(struct device *dev);
+static int el3_close(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+
+int el3_probe(struct device *dev)
+{
+ short lrs_state = 0xff, i;
+ int ioaddr, irq, if_port;
+ u16 phys_addr[3];
+ static int current_tag = 0;
+
+ /* First check all slots of the EISA bus. The next slot address to
+ probe is kept in 'eisa_addr' to support multiple probe() calls. */
+ if (EISA_bus) {
+ static int eisa_addr = 0x1000;
+ while (eisa_addr < 0x9000) {
+ ioaddr = eisa_addr;
+ eisa_addr += 0x1000;
+
+ /* Check the standard EISA ID register for an encoded '3Com'. */
+ if (inw(ioaddr + 0xC80) != 0x6d50)
+ continue;
+
+ /* Change the register set to the configuration window 0. */
+ outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
+
+ irq = inw(ioaddr + WN0_IRQ) >> 12;
+ if_port = inw(ioaddr + 6)>>14;
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+ /* Restore the "Product ID" to the EEPROM read register. */
+ read_eeprom(ioaddr, 3);
+
+ /* Was the EISA code an add-on hack? Nahhhhh... */
+ goto found;
+ }
+ }
+
+#ifdef CONFIG_MCA
+ if (MCA_bus) {
+ mca_adaptor_select_mode(1);
+ for (i = 0; i < 8; i++)
+ if ((mca_adaptor_id(i) | 1) == 0x627c) {
+ ioaddr = mca_pos_base_addr(i);
+ irq = inw(ioaddr + WN0_IRQ) >> 12;
+ if_port = inw(ioaddr + 6)>>14;
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+
+ mca_adaptor_select_mode(0);
+ goto found;
+ }
+ mca_adaptor_select_mode(0);
+
+ }
+#endif
+
+ /* Reset the ISA PnP mechanism on 3c509b. */
+ outb(0x02, 0x279); /* Select PnP config control register. */
+ outb(0x02, 0xA79); /* Return to WaitForKey state. */
+ /* Select an open I/O location at 0x1*0 to do contention select. */
+ for ( ; id_port < 0x200; id_port += 0x10) {
+ if (check_region(id_port, 1))
+ continue;
+ outb(0x00, id_port);
+ outb(0xff, id_port);
+ if (inb(id_port) & 0x01)
+ break;
+ }
+ if (id_port >= 0x200) { /* GCC optimizes this test out. */
+ /* Rare -- do we really need a warning? */
+ printk(" WARNING: No I/O port available for 3c509 activation.\n");
+ return -ENODEV;
+ }
+ /* Next check for all ISA bus boards by sending the ID sequence to the
+ ID_PORT. We find cards past the first by setting the 'current_tag'
+ on cards as they are found. Cards with their tag set will not
+ respond to subsequent ID sequences. */
+
+ outb(0x00, id_port);
+ outb(0x00, id_port);
+ for(i = 0; i < 255; i++) {
+ outb(lrs_state, id_port);
+ lrs_state <<= 1;
+ lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
+ }
+
+ /* For the first probe, clear all board's tag registers. */
+ if (current_tag == 0)
+ outb(0xd0, id_port);
+ else /* Otherwise kill off already-found boards. */
+ outb(0xd8, id_port);
+
+ if (id_read_eeprom(7) != 0x6d50) {
+ return -ENODEV;
+ }
+
+ /* Read in EEPROM data, which does contention-select.
+ Only the lowest address board will stay "on-line".
+ 3Com got the byte order backwards. */
+ for (i = 0; i < 3; i++) {
+ phys_addr[i] = htons(id_read_eeprom(i));
+ }
+
+ {
+ unsigned int iobase = id_read_eeprom(8);
+ if_port = iobase >> 14;
+ ioaddr = 0x200 + ((iobase & 0x1f) << 4);
+ }
+ irq = id_read_eeprom(9) >> 12;
+
+ if (dev) { /* Set passed-in IRQ or I/O Addr. */
+ if (dev->irq > 1 && dev->irq < 16)
+ irq = dev->irq;
+
+ if (dev->base_addr) {
+ if (dev->mem_end == 0x3c509 /* Magic key */
+ && dev->base_addr >= 0x200 && dev->base_addr <= 0x3e0)
+ ioaddr = dev->base_addr & 0x3f0;
+ else if (dev->base_addr != ioaddr)
+ return -ENODEV;
+ }
+ }
+
+ /* Set the adaptor tag so that the next card can be found. */
+ outb(0xd0 + ++current_tag, id_port);
+
+ /* Activate the adaptor at the EEPROM location. */
+ outb((ioaddr >> 4) | 0xe0, id_port);
+
+ EL3WINDOW(0);
+ if (inw(ioaddr) != 0x6d50)
+ return -ENODEV;
+
+ /* Free the interrupt so that some other card can use it. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+ found:
+ if (dev == NULL) {
+ dev = init_etherdev(dev, sizeof(struct el3_private));
+ }
+ memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->if_port = (dev->mem_start & 0x1f) ? dev->mem_start & 3 : if_port;
+
+ request_region(dev->base_addr, EL3_IO_EXTENT, "3c509");
+
+ {
+ const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
+ printk("%s: 3c509 at %#3.3lx tag %d, %s port, address ",
+ dev->name, dev->base_addr, current_tag, if_names[dev->if_port]);
+ }
+
+ /* Read in the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+ printk(", IRQ %d.\n", dev->irq);
+
+ /* Make up an EL3-specific data structure. */
+ if (dev->priv == NULL)
+ dev->priv = kmalloc(sizeof(struct el3_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct el3_private));
+
+ ((struct el3_private *)dev->priv)->next_dev = el3_root_dev;
+ el3_root_dev = dev;
+
+ if (el3_debug > 0)
+ printk("%s", version);
+
+ /* The EL3-specific entries in the device structure. */
+ dev->open = &el3_open;
+ dev->hard_start_xmit = &el3_start_xmit;
+ dev->stop = &el3_close;
+ dev->get_stats = &el3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the generic fields of the device structure. */
+ ether_setup(dev);
+ return 0;
+}
+
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ Assume that we are in register window zero.
+ */
+static ushort read_eeprom(int ioaddr, int index)
+{
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Pause for at least 162 us. for the read to take place. */
+ udelay (500);
+ return inw(ioaddr + 12);
+}
+
+/* Read a word from the EEPROM when in the ISA ID probe state. */
+static ushort id_read_eeprom(int index)
+{
+ int bit, word = 0;
+
+ /* Issue read command, and pause for at least 162 us for it to complete.
+    Assume an extra-fast 16 MHz bus. */
+ outb(EEPROM_READ + index, id_port);
+
+ /* Pause for at least 162 us. for the read to take place. */
+ udelay (500);
+
+ for (bit = 15; bit >= 0; bit--)
+ word = (word << 1) + (inb(id_port) & 0x01);
+
+ if (el3_debug > 3)
+ printk(" 3c509 EEPROM word %d %#4.4x.\n", index, word);
+
+ return word;
+}
+
+
+
+static int
+el3_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(RxReset, ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ if (request_irq(dev->irq, &el3_interrupt, 0, "3c509", dev)) {
+ return -EAGAIN;
+ }
+
+ EL3WINDOW(0);
+ if (el3_debug > 3)
+ printk("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
+ dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
+
+ /* Activate board: this is probably unnecessary. */
+ outw(0x0001, ioaddr + 4);
+
+ /* Set the IRQ line. */
+ outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);
+
+ /* Set the station address in window 2 each time opened. */
+ EL3WINDOW(2);
+
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ if (dev->if_port == 3)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* 10baseT interface, enabled link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ /* Accept broadcast and phys addr only. */
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 1;
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull,
+ ioaddr + EL3_CMD);
+
+ if (el3_debug > 3)
+ printk("%s: Opened 3c509 IRQ %d status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
+
+ MOD_INC_USE_COUNT;
+ return 0; /* Always succeed */
+}
+
+static int
+el3_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < TX_TIMEOUT)
+ return 1;
+ printk("%s: transmit timed out, Tx_status %2.2x status %4.4x "
+ "Tx FIFO room %d.\n",
+ dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
+ inw(ioaddr + TX_FREE));
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->tbusy = 0;
+ }
+
+ if (el3_debug > 4) {
+ printk("%s: el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ dev->name, skb->len, inw(ioaddr + EL3_STATUS));
+ }
+#if 0
+#ifndef final_version
+ { /* Error-checking code, delete someday. */
+ ushort status = inw(ioaddr + EL3_STATUS);
+ if (status & 0x0001 /* IRQ line active, missed one. */
+ && inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
+ printk("%s: Missed interrupt, status then %04x now %04x"
+ " Tx %2.2x Rx %4.4x.\n", dev->name, status,
+ inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
+ inw(ioaddr + RX_STATUS));
+ /* Fake interrupt trigger by masking, acknowledge interrupts. */
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ }
+ }
+#endif
+#endif
+ /* Avoid timer-based retransmission conflicts. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+#ifdef __powerpc__
+ outsl_unswapped(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#else
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+#endif
+
+ dev->trans_start = jiffies;
+ if (inw(ioaddr + TX_FREE) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+ if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+ if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* The EL3 interrupt handler. */
+static void
+el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)dev_id;
+ int ioaddr, status;
+ int i = max_interrupt_work;
+
+ if (dev == NULL) {
+ printk ("el3_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (el3_debug > 4)
+ printk("%s: interrupt, status %4.4x.\n", dev->name, status);
+
+ while ((status = inw(ioaddr + EL3_STATUS)) &
+ (IntLatch | RxComplete | StatsFull)) {
+
+ if (status & RxComplete)
+ el3_rx(dev);
+
+ if (status & TxAvailable) {
+ if (el3_debug > 5)
+ printk(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(ioaddr, dev);
+ if (status & RxEarly) { /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & TxComplete) { /* Really Tx error. */
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ short tx_status;
+ int i = 4;
+
+ while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
+ if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
+ if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ outw(SetRxFilter | RxStation | RxBroadcast
+ | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
+ | (dev->flags & IFF_PROMISC ? RxProm : 0),
+ ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--i < 0) {
+ printk("%s: Infinite loop in interrupt, status %4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupts. */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */
+ }
+
+ if (el3_debug > 4) {
+ printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
+ inw(ioaddr + EL3_STATUS));
+ }
+
+ dev->interrupt = 0;
+ return;
+}
+
+
+static struct enet_statistics *
+el3_get_stats(struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+ restore_flags(flags);
+ return &lp->stats;
+}
+
+/* Update statistics. We change to register window 6, so this should be run
+ single-threaded if the device is active. This is expected to be a rare
+ operation, and it's simpler for the rest of the driver to assume that
+ window 1 is always valid rather than use a special window-state variable.
+ */
+static void update_stats(int ioaddr, struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+
+ if (el3_debug > 5)
+ printk(" Updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ lp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ lp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ lp->stats.collisions += inb(ioaddr + 3);
+ lp->stats.tx_window_errors += inb(ioaddr + 4);
+ lp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ lp->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */ inb(ioaddr + 7);
+ /* Tx deferrals */ inb(ioaddr + 8);
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ return;
+}
+
+static int
+el3_rx(struct device *dev)
+{
+ struct el3_private *lp = (struct el3_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ short rx_status;
+
+ if (el3_debug > 5)
+ printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+ while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ lp->stats.rx_errors++;
+ switch (error) {
+ case 0x0000: lp->stats.rx_over_errors++; break;
+ case 0x0800: lp->stats.rx_length_errors++; break;
+ case 0x1000: lp->stats.rx_frame_errors++; break;
+ case 0x1800: lp->stats.rx_length_errors++; break;
+ case 0x2000: lp->stats.rx_frame_errors++; break;
+ case 0x2800: lp->stats.rx_crc_errors++; break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+5);
+ if (el3_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP header on a 16 byte boundary */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+#ifdef __powerpc__
+ insl_unswapped(ioaddr+RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+#else
+ insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
+ (pkt_len + 3) >> 2);
+#endif
+
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ continue;
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ lp->stats.rx_dropped++;
+ if (el3_debug)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ }
+ inw(ioaddr + EL3_STATUS); /* Delay. */
+ while (inw(ioaddr + EL3_STATUS) & 0x1000)
+ printk(" Waiting for 3c509 to discard packet, status %x.\n",
+ inw(ioaddr + EL3_STATUS) );
+ }
+
+ return 0;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ if (el3_debug > 1) {
+ static int old = 0;
+ if (old != dev->mc_count) {
+ old = dev->mc_count;
+ printk("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ }
+ }
+ if (dev->flags&IFF_PROMISC) {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
+ ioaddr + EL3_CMD);
+ }
+ else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+ outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
+ }
+ else
+ outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
+}
+
+static int
+el3_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (el3_debug > 2)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 3)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 0) {
+ /* Disable link beat and jabber, if_port may change ere next open(). */
+ EL3WINDOW(4);
+ outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
+ }
+
+ free_irq(dev->irq, dev);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
+
+ update_stats(ioaddr, dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+#ifdef MODULE
+/* Parameters that may be passed into the module. */
+static int debug = -1;
+static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+int
+init_module(void)
+{
+ int el3_cards = 0;
+
+ if (debug >= 0)
+ el3_debug = debug;
+
+ el3_root_dev = NULL;
+ while (el3_probe(0) == 0) {
+ if (irq[el3_cards] > 1)
+ el3_root_dev->irq = irq[el3_cards];
+ if (xcvr[el3_cards] >= 0)
+ el3_root_dev->if_port = xcvr[el3_cards];
+ el3_cards++;
+ }
+
+ return el3_cards ? 0 : -ENODEV;
+}
+
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (el3_root_dev) {
+ next_dev = ((struct el3_private *)el3_root_dev->priv)->next_dev;
+ unregister_netdev(el3_root_dev);
+ release_region(el3_root_dev->base_addr, EL3_IO_EXTENT);
+ kfree(el3_root_dev);
+ el3_root_dev = next_dev;
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c509.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c515.c b/linux/src/drivers/net/3c515.c
new file mode 100644
index 0000000..52f4703
--- /dev/null
+++ b/linux/src/drivers/net/3c515.c
@@ -0,0 +1,1501 @@
+/* 3c515.c: A 3Com ISA EtherLink XL "Corkscrew" ethernet driver for linux. */
+/*
+ Written 1997-1998 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+*/
+
+static char *version = "3c515.c:v0.99 4/7/98 becker@cesdis.gsfc.nasa.gov\n";
+#define CORKSCREW 1
+
+/* "Knobs" that adjust features and parameters. */
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+static const int rx_copybreak = 200;
+/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+static const int mtu = 1500;
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Enable the automatic media selection code -- usually set. */
+#define AUTOMEDIA 1
+
+/* Allow the use of fragment bus master transfers instead of only
+ programmed-I/O for Vortex cards. Full-bus-master transfers are always
+ enabled by default on Boomerang cards. If VORTEX_BUS_MASTER is defined,
+ the feature may be turned on using 'options'. */
+#define VORTEX_BUS_MASTER
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE 16
+#define RX_RING_SIZE 16
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <linux/timer.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#if (LINUX_VERSION_CODE >= 0x10344)
+#define NEW_MULTICAST
+#include <linux/delay.h>
+#else
+#define udelay(microsec) do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+#endif
+
+/* Kernel version compatibility functions. */
+#define RUN_AT(x) (jiffies + (x))
+#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum, dev)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n, instance)
+#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+
+#if (LINUX_VERSION_CODE < 0x20123)
+//#define test_and_set_bit(val, addr) set_bit(val, addr)
+#elif defined(MODULE)
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_interrupt_work, "i");
+#endif
+
+/* "Knobs" for adjusting internal parameters. */
+/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
+#define DRIVER_DEBUG 1
+/* Some values here only for performance evaluation and path-coverage
+ debugging. */
+static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0;
+
+/* Number of times to check to see if the Tx FIFO has space, used in some
+ limited cases. */
+#define WAIT_TX_AVAIL 200
+
+/* Operational parameters that usually are not changed. */
+#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */
+
+/* The size here is somewhat misleading: the Corkscrew also uses the ISA
+ aliased registers at <base>+0x400.
+ */
+#define CORKSCREW_TOTAL_SIZE 0x20
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry tc515_drv =
+{"3c515", tc515_probe, CORKSCREW_TOTAL_SIZE, NULL};
+#endif
+
+#ifdef DRIVER_DEBUG
+int vortex_debug = DRIVER_DEBUG;
+#else
+int vortex_debug = 1;
+#endif
+
+#define CORKSCREW_ID 10
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL,
+3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout,
+it's not practical to integrate this driver with the other EtherLink drivers.
+
+II. Board-specific settings
+
+The Corkscrew has an EEPROM for configuration, but no special settings are
+needed for Linux.
+
+III. Driver operation
+
+The 3c515 series use an interface that's very similar to the 3c900 "Boomerang"
+PCI cards, with the bus master interface extensively modified to work with
+the ISA bus.
+
+The card is capable of full-bus-master transfers with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3.
+
+This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate
+receive buffer.  This scheme allocates full-sized skbuffs as receive
+buffers.  The value RX_COPYBREAK is the copying breakpoint: it trades off
+the memory wasted by passing full-sized skbuffs to the queue layer for all
+frames against the cost of copying small frames into correctly-sized
+skbuffs.  (A sketch of this decision appears after this comment block.)
+
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Terry Murphy of 3Com for providing documentation and a development
+board.
+
+The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com
+project names. I use these names to eliminate confusion -- 3Com product
+numbers and names are very similar and often confused.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes!
+This driver only supports ethernet frames because of the recent MTU limit
+of 1.5K, but the changes to support 4.5K are minimal.
+*/
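+
+/* Minimal sketch (assumption: this is not the driver's actual receive path,
+   and the function name is hypothetical): the copy-break decision hands large
+   frames up in the full-sized receive skbuff and copies only small frames
+   into a right-sized one. */
+#if 0
+static struct sk_buff *copybreak_example(struct sk_buff *rx_skb, int pkt_len)
+{
+	if (pkt_len < rx_copybreak) {
+		/* Small frame: copy into a fresh, tightly-sized skbuff and
+		   keep the original buffer in the receive ring. */
+		struct sk_buff *copy = dev_alloc_skb(pkt_len + 2);
+		if (copy == NULL)
+			return rx_skb;		/* fall back to the big buffer */
+		skb_reserve(copy, 2);		/* align the IP header */
+		memcpy(skb_put(copy, pkt_len), rx_skb->data, pkt_len);
+		return copy;
+	}
+	/* Large frame: pass the full-sized skbuff up and refill the ring. */
+	return rx_skb;
+}
+#endif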
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for ethernet, but the new chips
+ can handle FDDI-length frames (~4500 octets), so parameters are now
+ counted in 32-bit 'Dwords' rather than octets.  (See the encoding
+ sketch after the enum below.) */
+
+enum vortex_cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+ UpStall = 6<<11, UpUnstall = (6<<11)+1,
+ DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+ RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+ StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11,};
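+
+/* Minimal sketch (illustration only, not driver code; the helper name is
+   hypothetical): a command word is the opcode from enum vortex_cmd (already
+   shifted into bits 15-11) OR'd with an 11-bit parameter.  For example,
+   "SetTxThreshold + (1536>>2)" used later in this driver encodes opcode 18
+   with a 384-dword (1536-octet) threshold. */
+#if 0
+static unsigned short make_el3_cmd(unsigned short opcode, unsigned short param)
+{
+	return opcode | (param & 0x07ff);	/* parameter occupies the low 11 bits */
+}
+#endif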
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Bits in the general status register. */
+enum vortex_status {
+ IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+ DMAInProgress = 1<<11, /* DMA controller is still busy.*/
+ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0IRQ = 0x08,
+#if defined(CORKSCREW)
+ Wn0EepromCmd = 0x200A, /* Corkscrew EEPROM command register. */
+ Wn0EepromData = 0x200C, /* Corkscrew EEPROM results register. */
+#else
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+#endif
+};
+enum Win0_EEPROM_bits {
+ EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+ EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+ EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+};
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+ EtherLink3ID=7, };
+
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+union wn3_config {
+ int i;
+ struct w3_config_fields {
+ unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+ int pad8:8;
+ unsigned int ram_split:2, pad18:2, xcvr:3, pad21:1, autoselect:1;
+ int pad24:7;
+ } u;
+};
+
+enum Window4 {
+ Wn4_NetDiag = 6, Wn4_Media = 10, /* Window 4: Xcvr/media bits. */
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 { /* Window 7: Bus Master control. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+};
+/* Boomerang-style bus master control registers. Note ISA aliases! */
+enum MasterCtrl {
+ PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408, FragLen = 0x40c,
+ TxFreeThreshold = 0x40f, UpPktStatus = 0x410, UpListPtr = 0x418,
+};
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
+ alignment constraint on tx_ring[] and rx_ring[]. */
+struct boom_rx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete=0x00008000, RxDError=0x4000,
+ /* See boomerang_rx() for actual error bits */
+};
+
+struct boom_tx_desc {
+ u32 next;
+ s32 status;
+ u32 addr;
+ s32 length;
+};
+
+struct vortex_private {
+ char devname[8]; /* "ethN" string, also for kernel debug. */
+ const char *product_name;
+ struct device *next_module;
+ /* The Rx and Tx rings are here to keep them quad-word-aligned. */
+ struct boom_rx_desc rx_ring[RX_RING_SIZE];
+ struct boom_tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ struct timer_list timer; /* Media selection timer. */
+ int capabilities; /* Adapter capabilities word. */
+ int options; /* User-settable misc. driver options. */
+ int last_rx_packets; /* For media autoselection. */
+ unsigned int available_media:8, /* From Wn3_Options */
+ media_override:3, /* Passed-in media type. */
+ default_media:3, /* Read from the EEPROM. */
+ full_duplex:1, autoselect:1,
+ bus_master:1, /* Vortex can only do a fragment bus-m. */
+ full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */
+ tx_full:1;
+};
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII=6, XCVR_Default=8,
+};
+
+static struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config.*/
+ next:8; /* The media type to try next. */
+ short wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
+ { "MII", 0, 0x40, XCVR_10baseT, 3*HZ },
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
+
+static int vortex_scan(struct device *dev);
+static struct device *vortex_found_device(struct device *dev, int ioaddr,
+ int irq, int product_index,
+ int options);
+static int vortex_probe1(struct device *dev);
+static int vortex_open(struct device *dev);
+static void vortex_timer(unsigned long arg);
+static int vortex_start_xmit(struct sk_buff *skb, struct device *dev);
+static int vortex_rx(struct device *dev);
+static int boomerang_rx(struct device *dev);
+static void vortex_interrupt IRQ(int irq, void *dev_id, struct pt_regs *regs);
+static int vortex_close(struct device *dev);
+static void update_stats(int addr, struct device *dev);
+static struct enet_statistics *vortex_get_stats(struct device *dev);
+static void set_rx_mode(struct device *dev);
+
+
+/* Unlike the other PCI cards the 59x cards don't need a large contiguous
+ memory region, so making the driver a loadable module is feasible.
+
+ Unfortunately maximizing the shared code between the integrated and
+ module version of the driver results in a complicated set of initialization
+ procedures.
+ init_module() -- modules / tc59x_init() -- built-in
+ The wrappers for vortex_scan()
+ vortex_scan() The common routine that scans for PCI and EISA cards
+ vortex_found_device() Allocate a device structure when we find a card.
+ Different versions exist for modules and built-in.
+ vortex_probe1() Fill in the device structure -- this is separated
+ so that the modules code can put it in dev->init.
+*/
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Note: this is the only limit on the number of cards supported!! */
+static int options[8] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+
+#ifdef MODULE
+static int debug = -1;
+/* A list of all installed Vortex devices, for removing the driver module. */
+static struct device *root_vortex_dev = NULL;
+
+int
+init_module(void)
+{
+ int cards_found;
+
+ if (debug >= 0)
+ vortex_debug = debug;
+ if (vortex_debug)
+ printk("%s", version);
+
+ root_vortex_dev = NULL;
+ cards_found = vortex_scan(0);
+ return cards_found ? 0 : -ENODEV;
+}
+
+#else
+int tc515_probe(struct device *dev)
+{
+ int cards_found = 0;
+
+ cards_found = vortex_scan(dev);
+
+ if (vortex_debug > 0 && cards_found)
+ printk("%s", version);
+
+ return cards_found ? 0 : -ENODEV;
+}
+#endif /* not MODULE */
+
+static int vortex_scan(struct device *dev)
+{
+ int cards_found = 0;
+ static int ioaddr = 0x100;
+
+ /* Check all locations on the ISA bus -- evil! */
+ for (; ioaddr < 0x400; ioaddr += 0x20) {
+ int irq;
+ if (check_region(ioaddr, CORKSCREW_TOTAL_SIZE))
+ continue;
+ /* Check the resource configuration for a matching ioaddr. */
+ if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0))
+ continue;
+ /* Verify by reading the device ID from the EEPROM. */
+ {
+ int timer;
+ outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 us. for the read to take place. */
+ for (timer = 4; timer >= 0; timer--) {
+ udelay(162);
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+ break;
+ }
+ if (inw(ioaddr + Wn0EepromData) != 0x6d50)
+ continue;
+ }
+ printk("3c515 Resource configuraiton register %#4.4x, DCR %4.4x.\n",
+ inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
+ irq = inw(ioaddr + 0x2002) & 15;
+ vortex_found_device(dev, ioaddr, irq, CORKSCREW_ID, dev && dev->mem_start
+ ? dev->mem_start : options[cards_found]);
+ dev = 0;
+ cards_found++;
+ }
+
+ if (vortex_debug)
+ printk("%d 3c515 cards found.\n", cards_found);
+ return cards_found;
+}
+
+static struct device *vortex_found_device(struct device *dev, int ioaddr,
+ int irq, int product_index,
+ int options)
+{
+ struct vortex_private *vp;
+
+#ifdef MODULE
+ /* Allocate and fill new device structure. */
+ int dev_size = sizeof(struct device) +
+ sizeof(struct vortex_private) + 15; /* Pad for alignment */
+
+ dev = (struct device *) kmalloc(dev_size, GFP_KERNEL);
+ memset(dev, 0, dev_size);
+ /* Align the Rx and Tx ring entries. */
+ dev->priv = (void *)(((long)dev + sizeof(struct device) + 15) & ~15);
+ vp = (struct vortex_private *)dev->priv;
+ dev->name = vp->devname; /* An empty string. */
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->dma = (product_index == CORKSCREW_ID ? inw(ioaddr + 0x2000) & 7 : 0);
+ dev->init = vortex_probe1;
+ vp->product_name = "3c515";
+ vp->options = options;
+ if (options >= 0) {
+ vp->media_override = ((options & 7) == 2) ? 0 : options & 7;
+ vp->full_duplex = (options & 8) ? 1 : 0;
+ vp->bus_master = (options & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+ ether_setup(dev);
+ vp->next_module = root_vortex_dev;
+ root_vortex_dev = dev;
+ if (register_netdev(dev) != 0)
+ return 0;
+#else /* not a MODULE */
+ if (dev) {
+ /* Caution: quad-word alignment required for rings! */
+ dev->priv = kmalloc(sizeof (struct vortex_private), GFP_KERNEL);
+ memset(dev->priv, 0, sizeof (struct vortex_private));
+ }
+ dev = init_etherdev(dev, sizeof(struct vortex_private));
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->dma = (product_index == CORKSCREW_ID ? inw(ioaddr + 0x2000) & 7 : 0);
+ vp = (struct vortex_private *)dev->priv;
+ vp->product_name = "3c515";
+ vp->options = options;
+ if (options >= 0) {
+ vp->media_override = ((options & 7) == 2) ? 0 : options & 7;
+ vp->full_duplex = (options & 8) ? 1 : 0;
+ vp->bus_master = (options & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+
+ vortex_probe1(dev);
+#endif /* MODULE */
+ return dev;
+}
+
+static int vortex_probe1(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ int i;
+
+ printk("%s: 3Com %s at %#3x,", dev->name,
+ vp->product_name, ioaddr);
+
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
+ for (i = 0; i < 0x18; i++) {
+ short *phys_addr = (short *)dev->dev_addr;
+ int timer;
+ outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
+ /* Pause for at least 162 us. for the read to take place. */
+ for (timer = 4; timer >= 0; timer--) {
+ udelay(162);
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0)
+ break;
+ }
+ eeprom[i] = inw(ioaddr + Wn0EepromData);
+ checksum ^= eeprom[i];
+ if (i < 3)
+ phys_addr[i] = htons(eeprom[i]);
+ }
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ if (checksum != 0x00)
+ printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ if (eeprom[16] == 0x11c7) { /* Corkscrew */
+ if (request_dma(dev->dma, "3c515")) {
+ printk(", DMA %d allocation failed", dev->dma);
+ dev->dma = 0;
+ } else
+ printk(", DMA %d", dev->dma);
+ }
+ printk(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (vortex_debug && (dev->irq <= 0 || dev->irq > 15))
+ printk(" *** Warning: this IRQ is unlikely to work! ***\n");
+
+ {
+ char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ union wn3_config config;
+ EL3WINDOW(3);
+ vp->available_media = inw(ioaddr + Wn3_Options);
+ config.i = inl(ioaddr + Wn3_Config);
+ if (vortex_debug > 1)
+ printk(" Internal config register is %4.4x, transceivers %#x.\n",
+ config.i, inw(ioaddr + Wn3_Options));
+ printk(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+ 8 << config.u.ram_size,
+ config.u.ram_width ? "word" : "byte",
+ ram_split[config.u.ram_split],
+ config.u.autoselect ? "autoselect/" : "",
+ media_tbl[config.u.xcvr].name);
+ dev->if_port = config.u.xcvr;
+ vp->default_media = config.u.xcvr;
+ vp->autoselect = config.u.autoselect;
+ }
+ if (vp->media_override != 7) {
+ printk(" Media override to transceiver type %d (%s).\n",
+ vp->media_override, media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ }
+
+ vp->capabilities = eeprom[16];
+ vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0;
+ /* Rx is broken at 10mbps, so we always disable it. */
+ /* vp->full_bus_master_rx = 0;*/
+ vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0;
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, CORKSCREW_TOTAL_SIZE, vp->product_name);
+
+ /* The 3c59x-specific entries in the device structure. */
+ dev->open = &vortex_open;
+ dev->hard_start_xmit = &vortex_start_xmit;
+ dev->stop = &vortex_close;
+ dev->get_stats = &vortex_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+
+ return 0;
+}
+
+
+static int
+vortex_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ union wn3_config config;
+ int i;
+
+ /* Before initializing select the active media port. */
+ EL3WINDOW(3);
+ if (vp->full_duplex)
+ outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */
+ config.i = inl(ioaddr + Wn3_Config);
+
+ if (vp->media_override != 7) {
+ if (vortex_debug > 1)
+ printk("%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+ /* Find first available media type, starting with 100baseTx. */
+ dev->if_port = 4;
+ while (! (vp->available_media & media_tbl[dev->if_port].mask))
+ dev->if_port = media_tbl[dev->if_port].next;
+
+ if (vortex_debug > 1)
+ printk("%s: Initial media type %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ init_timer(&vp->timer);
+ vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
+ vp->timer.data = (unsigned long)dev;
+ vp->timer.function = &vortex_timer; /* timer handler */
+ add_timer(&vp->timer);
+ } else
+ dev->if_port = vp->default_media;
+
+ config.u.xcvr = dev->if_port;
+ outl(config.i, ioaddr + Wn3_Config);
+
+ if (vortex_debug > 1) {
+ printk("%s: vortex_open() InternalConfig %8.8x.\n",
+ dev->name, config.i);
+ }
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Wait a few ticks for the RxReset command to complete. */
+ for (i = 20; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+ /* Use the now-standard shared IRQ implementation. */
+ if (vp->capabilities == 0x11c7) {
+ /* Corkscrew: Cannot share ISA resources. */
+ if (dev->irq == 0
+ || dev->dma == 0
+ || request_irq(dev->irq, &vortex_interrupt, 0,
+ vp->product_name, dev))
+ return -EAGAIN;
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ } else if (request_irq(dev->irq, &vortex_interrupt, SA_SHIRQ,
+ vp->product_name, dev)) {
+ return -EAGAIN;
+ }
+
+ if (vortex_debug > 1) {
+ EL3WINDOW(4);
+ printk("%s: vortex_open() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+ /* Set the station address and mask in window 2 each time opened. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+
+ if (dev->if_port == 3)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ EL3WINDOW(4);
+ outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ /* ..and on the Boomerang we enable the extra statistics bits. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
+ if (vortex_debug > 2)
+ printk("%s: Filling in the Rx ring.\n", dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ if (i < (RX_RING_SIZE - 1))
+ vp->rx_ring[i].next = virt_to_bus(&vp->rx_ring[i+1]);
+ else
+ vp->rx_ring[i].next = 0;
+ vp->rx_ring[i].status = 0; /* Clear complete bit. */
+ vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000;
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[i].addr = virt_to_bus(skb->tail);
+ }
+ vp->rx_ring[i-1].next = virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
+ outl(virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ vp->cur_tx = vp->dirty_tx = 0;
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+ /* Clear the Tx ring. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = 0;
+ outl(0, ioaddr + DownListPtr);
+ }
+ /* Set receiver mode: presumably accept broadcast and phys addr only. */
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | AdapterFailure|IntReq|StatsFull |
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0),
+ ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete,
+ ioaddr + EL3_CMD);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static void vortex_timer(unsigned long data)
+{
+#ifdef AUTOMEDIA
+ struct device *dev = (struct device *)data;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ int ok = 0;
+
+ if (vortex_debug > 1)
+ printk("%s: Media selection timer tick happened, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ save_flags(flags); cli(); {
+ int old_window = inw(ioaddr + EL3_CMD) >> 13;
+ int media_status;
+ EL3WINDOW(4);
+ media_status = inw(ioaddr + Wn4_Media);
+ switch (dev->if_port) {
+ case 0: case 4: case 5: /* 10baseT, 100baseTX, 100baseFX */
+ if (media_status & Media_LnkBeat) {
+ ok = 1;
+ if (vortex_debug > 1)
+ printk("%s: Media %s has link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ } else if (vortex_debug > 1)
+ printk("%s: Media %s is has no link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+
+ break;
+ default: /* Other media types handled by Tx timeouts. */
+ if (vortex_debug > 1)
+ printk("%s: Media %s is has no indication, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ ok = 1;
+ }
+ if ( ! ok) {
+ union wn3_config config;
+
+ do {
+ dev->if_port = media_tbl[dev->if_port].next;
+ } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
+ if (dev->if_port == 8) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+ if (vortex_debug > 1)
+ printk("%s: Media selection failing, using default %s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ } else {
+ if (vortex_debug > 1)
+ printk("%s: Media selection failed, now trying %s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
+ add_timer(&vp->timer);
+ }
+ outw((media_status & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ EL3WINDOW(3);
+ config.i = inl(ioaddr + Wn3_Config);
+ config.u.xcvr = dev->if_port;
+ outl(config.i, ioaddr + Wn3_Config);
+
+ outw(dev->if_port == 3 ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ }
+ EL3WINDOW(old_window);
+ } restore_flags(flags);
+ if (vortex_debug > 1)
+ printk("%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+#endif /* AUTOMEDIA*/
+ return;
+}
+
+static int
+vortex_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ int i;
+
+ /* Min. wait before assuming a Tx failed == 400ms. */
+
+ if (tickssofar < 400*HZ/1000) /* We probably aren't empty. */
+ return 1;
+ printk("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, inb(ioaddr + TxStatus),
+ inw(ioaddr + EL3_STATUS));
+ /* Slight code bloat to be user friendly. */
+ if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
+ printk("%s: Transmitter encountered 16 collisions -- network"
+ " network cable problem?\n", dev->name);
+#ifndef final_version
+ printk(" Flags; bus-master %d, full %d; dirty %d current %d.\n",
+ vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, vp->cur_tx);
+ printk(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr),
+ &vp->tx_ring[0]);
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(" %d: %p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+ vp->tx_ring[i].length,
+ vp->tx_ring[i].status);
+ }
+#endif
+ /* Issue TX_RESET and TX_START commands. */
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 20; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+ /* dev->tbusy = 0;*/
+ vp->stats.tx_errors++;
+ vp->stats.tx_dropped++;
+ return 0; /* Yes, silently *drop* the packet! */
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ If this ever occurs the queue layer is doing something evil! */
+ if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */
+ /* Calculate the next Tx descriptor entry. */
+ int entry = vp->cur_tx % TX_RING_SIZE;
+ struct boom_tx_desc *prev_entry;
+ unsigned long flags, i;
+
+ if (vp->tx_full) /* No room left in the Tx ring. */
+ return 1;
+ if (vp->cur_tx != 0)
+ prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+ else
+ prev_entry = NULL;
+ if (vortex_debug > 3)
+ printk("%s: Trying to send a packet, Tx index %d.\n",
+ dev->name, vp->cur_tx);
+ /* vp->tx_full = 1; */
+ vp->tx_skbuff[entry] = skb;
+ vp->tx_ring[entry].next = 0;
+ vp->tx_ring[entry].addr = virt_to_bus(skb->data);
+ vp->tx_ring[entry].length = skb->len | 0x80000000;
+ vp->tx_ring[entry].status = skb->len | 0x80000000;
+
+ save_flags(flags);
+ cli();
+ outw(DownStall, ioaddr + EL3_CMD);
+ /* Wait for the stall to complete. */
+ for (i = 20; i >= 0 ; i--)
+ if ( (inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
+ break;
+ if (prev_entry)
+ prev_entry->next = virt_to_bus(&vp->tx_ring[entry]);
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(virt_to_bus(&vp->tx_ring[entry]), ioaddr + DownListPtr);
+ queued_packet++;
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ restore_flags(flags);
+
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
+ vp->tx_full = 1;
+ else { /* Clear previous interrupt enable. */
+ if (prev_entry)
+ prev_entry->status &= ~0x80000000;
+ dev->tbusy = 0;
+ }
+ dev->trans_start = jiffies;
+ return 0;
+ }
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+#ifdef VORTEX_BUS_MASTER
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ outl((int)(skb->data), ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ /* dev->tbusy will be cleared at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb, FREE_WRITE);
+ if (inw(ioaddr + TxFree) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+#else
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_kfree_skb (skb, FREE_WRITE);
+ if (inw(ioaddr + TxFree) > 1536) {
+ dev->tbusy = 0;
+ } else
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+#endif /* bus master */
+
+ dev->trans_start = jiffies;
+
+ /* Clear the Tx status stack. */
+ {
+ short tx_status;
+ int i = 4;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (vortex_debug > 2)
+ printk("%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 20; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void vortex_interrupt IRQ(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* Use the now-standard shared IRQ implementation. */
+ struct device *dev = dev_id;
+ struct vortex_private *lp;
+ int ioaddr, status;
+ int latency;
+ int i = max_interrupt_work;
+
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ latency = inb(ioaddr + Timer);
+ lp = (struct vortex_private *)dev->priv;
+
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (vortex_debug > 4)
+ printk("%s: interrupt, status %4.4x, timer %d.\n", dev->name,
+ status, latency);
+ if ((status & 0xE000) != 0xE000) {
+ static int donedidthis=0;
+ /* Some interrupt controllers store a bogus interrupt from boot-time.
+ Ignore a single early interrupt, but don't hang the machine for
+ other interrupt problems. */
+ if (donedidthis++ > 100) {
+ printk("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
+ dev->name, status, dev->start);
+ FREE_IRQ(dev->irq, dev);
+ }
+ }
+
+ do {
+ if (vortex_debug > 5)
+ printk("%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ vortex_rx(dev);
+
+ if (status & TxAvailable) {
+ if (vortex_debug > 5)
+ printk(" TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ if (status & DownComplete) {
+ unsigned int dirty_tx = lp->dirty_tx;
+
+ while (lp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ if (inl(ioaddr + DownListPtr) ==
+ virt_to_bus(&lp->tx_ring[entry]))
+ break; /* It still hasn't been processed. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+ lp->dirty_tx = dirty_tx;
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
+ lp->tx_full= 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ }
+#ifdef VORTEX_BUS_MASTER
+ if (status & DMADone) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ dev->tbusy = 0;
+ dev_kfree_skb (lp->tx_skb, FREE_WRITE); /* Release the transferred buffer */
+ mark_bh(NET_BH);
+ }
+#endif
+ if (status & UpComplete) {
+ boomerang_rx(dev);
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ }
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts at once. */
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat = 0;
+ if (vortex_debug > 4)
+ printk("%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* DEBUG HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ inw(ioaddr + EL3_STATUS) & StatsFull) {
+ int win, reg;
+ printk("%s: Updating stats failed, disabling stats as an"
+ " interrupt source.\n", dev->name);
+ for (win = 0; win < 8; win++) {
+ EL3WINDOW(win);
+ printk("\n Vortex window %d:", win);
+ for (reg = 0; reg < 16; reg++)
+ printk(" %2.2x", inb(ioaddr+reg));
+ }
+ EL3WINDOW(7);
+ outw(SetIntrEnb | TxAvailable | RxComplete | AdapterFailure
+ | UpComplete | DownComplete | TxComplete,
+ ioaddr + EL3_CMD);
+ DoneDidThat++;
+ }
+ }
+ if (status & AdapterFailure) {
+ /* Adapter failure requires Rx reset and reinit. */
+ outw(RxReset, ioaddr + EL3_CMD);
+ /* Set the Rx filter to the current state. */
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
+ }
+
+ if (--i < 0) {
+ printk("%s: Too much work in interrupt, status %4.4x. "
+ "Disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ break;
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ if (vortex_debug > 4)
+ printk("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+vortex_rx(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (vortex_debug > 5)
+ printk(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (vortex_debug > 2)
+ printk(" Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K! */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = DEV_ALLOC_SKB(pkt_len + 5);
+ if (vortex_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+#if LINUX_VERSION_CODE >= 0x10300
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ skb->len = pkt_len;
+ /* 'skb->data' points to the start of sk_buff data area. */
+ insl(ioaddr + RX_FIFO, skb->data, (pkt_len + 3) >> 2);
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+#endif /* KERNEL_1_3_0 */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (vortex_debug)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ vp->stats.rx_dropped++;
+ /* Wait a limited time to skip this packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+
+ return 0;
+}
+
+static int
+boomerang_rx(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ int ioaddr = dev->base_addr;
+ int rx_status;
+
+ if (vortex_debug > 5)
+ printk(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+ if (vortex_debug > 2)
+ printk(" Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K! */
+ short pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ if (vortex_debug > 4)
+ printk("Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = DEV_ALLOC_SKB(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ bus_to_virt(vp->rx_ring[entry].addr),
+ pkt_len);
+ rx_copy++;
+ } else {
+ void *temp;
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+ /* Remove this checking code for final release. */
+ if (bus_to_virt(vp->rx_ring[entry].addr) != temp)
+ printk("%s: Warning -- the skbuff addresses do not match"
+ " in boomerang_rx: %p vs. %p / %p.\n", dev->name,
+ bus_to_virt(vp->rx_ring[entry].addr),
+ skb->head, temp);
+ rx_nocopy++;
+ }
+#if LINUX_VERSION_CODE > 0x10300
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ skb->len = pkt_len;
+#endif
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->dirty_rx < vp->cur_rx; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(PKT_BUF_SZ);
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+#if LINUX_VERSION_CODE > 0x10300
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[entry].addr = virt_to_bus(skb->tail);
+#else
+ vp->rx_ring[entry].addr = virt_to_bus(skb->data);
+#endif
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ }
+ return 0;
+}
+
+static int
+vortex_close(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (vortex_debug > 1) {
+ printk("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+ printk("%s: vortex close stats: rx_nocopy %d rx_copy %d"
+ " tx_queued %d.\n",
+ dev->name, rx_nocopy, rx_copy, queued_packet);
+ }
+
+ del_timer(&vp->timer);
+
+ /* Turn off statistics ASAP. We update lp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+#ifdef SA_SHIRQ
+ free_irq(dev->irq, dev);
+#else
+ free_irq(dev->irq);
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ outl(0, ioaddr + UpListPtr);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ vp->rx_skbuff[i]->free = 1;
+#endif
+ dev_kfree_skb (vp->rx_skbuff[i], FREE_WRITE);
+ vp->rx_skbuff[i] = 0;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ outl(0, ioaddr + DownListPtr);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (vp->tx_skbuff[i]) {
+ dev_kfree_skb(vp->tx_skbuff[i], FREE_WRITE);
+ vp->tx_skbuff[i] = 0;
+ }
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct enet_statistics *
+vortex_get_stats(struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ unsigned long flags;
+
+ if (dev->start) {
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+ restore_flags(flags);
+ }
+ return &vp->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(int ioaddr, struct device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ vp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ vp->stats.collisions += inb(ioaddr + 3);
+ vp->stats.tx_window_errors += inb(ioaddr + 4);
+ vp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ vp->stats.tx_packets += inb(ioaddr + 6);
+ vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ inw(ioaddr + 10); /* Total Rx and Tx octets. */
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* We change back to window 7 (not 1) with the Vortex. */
+ EL3WINDOW(7);
+ return;
+}
+
+/* This new version of set_rx_mode() supports v1.4 kernels.
+ The Vortex chip has no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. At least
+ the chip has a very clean way to set the mode, unlike many others. */
+static void
+set_rx_mode(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ short new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ if (vortex_debug > 3)
+ printk("%s: Setting promiscuous mode.\n", dev->name);
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+ } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ outw(new_mode, ioaddr + EL3_CMD);
+}
+
+#ifdef MODULE
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_vortex_dev) {
+ next_dev = ((struct vortex_private *)root_vortex_dev->priv)->next_module;
+ if (root_vortex_dev->dma)
+ free_dma(root_vortex_dev->dma);
+ unregister_netdev(root_vortex_dev);
+ outw(TotalReset, root_vortex_dev->base_addr + EL3_CMD);
+ release_region(root_vortex_dev->base_addr, CORKSCREW_TOTAL_SIZE);
+ kfree(root_vortex_dev);
+ root_vortex_dev = next_dev;
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c515.c"
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/3c59x.c b/linux/src/drivers/net/3c59x.c
new file mode 100644
index 0000000..a6b89cd
--- /dev/null
+++ b/linux/src/drivers/net/3c59x.c
@@ -0,0 +1,2648 @@
+/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
+/*
+ Written 1996-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+ Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+ and the EtherLink XL 3c900 and 3c905 cards.
+
+ The original author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates are available at
+ http://www.scyld.com/network/vortex.html
+*/
+
+static const char versionA[] =
+"3c59x.c:v0.99Za 4/17/2003 Donald Becker, becker@scyld.com\n";
+static const char versionB[] =
+" http://www.scyld.com/network/vortex.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc.
+ See media_tbl[] and the web page for the possible types.
+ There is no limit on card count; MAX_UNITS limits only module options. */
+#define MAX_UNITS 8
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+static const int rx_copybreak = 200;
+
+/* Allow setting MTU to a larger size, bypassing the normal Ethernet setup. */
+static const int mtu = 1500;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Cyclones and later have a 64 or 256 element hash table based on the
+ Ethernet CRC. */
+static int multicast_filter_limit = 64;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Do not increase the Tx ring beyond 256.
+ Large receive rings waste memory and confound network buffer limits.
+ These values have been carefully studied: changing them might mask a
+ problem, but it won't fix it.
+ */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
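+
+/* For example, with the sizes above, ring-index arithmetic such as
+       entry = cur_tx % TX_RING_SIZE;
+   compiles to the mask form
+       entry = cur_tx & (TX_RING_SIZE - 1);
+   for the unsigned indices used here, so the modulo in the Rx/Tx hot
+   paths costs a single AND instruction. */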
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. The 1536 value is not
+ a limit, or directly related to MTU, but rather a way to keep a
+ consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
+#include <linux/module.h>
+#include <linux/modversions.h>
+#else
+#include <linux/modversions.h>
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability.
+ Compatibility defines are now in kern_compat.h */
+
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com EtherLink XL (3c590/3c900 series) driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+#ifdef MODULE_PARM_DESC
+MODULE_PARM_DESC(debug, "3c59x message level (0-31)");
+MODULE_PARM_DESC(options, "3c59x force fixed media type");
+MODULE_PARM_DESC(full_duplex,
+ "3c59x set to 1 to force full duplex (deprecated)");
+MODULE_PARM_DESC(rx_copybreak,
+ "3c59x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work,
+ "3c59x maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast address count before switching to Rx-all-multicast");
+#endif
+
+/* Operational parameters that usually are not changed. */
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This is only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+/* Performance and path-coverage information. */
+static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs
+versions of the FastEtherLink cards. The supported product IDs are
+in the pci_tbl[] list.
+
+The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+with the kernel source.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+The EEPROM settings for media type and forced-full-duplex are observed.
+The EEPROM media type should be left at the default "autoselect" unless using
+10base2 or AUI connections which cannot be reliably detected.
+
+III. Driver operation
+
+The 3c59x series use an interface that's very similar to the previous 3c5x9
+series. The primary interface is two programmed-I/O FIFOs, with an
+alternate single-contiguous-region bus-master transfer (see next).
+
+The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3. The first chip version retains a compatible
+programmed-I/O interface that has been removed in 'B' and subsequent board
+revisions.
+
+One extension that is advertised in a very large font is that the adapters
+are capable of being bus masters. On the Vortex chip this capability was
+only for a single contiguous region making it far less useful than the full
+bus master capability. There is a significant performance impact of taking
+an extra interrupt or polling for the completion of each transfer, as well
+as difficulty sharing the single transfer engine between the transmit and
+receive threads. Using DMA transfers is a win only with large blocks or
+with the flawed versions of the Intel Orion motherboard PCI controller.
+
+The Boomerang chip's full-bus-master interface is useful, and has the
+currently-unused advantages over other similar chips that queued transmit
+packets may be reordered and receive buffer groups are associated with a
+single frame.
+
+With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
+Rather than a fixed intermediate receive buffer, this scheme allocates
+full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as
+the copying breakpoint: it trades off the memory wasted by passing the
+full-sized skbuff to the queue layer for every frame against the cost of
+copying small frames into correctly-sized skbuffs.
+
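+A condensed sketch of that decision, as made in boomerang_rx() (details
+such as descriptor byte-swapping are omitted):
+
+    if (pkt_len < rx_copybreak
+        && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+        skb_reserve(skb, 2);
+        memcpy(skb_put(skb, pkt_len),
+               bus_to_virt(vp->rx_ring[entry].addr), pkt_len);
+    } else {
+        skb = vp->rx_skbuff[entry];
+        vp->rx_skbuff[entry] = NULL;
+        skb_put(skb, pkt_len);
+    }
+
+The small-frame path pays one memcpy() but keeps the full-sized skbuff on
+the Rx ring; the large-frame path hands the ring skbuff to the protocol
+layer and a replacement is allocated in the refill loop.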
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+IV. Notes
+
+Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
+3c590, 3c595, and 3c900 boards.
+The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
+the EISA version is called "Demon". According to Terry these names come
+from rides at the local amusement park.
+
+The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes.
+This driver only supports ethernet packets on some kernels because of the
+skbuff allocation limit of 4K.
+*/
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+ runtime register window, window 1, is now always mapped in.
+ The Boomerang size is twice as large as the Vortex -- it has additional
+ bus master control registers. */
+#define VORTEX_SIZE 0x20
+#define BOOMERANG_SIZE 0x40
+#define CYCLONE_SIZE 0x80
+enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=0x804, IS_TORNADO=0x08,
+ HAS_PWR_CTRL=0x10, HAS_MII=0x20, HAS_NWAY=0x40, HAS_CB_FNS=0x80,
+ EEPROM_8BIT=0x200, INVERT_LED_PWR=0x400, MII_XCVR_PWR=0x4000,
+ HAS_V2_TX=0x800, WN0_XCVR_PWR=0x1000,
+};
+/* Base feature sets for the generations. */
+#define FEATURE_BOOMERANG (HAS_MII) /* 905 */
+#define FEATURE_CYCLONE (IS_CYCLONE|HAS_V2_TX) /* 905B */
+#define FEATURE_TORNADO (IS_TORNADO|HAS_NWAY|HAS_V2_TX) /* 905C */
+
+static void *vortex_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int pwr_event(void *dev_instance, int event);
+#ifdef USE_MEM_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#endif
+
+static struct pci_id_info pci_tbl[] = {
+ {"3c590 Vortex 10Mbps", { 0x590010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c595 Vortex 100baseTx", { 0x595010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c595 Vortex 100baseT4", { 0x595110B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c595 Vortex 100base-MII",{ 0x595210B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ /* Change EISA_scan if these move from index 4 and 5. */
+ {"3c592 EISA Vortex", { 0x592010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c597 EISA Vortex", { 0x597010B7, 0xffffffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"Vortex (unknown)", { 0x590010B7, 0xff00ffff },
+ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
+ {"3c900 Boomerang 10baseT", { 0x900010B7, 0xffffffff },
+ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG, },
+ {"3c900 Boomerang 10Mbps Combo", { 0x900110B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG, },
+ {"3c900 Cyclone 10Mbps TPO", { 0x900410B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c900 Cyclone 10Mbps Combo", { 0x900510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c900 Cyclone 10Mbps TPC", { 0x900610B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c900B-FL Cyclone 10base-FL",{ 0x900A10B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c905 Boomerang 100baseTx",{ 0x905010B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
+ {"3c905 Boomerang 100baseT4",{ 0x905110B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
+ {"3c905B Cyclone 100baseTx",{ 0x905510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE|HAS_NWAY, },
+ {"3c905B Cyclone 10/100/BNC",{ 0x905810B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE|HAS_NWAY, },
+ {"3c905B-FX Cyclone 100baseFx",{ 0x905A10B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
+ {"3c905C Tornado",{ 0x920010B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c920 Tornado",{ 0x920110B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c920 series Tornado",{ 0x920010B7, 0xfff0ffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c982 Server Tornado",{ 0x980510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c980 Cyclone",{ 0x980010B7, 0xfff0ffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE|HAS_NWAY, },
+ {"3cSOHO100-TX Hurricane", { 0x764610B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE, },
+ {"3c555 Laptop Hurricane", { 0x505510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE, },
+ {"3c556 Laptop Tornado",{ 0x605510B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO|EEPROM_8BIT, },
+ {"3c556 series Laptop Tornado",{ 0x605510B7, 0xf0ffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO|EEPROM_8BIT, },
+ {"3c1556B-5 mini-PCI",{ 0x605610B7, 0xffffffff, 0x655610b7, 0xffffffff, },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|EEPROM_8BIT|INVERT_LED_PWR|WN0_XCVR_PWR, },
+ {"3c1556B mini-PCI",{ 0x605610B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|EEPROM_8BIT|HAS_CB_FNS|INVERT_LED_PWR|MII_XCVR_PWR, },
+ {"3c1556B series mini-PCI",{ 0x605610B7, 0xf0ffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|EEPROM_8BIT|HAS_CB_FNS|INVERT_LED_PWR|MII_XCVR_PWR, },
+ {"3c575 Boomerang CardBus", { 0x505710B7, 0xffffffff },
+ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, },
+ {"3CCFE575BT Cyclone CardBus",{ 0x515710B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_CYCLONE | HAS_CB_FNS | EEPROM_8BIT | INVERT_LED_PWR, },
+ {"3CCFE575CT Tornado CardBus",{ 0x525710B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_TORNADO|HAS_CB_FNS|EEPROM_8BIT|MII_XCVR_PWR, },
+ {"3CCFE656 Cyclone CardBus",{ 0x656010B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ IS_CYCLONE|HAS_NWAY|HAS_CB_FNS| INVERT_LED_PWR | MII_XCVR_PWR, },
+ {"3CCFE656B Cyclone+Winmodem CardBus",{ 0x656210B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ FEATURE_CYCLONE/*|HAS_NWAY*/ |HAS_CB_FNS|EEPROM_8BIT|INVERT_LED_PWR|MII_XCVR_PWR, },
+ {"3CCFE656C Tornado+Winmodem CardBus",{ 0x656410B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE,
+ (FEATURE_TORNADO & ~HAS_NWAY)|HAS_CB_FNS|EEPROM_8BIT | MII_XCVR_PWR, },
+ {"3c450 HomePNA Tornado",{ 0x450010B7, 0xffffffff },
+ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
+ {"3c575 series CardBus (unknown version)", {0x505710B7, 0xf0ffffff },
+ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
+ {"3Com Boomerang (unknown version)",{ 0x900010B7, 0xff00ffff },
+ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG, },
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info vortex_drv_id = {
+ "vortex", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
+ vortex_probe1, pwr_event };
+
+/* This driver was written to use I/O operations.
+ However, there are performance benefits to using memory operations, so
+ that mode is now an option.
+ Compiling for memory ops turns off EISA support.
+*/
+#ifdef USE_MEM_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+
+ First the windows. There are eight register windows, with the command
+ and status registers available in each.
+ */
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ 11 bits are the parameter, if applicable.
+ Note that 11 parameter bits were fine for ethernet, but the new chip
+ can handle FDDI length frames (~4500 octets) and now parameters count
+ 32-bit 'Dwords' rather than octets. */
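+
+/* For example, the receive-threshold setup done in vortex_open() below,
+       outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+   encodes command 17 in the top five bits and a 384-dword (1536-octet)
+   parameter in the low eleven bits. */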
+
+enum vortex_cmd {
+ TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+ RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+ UpStall = 6<<11, UpUnstall = (6<<11)+1,
+ DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+ RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+ FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+ StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+ StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8,
+ RxMulticastHash = 0x10,
+};
+
+/* Bits in the general status register. */
+enum vortex_status {
+ IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
+ TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+ IntReq = 0x0040, StatsFull = 0x0080,
+ DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+ DMAInProgress = 1<<11, /* DMA controller is still busy.*/
+ CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+ On the Vortex this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+ TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
+ RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
+ TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+ Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+};
+
+/* EEPROM locations. */
+enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+ EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
+ NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
+ DriverTune=13, Checksum=15};
+
+enum Window2 { /* Window 2. */
+ Wn2_ResetOptions=12,
+};
+enum Window3 { /* Window 3: MAC/config bits. */
+ Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+
+enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+enum Window5 {
+ Wn5_TxThreshold = 0, Wn5_RxFilter = 8,
+};
+enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+};
+enum Window7 {
+ /* Bus Master control on Vortex. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+ /* On Cyclone and later, VLAN and PowerMgt control. */
+ Wn7_VLAN_Mask = 0, Wn7_VLAN_EtherType = 4, Wn7_PwrMgmtEvent = 12,
+};
+
+/* Boomerang and Cyclone bus master control registers. */
+enum MasterCtrl {
+ PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+ DownPollRate = 0x2d, TxFreeThreshold = 0x2f,
+ UpPktStatus = 0x30, UpListPtr = 0x38,
+ /* Cyclone+. */
+ TxPktID=0x18, RxPriorityThresh = 0x3c,
+};
+
+/* The Rx and Tx descriptor lists.
+ Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
+ alignment constraint on tx_ring[] and rx_ring[]. */
+#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */
+struct boom_rx_desc {
+ u32 next; /* Last entry points to 0. */
+ s32 status;
+ u32 addr; /* Up to 63 addr/len pairs possible. */
+ s32 length; /* Set LAST_FRAG to indicate last pair. */
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+ RxDComplete=0x00008000, RxDError=0x4000,
+ /* See boomerang_rx() for actual error bits */
+ IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
+ IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
+};
+
+struct boom_tx_desc {
+ u32 next; /* Last entry points to 0. */
+ s32 status; /* bits 0:12 length, others see below. */
+ u32 addr;
+ s32 length;
+};
+
+/* Values for the Tx status entry. */
+enum tx_desc_status {
+ CRCDisable=0x2000, TxIntrDnComplete=0x8000, TxDownComplete=0x10000,
+ AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
+ TxNoRoundup=0x10000000, /* HAS_V2_TX should not word-pad packet. */
+ TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */
+};
+
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20, CapNoTxLength=0x0200, CapPwrMgmt=0x2000 };
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
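+/* With PRIV_ALIGN == 15, vortex_probe1() below rounds the kmalloc()ed
+   private area up to the next 16-byte boundary:
+       vp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+   which also gives the quad-word alignment the descriptor rings at the
+   start of struct vortex_private require. */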
+struct vortex_private {
+ /* The Rx and Tx rings should be quad-word-aligned. */
+ struct boom_rx_desc rx_ring[RX_RING_SIZE];
+ struct boom_tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device *next_module;
+ void *priv_addr;
+ /* Keep the Rx and Tx variables grouped on their own cache lines. */
+ struct boom_rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ struct boom_tx_desc *tx_desc_tail;
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1, restart_tx:1;
+
+ long last_reset;
+ spinlock_t window_lock;
+ struct net_device_stats stats;
+ char *cb_fn_base; /* CardBus function status addr space. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev; /* PCI configuration space information. */
+
+ /* The remainder are related to chip state, mostly media selection. */
+ int multicast_filter_limit;
+ u32 mc_filter[8];
+ int max_interrupt_work;
+ int rx_mode;
+ struct timer_list timer; /* Media selection timer. */
+ int options; /* User-settable misc. driver options. */
+ unsigned int media_override:4, /* Passed-in media type. */
+ default_media:4, /* Read from the EEPROM/Wn3_Config. */
+ full_duplex:1, medialock:1, autoselect:1,
+ bus_master:1, /* Vortex can only do a fragment bus-m. */
+ full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
+ hw_csums:1, /* Has hardware checksums. */
+ restore_intr_mask:1,
+ polling:1;
+ u16 status_enable;
+ u16 intr_enable;
+ u16 available_media; /* From Wn3_Options. */
+ u16 wn3_mac_ctrl; /* Current settings. */
+ u16 capabilities, info1, info2; /* Various, from EEPROM. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+/* The action to take with a media selection timer tick.
+ Note that we deviate from the 3Com order by checking 10base2 before AUI.
+ */
+enum xcvr_types {
+ XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+ XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
+};
+
+static struct media_table {
+ char *name;
+ unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
+ mask:8, /* The transceiver-present bit in Wn3_Config.*/
+ next:8; /* The media type to try next. */
+ int wait; /* Time before we check media status. */
+} media_tbl[] = {
+ { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+ { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+ { "undefined", 0, 0x80, XCVR_10baseT, 10000},
+ { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
+ { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+ { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
+ { "MII", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "undefined", 0, 0x01, XCVR_10baseT, 10000},
+ { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ},
+ { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ },
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+};
+
+#if ! defined(CARDBUS) && ! defined(USE_MEM_OPS)
+static int eisa_scan(struct net_device *dev);
+#endif
+static int vortex_open(struct net_device *dev);
+static void set_media_type(struct net_device *dev);
+static void activate_xcvr(struct net_device *dev);
+static void start_operation(struct net_device *dev);
+static void start_operation1(struct net_device *dev);
+static void mdio_sync(long ioaddr, int bits);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static void mdio_write(long ioaddr, int phy_id, int location, int value);
+static void vortex_timer(unsigned long arg);
+static void vortex_tx_timeout(struct net_device *dev);
+static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int vortex_rx(struct net_device *dev);
+static int boomerang_rx(struct net_device *dev);
+static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int vortex_close(struct net_device *dev);
+static void update_stats(long ioaddr, struct net_device *dev);
+static struct net_device_stats *vortex_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#if defined(NO_PCI)
+#define acpi_set_WOL(dev) do {} while(0);
+#define acpi_wake(pci_dev) do {} while(0);
+#define acpi_set_pwr_state(pci_dev, state) do {} while(0);
+#else
+static void acpi_set_WOL(struct net_device *dev);
+#endif
+
+
+/* A list of all installed Vortex devices, for removing the driver module. */
+static struct net_device *root_vortex_dev = NULL;
+
+
+#if defined(MODULE) && defined(CARDBUS)
+
+#include <pcmcia/driver_ops.h>
+
+static dev_node_t *vortex_attach(dev_locator_t *loc)
+{
+ u32 io, pci_id;
+ u8 bus, devfn, irq;
+ struct net_device *dev;
+ int chip_idx;
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &io);
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &pci_id);
+ printk(KERN_INFO "vortex_attach(bus %d, function %d, device %8.8x)\n",
+ bus, devfn, pci_id);
+ io &= ~3;
+ if (io == 0 || irq == 0) {
+ printk(KERN_ERR "The 3Com CardBus Ethernet interface was not "
+ "assigned an %s.\n" KERN_ERR " It will not be activated.\n",
+ io == 0 ? "I/O address" : "IRQ");
+ return NULL;
+ }
+ for (chip_idx = 0; pci_tbl[chip_idx].id.pci; chip_idx++)
+ if ((pci_id & pci_tbl[chip_idx].id.pci_mask) ==
+ pci_tbl[chip_idx].id.pci)
+ break;
+ if (pci_tbl[chip_idx].id.pci == 0) { /* Compiled out! */
+ printk(KERN_INFO "Unable to match chip type %8.8x in "
+ "vortex_attach().\n", pci_id);
+ return NULL;
+ }
+ dev = vortex_probe1(pci_find_slot(bus, devfn), NULL, io, irq, chip_idx, MAX_UNITS+1);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+ node->major = node->minor = 0;
+ node->next = NULL;
+ MOD_INC_USE_COUNT;
+ return node;
+ }
+ return NULL;
+}
+
+static void vortex_detach(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_DEBUG "vortex_detach(%s)\n", node->dev_name);
+ for (devp = &root_vortex_dev; *devp; devp = next) {
+ next = &((struct vortex_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ struct net_device *dev = *devp;
+ struct vortex_private *vp = dev->priv;
+ if (dev->flags & IFF_UP)
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ unregister_netdev(dev);
+ if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
+ kfree(dev);
+ *devp = *next;
+ kfree(vp->priv_addr);
+ kfree(node);
+ MOD_DEC_USE_COUNT;
+ }
+}
+
+struct driver_operations vortex_ops = {
+ "3c575_cb", vortex_attach, NULL, NULL, vortex_detach
+};
+
+#endif /* Old-style Cardbus module support */
+
+#if defined(MODULE) || (LINUX_VERSION_CODE >= 0x020400)
+
+#if ! defined(MODULE) /* Must be a 2.4 kernel */
+module_init(init_module);
+module_exit(cleanup_module);
+#endif
+
+int init_module(void)
+{
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+#ifdef CARDBUS
+ register_driver(&vortex_ops);
+ return 0;
+#else
+#ifndef USE_MEM_OPS
+ /* This is not quite correct, but having both EISA and PCI cards is unlikely. */
+ if (eisa_scan(0) >= 0)
+ return 0;
+#if defined(NO_PCI)
+ return 0;
+#endif
+#endif
+
+ return pci_drv_register(&vortex_drv_id, NULL);
+#endif
+}
+
+#else
+int tc59x_probe(struct net_device *dev)
+{
+ int retval = -ENODEV;
+
+ /* Allow an EISA-only driver. */
+#if ! defined(NO_PCI)
+ if (pci_drv_register(&vortex_drv_id, dev) >= 0) {
+ retval = 0;
+ dev = 0;
+ }
+#endif
+#ifndef USE_MEM_OPS
+ if (eisa_scan(dev) >= 0)
+ retval = 0;
+#endif
+ if (retval >= 0)
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ return retval;
+}
+#endif /* not MODULE */
+
+#if ! defined(CARDBUS) && ! defined(USE_MEM_OPS)
+static int eisa_scan(struct net_device *dev)
+{
+ int cards_found = 0;
+
+ /* Check the slots of the EISA bus. */
+ if (EISA_bus) {
+ static long ioaddr = 0x1000;
+ for ( ; ioaddr < 0x9000; ioaddr += 0x1000) {
+ int device_id;
+ if (check_region(ioaddr, VORTEX_SIZE))
+ continue;
+ /* Check the standard EISA ID register for an encoded '3Com'. */
+ if (inw(ioaddr + 0xC80) != 0x6d50)
+ continue;
+ /* Check for a product that we support, 3c59{2,7} any rev. */
+ device_id = (inb(ioaddr + 0xC82)<<8) + inb(ioaddr + 0xC83);
+ if ((device_id & 0xFF00) != 0x5900)
+ continue;
+ vortex_probe1(0, dev, ioaddr, inw(ioaddr + 0xC88) >> 12,
+ (device_id & 0xfff0) == 0x5970 ? 5 : 4, cards_found);
+ dev = 0;
+ cards_found++;
+ }
+ }
+
+ return cards_found ? 0 : -ENODEV;
+}
+#endif /* ! Cardbus */
+
+static int do_eeprom_op(long ioaddr, int ee_cmd)
+{
+ int timer;
+
+ outw(ee_cmd, ioaddr + Wn0EepromCmd);
+ /* Wait for the read to take place, worst-case 162 us. */
+ for (timer = 1620; timer >= 0; timer--) {
+ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ break;
+ }
+ return inw(ioaddr + Wn0EepromData);
+}
+
+static void *vortex_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt)
+{
+ struct net_device *dev;
+ struct vortex_private *vp;
+ void *priv_mem;
+ int option;
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ int ee_read_cmd;
+ int drv_flags = pci_tbl[chip_idx].drv_flags;
+ int i;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+#if ! defined(NO_PCI)
+ /* Check the PCI latency value. On the 3c590 series the latency timer
+ must be set to the maximum value to avoid data corruption that occurs
+ when the timer expires during a transfer. This bug exists in the Vortex
+ chip only. */
+ if (pdev) {
+ u8 pci_latency;
+ u8 new_latency = (drv_flags & IS_VORTEX) ? 248 : 32;
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < new_latency) {
+ printk(KERN_INFO "%s: Overriding PCI latency"
+ " timer (CFLT) setting of %d, new value is %d.\n",
+ dev->name, pci_latency, new_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+ }
+ }
+#endif
+
+ printk(KERN_INFO "%s: 3Com %s at 0x%lx, ",
+ dev->name, pci_tbl[chip_idx].name, ioaddr);
+
+ /* Make certain elements, e.g. the descriptor lists, are aligned. */
+ priv_mem = kmalloc(sizeof(*vp) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL) {
+ printk(" INTERFACE MEMORY ALLOCATION FAILURE.\n");
+ return NULL;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->mtu = mtu;
+
+ dev->priv = vp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(vp, 0, sizeof(*vp));
+ vp->priv_addr = priv_mem;
+
+ vp->next_module = root_vortex_dev;
+ root_vortex_dev = dev;
+
+ vp->chip_id = chip_idx;
+ vp->pci_dev = pdev;
+ vp->drv_flags = drv_flags;
+ vp->msg_level = (1 << debug) - 1;
+ vp->rx_copybreak = rx_copybreak;
+ vp->max_interrupt_work = max_interrupt_work;
+ vp->multicast_filter_limit = multicast_filter_limit;
+
+ /* The lower four bits are the media type. */
+ if (dev->mem_start)
+ option = dev->mem_start;
+ else if (find_cnt < MAX_UNITS)
+ option = options[find_cnt];
+ else
+ option = -1;
+
+ if (option >= 0) {
+ vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
+ vp->full_duplex = (option & 0x200) ? 1 : 0;
+ vp->bus_master = (option & 16) ? 1 : 0;
+ } else {
+ vp->media_override = 7;
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ vp->full_duplex = 1;
+
+ vp->options = option;
+
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
+ /* Figure out the size and offset of the EEPROM table.
+ This is complicated by potential discontiguous address bits. */
+
+ /* Locate the opcode bits, 0xC0 or 0x300. */
+ outw(0x5555, ioaddr + Wn0EepromData);
+ ee_read_cmd = do_eeprom_op(ioaddr, 0x80) == 0x5555 ? 0x200 : 0x80;
+ /* Locate the table base for CardBus cards. */
+ if (do_eeprom_op(ioaddr, ee_read_cmd + 0x37) == 0x6d50)
+ ee_read_cmd += 0x30;
+
+ for (i = 0; i < 0x40; i++) {
+ int cmd_and_addr = ee_read_cmd + i;
+ if (ee_read_cmd == 0xB0) { /* Correct for discontinuity. */
+ int offset = 0x30 + i;
+ cmd_and_addr = 0x80 + (offset & 0x3f) + ((offset<<2) & 0x0f00);
+ }
+ eeprom[i] = do_eeprom_op(ioaddr, cmd_and_addr);
+ }
+ for (i = 0; i < 0x18; i++)
+ checksum ^= eeprom[i];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */
+ while (i < 0x21)
+ checksum ^= eeprom[i++];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ }
+ if (checksum != 0x00 && !(drv_flags & IS_TORNADO))
+ printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ printk(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+ if (dev->irq <= 0)
+ printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
+ dev->irq);
+
+#if ! defined(NO_PCI)
+ if (drv_flags & HAS_CB_FNS) {
+ u32 fn_st_addr; /* Cardbus function status space */
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_2, &fn_st_addr);
+ if (fn_st_addr)
+ vp->cb_fn_base = ioremap(fn_st_addr & ~3, 128);
+ printk(KERN_INFO "%s: CardBus functions mapped %8.8x->%p.\n",
+ dev->name, fn_st_addr, vp->cb_fn_base);
+ }
+#endif
+
+ /* Extract our information from the EEPROM data. */
+ vp->info1 = eeprom[13];
+ vp->info2 = eeprom[15];
+ vp->capabilities = eeprom[16];
+
+ if (vp->info1 & 0x8000)
+ vp->full_duplex = 1;
+ if (vp->full_duplex)
+ vp->medialock = 1;
+
+ /* Turn on the transceiver. */
+ activate_xcvr(dev);
+
+ {
+ char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ int i_cfg;
+ EL3WINDOW(3);
+ vp->available_media = inw(ioaddr + Wn3_Options);
+ if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
+ vp->available_media = 0x40;
+ i_cfg = inl(ioaddr + Wn3_Config); /* Internal Configuration */
+ vp->default_media = (i_cfg >> 20) & 15;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG " Internal config register is %8.8x, "
+ "transceivers %#x.\n", i_cfg, inw(ioaddr + Wn3_Options));
+ printk(KERN_INFO " %dK buffer %s Rx:Tx split, %s%s interface.\n",
+ 8 << (i_cfg & 7),
+ ram_split[(i_cfg >> 16) & 3],
+ i_cfg & 0x01000000 ? "autoselect/" : "",
+ vp->default_media > XCVR_ExtMII ? "<invalid transceiver>" :
+ media_tbl[vp->default_media].name);
+ vp->autoselect = i_cfg & 0x01000000 ? 1 : 0;
+ }
+
+ if (vp->media_override != 7) {
+ printk(KERN_INFO " Media override to transceiver type %d (%s).\n",
+ vp->media_override, media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else
+ dev->if_port = vp->default_media;
+
+ if ((vp->available_media & 0x41) || (drv_flags & HAS_NWAY) ||
+ dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int phy, phy_idx = 0;
+ EL3WINDOW(4);
+ mii_preamble_required++;
+ mdio_sync(ioaddr, 32);
+ mdio_read(ioaddr, 24, 1);
+ for (phy = 1; phy <= 32 && phy_idx < sizeof(vp->phys); phy++) {
+ int mii_status, phyx = phy & 0x1f;
+ mii_status = mdio_read(ioaddr, phyx, 1);
+ if ((mii_status & 0xf800) && mii_status != 0xffff) {
+ vp->phys[phy_idx++] = phyx;
+ printk(KERN_INFO " MII transceiver found at address %d,"
+ " status %4x.\n", phyx, mii_status);
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required++;
+ }
+ }
+ mii_preamble_required--;
+ if (phy_idx == 0) {
+ printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n");
+ vp->phys[0] = 24;
+ } else {
+ if (mii_preamble_required == 0 &&
+ mdio_read(ioaddr, vp->phys[0], 1) == 0) {
+ printk(KERN_INFO "%s: MII transceiver has preamble bug.\n",
+ dev->name);
+ mii_preamble_required = 1;
+ }
+ vp->advertising = mdio_read(ioaddr, vp->phys[0], 4);
+ if (vp->full_duplex) {
+ /* Only advertise the FD media types. */
+ vp->advertising &= ~0x02A0;
+ mdio_write(ioaddr, vp->phys[0], 4, vp->advertising);
+ }
+ }
+ } else {
+ /* We will emulate MII management. */
+ vp->phys[0] = 32;
+ }
+
+ if (vp->capabilities & CapBusMaster) {
+ vp->full_bus_master_tx = 1;
+ printk(KERN_INFO" Using bus-master transmits and %s receives.\n",
+ (vp->info2 & 1) ? "early" : "whole-frame" );
+ vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+ }
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+
+ /* The 3c59x-specific entries in the device structure. */
+ dev->open = &vortex_open;
+ dev->hard_start_xmit = &vortex_start_xmit;
+ dev->stop = &vortex_close;
+ dev->get_stats = &vortex_get_stats;
+ dev->do_ioctl = &vortex_ioctl;
+ dev->set_multicast_list = &set_rx_mode;
+
+ return dev;
+}
+
+
+static int vortex_open(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ MOD_INC_USE_COUNT;
+
+ acpi_wake(vp->pci_dev);
+ vp->window_lock = SPIN_LOCK_UNLOCKED;
+ activate_xcvr(dev);
+
+ /* Before initializing select the active media port. */
+ if (vp->media_override != 7) {
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+ if (vp->drv_flags & HAS_NWAY)
+ dev->if_port = XCVR_NWAY;
+ else {
+ /* Find first available media type, starting with 100baseTx. */
+ dev->if_port = XCVR_100baseTx;
+ while (! (vp->available_media & media_tbl[dev->if_port].mask))
+ dev->if_port = media_tbl[dev->if_port].next;
+ }
+ } else
+ dev->if_port = vp->default_media;
+
+ if (! vp->medialock)
+ vp->full_duplex = 0;
+
+ vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+ (vp->bus_master ? DMADone : 0);
+ vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | RxComplete |
+ StatsFull | HostError | TxComplete | IntReq
+ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Initial media type %s %s-duplex.\n",
+ dev->name, media_tbl[dev->if_port].name,
+ vp->full_duplex ? "full":"half");
+
+ set_media_type(dev);
+ start_operation(dev);
+
+ /* Use the now-standard shared IRQ implementation. */
+ if (request_irq(dev->irq, &vortex_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ spin_lock(&vp->window_lock);
+
+ if (vp->msg_level & NETIF_MSG_IFUP) {
+ EL3WINDOW(4);
+ printk(KERN_DEBUG "%s: vortex_open() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 10; i++)
+ inb(ioaddr + i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+ /* ..and on the Boomerang we enable the extra statistics bits. */
+ outw(0x0040, ioaddr + Wn4_NetDiag);
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+#if defined(CONFIG_VLAN)
+ /* If this value is set no MTU adjustment is needed for 802.1Q. */
+ outw(0x8100, ioaddr + Wn7_VLAN_EtherType);
+#endif
+ spin_unlock(&vp->window_lock);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
+ /* Use 1518/+18 if the CRC is transferred. */
+ vp->rx_buf_sz = dev->mtu + 14;
+ if (vp->rx_buf_sz < PKT_BUF_SZ)
+ vp->rx_buf_sz = PKT_BUF_SZ;
+
+ /* Initialize the RxEarly register as recommended. */
+ outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ outl(0x0020, ioaddr + PktStatus);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ vp->rx_ring[i].length = cpu_to_le32(vp->rx_buf_sz | LAST_FRAG);
+ vp->rx_ring[i].status = 0;
+ vp->rx_ring[i].next = virt_to_le32desc(&vp->rx_ring[i+1]);
+ vp->rx_skbuff[i] = 0;
+ }
+ /* Wrap the ring. */
+ vp->rx_head_desc = &vp->rx_ring[0];
+ vp->rx_ring[i-1].next = virt_to_le32desc(&vp->rx_ring[0]);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(vp->rx_buf_sz);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ }
+ outl(virt_to_bus(vp->rx_head_desc), ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ dev->hard_start_xmit = &boomerang_start_xmit;
+ vp->cur_tx = vp->dirty_tx = 0;
+ vp->tx_desc_tail = &vp->tx_ring[TX_RING_SIZE - 1];
+ if (vp->drv_flags & IS_BOOMERANG) {
+ /* Room for a packet, to avoid long DownStall delays. */
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ } else if (vp->drv_flags & HAS_V2_TX)
+ outb(20, ioaddr + DownPollRate);
+
+ /* Clear the Tx ring. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = 0;
+ outl(0, ioaddr + DownListPtr);
+ vp->tx_full = 0;
+ vp->restart_tx = 1;
+ }
+ /* The multicast filter is an ill-considered, write-only design.
+ The semantics are not documented, so we assume but do not rely
+ on the table being cleared with an RxReset.
+ Here we do an explicit clear of the largest known table.
+ */
+ if (vp->drv_flags & HAS_V2_TX)
+ for (i = 0; i < 0x100; i++)
+ outw(SetFilterBit | i, ioaddr + EL3_CMD);
+ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
+
+ /* Set receiver mode: presumably accept b-case and phys addr only. */
+ vp->rx_mode = 0;
+ set_rx_mode(dev);
+
+ start_operation1(dev);
+
+ init_timer(&vp->timer);
+ vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
+ vp->timer.data = (unsigned long)dev;
+ vp->timer.function = &vortex_timer; /* timer handler */
+ add_timer(&vp->timer);
+
+ return 0;
+}
+
+static void set_media_type(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i_cfg;
+
+ EL3WINDOW(3);
+ i_cfg = inl(ioaddr + Wn3_Config);
+ i_cfg &= ~0x00f00000;
+ if (vp->drv_flags & HAS_NWAY)
+ outl(i_cfg | 0x00800000, ioaddr + Wn3_Config);
+ else
+ outl(i_cfg | (dev->if_port << 20), ioaddr + Wn3_Config);
+
+ if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int mii_reg1, mii_reg5;
+ EL3WINDOW(4);
+ /* Read BMSR (reg1) only to clear old status. */
+ mii_reg1 = mdio_read(ioaddr, vp->phys[0], 1);
+ mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
+ if (mii_reg5 == 0xffff || mii_reg5 == 0x0000)
+ ; /* No MII device or no link partner report */
+ else if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
+ || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
+ vp->full_duplex = 1;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
+ " setting %s-duplex.\n", dev->name, vp->phys[0],
+ mii_reg1, mii_reg5, vp->full_duplex ? "full" : "half");
+ EL3WINDOW(3);
+ }
+ if (dev->if_port == XCVR_10base2)
+ /* Start the thinnet transceiver. We should really wait 50ms...*/
+ outw(StartCoax, ioaddr + EL3_CMD);
+ EL3WINDOW(4);
+ if (dev->if_port != XCVR_NWAY) {
+ outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+ }
+ /* Do we require link beat to transmit? */
+ if (vp->info1 & 0x4000)
+ outw(inw(ioaddr + Wn4_Media) & ~Media_Lnk, ioaddr + Wn4_Media);
+
+ /* Set the full-duplex and oversized frame bits. */
+ EL3WINDOW(3);
+
+ vp->wn3_mac_ctrl = vp->full_duplex ? 0x0120 : 0;
+ if (dev->mtu > 1500)
+ vp->wn3_mac_ctrl |= (dev->mtu == 1504 ? 0x0400 : 0x0040);
+ outb(vp->wn3_mac_ctrl, ioaddr + Wn3_MAC_Ctrl);
+
+ if (vp->drv_flags & HAS_V2_TX)
+ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
+}
+
+static void activate_xcvr(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int reset_opts;
+
+ /* Correct some magic bits. */
+ EL3WINDOW(2);
+ reset_opts = inw(ioaddr + Wn2_ResetOptions);
+ if (vp->drv_flags & INVERT_LED_PWR)
+ reset_opts |= 0x0010;
+ if (vp->drv_flags & MII_XCVR_PWR)
+ reset_opts |= 0x4000;
+ outw(reset_opts, ioaddr + Wn2_ResetOptions);
+ if (vp->drv_flags & WN0_XCVR_PWR) {
+ EL3WINDOW(0);
+ outw(0x0900, ioaddr);
+ }
+}
+
+static void start_operation(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (i = 2000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ outw(RxReset | 0x04, ioaddr + EL3_CMD);
+ /* Assume this cleared the filter. */
+ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
+
+ /* Wait a few ticks for the RxReset command to complete. */
+ for (i = 0; i < 200000; i++)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ if (i >= 200 && (vp->msg_level & NETIF_MSG_DRV))
+ printk(KERN_DEBUG "%s: Rx Reset took an unexpectedly long time"
+ " to finish, %d ticks.\n",
+ dev->name, i);
+
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+ /* Handle VLANs and jumbo frames. */
+ if ((vp->drv_flags & HAS_V2_TX) && dev->mtu > 1500) {
+ EL3WINDOW(3);
+ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
+ if (dev->mtu > 2033) {
+ outl(inl(ioaddr + Wn3_Config) | 0x0000C000, ioaddr + Wn3_Config);
+ outw(SetTxStart + (2000>>2), ioaddr + EL3_CMD);
+ }
+ }
+ /* Reset the station address and mask. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+ for (; i < 12; i+=2)
+ outw(0, ioaddr + i);
+ if (vp->drv_flags & IS_BOOMERANG) {
+ /* Room for a packet, to avoid long DownStall delays. */
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ } else if (vp->drv_flags & HAS_V2_TX) {
+ outb(20, ioaddr + DownPollRate);
+ vp->restart_tx = 1;
+ }
+}
+
+static void start_operation1(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (vp->full_bus_master_rx) { /* post-Vortex bus master. */
+ /* Initialize the RxEarly register as recommended. */
+ outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ outl(0x0020, ioaddr + PktStatus);
+ outl(virt_to_bus(&vp->rx_ring[vp->cur_rx % RX_RING_SIZE]),
+ ioaddr + UpListPtr);
+ }
+
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ ioaddr + EL3_CMD);
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ writel(0x8000, vp->cb_fn_base + 4);
+ netif_start_tx_queue(dev);
+}
+
+static void vortex_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+ int ok = 0;
+ int media_status, old_window;
+
+ if (vp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer tick happened, "
+ "%s %s duplex.\n",
+ dev->name, media_tbl[dev->if_port].name,
+ vp->full_duplex ? "full" : "half");
+
+ /* This only works with bus-master (non-3c590) chips. */
+ if (vp->cur_tx - vp->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ /* Check for blocked interrupts. */
+ if (inw(ioaddr + EL3_STATUS) & IntLatch) {
+ /* We have a blocked IRQ line. This should never happen, but
+ we recover as best we can.*/
+ if ( ! vp->polling) {
+ if (jiffies - vp->last_reset > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is physically blocked! "
+ "Falling back to low-rate polling.\n",
+ dev->name, dev->irq);
+ vp->last_reset = jiffies;
+ }
+ vp->polling = 1;
+ }
+ vortex_interrupt(dev->irq, dev, 0);
+ next_tick = jiffies + 2;
+ } else {
+ vortex_tx_timeout(dev);
+ vp->last_reset = jiffies;
+ }
+ }
+
+ disable_irq(dev->irq);
+ old_window = inw(ioaddr + EL3_CMD) >> 13;
+ EL3WINDOW(4);
+ media_status = inw(ioaddr + Wn4_Media);
+ switch (dev->if_port) {
+ case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
+ if (media_status & Media_LnkBeat) {
+ ok = 1;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ } else if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ break;
+ case XCVR_MII: case XCVR_NWAY: {
+ int mii_status = mdio_read(ioaddr, vp->phys[0], 1);
+ int mii_reg5, negotiated, duplex;
+ ok = 1;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
+ dev->name, mii_status);
+ if (vp->medialock)
+ break;
+ if ((mii_status & 0x0004) == 0) {
+ next_tick = 5*HZ;
+ break;
+ }
+ mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
+ negotiated = mii_reg5 & vp->advertising;
+ duplex = (negotiated & 0x0100) || (negotiated & 0x03C0) == 0x0040;
+ if (mii_reg5 == 0xffff || vp->full_duplex == duplex)
+ break;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on "
+ "MII #%d link partner capability of %4.4x.\n",
+ dev->name, vp->full_duplex ? "full" : "half",
+ vp->phys[0], mii_reg5);
+ vp->full_duplex = duplex;
+ /* Set the full-duplex bit. */
+ EL3WINDOW(3);
+ if (duplex)
+ vp->wn3_mac_ctrl |= 0x120;
+ else
+ vp->wn3_mac_ctrl &= ~0x120;
+ outb(vp->wn3_mac_ctrl, ioaddr + Wn3_MAC_Ctrl);
+ break;
+ }
+ default: /* Other media types handled by Tx timeouts. */
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ ok = 1;
+ }
+ if ( ! ok) {
+ int i_cfg;
+
+ do {
+ dev->if_port = media_tbl[dev->if_port].next;
+ } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
+ if (dev->if_port == XCVR_Default) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media selection failing, using default "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ } else {
+ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media selection failed, now trying "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ next_tick = media_tbl[dev->if_port].wait;
+ }
+ outw((media_status & ~(Media_10TP|Media_SQE)) |
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ EL3WINDOW(3);
+ i_cfg = inl(ioaddr + Wn3_Config);
+ i_cfg &= ~0x00f00000;
+ i_cfg |= (dev->if_port << 20);
+ outl(i_cfg, ioaddr + Wn3_Config);
+
+ outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
+ ioaddr + EL3_CMD);
+ }
+ EL3WINDOW(old_window);
+ enable_irq(dev->irq);
+ if (vp->restore_intr_mask)
+ outw(FakeIntr, ioaddr + EL3_CMD);
+
+ if (vp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+ vp->timer.expires = jiffies + next_tick;
+ add_timer(&vp->timer);
+ return;
+}
+
+static void vortex_tx_timeout(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int tx_status = inb(ioaddr + TxStatus);
+ int intr_status = inw(ioaddr + EL3_STATUS);
+ int j;
+
+ printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+ dev->name, tx_status, intr_status);
+ /* Slight code bloat to be user friendly. */
+ if ((tx_status & 0x88) == 0x88)
+ printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
+ " network cable problem?\n", dev->name);
+ if (intr_status & IntLatch) {
+ printk(KERN_ERR "%s: Interrupt posted but not delivered --"
+ " IRQ blocked by another device?\n", dev->name);
+ /* Race condition possible, but we handle a few events. */
+ vortex_interrupt(dev->irq, dev, 0);
+ }
+
+#if ! defined(final_version) && LINUX_VERSION_CODE >= 0x10300
+ if (vp->full_bus_master_tx) {
+ int i;
+ printk(KERN_DEBUG " Flags: bus-master %d full %d dirty %d "
+ "current %d restart_tx %d.\n",
+ vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, vp->cur_tx,
+ vp->restart_tx);
+ printk(KERN_DEBUG " Transmit list %8.8x vs. %p, packet ID %2.2x.\n",
+ (int)inl(ioaddr + DownListPtr),
+ &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE],
+ inb(ioaddr + TxPktID));
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %d: @%p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+ le32_to_cpu(vp->tx_ring[i].length),
+ le32_to_cpu(vp->tx_ring[i].status));
+ }
+ }
+#endif
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 200; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+
+ vp->stats.tx_errors++;
+
+ if (vp->full_bus_master_tx) {
+ if (vp->drv_flags & HAS_V2_TX)
+ outb(20, ioaddr + DownPollRate);
+ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n",
+ dev->name);
+ if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
+ outl(virt_to_bus(&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + DownListPtr);
+ else
+ vp->restart_tx = 1;
+ if (vp->drv_flags & IS_BOOMERANG) {
+ /* Room for a packet, to avoid long DownStall delays. */
+ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ } else {
+ if (dev->mtu > 2033)
+ outw(SetTxStart + (2000>>2), ioaddr + EL3_CMD);
+ }
+
+ if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_QUEUE_LEN - 1)) {
+ vp->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ }
+ } else {
+ netif_unpause_tx_queue(dev);
+ vp->stats.tx_dropped++;
+ }
+
+ /* Issue Tx Enable */
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+}
+
+/*
+ * Handle uncommon interrupt sources. This is a separate routine to minimize
+ * the cache impact.
+ */
+static void
+vortex_error(struct net_device *dev, int status)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int do_tx_reset = 0;
+ int i;
+
+ if (status & TxComplete) { /* Really "TxError" for us. */
+ unsigned char tx_status = inb(ioaddr + TxStatus);
+ /* Presumably a tx-timeout. We must merely re-enable. */
+ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG"%s: Transmit error, Tx status register %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ outb(0, ioaddr + TxStatus);
+ if (tx_status & 0x30)
+ do_tx_reset = 1;
+ else { /* Merely re-enable the transmitter. */
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ }
+ }
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat = 0;
+ if (vp->msg_level & NETIF_MSG_MISC)
+ printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* HACK: Disable statistics as an interrupt source. */
+ /* This occurs when we have the wrong media type! */
+ if (DoneDidThat == 0 &&
+ inw(ioaddr + EL3_STATUS) & StatsFull) {
+ printk(KERN_WARNING "%s: Updating statistics failed, disabling "
+ "stats as an interrupt source.\n", dev->name);
+ EL3WINDOW(5);
+ outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+ EL3WINDOW(7);
+ DoneDidThat++;
+ }
+ }
+ if (status & IntReq) { /* Restore all interrupt sources. */
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
+ vp->restore_intr_mask = 0;
+ }
+ if (status & HostError) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ if (vp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Host error, status %x, FIFO diagnostic "
+ "register %4.4x.\n",
+ dev->name, status, fifo_diag);
+ /* Adapter failure requires Tx/Rx reset and reinit. */
+ if (vp->full_bus_master_tx) {
+ int bus_status = inl(ioaddr + PktStatus);
+ /* 0x80000000 PCI master abort. */
+ /* 0x40000000 PCI target abort. */
+ outw(TotalReset | 0xff, ioaddr + EL3_CMD);
+ for (i = 2000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ if (vp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: PCI bus error, bus status %8.8x, reset "
+ "had %d ticks left.\n",
+ dev->name, bus_status, i);
+ /* Re-enable the receiver. */
+ outw(RxEnable, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ } else if (fifo_diag & 0x0400)
+ do_tx_reset = 1;
+ if (fifo_diag & 0x3000) {
+ outw(RxReset | 7, ioaddr + EL3_CMD);
+ for (i = 200000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ if ((vp->drv_flags & HAS_V2_TX) && dev->mtu > 1500) {
+ EL3WINDOW(3);
+ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
+ }
+ /* Set the Rx filter to the current state. */
+ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
+ vp->rx_mode = 0;
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | HostError, ioaddr + EL3_CMD);
+ }
+ }
+ if (do_tx_reset) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 200; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ }
+
+}
+
+
+static int
+vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ vortex_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Put out the doubleword header... */
+ outl(skb->len, ioaddr + TX_FIFO);
+ if (vp->bus_master) {
+ /* Set the bus-master controller to transfer the packet. */
+ outl(virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+ netif_stop_tx_queue(dev);
+ /* Tx busy will be cleared at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ dev_free_skb(skb);
+ if (inw(ioaddr + TxFree) <= 1536) {
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ }
+
+ dev->trans_start = jiffies;
+
+ /* Clear the Tx status stack. */
+ {
+ int tx_status;
+ int i = 32;
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x30) {
+ int j;
+ outw(TxReset, ioaddr + EL3_CMD);
+ for (j = 200; j >= 0 ; j--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
+ vp->restart_tx = 1;
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+ }
+ return 0;
+}
+
+static int
+boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int entry;
+ struct boom_tx_desc *prev_entry;
+ unsigned long flags;
+ int i;
+
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ vortex_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = vp->cur_tx % TX_RING_SIZE;
+ prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+
+ if (vp->msg_level & NETIF_MSG_TX_QUEUED)
+ printk(KERN_DEBUG "%s: Queuing Tx packet, index %d.\n",
+ dev->name, vp->cur_tx);
+ /* Impossible error. */
+ if (vp->tx_full) {
+ printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
+ dev->name);
+ return 1;
+ }
+ vp->tx_skbuff[entry] = skb;
+ vp->tx_ring[entry].next = 0;
+ vp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+ if (vp->capabilities & CapNoTxLength)
+ vp->tx_ring[entry].status =
+ cpu_to_le32(TxNoRoundup | TxIntrUploaded | (entry << 2));
+ else
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+
+ if (vp->drv_flags & IS_BOOMERANG) {
+ save_flags(flags);
+ cli();
+ outw(DownStall, ioaddr + EL3_CMD);
+ /* Wait for the stall to complete. */
+ for (i = 600; i >= 0 ; i--)
+ if ( (inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
+ break;
+ vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
+ vp->tx_desc_tail = &vp->tx_ring[entry];
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(virt_to_bus(&vp->tx_ring[entry]), ioaddr + DownListPtr);
+ queued_packet++;
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ restore_flags(flags);
+ } else {
+ vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
+ vp->tx_desc_tail = &vp->tx_ring[entry];
+ if (vp->restart_tx) {
+ outl(virt_to_bus(vp->tx_desc_tail), ioaddr + DownListPtr);
+ vp->restart_tx = 0;
+ queued_packet++;
+ }
+ }
+ vp->cur_tx++;
+ if (vp->cur_tx - vp->dirty_tx >= TX_QUEUE_LEN) {
+ vp->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (vp->cur_tx - (volatile unsigned int)vp->dirty_tx
+ < TX_QUEUE_LEN - 2) {
+ vp->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else { /* Clear previous interrupt enable. */
+#if defined(tx_interrupt_mitigation)
+ prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
+#endif
+ netif_unpause_tx_queue(dev); /* Typical path */
+ }
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr;
+ int latency, status;
+ int work_done = vp->max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+ latency = inb(ioaddr + Timer);
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (status == 0xffff)
+ goto handler_exit;
+ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+ dev->name, status, latency);
+ do {
+ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+ vortex_rx(dev);
+ if (status & UpComplete) {
+ outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ boomerang_rx(dev);
+ }
+
+ if (status & TxAvailable) {
+ if (vp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_resume_tx_queue(dev);
+ }
+
+ if (status & DownComplete) {
+ unsigned int dirty_tx = vp->dirty_tx;
+
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ while (vp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int tx_status = le32_to_cpu(vp->tx_ring[entry].status);
+ if (vp->capabilities & CapNoTxLength) {
+ if ( ! (tx_status & TxDownComplete))
+ break;
+ } else if (inl(ioaddr + DownListPtr) ==
+ virt_to_bus(&vp->tx_ring[entry]))
+ break; /* It still hasn't been processed. */
+ if (vp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (vp->tx_skbuff[entry]) {
+ dev_free_skb_irq(vp->tx_skbuff[entry]);
+ vp->tx_skbuff[entry] = 0;
+ }
+ /* vp->stats.tx_packets++; Counted below. */
+ dirty_tx++;
+ }
+ vp->dirty_tx = dirty_tx;
+ /* 4 entry hysteresis before marking the queue non-full. */
+ if (vp->tx_full && (vp->cur_tx - dirty_tx < TX_QUEUE_LEN - 4)) {
+ vp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+ }
+ if (status & DMADone) {
+ if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ /* Release the transferred buffer. */
+ dev_free_skb_irq(vp->tx_skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+ netif_resume_tx_queue(dev);
+ } else /* Interrupt when FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+ }
+ /* Check for all uncommon interrupts at once. */
+ if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
+ if (status == 0xffff)
+ break;
+ vortex_error(dev, status);
+ }
+
+ if (--work_done < 0) {
+ if ((status & (0x7fe - (UpComplete | DownComplete))) == 0) {
+ /* Just ack these and return. */
+ outw(AckIntr | UpComplete | DownComplete, ioaddr + EL3_CMD);
+ } else {
+ printk(KERN_WARNING "%s: Too much work in interrupt, status "
+ "%4.4x. Temporarily disabling functions (%4.4x).\n",
+ dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
+ /* Disable all pending interrupts. */
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ /* The timer will reenable interrupts. */
+ vp->restore_intr_mask = 1;
+ break;
+ }
+ }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ writel(0x8000, vp->cb_fn_base + 4);
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, status);
+handler_exit:
+ return;
+}
+
+static int vortex_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+ if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ } else {
+ /* The packet length: up to 4.5K. */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5);
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ if (vp->bus_master &&
+ ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+ outl(virt_to_bus(skb_put(skb, pkt_len)),
+ ioaddr + Wn7_MasterAddr);
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ outw(StartDMAUp, ioaddr + EL3_CMD);
+ while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
+ ;
+ } else {
+ insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ vp->stats.rx_bytes += pkt_len;
+#endif
+ /* Wait a limited time to go to the next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+ } else if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
+ "size %d.\n", dev->name, pkt_len);
+ }
+ outw(RxDiscard, ioaddr + EL3_CMD);
+ vp->stats.rx_dropped++;
+ /* Wait a limited time to skip this packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ }
+
+ return 0;
+}
+
+static int
+boomerang_rx(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int entry = vp->cur_rx % RX_RING_SIZE;
+ long ioaddr = dev->base_addr;
+ int rx_status;
+ int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In boomerang_rx(), status %4.4x, rx_status "
+ "%8.8x.\n",
+ inw(ioaddr+EL3_STATUS), (int)inl(ioaddr+UpPktStatus));
+ while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
+ if (--rx_work_limit < 0)
+ break;
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+ if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
+ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+ if (rx_error & 0x01) {
+ vp->stats.rx_over_errors++;
+ if (vp->drv_flags & HAS_V2_TX) {
+ int cur_rx_thresh = inb(ioaddr + RxPriorityThresh);
+ if (cur_rx_thresh < 0x20)
+ outb(cur_rx_thresh + 1, ioaddr + RxPriorityThresh);
+ else
+ printk(KERN_WARNING "%s: Excessive PCI latency causing"
+ " packet corruption.\n", dev->name);
+ }
+ }
+ } else {
+ /* The packet length: up to 4.5K. */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check whether the packet is small enough to be worth copying into a
+ freshly allocated, properly sized skbuff instead of passing up the
+ receive ring buffer directly. */
+ if (pkt_len < vp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+ le32desc_to_virt(vp->rx_ring[entry].addr), pkt_len);
+ rx_copy++;
+ } else {
+ void *temp;
+ /* Pass up the skbuff already on the Rx ring. */
+ skb = vp->rx_skbuff[entry];
+ vp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+ /* Remove this checking code for final release. */
+ if (le32desc_to_virt(vp->rx_ring[entry].addr) != temp)
+ printk(KERN_ERR "%s: Warning -- the skbuff addresses do not match"
+ " in boomerang_rx: %p vs. %p.\n", dev->name,
+ bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
+ temp);
+ rx_nocopy++;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ { /* Use hardware checksum info. */
+ int csum_bits = rx_status & 0xee000000;
+ if (csum_bits &&
+ (csum_bits == (IPChksumValid | TCPChksumValid) ||
+ csum_bits == (IPChksumValid | UDPChksumValid))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_csumhits++;
+ }
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ vp->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(vp->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ vp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+ outw(UpUnstall, ioaddr + EL3_CMD);
+ }
+ return 0;
+}
+
+static void
+vortex_down(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Turn off statistics ASAP. We update vp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == XCVR_10base2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+ if (vp->full_bus_master_rx)
+ outl(0, ioaddr + UpListPtr);
+ if (vp->full_bus_master_tx)
+ outl(0, ioaddr + DownListPtr);
+}
+
+static int
+vortex_close(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (vp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+ dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+ printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
+ " tx_queued %d Rx pre-checksummed %d.\n",
+ dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
+ }
+
+ del_timer(&vp->timer);
+ vortex_down(dev);
+ free_irq(dev->irq, dev);
+ outw(TotalReset | 0x34, ioaddr + EL3_CMD);
+
+ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ vp->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = 0;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (vp->tx_skbuff[i]) {
+ dev_free_skb(vp->tx_skbuff[i]);
+ vp->tx_skbuff[i] = 0;
+ }
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct net_device_stats *vortex_get_stats(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ unsigned long flags;
+
+ if (netif_running(dev)) {
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+ restore_flags(flags);
+ }
+ return &vp->stats;
+}
+
+/* Update statistics.
+ Unlike with the EL3 we need not worry about interrupts changing
+ the window setting from underneath us, but we must still guard
+ against a race condition with a StatsUpdate interrupt updating the
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+static void update_stats(long ioaddr, struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int old_window = inw(ioaddr + EL3_CMD);
+
+ if (old_window == 0xffff) /* Chip suspended or ejected. */
+ return;
+ /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ vp->stats.tx_carrier_errors += inb(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */ inb(ioaddr + 2);
+ vp->stats.collisions += inb(ioaddr + 3);
+ vp->stats.tx_window_errors += inb(ioaddr + 4);
+ vp->stats.rx_fifo_errors += inb(ioaddr + 5);
+ vp->stats.tx_packets += inb(ioaddr + 6);
+ vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
+ /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
+ /* Tx deferrals */ inb(ioaddr + 8);
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
+ /* Rx Bytes is unreliable */ inw(ioaddr + 10);
+#if LINUX_VERSION_CODE > 0x020119
+ vp->stats.tx_bytes += inw(ioaddr + 12);
+#else
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+#endif
+ /* New: On the Vortex we must also clear the BadSSD counter. */
+ EL3WINDOW(4);
+ inb(ioaddr + 12);
+
+ /* We change back to window 7 (not 1) with the Vortex. */
+ EL3WINDOW(old_window >> 13);
+ return;
+}
+
+static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+ int phy = vp->phys[0];
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = phy;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ if (data[0] == 32) { /* Emulate MII for 3c59*, 3c900. */
+ data[3] = 0;
+ switch (data[1]) {
+ case 0:
+ if (dev->if_port == XCVR_100baseTx) data[3] |= 0x2000;
+ if (vp->full_duplex) data[3] |= 0x0100;
+ break;
+ case 1:
+ if (vp->available_media & 0x02) data[3] |= 0x6000;
+ if (vp->available_media & 0x08) data[3] |= 0x1800;
+ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ if (inw(ioaddr + Wn4_Media) & Media_LnkBeat) data[3] |= 0x0004;
+ spin_unlock(&vp->window_lock);
+ break;
+ case 2: data[3] = 0x0280; break; /* OUI 00:a0:24 */
+ case 3: data[3] = 0x9000; break;
+ default: break;
+ }
+ return 0;
+ }
+ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ spin_unlock(&vp->window_lock);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == vp->phys[0]) {
+ u16 value = data[2];
+ if (vp->phys[0] == 32) {
+ if (data[1] == 0) {
+ vp->media_override = (value & 0x2000) ?
+ XCVR_100baseTx : XCVR_10baseT;
+ vp->full_duplex = (value & 0x0100) ? 1 : 0;
+ vp->medialock = 1;
+ }
+ return 0;
+ }
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ vp->medialock = (value & 0x9000) ? 0 : 1;
+ if (vp->medialock)
+ vp->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: vp->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ spin_unlock(&vp->window_lock);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = vp->msg_level;
+ data32[1] = vp->multicast_filter_limit;
+ data32[2] = vp->max_interrupt_work;
+ data32[3] = vp->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ vp->msg_level = data32[0];
+ vp->multicast_filter_limit = data32[1];
+ vp->max_interrupt_work = data32[2];
+ vp->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
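+
+/* Usage note (added for clarity, not from the original sources): set_rx_mode()
+   below uses only the low eight bits of this CRC as the index of a multicast
+   hash filter bit, e.g.
+
+	int filter_bit = ether_crc(ETH_ALEN, addr) & 0xff;
+
+   where addr is a six-byte station address. */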
+
+/* Pre-Cyclone chips have no documented multicast filter, so the only
+ multicast setting is to receive all multicast frames. Cyclone and later
+ chips have a write-only table of unknown size.
+ At least the chip has a very clean way to set the other filter modes. */
+static void set_rx_mode(struct net_device *dev)
+{
+ struct vortex_private *vp = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log a net tap. */
+ printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else if ((vp->drv_flags & HAS_V2_TX) &&
+ dev->mc_count < vp->multicast_filter_limit) {
+ struct dev_mc_list *mclist;
+ int i;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int filter_bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0xff;
+ if (test_bit(filter_bit, vp->mc_filter))
+ continue;
+ outw(SetFilterBit | 0x0400 | filter_bit, ioaddr + EL3_CMD);
+ set_bit(filter_bit, vp->mc_filter);
+ }
+
+ new_mode = SetRxFilter|RxStation|RxMulticastHash|RxBroadcast;
+ } else if (dev->mc_count) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+ if (vp->rx_mode != new_mode) {
+ vp->rx_mode = new_mode;
+ outw(new_mode, ioaddr + EL3_CMD);
+ }
+}
+
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
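+
+/* Added reference note: each MII management frame shifted out by mdio_read()
+   and mdio_write() below follows the usual layout of a preamble of 32 ones
+   (sent by mdio_sync() when mii_preamble_required is set), start bits 01,
+   a two-bit opcode (10 = read, 01 = write), the 5-bit PHY address, the
+   5-bit register address, a turnaround, and 16 data bits; the read_cmd and
+   write_cmd constants in those routines encode this frame. */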
+
+/* The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define mdio_delay() inl(mdio_addr)
+
+#define MDIO_SHIFT_CLK 0x01
+#define MDIO_DIR_WRITE 0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ 0x02
+#define MDIO_ENB_IN 0x00
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(long ioaddr, int bits)
+{
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (-- bits >= 0) {
+ outw(MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+ int i;
+ int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ unsigned int retval = 0;
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the read command bits out. */
+ for (i = 14; i >= 0; i--) {
+ int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition and 16 data bits. */
+ for (i = 18; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ return retval & 0x10000 ? 0xffff : retval & 0xffff;
+}
+
+static void mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+ long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(ioaddr, 32);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+ outw(dataval, mdio_addr);
+ mdio_delay();
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Leave the interface idle. */
+ mdio_sync(ioaddr, 32);
+
+ return;
+}
+
+#if ! defined(NO_PCI)
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+static void acpi_set_WOL(struct net_device *dev)
+{
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+ EL3WINDOW(7);
+ outw(2, ioaddr + 0x0c);
+ /* The RxFilter must accept the WOL frames. */
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ /* Change the power state to D3; RxEnable doesn't take effect. */
+ pci_write_config_word(vp->pci_dev, 0xe0, 0x8103);
+}
+#endif
+
+static int pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct vortex_private *np = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ vortex_down(dev);
+ netif_stop_tx_queue(dev);
+ if (np->capabilities & CapPwrMgmt)
+ acpi_set_WOL(dev);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ activate_xcvr(dev);
+ set_media_type(dev);
+ start_operation(dev);
+ np->rx_mode = 0;
+ set_rx_mode(dev);
+ start_operation1(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_vortex_dev; *devp; devp = next) {
+ next = &((struct vortex_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ case DRV_PWR_WakeOn:
+ if ( ! (np->capabilities & CapPwrMgmt))
+ return -1;
+ EL3WINDOW(7);
+ /* Power up on: 1=Downloaded Filter, 2=Magic Packets, 4=Link Status.*/
+ outw(2, ioaddr + 12);
+ /* This RxEnable doesn't take effect if we immediately change to D3. */
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ acpi_set_pwr_state(np->pci_dev, ACPI_D3);
+ break;
+ }
+ return 0;
+}
+
+
+#ifdef MODULE
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&vortex_ops);
+#elif ! defined(NO_PCI)
+ pci_drv_unregister(&vortex_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_vortex_dev) {
+ struct vortex_private *vp=(void *)(root_vortex_dev->priv);
+ unregister_netdev(root_vortex_dev);
+ outw(TotalReset | 0x14, root_vortex_dev->base_addr + EL3_CMD);
+ if (vp->capabilities & CapPwrMgmt)
+ acpi_set_WOL(root_vortex_dev);
+#ifdef USE_MEM_OPS
+ iounmap((char *)root_vortex_dev->base_addr);
+#else
+ release_region(root_vortex_dev->base_addr,
+ pci_tbl[vp->chip_id].io_size);
+#endif
+ next_dev = vp->next_module;
+ if (vp->priv_addr)
+ kfree(vp->priv_addr);
+ kfree(root_vortex_dev);
+ root_vortex_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` 3c59x.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c 3c59x.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c 3c59x.c -o 3c575_cb.o -I/usr/src/pcmcia/include/"
+ * eisa-only-compile: "gcc -DNO_PCI -DMODULE -O6 -c 3c59x.c -o 3c597.o"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/8390.c b/linux/src/drivers/net/8390.c
new file mode 100644
index 0000000..747ccb0
--- /dev/null
+++ b/linux/src/drivers/net/8390.c
@@ -0,0 +1,829 @@
+/* 8390.c: A general NS8390 ethernet driver core for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is the chip-specific code for many 8390-based ethernet adaptors.
+ This is not a complete driver, it must be combined with board-specific
+ code such as ne.c, wd.c, 3c503.c, etc.
+
+ Seeing how at least eight drivers use this code (not counting the
+ PCMCIA ones), it is easy to break some card by what seems like
+ a simple innocent change. Please contact me or Donald if you think
+ you have found something that needs changing. -- PG
+
+
+ Changelog:
+
+ Paul Gortmaker : remove set_bit lock, other cleanups.
+ Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
+ ei_block_input() for eth_io_copy_and_sum().
+ Paul Gortmaker : exchange static int ei_pingpong for a #define,
+ also add better Tx error handling.
+ Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
+
+
+ Sources:
+ The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
+
+ */
+
+static const char *version =
+ "8390.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "8390.h"
+
+/* These are the operational function interfaces to board-specific
+ routines.
+ void reset_8390(struct device *dev)
+ Resets the board associated with DEV, including a hardware reset of
+ the 8390. This is only called when there is a transmit timeout, and
+ it is always followed by 8390_init().
+ void block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+ Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
+ "page" value uses the 8390's 256-byte pages.
+ void get_8390_hdr(struct device *dev, struct e8390_hdr *hdr, int ring_page)
+ Read the 4 byte, page aligned 8390 header. *If* there is a
+ subsequent read, it will be of the rest of the packet.
+ void block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+ Read COUNT bytes from the packet buffer into the skb data area. Start
+ reading from RING_OFFSET, the address as the 8390 sees it. This will always
+ follow the read of the 8390 header.
+*/
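+
+/* Illustrative sketch (not part of the original sources): a board-specific
+   driver is expected to fill in these hooks on its private ei_device before
+   calling into this core, roughly as follows; the my_board_* names are
+   hypothetical placeholders for that board's own routines.
+
+	struct ei_device *ei_local = (struct ei_device *) dev->priv;
+	ei_local->reset_8390   = &my_board_reset_8390;
+	ei_local->block_output = &my_board_block_output;
+	ei_local->block_input  = &my_board_block_input;
+	ei_local->get_8390_hdr = &my_board_get_8390_hdr;
+*/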
+#define ei_reset_8390 (ei_local->reset_8390)
+#define ei_block_output (ei_local->block_output)
+#define ei_block_input (ei_local->block_input)
+#define ei_get_8390_hdr (ei_local->get_8390_hdr)
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef EI_DEBUG
+int ei_debug = EI_DEBUG;
+#else
+int ei_debug = 1;
+#endif
+
+/* Index to functions. */
+static void ei_tx_intr(struct device *dev);
+static void ei_tx_err(struct device *dev);
+static void ei_receive(struct device *dev);
+static void ei_rx_overrun(struct device *dev);
+
+/* Routines generic to NS8390-based boards. */
+static void NS8390_trigger_send(struct device *dev, unsigned int length,
+ int start_page);
+static void set_multicast_list(struct device *dev);
+
+
+/* Open/initialize the board. This routine goes all-out, setting everything
+ up anew at each open, even though many of these registers should only
+ need to be set once at boot.
+ */
+int ei_open(struct device *dev)
+{
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* This can't happen unless somebody forgot to call ethdev_init(). */
+ if (ei_local == NULL) {
+ printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
+ return -ENXIO;
+ }
+
+ irq2dev_map[dev->irq] = dev;
+ NS8390_init(dev, 1);
+ dev->start = 1;
+ ei_local->irqlock = 0;
+ return 0;
+}
+
+/* Opposite of above. Only used when "ifconfig <devname> down" is done. */
+int ei_close(struct device *dev)
+{
+ NS8390_init(dev, 0);
+ dev->start = 0;
+ return 0;
+}
+
+static int ei_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int length, send_length, output_page;
+
+/*
+ * We normally shouldn't be called if dev->tbusy is set, but the
+ * existing code does anyway. If it has been too long since the
+ * last Tx, we assume the board has died and kick it.
+ */
+
+ if (dev->tbusy) { /* Do timeouts, just like the 8003 driver. */
+ int txsr = inb(e8390_base+EN0_TSR), isr;
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < TX_TIMEOUT || (tickssofar < (TX_TIMEOUT+5) && ! (txsr & ENTSR_PTX))) {
+ return 1;
+ }
+ isr = inb(e8390_base+EN0_ISR);
+ if (dev->start == 0) {
+ printk("%s: xmit on stopped card\n", dev->name);
+ return 1;
+ }
+
+ /*
+ * Note that if the Tx posted a TX_ERR interrupt, then the
+ * error will have been handled from the interrupt handler,
+ * and not here.
+ */
+
+ printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
+ dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+
+ if (!isr && !ei_local->stat.tx_packets) {
+ /* The 8390 probably hasn't gotten on the cable yet. */
+ ei_local->interface_num ^= 1; /* Try a different xcvr. */
+ }
+
+ /* Try to restart the card. Perhaps the user has fixed something. */
+ ei_reset_8390(dev);
+ NS8390_init(dev, 1);
+ dev->trans_start = jiffies;
+ }
+
+ /* Sending a NULL skb means some higher layer thinks we've missed a
+ tx-done interrupt. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ length = skb->len;
+ if (skb->len <= 0)
+ return 0;
+
+ /* Mask interrupts from the ethercard. */
+ outb_p(0x00, e8390_base + EN0_IMR);
+ if (dev->interrupt) {
+ printk("%s: Tx request while isr active.\n",dev->name);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ return 1;
+ }
+ ei_local->irqlock = 1;
+
+ send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
+
+#ifdef EI_PINGPONG
+
+ /*
+ * We have two Tx slots available for use. Find the first free
+ * slot, and then perform some sanity checks. With two Tx bufs,
+ * you get very close to transmitting back-to-back packets. With
+ * only one Tx buf, the transmitter sits idle while you reload the
+ * card, leaving a substantial gap between each transmitted packet.
+ */
+
+ if (ei_local->tx1 == 0) {
+ output_page = ei_local->tx_start_page;
+ ei_local->tx1 = send_length;
+ if (ei_debug && ei_local->tx2 > 0)
+ printk("%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ } else if (ei_local->tx2 == 0) {
+ output_page = ei_local->tx_start_page + TX_1X_PAGES;
+ ei_local->tx2 = send_length;
+ if (ei_debug && ei_local->tx1 > 0)
+ printk("%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
+ dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ } else { /* We should never get here. */
+ if (ei_debug)
+ printk("%s: No Tx buffers free! irq=%d tx1=%d tx2=%d last=%d\n",
+ dev->name, dev->interrupt, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ ei_local->irqlock = 0;
+ dev->tbusy = 1;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ return 1;
+ }
+
+ /*
+ * Okay, now upload the packet and trigger a send if the transmitter
+ * isn't already sending. If it is busy, the interrupt handler will
+ * trigger the send later, upon receiving a Tx done interrupt.
+ */
+
+ ei_block_output(dev, length, skb->data, output_page);
+ if (! ei_local->txing) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, output_page);
+ dev->trans_start = jiffies;
+ if (output_page == ei_local->tx_start_page) {
+ ei_local->tx1 = -1;
+ ei_local->lasttx = -1;
+ } else {
+ ei_local->tx2 = -1;
+ ei_local->lasttx = -2;
+ }
+ } else
+ ei_local->txqueue++;
+
+ dev->tbusy = (ei_local->tx1 && ei_local->tx2);
+
+#else /* EI_PINGPONG */
+
+ /*
+ * Only one Tx buffer in use. You need two Tx bufs to come close to
+ * back-to-back transmits. Expect a 20 -> 25% performance hit on
+ * reasonable hardware if you only use one Tx buffer.
+ */
+
+ ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ dev->tbusy = 1;
+
+#endif /* EI_PINGPONG */
+
+ /* Turn 8390 interrupts back on. */
+ ei_local->irqlock = 0;
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the ether interface interrupts. */
+void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int e8390_base;
+ int interrupts, nr_serviced = 0;
+ struct ei_device *ei_local;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ e8390_base = dev->base_addr;
+ ei_local = (struct ei_device *) dev->priv;
+ if (dev->interrupt || ei_local->irqlock) {
+ /* The "irqlock" check is only for testing. */
+ printk(ei_local->irqlock
+ ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
+ : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ dev->name, inb_p(e8390_base + EN0_ISR),
+ inb_p(e8390_base + EN0_IMR));
+ return;
+ }
+
+ dev->interrupt = 1;
+
+ /* Change to page 0 and read the intr status reg. */
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+ if (ei_debug > 3)
+ printk("%s: interrupt(isr=%#2.2x).\n", dev->name,
+ inb_p(e8390_base + EN0_ISR));
+
+ /* !!Assumption!! -- we stay in page 0. Don't break this. */
+ while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
+ && ++nr_serviced < MAX_SERVICE) {
+ if (dev->start == 0) {
+ printk("%s: interrupt from stopped card\n", dev->name);
+ interrupts = 0;
+ break;
+ }
+ if (interrupts & ENISR_OVER) {
+ ei_rx_overrun(dev);
+ } else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
+ /* Got a good (?) packet. */
+ ei_receive(dev);
+ }
+ /* Push the next to-transmit packet through. */
+ if (interrupts & ENISR_TX) {
+ ei_tx_intr(dev);
+ } else if (interrupts & ENISR_TX_ERR) {
+ ei_tx_err(dev);
+ }
+
+ if (interrupts & ENISR_COUNTERS) {
+ ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
+ outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
+ }
+
+ /* Ignore any RDC interrupts that make it back to here. */
+ if (interrupts & ENISR_RDC) {
+ outb_p(ENISR_RDC, e8390_base + EN0_ISR);
+ }
+
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ }
+
+ if (interrupts && ei_debug) {
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
+ if (nr_serviced >= MAX_SERVICE) {
+ printk("%s: Too much work at interrupt, status %#2.2x\n",
+ dev->name, interrupts);
+ outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
+ } else {
+ printk("%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
+ }
+ }
+ dev->interrupt = 0;
+ return;
+}
+
+/*
+ * A transmitter error has happened. Most likely excess collisions (which
+ * is a fairly normal condition). If the error is one where the Tx will
+ * have been aborted, we try and send another one right away, instead of
+ * letting the failed packet sit and collect dust in the Tx buffer. This
+ * is a much better solution as it avoids kernel based Tx timeouts, and
+ * an unnecessary card reset.
+ */
+
+static void ei_tx_err(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ unsigned char txsr = inb_p(e8390_base+EN0_TSR);
+ unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+#ifdef VERBOSE_ERROR_DUMP
+ printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ if (txsr & ENTSR_ABT)
+ printk("excess-collisions ");
+ if (txsr & ENTSR_ND)
+ printk("non-deferral ");
+ if (txsr & ENTSR_CRS)
+ printk("lost-carrier ");
+ if (txsr & ENTSR_FU)
+ printk("FIFO-underrun ");
+ if (txsr & ENTSR_CDH)
+ printk("lost-heartbeat ");
+ printk("\n");
+#endif
+
+ outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
+
+ if (tx_was_aborted)
+ ei_tx_intr(dev);
+
+ /*
+ * Note: NCR reads zero on 16 collisions so we add them
+ * in by hand. Somebody might care...
+ */
+ if (txsr & ENTSR_ABT)
+ ei_local->stat.collisions += 16;
+
+}
+
+/* We have finished a transmit: check for errors and then trigger the next
+ packet to be sent. */
+static void ei_tx_intr(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ int status = inb(e8390_base + EN0_TSR);
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
+
+#ifdef EI_PINGPONG
+
+ /*
+ * There are two Tx buffers, see which one finished, and trigger
+ * the send of another one if it exists.
+ */
+ ei_local->txqueue--;
+ if (ei_local->tx1 < 0) {
+ if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
+ printk("%s: bogus last_tx_buffer %d, tx1=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
+ ei_local->tx1 = 0;
+ dev->tbusy = 0;
+ if (ei_local->tx2 > 0) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
+ dev->trans_start = jiffies;
+ ei_local->tx2 = -1,
+ ei_local->lasttx = 2;
+ } else
+ ei_local->lasttx = 20, ei_local->txing = 0;
+ } else if (ei_local->tx2 < 0) {
+ if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
+ printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
+ ei_local->tx2 = 0;
+ dev->tbusy = 0;
+ if (ei_local->tx1 > 0) {
+ ei_local->txing = 1;
+ NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
+ dev->trans_start = jiffies;
+ ei_local->tx1 = -1;
+ ei_local->lasttx = 1;
+ } else
+ ei_local->lasttx = 10, ei_local->txing = 0;
+ } else
+ printk("%s: unexpected TX-done interrupt, lasttx=%d.\n",
+ dev->name, ei_local->lasttx);
+
+#else /* EI_PINGPONG */
+ /*
+ * Single Tx buffer: mark it free so another packet can be loaded.
+ */
+ ei_local->txing = 0;
+ dev->tbusy = 0;
+#endif
+
+ /* Minimize Tx latency: update the statistics after we restart TXing. */
+ if (status & ENTSR_COL)
+ ei_local->stat.collisions++;
+ if (status & ENTSR_PTX)
+ ei_local->stat.tx_packets++;
+ else {
+ ei_local->stat.tx_errors++;
+ if (status & ENTSR_ABT) ei_local->stat.tx_aborted_errors++;
+ if (status & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
+ if (status & ENTSR_FU) ei_local->stat.tx_fifo_errors++;
+ if (status & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
+ if (status & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+ }
+
+ mark_bh (NET_BH);
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+
+static void ei_receive(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ unsigned char rxing_page, this_frame, next_frame;
+ unsigned short current_offset;
+ int rx_pkt_count = 0;
+ struct e8390_pkt_hdr rx_frame;
+ int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
+
+ while (++rx_pkt_count < 10) {
+ int pkt_len;
+
+ /* Get the rx page (incoming packet pointer). */
+ outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
+ rxing_page = inb_p(e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
+
+ /* Remove one frame from the ring. Boundary is always a page behind. */
+ this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
+ if (this_frame >= ei_local->stop_page)
+ this_frame = ei_local->rx_start_page;
+
+ /* Someday we'll omit the previous, iff we never get this message.
+ (There is at least one clone claimed to have a problem.) */
+ if (ei_debug > 0 && this_frame != ei_local->current_page)
+ printk("%s: mismatched read page pointers %2x vs %2x.\n",
+ dev->name, this_frame, ei_local->current_page);
+
+ if (this_frame == rxing_page) /* Read all the frames? */
+ break; /* Done for now */
+
+ current_offset = this_frame << 8;
+ ei_get_8390_hdr(dev, &rx_frame, this_frame);
+
+ pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
+
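+ /* Each ring page is 256 bytes and the frame is stored together with its
+ 4-byte on-card header, hence the extra ((pkt_len+4)>>8) pages below. */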
+ next_frame = this_frame + 1 + ((pkt_len+4)>>8);
+
+ /* Check for the bogosity warned about in the 3c503 book: the status byte
+ is never written. This happened a lot during testing! This code should
+ be cleaned up someday. */
+ if (rx_frame.next != next_frame
+ && rx_frame.next != next_frame + 1
+ && rx_frame.next != next_frame - num_rx_pages
+ && rx_frame.next != next_frame + 1 - num_rx_pages) {
+ ei_local->current_page = rxing_page;
+ outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
+ ei_local->stat.rx_errors++;
+ continue;
+ }
+
+ if (pkt_len < 60 || pkt_len > 1518) {
+ if (ei_debug)
+ printk("%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
+ dev->name, rx_frame.count, rx_frame.status,
+ rx_frame.next);
+ ei_local->stat.rx_errors++;
+ } else if ((rx_frame.status & 0x0F) == ENRSR_RXOK) {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ if (ei_debug > 1)
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, pkt_len);
+ ei_local->stat.rx_dropped++;
+ break;
+ } else {
+ skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ ei_local->stat.rx_packets++;
+ }
+ } else {
+ int errs = rx_frame.status;
+ if (ei_debug)
+ printk("%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ dev->name, rx_frame.status, rx_frame.next,
+ rx_frame.count);
+ if (errs & ENRSR_FO)
+ ei_local->stat.rx_fifo_errors++;
+ }
+ next_frame = rx_frame.next;
+
+ /* This _should_ never happen: it's here for avoiding bad clones. */
+ if (next_frame >= ei_local->stop_page) {
+ printk("%s: next frame inconsistency, %#2x\n", dev->name,
+ next_frame);
+ next_frame = ei_local->rx_start_page;
+ }
+ ei_local->current_page = next_frame;
+ outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
+ }
+
+ /* We used to also ack ENISR_OVER here, but that would sometimes mask
+ a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
+ outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
+ return;
+}
+
+/*
+ * We have a receiver overrun: we have to kick the 8390 to get it started
+ * again. Problem is that you have to kick it exactly as NS prescribes in
+ * the updated datasheets, or "the NIC may act in an unpredictable manner."
+ * This includes causing "the NIC to defer indefinitely when it is stopped
+ * on a busy network." Ugh.
+ */
+static void ei_rx_overrun(struct device *dev)
+{
+ int e8390_base = dev->base_addr;
+ unsigned long wait_start_time;
+ unsigned char was_txing, must_resend = 0;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /*
+ * Record whether a Tx was in progress and then issue the
+ * stop command.
+ */
+ was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
+
+ if (ei_debug > 1)
+ printk("%s: Receiver overrun.\n", dev->name);
+ ei_local->stat.rx_over_errors++;
+
+ /*
+ * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
+ * Early datasheets said to poll the reset bit, but now they say that
+ * it "is not a reliable indicator and subsequently should be ignored."
+ * We wait at least 10ms.
+ */
+ wait_start_time = jiffies;
+ while (jiffies - wait_start_time <= 1*HZ/100)
+ barrier();
+
+ /*
+ * Reset RBCR[01] back to zero as per magic incantation.
+ */
+ outb_p(0x00, e8390_base+EN0_RCNTLO);
+ outb_p(0x00, e8390_base+EN0_RCNTHI);
+
+ /*
+ * See if any Tx was interrupted or not. According to NS, this
+ * step is vital, and skipping it will cause no end of havoc.
+ */
+ if (was_txing) {
+ unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
+ if (!tx_completed) must_resend = 1;
+ }
+
+ /*
+ * Have to enter loopback mode and then restart the NIC before
+ * you are allowed to slurp packets up off the ring.
+ */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
+
+ /*
+ * Clear the Rx ring of all the debris, and ack the interrupt.
+ */
+ ei_receive(dev);
+ outb_p(ENISR_OVER, e8390_base+EN0_ISR);
+
+ /*
+ * Leave loopback mode, and resend any packet that got stopped.
+ */
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
+ if (must_resend)
+ outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+
+}
+
+static struct enet_statistics *get_stats(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+
+ /* If the card is stopped, just return the present stats. */
+ if (dev->start == 0) return &ei_local->stat;
+
+ /* Read the counter registers, assuming we are in page 0. */
+ ei_local->stat.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0);
+ ei_local->stat.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1);
+ ei_local->stat.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2);
+
+ return &ei_local->stat;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if(dev->flags&IFF_PROMISC)
+ {
+ outb_p(E8390_RXCONFIG | 0x18, ioaddr + EN0_RXCR);
+ }
+ else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ {
+ /* The multicast-accept list is initialized to accept-all, and we
+ rely on higher-level filtering for now. */
+ outb_p(E8390_RXCONFIG | 0x08, ioaddr + EN0_RXCR);
+ }
+ else
+ outb_p(E8390_RXCONFIG, ioaddr + EN0_RXCR);
+}
+
+/* Initialize the rest of the 8390 device structure. */
+int ethdev_init(struct device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s", version);
+
+ if (dev->priv == NULL) {
+ dev->priv = kmalloc(sizeof(struct ei_device), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct ei_device));
+ }
+
+ dev->hard_start_xmit = &ei_start_xmit;
+ dev->get_stats = get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ ether_setup(dev);
+
+ return 0;
+}
+
+
+/* This page of functions should be 8390 generic */
+/* Follow National Semi's recommendations for initializing the "NIC". */
+void NS8390_init(struct device *dev, int startp)
+{
+ int e8390_base = dev->base_addr;
+ struct ei_device *ei_local = (struct ei_device *) dev->priv;
+ int i;
+ int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
+ unsigned long flags;
+
+ /* Follow National Semi's recommendations for initing the DP83902. */
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base); /* 0x21 */
+ outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
+ /* Clear the remote byte count registers. */
+ outb_p(0x00, e8390_base + EN0_RCNTLO);
+ outb_p(0x00, e8390_base + EN0_RCNTHI);
+ /* Set to monitor and loopback mode -- this is vital! */
+ outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
+ outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
+ /* Set the transmit page and receive ring. */
+ outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
+ ei_local->tx1 = ei_local->tx2 = 0;
+ outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
+ outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f, NS says 0x26 */
+ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
+ outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
+ /* Clear the pending interrupts and mask. */
+ outb_p(0xFF, e8390_base + EN0_ISR);
+ outb_p(0x00, e8390_base + EN0_IMR);
+
+ /* Copy the station address into the DS8390 registers,
+ and set the multicast hash bitmap to receive all multicasts. */
+ save_flags(flags);
+ cli();
+ outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base); /* 0x61 */
+ for(i = 0; i < 6; i++) {
+ outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS + i);
+ }
+ /* Initialize the multicast list to accept-all. If we enable multicast
+ the higher levels can do the filtering. */
+ for(i = 0; i < 8; i++)
+ outb_p(0xff, e8390_base + EN1_MULT + i);
+
+ outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base);
+ restore_flags(flags);
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ ei_local->tx1 = ei_local->tx2 = 0;
+ ei_local->txing = 0;
+ if (startp) {
+ outb_p(0xff, e8390_base + EN0_ISR);
+ outb_p(ENISR_ALL, e8390_base + EN0_IMR);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base);
+ outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
+ /* 3c503 TechMan says rxconfig only after the NIC is started. */
+ outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
+ dev->set_multicast_list(dev); /* Get the multicast status right if this
+ was a reset. */
+ }
+ return;
+}
+
+/* Trigger a transmit start, assuming the length is valid. */
+static void NS8390_trigger_send(struct device *dev, unsigned int length,
+ int start_page)
+{
+ int e8390_base = dev->base_addr;
+
+ outb_p(E8390_NODMA+E8390_PAGE0, e8390_base);
+
+ if (inb_p(e8390_base) & E8390_TRANS) {
+ printk("%s: trigger_send() called with the transmitter busy.\n",
+ dev->name);
+ return;
+ }
+ outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
+ outb_p(length >> 8, e8390_base + EN0_TCNTHI);
+ outb_p(start_page, e8390_base + EN0_TPSR);
+ outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base);
+ return;
+}
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 8390.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/8390.h b/linux/src/drivers/net/8390.h
new file mode 100644
index 0000000..9cc0ddc
--- /dev/null
+++ b/linux/src/drivers/net/8390.h
@@ -0,0 +1,175 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+
+#define TX_2X_PAGES 12
+#define TX_1X_PAGES 6
+
+/* Should always use two Tx slots to get back-to-back transmits. */
+#define EI_PINGPONG
+
+#ifdef EI_PINGPONG
+#define TX_PAGES TX_2X_PAGES
+#else
+#define TX_PAGES TX_1X_PAGES
+#endif
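+/* Pages are 256 bytes, so EI_PINGPONG reserves two 6-page (1536-byte) Tx
+   slots -- each large enough for a maximum 1518-byte Ethernet frame -- while
+   the single-buffer build reserves just one. */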
+
+#define ETHER_ADDR_LEN 6
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
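+/* Note that 'count' includes this 4-byte header, so the payload length is
+   count - sizeof(struct e8390_pkt_hdr), as computed in ei_receive(). */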
+
+/* From 8390.c */
+extern int ei_debug;
+extern struct sigaction ei_sigaction;
+
+extern int ethif_init(struct device *dev);
+extern int ethdev_init(struct device *dev);
+extern void NS8390_init(struct device *dev, int startp);
+extern int ei_open(struct device *dev);
+extern int ei_close(struct device *dev);
+extern void ei_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+#ifndef HAVE_AUTOIRQ
+/* From auto_irq.c */
+extern struct device *irq2dev_map[16];
+extern int autoirq_setup(int waittime);
+extern int autoirq_report(int waittime);
+#endif
+
+/* Most of these entries should be in 'struct device' (or most of the
+ things in there should be here!) */
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct device *);
+ void (*get_8390_hdr)(struct device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct device *, int, const unsigned char *, int);
+ void (*block_input)(struct device *, int, struct sk_buff *, int);
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use. */
+ unsigned char txqueue; /* Tx Packet buffer queue length. */
+ short tx1, tx2; /* Packet lengths for ping-pong tx. */
+ short lasttx; /* Alpha version consistency check. */
+ unsigned char reg0; /* Register '0' in a WD8013 */
+ unsigned char reg5; /* Register '5' in a WD8013 */
+ unsigned char saved_irq; /* Original dev->irq value. */
+ /* The new statistics table. */
+ struct enet_statistics stat;
+};
+
+/* The maximum number of 8390 interrupt service routines called per IRQ. */
+#define MAX_SERVICE 12
+
+/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
+#define TX_TIMEOUT (20*HZ/100)
+
+#define ei_status (*(struct ei_device *)(dev->priv))
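+/* Convenience alias for the ei_device hanging off dev->priv; board drivers
+   such as ac3200.c use it to fill in tx_start_page, stop_page, and the
+   block I/O hooks. */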
+
+/* Some generic ethernet register configurations. */
+#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
+#define E8390_RX_IRQ_MASK 0x5
+#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast, errors */
+#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
+#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
+#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
+
+/* Register accessed at EN_CMD, the 8390 base addr. */
+#define E8390_STOP 0x01 /* Stop and reset the chip */
+#define E8390_START 0x02 /* Start the chip, clear reset */
+#define E8390_TRANS 0x04 /* Transmit a frame */
+#define E8390_RREAD 0x08 /* Remote read */
+#define E8390_RWRITE 0x10 /* Remote write */
+#define E8390_NODMA 0x20 /* Remote DMA */
+#define E8390_PAGE0 0x00 /* Select page chip registers */
+#define E8390_PAGE1 0x40 /* using the two high-order bits */
+#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
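+/* Values written to E8390_CMD are sums of the bits above, e.g.
+   E8390_NODMA + E8390_PAGE1 + E8390_STOP == 0x61 selects register page 1
+   with the chip stopped, as done in NS8390_init(). */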
+
+#define E8390_CMD 0x00 /* The command register (for all pages) */
+/* Page 0 register offsets. */
+#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */
+#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */
+#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */
+#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */
+#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */
+#define EN0_TSR 0x04 /* Transmit status reg RD */
+#define EN0_TPSR 0x04 /* Transmit starting page WR */
+#define EN0_NCR 0x05 /* Number of collision reg RD */
+#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */
+#define EN0_FIFO 0x06 /* FIFO RD */
+#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */
+#define EN0_ISR 0x07 /* Interrupt status reg RD WR */
+#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */
+#define EN0_RSARLO 0x08 /* Remote start address reg 0 */
+#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */
+#define EN0_RSARHI 0x09 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */
+#define EN0_RSR 0x0c /* rx status reg RD */
+#define EN0_RXCR 0x0c /* RX configuration reg WR */
+#define EN0_TXCR 0x0d /* TX configuration reg WR */
+#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */
+#define EN0_DCFG 0x0e /* Data configuration reg WR */
+#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */
+#define EN0_IMR 0x0f /* Interrupt mask reg WR */
+#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */
+
+/* Bits in EN0_ISR - Interrupt status register */
+#define ENISR_RX 0x01 /* Receiver, no error */
+#define ENISR_TX 0x02 /* Transmitter, no error */
+#define ENISR_RX_ERR 0x04 /* Receiver, with error */
+#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
+#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
+#define ENISR_COUNTERS 0x20 /* Counters need emptying */
+#define ENISR_RDC 0x40 /* remote dma complete */
+#define ENISR_RESET 0x80 /* Reset completed */
+#define ENISR_ALL 0x3f /* Interrupts we will enable */
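+/* 0x3f covers the six sources above (RX, TX, RX_ERR, TX_ERR, OVER,
+   COUNTERS); the RDC and RESET interrupts are not enabled. */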
+
+/* Bits in EN0_DCFG - Data config register */
+#define ENDCFG_WTS 0x01 /* word transfer mode selection */
+
+/* Page 1 register offsets. */
+#define EN1_PHYS 0x01 /* This board's physical enet addr RD WR */
+#define EN1_CURPAG 0x07 /* Current memory page RD WR */
+#define EN1_MULT 0x08 /* Multicast filter mask array (8 bytes) RD WR */
+
+/* Bits in received packet status byte and EN0_RSR*/
+#define ENRSR_RXOK 0x01 /* Received a good packet */
+#define ENRSR_CRC 0x02 /* CRC error */
+#define ENRSR_FAE 0x04 /* frame alignment error */
+#define ENRSR_FO 0x08 /* FIFO overrun */
+#define ENRSR_MPA 0x10 /* missed pkt */
+#define ENRSR_PHY 0x20 /* physical/multicast address */
+#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
+#define ENRSR_DEF 0x80 /* deferring */
+
+/* Transmitted packet status, EN0_TSR. */
+#define ENTSR_PTX 0x01 /* Packet transmitted without error */
+#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
+#define ENTSR_COL 0x04 /* The transmit collided at least once. */
+#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
+#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
+#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
+#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
+#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
+
+#endif /* _8390_h */
diff --git a/linux/src/drivers/net/Space.c b/linux/src/drivers/net/Space.c
new file mode 100644
index 0000000..083cdeb
--- /dev/null
+++ b/linux/src/drivers/net/Space.c
@@ -0,0 +1,541 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Holds initial configuration information for devices.
+ *
+ * NOTE: This file is a nice idea, but its current format does not work
+ * well for drivers that support multiple units, like the SLIP
+ * driver. We should actually have only one pointer to a driver
+ * here, with the driver knowing how many units it supports.
+ * Currently, the SLIP driver abuses the "base_addr" integer
+ * field of the 'device' structure to store the unit number...
+ * -FvK
+ *
+ * Version: @(#)Space.c 1.0.8 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald J. Becker, <becker@super.org>
+ *
+ * FIXME:
+ * Sort the device chain fastest first.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+
+#define NEXT_DEV NULL
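+
+/* Each configured device below is chained onto the probe list by redefining
+   NEXT_DEV after its entry; dev_base at the end of this file points at the
+   final head of the chain (the loopback device). */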
+
+
+/* A unified ethernet device probe. This is the easiest way to have every
+ ethernet adaptor have the name "eth[0123...]".
+ */
+
+extern int tulip_probe(struct device *dev);
+extern int hp100_probe(struct device *dev);
+extern int ultra_probe(struct device *dev);
+extern int ultra32_probe(struct device *dev);
+extern int wd_probe(struct device *dev);
+extern int el2_probe(struct device *dev);
+extern int ne_probe(struct device *dev);
+extern int ne2k_pci_probe(struct device *dev);
+extern int hp_probe(struct device *dev);
+extern int hp_plus_probe(struct device *dev);
+extern int znet_probe(struct device *);
+extern int express_probe(struct device *);
+extern int eepro_probe(struct device *);
+extern int el3_probe(struct device *);
+extern int at1500_probe(struct device *);
+extern int at1700_probe(struct device *);
+extern int fmv18x_probe(struct device *);
+extern int eth16i_probe(struct device *);
+extern int depca_probe(struct device *);
+extern int apricot_probe(struct device *);
+extern int ewrk3_probe(struct device *);
+extern int de4x5_probe(struct device *);
+extern int el1_probe(struct device *);
+extern int via_rhine_probe(struct device *);
+#if defined(CONFIG_WAVELAN)
+extern int wavelan_probe(struct device *);
+#endif /* defined(CONFIG_WAVELAN) */
+extern int el16_probe(struct device *);
+extern int elplus_probe(struct device *);
+extern int ac3200_probe(struct device *);
+extern int e2100_probe(struct device *);
+extern int ni52_probe(struct device *);
+extern int ni65_probe(struct device *);
+extern int SK_init(struct device *);
+extern int seeq8005_probe(struct device *);
+extern int tc59x_probe(struct device *);
+extern int dgrs_probe(struct device *);
+extern int smc_init( struct device * );
+extern int sparc_lance_probe(struct device *);
+extern int atarilance_probe(struct device *);
+extern int a2065_probe(struct device *);
+extern int ariadne_probe(struct device *);
+extern int hydra_probe(struct device *);
+extern int yellowfin_probe(struct device *);
+extern int eepro100_probe(struct device *);
+extern int epic100_probe(struct device *);
+extern int rtl8139_probe(struct device *);
+extern int tlan_probe(struct device *);
+extern int isa515_probe(struct device *);
+extern int pcnet32_probe(struct device *);
+extern int lance_probe(struct device *);
+/* Detachable devices ("pocket adaptors") */
+extern int atp_init(struct device *);
+extern int de600_probe(struct device *);
+extern int de620_probe(struct device *);
+/* The shaper hook */
+extern int shaper_probe(struct device *);
+/* Red Creek PCI hook */
+extern int rcpci_probe(struct device *);
+
+static int
+ethif_probe(struct device *dev)
+{
+ u_long base_addr = dev->base_addr;
+
+ if ((base_addr == 0xffe0) || (base_addr == 1))
+ return 1; /* ENXIO */
+
+ if (1
+ /* All PCI probes are safe, and thus should be first. */
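+ /* Each probe returns 0 on success, which makes the && chain false and
+ skips the remaining drivers; only if every probe fails does the chain
+ complete and ethif_probe() return 1. */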
+#ifdef CONFIG_DE4X5 /* DEC DE425, DE434, DE435 adapters */
+ && de4x5_probe(dev)
+#endif
+#ifdef CONFIG_DGRS
+ && dgrs_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO100B /* Intel EtherExpress Pro100B */
+ && eepro100_probe(dev)
+#endif
+#ifdef CONFIG_EPIC
+ && epic100_probe(dev)
+#endif
+#if defined(CONFIG_HP100)
+ && hp100_probe(dev)
+#endif
+#if defined(CONFIG_NE2K_PCI)
+ && ne2k_pci_probe(dev)
+#endif
+#ifdef CONFIG_PCNET32
+ && pcnet32_probe(dev)
+#endif
+#ifdef CONFIG_RTL8139
+ && rtl8139_probe(dev)
+#endif
+#if defined(CONFIG_VIA_RHINE)
+ && via_rhine_probe(dev)
+#endif
+#if defined(CONFIG_VORTEX)
+ && tc59x_probe(dev)
+#endif
+#if defined(CONFIG_DEC_ELCP)
+ && tulip_probe(dev)
+#endif
+#ifdef CONFIG_YELLOWFIN
+ && yellowfin_probe(dev)
+#endif
+ /* Next mostly-safe EISA-only drivers. */
+#ifdef CONFIG_AC3200 /* Ansel Communications EISA 3200. */
+ && ac3200_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA32)
+ && ultra32_probe(dev)
+#endif
+ /* Third, sensitive ISA boards. */
+#ifdef CONFIG_AT1700
+ && at1700_probe(dev)
+#endif
+#if defined(CONFIG_ULTRA)
+ && ultra_probe(dev)
+#endif
+#if defined(CONFIG_SMC9194)
+ && smc_init(dev)
+#endif
+#if defined(CONFIG_WD80x3)
+ && wd_probe(dev)
+#endif
+#if defined(CONFIG_EL2) /* 3c503 */
+ && el2_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN)
+ && hp_probe(dev)
+#endif
+#if defined(CONFIG_HPLAN_PLUS)
+ && hp_plus_probe(dev)
+#endif
+#if defined(CONFIG_SEEQ8005)
+ && seeq8005_probe(dev)
+#endif
+#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
+ && e2100_probe(dev)
+#endif
+#if defined(CONFIG_NE2000)
+ && ne_probe(dev)
+#endif
+#ifdef CONFIG_AT1500
+ && at1500_probe(dev)
+#endif
+#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */
+ && fmv18x_probe(dev)
+#endif
+#ifdef CONFIG_ETH16I
+ && eth16i_probe(dev) /* ICL EtherTeam 16i/32 */
+#endif
+#ifdef CONFIG_EL3 /* 3c509 */
+ && el3_probe(dev)
+#endif
+#ifdef CONFIG_3C515 /* 3c515 */
+ && tc515_probe(dev)
+#endif
+#ifdef CONFIG_ZNET /* Zenith Z-Note and some IBM Thinkpads. */
+ && znet_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
+ && express_probe(dev)
+#endif
+#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
+ && eepro_probe(dev)
+#endif
+#ifdef CONFIG_DEPCA /* DEC DEPCA */
+ && depca_probe(dev)
+#endif
+#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
+ && ewrk3_probe(dev)
+#endif
+#ifdef CONFIG_APRICOT /* Apricot I82596 */
+ && apricot_probe(dev)
+#endif
+#ifdef CONFIG_EL1 /* 3c501 */
+ && el1_probe(dev)
+#endif
+#if defined(CONFIG_WAVELAN) /* WaveLAN */
+ && wavelan_probe(dev)
+#endif /* defined(CONFIG_WAVELAN) */
+#ifdef CONFIG_EL16 /* 3c507 */
+ && el16_probe(dev)
+#endif
+#ifdef CONFIG_ELPLUS /* 3c505 */
+ && elplus_probe(dev)
+#endif
+#ifdef CONFIG_DE600 /* D-Link DE-600 adapter */
+ && de600_probe(dev)
+#endif
+#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
+ && de620_probe(dev)
+#endif
+#if defined(CONFIG_SK_G16)
+ && SK_init(dev)
+#endif
+#ifdef CONFIG_NI52
+ && ni52_probe(dev)
+#endif
+#ifdef CONFIG_NI65
+ && ni65_probe(dev)
+#endif
+#ifdef CONFIG_LANCE /* ISA LANCE boards */
+ && lance_probe(dev)
+#endif
+#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
+ && atarilance_probe(dev)
+#endif
+#ifdef CONFIG_A2065 /* Commodore/Ameristar A2065 Ethernet Board */
+ && a2065_probe(dev)
+#endif
+#ifdef CONFIG_ARIADNE /* Village Tronic Ariadne Ethernet Board */
+ && ariadne_probe(dev)
+#endif
+#ifdef CONFIG_HYDRA /* Hydra Systems Amiganet Ethernet board */
+ && hydra_probe(dev)
+#endif
+#ifdef CONFIG_SUNLANCE
+ && sparc_lance_probe(dev)
+#endif
+#ifdef CONFIG_TLAN
+ && tlan_probe(dev)
+#endif
+#ifdef CONFIG_LANCE
+ && lance_probe(dev)
+#endif
+ && 1 ) {
+ return 1; /* -ENODEV or -EAGAIN would be more accurate. */
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SDLA
+ extern int sdla_init(struct device *);
+ static struct device sdla0_dev = { "sdla0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, sdla_init, };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&sdla0_dev)
+#endif
+
+/* Run-time ATtachable (Pocket) devices have a different (not "eth#") name. */
+#ifdef CONFIG_ATP /* AT-LAN-TEC (RealTek) pocket adaptor. */
+static struct device atp_dev = {
+ "atp0", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, atp_init, /* ... */ };
+# undef NEXT_DEV
+# define NEXT_DEV (&atp_dev)
+#endif
+
+#ifdef CONFIG_ARCNET
+ extern int arcnet_probe(struct device *dev);
+ static struct device arcnet_dev = {
+ "arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, };
+# undef NEXT_DEV
+# define NEXT_DEV (&arcnet_dev)
+#endif
+
+/* The first device defaults to I/O base '0', which means autoprobe. */
+#ifndef ETH0_ADDR
+# define ETH0_ADDR 0
+#endif
+#ifndef ETH0_IRQ
+# define ETH0_IRQ 0
+#endif
+/* "eth0" defaults to autoprobe (== 0), other use a base of 0xffe0 (== -0x20),
+ which means "don't probe". These entries exist to only to provide empty
+ slots which may be enabled at boot-time. */
+
+static struct device eth7_dev = {
+ "eth7", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, NEXT_DEV, ethif_probe };
+static struct device eth6_dev = {
+ "eth6", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth7_dev, ethif_probe };
+static struct device eth5_dev = {
+ "eth5", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth6_dev, ethif_probe };
+static struct device eth4_dev = {
+ "eth4", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth5_dev, ethif_probe };
+static struct device eth3_dev = {
+ "eth3", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth4_dev, ethif_probe };
+static struct device eth2_dev = {
+ "eth2", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth3_dev, ethif_probe };
+static struct device eth1_dev = {
+ "eth1", 0,0,0,0,0xffe0 /* I/O base*/, 0,0,0,0, &eth2_dev, ethif_probe };
+
+static struct device eth0_dev = {
+ "eth0", 0, 0, 0, 0, ETH0_ADDR, ETH0_IRQ, 0, 0, 0, &eth1_dev, ethif_probe };
+
+# undef NEXT_DEV
+# define NEXT_DEV (&eth0_dev)
+
+#if defined(PLIP) || defined(CONFIG_PLIP)
+ extern int plip_init(struct device *);
+ static struct device plip2_dev = {
+ "plip2", 0, 0, 0, 0, 0x278, 2, 0, 0, 0, NEXT_DEV, plip_init, };
+ static struct device plip1_dev = {
+ "plip1", 0, 0, 0, 0, 0x378, 7, 0, 0, 0, &plip2_dev, plip_init, };
+ static struct device plip0_dev = {
+ "plip0", 0, 0, 0, 0, 0x3BC, 5, 0, 0, 0, &plip1_dev, plip_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&plip0_dev)
+#endif /* PLIP */
+
+#if defined(SLIP) || defined(CONFIG_SLIP)
+ /* To be exact, this node just hooks the initialization
+ routines to the device structures. */
+extern int slip_init_ctrl_dev(struct device *);
+static struct device slip_bootstrap = {
+ "slip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, slip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&slip_bootstrap)
+#endif /* SLIP */
+
+#if defined(CONFIG_MKISS)
+ /* To be exact, this node just hooks the initialization
+ routines to the device structures. */
+extern int mkiss_init_ctrl_dev(struct device *);
+static struct device mkiss_bootstrap = {
+ "mkiss_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, mkiss_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&mkiss_bootstrap)
+#endif /* MKISS */
+
+#if defined(CONFIG_STRIP)
+extern int strip_init_ctrl_dev(struct device *);
+static struct device strip_bootstrap = {
+ "strip_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, strip_init_ctrl_dev, };
+#undef NEXT_DEV
+#define NEXT_DEV (&strip_bootstrap)
+#endif /* STRIP */
+
+#if defined(CONFIG_SHAPER)
+static struct device shaper_bootstrap = {
+ "shaper", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, shaper_probe, };
+#undef NEXT_DEV
+#define NEXT_DEV (&shaper_bootstrap)
+#endif /* SHAPER */
+
+#if defined(CONFIG_RCPCI)
+static struct device rcpci_bootstrap = {
+ "rcpci", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, rcpci_probe, };
+#undef NEXT_DEV
+#define NEXT_DEV (&rcpci_bootstrap)
+#endif /* RCPCI */
+
+#if defined(CONFIG_PPP)
+extern int ppp_init(struct device *);
+static struct device ppp_bootstrap = {
+ "ppp_proto", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, ppp_init, };
+#undef NEXT_DEV
+#define NEXT_DEV (&ppp_bootstrap)
+#endif /* PPP */
+
+#ifdef CONFIG_DUMMY
+ extern int dummy_init(struct device *dev);
+ static struct device dummy_dev = {
+ "dummy", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, dummy_init, };
+# undef NEXT_DEV
+# define NEXT_DEV (&dummy_dev)
+#endif
+
+#ifdef CONFIG_EQUALIZER
+extern int eql_init(struct device *dev);
+struct device eql_dev = {
+ "eql", /* Master device for IP traffic load
+ balancing */
+ 0x0, 0x0, 0x0, 0x0, /* recv end/start; mem end/start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ eql_init /* set up the rest */
+};
+# undef NEXT_DEV
+# define NEXT_DEV (&eql_dev)
+#endif
+
+#ifdef CONFIG_IBMTR
+
+ extern int tok_probe(struct device *dev);
+ static struct device ibmtr_dev1 = {
+ "tr1", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa24, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev1)
+
+
+ static struct device ibmtr_dev0 = {
+ "tr0", /* IBM Token Ring (Non-DMA) Interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0xa20, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tok_probe /* ??? Token_init should set up the rest */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&ibmtr_dev0)
+
+#endif
+
+#ifdef CONFIG_DEFXX
+ extern int dfx_probe(struct device *dev);
+ static struct device fddi7_dev =
+ {"fddi7", 0, 0, 0, 0, 0, 0, 0, 0, 0, NEXT_DEV, dfx_probe};
+ static struct device fddi6_dev =
+ {"fddi6", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi7_dev, dfx_probe};
+ static struct device fddi5_dev =
+ {"fddi5", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi6_dev, dfx_probe};
+ static struct device fddi4_dev =
+ {"fddi4", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi5_dev, dfx_probe};
+ static struct device fddi3_dev =
+ {"fddi3", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi4_dev, dfx_probe};
+ static struct device fddi2_dev =
+ {"fddi2", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi3_dev, dfx_probe};
+ static struct device fddi1_dev =
+ {"fddi1", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi2_dev, dfx_probe};
+ static struct device fddi0_dev =
+ {"fddi0", 0, 0, 0, 0, 0, 0, 0, 0, 0, &fddi1_dev, dfx_probe};
+
+#undef NEXT_DEV
+#define NEXT_DEV (&fddi0_dev)
+#endif
+
+#ifdef CONFIG_NET_IPIP
+ extern int tunnel_init(struct device *);
+
+ static struct device tunnel_dev1 =
+ {
+ "tunl1", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+
+ static struct device tunnel_dev0 =
+ {
+ "tunl0", /* IPIP tunnel */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0x0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ &tunnel_dev1, /* next device */
+ tunnel_init /* Fill in the details */
+ };
+# undef NEXT_DEV
+# define NEXT_DEV (&tunnel_dev0)
+
+#endif
+
+#ifdef CONFIG_APFDDI
+ extern int apfddi_init(struct device *dev);
+ static struct device fddi_dev = {
+ "fddi", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, apfddi_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&fddi_dev)
+#endif
+
+#ifdef CONFIG_APBIF
+ extern int bif_init(struct device *dev);
+ static struct device bif_dev = {
+ "bif", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, bif_init };
+# undef NEXT_DEV
+# define NEXT_DEV (&bif_dev)
+#endif
+
+extern int loopback_init(struct device *dev);
+struct device loopback_dev = {
+ "lo", /* Software Loopback interface */
+ 0x0, /* recv memory end */
+ 0x0, /* recv memory start */
+ 0x0, /* memory end */
+ 0x0, /* memory start */
+ 0, /* base I/O address */
+ 0, /* IRQ */
+ 0, 0, 0, /* flags */
+ NEXT_DEV, /* next device */
+ loopback_init /* loopback_init should set up the rest */
+};
+
+struct device *dev_base = &loopback_dev;
diff --git a/linux/src/drivers/net/ac3200.c b/linux/src/drivers/net/ac3200.c
new file mode 100644
index 0000000..600949f
--- /dev/null
+++ b/linux/src/drivers/net/ac3200.c
@@ -0,0 +1,385 @@
+/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
+/*
+ Written 1993, 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov, or
+ C/O Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is the driver for the Ansel Communications Model 3200 EISA Ethernet LAN
+ Adapter. The programming information is from the user's manual, as related
+ by glee@ardnassak.math.clemson.edu.
+ */
+
+static const char *version =
+ "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+/* Offsets from the base address. */
+#define AC_NIC_BASE 0x00
+#define AC_SA_PROM 0x16 /* The station address PROM. */
+#define AC_ADDR0 0x00 /* Prefix station address values. */
+#define AC_ADDR1 0x40 /* !!!!These are just guesses!!!! */
+#define AC_ADDR2 0x90
+#define AC_ID_PORT 0xC80
+#define AC_EISA_ID 0x0110d305
+#define AC_RESET_PORT 0xC84
+#define AC_RESET 0x00
+#define AC_ENABLE 0x01
+#define AC_CONFIG 0xC90 /* The configuration port. */
+
+#define AC_IO_EXTENT 0x10 /* IS THIS REALLY TRUE ??? */
+ /* Actually accessed is:
+ * AC_NIC_BASE (0-15)
+ * AC_SA_PROM (0-5)
+ * AC_ID_PORT (0-3)
+ * AC_RESET_PORT
+ * AC_CONFIG
+ */
+
+/* Decoding of the configuration register. */
+static unsigned char config2irqmap[8] = {15, 12, 11, 10, 9, 7, 5, 3};
+static int addrmap[8] =
+{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
+static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
+
+#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
+#define config2mem(configval) addrmap[(configval) & 7]
+#define config2name(configval) port_name[((configval) >> 6) & 3]
+
+/* First and last 8390 pages. */
+#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
+#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
+
+int ac3200_probe(struct device *dev);
+static int ac_probe1(int ioaddr, struct device *dev);
+
+static int ac_open(struct device *dev);
+static void ac_reset_8390(struct device *dev);
+static void ac_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ac_block_output(struct device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+static void ac_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int ac_close_card(struct device *dev);
+
+
+/* Probe for the AC3200.
+
+ The AC3200 can be identified by either the EISA configuration registers,
+ or the unique value in the station address PROM.
+ */
+
+int ac3200_probe(struct device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+
+ if (ioaddr > 0x1ff) /* Check a single specified location. */
+ return ac_probe1(ioaddr, dev);
+ else if (ioaddr > 0) /* Don't probe at all. */
+ return ENXIO;
+
+ /* If you have a pre 0.99pl15 machine you should delete this line. */
+ if ( ! EISA_bus)
+ return ENXIO;
+
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+ if (check_region(ioaddr, AC_IO_EXTENT))
+ continue;
+ if (ac_probe1(ioaddr, dev) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+static int ac_probe1(int ioaddr, struct device *dev)
+{
+ int i;
+
+#ifndef final_version
+ printk("AC3200 ethercard probe at %#3x:", ioaddr);
+
+ for(i = 0; i < 6; i++)
+ printk(" %02x", inb(ioaddr + AC_SA_PROM + i));
+#endif
+
+ /* !!!!The values of AC_ADDRn (see above) should be corrected when we
+ find out the correct station address prefix!!!! */
+ if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
+ || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
+ || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
+#ifndef final_version
+ printk(" not found (invalid prefix).\n");
+#endif
+ return ENODEV;
+ }
+
+ /* The correct probe method is to check the EISA ID. */
+ for (i = 0; i < 4; i++)
+ if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
+ printk("EISA ID mismatch, %8x vs %8x.\n",
+ inl(ioaddr + AC_ID_PORT), AC_EISA_ID);
+ return ENODEV;
+ }
+
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("ac3200.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
+
+#ifndef final_version
+ printk("\nAC3200 ethercard configuration register is %#02x,"
+ " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
+ inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
+ inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
+#endif
+
+ /* Assign and allocate the interrupt now. */
+ if (dev->irq == 0)
+ dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
+ else if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "ac3200", NULL)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to allocate memory for dev->priv.\n");
+ free_irq(dev->irq, NULL);
+ return -ENOMEM;
+ }
+
+ request_region(ioaddr, AC_IO_EXTENT, "ac3200");
+
+ dev->base_addr = ioaddr;
+
+#ifdef notyet
+ if (dev->mem_start) { /* Override the value from the board. */
+ for (i = 0; i < 7; i++)
+ if (addrmap[i] == dev->mem_start)
+ break;
+ if (i >= 7)
+ i = 0;
+ outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
+ }
+#endif
+
+ dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
+ dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start
+ + (AC_STOP_PG - AC_START_PG)*256;
+
+ ei_status.name = "AC3200";
+ ei_status.tx_start_page = AC_START_PG;
+ ei_status.rx_start_page = AC_START_PG + TX_PAGES;
+ ei_status.stop_page = AC_STOP_PG;
+ ei_status.word16 = 1;
+
+ printk("\n%s: AC3200 at %#x, IRQ %d, %s port, shared memory %#lx-%#lx.\n",
+ dev->name, ioaddr, dev->irq, port_name[dev->if_port],
+ dev->mem_start, dev->mem_end-1);
+
+ if (ei_debug > 0)
+ printk("%s", version);
+
+ ei_status.reset_8390 = &ac_reset_8390;
+ ei_status.block_input = &ac_block_input;
+ ei_status.block_output = &ac_block_output;
+ ei_status.get_8390_hdr = &ac_get_8390_hdr;
+
+ dev->open = &ac_open;
+ dev->stop = &ac_close_card;
+ NS8390_init(dev, 0);
+ return 0;
+}
+
+static int ac_open(struct device *dev)
+{
+#ifdef notyet
+ /* Someday we may enable the IRQ and shared memory here. */
+ int ioaddr = dev->base_addr;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "ac3200", NULL))
+ return -EAGAIN;
+#endif
+
+ ei_open(dev);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static void ac_reset_8390(struct device *dev)
+{
+ ushort ioaddr = dev->base_addr;
+
+ outb(AC_RESET, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
+
+ ei_status.txing = 0;
+ outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
+ if (ei_debug > 1) printk("reset done\n");
+
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ac_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = dev->mem_start + ((ring_page - AC_START_PG)<<8);
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+}
+
+/* Block input and output are easy on shared-memory ethercards; the only
+ complication is when the ring buffer wraps. */
+
+static void ac_block_input(struct device *dev, int count, struct sk_buff *skb,
+ int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + ring_offset - (AC_START_PG<<8);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+}
+
+static void ac_block_output(struct device *dev, int count,
+ const unsigned char *buf, int start_page)
+{
+ unsigned long shmem = dev->mem_start + ((start_page - AC_START_PG)<<8);
+
+ memcpy_toio(shmem, buf, count);
+}
+
+static int ac_close_card(struct device *dev)
+{
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+#ifdef notyet
+ /* We should someday disable shared memory and interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ ei_close(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+#ifdef MODULE
+#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_AC32_CARDS] = { 0, };
+static struct device dev_ac32[MAX_AC32_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_AC32_CARDS] = { 0, };
+static int irq[MAX_AC32_CARDS] = { 0, };
+static int mem[MAX_AC32_CARDS] = { 0, };
+
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct device *dev = &dev_ac32[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
+ dev->init = ac3200_probe;
+ /* Default is to only install one card. */
+ if (io[this_dev] == 0 && this_dev != 0) break;
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
+ struct device *dev = &dev_ac32[this_dev];
+ if (dev->priv != NULL) {
+ kfree(dev->priv);
+ dev->priv = NULL;
+ /* Someday free_irq + irq2dev may be in ac_close_card() */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(dev->base_addr, AC_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c ac3200.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/apricot.c b/linux/src/drivers/net/apricot.c
new file mode 100644
index 0000000..57fccaf
--- /dev/null
+++ b/linux/src/drivers/net/apricot.c
@@ -0,0 +1,1046 @@
+/* apricot.c: An Apricot 82596 ethernet driver for linux. */
+/*
+ Apricot
+ Written 1994 by Mark Evans.
+ This driver is for the Apricot 82596 bus-master interface
+
+ Modularised 12/94 Mark Evans
+
+ Driver skeleton
+ Written 1993 by Donald Becker.
+ Copyright 1993 United States Government as represented by the Director,
+ National Security Agency. This software may only be used and distributed
+ according to the terms of the GNU Public License as modified by SRC,
+ incorporated herein by reference.
+
+ The author may be reached as becker@super.org or
+ C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+
+
+*/
+
+static const char *version = "apricot.c:v0.2 05/12/94\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#ifndef HAVE_PORTRESERVE
+#define check_region(addr, size) 0
+#define request_region(addr, size,name) do ; while(0)
+#endif
+
+#ifndef HAVE_ALLOC_SKB
+#define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority)
+#define kfree_skbmem(buff, size) kfree_s(buff,size)
+#endif
+
+#define APRICOT_DEBUG 1
+
+#ifdef APRICOT_DEBUG
+int i596_debug = APRICOT_DEBUG;
+#else
+int i596_debug = 1;
+#endif
+
+#define APRICOT_TOTAL_SIZE 17
+
+#define I596_NULL -1
+
+#define CMD_EOL 0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
+
+#define CMD_FLEX 0x0008 /* Enable flexible memory model */
+
+enum commands {
+ CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
+ CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
+
+#define STAT_C 0x8000 /* Set to 0 after execution */
+#define STAT_B 0x4000 /* Command being executed */
+#define STAT_OK 0x2000 /* Command executed ok */
+#define STAT_A 0x1000 /* Command aborted */
+
+#define CUC_START 0x0100
+#define CUC_RESUME 0x0200
+#define CUC_SUSPEND 0x0300
+#define CUC_ABORT 0x0400
+#define RX_START 0x0010
+#define RX_RESUME 0x0020
+#define RX_SUSPEND 0x0030
+#define RX_ABORT 0x0040
+
+struct i596_cmd {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *next;
+};
+
+#define EOF 0x8000
+#define SIZE_MASK 0x3fff
+
+struct i596_tbd {
+ unsigned short size;
+ unsigned short pad;
+ struct i596_tbd *next;
+ unsigned char *data;
+};
+
+struct tx_cmd {
+ struct i596_cmd cmd;
+ struct i596_tbd *tbd;
+ unsigned short size;
+ unsigned short pad;
+};
+
+struct i596_rfd {
+ unsigned short stat;
+ unsigned short cmd;
+ struct i596_rfd *next;
+ long rbd;
+ unsigned short count;
+ unsigned short size;
+ unsigned char data[1532];
+};
+
+#define RX_RING_SIZE 8
+
+struct i596_scb {
+ unsigned short status;
+ unsigned short command;
+ struct i596_cmd *cmd;
+ struct i596_rfd *rfd;
+ unsigned long crc_err;
+ unsigned long align_err;
+ unsigned long resource_err;
+ unsigned long over_err;
+ unsigned long rcvdt_err;
+ unsigned long short_err;
+ unsigned short t_on;
+ unsigned short t_off;
+};
+
+struct i596_iscp {
+ unsigned long stat;
+ struct i596_scb *scb;
+};
+
+struct i596_scp {
+ unsigned long sysbus;
+ unsigned long pad;
+ struct i596_iscp *iscp;
+};
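+/* The three structures above form the 82596 initialization chain: the SCP
+   points at the ISCP, which points at the SCB. init_i596_mem() below wires
+   up these pointers and then writes to ioaddr+4 (apparently the channel
+   attention strobe) so the chip reads them. */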
+
+struct i596_private {
+ volatile struct i596_scp scp;
+ volatile struct i596_iscp iscp;
+ volatile struct i596_scb scb;
+ volatile struct i596_cmd set_add;
+ char eth_addr[8];
+ volatile struct i596_cmd set_conf;
+ char i596_config[16];
+ volatile struct i596_cmd tdr;
+ unsigned long stat;
+ int last_restart;
+ struct i596_rfd *rx_tail;
+ struct i596_cmd *cmd_tail;
+ struct i596_cmd *cmd_head;
+ int cmd_backlog;
+ unsigned long last_cmd;
+ struct enet_statistics stats;
+};
+
+char init_setup[] = {
+ 0x8E, /* length, prefetch on */
+ 0xC8, /* fifo to 8, monitor off */
+ 0x80, /* don't save bad frames */
+ 0x2E, /* No source address insertion, 8 byte preamble */
+ 0x00, /* priority and backoff defaults */
+ 0x60, /* interframe spacing */
+ 0x00, /* slot time LSB */
+ 0xf2, /* slot time and retries */
+ 0x00, /* promiscuous mode */
+ 0x00, /* collision detect */
+ 0x40, /* minimum frame length */
+ 0xff,
+ 0x00,
+ 0x7f /* *multi IA */ };
+
+static int i596_open(struct device *dev);
+static int i596_start_xmit(struct sk_buff *skb, struct device *dev);
+static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int i596_close(struct device *dev);
+static struct enet_statistics *i596_get_stats(struct device *dev);
+static void i596_add_cmd(struct device *dev, struct i596_cmd *cmd);
+static void print_eth(unsigned char *);
+static void set_multicast_list(struct device *dev);
+
+
+static inline int
+init_rx_bufs(struct device *dev, int num)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int i;
+ struct i596_rfd *rfd;
+
+ lp->scb.rfd = (struct i596_rfd *)I596_NULL;
+
+ if (i596_debug > 1) printk ("%s: init_rx_bufs %d.\n", dev->name, num);
+
+ for (i = 0; i < num; i++)
+ {
+ if (!(rfd = (struct i596_rfd *)kmalloc(sizeof(struct i596_rfd), GFP_KERNEL)))
+ break;
+
+ rfd->stat = 0x0000;
+ rfd->rbd = I596_NULL;
+ rfd->count = 0;
+ rfd->size = 1532;
+ if (i == 0)
+ {
+ rfd->cmd = CMD_EOL;
+ lp->rx_tail = rfd;
+ }
+ else
+ rfd->cmd = 0x0000;
+
+ rfd->next = lp->scb.rfd;
+ lp->scb.rfd = rfd;
+ }
+
+ if (i != 0)
+ lp->rx_tail->next = lp->scb.rfd;
+
+ return (i);
+}
+
+static inline void
+remove_rx_bufs(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ struct i596_rfd *rfd = lp->scb.rfd;
+
+ lp->rx_tail->next = (struct i596_rfd *)I596_NULL;
+
+ do
+ {
+ lp->scb.rfd = rfd->next;
+ kfree_s(rfd, sizeof(struct i596_rfd));
+ rfd = lp->scb.rfd;
+ }
+ while (rfd != lp->rx_tail);
+}
+
+static inline void
+init_i596_mem(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ short ioaddr = dev->base_addr;
+ int boguscnt = 100;
+
+ /* change the scp address */
+ outw(0, ioaddr);
+ outw(0, ioaddr);
+ outb(4, ioaddr+0xf);
+ outw(((((int)&lp->scp) & 0xffff) | 2), ioaddr);
+ outw((((int)&lp->scp)>>16) & 0xffff, ioaddr);
+
+ lp->last_cmd = jiffies;
+
+ lp->scp.sysbus = 0x00440000;
+ lp->scp.iscp = &(lp->iscp);
+ lp->iscp.scb = &(lp->scb);
+ lp->iscp.stat = 0x0001;
+ lp->cmd_backlog = 0;
+
+ lp->cmd_head = lp->scb.cmd = (struct i596_cmd *) I596_NULL;
+
+ if (i596_debug > 2) printk("%s: starting i82596.\n", dev->name);
+
+ (void) inb (ioaddr+0x10);
+ outb(4, ioaddr+0xf);
+ outw(0, ioaddr+4);
+
+ while (lp->iscp.stat)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i82596 initialization timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.command = 0;
+
+ memcpy (lp->i596_config, init_setup, 14);
+ lp->set_conf.command = CmdConfigure;
+ i596_add_cmd(dev, &lp->set_conf);
+
+ memcpy (lp->eth_addr, dev->dev_addr, 6);
+ lp->set_add.command = CmdSASetup;
+ i596_add_cmd(dev, &lp->set_add);
+
+ lp->tdr.command = CmdTDR;
+ i596_add_cmd(dev, &lp->tdr);
+
+ boguscnt = 200;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: receive unit start timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.command = RX_START;
+ outw(0, ioaddr+4);
+
+ boguscnt = 200;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i82596 init timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ return;
+}
+
+static inline int
+i596_rx(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int frames = 0;
+
+ if (i596_debug > 3) printk ("i596_rx()\n");
+
+ while ((lp->scb.rfd->stat) & STAT_C)
+ {
+ if (i596_debug >2) print_eth(lp->scb.rfd->data);
+
+ if ((lp->scb.rfd->stat) & STAT_OK)
+ {
+ /* a good frame */
+ int pkt_len = lp->scb.rfd->count & 0x3fff;
+ struct sk_buff *skb = dev_alloc_skb(pkt_len);
+
+ frames++;
+
+ if (skb == NULL)
+ {
+ printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len);
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+
+ if (i596_debug > 4) print_eth(skb->data);
+ }
+ else
+ {
+ lp->stats.rx_errors++;
+ if ((lp->scb.rfd->stat) & 0x0001) lp->stats.collisions++;
+ if ((lp->scb.rfd->stat) & 0x0080) lp->stats.rx_length_errors++;
+ if ((lp->scb.rfd->stat) & 0x0100) lp->stats.rx_over_errors++;
+ if ((lp->scb.rfd->stat) & 0x0200) lp->stats.rx_fifo_errors++;
+ if ((lp->scb.rfd->stat) & 0x0400) lp->stats.rx_frame_errors++;
+ if ((lp->scb.rfd->stat) & 0x0800) lp->stats.rx_crc_errors++;
+ if ((lp->scb.rfd->stat) & 0x1000) lp->stats.rx_length_errors++;
+ }
+
+ lp->scb.rfd->stat = 0;
+ lp->rx_tail->cmd = 0;
+ lp->rx_tail = lp->scb.rfd;
+ lp->scb.rfd = lp->scb.rfd->next;
+ lp->rx_tail->count = 0;
+ lp->rx_tail->cmd = CMD_EOL;
+
+ }
+
+ if (i596_debug > 3) printk ("frames %d\n", frames);
+
+ return 0;
+}
+
+static inline void
+i596_cleanup_cmd(struct i596_private *lp)
+{
+ struct i596_cmd *ptr;
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_cleanup_cmd\n");
+
+ while (lp->cmd_head != (struct i596_cmd *) I596_NULL)
+ {
+ ptr = lp->cmd_head;
+
+ lp->cmd_head = lp->cmd_head->next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7)
+ {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)tx_cmd, (sizeof (struct tx_cmd) + sizeof (struct i596_tbd)));
+ break;
+ }
+ case CmdMulticastList:
+ {
+ unsigned short count = *((unsigned short *) (ptr + 1));
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)ptr, (sizeof (struct i596_cmd) + count + 2));
+ break;
+ }
+ default:
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ }
+ }
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_cleanup_cmd timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.cmd = lp->cmd_head;
+}
+
+static inline void
+i596_reset(struct device *dev, struct i596_private *lp, int ioaddr)
+{
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_reset\n");
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_reset timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ lp->scb.command = CUC_ABORT|RX_ABORT;
+ outw(0, ioaddr+4);
+
+ /* wait for shutdown */
+ boguscnt = 400;
+
+ while ((lp->scb.status, lp->scb.command) || lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_reset 2 timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ i596_cleanup_cmd(lp);
+ i596_rx(dev);
+
+ dev->start = 1;
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ init_i596_mem(dev);
+}
+
+static void i596_add_cmd(struct device *dev, struct i596_cmd *cmd)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long flags;
+ int boguscnt = 100;
+
+ if (i596_debug > 4) printk ("i596_add_cmd\n");
+
+ cmd->status = 0;
+ cmd->command |= (CMD_EOL|CMD_INTR);
+ cmd->next = (struct i596_cmd *) I596_NULL;
+
+ save_flags(flags);
+ cli();
+ if (lp->cmd_head != (struct i596_cmd *) I596_NULL)
+ lp->cmd_tail->next = cmd;
+ else
+ {
+ lp->cmd_head = cmd;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("i596_add_cmd timed out with status %4.4x, cmd %4.4x.\n",
+ lp->scb.status, lp->scb.command);
+ break;
+ }
+
+ lp->scb.cmd = cmd;
+ lp->scb.command = CUC_START;
+ outw (0, ioaddr+4);
+ }
+ lp->cmd_tail = cmd;
+ lp->cmd_backlog++;
+
+ lp->cmd_head = lp->scb.cmd;
+ restore_flags(flags);
+
+ if (lp->cmd_backlog > 16)
+ {
+ int tickssofar = jiffies - lp->last_cmd;
+
+ if (tickssofar < 25) return;
+
+ printk("%s: command unit timed out, status resetting.\n", dev->name);
+
+ i596_reset(dev, lp, ioaddr);
+ }
+}
+
+static int
+i596_open(struct device *dev)
+{
+ int i;
+
+ if (i596_debug > 1)
+ printk("%s: i596_open() irq %d.\n", dev->name, dev->irq);
+
+ if (request_irq(dev->irq, &i596_interrupt, 0, "apricot", NULL))
+ return -EAGAIN;
+
+ irq2dev_map[dev->irq] = dev;
+
+  if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
+ printk("%s: only able to allocate %d receive buffers\n", dev->name, i);
+
+ if (i < 4)
+ {
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+ return -EAGAIN;
+ }
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+
+ /* Initialize the 82596 memory */
+ init_i596_mem(dev);
+
+ return 0; /* Always succeed */
+}
+
+static int
+i596_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ struct tx_cmd *tx_cmd;
+
+ if (i596_debug > 2) printk ("%s: Apricot start xmit\n", dev->name);
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk("%s: transmit timed out, status resetting.\n",
+ dev->name);
+ lp->stats.tx_errors++;
+ /* Try to restart the adaptor */
+ if (lp->last_restart == lp->stats.tx_packets) {
+ if (i596_debug > 1) printk ("Resetting board.\n");
+
+ /* Shutdown and restart */
+ i596_reset(dev,lp, ioaddr);
+ } else {
+ /* Issue a channel attention signal */
+ if (i596_debug > 1) printk ("Kicking board.\n");
+
+ lp->scb.command = CUC_START|RX_START;
+ outw(0, ioaddr+4);
+
+ lp->last_restart = lp->stats.tx_packets;
+ }
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ }
+
+  /* If some higher level thinks we've missed a tx-done interrupt
+ we are passed NULL. n.b. dev_tint handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* shouldn't happen */
+ if (skb->len <= 0) return 0;
+
+ if (i596_debug > 3) printk("%s: i596_start_xmit() called\n", dev->name);
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else
+ {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ dev->trans_start = jiffies;
+
+ tx_cmd = (struct tx_cmd *) kmalloc ((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
+ if (tx_cmd == NULL)
+ {
+ printk ("%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.tx_dropped++;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ else
+ {
+ tx_cmd->tbd = (struct i596_tbd *) (tx_cmd + 1);
+ tx_cmd->tbd->next = (struct i596_tbd *) I596_NULL;
+
+ tx_cmd->cmd.command = CMD_FLEX|CmdTx;
+
+ tx_cmd->pad = 0;
+ tx_cmd->size = 0;
+ tx_cmd->tbd->pad = 0;
+ tx_cmd->tbd->size = EOF | length;
+
+ tx_cmd->tbd->data = skb->data;
+
+ if (i596_debug > 3) print_eth(skb->data);
+
+ i596_add_cmd(dev, (struct i596_cmd *)tx_cmd);
+
+ lp->stats.tx_packets++;
+ }
+ }
+
+ dev->tbusy = 0;
+
+ return 0;
+}
+
+
+static void print_eth(unsigned char *add)
+{
+ int i;
+
+ printk ("Dest ");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", add[i]);
+ printk ("\n");
+
+ printk ("Source");
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", add[i+6]);
+ printk ("\n");
+ printk ("type %2.2X%2.2X\n", add[12], add[13]);
+}
+
+int apricot_probe(struct device *dev)
+{
+ int i;
+ struct i596_private *lp;
+ int checksum = 0;
+ int ioaddr = 0x300;
+ char eth_addr[8];
+
+  /* this is easy: the ethernet interface can only be at 0x300 */
+ /* first check nothing is already registered here */
+
+ if (check_region(ioaddr, APRICOT_TOTAL_SIZE))
+ return ENODEV;
+
+ for (i = 0; i < 8; i++)
+ {
+ eth_addr[i] = inb(ioaddr+8+i);
+ checksum += eth_addr[i];
+ }
+
+  /* checksum is a multiple of 0x100; got this wrong the first time.
+     Some machines have 0x100, some 0x200.  The DOS driver doesn't
+     even bother with the checksum. */
+
+ if (checksum % 0x100) return ENODEV;
+
+ /* Some other boards trip the checksum.. but then appear as ether
+ address 0. Trap these - AC */
+
+ if(memcmp(eth_addr,"\x00\x00\x49",3)!= 0)
+ return ENODEV;
+
+ request_region(ioaddr, APRICOT_TOTAL_SIZE, "apricot");
+
+ dev->base_addr = ioaddr;
+ ether_setup(dev);
+ printk("%s: Apricot 82596 at %#3x,", dev->name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
+
+ dev->base_addr = ioaddr;
+ dev->irq = 10;
+ printk(" IRQ %d.\n", dev->irq);
+
+ if (i596_debug > 0) printk("%s", version);
+
+ /* The APRICOT-specific entries in the device structure. */
+ dev->open = &i596_open;
+ dev->stop = &i596_close;
+ dev->hard_start_xmit = &i596_start_xmit;
+ dev->get_stats = &i596_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ dev->mem_start = (int)kmalloc(sizeof(struct i596_private)+ 0x0f, GFP_KERNEL);
+ /* align for scp */
+ dev->priv = (void *)((dev->mem_start + 0xf) & 0xfffffff0);
+
+ lp = (struct i596_private *)dev->priv;
+ memset((void *)lp, 0, sizeof(struct i596_private));
+ lp->scb.command = 0;
+ lp->scb.cmd = (struct i596_cmd *) I596_NULL;
+ lp->scb.rfd = (struct i596_rfd *)I596_NULL;
+
+ return 0;
+}
+
+static void
+i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct i596_private *lp;
+ short ioaddr;
+ int boguscnt = 200;
+ unsigned short status, ack_cmd = 0;
+
+ if (dev == NULL) {
+ printk ("i596_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (i596_debug > 3) printk ("%s: i596_interrupt(): irq %d\n",dev->name, irq);
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+
+ lp = (struct i596_private *)dev->priv;
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ status = lp->scb.status;
+
+ if (i596_debug > 4)
+ printk("%s: i596 interrupt, status %4.4x.\n", dev->name, status);
+
+ ack_cmd = status & 0xf000;
+
+ if ((status & 0x8000) || (status & 0x2000))
+ {
+ struct i596_cmd *ptr;
+
+ if ((i596_debug > 4) && (status & 0x8000))
+ printk("%s: i596 interrupt completed command.\n", dev->name);
+ if ((i596_debug > 4) && (status & 0x2000))
+ printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700);
+
+ while ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (lp->cmd_head->status & STAT_C))
+ {
+ ptr = lp->cmd_head;
+
+ lp->cmd_head = lp->cmd_head->next;
+ lp->cmd_backlog--;
+
+ switch ((ptr->command) & 0x7)
+ {
+ case CmdTx:
+ {
+ struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
+ struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1;
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+ if ((ptr->status) & STAT_OK)
+ {
+ if (i596_debug >2) print_eth(skb->data);
+ }
+ else
+ {
+ lp->stats.tx_errors++;
+ if ((ptr->status) & 0x0020) lp->stats.collisions++;
+ if (!((ptr->status) & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if ((ptr->status) & 0x0400) lp->stats.tx_carrier_errors++;
+ if ((ptr->status) & 0x0800) lp->stats.collisions++;
+ if ((ptr->status) & 0x1000) lp->stats.tx_aborted_errors++;
+ }
+
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)tx_cmd, (sizeof (struct tx_cmd) + sizeof (struct i596_tbd)));
+ break;
+ }
+ case CmdMulticastList:
+ {
+ unsigned short count = *((unsigned short *) (ptr + 1));
+
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+ kfree_s((unsigned char *)ptr, (sizeof (struct i596_cmd) + count + 2));
+ break;
+ }
+ case CmdTDR:
+ {
+ unsigned long status = *((unsigned long *) (ptr + 1));
+
+ if (status & 0x8000)
+ {
+ if (i596_debug > 3)
+ printk("%s: link ok.\n", dev->name);
+ }
+ else
+ {
+ if (status & 0x4000)
+ printk("%s: Transceiver problem.\n", dev->name);
+ if (status & 0x2000)
+ printk("%s: Termination problem.\n", dev->name);
+ if (status & 0x1000)
+ printk("%s: Short circuit.\n", dev->name);
+
+ printk("%s: Time %ld.\n", dev->name, status & 0x07ff);
+ }
+ }
+ default:
+ ptr->next = (struct i596_cmd * ) I596_NULL;
+
+ lp->last_cmd = jiffies;
+ }
+ }
+
+ ptr = lp->cmd_head;
+ while ((ptr != (struct i596_cmd *) I596_NULL) && (ptr != lp->cmd_tail))
+ {
+ ptr->command &= 0x1fff;
+ ptr = ptr->next;
+ }
+
+ if ((lp->cmd_head != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd |= CUC_START;
+ lp->scb.cmd = lp->cmd_head;
+ }
+
+ if ((status & 0x1000) || (status & 0x4000))
+ {
+ if ((i596_debug > 4) && (status & 0x4000))
+ printk("%s: i596 interrupt received a frame.\n", dev->name);
+ if ((i596_debug > 4) && (status & 0x1000))
+ printk("%s: i596 interrupt receive unit inactive %x.\n", dev->name, status & 0x0070);
+
+ i596_rx(dev);
+
+ if (dev->start) ack_cmd |= RX_START;
+ }
+
+ /* acknowledge the interrupt */
+
+/*
+ if ((lp->scb.cmd != (struct i596_cmd *) I596_NULL) && (dev->start)) ack_cmd | = CUC_START;
+*/
+ boguscnt = 100;
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: i596 interrupt, timeout status %4.4x command %4.4x.\n", dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ lp->scb.command = ack_cmd;
+
+ (void) inb (ioaddr+0x10);
+ outb (4, ioaddr+0xf);
+ outw (0, ioaddr+4);
+
+ if (i596_debug > 4)
+ printk("%s: exiting interrupt.\n", dev->name);
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+i596_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ int boguscnt = 200;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (i596_debug > 1)
+ printk("%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, lp->scb.status);
+
+ lp->scb.command = CUC_ABORT|RX_ABORT;
+ outw(0, ioaddr+4);
+
+ i596_cleanup_cmd(lp);
+
+ while (lp->scb.status, lp->scb.command)
+ if (--boguscnt == 0)
+ {
+ printk("%s: close timed out with status %4.4x, cmd %4.4x.\n",
+ dev->name, lp->scb.status, lp->scb.command);
+ break;
+ }
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+ remove_rx_bufs(dev);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct enet_statistics *
+i596_get_stats(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ struct i596_private *lp = (struct i596_private *)dev->priv;
+ struct i596_cmd *cmd;
+
+ if (i596_debug > 1)
+ printk ("%s: set multicast list %d\n", dev->name, dev->mc_count);
+
+ if (dev->mc_count > 0)
+ {
+ struct dev_mc_list *dmi;
+ char *cp;
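+      /* The multicast command block is the i596_cmd header followed by a
+	 16-bit byte count and the 6-byte addresses packed back to back. */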
+ cmd = (struct i596_cmd *) kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ if (cmd == NULL)
+ {
+ printk ("%s: set_multicast Memory squeeze.\n", dev->name);
+ return;
+ }
+ cmd->command = CmdMulticastList;
+ *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ cp=((char *)(cmd + 1))+2;
+ for(dmi=dev->mc_list;dmi!=NULL;dmi=dmi->next)
+ {
+	  memcpy(cp, dmi->dmi_addr, 6);
+ cp+=6;
+ }
+ print_eth (((unsigned char *)(cmd + 1)) + 2);
+ i596_add_cmd(dev, cmd);
+ }
+ else
+ {
+ if (lp->set_conf.next != (struct i596_cmd * ) I596_NULL)
+ return;
+ if (dev->mc_count == 0 && !(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
+ {
+ if(dev->flags&IFF_ALLMULTI)
+ dev->flags|=IFF_PROMISC;
+ lp->i596_config[8] &= ~0x01;
+ }
+ else
+ lp->i596_config[8] |= 0x01;
+
+ i596_add_cmd(dev, &lp->set_conf);
+ }
+}
+
+#ifdef HAVE_DEVLIST
+static unsigned int apricot_portlist[] = {0x300, 0};
+struct netdev_entry apricot_drv =
+{"apricot", apricot_probe, APRICOT_TOTAL_SIZE, apricot_portlist};
+#endif
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_apricot = {
+ devicename, /* device name inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 10,
+ 0, 0, 0, NULL, apricot_probe };
+
+static int io = 0x300;
+static int irq = 10;
+
+int
+init_module(void)
+{
+ dev_apricot.base_addr = io;
+ dev_apricot.irq = irq;
+ if (register_netdev(&dev_apricot) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_apricot);
+ kfree_s((void *)dev_apricot.mem_start, sizeof(struct i596_private) + 0xf);
+ dev_apricot.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_apricot.base_addr, APRICOT_TOTAL_SIZE);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c apricot.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/at1700.c b/linux/src/drivers/net/at1700.c
new file mode 100644
index 0000000..f4025f4
--- /dev/null
+++ b/linux/src/drivers/net/at1700.c
@@ -0,0 +1,756 @@
+/* at1700.c: A network device driver for the Allied Telesis AT1700.
+
+ Written 1993-98 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a device driver for the Allied Telesis AT1700, which is a
+ straight-forward Fujitsu MB86965 implementation.
+
+ Sources:
+ The Fujitsu MB86965 datasheet.
+
+ After the initial version of this driver was written Gerry Sawkins of
+ ATI provided their EEPROM configuration code header file.
+ Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
+
+ Bugs:
+ The MB86965 has a design flaw that makes all probes unreliable. Not
+ only is it difficult to detect, it also moves around in I/O space in
+ response to inb()s from other device probes!
+*/
+
+static const char *version =
+ "at1700.c:v1.15 4/7/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Tunable parameters. */
+
+/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */
+#define MC_FILTERBREAK 64
+
+/* These unusual address orders are used to verify the CONFIG register. */
+static int at1700_probe_list[] =
+{0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0};
+static int fmv18x_probe_list[] =
+{0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0};
+
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ unsigned char mc_filter[8];
+ uint jumpered:1; /* Set iff the board has jumper config. */
+ uint tx_started:1; /* Packets are on the Tx queue. */
+ uint invalid_irq:1;
+ uchar tx_queue; /* Number of packet on the Tx queue. */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define MODE13 13
+/* Configuration registers only on the '865A/B chips. */
+#define EEPROM_Ctrl 16
+#define EEPROM_Data 17
+#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */
+#define IOCONFIG1 19
+#define SAPROM 20 /* The station address PROM, if no EEPROM. */
+#define RESET 31 /* Write to reset some parts of the chip. */
+#define AT1700_IO_EXTENT 32
+/* Index to functions, as function prototypes. */
+
+extern int at1700_probe(struct device *dev);
+
+static int at1700_probe1(struct device *dev, int ioaddr);
+static int read_eeprom(int ioaddr, int location);
+static int net_open(struct device *dev);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_rx_mode(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry at1700_drv =
+{"at1700", at1700_probe1, AT1700_IO_EXTENT, at1700_probe_list};
+#else
+int
+at1700_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return at1700_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; at1700_probe_list[i]; i++) {
+ int ioaddr = at1700_probe_list[i];
+ if (check_region(ioaddr, AT1700_IO_EXTENT))
+ continue;
+ if (at1700_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into an
+ EEPROM read. */
+
+int at1700_probe1(struct device *dev, int ioaddr)
+{
+ char fmv_irqmap[4] = {3, 7, 10, 15};
+ char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
+ unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe for.
+ */
+#ifdef notdef
+ printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
+ ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
+ read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
+#endif
+ /* We must check for the EEPROM-config boards first, else accessing
+ IOCONFIG0 will move the board! */
+ if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr
+ && read_eeprom(ioaddr, 4) == 0x0000
+ && (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
+ is_at1700 = 1;
+ else if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] == ioaddr
+ && inb(ioaddr + SAPROM ) == 0x00
+ && inb(ioaddr + SAPROM + 1) == 0x00
+ && inb(ioaddr + SAPROM + 2) == 0x0e)
+ is_fmv18x = 1;
+ else
+ return -ENODEV;
+
+ /* Reset the internal state machines. */
+ outb(0, ioaddr + RESET);
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (is_at1700)
+ irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04)
+ | (read_eeprom(ioaddr, 0)>>14)];
+ else
+ irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
+
+ /* Grab the region so that we can find another board if the IRQ request
+ fails. */
+ request_region(ioaddr, AT1700_IO_EXTENT, dev->name);
+
+ printk("%s: AT1700 found at %#3x, IRQ %d, address ", dev->name,
+ ioaddr, irq);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ for(i = 0; i < 3; i++) {
+ unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
+ printk("%04x", eeprom_val);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
+ }
+
+ /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
+ ushort setup_value = read_eeprom(ioaddr, 12);
+
+ dev->if_port = setup_value >> 8;
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
+
+ /* Set the station address in bank zero. */
+ outb(0xe0, ioaddr + CONFIG_1);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0xe4, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+  /* Set the configuration register 0 to 32K 100ns byte-wide memory, 16 bit
+     bus access, two 4K Tx queues, and disabled Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Switch to bank 2 and lock our I/O address. */
+ outb(0xe8, ioaddr + CONFIG_1);
+  outb(dev->if_port, ioaddr + MODE13);
+
+ /* Power-down the chip. Aren't we green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+
+ /* Fill in the fields of 'dev' with ethernet-generic values. */
+ ether_setup(dev);
+
+ {
+ struct net_local *lp = (struct net_local *)dev->priv;
+ lp->jumpered = is_fmv18x;
+ /* Snarf the interrupt vector now. */
+ if (request_irq(irq, &net_interrupt, 0, dev->name, dev)) {
+	  printk (" AT1700 at %#3x is unusable due to a conflict on "
+			  "IRQ %d.\n", ioaddr, irq);
+ lp->invalid_irq = 1;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
+#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
+#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
+#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay() do {} while (0);
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ_CMD (6 << 6)
+#define EE_ERASE_CMD (7 << 6)
+
+static int read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ int ee_addr = ioaddr + EEPROM_Ctrl;
+ int ee_daddr = ioaddr + EEPROM_Data;
+ int read_cmd = location | EE_READ_CMD;
+
+ /* Shift the read command bits out. */
+ for (i = 9; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_CS, ee_addr);
+ outb(dataval, ee_daddr);
+ eeprom_delay();
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+ }
+ outb(EE_DATA_WRITE, ee_daddr);
+ for (i = 16; i > 0; i--) {
+ outb(EE_CS, ee_addr);
+ eeprom_delay();
+ outb(EE_CS | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(EE_CS, ee_addr);
+ eeprom_delay();
+ outb(EE_SHIFT_CLK, ee_addr);
+ outb(0, ee_addr);
+ return retval;
+}
+
+
+
+static int net_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ /* Powerup the chip, initialize config register 1, and select bank 0. */
+ outb(0xe0, ioaddr + CONFIG_1);
+
+ /* Set the station address in bank zero. */
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0xe4, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+  /* Set the configuration register 0 to 32K 100ns byte-wide memory, 16 bit
+     bus access, and two 4K Tx queues. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Switch to register bank 2, enable the Rx and Tx. */
+ outw(0xe85a, ioaddr + CONFIG_0);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on Rx interrupts, leave Tx interrupts off until packet Tx. */
+ outb(0x00, ioaddr + TX_INTR);
+ outb(0x81, ioaddr + RX_INTR);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ printk("%s: transmit timed out with status %04x, %s?\n", dev->name,
+ inw(ioaddr + STATUS), inb(ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, inw(ioaddr + 0), inw(ioaddr + 2), inw(ioaddr + 4),
+ inw(ioaddr + 6), inw(ioaddr + 8), inw(ioaddr + 10),
+ inw(ioaddr + 12), inw(ioaddr + 14));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ outw(0xffff, ioaddr + 24);
+ outw(0xffff, ioaddr + TX_STATUS);
+ outw(0xe85a, ioaddr + CONFIG_0);
+ outw(0x8100, ioaddr + TX_INTR);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ }
+
+  /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ /* Turn off the possible Tx interrupts. */
+ outb(0x00, ioaddr + TX_INTR);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ } else if (lp->tx_queue_len < 4096 - 1502)
+ /* Yes, there is room for one more packet. */
+ dev->tbusy = 0;
+
+		/* Turn Tx interrupts back on. */
+ outb(0x82, ioaddr + TX_INTR);
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = dev_id;
+ struct net_local *lp;
+ int ioaddr, status;
+
+ if (dev == NULL) {
+ printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (status & 0xff00
+ || (inb(ioaddr + RX_MODE) & 0x40) == 0) { /* Got a packet(s). */
+ net_rx(dev);
+ }
+ if (status & 0x00ff) {
+ if (status & 0x80) {
+ lp->stats.tx_packets++;
+ if (lp->tx_queue) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->tx_started = 0;
+				/* Turn Tx interrupts off. */
+ outb(0x00, ioaddr + TX_INTR);
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ }
+ }
+
+ dev->interrupt = 0;
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 5;
+
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ ushort status = inw(ioaddr + DATAPORT);
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + 14);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x08) lp->stats.rx_length_errors++;
+ if (status & 0x04) lp->stats.rx_frame_errors++;
+ if (status & 0x02) lp->stats.rx_crc_errors++;
+ if (status & 0x01) lp->stats.rx_over_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The AT1700 claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ /* Prime the FIFO and then flush the packet. */
+ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + 14);
+ }
+
+ if (net_debug > 5)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct device *dev)
+{
+#if 0
+ struct net_local *lp = (struct net_local *)dev->priv;
+#endif
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* No statistic counters on the chip to update. */
+
+#if 0
+ /* Disable the IRQ on boards where it is feasible. */
+ if (lp->jumpered) {
+ outb(0x00, ioaddr + IOCONFIG1);
+ free_irq(dev->irq, dev);
+ }
+#endif
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics.
+ This may be called with the card open or closed.
+ There are no on-chip counters, so this function is trivial.
+*/
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ Set the multicast/promiscuous mode for this adaptor.
+*/
+
+/* The little-endian AUTODIN II ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
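+
+/* Editor's sketch (not part of the original driver): the note above suggests
+   a table-based routine for bulk data.  A minimal illustration of that
+   alternative, using the same little-endian polynomial and initial value,
+   might look like the following. */
+#if 0
+static unsigned int crc32_le_table[256];
+
+static void build_crc32_le_table(void)
+{
+	unsigned int i, bit, crc;
+	for (i = 0; i < 256; i++) {
+		crc = i;
+		for (bit = 0; bit < 8; bit++)
+			crc = (crc & 1) ? (crc >> 1) ^ ethernet_polynomial_le : crc >> 1;
+		crc32_le_table[i] = crc;
+	}
+}
+
+static unsigned int ether_crc_le_by_table(int length, unsigned char *data)
+{
+	unsigned int crc = 0xffffffff;	/* Same initial value as ether_crc_le(). */
+	while (--length >= 0)
+		crc = crc32_le_table[(crc ^ *data++) & 0xff] ^ (crc >> 8);
+	return crc;
+}
+#endif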
+
+static void
+set_rx_mode(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ long flags;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ } else if (dev->mc_count > MC_FILTERBREAK
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
+ } else if (dev->mc_count == 0) {
+ memset(mc_filter, 0x00, sizeof(mc_filter));
+ outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26,
+ mc_filter);
+ }
+
+ save_flags(flags);
+ cli();
+ if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
+ int saved_bank = inw(ioaddr + CONFIG_0);
+ /* Switch to bank 1 and set the multicast table. */
+ outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0);
+ for (i = 0; i < 8; i++)
+ outb(mc_filter[i], ioaddr + 8 + i);
+ memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
+ outw(saved_bank, ioaddr + CONFIG_0);
+ }
+ restore_flags(flags);
+ return;
+}
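+
+/* Editor's sketch (not part of the original driver): a worked example of the
+   hash used above.  ether_crc_le() >> 26 yields a 6-bit index into the
+   64-bit (8-byte) mc_filter, so a membership test for one address could be
+   written as below (byte-wise bit numbering, matching the little-endian
+   layout that set_bit() produces here). */
+#if 0
+static int mc_filter_hit(struct net_local *lp, unsigned char *addr)
+{
+	int bit = ether_crc_le(ETH_ALEN, addr) >> 26;	/* 0 .. 63 */
+	return (lp->mc_filter[bit >> 3] >> (bit & 7)) & 1;
+}
+#endif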
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_at1700 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, at1700_probe };
+
+static int io = 0x260;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("at1700: You should not use auto-probing with insmod!\n");
+ dev_at1700.base_addr = io;
+ dev_at1700.irq = irq;
+ if (register_netdev(&dev_at1700) != 0) {
+ printk("at1700: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_at1700);
+ kfree(dev_at1700.priv);
+ dev_at1700.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_at1700.irq, NULL);
+ release_region(dev_at1700.base_addr, AT1700_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
+ * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
+ * tab-width: 4
+ * c-basic-offset: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/atp.c b/linux/src/drivers/net/atp.c
new file mode 100644
index 0000000..a9445ea
--- /dev/null
+++ b/linux/src/drivers/net/atp.c
@@ -0,0 +1,977 @@
+/* atp.c: Attached (pocket) ethernet adapter driver for linux. */
+/*
+ This is a driver for commonly OEMed pocket (parallel port)
+ ethernet adapters based on the Realtek RTL8002 and RTL8012 chips.
+
+ Written 1993-95,1997 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ The timer-based reset code was written by Bill Carlson, wwc@super.org.
+*/
+
+static const char *version =
+ "atp.c:v1.08 4/1/97 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+/* Operational parameters that may be safely changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT ((400*HZ)/1000)
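+/* i.e. 400 ms; with the usual HZ of 100 this is (400*100)/1000 == 40 jiffies. */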
+
+/*
+ This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket
+ ethernet adapter. This is a common low-cost OEM pocket ethernet
+ adapter, sold under many names.
+
+ Sources:
+ This driver was written from the packet driver assembly code provided by
+ Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated
+ device works just from the assembly code? It ain't pretty. The following
+ description is written based on guesses and writing lots of special-purpose
+ code to test my theorized operation.
+
+ In 1997 Realtek made available the documentation for the second generation
+  RTL8012 chip, which has led to several driver improvements.
+ http://www.realtek.com.tw/cn/cn.html
+
+ Theory of Operation
+
+ The RTL8002 adapter seems to be built around a custom spin of the SEEQ
+ controller core. It probably has a 16K or 64K internal packet buffer, of
+ which the first 4K is devoted to transmit and the rest to receive.
+  The controller maintains the queue of received packets and the packet buffer
+ access pointer internally, with only 'reset to beginning' and 'skip to next
+ packet' commands visible. The transmit packet queue holds two (or more?)
+ packets: both 'retransmit this packet' (due to collision) and 'transmit next
+ packet' commands must be started by hand.
+
+ The station address is stored in a standard bit-serial EEPROM which must be
+ read (ughh) by the device driver. (Provisions have been made for
+ substituting a 74S288 PROM, but I haven't gotten reports of any models
+ using it.) Unlike built-in devices, a pocket adapter can temporarily lose
+ power without indication to the device driver. The major effect is that
+ the station address, receive filter (promiscuous, etc.) and transceiver
+ must be reset.
+
+ The controller itself has 16 registers, some of which use only the lower
+ bits. The registers are read and written 4 bits at a time. The four bit
+ register address is presented on the data lines along with a few additional
+ timing and control bits. The data is then read from status port or written
+ to the data port.
+
+ Correction: the controller has two banks of 16 registers. The second
+ bank contains only the multicast filter table (now used) and the EEPROM
+ access registers.
+
+ Since the bulk data transfer of the actual packets through the slow
+ parallel port dominates the driver's running time, four distinct data
+ (non-register) transfer modes are provided by the adapter, two in each
+ direction. In the first mode timing for the nibble transfers is
+ provided through the data port. In the second mode the same timing is
+ provided through the control port. In either case the data is read from
+ the status port and written to the data port, just as it is accessing
+ registers.
+
+  In addition to the basic data transfer methods, several more modes are
+  created by adding some delay, doing multiple reads of the data to allow
+  it to stabilize.  This delay seems to be needed on most machines.
+
+ The data transfer mode is stored in the 'dev->if_port' field. Its default
+ value is '4'. It may be overridden at boot-time using the third parameter
+ to the "ether=..." initialization.
+
+ The header file <atp.h> provides inline functions that encapsulate the
+ register and data access methods. These functions are hand-tuned to
+ generate reasonable object code. This header file also documents my
+ interpretations of the device registers.
+*/
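+
+/* Editor's sketch (not part of the original driver): the nibble-wide register
+   access described above is implemented by the helpers in atp.h
+   (read_nibble(), write_reg() and friends).  As used in this file, a nibble
+   read returns the 4-bit register value in bits 3..6, which is why the probe
+   and interrupt paths mask with "(x >> 3) & 0x0f"; an accessor for the plain
+   value could therefore look like this. */
+#if 0
+static inline int read_reg_value(short ioaddr, int reg)
+{
+	return (read_nibble(ioaddr, reg) >> 3) & 0x0f;
+}
+#endif
+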
+#include <linux/config.h>
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "atp.h"
+
+/* Kernel compatibility defines, common to David Hinds' PCMCIA package.
+ This is only in the support-all-kernels source code. */
+#include <linux/version.h>		/* Evil, but necessary */
+
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10300
+#define RUN_AT(x) (x) /* What to put in timer->expires. */
+#define DEV_ALLOC_SKB(len) alloc_skb(len, GFP_ATOMIC)
+#define virt_to_bus(addr) ((unsigned long)addr)
+#define bus_to_virt(addr) ((void*)addr)
+
+#else /* 1.3.0 and later */
+#define RUN_AT(x) (jiffies + (x))
+#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+#endif
+#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10338
+#ifdef MODULE
+#if !defined(CONFIG_MODVERSIONS) && !defined(__NO_VERSION__)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#else
+#undef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT
+#undef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#endif /* 1.3.38 */
+
+#if (LINUX_VERSION_CODE >= 0x10344)
+#define NEW_MULTICAST
+#include <linux/delay.h>
+#endif
+
+#ifdef SA_SHIRQ
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum, dev)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n, instance)
+#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+#else
+#define FREE_IRQ(irqnum, dev) free_irq(irqnum)
+#define REQUEST_IRQ(i,h,f,n, instance) request_irq(i,h,f,n)
+#define IRQ(irq, dev_id, pt_regs) (irq, pt_regs)
+#endif
+/* End of kernel compatibility defines. */
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+#define ETHERCARD_TOTAL_SIZE 3
+
+/* Sequence to switch an 8012 from printer mux to ethernet mode. */
+static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,};
+
+/* This code, written by wwc@super.org, resets the adapter every
+ TIMED_CHECKER ticks. This recovers from an unknown error which
+ hangs the device. */
+#define TIMED_CHECKER (HZ/4)
+#ifdef TIMED_CHECKER
+#include <linux/timer.h>
+static void atp_timed_checker(unsigned long ignored);
+#endif
+
+/* Index to functions, as function prototypes. */
+
+extern int atp_init(struct device *dev);
+
+static int atp_probe1(struct device *dev, short ioaddr);
+static void get_node_ID(struct device *dev);
+static unsigned short eeprom_op(short ioaddr, unsigned int cmd);
+static int net_open(struct device *dev);
+static void hardware_init(struct device *dev);
+static void write_packet(short ioaddr, int length, unsigned char *packet, int mode);
+static void trigger_send(short ioaddr, int length);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt IRQ(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static void read_block(short ioaddr, int length, unsigned char *buffer, int data_mode);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+#ifdef NEW_MULTICAST
+static void set_rx_mode_8002(struct device *dev);
+static void set_rx_mode_8012(struct device *dev);
+#else
+static void set_rx_mode_8002(struct device *dev, int num_addrs, void *addrs);
+static void set_rx_mode_8012(struct device *dev, int num_addrs, void *addrs);
+#endif
+
+
+/* A list of all installed ATP devices, for removing the driver module. */
+static struct device *root_atp_dev = NULL;
+
+/* Check for a network adapter of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+int
+atp_init(struct device *dev)
+{
+ int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return atp_probe1(dev, base_addr);
+ else if (base_addr == 1) /* Don't probe at all. */
+ return ENXIO;
+
+ for (port = ports; *port; port++) {
+ int ioaddr = *port;
+ outb(0x57, ioaddr + PAR_DATA);
+ if (inb(ioaddr + PAR_DATA) != 0x57)
+ continue;
+ if (atp_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+static int atp_probe1(struct device *dev, short ioaddr)
+{
+ struct net_local *lp;
+ int saved_ctrl_reg, status, i;
+
+ outb(0xff, ioaddr + PAR_DATA);
+ /* Save the original value of the Control register, in case we guessed
+ wrong. */
+ saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
+ /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */
+ outb(0x04, ioaddr + PAR_CONTROL);
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+ eeprom_delay(2048);
+ status = read_nibble(ioaddr, CMR1);
+
+ if ((status & 0x78) != 0x08) {
+ /* The pocket adapter probe failed, restore the control register. */
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return 1;
+ }
+ status = read_nibble(ioaddr, CMR2_h);
+ if ((status & 0x78) != 0x10) {
+ outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
+ return 1;
+ }
+
+ dev = init_etherdev(dev, sizeof(struct net_local));
+
+ /* Find the IRQ used by triggering an interrupt. */
+ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */
+
+ /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */
+ if (ioaddr == 0x378)
+ dev->irq = 7;
+ else
+ dev->irq = 5;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+
+ dev->base_addr = ioaddr;
+
+ /* Read the station address PROM. */
+ get_node_ID(dev);
+
+ printk("%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
+ "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr,
+ dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+
+ if (net_debug)
+ printk(version);
+
+ /* Initialize the device structure. */
+ ether_setup(dev);
+ if (dev->priv == NULL)
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ lp = (struct net_local *)dev->priv;
+ lp->chip_type = RTL8002;
+ lp->addr_mode = CMR2h_Normal;
+
+ lp->next_module = root_atp_dev;
+ root_atp_dev = dev;
+
+ /* For the ATP adapter the "if_port" is really the data transfer mode. */
+ dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4;
+ if (dev->mem_end & 0xf)
+ net_debug = dev->mem_end & 7;
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list =
+ lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012;
+
+ return 0;
+}
+
+/* Read the station address PROM, usually a word-wide EEPROM. */
+static void get_node_ID(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ int sa_offset = 0;
+ int i;
+
+ write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */
+
+ /* Some adapters have the station address at offset 15 instead of offset
+ zero. Check for it, and fix it if needed. */
+ if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
+ sa_offset = 15;
+
+ for (i = 0; i < 3; i++)
+ ((unsigned short *)dev->dev_addr)[i] =
+ ntohs(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+}
+
+/*
+ An EEPROM read command starts by shifting out 0x60+address, and then
+ shifting in the serial data. See the NatSemi databook for details.
+ * ________________
+ * CS : __|
+ * ___ ___
+ * CLK: ______| |___| |
+ * __ _______ _______
+ * DI : __X_______X_______X
+ * DO : _________X_______X
+ */
+
+static unsigned short eeprom_op(short ioaddr, unsigned int cmd)
+{
+ unsigned eedata_out = 0;
+ int num_bits = EE_CMD_SIZE;
+
+ while (--num_bits >= 0) {
+ char outval = test_bit(num_bits, &cmd) ? EE_DATA_WRITE : 0;
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
+ eeprom_delay(5);
+ write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
+ eedata_out <<= 1;
+ if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
+ eedata_out++;
+ eeprom_delay(5);
+ }
+ write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
+ return eedata_out;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine sets everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+
+ This is an attachable device: if there is no dev->priv entry then it wasn't
+ probed for at boot-time, and we need to probe for it again.
+ */
+static int net_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ /* The interrupt line is turned off (tri-stated) when the device isn't in
+ use. That's especially important for "attached" interfaces where the
+ port or interrupt may be shared. */
+#ifndef SA_SHIRQ
+ if (irq2dev_map[dev->irq] != 0
+ || (irq2dev_map[dev->irq] = dev) == 0
+ || REQUEST_IRQ(dev->irq, &net_interrupt, 0, "ATP", dev)) {
+ return -EAGAIN;
+ }
+#else
+ if (request_irq(dev->irq, &net_interrupt, 0, "ATP Ethernet", dev))
+ return -EAGAIN;
+#endif
+
+ MOD_INC_USE_COUNT;
+ hardware_init(dev);
+ dev->start = 1;
+
+ init_timer(&lp->timer);
+ lp->timer.expires = RUN_AT(TIMED_CHECKER);
+ lp->timer.data = (unsigned long)dev;
+ lp->timer.function = &atp_timed_checker; /* timer handler */
+ add_timer(&lp->timer);
+
+ return 0;
+}
+
+/* This routine resets the hardware. We initialize everything, assuming that
+ the hardware may have been temporarily detached. */
+static void hardware_init(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ /* Turn off the printer multiplexer on the 8012. */
+ for (i = 0; i < 8; i++)
+ outb(mux_8012[i], ioaddr + PAR_DATA);
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET);
+
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+
+ if (net_debug > 2) {
+ printk("%s: Reset: current Rx mode %d.\n", dev->name,
+ (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
+ }
+
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+
+ /* Enable the interrupt line from the serial port. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+
+ /* Unmask the interesting interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+
+ lp->tx_unit_busy = 0;
+ lp->pac_cnt_in_tx_buf = 0;
+ lp->saved_tx_size = 0;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+}
+
+static void trigger_send(short ioaddr, int length)
+{
+ write_reg_byte(ioaddr, TxCNT0, length & 0xff);
+ write_reg(ioaddr, TxCNT1, length >> 8);
+ write_reg(ioaddr, CMR1, CMR1_Xmit);
+}
+
+static void write_packet(short ioaddr, int length, unsigned char *packet, int data_mode)
+{
+ length = (length + 1) & ~1; /* Round up to word length. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ if ((data_mode & 1) == 0) {
+ /* Write the packet out, starting with the write addr. */
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+ do {
+ write_byte_mode0(ioaddr, *packet++);
+ } while (--length > 0) ;
+ } else {
+ /* Write the packet out in slow mode. */
+ unsigned char outbyte = *packet++;
+
+ outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ outb(WrAddr+MAR, ioaddr + PAR_DATA);
+
+ outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outbyte >>= 4;
+ outb(outbyte & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ while (--length > 0)
+ write_byte_mode1(ioaddr, *packet++);
+ }
+ /* Terminate the Tx frame. End of write: ECB. */
+ outb(0xff, ioaddr + PAR_DATA);
+ outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+#ifndef final_version
+ if (skb == NULL || skb->len <= 0) {
+ printk("%s: Obsolete driver layer request made: skbuff==NULL.\n",
+ dev->name);
+ dev_tint(dev);
+ return 0;
+ }
+#endif
+
+ /* Use transmit-while-tbusy as a crude error timer. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ if (jiffies - dev->trans_start < TX_TIMEOUT)
+ return 1;
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
+ : "IRQ conflict");
+ lp->stats.tx_errors++;
+ /* Try to restart the adapter. */
+ hardware_init(dev);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ return 1;
+ } else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+ int flags;
+
+ /* Disable interrupts by writing 0x00 to the Interrupt Mask Register.
+ This sequence must not be interrupted by an incoming packet. */
+ save_flags(flags);
+ cli();
+ write_reg(ioaddr, IMR, 0);
+ write_reg_high(ioaddr, IMR, 0);
+ restore_flags(flags);
+
+ write_packet(ioaddr, length, buf, dev->if_port);
+
+ lp->pac_cnt_in_tx_buf++;
+ if (lp->tx_unit_busy == 0) {
+ trigger_send(ioaddr, length);
+ lp->saved_tx_size = 0; /* Redundant */
+ lp->re_tx = 0;
+ lp->tx_unit_busy = 1;
+ } else
+ lp->saved_tx_size = length;
+
+ dev->trans_start = jiffies;
+ /* Re-enable the LPT interrupts. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr);
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt IRQ(int irq, void *dev_instance, struct pt_regs * regs)
+{
+#ifdef SA_SHIRQ
+ struct device *dev = (struct device *)dev_instance;
+#else
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+#endif
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 20;
+ static int num_tx_since_rx = 0;
+
+ if (dev == NULL) {
+ printk ("ATP_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /* Disable additional spurious interrupts. */
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+
+ /* The adapter's output is currently the IRQ line, switch it to data. */
+ write_reg(ioaddr, CMR2, CMR2_NULL);
+ write_reg(ioaddr, IMR, 0);
+
+ if (net_debug > 5) printk("%s: In interrupt ", dev->name);
+ while (--boguscount > 0) {
+ status = read_nibble(ioaddr, ISR);
+ if (net_debug > 5) printk("loop status %02x..", status);
+
+ if (status & (ISR_RxOK<<3)) {
+ write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */
+ do {
+ int read_status = read_nibble(ioaddr, CMR1);
+ if (net_debug > 6)
+ printk("handling Rx packet %02x..", read_status);
+ /* We acknowledged the normal Rx interrupt, so if the interrupt
+ is still outstanding we must have a Rx error. */
+ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */
+ lp->stats.rx_over_errors++;
+ /* Set to no-accept mode long enough to remove a packet. */
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+ net_rx(dev);
+ /* Clear the interrupt and return to normal Rx mode. */
+ write_reg_high(ioaddr, ISR, ISRh_RxErr);
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
+ net_rx(dev);
+ dev->last_rx = jiffies;
+ num_tx_since_rx = 0;
+ } else
+ break;
+ } while (--boguscount > 0);
+ } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
+ if (net_debug > 6) printk("handling Tx done..");
+ /* Clear the Tx interrupt. We should check for too many failures
+ and reinitialize the adapter. */
+ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
+ if (status & (ISR_TxErr<<3)) {
+ lp->stats.collisions++;
+ if (++lp->re_tx > 15) {
+ lp->stats.tx_aborted_errors++;
+ hardware_init(dev);
+ break;
+ }
+ /* Attempt to retransmit. */
+ if (net_debug > 6) printk("attempting to ReTx");
+ write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
+ } else {
+ /* Finish up the transmit. */
+ lp->stats.tx_packets++;
+ lp->pac_cnt_in_tx_buf--;
+ if ( lp->saved_tx_size) {
+ trigger_send(ioaddr, lp->saved_tx_size);
+ lp->saved_tx_size = 0;
+ lp->re_tx = 0;
+ } else
+ lp->tx_unit_busy = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ num_tx_since_rx++;
+ } else if (num_tx_since_rx > 8
+ && jiffies > dev->last_rx + 100) {
+ if (net_debug > 2)
+ printk("%s: Missed packet? No Rx after %d Tx and %ld jiffies"
+ " status %02x CMR1 %02x.\n", dev->name,
+ num_tx_since_rx, jiffies - dev->last_rx, status,
+ (read_nibble(ioaddr, CMR1) >> 3) & 15);
+ lp->stats.rx_missed_errors++;
+ hardware_init(dev);
+ num_tx_since_rx = 0;
+ break;
+ } else
+ break;
+ }
+
+  /* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+ {
+ int i;
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+ }
+
+ /* Tell the adapter that it can go back to using the output line as IRQ. */
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT);
+ /* Enable the physical interrupt line, which is sure to be low until.. */
+ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
+ /* .. we enable the interrupt sources. */
+ write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
+ write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */
+
+ if (net_debug > 5) printk("exiting interrupt.\n");
+
+ dev->interrupt = 0;
+
+ return;
+}
+
+#ifdef TIMED_CHECKER
+/* The following code fixes a rare (and very difficult to track down)
+ problem where the adapter forgets its ethernet address. */
+static void atp_timed_checker(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ int ioaddr = dev->base_addr;
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int tickssofar = jiffies - lp->last_rx_time;
+ int i;
+
+ if (tickssofar > 2*HZ && dev->interrupt == 0) {
+#if 1
+ for (i = 0; i < 6; i++)
+ write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
+ lp->last_rx_time = jiffies;
+#else
+ for (i = 0; i < 6; i++)
+ if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i])
+ {
+ struct net_local *lp = (struct net_local *)atp_timed_dev->priv;
+ write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]);
+ if (i == 2)
+ lp->stats.tx_errors++;
+ else if (i == 3)
+ lp->stats.tx_dropped++;
+ else if (i == 4)
+ lp->stats.collisions++;
+ else
+ lp->stats.rx_errors++;
+ }
+#endif
+ }
+ lp->timer.expires = RUN_AT(TIMED_CHECKER);
+ add_timer(&lp->timer);
+}
+#endif
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ struct rx_header rx_head;
+
+ /* Process the received packet. */
+ outb(EOC+MAR, ioaddr + PAR_DATA);
+ read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
+ if (net_debug > 5)
+ printk(" rx_count %04x %04x %04x %04x..", rx_head.pad,
+ rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
+ if ((rx_head.rx_status & 0x77) != 0x01) {
+ lp->stats.rx_errors++;
+ if (rx_head.rx_status & 0x0004) lp->stats.rx_frame_errors++;
+ else if (rx_head.rx_status & 0x0002) lp->stats.rx_crc_errors++;
+ if (net_debug > 3) printk("%s: Unknown ATP Rx error %04x.\n",
+ dev->name, rx_head.rx_status);
+ if (rx_head.rx_status & 0x0020) {
+ lp->stats.rx_fifo_errors++;
+ write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE);
+ write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
+ } else if (rx_head.rx_status & 0x0050)
+ hardware_init(dev);
+ return;
+ } else {
+    /* Malloc up new buffer. The "-4" omits the FCS (CRC). */
+ int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+ skb = DEV_ALLOC_SKB(pkt_len + 2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ goto done;
+ }
+ skb->dev = dev;
+
+#if LINUX_VERSION_CODE >= 0x10300
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
+ skb->protocol = eth_type_trans(skb, dev);
+#else
+ read_block(ioaddr, pkt_len, skb->data, dev->if_port);
+ skb->len = pkt_len;
+#endif
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ done:
+ write_reg(ioaddr, CMR1, CMR1_NextPkt);
+ lp->last_rx_time = jiffies;
+ return;
+}
+
+static void read_block(short ioaddr, int length, unsigned char *p, int data_mode)
+{
+
+  if (data_mode <= 3) { /* Modes 0 through 3. */
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
+ ioaddr + PAR_DATA);
+ if (data_mode <= 1) { /* Mode 0 or 1 */
+ do *p++ = read_byte_mode0(ioaddr); while (--length > 0);
+ } else /* Mode 2 or 3 */
+ do *p++ = read_byte_mode2(ioaddr); while (--length > 0);
+ } else if (data_mode <= 5)
+ do *p++ = read_byte_mode4(ioaddr); while (--length > 0);
+ else
+ do *p++ = read_byte_mode6(ioaddr); while (--length > 0);
+
+ outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
+ outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
+}
+
+/* The inverse routine to net_open(). */
+static int
+net_close(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ del_timer(&lp->timer);
+
+ /* Flush the Tx and disable Rx here. */
+ lp->addr_mode = CMR2h_OFF;
+ write_reg_high(ioaddr, CMR2, CMR2h_OFF);
+
+ /* Free the IRQ line. */
+ outb(0x00, ioaddr + PAR_CONTROL);
+ FREE_IRQ(dev->irq, dev);
+#ifndef SA_SHIRQ
+ irq2dev_map[dev->irq] = 0;
+#endif
+
+ /* Reset the ethernet hardware and activate the printer pass-through. */
+ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ return &lp->stats;
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
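+
+/* Note: set_rx_mode_8012() below keeps only the low six bits of this CRC,
+   so each multicast address sets a single bit of a 64-bit hash -- on this
+   little-endian target that is bit (n & 7) of mc_filter[n >> 3] for
+   n = crc & 0x3f -- and the eight filter bytes are then written to
+   registers 0..7 of page 1. */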
+
+static void
+#ifdef NEW_MULTICAST
+set_rx_mode_8002(struct device *dev)
+#else
+set_rx_mode_8002(struct device *dev, int num_addrs, void *addrs)
+#endif
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+
+ if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) {
+ /* We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+ lp->addr_mode = CMR2h_PROMISC;
+ } else
+ lp->addr_mode = CMR2h_Normal;
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+}
+
+static void
+#ifdef NEW_MULTICAST
+set_rx_mode_8012(struct device *dev)
+#else
+set_rx_mode_8012(struct device *dev, int num_addrs, void *addrs)
+#endif
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ new_mode = CMR2h_PROMISC;
+ } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ new_mode = CMR2h_Normal;
+ } else {
+ struct dev_mc_list *mclist;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ new_mode = CMR2h_Normal;
+ }
+ lp->addr_mode = new_mode;
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
+ for (i = 0; i < 8; i++)
+ write_reg_byte(ioaddr, i, mc_filter[i]);
+  if (net_debug > 2) {
+ printk("%s: Mode %d, setting multicast filter to",
+ dev->name, lp->addr_mode);
+ for (i = 0; i < 8; i++)
+ printk(" %2.2x", mc_filter[i]);
+ printk(".\n");
+ }
+
+ write_reg_high(ioaddr, CMR2, lp->addr_mode);
+ write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
+}
+
+#ifdef MODULE
+static int debug = 1;
+int
+init_module(void)
+{
+ net_debug = debug;
+ root_atp_dev = NULL;
+ atp_init(0);
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ /* No need to release_region(), since we never snarf it. */
+ while (root_atp_dev) {
+ next_dev = ((struct net_local *)root_atp_dev->priv)->next_module;
+ unregister_netdev(root_atp_dev);
+ kfree(root_atp_dev);
+ root_atp_dev = next_dev;
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c atp.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/atp.h b/linux/src/drivers/net/atp.h
new file mode 100644
index 0000000..b4d1933
--- /dev/null
+++ b/linux/src/drivers/net/atp.h
@@ -0,0 +1,274 @@
+/* Linux header file for the ATP pocket ethernet adapter. */
+/* v1.04 4/1/97 becker@cesdis.gsfc.nasa.gov. */
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+struct net_local {
+#ifdef __KERNEL__
+ struct enet_statistics stats;
+#endif
+ struct device *next_module;
+ struct timer_list timer; /* Media selection timer. */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ ushort saved_tx_size;
+ unsigned tx_unit_busy:1;
+ unsigned char re_tx, /* Number of packet retransmissions. */
+ addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
+ pac_cnt_in_tx_buf,
+ chip_type;
+};
+
+struct rx_header {
+ ushort pad; /* Pad. */
+ ushort rx_count;
+ ushort rx_status; /* Unknown bit assignments :-<. */
+ ushort cur_addr; /* Apparently the current buffer address(?) */
+};
+
+#define PAR_DATA 0
+#define PAR_STATUS 1
+#define PAR_CONTROL 2
+
+enum chip_type { RTL8002, RTL8012 };
+
+#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
+#define Ctrl_HNibRead 0
+#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
+#define Ctrl_HNibWrite 0
+#define Ctrl_SelData 0x04 /* LP_PINITP */
+#define Ctrl_IRQEN 0x10 /* LP_PINTEN */
+
+#define EOW 0xE0
+#define EOC 0xE0
+#define WrAddr 0x40 /* Set address of EPLC read, write register. */
+#define RdAddr 0xC0
+#define HNib 0x10
+
+enum page0_regs
+{
+ /* The first six registers hold the ethernet physical station address. */
+ PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5,
+ TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */
+ TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */
+ ISR = 10, IMR = 11, /* Interrupt status and mask. */
+ CMR1 = 12, /* Command register 1. */
+ CMR2 = 13, /* Command register 2. */
+ MODSEL = 14, /* Mode select register. */
+ MAR = 14, /* Memory address register (?). */
+  CMR2_h = 0x1d,
+};
+
+enum eepage_regs
+{ PROM_CMD = 6, PROM_DATA = 7 }; /* Note that PROM_CMD is in the "high" bits. */
+
+
+#define ISR_TxOK 0x01
+#define ISR_RxOK 0x04
+#define ISR_TxErr 0x02
+#define ISRh_RxErr 0x11 /* ISR, high nibble */
+
+#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */
+#define CMR1h_RESET 0x04 /* Reset. */
+#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */
+#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */
+#define CMR1h_TxRxOFF 0x00
+#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */
+#define CMR1_Xmit 0x04 /* Trigger a transmit. */
+#define CMR1_IRQ 0x02 /* Interrupt active. */
+#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */
+#define CMR1_NextPkt 0x01 /* Enable the buffer(?). */
+
+#define CMR2_NULL 8
+#define CMR2_IRQOUT 9
+#define CMR2_RAMTEST 10
+#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */
+
+#define CMR2h_OFF 0 /* No accept mode. */
+#define CMR2h_Physical 1 /* Accept a physical address match only. */
+#define CMR2h_Normal 2 /* Accept physical and broadcast address. */
+#define CMR2h_PROMISC 3 /* Promiscuous mode. */
+
+/* An inline function used below: it differs from inb() by explicitly returning an unsigned
+ char, saving a truncation. */
+extern inline unsigned char inbyte(unsigned short port)
+{
+ unsigned char _v;
+ __asm__ __volatile__ ("inb %w1,%b0" :"=a" (_v):"d" (port));
+ return _v;
+}
+
+/* Read register OFFSET.
+ This command should always be terminated with read_end(). */
+extern inline unsigned char read_nibble(short port, unsigned char offset)
+{
+ unsigned char retval;
+ outb(EOC+offset, port + PAR_DATA);
+ outb(RdAddr+offset, port + PAR_DATA);
+ inbyte(port + PAR_STATUS); /* Settling time delay */
+ retval = inbyte(port + PAR_STATUS);
+ outb(EOC+offset, port + PAR_DATA);
+
+ return retval;
+}
+
+/* Functions for bulk data read. The interrupt line is always disabled. */
+/* Get a byte using read mode 0, reading data from the control lines. */
+extern inline unsigned char read_byte_mode0(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* The same as read_byte_mode0(), but does multiple inb()s for stability. */
+extern inline unsigned char read_byte_mode2(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL);
+ inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register. */
+extern inline unsigned char read_byte_mode4(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+/* Read a byte through the data register, double reading to allow settling. */
+extern inline unsigned char read_byte_mode6(short ioaddr)
+{
+ unsigned char low_nib;
+
+ outb(RdAddr | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f;
+ outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA);
+ inbyte(ioaddr + PAR_STATUS);
+ return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0);
+}
+
+extern inline void
+write_reg(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA);
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval &= 0xf0;
+ outval |= value;
+ outb(outval, port + PAR_DATA);
+ outval &= 0x1f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | outval, port + PAR_DATA);
+}
+
+extern inline void
+write_reg_high(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval = EOC | HNib | reg;
+
+ outb(outval, port + PAR_DATA);
+ outval &= WrAddr | HNib | 0x0f;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outval = WrAddr | HNib | value;
+ outb(outval, port + PAR_DATA);
+ outval &= HNib | 0x0f; /* HNib | value */
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA);
+
+ outb(EOC | HNib | outval, port + PAR_DATA);
+}
+
+/* Write a byte out using nibble mode. The low nibble is written first. */
+extern inline void
+write_reg_byte(short port, unsigned char reg, unsigned char value)
+{
+ unsigned char outval;
+ outb(EOC | reg, port + PAR_DATA); /* Reset the address register. */
+ outval = WrAddr | reg;
+ outb(outval, port + PAR_DATA);
+ outb(outval, port + PAR_DATA); /* Double write for PS/2. */
+
+ outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA);
+ outb(value & 0x0f, port + PAR_DATA);
+ value >>= 4;
+ outb(value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+ outb(0x10 | value, port + PAR_DATA);
+
+ outb(EOC | value, port + PAR_DATA); /* Reset the address register. */
+}
+
+/*
+ * Bulk data writes to the packet buffer. The interrupt line remains enabled.
+ * The first, faster method uses only the dataport (data modes 0, 2 & 4).
+ * The second (backup) method uses data and control regs (modes 1, 3 & 5).
+ * It should only be needed when there is skew between the individual data
+ * lines.
+ */
+extern inline void write_byte_mode0(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+}
+
+extern inline void write_byte_mode1(short ioaddr, unsigned char value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL);
+ outb((value>>4) | 0x10, ioaddr + PAR_DATA);
+ outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL);
+}
+
+/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */
+extern inline void write_word_mode0(short ioaddr, unsigned short value)
+{
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb(value & 0x0f, ioaddr + PAR_DATA);
+ value >>= 4;
+ outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA);
+}
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_CLK_HIGH 0x12
+#define EE_CLK_LOW 0x16
+#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay(ticks) \
+do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17)
+#define EE_READ(offset) (((6 << 6) + (offset)) << 17)
+#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17)
+#define EE_CMD_SIZE 27 /* The command+address+data size. */
diff --git a/linux/src/drivers/net/auto_irq.c b/linux/src/drivers/net/auto_irq.c
new file mode 100644
index 0000000..82bc7b1
--- /dev/null
+++ b/linux/src/drivers/net/auto_irq.c
@@ -0,0 +1,123 @@
+/* auto_irq.c: Auto-configure IRQ lines for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This code is a general-purpose IRQ line detector for devices with
+ jumpered IRQ lines. If you can make the device raise an IRQ (and
+ that IRQ line isn't already being used), these routines will tell
+ you what IRQ line it's using -- perfect for those oh-so-cool boot-time
+ device probes!
+
+  To use this, first call autoirq_setup(timeout).  TIMEOUT is how many
+  'jiffies' (1/100 sec.) to spend watching for other devices with active
+  IRQ lines; it can usually be zero at boot.  'autoirq_setup()' returns the bit
+ vector of nominally-available IRQ lines (lines may be physically in-use,
+ but not yet registered to a device).
+ Next, set up your device to trigger an interrupt.
+ Finally call autoirq_report(TIMEOUT) to find out which IRQ line was
+ most recently active. The TIMEOUT should usually be zero, but may
+ be set to the number of jiffies to wait for a slow device to raise an IRQ.
+
+ The idea of using the setup timeout to filter out bogus IRQs came from
+ the serial driver.
+*/
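+
+/* A minimal usage sketch (hypothetical caller code, not part of this file;
+   TRIGGER_IRQ, CMD and ioaddr stand in for whatever the probing driver
+   needs to make its own card raise an interrupt):
+
+	int irq;
+
+	autoirq_setup(0);
+	outb(TRIGGER_IRQ, ioaddr + CMD);	(make the card raise its IRQ)
+	irq = autoirq_report(2);		(wait up to 2 jiffies for it)
+	if (irq == 0)
+		printk("No IRQ line detected.\n");
+	else
+		dev->irq = irq;
+*/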
+
+
+#ifdef version
+static const char *version=
+"auto_irq.c:v1.11 Donald Becker (becker@cesdis.gsfc.nasa.gov)";
+#endif
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/netdevice.h>
+
+struct device *irq2dev_map[NR_IRQS] = {0, 0, /* ... zeroed */};
+
+unsigned long irqs_busy = 0x2147; /* The set of fixed IRQs (keyboard, timer, etc) */
+unsigned long irqs_used = 0x0001; /* The set of fixed IRQs sometimes enabled. */
+unsigned long irqs_reserved = 0x0000; /* An advisory "reserved" table. */
+unsigned long irqs_shared = 0x0000; /* IRQ lines "shared" among conforming cards.*/
+
+static volatile unsigned long irq_bitmap; /* The irqs we actually found. */
+static unsigned long irq_handled; /* The irq lines we have a handler on. */
+static volatile int irq_number; /* The latest irq number we actually found. */
+
+static void autoirq_probe(int irq, void *dev_id, struct pt_regs * regs)
+{
+ irq_number = irq;
+ set_bit(irq, (void *)&irq_bitmap); /* irq_bitmap |= 1 << irq; */
+ /* This code used to disable the irq. However, the interrupt stub
+ * would then re-enable the interrupt with (potentially) disastrous
+ * consequences
+ */
+ free_irq(irq, dev_id);
+ return;
+}
+
+int autoirq_setup(int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies + waittime;
+ unsigned long boguscount = (waittime*loops_per_sec) / 100;
+
+ irq_handled = 0;
+ irq_bitmap = 0;
+
+ for (i = 0; i < 16; i++) {
+ if (test_bit(i, &irqs_busy) == 0
+ && request_irq(i, autoirq_probe, SA_INTERRUPT, "irq probe", NULL) == 0)
+ set_bit(i, (void *)&irq_handled); /* irq_handled |= 1 << i;*/
+ }
+ /* Update our USED lists. */
+ irqs_used |= ~irq_handled;
+
+ /* Hang out at least <waittime> jiffies waiting for bogus IRQ hits. */
+ while (timeout > jiffies && --boguscount > 0)
+ ;
+
+ irq_handled &= ~irq_bitmap;
+
+ irq_number = 0; /* We are interested in new interrupts from now on */
+
+ return irq_handled;
+}
+
+int autoirq_report(int waittime)
+{
+ int i;
+ unsigned long timeout = jiffies+waittime;
+ unsigned long boguscount = (waittime*loops_per_sec) / 100;
+
+ /* Hang out at least <waittime> jiffies waiting for the IRQ. */
+
+ while (timeout > jiffies && --boguscount > 0)
+ if (irq_number)
+ break;
+
+ irq_handled &= ~irq_bitmap; /* This eliminates the already reset handlers */
+
+ /* Retract the irq handlers that we installed. */
+ for (i = 0; i < 16; i++) {
+ if (test_bit(i, (void *)&irq_handled))
+ free_irq(i, NULL);
+ }
+ return irq_number;
+}
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c auto_irq.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/cb_shim.c b/linux/src/drivers/net/cb_shim.c
new file mode 100644
index 0000000..599b5bb
--- /dev/null
+++ b/linux/src/drivers/net/cb_shim.c
@@ -0,0 +1,296 @@
+/* cb_shim.c: Linux CardBus device support code. */
+/*
+ Written 1999-2002 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by
+ reference. This is not a documented interface. Drivers incorporating
+ or interacting with these functions are derivative works and thus
+ are covered the GPL. They must include an explicit GPL notice.
+  are covered by the GPL. They must include an explicit GPL notice.
+ This code provides a shim to allow newer drivers to interact with the
+ older Cardbus driver activation code. The functions supported are
+ attach, suspend, power-off, resume and eject.
+
+ The author may be reached as becker@scyld.com, or
+ Donald Becker
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/drivers.html
+
+  Other contributors: (none yet)
+*/
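+
+/* Registration sketch (hypothetical): a client driver fills in a
+   struct drv_id_info from pci-scan.h -- at minimum the name, pci_dev_tbl,
+   pci_class, probe1 and pwr_event members that drv_attach() below reads --
+   and then calls
+
+	do_cb_register(&my_drv_info);		(typically from init_module)
+	...
+	do_cb_unregister(&my_drv_info);		(typically from cleanup_module)
+
+   after which attach, suspend, resume and detach events for matching cards
+   are dispatched to its probe1() and pwr_event() handlers. */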
+
+static const char version1[] =
+"cb_shim.c:v1.03 7/12/2002 Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/linux/drivers.html\n";
+
+/* Module options. */
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/* These might be awkward to locate. */
+#include <pcmcia/driver_ops.h>
+#include "pci-scan.h"
+#include "kern_compat.h"
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Hot-swap-PCI and Cardbus event dispatch");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Enable additional status messages (0-7)");
+
+/* Note: this is used in a slightly sleazy manner: it is passed to routines
+ that expect and return just dev_node_t. However using the too-simple
+   dev_node_t complicates device management -- older drivers had to
+ look up dev_node_t.name in their private list. */
+
+struct registered_pci_device {
+ struct dev_node_t node;
+ int magic;
+ struct registered_pci_device *next;
+ struct drv_id_info *drv_info;
+ struct pci_dev *pci_loc;
+ void *dev_instance;
+} static *root_pci_devs = 0;
+
+struct drv_shim {
+ struct drv_id_info *did;
+ struct driver_operations drv_ops;
+ int magic;
+ struct drv_shim *next;
+} static *root_drv_id = 0;
+
+static void drv_power_op(struct dev_node_t *node, enum drv_pwr_action action)
+{
+ struct registered_pci_device **devp, **next, *rpin = (void *)node, *rp;
+ if (debug > 1)
+ printk(KERN_DEBUG "power operation(%s, %d).\n",
+ rpin->drv_info->name, action);
+ /* With our wrapper structure we can almost do
+ rpin->drv_info->pwr_event(rpin->dev_instance, action);
+ But the detach operation requires us to remove the object from the
+ list, so we check for uncontrolled "ghost" devices. */
+ for (devp = &root_pci_devs; *devp; devp = next) {
+ rp = *devp;
+ next = &rp->next;
+ if (rp == rpin) {
+ if (rp->drv_info->pwr_event)
+ rp->drv_info->pwr_event((*devp)->dev_instance, action);
+ else
+        printk(KERN_ERR "No power event handler for driver %s.\n",
+ rpin->drv_info->name);
+ if (action == DRV_DETACH) {
+ kfree(rp);
+ *devp = *next;
+ MOD_DEC_USE_COUNT;
+ }
+ return;
+ }
+ }
+ if (debug)
+ printk(KERN_WARNING "power operation(%s, %d) for a ghost device.\n",
+ node->dev_name, action);
+}
+/* Wrappers / static lambdas. */
+static void drv_suspend(struct dev_node_t *node)
+{
+ drv_power_op(node, DRV_SUSPEND);
+}
+static void drv_resume(struct dev_node_t *node)
+{
+ drv_power_op(node, DRV_RESUME);
+}
+static void drv_detach(struct dev_node_t *node)
+{
+ drv_power_op(node, DRV_DETACH);
+}
+
+/* The CardBus interaction does not identify the driver the attach() is
+ for, thus we must search for the ID in all PCI device tables.
+ While ugly, we likely only have one driver loaded anyway.
+*/
+static dev_node_t *drv_attach(struct dev_locator_t *loc)
+{
+ struct drv_shim *dp;
+ struct drv_id_info *drv_id = NULL;
+ struct pci_id_info *pci_tbl = NULL;
+ u32 pci_id, subsys_id, pci_rev, pciaddr;
+ u8 irq;
+ int chip_idx = 0, pci_flags, bus, devfn;
+ long ioaddr;
+ void *newdev;
+
+ if (debug > 1)
+ printk(KERN_INFO "drv_attach()\n");
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ if (debug > 1)
+ printk(KERN_DEBUG "drv_attach(bus %d, function %d)\n", bus, devfn);
+
+ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &pci_id);
+ pcibios_read_config_dword(bus, devfn, PCI_SUBSYSTEM_ID, &subsys_id);
+ pcibios_read_config_dword(bus, devfn, PCI_REVISION_ID, &pci_rev);
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ for (dp = root_drv_id; dp; dp = dp->next) {
+ drv_id = dp->did;
+ pci_tbl = drv_id->pci_dev_tbl;
+ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+ struct pci_id_info *chip = &pci_tbl[chip_idx];
+ if ((pci_id & chip->id.pci_mask) == chip->id.pci
+ && (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
+ && (pci_rev & chip->id.revision_mask) == chip->id.revision)
+ break;
+ }
+ if (pci_tbl[chip_idx].name) /* Compiled out! */
+ break;
+ }
+ if (dp == 0) {
+ printk(KERN_WARNING "No driver match for device %8.8x at %d/%d.\n",
+ pci_id, bus, devfn);
+ return 0;
+ }
+ pci_flags = pci_tbl[chip_idx].pci_flags;
+ pcibios_read_config_dword(bus, devfn, ((pci_flags >> 2) & 0x1C) + 0x10,
+ &pciaddr);
+ if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+ } else
+ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size);
+ if (ioaddr == 0 || irq == 0) {
+ printk(KERN_ERR "The %s at %d/%d was not assigned an %s.\n"
+ KERN_ERR " It will not be activated.\n",
+ pci_tbl[chip_idx].name, bus, devfn,
+ ioaddr == 0 ? "address" : "IRQ");
+ return NULL;
+ }
+ printk(KERN_INFO "Found a %s at %d/%d address 0x%x->0x%lx IRQ %d.\n",
+ pci_tbl[chip_idx].name, bus, devfn, pciaddr, ioaddr, irq);
+ {
+ u16 pci_command;
+ pcibios_read_config_word(bus, devfn, PCI_COMMAND, &pci_command);
+ printk(KERN_INFO "%s at %d/%d command 0x%x.\n",
+ pci_tbl[chip_idx].name, bus, devfn, pci_command);
+ }
+
+ newdev = drv_id->probe1(pci_find_slot(bus, devfn), 0,
+ ioaddr, irq, chip_idx, 0);
+ if (newdev) {
+ struct registered_pci_device *hsdev =
+ kmalloc(sizeof(struct registered_pci_device), GFP_KERNEL);
+ if (drv_id->pci_class == PCI_CLASS_NETWORK_ETHERNET<<8)
+ strcpy(hsdev->node.dev_name, ((struct net_device *)newdev)->name);
+ hsdev->node.major = hsdev->node.minor = 0;
+ hsdev->node.next = NULL;
+ hsdev->drv_info = drv_id;
+ hsdev->dev_instance = newdev;
+ hsdev->next = root_pci_devs;
+ root_pci_devs = hsdev;
+ drv_id->pwr_event(newdev, DRV_ATTACH);
+ MOD_INC_USE_COUNT;
+ return &hsdev->node;
+ }
+ return NULL;
+}
+
+/* Add/remove a driver ID structure to our private list of known drivers. */
+int do_cb_register(struct drv_id_info *did)
+{
+ struct driver_operations *dop;
+ struct drv_shim *dshim = kmalloc(sizeof(*dshim), GFP_KERNEL);
+ if (dshim == 0)
+ return 0;
+ if (debug > 1)
+ printk(KERN_INFO "Registering driver support for '%s'.\n",
+ did->name);
+ MOD_INC_USE_COUNT;
+ dshim->did = did;
+ dop = &dshim->drv_ops;
+ dop->name = (char *)did->name;
+ dop->attach = drv_attach;
+ dop->suspend = drv_suspend;
+ dop->resume = drv_resume;
+ dop->detach = drv_detach;
+ dshim->next = root_drv_id;
+ root_drv_id = dshim;
+ return register_driver(dop);
+}
+
+void do_cb_unregister(struct drv_id_info *did)
+{
+ struct drv_shim **dp;
+ for (dp = &root_drv_id; *dp; dp = &(*dp)->next)
+ if ((*dp)->did == did) {
+ struct drv_shim *dshim = *dp;
+ unregister_driver(&dshim->drv_ops);
+ *dp = dshim->next;
+ kfree(dshim);
+ MOD_DEC_USE_COUNT;
+ return;
+ }
+}
+
+extern int (*register_hotswap_hook)(struct drv_id_info *did);
+extern void (*unregister_hotswap_hook)(struct drv_id_info *did);
+
+int (*old_cb_hook)(struct drv_id_info *did);
+void (*old_un_cb_hook)(struct drv_id_info *did);
+
+int init_module(void)
+{
+ if (debug)
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ old_cb_hook = register_hotswap_hook;
+ old_un_cb_hook = unregister_hotswap_hook;
+ register_hotswap_hook = do_cb_register;
+ unregister_hotswap_hook = do_cb_unregister;
+ return 0;
+}
+void cleanup_module(void)
+{
+ register_hotswap_hook = old_cb_hook;
+ unregister_hotswap_hook = old_un_cb_hook;
+ return;
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c cb_shim.c -I/usr/include/ -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
+
diff --git a/linux/src/drivers/net/de4x5.c b/linux/src/drivers/net/de4x5.c
new file mode 100644
index 0000000..c85bcdb
--- /dev/null
+++ b/linux/src/drivers/net/de4x5.c
@@ -0,0 +1,5942 @@
+/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
+ ethernet driver for Linux.
+
+ Copyright 1994, 1995 Digital Equipment Corporation.
+
+ Testing resources for this driver have been made available
+ in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2 of the License, or (at your
+ option) any later version.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Originally, this driver was written for the Digital Equipment
+ Corporation series of EtherWORKS ethernet cards:
+
+ DE425 TP/COAX EISA
+ DE434 TP PCI
+ DE435 TP/COAX/AUI PCI
+ DE450 TP/COAX/AUI PCI
+ DE500 10/100 PCI Fasternet
+
+ but it will now attempt to support all cards which conform to the
+ Digital Semiconductor SROM Specification. The driver currently
+ recognises the following chips:
+
+ DC21040 (no SROM)
+ DC21041[A]
+ DC21140[A]
+ DC21142
+ DC21143
+
+ So far the driver is known to work with the following cards:
+
+ KINGSTON
+ Linksys
+ ZNYX342
+ SMC8432
+ SMC9332 (w/new SROM)
+ ZNYX31[45]
+ ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
+
+ The driver has been tested on a relatively busy network using the DE425,
+ DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
+ 16M of data to a DECstation 5000/200 as follows:
+
+ TCP UDP
+ TX RX TX RX
+ DE425 1030k 997k 1170k 1128k
+ DE434 1063k 995k 1170k 1125k
+ DE435 1063k 995k 1170k 1125k
+ DE500 1063k 998k 1170k 1125k in 10Mb/s mode
+
+ All values are typical (in kBytes/sec) from a sample of 4 for each
+    measurement. Their error is +/-20k on a quiet (private) network and also
+    depends on what load the CPU has.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'ewrk3.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious. With the module autoload of
+ every usable DECchip board, I pinched Donald's 'next_module' field to
+ link my modules together.
+
+    Up to 15 EISA cards can be supported under this driver, limited primarily
+ by the available IRQ lines. I have checked different configurations of
+ multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
+ problem yet (provided you have at least depca.c v0.38) ...
+
+ PCI support has been added to allow the driver to work with the DE434,
+ DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
+ to the differences in the EISA and PCI CSR address offsets from the base
+ address.
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). Loadable module support under PCI and EISA has been
+ achieved by letting the driver autoprobe as if it were compiled into the
+ kernel. Do make sure you're not sharing interrupts with anything that
+ cannot accommodate interrupt sharing!
+
+ To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) for fixed autoprobes (not recommended), edit the source code near
+ line 5594 to reflect the I/O address you're using, or assign these when
+ loading by:
+
+ insmod de4x5 io=0xghh where g = bus number
+ hh = device number
+
+ NB: autoprobing for modules is now supported by default. You may just
+ use:
+
+ insmod de4x5
+
+ to load all available boards. For a specific board, still use
+ the 'io=?' above.
+ 3) compile de4x5.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the de4x5 configuration turned off and reboot.
+ 5) insmod de4x5 [io=0xghh]
+ 6) run the net startup bits for your new eth?? interface(s) manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ To unload a module, turn off the associated interface(s)
+ 'ifconfig eth?? down' then 'rmmod de4x5'.
+
+    Automedia detection is included so that in principle you can disconnect
+ from, e.g. TP, reconnect to BNC and things will still work (after a
+ pause whilst the driver figures out where its media went). My tests
+ using ping showed that it appears to work....
+
+ By default, the driver will now autodetect any DECchip based card.
+ Should you have a need to restrict the driver to DIGITAL only cards, you
+ can compile with a DEC_ONLY define, or if loading as a module, use the
+ 'dec_only=1' parameter.
+
+ I've changed the timing routines to use the kernel timer and scheduling
+ functions so that the hangs and other assorted problems that occurred
+ while autosensing the media should be gone. A bonus for the DC21040
+ auto media sense algorithm is that it can now use one that is more in
+ line with the rest (the DC21040 chip doesn't have a hardware timer).
+ The downside is the 1 'jiffies' (10ms) resolution.
+
+ IEEE 802.3u MII interface code has been added in anticipation that some
+ products may use it in the future.
+
+ The SMC9332 card has a non-compliant SROM which needs fixing - I have
+    patched this driver to detect it because the SROM format used complies
+    with a previous DEC-STD format.
+
+ I have removed the buffer copies needed for receive on Intels. I cannot
+ remove them for Alphas since the Tulip hardware only does longword
+ aligned DMA transfers and the Alphas get alignment traps with non
+ longword aligned data copies (which makes them really slow). No comment.
+
+ I have added SROM decoding routines to make this driver work with any
+ card that supports the Digital Semiconductor SROM spec. This will help
+ all cards running the dc2114x series chips in particular. Cards using
+ the dc2104x chips should run correctly with the basic driver. I'm in
+ debt to <mjacob@feral.com> for the testing and feedback that helped get
+ this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
+ (with the latest SROM complying with the SROM spec V3: their first was
+ broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315
+ (quad 21041 MAC) cards also appear to work despite their incorrectly
+ wired IRQs.
+
+ I have added a temporary fix for interrupt problems when some SCSI cards
+ share the same interrupt as the DECchip based cards. The problem occurs
+ because the SCSI card wants to grab the interrupt as a fast interrupt
+ (runs the service routine with interrupts turned off) vs. this card
+ which really needs to run the service routine with interrupts turned on.
+ This driver will now add the interrupt service routine as a fast
+ interrupt if it is bounced from the slow interrupt. THIS IS NOT A
+ RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
+ until people sort out their compatibility issues and the kernel
+ interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
+ INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
+ run on the same interrupt. PCMCIA/CardBus is another can of worms...
+
+ Finally, I think I have really fixed the module loading problem with
+ more than one DECchip based card. As a side effect, I don't mess with
+ the device structure any more which means that if more than 1 card in
+ 2.0.x is installed (4 in 2.1.x), the user will have to edit
+ linux/drivers/net/Space.c to make room for them. Hence, module loading
+ is the preferred way to use this driver, since it doesn't have this
+ limitation.
+
+ Where SROM media detection is used and full duplex is specified in the
+ SROM, the feature is ignored unless lp->params.fdx is set at compile
+ time OR during a module load (insmod de4x5 args='eth??:fdx' [see
+ below]). This is because there is no way to automatically detect full
+ duplex links except through autonegotiation. When I include the
+ autonegotiation feature in the SROM autoconf code, this detection will
+ occur automatically for that case.
+
+ Command line arguments are now allowed, similar to passing arguments
+ through LILO. This will allow a per adapter board set up of full duplex
+ and media. The only lexical constraints are: the board name (dev->name)
+ appears in the list before its parameters. The list of parameters ends
+ either at the end of the parameter list or with another board name. The
+ following parameters are allowed:
+
+ fdx for full duplex
+ autosense to set the media/speed; with the following
+ sub-parameters:
+ TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
+
+ Case sensitivity is important for the sub-parameters. They *must* be
+ upper case. Examples:
+
+ insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+
+ For a compiled in driver, in linux/drivers/net/CONFIG, place e.g.
+ DE4X5_OPTS = -DDE4X5_PARM='"eth0:fdx autosense=AUI eth2:autosense=TP"'
+
+ Yes, I know full duplex isn't permissible on BNC or AUI; they're just
+ examples. By default, full duplex is turned off and AUTO is the default
+ autosense setting. In reality, I expect only the full duplex option to
+ be used. Note the use of single quotes in the two examples above and the
+ lack of commas to separate items.
+
+ TO DO:
+ ------
+
+ o check what revision numbers the 21142 and 21143 have
+ o
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 17-Nov-94 Initial writing. ALPHA code release.
+ 0.2 13-Jan-95 Added PCI support for DE435's.
+ 0.21 19-Jan-95 Added auto media detection.
+ 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
+ Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ Add request/release_region code.
+ Add loadable modules support for PCI.
+ Clean up loadable modules support.
+ 0.23 28-Feb-95 Added DC21041 and DC21140 support.
+ Fix missed frame counter value and initialisation.
+ Fixed EISA probe.
+ 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
+ Change TX_BUFFS_AVAIL macro.
+ Change media autodetection to allow manual setting.
+ Completed DE500 (DC21140) support.
+ 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
+ 0.242 10-May-95 Minor changes.
+ 0.30 12-Jun-95 Timer fix for DC21140.
+ Portability changes.
+ Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
+ Add DE500 semi automatic autosense.
+ Add Link Fail interrupt TP failure detection.
+ Add timer based link change detection.
+ Plugged a memory leak in de4x5_queue_pkt().
+ 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
+ 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
+ suggestion by <heiko@colossus.escape.de>.
+ 0.33 8-Aug-95 Add shared interrupt support (not released yet).
+ 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
+ Fix de4x5_interrupt().
+ Fix dc21140_autoconf() mess.
+ No shared interrupt support.
+ 0.332 11-Sep-95 Added MII management interface routines.
+ 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
+ Add kernel timer code (h/w is too flaky).
+ Add MII based PHY autosense.
+ Add new multicasting code.
+ Add new autosense algorithms for media/mode
+ selection using kernel scheduling/timing.
+ Re-formatted.
+ Made changes suggested by <jeff@router.patch.net>:
+ Change driver to detect all DECchip based cards
+ with DEC_ONLY restriction a special case.
+ Changed driver to autoprobe as a module. No irq
+ checking is done now - assume BIOS is good!
+ Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
+ 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
+ only <niles@axp745gsfc.nasa.gov>
+ Fix for multiple PCI cards reported by <jos@xos.nl>
+ Duh, put the SA_SHIRQ flag into request_interrupt().
+ Fix SMC ethernet address in enet_det[].
+ Print chip name instead of "UNKNOWN" during boot.
+ 0.42 26-Apr-96 Fix MII write TA bit error.
+ Fix bug in dc21040 and dc21041 autosense code.
+ Remove buffer copies on receive for Intels.
+ Change sk_buff handling during media disconnects to
+ eliminate DUP packets.
+ Add dynamic TX thresholding.
+ Change all chips to use perfect multicast filtering.
+ Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 21-Jun-96 Fix unconnected media TX retry bug.
+ Add Accton to the list of broken cards.
+ Fix TX under-run bug for non DC21140 chips.
+ Fix boot command probe bug in alloc_device() as
+ reported by <koen.gadeyne@barco.com> and
+ <orava@nether.tky.hut.fi>.
+ Add cache locks to prevent a race condition as
+ reported by <csd@microplex.com> and
+ <baba@beckman.uiuc.edu>.
+ Upgraded alloc_device() code.
+ 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
+ with <csd@microplex.com>
+ 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
+ Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
+ and <michael@compurex.com>.
+ 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
+ with a loopback packet.
+ 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
+ by <bhat@mundook.cs.mu.OZ.AU>
+ 0.45 8-Dec-96 Include endian functions for PPC use, from work
+ by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
+ 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
+ suggestion from <mjacob@feral.com>.
+ 0.5 30-Jan-97 Added SROM decoding functions.
+ Updated debug flags.
+ Fix sleep/wakeup calls for PCI cards, bug reported
+ by <cross@gweep.lkg.dec.com>.
+ Added multi-MAC, one SROM feature from discussion
+ with <mjacob@feral.com>.
+ Added full module autoprobe capability.
+ Added attempt to use an SMC9332 with broken SROM.
+ Added fix for ZYNX multi-mac cards that didn't
+ get their IRQs wired correctly.
+ 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
+ <paubert@iram.es>
+ Fix init_connection() to remove extra device reset.
+ Fix MAC/PHY reset ordering in dc21140m_autoconf().
+ Fix initialisation problem with lp->timeout in
+ typeX_infoblock() from <paubert@iram.es>.
+ Fix MII PHY reset problem from work done by
+ <paubert@iram.es>.
+ 0.52 26-Apr-97 Some changes may not credit the right people -
+ a disk crash meant I lost some mail.
+ Change RX interrupt routine to drop rather than
+ defer packets to avoid hang reported by
+ <g.thomas@opengroup.org>.
+ Fix srom_exec() to return for COMPACT and type 1
+ infoblocks.
+ Added DC21142 and DC21143 functions.
+ Added byte counters from <phil@tazenda.demon.co.uk>
+ Added SA_INTERRUPT temporary fix from
+ <mjacob@feral.com>.
+ 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
+ module load: bug reported by
+ <Piete.Brooks@cl.cam.ac.uk>
+ Fix multi-MAC, one SROM, to work with 2114x chips:
+ bug reported by <cmetz@inner.net>.
+ Make above search independent of BIOS device scan
+ direction.
+ Completed DC2114[23] autosense functions.
+ 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
+ <robin@intercore.com
+ Fix type1_infoblock() bug introduced in 0.53, from
+ problem reports by
+ <parmee@postecss.ncrfran.france.ncr.com> and
+ <jo@ice.dillingen.baynet.de>.
+ Added argument list to set up each board from either
+ a module's command line or a compiled in #define.
+ Added generic MII PHY functionality to deal with
+ newer PHY chips.
+ Fix the mess in 2.1.67.
+ 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
+ <redhat@cococo.net>.
+ Fix bug in pci_probe() for 64 bit systems reported
+ by <belliott@accessone.com>.
+ 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
+ 0.534 24-Jan-98 Fix last (?) endian bug from
+ <Geert.Uytterhoeven@cs.kuleuven.ac.be>
+ 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
+ 0.5351 4-Oct-98 Atomicize assertion of dev->interrupt for SMP (not
+ for Alpha arch.) from <lma@varesearch.com>
+ Add TP, AUI and BNC cases to 21140m_autoconf() for
+ case where a 21140 under SROM control uses, e.g. AUI
+ from problem report by <delchini@lpnp09.in2p3.fr>
+ Add MII parallel detection to 2114x_autoconf() for
+ case where no autonegotiation partner exists from
+ problem report by <mlapsley@ndirect.co.uk>.
+ Add ability to force connection type directly even
+ when using SROM control from problem report by
+ <earl@exis.net>.
+ Fix is_anc_capable() bug reported by
+ <Austin.Donnelly@cl.cam.ac.uk>.
+ Fix type[13]_infoblock() bug: during MII search, PHY
+ lp->rst not run because lp->ibn not initialised -
+ from report & fix by <paubert@iram.es>.
+ Fix probe bug with EISA & PCI cards present from
+ report by <eirik@netcom.com>.
+ Fix compiler problems associated with i386-string
+ ops from multiple bug reports and temporary fix
+ from <paubert@iram.es>.
+ Add an_exception() for old ZYNX346 and fix compile
+ warning on PPC & SPARC, from <ecd@skynet.be>.
+ Fix lastPCI to correctly work with compiled in
+ kernels and modules from bug report by
+ <Zlatko.Calusic@CARNet.hr> et al.
+ Fix dc2114x_autoconf() to stop multiple messages
+ when media is unconnected.
+ Change dev->interrupt to lp->interrupt to ensure
+ alignment for Alpha's and avoid their unaligned
+ access traps. This flag is merely for log messages:
+ should do something more definitive though...
+
+ =========================================================================
+*/
+
+static const char *version = "de4x5.c:V0.5351 1998/10/4 davies@maniac.ultranet.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+
+#include "de4x5.h"
+
+#define c_char const char
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+# define __initfunc(__arginit) __arginit
+//# define test_and_set_bit set_bit
+# define net_device_stats enet_statistics
+# define copy_to_user(a,b,c) memcpy_tofs(a,b,c)
+# define copy_from_user(a,b,c) memcpy_fromfs(a,b,c)
+# define le16_to_cpu(a) cpu_to_le16(a)
+# define le32_to_cpu(a) cpu_to_le32(a)
+# ifdef __powerpc__
+# define cpu_to_le16(a) ((((a) & 0x00ffU) << 8) | (((a) & 0xff00U) >> 8))
+# define cpu_to_le32(a) ((((a) & 0x000000ffU) << 24) |\
+ (((a) & 0x0000ff00U) << 8) |\
+ (((a) & 0x00ff0000U) >> 8) |\
+ (((a) & 0xff000000U) >> 24))
+# else
+# define cpu_to_le16(a) (a)
+# define cpu_to_le32(a) (a)
+# endif /* __powerpc__ */
+# include <asm/segment.h>
+#else
+# include <asm/uaccess.h>
+# include <linux/init.h>
+#endif /* LINUX_VERSION_CODE */
+#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((u_short *)(a)))
+
+/*
+** MII Information
+*/
+struct phy_table {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time - 802.3u is confusing here */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+};
+
+struct mii_phy {
+ int reset; /* Hard reset required? */
+ int id; /* IEEE OUI */
+ int ta; /* One cycle TA time */
+ struct { /* Non autonegotiation (parallel) speed det. */
+ int reg;
+ int mask;
+ int value;
+ } spd;
+ int addr; /* MII address for the PHY */
+ u_char *gep; /* Start of GEP sequence block in SROM */
+ u_char *rst; /* Start of reset sequence in SROM */
+ u_int mc; /* Media Capabilities */
+ u_int ana; /* NWay Advertisement */
+    u_int fdx;            /* Full DupleX capabilities for each media */
+ u_int ttm; /* Transmit Threshold Mode for each media */
+ u_int mci; /* 21142 MII Connector Interrupt info */
+};
+
+#define DE4X5_MAX_PHY 8     /* Allow up to 8 attached PHY devices per board */
+
+struct sia_phy {
+ u_char mc; /* Media Code */
+ u_char ext; /* csr13-15 valid when set */
+ int csr13; /* SIA Connectivity Register */
+ int csr14; /* SIA TX/RX Register */
+ int csr15; /* SIA General Register */
+ int gepc; /* SIA GEP Control Information */
+ int gep; /* SIA GEP Data */
+};
+
+/*
+** Define the known universe of PHY devices that can be
+** recognised by this driver.
+*/
+static struct phy_table phy_info[] = {
+ {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
+ {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
+ {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
+ {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
+ {0, 0x7810 , 1, {0x05, 0x0380, 0x0380}} /* Level One? */
+};
+
+/*
+** These GENERIC values assume that the PHY devices follow 802.3u and
+** allow parallel detection to set the link partner ability register.
+** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
+*/
+#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
+#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
+#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
+
+/*
+** Define special SROM detection cases
+*/
+static c_char enet_det[][ETH_ALEN] = {
+ {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
+};
+
+#define SMC 1
+#define ACCTON 2
+
+/*
+** SROM Repair definitions. If a broken SROM is detected a card may
+** use this information to help figure out what to do. This is a
+** "stab in the dark" and so far for SMC9332's only.
+*/
+static c_char srom_repair_info[][100] = {
+ {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
+ 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
+ 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
+ 0x00,0x18,}
+};
+
+
+#ifdef DE4X5_DEBUG
+static int de4x5_debug = DE4X5_DEBUG;
+#else
+/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
+static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
+#endif
+
+/*
+** Allow per adapter set up. For modules this is simply a command line
+** parameter, e.g.:
+** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
+**
+** For a compiled in driver, place e.g.
+** DE4X5_OPTS = -DDE4X5_PARM='"eth0:fdx autosense=AUI eth2:autosense=TP"'
+** in linux/drivers/net/CONFIG
+*/
+#ifdef DE4X5_PARM
+static char *args = DE4X5_PARM;
+#else
+static char *args = NULL;
+#endif
+
+struct parameters {
+ int fdx;
+ int autosense;
+};
+
+#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
+
+#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Ethernet Info
+*/
+#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
+#define IEEE802_3_SZ 1518 /* Packet + CRC */
+#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
+#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
+#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
+#define PKT_HDR_LEN 14 /* Addresses and data length info */
+#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
+#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
+
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+/*
+** EISA bus defines
+*/
+#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
+
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
+
+#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
+#define DE4X5_NAME_LENGTH 8
+
+/*
+** Ethernet PROM defines for DC21040
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** PCI Bus defines
+*/
+#define PCI_MAX_BUS_NUM 8
+#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
+#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
+#define NO_MORE_PCI -2 /* PCI bus search all done */
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define ALIGN8 ((u_long)8 - 1) /* 2 longword align */
+#define ALIGN16 ((u_long)16 - 1) /* 4 longword align */
+#define ALIGN32 ((u_long)32 - 1) /* 8 longword align */
+#define ALIGN64 ((u_long)64 - 1) /* 16 longword align */
+#define ALIGN128 ((u_long)128 - 1) /* 32 longword align */
+
+#define ALIGN ALIGN32 /* Keep the DC21040 happy... */
+#define CACHE_ALIGN CAL_16LONG
+#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
+/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
+#define DESC_ALIGN
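+
+/*
+** Example of the rounding arithmetic: with ALIGN = ALIGN32 (i.e. 31),
+** ((addr + ALIGN) & ~ALIGN) rounds addr up to the next 32 byte boundary,
+** so an address of 0x1005 becomes 0x1020. This is the rounding applied
+** to the driver private area in de4x5_hw_init() below.
+*/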
+
+#ifndef DEC_ONLY /* See README.de4x5 for using this */
+static int dec_only = 0;
+#else
+static int dec_only = 1;
+#endif
+
+/*
+** DE4X5 IRQ ENABLE/DISABLE
+*/
+#define ENABLE_IRQs { \
+ imr |= lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_en;\
+ outl(imr, DE4X5_IMR); /* Disable the IRQs */\
+}
+
+#define UNMASK_IRQs {\
+ imr |= lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
+}
+
+#define MASK_IRQs {\
+ imr = inl(DE4X5_IMR);\
+ imr &= ~lp->irq_mask;\
+ outl(imr, DE4X5_IMR); /* Mask the IRQs */\
+}
+
+/*
+** DE4X5 START/STOP
+*/
+#define START_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr |= OMR_ST | OMR_SR;\
+ outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_DE4X5 {\
+ omr = inl(DE4X5_OMR);\
+ omr &= ~(OMR_ST|OMR_SR);\
+ outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
+}
+
+/*
+** DE4X5 SIA RESET
+*/
+#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
+
+/*
+** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
+*/
+#define DE4X5_AUTOSENSE_MS 250
+
+/*
+** SROM Structure
+*/
+struct de4x5_srom {
+ char sub_vendor_id[2];
+ char sub_system_id[2];
+ char reserved[12];
+ char id_block_crc;
+ char reserved2;
+ char version;
+ char num_controllers;
+ char ieee_addr[6];
+ char info[100];
+ short chksum;
+};
+#define SUB_VENDOR_ID 0x500a
+
+/*
+** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
+** and have sizes of both a power of 2 and a multiple of 4.
+** A size of 256 bytes for each buffer could be chosen because over 90% of
+** all packets in our network are <256 bytes long and 64 longword alignment
+** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
+** descriptors are needed for machines with an ALPHA CPU.
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 32 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
+ /* Multiple of 4 for DC21040 */
+ /* Allows 512 byte alignment */
+struct de4x5_desc {
+ volatile s32 status;
+ u32 des1;
+ u32 buf;
+ u32 next;
+ DESC_ALIGN
+};
+
+/*
+** The DE4X5 private structure
+*/
+#define DE4X5_PKT_STAT_SZ 16
+#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DE4X5_PKT_STAT_SZ */
+
+struct de4x5_private {
+ char adapter_name[80]; /* Adapter name */
+ u_long interrupt; /* Aligned ISR flag */
+ struct de4x5_desc rx_ring[NUM_RX_DESC]; /* RX descriptor ring */
+ struct de4x5_desc tx_ring[NUM_TX_DESC]; /* TX descriptor ring */
+ struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
+ struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
+ int rx_new, rx_old; /* RX descriptor ring pointers */
+ int tx_new, tx_old; /* TX descriptor ring pointers */
+ char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
+ char frame[64]; /* Min sized packet for loopback*/
+ struct net_device_stats stats; /* Public stats */
+ struct {
+ u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
+ u_int unicast;
+ u_int multicast;
+ u_int broadcast;
+ u_int excessive_collisions;
+ u_int tx_underruns;
+ u_int excessive_underruns;
+ u_int rx_runt_frames;
+ u_int rx_collision;
+ u_int rx_dribble;
+ u_int rx_overflow;
+ } pktStats;
+ char rxRingSize;
+ char txRingSize;
+ int bus; /* EISA or PCI */
+ int bus_num; /* PCI Bus number */
+ int device; /* Device number on PCI bus */
+ int state; /* Adapter OPENED or CLOSED */
+ int chipset; /* DC21040, DC21041 or DC21140 */
+ s32 irq_mask; /* Interrupt Mask (Enable) bits */
+ s32 irq_en; /* Summary interrupt bits */
+ int media; /* Media (eg TP), mode (eg 100B)*/
+ int c_media; /* Remember the last media conn */
+ int fdx; /* media full duplex flag */
+ int linkOK; /* Link is OK */
+ int autosense; /* Allow/disallow autosensing */
+ int tx_enable; /* Enable descriptor polling */
+ int setup_f; /* Setup frame filtering type */
+ int local_state; /* State within a 'media' state */
+ struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
+ struct sia_phy sia; /* SIA PHY Information */
+ int active; /* Index to active PHY device */
+ int mii_cnt; /* Number of attached PHY's */
+ int timeout; /* Scheduling counter */
+ struct timer_list timer; /* Timer info for kernel */
+ int tmp; /* Temporary global per card */
+ struct {
+ void *priv; /* Original kmalloc'd mem addr */
+ void *buf; /* Original kmalloc'd mem addr */
+ u_long lock; /* Lock the cache accesses */
+ s32 csr0; /* Saved Bus Mode Register */
+ s32 csr6; /* Saved Operating Mode Reg. */
+ s32 csr7; /* Saved IRQ Mask Register */
+ s32 gep; /* Saved General Purpose Reg. */
+ s32 gepc; /* Control info for GEP */
+ s32 csr13; /* Saved SIA Connectivity Reg. */
+ s32 csr14; /* Saved SIA TX/RX Register */
+ s32 csr15; /* Saved SIA General Register */
+ int save_cnt; /* Flag if state already saved */
+ struct sk_buff *skb; /* Save the (re-ordered) skb's */
+ } cache;
+ struct de4x5_srom srom; /* A copy of the SROM */
+ struct device *next_module; /* Link to the next module */
+ int rx_ovf; /* Check for 'RX overflow' tag */
+ int useSROM; /* For non-DEC card use SROM */
+ int useMII; /* Infoblock using the MII */
+ int asBitValid; /* Autosense bits in GEP? */
+ int asPolarity; /* 0 => asserted high */
+ int asBit; /* Autosense bit number in GEP */
+ int defMedium; /* SROM default medium */
+ int tcount; /* Last infoblock number */
+ int infoblock_init; /* Initialised this infoblock? */
+ int infoleaf_offset; /* SROM infoleaf for controller */
+ s32 infoblock_csr6; /* csr6 value in SROM infoblock */
+ int infoblock_media; /* infoblock media */
+ int (*infoleaf_fn)(struct device *); /* Pointer to infoleaf function */
+ u_char *rst; /* Pointer to Type 5 reset info */
+ u_char ibn; /* Infoblock number */
+ struct parameters params; /* Command line/ #defined params */
+};
+
+/*
+** Kludge to get around the fact that the CSR addresses have different
+** offsets in the PCI and EISA boards. Also note that the ethernet address
+** PROM is accessed differently.
+*/
+static struct bus_type {
+ int bus;
+ int bus_num;
+ int device;
+ int chipset;
+ struct de4x5_srom srom;
+ int autosense;
+ int useSROM;
+} bus;
+
+/*
+** To get around certain poxy cards that don't provide an SROM
+** for the second and subsequent DECchips, I have to key off the first
+** chip's address. I'll assume there's not a bad SROM iff:
+**
+** o the chipset is the same
+** o the bus number is the same and > 0
+**  o the sum of all the returned hw address bytes is 0 or 0x5fa
+**    (i.e. the address read back as all zeros or all ones: 6 x 0xff = 0x5fa)
+**
+** Also have to save the irq for those cards whose hardware designers
+** can't follow the PCI to PCI Bridge Architecture spec.
+*/
+static struct {
+ int chipset;
+ int bus;
+ int irq;
+ u_char addr[ETH_ALEN];
+} last = {0,};
+
+/*
+** The transmit ring full condition is described in terms of the tx_old and
+** tx_new pointers:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingSize-lp->tx_new-1:\
+ lp->tx_old -lp->tx_new-1)
+
+#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
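+
+/*
+** For example, with txRingSize = 32: tx_old = 5, tx_new = 10 gives
+** 5 + 32 - 10 - 1 = 26 free descriptors, while tx_old = 10, tx_new = 5
+** gives 10 - 5 - 1 = 4. One descriptor is always left unused so that a
+** full ring (tx_old == tx_new + 1) is distinguishable from an empty one.
+*/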
+
+/*
+** Public Functions
+*/
+static int de4x5_open(struct device *dev);
+static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
+static void de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int de4x5_close(struct device *dev);
+static struct net_device_stats *de4x5_get_stats(struct device *dev);
+static void de4x5_local_stats(struct device *dev, char *buf, int pkt_len);
+static void set_multicast_list(struct device *dev);
+static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int de4x5_hw_init(struct device *dev, u_long iobase);
+static int de4x5_init(struct device *dev);
+static int de4x5_sw_reset(struct device *dev);
+static int de4x5_rx(struct device *dev);
+static int de4x5_tx(struct device *dev);
+static int de4x5_ast(struct device *dev);
+static int de4x5_txur(struct device *dev);
+static int de4x5_rx_ovfc(struct device *dev);
+
+static int autoconf_media(struct device *dev);
+static void create_packet(struct device *dev, char *frame, int len);
+static void de4x5_us_delay(u32 usec);
+static void de4x5_ms_delay(u32 msec);
+static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
+static int dc21040_autoconf(struct device *dev);
+static int dc21041_autoconf(struct device *dev);
+static int dc21140m_autoconf(struct device *dev);
+static int dc2114x_autoconf(struct device *dev);
+static int srom_autoconf(struct device *dev);
+static int de4x5_suspect_state(struct device *dev, int timeout, int prev_state, int (*fn)(struct device *, int), int (*asfn)(struct device *));
+static int dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct device *, int));
+static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
+static int test_for_100Mb(struct device *dev, int msec);
+static int wait_for_link(struct device *dev);
+static int test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec);
+static int is_spd_100(struct device *dev);
+static int is_100_up(struct device *dev);
+static int is_10_up(struct device *dev);
+static int is_anc_capable(struct device *dev);
+static int ping_media(struct device *dev, int msec);
+static struct sk_buff *de4x5_alloc_rx_buff(struct device *dev, int index, int len);
+static void de4x5_free_rx_buffs(struct device *dev);
+static void de4x5_free_tx_buffs(struct device *dev);
+static void de4x5_save_skbs(struct device *dev);
+static void de4x5_rst_desc_ring(struct device *dev);
+static void de4x5_cache_state(struct device *dev, int flag);
+static void de4x5_put_cache(struct device *dev, struct sk_buff *skb);
+static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb);
+static struct sk_buff *de4x5_get_cache(struct device *dev);
+static void de4x5_setup_intr(struct device *dev);
+static void de4x5_init_connection(struct device *dev);
+static int de4x5_reset_phy(struct device *dev);
+static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
+static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
+static int test_tp(struct device *dev, s32 msec);
+static int EISA_signature(char *name, s32 eisa_id);
+static int PCI_signature(char *name, struct bus_type *lp);
+static void DevicePresent(u_long iobase);
+static void enet_addr_rst(u_long aprom_addr);
+static int de4x5_bad_srom(struct bus_type *lp);
+static short srom_rd(u_long address, u_char offset);
+static void srom_latch(u_int command, u_long address);
+static void srom_command(u_int command, u_long address);
+static void srom_address(u_int command, u_long address, u_char offset);
+static short srom_data(u_int command, u_long address);
+/*static void srom_busy(u_int command, u_long address);*/
+static void sendto_srom(u_int command, u_long addr);
+static int getfrom_srom(u_long addr);
+static int srom_map_media(struct device *dev);
+static int srom_infoleaf_info(struct device *dev);
+static void srom_init(struct device *dev);
+static void srom_exec(struct device *dev, u_char *p);
+static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
+static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
+static int mii_rdata(u_long ioaddr);
+static void mii_wdata(int data, int len, u_long ioaddr);
+static void mii_ta(u_long rw, u_long ioaddr);
+static int mii_swap(int data, int len);
+static void mii_address(u_char addr, u_long ioaddr);
+static void sendto_mii(u32 command, int data, u_long ioaddr);
+static int getfrom_mii(u32 command, u_long ioaddr);
+static int mii_get_oui(u_char phyaddr, u_long ioaddr);
+static int mii_get_phy(struct device *dev);
+static void SetMulticastFilter(struct device *dev);
+static int get_hw_addr(struct device *dev);
+static void srom_repair(struct device *dev, int card);
+static int test_bad_enet(struct device *dev, int status);
+static int an_exception(struct bus_type *lp);
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+static void eisa_probe(struct device *dev, u_long iobase);
+#endif
+static void pci_probe(struct device *dev, u_long iobase);
+static void srom_search(int index);
+static char *build_setup_frame(struct device *dev, int mode);
+static void disable_ast(struct device *dev);
+static void enable_ast(struct device *dev, u32 time_out);
+static long de4x5_switch_mac_port(struct device *dev);
+static int gep_rd(struct device *dev);
+static void gep_wr(s32 data, struct device *dev);
+static void timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec);
+static void yawn(struct device *dev, int state);
+static void link_modules(struct device *dev, struct device *tmp);
+static void de4x5_parse_params(struct device *dev);
+static void de4x5_dbg_open(struct device *dev);
+static void de4x5_dbg_mii(struct device *dev, int k);
+static void de4x5_dbg_media(struct device *dev);
+static void de4x5_dbg_srom(struct de4x5_srom *p);
+static void de4x5_dbg_rx(struct sk_buff *skb, int len);
+static int de4x5_strncmp(char *a, char *b, int n);
+static int dc21041_infoleaf(struct device *dev);
+static int dc21140_infoleaf(struct device *dev);
+static int dc21142_infoleaf(struct device *dev);
+static int dc21143_infoleaf(struct device *dev);
+static int type0_infoblock(struct device *dev, u_char count, u_char *p);
+static int type1_infoblock(struct device *dev, u_char count, u_char *p);
+static int type2_infoblock(struct device *dev, u_char count, u_char *p);
+static int type3_infoblock(struct device *dev, u_char count, u_char *p);
+static int type4_infoblock(struct device *dev, u_char count, u_char *p);
+static int type5_infoblock(struct device *dev, u_char count, u_char *p);
+static int compact_infoblock(struct device *dev, u_char count, u_char *p);
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static struct device *unlink_modules(struct device *p);
+static struct device *insert_device(struct device *dev, u_long iobase,
+ int (*init)(struct device *));
+static int count_adapters(void);
+static int loading_module = 1;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+MODULE_PARM(de4x5_debug, "i");
+MODULE_PARM(dec_only, "i");
+MODULE_PARM(args, "s");
+#endif /* LINUX_VERSION_CODE */
+# else
+static int loading_module = 0;
+#endif /* MODULE */
+
+static char name[DE4X5_NAME_LENGTH + 1];
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
+static int lastEISA = 0;
+#else
+static int lastEISA = MAX_EISA_SLOTS; /* Only PCI probes */
+#endif
+static int num_de4x5s = 0;
+static int cfrv = 0, useSROM = 0;
+static int lastPCI = -1;
+static struct device *lastModule = NULL;
+
+/*
+** List the SROM infoleaf functions and chipsets
+*/
+struct InfoLeaf {
+ int chipset;
+ int (*fn)(struct device *);
+};
+static struct InfoLeaf infoleaf_array[] = {
+ {DC21041, dc21041_infoleaf},
+ {DC21140, dc21140_infoleaf},
+ {DC21142, dc21142_infoleaf},
+ {DC21143, dc21143_infoleaf}
+};
+#define INFOLEAF_SIZE (sizeof(infoleaf_array)/(sizeof(int)+sizeof(int *)))
+
+/*
+** List the SROM info block functions
+*/
+static int (*dc_infoblock[])(struct device *dev, u_char, u_char *) = {
+ type0_infoblock,
+ type1_infoblock,
+ type2_infoblock,
+ type3_infoblock,
+ type4_infoblock,
+ type5_infoblock,
+ compact_infoblock
+};
+
+#define COMPACT (sizeof(dc_infoblock)/sizeof(int *) - 1)
+
+/*
+** Miscellaneous defines...
+*/
+#define RESET_DE4X5 {\
+ int i;\
+ i=inl(DE4X5_BMR);\
+ de4x5_ms_delay(1);\
+ outl(i | BMR_SWR, DE4X5_BMR);\
+ de4x5_ms_delay(1);\
+ outl(i, DE4X5_BMR);\
+ de4x5_ms_delay(1);\
+ for (i=0;i<5;i++) {inl(DE4X5_BMR); de4x5_ms_delay(1);}\
+ de4x5_ms_delay(1);\
+}
+
+#define PHY_HARD_RESET {\
+ outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
+ udelay(1000); /* Assert for 1ms */\
+ outl(0x00, DE4X5_GEP);\
+ udelay(2000); /* Wait for 2ms */\
+}
+
+
+/*
+** Autoprobing in modules is allowed here. See the top of the file for
+** more info.
+*/
+__initfunc(int
+de4x5_probe(struct device *dev))
+{
+ u_long iobase = dev->base_addr;
+
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+ eisa_probe(dev, iobase);
+#endif
+ if (lastEISA == MAX_EISA_SLOTS) {
+ pci_probe(dev, iobase);
+ }
+
+ return (dev->priv ? 0 : -ENODEV);
+}
+
+__initfunc(static int
+de4x5_hw_init(struct device *dev, u_long iobase))
+{
+ struct bus_type *lp = &bus;
+ int i, status=0;
+ char *tmp;
+
+ /* Ensure we're not sleeping */
+ if (lp->bus == EISA) {
+ outb(WAKEUP, PCI_CFPM);
+ } else {
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, WAKEUP);
+ }
+ de4x5_ms_delay(10);
+
+ RESET_DE4X5;
+
+ if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
+ return -ENXIO; /* Hardware could not reset */
+ }
+
+ /*
+ ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
+ */
+ useSROM = FALSE;
+ if (lp->bus == PCI) {
+ PCI_signature(name, lp);
+ } else {
+ EISA_signature(name, EISA_ID0);
+ }
+
+ if (*name == '\0') { /* Not found a board signature */
+ return -ENXIO;
+ }
+
+ dev->base_addr = iobase;
+ if (lp->bus == EISA) {
+ printk("%s: %s at 0x%04lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase>>12)&0x0f));
+ } else { /* PCI port address */
+ printk("%s: %s at 0x%04lx (PCI bus %d, device %d)", dev->name, name,
+ iobase, lp->bus_num, lp->device);
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ if (status != 0) {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ return -ENXIO;
+ } else {
+ struct de4x5_private *lp;
+
+ /*
+ ** Reserve a section of kernel memory for the adapter
+ ** private area and the TX/RX descriptor rings.
+ */
+ dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
+ GFP_KERNEL);
+ if (dev->priv == NULL) {
+ return -ENOMEM;
+ }
+
+ /*
+ ** Align to a longword boundary
+ */
+ tmp = dev->priv;
+ dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
+ lp = (struct de4x5_private *)dev->priv;
+ memset(dev->priv, 0, sizeof(struct de4x5_private));
+ lp->bus = bus.bus;
+ lp->bus_num = bus.bus_num;
+ lp->device = bus.device;
+ lp->chipset = bus.chipset;
+ lp->cache.priv = tmp;
+ lp->cache.gepc = GEP_INIT;
+ lp->asBit = GEP_SLNK;
+ lp->asPolarity = GEP_SLNK;
+ lp->asBitValid = TRUE;
+ lp->timeout = -1;
+ lp->useSROM = useSROM;
+ memcpy((char *)&lp->srom,(char *)&bus.srom,sizeof(struct de4x5_srom));
+ de4x5_parse_params(dev);
+
+ /*
+ ** Choose correct autosensing in case someone messed up
+ */
+ lp->autosense = lp->params.autosense;
+ if (lp->chipset != DC21140) {
+ if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
+ lp->params.autosense = TP;
+ }
+ if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
+ lp->params.autosense = BNC;
+ }
+ }
+ lp->fdx = lp->params.fdx;
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+
+ /*
+ ** Set up the RX descriptor ring (Intels)
+ ** Allocate contiguous receive buffers, long word aligned (Alphas)
+ */
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = RX_BUFF_SZ;
+ lp->rx_ring[i].buf = 0;
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+
+#else
+ if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
+ GFP_KERNEL)) == NULL) {
+ kfree(lp->cache.priv);
+ return -ENOMEM;
+ }
+
+ lp->cache.buf = tmp;
+ tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
+ for (i=0; i<NUM_RX_DESC; i++) {
+ lp->rx_ring[i].status = 0;
+ lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
+ lp->rx_ring[i].buf = cpu_to_le32(virt_to_bus(tmp+i*RX_BUFF_SZ));
+ lp->rx_ring[i].next = 0;
+ lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
+ }
+#endif
+
+ barrier();
+
+ request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
+ DE4X5_EISA_TOTAL_SIZE),
+ lp->adapter_name);
+
+ lp->rxRingSize = NUM_RX_DESC;
+ lp->txRingSize = NUM_TX_DESC;
+
+ /* Write the end of list marker to the descriptor lists */
+ lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
+ lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
+
+ /* Tell the adapter where the TX/RX rings are located. */
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ /* Initialise the IRQ mask and Enable/Disable */
+ lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
+ lp->irq_en = IMR_NIM | IMR_AIM;
+
+ /* Create a loopback packet frame for later media probing */
+ create_packet(dev, lp->frame, sizeof(lp->frame));
+
+ /* Check if the RX overflow bug needs testing for */
+ i = cfrv & 0x000000fe;
+ if ((lp->chipset == DC21140) && (i == 0x20)) {
+ lp->rx_ovf = 1;
+ }
+
+ /* Initialise the SROM pointers if possible */
+ if (lp->useSROM) {
+ lp->state = INITIALISED;
+ if (srom_infoleaf_info(dev)) {
+ return -ENXIO;
+ }
+ srom_init(dev);
+ }
+
+ lp->state = CLOSED;
+
+ /*
+ ** Check for an MII interface
+ */
+ if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
+ mii_get_phy(dev);
+ }
+
+#ifndef __sparc_v9__
+ printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
+#else
+ printk(" and requires IRQ%x (provided by %s).\n", dev->irq,
+#endif
+ ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
+ }
+
+ if (de4x5_debug & DEBUG_VERSION) {
+ printk("%s", version);
+ }
+
+ /* The DE4X5-specific entries in the device structure. */
+ dev->open = &de4x5_open;
+ dev->hard_start_xmit = &de4x5_queue_pkt;
+ dev->stop = &de4x5_close;
+ dev->get_stats = &de4x5_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &de4x5_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic fields of the device structure. */
+ ether_setup(dev);
+
+ /* Let the adapter sleep to save power */
+ yawn(dev, SLEEP);
+
+ return status;
+}
+
+
+static int
+de4x5_open(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ s32 omr;
+
+ /* Allocate the RX buffers */
+ for (i=0; i<lp->rxRingSize; i++) {
+ if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
+ de4x5_free_rx_buffs(dev);
+ return -EAGAIN;
+ }
+ }
+
+ /*
+ ** Wake up the adapter
+ */
+ yawn(dev, WAKEUP);
+
+ /*
+ ** Re-initialize the DE4X5...
+ */
+ status = de4x5_init(dev);
+
+ lp->state = OPEN;
+ de4x5_dbg_open(dev);
+
+ if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
+ lp->adapter_name, dev)) {
+        printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
+ if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
+ lp->adapter_name, dev)) {
+ printk("\n Cannot get IRQ- reconfigure your hardware.\n");
+ disable_ast(dev);
+ de4x5_free_rx_buffs(dev);
+ de4x5_free_tx_buffs(dev);
+ yawn(dev, SLEEP);
+ lp->state = CLOSED;
+ return -EAGAIN;
+ } else {
+ printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
+ printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
+ }
+ }
+
+ dev->tbusy = 0;
+ dev->start = 1;
+ lp->interrupt = UNMASK_INTERRUPTS;
+ dev->trans_start = jiffies;
+
+ START_DE4X5;
+
+ de4x5_setup_intr(dev);
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
+ printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
+ printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
+ printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
+ printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
+ printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
+ printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
+ printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/*
+** Initialize the DE4X5 operating conditions. NB: a chip problem with the
+** DC21140 requires using perfect filtering mode for that chip. Since I can't
+** see why I'd want > 14 multicast addresses, I have changed all chips to use
+** the perfect filtering mode. Keep the DMA burst length at 8: there seem
+** to be data corruption problems if it is larger (UDP errors seen from a
+** ttcp source).
+*/
+static int
+de4x5_init(struct device *dev)
+{
+ /* Lock out other processes whilst setting up the hardware */
+ test_and_set_bit(0, (void *)&dev->tbusy);
+
+ de4x5_sw_reset(dev);
+
+ /* Autoconfigure the connected port */
+ autoconf_media(dev);
+
+ return 0;
+}
+
+static int
+de4x5_sw_reset(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 bmr, omr;
+
+ /* Select the MII or SRL port now and RESET the MAC */
+ if (!lp->useSROM) {
+ if (lp->phy[lp->active].id != 0) {
+ lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
+ } else {
+ lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
+ }
+ de4x5_switch_mac_port(dev);
+ }
+
+ /*
+ ** Set the programmable burst length to 8 longwords for all the DC21140
+ ** Fasternet chips and 4 longwords for all others: DMA errors result
+ ** without these values. Cache align 16 long.
+ */
+ bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | CACHE_ALIGN;
+ bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
+ outl(bmr, DE4X5_BMR);
+
+ omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
+ if (lp->chipset == DC21140) {
+ omr |= (OMR_SDP | OMR_SB);
+ }
+ lp->setup_f = PERFECT;
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ barrier();
+
+ /* Build the setup frame depending on filtering mode */
+ SetMulticastFilter(dev);
+
+ load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
+ outl(omr|OMR_ST, DE4X5_OMR);
+
+ /* Poll for setup frame completion (adapter interrupts are disabled now) */
+ sti(); /* Ensure timer interrupts */
+    for (j=0, i=0;(i<500) && (j==0);i++) {  /* Up to 500ms delay */
+ udelay(1000);
+ if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
+ }
+ outl(omr, DE4X5_OMR); /* Stop everything! */
+
+ if (j == 0) {
+ printk("%s: Setup frame timed out, status %08x\n", dev->name,
+ inl(DE4X5_STS));
+ status = -EIO;
+ }
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ lp->tx_old = lp->tx_new;
+
+ return status;
+}
+
+/*
+** Writes a socket buffer address to the next available transmit descriptor.
+*/
+static int
+de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int status = 0;
+
+ test_and_set_bit(0, (void*)&dev->tbusy); /* Stop send re-tries */
+ if (lp->tx_enable == NO) { /* Cannot send for now */
+ return -1;
+ }
+
+ /*
+ ** Clean out the TX ring asynchronously to interrupts - sometimes the
+ ** interrupts are lost by delayed descriptor status updates relative to
+ ** the irq assertion, especially with a busy PCI bus.
+ */
+ cli();
+ de4x5_tx(dev);
+ sti();
+
+ /* Test if cache is already locked - requeue skb if so */
+ if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
+ return -1;
+
+ /* Transmit descriptor ring full or stale skb */
+ if (dev->tbusy || lp->tx_skb[lp->tx_new]) {
+ if (lp->interrupt) {
+ de4x5_putb_cache(dev, skb); /* Requeue the buffer */
+ } else {
+ de4x5_put_cache(dev, skb);
+ }
+ if (de4x5_debug & DEBUG_TX) {
+ printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%ld\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), dev->tbusy, inl(DE4X5_IMR), inl(DE4X5_OMR), (lp->tx_skb[lp->tx_new] ? "YES" : "NO"));
+ }
+ } else if (skb->len > 0) {
+ /* If we already have stuff queued locally, use that first */
+ if (lp->cache.skb && !lp->interrupt) {
+ de4x5_put_cache(dev, skb);
+ skb = de4x5_get_cache(dev);
+ }
+
+ while (skb && !dev->tbusy && !lp->tx_skb[lp->tx_new]) {
+ cli();
+ test_and_set_bit(0, (void*)&dev->tbusy);
+ load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
+#if LINUX_VERSION_CODE >= ((2 << 16) | (1 << 8))
+ lp->stats.tx_bytes += skb->len;
+#endif
+ outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ dev->trans_start = jiffies;
+
+ if (TX_BUFFS_AVAIL) {
+ dev->tbusy = 0; /* Another pkt may be queued */
+ }
+ skb = de4x5_get_cache(dev);
+ sti();
+ }
+ if (skb) de4x5_putb_cache(dev, skb);
+ }
+
+ lp->cache.lock = 0;
+
+ return status;
+}
+
+/*
+** The DE4X5 interrupt handler.
+**
+** I/O Read/Writes through intermediate PCI bridges are never 'posted',
+** so that the asserted interrupt always has some real data to work with -
+** if these I/O accesses are ever changed to memory accesses, ensure the
+** STS write is read immediately to complete the transaction if the adapter
+** is not on bus 0. Lost interrupts can still occur when the PCI bus load
+** is high and descriptor status bits cannot be set before the associated
+** interrupt is asserted and this routine entered.
+*/
+static void
+de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)dev_id;
+ struct de4x5_private *lp;
+ s32 imr, omr, sts, limit;
+ u_long iobase;
+
+ if (dev == NULL) {
+ printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ lp = (struct de4x5_private *)dev->priv;
+ iobase = dev->base_addr;
+
+ DISABLE_IRQs; /* Ensure non re-entrancy */
+
+ if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+#if LINUX_VERSION_CODE >= ((2 << 16) | (1 << 8))
+ synchronize_irq();
+#endif
+
+ for (limit=0; limit<8; limit++) {
+ sts = inl(DE4X5_STS); /* Read IRQ status */
+ outl(sts, DE4X5_STS); /* Reset the board interrupts */
+
+ if (!(sts & lp->irq_mask)) break;/* All done */
+
+ if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
+ de4x5_rx(dev);
+
+ if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
+ de4x5_tx(dev);
+
+ if (sts & STS_LNF) { /* TP Link has failed */
+ lp->irq_mask &= ~IMR_LFM;
+ }
+
+ if (sts & STS_UNF) { /* Transmit underrun */
+ de4x5_txur(dev);
+ }
+
+ if (sts & STS_SE) { /* Bus Error */
+ STOP_DE4X5;
+ printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
+ dev->name, sts);
+ return;
+ }
+ }
+
+ /* Load the TX ring with any locally stored packets */
+ if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
+ while (lp->cache.skb && !dev->tbusy && lp->tx_enable) {
+ de4x5_queue_pkt(de4x5_get_cache(dev), dev);
+ }
+ lp->cache.lock = 0;
+ }
+
+ lp->interrupt = UNMASK_INTERRUPTS;
+ ENABLE_IRQs;
+
+ return;
+}
+
+static int
+de4x5_rx(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int entry;
+ s32 status;
+
+ for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
+ entry=lp->rx_new) {
+ status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
+
+ if (lp->rx_ovf) {
+ if (inl(DE4X5_MFC) & MFC_FOCM) {
+ de4x5_rx_ovfc(dev);
+ break;
+ }
+ }
+
+ if (status & RD_FS) { /* Remember the start of frame */
+ lp->rx_old = entry;
+ }
+
+ if (status & RD_LS) { /* Valid frame status */
+ if (lp->tx_enable) lp->linkOK++;
+ if (status & RD_ES) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
+ if (status & RD_CE) lp->stats.rx_crc_errors++;
+ if (status & RD_OF) lp->stats.rx_fifo_errors++;
+ if (status & RD_TL) lp->stats.rx_length_errors++;
+ if (status & RD_RF) lp->pktStats.rx_runt_frames++;
+ if (status & RD_CS) lp->pktStats.rx_collision++;
+ if (status & RD_DB) lp->pktStats.rx_dribble++;
+ if (status & RD_OF) lp->pktStats.rx_overflow++;
+ } else { /* A valid frame received */
+ struct sk_buff *skb;
+ short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
+ >> 16) - 4;
+
+ if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
+ printk("%s: Insufficient memory; nuking packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ } else {
+ de4x5_dbg_rx(skb, pkt_len);
+
+ /* Push up the protocol stack */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /* Update stats */
+ lp->stats.rx_packets++;
+#if LINUX_VERSION_CODE >= ((2 << 16) | (1 << 8))
+ lp->stats.rx_bytes += pkt_len;
+#endif
+ de4x5_local_stats(dev, skb->data, pkt_len);
+ }
+ }
+
+ /* Change buffer ownership for this frame, back to the adapter */
+ for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
+ lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
+ barrier();
+ }
+ lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
+ barrier();
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
+ }
+
+ return 0;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
+static int
+de4x5_tx(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int entry;
+ s32 status;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
+ if (status < 0) { /* Buffer not sent yet */
+ break;
+ } else if (status != 0x7fffffff) { /* Not setup frame */
+ if (status & TD_ES) { /* An error happened */
+ lp->stats.tx_errors++;
+ if (status & TD_NC) lp->stats.tx_carrier_errors++;
+ if (status & TD_LC) lp->stats.tx_window_errors++;
+ if (status & TD_UF) lp->stats.tx_fifo_errors++;
+ if (status & TD_EC) lp->pktStats.excessive_collisions++;
+ if (status & TD_DE) lp->stats.tx_aborted_errors++;
+
+ if (TX_PKT_PENDING) {
+ outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
+ }
+ } else { /* Packet sent */
+ lp->stats.tx_packets++;
+ if (lp->tx_enable) lp->linkOK++;
+ }
+ /* Update the collision counter */
+ lp->stats.collisions += ((status & TD_EC) ? 16 :
+ ((status & TD_CC) >> 3));
+
+ /* Free the buffer. */
+ if (lp->tx_skb[entry] != NULL) {
+ dev_kfree_skb(lp->tx_skb[entry], FREE_WRITE);
+ lp->tx_skb[entry] = NULL;
+ }
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
+ }
+
+ if (TX_BUFFS_AVAIL && dev->tbusy) { /* Any resources available? */
+ dev->tbusy = 0; /* Clear TX busy flag */
+ if (lp->interrupt) mark_bh(NET_BH);
+ }
+
+ return 0;
+}
+
+static int
+de4x5_ast(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ disable_ast(dev);
+
+ if (lp->useSROM) {
+ next_tick = srom_autoconf(dev);
+ } else if (lp->chipset == DC21140) {
+ next_tick = dc21140m_autoconf(dev);
+ } else if (lp->chipset == DC21041) {
+ next_tick = dc21041_autoconf(dev);
+ } else if (lp->chipset == DC21040) {
+ next_tick = dc21040_autoconf(dev);
+ }
+ lp->linkOK = 0;
+ enable_ast(dev, next_tick);
+
+ return 0;
+}
+
+static int
+de4x5_txur(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int omr;
+
+ omr = inl(DE4X5_OMR);
+ if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
+ omr &= ~(OMR_ST|OMR_SR);
+ outl(omr, DE4X5_OMR);
+ while (inl(DE4X5_STS) & STS_TS);
+ if ((omr & OMR_TR) < OMR_TR) {
+ omr += 0x4000;
+ } else {
+ omr |= OMR_SF;
+ }
+ outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
+ }
+
+ return 0;
+}
+
+static int
+de4x5_rx_ovfc(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int omr;
+
+ omr = inl(DE4X5_OMR);
+ outl(omr & ~OMR_SR, DE4X5_OMR);
+ while (inl(DE4X5_STS) & STS_RS);
+
+ for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
+ lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
+ lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
+ }
+
+ outl(omr, DE4X5_OMR);
+
+ return 0;
+}
+
+static int
+de4x5_close(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 imr, omr;
+
+ disable_ast(dev);
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (de4x5_debug & DEBUG_CLOSE) {
+ printk("%s: Shutting down ethercard, status was %8.8x.\n",
+ dev->name, inl(DE4X5_STS));
+ }
+
+ /*
+ ** We stop the DE4X5 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+ STOP_DE4X5;
+
+ /* Free the associated irq */
+ free_irq(dev->irq, dev);
+ lp->state = CLOSED;
+
+ /* Free any socket buffers */
+ de4x5_free_rx_buffs(dev);
+ de4x5_free_tx_buffs(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ /* Put the adapter to sleep to save power */
+ yawn(dev, SLEEP);
+
+ return 0;
+}
+
+static struct net_device_stats *
+de4x5_get_stats(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
+
+ return &lp->stats;
+}
+
+static void
+de4x5_local_stats(struct device *dev, char *buf, int pkt_len)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DE4X5_PKT_STAT_SZ;
+ }
+ }
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+
+ return;
+}
+
+static void
+load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ lp->tx_ring[lp->tx_new].buf = cpu_to_le32(virt_to_bus(buf));
+ lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
+ lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
+ lp->tx_skb[lp->tx_new] = skb;
+ barrier();
+ lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
+ barrier();
+
+ return;
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ /* First, double check that the adapter is open */
+ if (lp->state == OPEN) {
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ u32 omr;
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ } else {
+ SetMulticastFilter(dev);
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, NULL);
+
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ dev->trans_start = jiffies;
+ }
+ }
+
+ return;
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+*/
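+/*
+** Worked example of the hash mapping below: a CRC whose 9 LSbs are 0x0b
+** gives hashcode = 0x0b, so byte = 0x0b >> 3 = 1 and bit = 1 << (0x0b & 0x07)
+** = 0x08; byte is then doubled to 2 and, since bit 1 is set, decremented
+** back to 1, so setup_frame[1] |= 0x08.
+*/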
+static void
+SetMulticastFilter(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ u_long iobase = dev->base_addr;
+ int i, j, bit, byte;
+ u16 hashcode;
+ u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
+ char *pa;
+ unsigned char *addrs;
+
+ omr = inl(DE4X5_OMR);
+ omr &= ~(OMR_PR | OMR_PM);
+ pa = build_setup_frame(dev, ALL); /* Build the basic frame */
+
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ omr |= OMR_PM; /* Pass all multicasts */
+ } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = 0xffffffff; /* init CRC for each address */
+ for (byte=0;byte<ETH_ALEN;byte++) {/* for each address byte */
+ /* process each address bit */
+ for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+ crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
+ }
+ }
+ hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
+
+ byte <<= 1; /* calc offset into setup frame */
+ if (byte & 0x02) {
+ byte -= 1;
+ }
+ lp->setup_frame[byte] |= bit;
+ }
+ }
+ } else { /* Perfect filtering */
+ for (j=0; j<dev->mc_count; j++) {
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ for (i=0; i<ETH_ALEN; i++) {
+ *(pa + (i&1)) = *addrs++;
+ if (i & 0x01) pa += 4;
+ }
+ }
+ }
+ outl(omr, DE4X5_OMR);
+
+ return;
+}
+
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard. Up to 15 EISA devices are supported.
+*/
+__initfunc(static void
+eisa_probe(struct device *dev, u_long ioaddr))
+{
+ int i, maxSlots, status, device;
+ u_char irq;
+ u_short vendor;
+ u32 cfid;
+ u_long iobase;
+ struct bus_type *lp = &bus;
+ char name[DE4X5_STRLEN];
+
+ if (lastEISA == MAX_EISA_SLOTS) return;/* No more EISA devices to search */
+
+ lp->bus = EISA;
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+ }
+
+ for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID)) {
+ cfid = (u32) inl(PCI_CFID);
+ cfrv = (u_short) inl(PCI_CFRV);
+ device = (cfid >> 8) & 0x00ffff00;
+ vendor = (u_short) cfid;
+
+ /* Read the EISA Configuration Registers */
+ irq = inb(EISA_REG0);
+ irq = de4x5_irq[(irq >> 1) & 0x03];
+
+ if (is_DC2114x) device |= (cfrv & CFRV_RN);
+ lp->chipset = device;
+
+ /* Write the PCI Configuration Registers */
+ outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
+ outl(0x00006000, PCI_CFLT);
+ outl(iobase, PCI_CBIO);
+
+ DevicePresent(EISA_APROM);
+ if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
+ dev->irq = irq;
+ if ((status = de4x5_hw_init(dev, iobase)) == 0) {
+ num_de4x5s++;
+ if (loading_module) link_modules(lastModule, dev);
+ lastEISA = i;
+ return;
+ }
+ } else if (ioaddr != 0) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name,iobase);
+ }
+ }
+ }
+
+ if (ioaddr == 0) lastEISA = i;
+
+ return;
+}
+#endif /* !(__sparc_v9__) && !(__powerpc__) && !defined(__alpha__)*/
+
+/*
+** PCI bus I/O device probe
+** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
+** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be
+** enabled by the user first in the setup utility. Hence we just check for
+** enabled features and silently ignore the card if they're not.
+**
+** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering
+** bit. Here, check for I/O accesses and then set BM. If you put the card in
+** a non BM slot, you're on your own (and complain to the PC vendor that your
+** PC doesn't conform to the PCI standard)!
+*/
+#define PCI_DEVICE (dev_num << 3)
+#define PCI_LAST_DEV 32
+
+__initfunc(static void
+pci_probe(struct device *dev, u_long ioaddr))
+{
+ u_char pb, pbus, dev_num, dnum, dev_fn, timer, tirq;
+ u_short dev_id, vendor, index, status;
+ u_int tmp, irq = 0, device, class = DE4X5_CLASS_CODE;
+ u_long iobase = 0; /* Clear upper 32 bits in Alphas */
+ struct bus_type *lp = &bus;
+
+ if (lastPCI == NO_MORE_PCI) return;
+
+ if (!pcibios_present()) {
+ lastPCI = NO_MORE_PCI;
+ return; /* No PCI bus in this machine! */
+ }
+
+ lp->bus = PCI;
+ lp->bus_num = 0;
+
+ if ((ioaddr < 0x1000) && loading_module) {
+ pbus = (u_short)(ioaddr >> 8);
+ dnum = (u_short)(ioaddr & 0xff);
+ } else {
+ pbus = 0;
+ dnum = 0;
+ }
+
+ for (index=lastPCI+1;
+ (pcibios_find_class(class, index, &pb, &dev_fn)== PCIBIOS_SUCCESSFUL);
+ index++) {
+ dev_num = PCI_SLOT(dev_fn);
+ if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
+#ifdef __sparc_v9__
+ struct pci_dev *pdev;
+ for (pdev = pci_devices; pdev; pdev = pdev->next) {
+ if ((pdev->bus->number==pb) && (pdev->devfn==dev_fn)) break;
+ }
+#endif
+ device = 0;
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &dev_id);
+ device = dev_id;
+ device <<= 8;
+ if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) {
+ continue;
+ }
+
+ /* Search for an SROM on this bus */
+ if (lp->bus_num != pb) {
+ lp->bus_num = pb;
+ srom_search(index);
+ }
+
+ /* Get the chip configuration revision register */
+ pcibios_read_config_dword(pb, PCI_DEVICE, PCI_REVISION_ID, &cfrv);
+
+ /* Set the device number information */
+ lp->device = dev_num;
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ if (is_DC2114x) device |= (cfrv & CFRV_RN);
+ lp->chipset = device;
+
+ /* Get the board I/O address (64 bits on sparc64) */
+#ifndef __sparc_v9__
+ pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &tmp);
+ iobase = tmp;
+#else
+ iobase = pdev->base_address[0];
+#endif
+ iobase &= CBIO_MASK;
+
+ /* Fetch the IRQ to be used */
+#ifndef __sparc_v9__
+ pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &tirq);
+ irq = tirq;
+#else
+ irq = pdev->irq;
+#endif
+ if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
+
+ /* Check if I/O accesses and Bus Mastering are enabled */
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
+#ifdef __powerpc__
+ if (!(status & PCI_COMMAND_IO)) {
+ status |= PCI_COMMAND_IO;
+ pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
+ }
+#endif /* __powerpc__ */
+ if (!(status & PCI_COMMAND_IO)) continue;
+
+ if (!(status & PCI_COMMAND_MASTER)) {
+ status |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
+ }
+ if (!(status & PCI_COMMAND_MASTER)) continue;
+
+ /* Check the latency timer for values >= 0x60 */
+ pcibios_read_config_byte(pb, PCI_DEVICE, PCI_LATENCY_TIMER, &timer);
+ if (timer < 0x60) {
+ pcibios_write_config_byte(pb, PCI_DEVICE, PCI_LATENCY_TIMER, 0x60);
+ }
+
+ DevicePresent(DE4X5_APROM);
+ if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
+ dev->irq = irq;
+ if ((status = de4x5_hw_init(dev, iobase)) == 0) {
+ num_de4x5s++;
+ lastPCI = index;
+ if (loading_module) link_modules(lastModule, dev);
+ return;
+ }
+ } else if (ioaddr != 0) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+ }
+ }
+
+ lastPCI = NO_MORE_PCI;
+
+ return;
+}
+
+/*
+** This function searches the current bus (which is >0) for a DECchip with an
+** SROM, so that in multiport cards that have one SROM shared between multiple
+** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
+** For single port cards this is a time waster...
+*/
+__initfunc(static void
+srom_search(int index))
+{
+ u_char pb, dev_fn, tirq;
+ u_short dev_id, dev_num, vendor, status;
+ u_int tmp, irq = 0, device, class = DE4X5_CLASS_CODE;
+ u_long iobase = 0; /* Clear upper 32 bits in Alphas */
+ int i, j;
+ struct bus_type *lp = &bus;
+
+ for (;
+ (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
+ index++) {
+
+ if (lp->bus_num != pb) return;
+ dev_num = PCI_SLOT(dev_fn);
+#ifdef __sparc_v9__
+ struct pci_dev *pdev;
+ for (pdev = pci_devices; pdev; pdev = pdev->next) {
+ if ((pdev->bus->number == pb) && (pdev->devfn == dev_fn)) break;
+ }
+#endif
+ device = 0;
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &dev_id);
+ device = dev_id;
+ device <<= 8;
+ if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) {
+ continue;
+ }
+
+ /* Get the chip configuration revision register */
+ pcibios_read_config_dword(pb, PCI_DEVICE, PCI_REVISION_ID, &cfrv);
+
+ /* Set the device number information */
+ lp->device = dev_num;
+ lp->bus_num = pb;
+
+ /* Set the chipset information */
+ if (is_DC2114x) device |= (cfrv & CFRV_RN);
+ lp->chipset = device;
+
+ /* Get the board I/O address (64 bits on sparc64) */
+#ifndef __sparc_v9__
+ pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &tmp);
+ iobase = tmp;
+#else
+ iobase = pdev->base_address[0];
+#endif
+ iobase &= CBIO_MASK;
+
+ /* Fetch the IRQ to be used */
+#ifndef __sparc_v9__
+ pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &tirq);
+ irq = tirq;
+#else
+ irq = pdev->irq;
+#endif
+ if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
+
+ /* Check if I/O accesses are enabled */
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
+ if (!(status & PCI_COMMAND_IO)) continue;
+
+ /* Search for a valid SROM attached to this DECchip */
+ DevicePresent(DE4X5_APROM);
+ for (j=0, i=0; i<ETH_ALEN; i++) {
+ j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ if ((j != 0) && (j != 0x5fa)) {
+ last.chipset = device;
+ last.bus = pb;
+ last.irq = irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
+ }
+ return;
+ }
+ }
+
+ return;
+}
+
+__initfunc(static void
+link_modules(struct device *dev, struct device *tmp))
+{
+ struct device *p=dev;
+
+ if (p) {
+ while (((struct de4x5_private *)(p->priv))->next_module) {
+ p = ((struct de4x5_private *)(p->priv))->next_module;
+ }
+
+ if (dev != tmp) {
+ ((struct de4x5_private *)(p->priv))->next_module = tmp;
+ } else {
+ ((struct de4x5_private *)(p->priv))->next_module = NULL;
+ }
+ }
+
+ return;
+}
+
+/*
+** Auto configure the media here rather than setting the port at compile
+** time. This routine is called by de4x5_init() and when a loss of media is
+** detected (excessive collisions, loss of carrier, no carrier or link fail
+** [TP] or no recent receive activity) to check whether the user has been
+** sneaky and changed the port on us.
+*/
+static int
+autoconf_media(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ lp->linkOK = 0;
+ lp->c_media = AUTO; /* Bogus last media */
+ disable_ast(dev);
+ inl(DE4X5_MFC); /* Zero the lost frames counter */
+ lp->media = INIT;
+ lp->tcount = 0;
+
+ if (lp->useSROM) {
+ next_tick = srom_autoconf(dev);
+ } else if (lp->chipset == DC21040) {
+ next_tick = dc21040_autoconf(dev);
+ } else if (lp->chipset == DC21041) {
+ next_tick = dc21041_autoconf(dev);
+ } else if (lp->chipset == DC21140) {
+ next_tick = dc21140m_autoconf(dev);
+ }
+
+ enable_ast(dev, next_tick);
+
+ return (lp->media);
+}
+
+/*
+** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
+** from BNC as the port has a jumper to set thick or thin wire. When set for
+** BNC, the BNC port will indicate activity if it's not terminated correctly.
+** The only way to test for that is to place a loopback packet onto the
+** network and watch for errors. Since we're messing with the interrupt mask
+** register, disable the board interrupts and do not allow any more packets to
+** be queued to the hardware. Re-enable everything only when the media is
+** found.
+** I may have to "age out" locally queued packets so that the higher layer
+** timeouts don't effectively duplicate packets on the network.
+*/
+static int
+dc21040_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ s32 imr;
+
+ switch (lp->media) {
+ case INIT:
+ DISABLE_IRQs;
+ lp->tx_enable = NO;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev);
+ if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
+ lp->media = TP;
+ } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
+ lp->media = BNC_AUI;
+ } else if (lp->autosense == EXT_SIA) {
+ lp->media = EXT_SIA;
+ } else {
+ lp->media = NC;
+ }
+ lp->local_state = 0;
+ next_tick = dc21040_autoconf(dev);
+ break;
+
+ case TP:
+ next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
+ TP_SUSPECT, test_tp);
+ break;
+
+ case TP_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
+ break;
+
+ case BNC:
+ case AUI:
+ case BNC_AUI:
+ next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
+ BNC_AUI_SUSPECT, ping_media);
+ break;
+
+ case BNC_AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
+ break;
+
+ case EXT_SIA:
+ next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
+ NC, EXT_SIA_SUSPECT, ping_media);
+ break;
+
+ case EXT_SIA_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
+ break;
+
+ case NC:
+ /* default to TP for all */
+ reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = NO;
+ break;
+ }
+
+ return next_tick;
+}
+
+static int
+dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout,
+ int next_state, int suspect_state,
+ int (*fn)(struct device *, int))
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 0:
+ reset_init_sia(dev, csr13, csr14, csr15);
+ lp->local_state++;
+ next_tick = 500;
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else {
+ if (linkBad && (lp->autosense == AUTO)) {
+ lp->local_state = 0;
+ lp->media = next_state;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = suspect_state;
+ next_tick = 3000;
+ }
+ break;
+ }
+
+ return next_tick;
+}
+
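+/*
+** Generic "suspect media" handler. If the link has come back, return to the
+** previous media state; otherwise re-run the autoconf step and re-test the
+** media. A test that still fails sends the state machine back to INIT and
+** bumps the try counter.
+*/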
+static int
+de4x5_suspect_state(struct device *dev, int timeout, int prev_state,
+ int (*fn)(struct device *, int),
+ int (*asfn)(struct device *))
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ int linkBad;
+
+ switch (lp->local_state) {
+ case 1:
+ if (lp->linkOK) {
+ lp->media = prev_state;
+ } else {
+ lp->local_state++;
+ next_tick = asfn(dev);
+ }
+ break;
+
+ case 2:
+ linkBad = fn(dev, timeout);
+ if (linkBad < 0) {
+ next_tick = linkBad & ~TIMER_CB;
+ } else if (!linkBad) {
+ lp->local_state--;
+ lp->media = prev_state;
+ } else {
+ lp->media = INIT;
+ lp->tcount++;
+ }
+ }
+
+ return next_tick;
+}
+
+/*
+** Autoconfigure the media when using the DC21041. AUI needs to be tested
+** before BNC, because the BNC port will indicate activity if it's not
+** terminated correctly. The only way to test for that is to place a loopback
+** packet onto the network and watch for errors. Since we're messing with
+** the interrupt mask register, disable the board interrupts and do not allow
+** any more packets to be queued to the hardware. Re-enable everything only
+** when the media is found.
+*/
+static int
+dc21041_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, irqs, irq_mask, imr, omr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ DISABLE_IRQs;
+ lp->tx_enable = NO;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
+ lp->media = TP; /* On chip auto negotiation is broken */
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = NC;
+ }
+ lp->local_state = 0;
+ next_tick = dc21041_autoconf(dev);
+ break;
+
+ case TP_NW:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts & STS_LNP) {
+ lp->media = ANS;
+ } else {
+ lp->media = AUI;
+ }
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ if (!lp->tx_enable) {
+ irqs = STS_LNP;
+ irq_mask = IMR_LPM;
+ sts = test_ans(dev, irqs, irq_mask, 3000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ lp->media = TP;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = ANS_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case ANS_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
+ break;
+
+ case TP:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = STS_LNF | STS_LNP;
+ irq_mask = IMR_LFM | IMR_LPM;
+ sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
+ if (inl(DE4X5_SISR) & SISR_NRA) {
+ lp->media = AUI; /* Non selected port activity */
+ } else {
+ lp->media = BNC;
+ }
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = TP_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case TP_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc21041_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc21041_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->media = NC;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
+ break;
+
+ case NC:
+ omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
+ outl(omr | OMR_FDX, DE4X5_OMR);
+ reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = NO;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** Some autonegotiation chips are broken in that they do not return the
+** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
+** register, except at the first power up negotiation.
+*/
+static int
+dc21140m_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int ana, anlpa, cap, cr, slnk, sr;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+ u_long imr, omr, iobase = dev->base_addr;
+
+ switch(lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = FALSE;
+ lp->linkOK = 0;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->useSROM) {
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ return next_tick;
+ }
+ srom_exec(dev, lp->phy[lp->active].gep);
+ if (lp->infoblock_media == ANS) {
+ ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ } else {
+ lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
+ SET_10Mb;
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if ((lp->autosense == AUTO) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ } else if (lp->autosense == AUTO) {
+ lp->media = SPD_DET;
+ } else if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = NC;
+ }
+ }
+ lp->local_state = 0;
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc21140m_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (lp->timeout < 0) {
+ lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
+ (~gep_rd(dev) & GEP_LNP));
+ SET_100Mb_PDET;
+ }
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ next_tick = slnk & ~TIMER_CB;
+ } else {
+ if (is_spd_100(dev) && is_100_up(dev)) {
+ lp->media = _100Mb;
+ } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = NC;
+ }
+ next_tick = dc21140m_autoconf(dev);
+ }
+ break;
+
+ case _100Mb: /* Set 100Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case BNC:
+ case AUI:
+ case _10Mb: /* Set 10Mb/s */
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case NC:
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tx_enable = FALSE;
+ break;
+ }
+
+ return next_tick;
+}
+
+/*
+** This routine may be merged into dc21140m_autoconf() sometime as I'm
+** changing how I figure out the media - but trying to keep it backwards
+** compatible with the de500-xa and de500-aa.
+** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
+** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
+** This routine just has to figure out whether 10Mb/s or 100Mb/s is
+** active.
+** When autonegotiation is working, the ANS part searches the SROM for
+** the highest common speed (TP) link that both can run and if that can
+** be full duplex. That infoblock is executed and then the link speed set.
+**
+** Only _10Mb and _100Mb are tested here.
+*/
+static int
+dc2114x_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ switch (lp->media) {
+ case INIT:
+ if (lp->timeout < 0) {
+ DISABLE_IRQs;
+ lp->tx_enable = FALSE;
+ lp->linkOK = 0;
+ lp->timeout = -1;
+ de4x5_save_skbs(dev); /* Save non transmitted skb's */
+ if (lp->params.autosense & ~AUTO) {
+ srom_map_media(dev); /* Fixed media requested */
+ if (lp->media != lp->params.autosense) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ lp->media = INIT;
+ }
+ }
+ if ((next_tick = de4x5_reset_phy(dev)) < 0) {
+ next_tick &= ~TIMER_CB;
+ } else {
+ if (lp->autosense == _100Mb) {
+ lp->media = _100Mb;
+ } else if (lp->autosense == _10Mb) {
+ lp->media = _10Mb;
+ } else if (lp->autosense == TP) {
+ lp->media = TP;
+ } else if (lp->autosense == BNC) {
+ lp->media = BNC;
+ } else if (lp->autosense == AUI) {
+ lp->media = AUI;
+ } else {
+ lp->media = SPD_DET;
+ if ((lp->infoblock_media == ANS) &&
+ ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
+ ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
+ ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
+ mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ lp->media = ANS;
+ }
+ }
+ lp->local_state = 0;
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case ANS:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
+ if (cr < 0) {
+ next_tick = cr & ~TIMER_CB;
+ } else {
+ if (cr) {
+ lp->local_state = 0;
+ lp->media = SPD_DET;
+ } else {
+ lp->local_state++;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
+ next_tick = sr & ~TIMER_CB;
+ } else {
+ lp->media = SPD_DET;
+ lp->local_state = 0;
+ if (sr) { /* Success! */
+ lp->tmp = MII_SR_ASSC;
+ anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
+ ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
+ if (!(anlpa & MII_ANLPA_RF) &&
+ (cap = anlpa & MII_ANLPA_TAF & ana)) {
+ if (cap & MII_ANA_100M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
+ lp->media = _100Mb;
+ } else if (cap & MII_ANA_10M) {
+ lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
+ lp->media = _10Mb;
+ }
+ }
+ } /* Auto Negotiation failed to finish */
+ next_tick = dc2114x_autoconf(dev);
+ } /* Auto Negotiation failed to start */
+ break;
+ }
+ break;
+
+ case AUI:
+ if (!lp->tx_enable) {
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
+ lp->media = BNC;
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->local_state = 1;
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = AUI_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+
+ case AUI_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
+ break;
+
+ case BNC:
+ switch (lp->local_state) {
+ case 0:
+ if (lp->timeout < 0) {
+ omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
+ outl(omr & ~OMR_FDX, DE4X5_OMR);
+ }
+ irqs = 0;
+ irq_mask = 0;
+ sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
+ if (sts < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ lp->local_state++; /* Ensure media connected */
+ next_tick = dc2114x_autoconf(dev);
+ }
+ break;
+
+ case 1:
+ if (!lp->tx_enable) {
+ if ((sts = ping_media(dev, 3000)) < 0) {
+ next_tick = sts & ~TIMER_CB;
+ } else {
+ if (sts) {
+ lp->local_state = 0;
+ lp->tcount++;
+ lp->media = INIT;
+ } else {
+ de4x5_init_connection(dev);
+ }
+ }
+ } else if (!lp->linkOK && (lp->autosense == AUTO)) {
+ lp->media = BNC_SUSPECT;
+ next_tick = 3000;
+ }
+ break;
+ }
+ break;
+
+ case BNC_SUSPECT:
+ next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
+ break;
+
+ case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
+ if (srom_map_media(dev) < 0) {
+ lp->tcount++;
+ lp->media = INIT;
+ return next_tick;
+ }
+ if (lp->media == _100Mb) {
+ if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
+ lp->media = SPD_DET;
+ return (slnk & ~TIMER_CB);
+ }
+ } else {
+ if (wait_for_link(dev) < 0) {
+ lp->media = SPD_DET;
+ return PDET_LINK_WAIT;
+ }
+ }
+ if (lp->media == ANS) { /* Do MII parallel detection */
+ if (is_spd_100(dev)) {
+ lp->media = _100Mb;
+ } else {
+ lp->media = _10Mb;
+ }
+ next_tick = dc2114x_autoconf(dev);
+ } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
+ (((lp->media == _10Mb) || (lp->media == TP) ||
+ (lp->media == BNC) || (lp->media == AUI)) &&
+ is_10_up(dev))) {
+ next_tick = dc2114x_autoconf(dev);
+ } else {
+ lp->tcount++;
+ lp->media = INIT;
+ }
+ break;
+
+ case _10Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_10Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ case _100Mb:
+ next_tick = 3000;
+ if (!lp->tx_enable) {
+ SET_100Mb;
+ de4x5_init_connection(dev);
+ } else {
+ if (!lp->linkOK && (lp->autosense == AUTO)) {
+ if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
+ lp->media = INIT;
+ lp->tcount++;
+ next_tick = DE4X5_AUTOSENSE_MS;
+ }
+ }
+ }
+ break;
+
+ default:
+ lp->tcount++;
+printk("Huh?: media:%02x\n", lp->media);
+ lp->media = INIT;
+ break;
+ }
+
+ return next_tick;
+}
+
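+/*
+** Media autosense for SROM based cards: dispatch to the infoleaf decoder
+** selected by srom_infoleaf_info().
+*/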
+static int
+srom_autoconf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ return lp->infoleaf_fn(dev);
+}
+
+/*
+** This mapping keeps the original media codes and FDX flag unchanged.
+** While it isn't strictly necessary, it helps me for the moment...
+** The early return avoids a media state / SROM media space clash.
+*/
+static int
+srom_map_media(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ lp->fdx = 0;
+ if (lp->infoblock_media == lp->media)
+ return 0;
+
+ switch(lp->infoblock_media) {
+ case SROM_10BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
+ case SROM_10BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
+ lp->media = _10Mb;
+ } else {
+ lp->media = TP;
+ }
+ break;
+
+ case SROM_10BASE2:
+ lp->media = BNC;
+ break;
+
+ case SROM_10BASE5:
+ lp->media = AUI;
+ break;
+
+ case SROM_100BASETF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
+ case SROM_100BASET:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASET4:
+ lp->media = _100Mb;
+ break;
+
+ case SROM_100BASEFF:
+ if (!lp->params.fdx) return -1;
+ lp->fdx = TRUE;
+ case SROM_100BASEF:
+ if (lp->params.fdx && !lp->fdx) return -1;
+ lp->media = _100Mb;
+ break;
+
+ case ANS:
+ lp->media = ANS;
+ break;
+
+ default:
+ printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
+ lp->infoblock_media);
+ return -1;
+ break;
+ }
+
+ return 0;
+}
+
+static void
+de4x5_init_connection(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media; /* Stop scrolling media messages */
+ }
+
+ cli();
+ de4x5_rst_desc_ring(dev);
+ de4x5_setup_intr(dev);
+ lp->tx_enable = YES;
+ dev->tbusy = 0;
+ sti();
+ outl(POLL_DEMAND, DE4X5_TPD);
+ mark_bh(NET_BH);
+
+ return;
+}
+
+/*
+** General PHY reset function. Some MII devices don't reset correctly
+** since their MII address pins can float at voltages that are dependent
+** on the signal pin use. Do a double reset to ensure a reset.
+*/
+static int
+de4x5_reset_phy(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int next_tick = 0;
+
+ if ((lp->useSROM) || (lp->phy[lp->active].id)) {
+ if (lp->timeout < 0) {
+ if (lp->useSROM) {
+ if (lp->phy[lp->active].rst) {
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].rst);
+ } else if (lp->rst) { /* Type 5 infoblock reset */
+ srom_exec(dev, lp->rst);
+ srom_exec(dev, lp->rst);
+ }
+ } else {
+ PHY_HARD_RESET;
+ }
+ if (lp->useMII) {
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
+ }
+ }
+ if (lp->useMII) {
+ next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500);
+ }
+ } else if (lp->chipset == DC21140) {
+ PHY_HARD_RESET;
+ }
+
+ return next_tick;
+}
+
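+/*
+** Generic media test. Program the SIA (unless the SROM has already done it),
+** set the interrupt mask, clear any pending status and then poll the status
+** register for the requested interrupt bits, rescheduling via TIMER_CB until
+** the timeout expires.
+*/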
+static int
+test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, csr12;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
+ reset_init_sia(dev, csr13, csr14, csr15);
+ }
+
+ /* set up the interrupt mask */
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+
+ /* clear csr12 NRA and SRA bits */
+ if ((lp->chipset == DC21041) || lp->useSROM) {
+ csr12 = inl(DE4X5_SISR);
+ outl(csr12, DE4X5_SISR);
+ }
+ }
+
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ if (!(sts & irqs) && --lp->timeout) {
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
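+/*
+** Test the TP link by sampling the SIA status register for the link fail and
+** no carrier bits. Reschedules (100ms | TIMER_CB) while the link still looks
+** bad and the timeout has not expired; returns zero once the link is good.
+*/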
+static int
+test_tp(struct device *dev, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
+
+ if (sisr && --lp->timeout) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** Samples the 100Mb Link State Signal. The sample interval is important
+** because too fast a rate can give erroneous results and confuse the
+** speed sense algorithm.
+*/
+#define SAMPLE_INTERVAL 500 /* ms */
+#define SAMPLE_DELAY 2000 /* ms */
+static int
+test_for_100Mb(struct device *dev, int msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
+
+ if (lp->timeout < 0) {
+ if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
+ if (msec > SAMPLE_DELAY) {
+ lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
+ gep = SAMPLE_DELAY | TIMER_CB;
+ return gep;
+ } else {
+ lp->timeout = msec/SAMPLE_INTERVAL;
+ }
+ }
+
+ if (lp->phy[lp->active].id || lp->useSROM) {
+ gep = is_100_up(dev) | is_spd_100(dev);
+ } else {
+ gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
+ }
+ if (!(gep & ret) && --lp->timeout) {
+ gep = SAMPLE_INTERVAL | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return gep;
+}
+
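+/*
+** Burn one autosense tick before the parallel detection link check: the
+** first call returns TIMER_CB so the caller waits, the second returns 0.
+*/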
+static int
+wait_for_link(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ if (lp->timeout < 0) {
+ lp->timeout = 1;
+ }
+
+ if (lp->timeout--) {
+ return TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return 0;
+}
+
+/*
+** Poll an MII register until the masked bits are clear (pol == FALSE) or
+** set (pol == TRUE), or the timer expires.
+*/
+static int
+test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int test;
+ u_long iobase = dev->base_addr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ }
+
+ if (pol) pol = ~0;
+ reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
+ test = (reg ^ pol) & mask;
+
+ if (test && --lp->timeout) {
+ reg = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return reg;
+}
+
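+/*
+** Link status helpers: is_spd_100() reports whether the link is running at
+** 100Mb/s, is_100_up() and is_10_up() whether the 100Mb/s or 10Mb/s link is
+** up. Each uses the MII status register, the GEP pins (de500-xa) or the SROM
+** autosense bit, as appropriate for the board.
+*/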
+static int
+is_spd_100(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int spd;
+
+ if (lp->useMII) {
+ spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
+ spd = ~(spd ^ lp->phy[lp->active].spd.value);
+ spd &= lp->phy[lp->active].spd.mask;
+ } else if (!lp->useSROM) { /* de500-xa */
+ spd = ((~gep_rd(dev)) & GEP_SLNK);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+
+ spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid);
+ }
+
+ return spd;
+}
+
+static int
+is_100_up(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ } else if (!lp->useSROM) { /* de500-xa */
+ return ((~gep_rd(dev)) & GEP_SLNK);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+
+ return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid));
+ }
+}
+
+static int
+is_10_up(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->useMII) {
+ /* Double read for sticky bits & temporary drops */
+ mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+ } else if (!lp->useSROM) { /* de500-xa */
+ return ((~gep_rd(dev)) & GEP_LNP);
+ } else {
+ if ((lp->ibn == 2) || !lp->asBitValid)
+ return (((lp->chipset & ~0x00ff) == DC2114x) ?
+ (~inl(DE4X5_SISR)&SISR_LS10):
+ 0);
+
+ return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+ (lp->linkOK & ~lp->asBitValid));
+ }
+}
+
+static int
+is_anc_capable(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
+ } else {
+ return 0;
+ }
+}
+
+/*
+** Send a packet onto the media and watch for send errors that indicate the
+** media is bad or unconnected.
+*/
+static int
+ping_media(struct device *dev, int msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int sisr;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+
+ lp->tmp = lp->tx_new; /* Remember the ring position */
+ load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), NULL);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD);
+ }
+
+ sisr = inl(DE4X5_SISR);
+
+ if ((!(sisr & SISR_NCR)) &&
+ ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
+ (--lp->timeout)) {
+ sisr = 100 | TIMER_CB;
+ } else {
+ if ((!(sisr & SISR_NCR)) &&
+ !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
+ lp->timeout) {
+ sisr = 0;
+ } else {
+ sisr = 1;
+ }
+ lp->timeout = -1;
+ }
+
+ return sisr;
+}
+
+/*
+** This function does two things: on Intel platforms it kmallocs another
+** buffer to replace the one about to be passed up; on Alphas it kmallocs
+** a buffer into which the packet is copied.
+*/
+static struct sk_buff *
+de4x5_alloc_rx_buff(struct device *dev, int index, int len)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p;
+
+#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
+ struct sk_buff *ret;
+ u_long i=0, tmp;
+
+ p = dev_alloc_skb(IEEE802_3_SZ + ALIGN + 2);
+ if (!p) return NULL;
+
+ p->dev = dev;
+ tmp = virt_to_bus(p->data);
+ i = ((tmp + ALIGN) & ~ALIGN) - tmp;
+ skb_reserve(p, i);
+ lp->rx_ring[index].buf = tmp + i;
+
+ ret = lp->rx_skb[index];
+ lp->rx_skb[index] = p;
+
+ if ((u_long) ret > 1) {
+ skb_put(ret, len);
+ }
+
+ return ret;
+
+#else
+ if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
+
+ p = dev_alloc_skb(len + 2);
+ if (!p) return NULL;
+
+ p->dev = dev;
+ skb_reserve(p, 2); /* Align */
+ if (index < lp->rx_old) { /* Wrapped buffer */
+ short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
+ memcpy(skb_put(p,tlen),
+ bus_to_virt(le32_to_cpu(lp->rx_ring[lp->rx_old].buf)),tlen);
+ memcpy(skb_put(p,len-tlen),
+ bus_to_virt(le32_to_cpu(lp->rx_ring[0].buf)), len-tlen);
+ } else { /* Linear buffer */
+ memcpy(skb_put(p,len),
+ bus_to_virt(le32_to_cpu(lp->rx_ring[lp->rx_old].buf)),len);
+ }
+
+ return p;
+#endif
+}
+
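+/*
+** Free the receive buffers: release any allocated skb's, clear the
+** descriptor status and mark each ring entry with a dummy pointer.
+** de4x5_free_tx_buffs() below does the same for the transmit ring and also
+** drops any packets still held in the local cache.
+*/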
+static void
+de4x5_free_rx_buffs(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ for (i=0; i<lp->rxRingSize; i++) {
+ if ((u_long) lp->rx_skb[i] > 1) {
+ dev_kfree_skb(lp->rx_skb[i], FREE_WRITE);
+ }
+ lp->rx_ring[i].status = 0;
+ lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
+ }
+
+ return;
+}
+
+static void
+de4x5_free_tx_buffs(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ for (i=0; i<lp->txRingSize; i++) {
+ if (lp->tx_skb[i]) {
+ dev_kfree_skb(lp->tx_skb[i], FREE_WRITE);
+ lp->tx_skb[i] = NULL;
+ }
+ lp->tx_ring[i].status = 0;
+ }
+
+ /* Unload the locally queued packets */
+ while (lp->cache.skb) {
+ dev_kfree_skb(de4x5_get_cache(dev), FREE_WRITE);
+ }
+
+ return;
+}
+
+/*
+** When a user pulls a connection, the DECchip can end up in a
+** 'running - waiting for end of transmission' state. This means that we
+** have to perform a chip soft reset to ensure that we can synchronize
+** the hardware and software and make any media probes using a loopback
+** packet meaningful.
+*/
+static void
+de4x5_save_skbs(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ if (!lp->cache.save_cnt) {
+ STOP_DE4X5;
+ de4x5_tx(dev); /* Flush any sent skb's */
+ de4x5_free_tx_buffs(dev);
+ de4x5_cache_state(dev, DE4X5_SAVE_STATE);
+ de4x5_sw_reset(dev);
+ de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
+ lp->cache.save_cnt++;
+ START_DE4X5;
+ }
+
+ return;
+}
+
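+/*
+** Reset the descriptor rings after de4x5_save_skbs() has stopped the chip:
+** reload the ring base addresses, hand all receive descriptors back to the
+** chip, clear the transmit descriptors and restart the device.
+*/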
+static void
+de4x5_rst_desc_ring(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i;
+ s32 omr;
+
+ if (lp->cache.save_cnt) {
+ STOP_DE4X5;
+ outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
+ outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
+
+ lp->rx_new = lp->rx_old = 0;
+ lp->tx_new = lp->tx_old = 0;
+
+ for (i = 0; i < lp->rxRingSize; i++) {
+ lp->rx_ring[i].status = cpu_to_le32(R_OWN);
+ }
+
+ for (i = 0; i < lp->txRingSize; i++) {
+ lp->tx_ring[i].status = cpu_to_le32(0);
+ }
+
+ barrier();
+ lp->cache.save_cnt--;
+ START_DE4X5;
+ }
+
+ return;
+}
+
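+/*
+** Save or restore the bus mode (CSR0), operating mode (CSR6, with TX/RX
+** stopped) and interrupt mask (CSR7) registers around a soft reset. On
+** restore, the GEP (DC21140) or SIA registers are also rewritten.
+*/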
+static void
+de4x5_cache_state(struct device *dev, int flag)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ switch(flag) {
+ case DE4X5_SAVE_STATE:
+ lp->cache.csr0 = inl(DE4X5_BMR);
+ lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
+ lp->cache.csr7 = inl(DE4X5_IMR);
+ break;
+
+ case DE4X5_RESTORE_STATE:
+ outl(lp->cache.csr0, DE4X5_BMR);
+ outl(lp->cache.csr6, DE4X5_OMR);
+ outl(lp->cache.csr7, DE4X5_IMR);
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
+ lp->cache.csr15);
+ }
+ break;
+ }
+
+ return;
+}
+
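+/*
+** A simple skb cache used for packets queued while the media is being
+** probed: de4x5_put_cache() appends to the tail, de4x5_putb_cache() pushes
+** onto the head and de4x5_get_cache() pops the head.
+*/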
+static void
+de4x5_put_cache(struct device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p;
+
+ if (lp->cache.skb) {
+ for (p=lp->cache.skb; p->next; p=p->next);
+ p->next = skb;
+ } else {
+ lp->cache.skb = skb;
+ }
+ skb->next = NULL;
+
+ return;
+}
+
+static void
+de4x5_putb_cache(struct device *dev, struct sk_buff *skb)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p = lp->cache.skb;
+
+ lp->cache.skb = skb;
+ skb->next = p;
+
+ return;
+}
+
+static struct sk_buff *
+de4x5_get_cache(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct sk_buff *p = lp->cache.skb;
+
+ if (p) {
+ lp->cache.skb = p->next;
+ p->next = NULL;
+ }
+
+ return p;
+}
+
+/*
+** Check the Auto Negotiation State. Return OK when a link pass interrupt
+** is received and the auto-negotiation status is NWAY OK.
+*/
+static int
+test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 sts, ans;
+
+ if (lp->timeout < 0) {
+ lp->timeout = msec/100;
+ outl(irq_mask, DE4X5_IMR);
+
+ /* clear all pending interrupts */
+ sts = inl(DE4X5_STS);
+ outl(sts, DE4X5_STS);
+ }
+
+ ans = inl(DE4X5_SISR) & SISR_ANS;
+ sts = inl(DE4X5_STS) & ~TIMER_CB;
+
+ if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
+ sts = 100 | TIMER_CB;
+ } else {
+ lp->timeout = -1;
+ }
+
+ return sts;
+}
+
+static void
+de4x5_setup_intr(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 imr, sts;
+
+ if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
+ imr = 0;
+ UNMASK_IRQs;
+ sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
+ outl(sts, DE4X5_STS);
+ ENABLE_IRQs;
+ }
+
+ return;
+}
+
+/*
+** Reset the SIA and reload CSR13-15, either from the SROM cache/info block
+** or from the values supplied by the caller.
+*/
+static void
+reset_init_sia(struct device *dev, s32 csr13, s32 csr14, s32 csr15)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ RESET_SIA;
+ if (lp->useSROM) {
+ if (lp->ibn == 3) {
+ srom_exec(dev, lp->phy[lp->active].rst);
+ srom_exec(dev, lp->phy[lp->active].gep);
+ outl(1, DE4X5_SICR);
+ return;
+ } else {
+ csr15 = lp->cache.csr15;
+ csr14 = lp->cache.csr14;
+ csr13 = lp->cache.csr13;
+ outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
+ outl(csr15 | lp->cache.gep, DE4X5_SIGR);
+ }
+ } else {
+ outl(csr15, DE4X5_SIGR);
+ }
+ outl(csr14, DE4X5_STRR);
+ outl(csr13, DE4X5_SICR);
+
+ de4x5_ms_delay(10);
+
+ return;
+}
+
+/*
+** Create a loopback ethernet packet
+*/
+static void
+create_packet(struct device *dev, char *frame, int len)
+{
+ int i;
+ char *buf = frame;
+
+ for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
+ *buf++ = dev->dev_addr[i];
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
+ *buf++ = dev->dev_addr[i];
+ }
+
+ *buf++ = 0; /* Packet length (2 bytes) */
+ *buf++ = 1;
+
+ return;
+}
+
+/*
+** Known delay in microseconds
+*/
+static void
+de4x5_us_delay(u32 usec)
+{
+ udelay(usec);
+
+ return;
+}
+
+/*
+** Known delay in milliseconds, in millisecond steps.
+*/
+static void
+de4x5_ms_delay(u32 msec)
+{
+ u_int i;
+
+ for (i=0; i<msec; i++) {
+ de4x5_us_delay(1000);
+ }
+
+ return;
+}
+
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int
+EISA_signature(char *name, s32 eisa_id)
+{
+ static c_char *signatures[] = DE4X5_SIGNATURE;
+ char ManCode[DE4X5_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int i, status = 0, siglen = sizeof(signatures)/sizeof(c_char *);
+
+ *name = '\0';
+ Eisa.ID = inl(eisa_id);
+
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;i<siglen;i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ break;
+ }
+ }
+
+ return status; /* 1 if a recognised name was copied into 'name' */
+}
+
+/*
+** Look for a particular board name in the PCI configuration space
+*/
+static int
+PCI_signature(char *name, struct bus_type *lp)
+{
+ static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
+ int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
+
+ if (lp->chipset == DC21040) {
+ strcpy(name, "DE434/5");
+ return status;
+ } else { /* Search for a DEC name in the SROM */
+ int i = *((char *)&lp->srom + 19) * 3;
+ strncpy(name, (char *)&lp->srom + 26 + i, 8);
+ }
+ name[8] = '\0';
+ for (i=0; i<siglen; i++) {
+ if (strstr(name,de4x5_signatures[i])!=NULL) break;
+ }
+ if (i == siglen) {
+ if (dec_only) {
+ *name = '\0';
+ } else { /* Use chip name to avoid confusion */
+ strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
+ ((lp->chipset == DC21041) ? "DC21041" :
+ ((lp->chipset == DC21140) ? "DC21140" :
+ ((lp->chipset == DC21142) ? "DC21142" :
+ ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
+ )))))));
+ }
+ if (lp->chipset != DC21041) {
+ useSROM = TRUE; /* card is not recognisably DEC */
+ }
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ useSROM = TRUE;
+ }
+
+ return status;
+}
+
+/*
+** Set up the Ethernet PROM counter to the start of the Ethernet address on
+** the DC21040, else read the SROM for the other chips.
+** The SROM may not be present in a multi-MAC card, so first read the
+** MAC address and check for a bad address. If there is a bad one then exit
+** immediately with the prior srom contents intact (the h/w address will
+** be fixed up later).
+*/
+static void
+DevicePresent(u_long aprom_addr)
+{
+ int i, j=0;
+ struct bus_type *lp = &bus;
+
+ if (lp->chipset == DC21040) {
+ if (lp->bus == EISA) {
+ enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ } else {
+ outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
+ }
+ } else { /* Read new srom */
+ u_short tmp, *p = (short *)((char *)&lp->srom + SROM_HWADD);
+ for (i=0; i<(ETH_ALEN>>1); i++) {
+ tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
+ *p = le16_to_cpu(tmp);
+ j += *p++;
+ }
+ if ((j == 0) || (j == 0x2fffd)) {
+ return;
+ }
+
+ p=(short *)&lp->srom;
+ for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
+ tmp = srom_rd(aprom_addr, i);
+ *p++ = le16_to_cpu(tmp);
+ }
+ de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ }
+
+ return;
+}
+
+/*
+** Since the write on the Enet PROM register doesn't seem to reset the PROM
+** pointer correctly (at least on my DE425 EISA card), this routine should do
+** it...from depca.c.
+*/
+static void
+enet_addr_rst(u_long aprom_addr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength=0;
+ s8 data;
+ int i, j;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(aprom_addr);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** For the bad status case with no SROM, add one to the previous
+** address. However, it needs to be added from the last byte backwards in
+** case one or more of the bytes is 0xff. Only the last 3 bytes should be
+** checked, as the first three are invariant - assigned to an organisation.
+*/
+static int
+get_hw_addr(struct device *dev)
+{
+ u_long iobase = dev->base_addr;
+ int broken, i, k, tmp, status = 0;
+ u_short j,chksum;
+ struct bus_type *lp = &bus;
+
+ broken = de4x5_bad_srom(lp);
+
+ for (i=0,k=0,j=0;j<3;j++) {
+ k <<= 1;
+ if (k > 0xffff) k-=0xffff;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_char) tmp;
+ dev->dev_addr[i++] = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ k += (u_short) (tmp << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ } else if (!broken) {
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ } else if ((broken == SMC) || (broken == ACCTON)) {
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ }
+ } else {
+ k += (u_char) (tmp = inb(EISA_APROM));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+ }
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+
+ if (lp->bus == PCI) {
+ if (lp->chipset == DC21040) {
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum = (u_char) tmp;
+ while ((tmp = inl(DE4X5_APROM)) < 0);
+ chksum |= (u_short) (tmp << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+ } else {
+ chksum = (u_char) inb(EISA_APROM);
+ chksum |= (u_short) (inb(EISA_APROM) << 8);
+ if ((k != chksum) && (dec_only)) status = -1;
+ }
+
+ /* If possible, try to fix a broken card - SMC only so far */
+ srom_repair(dev, broken);
+
+#ifdef CONFIG_PMAC
+ /*
+ ** If the address starts with 00 a0, we have to bit-reverse
+ ** each byte of the address.
+ */
+ if (dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0xa0) {
+ for (i = 0; i < ETH_ALEN; ++i) {
+ int x = dev->dev_addr[i];
+ x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
+ x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
+ dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ }
+ }
+#endif /* CONFIG_PMAC */
+
+ /* Test for a bad enet address */
+ status = test_bad_enet(dev, status);
+
+ return status;
+}
+
+/*
+** Test for enet addresses in the first 32 bytes. The built-in strncmp
+** didn't seem to work here...?
+*/
+static int
+de4x5_bad_srom(struct bus_type *lp)
+{
+ int i, status = 0;
+
+ for (i=0; i<sizeof(enet_det)/ETH_ALEN; i++) {
+ if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
+ !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
+ if (i == 0) {
+ status = SMC;
+ } else if (i == 1) {
+ status = ACCTON;
+ }
+ break;
+ }
+ }
+
+ return status;
+}
+
+static int
+de4x5_strncmp(char *a, char *b, int n)
+{
+ int ret=0;
+
+ for (;n && !ret;n--) {
+ ret = *a++ - *b++;
+ }
+
+ return ret;
+}
+
+static void
+srom_repair(struct device *dev, int card)
+{
+ struct bus_type *lp = &bus;
+
+ switch(card) {
+ case SMC:
+ memset((char *)&bus.srom, 0, sizeof(struct de4x5_srom));
+ memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
+ memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
+ useSROM = TRUE;
+ break;
+ }
+
+ return;
+}
+
+/*
+** Assume that the IRQs do not follow the PCI spec - this seems
+** to be true so far (2 for 2).
+*/
+static int
+test_bad_enet(struct device *dev, int status)
+{
+ struct bus_type *lp = &bus;
+ int i, tmp;
+
+ for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
+ if ((tmp == 0) || (tmp == 0x5fa)) {
+ if ((lp->chipset == last.chipset) &&
+ (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
+ for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
+ for (i=ETH_ALEN-1; i>2; --i) {
+ dev->dev_addr[i] += 1;
+ if (dev->dev_addr[i] != 0) break;
+ }
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ if (!an_exception(lp)) {
+ dev->irq = last.irq;
+ }
+
+ status = 0;
+ }
+ } else if (!status) {
+ last.chipset = lp->chipset;
+ last.bus = lp->bus_num;
+ last.irq = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ }
+
+ return status;
+}
+
+/*
+** List of board exceptions with correctly wired IRQs
+*/
+static int
+an_exception(struct bus_type *lp)
+{
+ if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
+ (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+** SROM Read
+*/
+static short
+srom_rd(u_long addr, u_char offset)
+{
+ sendto_srom(SROM_RD | SROM_SR, addr);
+
+ srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
+ srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
+ srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
+
+ return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
+}
+
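+/*
+** The helpers below bit-bang the serial ROM protocol through the address
+** PROM port: srom_latch() clocks one bit, srom_command() sends the opcode,
+** srom_address() shifts out the 6 bit word address and srom_data() clocks
+** in the 16 data bits.
+*/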
+static void
+srom_latch(u_int command, u_long addr)
+{
+ sendto_srom(command, addr);
+ sendto_srom(command | DT_CLK, addr);
+ sendto_srom(command, addr);
+
+ return;
+}
+
+static void
+srom_command(u_int command, u_long addr)
+{
+ srom_latch(command, addr);
+ srom_latch(command, addr);
+ srom_latch((command & 0x0000ff00) | DT_CS, addr);
+
+ return;
+}
+
+static void
+srom_address(u_int command, u_long addr, u_char offset)
+{
+ int i;
+ char a;
+
+ a = (char)(offset << 2);
+ for (i=0; i<6; i++, a <<= 1) {
+ srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
+ }
+ de4x5_us_delay(1);
+
+ i = (getfrom_srom(addr) >> 3) & 0x01;
+
+ return;
+}
+
+static short
+srom_data(u_int command, u_long addr)
+{
+ int i;
+ short word = 0;
+ s32 tmp;
+
+ for (i=0; i<16; i++) {
+ sendto_srom(command | DT_CLK, addr);
+ tmp = getfrom_srom(addr);
+ sendto_srom(command, addr);
+
+ word = (word << 1) | ((tmp >> 3) & 0x01);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return word;
+}
+
+/*
+static void
+srom_busy(u_int command, u_long addr)
+{
+ sendto_srom((command & 0x0000ff00) | DT_CS, addr);
+
+ while (!((getfrom_srom(addr) >> 3) & 0x01)) {
+ de4x5_ms_delay(1);
+ }
+
+ sendto_srom(command & 0x0000ff00, addr);
+
+ return;
+}
+*/
+
+static void
+sendto_srom(u_int command, u_long addr)
+{
+ outl(command, addr);
+ udelay(1);
+
+ return;
+}
+
+static int
+getfrom_srom(u_long addr)
+{
+ s32 tmp;
+
+ tmp = inl(addr);
+ udelay(1);
+
+ return tmp;
+}
+
+static int
+srom_infoleaf_info(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i, count;
+ u_char *p;
+
+ /* Find the infoleaf decoder function that matches this chipset */
+ for (i=0; i<INFOLEAF_SIZE; i++) {
+ if (lp->chipset == infoleaf_array[i].chipset) break;
+ }
+ if (i == INFOLEAF_SIZE) {
+ lp->useSROM = FALSE;
+ printk("%s: Cannot find correct chipset for SROM decoding!\n",
+ dev->name);
+ return -ENXIO;
+ }
+
+ lp->infoleaf_fn = infoleaf_array[i].fn;
+
+ /* Find the information offset that this function should use */
+ count = *((u_char *)&lp->srom + 19);
+ p = (u_char *)&lp->srom + 26;
+
+ if (count > 1) {
+ for (i=count; i; --i, p+=3) {
+ if (lp->device == *p) break;
+ }
+ if (i == 0) {
+ lp->useSROM = FALSE;
+ printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
+ dev->name, lp->device);
+ return -ENXIO;
+ }
+ }
+
+ lp->infoleaf_offset = TWIDDLE(p+1);
+
+ return 0;
+}
+
+/*
+** This routine loads any type 1 or 3 MII info into the mii device
+** struct and executes any type 5 code to reset PHY devices for this
+** controller.
+** The info for the MII devices will be valid since the index used
+** will follow the discovery process from MII address 1-31 then 0.
+*/
+static void
+srom_init(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ u_char count;
+
+ p+=2;
+ if (lp->chipset == DC21140) {
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+ gep_wr(lp->cache.gepc, dev);
+ }
+
+ /* Block count */
+ count = *p++;
+
+ /* Jump the infoblocks to find types */
+ for (;count; --count) {
+ if (*p < 128) {
+ p += COMPACT_LEN;
+ } else if (*(p+1) == 5) {
+ type5_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 4) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 3) {
+ type3_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 2) {
+ p += ((*p & BLOCK_LEN) + 1);
+ } else if (*(p+1) == 1) {
+ type1_infoblock(dev, 1, p);
+ p += ((*p & BLOCK_LEN) + 1);
+ } else {
+ p += ((*p & BLOCK_LEN) + 1);
+ }
+ }
+
+ return;
+}
+
+/*
+** A generic routine that writes GEP control, data and reset information
+** to the GEP register (21140) or csr15 GEP portion (2114[23]).
+*/
+static void
+srom_exec(struct device *dev, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char count = (p ? *p++ : 0);
+ u_short *w = (u_short *)p;
+
+ if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
+
+ if (lp->chipset != DC21140) RESET_SIA;
+
+ while (count--) {
+ gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
+ *p++ : TWIDDLE(w++)), dev);
+ udelay(2000); /* 2ms per action */
+ }
+
+ if (lp->chipset != DC21140) {
+ outl(lp->cache.csr14, DE4X5_STRR);
+ outl(lp->cache.csr13, DE4X5_SICR);
+ }
+
+ return;
+}
+
+/*
+** Basically this function is a NOP since it will never be called,
+** unless I implement the DC21041 SROM functions. There's no need
+** since the existing code will be satisfactory for all boards.
+*/
+static int
+dc21041_infoleaf(struct device *dev)
+{
+ return DE4X5_AUTOSENSE_MS;
+}
+
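+/*
+** Walk the DC21140 info leaf: pick up the GEP control byte, then recurse
+** through the info block chain. Once every block has been tried
+** (tcount == count) report "no connection" and restart the autosense.
+** The DC21142/DC21143 variants below do the same, minus the GEP byte.
+*/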
+static int
+dc21140_infoleaf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* GEP control */
+ lp->cache.gepc = (*p++ | GEP_CTRL);
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+static int
+dc21142_infoleaf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+static int
+dc21143_infoleaf(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char count = 0;
+ u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
+ int next_tick = DE4X5_AUTOSENSE_MS;
+
+ /* Read the connection type */
+ p+=2;
+
+ /* Block count */
+ count = *p++;
+
+ /* Recursively figure out the info blocks */
+ if (*p < 128) {
+ next_tick = dc_infoblock[COMPACT](dev, count, p);
+ } else {
+ next_tick = dc_infoblock[*(p+1)](dev, count, p);
+ }
+ if (lp->tcount == count) {
+ lp->media = NC;
+ if (lp->media != lp->c_media) {
+ de4x5_dbg_media(dev);
+ lp->c_media = lp->media;
+ }
+ lp->media = INIT;
+ lp->tcount = 0;
+ lp->tx_enable = FALSE;
+ }
+
+ return next_tick & ~TIMER_CB;
+}
+
+/*
+** The compact infoblock is only designed for DC21140[A] chips, so
+** we'll reuse the dc21140m_autoconf function. Non MII media only.
+*/
+static int
+compact_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char flags, csr6;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+COMPACT_LEN) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
+ } else {
+ return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = COMPACT;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ lp->infoblock_media = (*p++) & COMPACT_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/*
+** This block describes non MII media for the DC21140[A] only.
+*/
+static int
+type0_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 0;
+ lp->active = 0;
+ gep_wr(lp->cache.gepc, dev);
+ p+=2;
+ lp->infoblock_media = (*p++) & BLOCK0_MC;
+ lp->cache.gep = *p++;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+/* These functions are under construction! */
+
+static int
+type1_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2;
+ if (lp->state == INITIALISED) {
+ lp->ibn = 1;
+ lp->active = *p++;
+ lp->phy[lp->active].gep = (*p ? p : 0); p += (*p + 1);
+ lp->phy[lp->active].rst = (*p ? p : 0); p += (*p + 1);
+ lp->phy[lp->active].mc = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ttm = TWIDDLE(p);
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 1;
+ lp->active = *p;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = TRUE;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc21140m_autoconf(dev);
+}
+
+static int
+type2_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 2;
+ lp->active = 0;
+ p += 2;
+ lp->infoblock_media = (*p) & MEDIA_CODE;
+
+ if ((*p++) & EXT_FIELD) {
+ lp->cache.csr13 = TWIDDLE(p); p += 2;
+ lp->cache.csr14 = TWIDDLE(p); p += 2;
+ lp->cache.csr15 = TWIDDLE(p); p += 2;
+ } else {
+ lp->cache.csr13 = CSR13;
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ }
+ lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(TWIDDLE(p)) << 16);
+ lp->infoblock_csr6 = OMR_SIA;
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+static int
+type3_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ p += 2;
+ if (lp->state == INITIALISED) {
+ lp->ibn = 3;
+ lp->active = *p++;
+ lp->phy[lp->active].gep = (*p ? p : 0); p += (2 * (*p) + 1);
+ lp->phy[lp->active].rst = (*p ? p : 0); p += (2 * (*p) + 1);
+ lp->phy[lp->active].mc = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].ttm = TWIDDLE(p); p += 2;
+ lp->phy[lp->active].mci = *p;
+ return 0;
+ } else if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 3;
+ lp->active = *p;
+ lp->infoblock_csr6 = OMR_MII_100;
+ lp->useMII = TRUE;
+ lp->infoblock_media = ANS;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+static int
+type4_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ if ((lp->media == INIT) && (lp->timeout < 0)) {
+ lp->ibn = 4;
+ lp->active = 0;
+ p+=2;
+ lp->infoblock_media = (*p++) & MEDIA_CODE;
+ lp->cache.csr13 = CSR13; /* Hard coded defaults */
+ lp->cache.csr14 = CSR14;
+ lp->cache.csr15 = CSR15;
+ lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); p += 2;
+ csr6 = *p++;
+ flags = *p++;
+
+ lp->asBitValid = (flags & 0x80) ? 0 : -1;
+ lp->defMedium = (flags & 0x40) ? -1 : 0;
+ lp->asBit = 1 << ((csr6 >> 1) & 0x07);
+ lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
+ lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
+ lp->useMII = FALSE;
+
+ de4x5_switch_mac_port(dev);
+ }
+
+ return dc2114x_autoconf(dev);
+}
+
+/*
+** This block type provides information for resetting external devices
+** (chips) through the General Purpose Register.
+*/
+static int
+type5_infoblock(struct device *dev, u_char count, u_char *p)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_char len = (*p & BLOCK_LEN)+1;
+
+ /* Recursively figure out the info blocks */
+ if (--count > lp->tcount) {
+ if (*(p+len) < 128) {
+ return dc_infoblock[COMPACT](dev, count, p+len);
+ } else {
+ return dc_infoblock[*(p+len+1)](dev, count, p+len);
+ }
+ }
+
+ /* Must be initializing to run this code */
+ if ((lp->state == INITIALISED) || (lp->media == INIT)) {
+ p+=2;
+ lp->rst = p;
+ srom_exec(dev, lp->rst);
+ }
+
+ return DE4X5_AUTOSENSE_MS;
+}
+
+/*
+** MII Read/Write
+*/
+
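+/*
+** A rough summary, read from the code below, of the IEEE 802.3 clause 22
+** management frame these routines bit-bang through the DE4X5_MII CSR:
+**
+**   preamble   2+32 one bits         mii_wdata(MII_PREAMBLE, ...)
+**   ST + OP    4 bits, LSB first     MII_STRD (read) or MII_STWR (write)
+**   PHY addr   5 bits, MSB first     mii_address() (bit reversed by mii_swap)
+**   REG addr   5 bits, MSB first     mii_address()
+**   turnaround 2 bit times           mii_ta(): drive 1,0 on write, tri-state on read
+**   data       16 bits, MSB first    mii_wdata() / mii_rdata()
+**
+** Each bit is clocked by sendto_mii()/getfrom_mii(), which pulse MII_MDC
+** around MDO (bit 17 of the CSR) and sample MDI from bit 19.
+*/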
+static int
+mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to read */
+ mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
+
+ return mii_rdata(ioaddr); /* Read data */
+}
+
+static void
+mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
+{
+ mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
+ mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
+ mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
+ mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
+ mii_address(phyreg, ioaddr); /* PHY Register to write */
+ mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
+ data = mii_swap(data, 16); /* Swap data bit ordering */
+ mii_wdata(data, 16, ioaddr); /* Write data */
+
+ return;
+}
+
+static int
+mii_rdata(u_long ioaddr)
+{
+ int i;
+ s32 tmp = 0;
+
+ for (i=0; i<16; i++) {
+ tmp <<= 1;
+ tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
+ }
+
+ return tmp;
+}
+
+static void
+mii_wdata(int data, int len, u_long ioaddr)
+{
+ int i;
+
+ for (i=0; i<len; i++) {
+ sendto_mii(MII_MWR | MII_WR, data, ioaddr);
+ data >>= 1;
+ }
+
+ return;
+}
+
+static void
+mii_address(u_char addr, u_long ioaddr)
+{
+ int i;
+
+ addr = mii_swap(addr, 5);
+ for (i=0; i<5; i++) {
+ sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
+ addr >>= 1;
+ }
+
+ return;
+}
+
+static void
+mii_ta(u_long rw, u_long ioaddr)
+{
+ if (rw == MII_STWR) {
+ sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
+ } else {
+ getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
+ }
+
+ return;
+}
+
+static int
+mii_swap(int data, int len)
+{
+ int i, tmp = 0;
+
+ for (i=0; i<len; i++) {
+ tmp <<= 1;
+ tmp |= (data & 1);
+ data >>= 1;
+ }
+
+ return tmp;
+}
+
+static void
+sendto_mii(u32 command, int data, u_long ioaddr)
+{
+ u32 j;
+
+ j = (data & 1) << 17;
+ outl(command | j, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC | j, ioaddr);
+ udelay(1);
+
+ return;
+}
+
+static int
+getfrom_mii(u32 command, u_long ioaddr)
+{
+ outl(command, ioaddr);
+ udelay(1);
+ outl(command | MII_MDC, ioaddr);
+ udelay(1);
+
+ return ((inl(ioaddr) >> 19) & 1);
+}
+
+/*
+** Here are three ways to calculate the OUI from the ID registers.
+*/
+static int
+mii_get_oui(u_char phyaddr, u_long ioaddr)
+{
+/*
+ union {
+ u_short reg;
+ u_char breg[2];
+ } a;
+ int i, r2, r3, ret=0;*/
+ int r2, r3;
+
+ /* Read r2 and r3 */
+ r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
+ r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
+ /* SEEQ and Cypress way * /
+ / * Shuffle r2 and r3 * /
+ a.reg=0;
+ r3 = ((r3>>10)|(r2<<6))&0x0ff;
+ r2 = ((r2>>2)&0x3fff);
+
+ / * Bit reverse r3 * /
+ for (i=0;i<8;i++) {
+ ret<<=1;
+ ret |= (r3&1);
+ r3>>=1;
+ }
+
+ / * Bit reverse r2 * /
+ for (i=0;i<16;i++) {
+ a.reg<<=1;
+ a.reg |= (r2&1);
+ r2>>=1;
+ }
+
+ / * Swap r2 bytes * /
+ i=a.breg[0];
+ a.breg[0]=a.breg[1];
+ a.breg[1]=i;
+
+ return ((a.reg<<8)|ret); */ /* SEEQ and Cypress way */
+/* return ((r2<<6)|(u_int)(r3>>10)); */ /* NATIONAL and BROADCOM way */
+ return r2; /* (I did it) My way */
+}
+
+/*
+** The SROM spec forces us to search addresses [1-31 0]. Bummer.
+*/
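+/*
+** The for() loop below implements exactly that order: it starts at MII
+** address 1, wraps modulo DE4X5_MAX_MII, and stops only when it is back at
+** address 1 after having visited address 0 (n counts the wrap).  PHYs whose
+** OUI reads as 0 or 0xffff are ignored; responders that are not in
+** phy_info[] fall through to the generic MII entry further down.
+*/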
+static int
+mii_get_phy(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table);
+ int id;
+
+ lp->active = 0;
+ lp->useMII = TRUE;
+
+ /* Search the MII address space for possible PHY devices */
+ for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
+ lp->phy[lp->active].addr = i;
+ if (i==0) n++; /* Count cycles */
+ while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
+ id = mii_get_oui(i, DE4X5_MII);
+ if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
+ for (j=0; j<limit; j++) { /* Search PHY table */
+ if (id != phy_info[j].id) continue; /* ID match? */
+ for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++);
+ if (k < DE4X5_MAX_PHY) {
+ memcpy((char *)&lp->phy[k],
+ (char *)&phy_info[j], sizeof(struct phy_table));
+ lp->phy[k].addr = i;
+ lp->mii_cnt++;
+ lp->active++;
+ } else {
+ goto purgatory; /* Stop the search */
+ }
+ break;
+ }
+ if ((j == limit) && (i < DE4X5_MAX_MII)) {
+ for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++);
+ lp->phy[k].addr = i;
+ lp->phy[k].id = id;
+ lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
+ lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
+ lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
+ lp->mii_cnt++;
+ lp->active++;
+ printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name);
+ j = de4x5_debug;
+ de4x5_debug |= DEBUG_MII;
+ de4x5_dbg_mii(dev, k);
+ de4x5_debug = j;
+ printk("\n");
+ }
+ }
+ purgatory:
+ lp->active = 0;
+ if (lp->phy[0].id) { /* Reset the PHY devices */
+ for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/
+ mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
+ while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
+
+ de4x5_dbg_mii(dev, k);
+ }
+ }
+ if (!lp->mii_cnt) lp->useMII = FALSE;
+
+ return lp->mii_cnt;
+}
+
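+/*
+** Layout assumed by build_setup_frame() below: the setup frame is an array
+** of 32 bit words of which only the low 16 bits are used.  In perfect
+** filtering mode each station address therefore takes three longwords (two
+** address bytes per longword, hence the "pa += 4" every other byte); in
+** hash/perfect mode only the host address is written, at IMPERF_PA_OFFSET,
+** and the rest of the frame carries the hash filter table.
+*/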
+static char *
+build_setup_frame(struct device *dev, int mode)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+ char *pa = lp->setup_frame;
+
+ /* Initialise the setup frame */
+ if (mode == ALL) {
+ memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
+ }
+
+ if (lp->setup_f == HASH_PERF) {
+ for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
+ *(pa + i) = dev->dev_addr[i]; /* Host address */
+ if (i & 0x01) pa += 2;
+ }
+ *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
+ } else {
+ for (i=0; i<ETH_ALEN; i++) { /* Host address */
+ *(pa + (i&1)) = dev->dev_addr[i];
+ if (i & 0x01) pa += 4;
+ }
+ for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
+ *(pa + (i&1)) = (char) 0xff;
+ if (i & 0x01) pa += 4;
+ }
+ }
+
+ return pa; /* Points to the next entry */
+}
+
+static void
+enable_ast(struct device *dev, u32 time_out)
+{
+ timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
+
+ return;
+}
+
+static void
+disable_ast(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ del_timer(&lp->timer);
+
+ return;
+}
+
+static long
+de4x5_switch_mac_port(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ s32 omr;
+
+ STOP_DE4X5;
+
+ /* Assert the OMR_PS bit in CSR6 */
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
+ OMR_FDX));
+ omr |= lp->infoblock_csr6;
+ if (omr & OMR_PS) omr |= OMR_HBD;
+ outl(omr, DE4X5_OMR);
+
+ /* Soft Reset */
+ RESET_DE4X5;
+
+ /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
+ if (lp->chipset == DC21140) {
+ gep_wr(lp->cache.gepc, dev);
+ gep_wr(lp->cache.gep, dev);
+ } else if ((lp->chipset & ~0x0ff) == DC2114x) {
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
+ }
+
+ /* Restore CSR6 */
+ outl(omr, DE4X5_OMR);
+
+ /* Reset CSR8 */
+ inl(DE4X5_MFC);
+
+ return omr;
+}
+
+static void
+gep_wr(s32 data, struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ outl(data, DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
+ }
+
+ return;
+}
+
+static int
+gep_rd(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (lp->chipset == DC21140) {
+ return inl(DE4X5_GEP);
+ } else if ((lp->chipset & ~0x00ff) == DC2114x) {
+ return (inl(DE4X5_SIGR) & 0x000fffff);
+ }
+
+ return 0;
+}
+
+static void
+timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int dt;
+
+ /* First, cancel any pending timer events */
+ del_timer(&lp->timer);
+
+ /* Convert msec to ticks */
+ dt = (msec * HZ) / 1000;
+ if (dt==0) dt=1;
+
+ /* Set up timer */
+ lp->timer.expires = jiffies + dt;
+ lp->timer.function = fn;
+ lp->timer.data = data;
+ add_timer(&lp->timer);
+
+ return;
+}
+
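+/*
+** Power management helper: the function returns immediately for the DC21040
+** and DC21140, which have no power saving modes.  On EISA boards the
+** WAKEUP/SNOOZE/SLEEP code is written straight to the CFPM register; on PCI
+** boards it goes through the CFDA power management byte in configuration
+** space, with the SIA connectivity register cleared first when entering
+** SLEEP.
+*/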
+static void
+yawn(struct device *dev, int state)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
+
+ if(lp->bus == EISA) {
+ switch(state) {
+ case WAKEUP:
+ outb(WAKEUP, PCI_CFPM);
+ de4x5_ms_delay(10);
+ break;
+
+ case SNOOZE:
+ outb(SNOOZE, PCI_CFPM);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ outb(SLEEP, PCI_CFPM);
+ break;
+ }
+ } else {
+ switch(state) {
+ case WAKEUP:
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, WAKEUP);
+ de4x5_ms_delay(10);
+ break;
+
+ case SNOOZE:
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, SNOOZE);
+ break;
+
+ case SLEEP:
+ outl(0, DE4X5_SICR);
+ pcibios_write_config_byte(lp->bus_num, lp->device << 3,
+ PCI_CFDA_PSM, SLEEP);
+ break;
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_parse_params(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ char *p, *q, t;
+
+ lp->params.fdx = 0;
+ lp->params.autosense = AUTO;
+
+ if (args == NULL) return;
+
+ if ((p = strstr(args, dev->name))) {
+ if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
+ t = *q;
+ *q = '\0';
+
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+
+ if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
+ if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "TP_NW")) {
+ lp->params.autosense = TP_NW;
+ } else if (strstr(p, "BNC")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "AUI")) {
+ lp->params.autosense = AUI;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
+ } else if (strstr(p, "10Mb")) {
+ lp->params.autosense = _10Mb;
+ } else if (strstr(p, "100Mb")) {
+ lp->params.autosense = _100Mb;
+ } else if (strstr(p, "AUTO")) {
+ lp->params.autosense = AUTO;
+ }
+ }
+ *q = t;
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_open(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ int i;
+
+ if (de4x5_debug & DEBUG_OPEN) {
+ printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
+ printk("\tphysical address: ");
+ for (i=0;i<6;i++) {
+ printk("%2.2x:",(short)dev->dev_addr[i]);
+ }
+ printk("\n");
+ printk("Descriptor head addresses:\n");
+ printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
+ printk("Descriptor addresses:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
+ }
+ }
+ printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
+ printk("Descriptor buffers:\nRX: ");
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
+ printk("TX: ");
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
+ }
+ }
+ printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
+ printk("Ring size: \nRX: %d\nTX: %d\n",
+ (short)lp->rxRingSize,
+ (short)lp->txRingSize);
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_mii(struct device *dev, int k)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+
+ if (de4x5_debug & DEBUG_MII) {
+ printk("\nMII device address: %d\n", lp->phy[k].addr);
+ printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
+ printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
+ }
+ printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
+ if (lp->phy[k].id != BROADCOM_T4) {
+ printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
+ printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
+ } else {
+ printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_media(struct device *dev)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+
+ if (lp->media != lp->c_media) {
+ if (de4x5_debug & DEBUG_MEDIA) {
+ printk("%s: media is %s%s\n", dev->name,
+ (lp->media == NC ? "unconnected, link down or incompatible connection" :
+ (lp->media == TP ? "TP" :
+ (lp->media == ANS ? "TP/Nway" :
+ (lp->media == BNC ? "BNC" :
+ (lp->media == AUI ? "AUI" :
+ (lp->media == BNC_AUI ? "BNC/AUI" :
+ (lp->media == EXT_SIA ? "EXT SIA" :
+ (lp->media == _100Mb ? "100Mb/s" :
+ (lp->media == _10Mb ? "10Mb/s" :
+ "???"
+ ))))))))), (lp->fdx?" full duplex.":"."));
+ }
+ lp->c_media = lp->media;
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_srom(struct de4x5_srom *p)
+{
+ int i;
+
+ if (de4x5_debug & DEBUG_SROM) {
+ printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
+ printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
+ printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
+ printk("SROM version: %02x\n", (u_char)(p->version));
+ printk("# controllers: %02x\n", (u_char)(p->num_controllers));
+
+ printk("Hardware Address: ");
+ for (i=0;i<ETH_ALEN-1;i++) {
+ printk("%02x:", (u_char)*(p->ieee_addr+i));
+ }
+ printk("%02x\n", (u_char)*(p->ieee_addr+i));
+ printk("CRC checksum: %04x\n", (u_short)(p->chksum));
+ for (i=0; i<64; i++) {
+ printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
+ }
+ }
+
+ return;
+}
+
+static void
+de4x5_dbg_rx(struct sk_buff *skb, int len)
+{
+ int i, j;
+
+ if (de4x5_debug & DEBUG_RX) {
+ printk("R: %02x:%02x:%02x:%02x:%02x:%02x <- %02x:%02x:%02x:%02x:%02x:%02x len/SAP:%02x%02x [%d]\n",
+ (u_char)skb->data[0],
+ (u_char)skb->data[1],
+ (u_char)skb->data[2],
+ (u_char)skb->data[3],
+ (u_char)skb->data[4],
+ (u_char)skb->data[5],
+ (u_char)skb->data[6],
+ (u_char)skb->data[7],
+ (u_char)skb->data[8],
+ (u_char)skb->data[9],
+ (u_char)skb->data[10],
+ (u_char)skb->data[11],
+ (u_char)skb->data[12],
+ (u_char)skb->data[13],
+ len);
+ if (de4x5_debug & DEBUG_RX) {
+ for (j=0; len>0;j+=16, len-=16) {
+ printk(" %03x: ",j);
+ for (i=0; i<16 && i<len; i++) {
+ printk("%02x ",(u_char)skb->data[i+j]);
+ }
+ printk("\n");
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases. In the normal course of events
+** this function is only used for my testing.
+*/
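+/*
+** A rough user space sketch of how these private calls are reached.  It
+** assumes the usual convention for this kernel generation that the driver's
+** do_ioctl entry is invoked for SIOCDEVPRIVATE, and that struct de4x5_ioctl
+** is the {cmd, len, data} triple overlaid on ifr_data (see de4x5.h); treat
+** it as illustrative only:
+**
+**    struct ifreq ifr;
+**    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_data;
+**    u_char hwaddr[ETH_ALEN];
+**    int i, s = socket(AF_INET, SOCK_DGRAM, 0);
+**
+**    strcpy(ifr.ifr_name, "eth0");
+**    ioc->cmd  = DE4X5_GET_HWADDR;
+**    ioc->len  = ETH_ALEN;
+**    ioc->data = hwaddr;
+**    if (ioctl(s, SIOCDEVPRIVATE, &ifr) == 0) {
+**        for (i = 0; i < ETH_ALEN; i++)
+**            printf(i ? ":%02x" : "%02x", hwaddr[i]);
+**        printf("\n");
+**    }
+*/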
+static int
+de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
+ struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ s32 omr;
+ union {
+ u8 addr[144];
+ u16 sval[72];
+ u32 lval[36];
+ } tmp;
+
+ switch(ioc->cmd) {
+ case DE4X5_GET_HWADDR: /* Get the hardware address */
+ ioc->len = ETH_ALEN;
+ status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
+ if (status)
+ break;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ copy_to_user(ioc->data, tmp.addr, ioc->len);
+
+ break;
+ case DE4X5_SET_HWADDR: /* Set the hardware address */
+ status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
+ if (status)
+ break;
+ status = -EPERM;
+ if (!suser())
+ break;
+ status = 0;
+ copy_from_user(tmp.addr, ioc->data, ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ build_setup_frame(dev, PHYS_ADDR_ONLY);
+ /* Set up the descriptor and give ownership to the card */
+ while (test_and_set_bit(0, (void *)&dev->tbusy) != 0);
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ SETUP_FRAME_LEN, NULL);
+ lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
+ outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
+ dev->tbusy = 0; /* Unlock the TX ring */
+
+ break;
+ case DE4X5_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PR;
+ outl(omr, DE4X5_OMR);
+ dev->flags |= IFF_PROMISC;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr &= ~OMR_PR;
+ outl(omr, DE4X5_OMR);
+ dev->flags &= ~IFF_PROMISC;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
+ if (suser()) {
+ omr = inl(DE4X5_OMR);
+ omr |= OMR_PM;
+ outl(omr, DE4X5_OMR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_STATS: /* Get the driver statistics */
+ ioc->len = sizeof(lp->pktStats);
+ status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
+ if (status)
+ break;
+
+ cli();
+ copy_to_user(ioc->data, &lp->pktStats, ioc->len);
+ sti();
+
+ break;
+ case DE4X5_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_OMR: /* Get the OMR Register contents */
+ tmp.addr[0] = inl(DE4X5_OMR);
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
+ copy_to_user(ioc->data, tmp.addr, 1);
+ }
+
+ break;
+ case DE4X5_SET_OMR: /* Set the OMR Register contents */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
+ copy_from_user(tmp.addr, ioc->data, 1);
+ outl(tmp.addr[0], DE4X5_OMR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DE4X5_GET_REG: /* Get the DE4X5 Registers */
+ j = 0;
+ tmp.lval[0] = inl(DE4X5_STS); j+=4;
+ tmp.lval[1] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[3] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[4] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[5] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[6] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
+ ioc->len = j;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ copy_to_user(ioc->data, tmp.addr, ioc->len);
+ }
+ break;
+
+#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
+/*
+ case DE4X5_DUMP:
+ j = 0;
+ tmp.addr[j++] = dev->irq;
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[j++] = dev->dev_addr[i];
+ }
+ tmp.addr[j++] = lp->rxRingSize;
+ tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
+ tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
+
+ for (i=0;i<lp->rxRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
+ for (i=0;i<lp->txRingSize-1;i++){
+ if (i < 3) {
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+ }
+ }
+ tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
+
+ for (i=0;i<lp->rxRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
+ }
+ for (i=0;i<lp->txRingSize;i++){
+ tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
+ }
+
+ tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
+ tmp.lval[j>>2] = lp->chipset; j+=4;
+ if (lp->chipset == DC21140) {
+ tmp.lval[j>>2] = gep_rd(dev); j+=4;
+ } else {
+ tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
+ }
+ tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
+ if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
+ tmp.lval[j>>2] = lp->active; j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ if (lp->phy[lp->active].id != BROADCOM_T4) {
+ tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ } else {
+ tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
+ }
+ }
+
+ tmp.addr[j++] = lp->txRingSize;
+ tmp.addr[j++] = dev->tbusy;
+
+ ioc->len = j;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ copy_to_user(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+*/
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+#ifdef MODULE
+/*
+** Note now that module autoprobing is allowed under EISA and PCI. The
+** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
+** to "do the right thing".
+*/
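+/*
+** With io left at its default of zero the module autoprobes, so a plain
+** insmod should find every EISA/PCI adapter that count_adapters() reports;
+** a fixed base address can still be supplied through the io module
+** parameter declared below.
+*/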
+#define LP(a) ((struct de4x5_private *)(a))
+static struct device *mdev = NULL;
+static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+MODULE_PARM(io, "i");
+#endif /* LINUX_VERSION_CODE */
+
+int
+init_module(void)
+{
+ int i, num, status = -EIO;
+ struct device *p;
+
+ num = count_adapters();
+
+ for (i=0; i<num; i++) {
+ if ((p = insert_device(NULL, io, de4x5_probe)) == NULL)
+ return -ENOMEM;
+
+ if (!mdev) mdev = p;
+
+ if (register_netdev(p) != 0) {
+ kfree(p);
+ } else {
+ status = 0; /* At least one adapter will work */
+ lastModule = p;
+ }
+ }
+
+ return status;
+}
+
+void
+cleanup_module(void)
+{
+ while (mdev != NULL) {
+ mdev = unlink_modules(mdev);
+ }
+
+ return;
+}
+
+static struct device *
+unlink_modules(struct device *p)
+{
+ struct device *next = NULL;
+
+ if (p->priv) { /* Private areas allocated? */
+ struct de4x5_private *lp = (struct de4x5_private *)p->priv;
+
+ next = lp->next_module;
+ if (lp->cache.buf) { /* MAC buffers allocated? */
+ kfree(lp->cache.buf); /* Free the MAC buffers */
+ }
+ kfree(lp->cache.priv); /* Free the private area */
+ release_region(p->base_addr, (lp->bus == PCI ?
+ DE4X5_PCI_TOTAL_SIZE :
+ DE4X5_EISA_TOTAL_SIZE));
+ }
+ unregister_netdev(p);
+ kfree(p); /* Free the device structure */
+
+ return next;
+}
+
+static int
+count_adapters(void)
+{
+ int i, j=0;
+ u_char pb, dev_fn, dev_num;
+ u_short dev_id, vendor;
+ u_int class = DE4X5_CLASS_CODE;
+ u_int device;
+
+#if !defined(__sparc_v9__) && !defined(__powerpc__) && !defined(__alpha__)
+ char name[DE4X5_STRLEN];
+ u_long iobase = 0x1000;
+
+ for (i=1; i<MAX_EISA_SLOTS; i++, iobase+=EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID)) j++;
+ }
+#endif
+ if (!pcibios_present()) return j;
+
+ for (i=0;
+ (pcibios_find_class(class, i, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
+ i++) {
+ dev_num = PCI_SLOT(dev_fn);
+ device = 0;
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &dev_id);
+ device = dev_id;
+ device <<= 8;
+ if (is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x) j++;
+ }
+
+ return j;
+}
+
+/*
+** If at end of eth device list and can't use current entry, malloc
+** one up. If memory could not be allocated, print an error message.
+*/
+__initfunc(static struct device *
+insert_device(struct device *dev, u_long iobase, int (*init)(struct device *)))
+{
+ struct device *new;
+
+ new = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
+ if (new == NULL) {
+ printk("de4x5.c: Device not initialised, insufficient memory\n");
+ return NULL;
+ } else {
+ memset((char *)new, 0, sizeof(struct device)+8);
+ new->name = (char *)(new + 1);
+ new->base_addr = iobase; /* assign the io address */
+ new->init = init; /* initialisation routine */
+ }
+
+ return new;
+}
+
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c de4x5.c"
+ *
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c de4x5.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/de4x5.h b/linux/src/drivers/net/de4x5.h
new file mode 100644
index 0000000..c0c58cc
--- /dev/null
+++ b/linux/src/drivers/net/de4x5.h
@@ -0,0 +1,1028 @@
+/*
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** DC21040 CSR<1..15> Register Address Map
+*/
+#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
+#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
+#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
+#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
+#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
+#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
+#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
+#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
+#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
+#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
+#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
+#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
+#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
+#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
+#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
+#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
+#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
+#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
+#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
+#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
+#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
+
+/*
+** EISA Register Address Map
+*/
+#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase+0x0c84 /* EISA Control Register */
+#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
+#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
+#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
+#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
+#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
+
+/*
+** PCI/EISA Configuration Registers Address Map
+*/
+#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
+#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
+#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
+#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
+#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
+#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
+#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
+#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
+#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
+#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
+#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
+
+/*
+** EISA Configuration Register 0 bit definitions
+*/
+#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
+#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
+#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
+#define ER0_ISTS 0x10 /* Interrupt Status (X) */
+#define ER0_LI 0x08 /* Latch Interrupts */
+#define ER0_INTL 0x06 /* INTerrupt Level */
+#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
+
+/*
+** EISA Configuration Register 1 bit definitions
+*/
+#define ER1_IAM 0xe0 /* ISA Address Mode */
+#define ER1_IAE 0x10 /* ISA Addressing Enable */
+#define ER1_UPIN 0x0f /* User Pins */
+
+/*
+** EISA Configuration Register 2 bit definitions
+*/
+#define ER2_BRS 0xc0 /* Boot ROM Size */
+#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
+
+/*
+** EISA Configuration Register 3 bit definitions
+*/
+#define ER3_BWE 0x40 /* Burst Write Enable */
+#define ER3_BRE 0x04 /* Burst Read Enable */
+#define ER3_LSR 0x02 /* Local Software Reset */
+
+/*
+** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
+** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
+** the configuration revision register step number.
+*/
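+/*
+** For example, the 21142/21143 parts share PCI device ID 0x0019; shifted
+** left 8 bits that gives DC2114x_DID (0x1900), and OR-ing in the revision
+** nibble read from PCI_CFRV distinguishes 0x1910 (DC21142) from 0x1930
+** (DC21143), matching the chipset defines below.
+*/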
+#define CFID_DID 0xff00 /* Device ID */
+#define CFID_VID 0x00ff /* Vendor ID */
+#define DC21040_DID 0x0200 /* Unique Device ID # */
+#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
+#define DC21041_DID 0x1400 /* Unique Device ID # */
+#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
+#define DC21140_DID 0x0900 /* Unique Device ID # */
+#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
+#define DC2114x_DID 0x1900 /* Unique Device ID # */
+#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
+
+/*
+** Chipset defines
+*/
+#define DC21040 DC21040_DID
+#define DC21041 DC21041_DID
+#define DC21140 DC21140_DID
+#define DC2114x DC2114x_DID
+#define DC21142 (DC2114x_DID | 0x0010)
+#define DC21143 (DC2114x_DID | 0x0030)
+
+#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
+#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
+#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
+#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
+#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
+#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
+
+/*
+** PCI Configuration Command/Status Register (PCI_CFCS)
+*/
+#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
+#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
+#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
+#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
+#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
+#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
+#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
+#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
+#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
+#define CFCS_MO 0x00000004 /* Master Operation (C) */
+#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
+#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
+
+/*
+** PCI Configuration Revision Register (PCI_CFRV)
+*/
+#define CFRV_BC 0xff000000 /* Base Class */
+#define CFRV_SC 0x00ff0000 /* Subclass */
+#define CFRV_RN 0x000000f0 /* Revision Number */
+#define CFRV_SN 0x0000000f /* Step Number */
+#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
+#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
+#define STEP_NUMBER 0x00000020 /* Increments for future chips */
+#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
+#define CFRV_MASK 0xffff0000 /* Register mask */
+
+/*
+** PCI Configuration Latency Timer Register (PCI_CFLT)
+*/
+#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
+
+/*
+** PCI Configuration Base I/O Address Register (PCI_CBIO)
+*/
+#define CBIO_MASK -128 /* Base I/O Address Mask */
+#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
+
+/*
+** PCI Configuration Card Information Structure Register (PCI_CCIS)
+*/
+#define CCIS_ROMI 0xf0000000 /* ROM Image */
+#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
+#define CCIS_ASI 0x00000007 /* Address Space Indicator */
+
+/*
+** PCI Configuration Subsystem ID Register (PCI_SSID)
+*/
+#define SSID_SSID 0xffff0000 /* Subsystem ID */
+#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
+
+/*
+** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
+*/
+#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
+#define CBER_ROME 0x00000001 /* ROM Enable */
+
+/*
+** PCI Configuration Interrupt Register (PCI_CFIT)
+*/
+#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
+#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
+#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
+#define CFIT_IRQL 0x000000ff /* Interrupt Line */
+
+/*
+** PCI Configuration Power Management Area Register (PCI_CFPM)
+*/
+#define SLEEP 0x80 /* Power Saving Sleep Mode */
+#define SNOOZE 0x40 /* Power Saving Snooze Mode */
+#define WAKEUP 0x00 /* Power Saving Wakeup */
+
+#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
+#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
+
+/*
+** DC21040 Bus Mode Register (DE4X5_BMR)
+*/
+#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
+#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
+#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
+#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
+#define BMR_CAL 0x0000c000 /* Cache Alignment */
+#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
+#define BMR_BLE 0x00000080 /* Big/Little Endian */
+#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
+#define BMR_BAR 0x00000002 /* Bus ARbitration */
+#define BMR_SWR 0x00000001 /* Software Reset */
+
+ /* Timings here are for 10BASE-T/AUI only*/
+#define TAP_NOPOLL 0x00000000 /* No automatic polling */
+#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
+#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
+#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
+#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
+#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
+#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
+#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
+
+#define CAL_NOUSE 0x00000000 /* Not used */
+#define CAL_8LONG 0x00004000 /* 8-longword alignment */
+#define CAL_16LONG 0x00008000 /* 16-longword alignment */
+#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
+
+#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
+#define PBL_1 0x00000100 /* 1 longword DMA burst length */
+#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
+#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
+#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
+#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
+#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
+
+#define DSL_0 0x00000000 /* 0 longword / descriptor */
+#define DSL_1 0x00000004 /* 1 longword / descriptor */
+#define DSL_2 0x00000008 /* 2 longwords / descriptor */
+#define DSL_4 0x00000010 /* 4 longwords / descriptor */
+#define DSL_8 0x00000020 /* 8 longwords / descriptor */
+#define DSL_16 0x00000040 /* 16 longwords / descriptor */
+#define DSL_32 0x00000080 /* 32 longwords / descriptor */
+
+/*
+** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
+*/
+#define TPD 0x00000001 /* Transmit Poll Demand */
+
+/*
+** DC21040 Receive Poll Demand Register (DE4X5_RPD)
+*/
+#define RPD 0x00000001 /* Receive Poll Demand */
+
+/*
+** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
+*/
+#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
+
+/*
+** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
+*/
+#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
+
+/*
+** Status Register (DE4X5_STS)
+*/
+#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
+#define STS_BE 0x03800000 /* Bus Error Bits */
+#define STS_TS 0x00700000 /* Transmit Process State */
+#define STS_RS 0x000e0000 /* Receive Process State */
+#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define STS_ER 0x00004000 /* Early Receive */
+#define STS_FBE 0x00002000 /* Fatal Bus Error */
+#define STS_SE 0x00002000 /* System Error */
+#define STS_LNF 0x00001000 /* Link Fail */
+#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
+#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
+#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define STS_AT 0x00000400 /* AUI/TP Pin */
+#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
+#define STS_RPS 0x00000100 /* Receive Process Stopped */
+#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define STS_RI 0x00000040 /* Receive Interrupt */
+#define STS_UNF 0x00000020 /* Transmit Underflow */
+#define STS_LNP 0x00000010 /* Link Pass */
+#define STS_ANC 0x00000010 /* Autonegotiation Complete */
+#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
+#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define STS_TPS 0x00000002 /* Transmit Process Stopped */
+#define STS_TI 0x00000001 /* Transmit Interrupt */
+
+#define EB_PAR 0x00000000 /* Parity Error */
+#define EB_MA 0x00800000 /* Master Abort */
+#define EB_TA 0x01000000 /* Target Abort */
+#define EB_RES0 0x01800000 /* Reserved */
+#define EB_RES1 0x02000000 /* Reserved */
+
+#define TS_STOP 0x00000000 /* Stopped */
+#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
+#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
+#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
+#define TS_RES 0x00400000 /* Reserved */
+#define TS_SPKT 0x00500000 /* Setup Packet */
+#define TS_SUSP 0x00600000 /* Suspended */
+#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
+
+#define RS_STOP 0x00000000 /* Stopped */
+#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
+#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
+#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
+#define RS_SUSP 0x00080000 /* Suspended */
+#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
+#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
+#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
+
+#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
+
+/*
+** Operation Mode Register (DE4X5_OMR)
+*/
+#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
+#define OMR_RA 0x40000000 /* Receive All */
+#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
+#define OMR_SCR 0x01000000 /* Scrambler Mode */
+#define OMR_PCS 0x00800000 /* PCS Function */
+#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
+#define OMR_SF 0x00200000 /* Store and Forward */
+#define OMR_HBD 0x00080000 /* HeartBeat Disable */
+#define OMR_PS 0x00040000 /* Port Select */
+#define OMR_CA 0x00020000 /* Capture Effect Enable */
+#define OMR_BP 0x00010000 /* Back Pressure */
+#define OMR_TR 0x0000c000 /* Threshold Control Bits */
+#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
+#define OMR_FC 0x00001000 /* Force Collision Mode */
+#define OMR_OM 0x00000c00 /* Operating Mode */
+#define OMR_FDX 0x00000200 /* Full Duplex Mode */
+#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
+#define OMR_PM 0x00000080 /* Pass All Multicast */
+#define OMR_PR 0x00000040 /* Promiscuous Mode */
+#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
+#define OMR_IF 0x00000010 /* Inverse Filtering */
+#define OMR_PB 0x00000008 /* Pass Bad Frames */
+#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
+#define OMR_SR 0x00000002 /* Start/Stop Receive */
+#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
+
+#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
+#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
+#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
+#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
+
+#define OMR_DEF (OMR_SDP)
+#define OMR_SIA (OMR_SDP | OMR_TTM)
+#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
+#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
+#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
+
+/*
+** DC21040 Interrupt Mask Register (DE4X5_IMR)
+*/
+#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
+#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
+#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
+#define IMR_ERM 0x00004000 /* Early Receive Mask */
+#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
+#define IMR_SEM 0x00002000 /* System Error Mask */
+#define IMR_LFM 0x00001000 /* Link Fail Mask */
+#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
+#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
+#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
+#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
+#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
+#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
+#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
+#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
+#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
+#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
+#define IMR_LPM 0x00000010 /* Link Pass */
+#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
+#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
+#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
+#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
+
+/*
+** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
+*/
+#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
+#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
+#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
+#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
+#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
+
+/*
+** DC21040 Ethernet Address PROM (DE4X5_APROM)
+*/
+#define APROM_DN 0x80000000 /* Data Not Valid */
+#define APROM_DT 0x000000ff /* Address Byte */
+
+/*
+** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
+*/
+#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define BROM_RD 0x00004000 /* Read from Boot ROM */
+#define BROM_WR 0x00002000 /* Write to Boot ROM */
+#define BROM_BR 0x00001000 /* Select Boot ROM when set */
+#define BROM_SR 0x00000800 /* Select Serial ROM when set */
+#define BROM_REG 0x00000400 /* External Register Select */
+#define BROM_DT 0x000000ff /* Data Byte */
+
+/*
+** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
+*/
+#define MII_MDI 0x00080000 /* MII Management Data In */
+#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
+#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
+#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
+#define MII_MDT 0x00020000 /* MII Management Data Out */
+#define MII_MDC 0x00010000 /* MII Management Clock */
+#define MII_RD 0x00004000 /* Read from MII */
+#define MII_WR 0x00002000 /* Write to MII */
+#define MII_SEL 0x00000800 /* Select MII when RESET */
+
+#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
+#define SROM_RD 0x00004000 /* Read from Boot ROM */
+#define SROM_WR 0x00002000 /* Write to Boot ROM */
+#define SROM_BR 0x00001000 /* Select Boot ROM when set */
+#define SROM_SR 0x00000800 /* Select Serial ROM when set */
+#define SROM_REG 0x00000400 /* External Register Select */
+#define SROM_DT 0x000000ff /* Data Byte */
+
+#define DT_OUT 0x00000008 /* Serial Data Out */
+#define DT_IN 0x00000004 /* Serial Data In */
+#define DT_CLK 0x00000002 /* Serial ROM Clock */
+#define DT_CS 0x00000001 /* Serial ROM Chip Select */
+
+#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
+#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
+#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
+#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
+
+#define MII_CR 0x00 /* MII Management Control Register */
+#define MII_SR 0x01 /* MII Management Status Register */
+#define MII_ID0 0x02 /* PHY Identifier Register 0 */
+#define MII_ID1 0x03 /* PHY Identifier Register 1 */
+#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
+#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
+#define MII_ANE 0x06 /* Auto Negotiation Expansion */
+#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
+
+#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
+
+/*
+** MII Management Control Register
+*/
+#define MII_CR_RST 0x8000 /* RESET the PHY chip */
+#define MII_CR_LPBK 0x4000 /* Loopback enable */
+#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
+#define MII_CR_10 0x0000 /* Set 10Mb/s */
+#define MII_CR_100 0x2000 /* Set 100Mb/s */
+#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
+#define MII_CR_PD 0x0800 /* Power Down */
+#define MII_CR_ISOL 0x0400 /* Isolate Mode */
+#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
+#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
+#define MII_CR_CTE 0x0080 /* Collision Test Enable */
+
+/*
+** MII Management Status Register
+*/
+#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
+#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
+#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
+#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
+#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
+#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/
+#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
+#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
+#define MII_SR_LKS 0x0004 /* Link Status */
+#define MII_SR_JABD 0x0002 /* Jabber Detect */
+#define MII_SR_XC 0x0001 /* Extended Capabilities */
+
+/*
+** MII Management Auto Negotiation Advertisement Register
+*/
+#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
+
+/*
+** MII Management Auto Negotiation Remote End Register
+*/
+#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
+#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
+#define MII_ANLPA_RF 0x2000 /* Remote Fault */
+#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
+#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
+#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
+#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
+#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
+#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
+#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
+#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
+
+/*
+** SROM Media Definitions (ABG SROM Section)
+*/
+#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
+#define MEDIA_MII 0x0040 /* MII Present on the adapter */
+#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
+#define MEDIA_AUI 0x0004 /* AUI Media present */
+#define MEDIA_TP 0x0002 /* TP Media present */
+#define MEDIA_BNC 0x0001 /* BNC Media present */
+
+/*
+** SROM Definitions (Digital Semiconductor Format)
+*/
+#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
+#define SROM_SSID 0x0002 /* Sub-system ID offset */
+#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
+#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
+#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/
+#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
+#define SROM_SFV 0x0012 /* SROM Format Version offset */
+#define SROM_CCNT 0x0013 /* Controller Count offset */
+#define SROM_HWADD 0x0014 /* Hardware Address offset */
+#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/
+#define SROM_CRC 0x007e /* SROM CRC offset */
+
+/*
+** SROM Media Connection Definitions
+*/
+#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
+#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
+#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
+#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
+#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
+#define SROM_100BT4 0x0006 /* 100BASE-T4 */
+#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
+#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
+#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
+#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
+#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
+#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
+#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
+#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
+#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
+#define SROM_PAO 0x8800 /* Powerup Autosense Only */
+#define SROM_NSMI 0xffff /* No Selected Media Information */
+
+/*
+** SROM Media Definitions
+*/
+#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
+#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
+#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
+#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
+#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
+#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
+#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
+#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
+#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
+
+#define BLOCK_LEN 0x7f /* Extended blocks length mask */
+#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
+#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
+
+/*
+** SROM Compact Format Block Masks
+*/
+#define COMPACT_FI 0x80 /* Format Indicator */
+#define COMPACT_LEN 0x04 /* Length */
+#define COMPACT_MC 0x3f /* Media Code */
+
+/*
+** SROM Extended Format Block Type 0 Masks
+*/
+#define BLOCK0_FI 0x80 /* Format Indicator */
+#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
+#define BLOCK0_MC 0x3f /* Media Code */
+
+/*
+** DC21040 Full Duplex Register (DE4X5_FDR)
+*/
+#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
+
+/*
+** DC21041 General Purpose Timer Register (DE4X5_GPT)
+*/
+#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
+#define GPT_VAL 0x0000ffff /* Timer Value */
+
+/*
+** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
+*/
+/* Valid ONLY for DE500 hardware */
+#define GEP_LNP 0x00000080 /* Link Pass (input) */
+#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
+#define GEP_SDET 0x00000020 /* Signal Detect (input) */
+#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
+#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
+#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
+#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
+#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
+#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
+#define GEP_CTRL 0x00000100 /* GEP control bit */
+
+/*
+** SIA Register Defaults
+*/
+#define CSR13 0x00000001
+#define CSR14 0x0003ff7f /* Autonegotiation disabled */
+#define CSR15 0x00000008
+
+/*
+** SIA Status Register (DE4X5_SISR)
+*/
+#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
+#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
+#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
+#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
+#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
+#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
+#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
+#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
+#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
+#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
+#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
+#define SISR_DAO 0x00000080 /* PLL All One */
+#define SISR_DAZ 0x00000040 /* PLL All Zero */
+#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
+#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
+#define SISR_APS 0x00000008 /* Auto Polarity State */
+#define SISR_LKF 0x00000004 /* Link Fail Status */
+#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
+#define SISR_NCR 0x00000002 /* Network Connection Error */
+#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
+#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
+#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
+
+#define ANS_NDIS 0x00000000 /* Nway disable */
+#define ANS_TDIS 0x00001000 /* Transmit Disable */
+#define ANS_ADET 0x00002000 /* Ability Detect */
+#define ANS_ACK 0x00003000 /* Acknowledge */
+#define ANS_CACK 0x00004000 /* Complete Acknowledge */
+#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
+#define ANS_LCHK 0x00006000 /* Link Check */
+
+#define SISR_RST 0x00000301 /* CSR12 reset */
+#define SISR_ANR 0x00001301 /* Autonegotiation restart */
+
+/*
+** SIA Connectivity Register (DE4X5_SICR)
+*/
+#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
+#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
+#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
+#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
+#define SICR_IE 0x00001000 /* Input Enable */
+#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
+#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
+#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
+#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/
+#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
+#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
+#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
+#define SICR_ASE 0x00000080 /* APLL Start Enable*/
+#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
+#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
+#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
+#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
+#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
+#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
+#define SICR_SRL 0x00000001 /* SIA Reset */
+#define SIA_RESET 0x00000000 /* SIA Reset Value */
+
+/*
+** SIA Transmit and Receive Register (DE4X5_STRR)
+*/
+#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
+#define STRR_SPP 0x00004000 /* Set Polarity Plus */
+#define STRR_APE 0x00002000 /* Auto Polarity Enable */
+#define STRR_LTE 0x00001000 /* Link Test Enable */
+#define STRR_SQE 0x00000800 /* Signal Quality Enable */
+#define STRR_CLD 0x00000400 /* Collision Detect Enable */
+#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
+#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
+#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
+#define STRR_HDE 0x00000040 /* Half Duplex Enable */
+#define STRR_CPEN 0x00000030 /* Compensation Enable */
+#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
+#define STRR_DREN 0x00000004 /* Driver Enable */
+#define STRR_LBK 0x00000002 /* Loopback Enable */
+#define STRR_ECEN 0x00000001 /* Encoder Enable */
+#define STRR_RESET 0xffffffff /* Reset value for STRR */
+
+/*
+** SIA General Register (DE4X5_SIGR)
+*/
+#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
+#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
+#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
+#define SIGR_CWE 0x08000000 /* Control Write Enable */
+#define SIGR_RME 0x04000000 /* Receive Match Enable */
+#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
+#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
+#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
+#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
+#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
+#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
+#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
+#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
+#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
+#define SIGR_FRL 0x00002000 /* Force Receiver Low */
+#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
+#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
+#define SIGR_FLF 0x00000400 /* Force Link Fail */
+#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
+#define SIGR_TSCK 0x00000100 /* Test Clock */
+#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
+#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
+#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
+#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
+#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
+#define SIGR_JCK 0x00000004 /* Jabber Clock */
+#define SIGR_HUJ 0x00000002 /* Host Unjab */
+#define SIGR_JBD 0x00000001 /* Jabber Disable */
+#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
+
+/*
+** Receive Descriptor Bit Summary
+*/
+#define R_OWN 0x80000000 /* Own Bit */
+#define RD_FF 0x40000000 /* Filtering Fail */
+#define RD_FL 0x3fff0000 /* Frame Length */
+#define RD_ES 0x00008000 /* Error Summary */
+#define RD_LE 0x00004000 /* Length Error */
+#define RD_DT 0x00003000 /* Data Type */
+#define RD_RF 0x00000800 /* Runt Frame */
+#define RD_MF 0x00000400 /* Multicast Frame */
+#define RD_FS 0x00000200 /* First Descriptor */
+#define RD_LS 0x00000100 /* Last Descriptor */
+#define RD_TL 0x00000080 /* Frame Too Long */
+#define RD_CS 0x00000040 /* Collision Seen */
+#define RD_FT 0x00000020 /* Frame Type */
+#define RD_RJ 0x00000010 /* Receive Watchdog */
+#define RD_RE 0x00000008 /* Report on MII Error */
+#define RD_DB 0x00000004 /* Dribbling Bit */
+#define RD_CE 0x00000002 /* CRC Error */
+#define RD_OF 0x00000001 /* Overflow */
+
+#define RD_RER 0x02000000 /* Receive End Of Ring */
+#define RD_RCH 0x01000000 /* Second Address Chained */
+#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
+#define RD_RBS1 0x000007ff /* Buffer 1 Size */
+
+/*
+** Transmit Descriptor Bit Summary
+*/
+#define T_OWN 0x80000000 /* Own Bit */
+#define TD_ES 0x00008000 /* Error Summary */
+#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
+#define TD_LO 0x00000800 /* Loss Of Carrier */
+#define TD_NC 0x00000400 /* No Carrier */
+#define TD_LC 0x00000200 /* Late Collision */
+#define TD_EC 0x00000100 /* Excessive Collisions */
+#define TD_HF 0x00000080 /* Heartbeat Fail */
+#define TD_CC 0x00000078 /* Collision Counter */
+#define TD_LF 0x00000004 /* Link Fail */
+#define TD_UF 0x00000002 /* Underflow Error */
+#define TD_DE 0x00000001 /* Deferred */
+
+#define TD_IC 0x80000000 /* Interrupt On Completion */
+#define TD_LS 0x40000000 /* Last Segment */
+#define TD_FS 0x20000000 /* First Segment */
+#define TD_FT1 0x10000000 /* Filtering Type */
+#define TD_SET 0x08000000 /* Setup Packet */
+#define TD_AC 0x04000000 /* Add CRC Disable */
+#define TD_TER 0x02000000 /* Transmit End Of Ring */
+#define TD_TCH 0x01000000 /* Second Address Chained */
+#define TD_DPD 0x00800000 /* Disabled Padding */
+#define TD_FT0 0x00400000 /* Filtering Type */
+#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
+#define TD_TBS1 0x000007ff /* Buffer 1 Size */
+
+#define PERFECT_F 0x00000000
+#define HASH_F TD_FT0
+#define INVERSE_F TD_FT1
+#define HASH_O_F    (TD_FT1 | TD_FT0)
+
+/*
+** Media / mode state machine definitions
+** User selectable:
+*/
+#define TP 0x0001 /* 10Base-T */
+#define TP_NW 0x0002 /* 10Base-T with Nway */
+#define BNC 0x0004 /* Thinwire */
+#define AUI 0x0008 /* Thickwire */
+#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
+#define _10Mb 0x0040 /* 10Mb/s Ethernet */
+#define _100Mb 0x0080 /* 100Mb/s Ethernet */
+#define AUTO 0x4000 /* Auto sense the media or speed */
+
+/*
+** Internal states
+*/
+#define NC 0x0000 /* No Connection */
+#define ANS 0x0020 /* Intermediate AutoNegotiation State */
+#define SPD_DET 0x0100 /* Parallel speed detection */
+#define INIT 0x0200 /* Initial state */
+#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
+#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
+#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
+#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
+#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
+#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
+#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
+#define MII 0x1000 /* MII on the 21143 */
+
+#define TIMER_CB 0x80000000 /* Timer callback detection */
+
+/*
+** DE4X5 DEBUG Options
+*/
+#define DEBUG_NONE 0x0000 /* No DEBUG messages */
+#define DEBUG_VERSION 0x0001 /* Print version message */
+#define DEBUG_MEDIA 0x0002 /* Print media messages */
+#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
+#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
+#define DEBUG_SROM 0x0010 /* Print SROM messages */
+#define DEBUG_MII 0x0020 /* Print MII messages */
+#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
+#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
+#define DEBUG_PCICFG 0x0100
+#define DEBUG_ALL 0x01ff
+
+/*
+** Miscellaneous
+*/
+#define PCI 0
+#define EISA 1
+
+#define HASH_TABLE_LEN 512 /* Bits */
+#define HASH_BITS 0x01ff /* 9 LS bits */
+
+#define SETUP_FRAME_LEN 192 /* Bytes */
+#define IMPERF_PA_OFFSET 156 /* Bytes */
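+
+/*
+** Illustrative arithmetic (assumed from the usual 21x4x setup frame
+** layout): a perfect-filtering setup frame holds 16 addresses of 12
+** bytes each (every 6-byte address spread over three 32-bit words,
+** low 16 bits used), hence 16 * 12 = 192 bytes.  In the hash filtering
+** modes the node's own (perfect) address is expected at the fixed
+** IMPERF_PA_OFFSET within that frame.
+*/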
+
+#define POLL_DEMAND 1
+
+#define LOST_MEDIA_THRESHOLD 3
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define DE4X5_STRLEN 8
+
+#define DE4X5_INIT 0 /* Initialisation time */
+#define DE4X5_RUN 1 /* Run time */
+
+#define DE4X5_SAVE_STATE 0
+#define DE4X5_RESTORE_STATE 1
+
+/*
+** Address Filtering Modes
+*/
+#define PERFECT 0 /* 16 perfect physical addresses */
+#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
+#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
+#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
+
+#define ALL 0 /* Clear out all the setup frame */
+#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
+
+/*
+** Booleans
+*/
+#define NO 0
+#define FALSE 0
+
+#define YES ~0
+#define TRUE ~0
+
+/*
+** Adapter state
+*/
+#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
+#define CLOSED 1 /* Ready for opening */
+#define OPEN 2 /* Running */
+
+/*
+** Various wait times
+*/
+#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
+#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
+
+/*
+** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
+** the vendors seem split 50-50 on how to calculate the OUI register values
+** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
+*/
+#define NATIONAL_TX 0x2000
+#define BROADCOM_T4 0x03e0
+#define SEEQ_T4 0x0016
+#define CYPRESS_T4 0x0014
+
+/*
+** Speed Selection stuff
+*/
+#define SET_10Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+#define SET_100Mb {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ int fdx=0;\
+ if (lp->phy[lp->active].id == NATIONAL_TX) {\
+ mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
+ 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
+ sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
+ if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
+ if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
+ mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ }\
+ if (fdx) omr |= OMR_FDX;\
+ outl(omr, DE4X5_OMR);\
+ if (!lp->useSROM) lp->cache.gep = 0;\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ omr |= (lp->fdx ? OMR_FDX : 0);\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
+ lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+/* FIX ME so I don't jam 10Mb networks */
+#define SET_100Mb_PDET {\
+ if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
+ mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else if (lp->useSROM && !lp->useMII) {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr, DE4X5_OMR);\
+ } else {\
+ omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
+ outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
+ lp->cache.gep = (GEP_FDXD | GEP_MODE);\
+ gep_wr(lp->cache.gep, dev);\
+ }\
+}
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DE4X5IOCTL SIOCDEVPRIVATE
+
+struct de4x5_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
+#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
+#define DE4X5_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DE4X5_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
+#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
+#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
+#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
+#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
+#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
+#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
+#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
+
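+/*
+** Illustrative only: a hypothetical userland call of the private ioctl
+** above, kept inside #if 0 so it is never compiled.  It assumes the
+** driver overlays struct de4x5_ioctl on the ifr_ifru area of struct
+** ifreq, as SIOCDEVPRIVATE handlers of this era commonly do; check the
+** de4x5.c ioctl handler before relying on it.
+*/
+#if 0 /* example, not compiled */
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <net/if.h>
+
+static int de4x5_get_hwaddr_example(int fd, const char *ifname,
+				    unsigned char mac[6])
+{
+    struct ifreq ifr;
+    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &ifr.ifr_ifru;
+
+    memset(&ifr, 0, sizeof(ifr));
+    strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+    ioc->cmd  = DE4X5_GET_HWADDR;        /* command to run            */
+    ioc->len  = 6;                       /* size of the reply buffer  */
+    ioc->data = mac;                     /* user buffer for the reply */
+
+    /* fd is any open AF_INET socket; returns 0 on success, -1 on error */
+    return ioctl(fd, DE4X5IOCTL, &ifr);
+}
+#endif
+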
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
diff --git a/linux/src/drivers/net/de600.c b/linux/src/drivers/net/de600.c
new file mode 100644
index 0000000..ce96942
--- /dev/null
+++ b/linux/src/drivers/net/de600.c
@@ -0,0 +1,853 @@
+static const char *version =
+ "de600.c: $Revision: 1.1 $, Bjorn Ekwall (bj0rn@blox.se)\n";
+/*
+ * de600.c
+ *
+ * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
+ * The Author may be reached as bj0rn@blox.se
+ *
+ * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
+ * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
+ * For DE600.asm:
+ * Portions (C) Copyright 1990 D-Link, Inc.
+ * Copyright, 1988-1992, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
+ *
+ * compile-command:
+ * "gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer \
+ * -m486 -c de600.c
+ *
+ **************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************/
+/* Add another "; SLOW_DOWN_IO" here if your adapter won't work OK: */
+#define DE600_SLOW_DOWN SLOW_DOWN_IO; SLOW_DOWN_IO; SLOW_DOWN_IO
+
+ /*
+ * If you still have trouble reading/writing to the adapter,
+ * modify the following "#define": (see <asm/io.h> for more info)
+#define REALLY_SLOW_IO
+ */
+#define SLOW_IO_BY_JUMPING /* Looks "better" than dummy write to port 0x80 :-) */
+
+/*
+ * If you want to enable automatic continuous checking for the DE600,
+ * keep this #define enabled.
+ * It doesn't cost much per packet, so I think it is worth it!
+ * If you disagree, comment away the #define, and live with it...
+ *
+ */
+#define CHECK_LOST_DE600
+
+/*
+ * Enable this #define if you want the adapter to do a "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ * (Depends on the CHECK_LOST_DE600 above)
+ *
+ */
+#define SHUTDOWN_WHEN_LOST
+
+/*
+ * See comment at "de600_rspace()"!
+ * This is an *ugly* hack, but for now it achieves its goal of
+ * faking a TCP flow-control that will not flood the poor DE600.
+ *
+ * Tricks TCP to announce a small max window (max 2 fast packets please :-)
+ *
+ * Comment away at your own risk!
+ *
+ * Update: Use the more general per-device maxwindow parameter instead.
+ */
+#undef FAKE_SMALL_MAX
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifdef DE600_DEBUG
+#define PRINTK(x) if (de600_debug >= 2) printk x
+#else
+#define DE600_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+unsigned int de600_debug = DE600_DEBUG;
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/in.h>
+#include <linux/ptrace.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#ifdef FAKE_SMALL_MAX
+static unsigned long de600_rspace(struct sock *sk);
+#include <net/sock.h>
+#endif
+
+#define netstats enet_statistics
+typedef unsigned char byte;
+
+/**************************************************
+ * *
+ * Definition of D-Link Ethernet Pocket adapter *
+ * *
+ **************************************************/
+/*
+ * D-Link Ethernet pocket adapter ports
+ */
+/*
+ * OK, so I'm cheating, but there are an awful lot of
+ * reads and writes in order to get anything in and out
+ * of the DE-600 with 4 bits at a time in the parallel port,
+ * so every saved instruction really helps :-)
+ *
+ * That is, I don't care what the device struct says
+ * but hope that Space.c will keep the rest of the drivers happy.
+ */
+#ifndef DE600_IO
+#define DE600_IO 0x378
+#endif
+
+#define DATA_PORT (DE600_IO)
+#define STATUS_PORT (DE600_IO + 1)
+#define COMMAND_PORT (DE600_IO + 2)
+
+#ifndef DE600_IRQ
+#define DE600_IRQ 7
+#endif
+/*
+ * It really should look like this, and autoprobing as well...
+ *
+#define DATA_PORT (dev->base_addr + 0)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+#define DE600_IRQ dev->irq
+ */
+
+/*
+ * D-Link COMMAND_PORT commands
+ */
+#define SELECT_NIC 0x04 /* select Network Interface Card */
+#define SELECT_PRN 0x1c /* select Printer */
+#define NML_PRN 0xec /* normal Printer situation */
+#define IRQEN 0x10 /* enable IRQ line */
+
+/*
+ * D-Link STATUS_PORT
+ */
+#define RX_BUSY 0x80
+#define RX_GOOD 0x40
+#define TX_FAILED16 0x10
+#define TX_BUSY 0x08
+
+/*
+ * D-Link DATA_PORT commands
+ * command in low 4 bits
+ * data in high 4 bits
+ * select current data nibble with HI_NIBBLE bit
+ */
+#define WRITE_DATA 0x00 /* write memory */
+#define READ_DATA 0x01 /* read memory */
+#define STATUS 0x02 /* read status register */
+#define COMMAND 0x03 /* write command register (see COMMAND below) */
+#define NULL_COMMAND 0x04 /* null command */
+#define RX_LEN 0x05 /* read received packet length */
+#define TX_ADDR 0x06 /* set adapter transmit memory address */
+#define RW_ADDR 0x07 /* set adapter read/write memory address */
+#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
+ or-ed with rest of command */
+
+/*
+ * command register, accessed through DATA_PORT with low bits = COMMAND
+ */
+#define RX_ALL 0x01 /* PROMISCUOUS */
+#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
+#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
+
+#define TX_ENABLE 0x04 /* bit 2 */
+#define RX_ENABLE 0x08 /* bit 3 */
+
+#define RESET 0x80 /* set bit 7 high */
+#define STOP_RESET 0x00 /* set bit 7 low */
+
+/*
+ * data to command register
+ * (high 4 bits in write to DATA_PORT)
+ */
+#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
+#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
+#define FLIP_IRQ 0x40 /* bit 6 */
+
+/*
+ * D-Link adapter internal memory:
+ *
+ * 0-2K 1:st transmit page (send from pointer up to 2K)
+ * 2-4K 2:nd transmit page (send from pointer up to 4K)
+ *
+ * 4-6K 1:st receive page (data from 4K upwards)
+ * 6-8K 2:nd receive page (data from 6K upwards)
+ *
+ * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
+ */
+#define MEM_2K 0x0800 /* 2048 */
+#define MEM_4K 0x1000 /* 4096 */
+#define MEM_6K 0x1800 /* 6144 */
+#define NODE_ADDRESS 0x2000 /* 8192 */
+
+#define RUNT 60 /* Too small Ethernet packet */
+
+/**************************************************
+ * *
+ * End of definition *
+ * *
+ **************************************************/
+
+/*
+ * Index to functions, as function prototypes.
+ */
+/* Routines used internally. (See "convenience macros") */
+static byte de600_read_status(struct device *dev);
+static byte de600_read_byte(unsigned char type, struct device *dev);
+
+/* Put in the device structure. */
+static int de600_open(struct device *dev);
+static int de600_close(struct device *dev);
+static struct netstats *get_stats(struct device *dev);
+static int de600_start_xmit(struct sk_buff *skb, struct device *dev);
+
+/* Dispatch from interrupts. */
+static void de600_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int de600_tx_intr(struct device *dev, int irq_status);
+static void de600_rx_intr(struct device *dev);
+
+/* Initialization */
+static void trigger_interrupt(struct device *dev);
+int de600_probe(struct device *dev);
+static int adapter_init(struct device *dev);
+
+/*
+ * D-Link driver variables:
+ */
+static volatile int rx_page = 0;
+
+#define TX_PAGES 2
+static volatile int tx_fifo[TX_PAGES];
+static volatile int tx_fifo_in = 0;
+static volatile int tx_fifo_out = 0;
+static volatile int free_tx_pages = TX_PAGES;
+static int was_down = 0;
+
+/*
+ * Convenience macros/functions for D-Link adapter
+ */
+
+#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
+#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
+
+/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
+#define de600_put_byte(data) ( \
+ outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
+ outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
+
+/*
+ * The first two outb_p()'s below could perhaps be deleted if there
+ * would be more delay in the last two. Not certain about it yet...
+ */
+#define de600_put_command(cmd) ( \
+ outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
+ outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
+ outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
+ outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
+
+#define de600_setup_address(addr,type) ( \
+ outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
+ outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
+ outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
+ outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
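+
+/*
+ * Worked example (illustrative): de600_setup_address(0x1234, RW_ADDR)
+ * expands to four writes to DATA_PORT, least significant nibble first,
+ * each address nibble in the high half of the byte and the command
+ * (RW_ADDR = 0x07) in the low half, with HI_NIBBLE set on every second
+ * write:
+ *	outb_p(0x47, DATA_PORT);	nibble 0x4 (address bits  3..0)
+ *	outb_p(0x3f, DATA_PORT);	nibble 0x3 (address bits  7..4)
+ *	outb_p(0x27, DATA_PORT);	nibble 0x2 (address bits 11..8)
+ *	outb_p(0x1f, DATA_PORT);	nibble 0x1 (address bits 15..12)
+ */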
+
+#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
+
+/* Flip bit, only 2 pages */
+#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
+
+#define tx_page_adr(a) (((a) + 1) * MEM_2K)
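+
+/*
+ * Note (illustrative): tx_page_adr(n) is the end address of tx page n
+ * (0x0800 for page 0, 0x1000 for page 1).  de600_start_xmit() places a
+ * packet of 'len' bytes so that it ends exactly at that boundary, i.e.
+ * it starts copying at tx_page_adr(n) - len, and the adapter then sends
+ * "from the pointer up to 2K/4K" as described in the memory map above.
+ */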
+
+static inline byte
+de600_read_status(struct device *dev)
+{
+ byte status;
+
+ outb_p(STATUS, DATA_PORT);
+ status = inb(STATUS_PORT);
+ outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
+
+ return status;
+}
+
+static inline byte
+de600_read_byte(unsigned char type, struct device *dev) { /* dev used by macros */
+ byte lo;
+
+ (void)outb_p((type), DATA_PORT);
+ lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
+ (void)outb_p((type) | HI_NIBBLE, DATA_PORT);
+ return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
+}
+
+/*
+ * Open/initialize the board. This is called (in the current kernel)
+ * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+de600_open(struct device *dev)
+{
+ if (request_irq(DE600_IRQ, de600_interrupt, 0, "de600", NULL)) {
+ printk ("%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
+ return 1;
+ }
+ irq2dev_map[DE600_IRQ] = dev;
+
+ MOD_INC_USE_COUNT;
+ dev->start = 1;
+ if (adapter_init(dev)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * The inverse routine to de600_open().
+ */
+static int
+de600_close(struct device *dev)
+{
+ select_nic();
+ rx_page = 0;
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ de600_put_command(0);
+ select_prn();
+
+ if (dev->start) {
+ free_irq(DE600_IRQ, NULL);
+ irq2dev_map[DE600_IRQ] = NULL;
+ dev->start = 0;
+ MOD_DEC_USE_COUNT;
+ }
+ return 0;
+}
+
+static struct netstats *
+get_stats(struct device *dev)
+{
+ return (struct netstats *)(dev->priv);
+}
+
+static inline void
+trigger_interrupt(struct device *dev)
+{
+ de600_put_command(FLIP_IRQ);
+ select_prn();
+ DE600_SLOW_DOWN;
+ select_nic();
+ de600_put_command(0);
+}
+
+/*
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int
+de600_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ int transmit_from;
+ int len;
+ int tickssofar;
+ byte *buffer = skb->data;
+
+ /*
+ * If some higher layer thinks we've missed a
+ * tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 5)
+ return 1;
+
+ /* else */
+ printk("%s: transmit timed out (%d), %s?\n",
+ dev->name,
+ tickssofar,
+ "network cable problem"
+ );
+ /* Restart the adapter. */
+ if (adapter_init(dev)) {
+ return 1;
+ }
+ }
+
+ /* Start real output */
+ PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages));
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+
+ cli();
+ select_nic();
+ tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
+ tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
+
+#ifdef CHECK_LOST_DE600
+ /* This costs about 40 instructions per packet... */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
+ if (adapter_init(dev)) {
+ sti();
+ return 1;
+ }
+ }
+#endif
+
+ de600_setup_address(transmit_from, RW_ADDR);
+ for ( ; len > 0; --len, ++buffer)
+ de600_put_byte(*buffer);
+
+ if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
+ dev->trans_start = jiffies;
+ dev->tbusy = 0; /* allow more packets into adapter */
+ /* Send page and generate a faked interrupt */
+ de600_setup_address(transmit_from, TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ }
+ else {
+ dev->tbusy = !free_tx_pages;
+ select_prn();
+ }
+
+ sti(); /* interrupts back on */
+
+#ifdef FAKE_SMALL_MAX
+ /* This will "patch" the socket TCP proto at an early moment */
+ if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) &&
+ (skb->sk->prot->rspace != &de600_rspace))
+ skb->sk->prot->rspace = de600_rspace; /* Ugh! */
+#endif
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/*
+ * The typical workload of the driver:
+ * Handle the network interface interrupts.
+ */
+static void
+de600_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = irq2dev_map[irq];
+ byte irq_status;
+ int retrig = 0;
+ int boguscount = 0;
+
+ /* This might just as well be deleted now, no crummy drivers present :-) */
+ if ((dev == NULL) || (dev->start == 0) || (DE600_IRQ != irq)) {
+ printk("%s: bogus interrupt %d\n", dev?dev->name:"DE-600", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+ select_nic();
+ irq_status = de600_read_status(dev);
+
+ do {
+ PRINTK(("de600_interrupt (%02X)\n", irq_status));
+
+ if (irq_status & RX_GOOD)
+ de600_rx_intr(dev);
+ else if (!(irq_status & RX_BUSY))
+ de600_put_command(RX_ENABLE);
+
+ /* Any transmission in progress? */
+ if (free_tx_pages < TX_PAGES)
+ retrig = de600_tx_intr(dev, irq_status);
+ else
+ retrig = 0;
+
+ irq_status = de600_read_status(dev);
+ } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
+ /*
+ * Yeah, it _looks_ like busy waiting, smells like busy waiting
+ * and I know it's not PC, but please, it will only occur once
+ * in a while and then only for a loop or so (< 1ms for sure!)
+ */
+
+ /* Enable adapter interrupts */
+ dev->interrupt = 0;
+ select_prn();
+
+ if (retrig)
+ trigger_interrupt(dev);
+
+ sti();
+ return;
+}
+
+static int
+de600_tx_intr(struct device *dev, int irq_status)
+{
+ /*
+ * Returns 1 if tx still not done
+ */
+
+ mark_bh(NET_BH);
+ /* Check if current transmission is done yet */
+ if (irq_status & TX_BUSY)
+ return 1; /* tx not done, try again */
+
+ /* else */
+ /* If last transmission OK then bump fifo index */
+ if (!(irq_status & TX_FAILED16)) {
+ tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
+ ++free_tx_pages;
+ ((struct netstats *)(dev->priv))->tx_packets++;
+ dev->tbusy = 0;
+ }
+
+ /* More to send, or resend last packet? */
+ if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
+ dev->trans_start = jiffies;
+ de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
+ de600_put_command(TX_ENABLE);
+ return 1;
+ }
+ /* else */
+
+ return 0;
+}
+
+/*
+ * We have a good packet, get it out of the adapter.
+ */
+static void
+de600_rx_intr(struct device *dev)
+{
+ struct sk_buff *skb;
+ int i;
+ int read_from;
+ int size;
+ register unsigned char *buffer;
+
+ cli();
+ /* Get size of received packet */
+ size = de600_read_byte(RX_LEN, dev); /* low byte */
+ size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
+ size -= 4; /* Ignore trailing 4 CRC-bytes */
+
+ /* Tell adapter where to store next incoming packet, enable receiver */
+ read_from = rx_page_adr();
+ next_rx_page();
+ de600_put_command(RX_ENABLE);
+ sti();
+
+ if ((size < 32) || (size > 1535)) {
+ printk("%s: Bogus packet size %d.\n", dev->name, size);
+ if (size > 10000)
+ adapter_init(dev);
+ return;
+ }
+
+ skb = dev_alloc_skb(size+2);
+ sti();
+ if (skb == NULL) {
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, size);
+ return;
+ }
+ /* else */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align */
+
+ /* 'skb->data' points to the start of sk_buff data area. */
+ buffer = skb_put(skb,size);
+
+ /* copy the packet into the buffer */
+ de600_setup_address(read_from, RW_ADDR);
+ for (i = size; i > 0; --i, ++buffer)
+ *buffer = de600_read_byte(READ_DATA, dev);
+
+ ((struct netstats *)(dev->priv))->rx_packets++; /* count all receives */
+
+ skb->protocol=eth_type_trans(skb,dev);
+
+ netif_rx(skb);
+ /*
+ * If any worth-while packets have been received, netif_rx()
+ * has done a mark_bh(INET_BH) for us and will work on them
+ * when we get to the bottom-half routine.
+ */
+}
+
+int
+de600_probe(struct device *dev)
+{
+ int i;
+ static struct netstats de600_netstats;
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+
+ printk("%s: D-Link DE-600 pocket adapter", dev->name);
+ /* Alpha testers must have the version number to report bugs. */
+ if (de600_debug > 1)
+ printk("%s", version);
+
+ /* probe for adapter */
+ rx_page = 0;
+ select_nic();
+ (void)de600_read_status(dev);
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+ if (de600_read_status(dev) & 0xf0) {
+ printk(": not at I/O %#3x.\n", DATA_PORT);
+ return ENODEV;
+ }
+
+ /*
+ * Maybe we found one,
+ * have to check if it is a D-Link DE-600 adapter...
+ */
+
+ /* Get the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
+ dev->broadcast[i] = 0xff;
+ }
+
+ /* Check magic code */
+ if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
+ /* OK, install real address */
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0xc8;
+ dev->dev_addr[3] &= 0x0f;
+ dev->dev_addr[3] |= 0x70;
+ } else {
+ printk(" not identified in the printer port\n");
+ return ENODEV;
+ }
+
+#if 0 /* Not yet */
+ if (check_region(DE600_IO, 3)) {
+ printk(", port 0x%x busy\n", DE600_IO);
+ return EBUSY;
+ }
+#endif
+ request_region(DE600_IO, 3, "de600");
+
+ printk(", Ethernet Address: %02X", dev->dev_addr[0]);
+ for (i = 1; i < ETH_ALEN; i++)
+ printk(":%02X",dev->dev_addr[i]);
+ printk("\n");
+
+ /* Initialize the device structure. */
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+ dev->priv = &de600_netstats;
+
+ memset(dev->priv, 0, sizeof(struct netstats));
+ dev->get_stats = get_stats;
+
+ dev->open = de600_open;
+ dev->stop = de600_close;
+ dev->hard_start_xmit = &de600_start_xmit;
+
+ ether_setup(dev);
+
+ dev->flags&=~IFF_MULTICAST;
+
+ select_prn();
+ return 0;
+}
+
+static int
+adapter_init(struct device *dev)
+{
+ int i;
+ long flags;
+
+ save_flags(flags);
+ cli();
+
+ select_nic();
+ rx_page = 0; /* used by RESET */
+ de600_put_command(RESET);
+ de600_put_command(STOP_RESET);
+#ifdef CHECK_LOST_DE600
+ /* Check if it is still there... */
+	/* Get some bytes of the adapter ethernet address from the ROM */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ de600_read_byte(READ_DATA, dev);
+ if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
+ (de600_read_byte(READ_DATA, dev) != 0x15)) {
+ /* was: if (de600_read_status(dev) & 0xf0) { */
+ printk("Something has happened to the DE-600! Please check it"
+#ifdef SHUTDOWN_WHEN_LOST
+ " and do a new ifconfig"
+#endif /* SHUTDOWN_WHEN_LOST */
+ "!\n");
+#ifdef SHUTDOWN_WHEN_LOST
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de600_close(dev);
+#endif /* SHUTDOWN_WHEN_LOST */
+ was_down = 1;
+ dev->tbusy = 1; /* Transmit busy... */
+ restore_flags(flags);
+ return 1; /* failed */
+ }
+#endif /* CHECK_LOST_DE600 */
+ if (was_down) {
+ printk("Thanks, I feel much better now!\n");
+ was_down = 0;
+ }
+
+ dev->tbusy = 0; /* Transmit busy... */
+ dev->interrupt = 0;
+ tx_fifo_in = 0;
+ tx_fifo_out = 0;
+ free_tx_pages = TX_PAGES;
+
+ /* set the ether address. */
+ de600_setup_address(NODE_ADDRESS, RW_ADDR);
+ for (i = 0; i < ETH_ALEN; i++)
+ de600_put_byte(dev->dev_addr[i]);
+
+ /* where to start saving incoming packets */
+ rx_page = RX_BP | RX_BASE_PAGE;
+ de600_setup_address(MEM_4K, RW_ADDR);
+ /* Enable receiver */
+ de600_put_command(RX_ENABLE);
+ select_prn();
+ restore_flags(flags);
+
+ return 0; /* OK */
+}
+
+#ifdef FAKE_SMALL_MAX
+/*
+ * The new router code (coming soon 8-) ) will fix this properly.
+ */
+#define DE600_MIN_WINDOW 1024
+#define DE600_MAX_WINDOW 2048
+#define DE600_TCP_WINDOW_DIFF 1024
+/*
+ * Copied from "net/inet/sock.c"
+ *
+ * Sets a lower max receive window in order to achieve <= 2
+ * packets arriving at the adapter in fast succession.
+ * (No way that a DE-600 can keep up with a net saturated
+ * with packets homing in on it :-( )
+ *
+ * Since there are only 2 receive buffers in the DE-600
+ * and it takes some time to copy from the adapter,
+ * this is absolutely necessary for any TCP performance whatsoever!
+ *
+ * Note that the returned window info will never be smaller than
+ * DE600_MIN_WINDOW, i.e. 1024
+ * This differs from the standard function, that can return an
+ * arbitrarily small window!
+ */
+#define min(a,b) ((a)<(b)?(a):(b))
+static unsigned long
+de600_rspace(struct sock *sk)
+{
+ int amt;
+
+ if (sk != NULL) {
+/*
+ * Hack! You might want to play with commenting away the following line,
+ * if you know what you do!
+ sk->max_unacked = DE600_MAX_WINDOW - DE600_TCP_WINDOW_DIFF;
+ */
+
+ if (sk->rmem_alloc >= sk->rcvbuf-2*DE600_MIN_WINDOW) return(0);
+ amt = min((sk->rcvbuf-sk->rmem_alloc)/2/*-DE600_MIN_WINDOW*/, DE600_MAX_WINDOW);
+ if (amt < 0) return(0);
+ return(amt);
+ }
+ return(0);
+}
+#endif
+
+#ifdef MODULE
+static char nullname[8];
+static struct device de600_dev = {
+ nullname, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, de600_probe };
+
+int
+init_module(void)
+{
+ if (register_netdev(&de600_dev) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&de600_dev);
+ release_region(DE600_IO, 3);
+}
+#endif /* MODULE */
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de600.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/de620.c b/linux/src/drivers/net/de620.c
new file mode 100644
index 0000000..0e0c552
--- /dev/null
+++ b/linux/src/drivers/net/de620.c
@@ -0,0 +1,1045 @@
+/*
+ * de620.c $Revision: 1.1 $ BETA
+ *
+ *
+ * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
+ *
+ * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
+ *
+ * Based on adapter information gathered from DOS packetdriver
+ * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
+ * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
+ * Copyright, 1988, Russell Nelson, Crynwr Software
+ *
+ * Adapted to the sample network driver core for linux,
+ * written by: Donald Becker <becker@super.org>
+ * (Now at <becker@cesdis.gsfc.nasa.gov>)
+ *
+ * Valuable assistance from:
+ * J. Joshua Kopper <kopper@rtsg.mot.com>
+ * Olav Kvittem <Olav.Kvittem@uninett.no>
+ * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
+ * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
+ *
+ *****************************************************************************/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+static const char *version =
+ "de620.c: $Revision: 1.1 $, Bjorn Ekwall <bj0rn@blox.se>\n";
+
+/***********************************************************************
+ *
+ * "Tuning" section.
+ *
+ * Compile-time options: (see below for descriptions)
+ * -DDE620_IO=0x378 (lpt1)
+ * -DDE620_IRQ=7 (lpt1)
+ *	-DDE620_DEBUG=...
+ * -DSHUTDOWN_WHEN_LOST
+ * -DCOUNT_LOOPS
+ * -DLOWSPEED
+ * -DREAD_DELAY
+ * -DWRITE_DELAY
+ */
+
+/*
+ * This driver assumes that the printer port is a "normal",
+ * dumb, uni-directional port!
+ * If your port is "fancy" in any way, please try to set it to "normal"
+ * with your BIOS setup. I have no access to machines with bi-directional
+ * ports, so I can't test such a driver :-(
+ * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
+ *
+ * There are some clones of DE620 out there, with different names.
+ * If the current driver does not recognize a clone, try to change
+ * the following #define to:
+ *
+ * #define DE620_CLONE 1
+ */
+#define DE620_CLONE 0
+
+/*
+ * If the adapter has problems with high speeds, enable this #define
+ * otherwise full printerport speed will be attempted.
+ *
+ * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
+ *
+#define LOWSPEED
+ */
+
+#ifndef READ_DELAY
+#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
+#endif
+
+#ifndef WRITE_DELAY
+#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
+#endif
+
+/*
+ * Enable this #define if you want the adapter to do a "ifconfig down" on
+ * itself when we have detected that something is possibly wrong with it.
+ * The default behaviour is to retry with "adapter_init()" until success.
+ * This should be used for debugging purposes only.
+ *
+#define SHUTDOWN_WHEN_LOST
+ */
+
+/*
+ * Enable debugging by "-DDE620_DEBUG=3" when compiling,
+ * OR in "./CONFIG"
+ * OR by enabling the following #define
+ *
+ * use 0 for production, 1 for verification, >2 for debug
+ *
+#define DE620_DEBUG 3
+ */
+
+#ifdef LOWSPEED
+/*
+ * Enable this #define if you want to see debugging output that show how long
+ * we have to wait before the DE-620 is ready for the next read/write/command.
+ *
+#define COUNT_LOOPS
+ */
+#endif
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <linux/in.h>
+#include <linux/ptrace.h>
+#include <asm/system.h>
+#include <linux/errno.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Constant definitions for the DE-620 registers, commands and bits */
+#include "de620.h"
+
+#define netstats enet_statistics
+typedef unsigned char byte;
+
+/*******************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * See also "de620.h" *
+ * *
+ *******************************************************/
+#ifndef DE620_IO /* Compile-time configurable */
+#define DE620_IO 0x378
+#endif
+
+#ifndef DE620_IRQ /* Compile-time configurable */
+#define DE620_IRQ 7
+#endif
+
+#define DATA_PORT (dev->base_addr)
+#define STATUS_PORT (dev->base_addr + 1)
+#define COMMAND_PORT (dev->base_addr + 2)
+
+#define RUNT 60 /* Too small Ethernet packet */
+#define GIANT 1514 /* largest legal size packet, no fcs */
+
+#ifdef DE620_DEBUG /* Compile-time configurable */
+#define PRINTK(x) if (de620_debug >= 2) printk x
+#else
+#define DE620_DEBUG 0
+#define PRINTK(x) /**/
+#endif
+
+
+/*
+ * Force media with insmod:
+ * insmod de620.o bnc=1
+ * or
+ * insmod de620.o utp=1
+ *
+ * Force io and/or irq with insmod:
+ * insmod de620.o io=0x378 irq=7
+ *
+ * Make a clone skip the Ethernet-address range check:
+ * insmod de620.o clone=1
+ */
+static int bnc = 0;
+static int utp = 0;
+static int io = DE620_IO;
+static int irq = DE620_IRQ;
+static int clone = DE620_CLONE;
+
+static unsigned int de620_debug = DE620_DEBUG;
+
+/***********************************************
+ * *
+ * Index to functions, as function prototypes. *
+ * *
+ ***********************************************/
+
+/*
+ * Routines used internally. (See also "convenience macros.. below")
+ */
+
+/* Put in the device structure. */
+static int de620_open(struct device *);
+static int de620_close(struct device *);
+static struct netstats *get_stats(struct device *);
+static void de620_set_multicast_list(struct device *);
+static int de620_start_xmit(struct sk_buff *, struct device *);
+
+/* Dispatch from interrupts. */
+static void de620_interrupt(int, void *, struct pt_regs *);
+static int de620_rx_intr(struct device *);
+
+/* Initialization */
+static int adapter_init(struct device *);
+int de620_probe(struct device *);
+static int read_eeprom(struct device *);
+
+
+/*
+ * D-Link driver variables:
+ */
+#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
+#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
+#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
+#define DEF_NIC_CMD IRQEN | ICEN | DS1
+
+static volatile byte NIC_Cmd;
+static volatile byte next_rx_page;
+static byte first_rx_page;
+static byte last_rx_page;
+static byte EIPRegister;
+
+static struct nic {
+ byte NodeID[6];
+ byte RAM_Size;
+ byte Model;
+ byte Media;
+ byte SCR;
+} nic_data;
+
+/**********************************************************
+ * *
+ * Convenience macros/functions for D-Link DE-620 adapter *
+ * *
+ **********************************************************/
+#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
+#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
+
+/* Check for ready-status, and return a nibble (high 4 bits) for data input */
+#ifdef COUNT_LOOPS
+static int tot_cnt;
+#endif
+static inline byte
+de620_ready(struct device *dev)
+{
+ byte value;
+ register short int cnt = 0;
+
+ while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
+ ++cnt;
+
+#ifdef COUNT_LOOPS
+ tot_cnt += cnt;
+#endif
+ return value & 0xf0; /* nibble */
+}
+
+static inline void
+de620_send_command(struct device *dev, byte cmd)
+{
+ de620_ready(dev);
+ if (cmd == W_DUMMY)
+ outb(NIC_Cmd, COMMAND_PORT);
+
+ outb(cmd, DATA_PORT);
+
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+ de620_ready(dev);
+ outb(NIC_Cmd, COMMAND_PORT);
+}
+
+static inline void
+de620_put_byte(struct device *dev, byte value)
+{
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ de620_ready(dev);
+ outb(value, DATA_PORT);
+ de620_flip_ds(dev);
+}
+
+static inline byte
+de620_read_byte(struct device *dev)
+{
+ byte value;
+
+ /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
+ value = de620_ready(dev); /* High nibble */
+ de620_flip_ds(dev);
+ value |= de620_ready(dev) >> 4; /* Low nibble */
+ return value;
+}
+
+static inline void
+de620_write_block(struct device *dev, byte *buffer, int count)
+{
+#ifndef LOWSPEED
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+#ifdef COUNT_LOOPS
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+ /* No further optimization useful, the limit is in the adapter. */
+ for ( ; count > 0; --count, ++buffer) {
+ de620_put_byte(dev,*buffer);
+ }
+ de620_send_command(dev,W_DUMMY);
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
+ for ( ; count > 0; count -=2) {
+ outb(*buffer++, DATA_PORT);
+ outb(uflip, COMMAND_PORT);
+ outb(*buffer++, DATA_PORT);
+ outb(dflip, COMMAND_PORT);
+ }
+ de620_send_command(dev,W_DUMMY);
+#endif /* LOWSPEED */
+}
+
+static inline void
+de620_read_block(struct device *dev, byte *data, int count)
+{
+#ifndef LOWSPEED
+ byte value;
+ byte uflip = NIC_Cmd ^ (DS0 | DS1);
+ byte dflip = NIC_Cmd;
+#else /* LOWSPEED */
+#ifdef COUNT_LOOPS
+ int bytes = count;
+
+ tot_cnt = 0;
+#endif /* COUNT_LOOPS */
+#endif /* LOWSPEED */
+
+#ifdef LOWSPEED
+ /* No further optimization useful, the limit is in the adapter. */
+ while (count-- > 0) {
+ *data++ = de620_read_byte(dev);
+ de620_flip_ds(dev);
+ }
+#ifdef COUNT_LOOPS
+ /* trial debug output: loops per byte in de620_ready() */
+ printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
+#endif /* COUNT_LOOPS */
+#else /* not LOWSPEED */
+ while (count-- > 0) {
+ value = inb(STATUS_PORT) & 0xf0; /* High nibble */
+ outb(uflip, COMMAND_PORT);
+ *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
+ outb(dflip , COMMAND_PORT);
+ }
+#endif /* LOWSPEED */
+}
+
+static inline void
+de620_set_delay(struct device *dev)
+{
+ de620_ready(dev);
+ outb(W_DFR, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(WRITE_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+
+ de620_ready(dev);
+#ifdef LOWSPEED
+ outb(READ_DELAY, DATA_PORT);
+#else
+ outb(0, DATA_PORT);
+#endif
+ de620_flip_ds(dev);
+}
+
+static inline void
+de620_set_register(struct device *dev, byte reg, byte value)
+{
+ de620_ready(dev);
+ outb(reg, DATA_PORT);
+ outb(NIC_Cmd ^ CS0, COMMAND_PORT);
+
+ de620_put_byte(dev, value);
+}
+
+static inline byte
+de620_get_register(struct device *dev, byte reg)
+{
+ byte value;
+
+ de620_send_command(dev,reg);
+ value = de620_read_byte(dev);
+ de620_send_command(dev,W_DUMMY);
+
+ return value;
+}
+
+/*********************************************************************
+ *
+ * Open/initialize the board.
+ *
+ * This routine should set everything up anew at each open, even
+ * registers that "should" only need to be set once at boot, so that
+ * there is a non-reboot way to recover if something goes wrong.
+ *
+ */
+static int
+de620_open(struct device *dev)
+{
+ if (request_irq(dev->irq, de620_interrupt, 0, "de620", NULL)) {
+ printk ("%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return 1;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ MOD_INC_USE_COUNT;
+ if (adapter_init(dev)) {
+ return 1;
+ }
+ dev->start = 1;
+ return 0;
+}
+
+/************************************************
+ *
+ * The inverse routine to de620_open().
+ *
+ */
+static int
+de620_close(struct device *dev)
+{
+ /* disable recv */
+ de620_set_register(dev, W_TCR, RXOFF);
+
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+
+ dev->start = 0;
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*********************************************
+ *
+ * Return current statistics
+ *
+ */
+static struct netstats *
+get_stats(struct device *dev)
+{
+ return (struct netstats *)(dev->priv);
+}
+
+/*********************************************
+ *
+ * Set or clear the multicast filter for this adaptor.
+ * (no real multicast implemented for the DE-620, but she can be promiscuous...)
+ *
+ */
+
+static void de620_set_multicast_list(struct device *dev)
+{
+ if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ { /* Enable promiscuous mode */
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
+ }
+ else
+ { /* Disable promiscuous mode, use normal mode */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+ }
+}
+
+/*******************************************************
+ *
+ * Copy a buffer to the adapter transmit page memory.
+ * Start sending.
+ */
+static int
+de620_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ unsigned long flags;
+ int len;
+ int tickssofar;
+ byte *buffer = skb->data;
+ byte using_txbuf;
+
+ /*
+ * If some higher layer thinks we've missed a
+ * tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
+ dev->tbusy = (using_txbuf == (TXBF0 | TXBF1)); /* Boolean! */
+
+ if (dev->tbusy) { /* Do timeouts, to avoid hangs. */
+ tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 5)
+ return 1;
+
+ /* else */
+ printk("%s: transmit timed out (%d), %s?\n",
+ dev->name,
+ tickssofar,
+ "network cable problem"
+ );
+ /* Restart the adapter. */
+ if (adapter_init(dev)) /* maybe close it */
+ return 1;
+ }
+
+ if ((len = skb->len) < RUNT)
+ len = RUNT;
+ if (len & 1) /* send an even number of bytes */
+ ++len;
+
+ /* Start real output */
+ save_flags(flags);
+ cli();
+
+ PRINTK(("de620_start_xmit: len=%d, bufs 0x%02x\n",
+ (int)skb->len, using_txbuf));
+
+ /* select a free tx buffer. if there is one... */
+ switch (using_txbuf) {
+ default: /* both are free: use TXBF0 */
+ case TXBF1: /* use TXBF0 */
+ de620_send_command(dev,W_CR | RW0);
+ using_txbuf |= TXBF0;
+ break;
+
+ case TXBF0: /* use TXBF1 */
+ de620_send_command(dev,W_CR | RW1);
+ using_txbuf |= TXBF1;
+ break;
+
+ case (TXBF0 | TXBF1): /* NONE!!! */
+ printk("de620: Ouch! No tx-buffer available!\n");
+ restore_flags(flags);
+ return 1;
+ break;
+ }
+ de620_write_block(dev, buffer, len);
+
+ dev->trans_start = jiffies;
+ dev->tbusy = (using_txbuf == (TXBF0 | TXBF1)); /* Boolean! */
+
+ ((struct netstats *)(dev->priv))->tx_packets++;
+
+ restore_flags(flags); /* interrupts maybe back on */
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/*****************************************************
+ *
+ * Handle the network interface interrupts.
+ *
+ */
+static void
+de620_interrupt(int irq_in, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = irq2dev_map[irq_in];
+ byte irq_status;
+ int bogus_count = 0;
+ int again = 0;
+
+ /* This might be deleted now, no crummy drivers present :-) Or..? */
+ if ((dev == NULL) || (irq != irq_in)) {
+ printk("%s: bogus interrupt %d\n", dev?dev->name:"de620", irq_in);
+ return;
+ }
+
+ cli();
+ dev->interrupt = 1;
+
+ /* Read the status register (_not_ the status port) */
+ irq_status = de620_get_register(dev, R_STS);
+
+ PRINTK(("de620_interrupt (%2.2X)\n", irq_status));
+
+ if (irq_status & RXGOOD) {
+ do {
+ again = de620_rx_intr(dev);
+ PRINTK(("again=%d\n", again));
+ }
+ while (again && (++bogus_count < 100));
+ }
+
+ dev->tbusy = (de620_tx_buffs(dev) == (TXBF0 | TXBF1)); /* Boolean! */
+
+ dev->interrupt = 0;
+ sti();
+ return;
+}
+
+/**************************************
+ *
+ * Get a packet from the adapter
+ *
+ * Send it "upstairs"
+ *
+ */
+static int
+de620_rx_intr(struct device *dev)
+{
+ struct header_buf {
+ byte status;
+ byte Rx_NextPage;
+ unsigned short Rx_ByteCount;
+ } header_buf;
+ struct sk_buff *skb;
+ int size;
+ byte *buffer;
+ byte pagelink;
+ byte curr_page;
+
+ PRINTK(("de620_rx_intr: next_rx_page = %d\n", next_rx_page));
+
+ /* Tell the adapter that we are going to read data, and from where */
+ de620_send_command(dev, W_CR | RRN);
+ de620_set_register(dev, W_RSA1, next_rx_page);
+ de620_set_register(dev, W_RSA0, 0);
+
+ /* Deep breath, and away we goooooo */
+ de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
+ PRINTK(("page status=0x%02x, nextpage=%d, packetsize=%d\n",
+ header_buf.status, header_buf.Rx_NextPage, header_buf.Rx_ByteCount));
+
+ /* Plausible page header? */
+ pagelink = header_buf.Rx_NextPage;
+ if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
+ /* Ouch... Forget it! Skip all and start afresh... */
+ printk("%s: Ring overrun? Restoring...\n", dev->name);
+		/* You win some, you lose some. And sometimes plenty... */
+ adapter_init(dev);
+ ((struct netstats *)(dev->priv))->rx_over_errors++;
+ return 0;
+ }
+
+ /* OK, this look good, so far. Let's see if it's consistent... */
+ /* Let's compute the start of the next packet, based on where we are */
+ pagelink = next_rx_page +
+ ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);
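+	/*
+	 * Illustrative: the expression above rounds (Rx_ByteCount + 4) up
+	 * to whole 256-byte pages; e.g. Rx_ByteCount = 600 occupies
+	 * ceil(604 / 256) = 3 pages, so pagelink = next_rx_page + 3.
+	 */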
+
+ /* Are we going to wrap around the page counter? */
+ if (pagelink > last_rx_page)
+ pagelink -= (last_rx_page - first_rx_page + 1);
+
+ /* Is the _computed_ next page number equal to what the adapter says? */
+ if (pagelink != header_buf.Rx_NextPage) {
+ /* Naah, we'll skip this packet. Probably bogus data as well */
+ printk("%s: Page link out of sync! Restoring...\n", dev->name);
+ next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
+ de620_send_command(dev, W_DUMMY);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ ((struct netstats *)(dev->priv))->rx_over_errors++;
+ return 0;
+ }
+ next_rx_page = pagelink;
+
+ size = header_buf.Rx_ByteCount - 4;
+ if ((size < RUNT) || (GIANT < size)) {
+ printk("%s: Illegal packet size: %d!\n", dev->name, size);
+ }
+ else { /* Good packet? */
+ skb = dev_alloc_skb(size+2);
+ if (skb == NULL) { /* Yeah, but no place to put it... */
+ printk("%s: Couldn't allocate a sk_buff of size %d.\n",
+ dev->name, size);
+ ((struct netstats *)(dev->priv))->rx_dropped++;
+ }
+ else { /* Yep! Go get it! */
+ skb_reserve(skb,2); /* Align */
+ skb->dev = dev;
+ skb->free = 1;
+ /* skb->data points to the start of sk_buff data area */
+ buffer = skb_put(skb,size);
+ /* copy the packet into the buffer */
+ de620_read_block(dev, buffer, size);
+ PRINTK(("Read %d bytes\n", size));
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb); /* deliver it "upstairs" */
+ /* count all receives */
+ ((struct netstats *)(dev->priv))->rx_packets++;
+ }
+ }
+
+ /* Let's peek ahead to see if we have read the last current packet */
+ /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
+ curr_page = de620_get_register(dev, R_CPR);
+ de620_set_register(dev, W_NPRF, next_rx_page);
+ PRINTK(("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page));
+
+ return (next_rx_page != curr_page); /* That was slightly tricky... */
+}
+
+/*********************************************
+ *
+ * Reset the adapter to a known state
+ *
+ */
+static int
+adapter_init(struct device *dev)
+{
+ int i;
+ static int was_down = 0;
+
+ if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
+ EIPRegister = NCTL0;
+ if (nic_data.Media != 1)
+ EIPRegister |= NIS0; /* not BNC */
+ }
+ else if (nic_data.Model == 2) { /* UTP */
+ EIPRegister = NCTL0 | NIS0;
+ }
+
+ if (utp)
+ EIPRegister = NCTL0 | NIS0;
+ if (bnc)
+ EIPRegister = NCTL0;
+
+ de620_send_command(dev, W_CR | RNOP | CLEAR);
+ de620_send_command(dev, W_CR | RNOP);
+
+ de620_set_register(dev, W_SCR, SCR_DEF);
+ /* disable recv to wait init */
+ de620_set_register(dev, W_TCR, RXOFF);
+
+ /* Set the node ID in the adapter */
+ for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
+ de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
+ }
+
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ next_rx_page = first_rx_page = DE620_RX_START_PAGE;
+ if (nic_data.RAM_Size)
+ last_rx_page = nic_data.RAM_Size - 1;
+ else /* 64k RAM */
+ last_rx_page = 255;
+
+ de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
+ de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
+ de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
+ de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
+ de620_send_command(dev, W_DUMMY);
+ de620_set_delay(dev);
+
+ /* Final sanity check: Anybody out there? */
+	/* Let's hope some bits from the status register make a good check */
+#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
+#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
+ /* success: X 0 0 X 0 0 X X */
+ /* ignore: EEDI RXGOOD COLS LNKS*/
+
+ if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
+ printk("Something has happened to the DE-620! Please check it"
+#ifdef SHUTDOWN_WHEN_LOST
+ " and do a new ifconfig"
+#endif
+ "! (%02x)\n", i);
+#ifdef SHUTDOWN_WHEN_LOST
+ /* Goodbye, cruel world... */
+ dev->flags &= ~IFF_UP;
+ de620_close(dev);
+#endif
+ was_down = 1;
+ return 1; /* failed */
+ }
+ if (was_down) {
+ printk("Thanks, I feel much better now!\n");
+ was_down = 0;
+ }
+
+ /* All OK, go ahead... */
+ de620_set_register(dev, W_TCR, TCR_DEF);
+
+ return 0; /* all ok */
+}
+
+/******************************************************************************
+ *
+ * Only start-up code below
+ *
+ */
+/****************************************
+ *
+ * Check if there is a DE-620 connected
+ */
+int
+de620_probe(struct device *dev)
+{
+ static struct netstats de620_netstats;
+ int i;
+ byte checkbyte = 0xa5;
+
+ /*
+	 * This is where the base_addr and irq get set.
+ * Tunable at compile-time and insmod-time
+ */
+ dev->base_addr = io;
+ dev->irq = irq;
+
+ if (de620_debug)
+ printk("%s", version);
+
+ printk("D-Link DE-620 pocket adapter");
+
+ /* Initially, configure basic nibble mode, so we can read the EEPROM */
+ NIC_Cmd = DEF_NIC_CMD;
+ de620_set_register(dev, W_EIP, EIPRegister);
+
+ /* Anybody out there? */
+ de620_set_register(dev, W_CPR, checkbyte);
+ checkbyte = de620_get_register(dev, R_CPR);
+
+ if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
+ printk(" not identified in the printer port\n");
+ return ENODEV;
+ }
+
+#if 0 /* Not yet */
+ if (check_region(dev->base_addr, 3)) {
+ printk(", port 0x%x busy\n", dev->base_addr);
+ return EBUSY;
+ }
+#endif
+ request_region(dev->base_addr, 3, "de620");
+
+ /* else, got it! */
+ printk(", Ethernet Address: %2.2X",
+ dev->dev_addr[0] = nic_data.NodeID[0]);
+ for (i = 1; i < ETH_ALEN; i++) {
+ printk(":%2.2X", dev->dev_addr[i] = nic_data.NodeID[i]);
+ dev->broadcast[i] = 0xff;
+ }
+
+ printk(" (%dk RAM,",
+ (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
+
+ if (nic_data.Media == 1)
+ printk(" BNC)\n");
+ else
+ printk(" UTP)\n");
+
+ /* Initialize the device structure. */
+ /*dev->priv = kmalloc(sizeof(struct netstats), GFP_KERNEL);*/
+ dev->priv = &de620_netstats;
+
+ memset(dev->priv, 0, sizeof(struct netstats));
+ dev->get_stats = get_stats;
+ dev->open = de620_open;
+ dev->stop = de620_close;
+ dev->hard_start_xmit = &de620_start_xmit;
+ dev->set_multicast_list = &de620_set_multicast_list;
+ /* base_addr and irq are already set, see above! */
+
+ ether_setup(dev);
+
+ /* dump eeprom */
+ if (de620_debug) {
+ printk("\nEEPROM contents:\n");
+ printk("RAM_Size = 0x%02X\n", nic_data.RAM_Size);
+ printk("NodeID = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ nic_data.NodeID[0], nic_data.NodeID[1],
+ nic_data.NodeID[2], nic_data.NodeID[3],
+ nic_data.NodeID[4], nic_data.NodeID[5]);
+ printk("Model = %d\n", nic_data.Model);
+ printk("Media = %d\n", nic_data.Media);
+ printk("SCR = 0x%02x\n", nic_data.SCR);
+ }
+
+ return 0;
+}
+
+/**********************************
+ *
+ * Read info from on-board EEPROM
+ *
+ * Note: Bitwise serial I/O to/from the EEPROM via the status _register_!
+ */
+#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
+
+static unsigned short
+ReadAWord(struct device *dev, int from)
+{
+ unsigned short data;
+ int nbits;
+
+ /* cs [__~~] SET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);
+
+ /* Send the 9-bit address from where we want to read the 16-bit word */
+ for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
+ if (from & 0x0100) { /* bit set? */
+ /* cs [~~~~] SEND 1 */
+ /* di [~~~~] */
+ /* sck [_~~_] */
+ sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
+ }
+ else {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ }
+ }
+
+ /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
+ for (data = 0, nbits = 16; nbits > 0; --nbits) {
+ /* cs [~~~~] SEND 0 */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
+ data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
+ }
+ /* cs [____] RESET SEND STATE */
+ /* di [____] */
+ /* sck [_~~_] */
+ sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);
+
+ return data;
+}
+
+static int
+read_eeprom(struct device *dev)
+{
+ unsigned short wrd;
+
+ /* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */
+ wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
+ if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[0] = wrd & 0xff;
+ nic_data.NodeID[1] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
+ if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
+ return -1; /* Nope, not a DE-620 */
+ nic_data.NodeID[2] = wrd & 0xff;
+ nic_data.NodeID[3] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
+ nic_data.NodeID[4] = wrd & 0xff;
+ nic_data.NodeID[5] = wrd >> 8;
+
+ wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
+ nic_data.RAM_Size = (wrd >> 8);
+
+ wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
+ nic_data.Model = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
+ nic_data.Media = (wrd & 0xff);
+
+ wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
+ nic_data.SCR = (wrd >> 8);
+
+ return 0; /* no errors */
+}
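+
+/*
+ * Illustrative note, assuming a little-endian i386 host: ReadAWord()
+ * assembles the 16 bits MSB first, so the first NodeID word of a genuine
+ * D-Link adapter reads back as 0x8000, which is exactly htons(0x0080).
+ * Splitting it gives NodeID[0] = 0x00 and NodeID[1] = 0x80, the start of
+ * the 00:80:c8 address series checked above.
+ */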
+
+/******************************************************************************
+ *
+ * Loadable module skeleton
+ *
+ */
+#ifdef MODULE
+static char nullname[8] = "";
+static struct device de620_dev = {
+ nullname, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, de620_probe };
+
+int
+init_module(void)
+{
+ if (register_netdev(&de620_dev) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&de620_dev);
+ release_region(de620_dev.base_addr, 3);
+}
+#endif /* MODULE */
+
+/*
+ * (add '-DMODULE' when compiling as loadable module)
+ *
+ * compile-command:
+ * gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O2 \
+ * -fomit-frame-pointer -m486 \
+ * -I/usr/src/linux/include -I../../net/inet -c de620.c
+*/
+/*
+ * Local variables:
+ * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/de620.h b/linux/src/drivers/net/de620.h
new file mode 100644
index 0000000..e8d9a88
--- /dev/null
+++ b/linux/src/drivers/net/de620.h
@@ -0,0 +1,117 @@
+/*********************************************************
+ * *
+ * Definition of D-Link DE-620 Ethernet Pocket adapter *
+ * *
+ *********************************************************/
+
+/* DE-620's CMD port Command */
+#define CS0 0x08 /* 1->0 command strobe */
+#define ICEN 0x04 /* 0=enable DL3520 host interface */
+#define DS0 0x02 /* 1->0 data strobe 0 */
+#define DS1 0x01 /* 1->0 data strobe 1 */
+
+#define WDIR 0x20 /* general 0=read 1=write */
+#define RDIR		0x00	/* (not 100% confirmed) */
+#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
+#define PS2RDIR 0x20
+
+#define IRQEN 0x10 /* 1 = enable printer IRQ line */
+#define SELECTIN 0x08 /* 1 = select printer */
+#define INITP		0x04	/* 0 = initialise printer */
+#define AUTOFEED 0x02 /* 1 = printer auto form feed */
+#define STROBE 0x01 /* 0->1 data strobe */
+
+#define RESET 0x08
+#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
+#define NCTL0 0x10
+
+/* DE-620 DIC Command */
+#define W_DUMMY 0x00 /* DIC reserved command */
+#define W_CR 0x20 /* DIC write command register */
+#define W_NPR 0x40 /* DIC write Next Page Register */
+#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
+#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
+
+/* DE-620's STAT port bits 7-4 */
+#define EMPTY 0x80 /* 1 = receive buffer empty */
+#define INTLEVEL 0x40 /* 1 = interrupt level is high */
+#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
+#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
+#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
+
+/* IDC 1 Command */
+#define W_RSA1 0xa0 /* write remote start address 1 */
+#define W_RSA0 0xa1 /* write remote start address 0 */
+#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
+#define W_DFR 0xa3 /* write delay factor register */
+#define W_CPR 0xa4 /* write current page register */
+#define W_SPR 0xa5 /* write start page register */
+#define W_EPR 0xa6 /* write end page register */
+#define W_SCR 0xa7 /* write system configuration register */
+#define W_TCR 0xa8 /* write Transceiver Configuration reg */
+#define W_EIP 0xa9 /* write EEPM Interface port */
+#define W_PAR0 0xaa /* write physical address register 0 */
+#define W_PAR1 0xab /* write physical address register 1 */
+#define W_PAR2 0xac /* write physical address register 2 */
+#define W_PAR3 0xad /* write physical address register 3 */
+#define W_PAR4 0xae /* write physical address register 4 */
+#define W_PAR5 0xaf /* write physical address register 5 */
+
+/* IDC 2 Command */
+#define R_STS 0xc0 /* read status register */
+#define R_CPR 0xc1 /* read current page register */
+#define R_BPR 0xc2 /* read boundary page register */
+#define R_TDR 0xc3 /* read time domain reflectometry reg */
+
+/* STATUS Register */
+#define EEDI 0x80 /* EEPM DO pin */
+#define TXSUC 0x40 /* tx success */
+#define T16 0x20 /* tx fail 16 times */
+#define TS1 0x40 /* 0=Tx success, 1=T16 */
+#define TS0 0x20 /* 0=Tx success, 1=T16 */
+#define RXGOOD 0x10 /* rx a good packet */
+#define RXCRC 0x08 /* rx a CRC error packet */
+#define RXSHORT 0x04 /* rx a short packet */
+#define COLS 0x02 /* coaxial collision status */
+#define LNKS 0x01 /* UTP link status */
+
+/* Command Register */
+#define CLEAR 0x10 /* reset part of hardware */
+#define NOPER 0x08 /* No Operation */
+#define RNOP 0x08
+#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
+#define RRN 0x04 /* Normal Remote Read mode */
+#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
+#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
+#define TXEN 0x01 /* 0->1 tx enable */
+
+/* System Configuration Register */
+#define TESTON 0x80 /* test host data transfer reliability */
+#define SLEEP 0x40 /* sleep mode */
+#if 0
+#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x02 /* byte mode */
+#else
+#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */
+#define BYTEMODE 0x10 /* byte mode */
+#endif
+#define NIBBLEMODE 0x00 /* nibble mode */
+#define IRQINV 0x08 /* turn off IRQ line inverter */
+#define IRQNML 0x00 /* turn on IRQ line inverter */
+#define INTON 0x04
+#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
+#define AUTOTX 0x01 /* auto tx when leave RW mode */
+
+/* Transceiver Configuration Register */
+#define JABBER 0x80 /* generate jabber condition */
+#define TXSUCINT 0x40 /* enable tx success interrupt */
+#define T16INT 0x20 /* enable T16 interrupt */
+#define RXERRPKT 0x10 /* accept CRC error or short packet */
+#define EXTERNALB2 0x0C /* external loopback 2 */
+#define EXTERNALB1 0x08 /* external loopback 1 */
+#define INTERNALB 0x04 /* internal loopback */
+#define NMLOPERATE 0x00 /* normal operation */
+#define RXPBM 0x03 /* rx physical, broadcast, multicast */
+#define RXPB 0x02 /* rx physical, broadcast */
+#define RXALL 0x01 /* rx all packet */
+#define RXOFF 0x00 /* rx disable */
diff --git a/linux/src/drivers/net/depca.c b/linux/src/drivers/net/depca.c
new file mode 100644
index 0000000..2048812
--- /dev/null
+++ b/linux/src/drivers/net/depca.c
@@ -0,0 +1,1890 @@
+/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
+
+ Written 1994, 1995 by David C. Davies.
+
+
+ Copyright 1994 David C. Davies
+ and
+ United States Government
+ (as represented by the Director, National Security Agency).
+
+ Copyright 1995 Digital Equipment Corporation.
+
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of DEPCA and EtherWORKS ethernet cards:
+
+ DEPCA (the original)
+ DE100
+ DE101
+ DE200 Turbo
+ DE201 Turbo
+ DE202 Turbo (TP BNC)
+ DE210
+ DE422 (EISA)
+
+ The driver has been tested on DE100, DE200 and DE202 cards in a
+ relatively busy network. The DE422 has been tested a little.
+
+ This driver will NOT work for the DE203, DE204 and DE205 series of
+ cards, since they have a new custom ASIC in place of the AMD LANCE
+ chip. See the 'ewrk3.c' driver in the Linux source tree for running
+ those cards.
+
+ I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
+ a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com
+
+ =========================================================================
+
+ The driver was originally based on the 'lance.c' driver from Donald
+ Becker which is included with the standard driver distribution for
+ linux. V0.4 is a complete re-write with only the kernel interface
+ remaining from the original code.
+
+ 1) Lance.c code in /linux/drivers/net/
+ 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
+ AMD, 1992 [(800) 222-9323].
+ 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
+ AMD, Pub. #17881, May 1993.
+ 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
+ AMD, Pub. #16907, May 1992
+ 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
+ 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
+ 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
+ Digital Equipment Corporation, 1989
+ 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
+ Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
+
+
+ Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
+ driver.
+
+ The original DEPCA card requires that the ethernet ROM address counter
+ be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
+ only done when a 0x08 is read as the first address octet (to minimise
+ the chances of writing over some other hardware's I/O register). The
+ NICSR accesses have been changed to byte accesses for all the cards
+ supported by this driver, since there is only one useful bit in the MSB
+ (remote boot timeout) and it is not used. Also, there is a maximum of
+ only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
+ help debugging all this (and holding my feet to the fire until I got it
+ right).
+
+ The DE200 series boards have on-board 64kB RAM for use as a shared
+ memory network buffer. Only the DE100 cards make use of a 2kB buffer
+ mode which has not been implemented in this driver (only the 32kB and
+ 64kB modes are supported [16kB/48kB for the original DEPCA]).
+
+ At the most only 2 DEPCA cards can be supported on the ISA bus because
+ there is only provision for two I/O base addresses on each card (0x300
+ and 0x200). The I/O address is detected by searching for a byte sequence
+ in the Ethernet station address PROM at the expected I/O address for the
+ Ethernet PROM. The shared memory base address is 'autoprobed' by
+ looking for the self test PROM and detecting the card name. When a
+ second DEPCA is detected, information is placed in the base_addr
+ variable of the next device structure (which is created if necessary),
+ thus enabling ethif_probe initialization for the device. More than 2
+ EISA cards can be supported, but care will be needed assigning the
+ shared memory to ensure that each slot has the correct IRQ, I/O address
+ and shared memory address assigned.
+
+ ************************************************************************
+
+ NOTE: If you are using two ISA DEPCAs, it is important that you assign
+ the base memory addresses correctly. The driver autoprobes I/O 0x300
+ then 0x200. The base memory address for the first device must be less
+ than that of the second so that the auto probe will correctly assign the
+ I/O and memory addresses on the same card. I can't think of a way to do
+ this unambiguously at the moment, since there is nothing on the cards to
+ tie I/O and memory information together.
+
+ I am unable to test 2 cards together for now, so this code is
+ unchecked. All reports, good or bad, are welcome.
+
+ ************************************************************************
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
+ {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
+ really IRQ9 in machines with 16 IRQ lines.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been added. To
+    utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy depca.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) if you wish, edit the source code near line 1530 to reflect the I/O
+ address and IRQ you're using (see also 5).
+ 3) compile depca.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+ 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
+ kernel with the depca configuration turned off and reboot.
+ 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
+ [Alan Cox: Changed the code to allow command line irq/io assignments]
+ [Dave Davies: Changed the code to allow command line mem/name
+ assignments]
+ 6) run the net startup bits for your eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod depca'.
+
+ To assign a base memory address for the shared memory when running as a
+ loadable module, see 5 above. To include the adapter name (if you have
+ no PROM but know the card name) also see 5 above. Note that this last
+ option will not work with kernel built-in depca's.
+
+ The shared memory assignment for a loadable module makes sense to avoid
+ the 'memory autoprobe' picking the wrong shared memory (for the case of
+ 2 depca's in a PC).
+
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 25-jan-94 Initial writing.
+ 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
+ 0.3 1-feb-94 Added multiple DEPCA support.
+ 0.31 4-feb-94 Added DE202 recognition.
+ 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
+ 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
+ Add jabber packet fix from murf@perftech.com
+ and becker@super.org
+ 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
+ 0.35 8-mar-94 Added DE201 recognition. Tidied up.
+ 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
+ 0.36 16-may-94 DE422 fix released.
+ 0.37 22-jul-94 Added MODULE support
+ 0.38 15-aug-94 Added DBR ROM switch in depca_close().
+ Multi DEPCA bug fix.
+ 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
+ 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
+ 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
+ <stromain@alf.dec.com>
+ 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
+ 0.385 3-apr-95 Fix a recognition bug reported by
+ <ryan.niemi@lastfrontier.com>
+ 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
+ 0.40 25-May-95 Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
+ suggestion by <heiko@colossus.escape.de>
+ 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
+ modules.
+ Add 'adapter_name' for loadable modules when no PROM.
+ Both above from a suggestion by
+ <pchen@woodruffs121.residence.gatech.edu>.
+ Add new multicasting code.
+ 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
+ 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+
+ =========================================================================
+*/
+
+static const char *version = "depca.c:v0.43 96/8/16 davies@maniac.ultranet.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/segment.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+
+#include "depca.h"
+
+#ifdef DEPCA_DEBUG
+static int depca_debug = DEPCA_DEBUG;
+#else
+static int depca_debug = 1;
+#endif
+
+#define DEPCA_NDA 0xffe0 /* No Device Address */
+
+/*
+** Ethernet PROM defines
+*/
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+/*
+** Set the number of Tx and Rx buffers. Ensure that the memory requested
+** here is <= to the amount of shared memory set up by the board switches.
+** The number of descriptors MUST BE A POWER OF 2.
+**
+** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
+*/
+#define NUM_RX_DESC 8 /* Number of RX descriptors */
+#define NUM_TX_DESC 8 /* Number of TX descriptors */
+#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
+#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+/*
+** EISA bus defines
+*/
+#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+
+/*
+** ISA Bus defines
+*/
+#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
+#define DEPCA_IO_PORTS {0x300, 0x200, 0}
+#define DEPCA_TOTAL_SIZE 0x10
+static short mem_chkd = 0;
+
+/*
+** Name <-> Adapter mapping
+*/
+#define DEPCA_SIGNATURE {"DEPCA",\
+ "DE100","DE101",\
+ "DE200","DE201","DE202",\
+ "DE210",\
+ "DE422",\
+ ""}
+static enum {DEPCA, de100, de101, de200, de201, de202, de210, de422, unknown} adapter;
+
+/*
+** Miscellaneous info...
+*/
+#define DEPCA_STRLEN 16
+#define MAX_NUM_DEPCAS 2
+
+/*
+** Memory Alignment. Each descriptor is 4 longwords long. To force a
+** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
+** DESC_ALIGN. ALIGN aligns the start address of the private memory area
+** and hence the RX descriptor ring's first entry.
+*/
+#define ALIGN4 ((u_long)4 - 1) /* 1 longword align */
+#define ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
+#define ALIGN ALIGN8 /* Keep the LANCE happy... */
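+
+/*
+** Illustrative note, using a hypothetical address: with ALIGN = ALIGN8 = 7,
+** a shared-memory cursor of 0xd0012 is rounded up by
+** (mem_start + ALIGN) & ~ALIGN to 0xd0018, the next quadword boundary,
+** before the Rx descriptor ring is placed there in depca_hw_init().
+*/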
+
+/*
+** The DEPCA Rx and Tx ring descriptors.
+*/
+struct depca_rx_desc {
+ volatile s32 base;
+ s16 buf_length; /* This length is negative 2's complement! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct depca_tx_desc {
+ volatile s32 base;
+ s16 length; /* This length is negative 2's complement! */
+ s16 misc; /* Errors and TDR info */
+};
+
+#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
+ to LANCE memory address space */
+
+/*
+** The Lance initialization block, described in databook, in common memory.
+*/
+struct depca_init {
+ u16 mode; /* Mode register */
+ u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
+ u8 mcast_table[8]; /* Multicast Hash Table. */
+ u32 rx_ring; /* Rx ring base pointer & ring length */
+ u32 tx_ring; /* Tx ring base pointer & ring length */
+};
+
+#define DEPCA_PKT_STAT_SZ 16
+#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase DEPCA_PKT_STAT_SZ */
+struct depca_private {
+ char devname[DEPCA_STRLEN]; /* Device Product String */
+ char adapter_name[DEPCA_STRLEN];/* /proc/ioports string */
+ char adapter; /* Adapter type */
+ struct depca_rx_desc *rx_ring; /* Pointer to start of RX descriptor ring */
+ struct depca_tx_desc *tx_ring; /* Pointer to start of TX descriptor ring */
+ struct depca_init init_block;/* Shadow Initialization block */
+ char *rx_memcpy[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
+ char *tx_memcpy[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
+ u_long bus_offset; /* (E)ISA bus address offset vs LANCE */
+ u_long sh_mem; /* Physical start addr of shared mem area */
+ u_long dma_buffs; /* LANCE Rx and Tx buffers start address. */
+ int rx_new, tx_new; /* The next free ring entry */
+ int rx_old, tx_old; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ struct { /* Private stats counters */
+ u32 bins[DEPCA_PKT_STAT_SZ];
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ int txRingMask; /* TX ring mask */
+ int rxRingMask; /* RX ring mask */
+ s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
+ s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
+};
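+
+/*
+** Illustrative note, a worked example with the default ring sizes: for
+** NUM_RX_DESC = 8 the mask is 7, the log2 loop in depca_hw_init() yields
+** i = 3, and so rx_rlen = 3 << 29 = 0x60000000. depca_init_ring() ORs this
+** into init_block.rx_ring together with the LANCE-relative ring address
+** (masked with LA_MASK), so one longword tells the LANCE both where the
+** ring starts and how long it is.
+*/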
+
+/*
+** The transmit ring full condition is described by the tx_old and tx_new
+** pointers by:
+** tx_old = tx_new Empty ring
+** tx_old = tx_new+1 Full ring
+** tx_old+txRingMask = tx_new Full ring (wrapped condition)
+*/
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+lp->txRingMask-lp->tx_new:\
+ lp->tx_old -lp->tx_new-1)
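+
+/*
+** Illustrative note, hypothetical values for an 8-entry ring (txRingMask=7):
+** tx_old = 2, tx_new = 5 takes the first branch and gives 2 + 7 - 5 = 4
+** free entries; tx_old = 3, tx_new = 2 takes the second branch and gives
+** 3 - 2 - 1 = 0, i.e. the full-ring condition described above.
+*/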
+
+/*
+** Public Functions
+*/
+static int depca_open(struct device *dev);
+static int depca_start_xmit(struct sk_buff *skb, struct device *dev);
+static void depca_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static int depca_close(struct device *dev);
+static int depca_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+static struct enet_statistics *depca_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+/*
+** Private functions
+*/
+static int depca_hw_init(struct device *dev, u_long ioaddr);
+static void depca_init_ring(struct device *dev);
+static int depca_rx(struct device *dev);
+static int depca_tx(struct device *dev);
+
+static void LoadCSRs(struct device *dev);
+static int InitRestartDepca(struct device *dev);
+static void DepcaSignature(char *name, u_long paddr);
+static int DevicePresent(u_long ioaddr);
+static int get_hw_addr(struct device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+static void SetMulticastFilter(struct device *dev);
+static void isa_probe(struct device *dev, u_long iobase);
+static void eisa_probe(struct device *dev, u_long iobase);
+static struct device *alloc_device(struct device *dev, u_long iobase);
+static int depca_dev_index(char *s);
+static struct device *insert_device(struct device *dev, u_long iobase, int (*init)(struct device *));
+static int load_packet(struct device *dev, struct sk_buff *skb);
+static void depca_dbg_open(struct device *dev);
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static int autoprobed = 1, loading_module = 1;
+# else
+static u_char de1xx_irq[] = {2,3,4,5,7,9,0};
+static u_char de2xx_irq[] = {5,9,10,11,15,0};
+static u_char de422_irq[] = {5,9,10,11,0};
+static u_char *depca_irq;
+static int autoprobed = 0, loading_module = 0;
+#endif /* MODULE */
+
+static char name[DEPCA_STRLEN];
+static int num_depcas = 0, num_eth = 0;
+static int mem=0; /* For loadable module assignment
+ use insmod mem=0x????? .... */
+static char *adapter_name = NULL;	/* If no PROM when loadable module
+ use insmod adapter_name=DE??? ...
+ */
+/*
+** Miscellaneous defines...
+*/
+#define STOP_DEPCA \
+ outw(CSR0, DEPCA_ADDR);\
+ outw(STOP, DEPCA_DATA)
+
+
+
+int depca_probe(struct device *dev)
+{
+ int tmp = num_depcas, status = -ENODEV;
+ u_long iobase = dev->base_addr;
+
+ if ((iobase == 0) && loading_module){
+ printk("Autoprobing is not supported when loading a module based driver.\n");
+ status = -EIO;
+ } else {
+ isa_probe(dev, iobase);
+ eisa_probe(dev, iobase);
+
+ if ((tmp == num_depcas) && (iobase != 0) && loading_module) {
+ printk("%s: depca_probe() cannot find device at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+
+ /*
+ ** Walk the device list to check that at least one device
+ ** initialised OK
+ */
+ for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
+
+ if (dev->priv) status = 0;
+ if (iobase == 0) autoprobed = 1;
+ }
+
+ return status;
+}
+
+static int
+depca_hw_init(struct device *dev, u_long ioaddr)
+{
+ struct depca_private *lp;
+ int i, j, offset, netRAM, mem_len, status=0;
+ s16 nicsr;
+ u_long mem_start=0, mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
+
+ STOP_DEPCA;
+
+ nicsr = inb(DEPCA_NICSR);
+ nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
+ outb(nicsr, DEPCA_NICSR);
+
+ if (inw(DEPCA_DATA) == STOP) {
+ do {
+ strcpy(name, (adapter_name ? adapter_name : ""));
+ mem_start = (mem ? mem & 0xf0000 : mem_base[mem_chkd++]);
+ DepcaSignature(name, mem_start);
+ } while (!mem && mem_base[mem_chkd] && (adapter == unknown));
+
+ if ((adapter != unknown) && mem_start) { /* found a DEPCA device */
+ dev->base_addr = ioaddr;
+
+ if ((ioaddr&0x0fff)==DEPCA_EISA_IO_PORTS) {/* EISA slot address */
+ printk("%s: %s at 0x%04lx (EISA slot %d)",
+ dev->name, name, ioaddr, (int)((ioaddr>>12)&0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at 0x%04lx", dev->name, name, ioaddr);
+ }
+
+ printk(", h/w address ");
+ status = get_hw_addr(dev);
+ for (i=0; i<ETH_ALEN - 1; i++) { /* get the ethernet address */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x", dev->dev_addr[i]);
+
+ if (status == 0) {
+ /* Set up the maximum amount of network RAM(kB) */
+ netRAM = ((adapter != DEPCA) ? 64 : 48);
+ if ((nicsr & _128KB) && (adapter == de422)) netRAM = 128;
+ offset = 0x0000;
+
+ /* Shared Memory Base Address */
+ if (nicsr & BUF) {
+ offset = 0x8000; /* 32kbyte RAM offset*/
+ nicsr &= ~BS; /* DEPCA RAM in top 32k */
+ netRAM -= 32;
+ }
+ mem_start += offset; /* (E)ISA start address */
+ if ((mem_len = (NUM_RX_DESC*(sizeof(struct depca_rx_desc)+RX_BUFF_SZ) +
+ NUM_TX_DESC*(sizeof(struct depca_tx_desc)+TX_BUFF_SZ) +
+ sizeof(struct depca_init))) <=
+ (netRAM<<10)) {
+ printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
+
+ /* Enable the shadow RAM. */
+ if (adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Define the device private memory */
+ dev->priv = (void *) kmalloc(sizeof(struct depca_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ lp = (struct depca_private *)dev->priv;
+ memset((char *)dev->priv, 0, sizeof(struct depca_private));
+ lp->adapter = adapter;
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ request_region(ioaddr, DEPCA_TOTAL_SIZE, lp->adapter_name);
+
+ /* Initialisation Block */
+ lp->sh_mem = mem_start;
+ mem_start += sizeof(struct depca_init);
+
+ /* Tx & Rx descriptors (aligned to a quadword boundary) */
+ mem_start = (mem_start + ALIGN) & ~ALIGN;
+ lp->rx_ring = (struct depca_rx_desc *)mem_start;
+
+ mem_start += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
+ lp->tx_ring = (struct depca_tx_desc *)mem_start;
+
+ mem_start += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
+ lp->bus_offset = mem_start & 0x00ff0000;
+ mem_start &= LA_MASK; /* LANCE re-mapped start address */
+
+ lp->dma_buffs = mem_start;
+
+ /* Finish initialising the ring information. */
+ lp->rxRingMask = NUM_RX_DESC - 1;
+ lp->txRingMask = NUM_TX_DESC - 1;
+
+ /* Calculate Tx/Rx RLEN size for the descriptors. */
+ for (i=0, j = lp->rxRingMask; j>0; i++) {
+ j >>= 1;
+ }
+ lp->rx_rlen = (s32)(i << 29);
+ for (i=0, j = lp->txRingMask; j>0; i++) {
+ j >>= 1;
+ }
+ lp->tx_rlen = (s32)(i << 29);
+
+ /* Load the initialisation block */
+ depca_init_ring(dev);
+
+ /* Initialise the control and status registers */
+ LoadCSRs(dev);
+
+ /* Enable DEPCA board interrupts for autoprobing */
+ nicsr = ((nicsr & ~IM)|IEN);
+ outb(nicsr, DEPCA_NICSR);
+
+	  /* To auto-IRQ we enable the initialization-done and DMA error
+	     interrupts. For now we will always get a DMA error. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ unsigned char irqnum;
+ autoirq_setup(0);
+
+ /* Assign the correct irq list */
+ switch (lp->adapter) {
+ case DEPCA:
+ case de100:
+ case de101:
+ depca_irq = de1xx_irq;
+ break;
+ case de200:
+ case de201:
+ case de202:
+ case de210:
+ depca_irq = de2xx_irq;
+ break;
+ case de422:
+ depca_irq = de422_irq;
+ break;
+ }
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(INEA | INIT, DEPCA_DATA);
+
+ irqnum = autoirq_report(1);
+ if (!irqnum) {
+ printk(" and failed to detect IRQ line.\n");
+ status = -ENXIO;
+ } else {
+ for (dev->irq=0,i=0; (depca_irq[i]) && (!dev->irq); i++) {
+ if (irqnum == depca_irq[i]) {
+ dev->irq = irqnum;
+ printk(" and uses IRQ%d.\n", dev->irq);
+ }
+ }
+
+ if (!dev->irq) {
+ printk(" but incorrect IRQ line detected.\n");
+ status = -ENXIO;
+ }
+ }
+#endif /* MODULE */
+ } else {
+ printk(" and assigned IRQ%d.\n", dev->irq);
+ }
+ if (status) release_region(ioaddr, DEPCA_TOTAL_SIZE);
+ } else {
+ printk(",\n requests %dkB RAM: only %dkB is available!\n",
+ (mem_len>>10), netRAM);
+ status = -ENXIO;
+ }
+ } else {
+ printk(" which has an Ethernet PROM CRC error.\n");
+ status = -ENXIO;
+ }
+ } else {
+ status = -ENXIO;
+ }
+ if (!status) {
+ if (depca_debug > 1) {
+ printk("%s", version);
+ }
+
+ /* The DEPCA-specific entries in the device structure. */
+ dev->open = &depca_open;
+ dev->hard_start_xmit = &depca_start_xmit;
+ dev->stop = &depca_close;
+ dev->get_stats = &depca_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &depca_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+ } else { /* Incorrectly initialised hardware */
+ if (dev->priv) {
+ kfree_s(dev->priv, sizeof(struct depca_private));
+ dev->priv = NULL;
+ }
+ }
+ } else {
+ status = -ENXIO;
+ }
+
+ return status;
+}
+
+
+static int
+depca_open(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ s16 nicsr;
+ int status = 0;
+
+ irq2dev_map[dev->irq] = dev;
+ STOP_DEPCA;
+ nicsr = inb(DEPCA_NICSR);
+
+ /* Make sure the shadow RAM is enabled */
+ if (adapter != DEPCA) {
+ nicsr |= SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /* Re-initialize the DEPCA... */
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+
+ depca_dbg_open(dev);
+
+ if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name, NULL)) {
+ printk("depca_open(): Requested IRQ%d is busy\n",dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /* Enable DEPCA board interrupts and turn off LED */
+ nicsr = ((nicsr & ~IM & ~LED)|IEN);
+ outb(nicsr, DEPCA_NICSR);
+ outw(CSR0,DEPCA_ADDR);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ status = InitRestartDepca(dev);
+
+ if (depca_debug > 1){
+ printk("CSR0: 0x%4.4x\n",inw(DEPCA_DATA));
+ printk("nicsr: 0x%02x\n",inb(DEPCA_NICSR));
+ }
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/* Initialize the lance Rx and Tx descriptor rings. */
+static void
+depca_init_ring(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_int i;
+ u_long p;
+
+ /* Lock out other processes whilst setting up the hardware */
+ set_bit(0, (void *)&dev->tbusy);
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Initialize the base addresses and length of each buffer in the ring */
+ for (i = 0; i <= lp->rxRingMask; i++) {
+ writel((p=lp->dma_buffs+i*RX_BUFF_SZ) | R_OWN, &lp->rx_ring[i].base);
+ writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
+ lp->rx_memcpy[i]=(char *)(p+lp->bus_offset);
+ }
+ for (i = 0; i <= lp->txRingMask; i++) {
+ writel((p=lp->dma_buffs+(i+lp->txRingMask+1)*TX_BUFF_SZ) & 0x00ffffff,
+ &lp->tx_ring[i].base);
+ lp->tx_memcpy[i]=(char *)(p+lp->bus_offset);
+ }
+
+ /* Set up the initialization block */
+ lp->init_block.rx_ring = ((u32)((u_long)lp->rx_ring)&LA_MASK) | lp->rx_rlen;
+ lp->init_block.tx_ring = ((u32)((u_long)lp->tx_ring)&LA_MASK) | lp->tx_rlen;
+
+ SetMulticastFilter(dev);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ }
+
+ lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
+
+ return;
+}
+
+/*
+** Writes a socket buffer to TX descriptor ring and starts transmission
+*/
+static int
+depca_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int status = 0;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 1*HZ) {
+ status = -1;
+ } else {
+ printk("%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, inw(DEPCA_DATA));
+
+ STOP_DEPCA;
+ depca_init_ring(dev);
+ LoadCSRs(dev);
+ dev->interrupt = UNMASK_INTERRUPTS;
+ dev->start = 1;
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ InitRestartDepca(dev);
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ return status;
+ } else if (skb == NULL) {
+ dev_tint(dev);
+ } else if (skb->len > 0) {
+ /* Enforce 1 process per h/w access */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ status = -1;
+ } else {
+ if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
+ status = load_packet(dev, skb);
+
+ if (!status) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ if (TX_BUFFS_AVAIL) {
+ dev->tbusy=0;
+ }
+ } else {
+ status = -1;
+ }
+ }
+ }
+
+ return status;
+}
+
+/*
+** The DEPCA interrupt handler.
+*/
+static void
+depca_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct depca_private *lp;
+ s16 csr0, nicsr;
+ u_long ioaddr;
+
+ if (dev == NULL) {
+ printk ("depca_interrupt(): irq %d for unknown device.\n", irq);
+ } else {
+ lp = (struct depca_private *)dev->priv;
+ ioaddr = dev->base_addr;
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = MASK_INTERRUPTS;
+
+ /* mask the DEPCA board interrupts and turn on the LED */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= (IM|LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ outw(CSR0, DEPCA_ADDR);
+ csr0 = inw(DEPCA_DATA);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & INTE, DEPCA_DATA);
+
+ if (csr0 & RINT) /* Rx interrupt (packet arrived) */
+ depca_rx(dev);
+
+ if (csr0 & TINT) /* Tx interrupt (packet sent) */
+ depca_tx(dev);
+
+ if ((TX_BUFFS_AVAIL >= 0) && dev->tbusy) { /* any resources available? */
+ dev->tbusy = 0; /* clear TX busy flag */
+ mark_bh(NET_BH);
+ }
+
+ /* Unmask the DEPCA board interrupts and turn off the LED */
+ nicsr = (nicsr & ~IM & ~LED);
+ outb(nicsr, DEPCA_NICSR);
+
+ dev->interrupt = UNMASK_INTERRUPTS;
+ }
+
+ return;
+}
+
+static int
+depca_rx(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int i, entry;
+ s32 status;
+
+ for (entry=lp->rx_new;
+ !(readl(&lp->rx_ring[entry].base) & R_OWN);
+ entry=lp->rx_new){
+ status = readl(&lp->rx_ring[entry].base) >> 16 ;
+ if (status & R_STP) { /* Remember start of frame */
+ lp->rx_old = entry;
+ }
+ if (status & R_ENP) { /* Valid frame status */
+ if (status & R_ERR) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (status & R_FRAM) lp->stats.rx_frame_errors++;
+ if (status & R_OFLO) lp->stats.rx_over_errors++;
+ if (status & R_CRC) lp->stats.rx_crc_errors++;
+ if (status & R_BUFF) lp->stats.rx_fifo_errors++;
+ } else {
+ short len, pkt_len = readw(&lp->rx_ring[entry].msg_length);
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb != NULL) {
+ unsigned char *buf;
+ skb_reserve(skb,2); /* 16 byte align the IP header */
+ buf = skb_put(skb,pkt_len);
+ skb->dev = dev;
+ if (entry < lp->rx_old) { /* Wrapped buffer */
+ len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
+ memcpy_fromio(buf, lp->rx_memcpy[lp->rx_old], len);
+ memcpy_fromio(buf + len, lp->rx_memcpy[0], pkt_len-len);
+ } else { /* Linear buffer */
+ memcpy_fromio(buf, lp->rx_memcpy[lp->rx_old], pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ lp->stats.rx_packets++;
+ for (i=1; i<DEPCA_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < (i*DEPCA_PKT_BIN_SZ)) {
+ lp->pktStats.bins[i]++;
+ i = DEPCA_PKT_STAT_SZ;
+ }
+ }
+ if (buf[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s16 *)&buf[0] == -1) &&
+ (*(s16 *)&buf[2] == -1) &&
+ (*(s16 *)&buf[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s16 *)&buf[0] == *(s16 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&buf[2] == *(s16 *)&dev->dev_addr[2]) &&
+ (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ /* Change buffer ownership for this last frame, back to the adapter */
+ for (; lp->rx_old!=entry; lp->rx_old=(lp->rx_old+1)&lp->rxRingMask) {
+ writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN,
+ &lp->rx_ring[lp->rx_old].base);
+ }
+ writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
+ }
+
+ /*
+ ** Update entry information
+ */
+ lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
+ }
+
+ return 0;
+}
+
+/*
+** Buffer sent - check for buffer errors.
+*/
+static int
+depca_tx(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int entry;
+ s32 status;
+ u_long ioaddr = dev->base_addr;
+
+ for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
+ status = readl(&lp->tx_ring[entry].base) >> 16 ;
+
+ if (status < 0) { /* Packet not yet sent! */
+ break;
+ } else if (status & T_ERR) { /* An error occurred. */
+ status = readl(&lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (status & TMD3_RTRY) lp->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR) lp->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL) lp->stats.tx_window_errors++;
+ if (status & TMD3_UFLO) lp->stats.tx_fifo_errors++;
+ if (status & (TMD3_BUFF | TMD3_UFLO)) {
+ /* Trigger an immediate send demand. */
+ outw(CSR0, DEPCA_ADDR);
+ outw(INEA | TDMD, DEPCA_DATA);
+ }
+ } else if (status & (T_MORE | T_ONE)) {
+ lp->stats.collisions++;
+ } else {
+ lp->stats.tx_packets++;
+ }
+
+ /* Update all the pointers */
+ lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
+ }
+
+ return 0;
+}
+
+static int
+depca_close(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ s16 nicsr;
+ u_long ioaddr = dev->base_addr;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ outw(CSR0, DEPCA_ADDR);
+
+ if (depca_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(DEPCA_DATA));
+ }
+
+ /*
+ ** We stop the DEPCA here -- it occasionally polls
+ ** memory if we don't.
+ */
+ outw(STOP, DEPCA_DATA);
+
+ /*
+ ** Give back the ROM in case the user wants to go to DOS
+ */
+ if (lp->adapter != DEPCA) {
+ nicsr = inb(DEPCA_NICSR);
+ nicsr &= ~SHE;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ /*
+ ** Free the associated irq
+ */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static void LoadCSRs(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+ outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
+ outw((u16)(lp->sh_mem & LA_MASK), DEPCA_DATA);
+ outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
+ outw((u16)((lp->sh_mem & LA_MASK) >> 16), DEPCA_DATA);
+ outw(CSR3, DEPCA_ADDR); /* ALE control */
+ outw(ACON, DEPCA_DATA);
+
+ outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
+
+ return;
+}
+
+static int InitRestartDepca(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int i, status=0;
+
+ /* Copy the shadow init_block to shared memory */
+ memcpy_toio((char *)lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
+
+ outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
+ outw(INIT, DEPCA_DATA); /* initialize DEPCA */
+
+ /* wait for lance to complete initialisation */
+ for (i=0;(i<100) && !(inw(DEPCA_DATA) & IDON); i++);
+
+ if (i!=100) {
+ /* clear IDON by writing a "1", enable interrupts and start lance */
+ outw(IDON | INEA | STRT, DEPCA_DATA);
+ if (depca_debug > 2) {
+ printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n",
+ dev->name, i, lp->sh_mem, inw(DEPCA_DATA));
+ }
+ } else {
+ printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n",
+ dev->name, i, lp->sh_mem, inw(DEPCA_DATA));
+ status = -1;
+ }
+
+ return status;
+}
+
+static struct enet_statistics *
+depca_get_stats(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+
+ /* Null body since there is no framing error counter */
+
+ return &lp->stats;
+}
+
+/*
+** Set or clear the multicast filter for this adaptor.
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+ if (irq2dev_map[dev->irq] != NULL) {
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
+ lp->init_block.mode |= PROM;
+ } else {
+ SetMulticastFilter(dev);
+ lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
+ }
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Big endian crc one liner is mine, all mine, ha ha ha ha!
+** LANCE calculates its hash codes big endian.
+*/
+static void SetMulticastFilter(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ char *addrs;
+ int i, j, bit, byte;
+ u16 hashcode;
+ s32 crc, poly = CRC_POLYNOMIAL_BE;
+
+ if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
+ for (i=0; i<(HASH_TABLE_LEN>>3); i++) {
+ lp->init_block.mcast_table[i] = (char)0xff;
+ }
+ } else {
+ for (i=0; i<(HASH_TABLE_LEN>>3); i++){ /* Clear the multicast table */
+ lp->init_block.mcast_table[i]=0;
+ }
+ /* Add multicast addresses */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = 0xffffffff; /* init CRC for each address */
+ for (byte=0;byte<ETH_ALEN;byte++) {/* for each address byte */
+ /* process each address bit */
+ for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+ crc = (crc << 1) ^ ((((crc<0?1:0) ^ bit) & 0x01) ? poly : 0);
+ }
+ }
+ hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
+ for (j=0;j<5;j++) { /* ... in reverse order. */
+ hashcode = (hashcode << 1) | ((crc>>=1) & 1);
+ }
+
+
+ byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+ lp->init_block.mcast_table[byte] |= bit;
+ }
+ }
+ }
+
+ return;
+}
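+
+/*
+** Illustrative note, using a hypothetical hashcode: the 6-bit hash selects
+** one of 64 filter bits, the high three bits choosing the byte and the low
+** three the bit within it. A hashcode of 0x2a (binary 101010) gives
+** byte = 0x2a >> 3 = 5 and bit = 1 << (0x2a & 0x07) = 0x04, so the filter
+** update is mcast_table[5] |= 0x04.
+*/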
+
+/*
+** ISA bus I/O device probe
+*/
+static void isa_probe(struct device *dev, u_long ioaddr)
+{
+ int i = num_depcas, maxSlots;
+ s32 ports[] = DEPCA_IO_PORTS;
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr > 0x400) return; /* EISA Address */
+ if (i >= MAX_NUM_DEPCAS) return; /* Too many ISA adapters */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ maxSlots = MAX_NUM_DEPCAS;
+ } else { /* Probe a specific location */
+ ports[i] = ioaddr;
+ maxSlots = i + 1;
+ }
+
+ for (; (i<maxSlots) && (dev!=NULL) && ports[i]; i++) {
+ if (DevicePresent(ports[i]) == 0) {
+ if (check_region(ports[i], DEPCA_TOTAL_SIZE) == 0) {
+ if ((dev = alloc_device(dev, ports[i])) != NULL) {
+ if (depca_hw_init(dev, ports[i]) == 0) {
+ num_depcas++;
+ }
+ num_eth++;
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04x.\n", dev->name,ports[i]);
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard. Up to 15 EISA devices are supported.
+*/
+static void eisa_probe(struct device *dev, u_long ioaddr)
+{
+ int i, maxSlots;
+ u_long iobase;
+ char name[DEPCA_STRLEN];
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if ((ioaddr < 0x400) && (ioaddr > 0)) return; /* ISA Address */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+ }
+ if ((iobase & 0x0fff) == 0) iobase += DEPCA_EISA_IO_PORTS;
+
+ for (; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID)) {
+ if (DevicePresent(iobase) == 0) {
+ if (check_region(iobase, DEPCA_TOTAL_SIZE) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (depca_hw_init(dev, iobase) == 0) {
+ num_depcas++;
+ }
+ num_eth++;
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n",dev->name,iobase);
+ }
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Search the entire 'eth' device list for a fixed probe. If a match isn't
+** found then check for an autoprobe or unused device location. If they
+** are not available then insert a new device structure at the end of
+** the current list.
+*/
+static struct device *
+alloc_device(struct device *dev, u_long iobase)
+{
+ struct device *adev = NULL;
+ int fixed = 0, new_dev = 0;
+
+ num_eth = depca_dev_index(dev->name);
+ if (loading_module) return dev;
+
+ while (1) {
+ if (((dev->base_addr == DEPCA_NDA) || (dev->base_addr==0)) && !adev) {
+ adev=dev;
+ } else if ((dev->priv == NULL) && (dev->base_addr==iobase)) {
+ fixed = 1;
+ } else {
+ if (dev->next == NULL) {
+ new_dev = 1;
+ } else if (strncmp(dev->next->name, "eth", 3) != 0) {
+ new_dev = 1;
+ }
+ }
+ if ((dev->next == NULL) || new_dev || fixed) break;
+ dev = dev->next;
+ num_eth++;
+ }
+ if (adev && !fixed) {
+ dev = adev;
+ num_eth = depca_dev_index(dev->name);
+ new_dev = 0;
+ }
+
+ if (((dev->next == NULL) &&
+ ((dev->base_addr != DEPCA_NDA) && (dev->base_addr != 0)) && !fixed) ||
+ new_dev) {
+ num_eth++; /* New device */
+ dev = insert_device(dev, iobase, depca_probe);
+ }
+
+ return dev;
+}
+
+/*
+** If at end of eth device list and can't use current entry, malloc
+** one up. If memory could not be allocated, print an error message.
+*/
+static struct device *
+insert_device(struct device *dev, u_long iobase, int (*init)(struct device *))
+{
+ struct device *new;
+
+ new = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
+ if (new == NULL) {
+ printk("eth%d: Device not initialised, insufficient memory\n",num_eth);
+ return NULL;
+ } else {
+ new->next = dev->next;
+ dev->next = new;
+ dev = dev->next; /* point to the new device */
+ dev->name = (char *)(dev + 1);
+ if (num_eth > 9999) {
+ sprintf(dev->name,"eth????");/* New device name */
+ } else {
+ sprintf(dev->name,"eth%d", num_eth);/* New device name */
+ }
+ dev->base_addr = iobase; /* assign the io address */
+ dev->init = init; /* initialisation routine */
+ }
+
+ return dev;
+}
+
+static int
+depca_dev_index(char *s)
+{
+ int i=0, j=0;
+
+ for (;*s; s++) {
+ if (isdigit(*s)) {
+ j=1;
+ i = (i * 10) + (*s - '0');
+ } else if (j) break;
+ }
+
+ return i;
+}
+
+/*
+** Look for a particular board name in the on-board Remote Diagnostics
+** and Boot (readb) ROM. This will also give us a clue to the network RAM
+** base address.
+*/
+static void DepcaSignature(char *name, u_long paddr)
+{
+ u_int i,j,k;
+ const char *signatures[] = DEPCA_SIGNATURE;
+ char tmpstr[16];
+
+ /* Copy the first 16 bytes of ROM */
+ for (i=0;i<16;i++) {
+ tmpstr[i] = readb(paddr+0xc000+i);
+ }
+
+ /* Check if PROM contains a valid string */
+ for (i=0;*signatures[i]!='\0';i++) {
+ for (j=0,k=0;j<16 && k<strlen(signatures[i]);j++) {
+ if (signatures[i][k] == tmpstr[j]) { /* track signature */
+ k++;
+ } else { /* lost signature; begin search again */
+ k=0;
+ }
+ }
+ if (k == strlen(signatures[i])) break;
+ }
+
+ /* Check if name string is valid, provided there's no PROM */
+ if (*name && (i == unknown)) {
+ for (i=0;*signatures[i]!='\0';i++) {
+ if (strcmp(name,signatures[i]) == 0) break;
+ }
+ }
+
+ /* Update search results */
+ strcpy(name,signatures[i]);
+ adapter = i;
+
+ return;
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all DEPCA products. Note that the original DEPCA needs
+** its ROM address counter to be initialized and enabled. Only enable
+** if the first address octet is a 0x08 - this minimises the chances of
+** messing around with some other hardware, but it assumes that this DEPCA
+** card initialized itself correctly.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+static int DevicePresent(u_long ioaddr)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength=0;
+ s8 data;
+ s16 nicsr;
+ int i, j, status = 0;
+
+ data = inb(DEPCA_PROM); /* clear counter on DEPCA */
+ data = inb(DEPCA_PROM); /* read data */
+
+ if (data == 0x08) { /* Enable counter on DEPCA */
+ nicsr = inb(DEPCA_NICSR);
+ nicsr |= AAC;
+ outb(nicsr, DEPCA_NICSR);
+ }
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(DEPCA_PROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) { /* rare case.... */
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ if (j!=sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ return status;
+}
+
+/*
+** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
+** reason: access the upper half of the PROM with x=0; access the lower half
+** with x=1.
+*/
+static int get_hw_addr(struct device *dev)
+{
+ u_long ioaddr = dev->base_addr;
+ int i, k, tmp, status = 0;
+ u_short j, x, chksum;
+
+ x = (((adapter == de100) || (adapter == de101)) ? 1 : 0);
+
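+ /*
+ ** The station address PROM stores the 6-byte address followed by a 16-bit
+ ** checksum. Accumulate a rotating sum (shift left, fold back at 0xffff)
+ ** over the three address words and compare it with the stored checksum.
+ */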
+ for (i=0,k=0,j=0;j<3;j++) {
+ k <<= 1 ;
+ if (k > 0xffff) k-=0xffff;
+
+ k += (u_char) (tmp = inb(DEPCA_PROM + x));
+ dev->dev_addr[i++] = (u_char) tmp;
+ k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
+ dev->dev_addr[i++] = (u_char) tmp;
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+
+ chksum = (u_char) inb(DEPCA_PROM + x);
+ chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
+ if (k != chksum) status = -1;
+
+ return status;
+}
+
+/*
+** Load a packet into the shared memory
+*/
+static int load_packet(struct device *dev, struct sk_buff *skb)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ int i, entry, end, len, status = 0;
+
+ entry = lp->tx_new; /* Ring around buffer number. */
+ end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
+ if (!(readl(&lp->tx_ring[end].base) & T_OWN)) {/* Enough room? */
+ /*
+ ** Caution: the write order is important here... don't set up the
+ ** ownership rights until all the other information is in place.
+ */
+ if (end < entry) { /* wrapped buffer */
+ len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
+ memcpy_toio(lp->tx_memcpy[entry], skb->data, len);
+ memcpy_toio(lp->tx_memcpy[0], skb->data + len, skb->len - len);
+ } else { /* linear buffer */
+ memcpy_toio(lp->tx_memcpy[entry], skb->data, skb->len);
+ }
+
+ /* set up the buffer descriptors */
+ len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ for (i = entry; i != end; i = (i + 1) & lp->txRingMask) {
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
+ writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
+ writew(-TX_BUFF_SZ, &lp->tx_ring[i].length);/* packet length in buffer */
+ len -= TX_BUFF_SZ;
+ }
+ /* clean out flags */
+ writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
+ writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
+ writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
+
+ /* start of packet */
+ writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
+ /* end of packet */
+ writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
+
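+    /*
+    ** Hand the descriptors back to the LANCE last-to-first so that it never
+    ** sees a partly built chain; the first (T_STP) descriptor is released
+    ** last of all, just below.
+    */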
+ for (i=end; i!=entry; --i) {
+ /* ownership of packet */
+ writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
+ if (i == 0) i=lp->txRingMask+1;
+ }
+ writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
+
+ lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
+ } else {
+ status = -1;
+ }
+
+ return status;
+}
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int EISA_signature(char *name, s32 eisa_id)
+{
+ u_int i;
+ const char *signatures[] = DEPCA_SIGNATURE;
+ char ManCode[DEPCA_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ Eisa.ID = inl(eisa_id);
+
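+ /*
+ ** The 32-bit EISA ID packs the manufacturer code as compressed 5-bit ASCII
+ ** letters and the product number as hex digits; expand the first two letters
+ ** and three digits into a printable string (e.g. "DE200") to match against
+ ** the signature table.
+ */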
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=(( Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ }
+ }
+
+ return status;
+}
+
+static void depca_dbg_open(struct device *dev)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ struct depca_init *p = (struct depca_init *)lp->sh_mem;
+ int i;
+
+ if (depca_debug > 1){
+ /* Copy the shadow init_block to shared memory */
+ memcpy_toio((char *)lp->sh_mem,&lp->init_block,sizeof(struct depca_init));
+
+ printk("%s: depca open with irq %d\n",dev->name,dev->irq);
+ printk("Descriptor head addresses:\n");
+ printk("\t0x%lx 0x%lx\n",(u_long)lp->rx_ring, (u_long)lp->tx_ring);
+ printk("Descriptor addresses:\nRX: ");
+ for (i=0;i<lp->rxRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (long) &lp->rx_ring[i].base);
+ }
+ }
+ printk("...0x%8.8lx\n", (long) &lp->rx_ring[i].base);
+ printk("TX: ");
+ for (i=0;i<lp->txRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8lx ", (long) &lp->tx_ring[i].base);
+ }
+ }
+ printk("...0x%8.8lx\n", (long) &lp->tx_ring[i].base);
+ printk("\nDescriptor buffers:\nRX: ");
+ for (i=0;i<lp->rxRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
+ printk("TX: ");
+ for (i=0;i<lp->txRingMask;i++){
+ if (i < 3) {
+ printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
+ }
+ }
+ printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
+ printk("Initialisation block at 0x%8.8lx\n",lp->sh_mem);
+ printk("\tmode: 0x%4.4x\n",readw(&p->mode));
+ printk("\tphysical address: ");
+ for (i=0;i<ETH_ALEN-1;i++){
+ printk("%2.2x:",(u_char)readb(&p->phys_addr[i]));
+ }
+ printk("%2.2x\n",(u_char)readb(&p->phys_addr[i]));
+ printk("\tmulticast hash table: ");
+ for (i=0;i<(HASH_TABLE_LEN >> 3)-1;i++){
+ printk("%2.2x:",(u_char)readb(&p->mcast_table[i]));
+ }
+ printk("%2.2x\n",(u_char)readb(&p->mcast_table[i]));
+ printk("\trx_ring at: 0x%8.8x\n",readl(&p->rx_ring));
+ printk("\ttx_ring at: 0x%8.8x\n",readl(&p->tx_ring));
+ printk("dma_buffs: 0x%8.8lx\n",lp->dma_buffs);
+ printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n",
+ (int)lp->rxRingMask + 1,
+ lp->rx_rlen);
+ printk("TX: %d Log2(txRingMask): 0x%8.8x\n",
+ (int)lp->txRingMask + 1,
+ lp->tx_rlen);
+ outw(CSR2,DEPCA_ADDR);
+ printk("CSR2&1: 0x%4.4x",inw(DEPCA_DATA));
+ outw(CSR1,DEPCA_ADDR);
+ printk("%4.4x\n",inw(DEPCA_DATA));
+ outw(CSR3,DEPCA_ADDR);
+ printk("CSR3: 0x%4.4x\n",inw(DEPCA_DATA));
+ }
+
+ return;
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+** None of the MCA IOCTLs will work here; they are for testing purposes only.
+*/
+static int depca_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct depca_private *lp = (struct depca_private *)dev->priv;
+ struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_data;
+ int i, status = 0;
+ u_long ioaddr = dev->base_addr;
+ union {
+ u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
+ u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
+ } tmp;
+
+ switch(ioc->cmd) {
+ case DEPCA_GET_HWADDR: /* Get the hardware address */
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case DEPCA_SET_HWADDR: /* Set the hardware address */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN))) {
+ memcpy_fromfs(tmp.addr,ioc->data,ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ }
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new);/* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode |= PROM; /* Set promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ while(dev->tbusy); /* Stop ring access */
+ set_bit(0, (void*)&dev->tbusy);
+ while(lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
+
+ STOP_DEPCA; /* Temporarily stop the depca. */
+ depca_init_ring(dev); /* Initialize the descriptor rings */
+ lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
+
+ LoadCSRs(dev); /* Reload CSR3 */
+ InitRestartDepca(dev); /* Resume normal operation. */
+ dev->tbusy = 0; /* Unlock the TX ring */
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case DEPCA_GET_MCA: /* Get the multicast address table */
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ if (!(status = verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, lp->init_block.mcast_table, ioc->len);
+ }
+
+ break;
+ case DEPCA_SET_MCA: /* Set a multicast address */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, ETH_ALEN*ioc->len))) {
+ memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+ set_multicast_list(dev);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_CLR_MCA: /* Clear all multicast addresses */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_GET_STATS: /* Get the driver statistics */
+ cli();
+ ioc->len = sizeof(lp->pktStats);
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
+ }
+ sti();
+
+ break;
+ case DEPCA_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case DEPCA_GET_REG: /* Get the DEPCA Registers */
+ i=0;
+ tmp.sval[i++] = inw(DEPCA_NICSR);
+ outw(CSR0, DEPCA_ADDR); /* status register */
+ tmp.sval[i++] = inw(DEPCA_DATA);
+ memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
+ ioc->len = i+sizeof(struct depca_init);
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device thisDepca = {
+ devicename, /* device name is inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x200, 7, /* I/O address, IRQ */
+ 0, 0, 0, NULL, depca_probe };
+
+static int irq=7; /* EDIT THESE LINES FOR YOUR CONFIGURATION */
+static int io=0x200; /* Or use the irq= io= options to insmod */
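+/* e.g. "insmod depca.o io=0x300 irq=5" -- illustrative values only; use the
+   settings that match your card. */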
+
+/* See depca_probe() for autoprobe messages when built as a module. */
+int
+init_module(void)
+{
+ thisDepca.irq=irq;
+ thisDepca.base_addr=io;
+
+ if (register_netdev(&thisDepca) != 0)
+ return -EIO;
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ if (thisDepca.priv) {
+ kfree(thisDepca.priv);
+ thisDepca.priv = NULL;
+ }
+ thisDepca.irq=0;
+
+ unregister_netdev(&thisDepca);
+ release_region(thisDepca.base_addr, DEPCA_TOTAL_SIZE);
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c depca.c"
+ *
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c depca.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/depca.h b/linux/src/drivers/net/depca.h
new file mode 100644
index 0000000..012f739
--- /dev/null
+++ b/linux/src/drivers/net/depca.h
@@ -0,0 +1,185 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 David C. Davies. This software may be used and distributed
+ according to the terms of the GNU Public License, incorporated herein by
+ reference.
+*/
+
+/*
+** I/O addresses. Note that the 2k buffer option is not supported in
+** this driver.
+*/
+#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
+#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
+#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
+#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
+#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
+#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
+#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
+#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
+
+/*
+** These are LANCE registers addressable through DEPCA_ADDR
+*/
+#define CSR0 0
+#define CSR1 1
+#define CSR2 2
+#define CSR3 3
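+
+/*
+** A LANCE CSR is accessed indirectly: write the register number to
+** DEPCA_ADDR, then read or write its value through DEPCA_DATA (see, for
+** example, the CSR dumps in depca_dbg_open() in depca.c).
+*/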
+
+/*
+** NETWORK INTERFACE CSR (NI_CSR) bit definitions
+*/
+
+#define TO 0x0100 /* Time Out for remote boot */
+#define SHE 0x0080 /* SHadow memory Enable */
+#define BS 0x0040 /* Bank Select */
+#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
+#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
+#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
+#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
+#define IM 0x0004 /* Interrupt Mask (1->mask) */
+#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
+#define LED 0x0001 /* LED control */
+
+/*
+** Control and Status Register 0 (CSR0) bit definitions
+*/
+
+#define ERR 0x8000 /* Error summary */
+#define BABL 0x4000 /* Babble transmitter timeout error */
+#define CERR 0x2000 /* Collision Error */
+#define MISS 0x1000 /* Missed packet */
+#define MERR 0x0800 /* Memory Error */
+#define RINT 0x0400 /* Receiver Interrupt */
+#define TINT 0x0200 /* Transmit Interrupt */
+#define IDON 0x0100 /* Initialization Done */
+#define INTR 0x0080 /* Interrupt Flag */
+#define INEA 0x0040 /* Interrupt Enable */
+#define RXON 0x0020 /* Receiver on */
+#define TXON 0x0010 /* Transmitter on */
+#define TDMD 0x0008 /* Transmit Demand */
+#define STOP 0x0004 /* Stop */
+#define STRT 0x0002 /* Start */
+#define INIT 0x0001 /* Initialize */
+#define INTM 0xff00 /* Interrupt Mask */
+#define INTE 0xfff0 /* Interrupt Enable */
+
+/*
+** CONTROL AND STATUS REGISTER 3 (CSR3)
+*/
+
+#define BSWP 0x0004 /* Byte SWaP */
+#define ACON 0x0002 /* ALE control */
+#define BCON 0x0001 /* Byte CONtrol */
+
+/*
+** Initialization Block Mode Register
+*/
+
+#define PROM 0x8000 /* Promiscuous Mode */
+#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
+#define INTL 0x0040 /* Internal Loopback */
+#define DRTY 0x0020 /* Disable Retry */
+#define COLL 0x0010 /* Force Collision */
+#define DTCR 0x0008 /* Disable Transmit CRC */
+#define LOOP 0x0004 /* Loopback */
+#define DTX 0x0002 /* Disable the Transmitter */
+#define DRX 0x0001 /* Disable the Receiver */
+
+/*
+** Receive Message Descriptor 1 (RMD1) bit definitions.
+*/
+
+#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define R_ERR 0x4000 /* Error Summary */
+#define R_FRAM 0x2000 /* Framing Error */
+#define R_OFLO 0x1000 /* Overflow Error */
+#define R_CRC 0x0800 /* CRC Error */
+#define R_BUFF 0x0400 /* Buffer Error */
+#define R_STP 0x0200 /* Start of Packet */
+#define R_ENP 0x0100 /* End of Packet */
+
+/*
+** Transmit Message Descriptor 1 (TMD1) bit definitions.
+*/
+
+#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
+#define T_ERR 0x4000 /* Error Summary */
+#define T_ADD_FCS 0x2000 /* Add FCS (frame check sequence) on transmit */
+#define T_MORE 0x1000 /* >1 retry to transmit packet */
+#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
+#define T_DEF 0x0400 /* Deferred */
+#define T_STP 0x02000000 /* Start of Packet */
+#define T_ENP 0x01000000 /* End of Packet */
+#define T_FLAGS 0xff000000 /* TX Flags Field */
+
+/*
+** Transmit Message Descriptor 3 (TMD3) bit definitions.
+*/
+
+#define TMD3_BUFF 0x8000 /* BUFFer error */
+#define TMD3_UFLO 0x4000 /* UnderFLOw error */
+#define TMD3_RES 0x2000 /* REServed */
+#define TMD3_LCOL 0x1000 /* Late COLlision */
+#define TMD3_LCAR 0x0800 /* Loss of CARrier */
+#define TMD3_RTRY 0x0400 /* ReTRY error */
+
+/*
+** EISA configuration Register (CNFG) bit definitions
+*/
+
+#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
+#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
+#define IRQ11 0x0040 /* Enable -> 1 */
+#define IRQ10 0x0020 /* Enable -> 1 */
+#define IRQ9 0x0010 /* Enable -> 1 */
+#define IRQ5 0x0008 /* Enable -> 1 */
+#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
+#define PADR16 0x0002 /* RAM on 64kB boundary */
+#define PADR17 0x0001 /* RAM on 128kB boundary */
+
+/*
+** Miscellaneous
+*/
+#define HASH_TABLE_LEN 64 /* Bits */
+#define HASH_BITS 0x003f /* 6 LS bits */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
+#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define DEPCAIOCTL SIOCDEVPRIVATE
+
+struct depca_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
+#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
+#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define DEPCA_GET_MCA 0x06 /* Get a multicast address */
+#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
+#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */
+#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */
+#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
+#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define DEPCA_GET_REG 0x0c /* Get the Register contents */
+#define DEPCA_SET_REG 0x0d /* Set the Register contents */
+#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
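+
+/*
+** Usage sketch (not part of the driver): user code reaches depca_ioctl()
+** via SIOCDEVPRIVATE on an ordinary socket. The interface name and buffer
+** below are illustrative only.
+**
+**	struct ifreq ifr;
+**	struct depca_ioctl *ioc = (struct depca_ioctl *)&ifr.ifr_data;
+**	unsigned char hwaddr[6];
+**
+**	strcpy(ifr.ifr_name, "eth0");
+**	ioc->cmd  = DEPCA_GET_HWADDR;
+**	ioc->data = hwaddr;		/* driver fills in ioc->len */
+**	ioctl(sock, DEPCAIOCTL, &ifr);
+*/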
+
diff --git a/linux/src/drivers/net/e2100.c b/linux/src/drivers/net/e2100.c
new file mode 100644
index 0000000..be4185a
--- /dev/null
+++ b/linux/src/drivers/net/e2100.c
@@ -0,0 +1,456 @@
+/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
+/*
+ Written 1993-1994 by Donald Becker.
+
+ Copyright 1994 by Donald Becker.
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency. This software may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ This is a driver for the Cabletron E2100 series ethercards.
+
+ The Author may be reached as becker@cesdis.gsfc.nasa.gov, or
+ C/O Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ The E2100 series ethercard is a fairly generic shared memory 8390
+ implementation. The only unusual aspect is the way the shared memory
+ registers are set: first you do an inb() in what is normally the
+ station address region, and the low three bits of the next outb() *address*
+ are used as the write value for that register. Either someone wasn't
+ too used to dem bit en bites, or they were trying to obfuscate the
+ programming interface.
+
+ There is an additional complication when setting the window on the packet
+ buffer. You must first do a read into the packet buffer region, with the
+ low 8 address bits of that read address setting the page for the start of
+ the packet buffer window, and then do the above operation. See mem_on() for details.
+
+ One bug on the chip is that even a hard reset won't disable the memory
+ window, usually resulting in a hung machine if mem_off() isn't called.
+ If this happens, you must power down the machine for about 30 seconds.
+*/
+
+static const char *version =
+ "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "8390.h"
+
+static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
+
+/* Offsets from the base_addr.
+ Read from the ASIC register, and the low three bits of the next outb()
+ address is used to set the corresponding register. */
+#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
+#define E21_ASIC 0x10
+#define E21_MEM_ENABLE 0x10
+#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
+#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
+#define E21_MEM_BASE 0x11
+#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
+#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
+#define E21_MEDIA 0x14 /* (alias). */
+#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
+#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
+#define E21_SAPROM 0x10 /* Offset to station address data. */
+#define E21_IO_EXTENT 0x20
+
+static inline void mem_on(short port, volatile char *mem_base,
+ unsigned char start_page )
+{
+ /* This is a little weird: set the shared memory window by doing a
+ read. The low address bits specify the starting page. */
+ mem_base[start_page];
+ inb(port + E21_MEM_ENABLE);
+ outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
+}
+
+static inline void mem_off(short port)
+{
+ inb(port + E21_MEM_ENABLE);
+ outb(0x00, port + E21_MEM_ENABLE);
+}
+
+/* In other drivers I put the TX pages first, but the E2100 window circuitry
+ is designed to have a 4K Tx region last. The windowing circuitry wraps the
+ window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
+ appear contiguously in the window. */
+#define E21_RX_START_PG 0x00 /* First page of RX buffer */
+#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
+#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
+#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
+
+int e2100_probe(struct device *dev);
+int e21_probe1(struct device *dev, int ioaddr);
+
+static int e21_open(struct device *dev);
+static void e21_reset_8390(struct device *dev);
+static void e21_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void e21_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void e21_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+static int e21_close(struct device *dev);
+
+
+/* Probe for the E2100 series ethercards. These cards have an 8390 at the
+ base address and the station address at both offset 0x10 and 0x18. I read
+ the station address from offset 0x18 to avoid the dataport of NE2000
+ ethercards, and look for Ctron's unique ID (first three octets of the
+ station address).
+ */
+
+int e2100_probe(struct device *dev)
+{
+ int *port;
+ int base_addr = dev->base_addr;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return e21_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (port = e21_probe_list; *port; port++) {
+ if (check_region(*port, E21_IO_EXTENT))
+ continue;
+ if (e21_probe1(dev, *port) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+
+int e21_probe1(struct device *dev, int ioaddr)
+{
+ int i, status;
+ unsigned char *station_addr = dev->dev_addr;
+ static unsigned version_printed = 0;
+
+ /* First check the station address for the Ctron prefix. */
+ if (inb(ioaddr + E21_SAPROM + 0) != 0x00
+ || inb(ioaddr + E21_SAPROM + 1) != 0x00
+ || inb(ioaddr + E21_SAPROM + 2) != 0x1d)
+ return ENODEV;
+
+ /* Verify by making certain that there is an 8390 there. */
+ outb(E8390_NODMA + E8390_STOP, ioaddr);
+ SLOW_DOWN_IO;
+ status = inb(ioaddr);
+ if (status != 0x21 && status != 0x23)
+ return ENODEV;
+
+ /* Read the station address PROM. */
+ for (i = 0; i < 6; i++)
+ station_addr[i] = inb(ioaddr + E21_SAPROM + i);
+
+ inb(ioaddr + E21_MEDIA); /* Point to media selection. */
+ outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("e2100.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ printk("%s: E21** at %#3x,", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %02X", station_addr[i]);
+
+ if (dev->irq < 2) {
+ int irqlist[] = {15,11,10,12,5,9,3,4}, i;
+ for (i = 0; i < 8; i++)
+ if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
+ dev->irq = irqlist[i];
+ break;
+ }
+ if (i >= 8) {
+ printk(" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+ } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* Grab the region so we can find a different board if IRQ select fails. */
+ request_region(ioaddr, E21_IO_EXTENT, "e2100");
+
+ /* The 8390 is at the base address. */
+ dev->base_addr = ioaddr;
+
+ ei_status.name = "E2100";
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = E21_TX_START_PG;
+ ei_status.rx_start_page = E21_RX_START_PG;
+ ei_status.stop_page = E21_RX_STOP_PG;
+ ei_status.saved_irq = dev->irq;
+
+ /* Check the media port used. The port can be passed in on the
+ low mem_end bits. */
+ if (dev->mem_end & 15)
+ dev->if_port = dev->mem_end & 7;
+ else {
+ dev->if_port = 0;
+ inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
+ for(i = 0; i < 6; i++)
+ if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
+ dev->if_port = 1;
+ break;
+ }
+ }
+
+ /* Never map in the E21 shared memory unless you are actively using it.
+ Also, the shared memory effectively has only one setting -- spread all
+ over the 128K region! */
+ if (dev->mem_start == 0)
+ dev->mem_start = 0xd0000;
+
+#ifdef notdef
+ /* These values are unused. The E2100 has a 2K window into the packet
+ buffer. The window can be set to start on any page boundary. */
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start + 2*1024;
+#endif
+
+ printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
+ dev->if_port ? "secondary" : "primary", dev->mem_start);
+
+ ei_status.reset_8390 = &e21_reset_8390;
+ ei_status.block_input = &e21_block_input;
+ ei_status.block_output = &e21_block_output;
+ ei_status.get_8390_hdr = &e21_get_8390_hdr;
+ dev->open = &e21_open;
+ dev->stop = &e21_close;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static int
+e21_open(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (request_irq(dev->irq, ei_interrupt, 0, "e2100", NULL)) {
+ return EBUSY;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Set the interrupt line and memory base on the hardware. */
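+ /* As described in the comments at the top of this file: the inb()
+    selects an ASIC register, and the low three bits of the following
+    outb() *address* carry the value actually written. */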
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
+ + (dev->if_port ? E21_ALT_IFPORT : 0));
+ inb(ioaddr + E21_MEM_BASE);
+ outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
+
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+e21_reset_8390(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outb(0x01, ioaddr);
+ if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. We put the 2k window so the header page
+ appears at the start of the shared memory. */
+
+static void
+e21_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ short ioaddr = dev->base_addr;
+ char *shared_mem = (char *)dev->mem_start;
+
+ mem_on(ioaddr, shared_mem, ring_page);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(shared_mem);
+#endif
+
+ /* Turn off memory access: we would need to reprogram the window anyway. */
+ mem_off(ioaddr);
+
+}
+
+/* Block input and output are easy on shared memory ethercards.
+ The E21xx makes block_input() especially easy by wrapping the top
+ ring buffer to the bottom automatically. */
+static void
+e21_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ short ioaddr = dev->base_addr;
+ char *shared_mem = (char *)dev->mem_start;
+
+ mem_on(ioaddr, shared_mem, (ring_offset>>8));
+
+ /* Packet is always in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0);
+
+ mem_off(ioaddr);
+}
+
+static void
+e21_block_output(struct device *dev, int count, const unsigned char *buf,
+ const int start_page)
+{
+ short ioaddr = dev->base_addr;
+ volatile char *shared_mem = (char *)dev->mem_start;
+
+ /* Set the shared memory window start by doing a read, with the low address
+ bits specifying the starting page. */
+ readb(shared_mem + start_page);
+ mem_on(ioaddr, shared_mem, start_page);
+
+ memcpy_toio(shared_mem, buf, count);
+ mem_off(ioaddr);
+}
+
+static int
+e21_close(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ free_irq(dev->irq, NULL);
+ dev->irq = ei_status.saved_irq;
+
+ /* Shut off the interrupt line and secondary interface. */
+ inb(ioaddr + E21_IRQ_LOW);
+ outb(0, ioaddr + E21_ASIC);
+ inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
+ outb(0, ioaddr + E21_ASIC);
+
+ irq2dev_map[dev->irq] = NULL;
+
+ ei_close(dev);
+
+ /* Double-check that the memory has been turned off, because really
+ really bad things happen if it isn't. */
+ mem_off(ioaddr);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry e21_drv =
+{"e21", e21_probe1, E21_IO_EXTENT, e21_probe_list};
+#endif
+
+
+#ifdef MODULE
+#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_E21_CARDS] = { 0, };
+static struct device dev_e21[MAX_E21_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_E21_CARDS] = { 0, };
+static int irq[MAX_E21_CARDS] = { 0, };
+static int mem[MAX_E21_CARDS] = { 0, };
+static int xcvr[MAX_E21_CARDS] = { 0, }; /* choose int. or ext. xcvr */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct device *dev = &dev_e21[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
+ dev->init = e2100_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
+ struct device *dev = &dev_e21[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: e21_close() handles free_irq + irq2dev map */
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(dev->base_addr, E21_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c e2100.c"
+ * version-control: t
+ * tab-width: 4
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/eepro.c b/linux/src/drivers/net/eepro.c
new file mode 100644
index 0000000..3d4fc57
--- /dev/null
+++ b/linux/src/drivers/net/eepro.c
@@ -0,0 +1,1407 @@
+/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
+/*
+ Written 1994-1998 by Bao C. Ha.
+
+ Copyright (C) 1994-1998 by Bao C. Ha.
+
+ This software may be used and distributed
+ according to the terms of the GNU Public License,
+ incorporated herein by reference.
+
+ The author may be reached at bao@hacom.net
+ or Hacom, 2477 Wrightsboro Rd., Augusta, GA 30904.
+
+ Things remaining to do:
+ Better record keeping of errors.
+ Eliminate transmit interrupt to reduce overhead.
+ Implement "concurrent processing". I won't be doing it!
+
+ Bugs:
+
+ If the 82595 is not detected during a reboot (warm reset), disabling
+ the FLASH memory should fix it. This is a hardware compatibility
+ problem.
+
+ Versions:
+
+ 0.10c Some cosmetic changes. (9/28/98, BCH)
+
+ 0.10b Should work now with (some) Pro/10+. At least for
+ me (and my two cards) it does. _No_ guarantee for
+ function with non-Pro/10+ cards! (don't have any)
+ (RMC, 9/11/96)
+
+ 0.10 Added support for the Etherexpress Pro/10+. The
+ IRQ map was changed significantly from the old
+ pro/10. The new interrupt map was provided by
+ Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu).
+ (BCH, 9/3/96)
+
+ 0.09 Fixed a race condition in the transmit algorithm,
+ which causes crashes under heavy load with fast
+ pentium computers. The performance should also
+ improve a bit. The size of RX buffer, and hence
+ TX buffer, can also be changed via lilo or insmod.
+ (BCH, 7/31/96)
+
+ 0.08 Implement 32-bit I/O for the 82595TX and 82595FX
+ based lan cards. Disable full-duplex mode if TPE
+ is not used. (BCH, 4/8/96)
+
+ 0.07a Fix a stat report which counts every packet as a
+ heart-beat failure. (BCH, 6/3/95)
+
+ 0.07 Modified to support all other 82595-based lan cards.
+ The IRQ vector of the EtherExpress Pro will be set
+ according to the value saved in the EEPROM. For other
+ cards, I will do autoirq_request() to grab the next
+ available interrupt vector. (BCH, 3/17/95)
+
+ 0.06a,b Interim released. Minor changes in the comments and
+ print out format. (BCH, 3/9/95 and 3/14/95)
+
+ 0.06 First stable release that I am comfortable with. (BCH,
+ 3/2/95)
+
+ 0.05 Complete testing of multicast. (BCH, 2/23/95)
+
+ 0.04 Adding multicast support. (BCH, 2/14/95)
+
+ 0.03 First widely alpha release for public testing.
+ (BCH, 2/14/95)
+
+*/
+
+static const char *version =
+ "eepro.c: v0.10c 9/28/98 Bao C. Ha (bao@hacom.net)\n";
+
+#include <linux/module.h>
+
+/*
+ Sources:
+
+ This driver wouldn't have been written without the availability
+ of Crynwr's Lan595 driver source code. It helped me familiarize
+ myself with the 82595 chipset while waiting for the Intel
+ documentation. I also learned how to detect the 82595 using
+ the packet driver's technique.
+
+ This driver is written by cutting and pasting the skeleton.c driver
+ provided by Donald Becker. I also borrowed the EEPROM routine from
+ Donald Becker's 82586 driver.
+
+ Datasheet for the Intel 82595 (including the TX and FX version). It
+ provides just enough info that the casual reader might think that it
+ documents the i82595.
+
+ The User Manual for the 82595. It provides a lot of the missing
+ information.
+
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* First, a few definitions that the brave might change. */
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int eepro_portlist[] =
+ { 0x300, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+
+static unsigned int net_debug = NET_DEBUG;
+
+/* The number of low I/O ports used by the ethercard. */
+
+#define EEPRO_IO_EXTENT 16
+
+/* Different 82595 chips */
+
+#define LAN595 0
+#define LAN595TX 1
+#define LAN595FX 2
+
+/* Information that needs to be kept for each board. */
+struct eepro_local {
+ struct enet_statistics stats;
+ unsigned rx_start;
+ unsigned tx_start; /* start of the transmit chain */
+ int tx_last; /* pointer to last packet in the transmit chain */
+ unsigned tx_end; /* end of the transmit chain (plus 1) */
+ int eepro; /* 1 for the EtherExpress Pro/10,
+ 2 for the EtherExpress Pro/10+,
+ 0 for other 82595-based lan cards. */
+ int version; /* a flag to indicate if this is a TX or FX
+ version of the 82595 chip. */
+ int stepping;
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+
+#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */
+#define SA_ADDR1 0xaa
+#define SA_ADDR2 0x00
+
+#define SA2_ADDR0 0x00 /* Etherexpress Pro/10+ */
+#define SA2_ADDR1 0xa0
+#define SA2_ADDR2 0xc9
+
+#define SA3_ADDR0 0x00 /* more Etherexpress Pro/10+ */
+#define SA3_ADDR1 0xaa
+#define SA3_ADDR2 0x00
+#define SA3_ADDR3 0xc9
+
+/* Index to functions, as function prototypes. */
+
+extern int eepro_probe(struct device *dev);
+
+static int eepro_probe1(struct device *dev, short ioaddr);
+static int eepro_open(struct device *dev);
+static int eepro_send_packet(struct sk_buff *skb, struct device *dev);
+static void eepro_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void eepro_rx(struct device *dev);
+static void eepro_transmit_interrupt(struct device *dev);
+static int eepro_close(struct device *dev);
+static struct enet_statistics *eepro_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+static int read_eeprom(int ioaddr, int location);
+static void hardware_send_packet(struct device *dev, void *buf, short length);
+static int eepro_grab_irq(struct device *dev);
+
+/*
+ Details of the i82595.
+
+You will need either the datasheet or the user manual to understand what
+is going on here. The 82595 is very different from the 82586, 82593.
+
+The receive algorithm in eepro_rx() is just an implementation of the
+RCV ring structure that the Intel 82595 imposes at the hardware level.
+The receive buffer is set at 24K, and the transmit buffer is 8K. I
+am assuming that the total buffer memory is 32K, which is true for the
+Intel EtherExpress Pro/10. If it is less than that on a generic card,
+the driver will be broken.
+
+The transmit algorithm in the hardware_send_packet() is similar to the
+one in the eepro_rx(). The transmit buffer is a ring linked list.
+I just queue the next available packet to the end of the list. In my
+system, the 82595 is so fast that the list seems to always contain a
+single packet. In other systems with faster computers and more congested
+network traffic, the ring linked list should improve performance by
+allowing up to 8K worth of packets to be queued.
+
+The sizes of the receive and transmit buffers can now be changed via lilo
+or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0"
+where rx-buffer is in KB units. Modules use the parameter mem, which is
+also in KB units, for example "insmod io=io-address irq=0 mem=rx-buffer".
+The receive buffer has to be more than 3K and less than 29K; otherwise,
+it is reset to the default of 24K, and, hence, 8K for the transmit
+buffer (transmit-buffer = 32K - receive-buffer).
+
+*/
+
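+/* Worked example with the 24K default: rcv_ram = 0x6000, so the receive
+   area spans 0x0000-0x5ffe (upper-limit page 0x5f) and the transmit area
+   spans 0x6000-0x7ffe (lower-limit page 0x60, upper-limit page 0x7f). */
+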
+#define RAM_SIZE 0x8000
+#define RCV_HEADER 8
+#define RCV_RAM 0x6000 /* 24KB default for RCV buffer */
+#define RCV_LOWER_LIMIT 0x00 /* 0x0000 */
+
+/* #define RCV_UPPER_LIMIT ((RCV_RAM - 2) >> 8) */ /* 0x5ffe */
+#define RCV_UPPER_LIMIT (((rcv_ram) - 2) >> 8)
+
+/* #define XMT_RAM (RAM_SIZE - RCV_RAM) */ /* 8KB for XMT buffer */
+#define XMT_RAM (RAM_SIZE - (rcv_ram)) /* 8KB for XMT buffer */
+
+/* #define XMT_LOWER_LIMIT (RCV_RAM >> 8) */ /* 0x6000 */
+#define XMT_LOWER_LIMIT ((rcv_ram) >> 8)
+#define XMT_UPPER_LIMIT ((RAM_SIZE - 2) >> 8) /* 0x7ffe */
+#define XMT_HEADER 8
+
+#define RCV_DONE 0x0008
+#define RX_OK 0x2000
+#define RX_ERROR 0x0d81
+
+#define TX_DONE_BIT 0x0080
+#define CHAIN_BIT 0x8000
+#define XMT_STATUS 0x02
+#define XMT_CHAIN 0x04
+#define XMT_COUNT 0x06
+
+#define BANK0_SELECT 0x00
+#define BANK1_SELECT 0x40
+#define BANK2_SELECT 0x80
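+
+/* The 82595 multiplexes its registers into three banks; writing one of the
+   BANKx_SELECT values to the command register (offset 0) switches banks,
+   which is what the outb(BANKn_SELECT, ioaddr) calls below do. */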
+
+/* Bank 0 registers */
+
+#define COMMAND_REG 0x00 /* Register 0 */
+#define MC_SETUP 0x03
+#define XMT_CMD 0x04
+#define DIAGNOSE_CMD 0x07
+#define RCV_ENABLE_CMD 0x08
+#define RCV_DISABLE_CMD 0x0a
+#define STOP_RCV_CMD 0x0b
+#define RESET_CMD 0x0e
+#define POWER_DOWN_CMD 0x18
+#define RESUME_XMT_CMD 0x1c
+#define SEL_RESET_CMD 0x1e
+#define STATUS_REG 0x01 /* Register 1 */
+#define RX_INT 0x02
+#define TX_INT 0x04
+#define EXEC_STATUS 0x30
+#define ID_REG 0x02 /* Register 2 */
+#define R_ROBIN_BITS 0xc0 /* round robin counter */
+#define ID_REG_MASK 0x2c
+#define ID_REG_SIG 0x24
+#define AUTO_ENABLE 0x10
+#define INT_MASK_REG 0x03 /* Register 3 */
+#define RX_STOP_MASK 0x01
+#define RX_MASK 0x02
+#define TX_MASK 0x04
+#define EXEC_MASK 0x08
+#define ALL_MASK 0x0f
+#define IO_32_BIT 0x10
+#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
+#define RCV_STOP 0x06
+#define XMT_BAR 0x0a
+#define HOST_ADDRESS_REG 0x0c
+#define IO_PORT 0x0e
+#define IO_PORT_32_BIT 0x0c
+
+/* Bank 1 registers */
+
+#define REG1 0x01
+#define WORD_WIDTH 0x02
+#define INT_ENABLE 0x80
+#define INT_NO_REG 0x02
+#define RCV_LOWER_LIMIT_REG 0x08
+#define RCV_UPPER_LIMIT_REG 0x09
+#define XMT_LOWER_LIMIT_REG 0x0a
+#define XMT_UPPER_LIMIT_REG 0x0b
+
+/* Bank 2 registers */
+
+#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
+#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
+#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
+#define REG2 0x02
+#define PRMSC_Mode 0x01
+#define Multi_IA 0x20
+#define REG3 0x03
+#define TPE_BIT 0x04
+#define BNC_BIT 0x20
+#define REG13 0x0d
+#define FDX 0x00
+#define A_N_ENABLE 0x02
+
+#define I_ADD_REG0 0x04
+#define I_ADD_REG1 0x05
+#define I_ADD_REG2 0x06
+#define I_ADD_REG3 0x07
+#define I_ADD_REG4 0x08
+#define I_ADD_REG5 0x09
+
+#define EEPROM_REG 0x0a
+#define EESK 0x01
+#define EECS 0x02
+#define EEDI 0x04
+#define EEDO 0x08
+
+/* Check for a network adaptor of this type, and return '0' if one exists.
+
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+
+ */
+
+#ifdef HAVE_DEVLIST
+
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+
+struct netdev_entry netcard_drv =
+{"eepro", eepro_probe1, EEPRO_IO_EXTENT, eepro_portlist};
+
+#else
+
+int
+eepro_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return eepro_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; eepro_portlist[i]; i++) {
+ int ioaddr = eepro_portlist[i];
+ if (check_region(ioaddr, EEPRO_IO_EXTENT))
+ continue;
+
+ if (eepro_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* This is the real probe routine. Linux has a history of friendly device
+ probes on the ISA bus. A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+int
+eepro_probe1(struct device *dev, short ioaddr)
+{
+ unsigned short station_addr[6], id, counter;
+ int i;
+ int eepro;
+ const char *ifmap[] = {"AUI", "10Base2", "10BaseT"};
+ enum iftype { AUI=0, BNC=1, TPE=2 };
+
+ /* Now, we are going to check for the signature of the
+ ID_REG (register 2 of bank 0) */
+ if (((id=inb(ioaddr + ID_REG)) & ID_REG_MASK) == ID_REG_SIG) {
+
+ /* We seem to have the 82595 signature, let's
+ play with its counter (last 2 bits of
+ register 2 of bank 0) to be sure. */
+
+ counter = (id & R_ROBIN_BITS);
+ if (((id=inb(ioaddr+ID_REG)) & R_ROBIN_BITS) ==
+ (counter + 0x40)) {
+
+ /* Yes, the 82595 has been found */
+
+ /* Now, get the ethernet hardware address from
+ the EEPROM */
+
+ station_addr[0] = read_eeprom(ioaddr, 2);
+ station_addr[1] = read_eeprom(ioaddr, 3);
+ station_addr[2] = read_eeprom(ioaddr, 4);
+
+ /* Check the station address for the manufacturer's code */
+
+ if ((station_addr[2] == 0x00aa) && (station_addr[1]!= 0x00c9)) {
+ eepro = 1;
+ printk("%s: Intel EtherExpress Pro/10 ISA at %#x,",
+ dev->name, ioaddr);
+ } else
+ if ( (station_addr[2] == 0x00a0)
+ || ((station_addr[2] == 0x00aa) && (station_addr[1] == 0x00c9) )) {
+ eepro = 2;
+ printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
+ dev->name, ioaddr);
+ }
+ else {
+ eepro = 0;
+ printk("%s: Intel 82595-based lan card at %#x,",
+ dev->name, ioaddr);
+ }
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ for (i=0; i < 6; i++) {
+ dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+ }
+
+ if ((dev->mem_end & 0x3f) < 3 || /* RX buffer must be more than 3K */
+ (dev->mem_end & 0x3f) > 29) /* and less than 29K */
+ dev->mem_end = RCV_RAM; /* or it will be set to 24K */
+ else dev->mem_end = 1024*dev->mem_end; /* Maybe I should shift << 10 */
+
+ /* From now on, dev->mem_end contains the actual size of rx buffer */
+
+ if (net_debug > 3)
+ printk(", %dK RCV buffer", (int)(dev->mem_end)/1024);
+
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ id = inb(ioaddr + REG3);
+ if (id & TPE_BIT)
+ dev->if_port = TPE;
+ else dev->if_port = BNC;
+
+ if (net_debug>3)
+ printk("id: %x\n", id);
+
+ if (dev->irq < 2 && eepro) {
+ i = read_eeprom(ioaddr, 1);
+ if (eepro == 1)
+ switch (i & 0x07) {
+ case 0: dev->irq = 9; break;
+ case 1: dev->irq = 3; break;
+ case 2: dev->irq = 5; break;
+ case 3: dev->irq = 10; break;
+ case 4: dev->irq = 11; break;
+ default: /* should never get here !!!!! */
+ printk(" illegal interrupt vector stored in EEPROM.\n");
+ return ENODEV;
+ }
+ else switch (i & 0x07) {
+ case 0: dev->irq = 3; break;
+ case 1: dev->irq = 4; break;
+ case 2: dev->irq = 5; break;
+ case 3: dev->irq = 7; break;
+ case 4: dev->irq = 9; break;
+ case 5: dev->irq = 10; break;
+ case 6: dev->irq = 11; break;
+ case 7: dev->irq = 12; break;
+ }
+ }
+ else if (dev->irq == 2)
+ dev->irq = 9;
+
+ if (dev->irq > 2) {
+ printk(", IRQ %d, %s.\n", dev->irq,
+ ifmap[dev->if_port]);
+ if (request_irq(dev->irq, &eepro_interrupt, 0, "eepro", NULL)) {
+ printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ }
+ else printk(", %s.\n", ifmap[dev->if_port]);
+
+ if ((dev->mem_start & 0xf) > 0) /* I don't know if this is */
+ net_debug = dev->mem_start & 7; /* still useful or not */
+
+ if (net_debug > 3) {
+ i = read_eeprom(ioaddr, 5);
+ if (i & 0x2000) /* bit 13 of EEPROM word 5 */
+ printk("%s: Concurrent Processing is enabled but not used!\n",
+ dev->name);
+ }
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ request_region(ioaddr, EEPRO_IO_EXTENT, "eepro");
+
+ /* Initialize the device structure */
+ dev->priv = kmalloc(sizeof(struct eepro_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct eepro_local));
+
+ dev->open = eepro_open;
+ dev->stop = eepro_close;
+ dev->hard_start_xmit = eepro_send_packet;
+ dev->get_stats = eepro_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of the device structure with
+ ethernet generic values */
+
+ ether_setup(dev);
+
+ outb(RESET_CMD, ioaddr); /* RESET the 82595 */
+
+ return 0;
+ }
+ else return ENODEV;
+ }
+ else if (net_debug > 3)
+ printk ("EtherExpress Pro probed failed!\n");
+ return ENODEV;
+}
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+ there is a non-reboot way to recover if something goes wrong.
+ */
+
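+/* Map a system IRQ number onto the value programmed into the 82595's
+   interrupt-select field (INT_NO_REG): irqrmap is for the original Pro/10,
+   irqrmap2 for the Pro/10+; -1 marks IRQs the hardware cannot use. */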
+static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
+static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
+
+static int
+eepro_grab_irq(struct device *dev)
+{
+ int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12 };
+ int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
+
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Enable the interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg | INT_ENABLE, ioaddr + REG1);
+
+ outb(BANK0_SELECT, ioaddr); /* be CAREFUL, BANK 0 now */
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Let the EXEC event interrupt */
+ outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG);
+
+ do {
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+ temp_reg = inb(ioaddr + INT_NO_REG);
+ outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+ if (request_irq (*irqp, NULL, 0, "bogus", NULL) != -EBUSY) {
+ /* Twinkle the interrupt, and check if it's seen */
+ autoirq_setup(0);
+ outb(DIAGNOSE_CMD, ioaddr); /* RESET the 82595 */
+
+ if (*irqp == autoirq_report(2) && /* It's a good IRQ line */
+ (request_irq(dev->irq = *irqp, &eepro_interrupt, 0, "eepro", NULL) == 0))
+ break;
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+ }
+ } while (*++irqp);
+
+ outb(BANK1_SELECT, ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Mask all the interrupts. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ return dev->irq;
+}
+
+static int
+eepro_open(struct device *dev)
+{
+ unsigned short temp_reg, old8, old9;
+ int i, ioaddr = dev->base_addr, rcv_ram = dev->mem_end;
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+
+ if (net_debug > 3)
+ printk("eepro: entering eepro_open routine.\n");
+
+ if ((dev->dev_addr[0] == SA_ADDR0 &&
+ dev->dev_addr[1] == SA_ADDR1 &&
+ dev->dev_addr[2] == SA_ADDR2)&&
+ (dev->dev_addr[3] != SA3_ADDR3))
+ {
+ lp->eepro = 1;
+ if (net_debug > 3) printk("p->eepro = 1;\n");
+ } /* Yes, an Intel EtherExpress Pro/10 */
+
+ else if ((dev->dev_addr[0] == SA2_ADDR0 &&
+ dev->dev_addr[1] == SA2_ADDR1 &&
+ dev->dev_addr[2] == SA2_ADDR2)||
+ (dev->dev_addr[0] == SA3_ADDR0 &&
+ dev->dev_addr[1] == SA3_ADDR1 &&
+ dev->dev_addr[2] == SA3_ADDR2 &&
+ dev->dev_addr[3] == SA3_ADDR3))
+ {
+ lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */
+ if (net_debug > 3) printk("p->eepro = 2;\n");
+ }
+
+ else lp->eepro = 0; /* No, it is a generic 82595 lan card */
+
+ /* Get the interrupt vector for the 82595 */
+ if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
+ printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ if (irq2dev_map[dev->irq] != 0
+ || (irq2dev_map[dev->irq] = dev) == 0)
+ return -EAGAIN;
+
+ /* Initialize the 82595. */
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + EEPROM_REG);
+ lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */
+
+ if (net_debug > 3)
+ printk("The stepping of the 82595 is %d\n", lp->stepping);
+ if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
+ outb(temp_reg & 0xef, ioaddr + EEPROM_REG);
+ for (i=0; i < 6; i++)
+ outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
+
+ temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
+ outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
+ | RCV_Discard_BadFrame, ioaddr + REG1);
+ temp_reg = inb(ioaddr + REG2); /* Match broadcast */
+ outb(temp_reg | 0x14, ioaddr + REG2);
+ temp_reg = inb(ioaddr + REG3);
+ outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
+
+ /* Set the receiving mode */
+ outb(BANK1_SELECT, ioaddr); /* be CAREFUL, BANK 1 now */
+
+ /* Set the interrupt vector */
+ temp_reg = inb(ioaddr + INT_NO_REG);
+
+ if (lp->eepro == 2)
+ outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+ temp_reg = inb(ioaddr + INT_NO_REG);
+
+ if (lp->eepro == 2)
+ outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG);
+ else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
+
+ if (net_debug > 3)
+ printk("eepro_open: content of INT Reg is %x\n", temp_reg);
+
+
+ /* Initialize the RCV and XMT upper and lower limits */
+ outb(RCV_LOWER_LIMIT, ioaddr + RCV_LOWER_LIMIT_REG);
+ outb(RCV_UPPER_LIMIT, ioaddr + RCV_UPPER_LIMIT_REG);
+ outb(XMT_LOWER_LIMIT, ioaddr + XMT_LOWER_LIMIT_REG);
+ outb(XMT_UPPER_LIMIT, ioaddr + XMT_UPPER_LIMIT_REG);
+
+ /* Enable the interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg | INT_ENABLE, ioaddr + REG1);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Let RX and TX events interrupt */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Initialize RCV */
+ outw(RCV_LOWER_LIMIT << 8, ioaddr + RCV_BAR);
+ lp->rx_start = (RCV_LOWER_LIMIT << 8) ;
+ outw((RCV_UPPER_LIMIT << 8) | 0xfe, ioaddr + RCV_STOP);
+
+ /* Initialize XMT */
+ outw(XMT_LOWER_LIMIT << 8, ioaddr + XMT_BAR);
+
+ /* Check for the i82595TX and i82595FX */
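+ /* (write the complement back into register 8, and then 9, and see whether
+    it sticks: a plain 82595 ignores the write, a TX accepts register 8, and
+    an FX accepts both) */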
+ old8 = inb(ioaddr + 8);
+ outb(~old8, ioaddr + 8);
+
+ if ((temp_reg = inb(ioaddr + 8)) == old8) {
+ if (net_debug > 3)
+ printk("i82595 detected!\n");
+ lp->version = LAN595;
+ }
+ else {
+ lp->version = LAN595TX;
+ outb(old8, ioaddr + 8);
+ old9 = inb(ioaddr + 9);
+ outb(~old9, ioaddr + 9);
+
+ if (((temp_reg = inb(ioaddr + 9)) == ( (~old9)&0xff) )) {
+ enum iftype { AUI=0, BNC=1, TPE=2 };
+
+ if (net_debug > 3) {
+ printk("temp_reg: %#x ~old9: %#x\n",temp_reg, ~old9);
+ printk("i82595FX detected!\n");
+ }
+
+ lp->version = LAN595FX;
+ outb(old9, ioaddr + 9);
+
+ if (dev->if_port != TPE) { /* Hopefully, this will fix the
+ problem of using Pentiums and
+ pro/10 w/ BNC. */
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ temp_reg = inb(ioaddr + REG13);
+
+ /* disable the full duplex mode since it is not
+ applicable with the 10Base2 cable. */
+ outb(temp_reg & ~(FDX | A_N_ENABLE), ioaddr + REG13);
+ outb(BANK0_SELECT, ioaddr); /* be CAREFUL, BANK 0 now */
+ }
+ }
+ else if (net_debug > 3) {
+ printk("temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff));
+ printk("i82595TX detected!\n");
+ }
+ }
+
+ outb(SEL_RESET_CMD, ioaddr);
+
+ /* We are supposed to wait for 2 us after a SEL_RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8; /* or = RCV_RAM */
+ lp->tx_last = 0;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ if (net_debug > 3)
+ printk("eepro: exiting eepro_open routine.\n");
+
+ outb(RCV_ENABLE_CMD, ioaddr);
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+eepro_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int rcv_ram = dev->mem_end;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_send_packet routine.\n");
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+
+ int tickssofar = jiffies - dev->trans_start;
+
+ if (tickssofar < 40)
+ return 1;
+
+ if (net_debug > 1)
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ "network cable problem");
+
+ lp->stats.tx_errors++;
+
+ /* Try to restart the adaptor. */
+ outb(SEL_RESET_CMD, ioaddr);
+
+ /* We are supposed to wait for 2 us after a SEL_RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ /* Do I also need to flush the transmit buffers here? YES? */
+ lp->tx_start = lp->tx_end = rcv_ram;
+ lp->tx_last = 0;
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ outb(RCV_ENABLE_CMD, ioaddr);
+ }
+
+	/* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. */
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ }
+
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+ /* lp->stats.tx_aborted_errors++; */
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_send_packet routine.\n");
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+
+static void
+eepro_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int ioaddr, status, boguscount = 20;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_interrupt routine.\n");
+
+ if (dev == NULL) {
+ printk ("eepro_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+
+ do {
+ status = inb(ioaddr + STATUS_REG);
+
+ if (status & RX_INT) {
+ if (net_debug > 4)
+ printk("eepro: packet received interrupt.\n");
+ /* Acknowledge the RX_INT */
+ outb(RX_INT, ioaddr + STATUS_REG);
+ /* Get the received packets */
+ eepro_rx(dev);
+ }
+ else if (status & TX_INT) {
+ if (net_debug > 4)
+ printk("eepro: packet transmit interrupt.\n");
+ /* Acknowledge the TX_INT */
+ outb(TX_INT, ioaddr + STATUS_REG);
+ /* Process the status of transmitted packets */
+ eepro_transmit_interrupt(dev);
+ }
+
+ } while ((boguscount-- > 0) && (status & 0x06));
+
+ dev->interrupt = 0;
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_interrupt routine.\n");
+
+ return;
+}
+
+static int
+eepro_close(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int rcv_ram = dev->mem_end;
+ short temp_reg;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(BANK1_SELECT, ioaddr); /* Switch back to Bank 1 */
+
+ /* Disable the physical interrupt line. */
+ temp_reg = inb(ioaddr + REG1);
+ outb(temp_reg & 0x7f, ioaddr + REG1);
+ outb(BANK0_SELECT, ioaddr); /* Switch back to Bank 0 */
+
+ /* Flush the Tx and disable Rx. */
+ outb(STOP_RCV_CMD, ioaddr);
+
+ lp->tx_start = lp->tx_end = rcv_ram ;
+ lp->tx_last = 0;
+
+ /* Mask all the interrupts. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ /* clear all interrupts */
+ outb(ALL_MASK, ioaddr + STATUS_REG);
+
+ /* Reset the 82595 */
+ outb(RESET_CMD, ioaddr);
+
+ /* release the interrupt */
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. What statistics? */
+ /* We are supposed to wait for 200 us after a RESET */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO; /* May not be enough? */
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+eepro_get_stats(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void
+set_multicast_list(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ unsigned short mode;
+ struct dev_mc_list *dmi=dev->mc_list;
+
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ {
+ /*
+ * We must make the kernel realise we had to move
+		 * into promisc mode or we start an all-out war on
+ * the cable. If it was a promisc request the
+ * flag is already set. If not we assert it.
+ */
+ dev->flags|=IFF_PROMISC;
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | PRMSC_Mode, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ printk("%s: promiscuous mode enabled.\n", dev->name);
+ }
+
+ else if (dev->mc_count==0 )
+ {
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ }
+
+ else
+ {
+ unsigned short status, *eaddrs;
+ int i, boguscount = 0;
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+ outb(BANK2_SELECT, ioaddr); /* be CAREFUL, BANK 2 now */
+ mode = inb(ioaddr + REG2);
+ outb(mode | Multi_IA, ioaddr + REG2);
+ mode = inb(ioaddr + REG3);
+ outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
+ outb(BANK0_SELECT, ioaddr); /* Return to BANK 0 now */
+ outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
+ outw(MC_SETUP, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+
+ for (i = 0; i < dev->mc_count; i++)
+ {
+ eaddrs=(unsigned short *)dmi->dmi_addr;
+ dmi=dmi->next;
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ outw(*eaddrs++, ioaddr + IO_PORT);
+ }
+
+ eaddrs = (unsigned short *) dev->dev_addr;
+ outw(eaddrs[0], ioaddr + IO_PORT);
+ outw(eaddrs[1], ioaddr + IO_PORT);
+ outw(eaddrs[2], ioaddr + IO_PORT);
+ outw(lp->tx_end, ioaddr + XMT_BAR);
+ outb(MC_SETUP, ioaddr);
+
+ /* Update the transmit queue */
+ i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+
+ if (lp->tx_start != lp->tx_end)
+ {
+ /* update the next address and the chain bit in the
+ last packet */
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(i, ioaddr + IO_PORT);
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+ lp->tx_end = i ;
+ }
+ else {
+ lp->tx_start = lp->tx_end = i ;
+ }
+
+ /* Acknowledge that the MC setup is done */
+ do { /* We should be doing this in the eepro_interrupt()! */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ if (inb(ioaddr + STATUS_REG) & 0x08)
+ {
+ i = inb(ioaddr);
+ outb(0x08, ioaddr + STATUS_REG);
+
+ if (i & 0x20) { /* command ABORTed */
+ printk("%s: multicast setup failed.\n",
+ dev->name);
+ break;
+ } else if ((i & 0x0f) == 0x03) { /* MC-Done */
+ printk("%s: set Rx mode to %d addresses.\n",
+ dev->name, dev->mc_count);
+ break;
+ }
+ }
+ } while (++boguscount < 100);
+
+ /* Re-enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ }
+ outb(RCV_ENABLE_CMD, ioaddr);
+}
+
+/* The horrible routine to read a word from the serial EEPROM. */
+/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
+/* The delay between EEPROM clock transitions. */
+
+#define eeprom_delay() { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
+#define EE_READ_CMD (6 << 6)
+
+int
+read_eeprom(int ioaddr, int location)
+{
+ int i;
+ unsigned short retval = 0;
+ short ee_addr = ioaddr + EEPROM_REG;
+ int read_cmd = location | EE_READ_CMD;
+ short ctrl_val = EECS ;
+
+ outb(BANK2_SELECT, ioaddr);
+ outb(ctrl_val, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 8; i >= 0; i--) {
+ short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
+ : ctrl_val;
+ outb(outval, ee_addr);
+ outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
+ eeprom_delay();
+		outb(outval, ee_addr);	/* Finish the EEPROM clock tick. */
+ eeprom_delay();
+ }
+ outb(ctrl_val, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outb(ctrl_val | EESK, ee_addr); eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
+ outb(ctrl_val, ee_addr); eeprom_delay();
+ }
+ /* Terminate the EEPROM access. */
+ ctrl_val &= ~EECS;
+ outb(ctrl_val | EESK, ee_addr);
+ eeprom_delay();
+ outb(ctrl_val, ee_addr);
+ eeprom_delay();
+ outb(BANK0_SELECT, ioaddr);
+ return retval;
+}
+
+static void
+hardware_send_packet(struct device *dev, void *buf, short length)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ int rcv_ram = dev->mem_end;
+ unsigned status, tx_available, last, end, boguscount = 100;
+
+ if (net_debug > 5)
+ printk("eepro: entering hardware_send_packet routine.\n");
+
+ while (boguscount-- > 0) {
+
+ /* Disable RX and TX interrupts. Necessary to avoid
+ corruption of the HOST_ADDRESS_REG by interrupt
+ service routines. */
+ outb(ALL_MASK, ioaddr + INT_MASK_REG);
+
+ if (dev->interrupt == 1) {
+ /* Enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+ continue;
+ }
+
+ /* determine how much of the transmit buffer space is available */
+ if (lp->tx_end > lp->tx_start)
+ tx_available = XMT_RAM - (lp->tx_end - lp->tx_start);
+ else if (lp->tx_end < lp->tx_start)
+ tx_available = lp->tx_start - lp->tx_end;
+ else tx_available = XMT_RAM;
+
+ if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER)
+ >= tx_available) /* No space available ??? */
+ {
+			eepro_transmit_interrupt(dev); /* Clean up the transmitting queue */
+ /* Enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+ continue;
+ }
+
+ last = lp->tx_end;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+ if (end >= RAM_SIZE) { /* the transmit buffer is wrapped around */
+
+ if ((RAM_SIZE - last) <= XMT_HEADER) {
+ /* Arrrr!!!, must keep the xmt header together,
+				   several days were lost chasing this one down. */
+ last = rcv_ram;
+ end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
+ }
+
+ else end = rcv_ram + (end - RAM_SIZE);
+ }
+
+ outw(last, ioaddr + HOST_ADDRESS_REG);
+ outw(XMT_CMD, ioaddr + IO_PORT);
+ outw(0, ioaddr + IO_PORT);
+ outw(end, ioaddr + IO_PORT);
+ outw(length, ioaddr + IO_PORT);
+
+ if (lp->version == LAN595)
+ outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
+
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ /* A dummy read to flush the DRAM write pipeline */
+ status = inw(ioaddr + IO_PORT);
+
+ if (lp->tx_start == lp->tx_end) {
+ outw(last, ioaddr + XMT_BAR);
+ outb(XMT_CMD, ioaddr);
+ lp->tx_start = last; /* I don't like to change tx_start here */
+ }
+ else {
+ /* update the next address and the chain bit in the
+ last packet */
+
+ if (lp->tx_end != last) {
+ outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
+ outw(last, ioaddr + IO_PORT);
+ }
+
+ outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
+ status = inw(ioaddr + IO_PORT);
+ outw(status | CHAIN_BIT, ioaddr + IO_PORT);
+
+ /* Continue the transmit command */
+ outb(RESUME_XMT_CMD, ioaddr);
+ }
+ lp->tx_last = last;
+ lp->tx_end = end;
+
+ /* Enable RX and TX interrupts */
+ outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
+
+ if (dev->tbusy) {
+ dev->tbusy = 0;
+ }
+
+ if (net_debug > 5)
+ printk("eepro: exiting hardware_send_packet routine.\n");
+
+ return;
+ }
+ dev->tbusy = 1;
+
+ if (net_debug > 5)
+ printk("eepro: exiting hardware_send_packet routine.\n");
+}
+
+static void
+eepro_rx(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr, rcv_ram = dev->mem_end;
+ short boguscount = 20;
+ short rcv_car = lp->rx_start;
+ unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
+
+ if (net_debug > 5)
+ printk("eepro: entering eepro_rx routine.\n");
+
+ /* Set the read pointer to the start of the RCV */
+ outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
+
+ rcv_event = inw(ioaddr + IO_PORT);
+ while (rcv_event == RCV_DONE) {
+
+ rcv_status = inw(ioaddr + IO_PORT);
+ rcv_next_frame = inw(ioaddr + IO_PORT);
+ rcv_size = inw(ioaddr + IO_PORT);
+
+ if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
+
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+ rcv_size &= 0x3fff;
+ skb = dev_alloc_skb(rcv_size+5);
+
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ if (lp->version == LAN595)
+ insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
+
+ else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
+ unsigned short temp = inb(ioaddr + INT_MASK_REG);
+ outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
+ insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), (rcv_size + 3) >> 2);
+ outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
+ }
+
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+
+		else { /* Not sure we will ever reach here,
+ I set the 595 to discard bad received frames */
+ lp->stats.rx_errors++;
+
+ if (rcv_status & 0x0100)
+ lp->stats.rx_over_errors++;
+
+ else if (rcv_status & 0x0400)
+ lp->stats.rx_frame_errors++;
+
+ else if (rcv_status & 0x0800)
+ lp->stats.rx_crc_errors++;
+
+ printk("%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
+ dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
+ }
+
+ if (rcv_status & 0x1000)
+ lp->stats.rx_length_errors++;
+
+ if (--boguscount == 0)
+ break;
+
+ rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
+ lp->rx_start = rcv_next_frame;
+ outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
+ rcv_event = inw(ioaddr + IO_PORT);
+ }
+ if (rcv_car == 0)
+ rcv_car = (RCV_UPPER_LIMIT << 8) | 0xff;
+
+ outw(rcv_car - 1, ioaddr + RCV_STOP);
+
+ if (net_debug > 5)
+ printk("eepro: exiting eepro_rx routine.\n");
+}
+
+static void
+eepro_transmit_interrupt(struct device *dev)
+{
+ struct eepro_local *lp = (struct eepro_local *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short boguscount = 20;
+ short xmt_status;
+
+ while (lp->tx_start != lp->tx_end) {
+
+ outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
+ xmt_status = inw(ioaddr+IO_PORT);
+
+ if ((xmt_status & TX_DONE_BIT) == 0) break;
+
+ xmt_status = inw(ioaddr+IO_PORT);
+ lp->tx_start = inw(ioaddr+IO_PORT);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+
+ if (xmt_status & 0x2000)
+ lp->stats.tx_packets++;
+ else {
+ lp->stats.tx_errors++;
+ if (xmt_status & 0x0400)
+ lp->stats.tx_carrier_errors++;
+ printk("%s: XMT status = %#x\n",
+ dev->name, xmt_status);
+ }
+
+ if (xmt_status & 0x000f) {
+ lp->stats.collisions += (xmt_status & 0x000f);
+ }
+
+ if ((xmt_status & 0x0040) == 0x0) {
+ lp->stats.tx_heartbeat_errors++;
+ }
+
+ if (--boguscount == 0)
+ break;
+ }
+}
+
+#ifdef MODULE
+
+static char devicename[9] = { 0, };
+static struct device dev_eepro = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, eepro_probe };
+static int io = 0x200;
+static int irq = 0;
+static int mem = (RCV_RAM/1024); /* Size of the rx buffer in KB */
+
+int
+init_module(void)
+{
+ if (io == 0)
+ printk("eepro: You should not use auto-probing with insmod!\n");
+
+ dev_eepro.base_addr = io;
+ dev_eepro.irq = irq;
+ dev_eepro.mem_end = mem;
+
+ if (register_netdev(&dev_eepro) != 0)
+ return -EIO;
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_eepro);
+
+ kfree_s(dev_eepro.priv,sizeof(struct eepro_local));
+ dev_eepro.priv=NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ release_region(dev_eepro.base_addr, EEPRO_IO_EXTENT);
+}
+#endif /* MODULE */
diff --git a/linux/src/drivers/net/eepro100.c b/linux/src/drivers/net/eepro100.c
new file mode 100644
index 0000000..d03462c
--- /dev/null
+++ b/linux/src/drivers/net/eepro100.c
@@ -0,0 +1,2155 @@
+/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This driver is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+ It should work with all i82557/558/559 boards.
+
+ To use as a module, use the compile-command at the end of the file.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ For updates see
+ http://www.scyld.com/network/eepro100.html
+ For installation instructions
+ http://www.scyld.com/network/modules.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"eepro100.c:v1.28 7/22/2003 Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/eepro100.html\n";
+
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.
+ The first five are undocumented and spelled per Intel recommendations.
+*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+static int congenb = 0; /* Enable congestion control in the DP83840. */
+static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
+static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
+/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+static int txdmacount = 128;
+static int rxdmacount = 0;
+
+/* Set the copy breakpoint for the copy-only-tiny-frame Rx method.
+ Lower values use more memory, but are faster.
+ Setting to > 1518 disables this feature. */
+static int rx_copybreak = 200;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+static int multicast_filter_limit = 64;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability, however setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
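+
+/* Illustrative example (not from the original source): with the option
+   encoding described above, forcing the first board to 100 Mbps full
+   duplex and a second board to 10 Mbps half duplex when loading the
+   module would look roughly like
+	insmod eepro100.o options=0x200,0x10
+   with one comma-separated value per board, up to MAX_UNITS entries. */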
+
+/* Operational parameters that are set at compile time. */
+
+/* The ring sizes should be a power of two for efficiency. */
+#define TX_RING_SIZE 32 /* Effectively 2 entries fewer. */
+#define RX_RING_SIZE 32
+/* Actual number of TX packets queued, must be <= TX_RING_SIZE-2. */
+#define TX_QUEUE_LIMIT 12
+#define TX_QUEUE_UNFULL 8 /* Hysteresis marking queue as no longer full. */
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed bus+endian portability operations. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Intel PCI EtherExpressPro 100 driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(congenb, "i");
+MODULE_PARM(txfifo, "i");
+MODULE_PARM(rxfifo, "i");
+MODULE_PARM(txdmacount, "i");
+MODULE_PARM(rxdmacount, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+#ifdef MODULE_PARM_DESC
+MODULE_PARM_DESC(debug, "EEPro100 message level (0-31)");
+MODULE_PARM_DESC(options,
+ "EEPro100: force fixed speed+duplex 0x10 0x20 0x100 0x200");
+MODULE_PARM_DESC(max_interrupt_work,
+ "EEPro100 maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "EEPro100 set to forced full duplex when not 0"
+ " (deprecated)");
+MODULE_PARM_DESC(rx_copybreak,
+ "EEPro100 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "EEPro100 breakpoint for switching to Rx-all-multicast");
+/* Other settings are undocumented per Intel recommendation. */
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
+single-chip fast Ethernet controller for PCI, as used on the Intel
+EtherExpress Pro 100 adapter.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line. While it's
+possible to share PCI interrupt lines, it negatively impacts performance and
+only recent kernels support it.
+
+III. Driver operation
+
+IIIA. General
+The Speedo3 is very similar to other Intel network chips, that is to say
+"apparently designed on a different planet". This chips retains the complex
+Rx and Tx descriptors and multiple buffers pointers as previous chips, but
+also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
+Tx mode, but in a simplified lower-overhead manner: it associates only a
+single buffer descriptor with each frame descriptor.
+
+Despite the extra space overhead in each receive skbuff, the driver must use
+the simplified Rx buffer mode to assure that only a single data buffer is
+associated with each RxFD. The driver implements this by reserving space
+for the Rx descriptor at the head of each Rx skbuff.
+
+The Speedo-3 has receive and command unit base addresses that are added to
+almost all descriptor pointers. The driver sets these to zero, so that all
+pointer fields are absolute addresses.
+
+The System Control Block (SCB) of some previous Intel chips exists on the
+chip in both PCI I/O and memory space. This driver uses the I/O space
+registers, but might switch to memory mapped mode to better support non-x86
+processors.
+
+IIIB. Transmit structure
+
+The driver must use the complex Tx command+descriptor mode in order to
+have an indirect pointer to the skbuff data section. Each Tx command block
+(TxCB) is associated with two immediately appended Tx Buffer Descriptors
+(TxBDs). A fixed ring of these TxCB+TxBD pairs is kept as part of the
+speedo_private data structure for each adapter instance.
+
+The i82558 and later chips explicitly support this structure, and can read the two
+TxBDs in the same PCI burst as the TxCB.
+
+This ring structure is used for all normal transmit packets, but the
+transmit packet descriptors aren't long enough for most non-Tx commands such
+as CmdConfigure. This is complicated by the possibility that the chip has
+already loaded the link address in the previous descriptor. So for these
+commands we convert the next free descriptor on the ring to a NoOp, and point
+that descriptor's link to the complex command.
+
+An additional complexity of these non-transmit commands is that they may be
+added asynchronously to the normal transmit queue, so we set a lock
+whenever the Tx descriptor ring is manipulated.
+
+A notable aspect of these special configure commands is that they do
+work with the normal Tx ring entry scavenge method. The Tx ring scavenge
+is done at interrupt time using the 'dirty_tx' index, and checking for the
+command-complete bit. The setup frames may have the NoOp command on the
+Tx ring marked as complete without having completed the setup command, but this
+is not a problem. The tx_ring entry can still be safely reused, as the
+tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
+
+Commands may have bits set, e.g. CmdSuspend, in the command word to either
+suspend or stop the transmit/command unit. This driver always initializes
+the current command with CmdSuspend before erasing the CmdSuspend in the
+previous command, and only then issues a CU_RESUME.
+
+Note: In previous generation Intel chips, restarting the command unit was a
+notoriously slow process. This is presumably no longer true.
+
+IIIC. Receive structure
+
+Because of the bus-master support on the Speedo3 this driver uses the
+SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+This scheme allocates full-sized skbuffs as receive buffers. The value
+SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+trade off the memory wasted by passing the full-sized skbuff to the queue
+layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
+
+For small frames the copying cost is negligible (esp. considering that we
+are pre-loading the cache with immediately useful header information), so we
+allocate a new, minimally-sized skbuff. For large frames the copying cost
+is non-trivial, and the larger copy might flush the cache of useful data, so
+we pass up the skbuff the packet was received into.
+
+IIID. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'sp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'sp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+that stated that I could disclose the information. But I still resent
+having to sign an Intel NDA when I'm helping Intel sell their own product!
+
+*/
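+
+/* Illustrative sketch only, not part of the original driver: the Rx
+   copybreak policy described in IIIC above reduces to a comparison of the
+   received frame length against the configurable rx_copybreak value.
+   The helper name below is hypothetical. */
+static inline int speedo_rx_should_copy(int pkt_len, int copybreak)
+{
+	/* Small frames: copy into a freshly allocated, minimally sized
+	   skbuff (the copy is cheap and pre-warms the cache with headers).
+	   Large frames: pass the original full-sized skbuff upstream. */
+	return pkt_len < copybreak;
+}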
+
+/* This table drives the PCI probe routines. */
+static void *speedo_found1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt);
+static int speedo_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags { ResetMII=1, HasChksum=2};
+
+/* I/O registers beyond 0x18 do not exist on the i82557. */
+#ifdef USE_IO_OPS
+#define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR1
+#define SPEEDO_SIZE 32
+#else
+#define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR0
+#define SPEEDO_SIZE 0x1000
+#endif
+
+struct pci_id_info static pci_id_tbl[] = {
+ {"Intel PCI EtherExpress Pro100 82865", { 0x12278086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI EtherExpress Pro100 Smart (i960RP/RD)",
+ { 0x12288086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel i82559 rev 8", { 0x12298086, ~0, 0,0, 8,0xff},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, HasChksum, },
+ {"Intel PCI EtherExpress Pro100", { 0x12298086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel EtherExpress Pro/100+ i82559ER", { 0x12098086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, ResetMII, },
+ {"Intel EtherExpress Pro/100 type 1029", { 0x10298086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel EtherExpress Pro/100 type 1030", { 0x10308086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 V Network", { 0x24498086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI LAN0 Controller 82801E", { 0x24598086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI LAN1 Controller 82801E", { 0x245D8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1031)", { 0x10318086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1032)", { 0x10328086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1033)", { 0x10338086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1034)", { 0x10348086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1035)", { 0x10358086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 1038)", { 0x10388086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 1039)", { 0x10398086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 103a)", { 0x103a8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"HP/Compaq D510 Intel Pro/100 VM",
+ { 0x103b8086, 0xffffffff, 0x00120e11, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 103b)", { 0x103b8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 103D)", { 0x103d8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 103E)", { 0x103e8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel EtherExpress Pro/100 865G Northbridge type 1051",
+ { 0x10518086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI to PCI Bridge EtherExpress Pro100 Server Adapter",
+ { 0x52008086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel PCI EtherExpress Pro100 Server Adapter",
+ { 0x52018086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (unknown type series 1030)",
+ { 0x10308086, 0xfff0ffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 (unknown type series 1050)",
+ { 0x10508086, 0xfff0ffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info eepro100_drv_id = {
+ "eepro100", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ speedo_found1, speedo_pwr_event, };
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Offsets to the various registers.
+ All accesses need not be longword aligned. */
+enum speedo_offsets {
+ SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
+ SCBPointer = 4, /* General purpose pointer. */
+ SCBPort = 8, /* Misc. commands and operands. */
+ SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
+ SCBCtrlMDI = 16, /* MDI interface control. */
+ SCBEarlyRx = 20, /* Early receive byte count. */
+};
+/* Commands that can be put in a command list entry. */
+enum commands {
+ CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
+ CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
+ CmdDump = 0x60000, CmdDiagnose = 0x70000,
+ CmdSuspend = 0x40000000, /* Suspend after completion. */
+ CmdIntr = 0x20000000, /* Interrupt after completion. */
+ CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
+};
+/* Do atomically if possible. */
+#if defined(__i386__)
+#define clear_suspend(cmd) ((char *)(&(cmd)->cmd_status))[3] &= ~0x40
+#elif defined(__alpha__) || defined(__x86_64) || defined(__ia64)
+#define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status)
+#elif defined(__powerpc__) || defined(__sparc__) || (__BIG_ENDIAN)
+#define clear_suspend(cmd) clear_bit(6, &(cmd)->cmd_status)
+#else
+#warning Undefined architecture.
+#define clear_suspend(cmd) (cmd)->cmd_status &= cpu_to_le32(~CmdSuspend)
+#endif
+
+enum SCBCmdBits {
+ SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+ SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+ SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+ /* The rest are Rx and Tx commands. */
+ CUStart=0x0010, CUResume=0x0020, CUHiPriStart=0x0030, CUStatsAddr=0x0040,
+ CUShowStats=0x0050,
+ CUCmdBase=0x0060, /* CU Base address (set to zero) . */
+ CUDumpStats=0x0070, /* Dump then reset stats counters. */
+ CUHiPriResume=0x00b0, /* Resume for the high priority Tx queue. */
+ RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+ RxResumeNoResources=0x0007,
+};
+
+enum intr_status_bits {
+ IntrCmdDone=0x8000, IntrRxDone=0x4000, IntrCmdIdle=0x2000,
+ IntrRxSuspend=0x1000, IntrMIIDone=0x0800, IntrDrvrIntr=0x0400,
+ IntrAllNormal=0xfc00,
+};
+
+enum SCBPort_cmds {
+ PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
+};
+
+/* The Speedo3 Rx and Tx frame/buffer descriptors. */
+struct descriptor { /* A generic descriptor. */
+ s32 cmd_status; /* All command and status fields. */
+ u32 link; /* struct descriptor * */
+ unsigned char params[0];
+};
+
+/* The Speedo3 Rx and Tx buffer descriptors. */
+struct RxFD { /* Receive frame descriptor. */
+ s32 status;
+ u32 link; /* struct RxFD * */
+ u32 rx_buf_addr; /* void * */
+ u32 count;
+};
+
+/* Selected elements of the Tx/RxFD.status word. */
+enum RxFD_bits {
+ RxComplete=0x8000, RxOK=0x2000,
+ RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
+ RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
+ TxUnderrun=0x1000, StatusComplete=0x8000,
+};
+
+struct TxFD { /* Transmit frame descriptor set. */
+ s32 status;
+ u32 link; /* void * */
+ u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
+ s32 count; /* # of TBD (=1), Tx start thresh., etc. */
+ /* This constitutes two "TBD" entries. Non-zero-copy uses only one. */
+ u32 tx_buf_addr0; /* void *, frame to be transmitted. */
+ s32 tx_buf_size0; /* Length of Tx frame. */
+ u32 tx_buf_addr1; /* Used only for zero-copy data section. */
+ s32 tx_buf_size1; /* Length of second data buffer (0). */
+};
+
+/* Elements of the dump_statistics block. This block must be lword aligned. */
+struct speedo_stats {
+ u32 tx_good_frames;
+ u32 tx_coll16_errs;
+ u32 tx_late_colls;
+ u32 tx_underruns;
+ u32 tx_lost_carrier;
+ u32 tx_deferred;
+ u32 tx_one_colls;
+ u32 tx_multi_colls;
+ u32 tx_total_colls;
+ u32 rx_good_frames;
+ u32 rx_crc_errs;
+ u32 rx_align_errs;
+ u32 rx_resource_errs;
+ u32 rx_overrun_errs;
+ u32 rx_colls_errs;
+ u32 rx_runt_errs;
+ u32 done_marker;
+};
+
+/* Do not change the position (alignment) of the first few elements!
+ The later elements are grouped for cache locality. */
+struct speedo_private {
+ struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */
+ struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
+ struct speedo_stats lstats; /* Statistics and self-test region */
+
+ /* The addresses of a Tx/Rx-in-place packets/buffers. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+
+ /* Transmit and other commands control. */
+ struct descriptor *last_cmd; /* Last command sent. */
+ unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
+ spinlock_t lock; /* Group with Tx control cache line. */
+ u32 tx_threshold; /* The value for txdesc.count. */
+ unsigned long last_cmd_time;
+
+ /* Rx control, one cache line. */
+ struct RxFD *last_rxf; /* Most recent Rx frame. */
+ unsigned int cur_rx, dirty_rx; /* The next free ring entry */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ int rx_copybreak;
+
+ int msg_level;
+ int max_interrupt_work;
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+ struct net_device_stats stats;
+ int alloc_failures;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ unsigned char acpi_pwr;
+ struct timer_list timer; /* Media selection timer. */
+ /* Multicast filter command. */
+ int mc_setup_frm_len; /* The length of an allocated.. */
+ struct descriptor *mc_setup_frm; /* ..multicast setup frame. */
+ int mc_setup_busy; /* Avoid double-use of setup frame. */
+ int multicast_filter_limit;
+
+ int in_interrupt; /* Word-aligned dev->interrupt */
+ int rx_mode; /* Current PROMISC/ALLMULTI setting. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
+ unsigned int rx_bug:1; /* Work around receiver hang errata. */
+ unsigned int rx_bug10:1; /* Receiver might hang at 10mbps. */
+ unsigned int rx_bug100:1; /* Receiver might hang at 100mbps. */
+ unsigned int polling:1; /* Hardware blocked interrupt line. */
+ unsigned int medialock:1; /* The media speed/duplex is fixed. */
+ unsigned char default_port; /* Last dev->if_port value. */
+ unsigned short phy[2]; /* PHY media interfaces available. */
+ unsigned short advertising; /* Current PHY advertised caps. */
+ unsigned short partner; /* Link partner caps. */
+ long last_reset;
+};
+
+/* Our internal RxMode state, not tied to the hardware bits. */
+enum rx_mode_bits {
+ AcceptAllMulticast=0x01, AcceptAllPhys=0x02,
+ AcceptErr=0x80, AcceptRunt=0x10,
+ AcceptBroadcast=0x08, AcceptMulticast=0x04,
+ AcceptMyPhys=0x01, RxInvalidMode=0x7f
+};
+
+/* The parameters for a CmdConfigure operation.
+ There are so many options that it would be difficult to document each bit.
+ We mostly use the default or recommended settings. */
+const char i82557_config_cmd[22] = {
+ 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
+ 0, 0x2E, 0, 0x60, 0,
+ 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
+ 0x3f, 0x05, };
+const char i82558_config_cmd[22] = {
+ 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
+ 0, 0x2E, 0, 0x60, 0x08, 0x88,
+ 0x68, 0, 0x40, 0xf2, 0xBD, /* 0xBD->0xFD=Force full-duplex */
+ 0x31, 0x05, };
+
+/* PHY media interface chips, defined by the databook. */
+static const char *phys[] = {
+ "None", "i82553-A/B", "i82553-C", "i82503",
+ "DP83840", "80c240", "80c24", "i82555",
+ "unknown-8", "unknown-9", "DP83840A", "unknown-11",
+ "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
+enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
+ S80C24, I82555, DP83840A=10, };
+static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
+
+/* Standard serial configuration EEPROM commands. */
+#define EE_READ_CMD (6)
+
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static int mdio_write(long ioaddr, int phy_id, int location, int value);
+static int speedo_open(struct net_device *dev);
+static void speedo_resume(struct net_device *dev);
+static void speedo_timer(unsigned long data);
+static void speedo_init_rx_ring(struct net_device *dev);
+static void speedo_tx_timeout(struct net_device *dev);
+static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int speedo_rx(struct net_device *dev);
+static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int speedo_close(struct net_device *dev);
+static struct net_device_stats *speedo_get_stats(struct net_device *dev);
+static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+#ifdef honor_default_port
+/* Optional driver feature to allow forcing the transceiver setting.
+ Not recommended. */
+static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
+ 0x2000, 0x2100, 0x0400, 0x3100};
+#endif
+
+/* A list of all installed Speedo devices, for removing the driver module. */
+static struct net_device *root_speedo_dev = NULL;
+
+static void *speedo_found1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct speedo_private *sp;
+ void *priv_mem;
+ int i, option;
+ u16 eeprom[0x100];
+ int acpi_idle_state = 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ if (dev->mem_start > 0)
+ option = dev->mem_start;
+ else if (card_idx >= 0 && options[card_idx] >= 0)
+ option = options[card_idx];
+ else
+ option = -1;
+
+ acpi_idle_state = acpi_set_pwr_state(pdev, ACPI_D0);
+
+ /* Read the station address EEPROM before doing the reset.
+	   Nominally this should even be done before accepting the device, but
+ then we wouldn't have a device name with which to report the error.
+ The size test is for 6 bit vs. 8 bit address serial EEPROMs.
+ */
+ {
+ u16 sum = 0;
+ int j;
+ int read_cmd, ee_size;
+
+ if ((do_eeprom_cmd(ioaddr, EE_READ_CMD << 24, 27) & 0xffe0000)
+ == 0xffe0000) {
+ ee_size = 0x100;
+ read_cmd = EE_READ_CMD << 24;
+ } else {
+ ee_size = 0x40;
+ read_cmd = EE_READ_CMD << 22;
+ }
+
+ for (j = 0, i = 0; i < ee_size; i++) {
+ u16 value = do_eeprom_cmd(ioaddr, read_cmd | (i << 16), 27);
+ eeprom[i] = value;
+ sum += value;
+ if (i < 3) {
+ dev->dev_addr[j++] = value;
+ dev->dev_addr[j++] = value >> 8;
+ }
+ }
+ if (sum != 0xBABA)
+ printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
+ "check settings before activating this device!\n",
+ dev->name, sum);
+ /* Don't unregister_netdev(dev); as the EEPro may actually be
+ usable, especially if the MAC address is set later. */
+ }
+
+ /* Reset the chip: stop Tx and Rx processes and clear counters.
+ This takes less than 10usec and will easily finish before the next
+ action. */
+ outl(PortReset, ioaddr + SCBPort);
+
+ printk(KERN_INFO "%s: %s%s at %#3lx, ", dev->name,
+ eeprom[3] & 0x0100 ? "OEM " : "", pci_id_tbl[chip_idx].name,
+ ioaddr);
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2X:", dev->dev_addr[i]);
+ printk("%2.2X, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* We have decided to accept this device. */
+ /* Allocate cached private storage.
+ The PCI coherent descriptor rings are allocated at each open. */
+ sp = priv_mem = kmalloc(sizeof(*sp), GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+#ifndef kernel_bloat
+ /* OK, this is pure kernel bloat. I don't like it when other drivers
+ waste non-pageable kernel space to emit similar messages, but I need
+ them for bug reports. */
+ {
+ const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
+ /* The self-test results must be paragraph aligned. */
+ s32 *volatile self_test_results;
+		int boguscnt = 16000;		/* Timeout for self-test. */
+ printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
+ " connectors present:",
+ eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
+ for (i = 0; i < 4; i++)
+ if (eeprom[5] & (1<<i))
+ printk("%s", connectors[i]);
+ printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
+ phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
+ if (eeprom[7] & 0x0700)
+ printk(KERN_INFO " Secondary interface chip %s.\n",
+ phys[(eeprom[7]>>8)&7]);
+ if (((eeprom[6]>>8) & 0x3f) == DP83840
+ || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
+ int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
+ if (congenb)
+ mdi_reg23 |= 0x0100;
+ printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
+ mdi_reg23);
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
+ }
+ if ((option >= 0) && (option & 0x330)) {
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (option & 0x220 ? "full" : "half"));
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+ } else {
+ int mii_bmcrctrl = mdio_read(dev, eeprom[6] & 0x1f, 0);
+ /* Reset out of a transceiver left in 10baseT-fixed mode. */
+ if ((mii_bmcrctrl & 0x3100) == 0)
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 0, 0x8000);
+ }
+ if (eeprom[10] & 0x0002)
+ printk(KERN_INFO "\n" KERN_INFO " ** The configuration "
+ "EEPROM enables Sleep Mode.\n" KERN_INFO "\n"
+ " ** This will cause PCI bus errors!\n"
+ KERN_INFO " ** Update the configuration EEPROM "
+ "with the eepro100-diag program.\n" );
+ if (eeprom[6] == 0)
+ printk(KERN_INFO " ** The configuration EEPROM does not have a "
+ "transceiver type set.\n" KERN_INFO "\n"
+ " ** This will cause configuration problems and prevent "
+ "monitoring the link!\n"
+ KERN_INFO " ** Update the configuration EEPROM "
+ "with the eepro100-diag program.\n" );
+
+ /* Perform a system self-test. */
+ self_test_results = (s32*)(&sp->lstats);
+ self_test_results[0] = 0;
+ self_test_results[1] = -1;
+ outl(virt_to_bus(self_test_results) | PortSelfTest, ioaddr + SCBPort);
+ do {
+ udelay(10);
+ } while (self_test_results[1] == -1 && --boguscnt >= 0);
+
+ if (boguscnt < 0) { /* Test optimized out. */
+ printk(KERN_ERR "Self test failed, status %8.8x:\n"
+ KERN_ERR " Failure to initialize the i82557.\n"
+ KERN_ERR " Verify that the card is a bus-master"
+ " capable slot.\n",
+ self_test_results[1]);
+ } else
+ printk(KERN_INFO " General self-test: %s.\n"
+ KERN_INFO " Serial sub-system self-test: %s.\n"
+ KERN_INFO " Internal registers self-test: %s.\n"
+ KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
+ self_test_results[1] & 0x1000 ? "failed" : "passed",
+ self_test_results[1] & 0x0020 ? "failed" : "passed",
+ self_test_results[1] & 0x0008 ? "failed" : "passed",
+ self_test_results[1] & 0x0004 ? "failed" : "passed",
+ self_test_results[0]);
+ }
+#endif /* kernel_bloat */
+
+ outl(PortReset, ioaddr + SCBPort);
+
+ /* Return the chip to its original power state. */
+ acpi_set_pwr_state(pdev, acpi_idle_state);
+
+ /* We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ dev->priv = sp; /* Allocated above. */
+ memset(sp, 0, sizeof(*sp));
+ sp->next_module = root_speedo_dev;
+ root_speedo_dev = dev;
+
+ sp->priv_addr = priv_mem;
+ sp->pci_dev = pdev;
+ sp->chip_id = chip_idx;
+ sp->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ sp->acpi_pwr = acpi_idle_state;
+ sp->msg_level = (1 << debug) - 1;
+ sp->rx_copybreak = rx_copybreak;
+ sp->max_interrupt_work = max_interrupt_work;
+ sp->multicast_filter_limit = multicast_filter_limit;
+
+ sp->full_duplex = option >= 0 && (option & 0x220) ? 1 : 0;
+ if (card_idx >= 0) {
+ if (full_duplex[card_idx] >= 0)
+ sp->full_duplex = full_duplex[card_idx];
+ }
+ sp->default_port = option >= 0 ? (option & 0x0f) : 0;
+ if (sp->full_duplex)
+ sp->medialock = 1;
+
+ sp->phy[0] = eeprom[6];
+ sp->phy[1] = eeprom[7];
+ sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
+
+ if (sp->rx_bug)
+ printk(KERN_INFO " Receiver lock-up workaround activated.\n");
+
+ /* The Speedo-specific entries in the device structure. */
+ dev->open = &speedo_open;
+ dev->hard_start_xmit = &speedo_start_xmit;
+ dev->stop = &speedo_close;
+ dev->get_stats = &speedo_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &speedo_ioctl;
+
+ return dev;
+}
+
+/* How to wait for the command unit to accept a command.
+ Typically this takes 0 ticks. */
+
+static inline void wait_for_cmd_done(struct net_device *dev)
+{
+ long cmd_ioaddr = dev->base_addr + SCBCmd;
+ int wait = 0;
+ int delayed_cmd;
+ do
+ if (inb(cmd_ioaddr) == 0) return;
+ while(++wait <= 100);
+ delayed_cmd = inb(cmd_ioaddr);
+ do
+ if (inb(cmd_ioaddr) == 0) break;
+ while(++wait <= 10000);
+ printk(KERN_ERR "%s: Command %2.2x was not immediately accepted, "
+ "%d ticks!\n",
+ dev->name, delayed_cmd, wait);
+}
+
+/* Perform a SCB command known to be slow.
+ This function checks the status both before and after command execution. */
+static void do_slow_command(struct net_device *dev, int cmd)
+{
+ long cmd_ioaddr = dev->base_addr + SCBCmd;
+ int wait = 0;
+ do
+ if (inb(cmd_ioaddr) == 0) break;
+ while(++wait <= 200);
+ if (wait > 100)
+ printk(KERN_ERR "%s: Command %4.4x was never accepted (%d polls)!\n",
+ dev->name, inb(cmd_ioaddr), wait);
+ outb(cmd, cmd_ioaddr);
+ for (wait = 0; wait <= 100; wait++)
+ if (inb(cmd_ioaddr) == 0) return;
+ for (; wait <= 20000; wait++)
+ if (inb(cmd_ioaddr) == 0) return;
+ else udelay(1);
+ printk(KERN_ERR "%s: Command %4.4x was not accepted after %d polls!"
+ " Current status %8.8x.\n",
+ dev->name, cmd, wait, (int)inl(dev->base_addr + SCBStatus));
+}
+
+
+/* Serial EEPROM section.
+ A "bit" grungy, but we work our way through bit-by-bit :->. */
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
+#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+#define EE_ENB (0x4800 | EE_CS)
+#define EE_WRITE_0 0x4802
+#define EE_WRITE_1 0x4806
+#define EE_OFFSET SCBeeprom
+
+/* Delay between EEPROM clock transitions.
+   The code works with no delay on 33MHz PCI. */
+#ifndef USE_IO_OPS
+#define eeprom_delay(ee_addr) writew(readw(ee_addr), ee_addr)
+#else
+#define eeprom_delay(ee_addr) inw(ee_addr)
+#endif
+
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
+{
+ unsigned retval = 0;
+ long ee_addr = ioaddr + SCBeeprom;
+
+ outw(EE_ENB | EE_SHIFT_CLK, ee_addr);
+
+ /* Shift the command bits out. */
+ do {
+ short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
+ outw(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ outw(dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ } while (--cmd_len >= 0);
+ outw(EE_ENB, ee_addr);
+
+ /* Terminate the EEPROM access. */
+ outw(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long ioaddr = dev->base_addr;
+ int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+
+ outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+ do {
+ val = inl(ioaddr + SCBCtrlMDI);
+ if (--boguscnt < 0) {
+ printk(KERN_ERR "%s: mdio_read() timed out with val = %8.8x.\n",
+ dev->name, val);
+ break;
+ }
+ } while (! (val & 0x10000000));
+ return val & 0xffff;
+}
+
+static int mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+ outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
+ ioaddr + SCBCtrlMDI);
+ do {
+ val = inl(ioaddr + SCBCtrlMDI);
+ if (--boguscnt < 0) {
+ printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
+ break;
+ }
+ } while (! (val & 0x10000000));
+ return val & 0xffff;
+}
+
+
+static int
+speedo_open(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ MOD_INC_USE_COUNT;
+ acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
+
+ if (sp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
+
+ /* Set up the Tx queue early.. */
+ sp->cur_tx = 0;
+ sp->dirty_tx = 0;
+ sp->last_cmd = 0;
+ sp->tx_full = 0;
+ sp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+ sp->polling = sp->in_interrupt = 0;
+
+ dev->if_port = sp->default_port;
+
+ if ((sp->phy[0] & 0x8000) == 0)
+ sp->advertising = mdio_read(dev, sp->phy[0] & 0x1f, 4);
+ /* With some transceivers we must retrigger negotiation to reset
+ power-up errors. */
+ if ((sp->drv_flags & ResetMII) &&
+ (sp->phy[0] & 0x8000) == 0) {
+ int phy_addr = sp->phy[0] & 0x1f ;
+ /* Use 0x3300 for restarting NWay, other values to force xcvr:
+ 0x0000 10-HD
+ 0x0100 10-FD
+ 0x2000 100-HD
+ 0x2100 100-FD
+ */
+#ifdef honor_default_port
+ mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
+#else
+ mdio_write(ioaddr, phy_addr, 0, 0x3300);
+#endif
+ }
+
+ /* We can safely take handler calls during init.
+ Doing this after speedo_init_rx_ring() results in a memory leak. */
+ if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ speedo_init_rx_ring(dev);
+
+ /* Fire up the hardware. */
+ speedo_resume(dev);
+ netif_start_tx_queue(dev);
+
+ /* Setup the chip and configure the multicast list. */
+ sp->mc_setup_frm = NULL;
+ sp->mc_setup_frm_len = 0;
+ sp->mc_setup_busy = 0;
+ sp->rx_mode = RxInvalidMode; /* Invalid -> always reset the mode. */
+ sp->flow_ctrl = sp->partner = 0;
+ set_rx_mode(dev);
+
+ if (sp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
+ dev->name, (int)inw(ioaddr + SCBStatus));
+
+ /* Set the timer. The timer serves a dual purpose:
+ 1) to monitor the media interface (e.g. link beat) and perhaps switch
+ to an alternate media type
+ 2) to monitor Rx activity, and restart the Rx process if the receiver
+ hangs. */
+ init_timer(&sp->timer);
+ sp->timer.expires = jiffies + 3*HZ;
+ sp->timer.data = (unsigned long)dev;
+ sp->timer.function = &speedo_timer; /* timer handler */
+ add_timer(&sp->timer);
+
+ /* No need to wait for the command unit to accept here. */
+ if ((sp->phy[0] & 0x8000) == 0)
+ mdio_read(dev, sp->phy[0] & 0x1f, 0);
+ return 0;
+}
+
+/* Start the chip hardware after a full reset. */
+static void speedo_resume(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ outw(SCBMaskAll, ioaddr + SCBCmd);
+
+ /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
+ sp->tx_threshold = 0x01208000;
+
+ /* Set the segment registers to '0'. */
+ wait_for_cmd_done(dev);
+ if (inb(ioaddr + SCBCmd)) {
+ outl(PortPartialReset, ioaddr + SCBPort);
+ udelay(10);
+ }
+ outl(0, ioaddr + SCBPointer);
+ inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ udelay(10); /* Bogus, but it avoids the bug. */
+ /* Note: these next two operations can take a while. */
+ do_slow_command(dev, RxAddrLoad);
+ do_slow_command(dev, CUCmdBase);
+
+ /* Load the statistics block and rx ring addresses. */
+ outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
+ inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ outb(CUStatsAddr, ioaddr + SCBCmd);
+ sp->lstats.done_marker = 0;
+ wait_for_cmd_done(dev);
+
+ outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+ ioaddr + SCBPointer);
+ inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ /* Note: RxStart should complete instantly. */
+ do_slow_command(dev, RxStart);
+ do_slow_command(dev, CUDumpStats);
+
+ /* Fill the first command with our physical address. */
+ {
+ int entry = sp->cur_tx++ % TX_RING_SIZE;
+ struct descriptor *cur_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ /* Avoid a bug(?!) here by marking the command already completed. */
+ cur_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
+ cur_cmd->link =
+ virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+ memcpy(cur_cmd->params, dev->dev_addr, 6);
+ if (sp->last_cmd)
+ clear_suspend(sp->last_cmd);
+ sp->last_cmd = cur_cmd;
+ }
+
+ /* Start the chip's Tx process and unmask interrupts. */
+ outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + SCBPointer);
+ outw(CUStart, ioaddr + SCBCmd);
+}
+
+/* Media monitoring and control. */
+static void speedo_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int phy_num = sp->phy[0] & 0x1f;
+ int status = inw(ioaddr + SCBStatus);
+
+ if (sp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Interface monitor tick, chip status %4.4x.\n",
+ dev->name, status);
+
+ /* Normally we check every two seconds. */
+ sp->timer.expires = jiffies + 2*HZ;
+
+ if (sp->polling) {
+ /* Continue to be annoying. */
+ if (status & 0xfc00) {
+ speedo_interrupt(dev->irq, dev, 0);
+ if (jiffies - sp->last_reset > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
+ dev->name, dev->irq);
+ sp->last_reset = jiffies;
+ }
+ } else if (jiffies - sp->last_reset > 10*HZ)
+ sp->polling = 0;
+ sp->timer.expires = jiffies + 2;
+ }
+ /* We have MII and lost link beat. */
+ if ((sp->phy[0] & 0x8000) == 0) {
+ int partner = mdio_read(dev, phy_num, 5);
+ if (partner != sp->partner) {
+ int flow_ctrl = sp->advertising & partner & 0x0400 ? 1 : 0;
+ sp->partner = partner;
+ if (flow_ctrl != sp->flow_ctrl) {
+ sp->flow_ctrl = flow_ctrl;
+ sp->rx_mode = RxInvalidMode; /* Trigger a reload. */
+ }
+ /* Clear sticky bit. */
+ mdio_read(dev, phy_num, 1);
+ /* If link beat has returned... */
+ if (mdio_read(dev, phy_num, 1) & 0x0004)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ }
+ }
+
+ /* This no longer has a false-trigger window. */
+ if (sp->cur_tx - sp->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT &&
+ (jiffies - sp->last_cmd_time) > TX_TIMEOUT) {
+ if (status == 0xffff) {
+ if (jiffies - sp->last_reset > 10*HZ) {
+ sp->last_reset = jiffies;
+ printk(KERN_ERR "%s: The EEPro100 chip is missing!\n",
+ dev->name);
+ }
+ } else if (status & 0xfc00) {
+ /* We have a blocked IRQ line. This should never happen, but
+ we recover as best we can.*/
+ if ( ! sp->polling) {
+ if (jiffies - sp->last_reset > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is physically blocked! (%4.4x)"
+ "Failing back to low-rate polling.\n",
+ dev->name, dev->irq, status);
+ sp->last_reset = jiffies;
+ }
+ sp->polling = 1;
+ }
+ speedo_interrupt(dev->irq, dev, 0);
+ sp->timer.expires = jiffies + 2; /* Avoid */
+ } else {
+ speedo_tx_timeout(dev);
+ sp->last_reset = jiffies;
+ }
+ }
+ if (sp->rx_mode == RxInvalidMode ||
+ (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
+ /* We haven't received a packet in a Long Time. We might have been
+ bitten by the receiver hang bug. This can be cleared by sending
+ a set multicast list command. */
+ set_rx_mode(dev);
+ }
+ add_timer(&sp->timer);
+}
+
+static void speedo_show_state(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ int phy_num = sp->phy[0] & 0x1f;
+ int i;
+
+ /* Print a few items for debugging. */
+ if (sp->msg_level & NETIF_MSG_DRV) {
+ int i;
+ printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %d / %d:\n", dev->name,
+ sp->cur_tx, sp->dirty_tx);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG "%s: %c%c%d %8.8x.\n", dev->name,
+ i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
+ i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
+ i, sp->tx_ring[i].status);
+ }
+	printk(KERN_DEBUG "%s: Printing Rx ring (next to receive into %d).\n",
+ dev->name, sp->cur_rx);
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(KERN_DEBUG " Rx ring entry %d %8.8x.\n",
+ i, sp->rx_ringp[i] ? (int)sp->rx_ringp[i]->status : 0);
+
+ for (i = 0; i < 16; i++) {
+ if (i == 6) i = 21;
+ printk(KERN_DEBUG " PHY index %d register %d is %4.4x.\n",
+ phy_num, i, mdio_read(dev, phy_num, i));
+ }
+
+}
+
+/* Initialize the Rx ring, along with various 'dev' bits. */
+static void
+speedo_init_rx_ring(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ struct RxFD *rxf, *last_rxf = NULL;
+ int i;
+
+ sp->cur_rx = 0;
+#if defined(CONFIG_VLAN)
+ /* Note that buffer sizing is not a run-time check! */
+ sp->rx_buf_sz = dev->mtu + 14 + sizeof(struct RxFD) + 4;
+#else
+ sp->rx_buf_sz = dev->mtu + 14 + sizeof(struct RxFD);
+#endif
+ if (sp->rx_buf_sz < PKT_BUF_SZ)
+ sp->rx_buf_sz = PKT_BUF_SZ;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ skb = dev_alloc_skb(sp->rx_buf_sz);
+ sp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* OK. Just initially short of Rx bufs. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ rxf = (struct RxFD *)skb->tail;
+ sp->rx_ringp[i] = rxf;
+ skb_reserve(skb, sizeof(struct RxFD));
+ if (last_rxf)
+ last_rxf->link = virt_to_le32desc(rxf);
+ last_rxf = rxf;
+ rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
+ rxf->link = 0; /* None yet. */
+		/* This field is unused by the i82557; we use it as a consistency check. */
+#ifdef final_version
+ rxf->rx_buf_addr = 0xffffffff;
+#else
+ rxf->rx_buf_addr = virt_to_bus(skb->tail);
+#endif
+ rxf->count = cpu_to_le32((sp->rx_buf_sz - sizeof(struct RxFD)) << 16);
+ }
+ sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ /* Mark the last entry as end-of-list. */
+ last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
+ sp->last_rxf = last_rxf;
+}
+
+static void speedo_tx_timeout(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int status = inw(ioaddr + SCBStatus);
+
+ printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
+ " %4.4x at %d/%d commands %8.8x %8.8x %8.8x.\n",
+ dev->name, status, (int)inw(ioaddr + SCBCmd),
+ sp->dirty_tx, sp->cur_tx,
+ sp->tx_ring[(sp->dirty_tx+0) % TX_RING_SIZE].status,
+ sp->tx_ring[(sp->dirty_tx+1) % TX_RING_SIZE].status,
+ sp->tx_ring[(sp->dirty_tx+2) % TX_RING_SIZE].status);
+
+ /* Trigger a stats dump to give time before the reset. */
+ speedo_get_stats(dev);
+
+ speedo_show_state(dev);
+ if ((status & 0x00C0) != 0x0080
+ && (status & 0x003C) == 0x0010 && 0) {
+ /* Only the command unit has stopped. */
+ printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
+ dev->name);
+ outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + SCBPointer);
+ outw(CUStart, ioaddr + SCBCmd);
+ } else {
+ printk(KERN_WARNING "%s: Restarting the chip...\n",
+ dev->name);
+ /* Reset the Tx and Rx units. */
+ outl(PortReset, ioaddr + SCBPort);
+ if (sp->msg_level & NETIF_MSG_TX_ERR)
+ speedo_show_state(dev);
+ udelay(10);
+ speedo_resume(dev);
+ }
+ /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
+ if ((sp->phy[0] & 0x8000) == 0) {
+ int phy_addr = sp->phy[0] & 0x1f;
+ int advertising = mdio_read(dev, phy_addr, 4);
+ int mii_bmcr = mdio_read(dev, phy_addr, 0);
+ mdio_write(ioaddr, phy_addr, 0, 0x0400);
+ mdio_write(ioaddr, phy_addr, 1, 0x0000);
+ mdio_write(ioaddr, phy_addr, 4, 0x0000);
+ mdio_write(ioaddr, phy_addr, 0, 0x8000);
+#ifdef honor_default_port
+ mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
+#else
+ mdio_read(dev, phy_addr, 0);
+ mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
+ mdio_write(ioaddr, phy_addr, 4, advertising);
+#endif
+ }
+ sp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ return;
+}
+
+/* Handle the interrupt cases when something unexpected happens. */
+static void speedo_intr_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+
+ if (intr_status & IntrRxSuspend) {
+ if ((intr_status & 0x003c) == 0x0028) /* No more Rx buffers. */
+ outb(RxResumeNoResources, ioaddr + SCBCmd);
+ else if ((intr_status & 0x003c) == 0x0008) { /* No resources (why?!) */
+ printk(KERN_DEBUG "%s: Unknown receiver error, status=%#4.4x.\n",
+ dev->name, intr_status);
+ /* No idea of what went wrong. Restart the receiver. */
+ outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+ ioaddr + SCBPointer);
+ outb(RxStart, ioaddr + SCBCmd);
+ }
+ sp->stats.rx_errors++;
+ }
+}
+
+
+static int
+speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int entry;
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ If this ever occurs the queue layer is doing something evil! */
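+	/* Escalation: if the pause is recent we just report busy; close to
+	   TX_TIMEOUT we poke the chip with SCBTriggerIntr so the interrupt
+	   handler can reap finished frames; only after a full TX_TIMEOUT do
+	   we fall through to speedo_tx_timeout(). */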
+ if (netif_pause_tx_queue(dev) != 0) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < TX_TIMEOUT - 2)
+ return 1;
+ if (tickssofar < TX_TIMEOUT) {
+ /* Reap sent packets from the full Tx queue. */
+ outw(SCBTriggerIntr, ioaddr + SCBCmd);
+ return 1;
+ }
+ speedo_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ { /* Prevent interrupts from changing the Tx ring from underneath us. */
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ /* Calculate the Tx descriptor entry. */
+ entry = sp->cur_tx % TX_RING_SIZE;
+
+ sp->tx_skbuff[entry] = skb;
+ /* Todo: be a little more clever about setting the interrupt bit. */
+ sp->tx_ring[entry].status =
+ cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+ sp->cur_tx++;
+ sp->tx_ring[entry].link =
+ virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+ /* We may nominally release the lock here. */
+ sp->tx_ring[entry].tx_desc_addr =
+ virt_to_le32desc(&sp->tx_ring[entry].tx_buf_addr0);
+ /* The data region is always in one buffer descriptor. */
+ sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+ sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32desc(skb->data);
+ sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+ /* Todo: perhaps leave the interrupt bit set if the Tx queue is more
+ than half full. Argument against: we should be receiving packets
+ and scavenging the queue. Argument for: if so, it shouldn't
+ matter. */
+ {
+ struct descriptor *last_cmd = sp->last_cmd;
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+ clear_suspend(last_cmd);
+ }
+ if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT) {
+ sp->tx_full = 1;
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+ wait_for_cmd_done(dev);
+ outb(CUResume, ioaddr + SCBCmd);
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct speedo_private *sp;
+ long ioaddr;
+ int work_limit;
+ u16 status;
+
+ ioaddr = dev->base_addr;
+ sp = (struct speedo_private *)dev->priv;
+ work_limit = sp->max_interrupt_work;
+#ifndef final_version
+ /* A lock to prevent simultaneous entry on SMP machines. */
+ if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ sp->in_interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#endif
+
+ do {
+ status = inw(ioaddr + SCBStatus);
+
+ if ((status & IntrAllNormal) == 0 || status == 0xffff)
+ break;
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(status & IntrAllNormal, ioaddr + SCBStatus);
+
+ if (sp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
+ dev->name, status);
+
+ if (status & (IntrRxDone|IntrRxSuspend))
+ speedo_rx(dev);
+
+ /* The command unit did something, scavenge finished Tx entries. */
+ if (status & (IntrCmdDone | IntrCmdIdle | IntrDrvrIntr)) {
+ unsigned int dirty_tx;
+ /* We should nominally not need this lock. */
+ spin_lock(&sp->lock);
+
+ dirty_tx = sp->dirty_tx;
+ while (sp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(sp->tx_ring[entry].status);
+
+ if (sp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
+ entry, status);
+ if ((status & StatusComplete) == 0) {
+ /* Special case error check: look for descriptor that the
+ chip skipped(?). */
+ if (sp->cur_tx - dirty_tx > 2 &&
+ (sp->tx_ring[(dirty_tx+1) % TX_RING_SIZE].status
+ & cpu_to_le32(StatusComplete))) {
+ printk(KERN_ERR "%s: Command unit failed to mark "
+ "command %8.8x as complete at %d.\n",
+ dev->name, status, dirty_tx);
+ } else
+ break; /* It still hasn't been processed. */
+ }
+ if ((status & TxUnderrun) &&
+ (sp->tx_threshold < 0x01e08000)) {
+ sp->tx_threshold += 0x00040000;
+ if (sp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Tx threshold increased, "
+ "%#8.8x.\n", dev->name, sp->tx_threshold);
+ }
+ /* Free the original skb. */
+ if (sp->tx_skbuff[entry]) {
+ sp->stats.tx_packets++; /* Count only user packets. */
+#if LINUX_VERSION_CODE > 0x20127
+ sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+#endif
+ dev_free_skb_irq(sp->tx_skbuff[entry]);
+ sp->tx_skbuff[entry] = 0;
+ } else if ((status & 0x70000) == CmdNOp)
+ sp->mc_setup_busy = 0;
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
+ " full=%d.\n",
+ dirty_tx, sp->cur_tx, sp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ sp->dirty_tx = dirty_tx;
+ if (sp->tx_full
+ && sp->cur_tx - dirty_tx < TX_QUEUE_UNFULL) {
+ /* The ring is no longer full, clear tbusy. */
+ sp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+ spin_unlock(&sp->lock);
+ }
+
+ if (status & IntrRxSuspend)
+ speedo_intr_error(dev, status);
+
+ if (--work_limit < 0) {
+ printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupt sources. */
+ outl(0xfc00, ioaddr + SCBStatus);
+ break;
+ }
+ } while (1);
+
+ if (sp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)inw(ioaddr + SCBStatus));
+
+ clear_bit(0, (void*)&sp->in_interrupt);
+ return;
+}
+
+static int
+speedo_rx(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ int entry = sp->cur_rx % RX_RING_SIZE;
+ int status;
+ int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+
+ if (sp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In speedo_rx().\n");
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (sp->rx_ringp[entry] != NULL &&
+ (status = le32_to_cpu(sp->rx_ringp[entry]->status)) & RxComplete) {
+ int desc_count = le32_to_cpu(sp->rx_ringp[entry]->count);
+ int pkt_len = desc_count & 0x07ff;
+
+ if (--rx_work_limit < 0)
+ break;
+ if (sp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
+ pkt_len);
+ if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+ if (status & RxErrTooBig)
+ printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
+ "status %8.8x!\n", dev->name, status);
+ else if ( ! (status & RxOK)) {
+ /* There was a fatal error. This *should* be impossible. */
+ sp->stats.rx_errors++;
+ printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
+ "status %8.8x.\n", dev->name, status);
+ }
+ } else {
+ struct sk_buff *skb;
+
+ if (sp->drv_flags & HasChksum)
+ pkt_len -= 2;
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
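+			/* Frames shorter than rx_copybreak are copied into a freshly
+			   allocated skb so the full-sized ring buffer can be reused at
+			   once; longer frames hand the ring skb itself up the stack,
+			   and the empty slot is replenished in the refill loop below. */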
+ if (pkt_len < sp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ } else {
+ void *temp;
+ /* Pass up the already-filled skbuff. */
+ skb = sp->rx_skbuff[entry];
+ if (skb == NULL) {
+ printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
+ dev->name);
+ break;
+ }
+ sp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+#if !defined(final_version) && !defined(__powerpc__)
+ if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
+ printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
+ "addresses do not match in speedo_rx: %p vs. %p "
+ "/ %p.\n", dev->name,
+ bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
+ skb->head, temp);
+#endif
+ sp->rx_ringp[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ if (sp->drv_flags & HasChksum) {
+#if 0
+			u16 csum = get_unaligned((u16*)(skb->head + pkt_len));
+ if (desc_count & 0x8000)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+ }
+ netif_rx(skb);
+ sp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ sp->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++sp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; sp->cur_rx - sp->dirty_rx > 0; sp->dirty_rx++) {
+ struct RxFD *rxf;
+ entry = sp->dirty_rx % RX_RING_SIZE;
+ if (sp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+ /* Get a fresh skbuff to replace the consumed one. */
+ skb = dev_alloc_skb(sp->rx_buf_sz);
+ sp->rx_skbuff[entry] = skb;
+ if (skb == NULL) {
+ sp->rx_ringp[entry] = NULL;
+ sp->alloc_failures++;
+ break; /* Better luck next time! */
+ }
+ rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+ skb->dev = dev;
+ skb_reserve(skb, sizeof(struct RxFD));
+ rxf->rx_buf_addr = virt_to_le32desc(skb->tail);
+ } else {
+ rxf = sp->rx_ringp[entry];
+ }
+ rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
+ rxf->link = 0; /* None yet. */
+ rxf->count = cpu_to_le32((sp->rx_buf_sz - sizeof(struct RxFD)) << 16);
+ sp->last_rxf->link = virt_to_le32desc(rxf);
+ sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
+ sp->last_rxf = rxf;
+ }
+
+ sp->last_rx_time = jiffies;
+ return 0;
+}
+
+static int
+speedo_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (sp->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n"
+			   KERN_DEBUG "%s: Cumulative allocation failures: %d.\n",
+ dev->name, (int)inw(ioaddr + SCBStatus),
+ dev->name, sp->alloc_failures);
+
+ /* Shut off the media monitoring timer. */
+ del_timer(&sp->timer);
+
+ /* Shutting down the chip nicely fails to disable flow control. So.. */
+ outl(PortPartialReset, ioaddr + SCBPort);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = sp->rx_skbuff[i];
+ sp->rx_skbuff[i] = 0;
+ /* Clear the Rx descriptors. */
+ if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+ skb->free = 1;
+#endif
+ dev_free_skb(skb);
+ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ struct sk_buff *skb = sp->tx_skbuff[i];
+ sp->tx_skbuff[i] = 0;
+ /* Clear the Tx descriptors. */
+ if (skb)
+ dev_free_skb(skb);
+ }
+ if (sp->mc_setup_frm) {
+ kfree(sp->mc_setup_frm);
+ sp->mc_setup_frm_len = 0;
+ }
+
+ /* Print a few items for debugging. */
+ if (sp->msg_level & NETIF_MSG_IFDOWN)
+ speedo_show_state(dev);
+
+ /* Alt: acpi_set_pwr_state(pdev, sp->acpi_pwr); */
+ acpi_set_pwr_state(sp->pci_dev, ACPI_D2);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/* The Speedo-3 has an especially awkward and unusable method of getting
+ statistics out of the chip. It takes an unpredictable length of time
+ for the dump-stats command to complete. To avoid a busy-wait loop we
+ update the stats with the previous dump results, and then trigger a
+ new dump.
+
+ These problems are mitigated by the current /proc implementation, which
+ calls this routine first to judge the output length, and then to emit the
+ output.
+
+ Oh, and incoming frames are dropped while executing dump-stats!
+ */
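+/* In outline (a reading of the routine below): if the chip has written the
+   completion marker, the previous dump is folded into sp->stats, the marker
+   is cleared, and a fresh CUDumpStats command is issued; otherwise the
+   cached counters are returned unchanged. */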
+static struct net_device_stats *speedo_get_stats(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Update only if the previous dump finished. */
+ if (sp->lstats.done_marker == le32_to_cpu(0xA007)) {
+ sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats.tx_coll16_errs);
+ sp->stats.tx_window_errors += le32_to_cpu(sp->lstats.tx_late_colls);
+ sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_underruns);
+ sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_lost_carrier);
+ /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats.tx_deferred);*/
+ sp->stats.collisions += le32_to_cpu(sp->lstats.tx_total_colls);
+ sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats.rx_crc_errs);
+ sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats.rx_align_errs);
+ sp->stats.rx_over_errors += le32_to_cpu(sp->lstats.rx_resource_errs);
+ sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
+ sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
+ sp->lstats.done_marker = 0x0000;
+ if (netif_running(dev)) {
+ wait_for_cmd_done(dev);
+ outb(CUDumpStats, ioaddr + SCBCmd);
+ }
+ }
+ return &sp->stats;
+}
+
+static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+ int phy = sp->phy[0] & 0x1f;
+ int saved_acpi;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = phy;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
+ data[3] = mdio_read(dev, data[0], data[1]);
+ acpi_set_pwr_state(sp->pci_dev, saved_acpi);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == sp->phy[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ sp->medialock = (value & 0x9000) ? 0 : 1;
+ if (sp->medialock) {
+ sp->full_duplex = (value & 0x0100) ? 1 : 0;
+ sp->rx_mode = RxInvalidMode;
+ }
+ break;
+ case 4: sp->advertising = value; break;
+ }
+ }
+ saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
+ mdio_write(ioaddr, data[0], data[1], data[2]);
+ acpi_set_pwr_state(sp->pci_dev, saved_acpi);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = sp->msg_level;
+ data32[1] = sp->multicast_filter_limit;
+ data32[2] = sp->max_interrupt_work;
+ data32[3] = sp->rx_copybreak;
+#if 0
+ /* No room in the ioctl() to set these. */
+ data32[4] = txfifo;
+ data32[5] = rxfifo;
+#endif
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ sp->msg_level = data32[0];
+ sp->multicast_filter_limit = data32[1];
+ sp->max_interrupt_work = data32[2];
+ sp->rx_copybreak = data32[3];
+#if 0
+ /* No room in the ioctl() to set these. */
+ if (data32[4] < 16)
+ txfifo = data32[4];
+ if (data32[5] < 16)
+ rxfifo = data32[5];
+#endif
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This is very ugly with Intel chips -- we usually have to execute an
+ entire configuration command, plus process a multicast command.
+ This is complicated. We must put a large configuration command and
+ an arbitrarily-sized multicast command in the transmit list.
+ To minimize the disruption -- the previous command might have already
+ loaded the link -- we convert the current command block, normally a Tx
+ command, into a no-op and link it to the new command.
+*/
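+/* Three cases are handled below:
+   1) the Rx mode itself changed: queue a CmdConfigure block in a Tx slot;
+   2) plain mode with 0-3 multicast addresses: a CmdMulticastList fits in a
+      single tx_ring[] entry, so it is queued in place;
+   3) plain mode with a longer list: a separately allocated mc_setup_frm is
+      built and spliced into the chain through a CmdNOp left in the tx_ring. */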
+static void set_rx_mode(struct net_device *dev)
+{
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ struct descriptor *last_cmd;
+ char new_rx_mode;
+ unsigned long flags;
+ int entry, i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ new_rx_mode = AcceptAllMulticast | AcceptAllPhys;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ dev->mc_count > sp->multicast_filter_limit) {
+ new_rx_mode = AcceptAllMulticast;
+ } else
+ new_rx_mode = 0;
+
+ if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
+ /* The Tx ring is full -- don't add anything! Presumably the new mode
+ is in config_cmd_data and will be added anyway, otherwise we wait
+ for a timer tick or the mode to change again. */
+ sp->rx_mode = RxInvalidMode;
+ return;
+ }
+
+ if (new_rx_mode != sp->rx_mode) {
+ u8 *config_cmd_data;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ entry = sp->cur_tx % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ sp->tx_skbuff[entry] = 0; /* Redundant. */
+ sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
+ sp->cur_tx++;
+ sp->tx_ring[entry].link =
+ virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+ /* We may nominally release the lock here. */
+
+ config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
+ /* Construct a full CmdConfig frame. */
+ memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
+ config_cmd_data[1] = (txfifo << 4) | rxfifo;
+ config_cmd_data[4] = rxdmacount;
+ config_cmd_data[5] = txdmacount + 0x80;
+ config_cmd_data[6] |= (new_rx_mode & AcceptErr) ? 0x80 : 0;
+ config_cmd_data[7] &= (new_rx_mode & AcceptRunt) ? ~0x01 : ~0;
+ if (sp->drv_flags & HasChksum)
+ config_cmd_data[9] |= 1;
+ config_cmd_data[15] |= (new_rx_mode & AcceptAllPhys) ? 1 : 0;
+ config_cmd_data[19] = sp->flow_ctrl ? 0xBD : 0x80;
+ config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
+ config_cmd_data[21] = (new_rx_mode & AcceptAllMulticast) ? 0x0D : 0x05;
+ if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
+ config_cmd_data[15] |= 0x80;
+ config_cmd_data[8] = 0;
+ }
+ /* Trigger the command unit resume. */
+ wait_for_cmd_done(dev);
+ clear_suspend(last_cmd);
+ outb(CUResume, ioaddr + SCBCmd);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sp->last_cmd_time = jiffies;
+ }
+
+ if (new_rx_mode == 0 && dev->mc_count < 4) {
+ /* The simple case of 0-3 multicast list entries occurs often, and
+ fits within one tx_ring[] entry. */
+ struct dev_mc_list *mclist;
+ u16 *setup_params, *eaddrs;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ entry = sp->cur_tx % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ sp->tx_skbuff[entry] = 0;
+ sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
+ sp->cur_tx++;
+ sp->tx_ring[entry].link =
+ virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+ /* We may nominally release the lock here. */
+ sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
+ setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+ *setup_params++ = cpu_to_le16(dev->mc_count*6);
+ /* Fill in the multicast addresses. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ }
+
+ wait_for_cmd_done(dev);
+ clear_suspend(last_cmd);
+ /* Immediately trigger the command unit resume. */
+ outb(CUResume, ioaddr + SCBCmd);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sp->last_cmd_time = jiffies;
+ } else if (new_rx_mode == 0) {
+ struct dev_mc_list *mclist;
+ u16 *setup_params, *eaddrs;
+ struct descriptor *mc_setup_frm = sp->mc_setup_frm;
+ int i;
+
+ if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
+ || sp->mc_setup_frm == NULL) {
+			/* Allocate a full setup frame, 10 bytes + <max addrs>. */
+ if (sp->mc_setup_frm)
+ kfree(sp->mc_setup_frm);
+ sp->mc_setup_busy = 0;
+ sp->mc_setup_frm_len = 10 + sp->multicast_filter_limit*6;
+ sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
+ if (sp->mc_setup_frm == NULL) {
+ printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
+ dev->name);
+ sp->rx_mode = RxInvalidMode; /* We failed, try again. */
+ return;
+ }
+ }
+ /* If we are busy, someone might be quickly adding to the MC list.
+ Try again later when the list updates stop. */
+ if (sp->mc_setup_busy) {
+ sp->rx_mode = RxInvalidMode;
+ return;
+ }
+ mc_setup_frm = sp->mc_setup_frm;
+ /* Fill the setup frame. */
+ if (sp->msg_level & NETIF_MSG_RXFILTER)
+ printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
+ "%d bytes.\n",
+ dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
+ mc_setup_frm->cmd_status =
+ cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
+ /* Link set below. */
+ setup_params = (u16 *)&mc_setup_frm->params;
+ *setup_params++ = cpu_to_le16(dev->mc_count*6);
+ /* Fill in the multicast addresses. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ *setup_params++ = *eaddrs++;
+ }
+
+ /* Disable interrupts while playing with the Tx Cmd list. */
+ spin_lock_irqsave(&sp->lock, flags);
+ entry = sp->cur_tx % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = mc_setup_frm;
+ sp->mc_setup_busy++;
+
+ /* Change the command to a NoOp, pointing to the CmdMulti command. */
+ sp->tx_skbuff[entry] = 0;
+ sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
+ sp->cur_tx++;
+ sp->tx_ring[entry].link = virt_to_le32desc(mc_setup_frm);
+ /* We may nominally release the lock here. */
+
+ /* Set the link in the setup frame. */
+ mc_setup_frm->link =
+ virt_to_le32desc(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));
+
+ wait_for_cmd_done(dev);
+ clear_suspend(last_cmd);
+ /* Immediately trigger the command unit resume. */
+ outb(CUResume, ioaddr + SCBCmd);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sp->last_cmd_time = jiffies;
+ if (sp->msg_level & NETIF_MSG_RXFILTER)
+ printk(KERN_DEBUG " CmdMCSetup frame length %d in entry %d.\n",
+ dev->mc_count, entry);
+ }
+
+ sp->rx_mode = new_rx_mode;
+}
+
+static int speedo_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct speedo_private *np = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ outl(PortPartialReset, ioaddr + SCBPort);
+ break;
+ case DRV_RESUME:
+ speedo_resume(dev);
+ np->rx_mode = RxInvalidMode;
+ np->flow_ctrl = np->partner = 0;
+ set_rx_mode(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_speedo_dev; *devp; devp = next) {
+ next = &((struct speedo_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ case DRV_PWR_DOWN:
+ case DRV_PWR_UP:
+ acpi_set_pwr_state(np->pci_dev, event==DRV_PWR_DOWN ? ACPI_D3:ACPI_D0);
+ break;
+ case DRV_PWR_WakeOn:
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+
+#if defined(MODULE) || (LINUX_VERSION_CODE >= 0x020400)
+
+int init_module(void)
+{
+ int cards_found;
+
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ cards_found = pci_drv_register(&eepro100_drv_id, NULL);
+ if (cards_found < 0)
+ printk(KERN_INFO "eepro100: No cards found, driver not installed.\n");
+ return cards_found;
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&eepro100_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_speedo_dev) {
+ struct speedo_private *sp = (void *)root_speedo_dev->priv;
+ unregister_netdev(root_speedo_dev);
+#ifdef USE_IO_OPS
+ release_region(root_speedo_dev->base_addr,
+ pci_id_tbl[sp->chip_id].io_size);
+#else
+ iounmap((char *)root_speedo_dev->base_addr);
+#endif
+ acpi_set_pwr_state(sp->pci_dev, sp->acpi_pwr);
+ next_dev = sp->next_module;
+ if (sp->priv_addr)
+ kfree(sp->priv_addr);
+ kfree(root_speedo_dev);
+ root_speedo_dev = next_dev;
+ }
+}
+
+#if (LINUX_VERSION_CODE >= 0x020400) && 0
+module_init(init_module);
+module_exit(cleanup_module);
+#endif
+
+#else /* not MODULE */
+
+int eepro100_probe(struct net_device *dev)
+{
+ int cards_found = pci_drv_register(&eepro100_drv_id, dev);
+
+ /* Only emit the version if the driver is being used. */
+ if (cards_found >= 0)
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+
+ return cards_found;
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` eepro100.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c eepro100.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c eepro100.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/eexpress.c b/linux/src/drivers/net/eexpress.c
new file mode 100644
index 0000000..9c816ee
--- /dev/null
+++ b/linux/src/drivers/net/eexpress.c
@@ -0,0 +1,1285 @@
+/* $Id: eexpress.c,v 1.1 1999/04/26 05:52:09 tb Exp $
+ *
+ * Intel EtherExpress device driver for Linux
+ *
+ * Original version written 1993 by Donald Becker
+ * Modularized by Pauline Middelink <middelin@polyware.iaf.nl>
+ * Changed to support io= irq= by Alan Cox <Alan.Cox@linux.org>
+ * Reworked 1995 by John Sullivan <js10039@cam.ac.uk>
+ * More fixes by Philip Blundell <pjb27@cam.ac.uk>
+ * Added the Compaq LTE Alan Cox <alan@redhat.com>
+ *
+ * Note - this driver is experimental still - it has problems on faster
+ * machines. Someone needs to sit down and go through it line by line with
+ * a databook...
+ */
+
+/*
+ * The original EtherExpress driver was just about usable, but
+ * suffered from a long startup delay, a hard limit of 16k memory
+ * usage on the card (EtherExpress 16s have either 32k or 64k),
+ * and random locks under load. The last was particularly annoying
+ * and made running eXceed/W preferable to Linux/XFree. After hacking
+ * through the driver for a couple of days, I had fixed most of the
+ * card handling errors, at the expense of turning the code into
+ * a complete jungle, but still hadn't tracked down the lock-ups.
+ * I had hoped these would be an IP bug, but failed to reproduce them
+ * under other drivers, so decided to start from scratch and rewrite
+ * the driver cleanly. And here it is.
+ *
+ * It's still not quite there, but self-corrects a lot more problems.
+ * the 'CU wedged, resetting...' message shouldn't happen at all, but
+ * at least we recover. It still locks occasionally, any ideas welcome.
+ *
+ * The original startup delay experienced by some people was due to the
+ * first ARP request for the address of the default router getting lost.
+ * (mostly the reply we were getting back was arriving before our
+ * hardware address was set up, or before the configuration sequence
+ * had told the card NOT to strip of the frame header). If you a long
+ * had told the card NOT to strip off the frame header). If you see a long
+ * the original cause has been fixed. However, it is more likely that
+ * you've just locked under this version.
+ *
+ * The main changes are in the 586 initialization procedure (which was
+ * just broken before - the EExp is a strange beasty and needs careful
+ * handling) the receive buffer handling (we now use a non-terminating
+ * circular list of buffers, which stops the card giving us out-of-
+ * resources errors), and the transmit code. The driver is also more
+ * structured, and I have tried to keep the kernel interface separate
+ * from the hardware interface (although some routines naturally want
+ * to do both).
+ *
+ * John Sullivan
+ *
+ * 18/5/95:
+ *
+ * The lock-ups seem to happen when you access card memory after a 586
+ * reset. This happens only 1 in 12 resets, on a random basis, and
+ * completely locks the machine. As far as I can see there is no
+ * workaround possible - the only thing to be done is make sure we
+ * never reset the card *after* booting the kernel - once at probe time
+ * must be sufficient, and we'll just have to put up with that failing
+ * occasionally (or buy a new NIC). By the way, this looks like a
+ * definite card bug, since Intel's own driver for DOS does exactly the
+ * same.
+ *
+ * This bug makes switching in and out of promiscuous mode a risky
+ * business, since we must do a 586 reset each time.
+ */
+
+/*
+ * Sources:
+ *
+ * The original eexpress.c by Donald Becker
+ * Sources: the Crynwr EtherExpress driver source.
+ * the Intel Microcommunications Databook Vol.1 1990
+ *
+ * wavelan.c and i82586.h
+ * This was invaluable for the complete '586 configuration details
+ * and command format.
+ *
+ * The Crynwr sources (again)
+ * Not as useful as the Wavelan driver, but then I had eexpress.c to
+ * go off.
+ *
+ * The Intel EtherExpress 16 ethernet card
+ * Provided the only reason I want to see a working etherexpress driver.
+ * A lot of fixes came from just observing how the card (mis)behaves when
+ * you prod it.
+ *
+ */
+
+static char version[] =
+"eexpress.c: v0.10 04-May-95 John Sullivan <js10039@cam.ac.uk>\n"
+" v0.14 19-May-96 Philip Blundell <phil@tazenda.demon.co.uk>\n"
+" v0.15 04-Aug-98 Alan Cox <alan@redhat.com>\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+
+/*
+ * Not actually used yet - may be implemented when the driver has
+ * been debugged!
+ *
+ * Debug Level Driver Status
+ * 0 Final release
+ * 1 Beta test
+ * 2
+ * 3
+ * 4 Report timeouts & 586 errors (normal debug level)
+ * 5 Report all major events
+ * 6 Dump sent/received packet contents
+ * 7 Report function entry/exit
+ */
+
+#ifndef NET_DEBUG
+#define NET_DEBUG 4
+#endif
+static unsigned int net_debug = NET_DEBUG;
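+/* No module parameter for this appears in this version of the driver, so
+   raising the verbosity presumably means rebuilding with e.g. -DNET_DEBUG=6
+   (the #ifndef guard above allows that override). */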
+
+#undef F_DEB
+
+#include "eth82586.h"
+
+#define PRIV(x) ((struct net_local *)(x)->priv)
+#define EEXP_IO_EXTENT 16
+
+/*
+ * Private data declarations
+ */
+
+struct net_local
+{
+ struct enet_statistics stats;
+ unsigned long init_time; /* jiffies when eexp_hw_init586 called */
+ unsigned short rx_first; /* first rx buf, same as RX_BUF_START */
+ unsigned short rx_last; /* last rx buf */
+ unsigned short tx_head; /* next free tx buf */
+ unsigned short tx_reap; /* first in-use tx buf */
+ unsigned short tx_tail; /* previous tx buf to tx_head */
+ unsigned short tx_link; /* last known-executing tx buf */
+ unsigned short last_tx_restart; /* set to tx_link when we restart the CU */
+ unsigned char started;
+ unsigned char promisc;
+ unsigned short rx_buf_start;
+ unsigned short rx_buf_end;
+ unsigned short num_tx_bufs;
+ unsigned short num_rx_bufs;
+};
+
+unsigned short start_code[] = {
+ 0x0000, /* SCP: set bus to 16 bits */
+ 0x0000,0x0000, /* junk */
+ 0x0000,0x0000, /* address of ISCP (lo,hi) */
+
+ 0x0001, /* ISCP: busy - cleared after reset */
+ 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */
+
+ 0x0000,0x0000, /* SCB: status, commands */
+ 0x0000,0x0000, /* links to first command block, first receive descriptor */
+ 0x0000,0x0000, /* CRC error, alignment error counts */
+ 0x0000,0x0000, /* out of resources, overrun error counts */
+
+ 0x0000,0x0000, /* pad */
+ 0x0000,0x0000,
+
+ 0x0000,Cmd_Config, /* startup configure sequence, at 0x0020 */
+ 0x0032, /* link to next command */
+ 0x080c, /* 12 bytes follow : fifo threshold=8 */
+ 0x2e40, /* don't rx bad frames : SRDY/ARDY => ext. sync. : preamble len=8
+ * take addresses from data buffers : 6 bytes/address */
+ 0x6000, /* default backoff method & priority : interframe spacing = 0x60 */
+ 0xf200, /* slot time=0x200 : max collision retry = 0xf */
+ 0x0000, /* no HDLC : normal CRC : enable broadcast : disable promiscuous/multicast modes */
+	0x003c,	/* minimum frame length = 60 octets */
+
+ 0x0000,Cmd_INT|Cmd_SetAddr,
+ 0x003e, /* link to next command */
+ 0x0000,0x0000,0x0000, /* hardware address placed here, 0x0038 */
+ 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */
+ 0x003e,
+
+ 0x0000
+
+};
+
+#define CONF_LINK 0x0020
+#define CONF_HW_ADDR 0x0038
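+/* Byte offsets into the start_code block above: CONF_LINK marks the start of
+   the configure command ("at 0x0020") and CONF_HW_ADDR the slot that receives
+   the station address ("placed here, 0x0038"); both are presumably used when
+   start_code is written out to card memory in eexp_hw_init586(), later in
+   this file. */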
+
+/* maps irq number to EtherExpress magic value */
+static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
+
+/*
+ * Prototypes for Linux interface
+ */
+
+extern int express_probe(struct device *dev);
+static int eexp_open (struct device *dev);
+static int eexp_close(struct device *dev);
+static struct enet_statistics *eexp_stats(struct device *dev);
+static int eexp_xmit (struct sk_buff *buf, struct device *dev);
+
+static void eexp_irq (int irq, void *dev_addr, struct pt_regs *regs);
+static void eexp_set_multicast(struct device *dev);
+
+/*
+ * Prototypes for hardware access functions
+ */
+
+static void eexp_hw_rx (struct device *dev);
+static void eexp_hw_tx (struct device *dev, unsigned short *buf, unsigned short len);
+static int eexp_hw_probe (struct device *dev,unsigned short ioaddr);
+static unsigned short eexp_hw_readeeprom(unsigned short ioaddr, unsigned char location);
+
+static unsigned short eexp_hw_lasttxstat(struct device *dev);
+static void eexp_hw_txrestart (struct device *dev);
+
+static void eexp_hw_txinit (struct device *dev);
+static void eexp_hw_rxinit (struct device *dev);
+
+static void eexp_hw_init586 (struct device *dev);
+static void eexp_hw_ASICrst (struct device *dev);
+
+/*
+ * Linux interface
+ */
+
+/*
+ * checks for presence of EtherExpress card
+ */
+
+int express_probe(struct device *dev)
+{
+ unsigned short *port,ports[] = { 0x0300,0x0270,0x0320,0x0340,0 };
+ unsigned short ioaddr = dev->base_addr;
+
+ if (ioaddr&0xfe00)
+ return eexp_hw_probe(dev,ioaddr);
+ else if (ioaddr)
+ return ENXIO;
+
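+	/* Autoprobe: four reads of ID_PORT each return one nibble of the board
+	   signature in the high bits, tagged with its position in the low two
+	   bits; a genuine EtherExpress reassembles to 0xbaba. */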
+ for ( port=&ports[0] ; *port ; port++ )
+ {
+ unsigned short sum = 0;
+ int i;
+ for ( i=0 ; i<4 ; i++ )
+ {
+ unsigned short t;
+ t = inb(*port + ID_PORT);
+ sum |= (t>>4) << ((t & 0x03)<<2);
+ }
+ if (sum==0xbaba && !eexp_hw_probe(dev,*port))
+ return 0;
+ }
+ return ENODEV;
+}
+
+/*
+ * open and initialize the adapter, ready for use
+ */
+
+static int eexp_open(struct device *dev)
+{
+ int irq = dev->irq;
+ unsigned short ioaddr = dev->base_addr;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_open()\n", dev->name);
+#endif
+
+ if (!irq || !irqrmap[irq])
+ return -ENXIO;
+
+ if (irq2dev_map[irq] ||
+ /* more consistent, surely? */
+ ((irq2dev_map[irq]=dev),0) ||
+ request_irq(irq,&eexp_irq,0,"eexpress",NULL))
+ return -EAGAIN;
+
+ request_region(ioaddr, EEXP_IO_EXTENT, "eexpress");
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ eexp_hw_init586(dev);
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*
+ * close and disable the interface, leaving
+ * the 586 in reset
+ */
+static int eexp_close(struct device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ int irq = dev->irq;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
+ PRIV(dev)->started = 0;
+ outw(SCB_CUsuspend|SCB_RUsuspend,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ free_irq(irq,NULL);
+ irq2dev_map[irq] = NULL;
+ outb(i586_RST,ioaddr+EEPROM_Ctrl);
+ release_region(ioaddr,16);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Return interface stats
+ */
+
+static struct enet_statistics *eexp_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ /*
+ * Hmmm, this looks a little too easy... The card maintains
+ * some stats in the SCB, and I'm not convinced we're
+ * incrementing the most sensible statistics when the card
+ * returns an error (esp. slow DMA, out-of-resources)
+ */
+ return &lp->stats;
+}
+
+/*
+ * Called to transmit a packet, or to allow us to right ourselves
+ * if the kernel thinks we've died.
+ */
+
+static int eexp_xmit(struct sk_buff *buf, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
+#endif
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ if (dev->tbusy)
+ {
+ /* This will happen, but hopefully not as often as when
+ * tbusy==0. If it happens too much, we probably ought
+ * to think about unwedging ourselves...
+ */
+ if (test_bit(0,(void *)&PRIV(dev)->started))
+ {
+ if ((jiffies - dev->trans_start)>5)
+ {
+ if (lp->tx_link==lp->last_tx_restart)
+ {
+ unsigned short boguscount=200,rsst;
+ printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n",
+ dev->name,inw(ioaddr+SCB_STATUS));
+ eexp_hw_txinit(dev);
+ lp->last_tx_restart = 0;
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ while (!SCB_complete(rsst=inw(ioaddr+SCB_STATUS)))
+ {
+ if (!--boguscount)
+ {
+ boguscount=200;
+ printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n",
+ dev->name,rsst);
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ }
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ else
+ {
+ unsigned short status = inw(ioaddr+SCB_STATUS);
+ if (SCB_CUdead(status))
+ {
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n",
+ dev->name, status, txstatus);
+ eexp_hw_txrestart(dev);
+ }
+ else
+ {
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ if (dev->tbusy && !txstatus)
+ {
+ printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
+ dev->name,status,txstatus);
+ eexp_hw_init586(dev);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ if ((jiffies-lp->init_time)>10)
+ {
+ unsigned short status = inw(ioaddr+SCB_STATUS);
+ printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n",
+ dev->name, status);
+ eexp_hw_init586(dev);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ }
+ }
+
+ if (buf==NULL)
+ {
+ unsigned short status = inw(ioaddr+SCB_STATUS);
+ unsigned short txstatus = eexp_hw_lasttxstat(dev);
+ if (SCB_CUdead(status))
+ {
+ printk(KERN_WARNING "%s: CU has died! status %04x %04x, attempting to restart...\n",
+ dev->name, status, txstatus);
+ lp->stats.tx_errors++;
+ eexp_hw_txrestart(dev);
+ }
+ dev_tint(dev);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ dev_kfree_skb(buf, FREE_WRITE);
+ return 0;
+ }
+
+ if (set_bit(0,(void *)&dev->tbusy))
+ {
+ lp->stats.tx_dropped++;
+ }
+ else
+ {
+ unsigned short length = (ETH_ZLEN < buf->len) ? buf->len : ETH_ZLEN;
+ unsigned short *data = (unsigned short *)buf->data;
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ eexp_hw_tx(dev,data,length);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ }
+ dev_kfree_skb(buf, FREE_WRITE);
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ return 0;
+}
+
+/*
+ * Handle an EtherExpress interrupt
+ * If we've finished initializing, start the RU and CU up.
+ * If we've already started, reap tx buffers, handle any received packets,
+ * check to make sure we've not become wedged.
+ */
+
+static void eexp_irq(int irq, void *dev_info, struct pt_regs *regs)
+{
+ struct device *dev = irq2dev_map[irq];
+ struct net_local *lp;
+ unsigned short ioaddr,status,ack_cmd;
+ unsigned short old_rp,old_wp;
+
+ if (dev==NULL)
+ {
+ printk(KERN_WARNING "net_interrupt(): irq %d for unknown device caught by EExpress\n",irq);
+ return;
+ }
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: interrupt\n", dev->name);
+#endif
+
+ dev->interrupt = 1; /* should this be reset on exit? */
+
+ lp = (struct net_local *)dev->priv;
+ ioaddr = dev->base_addr;
+
+ outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
+ old_rp = inw(ioaddr+READ_PTR);
+ old_wp = inw(ioaddr+WRITE_PTR);
+ status = inw(ioaddr+SCB_STATUS);
+ ack_cmd = SCB_ack(status);
+
+ if (PRIV(dev)->started==0 && SCB_complete(status))
+ {
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: SCBcomplete event received\n", dev->name);
+#endif
+ while (SCB_CUstat(status)==2)
+ status = inw_p(ioaddr+SCB_STATUS);
+#if NET_DEBUG > 4
+ printk(KERN_DEBUG "%s: CU went non-active (status = %08x)\n", dev->name, status);
+#endif
+ PRIV(dev)->started=1;
+ outw_p(lp->tx_link,ioaddr+SCB_CBL);
+ outw_p(PRIV(dev)->rx_buf_start,ioaddr+SCB_RFA);
+ ack_cmd |= SCB_CUstart | SCB_RUstart;
+ }
+ else if (PRIV(dev)->started)
+ {
+ unsigned short txstatus;
+ txstatus = eexp_hw_lasttxstat(dev);
+ }
+
+ if (SCB_rxdframe(status))
+ {
+ eexp_hw_rx(dev);
+ }
+
+ if ((PRIV(dev)->started&2)!=0 && SCB_RUstat(status)!=4)
+ {
+ printk(KERN_WARNING "%s: RU stopped status %04x, restarting...\n",
+ dev->name,status);
+ lp->stats.rx_errors++;
+ eexp_hw_rxinit(dev);
+ outw(PRIV(dev)->rx_buf_start,ioaddr+SCB_RFA);
+ ack_cmd |= SCB_RUstart;
+ }
+ else if (PRIV(dev)->started==1 && SCB_RUstat(status)==4)
+ PRIV(dev)->started|=2;
+
+ outw(ack_cmd,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+ outb(SIRQ_en|irqrmap[irq],ioaddr+SET_IRQ);
+ dev->interrupt = 0;
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: leaving eexp_irq()\n", dev->name);
+#endif
+ return;
+}
+
+/*
+ * Hardware access functions
+ */
+
+/*
+ * Check all the receive buffers, and hand any received packets
+ * to the upper levels. Basic sanity check on each frame
+ * descriptor
+ */
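+/*
+ * Card buffer memory is reached through the READ_PTR/WRITE_PTR windows:
+ * after writing an offset to the pointer register, plain inw()/outw() on
+ * the data port access successive words (the pointer appears to
+ * auto-increment), which is why each routine saves and restores the old
+ * pointer values around its accesses.
+ */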
+
+static void eexp_hw_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short old_rp = inw(ioaddr+READ_PTR);
+ unsigned short rx_block = lp->rx_first;
+ unsigned short boguscount = lp->num_rx_bufs;
+
+#if NET_DEBUG > 6
+ printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name);
+#endif
+
+ while (outw(rx_block,ioaddr+READ_PTR),boguscount--)
+ {
+ unsigned short status = inw(ioaddr);
+ unsigned short rfd_cmd = inw(ioaddr);
+ unsigned short rx_next = inw(ioaddr);
+ unsigned short pbuf = inw(ioaddr);
+ unsigned short pkt_len;
+
+ if (FD_Done(status))
+ {
+ outw(pbuf,ioaddr+READ_PTR);
+ pkt_len = inw(ioaddr);
+
+ if (rfd_cmd!=0x0000 || pbuf!=rx_block+0x16
+ || (pkt_len & 0xc000)!=0xc000)
+ {
+ printk(KERN_WARNING "%s: Rx frame at %04x corrupted, status %04x, cmd %04x, "
+ "next %04x, pbuf %04x, len %04x\n",dev->name,rx_block,
+ status,rfd_cmd,rx_next,pbuf,pkt_len);
+ boguscount++;
+ continue;
+ }
+ else if (!FD_OK(status))
+ {
+ lp->stats.rx_errors++;
+ if (FD_CRC(status))
+ lp->stats.rx_crc_errors++;
+ if (FD_Align(status))
+ lp->stats.rx_frame_errors++;
+ if (FD_Resrc(status))
+ lp->stats.rx_fifo_errors++;
+ if (FD_DMA(status))
+ lp->stats.rx_over_errors++;
+ if (FD_Short(status))
+ lp->stats.rx_length_errors++;
+ }
+ else
+ {
+ struct sk_buff *skb;
+ pkt_len &= 0x3fff;
+ skb = dev_alloc_skb(pkt_len+16);
+ if (skb == NULL)
+ {
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2);
+ outw(pbuf+10,ioaddr+READ_PTR);
+ insw(ioaddr,skb_put(skb,pkt_len),(pkt_len+1)>>1);
+ skb->protocol = eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ outw(rx_block,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ }
+ rx_block = rx_next;
+ }
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/*
+ * Hand a packet to the card for transmission
+ * If we get here, we MUST have already checked
+ * to make sure there is room in the transmit
+ * buffer region
+ */
+
+static void eexp_hw_tx(struct device *dev, unsigned short *buf, unsigned short len)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+
+ outw(lp->tx_head,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(Cmd_INT|Cmd_Xmit,ioaddr);
+ outw(lp->tx_head+0x08,ioaddr);
+ outw(lp->tx_head+0x0e,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(lp->tx_head+0x08,ioaddr);
+ outw(0x8000|len,ioaddr);
+ outw(-1,ioaddr);
+ outw(lp->tx_head+0x16,ioaddr);
+ outw(0,ioaddr);
+ outsw(ioaddr,buf,(len+1)>>1);
+ outw(lp->tx_tail+0x0c,ioaddr+WRITE_PTR);
+ outw(lp->tx_head,ioaddr);
+ dev->trans_start = jiffies;
+ lp->tx_tail = lp->tx_head;
+ if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_head = TX_BUF_START;
+ else
+ lp->tx_head += TX_BUF_SIZE;
+ if (lp->tx_head != lp->tx_reap)
+ dev->tbusy = 0;
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/*
+ * Sanity check the suspected EtherExpress card
+ * Read hardware address, reset card, size memory and
+ * initialize buffer memory pointers. These should
+ * probably be held in dev->priv, in case someone has 2
+ * differently configured cards in their box (Arghhh!)
+ */
+
+static int eexp_hw_probe(struct device *dev, unsigned short ioaddr)
+{
+ unsigned short hw_addr[3];
+ int i;
+ unsigned char *chw_addr = (unsigned char *)hw_addr;
+
+ printk("%s: EtherExpress at %#x, ",dev->name,ioaddr);
+
+ hw_addr[0] = eexp_hw_readeeprom(ioaddr,2);
+ hw_addr[1] = eexp_hw_readeeprom(ioaddr,3);
+ hw_addr[2] = eexp_hw_readeeprom(ioaddr,4);
+
+ /* Standard Address or Compaq LTE Address */
+ if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) ||
+ (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00))))
+ {
+ printk("rejected: invalid address %04x%04x%04x\n",
+ hw_addr[2],hw_addr[1],hw_addr[0]);
+ return -ENODEV;
+ }
+
+ dev->base_addr = ioaddr;
+ for ( i=0 ; i<6 ; i++ )
+ dev->dev_addr[i] = chw_addr[5-i];
+
+ {
+ char irqmap[]={0, 9, 3, 4, 5, 10, 11, 0};
+ char *ifmap[]={"AUI", "BNC", "10baseT"};
+ enum iftype {AUI=0, BNC=1, TP=2};
+ unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
+
+ dev->irq = irqmap[setupval>>13];
+ dev->if_port = !(setupval & 0x1000) ? AUI :
+ eexp_hw_readeeprom(ioaddr,5) & 0x1 ? TP : BNC;
+
+ printk("IRQ %d, Interface %s, ",dev->irq,ifmap[dev->if_port]);
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ outb(0,ioaddr+SET_IRQ);
+ }
+
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (!dev->priv)
+ return -ENOMEM;
+
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ eexp_hw_ASICrst(dev);
+
+ {
+ unsigned short i586mso = 0x023e;
+ unsigned short old_wp,old_rp,old_a0,old_a1;
+ unsigned short a0_0,a1_0,a0_1,a1_1;
+
+ old_wp = inw(ioaddr+WRITE_PTR);
+ old_rp = inw(ioaddr+READ_PTR);
+ outw(0x8000+i586mso,ioaddr+READ_PTR);
+ old_a1 = inw(ioaddr);
+ outw(i586mso,ioaddr+READ_PTR);
+ old_a0 = inw(ioaddr);
+ outw(i586mso,ioaddr+WRITE_PTR);
+ outw(0x55aa,ioaddr);
+ outw(i586mso,ioaddr+READ_PTR);
+ a0_0 = inw(ioaddr);
+ outw(0x8000+i586mso,ioaddr+WRITE_PTR);
+ outw(0x5a5a,ioaddr);
+ outw(0x8000+i586mso,ioaddr+READ_PTR);
+ a1_0 = inw(ioaddr);
+ outw(i586mso,ioaddr+READ_PTR);
+ a0_1 = inw(ioaddr);
+ outw(i586mso,ioaddr+WRITE_PTR);
+ outw(0x1234,ioaddr);
+ outw(0x8000+i586mso,ioaddr+READ_PTR);
+ a1_1 = inw(ioaddr);
+
+ if ((a0_0 != a0_1) || (a1_0 != a1_1) ||
+ (a1_0 != 0x5a5a) || (a0_0 != 0x55aa))
+ {
+ printk("32k\n");
+ PRIV(dev)->rx_buf_end = 0x7ff6;
+ PRIV(dev)->num_tx_bufs = 4;
+ }
+ else
+ {
+ printk("64k\n");
+ PRIV(dev)->num_tx_bufs = 8;
+ PRIV(dev)->rx_buf_start = TX_BUF_START + (PRIV(dev)->num_tx_bufs*TX_BUF_SIZE);
+ PRIV(dev)->rx_buf_end = 0xfff6;
+ }
+
+ outw(0x8000+i586mso,ioaddr+WRITE_PTR);
+ outw(old_a1,ioaddr);
+ outw(i586mso,ioaddr+WRITE_PTR);
+ outw(old_a0,ioaddr);
+ outw(old_wp,ioaddr+WRITE_PTR);
+ outw(old_rp,ioaddr+READ_PTR);
+ }
+
+ if (net_debug)
+ printk("%s", version);
+ dev->open = eexp_open;
+ dev->stop = eexp_close;
+ dev->hard_start_xmit = eexp_xmit;
+ dev->get_stats = eexp_stats;
+ dev->set_multicast_list = &eexp_set_multicast;
+ ether_setup(dev);
+ return 0;
+}
+
+/*
+ * Read a word from eeprom location (0-63?)
+ */
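+/*
+ * A reading of the loop below: the read command (0x180 | location) is
+ * clocked out MSB-first on EC_Wr, then 16 result bits are clocked in from
+ * EC_Rd, with chip select asserted via EC_CS and the i586 held in reset
+ * (i586_RST) throughout.
+ */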
+static unsigned short eexp_hw_readeeprom(unsigned short ioaddr, unsigned char location)
+{
+ unsigned short cmd = 0x180|(location&0x7f);
+ unsigned short rval = 0,wval = EC_CS|i586_RST;
+ int i;
+
+ outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl);
+ for ( i=0x100 ; i ; i>>=1 )
+ {
+ if (cmd&i)
+ wval |= EC_Wr;
+ else
+ wval &= ~EC_Wr;
+
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ wval &= ~EC_Wr;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ for ( i=0x8000 ; i ; i>>=1 )
+ {
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd)
+ rval |= i;
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ }
+ wval &= ~EC_CS;
+ outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ outb(wval,ioaddr+EEPROM_Ctrl);
+ eeprom_delay();
+ return rval;
+}
+
+/*
+ * Reap tx buffers and return last transmit status.
+ * If the return value is 0 then either:
+ * a) we're not transmitting anything, so why are we here?
+ * b) we've died.
+ * Otherwise, Stat_Busy(return) means we've still got some packets
+ * to transmit, and Stat_Done(return) means our buffers should be
+ * empty again.
+ */
+
+static unsigned short eexp_hw_lasttxstat(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_rp = inw(ioaddr+READ_PTR);
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short tx_block = lp->tx_reap;
+ unsigned short status;
+
+ if (!test_bit(0,(void *)&dev->tbusy) && lp->tx_head==lp->tx_reap)
+ return 0x0000;
+
+ do
+ {
+ outw(tx_block,ioaddr+READ_PTR);
+ status = inw(ioaddr);
+ if (!Stat_Done(status))
+ {
+ lp->tx_link = tx_block;
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+ return status;
+ }
+ else
+ {
+ lp->last_tx_restart = 0;
+ lp->stats.collisions += Stat_NoColl(status);
+ if (!Stat_OK(status))
+ {
+ if (Stat_Abort(status))
+ lp->stats.tx_aborted_errors++;
+ if (Stat_TNoCar(status) || Stat_TNoCTS(status))
+ lp->stats.tx_carrier_errors++;
+ if (Stat_TNoDMA(status))
+ lp->stats.tx_fifo_errors++;
+ }
+ else
+ lp->stats.tx_packets++;
+ }
+ if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
+ lp->tx_reap = tx_block = TX_BUF_START;
+ else
+ lp->tx_reap = tx_block += TX_BUF_SIZE;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+ while (lp->tx_reap != lp->tx_head);
+
+ lp->tx_link = lp->tx_tail + 0x08;
+ outw(old_rp,ioaddr+READ_PTR);
+ outw(old_wp,ioaddr+WRITE_PTR);
+
+ return status;
+}
+
+/*
+ * This should never happen. It is called when some higher
+ * routine detects the CU has stopped, to try to restart
+ * it from the last packet we knew we were working on,
+ * or the idle loop if we had finished for the time being.
+ */
+
+static void eexp_hw_txrestart(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+
+ lp->last_tx_restart = lp->tx_link;
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outw(0,ioaddr+SCB_STATUS);
+ outb(0,ioaddr+SIGNAL_CA);
+
+ {
+ unsigned short boguscount=50,failcount=5;
+ while (!inw(ioaddr+SCB_STATUS))
+ {
+ if (!--boguscount)
+ {
+ if (--failcount)
+ {
+ printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n",
+ dev->name, inw(ioaddr+SCB_STATUS), inw(ioaddr+SCB_CMD));
+ outw(lp->tx_link,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ boguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name);
+ eexp_hw_init586(dev);
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ return;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Writes down the list of transmit buffers into card
+ * memory. Initially the buffers are separate; repeated
+ * transmits link them into a circular list, so that the CU
+ * can stay constantly active, and we unlink them as we reap
+ * transmitted packet buffers, so the CU doesn't loop and
+ * endlessly retransmit packets. (Try hacking the driver
+ * to send continuous broadcast messages, say ARP requests
+ * on a subnet with Windows boxes running on Novell and
+ * LAN Workplace with EMM386. Amusing to watch them all die
+ * horribly leaving the Linux boxes up!)
+ */
+
+static void eexp_hw_txinit(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short tx_block = TX_BUF_START;
+ unsigned short curtbuf;
+
+ for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
+ {
+ outw(tx_block,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(Cmd_INT|Cmd_Xmit,ioaddr);
+ outw(tx_block+0x08,ioaddr);
+ outw(tx_block+0x0e,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(tx_block+0x08,ioaddr);
+ outw(0x8000,ioaddr);
+ outw(-1,ioaddr);
+ outw(tx_block+0x16,ioaddr);
+ outw(0x0000,ioaddr);
+ tx_block += TX_BUF_SIZE;
+ }
+ lp->tx_head = TX_BUF_START;
+ lp->tx_reap = TX_BUF_START;
+ lp->tx_tail = tx_block - TX_BUF_SIZE;
+ lp->tx_link = lp->tx_tail + 0x08;
+ lp->rx_buf_start = tx_block;
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/* is this a standard test pattern, or dbecker randomness? */
+
+unsigned short rx_words[] =
+{
+ 0xfeed,0xf00d,0xf001,0x0505,0x2424,0x6565,0xdeaf
+};
+
+/*
+ * Write the circular list of receive buffer descriptors to
+ * card memory. Note, we no longer mark the end of the list,
+ * so if all the buffers fill up, the 82586 will loop until
+ * we free one. This may sound dodgy, but it works, and
+ * it makes the error detection in the interrupt handler
+ * a lot simpler.
+ */
+
+static void eexp_hw_rxinit(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short old_wp = inw(ioaddr+WRITE_PTR);
+ unsigned short rx_block = lp->rx_buf_start;
+
+ lp->num_rx_bufs = 0;
+ lp->rx_first = rx_block;
+ do
+ {
+ lp->num_rx_bufs++;
+ outw(rx_block,ioaddr+WRITE_PTR);
+ outw(0x0000,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(rx_block+RX_BUF_SIZE,ioaddr);
+ outw(rx_block+0x16,ioaddr);
+ outsw(ioaddr, rx_words, sizeof(rx_words)>>1);
+ outw(0x8000,ioaddr);
+ outw(-1,ioaddr);
+ outw(rx_block+0x20,ioaddr);
+ outw(0x0000,ioaddr);
+ outw(0x8000|(RX_BUF_SIZE-0x20),ioaddr);
+ lp->rx_last = rx_block;
+ rx_block += RX_BUF_SIZE;
+ } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
+
+ outw(lp->rx_last+4,ioaddr+WRITE_PTR);
+ outw(lp->rx_first,ioaddr);
+
+ outw(old_wp,ioaddr+WRITE_PTR);
+}
+
+/*
+ * Reset the 586, fill memory (including calls to
+ * eexp_hw_rxinit() and eexp_hw_txinit()), release the
+ * reset, and start the configuration sequence. We don't
+ * wait for this to finish, but allow the interrupt handler
+ * to start the CU and RU for us. We can't start the
+ * receive/transmission system up before we know that the
+ * hardware is configured correctly.
+ */
+static void eexp_hw_init586(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+
+#if NET_DEBUG > 6
+ printk("%s: eexp_hw_init586()\n", dev->name);
+#endif
+
+ lp->started = 0;
+ set_loopback;
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ outb_p(i586_RST,ioaddr+EEPROM_Ctrl);
+ udelay(2000); /* delay 2ms */
+ {
+ unsigned long ofs;
+ for (ofs = 0; ofs < lp->rx_buf_end; ofs += 32) {
+ unsigned long i;
+ outw_p(ofs, ioaddr+SM_PTR);
+ for (i = 0; i < 16; i++) {
+ outw_p(0, ioaddr+SM_ADDR(i<<1));
+ }
+ }
+ }
+
+ outw_p(lp->rx_buf_end,ioaddr+WRITE_PTR);
+ start_code[28] = (dev->flags & IFF_PROMISC)?(start_code[28] | 1):(start_code[28] & ~1);
+ lp->promisc = dev->flags & IFF_PROMISC;
+ /* We may die here */
+ outsw(ioaddr, start_code, sizeof(start_code)>>1);
+ outw(CONF_HW_ADDR,ioaddr+WRITE_PTR);
+ outsw(ioaddr,dev->dev_addr,3);
+ eexp_hw_txinit(dev);
+ eexp_hw_rxinit(dev);
+ outw(0,ioaddr+WRITE_PTR);
+ outw(1,ioaddr);
+ outb(0,ioaddr+EEPROM_Ctrl);
+ outw(0,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ {
+ unsigned short rboguscount=50,rfailcount=5;
+ while (outw(0,ioaddr+READ_PTR),inw(ioaddr))
+ {
+ if (!--rboguscount)
+ {
+ printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n",
+ dev->name);
+ outw(0,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ rboguscount = 100;
+ if (!--rfailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 not responding, giving up.\n",
+ dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ outw(CONF_LINK,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(0xf000|SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ {
+ unsigned short iboguscount=50,ifailcount=5;
+ while (!inw(ioaddr+SCB_STATUS))
+ {
+ if (!--iboguscount)
+ {
+ if (--ifailcount)
+ {
+ printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n",
+ dev->name, inw(ioaddr+SCB_STATUS), inw(ioaddr+SCB_CMD));
+ outw(CONF_LINK,ioaddr+SCB_CBL);
+ outw(0,ioaddr+SCB_STATUS);
+ outw(0xf000|SCB_CUstart,ioaddr+SCB_CMD);
+ outb(0,ioaddr+SIGNAL_CA);
+ iboguscount = 100;
+ }
+ else
+ {
+ printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name);
+ return;
+ }
+ }
+ }
+ }
+
+ outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
+ clear_loopback;
+ lp->init_time = jiffies;
+#if NET_DEBUG > 6
+ printk("%s: leaving eexp_hw_init586()\n", dev->name);
+#endif
+ return;
+}
+
+/*
+ * Completely reset the EtherExpress hardware. We will most likely get
+ * an interrupt during this whether we want one or not. It is best,
+ * therefore, to call this before we have claimed the IRQ with request_irq().
+ */
+
+static void eexp_hw_ASICrst(struct device *dev)
+{
+ unsigned short ioaddr = dev->base_addr;
+ unsigned short wrval = 0x0001,succount=0,boguscount=500;
+
+ outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
+
+ PRIV(dev)->started = 0;
+ outb(ASIC_RST|i586_RST,ioaddr+EEPROM_Ctrl);
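+ /* (Comment added for clarity.) Wait for the reset to take effect by
+ writing an incrementing pattern to shared-memory word 0 and requiring
+ 20 consecutive successful read-backs; if it keeps failing, warn and
+ re-assert the reset. */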
+ while (succount<20)
+ {
+ if (wrval == 0xffff)
+ wrval = 0x0001;
+ outw(0,ioaddr+WRITE_PTR);
+ outw(wrval,ioaddr);
+ outw(0,ioaddr+READ_PTR);
+ if (wrval++ == inw(ioaddr))
+ succount++;
+ else
+ {
+ succount = 0;
+ if (!boguscount--)
+ {
+ boguscount = 500;
+ printk("%s: Having problems resetting EtherExpress ASIC, continuing...\n",
+ dev->name);
+ wrval = 0x0001;
+ outb(ASIC_RST|i586_RST,ioaddr+EEPROM_Ctrl);
+ }
+ }
+ }
+ outb(i586_RST,ioaddr+EEPROM_Ctrl);
+}
+
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * We have to do a complete 586 restart for this to take effect.
+ * At the moment only promiscuous mode is supported.
+ */
+static void
+eexp_set_multicast(struct device *dev)
+{
+ if ((dev->flags & IFF_PROMISC) != PRIV(dev)->promisc)
+ eexp_hw_init586(dev);
+}
+
+
+/*
+ * MODULE stuff
+ */
+#ifdef MODULE
+
+#define EEXP_MAX_CARDS 4 /* max number of cards to support */
+#define NAMELEN 8 /* max length of dev->name (inc null) */
+
+static char namelist[NAMELEN * EEXP_MAX_CARDS] = { 0, };
+
+static struct device dev_eexp[EEXP_MAX_CARDS] =
+{
+ { NULL, /* will allocate dynamically */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, express_probe },
+};
+
+int irq[EEXP_MAX_CARDS] = {0, };
+int io[EEXP_MAX_CARDS] = {0, };
+
+/* Ideally the user would give us io=, irq= for every card. If any parameters
+ * are specified, we verify and then use them. If no parameters are given, we
+ * autoprobe for one card only.
+ */
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+ struct device *dev = &dev_eexp[this_dev];
+ dev->name = namelist + (NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ if (io[this_dev] == 0) {
+ if (this_dev) break;
+ printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]);
+ if (found != 0) return 0;
+ return -ENXIO;
+ }
+ found++;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
+ struct device *dev = &dev_eexp[this_dev];
+ if (dev->priv != NULL) {
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(dev->base_addr, EEXP_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif
+
+/*
+ * Local Variables:
+ * c-file-style: "linux"
+ * tab-width: 8
+ * compile-command: "gcc -D__KERNEL__ -I/discs/bibble/src/linux-1.3.69/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strength-reduce -pipe -m486 -DCPU=486 -DMODULE -c eexpress.c"
+ * End:
+ */
diff --git a/linux/src/drivers/net/epic100.c b/linux/src/drivers/net/epic100.c
new file mode 100644
index 0000000..b44f291
--- /dev/null
+++ b/linux/src/drivers/net/epic100.c
@@ -0,0 +1,1560 @@
+/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
+/*
+ Written/copyright 1997-2002 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the SMC83c170/175 "EPIC" series, as used on the
+ SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Information and updates available at
+ http://www.scyld.com/network/epic100.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version[] =
+"epic100.c:v1.18 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/epic100.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 32;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Used to set a special media speed or duplex.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
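+
+/* Hedged example (added, not from the original driver): using the option
+ * encoding described above, forcing the first card to 100 Mbps full duplex
+ * while leaving the rest on autonegotiation could look like the initializers
+ * below; the same values can also be given as module parameters at load time.
+ */
+#if 0 /* illustration only */
+static int options[MAX_UNITS] = {0x200 | 0x20, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = { 1, -1, -1, -1, -1, -1, -1, -1};
+#endif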
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for operational efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Too-large receive rings only waste memory. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
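+
+/* Illustrative note (added): because the ring sizes above are powers of two,
+ * the index arithmetic used throughout the driver, e.g. cur_tx % TX_RING_SIZE,
+ * reduces to a single AND with a mask. A sketch of that identity, not used by
+ * the driver:
+ */
+#if 0 /* illustration only */
+static unsigned int ring_wrap(unsigned int index, unsigned int ring_size)
+{
+	/* valid only when ring_size is a power of two: x % n == x & (n - 1) */
+	return index & (ring_size - 1);
+}
+#endif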
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+/* Bytes transferred to chip before transmission starts. */
+/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
+#define TX_FIFO_THRESH 256
+#define RX_FIFO_THRESH 1 /* 0-3: 0==32, 1==64, 2==96, 3==128 bytes */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex.\n"
+"Values are 0x10/0x20/0x100/0x200.");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the SMC "EPIC/100", the SMC
+single-chip Ethernet controllers for PCI. This chip is used on
+the SMC EtherPower II boards.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+IVb. References
+
+http://www.smsc.com/main/datasheets/83c171.pdf
+http://www.smsc.com/main/datasheets/83c175.pdf
+http://scyld.com/expert/NWay.html
+http://www.national.com/pf/DP/DP83840A.html
+
+IVc. Errata
+
+*/
+
+static void *epic_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int epic_pwr_event(void *dev_instance, int event);
+
+enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
+
+#define EPIC_TOTAL_SIZE 0x100
+#ifdef USE_IO_OPS
+#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
+#else
+#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"SMSC EPIC 83c172", {0x000510B8, 0xffffffff, 0,0, 9,0xff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
+ {"SMSC EPIC 83c171", {0x000510B8, 0xffffffff, 0,0, 6,0xff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
+ {"SMSC EPIC/100 83c170", {0x000510B8, 0xffffffff, 0x0ab41092, 0xffffffff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN, },
+ {"SMSC EPIC/100 83c170", {0x000510B8, 0xffffffff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR, },
+ {"SMSC EPIC/C 83c175", {0x000610B8, 0xffffffff},
+ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
+ {0,},
+};
+
+struct drv_id_info epic_drv_id = {
+ "epic100", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ epic_probe1, epic_pwr_event };
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* Offsets to registers, using the (ugh) SMC names. */
+enum epic_registers {
+ COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
+ PCIBurstCnt=0x18,
+ TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
+ MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
+ LAN0=64, /* MAC address. */
+ MC0=80, /* Multicast filter table. */
+ RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
+ PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatus {
+ TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
+ PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
+ RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
+ TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
+ RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
+};
+enum CommandBits {
+ StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
+ StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
+};
+
+/* The EPIC100 Rx and Tx buffer descriptors. */
+
+struct epic_tx_desc {
+ u32 txstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+struct epic_rx_desc {
+ u32 rxstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+};
+
+enum desc_status_bits {
+ DescOwn=0x8000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct epic_private {
+ /* Tx and Rx rings first so that they remain paragraph aligned. */
+ struct epic_rx_desc rx_ring[RX_RING_SIZE];
+ struct epic_tx_desc tx_ring[TX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+
+ /* Ring pointers. */
+ spinlock_t lock; /* Group with Tx control cache line. */
+ unsigned int cur_tx, dirty_tx;
+ struct descriptor *last_tx_desc;
+
+ unsigned int cur_rx, dirty_rx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct descriptor *last_rx_desc;
+ long last_rx_time; /* Last Rx, in jiffies. */
+ int rx_copybreak;
+
+ int msg_level;
+ int max_interrupt_work;
+ struct pci_dev *pci_dev; /* PCI bus location. */
+ int chip_id, chip_flags;
+
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ int tx_threshold;
+ int genctl; /* Including Rx threshold. */
+ u32 cur_rx_mode;
+ unsigned char mc_filter[8];
+ int multicast_filter_limit;
+
+ signed char phys[4]; /* MII device addresses. */
+ u16 mii_bmcr; /* MII control register */
+ u16 advertising; /* NWay media advertisement */
+ int mii_phy_cnt;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Current duplex setting. */
+ unsigned int duplex_lock:1; /* Duplex forced by the user. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+};
+
+static int epic_open(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
+static void epic_start(struct net_device *dev, int restart);
+static void check_media(struct net_device *dev);
+static void epic_timer(unsigned long data);
+static void epic_tx_timeout(struct net_device *dev);
+static void epic_init_ring(struct net_device *dev);
+static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int epic_rx(struct net_device *dev);
+static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int epic_close(struct net_device *dev);
+static struct net_device_stats *epic_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+
+/* A list of all installed EPIC devices, for removing the driver module. */
+static struct net_device *root_epic_dev = NULL;
+
+static void *epic_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct epic_private *ep;
+ void *priv_mem;
+ int i, option = 0, duplex = 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ if (dev->mem_start) {
+ option = dev->mem_start;
+ duplex = (dev->mem_start & 16) ? 1 : 0;
+ } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
+ if (options[card_idx] >= 0)
+ option = options[card_idx];
+ if (full_duplex[card_idx] >= 0)
+ duplex = full_duplex[card_idx];
+ }
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ printk(KERN_INFO "%s: %s at %#lx, %2.2x:%2.2x IRQ %d, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+ pci_bus_number(pdev), pci_devfn(pdev)>>3, dev->irq);
+
+ /* Bring the chip out of low-power mode. */
+ outl(0x4200, ioaddr + GENCTL);
+ /* Magic from SMSC app note 7.15 */
+ outl(0x0008, ioaddr + TEST1);
+
+ /* Turn on the MII transceiver. */
+ outl(0x12, ioaddr + MIICfg);
+ if (pci_id_tbl[chip_idx].drv_flags & NO_MII)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ outl(0x0200, ioaddr + GENCTL);
+
+ if (((1 << debug) - 1) & NETIF_MSG_MISC) {
+ printk(KERN_DEBUG "%s: EEPROM contents\n", dev->name);
+ for (i = 0; i < 64; i++)
+ printk(" %4.4x%s", read_eeprom(ioaddr, i),
+ i % 16 == 15 ? "\n" : "");
+ }
+
+ /* Note: the '175 does not have a serial EEPROM. */
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+ /* Make certain that elements, e.g. descriptor lists, are aligned. */
+ priv_mem = kmalloc(sizeof(*ep) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ dev->priv = ep = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(ep, 0, sizeof(*ep));
+ ep->priv_addr = priv_mem;
+
+ ep->next_module = root_epic_dev;
+ root_epic_dev = dev;
+
+ ep->pci_dev = pdev;
+ ep->chip_id = chip_idx;
+ ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
+ ep->msg_level = (1 << debug) - 1;
+ ep->rx_copybreak = rx_copybreak;
+ ep->max_interrupt_work = max_interrupt_work;
+ ep->multicast_filter_limit = multicast_filter_limit;
+
+ /* The lower four bits are non-TP media types. */
+ if (option > 0) {
+ if (option & 0x220)
+ ep->duplex_lock = ep->full_duplex = 1;
+ ep->default_port = option & 0xFFFF;
+ ep->medialock = 1;
+ }
+ if (duplex) {
+ ep->duplex_lock = ep->full_duplex = 1;
+ printk(KERN_INFO "%s: Forced full duplex operation requested.\n",
+ dev->name);
+ }
+ dev->if_port = ep->default_port;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes much time and no cards have external MII. */
+ {
+ int phy, phy_idx = 0;
+ for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ ep->phys[phy_idx++] = phy;
+ printk(KERN_INFO "%s: Located MII transceiver #%d control "
+ "%4.4x status %4.4x.\n",
+ dev->name, phy, mdio_read(dev, phy, 0), mii_status);
+ }
+ }
+ ep->mii_phy_cnt = phy_idx;
+ }
+ if (ep->mii_phy_cnt == 0 && ! (ep->chip_flags & NO_MII)) {
+ printk(KERN_WARNING "%s: ***WARNING***: No MII transceiver found!\n",
+ dev->name);
+ /* Use the known PHY address of the EPII. */
+ ep->phys[0] = 3;
+ }
+
+ if (ep->mii_phy_cnt) {
+ int phy = ep->phys[0];
+ int xcvr = ep->default_port & 0x330;
+ if (xcvr) {
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (xcvr & 0x300 ? 100 : 10),
+ (xcvr & 0x220 ? "full" : "half"));
+ ep->mii_bmcr = xcvr & 0x300 ? 0x2000 : 0; /* 10/100mbps? */
+ ep->mii_bmcr |= xcvr & 0x220 ? 0x0100 : 0; /* duplex */
+ mdio_write(dev, phy, 0, ep->mii_bmcr);
+ } else {
+ ep->mii_bmcr = 0x3000;
+ ep->advertising = mdio_read(dev, phy, 4);
+ printk(KERN_INFO "%s: Autonegotiation advertising %4.4x link "
+ "partner %4.4x.\n",
+ dev->name, ep->advertising, mdio_read(dev, phy, 5));
+ }
+ }
+
+#if EPIC_POWER_SAVE
+ /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
+ if (ep->chip_flags & MII_PWRDWN)
+ outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
+#endif
+ outl(0x0008, ioaddr + GENCTL);
+
+ /* The Epic-specific entries in the device structure. */
+ dev->open = &epic_open;
+ dev->hard_start_xmit = &epic_start_xmit;
+ dev->stop = &epic_close;
+ dev->get_stats = &epic_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ return dev;
+}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x02 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x09
+#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
+#define EE_ENB (0x0001 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ This serves to flush the operation to the PCI bus.
+ */
+
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5 << 6)
+#define EE_READ64_CMD (6 << 6)
+#define EE_READ256_CMD (6 << 8)
+#define EE_ERASE_CMD (7 << 6)
+
+static int read_eeprom(long ioaddr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = ioaddr + EECTL;
+ int read_cmd = location |
+ (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
+
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ outl(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 12; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
+ outl(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ }
+ outl(EE_ENB, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+#define MII_READOP 1
+#define MII_WRITEOP 2
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long ioaddr = dev->base_addr;
+ int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
+ int i;
+
+ outl(read_cmd, ioaddr + MIICtrl);
+ /* Typical operation takes 25 loops. */
+ for (i = 400; i > 0; i--)
+ if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
+ /* Work around read failure bug. */
+ if (phy_id == 1 && location < 6
+ && inw(ioaddr + MIIData) == 0xffff) {
+ outl(read_cmd, ioaddr + MIICtrl);
+ continue;
+ }
+ return inw(ioaddr + MIIData);
+ }
+ return 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+{
+ long ioaddr = dev->base_addr;
+ int i;
+
+ outw(value, ioaddr + MIIData);
+ outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
+ for (i = 10000; i > 0; i--) {
+ if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
+ break;
+ }
+ return;
+}
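+
+/* Illustrative note (added): the command word written to MIICtrl by
+ * mdio_read()/mdio_write() above packs its fields as sketched below.
+ * The helper is for illustration only and is not used by the driver.
+ */
+#if 0 /* illustration only */
+static int mii_cmd(int phy_id, int location, int op)
+{
+	/* bits 13-9: PHY address, bits 8-4: register, bits 1-0: MII_READOP or MII_WRITEOP */
+	return (phy_id << 9) | (location << 4) | op;
+}
+#endif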
+
+
+static int epic_open(struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ epic_init_ring(dev);
+ check_media(dev);
+ epic_start(dev, 0);
+
+ /* Set the timer to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&ep->timer);
+ ep->timer.expires = jiffies + 3*HZ;
+ ep->timer.data = (unsigned long)dev;
+ ep->timer.function = &epic_timer; /* timer handler */
+ add_timer(&ep->timer);
+
+ return 0;
+}
+
+/* Reset the chip to recover from a PCI transaction error.
+ This may occur at interrupt time. */
+static void epic_pause(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x00000000, ioaddr + INTMASK);
+ /* Stop the chip's Tx and Rx DMA processes. */
+ outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
+
+ /* Update the error counts. */
+ if (inw(ioaddr + COMMAND) != 0xffff) {
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+ }
+
+ /* Remove the packets on the Rx queue. */
+ epic_rx(dev);
+}
+
+static void epic_start(struct net_device *dev, int restart)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int i;
+
+ if (restart) {
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+ printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+ dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+ udelay(1);
+
+ /* This magic is documented in SMSC app note 7.15 */
+ for (i = 16; i > 0; i--)
+ outl(0x0008, ioaddr + TEST1);
+ }
+
+#if defined(__powerpc__) || defined(__sparc__) || defined(__BIG_ENDIAN)
+ ep->genctl = 0x0432 | (RX_FIFO_THRESH<<8);
+#elif defined(__LITTLE_ENDIAN) || defined(__i386__)
+ ep->genctl = 0x0412 | (RX_FIFO_THRESH<<8);
+#else
+#error The byte order of this architecture is not defined.
+#endif
+
+ /* Power and reset the PHY. */
+ if (ep->chip_flags & MII_PWRDWN)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ if (restart) {
+ outl(ep->genctl | 0x4000, ioaddr + GENCTL);
+ inl(ioaddr + GENCTL);
+ }
+ outl(ep->genctl, ioaddr + GENCTL);
+
+ if (dev->if_port == 2 || dev->if_port == 5) { /* 10base2 or AUI */
+ outl(0x13, ioaddr + MIICfg);
+ printk(KERN_INFO "%s: Disabling MII PHY to use 10base2/AUI.\n",
+ dev->name);
+ mdio_write(dev, ep->phys[0], 0, 0x0C00);
+ } else {
+ outl(0x12, ioaddr + MIICfg);
+ mdio_write(dev, ep->phys[0], 0, ep->advertising);
+ mdio_write(dev, ep->phys[0], 0, ep->mii_bmcr);
+ check_media(dev);
+ }
+
+ for (i = 0; i < 3; i++)
+ outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+
+ ep->tx_threshold = TX_FIFO_THRESH;
+ outl(ep->tx_threshold, ioaddr + TxThresh);
+ outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ outl(virt_to_bus(&ep->rx_ring[ep->cur_rx % RX_RING_SIZE]),
+ ioaddr + PRxCDAR);
+ outl(virt_to_bus(&ep->tx_ring[ep->dirty_tx % TX_RING_SIZE]),
+ ioaddr + PTxCDAR);
+
+ /* Start the chip's Rx process. */
+ set_rx_mode(dev);
+ outl(StartRx | RxQueued, ioaddr + COMMAND);
+
+ if ( ! restart)
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
+ | CntFull | TxUnderrun | TxDone | TxEmpty
+ | RxError | RxOverflow | RxFull | RxHeader | RxDone,
+ ioaddr + INTMASK);
+ if (ep->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: epic_start() done, cmd status %4.4x, "
+ "ctl %4.4x interrupt %4.4x.\n",
+ dev->name, (int)inl(ioaddr + COMMAND),
+ (int)inl(ioaddr + GENCTL), (int)inl(ioaddr + INTSTAT));
+ return;
+}
+
+static void check_media(struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_reg5 = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], 5) : 0;
+ int negotiated = mii_reg5 & ep->advertising;
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+
+ if (ep->duplex_lock)
+ return;
+ if (mii_reg5 == 0xffff) /* Bogus read */
+ return;
+ if (ep->full_duplex != duplex) {
+ ep->full_duplex = duplex;
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ ep->full_duplex ? "full" : "half", ep->phys[0], mii_reg5);
+ outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+ }
+}
+
+static void epic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 5*HZ;
+
+ if (ep->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
+ dev->name, (int)inl(ioaddr + TxSTAT));
+ printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
+ "IntStatus %4.4x RxStatus %4.4x.\n",
+ dev->name, (int)inl(ioaddr + INTMASK),
+ (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+ }
+
+ if (ep->cur_tx - ep->dirty_tx > 1 &&
+ jiffies - dev->trans_start > TX_TIMEOUT) {
+ printk(KERN_WARNING "%s: Tx hung, %d vs. %d.\n",
+ dev->name, ep->cur_tx, ep->dirty_tx);
+ epic_tx_timeout(dev);
+ }
+
+ check_media(dev);
+
+ ep->timer.expires = jiffies + next_tick;
+ add_timer(&ep->timer);
+}
+
+static void epic_tx_timeout(struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int tx_status = inw(ioaddr + TxSTAT);
+
+ printk(KERN_WARNING "%s: EPIC transmit timeout, Tx status %4.4x.\n",
+ dev->name, tx_status);
+ if (ep->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
+ dev->name, ep->dirty_tx, ep->cur_tx);
+ if (tx_status & 0x10) { /* Tx FIFO underflow. */
+ ep->stats.tx_fifo_errors++;
+ outl(RestartTx, ioaddr + COMMAND);
+ } else {
+ epic_start(dev, 1);
+ outl(TxQueued, dev->base_addr + COMMAND);
+ }
+
+ dev->trans_start = jiffies;
+ ep->stats.tx_errors++;
+ return;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void epic_init_ring(struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int i;
+
+ ep->tx_full = 0;
+ ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+ ep->dirty_tx = ep->cur_tx = 0;
+ ep->cur_rx = ep->dirty_rx = 0;
+ ep->last_rx_time = jiffies;
+ ep->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ ep->rx_ring[i].rxstatus = 0;
+ ep->rx_ring[i].buflength = ep->rx_buf_sz;
+ ep->rx_ring[i].next = virt_to_bus(&ep->rx_ring[i+1]);
+ ep->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ ep->rx_ring[i-1].next = virt_to_bus(&ep->rx_ring[0]);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
+ ep->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ ep->rx_ring[i].bufaddr = virt_to_bus(skb->tail);
+ ep->rx_ring[i].rxstatus = DescOwn;
+ }
+ ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ ep->tx_skbuff[i] = 0;
+ ep->tx_ring[i].txstatus = 0x0000;
+ ep->tx_ring[i].next = virt_to_bus(&ep->tx_ring[i+1]);
+ }
+ ep->tx_ring[i-1].next = virt_to_bus(&ep->tx_ring[0]);
+ return;
+}
+
+static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int entry, free_count;
+ u32 ctrl_word;
+ unsigned long flags;
+
+ /* Block a timer-based transmit from overlapping. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ epic_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the field with the
+ "ownership" bit last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ spin_lock_irqsave(&ep->lock, flags);
+ free_count = ep->cur_tx - ep->dirty_tx;
+ entry = ep->cur_tx % TX_RING_SIZE;
+
+ ep->tx_skbuff[entry] = skb;
+ ep->tx_ring[entry].bufaddr = virt_to_bus(skb->data);
+
+ if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
+ ctrl_word = 0x100000; /* No interrupt */
+ } else if (free_count == TX_QUEUE_LEN/2) {
+ ctrl_word = 0x140000; /* Tx-done intr. */
+ } else if (free_count < TX_QUEUE_LEN - 1) {
+ ctrl_word = 0x100000; /* No Tx-done intr. */
+ } else {
+ /* Leave room for an additional entry. */
+ ctrl_word = 0x140000; /* Tx-done intr. */
+ ep->tx_full = 1;
+ }
+ ep->tx_ring[entry].buflength = ctrl_word | skb->len;
+ ep->tx_ring[entry].txstatus =
+ ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
+ | DescOwn;
+
+ ep->cur_tx++;
+ if (ep->tx_full) {
+ /* Check for a just-cleared queue. */
+ if (ep->cur_tx - (volatile int)ep->dirty_tx < TX_QUEUE_LEN - 2) {
+ netif_unpause_tx_queue(dev);
+ ep->tx_full = 0;
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev);
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+ /* Trigger an immediate transmit demand. */
+ outl(TxQueued, dev->base_addr + COMMAND);
+
+ dev->trans_start = jiffies;
+ if (ep->msg_level & NETIF_MSG_TX_QUEUED)
+ printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
+ "flag %2.2x Tx status %8.8x.\n",
+ dev->name, (int)skb->len, entry, ctrl_word,
+ (int)inl(dev->base_addr + TxSTAT));
+
+ return 0;
+}
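+
+/* Illustrative note (added): epic_start_xmit() above depends on write
+ * ordering: bufaddr and buflength are filled in first, and the DescOwn bit
+ * is set last so the chip never sees a half-built descriptor. Below is a
+ * generic sketch of that hand-off (padding to ETH_ZLEN omitted for brevity);
+ * on a weakly ordered SMP system a write barrier would precede the ownership
+ * store, whereas this i386-era driver relies on x86 store ordering.
+ */
+#if 0 /* illustration only */
+static void hand_descriptor_to_chip(struct epic_tx_desc *desc,
+				    u32 bufaddr, u32 ctrl_word, int len)
+{
+	desc->bufaddr = bufaddr;
+	desc->buflength = ctrl_word | len;
+	/* a wmb() would go here on architectures that reorder stores */
+	desc->txstatus = ((u32)len << 16) | DescOwn;	/* ownership bit last */
+}
+#endif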
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int status, boguscnt = max_interrupt_work;
+
+ do {
+ status = inl(ioaddr + INTSTAT);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(status & 0x00007fff, ioaddr + INTSTAT);
+
+ if (ep->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
+ "intstat=%#8.8x.\n",
+ dev->name, status, (int)inl(ioaddr + INTSTAT));
+
+ if ((status & IntrSummary) == 0)
+ break;
+
+ if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
+ epic_rx(dev);
+
+ if (status & (TxEmpty | TxDone)) {
+ unsigned int dirty_tx, cur_tx;
+
+ /* Note: if this lock becomes a problem we can narrow the locked
+ region at the cost of occasionally grabbing the lock more
+ times. */
+ spin_lock(&ep->lock);
+ cur_tx = ep->cur_tx;
+ dirty_tx = ep->dirty_tx;
+ for (; cur_tx - dirty_tx > 0; dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int txstatus = ep->tx_ring[entry].txstatus;
+
+ if (txstatus & DescOwn)
+ break; /* It still hasn't been Txed */
+
+ if ( ! (txstatus & 0x0001)) {
+ /* There was a major error, log it. */
+#ifndef final_version
+ if (ep->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+#endif
+ ep->stats.tx_errors++;
+ if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
+ if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
+ if (txstatus & 0x0040) ep->stats.tx_window_errors++;
+ if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (txstatus & 0x1000) ep->stats.collisions16++;
+#endif
+ } else {
+ if (ep->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status "
+ "%8.8x.\n", dev->name, txstatus);
+#ifdef ETHER_STATS
+ if ((txstatus & 0x0002) != 0) ep->stats.tx_deferred++;
+#endif
+ ep->stats.collisions += (txstatus >> 8) & 15;
+ ep->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
+#endif
+ }
+
+ /* Free the original skb. */
+ dev_free_skb_irq(ep->tx_skbuff[entry]);
+ ep->tx_skbuff[entry] = 0;
+ }
+
+#ifndef final_version
+ if (cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, cur_tx, ep->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+ ep->dirty_tx = dirty_tx;
+ if (ep->tx_full
+ && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ ep->tx_full = 0;
+ spin_unlock(&ep->lock);
+ netif_resume_tx_queue(dev);
+ } else
+ spin_unlock(&ep->lock);
+ }
+
+ /* Check uncommon events all at once. */
+ if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
+ PCIBusErr170 | PCIBusErr175)) {
+ if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
+ break;
+ /* Always update the error counts to avoid overhead later. */
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+
+ if (status & TxUnderrun) { /* Tx FIFO underflow. */
+ ep->stats.tx_fifo_errors++;
+ outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+ /* Restart the transmit process. */
+ outl(RestartTx, ioaddr + COMMAND);
+ }
+ if (status & RxOverflow) { /* Missed a Rx frame. */
+ ep->stats.rx_errors++;
+ }
+ if (status & (RxOverflow | RxFull))
+ outw(RxQueued, ioaddr + COMMAND);
+ if (status & PCIBusErr170) {
+ printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
+ dev->name, status);
+ epic_pause(dev);
+ epic_start(dev, 1);
+ }
+ /* Clear all error sources. */
+ outl(status & 0x7f18, ioaddr + INTSTAT);
+ }
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "IntrStatus=0x%8.8x.\n",
+ dev->name, status);
+ /* Clear all interrupt sources. */
+ outl(0x0001ffff, ioaddr + INTSTAT);
+ /* Ill-advised: Slowly stop emitting this message. */
+ max_interrupt_work++;
+ break;
+ }
+ } while (1);
+
+ if (ep->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Exiting interrupt, intr_status=%#4.4x.\n",
+ dev->name, status);
+
+ return;
+}
+
+static int epic_rx(struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int entry = ep->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
+ int work_done = 0;
+
+ if (ep->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+ ep->rx_ring[entry].rxstatus);
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
+ int status = ep->rx_ring[entry].rxstatus;
+
+ if (ep->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
+ if (--rx_work_limit < 0)
+ break;
+ if (status & 0x2006) {
+ if (ep->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
+ dev->name, status);
+ if (status & 0x2000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, status %4.4x!\n", dev->name, status);
+ ep->stats.rx_length_errors++;
+ } else if (status & 0x0006)
+ /* Rx Frame errors are counted in hardware. */
+ ep->stats.rx_errors++;
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = (status >> 16) - 4;
+ struct sk_buff *skb;
+
+ if (pkt_len > PKT_BUF_SZ - 4) {
+ printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
+ "%d bytes.\n",
+ dev->name, status, pkt_len);
+ pkt_len = 1514;
+ }
+ if (ep->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n", pkt_len, rx_work_limit);
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if 1 /* HAS_IP_COPYSUM */
+ eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ skb_put(skb = ep->rx_skbuff[entry], pkt_len);
+ ep->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ ep->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ ep->stats.rx_bytes += pkt_len;
+#endif
+ }
+ work_done++;
+ entry = (++ep->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
+ entry = ep->dirty_rx % RX_RING_SIZE;
+ if (ep->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+ skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ ep->rx_ring[entry].bufaddr = virt_to_bus(skb->tail);
+ work_done++;
+ }
+ ep->rx_ring[entry].rxstatus = DescOwn;
+ }
+ return work_done;
+}
+
+static int epic_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (ep->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x.\n",
+ dev->name, (int)inl(ioaddr + INTSTAT));
+
+ epic_pause(dev);
+ del_timer(&ep->timer);
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = ep->rx_skbuff[i];
+ ep->rx_skbuff[i] = 0;
+ ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
+ ep->rx_ring[i].buflength = 0;
+ ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
+ if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+ skb->free = 1;
+#endif
+ dev_free_skb(skb);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (ep->tx_skbuff[i])
+ dev_free_skb(ep->tx_skbuff[i]);
+ ep->tx_skbuff[i] = 0;
+ }
+
+ /* Green! Leave the chip in low-power mode. */
+ outl(0x440008, ioaddr + GENCTL);
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static struct net_device_stats *epic_get_stats(struct net_device *dev)
+{
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (netif_running(dev)) {
+ /* Update the error counts. */
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+ ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+ }
+
+ return &ep->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling ep->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+/* The little-endian AUTODIN II ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
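+
+/* Illustrative note (added): the comment above suggests a table-based routine
+ * for bulk data. The sketch below is the equivalent table-driven form of
+ * ether_crc_le() (same polynomial, same result, no final inversion); it is
+ * not used by this driver.
+ */
+#if 0 /* illustration only */
+static unsigned crc32_le_table[256];
+static void crc32_le_init_table(void)
+{
+	unsigned i, j, c;
+	for (i = 0; i < 256; i++) {
+		c = i;
+		for (j = 0; j < 8; j++)
+			c = (c & 1) ? (c >> 1) ^ ethernet_polynomial_le : (c >> 1);
+		crc32_le_table[i] = c;
+	}
+}
+static unsigned ether_crc_le_by_table(int length, unsigned char *data)
+{
+	unsigned crc = 0xffffffff;	/* initial value, as above */
+	while (--length >= 0)
+		crc = crc32_le_table[(crc ^ *data++) & 0xff] ^ (crc >> 8);
+	return crc;
+}
+#endif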
+
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ unsigned char mc_filter[8]; /* Multicast hash filter */
+ u32 new_rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ new_rx_mode = 0x002C;
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+ /* There is apparently a chip bug, so the multicast filter
+ is never enabled. */
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ new_rx_mode = 0x000C;
+ } else if (dev->mc_count == 0) {
+ memset(mc_filter, 0, sizeof(mc_filter));
+ new_rx_mode = 0x0004;
+ } else { /* Never executed, for now. */
+ struct dev_mc_list *mclist;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ new_rx_mode = 0x000C;
+ }
+ if (ep->cur_rx_mode != new_rx_mode) {
+ ep->cur_rx_mode = new_rx_mode;
+ outl(new_rx_mode, ioaddr + RxCtrl);
+ }
+ /* ToDo: perhaps we need to stop the Tx and Rx process here? */
+ if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
+ for (i = 0; i < 4; i++)
+ outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+ memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
+ }
+ return;
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct epic_private *ep = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = ep->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+#if defined(PWRDWN_AFTER_IOCTL)
+ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+ }
+#endif
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+ if (data[0] == ep->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ ep->duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (ep->duplex_lock)
+ ep->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: ep->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+#if defined(PWRDWN_AFTER_IOCTL)
+ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+ }
+#endif
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = ep->msg_level;
+ data32[1] = ep->multicast_filter_limit;
+ data32[2] = ep->max_interrupt_work;
+ data32[3] = ep->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ ep->msg_level = data32[0];
+ ep->multicast_filter_limit = data32[1];
+ ep->max_interrupt_work = data32[2];
+ ep->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int epic_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ if (ep->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_SUSPEND:
+ epic_pause(dev);
+ /* Put the chip into low-power mode. */
+ outl(0x0008, ioaddr + GENCTL);
+ break;
+ case DRV_RESUME:
+ epic_start(dev, 1);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[ep->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_epic_dev; *devp; devp = next) {
+ next = &((struct epic_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (ep->priv_addr)
+ kfree(ep->priv_addr);
+ kfree(dev);
+ /*MOD_DEC_USE_COUNT;*/
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+static dev_node_t *epic_attach(dev_locator_t *loc)
+{
+ struct net_device *dev;
+ u16 dev_id;
+ u32 pciaddr;
+ u8 bus, devfn, irq;
+ long ioaddr;
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ printk(KERN_DEBUG "epic_attach(bus %d, function %d)\n", bus, devfn);
+#ifdef USE_IO_OPS
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &pciaddr);
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &pciaddr);
+ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_id_tbl[1].io_size);
+#endif
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+ if (ioaddr == 0 || irq == 0) {
+ printk(KERN_ERR "The EPIC/C CardBus Ethernet interface at %d/%d was "
+ "not assigned an %s.\n"
+ KERN_ERR " It will not be activated.\n",
+ bus, devfn, ioaddr == 0 ? "address" : "IRQ");
+ return NULL;
+ }
+ dev = epic_probe1(pci_find_slot(bus, devfn), NULL, ioaddr, irq, 1, 0);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+ node->major = node->minor = 0;
+ node->next = NULL;
+ MOD_INC_USE_COUNT;
+ return node;
+ }
+ return NULL;
+}
+
+static void epic_suspend(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "epic_suspend(%s)\n", node->dev_name);
+ for (devp = &root_epic_dev; *devp; devp = next) {
+ next = &((struct epic_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ long ioaddr = (*devp)->base_addr;
+ epic_pause(*devp);
+ /* Put the chip into low-power mode. */
+ outl(0x0008, ioaddr + GENCTL);
+ }
+}
+static void epic_resume(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "epic_resume(%s)\n", node->dev_name);
+ for (devp = &root_epic_dev; *devp; devp = next) {
+ next = &((struct epic_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ epic_start(*devp, 1);
+ }
+}
+static void epic_detach(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "epic_detach(%s)\n", node->dev_name);
+ for (devp = &root_epic_dev; *devp; devp = next) {
+ next = &((struct epic_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+	if (*devp) {
+		struct net_device *dev = *devp;
+		unregister_netdev(dev);
+		release_region(dev->base_addr, EPIC_TOTAL_SIZE);
+#ifndef USE_IO_OPS
+		iounmap((char *)dev->base_addr);
+#endif
+		/* Unlink the device from the driver list before freeing it,
+		   since 'next' points into its private area. */
+		*devp = *next;
+		kfree(dev);
+		kfree(node);
+		MOD_DEC_USE_COUNT;
+	}
+}
+
+struct driver_operations epic_ops = {
+ "epic_cb", epic_attach, epic_suspend, epic_resume, epic_detach
+};
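+/* epic_ops hands the CardBus attach/suspend/resume/detach entry points defined
+   above to the pcmcia-cs layer (see <pcmcia/driver_ops.h>); init_module() below
+   registers it under the driver name "epic_cb". */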
+
+#endif /* Cardbus support */
+
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s", version);
+
+#ifdef CARDBUS
+ register_driver(&epic_ops);
+ return 0;
+#else
+ return pci_drv_register(&epic_drv_id, NULL);
+#endif
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&epic_ops);
+#else
+ pci_drv_unregister(&epic_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_epic_dev) {
+ struct epic_private *ep = (struct epic_private *)root_epic_dev->priv;
+ unregister_netdev(root_epic_dev);
+ release_region(root_epic_dev->base_addr, pci_id_tbl[ep->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)root_epic_dev->base_addr);
+#endif
+ next_dev = ep->next_module;
+ if (ep->priv_addr)
+ kfree(ep->priv_addr);
+ kfree(root_epic_dev);
+ root_epic_dev = next_dev;
+ }
+}
+#else
+int epic100_probe(struct net_device *dev)
+{
+ int retval = pci_drv_register(&epic_drv_id, dev);
+ if (retval >= 0)
+ printk(KERN_INFO "%s", version);
+ return retval;
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c epic100.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c epic100.c -o epic_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/eth16i.c b/linux/src/drivers/net/eth16i.c
new file mode 100644
index 0000000..244c3e7
--- /dev/null
+++ b/linux/src/drivers/net/eth16i.c
@@ -0,0 +1,1604 @@
+/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
+
+ Written 1994-1998 by Mika Kuoppala
+
+ Copyright (C) 1994-1998 by Mika Kuoppala
+ Based on skeleton.c and heavily on at1700.c by Donald Becker
+
+ This software may be used and distributed according to the terms
+ of the GNU Public Licence, incorporated herein by reference.
+
+ The author may be reached as miku@iki.fi
+
+   This driver supports the following cards:
+ - ICL EtherTeam 16i
+ - ICL EtherTeam 32 EISA
+      (Uses true 32 bit transfers rather than 16i compatibility mode)
+
+ Example Module usage:
+ insmod eth16i.o ioaddr=0x2a0 mediatype=bnc
+
+ mediatype can be one of the following: bnc,tp,dix,auto,eprom
+
+ 'auto' will try to autoprobe mediatype.
+   'eprom' will use whatever type is defined in the EEPROM.
+
+   I have benchmarked the driver with a PII/300MHz as an ftp client
+   and a 486/33MHz as an ftp server. Top speed was 1128.37 kilobytes/sec.
+
+ Sources:
+ - skeleton.c a sample network driver core for linux,
+ written by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
+ - at1700.c a driver for Allied Telesis AT1700, written
+ by Donald Becker.
+ - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
+ written by Markku Viima
+ - The Fujitsu MB86965 databook.
+
+   The author thanks the following persons for their valuable assistance:
+ Markku Viima (ICL)
+ Ari Valve (ICL)
+ Donald Becker
+ Kurt Huwig <kurt@huwig.de>
+
+ Revision history:
+
+ Version Date Description
+
+ 0.01 15.12-94 Initial version (card detection)
+ 0.02 23.01-95 Interrupt is now hooked correctly
+ 0.03 01.02-95 Rewrote initialization part
+ 0.04 07.02-95 Base skeleton done...
+ Made a few changes to signature checking
+ to make it a bit reliable.
+ - fixed bug in tx_buf mapping
+ - fixed bug in initialization (DLC_EN
+ wasn't enabled when initialization
+ was done.)
+ 0.05 08.02-95 If there were more than one packet to send,
+ transmit was jammed due to invalid
+ register write...now fixed
+ 0.06 19.02-95 Rewrote interrupt handling
+ 0.07 13.04-95 Wrote EEPROM read routines
+ Card configuration now set according to
+ data read from EEPROM
+ 0.08 23.06-95 Wrote part that tries to probe used interface
+ port if AUTO is selected
+
+ 0.09 01.09-95 Added module support
+
+ 0.10 04.09-95 Fixed receive packet allocation to work
+ with kernels > 1.3.x
+
+ 0.20 20.09-95 Added support for EtherTeam32 EISA
+
+ 0.21 17.10-95 Removed the unnecessary extern
+ init_etherdev() declaration. Some
+ other cleanups.
+
+ 0.22 22.02-96 Receive buffer was not flushed
+ correctly when faulty packet was
+ received. Now fixed.
+
+ 0.23 26.02-96 Made resetting the adapter
+ more reliable.
+
+ 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx
+
+ 0.25 22.05-96 kfree() was missing from cleanup_module.
+
+ 0.26 11.06-96 Sometimes card was not found by
+ check_signature(). Now made more reliable.
+
+ 0.27 23.06-96 Oops. 16 consecutive collisions halted
+ adapter. Now will try to retransmit
+ MAX_COL_16 times before finally giving up.
+
+ 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq
+
+ 0.29 29.10-97 Multiple card support for module users
+
+ 0.30 30.10-97 Fixed irq allocation bug.
+ (request_irq moved from probe to open)
+
+ 0.30a 21.08-98 Card detection made more relaxed. Driver
+ had problems with some TCP/IP-PROM boots
+ to find the card. Suggested by
+ Kurt Huwig <kurt@huwig.de>
+
+ 0.31 28.08-98 Media interface port can now be selected
+ with module parameters or kernel
+ boot parameters.
+
+ 0.32 31.08-98 IRQ was never freed if open/close
+ pair wasn't called. Now fixed.
+
+ 0.33 10.09-98 When eth16i_open() was called after
+ eth16i_close() chip never recovered.
+ Now more shallow reset is made on
+ close.
+
+ Bugs:
+ In some cases the media interface autoprobing code doesn't find
+ the correct interface type. In this case you can
+	manually choose the interface type in DOS with E16IC.EXE, which is
+	the configuration software for EtherTeam16i and EtherTeam32 cards.
+	This is also true for the IRQ setting. You cannot use a module
+	parameter to configure the IRQ of the card (yet).
+
+ To do:
+ - Real multicast support
+	- Rewrite the media interface autoprobing code. It's _horrible_!
+	- Possibly merge all the MB86965 specific code into an external
+	  module for use by eth16.c and Donald's at1700.c
+	- IRQ configuration with a module parameter. I will do
+	  this when I get enough info about setting the
+	  irq without the configuration utility.
+*/
+
+static char *version =
+ "eth16i.c: v0.33 10-09-98 Mika Kuoppala (miku@iki.fi)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#if LINUX_VERSION_CODE >= 0x20123
+#include <linux/init.h>
+#else
+#define __init
+#define __initdata
+#define __initfunc(x) x
+#endif
+
+#if LINUX_VERSION_CODE < 0x20138
+//#define test_and_set_bit(val,addr) set_bit(val,addr)
+#endif
+
+#if LINUX_VERSION_CODE < 0x020100
+typedef struct enet_statistics eth16i_stats_type;
+#else
+typedef struct net_device_stats eth16i_stats_type;
+#endif
+
+/* Few macros */
+#define BIT(a) ( (1 << (a)) )
+#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
+#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
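+/* For example, BITSET(ioaddr + CONFIG_REG_0, DLC_EN) is a read-modify-write on
+   the port: it reads the register, ORs in the DLC_EN bit and writes the result
+   back; BITCLR does the same but clears the given bit(s). */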
+
+/* This is the I/O address space for Etherteam 16i adapter. */
+#define ETH16I_IO_EXTENT 32
+
+/* Ticks before deciding that transmit has timed out */
+#define TX_TIMEOUT (400*HZ/1000)
+
+/* Maximum loop count when receiving packets */
+#define MAX_RX_LOOP 20
+
+/* Some interrupt masks */
+#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */
+#define ETH16I_INTR_OFF 0x0000
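+/* These masks are written with a single 16-bit outw() at TX_INTR_REG, so the
+   low byte programs the transmit interrupt enable register (DLCR2) and the
+   high byte the receive interrupt enable register (DLCR3); hence the
+   "higher is receive mask" note above. */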
+
+/* Buffers header status byte meanings */
+#define PKT_GOOD BIT(5)
+#define PKT_GOOD_RMT BIT(4)
+#define PKT_SHORT BIT(3)
+#define PKT_ALIGN_ERR BIT(2)
+#define PKT_CRC_ERR BIT(1)
+#define PKT_RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit status register (DLCR0) */
+#define TX_STATUS_REG 0
+#define TX_DONE BIT(7)
+#define NET_BUSY BIT(6)
+#define TX_PKT_RCD BIT(5)
+#define CR_LOST BIT(4)
+#define TX_JABBER_ERR BIT(3)
+#define COLLISION BIT(2)
+#define COLLISIONS_16 BIT(1)
+
+/* Receive status register (DLCR1) */
+#define RX_STATUS_REG 1
+#define RX_PKT BIT(7) /* Packet received */
+#define BUS_RD_ERR BIT(6)
+#define SHORT_PKT_ERR BIT(3)
+#define ALIGN_ERR BIT(2)
+#define CRC_ERR BIT(1)
+#define RX_BUF_OVERFLOW BIT(0)
+
+/* Transmit Interrupt Enable Register (DLCR2) */
+#define TX_INTR_REG 2
+#define TX_INTR_DONE BIT(7)
+#define TX_INTR_COL BIT(2)
+#define TX_INTR_16_COL BIT(1)
+
+/* Receive Interrupt Enable Register (DLCR3) */
+#define RX_INTR_REG 3
+#define RX_INTR_RECEIVE BIT(7)
+#define RX_INTR_SHORT_PKT BIT(3)
+#define RX_INTR_CRC_ERR BIT(1)
+#define RX_INTR_BUF_OVERFLOW BIT(0)
+
+/* Transmit Mode Register (DLCR4) */
+#define TRANSMIT_MODE_REG 4
+#define LOOPBACK_CONTROL BIT(1)
+#define CONTROL_OUTPUT BIT(2)
+
+/* Receive Mode Register (DLCR5) */
+#define RECEIVE_MODE_REG 5
+#define RX_BUFFER_EMPTY BIT(6)
+#define ACCEPT_BAD_PACKETS BIT(5)
+#define RECEIVE_SHORT_ADDR BIT(4)
+#define ACCEPT_SHORT_PACKETS BIT(3)
+#define REMOTE_RESET BIT(2)
+
+#define ADDRESS_FILTER_MODE	(BIT(1) | BIT(0))
+#define REJECT_ALL 0
+#define ACCEPT_ALL 3
+#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
+#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
+
+/* Configuration Register 0 (DLCR6) */
+#define CONFIG_REG_0 6
+#define DLC_EN BIT(7)
+#define SRAM_CYCLE_TIME_100NS BIT(6)
+#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
+#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
+#define TBS1 BIT(3)
+#define TBS0 BIT(2)
+#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */
+#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */
+
+#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
+#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */
+#endif
+#define TX_BUF_1x2048 0
+#define TX_BUF_2x2048 1
+#define TX_BUF_2x4098 2
+#define TX_BUF_2x8192 3
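+/* ETH16I_TX_BUF_SIZE (0-3) selects one of the splits above; it is shifted into
+   the TBS1:TBS0 bits of CONFIG_REG_0 by eth16i_initialize() and translated to a
+   byte count through eth16i_tx_buf_map[] when the interface is opened. */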
+
+/* Configuration Register 1 (DLCR7) */
+#define CONFIG_REG_1 7
+#define POWERUP BIT(5)
+
+/* Transmit start register */
+#define TRANSMIT_START_REG 10
+#define TRANSMIT_START_RB 2
+#define TX_START		BIT(7)	/* The rest of the register bits indicate */
+					/* the number of packets in the tx buffer */
+/* Node ID registers (DLCR8-13) */
+#define NODE_ID_0 8
+#define NODE_ID_RB 0
+
+/* Hash Table registers (HT8-15) */
+#define HASH_TABLE_0 8
+#define HASH_TABLE_RB 1
+
+/* Buffer memory ports */
+#define BUFFER_MEM_PORT_LB 8
+#define DATAPORT BUFFER_MEM_PORT_LB
+#define BUFFER_MEM_PORT_HB 9
+
+/* 16 Collision control register (BMPR11) */
+#define COL_16_REG 11
+#define HALT_ON_16 0x00
+#define RETRANS_AND_HALT_ON_16 0x02
+
+/* Maximum number of attempts to send after 16 consecutive collisions */
+#define MAX_COL_16 10
+
+/* DMA Burst and Transceiver Mode Register (BMPR13) */
+#define TRANSCEIVER_MODE_REG 13
+#define TRANSCEIVER_MODE_RB 2
+#define IO_BASE_UNLOCK BIT(7)
+#define LOWER_SQUELCH_TRESH BIT(6)
+#define LINK_TEST_DISABLE BIT(5)
+#define AUI_SELECT BIT(4)
+#define DIS_AUTO_PORT_SEL BIT(3)
+
+/* Filter Self Receive Register (BMPR14) */
+#define FILTER_SELF_RX_REG 14
+#define SKIP_RX_PACKET BIT(2)
+#define FILTER_SELF_RECEIVE BIT(0)
+
+/* EEPROM Control Register (BMPR 16) */
+#define EEPROM_CTRL_REG 16
+
+/* EEPROM Data Register (BMPR 17) */
+#define EEPROM_DATA_REG 17
+
+/* NMC93CSx6 EEPROM Control Bits */
+#define CS_0 0x00
+#define CS_1 0x20
+#define SK_0 0x00
+#define SK_1 0x40
+#define DI_0 0x00
+#define DI_1 0x80
+
+/* NMC93CSx6 EEPROM Instructions */
+#define EEPROM_READ 0x80
+
+/* NMC93CSx6 EEPROM Addresses */
+#define E_NODEID_0 0x02
+#define E_NODEID_1 0x03
+#define E_NODEID_2 0x04
+#define E_PORT_SELECT 0x14
+ #define E_PORT_BNC 0x00
+ #define E_PORT_DIX 0x01
+ #define E_PORT_TP 0x02
+ #define E_PORT_AUTO 0x03
+ #define E_PORT_FROM_EPROM 0x04
+#define E_PRODUCT_CFG 0x30
+
+
+/* Macro to slow down io between EEPROM clock transitions */
+#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0)
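+/* The dummy inb(0x80) reads above use the traditional ISA POST/diagnostic port
+   purely as a short I/O delay between EEPROM clock edges. */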
+
+/* Jumperless Configuration Register (BMPR19) */
+#define JUMPERLESS_CONFIG 19
+
+/* ID ROM registers, writing to them also resets some parts of chip */
+#define ID_ROM_0 24
+#define ID_ROM_7 31
+#define RESET ID_ROM_0
+
+/* This is the I/O address list to be probed when seeking the card */
+static unsigned int eth16i_portlist[] =
+ { 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0 };
+
+static unsigned int eth32i_portlist[] =
+ { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
+ 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0 };
+
+/* This is the Interrupt lookup table for Eth16i card */
+static unsigned int eth16i_irqmap[] = { 9, 10, 5, 15, 0 };
+#define NUM_OF_ISA_IRQS 4
+
+/* This is the Interrupt lookup table for Eth32i card */
+static unsigned int eth32i_irqmap[] = { 3, 5, 7, 9, 10, 11, 12, 15, 0 };
+#define EISA_IRQ_REG 0xc89
+#define NUM_OF_EISA_IRQS 8
+
+static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
+static unsigned int boot = 1;
+
+/* Use 0 for production, 1 for verification, >2 for debug */
+#ifndef ETH16I_DEBUG
+#define ETH16I_DEBUG 0
+#endif
+static unsigned int eth16i_debug = ETH16I_DEBUG;
+
+/* Information for each board */
+
+struct eth16i_local {
+ eth16i_stats_type stats;
+ unsigned char tx_started;
+ unsigned char tx_buf_busy;
+ unsigned short tx_queue; /* Number of packets in transmit buffer */
+ unsigned short tx_queue_len;
+ unsigned int tx_buf_size;
+ unsigned long open_time;
+ unsigned long tx_buffered_packets;
+ unsigned long col_16;
+};
+
+/* Function prototypes */
+
+extern int eth16i_probe(struct device *dev);
+
+static int eth16i_probe1(struct device *dev, int ioaddr);
+static int eth16i_check_signature(int ioaddr);
+static int eth16i_probe_port(int ioaddr);
+static void eth16i_set_port(int ioaddr, int porttype);
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l);
+static int eth16i_receive_probe_packet(int ioaddr);
+static int eth16i_get_irq(int ioaddr);
+static int eth16i_read_eeprom(int ioaddr, int offset);
+static int eth16i_read_eeprom_word(int ioaddr);
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
+static int eth16i_open(struct device *dev);
+static int eth16i_close(struct device *dev);
+static int eth16i_tx(struct sk_buff *skb, struct device *dev);
+static void eth16i_rx(struct device *dev);
+static void eth16i_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void eth16i_reset(struct device *dev);
+static void eth16i_skip_packet(struct device *dev);
+static void eth16i_multicast(struct device *dev);
+static void eth16i_select_regbank(unsigned char regbank, int ioaddr);
+static void eth16i_initialize(struct device *dev);
+
+#if 0
+static int eth16i_set_irq(struct device *dev);
+#endif
+
+#ifdef MODULE
+static ushort eth16i_parse_mediatype(const char* s);
+#endif
+
+static struct enet_statistics *eth16i_get_stats(struct device *dev);
+
+static char *cardname = "ICL EtherTeam 16i/32";
+
+#ifdef HAVE_DEVLIST
+
+/* Support for alternate probe manager */
+struct netdev_entry eth16i_drv =
+ {"eth16i", eth16i_probe1, ETH16I_IO_EXTENT, eth16i_probe_list};
+
+#else /* Not HAVE_DEVLIST */
+
+__initfunc(int eth16i_probe(struct device *dev))
+{
+ int i;
+ int ioaddr;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if(eth16i_debug > 4)
+ printk(KERN_DEBUG "Probing started for %s\n", cardname);
+
+ if(base_addr > 0x1ff) /* Check only single location */
+ return eth16i_probe1(dev, base_addr);
+ else if(base_addr != 0) /* Don't probe at all */
+ return ENXIO;
+
+ /* Seek card from the ISA io address space */
+ for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++) {
+ if(check_region(ioaddr, ETH16I_IO_EXTENT))
+ continue;
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ /* Seek card from the EISA io address space */
+ for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++) {
+ if(check_region(ioaddr, ETH16I_IO_EXTENT))
+ continue;
+ if(eth16i_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif /* Not HAVE_DEVLIST */
+
+__initfunc(static int eth16i_probe1(struct device *dev, int ioaddr))
+{
+ static unsigned version_printed = 0;
+	boot = 1;	/* To inform initialization that we are in boot probe */
+
+ /*
+	   The MB86985 chip has one register which holds the io address at
+	   which the chip lies. First read this register and compare it to
+	   our current io address; if they match, then this could be our chip.
+ */
+
+ if(ioaddr < 0x1000) {
+
+ if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)]
+ != ioaddr)
+ return -ENODEV;
+ }
+
+ /* Now we will go a bit deeper and try to find the chip's signature */
+
+ if(eth16i_check_signature(ioaddr) != 0)
+ return -ENODEV;
+
+ /*
+	   Now it seems that we have found an ethernet chip at this particular
+	   ioaddr. The MB86985 chip has the feature that when you read a
+	   certain register it will increase its io base address to the next
+	   configurable slot. Now that we have found the chip, the first thing
+	   is to make sure that the chip's ioaddr will hold still here.
+ */
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
+
+ outb(0x00, ioaddr + RESET); /* Reset some parts of chip */
+ BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */
+
+ if(dev == NULL)
+ dev = init_etherdev(0, 0);
+
+	if(eth16i_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+
+ dev->base_addr = ioaddr;
+
+#if 0
+ if(dev->irq) {
+ if(eth16i_set_irq(dev)) {
+ dev->irq = eth16i_get_irq(ioaddr);
+ }
+
+ }
+ else {
+#endif
+
+ dev->irq = eth16i_get_irq(ioaddr);
+
+ /* Try to obtain interrupt vector */
+
+ if (request_irq(dev->irq, (void *)&eth16i_interrupt, 0, "eth16i", dev)) {
+		printk(KERN_WARNING "%s: %s at %#3x, but is unusable due to a conflicting IRQ %d.\n",
+ dev->name, cardname, ioaddr, dev->irq);
+ return -EAGAIN;
+ }
+
+#if 0
+ irq2dev_map[dev->irq] = dev;
+#endif
+
+ printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ",
+ dev->name, cardname, ioaddr, dev->irq);
+
+ /* Let's grab the region */
+ request_region(ioaddr, ETH16I_IO_EXTENT, "eth16i");
+
+ /* Now we will have to lock the chip's io address */
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
+
+ eth16i_initialize(dev); /* Initialize rest of the chip's registers */
+
+	/* Now let's save some energy by shutting down the chip ;) */
+ BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
+
+ /* Initialize the device structure */
+ if(dev->priv == NULL) {
+ dev->priv = kmalloc(sizeof(struct eth16i_local), GFP_KERNEL);
+ if(dev->priv == NULL)
+ return -ENOMEM;
+ }
+
+ memset(dev->priv, 0, sizeof(struct eth16i_local));
+
+ dev->open = eth16i_open;
+ dev->stop = eth16i_close;
+ dev->hard_start_xmit = eth16i_tx;
+ dev->get_stats = eth16i_get_stats;
+ dev->set_multicast_list = &eth16i_multicast;
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ boot = 0;
+
+ return 0;
+}
+
+
+static void eth16i_initialize(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ int i, node_w = 0;
+ unsigned char node_byte = 0;
+
+ /* Setup station address */
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+ for(i = 0 ; i < 3 ; i++) {
+ unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
+ ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
+ }
+
+ for(i = 0; i < 6; i++) {
+ outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
+ if(boot) {
+ printk("%02x", inb(ioaddr + NODE_ID_0 + i));
+ if(i != 5)
+ printk(":");
+ }
+ }
+
+ /* Now we will set multicast addresses to accept none */
+ eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
+ for(i = 0; i < 8; i++)
+ outb(0x00, ioaddr + HASH_TABLE_0 + i);
+
+ /*
+ Now let's disable the transmitter and receiver, set the buffer ram
+ cycle time, bus width and buffer data path width. Also we shall
+ set transmit buffer size and total buffer size.
+ */
+
+ eth16i_select_regbank(2, ioaddr);
+
+ node_byte = 0;
+ node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
+
+ if( (node_w & 0xFF00) == 0x0800)
+ node_byte |= BUFFER_WIDTH_8;
+
+ node_byte |= SRAM_BS1;
+
+ if( (node_w & 0x00FF) == 64)
+ node_byte |= SRAM_BS0;
+
+ node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
+
+ outb(node_byte, ioaddr + CONFIG_REG_0);
+
+	/* We shall halt transmitting if 16 collisions are detected */
+ outb(HALT_ON_16, ioaddr + COL_16_REG);
+
+#ifdef MODULE
+ /* if_port already set by init_module() */
+#else
+ dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ?
+ dev->mem_start : E_PORT_FROM_EPROM;
+#endif
+
+ /* Set interface port type */
+ if(boot) {
+ char *porttype[] = {"BNC", "DIX", "TP", "AUTO", "FROM_EPROM" };
+
+ switch(dev->if_port)
+ {
+
+ case E_PORT_FROM_EPROM:
+ dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
+ break;
+
+ case E_PORT_AUTO:
+ dev->if_port = eth16i_probe_port(ioaddr);
+ break;
+
+ case E_PORT_BNC:
+ case E_PORT_TP:
+ case E_PORT_DIX:
+ break;
+ }
+
+ printk(" %s interface.\n", porttype[dev->if_port]);
+
+ eth16i_set_port(ioaddr, dev->if_port);
+ }
+
+ /* Set Receive Mode to normal operation */
+ outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
+}
+
+static int eth16i_probe_port(int ioaddr)
+{
+ int i;
+ int retcode;
+ unsigned char dummy_packet[64] = { 0 };
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ eth16i_select_regbank(NODE_ID_RB, ioaddr);
+
+ for(i = 0; i < 6; i++) {
+ dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
+ dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
+ }
+
+ dummy_packet[12] = 0x00;
+ dummy_packet[13] = 0x04;
+
+ eth16i_select_regbank(2, ioaddr);
+
+ for(i = 0; i < 3; i++) {
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+ eth16i_set_port(ioaddr, i);
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Set port number %d\n", i);
+
+ retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
+ if(retcode == 0) {
+ retcode = eth16i_receive_probe_packet(ioaddr);
+ if(retcode != -1) {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "Eth16i interface port found at %d\n", i);
+ return i;
+ }
+ }
+ else {
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n");
+ }
+ }
+
+ if( eth16i_debug > 1)
+ printk(KERN_DEBUG "Using default port\n");
+
+ return E_PORT_BNC;
+}
+
+static void eth16i_set_port(int ioaddr, int porttype)
+{
+ unsigned short temp = 0;
+
+ eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
+ outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
+
+ temp |= DIS_AUTO_PORT_SEL;
+
+ switch(porttype) {
+
+ case E_PORT_BNC :
+ temp |= AUI_SELECT;
+ break;
+
+ case E_PORT_TP :
+ break;
+
+ case E_PORT_DIX :
+ temp |= AUI_SELECT;
+ BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
+ break;
+ }
+
+ outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
+ printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n",
+ inb(ioaddr+TRANSCEIVER_MODE_REG));
+ }
+}
+
+static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
+{
+ int starttime;
+
+ outb(0xff, ioaddr + TX_STATUS_REG);
+
+ outw(l, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
+
+ starttime = jiffies;
+ outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
+
+ while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int eth16i_receive_probe_packet(int ioaddr)
+{
+ int starttime;
+
+ starttime = jiffies;
+
+ while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+
+ if(eth16i_debug > 1)
+				printk(KERN_DEBUG "Timeout occurred waiting for transmit packet to be received\n");
+ starttime = jiffies;
+ while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
+ if( (jiffies - starttime) > TX_TIMEOUT) {
+ if(eth16i_debug > 1)
+						printk(KERN_DEBUG "Timeout occurred waiting for receive packet\n");
+ return -1;
+ }
+ }
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "RECEIVE_PACKET\n");
+ return(0); /* Found receive packet */
+ }
+ }
+
+ if(eth16i_debug > 1) {
+ printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
+ printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
+ }
+
+ return(0); /* Return success */
+}
+
+#if 0
+static int eth16i_set_irq(struct device* dev)
+{
+ const int ioaddr = dev->base_addr;
+ const int irq = dev->irq;
+ int i = 0;
+
+ if(ioaddr < 0x1000) {
+ while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq)
+ i++;
+
+ if(i < NUM_OF_ISA_IRQS) {
+ u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ cbyte = (cbyte & 0x3F) | (i << 6);
+ outb(cbyte, ioaddr + JUMPERLESS_CONFIG);
+ return 0;
+ }
+ }
+ else {
+ printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name);
+ }
+
+ return -1;
+
+}
+#endif
+
+static int eth16i_get_irq(int ioaddr)
+{
+ unsigned char cbyte;
+
+ if( ioaddr < 0x1000) {
+ cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
+ return( eth16i_irqmap[ ((cbyte & 0xC0) >> 6) ] );
+	} else {	/* Oh... the card is EISA, so the method of getting the IRQ is different */
+ unsigned short index = 0;
+ cbyte = inb(ioaddr + EISA_IRQ_REG);
+ while( (cbyte & 0x01) == 0) {
+ cbyte = cbyte >> 1;
+ index++;
+ }
+ return( eth32i_irqmap[ index ] );
+ }
+}
+
+static int eth16i_check_signature(int ioaddr)
+{
+ int i;
+ unsigned char creg[4] = { 0 };
+
+ for(i = 0; i < 4 ; i++) {
+
+ creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
+
+ if(eth16i_debug > 1)
+ printk("eth16i: read signature byte %x at %x\n",
+ creg[i],
+ ioaddr + TRANSMIT_MODE_REG + i);
+ }
+
+ creg[0] &= 0x0F; /* Mask collision cnr */
+ creg[2] &= 0x7F; /* Mask DCLEN bit */
+
+#if 0
+ /*
+	   This was removed because the card was sometimes left in a state
+	   from which it couldn't be found anymore. If there is a need for
+	   a stricter check, this still has to be fixed.
+ */
+ if( ! ((creg[0] == 0x06) && (creg[1] == 0x41)) ) {
+ if(creg[1] != 0x42)
+ return -1;
+ }
+#endif
+
+ if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
+ creg[2] &= 0x40;
+ creg[3] &= 0x03;
+
+ if( !((creg[2] == 0x40) && (creg[3] == 0x00)) )
+ return -1;
+ }
+
+ if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
+ return -1;
+
+ if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
+ return -1;
+
+ return 0;
+}
+
+static int eth16i_read_eeprom(int ioaddr, int offset)
+{
+ int data = 0;
+
+ eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
+ outb(CS_1, ioaddr + EEPROM_CTRL_REG);
+ data = eth16i_read_eeprom_word(ioaddr);
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+
+ return(data);
+}
+
+static int eth16i_read_eeprom_word(int ioaddr)
+{
+ int i;
+ int data = 0;
+
+ for(i = 16; i > 0; i--) {
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ data = (data << 1) |
+ ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
+
+ eeprom_slow_io();
+ }
+
+ return(data);
+}
+
+static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
+{
+ int i;
+
+ outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_0, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ outb(DI_1, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+
+ for(i = 7; i >= 0; i--) {
+ short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
+ outb(cmd, ioaddr + EEPROM_DATA_REG);
+ outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
+ eeprom_slow_io();
+ }
+}
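+
+/* Taken together, the three routines above bit-bang the NMC93CSx6 serial
+   protocol: eth16i_eeprom_cmd() raises chip select, clocks out a start bit and
+   the 8-bit command (EEPROM_READ | address) MSB first on DI while toggling SK;
+   eth16i_read_eeprom_word() then clocks in the 16 data bits MSB first, sampling
+   the data line through the DI_1 mask of EEPROM_DATA_REG; eth16i_read_eeprom()
+   wraps both and drops chip select afterwards. */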
+
+static int eth16i_open(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Powerup the chip */
+ outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
+
+ /* Initialize the chip */
+ eth16i_initialize(dev);
+
+ /* Set the transmit buffer size */
+ lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
+
+ if(eth16i_debug > 0)
+ printk(KERN_DEBUG "%s: transmit buffer size %d\n",
+ dev->name, lp->tx_buf_size);
+
+ /* Now enable Transmitter and Receiver sections */
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Now switch to register bank 2, for run time operation */
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->open_time = jiffies;
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Turn on interrupts*/
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int eth16i_close(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ eth16i_reset(dev);
+
+ /* Turn off interrupts*/
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ lp->open_time = 0;
+
+ /* Disable transmit and receive */
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+
+ /* Reset the chip */
+ /* outb(0xff, ioaddr + RESET); */
+ /* outw(0xffff, ioaddr + TX_STATUS_REG); */
+
+ outb(0x00, ioaddr + CONFIG_REG_1);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int eth16i_tx(struct sk_buff *skb, struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int status = 0;
+
+ if(dev->tbusy) {
+
+ /*
+ If we get here, some higher level has decided that
+ we are broken. There should really be a "kick me"
+ function call instead.
+ */
+
+ int tickssofar = jiffies - dev->trans_start;
+ if(tickssofar < TX_TIMEOUT)
+ return 1;
+
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n",
+ dev->name,
+ inw(ioaddr + TX_STATUS_REG),
+ (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
+ "IRQ conflict" : "network cable problem");
+
+ dev->trans_start = jiffies;
+
+ /* Let's dump all registers */
+ if(eth16i_debug > 0) {
+ printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
+ dev->name, inb(ioaddr + 0),
+ inb(ioaddr + 1), inb(ioaddr + 2),
+ inb(ioaddr + 3), inb(ioaddr + 4),
+ inb(ioaddr + 5),
+ inb(ioaddr + 6), inb(ioaddr + 7));
+
+ printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n",
+ dev->name, inb(ioaddr + TRANSMIT_START_REG),
+ inb(ioaddr + COL_16_REG));
+
+ printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue);
+ printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len);
+ printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started);
+
+ }
+
+ lp->stats.tx_errors++;
+
+ eth16i_reset(dev);
+
+ dev->trans_start = jiffies;
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ }
+
+ /*
+	   If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself
+ */
+
+ if(skb == NULL) {
+#if LINUX_VERSION_CODE < 0x020100
+ dev_tint(dev);
+#endif
+ if(eth16i_debug > 0)
+ printk(KERN_WARNING "%s: Missed tx-done interrupt.\n", dev->name);
+ return 0;
+ }
+
+ /* Block a timer based transmitter from overlapping.
+ This could better be done with atomic_swap(1, dev->tbusy),
+ but set_bit() works as well. */
+
+ set_bit(0, (void *)&lp->tx_buf_busy);
+
+ /* Turn off TX interrupts */
+ outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+ if(test_and_set_bit(0, (void *)&dev->tbusy) != 0) {
+ printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
+ status = -1;
+ }
+ else {
+ ushort length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
+ if(eth16i_debug > 0)
+ printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
+ }
+ else {
+ outw(length, ioaddr + DATAPORT);
+
+ if( ioaddr < 0x1000 )
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+ else {
+ unsigned char frag = length % 4;
+
+ outsl(ioaddr + DATAPORT, buf, length >> 2);
+
+ if( frag != 0 ) {
+ outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
+ if( frag == 3 )
+ outsw(ioaddr + DATAPORT,
+ (buf + (length & 0xFFFC) + 2), 1);
+ }
+ }
+
+ lp->tx_buffered_packets++;
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ }
+
+ lp->tx_buf_busy = 0;
+
+ if(lp->tx_started == 0) {
+			/* If the transmitter is idle, always trigger a transmit */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ }
+ else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+ /* There is still more room for one more packet in tx buffer */
+ dev->tbusy = 0;
+ }
+
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ /* Turn TX interrupts back on */
+ /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
+ status = 0;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020100
+ dev_kfree_skb(skb);
+#else
+ dev_kfree_skb(skb, FREE_WRITE);
+#endif
+
+ return status;
+}
+
+static void eth16i_rx(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = MAX_RX_LOOP;
+
+ /* Loop until all packets have been read */
+ while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
+
+ /* Read status byte from receive buffer */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ /* Get the size of the packet from receive buffer */
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+
+ if(eth16i_debug > 4)
+ printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n",
+ dev->name,
+ inb(ioaddr + RECEIVE_MODE_REG), status);
+
+ if( !(status & PKT_GOOD) ) {
+ lp->stats.rx_errors++;
+
+ if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) {
+ lp->stats.rx_length_errors++;
+ eth16i_reset(dev);
+ return;
+ }
+ else {
+ eth16i_skip_packet(dev);
+ lp->stats.rx_dropped++;
+ }
+ }
+ else { /* Ok so now we should have a good packet */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 3);
+ if( skb == NULL ) {
+				printk(KERN_WARNING "%s: Couldn't allocate memory for packet (len %d)\n",
+ dev->name, pkt_len);
+ eth16i_skip_packet(dev);
+ lp->stats.rx_dropped++;
+ break;
+ }
+
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ /*
+			   Now let's get the packet out of the buffer.
+			   The size is (pkt_len + 1) >> 1 because we are now reading
+			   words and it has to be evenly aligned.
+ */
+
+ if(ioaddr < 0x1000)
+ insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
+ (pkt_len + 1) >> 1);
+ else {
+ unsigned char *buf = skb_put(skb, pkt_len);
+ unsigned char frag = pkt_len % 4;
+
+ insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
+
+ if(frag != 0) {
+ unsigned short rest[2];
+ rest[0] = inw( ioaddr + DATAPORT );
+ if(frag == 3)
+ rest[1] = inw( ioaddr + DATAPORT );
+
+ memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
+ }
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+
+ if( eth16i_debug > 5 ) {
+ int i;
+ printk(KERN_DEBUG "%s: Received packet of length %d.\n",
+ dev->name, pkt_len);
+ for(i = 0; i < 14; i++)
+ printk(KERN_DEBUG " %02x", skb->data[i]);
+ printk(KERN_DEBUG ".\n");
+ }
+
+ } /* else */
+
+ if(--boguscount <= 0)
+ break;
+
+ } /* while */
+
+#if 0
+ {
+ int i;
+
+ for(i = 0; i < 20; i++) {
+ if( (inb(ioaddr+RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) ==
+ RX_BUFFER_EMPTY)
+ break;
+ inw(ioaddr + DATAPORT);
+ outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ }
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "%s: Flushed receive buffer.\n", dev->name);
+ }
+#endif
+
+ return;
+}
+
+static void eth16i_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = dev_id;
+ struct eth16i_local *lp;
+ int ioaddr = 0,
+ status;
+
+ if(dev == NULL) {
+ printk(KERN_WARNING "eth16i_interrupt(): irq %d for unknown device. \n", irq);
+ return;
+ }
+
+	ioaddr = dev->base_addr;
+	lp = (struct eth16i_local *)dev->priv;
+
+	/* Turn off all interrupts from adapter */
+	outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
+
+	set_bit(0, (void *)&dev->tbusy);	/* Set the device busy so that */
+						/* eth16i_tx won't be called  */
+
+	if(dev->interrupt)
+		printk(KERN_WARNING "%s: Re-entering the interrupt handler.\n", dev->name);
+	dev->interrupt = 1;
+
+ status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
+ outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
+
+ if(eth16i_debug > 3)
+ printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status);
+
+ if( status & 0x7f00 ) {
+
+ lp->stats.rx_errors++;
+
+ if(status & (BUS_RD_ERR << 8) )
+ printk(KERN_WARNING "%s: Bus read error.\n",dev->name);
+ if(status & (SHORT_PKT_ERR << 8) ) lp->stats.rx_length_errors++;
+ if(status & (ALIGN_ERR << 8) ) lp->stats.rx_frame_errors++;
+ if(status & (CRC_ERR << 8) ) lp->stats.rx_crc_errors++;
+ if(status & (RX_BUF_OVERFLOW << 8) ) lp->stats.rx_over_errors++;
+ }
+ if( status & 0x001a) {
+
+ lp->stats.tx_errors++;
+
+ if(status & CR_LOST) lp->stats.tx_carrier_errors++;
+ if(status & TX_JABBER_ERR) lp->stats.tx_window_errors++;
+
+#if 0
+ if(status & COLLISION) {
+ lp->stats.collisions +=
+ ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4);
+ }
+#endif
+ if(status & COLLISIONS_16) {
+ if(lp->col_16 < MAX_COL_16) {
+ lp->col_16++;
+ lp->stats.collisions++;
+ /* Resume transmitting, skip failed packet */
+ outb(0x02, ioaddr + COL_16_REG);
+ }
+ else {
+ printk(KERN_WARNING "%s: bailing out due to many consecutive 16-in-a-row collisions. Network cable problem?\n", dev->name);
+ }
+ }
+ }
+
+ if( status & 0x00ff ) { /* Let's check the transmit status reg */
+
+ if(status & TX_DONE) { /* The transmit has been done */
+ lp->stats.tx_packets = lp->tx_buffered_packets;
+ lp->col_16 = 0;
+
+ if(lp->tx_queue) { /* Is there still packets ? */
+				/* There were packet(s), so start transmitting and also
+				   write how many packets there are to be sent */
+ outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ lp->tx_started = 1;
+ dev->trans_start = jiffies;
+ mark_bh(NET_BH);
+ }
+ else {
+ lp->tx_started = 0;
+ mark_bh(NET_BH);
+ }
+ }
+ }
+
+ if( ( status & 0x8000 ) ||
+ ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
+ eth16i_rx(dev); /* We have packet in receive buffer */
+ }
+
+ dev->interrupt = 0;
+
+ /* Turn interrupts back on */
+ outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
+
+ if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
+ /* There is still more room for one more packet in tx buffer */
+ dev->tbusy = 0;
+ }
+
+ return;
+}
+
+static void eth16i_skip_packet(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+ inw(ioaddr + DATAPORT);
+
+ outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
+ while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0);
+}
+
+static void eth16i_reset(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if(eth16i_debug > 1)
+ printk(KERN_DEBUG "%s: Resetting device.\n", dev->name);
+
+ BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
+ outw(0xffff, ioaddr + TX_STATUS_REG);
+ eth16i_select_regbank(2, ioaddr);
+
+ lp->tx_started = 0;
+ lp->tx_buf_busy = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ dev->interrupt = 0;
+ dev->start = 1;
+ dev->tbusy = 0;
+ BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
+}
+
+static void eth16i_multicast(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ {
+ dev->flags|=IFF_PROMISC; /* Must do this */
+ outb(3, ioaddr + RECEIVE_MODE_REG);
+ } else {
+ outb(2, ioaddr + RECEIVE_MODE_REG);
+ }
+}
+
+static struct enet_statistics *eth16i_get_stats(struct device *dev)
+{
+ struct eth16i_local *lp = (struct eth16i_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+static void eth16i_select_regbank(unsigned char banknbr, int ioaddr)
+{
+ unsigned char data;
+
+ data = inb(ioaddr + CONFIG_REG_1);
+ outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
+}
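+
+/* The bank number is written into bits 2-3 of CONFIG_REG_1; bank 0 exposes the
+   node ID registers (NODE_ID_RB), bank 1 the multicast hash table
+   (HASH_TABLE_RB) and bank 2 the buffer memory / transceiver registers
+   (TRANSMIT_START_RB, TRANSCEIVER_MODE_RB) used during normal operation. */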
+
+#ifdef MODULE
+
+static ushort eth16i_parse_mediatype(const char* s)
+{
+ if(!s)
+ return E_PORT_FROM_EPROM;
+
+ if (!strncmp(s, "bnc", 3))
+ return E_PORT_BNC;
+ else if (!strncmp(s, "tp", 2))
+ return E_PORT_TP;
+ else if (!strncmp(s, "dix", 3))
+ return E_PORT_DIX;
+ else if (!strncmp(s, "auto", 4))
+ return E_PORT_AUTO;
+ else
+ return E_PORT_FROM_EPROM;
+}
+
+#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */
+#define NAMELEN 8 /* number of chars for storing dev->name */
+
+static char namelist[NAMELEN * MAX_ETH16I_CARDS] = { 0, };
+static struct device dev_eth16i[MAX_ETH16I_CARDS] = {
+ {
+ NULL,
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int ioaddr[MAX_ETH16I_CARDS] = { 0, };
+#if 0
+static int irq[MAX_ETH16I_CARDS] = { 0, };
+#endif
+static char* mediatype[MAX_ETH16I_CARDS] = { 0, };
+static int debug = -1;
+
+#if (LINUX_VERSION_CODE >= 0x20115)
+MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>");
+MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
+
+MODULE_PARM(ioaddr, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "i");
+MODULE_PARM_DESC(ioaddr, "eth16i io base address");
+
+#if 0
+MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "i");
+MODULE_PARM_DESC(irq, "eth16i interrupt request number");
+#endif
+
+MODULE_PARM(mediatype, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "s");
+MODULE_PARM_DESC(mediatype, "eth16i interfaceport mediatype");
+
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "eth16i debug level (0-4)");
+#endif
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++)
+ {
+ struct device *dev = &dev_eth16i[this_dev];
+
+ dev->name = namelist + (NAMELEN*this_dev);
+ dev->irq = 0; /* irq[this_dev]; */
+ dev->base_addr = ioaddr[this_dev];
+ dev->init = eth16i_probe;
+
+ if(debug != -1)
+ eth16i_debug = debug;
+
+ if(eth16i_debug > 1)
+ printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" );
+
+ dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
+
+ if(ioaddr[this_dev] == 0)
+ {
+ if(this_dev != 0) break; /* Only autoprobe 1st one */
+
+ printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+
+ if(register_netdev(dev) != 0)
+ {
+ printk(KERN_WARNING "eth16i.c No Eth16i card found (i/o = 0x%x).\n",
+ ioaddr[this_dev]);
+
+ if(found != 0) return 0;
+ return -ENXIO;
+ }
+
+ found++;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++)
+ {
+ struct device* dev = &dev_eth16i[this_dev];
+
+ if(dev->priv != NULL)
+ {
+ unregister_netdev(dev);
+ kfree(dev->priv);
+ dev->priv = NULL;
+
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ETH16I_IO_EXTENT);
+
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eth16i.c"
+ * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eth16i.c"
+ * tab-width: 8
+ * c-basic-offset: 8
+ * c-indent-level: 8
+ * End:
+ */
+
+/* End of file eth16i.c */
diff --git a/linux/src/drivers/net/eth82586.h b/linux/src/drivers/net/eth82586.h
new file mode 100644
index 0000000..c2178ff
--- /dev/null
+++ b/linux/src/drivers/net/eth82586.h
@@ -0,0 +1,172 @@
+/*
+ * eth82586.h: Intel EtherExpress defines
+ *
+ * Written 1995 by John Sullivan
+ * See eexpress.c for further details;
+ * documentation and usage notes still to do.
+ */
+
+/*
+ * EtherExpress card register addresses
+ * as offsets from the base IO region (dev->base_addr)
+ */
+
+#define DATAPORT 0x0000
+#define WRITE_PTR 0x0002
+#define READ_PTR 0x0004
+#define SIGNAL_CA 0x0006
+#define SET_IRQ 0x0007
+#define SM_PTR 0x0008
+#define MEM_Ctrl 0x000b
+#define MEM_Page_Ctrl 0x000c
+#define Config 0x000d
+#define EEPROM_Ctrl 0x000e
+#define ID_PORT 0x000f
+
+/*
+ * offset to shadowed memory, 0 <= x <= 31. We don't use this yet,
+ * but may in the future. Is shadow memory access any faster than
+ * dataport access?
+ */
+#define SM_ADDR(x) (0x4000+((x&0x10)<<10)+(x&0xf))
+
+/* Always mirrors eexp-memory at 0x0008-0x000f */
+#define SCB_STATUS 0xc008
+#define SCB_CMD 0xc00a
+#define SCB_CBL 0xc00c
+#define SCB_RFA 0xc00e
+
+
+
+/*
+ * card register defines
+ */
+
+/* SET_IRQ */
+#define SIRQ_en 0x08
+#define SIRQ_dis 0x00
+
+/* Config */
+#define set_loopback outb(inb(ioaddr+Config)|0x02,ioaddr+Config)
+#define clear_loopback outb(inb(ioaddr+Config)&0xfd,ioaddr+Config)
+
+/* EEPROM_Ctrl */
+#define EC_Clk 0x01
+#define EC_CS 0x02
+#define EC_Wr 0x04
+#define EC_Rd 0x08
+#define ASIC_RST 0x40
+#define i586_RST 0x80
+
+#define eeprom_delay() { int _i = 40; while (--_i>0) { __SLOW_DOWN_IO; }}
+
+/*
+ * i82586 Memory Configuration
+ */
+
+/* (System Configuration Pointer) System start up block, read after 586_RST */
+#define SCP_START 0xfff6
+
+
+/* Intermediate System Configuration Pointer */
+#define ISCP_START 0x0000
+/* System Command Block */
+#define SCB_START 0x0008
+
+/*
+ * Start of buffer region. If we have 64k memory, eexp_hw_probe() may raise
+ * NUM_TX_BUFS. RX_BUF_END is set to the end of memory, and all space between
+ * the transmit buffer region and the end of memory is used for as many
+ * receive buffers as we can fit. See eexp_hw_[(rx)(tx)]init().
+ */
+#define TX_BUF_START 0x0100
+#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
+#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
+
+
+
+/*
+ * SCB defines
+ */
+
+/* these functions take the SCB status word and test the relevant status bit */
+#define SCB_complete(s) ((s&0x8000)!=0)
+#define SCB_rxdframe(s) ((s&0x4000)!=0)
+#define SCB_CUdead(s) ((s&0x2000)!=0)
+#define SCB_RUdead(s) ((s&0x1000)!=0)
+#define SCB_ack(s) (s & 0xf000)
+
+/* Command unit status: 0=idle, 1=suspended, 2=active */
+#define SCB_CUstat(s) ((s&0x0300)>>8)
+
+/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
+#define SCB_RUstat(s) ((s&0x0070)>>4)
+
+/* SCB commands */
+#define SCB_CUnop 0x0000
+#define SCB_CUstart 0x0100
+#define SCB_CUresume 0x0200
+#define SCB_CUsuspend 0x0300
+#define SCB_CUabort 0x0400
+
+/* ? */
+#define SCB_resetchip 0x0080
+
+#define SCB_RUnop 0x0000
+#define SCB_RUstart 0x0010
+#define SCB_RUresume 0x0020
+#define SCB_RUsuspend 0x0030
+#define SCB_RUabort 0x0040
+
+
+/*
+ * Command block defines
+ */
+
+#define Stat_Done(s) ((s&0x8000)!=0)
+#define Stat_Busy(s) ((s&0x4000)!=0)
+#define Stat_OK(s) ((s&0x2000)!=0)
+#define Stat_Abort(s) ((s&0x1000)!=0)
+#define Stat_STFail(s)  ((s&0x0800)!=0)
+#define Stat_TNoCar(s) ((s&0x0400)!=0)
+#define Stat_TNoCTS(s) ((s&0x0200)!=0)
+#define Stat_TNoDMA(s) ((s&0x0100)!=0)
+#define Stat_TDefer(s) ((s&0x0080)!=0)
+#define Stat_TColl(s) ((s&0x0040)!=0)
+#define Stat_TXColl(s) ((s&0x0020)!=0)
+#define Stat_NoColl(s) (s&0x000f)
+
+/* Cmd_END will end AFTER the command if this is the first
+ * command block after an SCB_CUstart, but BEFORE the command
+ * for all subsequent commands. Best strategy is to place
+ * Cmd_INT on the last command in the sequence, followed by a
+ * dummy Cmd_Nop with Cmd_END after this.
+ */
+#define Cmd_END 0x8000
+#define Cmd_SUS 0x4000
+#define Cmd_INT 0x2000
+
+#define Cmd_Nop 0x0000
+#define Cmd_SetAddr 0x0001
+#define Cmd_Config 0x0002
+#define Cmd_MCast 0x0003
+#define Cmd_Xmit 0x0004
+#define Cmd_TDR 0x0005
+#define Cmd_Dump 0x0006
+#define Cmd_Diag 0x0007
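+
+/* Following the strategy described above, a typical command chain might be
+ * laid out as:
+ *
+ *     Cmd_Config -> Cmd_SetAddr -> (Cmd_Xmit | Cmd_INT) -> (Cmd_Nop | Cmd_END)
+ *
+ * i.e. the interrupt bit sits on the last real command and a dummy no-op
+ * carrying Cmd_END terminates the list.
+ */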
+
+
+/*
+ * Frame Descriptor (Receive block) defines
+ */
+
+#define FD_Done(s) ((s&0x8000)!=0)
+#define FD_Busy(s) ((s&0x4000)!=0)
+#define FD_OK(s) ((s&0x2000)!=0)
+
+#define FD_CRC(s) ((s&0x0800)!=0)
+#define FD_Align(s) ((s&0x0400)!=0)
+#define FD_Resrc(s) ((s&0x0200)!=0)
+#define FD_DMA(s) ((s&0x0100)!=0)
+#define FD_Short(s) ((s&0x0080)!=0)
+#define FD_NoEOF(s) ((s&0x0040)!=0)
diff --git a/linux/src/drivers/net/ewrk3.c b/linux/src/drivers/net/ewrk3.c
new file mode 100644
index 0000000..07b0f13
--- /dev/null
+++ b/linux/src/drivers/net/ewrk3.c
@@ -0,0 +1,1920 @@
+/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
+
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU Public License, incorporated herein by reference.
+
+ This driver is written for the Digital Equipment Corporation series
+ of EtherWORKS ethernet cards:
+
+ DE203 Turbo (BNC)
+ DE204 Turbo (TP)
+ DE205 Turbo (TP BNC)
+
+ The driver has been tested on a relatively busy network using the DE205
+ card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
+ (7.8Mb/s) to a DECstation 5000/200.
+
+ The author may be reached at davies@maniac.ultranet.com.
+
+ =========================================================================
+ This driver has been written substantially from scratch, although its
+ inheritance of style and stack interface from 'depca.c' and in turn from
+ Donald Becker's 'lance.c' should be obvious.
+
+ The DE203/4/5 boards all use a new proprietary chip in place of the
+ LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
+ Use the depca.c driver in the standard distribution for the LANCE based
+ cards from DIGITAL; this driver will not work with them.
+
+ The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
+ only makes all the card accesses through I/O transactions and no high
+ (shared) memory is used. This mode provides a >48% performance penalty
+    (shared) memory is used. This mode incurs a >48% performance penalty
+    and is deprecated in this driver, although it is allowed to provide initial
+
+ The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
+ no point in using any mode other than the 2kB mode - their performances
+ are virtually identical, although the driver has been tested in the 2kB
+ and 32kB modes. I would suggest you uncomment the line:
+
+ FORCE_2K_MODE;
+
+ to allow the driver to configure the card as a 2kB card at your current
+ base address, thus leaving more room to clutter your system box with
+ other memory hungry boards.
+
+ As many ISA and EISA cards can be supported under this driver as you
+ wish, limited primarily by the available IRQ lines, rather than by the
+ available I/O addresses (24 ISA, 16 EISA). I have checked different
+ configurations of multiple depca cards and ewrk3 cards and have not
+ found a problem yet (provided you have at least depca.c v0.38) ...
+
+ The board IRQ setting must be at an unused IRQ which is auto-probed
+ using Donald Becker's autoprobe routines. All these cards are at
+ {5,10,11,15}.
+
+ No 16MB memory limitation should exist with this driver as DMA is not
+ used and the common memory area is in low memory on the network card (my
+ current system has 20MB and I've not had problems yet).
+
+ The ability to load this driver as a loadable module has been included
+ and used extensively during the driver development (to save those long
+ reboot sequences). To utilise this ability, you have to do 8 things:
+
+ 0) have a copy of the loadable modules code installed on your system.
+ 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
+ temporary directory.
+ 2) edit the source code near line 1898 to reflect the I/O address and
+ IRQ you're using.
+ 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
+ that the correct bits are compiled (see end of source code).
+    4) if you want to add a new card, go to 5. Otherwise, recompile a
+ kernel with the ewrk3 configuration turned off and reboot.
+ 5) insmod ewrk3.o
+ [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
+ 6) run the net startup bits for your new eth?? interface manually
+ (usually /etc/rc.inet[12] at boot time).
+ 7) enjoy!
+
+ Note that autoprobing is not allowed in loadable modules - the system is
+ already up and running and you're messing with interrupts.
+
+ To unload a module, turn off the associated interface
+ 'ifconfig eth?? down' then 'rmmod ewrk3'.
+
+ Promiscuous mode has been turned off in this driver, but all the
+ multicast address bits have been turned on. This improved the send
+ performance on a busy network by about 13%.
+
+ Ioctl's have now been provided (primarily because I wanted to grab some
+ packet size statistics). They are patterned after 'plipconfig.c' from a
+ suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
+ mode, add/delete multicast addresses, change the hardware address, get
+ packet size distribution statistics and muck around with the control and
+ status register. I'll add others if and when the need arises.
+
+ TO DO:
+ ------
+
+
+ Revision History
+ ----------------
+
+ Version Date Description
+
+ 0.1 26-aug-94 Initial writing. ALPHA code release.
+ 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
+ LeMAC version calc.,
+ IRQ vector assignments during autoprobe.
+ 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
+ Fixed up MCA hash table algorithm.
+ 0.20 4-sep-94 Added IOCTL functionality.
+ 0.21 14-sep-94 Added I/O mode.
+ 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
+ 0.22 16-sep-94 Added more IOCTLs & tidied up.
+ 0.23 21-sep-94 Added transmit cut through.
+ 0.24 31-oct-94 Added uid checks in some ioctls.
+ 0.30 1-nov-94 BETA code release.
+ 0.31 5-dec-94 Added check/allocate region code.
+ 0.32 16-jan-95 Broadcast packet fix.
+ 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
+ 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
+ Rewrite for portability & updated.
+ ALPHA support from <jestabro@amt.tay1.dec.com>
+ Added verify_area() calls in ewrk3_ioctl() from
+ suggestion by <heiko@colossus.escape.de>.
+ Add new multicasting code.
+ 0.41 20-Jan-96 Fix IRQ set up problem reported by
+ <kenneth@bbs.sas.ntu.ac.sg>.
+ 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
+ 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
+
+ =========================================================================
+*/
+
+static const char *version = "ewrk3.c:v0.43 96/8/16 davies@maniac.ultranet.com\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/segment.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/ctype.h>
+
+#include "ewrk3.h"
+
+#ifdef EWRK3_DEBUG
+static int ewrk3_debug = EWRK3_DEBUG;
+#else
+static int ewrk3_debug = 1;
+#endif
+
+#define EWRK3_NDA 0xffe0 /* No Device Address */
+
+#define PROBE_LENGTH 32
+#define ETH_PROM_SIG 0xAA5500FFUL
+
+#ifndef EWRK3_SIGNATURE
+#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
+#define EWRK3_STRLEN 8
+#endif
+
+#ifndef EWRK3_RAM_BASE_ADDRESSES
+#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
+#endif
+
+/*
+** Sets up the I/O area for the autoprobe.
+*/
+#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
+#define EWRK3_IOP_INC 0x20 /* I/O address increment */
+#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
+
+#ifndef MAX_NUM_EWRK3S
+#define MAX_NUM_EWRK3S 21
+#endif
+
+#ifndef EWRK3_EISA_IO_PORTS
+#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
+#endif
+
+#ifndef MAX_EISA_SLOTS
+#define MAX_EISA_SLOTS 16
+#define EISA_SLOT_INC 0x1000
+#endif
+
+#define CRC_POLYNOMIAL_BE 0x04c11db7UL /* Ethernet CRC, big endian */
+#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+
+#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */
+
+/*
+** EtherWORKS 3 shared memory window sizes
+*/
+#define IO_ONLY 0x00
+#define SHMEM_2K 0x800
+#define SHMEM_32K 0x8000
+#define SHMEM_64K 0x10000
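+
+/*
+** A note on the mapping (derived from ewrk3_queue_pkt() and ewrk3_rx()
+** below): each on-board buffer page is 2kB. With a 32kB or 64kB window the
+** memory page register selects a group of 16 or 32 pages and page n appears
+** at offset (n mod 16 or 32) * 2kB inside the window; with a 2kB window the
+** page register maps the selected page at offset 0, and in I/O only mode
+** the page is reached through the I/O page register instead.
+*/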
+
+/*
+** EtherWORKS 3 IRQ ENABLE/DISABLE
+*/
+#define ENABLE_IRQs { \
+ icr |= lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Enable the IRQs */\
+}
+
+#define DISABLE_IRQs { \
+ icr = inb(EWRK3_ICR);\
+ icr &= ~lp->irq_mask;\
+ outb(icr, EWRK3_ICR); /* Disable the IRQs */\
+}
+
+/*
+** EtherWORKS 3 START/STOP
+*/
+#define START_EWRK3 { \
+ csr = inb(EWRK3_CSR);\
+ csr &= ~(CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
+}
+
+#define STOP_EWRK3 { \
+ csr = (CSR_TXD|CSR_RXD);\
+ outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
+}
+
+/*
+** The EtherWORKS 3 private structure
+*/
+#define EWRK3_PKT_STAT_SZ 16
+#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
+ increase EWRK3_PKT_STAT_SZ */
+
+struct ewrk3_private {
+ char adapter_name[80]; /* Name exported to /proc/ioports */
+ u_long shmem_base; /* Shared memory start address */
+ u_long shmem_length; /* Shared memory window length */
+ struct enet_statistics stats; /* Public stats */
+ struct {
+ u32 bins[EWRK3_PKT_STAT_SZ]; /* Private stats counters */
+ u32 unicast;
+ u32 multicast;
+ u32 broadcast;
+ u32 excessive_collisions;
+ u32 tx_underruns;
+ u32 excessive_underruns;
+ } pktStats;
+ u_char irq_mask; /* Adapter IRQ mask bits */
+ u_char mPage; /* Maximum 2kB Page number */
+ u_char lemac; /* Chip rev. level */
+ u_char hard_strapped; /* Don't allow a full open */
+ u_char lock; /* Lock the page register */
+ u_char txc; /* Transmit cut through */
+ u_char *mctbl; /* Pointer to the multicast table */
+};
+
+/*
+** Force the EtherWORKS 3 card to be in 2kB MODE
+*/
+#define FORCE_2K_MODE { \
+ shmem_length = SHMEM_2K;\
+ outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
+}
+
+/*
+** Public Functions
+*/
+static int ewrk3_open(struct device *dev);
+static int ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev);
+static void ewrk3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int ewrk3_close(struct device *dev);
+static struct enet_statistics *ewrk3_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static int ewrk3_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+
+/*
+** Private functions
+*/
+static int ewrk3_hw_init(struct device *dev, u_long iobase);
+static void ewrk3_init(struct device *dev);
+static int ewrk3_rx(struct device *dev);
+static int ewrk3_tx(struct device *dev);
+
+static void EthwrkSignature(char * name, char *eeprom_image);
+static int DevicePresent(u_long iobase);
+static void SetMulticastFilter(struct device *dev);
+static int EISA_signature(char *name, s32 eisa_id);
+
+static int Read_EEPROM(u_long iobase, u_char eaddr);
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
+static u_char get_hw_addr (struct device *dev, u_char *eeprom_image, char chipType);
+
+static void isa_probe(struct device *dev, u_long iobase);
+static void eisa_probe(struct device *dev, u_long iobase);
+static struct device *alloc_device(struct device *dev, u_long iobase);
+static int ewrk3_dev_index(char *s);
+static struct device *insert_device(struct device *dev, u_long iobase, int (*init)(struct device *));
+
+
+#ifdef MODULE
+int init_module(void);
+void cleanup_module(void);
+static int autoprobed = 1, loading_module = 1;
+
+# else
+static u_char irq[] = {5,0,10,3,11,9,15,12};
+static int autoprobed = 0, loading_module = 0;
+
+#endif /* MODULE */
+
+static char name[EWRK3_STRLEN + 1];
+static int num_ewrk3s = 0, num_eth = 0;
+
+/*
+** Miscellaneous defines...
+*/
+#define INIT_EWRK3 {\
+ outb(EEPROM_INIT, EWRK3_IOPR);\
+ udelay(1000);\
+}
+
+
+
+
+int ewrk3_probe(struct device *dev)
+{
+ int tmp = num_ewrk3s, status = -ENODEV;
+ u_long iobase = dev->base_addr;
+
+ if ((iobase == 0) && loading_module){
+ printk("Autoprobing is not supported when loading a module based driver.\n");
+ status = -EIO;
+ } else { /* First probe for the Ethernet */
+ /* Address PROM pattern */
+ isa_probe(dev, iobase);
+ eisa_probe(dev, iobase);
+
+ if ((tmp == num_ewrk3s) && (iobase != 0) && loading_module) {
+ printk("%s: ewrk3_probe() cannot find device at 0x%04lx.\n", dev->name,
+ iobase);
+ }
+
+ /*
+ ** Walk the device list to check that at least one device
+ ** initialised OK
+ */
+ for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
+
+ if (dev->priv) status = 0;
+ if (iobase == 0) autoprobed = 1;
+ }
+
+ return status;
+}
+
+static int
+ewrk3_hw_init(struct device *dev, u_long iobase)
+{
+ struct ewrk3_private *lp;
+ int i, status=0;
+ u_long mem_start, shmem_length;
+ u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
+ u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
+
+ /*
+ ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
+ ** This also disables the EISA_ENABLE bit in the EISA Control Register.
+ */
+ if (iobase > 0x400) eisa_cr = inb(EISA_CR);
+ INIT_EWRK3;
+
+ nicsr = inb(EWRK3_CSR);
+
+ icr = inb(EWRK3_ICR);
+ icr &= 0x70;
+ outb(icr, EWRK3_ICR); /* Disable all the IRQs */
+
+ if (nicsr == (CSR_TXD|CSR_RXD)) {
+
+ /* Check that the EEPROM is alive and well and not living on Pluto... */
+ for (chksum=0, i=0; i<EEPROM_MAX; i+=2) {
+ union {
+ short val;
+ char c[2];
+ } tmp;
+
+ tmp.val = (short)Read_EEPROM(iobase, (i>>1));
+ eeprom_image[i] = tmp.c[0];
+ eeprom_image[i+1] = tmp.c[1];
+ chksum += eeprom_image[i] + eeprom_image[i+1];
+ }
+
+ if (chksum != 0) { /* Bad EEPROM Data! */
+ printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
+ status = -ENXIO;
+ } else {
+ EthwrkSignature(name, eeprom_image);
+ if (*name != '\0') { /* found an EWRK3 device */
+ dev->base_addr = iobase;
+
+ if (iobase > 0x400) {
+ outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
+ }
+
+ lemac = eeprom_image[EEPROM_CHIPVER];
+ cmr = inb(EWRK3_CMR);
+
+ if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
+ ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ hard_strapped = 1;
+ } else if ((iobase&0x0fff)==EWRK3_EISA_IO_PORTS) {
+ /* EISA slot address */
+ printk("%s: %s at %#4lx (EISA slot %ld)",
+ dev->name, name, iobase, ((iobase>>12)&0x0f));
+ } else { /* ISA port address */
+ printk("%s: %s at %#4lx", dev->name, name, iobase);
+ }
+
+ if (!status) {
+ printk(", h/w address ");
+ if (lemac!=LeMAC2) DevicePresent(iobase);/* needed after EWRK3_INIT */
+ status = get_hw_addr(dev, eeprom_image, lemac);
+ for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x,\n", dev->dev_addr[i]);
+
+ if (status) {
+ printk(" which has an EEPROM CRC error.\n");
+ status = -ENXIO;
+ } else {
+ if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
+ cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
+ if (eeprom_image[EEPROM_MISC0] & READ_AHEAD) cmr |= CMR_RA;
+ if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND) cmr |= CMR_WB;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL) cmr |= CMR_POLARITY;
+ if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK) cmr |= CMR_LINK;
+ if (eeprom_image[EEPROM_MISC0] & _0WS_ENA) cmr |= CMR_0WS;
+ }
+ if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM) cmr |= CMR_DRAM;
+ outb(cmr, EWRK3_CMR);
+
+ cr = inb(EWRK3_CR); /* Set up the Control Register */
+ cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
+ if (cr & SETUP_APD) cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
+ cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
+ cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
+ outb(cr, EWRK3_CR);
+
+ /*
+ ** Determine the base address and window length for the EWRK3
+ ** RAM from the memory base register.
+ */
+ mem_start = inb(EWRK3_MBR);
+ shmem_length = 0;
+ if (mem_start != 0) {
+ if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
+ mem_start *= SHMEM_64K;
+ shmem_length = SHMEM_64K;
+ } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
+ mem_start *= SHMEM_32K;
+ shmem_length = SHMEM_32K;
+ } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
+ mem_start = mem_start * SHMEM_2K + 0x80000;
+ shmem_length = SHMEM_2K;
+ } else {
+ status = -ENXIO;
+ }
+ }
+
+ /*
+ ** See the top of this source code for comments about
+ ** uncommenting this line.
+ */
+/* FORCE_2K_MODE;*/
+
+ if (!status) {
+ if (hard_strapped) {
+ printk(" is hard strapped.\n");
+ } else if (mem_start) {
+ printk(" has a %dk RAM window", (int)(shmem_length >> 10));
+ printk(" at 0x%.5lx", mem_start);
+ } else {
+ printk(" is in I/O only mode");
+ }
+
+ /* private area & initialise */
+ dev->priv = (void *) kmalloc(sizeof(struct ewrk3_private),
+ GFP_KERNEL);
+ if (dev->priv == NULL) {
+ return -ENOMEM;
+ }
+ lp = (struct ewrk3_private *)dev->priv;
+ memset(dev->priv, 0, sizeof(struct ewrk3_private));
+ lp->shmem_base = mem_start;
+ lp->shmem_length = shmem_length;
+ lp->lemac = lemac;
+ lp->hard_strapped = hard_strapped;
+
+ lp->mPage = 64;
+ if (cmr & CMR_DRAM) lp->mPage <<= 1 ;/* 2 DRAMS on module */
+
+ sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ request_region(iobase, EWRK3_TOTAL_SIZE, lp->adapter_name);
+
+ lp->irq_mask = ICR_TNEM|ICR_TXDM|ICR_RNEM|ICR_RXDM;
+
+ if (!hard_strapped) {
+ /*
+ ** Enable EWRK3 board interrupts for autoprobing
+ */
+ icr |= ICR_IE; /* Enable interrupts */
+ outb(icr, EWRK3_ICR);
+
+ /* This driver does not use DMA; clear any channel that was passed in. */
+ dev->dma = 0;
+
+ /* To auto-IRQ, trigger a transmit done (TNE) interrupt and watch
+ which IRQ line the board reports it on. */
+ if (dev->irq < 2) {
+#ifndef MODULE
+ u_char irqnum;
+
+ autoirq_setup(0);
+
+ /*
+ ** Trigger a TNE interrupt.
+ */
+ icr |= ICR_TNEM;
+ outb(1,EWRK3_TDQ); /* Write to the TX done queue */
+ outb(icr, EWRK3_ICR); /* Unmask the TNE interrupt */
+
+ irqnum = irq[((icr & IRQ_SEL) >> 4)];
+
+ dev->irq = autoirq_report(1);
+ if ((dev->irq) && (irqnum == dev->irq)) {
+ printk(" and uses IRQ%d.\n", dev->irq);
+ } else {
+ if (!dev->irq) {
+ printk(" and failed to detect IRQ line.\n");
+ } else if ((irqnum == 1) && (lemac == LeMAC2)) {
+ printk(" and an illegal IRQ line detected.\n");
+ } else {
+ printk(", but incorrect IRQ line detected.\n");
+ }
+ status = -ENXIO;
+ }
+
+ DISABLE_IRQs; /* Mask all interrupts */
+
+#endif /* MODULE */
+ } else {
+ printk(" and requires IRQ%d.\n", dev->irq);
+ }
+ }
+ if (status) release_region(iobase, EWRK3_TOTAL_SIZE);
+ } else {
+ status = -ENXIO;
+ }
+ }
+ }
+ } else {
+ status = -ENXIO;
+ }
+ }
+
+ if (!status) {
+ if (ewrk3_debug > 1) {
+ printk("%s", version);
+ }
+
+ /* The EWRK3-specific entries in the device structure. */
+ dev->open = &ewrk3_open;
+ dev->hard_start_xmit = &ewrk3_queue_pkt;
+ dev->stop = &ewrk3_close;
+ dev->get_stats = &ewrk3_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &ewrk3_ioctl;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+ }
+ } else {
+ status = -ENXIO;
+ }
+
+ return status;
+}
+
+
+static int
+ewrk3_open(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char icr, csr;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ if (!lp->hard_strapped) {
+ irq2dev_map[dev->irq] = dev; /* For latched interrupts */
+
+ if (request_irq(dev->irq, (void *)ewrk3_interrupt, 0, "ewrk3", NULL)) {
+ printk("ewrk3_open(): Requested IRQ%d is busy\n",dev->irq);
+ status = -EAGAIN;
+ } else {
+
+ /*
+ ** Re-initialize the EWRK3...
+ */
+ ewrk3_init(dev);
+
+ if (ewrk3_debug > 1){
+ printk("%s: ewrk3 open with irq %d\n",dev->name,dev->irq);
+ printk(" physical address: ");
+ for (i=0;i<5;i++){
+ printk("%2.2x:",(u_char)dev->dev_addr[i]);
+ }
+ printk("%2.2x\n",(u_char)dev->dev_addr[i]);
+ if (lp->shmem_length == 0) {
+ printk(" no shared memory, I/O only mode\n");
+ } else {
+ printk(" start of shared memory: 0x%08lx\n",lp->shmem_base);
+ printk(" window length: 0x%04lx\n",lp->shmem_length);
+ }
+ printk(" # of DRAMS: %d\n",((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
+ printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
+ printk(" cr: 0x%02x\n", inb(EWRK3_CR));
+ printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
+ printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
+ printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
+ }
+
+ dev->tbusy = 0;
+ dev->start = 1;
+ dev->interrupt = UNMASK_INTERRUPTS;
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ icr = inb(EWRK3_ICR);
+ ENABLE_IRQs;
+
+ }
+ } else {
+ dev->start = 0;
+ dev->tbusy = 1;
+ printk("%s: ewrk3 available for hard strapped set up only.\n", dev->name);
+ printk(" Run the 'ewrk3setup' utility or remove the hard straps.\n");
+ }
+
+ MOD_INC_USE_COUNT;
+
+ return status;
+}
+
+/*
+** Initialize the EtherWORKS 3 operating conditions
+*/
+static void
+ewrk3_init(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_char csr, page;
+ u_long iobase = dev->base_addr;
+
+ /*
+ ** Enable any multicasts
+ */
+ set_multicast_list(dev);
+
+ /*
+ ** Clean out any remaining entries in all the queues here
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+ while (inb(EWRK3_FMQ));
+
+ /*
+ ** Write a clean free memory queue
+ */
+ for (page=1;page<lp->mPage;page++) { /* Write the free page numbers */
+ outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
+ }
+
+ lp->lock = 0; /* Ensure there are no locks */
+
+ START_EWRK3; /* Enable the TX and/or RX */
+}
+
+/*
+** Writes a socket buffer to the free page queue
+*/
+static int
+ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int status = 0;
+ u_char icr, csr;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy || lp->lock) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < QUEUE_PKT_TIMEOUT) {
+ status = -1;
+ } else if (!lp->hard_strapped) {
+ printk("%s: transmit timed/locked out, status %04x, resetting.\n",
+ dev->name, inb(EWRK3_CSR));
+
+ /*
+ ** Mask all board interrupts
+ */
+ DISABLE_IRQs;
+
+ /*
+ ** Stop the TX and RX...
+ */
+ STOP_EWRK3;
+
+ ewrk3_init(dev);
+
+ /*
+ ** Unmask EWRK3 board interrupts
+ */
+ ENABLE_IRQs;
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ } else if (skb == NULL) {
+ dev_tint(dev);
+ } else if (skb->len > 0) {
+
+ /*
+ ** Block a timer-based transmit from overlapping. This could better be
+ ** done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+
+ DISABLE_IRQs; /* So that the page # remains correct */
+
+ /*
+ ** Get a free page from the FMQ when resources are available
+ */
+ if (inb(EWRK3_FMQC) > 0) {
+ u_long buf = 0;
+ u_char page;
+
+ if ((page = inb(EWRK3_FMQ)) < lp->mPage) {
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem_base;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = ((((short)page << 11) & 0x7800) + lp->shmem_base);
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = ((((short)page << 11) & 0xf800) + lp->shmem_base);
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n",dev->name);
+ }
+
+ if (!status) {
+
+ /*
+ ** Set up the buffer control structures and copy the data from
+ ** the socket buffer to the shared memory.
+ */
+
+ if (lp->shmem_length == IO_ONLY) {
+ int i;
+ u_char *p = skb->data;
+
+ outb((char)(TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
+ outb((char)(skb->len & 0xff), EWRK3_DATA);
+ outb((char)((skb->len >> 8) & 0xff), EWRK3_DATA);
+ outb((char)0x04, EWRK3_DATA);
+ for (i=0; i<skb->len; i++) {
+ outb(*p++, EWRK3_DATA);
+ }
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ } else {
+ writeb((char)(TCR_QMODE|TCR_PAD|TCR_IFC), (char *)buf);/* ctrl byte*/
+ buf+=1;
+ writeb((char)(skb->len & 0xff), (char *)buf);/* length (16 bit xfer)*/
+ buf+=1;
+ if (lp->txc) {
+ writeb((char)(((skb->len >> 8) & 0xff) | XCT), (char *)buf);
+ buf+=1;
+ writeb(0x04, (char *)buf); /* index byte */
+ buf+=1;
+ writeb(0x00, (char *)(buf + skb->len)); /* Write the XCT flag */
+ memcpy_toio(buf, skb->data, PRELOAD);/* Write PRELOAD bytes*/
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ memcpy_toio(buf+PRELOAD, skb->data+PRELOAD, skb->len-PRELOAD);
+ writeb(0xff, (char *)(buf + skb->len)); /* Write the XCT flag */
+ } else {
+ writeb((char)((skb->len >> 8) & 0xff), (char *)buf);
+ buf+=1;
+ writeb(0x04, (char *)buf); /* index byte */
+ buf+=1;
+ memcpy_toio((char *)buf, skb->data, skb->len);/* Write data bytes */
+ outb(page, EWRK3_TQ); /* Start sending pkt */
+ }
+ }
+
+ dev->trans_start = jiffies;
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ } else { /* return unused page to the free memory queue */
+ outb(page, EWRK3_FMQ);
+ }
+ lp->lock = 0; /* unlock the page register */
+ } else {
+ printk("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
+ (u_char) page);
+ }
+ } else {
+ printk("ewrk3_queue_pkt(): No free resources...\n");
+ printk("ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",inb(EWRK3_CSR),inb(EWRK3_ICR),inb(EWRK3_FMQC));
+ }
+
+ /* Check for free resources: clear 'tbusy' if there are some */
+ if (inb(EWRK3_FMQC) > 0) {
+ dev->tbusy = 0;
+ }
+
+ ENABLE_IRQs;
+ }
+
+ return status;
+}
+
+/*
+** The EWRK3 interrupt handler.
+*/
+static void
+ewrk3_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct ewrk3_private *lp;
+ u_long iobase;
+ u_char icr, cr, csr;
+
+ if (dev == NULL) {
+ printk ("ewrk3_interrupt(): irq %d for unknown device.\n", irq);
+ } else {
+ lp = (struct ewrk3_private *)dev->priv;
+ iobase = dev->base_addr;
+
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = MASK_INTERRUPTS;
+
+ /* get the interrupt information */
+ csr = inb(EWRK3_CSR);
+
+ /*
+ ** Mask the EWRK3 board interrupts and turn on the LED
+ */
+ DISABLE_IRQs;
+
+ cr = inb(EWRK3_CR);
+ cr |= CR_LED;
+ outb(cr, EWRK3_CR);
+
+ if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
+ ewrk3_rx(dev);
+
+ if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
+ ewrk3_tx(dev);
+
+ /*
+ ** Now deal with the TX/RX disable flags. These are set when there
+ ** are no more resources. If resources free up then enable these
+ ** interrupts, otherwise mask them - failure to do this will result
+ ** in the system hanging in an interrupt loop.
+ */
+ if (inb(EWRK3_FMQC)) { /* any resources available? */
+ lp->irq_mask |= ICR_TXDM|ICR_RXDM;/* enable the interrupt source */
+ csr &= ~(CSR_TXD|CSR_RXD);/* ensure restart of a stalled TX or RX */
+ outb(csr, EWRK3_CSR);
+ dev->tbusy = 0; /* clear TX busy flag */
+ mark_bh(NET_BH);
+ } else {
+ lp->irq_mask &= ~(ICR_TXDM|ICR_RXDM);/* disable the interrupt source */
+ }
+
+ /* Unmask the EWRK3 board interrupts and turn off the LED */
+ cr &= ~CR_LED;
+ outb(cr, EWRK3_CR);
+
+ dev->interrupt = UNMASK_INTERRUPTS;
+ ENABLE_IRQs;
+ }
+
+ return;
+}
+
+static int
+ewrk3_rx(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ int i, status = 0;
+ u_char page, tmpPage = 0, tmpLock = 0;
+ u_long buf = 0;
+
+ while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
+ if ((page = inb(EWRK3_RQ)) < lp->mPage) {/* Get next entry's buffer page */
+ /*
+ ** Preempt any process using the current page register. Check for
+ ** an existing lock to reduce time taken in I/O transactions.
+ */
+ if ((tmpLock = set_bit(0, (void *)&lp->lock)) == 1) { /* Assert lock */
+ if (lp->shmem_length == IO_ONLY) { /* Get existing page */
+ tmpPage = inb(EWRK3_IOPR);
+ } else {
+ tmpPage = inb(EWRK3_MPR);
+ }
+ }
+
+ /*
+ ** Set up shared memory window and pointer into the window
+ */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(page, EWRK3_IOPR);
+ } else if (lp->shmem_length == SHMEM_2K) {
+ buf = lp->shmem_base;
+ outb(page, EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_32K) {
+ buf = ((((short)page << 11) & 0x7800) + lp->shmem_base);
+ outb((page >> 4), EWRK3_MPR);
+ } else if (lp->shmem_length == SHMEM_64K) {
+ buf = ((((short)page << 11) & 0xf800) + lp->shmem_base);
+ outb((page >> 5), EWRK3_MPR);
+ } else {
+ status = -1;
+ printk("%s: Oops - your private data area is hosed!\n",dev->name);
+ }
+
+ if (!status) {
+ char rx_status;
+ int pkt_len;
+
+ if (lp->shmem_length == IO_ONLY) {
+ rx_status = inb(EWRK3_DATA);
+ pkt_len = inb(EWRK3_DATA);
+ pkt_len |= ((u_short)inb(EWRK3_DATA) << 8);
+ } else {
+ rx_status = readb(buf);
+ buf+=1;
+ pkt_len = readw(buf);
+ buf+=3;
+ }
+
+ if (!(rx_status & R_ROK)) { /* There was an error. */
+ lp->stats.rx_errors++; /* Update the error stats. */
+ if (rx_status & R_DBE) lp->stats.rx_frame_errors++;
+ if (rx_status & R_CRC) lp->stats.rx_crc_errors++;
+ if (rx_status & R_PLL) lp->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
+ unsigned char *p;
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align to 16 bytes */
+ p = skb_put(skb,pkt_len);
+
+ if (lp->shmem_length == IO_ONLY) {
+ *p = inb(EWRK3_DATA); /* dummy read */
+ for (i=0; i<pkt_len; i++) {
+ *p++ = inb(EWRK3_DATA);
+ }
+ } else {
+ memcpy_fromio(p, buf, pkt_len);
+ }
+
+ /*
+ ** Notify the upper protocol layers that there is another
+ ** packet to handle
+ */
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+
+ /*
+ ** Update stats
+ */
+ lp->stats.rx_packets++;
+ for (i=1; i<EWRK3_PKT_STAT_SZ-1; i++) {
+ if (pkt_len < i*EWRK3_PKT_BIN_SZ) {
+ lp->pktStats.bins[i]++;
+ i = EWRK3_PKT_STAT_SZ;
+ }
+ }
+ p = skb->data; /* Look at the dest addr */
+ if (p[0] & 0x01) { /* Multicast/Broadcast */
+ if ((*(s32 *)&p[0] == -1) && (*(s16 *)&p[4] == -1)) {
+ lp->pktStats.broadcast++;
+ } else {
+ lp->pktStats.multicast++;
+ }
+ } else if ((*(s32 *)&p[0] == *(s32 *)&dev->dev_addr[0]) &&
+ (*(s16 *)&p[4] == *(s16 *)&dev->dev_addr[4])) {
+ lp->pktStats.unicast++;
+ }
+
+ lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
+ if (lp->pktStats.bins[0] == 0) { /* Reset counters */
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ }
+ } else {
+ printk("%s: Insufficient memory; nuking packet.\n", dev->name);
+ lp->stats.rx_dropped++; /* Really, deferred. */
+ break;
+ }
+ }
+ }
+ /*
+ ** Return the received buffer to the free memory queue
+ */
+ outb(page, EWRK3_FMQ);
+
+ if (tmpLock) { /* If a lock was preempted */
+ if (lp->shmem_length == IO_ONLY) { /* Replace old page */
+ outb(tmpPage, EWRK3_IOPR);
+ } else {
+ outb(tmpPage, EWRK3_MPR);
+ }
+ }
+ lp->lock = 0; /* Unlock the page register */
+ } else {
+ printk("ewrk3_rx(): Illegal page number, page %d\n",page);
+ printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n",inb(EWRK3_CSR),inb(EWRK3_ICR),inb(EWRK3_FMQC));
+ }
+ }
+ return status;
+}
+
+/*
+** Buffer sent - check for TX buffer errors.
+*/
+static int
+ewrk3_tx(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char tx_status;
+
+ while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
+ if (tx_status & T_VSTS) { /* The status is valid */
+ if (tx_status & T_TXE) {
+ lp->stats.tx_errors++;
+ if (tx_status & T_NCL) lp->stats.tx_carrier_errors++;
+ if (tx_status & T_LCL) lp->stats.tx_window_errors++;
+ if (tx_status & T_CTU) {
+ if ((tx_status & T_COLL) ^ T_XUR) {
+ lp->pktStats.tx_underruns++;
+ } else {
+ lp->pktStats.excessive_underruns++;
+ }
+ } else if (tx_status & T_COLL) {
+ if ((tx_status & T_COLL) ^ T_XCOLL) {
+ lp->stats.collisions++;
+ } else {
+ lp->pktStats.excessive_collisions++;
+ }
+ }
+ } else {
+ lp->stats.tx_packets++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+ewrk3_close(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char icr, csr;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ewrk3_debug > 1) {
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inb(EWRK3_CSR));
+ }
+
+ /*
+ ** We stop the EWRK3 here... mask interrupts and stop TX & RX
+ */
+ DISABLE_IRQs;
+
+ STOP_EWRK3;
+
+ /*
+ ** Clean out the TX and RX queues here (note that one entry
+ ** may get added to either the TXD or RX queues if the TX or RX
+ ** just starts processing a packet before the STOP_EWRK3 command
+ ** is received. This will be flushed in the ewrk3_open() call).
+ */
+ while (inb(EWRK3_TQ));
+ while (inb(EWRK3_TDQ));
+ while (inb(EWRK3_RQ));
+
+ if (!lp->hard_strapped) {
+ free_irq(dev->irq, NULL);
+
+ irq2dev_map[dev->irq] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct enet_statistics *
+ewrk3_get_stats(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+
+ /* Null body since there is no framing error counter */
+
+ return &lp->stats;
+}
+
+/*
+** Set or clear the multicast filter for this adapter.
+*/
+static void
+set_multicast_list(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ u_long iobase = dev->base_addr;
+ u_char csr;
+
+ if (irq2dev_map[dev->irq] != NULL) {
+ csr = inb(EWRK3_CSR);
+
+ if (lp->shmem_length == IO_ONLY) {
+ lp->mctbl = (char *) PAGE0_HTE;
+ } else {
+ lp->mctbl = (char *)(lp->shmem_base + PAGE0_HTE);
+ }
+
+ csr &= ~(CSR_PME | CSR_MCE);
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ csr |= CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ SetMulticastFilter(dev);
+ csr |= CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ }
+ }
+}
+
+/*
+** Calculate the hash code and update the logical address filter
+** from a list of ethernet multicast addresses.
+** Little endian crc one liner from Matt Thomas, DEC.
+**
+** Note that when clearing the table, the broadcast bit must remain asserted
+** to receive broadcast messages.
+*/
+static void SetMulticastFilter(struct device *dev)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ struct dev_mc_list *dmi=dev->mc_list;
+ u_long iobase = dev->base_addr;
+ int i;
+ char *addrs, j, bit, byte;
+ short *p = (short *) lp->mctbl;
+ u16 hashcode;
+ s32 crc, poly = CRC_POLYNOMIAL_LE;
+
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(EEPROM_OFFSET(lp->mctbl), EWRK3_PIR1);
+ } else {
+ outb(0, EWRK3_MPR);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i=0; i<(HASH_TABLE_LEN >> 3); i++) {
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0xff, EWRK3_DATA);
+ } else { /* memset didn't work here */
+ writew(0xffff, p);
+ p++; i++;
+ }
+ }
+ } else {
+ /* Clear table except for broadcast bit */
+ if (lp->shmem_length == IO_ONLY) {
+ for (i=0; i<(HASH_TABLE_LEN >> 4) - 1; i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ outb(0x80, EWRK3_DATA); i++; /* insert the broadcast bit */
+ for (; i<(HASH_TABLE_LEN >> 3); i++) {
+ outb(0x00, EWRK3_DATA);
+ }
+ } else {
+ memset_io(lp->mctbl, 0, (HASH_TABLE_LEN >> 3));
+ writeb(0x80, (char *)(lp->mctbl + (HASH_TABLE_LEN >> 4) - 1));
+ }
+
+ /* Update table */
+ for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ addrs=dmi->dmi_addr;
+ dmi=dmi->next;
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
+ crc = 0xffffffff; /* init CRC for each address */
+ for (byte=0;byte<ETH_ALEN;byte++) { /* for each address byte */
+ /* process each address bit */
+ for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
+ crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
+ }
+ }
+ hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
+
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+
+ if (lp->shmem_length == IO_ONLY) {
+ u_char tmp;
+
+ outw((short)((long)lp->mctbl) + byte, EWRK3_PIR1);
+ tmp = inb(EWRK3_DATA);
+ tmp |= bit;
+ outw((short)((long)lp->mctbl) + byte, EWRK3_PIR1);
+ outb(tmp, EWRK3_DATA);
+ } else {
+ writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
+ }
+ }
+ }
+ }
+
+ lp->lock = 0; /* Unlock the page register */
+
+ return;
+}
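+
+/*
+** Illustrative helper (added for clarity; not called anywhere in this
+** driver, and the function name is made up): the little endian CRC hash
+** used in SetMulticastFilter() above, distilled into a standalone function
+** that maps a 6 byte multicast address to its 9 bit slot in the 512 bit
+** hash filter.
+*/
+static u16 ewrk3_hash_sketch(u_char *addr)
+{
+  char bit, byte;
+  int j;
+  s32 crc = 0xffffffff; /* init CRC, as above */
+  s32 poly = CRC_POLYNOMIAL_LE;
+
+  for (byte = 0; byte < ETH_ALEN; byte++) { /* for each address byte */
+    for (bit = addr[byte], j = 0; j < 8; j++, bit >>= 1) { /* each bit, LSB first */
+      crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
+    }
+  }
+
+  return (u16)(crc & ((1 << 9) - 1)); /* bit[3-8] -> byte, bit[0-2] -> bit in byte */
+}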
+
+/*
+** ISA bus I/O device probe
+*/
+static void isa_probe(struct device *dev, u_long ioaddr)
+{
+ int i = num_ewrk3s, maxSlots;
+ u_long iobase;
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr >= 0x400) return; /* Not ISA */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EWRK3_IO_BASE; /* Get the first slot address */
+ maxSlots = 24;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ maxSlots = i + 1;
+ }
+
+ for (; (i<maxSlots) && (dev!=NULL);iobase+=EWRK3_IOP_INC, i++) {
+ if (!check_region(iobase, EWRK3_TOTAL_SIZE)) {
+ if (DevicePresent(iobase) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (ewrk3_hw_init(dev, iobase) == 0) {
+ num_ewrk3s++;
+ }
+ num_eth++;
+ }
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
+ }
+ }
+
+ return;
+}
+
+/*
+** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
+** the motherboard.
+*/
+static void eisa_probe(struct device *dev, u_long ioaddr)
+{
+ int i, maxSlots;
+ u_long iobase;
+ char name[EWRK3_STRLEN];
+
+ if (!ioaddr && autoprobed) return ; /* Been here before ! */
+ if (ioaddr < 0x1000) return; /* Not EISA */
+
+ if (ioaddr == 0) { /* Autoprobing */
+ iobase = EISA_SLOT_INC; /* Get the first slot address */
+ i = 1;
+ maxSlots = MAX_EISA_SLOTS;
+ } else { /* Probe a specific location */
+ iobase = ioaddr;
+ i = (ioaddr >> 12);
+ maxSlots = i + 1;
+ }
+
+ for (i=1; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
+ if (EISA_signature(name, EISA_ID) == 0) {
+ if (!check_region(iobase, EWRK3_TOTAL_SIZE)) {
+ if (DevicePresent(iobase) == 0) {
+ if ((dev = alloc_device(dev, iobase)) != NULL) {
+ if (ewrk3_hw_init(dev, iobase) == 0) {
+ num_ewrk3s++;
+ }
+ num_eth++;
+ }
+ }
+ } else if (autoprobed) {
+ printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+** Search the entire 'eth' device list for a fixed probe. If a match isn't
+** found then check for an autoprobe or unused device location. If they
+** are not available then insert a new device structure at the end of
+** the current list.
+*/
+static struct device *
+alloc_device(struct device *dev, u_long iobase)
+{
+ struct device *adev = NULL;
+ int fixed = 0, new_dev = 0;
+
+ num_eth = ewrk3_dev_index(dev->name);
+ if (loading_module) return dev;
+
+ while (1) {
+ if (((dev->base_addr == EWRK3_NDA) || (dev->base_addr==0)) && !adev) {
+ adev=dev;
+ } else if ((dev->priv == NULL) && (dev->base_addr==iobase)) {
+ fixed = 1;
+ } else {
+ if (dev->next == NULL) {
+ new_dev = 1;
+ } else if (strncmp(dev->next->name, "eth", 3) != 0) {
+ new_dev = 1;
+ }
+ }
+ if ((dev->next == NULL) || new_dev || fixed) break;
+ dev = dev->next;
+ num_eth++;
+ }
+ if (adev && !fixed) {
+ dev = adev;
+ num_eth = ewrk3_dev_index(dev->name);
+ new_dev = 0;
+ }
+
+ if (((dev->next == NULL) &&
+ ((dev->base_addr != EWRK3_NDA) && (dev->base_addr != 0)) && !fixed) ||
+ new_dev) {
+ num_eth++; /* New device */
+ dev = insert_device(dev, iobase, ewrk3_probe);
+ }
+
+ return dev;
+}
+
+/*
+** If at end of eth device list and can't use current entry, malloc
+** one up. If memory could not be allocated, print an error message.
+*/
+static struct device *
+insert_device(struct device *dev, u_long iobase, int (*init)(struct device *))
+{
+ struct device *new;
+
+ new = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
+ if (new == NULL) {
+ printk("eth%d: Device not initialised, insufficient memory\n",num_eth);
+ return NULL;
+ } else {
+ new->next = dev->next;
+ dev->next = new;
+ dev = dev->next; /* point to the new device */
+ dev->name = (char *)(dev + 1);
+ if (num_eth > 9999) {
+ sprintf(dev->name,"eth????");/* New device name */
+ } else {
+ sprintf(dev->name,"eth%d", num_eth);/* New device name */
+ }
+ dev->base_addr = iobase; /* assign the io address */
+ dev->init = init; /* initialisation routine */
+ }
+
+ return dev;
+}
+
+static int
+ewrk3_dev_index(char *s)
+{
+ int i=0, j=0;
+
+ for (;*s; s++) {
+ if (isdigit(*s)) {
+ j=1;
+ i = (i * 10) + (*s - '0');
+ } else if (j) break;
+ }
+
+ return i;
+}
+
+/*
+** Read the EWRK3 EEPROM using this routine
+*/
+static int Read_EEPROM(u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+
+ return inw(EWRK3_EPROM1); /* 16 bits data return */
+}
+
+/*
+** Write the EWRK3 EEPROM using this routine
+*/
+static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
+{
+ int i;
+
+ outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+ outw(data, EWRK3_EPROM1); /* write data to register */
+ outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
+ outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
+ for (i=0;i<75000;i++) inb(EWRK3_CSR); /* wait 15msec */
+ outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
+ for (i=0;i<5000;i++) inb(EWRK3_CSR); /* wait 1msec */
+
+ return 0;
+}
+
+/*
+** Look for a particular board name in the on-board EEPROM.
+*/
+static void EthwrkSignature(char *name, char *eeprom_image)
+{
+ u_long i,j,k;
+ char *signatures[] = EWRK3_SIGNATURE;
+
+ strcpy(name, "");
+ for (i=0;*signatures[i] != '\0' && *name == '\0';i++) {
+ for (j=EEPROM_PNAME7,k=0;j<=EEPROM_PNAME0 && k<strlen(signatures[i]);j++) {
+ if (signatures[i][k] == eeprom_image[j]) { /* track signature */
+ k++;
+ } else { /* lost signature; begin search again */
+ k=0;
+ }
+ }
+ if (k == strlen(signatures[i])) {
+ for (k=0; k<EWRK3_STRLEN; k++) {
+ name[k] = eeprom_image[EEPROM_PNAME7 + k];
+ name[EWRK3_STRLEN] = '\0';
+ }
+ }
+ }
+
+ return; /* the device name string is returned via 'name' */
+}
+
+/*
+** Look for a special sequence in the Ethernet station address PROM that
+** is common across all EWRK3 products.
+**
+** Search the Ethernet address ROM for the signature. Since the ROM address
+** counter can start at an arbitrary point, the search must include the entire
+** probe sequence length plus the (length_of_the_signature - 1).
+** Stop the search IMMEDIATELY after the signature is found so that the
+** PROM address counter is correctly positioned at the start of the
+** ethernet address for later read out.
+*/
+
+static int DevicePresent(u_long iobase)
+{
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ } llsig;
+ char Sig[sizeof(u32) << 1];
+ } dev;
+ short sigLength;
+ char data;
+ int i, j, status = 0;
+
+ dev.llsig.a = ETH_PROM_SIG;
+ dev.llsig.b = ETH_PROM_SIG;
+ sigLength = sizeof(u32) << 1;
+
+ for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
+ data = inb(EWRK3_APROM);
+ if (dev.Sig[j] == data) { /* track signature */
+ j++;
+ } else { /* lost signature; begin search again */
+ if (data == dev.Sig[0]) {
+ j=1;
+ } else {
+ j=0;
+ }
+ }
+ }
+
+ if (j!=sigLength) {
+ status = -ENODEV; /* search failed */
+ }
+
+ return status;
+}
+
+static u_char get_hw_addr(struct device *dev, u_char *eeprom_image, char chipType)
+{
+ int i, j, k;
+ u_short chksum;
+ u_char crc, lfsr, sd, status = 0;
+ u_long iobase = dev->base_addr;
+ u16 tmp;
+
+ if (chipType == LeMAC2) {
+ for (crc=0x6a, j=0; j<ETH_ALEN; j++) {
+ sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
+ outb(dev->dev_addr[j], EWRK3_PAR0 + j);
+ for (k=0; k<8; k++, sd >>= 1) {
+ lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
+ crc = (crc >> 1) + lfsr;
+ }
+ }
+ if (crc != eeprom_image[EEPROM_PA_CRC]) status = -1;
+ } else {
+ for (i=0,k=0;i<ETH_ALEN;) {
+ k <<= 1 ;
+ if (k > 0xffff) k-=0xffff;
+
+ k += (u_char) (tmp = inb(EWRK3_APROM));
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+ k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
+ dev->dev_addr[i] = (u_char) tmp;
+ outb(dev->dev_addr[i], EWRK3_PAR0 + i);
+ i++;
+
+ if (k > 0xffff) k-=0xffff;
+ }
+ if (k == 0xffff) k=0;
+ chksum = inb(EWRK3_APROM);
+ chksum |= (inb(EWRK3_APROM)<<8);
+ if (k != chksum) status = -1;
+ }
+
+ return status;
+}
+
+/*
+** Look for a particular board name in the EISA configuration space
+*/
+static int EISA_signature(char *name, s32 eisa_id)
+{
+ u_long i;
+ char *signatures[] = EWRK3_SIGNATURE;
+ char ManCode[EWRK3_STRLEN];
+ union {
+ s32 ID;
+ char Id[4];
+ } Eisa;
+ int status = 0;
+
+ *name = '\0';
+ for (i=0; i<4; i++) {
+ Eisa.Id[i] = inb(eisa_id + i);
+ }
+
+ ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
+ ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
+ ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
+ ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
+ ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
+ ManCode[5]='\0';
+
+ for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
+ if (strstr(ManCode, signatures[i]) != NULL) {
+ strcpy(name,ManCode);
+ status = 1;
+ }
+ }
+
+ return status; /* the name string is returned via 'name' */
+}
+
+/*
+** Perform IOCTL call functions here. Some are privileged operations and the
+** effective uid is checked in those cases.
+*/
+static int ewrk3_ioctl(struct device *dev, struct ifreq *rq, int cmd)
+{
+ struct ewrk3_private *lp = (struct ewrk3_private *)dev->priv;
+ struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_data;
+ u_long iobase = dev->base_addr;
+ int i, j, status = 0;
+ u_char csr;
+ union {
+ u_char addr[HASH_TABLE_LEN * ETH_ALEN];
+ u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
+ } tmp;
+
+ switch(ioc->cmd) {
+ case EWRK3_GET_HWADDR: /* Get the hardware address */
+ for (i=0; i<ETH_ALEN; i++) {
+ tmp.addr[i] = dev->dev_addr[i];
+ }
+ ioc->len = ETH_ALEN;
+ if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_HWADDR: /* Set the hardware address */
+ if (suser()) {
+ if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN))) {
+ csr = inb(EWRK3_CSR);
+ csr |= (CSR_TXD|CSR_RXD);
+ outb(csr, EWRK3_CSR); /* Disable the TX and RX */
+
+ memcpy_fromfs(tmp.addr,ioc->data,ETH_ALEN);
+ for (i=0; i<ETH_ALEN; i++) {
+ dev->dev_addr[i] = tmp.addr[i];
+ outb(tmp.addr[i], EWRK3_PAR0 + i);
+ }
+
+ csr &= ~(CSR_TXD|CSR_RXD); /* Enable the TX and RX */
+ outb(csr, EWRK3_CSR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_PROM: /* Set Promiscuous Mode */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_PME;
+ csr &= ~CSR_MCE;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SAY_BOO: /* Say "Boo!" to the kernel log file */
+ printk("%s: Boo!\n", dev->name);
+
+ break;
+ case EWRK3_GET_MCA: /* Get the multicast address table */
+ if (!(status = verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ while (set_bit(0, (void *)&lp->lock) != 0); /* Wait for lock to free */
+ if (lp->shmem_length == IO_ONLY) {
+ outb(0, EWRK3_IOPR);
+ outw(PAGE0_HTE, EWRK3_PIR1);
+ for (i=0; i<(HASH_TABLE_LEN >> 3); i++) {
+ tmp.addr[i] = inb(EWRK3_DATA);
+ }
+ } else {
+ outb(0, EWRK3_MPR);
+ memcpy_fromio(tmp.addr, (char *)(lp->shmem_base + PAGE0_HTE), (HASH_TABLE_LEN >> 3));
+ }
+ ioc->len = (HASH_TABLE_LEN >> 3);
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+ lp->lock = 0; /* Unlock the page register */
+
+ break;
+ case EWRK3_SET_MCA: /* Set a multicast address */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, ETH_ALEN*ioc->len))) {
+ memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+ set_multicast_list(dev);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_MCA: /* Clear all multicast addresses */
+ if (suser()) {
+ set_multicast_list(dev);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_MCA_EN: /* Enable multicast addressing */
+ if (suser()) {
+ csr = inb(EWRK3_CSR);
+ csr |= CSR_MCE;
+ csr &= ~CSR_PME;
+ outb(csr, EWRK3_CSR);
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_STATS: /* Get the driver statistics */
+ cli();
+ ioc->len = sizeof(lp->pktStats);
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
+ }
+ sti();
+
+ break;
+ case EWRK3_CLR_STATS: /* Zero out the driver statistics */
+ if (suser()) {
+ cli();
+ memset(&lp->pktStats, 0, sizeof(lp->pktStats));
+ sti();
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CSR: /* Get the CSR Register contents */
+ tmp.addr[0] = inb(EWRK3_CSR);
+ ioc->len = 1;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_CSR: /* Set the CSR Register contents */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, 1))) {
+ memcpy_fromfs(tmp.addr, ioc->data, 1);
+ outb(tmp.addr[0], EWRK3_CSR);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
+ if (suser()) {
+ for (i=0; i<(EEPROM_MAX>>1); i++) {
+ tmp.val[i] = (short)Read_EEPROM(iobase, i);
+ }
+ i = EEPROM_MAX;
+ tmp.addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
+ for (j=0;j<ETH_ALEN;j++) {
+ tmp.addr[i++] = inb(EWRK3_PAR0 + j);
+ }
+ ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
+ if (suser()) {
+ if (!(status=verify_area(VERIFY_READ, ioc->data, EEPROM_MAX))) {
+ memcpy_fromfs(tmp.addr, ioc->data, EEPROM_MAX);
+ for (i=0; i<(EEPROM_MAX>>1); i++) {
+ Write_EEPROM(tmp.val[i], iobase, i);
+ }
+ }
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_GET_CMR: /* Get the CMR Register contents */
+ tmp.addr[0] = inb(EWRK3_CMR);
+ ioc->len = 1;
+ if (!(status=verify_area(VERIFY_WRITE, ioc->data, ioc->len))) {
+ memcpy_tofs(ioc->data, tmp.addr, ioc->len);
+ }
+
+ break;
+ case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
+ if (suser()) {
+ lp->txc = 1;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
+ if (suser()) {
+ lp->txc = 0;
+ } else {
+ status = -EPERM;
+ }
+
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ }
+
+ return status;
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device thisEthwrk = {
+ devicename, /* device name is inserted by /linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 5, /* I/O address, IRQ */
+ 0, 0, 0, NULL, ewrk3_probe };
+
+static int io=0x300; /* <--- EDIT THESE LINES FOR YOUR CONFIGURATION */
+static int irq=5; /* or use the insmod io= irq= options */
+
+int
+init_module(void)
+{
+ thisEthwrk.base_addr=io;
+ thisEthwrk.irq=irq;
+ if (register_netdev(&thisEthwrk) != 0)
+ return -EIO;
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ if (thisEthwrk.priv) {
+ kfree(thisEthwrk.priv);
+ thisEthwrk.priv = NULL;
+ }
+ thisEthwrk.irq = 0;
+
+ unregister_netdev(&thisEthwrk);
+ release_region(thisEthwrk.base_addr, EWRK3_TOTAL_SIZE);
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
+ *
+ * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
+ * End:
+ */
+
diff --git a/linux/src/drivers/net/ewrk3.h b/linux/src/drivers/net/ewrk3.h
new file mode 100644
index 0000000..554a18a
--- /dev/null
+++ b/linux/src/drivers/net/ewrk3.h
@@ -0,0 +1,322 @@
+/*
+ Written 1994 by David C. Davies.
+
+ Copyright 1994 Digital Equipment Corporation.
+
+ This software may be used and distributed according to the terms of the
+ GNU Public License, incorporated herein by reference.
+
+ The author may be reached as davies@wanton.lkg.dec.com or Digital
+ Equipment Corporation, 550 King Street, Littleton MA 01460.
+
+ =========================================================================
+*/
+
+/*
+** I/O Address Register Map
+*/
+#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
+#define EWRK3_CR iobase+0x01 /* Control Register */
+#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
+#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
+#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
+#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
+#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
+#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
+#define EWRK3_RQ iobase+0x08 /* Receive Queue */
+#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
+#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
+#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
+#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
+#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
+#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
+#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
+#define EWRK3_DATA iobase+0x10 /* Data Register */
+#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
+#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
+#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
+#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
+#define EWRK3_APROM iobase+0x15 /* Address PROM */
+#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
+#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
+#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
+#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
+#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
+#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
+#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
+#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
+#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
+
+/*
+** Control Page Map
+*/
+#define PAGE0_FMQ 0x000 /* Free Memory Queue */
+#define PAGE0_RQ 0x080 /* Receive Queue */
+#define PAGE0_TQ 0x100 /* Transmit Queue */
+#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
+#define PAGE0_HTE 0x200 /* Hash Table Entries */
+#define PAGE0_RSVD 0x240 /* RESERVED */
+#define PAGE0_USRD 0x600 /* User Data */
+
+/*
+** Control and Status Register bit definitions (EWRK3_CSR)
+*/
+#define CSR_RA 0x80 /* Runt Accept */
+#define CSR_PME 0x40 /* Promiscuous Mode Enable */
+#define CSR_MCE 0x20 /* Multicast Enable */
+#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
+#define CSR_RNE 0x04 /* RX Queue Not Empty */
+#define CSR_TXD 0x02 /* TX Disable */
+#define CSR_RXD 0x01 /* RX Disable */
+
+/*
+** Control Register bit definitions (EWRK3_CR)
+*/
+#define CR_APD 0x80 /* Auto Port Disable */
+#define CR_PSEL 0x40 /* Port Select (0->TP port) */
+#define CR_LBCK 0x20 /* LoopBaCK enable */
+#define CR_FDUP 0x10 /* Full DUPlex enable */
+#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
+#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
+#define CR_LED 0x02 /* LED (1-> turn on) */
+
+/*
+** Interrupt Control Register bit definitions (EWRK3_ICR)
+*/
+#define ICR_IE 0x80 /* Interrupt Enable */
+#define ICR_IS 0x60 /* Interrupt Selected */
+#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
+#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
+#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
+#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
+
+/*
+** Transmit Status Register bit definitions (EWRK3_TSR)
+*/
+#define TSR_NCL 0x80 /* No Carrier Loopback */
+#define TSR_ID 0x40 /* Initially Deferred */
+#define TSR_LCL 0x20 /* Late CoLlision */
+#define TSR_ECL 0x10 /* Excessive CoLlisions */
+#define TSR_RCNTR 0x0f /* Retries CouNTeR */
+
+/*
+** I/O Page Register bit definitions (EWRK3_IOPR)
+*/
+#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
+#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
+#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
+#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
+#define EEPROM_RD 0xe0 /* EEPROM READ command */
+
+/*
+** I/O Base Register bit definitions (EWRK3_IOBR)
+*/
+#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
+#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
+
+/*
+** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
+*/
+#define CMR_RA 0x80 /* Read Ahead */
+#define CMR_WB 0x40 /* Write Behind */
+#define CMR_LINK 0x20 /* 0->TP */
+#define CMR_POLARITY 0x10 /* Informational */
+#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
+#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
+#define CMR_PNP 0x04 /* Plug 'n Play */
+#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
+#define CMR_0WS 0x01 /* Zero Wait State */
+
+/*
+** MAC Receive Status Register bit definitions
+*/
+
+#define R_ROK 0x80 /* Receive OK summary */
+#define R_IAM 0x10 /* Individual Address Match */
+#define R_MCM 0x08 /* MultiCast Match */
+#define R_DBE 0x04 /* Dribble Bit Error */
+#define R_CRC 0x02 /* CRC error */
+#define R_PLL 0x01 /* Phase Lock Lost */
+
+/*
+** MAC Transmit Control Register bit definitions
+*/
+
+#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
+#define TCR_SED 0x20 /* Stop when Error Detected */
+#define TCR_QMODE 0x10 /* Q_MODE */
+#define TCR_LAB 0x08 /* Less Aggressive Backoff */
+#define TCR_PAD 0x04 /* PAD Runt Packets */
+#define TCR_IFC 0x02 /* Insert Frame Check */
+#define TCR_ISA 0x01 /* Insert Source Address */
+
+/*
+** MAC Transmit Status Register bit definitions
+*/
+
+#define T_VSTS 0x80 /* Valid STatuS */
+#define T_CTU 0x40 /* Cut Through Used */
+#define T_SQE 0x20 /* Signal Quality Error */
+#define T_NCL 0x10 /* No Carrier Loopback */
+#define T_LCL 0x08 /* Late Collision */
+#define T_ID 0x04 /* Initially Deferred */
+#define T_COLL 0x03 /* COLLision status */
+#define T_XCOLL 0x03 /* Excessive Collisions */
+#define T_MCOLL 0x02 /* Multiple Collisions */
+#define T_OCOLL 0x01 /* One Collision */
+#define T_NOCOLL 0x00 /* No Collisions */
+#define T_XUR 0x03 /* Excessive Underruns */
+#define T_TXE 0x7f /* TX Errors */
+
+/*
+** EISA Configuration Register bit definitions
+*/
+
+#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
+#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
+
+/*
+** EEPROM BYTES
+*/
+#define EEPROM_MEMB 0x00
+#define EEPROM_IOB 0x01
+#define EEPROM_EISA_ID0 0x02
+#define EEPROM_EISA_ID1 0x03
+#define EEPROM_EISA_ID2 0x04
+#define EEPROM_EISA_ID3 0x05
+#define EEPROM_MISC0 0x06
+#define EEPROM_MISC1 0x07
+#define EEPROM_PNAME7 0x08
+#define EEPROM_PNAME6 0x09
+#define EEPROM_PNAME5 0x0a
+#define EEPROM_PNAME4 0x0b
+#define EEPROM_PNAME3 0x0c
+#define EEPROM_PNAME2 0x0d
+#define EEPROM_PNAME1 0x0e
+#define EEPROM_PNAME0 0x0f
+#define EEPROM_SWFLAGS 0x10
+#define EEPROM_HWCAT 0x11
+#define EEPROM_NETMAN2 0x12
+#define EEPROM_REVLVL 0x13
+#define EEPROM_NETMAN0 0x14
+#define EEPROM_NETMAN1 0x15
+#define EEPROM_CHIPVER 0x16
+#define EEPROM_SETUP 0x17
+#define EEPROM_PADDR0 0x18
+#define EEPROM_PADDR1 0x19
+#define EEPROM_PADDR2 0x1a
+#define EEPROM_PADDR3 0x1b
+#define EEPROM_PADDR4 0x1c
+#define EEPROM_PADDR5 0x1d
+#define EEPROM_PA_CRC 0x1e
+#define EEPROM_CHKSUM 0x1f
+
+/*
+** EEPROM bytes for checksumming
+*/
+#define EEPROM_MAX 32 /* bytes */
+
+/*
+** EEPROM MISCELLANEOUS FLAGS
+*/
+#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
+#define READ_AHEAD 0x0080 /* Read Ahead feature */
+#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
+#define IRQ_SEL 0x0060 /* IRQ line selection */
+#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
+#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
+#define WRITE_BEHIND 0x0002 /* Write Behind feature */
+#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
+
+/*
+** EEPROM NETWORK MANAGEMENT FLAGS
+*/
+#define NETMAN_POL 0x04 /* Polarity defeat */
+#define NETMAN_LINK 0x02 /* Link defeat */
+#define NETMAN_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM SW FLAGS
+*/
+#define SW_SQE 0x10 /* Signal Quality Error */
+#define SW_LAB 0x08 /* Less Aggressive Backoff */
+#define SW_INIT 0x04 /* Initialized */
+#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
+#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
+
+/*
+** EEPROM SETUP FLAGS
+*/
+#define SETUP_APD 0x80 /* AutoPort Disable */
+#define SETUP_PS 0x40 /* Port Select */
+#define SETUP_MP 0x20 /* MultiPort */
+#define SETUP_1TP 0x10 /* 1 port, TP */
+#define SETUP_1COAX 0x00 /* 1 port, Coax */
+#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
+
+/*
+** EEPROM MANAGEMENT FLAGS
+*/
+#define MGMT_CCE 0x01 /* Custom Counters Enable */
+
+/*
+** EEPROM VERSIONS
+*/
+#define LeMAC 0x11
+#define LeMAC2 0x12
+
+/*
+** Miscellaneous
+*/
+
+#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
+#define EISA_EN 0x0001 /* Enable EISA bus buffers */
+
+#define HASH_TABLE_LEN 512 /* Bits */
+
+#define XCT 0x80 /* Transmit Cut Through */
+#define PRELOAD 16 /* 4 long words */
+
+#define MASK_INTERRUPTS 1
+#define UNMASK_INTERRUPTS 0
+
+#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
+
+/*
+** Include the IOCTL stuff
+*/
+#include <linux/sockios.h>
+
+#define EWRK3IOCTL SIOCDEVPRIVATE
+
+struct ewrk3_ioctl {
+ unsigned short cmd; /* Command to run */
+ unsigned short len; /* Length of the data buffer */
+ unsigned char *data; /* Pointer to the data buffer */
+};
+
+/*
+** Recognised commands for the driver
+*/
+#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
+#define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */
+#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
+#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
+#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
+#define EWRK3_GET_MCA 0x06 /* Get the multicast address table */
+#define EWRK3_SET_MCA 0x07 /* Set a multicast address */
+#define EWRK3_CLR_MCA 0x08 /* Clear all multicast addresses */
+#define EWRK3_MCA_EN 0x09 /* Enable a multicast address group */
+#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
+#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
+#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
+#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
+#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
+#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
+#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
+#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
+#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
diff --git a/linux/src/drivers/net/fmv18x.c b/linux/src/drivers/net/fmv18x.c
new file mode 100644
index 0000000..b29ddf0
--- /dev/null
+++ b/linux/src/drivers/net/fmv18x.c
@@ -0,0 +1,664 @@
+/* fmv18x.c: A network device driver for the Fujitsu FMV-181/182/183/184.
+
+ Original: at1700.c (1993-94 by Donald Becker).
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Modified by Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)
+ Copyright 1994 Fujitsu Laboratories Ltd.
+ Special thanks to:
+ Masayoshi UTAKA (utaka@ace.yk.fujitsu.co.jp)
+ for testing this driver.
+ H. NEGISHI (agy, negishi@sun45.psd.cs.fujitsu.co.jp)
+ for suggestion of some program modification.
+ Masahiro SEKIGUCHI <seki@sysrap.cs.fujitsu.co.jp>
+ for suggestion of some program modification.
+ Kazutoshi MORIOKA (morioka@aurora.oaks.cs.fujitsu.co.jp)
+ for testing this driver.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This is a device driver for the Fujitsu FMV-181/182/183/184, which
+ is a straight-forward Fujitsu MB86965 implementation.
+
+ Sources:
+ at1700.c
+ The Fujitsu MB86965 datasheet.
+ The Fujitsu FMV-181/182 user's guide
+*/
+
+static const char *version =
+ "fmv18x.c:v1.3.71e 03/04/96 Yutaka TAMIYA (tamy@flab.fujitsu.co.jp)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+static int fmv18x_probe_list[] =
+{0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+typedef unsigned char uchar;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ long open_time; /* Useless example local info. */
+ uint tx_started:1; /* Nonzero while a transmit is in progress. */
+ uchar tx_queue; /* Number of packets on the Tx queue. */
+ ushort tx_queue_len; /* Current length of the Tx queue. */
+};
+
+
+/* Offsets from the base address. */
+#define STATUS 0
+#define TX_STATUS 0
+#define RX_STATUS 1
+#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
+#define RX_INTR 3
+#define TX_MODE 4
+#define RX_MODE 5
+#define CONFIG_0 6 /* Misc. configuration settings. */
+#define CONFIG_1 7
+/* Run-time register bank 2 definitions. */
+#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
+#define TX_START 10
+#define COL16CNTL 11
+#define MODE13 13
+/* Fujitsu FMV-18x Card Configuration */
+#define FJ_STATUS0 0x10
+#define FJ_STATUS1 0x11
+#define FJ_CONFIG0 0x12
+#define FJ_CONFIG1 0x13
+#define FJ_MACADDR 0x14 /* 0x14 - 0x19 */
+#define FJ_BUFCNTL 0x1A
+#define FJ_BUFDATA 0x1C
+#define FMV18X_IO_EXTENT 32
+
+/* Index to functions, as function prototypes. */
+
+extern int fmv18x_probe(struct device *dev);
+
+static int fmv18x_probe1(struct device *dev, short ioaddr);
+static int net_open(struct device *dev);
+static int net_send_packet(struct sk_buff *skb, struct device *dev);
+static void net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void net_rx(struct device *dev);
+static int net_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry fmv18x_drv =
+{"fmv18x", fmv18x_probe1, FMV18X_IO_EXTENT, fmv18x_probe_list};
+#else
+int
+fmv18x_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return fmv18x_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; fmv18x_probe_list[i]; i++) {
+ int ioaddr = fmv18x_probe_list[i];
+ if (check_region(ioaddr, FMV18X_IO_EXTENT))
+ continue;
+ if (fmv18x_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
+ "signature", the default bit pattern after a reset. This *doesn't* work --
+ there is no way to reset the bus interface without a complete power-cycle!
+
+ It turns out that ATI came to the same conclusion I did: the only thing
+ that can be done is checking a few bits and then diving right into MAC
+ address check. */
+
+int fmv18x_probe1(struct device *dev, short ioaddr)
+{
+ char irqmap[4] = {3, 7, 10, 15};
+ unsigned int i, irq;
+
+ /* Resetting the chip doesn't reset the ISA interface, so don't bother.
+ That means we have to be careful with the register values we probe for.
+ */
+
+ /* Check I/O address configuration and Fujitsu vendor code */
+ if (fmv18x_probe_list[inb(ioaddr + FJ_CONFIG0) & 0x07] != ioaddr
+ || inb(ioaddr+FJ_MACADDR ) != 0x00
+ || inb(ioaddr+FJ_MACADDR+1) != 0x00
+ || inb(ioaddr+FJ_MACADDR+2) != 0x0e)
+ return -ENODEV;
+
+ irq = irqmap[(inb(ioaddr + FJ_CONFIG0)>>6) & 0x03];
+
+ /* Snarf the interrupt vector now. */
+ if (request_irq(irq, &net_interrupt, 0, "fmv18x", NULL)) {
+ printk ("FMV-18x found at %#3x, but it's unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, irq);
+ return EAGAIN;
+ }
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ /* Grab the region so that we can find another board if the IRQ request
+ fails. */
+ request_region(ioaddr, FMV18X_IO_EXTENT, "fmv18x");
+
+ printk("%s: FMV-18x found at %#3x, IRQ %d, address ", dev->name,
+ ioaddr, irq);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ irq2dev_map[irq] = dev;
+
+ for(i = 0; i < 6; i++) {
+ unsigned char val = inb(ioaddr + FJ_MACADDR + i);
+ printk("%02x", val);
+ dev->dev_addr[i] = val;
+ }
+
+ /* "FJ_STATUS0" 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
+ rather than 150 ohm shielded twisted pair compensation.
+ 0x0000 == auto-sense the interface
+ 0x0800 == use TP interface
+ 0x1800 == use coax interface
+ */
+ {
+ const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2/5"};
+ ushort setup_value = inb(ioaddr + FJ_STATUS0);
+
+ switch( setup_value & 0x07 ){
+ case 0x01 /* 10base5 */:
+ case 0x02 /* 10base2 */: dev->if_port = 0x18; break;
+ case 0x04 /* 10baseT */: dev->if_port = 0x08; break;
+ default /* auto-sense*/: dev->if_port = 0x00; break;
+ }
+ printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
+ }
+
+ /* Initialize LAN Controller and LAN Card */
+ outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
+ outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
+ outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
+ outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure (TAMIYA) */
+
+ /* wait for a while */
+ udelay(200);
+
+ /* Set the station address in bank zero. */
+ outb(0x00, ioaddr + CONFIG_1);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + 8 + i);
+
+ /* Switch to bank 1 and set the multicast table to accept none. */
+ outb(0x04, ioaddr + CONFIG_1);
+ for (i = 0; i < 8; i++)
+ outb(0x00, ioaddr + 8 + i);
+
+ /* Switch to bank 2 and lock our I/O address. */
+ outb(0x08, ioaddr + CONFIG_1);
+ outb(dev->if_port, ioaddr + MODE13);
+
+ if (net_debug)
+ printk("%s", version);
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = net_open;
+ dev->stop = net_close;
+ dev->hard_start_xmit = net_send_packet;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of 'dev' with ethernet-generic values. */
+
+ ether_setup(dev);
+ return 0;
+}
+
+
+static int net_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ /* Set configuration register 0 to 32K of 100ns byte-wide memory,
+ 16 bit bus access, two 4K Tx buffers, and enable the Rx and Tx. */
+ outb(0x5a, ioaddr + CONFIG_0);
+
+ /* Powerup and switch to register bank 2 for the run-time registers. */
+ outb(0xe8, ioaddr + CONFIG_1);
+
+ lp->tx_started = 0;
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+
+ /* Clear Tx and Rx Status */
+ outb(0xff, ioaddr + TX_STATUS);
+ outb(0xff, ioaddr + RX_STATUS);
+ lp->open_time = jiffies;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ /* Enable the IRQ of the LAN Card */
+ outb(0x80, ioaddr + FJ_CONFIG1);
+
+ /* Enable both Tx and Rx interrupts */
+ outw(0x8182, ioaddr+TX_INTR);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+static int
+net_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ printk("%s: transmit timed out with status %04x, %s?\n", dev->name,
+ htons(inw(ioaddr + TX_STATUS)),
+ inb(ioaddr + TX_STATUS) & 0x80
+ ? "IRQ conflict" : "network cable problem");
+ printk("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
+ dev->name, htons(inw(ioaddr + 0)),
+ htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)),
+ htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)),
+ htons(inw(ioaddr +10)), htons(inw(ioaddr +12)),
+ htons(inw(ioaddr +14)));
+ printk("eth card: %04x %04x\n",
+ htons(inw(ioaddr+FJ_STATUS0)),
+ htons(inw(ioaddr+FJ_CONFIG0)));
+ lp->stats.tx_errors++;
+ /* ToDo: We should try to restart the adaptor... */
+ cli();
+
+ /* Initialize LAN Controller and LAN Card */
+ outb(0xda, ioaddr + CONFIG_0); /* Initialize LAN Controller */
+ outb(0x00, ioaddr + CONFIG_1); /* Stand by mode */
+ outb(0x00, ioaddr + FJ_CONFIG1); /* Disable IRQ of LAN Card */
+ outb(0x00, ioaddr + FJ_BUFCNTL); /* Reset ? I'm not sure */
+ net_open(dev);
+
+ sti();
+ }
+
+ /* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ if (length > ETH_FRAME_LEN) {
+ if (net_debug)
+ printk("%s: Attempting to send a large packet (%d bytes).\n",
+ dev->name, length);
+ return 1;
+ }
+
+ if (net_debug > 4)
+ printk("%s: Transmitting a packet of length %lu.\n", dev->name,
+ (unsigned long)skb->len);
+
+ /* Disable both interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ outw(length, ioaddr + DATAPORT);
+ outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
+
+ lp->tx_queue++;
+ lp->tx_queue_len += length + 2;
+
+ if (lp->tx_started == 0) {
+ /* If the Tx is idle, always trigger a transmit. */
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ lp->tx_started = 1;
+ dev->tbusy = 0;
+ } else if (lp->tx_queue_len < 4096 - 1502)
+ /* Yes, there is room for one more packet. */
+ dev->tbusy = 0;
+
+ /* Re-enable interrupts */
+ outw(0x8182, ioaddr + TX_INTR);
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status;
+
+ if (dev == NULL) {
+ printk ("fmv18x_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ /* Avoid multiple interrupts. */
+ outw(0x0000, ioaddr + TX_INTR);
+
+ status = inw(ioaddr + TX_STATUS);
+ outw(status, ioaddr + TX_STATUS);
+
+ if (net_debug > 4)
+ printk("%s: Interrupt with status %04x.\n", dev->name, status);
+ if (status & 0xff00
+ || (inb(ioaddr + RX_MODE) & 0x40) == 0) { /* Got a packet(s). */
+ net_rx(dev);
+ }
+ if (status & 0x00ff) {
+ if (status & 0x80) {
+ lp->stats.tx_packets++;
+ if (lp->tx_queue) {
+ outb(0x80 | lp->tx_queue, ioaddr + TX_START);
+ lp->tx_queue = 0;
+ lp->tx_queue_len = 0;
+ dev->trans_start = jiffies;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ } else {
+ lp->tx_started = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ }
+ if (status & 0x02 ) {
+ if (net_debug > 4)
+ printk("%s: 16 Collision occur during Txing.\n", dev->name);
+ /* Retry to send the packet */
+ outb(0x02, ioaddr + COL16CNTL);
+ }
+ }
+
+ dev->interrupt = 0;
+ outw(0x8182, ioaddr + TX_INTR);
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 10; /* 5 -> 10: by agy 19940922 */
+
+ while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
+ /* Clear PKT_RDY bit: by agy 19940922 */
+ /* outb(0x80, ioaddr + RX_STATUS); */
+ ushort status = inw(ioaddr + DATAPORT);
+
+ if (net_debug > 4)
+ printk("%s: Rxing packet mode %02x status %04x.\n",
+ dev->name, inb(ioaddr + RX_MODE), status);
+#ifndef final_version
+ if (status == 0) {
+ outb(0x05, ioaddr + 14);
+ break;
+ }
+#endif
+
+ if ((status & 0xF0) != 0x20) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x08) lp->stats.rx_length_errors++;
+ if (status & 0x04) lp->stats.rx_frame_errors++;
+ if (status & 0x02) lp->stats.rx_crc_errors++;
+ if (status & 0x01) lp->stats.rx_over_errors++;
+ } else {
+ ushort pkt_len = inw(ioaddr + DATAPORT);
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ if (pkt_len > 1550) {
+ printk("%s: The FMV-18x claimed a very large packet, size %d.\n",
+ dev->name, pkt_len);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_errors++;
+ break;
+ }
+ skb = dev_alloc_skb(pkt_len+3);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet (len %d).\n",
+ dev->name, pkt_len);
+ outb(0x05, ioaddr + 14);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2);
+
+ insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
+
+ if (net_debug > 5) {
+ int i;
+ printk("%s: Rxed packet of length %d: ", dev->name, pkt_len);
+ for (i = 0; i < 14; i++)
+ printk(" %02x", skb->data[i]);
+ printk(".\n");
+ }
+
+ skb->protocol=eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ if (--boguscount <= 0)
+ break;
+ }
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ {
+ int i;
+ for (i = 0; i < 20; i++) {
+ if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
+ break;
+ (void)inw(ioaddr + DATAPORT); /* dummy status read */
+ outb(0x05, ioaddr + 14);
+ }
+
+ if (net_debug > 5 && i > 0)
+ printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
+ dev->name, inb(ioaddr + RX_MODE), i);
+ }
+
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int net_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ ((struct net_local *)dev->priv)->open_time = 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Set configuration register 0 to disable Tx and Rx. */
+ outb(0xda, ioaddr + CONFIG_0);
+
+ /* Update the statistics -- ToDo. */
+
+ /* Power-down the chip. Green, green, green! */
+ outb(0x00, ioaddr + CONFIG_1);
+
+ MOD_DEC_USE_COUNT;
+
+ /* Set the ethernet adaptor disable IRQ */
+ outb(0x00, ioaddr + FJ_CONFIG1);
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ cli();
+ /* ToDo: Update the statistics from the device registers. */
+ sti();
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+ if (dev->mc_count || dev->flags&(IFF_PROMISC|IFF_ALLMULTI))
+ {
+ /*
+ * We must make the kernel realise we had to move
+ * into promisc mode or we start all out war on
+ * the cable. - AC
+ */
+ dev->flags|=IFF_PROMISC;
+
+ outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
+ }
+ else
+ outb(2, ioaddr + RX_MODE); /* Disable promiscuous, use normal mode */
+}
+
+#ifdef MODULE
+static char devicename[9] = { 0, };
+static struct device dev_fmv18x = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, fmv18x_probe };
+
+static int io = 0x220;
+static int irq = 0;
+
+int init_module(void)
+{
+ if (io == 0)
+ printk("fmv18x: You should not use auto-probing with insmod!\n");
+ dev_fmv18x.base_addr = io;
+ dev_fmv18x.irq = irq;
+ if (register_netdev(&dev_fmv18x) != 0) {
+ printk("fmv18x: register_netdev() returned non-zero.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ unregister_netdev(&dev_fmv18x);
+ kfree(dev_fmv18x.priv);
+ dev_fmv18x.priv = NULL;
+
+ /* If we don't do this, we can't re-insmod it later. */
+ free_irq(dev_fmv18x.irq, NULL);
+ irq2dev_map[dev_fmv18x.irq] = NULL;
+ release_region(dev_fmv18x.base_addr, FMV18X_IO_EXTENT);
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c fmv18x.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hamachi.c b/linux/src/drivers/net/hamachi.c
new file mode 100644
index 0000000..fdcf43d
--- /dev/null
+++ b/linux/src/drivers/net/hamachi.c
@@ -0,0 +1,1315 @@
+/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
+/*
+ Written 1998-2002 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
+ adapter.
+
+ Support and updates available at
+ http://www.scyld.com/network/hamachi.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"hamachi.c:v1.04 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/hamachi.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: hamachi_probe
+config-in: tristate 'Packet Engines "Hamachi" PCI Gigabit Ethernet support' CONFIG_HAMACHI
+c-help-name: Packet Engines "Hamachi" PCI Gigabit Ethernet support
+c-help-symbol: CONFIG_HAMACHI
+c-help: This driver is for the Packet Engines "Hamachi" GNIC-2 Gigabit Ethernet
+c-help: adapter.
+c-help: Usage information and updates are available from
+c-help: http://www.scyld.com/network/hamachi.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 40;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Hamachi has a 64 element perfect filter. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* An override for the hardware detection of bus width.
+ Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit.
+ Add 2 to disable parity detection.
+*/
+static int force32 = 0;
+
+/* Used to pass the media type, etc.
+ These exist for driver interoperability.
+ Only 1 Gigabit is supported by the chip.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 64
+#define TX_QUEUE_LEN 60 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 128
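As a side note to the power-of-two remark above (illustrative only, not part of the patch; the function name tx_ring_index is made up), the ring indices in this driver are taken modulo the ring size, which the compiler can reduce to a bit mask when the size is a power of two:

/* Illustrative only: ring indexing as used throughout this driver. */
static unsigned int tx_ring_index(unsigned int cur_tx)
{
    return cur_tx % TX_RING_SIZE;   /* emitted as cur_tx & (TX_RING_SIZE - 1)
                                       because TX_RING_SIZE is a power of two */
}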
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#if ADDRLEN == 64
+#define virt_to_desc(addr) cpu_to_le64(virt_to_bus(addr))
+#else
+#define virt_to_desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(force32, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(unused, deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+MODULE_PARM_DESC(force32, "Set to 1 to force 32 bit PCI bus use.");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Hamachi"
+Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit
+66Mhz PCI card.
+
+II. Board-specific settings
+
+No jumpers exist on the board. The chip supports software correction of
+various motherboard wiring errors, however this driver does not support
+that feature.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Hamachi uses a typical descriptor based bus-master architecture.
+The descriptor list is similar to that used by the Digital Tulip.
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+This driver uses a zero-copy receive and transmit scheme similar to my other
+network drivers.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Hamachi as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. Gigabit cards are typically used on generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.
+
+IIIb/c. Transmit/Receive Structure
+
+The Rx and Tx descriptor structures are straightforward, with no historical
+baggage that must be explained. Unlike the awkward DBDMA structure, there
+are no unused fields or option bits that had only one allowable setting.
+
+Two details should be noted about the descriptors: The chip supports both 32
+bit and 64 bit address structures, and the length field is overwritten on
+the receive descriptors. The descriptor length is set in the control word
+for each channel. The development driver uses 32 bit addresses only, however
+64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha.
+
+IIId. Synchronization
+
+This driver is very similar to my other network drivers.
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'hmp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
+
+IVb. References
+
+Hamachi Engineering Design Specification, 5/15/97
+(Note: This version was marked "Confidential".)
+
+IVc. Errata
+
+None noted.
+*/
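The copy-break policy described in section IIIa can be restated as the following sketch (illustrative only, not the driver's code; the function name rx_copybreak_sketch is hypothetical, and the real logic in hamachi_rx() below additionally handles checksum hints, ring refill, and error frames):

/* Simplified restatement of the Rx copy-break scheme described above. */
static struct sk_buff *rx_copybreak_sketch(struct hamachi_private *hmp,
                                           struct net_device *dev,
                                           int entry, int pkt_len)
{
    struct sk_buff *skb;

    if (pkt_len < hmp->rx_copybreak
        && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
        /* Small frame: copy into a minimally sized skbuff and keep
           the full-sized receive buffer in place on the ring. */
        skb->dev = dev;
        skb_reserve(skb, 2);            /* 16-byte align the IP header */
        memcpy(skb_put(skb, pkt_len),
               hmp->rx_skbuff[entry]->tail, pkt_len);
    } else {
        /* Large frame: pass the in-place skbuff up the stack; the
           ring slot is refilled with a fresh allocation afterwards. */
        skb = hmp->rx_skbuff[entry];
        skb_put(skb, pkt_len);
        hmp->rx_skbuff[entry] = NULL;
    }
    return skb;
}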
+
+
+/* The table for PCI detection and activation. */
+
+static void *hamachi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+enum chip_capability_flags { CanHaveMII=1, };
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Packet Engines GNIC-II \"Hamachi\"", { 0x09111318, 0xffffffff,},
+ PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR0 | PCI_ADDR_64BITS, 0x400, 0, },
+ { 0,},
+};
+
+struct drv_id_info hamachi_drv_id = {
+ "hamachi", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ hamachi_probe1, 0,
+};
+
+/* Offsets to the Hamachi registers. Various sizes. */
+enum hamachi_offsets {
+ TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
+ RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
+ PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
+ LEDCtrl=0x06C, VirtualJumpers=0x06D,
+ TxChecksum=0x074, RxChecksum=0x076,
+ TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
+ InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
+ EventStatus=0x08C,
+ MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
+ /* See enum MII_offsets below. */
+ MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
+ AddrMode=0x0D0, StationAddr=0x0D2,
+ /* Gigabit AutoNegotiation. */
+ ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
+ ANLinkPartnerAbility=0x0EA,
+ EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
+ FIFOcfg=0x0F8,
+};
+
+/* Offsets to the MII-mode registers. */
+enum MII_offsets {
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
+ IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
+ LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
+
+/* The Hamachi Rx and Tx buffer descriptors. */
+struct hamachi_desc {
+ u32 status_n_length;
+#if ADDRLEN == 64
+ u32 pad;
+ u64 addr;
+#else
+ u32 addr;
+#endif
+};
+
+/* Bits in hamachi_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
+ DescIntr=0x10000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct hamachi_private {
+ /* Descriptor rings first for alignment. Tx requires a second descriptor
+ for status. */
+ struct hamachi_desc rx_ring[RX_RING_SIZE];
+ struct hamachi_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ int msg_level;
+ int max_interrupt_work;
+ long in_interrupt;
+
+ struct hamachi_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+ int multicast_filter_limit;
+ int rx_mode;
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int read_eeprom(struct net_device *dev, int location);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static void mdio_write(long ioaddr, int phy_id, int location, int value);
+static int hamachi_open(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#ifdef HAVE_CHANGE_MTU
+static int change_mtu(struct net_device *dev, int new_mtu);
+#endif
+static void hamachi_timer(unsigned long data);
+static void hamachi_tx_timeout(struct net_device *dev);
+static void hamachi_init_ring(struct net_device *dev);
+static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int hamachi_rx(struct net_device *dev);
+static void hamachi_error(struct net_device *dev, int intr_status);
+static int hamachi_close(struct net_device *dev);
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_hamachi_dev = NULL;
+
+#ifndef MODULE
+int hamachi_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&hamachi_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *hamachi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct hamachi_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s type %x at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, (int)readl(ioaddr + ChipRev),
+ ioaddr);
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = read_eeprom(dev, 4 + i);
+ /* Alternate: readb(ioaddr + StationAddr + i); */
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ i = readb(ioaddr + PCIClkMeas);
+ printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers "
+ "%2.2x, LPA %4.4x.\n",
+ dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
+ i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
+ (int)readw(ioaddr + ANLinkPartnerAbility));
+
+ /* Hmmm, do we really need to reset the chip???. */
+ writeb(1, ioaddr + ChipReset);
+
+ /* If the bus size is misidentified, do the following. */
+ if (force32)
+ writeb(force32, ioaddr + VirtualJumpers);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_hamachi_dev;
+ root_hamachi_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit =
+ multicast_filter_limit < 64 ? multicast_filter_limit : 64;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x2220)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port & 0x3330)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The Hamachi-specific entries in the device structure. */
+ dev->open = &hamachi_open;
+ dev->hard_start_xmit = &hamachi_start_xmit;
+ dev->stop = &hamachi_close;
+ dev->get_stats = &hamachi_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+#ifdef HAVE_CHANGE_MTU
+ dev->change_mtu = change_mtu;
+#endif
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(ioaddr, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(ioaddr, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+#ifdef notyet
+ /* Disable PCI Parity Error (0x02) or PCI 64 Bit (0x01) for miswired
+ motherboards. */
+ if (readb(ioaddr + VirtualJumpers) != 0x30)
+ writeb(0x33, ioaddr + VirtualJumpers);
+#endif
+ /* Configure gigabit autonegotiation. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */
+ writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */
+
+ return dev;
+}
+
+static int read_eeprom(struct net_device *dev, int location)
+{
+ struct hamachi_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int bogus_cnt = 1000;
+
+ writew(location, ioaddr + EEAddr);
+ writeb(0x02, ioaddr + EECmdStatus);
+ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0)
+ ;
+ if (np->msg_level & NETIF_MSG_MISC)
+ printk(KERN_DEBUG " EEPROM status is %2.2x after %d ticks.\n",
+ (int)readb(ioaddr + EECmdStatus), 1000 - bogus_cnt);
+ return readb(ioaddr + EEData);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+ int i;
+
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(1, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return readw(ioaddr + MII_Rd_Data);
+}
+
+static void mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ int i;
+
+ writew((phy_id<<8) + location, ioaddr + MII_Addr);
+ writew(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((readw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return;
+}
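For example (illustrative only, not part of the patch; the function name hamachi_phy_link_up is made up), the helpers above could be used to poll the first PHY detected during probe. MII register 1 is the basic status register and 0x0004 is its standard link-status bit.

/* Illustrative only: check link state through the MII helpers above. */
static int hamachi_phy_link_up(struct net_device *dev)
{
    struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;

    if (hmp->mii_cnt == 0)
        return -1;                      /* no MII transceiver was found */
    return (mdio_read(dev->base_addr, hmp->phys[0], 1) & 0x0004) != 0;
}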
+
+
+static int hamachi_open(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Do we need to reset the chip??? */
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (hmp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ hamachi_init_ring(dev);
+
+#if ADDRLEN == 64
+ writel(virt_to_bus(hmp->rx_ring), ioaddr + RxPtr);
+ writel(virt_to_bus(hmp->rx_ring) >> 32, ioaddr + RxPtr + 4);
+ writel(virt_to_bus(hmp->tx_ring), ioaddr + TxPtr);
+ writel(virt_to_bus(hmp->tx_ring) >> 32, ioaddr + TxPtr + 4);
+#else
+ writel(virt_to_bus(hmp->rx_ring), ioaddr + RxPtr);
+ writel(virt_to_bus(hmp->tx_ring), ioaddr + TxPtr);
+#endif
+
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers: with so many, this will eventually be
+ converted to an offset/value list. */
+ /* Configure the FIFO for 512K external, 16K used for Tx. */
+ writew(0x0028, ioaddr + FIFOcfg);
+
+ if (dev->if_port == 0)
+ dev->if_port = hmp->default_port;
+ hmp->in_interrupt = 0;
+
+ /* Setting the Rx mode will start the Rx process. */
+ /* We are always in full-duplex mode with gigabit! */
+ hmp->full_duplex = 1;
+ writew(0x0001, ioaddr + RxChecksum); /* Enable Rx IP partial checksum. */
+ writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
+ writew(0x215F, ioaddr + MACCnfg);
+ writew(0x000C, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
+ writew(0x1018, ioaddr + FrameGap1);
+ writew(0x2780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ writel(0x0030FFFF, ioaddr + FlowCtrl);
+ writew(dev->mtu+19, ioaddr + MaxFrameSize); /* hmp->rx_buf_sz ??? */
+
+ /* Enable legacy links. */
+ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
+ /* Initial Link LED to blinking red. */
+ writeb(0x03, ioaddr + LEDCtrl);
+
+ /* Configure interrupt mitigation. This has a great effect on
+ performance, so system tuning should start here. */
+ writel(0x00080000, ioaddr + TxIntrCtrl);
+ writel(0x00000020, ioaddr + RxIntrCtrl);
+
+ hmp->rx_mode = 0; /* Force Rx mode write. */
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(0x80878787, ioaddr + InterruptEnable);
+ writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+
+ /* Configure and start the DMA channels. */
+ /* Burst sizes are in the low three bits: size = 4<<(val&7) */
+#if ADDRLEN == 64
+ writew(0x0055, ioaddr + RxDMACtrl); /* 128 dword bursts */
+ writew(0x0055, ioaddr + TxDMACtrl);
+#else
+ writew(0x0015, ioaddr + RxDMACtrl);
+ writew(0x0015, ioaddr + TxDMACtrl);
+#endif
+ writew(1, dev->base_addr + RxCmd);
+
+ if (hmp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
+ dev->name, (int)readw(ioaddr + RxStatus),
+ (int)readw(ioaddr + TxStatus));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&hmp->timer);
+ hmp->timer.expires = jiffies + 3*HZ;
+ hmp->timer.data = (unsigned long)dev;
+ hmp->timer.function = &hamachi_timer; /* timer handler */
+ add_timer(&hmp->timer);
+
+ return 0;
+}
+
+static void hamachi_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (hmp->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
+ "%4.4x.\n", dev->name, (int)readw(ioaddr + ANStatus),
+ (int)readw(ioaddr + ANLinkPartnerAbility));
+ printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
+ "%4.4x %4.4x %4.4x.\n", dev->name,
+ (int)readw(ioaddr + 0x0e0),
+ (int)readw(ioaddr + 0x0e2),
+ (int)readw(ioaddr + 0x0e4),
+ (int)readw(ioaddr + 0x0e6),
+ (int)readw(ioaddr + 0x0e8),
+ (int)readw(ioaddr + 0x0eA));
+ }
+ /* This has a small false-trigger window. */
+ if (netif_queue_paused(dev) &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT
+ && hmp->cur_tx - hmp->dirty_tx > 1) {
+ hamachi_tx_timeout(dev);
+ }
+ /* We could do something here... nah. */
+ hmp->timer.expires = jiffies + next_tick;
+ add_timer(&hmp->timer);
+}
+
+static void hamachi_tx_timeout(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
+
+ if (hmp->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)hmp->rx_ring[i].status_n_length);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", hmp->tx_ring[i].status_n_length);
+ printk("\n");
+ }
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes. */
+
+ /* Trigger an immediate transmit demand. */
+ writew(2, dev->base_addr + TxCmd);
+ writew(1, dev->base_addr + TxCmd);
+ writew(1, dev->base_addr + RxCmd);
+
+ dev->trans_start = jiffies;
+ hmp->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void hamachi_init_ring(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ int i;
+
+ hmp->tx_full = 0;
+ hmp->cur_rx = hmp->cur_tx = 0;
+ hmp->dirty_rx = hmp->dirty_tx = 0;
+
+ /* Size of each temporary Rx buffer. Add 8 if you do Rx checksumming! */
+ hmp->rx_buf_sz = dev->mtu + 18 + 8;
+ /* Match other driver's allocation size when possible. */
+ if (hmp->rx_buf_sz < PKT_BUF_SZ)
+ hmp->rx_buf_sz = PKT_BUF_SZ;
+ hmp->rx_head_desc = &hmp->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ hmp->rx_ring[i].status_n_length = 0;
+ hmp->rx_skbuff[i] = 0;
+ }
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ hmp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ hmp->rx_ring[i].addr = virt_to_desc(skb->tail);
+ hmp->rx_ring[i].status_n_length =
+ cpu_to_le32(DescOwn | DescEndPacket | DescIntr | hmp->rx_buf_sz);
+ }
+ hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ /* Mark the last entry as wrapping the ring. */
+ hmp->rx_ring[i-1].status_n_length |= cpu_to_le32(DescEndRing);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ hmp->tx_skbuff[i] = 0;
+ hmp->tx_ring[i].status_n_length = 0;
+ }
+ return;
+}
+
+static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ hamachi_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = hmp->cur_tx % TX_RING_SIZE;
+
+ hmp->tx_skbuff[entry] = skb;
+
+ hmp->tx_ring[entry].addr = virt_to_desc(skb->data);
+ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
+ hmp->tx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket|DescEndRing|DescIntr | skb->len);
+ else
+ hmp->tx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket | skb->len);
+ hmp->cur_tx++;
+
+ /* Architecture-specific: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ writew(1, dev->base_addr + TxCmd);
+
+ if (hmp->cur_tx - hmp->dirty_tx >= TX_QUEUE_LEN - 1) {
+ hmp->tx_full = 1;
+ if (hmp->cur_tx - hmp->dirty_tx < TX_QUEUE_LEN - 1) {
+ netif_unpause_tx_queue(dev);
+ hmp->tx_full = 0;
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ dev->trans_start = jiffies;
+
+ if (hmp->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Hamachi transmit frame #%d length %d queued "
+ "in slot %d.\n", dev->name, hmp->cur_tx, (int)skb->len, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct hamachi_private *hmp;
+ long ioaddr;
+ int boguscnt = max_interrupt_work;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ hmp = (struct hamachi_private *)dev->priv;
+ if (test_and_set_bit(0, (void*)&hmp->in_interrupt)) {
+ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+ hmp->in_interrupt = 0; /* Avoid future hang on bug */
+ return;
+ }
+
+ do {
+ u32 intr_status = readl(ioaddr + InterruptClear);
+
+ if (hmp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & IntrRxDone)
+ hamachi_rx(dev);
+
+ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
+ int entry = hmp->dirty_tx % TX_RING_SIZE;
+ if (!(hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn)))
+ break;
+ if (hmp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, hmp->tx_ring[entry].status_n_length);
+ /* Free the original skb. */
+ dev_free_skb_irq(hmp->tx_skbuff[entry]);
+ hmp->tx_skbuff[entry] = 0;
+ hmp->stats.tx_packets++;
+ }
+ if (hmp->tx_full
+ && hmp->cur_tx - hmp->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ hmp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status &
+ (IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
+ LinkChange | NegotiationChange | StatsMax))
+ hamachi_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (hmp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+ clear_bit(0, (void*)&hmp->in_interrupt);
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int hamachi_rx(struct net_device *dev)
+{
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ int entry = hmp->cur_rx % RX_RING_SIZE;
+ int boguscnt = hmp->dirty_rx + RX_RING_SIZE - hmp->cur_rx;
+
+ if (hmp->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
+ entry, hmp->rx_ring[entry].status_n_length);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ( ! (hmp->rx_head_desc->status_n_length & cpu_to_le32(DescOwn))) {
+ struct hamachi_desc *desc = hmp->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->status_n_length);
+ u16 data_size = desc_status; /* Implicit truncate */
+ u8 *buf_addr = hmp->rx_skbuff[entry]->tail;
+ s32 frame_status =
+ le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
+
+ if (hmp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & DescEndPacket)) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, hmp->cur_rx, data_size, desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+ dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status"
+ " %x last status %x.\n", dev->name,
+ hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length,
+ hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length);
+ hmp->stats.rx_length_errors++;
+ } /* else Omit for prototype errata??? */
+ if (frame_status & 0x00380000) {
+ /* There was an error. */
+ if (hmp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
+ frame_status);
+ hmp->stats.rx_errors++;
+ if (frame_status & 0x00600000) hmp->stats.rx_length_errors++;
+ if (frame_status & 0x00080000) hmp->stats.rx_frame_errors++;
+ if (frame_status & 0x00100000) hmp->stats.rx_crc_errors++;
+ if (frame_status < 0) hmp->stats.rx_dropped++;
+ } else {
+ struct sk_buff *skb;
+ u16 pkt_len = (frame_status & 0x07ff) - 4; /* Omit CRC */
+
+#if ! defined(final_version) && 0
+ if (hmp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+ if (hmp->msg_level & NETIF_MSG_PKTDATA)
+ printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name,
+ *(s32*)&(buf_addr[data_size - 20]),
+ *(s32*)&(buf_addr[data_size - 16]),
+ *(s32*)&(buf_addr[data_size - 12]),
+ *(s32*)&(buf_addr[data_size - 8]),
+ *(s32*)&(buf_addr[data_size - 4]));
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ eth_copy_and_sum(skb, hmp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ } else {
+ char *temp = skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
+ hmp->rx_skbuff[entry] = NULL;
+#if ! defined(final_version)
+ if (bus_to_virt(desc->addr) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in hamachi_rx: %p vs. %p / %p.\n",
+ dev->name, bus_to_virt(desc->addr),
+ skb->head, temp);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ hmp->stats.rx_packets++;
+ }
+ entry = (++hmp->cur_rx) % RX_RING_SIZE;
+ hmp->rx_head_desc = &hmp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = hmp->dirty_rx % RX_RING_SIZE;
+ if (hmp->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(hmp->rx_buf_sz);
+ hmp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ hmp->rx_ring[entry].addr = virt_to_desc(skb->tail);
+ }
+ if (entry >= RX_RING_SIZE-1) /* Wrap ring */
+ hmp->rx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket|DescEndRing|DescIntr | hmp->rx_buf_sz);
+ else
+ hmp->rx_ring[entry].status_n_length =
+ cpu_to_le32(DescOwn|DescEndPacket|DescIntr | hmp->rx_buf_sz);
+ }
+
+ /* Restart Rx engine if stopped. */
+ writew(1, dev->base_addr + RxCmd);
+ return 0;
+}
+
+/* This is more properly named "uncommon interrupt events", as it covers more
+ than just errors. */
+static void hamachi_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+
+ if (intr_status & (LinkChange|NegotiationChange)) {
+ if (hmp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
+ " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
+ dev->name, (int)readw(ioaddr + 0x0E0),
+ (int)readw(ioaddr + 0x0E2),
+ (int)readw(ioaddr + ANLinkPartnerAbility),
+ (int)readl(ioaddr + IntrStatus));
+ if (readw(ioaddr + ANStatus) & 0x20) {
+ writeb(0x01, ioaddr + LEDCtrl);
+ netif_link_up(dev);
+ } else {
+ writeb(0x03, ioaddr + LEDCtrl);
+ netif_link_down(dev);
+ }
+ }
+ if (intr_status & StatsMax) {
+ hamachi_get_stats(dev);
+ /* Read the overflow bits to clear. */
+ readl(ioaddr + 0x36C);
+ readl(ioaddr + 0x3F0);
+ }
+ if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange))
+ && (hmp->msg_level & NETIF_MSG_DRV))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ hmp->stats.tx_fifo_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ hmp->stats.rx_fifo_errors++;
+}
+
+static int hamachi_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (hmp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, (int)readw(ioaddr + TxStatus),
+ (int)readw(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx,
+ hmp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + InterruptEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(2, ioaddr + RxCmd);
+ writew(2, ioaddr + TxCmd);
+
+ del_timer(&hmp->timer);
+
+#ifdef __i386__
+ if (hmp->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(hmp->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %c #%d desc. %8.8x %8.8x.\n",
+ readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
+ i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(hmp->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x\n",
+ readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
+ i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
+ if (*(u8*)hmp->rx_ring[i].addr != 0x69) {
+ int j;
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x", ((u16*)hmp->rx_ring[i].addr)[j]);
+ printk("\n");
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ hmp->rx_ring[i].status_n_length = 0;
+ hmp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (hmp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ hmp->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(hmp->rx_skbuff[i]);
+ }
+ hmp->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (hmp->tx_skbuff[i])
+ dev_free_skb(hmp->tx_skbuff[i]);
+ hmp->tx_skbuff[i] = 0;
+ }
+
+ writeb(0x00, ioaddr + LEDCtrl);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
+
+ /* We should lock this segment of code for SMP eventually, although
+ the vulnerability window is very small and statistics are
+ non-critical. */
+#if LINUX_VERSION_CODE >= 0x20119
+ hmp->stats.rx_bytes += readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */
+ hmp->stats.tx_bytes += readl(ioaddr + 0x3B0); /* Total Uni+Brd+Multi */
+#endif
+ hmp->stats.multicast += readl(ioaddr + 0x320); /* Multicast Rx */
+
+ hmp->stats.rx_length_errors += readl(ioaddr + 0x368); /* Over+Undersized */
+ hmp->stats.rx_over_errors += readl(ioaddr + 0x35C); /* Jabber */
+ hmp->stats.rx_crc_errors += readl(ioaddr + 0x360);
+ hmp->stats.rx_frame_errors += readl(ioaddr + 0x364); /* Symbol Errs */
+ hmp->stats.rx_missed_errors += readl(ioaddr + 0x36C); /* Dropped */
+
+ return &hmp->stats;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct hamachi_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ new_rx_mode = 0x000F;
+ } else if (dev->mc_count > np->multicast_filter_limit ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ new_rx_mode = 0x000B;
+ } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ struct dev_mc_list *mclist;
+ int i;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
+ ioaddr + 0x104 + i*8);
+ }
+ /* Clear remaining entries. */
+ for (; i < 64; i++)
+ writel(0, ioaddr + 0x104 + i*8);
+ new_rx_mode = 0x0003;
+ } else { /* Normal, unicast/broadcast-only mode. */
+ new_rx_mode = 0x0001;
+ }
+ if (np->rx_mode != new_rx_mode) {
+ np->rx_mode = new_rx_mode;
+ writew(new_rx_mode, ioaddr + AddrMode);
+ }
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct hamachi_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
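+	/* The literal 0x8947..0x8949 cases match the SIOCGMIIPHY/SIOCGMIIREG/
+	   SIOCSMIIREG ioctl numbers; the 0x89F0..0x89F2 cases are the older
+	   SIOCDEVPRIVATE-based equivalents, accepted here for compatibility. */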
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* We are always full duplex. Skip recording the advertised value. */
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS: {
+ /* Set rx,tx intr params, from Eric Kasten. */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->max_interrupt_work = data32[2];
+ writel(data32[1], dev->base_addr + TxIntrCtrl);
+ writel(data32[3], dev->base_addr + RxIntrCtrl);
+		printk(KERN_INFO "%s: Set interrupt mitigation parameters tx %08x, "
+ "rx %08x.\n", dev->name,
+ (int) readl(dev->base_addr + TxIntrCtrl),
+ (int) readl(dev->base_addr + RxIntrCtrl));
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#ifdef HAVE_CHANGE_MTU
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1536))
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ printk(KERN_NOTICE "%s: Changing MTU to %d.\n", dev->name, new_mtu);
+ dev->mtu = new_mtu;
+ return 0;
+}
+#endif
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&hamachi_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&hamachi_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_hamachi_dev) {
+ struct hamachi_private *hmp = (void *)(root_hamachi_dev->priv);
+ unregister_netdev(root_hamachi_dev);
+ iounmap((char *)root_hamachi_dev->base_addr);
+ next_dev = hmp->next_module;
+ if (hmp->priv_addr)
+ kfree(hmp->priv_addr);
+ kfree(root_hamachi_dev);
+ root_hamachi_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` hamachi.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c hamachi.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c hamachi.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp-plus.c b/linux/src/drivers/net/hp-plus.c
new file mode 100644
index 0000000..c2b7116
--- /dev/null
+++ b/linux/src/drivers/net/hp-plus.c
@@ -0,0 +1,483 @@
+/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
+/*
+ Written 1994 by Donald Becker.
+
+ This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
+ These cards are sold under several model numbers, usually 2724*.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ As is often the case, a great deal of credit is owed to Russ Nelson.
+ The Crynwr packet driver was my primary source of HP-specific
+ programming information.
+*/
+
+static const char *version =
+"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/string.h> /* Important -- this inlines word moves. */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hpplus_portlist[] =
+{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
+
+/*
+ The HP EtherTwist chip implementation is a fairly routine DP8390
+ implementation. It allows both shared memory and programmed-I/O buffer
+ access, using a custom interface for both. The programmed-I/O mode is
+ entirely implemented in the HP EtherTwist chip, bypassing the problem
+ ridden built-in 8390 facilities used on NE2000 designs. The shared
+ memory mode is likewise special, with an offset register used to make
+ packets appear at the shared memory base. Both modes use a base and bounds
+ page register to hide the Rx ring buffer wrap -- a packet that spans the
+ end of physical buffer memory appears continuous to the driver. (c.f. the
+ 3c503 and Cabletron E2100)
+
+ A special note: the internal buffer of the board is only 8 bits wide.
+ This lays several nasty traps for the unaware:
+ - the 8390 must be programmed for byte-wide operations
+ - all I/O and memory operations must work on whole words (the access
+ latches are serially preloaded and have no byte-swapping ability).
+
+ This board is laid out in I/O space much like the earlier HP boards:
+ the first 16 locations are for the board registers, and the second 16 are
+ for the 8390. The board is easy to identify, with both a dedicated 16 bit
+ ID register and a constant 0x530* value in the upper bits of the paging
+ register.
+*/
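+
+/* A practical consequence of the word-only constraint above: every transfer
+   routine below moves whole 16-bit (or 32-bit) units; e.g. hpp_io_get_8390_hdr()
+   fetches even the 4-byte 8390 packet header with insw(..., sizeof(hdr)>>1). */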
+
+#define HP_ID 0x00 /* ID register, always 0x4850. */
+#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
+#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
+#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
+#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
+#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
+#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
+#define HP_IO_EXTENT 32
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+/* The register set selected in HP_PAGING. */
+enum PageName {
+ Perf_Page = 0, /* Normal operation. */
+ MAC_Page = 1, /* The ethernet address (+checksum). */
+ HW_Page = 2, /* EEPROM-loaded hardware parameters. */
+ LAN_Page = 4, /* Transceiver selection, testing, etc. */
+ ID_Page = 6 };
+
+/* The bit definitions for the HPP_OPTION register. */
+enum HP_Option {
+ NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
+ EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
+ MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
+
+int hp_plus_probe(struct device *dev);
+int hpp_probe1(struct device *dev, int ioaddr);
+
+static void hpp_reset_8390(struct device *dev);
+static int hpp_open(struct device *dev);
+static int hpp_close(struct device *dev);
+static void hpp_mem_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_mem_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void hpp_mem_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hpp_io_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void hpp_io_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void hpp_io_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+
+
+/* Probe a list of addresses for an HP LAN+ adaptor.
+ This routine is almost boilerplate. */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry hpplus_drv =
+{"hpplus", hpp_probe1, HP_IO_EXTENT, hpplus_portlist};
+#else
+
+int hp_plus_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hpp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; hpplus_portlist[i]; i++) {
+ int ioaddr = hpplus_portlist[i];
+ if (check_region(ioaddr, HP_IO_EXTENT))
+ continue;
+ if (hpp_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* Do the interesting part of the probe at a single address. */
+int hpp_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ unsigned char checksum = 0;
+ const char *name = "HP-PC-LAN+";
+ int mem_start;
+ static unsigned version_printed = 0;
+
+ /* Check for the HP+ signature, 50 48 0x 53. */
+ if (inw(ioaddr + HP_ID) != 0x4850
+ || (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("hp-plus.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: %s at %#3x,", dev->name, name, ioaddr);
+
+ /* Retrieve and checksum the station address. */
+ outw(MAC_Page, ioaddr + HP_PAGING);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ unsigned char inval = inb(ioaddr + 8 + i);
+ dev->dev_addr[i] = inval;
+ checksum += inval;
+ printk(" %2.2x", inval);
+ }
+ checksum += inb(ioaddr + 14);
+
+ if (checksum != 0xff) {
+ printk(" bad checksum %2.2x.\n", checksum);
+ return ENODEV;
+ } else {
+ /* Point at the Software Configuration Flags. */
+ outw(ID_Page, ioaddr + HP_PAGING);
+ printk(" ID %4.4x", inw(ioaddr + 12));
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk ("hp-plus.c: unable to allocate memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* Grab the region so we can find another board if something fails. */
+ request_region(ioaddr, HP_IO_EXTENT,"hp-plus");
+
+ /* Read the IRQ line. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ {
+ int irq = inb(ioaddr + 13) & 0x0f;
+ int option = inw(ioaddr + HPP_OPTION);
+
+ dev->irq = irq;
+ if (option & MemEnable) {
+ mem_start = inw(ioaddr + 9) << 8;
+ printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
+ } else {
+ mem_start = 0;
+ printk(", IRQ %d, programmed-I/O mode.\n", irq);
+ }
+ }
+
+ /* Set the wrap registers for string I/O reads. */
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+
+ dev->open = &hpp_open;
+ dev->stop = &hpp_close;
+
+ ei_status.name = name;
+ ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_2X_PAGES;
+ ei_status.stop_page = HP_STOP_PG;
+
+ ei_status.reset_8390 = &hpp_reset_8390;
+ ei_status.block_input = &hpp_io_block_input;
+ ei_status.block_output = &hpp_io_block_output;
+ ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
+
+ /* Check if the memory_enable flag is set in the option register. */
+ if (mem_start) {
+ ei_status.block_input = &hpp_mem_block_input;
+ ei_status.block_output = &hpp_mem_block_output;
+ ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
+ dev->mem_start = mem_start;
+ dev->rmem_start = dev->mem_start + TX_2X_PAGES*256;
+ dev->mem_end = dev->rmem_end
+ = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
+ }
+
+ outw(Perf_Page, ioaddr + HP_PAGING);
+ NS8390_init(dev, 0);
+ /* Leave the 8390 and HP chip reset. */
+ outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
+
+ return 0;
+}
+
+static int
+hpp_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg;
+
+ if (request_irq(dev->irq, &ei_interrupt, 0, "hp-plus", NULL)) {
+ return -EAGAIN;
+ }
+
+ /* Reset the 8390 and HP chip. */
+ option_reg = inw(ioaddr + HPP_OPTION);
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ SLOW_DOWN_IO; SLOW_DOWN_IO;
+ /* Unreset the board and enable interrupts. */
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ /* Set the wrap registers for programmed-I/O operation. */
+ outw(HW_Page, ioaddr + HP_PAGING);
+ outw((HP_START_PG + TX_2X_PAGES) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
+
+ /* Select the operational page. */
+ outw(Perf_Page, ioaddr + HP_PAGING);
+
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+hpp_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+ ei_close(dev);
+ outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
+ ioaddr + HPP_OPTION);
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static void
+hpp_reset_8390(struct device *dev)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+
+ outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
+ /* Pause a few cycles for the hardware reset to take place. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ ei_status.txing = 0;
+ outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
+
+ SLOW_DOWN_IO; SLOW_DOWN_IO;
+
+
+ if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+ return;
+}
+
+/* The programmed-I/O version of reading the 4 byte 8390 specific header.
+ Note that transfer with the EtherTwist+ must be on word boundaries. */
+
+static void
+hpp_io_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. */
+
+static void
+hpp_io_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ char *buf = skb->data;
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+ insw(ioaddr + HP_DATAPORT, buf, count>>1);
+ if (count & 0x01)
+ buf[count-1] = inw(ioaddr + HP_DATAPORT);
+}
+
+/* The corresponding shared memory versions of the above 2 functions. */
+
+static void
+hpp_mem_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ memcpy_fromio(hdr, dev->mem_start, sizeof(struct e8390_pkt_hdr));
+ outw(option_reg, ioaddr + HPP_OPTION);
+ hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
+}
+
+static void
+hpp_mem_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(ring_offset, ioaddr + HPP_IN_ADDR);
+
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+
+ /* Caution: this relies on get_8390_hdr() rounding up count!
+ Also note that we *can't* use eth_io_copy_and_sum() because
+ it will not always copy "count" bytes (e.g. padded IP). */
+
+ memcpy_fromio(skb->data, dev->mem_start, count);
+ outw(option_reg, ioaddr + HPP_OPTION);
+}
+
+/* A special note: we *must* always transfer >=16 bit words.
+ It's always safe to round up, so we do. */
+static void
+hpp_io_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
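+	/* (count+3)>>2 rounds the byte count up to a whole number of 32-bit
+	   longwords for outsl(), per the rounding note above. */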
+ outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
+ return;
+}
+
+static void
+hpp_mem_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ int option_reg = inw(ioaddr + HPP_OPTION);
+
+ outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
+ outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
+ memcpy_toio(dev->mem_start, buf, (count + 3) & ~3);
+ outw(option_reg, ioaddr + HPP_OPTION);
+
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_HPP_CARDS] = { 0, };
+static struct device dev_hpp[MAX_HPP_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_HPP_CARDS] = { 0, };
+static int irq[MAX_HPP_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ struct device *dev = &dev_hpp[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = hp_plus_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
+ struct device *dev = &dev_hpp[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: hpp_close() handles free_irq + irq2dev map */
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(ioaddr, HP_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp-plus.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp.c b/linux/src/drivers/net/hp.c
new file mode 100644
index 0000000..6ddbfd2
--- /dev/null
+++ b/linux/src/drivers/net/hp.c
@@ -0,0 +1,451 @@
+/* hp.c: A HP LAN ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for the HP PC-LAN adaptors.
+
+ Sources:
+ The Crynwr packet driver.
+*/
+
+static const char *version =
+ "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int hppclan_portlist[] =
+{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
+
+#define HP_IO_EXTENT 32
+
+#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
+#define HP_ID 0x07
+#define HP_CONFIGURE 0x08 /* Configuration register. */
+#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
+#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
+#define HP_DATAON 0x10 /* Turn on dataport */
+#define NIC_OFFSET	0x10	/* Offset to the 8390 registers. */
+
+#define HP_START_PG 0x00 /* First page of TX buffer */
+#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
+#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
+
+int hp_probe(struct device *dev);
+int hp_probe1(struct device *dev, int ioaddr);
+
+static int hp_open(struct device *dev);
+static int hp_close(struct device *dev);
+static void hp_reset_8390(struct device *dev);
+static void hp_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void hp_block_input(struct device *dev, int count,
+ struct sk_buff *skb , int ring_offset);
+static void hp_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+
+static void hp_init_card(struct device *dev);
+
+/* The map from IRQ number to HP_CONFIGURE register setting. */
+/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
+static char irqmap[16] = { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
+
+
+/* Probe for an HP LAN adaptor.
+ Also initialize the card and fill in STATION_ADDR with the station
+ address. */
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"hp", hp_probe1, HP_IO_EXTENT, hppclan_portlist};
+#else
+
+int hp_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return hp_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; hppclan_portlist[i]; i++) {
+ int ioaddr = hppclan_portlist[i];
+ if (check_region(ioaddr, HP_IO_EXTENT))
+ continue;
+ if (hp_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+int hp_probe1(struct device *dev, int ioaddr)
+{
+ int i, board_id, wordmode;
+ const char *name;
+ static unsigned version_printed = 0;
+
+ /* Check for the HP physical address, 08 00 09 xx xx xx. */
+ /* This really isn't good enough: we may pick up HP LANCE boards
+ also! Avoid the lance 0x5757 signature. */
+ if (inb(ioaddr) != 0x08
+ || inb(ioaddr+1) != 0x00
+ || inb(ioaddr+2) != 0x09
+ || inb(ioaddr+14) == 0x57)
+ return ENODEV;
+
+ /* Set up the parameters based on the board ID.
+ If you have additional mappings, please mail them to me -djb. */
+ if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
+ name = "HP27247";
+ wordmode = 1;
+ } else {
+ name = "HP27250";
+ wordmode = 0;
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("hp.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ /* Snarf the interrupt now. Someday this could be moved to open(). */
+ if (dev->irq < 2) {
+ int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
+ int irq_8list[] = { 7, 5, 3, 4, 9, 0};
+ int *irqp = wordmode ? irq_16list : irq_8list;
+ do {
+ int irq = *irqp;
+ if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
+ autoirq_setup(0);
+ /* Twinkle the interrupt, and check if it's seen. */
+ outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
+ outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
+ if (irq == autoirq_report(0) /* It's a good IRQ line! */
+ && request_irq (irq, &ei_interrupt, 0, "hp", NULL) == 0) {
+ printk(" selecting IRQ %d.\n", irq);
+ dev->irq = *irqp;
+ break;
+ }
+ }
+ } while (*++irqp);
+ if (*irqp == 0) {
+ printk(" no free IRQ lines.\n");
+ return EBUSY;
+ }
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9;
+ if (request_irq(dev->irq, ei_interrupt, 0, "hp", NULL)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EBUSY;
+ }
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ free_irq(dev->irq, NULL);
+ return -ENOMEM;
+ }
+
+ /* Grab the region so we can find another board if something fails. */
+ request_region(ioaddr, HP_IO_EXTENT,"hp");
+
+ /* Set the base address to point to the NIC, not the "real" base! */
+ dev->base_addr = ioaddr + NIC_OFFSET;
+ dev->open = &hp_open;
+ dev->stop = &hp_close;
+
+ ei_status.name = name;
+ ei_status.word16 = wordmode;
+ ei_status.tx_start_page = HP_START_PG;
+ ei_status.rx_start_page = HP_START_PG + TX_PAGES;
+ ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
+
+ ei_status.reset_8390 = &hp_reset_8390;
+ ei_status.get_8390_hdr = &hp_get_8390_hdr;
+ ei_status.block_input = &hp_block_input;
+ ei_status.block_output = &hp_block_output;
+ hp_init_card(dev);
+
+ return 0;
+}
+
+static int
+hp_open(struct device *dev)
+{
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+hp_close(struct device *dev)
+{
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static void
+hp_reset_8390(struct device *dev)
+{
+ int hp_base = dev->base_addr - NIC_OFFSET;
+ int saved_config = inb_p(hp_base + HP_CONFIGURE);
+
+ if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
+ outb_p(0x00, hp_base + HP_CONFIGURE);
+ ei_status.txing = 0;
+ /* Pause just a few cycles for the hardware reset to take place. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ outb_p(saved_config, hp_base + HP_CONFIGURE);
+ SLOW_DOWN_IO; SLOW_DOWN_IO;
+
+ if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
+ printk("%s: hp_reset_8390() did not complete.\n", dev->name);
+
+ if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
+ return;
+}
+
+static void
+hp_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+
+ if (ei_status.word16)
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you are
+ porting to a new ethercard look at the packet driver source for hints.
+ The HP LAN doesn't use shared memory -- we put the packet
+ out through the "remote DMA" dataport. */
+
+static void
+hp_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+ int xfer_count = count;
+ char *buf = skb->data;
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base);
+ if (ei_status.word16) {
+ insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
+ if (count & 0x01)
+ buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++;
+ } else {
+ insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ /* Check only the lower 8 bits so we can ignore ring wrap. */
+ if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
+ printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+}
+
+static void
+hp_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = dev->base_addr;
+ int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
+
+ outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work. */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0xff, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+#define NE_CMD 0x00
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ inb_p(0x61);
+ inb_p(0x61);
+#endif
+
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base);
+ if (ei_status.word16) {
+ /* Use the 'rep' sequence for 16 bit boards. */
+ outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
+ } else {
+ outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
+ }
+
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
+
+ /* This is for the ALPHA version only, remove for later releases. */
+ if (ei_debug > 0) { /* DMA termination address check... */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ int addr = (high << 8) + low;
+ if ((start_page << 8) + count != addr)
+ printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
+ dev->name, (start_page << 8) + count, addr);
+ }
+ outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
+ return;
+}
+
+/* This function resets the ethercard if something screws up. */
+static void
+hp_init_card(struct device *dev)
+{
+ int irq = dev->irq;
+ NS8390_init(dev, 0);
+ outb_p(irqmap[irq&0x0f] | HP_RUN,
+ dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
+ return;
+}
+
+#ifdef MODULE
+#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_HP_CARDS] = { 0, };
+static struct device dev_hp[MAX_HP_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_HP_CARDS] = { 0, };
+static int irq[MAX_HP_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ struct device *dev = &dev_hp[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = hp_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
+ struct device *dev = &dev_hp[this_dev];
+ if (dev->priv != NULL) {
+ int ioaddr = dev->base_addr - NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(ioaddr, HP_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * c-indent-level: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp100.c b/linux/src/drivers/net/hp100.c
new file mode 100644
index 0000000..0b86ef4
--- /dev/null
+++ b/linux/src/drivers/net/hp100.c
@@ -0,0 +1,3121 @@
+/*
+** hp100.c
+** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters
+**
+** $Id: hp100.c,v 1.1.4.1 2005/06/02 18:52:39 ams Exp $
+**
+** Based on the HP100 driver written by Jaroslav Kysela <perex@jcu.cz>
+** Extended for new busmaster capable chipsets by
+** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>
+**
+** Maintained by: Jaroslav Kysela <perex@jcu.cz>
+**
+** This driver has only been tested with
+** -- HP J2585B 10/100 Mbit/s PCI Busmaster
+** -- HP J2585A 10/100 Mbit/s PCI
+** -- HP J2970 10 Mbit/s PCI Combo 10base-T/BNC
+** -- HP J2973 10 Mbit/s PCI 10base-T
+** -- HP J2573 10/100 ISA
+** -- Compex ReadyLink ENET100-VG4 10/100 Mbit/s PCI / EISA
+** -- Compex FreedomLine 100/VG 10/100 Mbit/s ISA / EISA / PCI
+**
+** but it should also work with the other CASCADE based adapters.
+**
+** TODO:
+** - J2573 seems to hang sometimes when in shared memory mode.
+** - Mode for Priority TX
+** - Check PCI registers, performance might be improved?
+** - To reduce interrupt load in busmaster, one could switch off
+** the interrupts that are used to refill the queues whenever the
+** queues are filled up to more than a certain threshold.
+** - some updates for EISA version of card
+**
+**
+** This source/code is public free; you can distribute it and/or modify
+** it under terms of the GNU General Public License (published by the
+** Free Software Foundation) either version two of this License, or any
+** later version.
+**
+** 1.55 -> 1.56
+** - removed printk in misc. interrupt and update statistics to allow
+** monitoring of card status
+** - timing changes in xmit routines, relogin to 100VG hub added when
+** driver does reset
+** - included fix for Compex FreedomLine PCI adapter
+**
+** 1.54 -> 1.55
+** - fixed bad initialization in init_module
+** - added Compex FreedomLine adapter
+** - some fixes in card initialization
+**
+** 1.53 -> 1.54
+** - added hardware multicast filter support (doesn't work)
+** - little changes in hp100_sense_lan routine
+** - added support for Coax and AUI (J2970)
+** - fix for multiple cards and hp100_mode parameter (insmod)
+** - fix for shared IRQ
+**
+** 1.52 -> 1.53
+** - fixed bug in multicast support
+**
+*/
+
+#define HP100_DEFAULT_PRIORITY_TX 0
+
+#undef HP100_DEBUG
+#undef HP100_DEBUG_B /* Trace */
+#undef HP100_DEBUG_BM /* Debug busmaster code (PDL stuff) */
+
+#undef HP100_DEBUG_TRAINING /* Debug login-to-hub procedure */
+#undef HP100_DEBUG_TX
+#undef HP100_DEBUG_IRQ
+#undef HP100_DEBUG_RX
+
+#undef HP100_MULTICAST_FILTER /* Need to be debugged... */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/types.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+#include <linux/delay.h>
+
+#if LINUX_VERSION_CODE < 0x020100
+#define ioremap vremap
+#define iounmap vfree
+typedef struct enet_statistics hp100_stats_t;
+#else
+#define LINUX_2_1
+typedef struct net_device_stats hp100_stats_t;
+#endif
+
+#ifndef __initfunc
+#define __initfunc(__initarg) __initarg
+#else
+#include <linux/init.h>
+#endif
+
+#include "hp100.h"
+
+/*
+ * defines
+ */
+
+#define HP100_BUS_ISA 0
+#define HP100_BUS_EISA 1
+#define HP100_BUS_PCI 2
+
+#ifndef PCI_DEVICE_ID_HP_J2585B
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+#endif
+#ifndef PCI_VENDOR_ID_COMPEX
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#endif
+#ifndef PCI_DEVICE_ID_COMPEX_ENET100VG4
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+#endif
+#ifndef PCI_VENDOR_ID_COMPEX2
+#define PCI_VENDOR_ID_COMPEX2 0x101a
+#endif
+#ifndef PCI_DEVICE_ID_COMPEX2_100VG
+#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005
+#endif
+
+#define HP100_REGION_SIZE 0x20 /* for ioports */
+
+#define HP100_MAX_PACKET_SIZE (1536+4)
+#define HP100_MIN_PACKET_SIZE 60
+
+#ifndef HP100_DEFAULT_RX_RATIO
+/* default - 75% of the card's on-board memory is used for RX packets */
+#define HP100_DEFAULT_RX_RATIO 75
+#endif
+
+#ifndef HP100_DEFAULT_PRIORITY_TX
+/* default - don't enable priority transmission of outgoing packets */
+#define HP100_DEFAULT_PRIORITY_TX 0
+#endif
+
+/*
+ * structures
+ */
+
+struct hp100_eisa_id {
+ u_int id;
+ const char *name;
+ u_char bus;
+};
+
+struct hp100_pci_id {
+ u_short vendor;
+ u_short device;
+};
+
+struct hp100_private {
+ struct hp100_eisa_id *id;
+ u_short chip;
+ u_short soft_model;
+ u_int memory_size;
+ u_int virt_memory_size;
+ u_short rx_ratio; /* 1 - 99 */
+ u_short priority_tx; /* != 0 - priority tx */
+ u_short mode; /* PIO, Shared Mem or Busmaster */
+ u_char bus;
+ u_char pci_bus;
+ u_char pci_device_fn;
+ short mem_mapped; /* memory mapped access */
+ u_int *mem_ptr_virt; /* virtual memory mapped area, maybe NULL */
+ u_int *mem_ptr_phys; /* physical memory mapped area */
+ short lan_type; /* 10Mb/s, 100Mb/s or -1 (error) */
+ int hub_status; /* was login to hub successful? */
+ u_char mac1_mode;
+ u_char mac2_mode;
+ u_char hash_bytes[ 8 ];
+ hp100_stats_t stats;
+
+ /* Rings for busmaster mode: */
+ hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */
+ hp100_ring_t *rxrtail; /* Tail (newest) index into rxring */
+ hp100_ring_t *txrhead; /* Head (oldest) index into txring */
+ hp100_ring_t *txrtail; /* Tail (newest) index into txring */
+
+ hp100_ring_t rxring[ MAX_RX_PDL ];
+ hp100_ring_t txring[ MAX_TX_PDL ];
+
+ u_int *page_vaddr; /* Virtual address of allocated page */
+ u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
+  int rxrcommit;		/* # Rx PDLs committed to adapter */
+  int txrcommit;		/* # Tx PDLs committed to adapter */
+};
+
+/*
+ * variables
+ */
+
+static struct hp100_eisa_id hp100_eisa_ids[] = {
+
+ /* 10/100 EISA card with revision A Cascade chip */
+ { 0x80F1F022, "HP J2577 rev A", HP100_BUS_EISA },
+
+ /* 10/100 ISA card with revision A Cascade chip */
+ { 0x50F1F022, "HP J2573 rev A", HP100_BUS_ISA },
+
+ /* 10 only EISA card with Cascade chip */
+ { 0x2019F022, "HP 27248B", HP100_BUS_EISA },
+
+ /* 10/100 EISA card with Cascade chip */
+ { 0x4019F022, "HP J2577", HP100_BUS_EISA },
+
+ /* 10/100 ISA card with Cascade chip */
+ { 0x5019F022, "HP J2573", HP100_BUS_ISA },
+
+ /* 10/100 PCI card - old J2585A */
+ { 0x1030103c, "HP J2585A", HP100_BUS_PCI },
+
+ /* 10/100 PCI card - new J2585B - master capable */
+ { 0x1041103c, "HP J2585B", HP100_BUS_PCI },
+
+ /* 10 Mbit Combo Adapter */
+ { 0x1042103c, "HP J2970", HP100_BUS_PCI },
+
+ /* 10 Mbit 10baseT Adapter */
+ { 0x1040103c, "HP J2973", HP100_BUS_PCI },
+
+ /* 10/100 EISA card from Compex */
+ { 0x0103180e, "ReadyLink ENET100-VG4", HP100_BUS_EISA },
+
+ /* 10/100 EISA card from Compex - FreedomLine (sq5bpf) */
+  /* Note: plhbrod@mbox.vol.cz reported that the ISA version of the */
+  /* adapter has the same ID, too... */
+ { 0x0104180e, "FreedomLine 100/VG", HP100_BUS_EISA },
+
+ /* 10/100 PCI card from Compex - FreedomLine
+ *
+   * I think this card doesn't like the aic7178 SCSI controller, but
+ * I haven't tested this much. It works fine on diskless machines.
+ * Jacek Lipkowski <sq5bpf@acid.ch.pw.edu.pl>
+ */
+ { 0x021211f6, "FreedomLine 100/VG", HP100_BUS_PCI },
+
+ /* 10/100 PCI card from Compex (J2585A compatible) */
+ { 0x011211f6, "ReadyLink ENET100-VG4", HP100_BUS_PCI }
+
+};
+
+#define HP100_EISA_IDS_SIZE (sizeof(hp100_eisa_ids)/sizeof(struct hp100_eisa_id))
+
+static struct hp100_pci_id hp100_pci_ids[] = {
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A },
+ { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B },
+ { PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4 },
+ { PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG }
+};
+
+#define HP100_PCI_IDS_SIZE (sizeof(hp100_pci_ids)/sizeof(struct hp100_pci_id))
+
+static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
+static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
+static int hp100_mode = 1;
+
+#ifdef LINUX_2_1
+MODULE_PARM( hp100_rx_ratio, "1i" );
+MODULE_PARM( hp100_priority_tx, "1i" );
+MODULE_PARM( hp100_mode, "1i" );
+#endif
+
+/*
+ * prototypes
+ */
+
+static int hp100_probe1( struct device *dev, int ioaddr, u_char bus, u_char pci_bus, u_char pci_device_fn );
+static int hp100_open( struct device *dev );
+static int hp100_close( struct device *dev );
+static int hp100_start_xmit( struct sk_buff *skb, struct device *dev );
+static int hp100_start_xmit_bm (struct sk_buff *skb, struct device *dev );
+static void hp100_rx( struct device *dev );
+static hp100_stats_t *hp100_get_stats( struct device *dev );
+static void hp100_misc_interrupt( struct device *dev );
+static void hp100_update_stats( struct device *dev );
+static void hp100_clear_stats( int ioaddr );
+static void hp100_set_multicast_list( struct device *dev);
+static void hp100_interrupt( int irq, void *dev_id, struct pt_regs *regs );
+static void hp100_start_interface( struct device *dev );
+static void hp100_stop_interface( struct device *dev );
+static void hp100_load_eeprom( struct device *dev, u_short ioaddr );
+static int hp100_sense_lan( struct device *dev );
+static int hp100_login_to_vg_hub( struct device *dev, u_short force_relogin );
+static int hp100_down_vg_link( struct device *dev );
+static void hp100_cascade_reset( struct device *dev, u_short enable );
+static void hp100_BM_shutdown( struct device *dev );
+static void hp100_mmuinit( struct device *dev );
+static void hp100_init_pdls( struct device *dev );
+static int hp100_init_rxpdl( struct device *dev, register hp100_ring_t *ringptr, register u_int *pdlptr);
+static int hp100_init_txpdl( struct device *dev, register hp100_ring_t *ringptr, register u_int *pdlptr);
+static void hp100_rxfill( struct device *dev );
+static void hp100_hwinit( struct device *dev );
+static void hp100_clean_txring( struct device *dev );
+#ifdef HP100_DEBUG
+static void hp100_RegisterDump( struct device *dev );
+#endif
+
+/* TODO: This function should not really be needed in a good design... */
+static void wait( void )
+{
+ udelay( 1000 );
+}
+
+/*
+ * probe functions
+ * These functions should - if possible - avoid doing write operations
+ * since this could cause problems when the card is not installed.
+ */
+
+__initfunc(int hp100_probe( struct device *dev ))
+{
+ int base_addr = dev ? dev -> base_addr : 0;
+ int ioaddr = 0;
+#ifdef CONFIG_PCI
+ int pci_start_index = 0;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4200, TRACE );
+ printk( "hp100: %s: probe\n", dev->name );
+#endif
+
+ if ( base_addr > 0xff ) /* Check a single specified location. */
+ {
+ if ( check_region( base_addr, HP100_REGION_SIZE ) ) return -EINVAL;
+ if ( base_addr < 0x400 )
+ return hp100_probe1( dev, base_addr, HP100_BUS_ISA, 0, 0 );
+ if ( EISA_bus && base_addr >= 0x1c38 && ( (base_addr - 0x1c38) & 0x3ff ) == 0 )
+ return hp100_probe1( dev, base_addr, HP100_BUS_EISA, 0, 0 );
+#ifdef CONFIG_PCI
+ printk( "hp100: %s: You may specify card # in i/o address parameter for PCI bus...", dev->name );
+ return hp100_probe1( dev, base_addr, HP100_BUS_PCI, 0, 0 );
+#else
+ return -ENODEV;
+#endif
+ }
+ else
+#ifdef CONFIG_PCI
+ if ( base_addr > 0 && base_addr < 8 + 1 )
+ pci_start_index = 0x100 | ( base_addr - 1 );
+ else
+#endif
+ if ( base_addr != 0 ) return -ENXIO;
+
+ /* at first - scan PCI bus(es) */
+
+#ifdef CONFIG_PCI
+ if ( pcibios_present() )
+ {
+ int pci_index;
+
+#ifdef HP100_DEBUG_PCI
+ printk( "hp100: %s: PCI BIOS is present, checking for devices..\n", dev->name );
+#endif
+ for ( pci_index = pci_start_index & 7; pci_index < 8; pci_index++ )
+ {
+ u_char pci_bus, pci_device_fn;
+ u_short pci_command;
+ int pci_id_index;
+
+ for ( pci_id_index = 0; pci_id_index < HP100_PCI_IDS_SIZE; pci_id_index++ )
+ if ( pcibios_find_device( hp100_pci_ids[ pci_id_index ].vendor,
+ hp100_pci_ids[ pci_id_index ].device,
+ pci_index, &pci_bus,
+ &pci_device_fn ) == 0 ) goto __pci_found;
+ break;
+
+ __pci_found:
+ pcibios_read_config_dword( pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &ioaddr );
+
+ ioaddr &= ~3; /* remove I/O space marker in bit 0. */
+
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+
+ pcibios_read_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command );
+ if ( !( pci_command & PCI_COMMAND_IO ) )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name );
+#endif
+ pci_command |= PCI_COMMAND_IO;
+ pcibios_write_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command );
+ }
+ if ( !( pci_command & PCI_COMMAND_MASTER ) )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name );
+#endif
+ pci_command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word( pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command );
+ }
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr );
+#endif
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_PCI, pci_bus, pci_device_fn ) == 0 )
+ return 0;
+ }
+ }
+ if ( pci_start_index > 0 ) return -ENODEV;
+#endif /* CONFIG_PCI */
+
+ /* Second: Probe all EISA possible port regions (if EISA bus present) */
+ for ( ioaddr = 0x1c38; EISA_bus && ioaddr < 0x10000; ioaddr += 0x400 )
+ {
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_EISA, 0, 0 ) == 0 ) return 0;
+ }
+
+ /* Third Probe all ISA possible port regions */
+ for ( ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20 )
+ {
+ if ( check_region( ioaddr, HP100_REGION_SIZE ) ) continue;
+ if ( hp100_probe1( dev, ioaddr, HP100_BUS_ISA, 0, 0 ) == 0 ) return 0;
+ }
+
+ return -ENODEV;
+}
+
+
+__initfunc(static int hp100_probe1( struct device *dev, int ioaddr, u_char bus, u_char pci_bus, u_char pci_device_fn ))
+{
+ int i;
+
+ u_char uc, uc_1;
+ u_int eisa_id;
+ u_int chip;
+ u_int memory_size = 0, virt_memory_size = 0;
+ u_short local_mode, lsw;
+ short mem_mapped;
+ u_int *mem_ptr_phys, *mem_ptr_virt;
+ struct hp100_private *lp;
+ struct hp100_eisa_id *eid;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4201, TRACE );
+ printk("hp100: %s: probe1\n",dev->name);
+#endif
+
+ if ( dev == NULL )
+ {
+#ifdef HP100_DEBUG
+    printk( "hp100_probe1: dev == NULL ?\n" );
+#endif
+ return EIO;
+ }
+
+ if ( hp100_inw( HW_ID ) != HP100_HW_ID_CASCADE )
+ {
+ return -ENODEV;
+ }
+ else
+ {
+ chip = hp100_inw( PAGING ) & HP100_CHIPID_MASK;
+#ifdef HP100_DEBUG
+ if ( chip == HP100_CHIPID_SHASTA )
+ printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if ( chip == HP100_CHIPID_RAINIER )
+ printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name);
+ else if ( chip == HP100_CHIPID_LASSEN )
+ printk("hp100: %s: Lassen Chip detected.\n", dev->name);
+ else
+ printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n",dev->name,chip);
+#endif
+ }
+
+ dev->base_addr = ioaddr;
+
+ hp100_page( ID_MAC_ADDR );
+ for ( i = uc = eisa_id = 0; i < 4; i++ )
+ {
+ eisa_id >>= 8;
+ uc_1 = hp100_inb( BOARD_ID + i );
+ eisa_id |= uc_1 << 24;
+ uc += uc_1;
+ }
+ uc += hp100_inb( BOARD_ID + 4 );
+
+ if ( uc != 0xff ) /* bad checksum? */
+ {
+ printk("hp100_probe: %s: bad EISA ID checksum at base port 0x%x\n", dev->name, ioaddr );
+ return -ENODEV;
+ }
+
+ for ( i=0; i < HP100_EISA_IDS_SIZE; i++)
+ if ( hp100_eisa_ids[ i ].id == eisa_id )
+ break;
+ if ( i >= HP100_EISA_IDS_SIZE ) {
+ for ( i = 0; i < HP100_EISA_IDS_SIZE; i++)
+ if ( ( hp100_eisa_ids[ i ].id & 0xf0ffffff ) == ( eisa_id & 0xf0ffffff ) )
+ break;
+ if ( i >= HP100_EISA_IDS_SIZE ) {
+ printk( "hp100_probe: %s: card at port 0x%x isn't known (id = 0x%x)\n", dev -> name, ioaddr, eisa_id );
+ return -ENODEV;
+ }
+ }
+ eid = &hp100_eisa_ids[ i ];
+ if ( ( eid->id & 0x0f000000 ) < ( eisa_id & 0x0f000000 ) )
+ {
+ printk( "hp100_probe: %s: newer version of card %s at port 0x%x - unsupported\n",
+ dev->name, eid->name, ioaddr );
+ return -ENODEV;
+ }
+
+ for ( i = uc = 0; i < 7; i++ )
+ uc += hp100_inb( LAN_ADDR + i );
+ if ( uc != 0xff )
+ {
+ printk("hp100_probe: %s: bad lan address checksum (card %s at port 0x%x)\n",
+ dev->name, eid->name, ioaddr );
+ return -EIO;
+ }
+
+  /* Make sure that all registers are correctly updated... */
+
+ hp100_load_eeprom( dev, ioaddr );
+ wait();
+
+ /*
+ * Determine driver operation mode
+ *
+ * Use the variable "hp100_mode" upon insmod or as kernel parameter to
+ * force driver modes:
+ * hp100_mode=1 -> default, use busmaster mode if configured.
+ * hp100_mode=2 -> enable shared memory mode
+ * hp100_mode=3 -> force use of i/o mapped mode.
+ * hp100_mode=4 -> same as 1, but re-set the enable bit on the card.
+ */
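+
+  /* Example (illustrative only): to force shared memory mode when loading the
+   * driver as a module, pass the parameter on the insmod command line, e.g.
+   *   insmod hp100.o hp100_mode=2
+   * hp100_rx_ratio and hp100_priority_tx can be set the same way. */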
+
+ /*
+ * LSW values:
+ * 0x2278 -> J2585B, PnP shared memory mode
+ * 0x2270 -> J2585B, shared memory mode, 0xdc000
+ * 0xa23c -> J2585B, I/O mapped mode
+ * 0x2240 -> EISA COMPEX, BusMaster (Shasta Chip)
+ * 0x2220 -> EISA HP, I/O (Shasta Chip)
+ * 0x2260 -> EISA HP, BusMaster (Shasta Chip)
+ */
+
+#if 0
+ local_mode = 0x2270;
+ hp100_outw(0xfefe,OPTION_LSW);
+ hp100_outw(local_mode|HP100_SET_LB|HP100_SET_HB,OPTION_LSW);
+#endif
+
+  /* hp100_mode value may be used in the future by another card */
+ local_mode=hp100_mode;
+ if ( local_mode < 1 || local_mode > 4 )
+ local_mode = 1; /* default */
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: original LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW) );
+#endif
+
+ if(local_mode==3)
+ {
+ hp100_outw(HP100_MEM_EN|HP100_RESET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN|HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE|HP100_BM_READ|HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: %s: IO mapped mode forced.\n", dev->name);
+ }
+ else if(local_mode==2)
+ {
+ hp100_outw(HP100_MEM_EN|HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN |HP100_SET_LB, OPTION_LSW);
+ hp100_outw(HP100_BM_WRITE|HP100_BM_READ|HP100_RESET_HB, OPTION_LSW);
+ printk("hp100: %s: Shared memory mode requested.\n", dev->name);
+ }
+ else if(local_mode==4)
+ {
+ if(chip==HP100_CHIPID_LASSEN)
+ {
+ hp100_outw(HP100_BM_WRITE|
+ HP100_BM_READ | HP100_SET_HB, OPTION_LSW);
+ hp100_outw(HP100_IO_EN |
+ HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW);
+ printk("hp100: %s: Busmaster mode requested.\n",dev->name);
+ }
+ local_mode=1;
+ }
+
+ if(local_mode==1) /* default behaviour */
+ {
+ lsw = hp100_inw(OPTION_LSW);
+
+ if ( (lsw & HP100_IO_EN) &&
+ (~lsw & HP100_MEM_EN) &&
+ (~lsw & (HP100_BM_WRITE|HP100_BM_READ)) )
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: IO_EN bit is set on card.\n",dev->name);
+#endif
+ local_mode=3;
+ }
+ else if ( chip == HP100_CHIPID_LASSEN &&
+ ( lsw & (HP100_BM_WRITE|HP100_BM_READ) ) ==
+ (HP100_BM_WRITE|HP100_BM_READ) )
+ {
+ printk("hp100: %s: Busmaster mode enabled.\n",dev->name);
+ hp100_outw(HP100_MEM_EN|HP100_IO_EN|HP100_RESET_LB, OPTION_LSW);
+ }
+ else
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name );
+ printk("hp100: %s: Trying shared memory mode.\n", dev->name);
+#endif
+ /* In this case, try shared memory mode */
+ local_mode=2;
+ hp100_outw(HP100_MEM_EN|HP100_SET_LB, OPTION_LSW);
+ /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */
+ }
+ }
+
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW) );
+#endif
+
+  /* Check for shared memory on the card and remap it if necessary */
+ hp100_page( HW_MAP );
+ mem_mapped = (( hp100_inw( OPTION_LSW ) & ( HP100_MEM_EN ) ) != 0);
+ mem_ptr_phys = mem_ptr_virt = NULL;
+ memory_size = (8192<<( (hp100_inb(SRAM)>>5)&0x07));
+ virt_memory_size = 0;
+
+ /* For memory mapped or busmaster mode, we want the memory address */
+ if ( mem_mapped || (local_mode==1))
+ {
+ mem_ptr_phys = (u_int *)( hp100_inw( MEM_MAP_LSW ) |
+ ( hp100_inw( MEM_MAP_MSW ) << 16 ) );
+ mem_ptr_phys = (u_int *) ((u_int) mem_ptr_phys & ~0x1fff); /* 8k alignment */
+ if ( bus == HP100_BUS_ISA && ( (u_long)mem_ptr_phys & ~0xfffff ) != 0 )
+ {
+ printk("hp100: %s: Can only use programmed i/o mode.\n", dev->name);
+ mem_ptr_phys = NULL;
+ mem_mapped = 0;
+ local_mode=3; /* Use programmed i/o */
+ }
+
+ /* We do not need access to shared memory in busmaster mode */
+ /* However in slave mode we need to remap high (>1GB) card memory */
+ if(local_mode!=1) /* = not busmaster */
+ {
+ if ( bus == HP100_BUS_PCI && mem_ptr_phys >= (u_int *)0x100000 )
+ {
+ /* We try with smaller memory sizes, if ioremap fails */
+ for(virt_memory_size = memory_size; virt_memory_size>16383; virt_memory_size>>=1)
+ {
+ if((mem_ptr_virt=ioremap((u_long)mem_ptr_phys,virt_memory_size))==NULL)
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, (u_long)mem_ptr_phys );
+#endif
+ }
+ else
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to 0x%lx.\n", dev->name, virt_memory_size, (u_long)mem_ptr_phys, (u_long)mem_ptr_virt);
+#endif
+ break;
+ }
+ }
+
+ if(mem_ptr_virt==NULL) /* all ioremap tries failed */
+ {
+ printk("hp100: %s: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n", dev->name);
+ local_mode=3;
+ virt_memory_size = 0;
+ }
+ }
+ }
+
+ }
+
+ if(local_mode==3) /* io mapped forced */
+ {
+ mem_mapped = 0;
+ mem_ptr_phys = mem_ptr_virt = NULL;
+ printk("hp100: %s: Using (slow) programmed i/o mode.\n", dev->name);
+ }
+
+ /* Initialise the "private" data structure for this card. */
+ if ( (dev->priv=kmalloc(sizeof(struct hp100_private), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset( dev->priv, 0, sizeof(struct hp100_private) );
+
+ lp = (struct hp100_private *)dev->priv;
+ lp->id = eid;
+ lp->chip = chip;
+ lp->mode = local_mode;
+ lp->pci_bus = pci_bus;
+ lp->bus = bus;
+ lp->pci_device_fn = pci_device_fn;
+ lp->priority_tx = hp100_priority_tx;
+ lp->rx_ratio = hp100_rx_ratio;
+ lp->mem_ptr_phys = mem_ptr_phys;
+ lp->mem_ptr_virt = mem_ptr_virt;
+ hp100_page( ID_MAC_ADDR );
+ lp->soft_model = hp100_inb( SOFT_MODEL );
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset( &lp->hash_bytes, 0x00, 8 );
+
+ dev->base_addr = ioaddr;
+
+ lp->memory_size = memory_size;
+ lp->virt_memory_size = virt_memory_size;
+ lp->rx_ratio = hp100_rx_ratio; /* can be conf'd with insmod */
+
+  /* Reserve the i/o port region used for programmed i/o */
+ request_region( dev->base_addr, HP100_REGION_SIZE, eid->name );
+
+ dev->open = hp100_open;
+ dev->stop = hp100_close;
+
+ if (lp->mode==1) /* busmaster */
+ dev->hard_start_xmit = hp100_start_xmit_bm;
+ else
+ dev->hard_start_xmit = hp100_start_xmit;
+
+ dev->get_stats = hp100_get_stats;
+ dev->set_multicast_list = &hp100_set_multicast_list;
+
+ /* Ask the card for which IRQ line it is configured */
+ hp100_page( HW_MAP );
+ dev->irq = hp100_inb( IRQ_CHANNEL ) & HP100_IRQMASK;
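+  /* IRQ 2 is the cascade line on AT-style interrupt controllers; an adapter
+   * configured for IRQ 2 is actually serviced on IRQ 9. */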
+ if ( dev->irq == 2 )
+ dev->irq = 9;
+
+ if(lp->mode==1) /* busmaster */
+ dev->dma=4;
+
+ /* Ask the card for its MAC address and store it for later use. */
+ hp100_page( ID_MAC_ADDR );
+ for ( i = uc = 0; i < 6; i++ )
+ dev->dev_addr[ i ] = hp100_inb( LAN_ADDR + i );
+
+ /* Reset statistics (counters) */
+ hp100_clear_stats( ioaddr );
+
+ ether_setup( dev );
+
+ /* If busmaster mode is wanted, a dma-capable memory area is needed for
+ * the rx and tx PDLs
+ * PCI cards can access the whole PC memory. Therefore GFP_DMA is not
+ * needed for the allocation of the memory area.
+ */
+
+ /* TODO: We do not need this with old cards, where PDLs are stored
+   * in the card's shared memory area. But currently, busmaster has been
+ * implemented/tested only with the lassen chip anyway... */
+ if(lp->mode==1) /* busmaster */
+ {
+      /* Get physically contiguous memory for TX & RX PDLs */
+ if ( (lp->page_vaddr=kmalloc(MAX_RINGSIZE+0x0f,GFP_KERNEL) ) == NULL)
+ return -ENOMEM;
+ lp->page_vaddr_algn=((u_int *) ( ((u_int)(lp->page_vaddr)+0x0f) &~0x0f));
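+      /* page_vaddr_algn is page_vaddr rounded up to the next 16-byte
+       * boundary; the extra 0x0f bytes requested above guarantee that the
+       * aligned area still holds MAX_RINGSIZE bytes. */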
+ memset(lp->page_vaddr, 0, MAX_RINGSIZE+0x0f);
+
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n",
+ dev->name,
+ (u_int)lp->page_vaddr_algn,
+ (u_int)lp->page_vaddr_algn+MAX_RINGSIZE);
+#endif
+ lp->rxrcommit = lp->txrcommit = 0;
+ lp->rxrhead = lp->rxrtail = &(lp->rxring[0]);
+ lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ }
+
+ /* Initialise the card. */
+ /* (I'm not really sure if it's a good idea to do this during probing, but
+   * this way it is ensured that the lan connection type can be sensed
+ * correctly)
+ */
+ hp100_hwinit( dev );
+
+ /* Try to find out which kind of LAN the card is connected to. */
+ lp->lan_type = hp100_sense_lan( dev );
+
+  /* Print out a message about what we think we have probed. */
+ printk( "hp100: %s: %s at 0x%x, IRQ %d, ",
+ dev->name, lp->id->name, ioaddr, dev->irq );
+ switch ( bus ) {
+ case HP100_BUS_EISA: printk( "EISA" ); break;
+ case HP100_BUS_PCI: printk( "PCI" ); break;
+ default: printk( "ISA" ); break;
+ }
+ printk( " bus, %dk SRAM (rx/tx %d%%).\n",
+ lp->memory_size >> 10, lp->rx_ratio );
+
+ if ( lp->mode==2 ) /* memory mapped */
+ {
+ printk( "hp100: %s: Memory area at 0x%lx-0x%lx",
+ dev->name,(u_long)mem_ptr_phys,
+ ((u_long)mem_ptr_phys+(mem_ptr_phys>(u_int *)0x100000?(u_long)lp->memory_size:16*1024))-1 );
+ if ( mem_ptr_virt )
+ printk( " (virtual base 0x%lx)", (u_long)mem_ptr_virt );
+ printk( ".\n" );
+
+ /* Set for info when doing ifconfig */
+ dev->mem_start = (u_long)mem_ptr_phys;
+ dev->mem_end = (u_long)mem_ptr_phys+(u_long)lp->memory_size;
+ }
+ printk( "hp100: %s: ", dev->name );
+ if ( lp->lan_type != HP100_LAN_ERR )
+ printk( "Adapter is attached to " );
+ switch ( lp->lan_type ) {
+ case HP100_LAN_100:
+ printk( "100Mb/s Voice Grade AnyLAN network.\n" );
+ break;
+ case HP100_LAN_10:
+ printk( "10Mb/s network.\n" );
+ break;
+ default:
+ printk( "Warning! Link down.\n" );
+ }
+
+ return 0;
+}
+
+
+/* This procedure puts the card into a stable init state */
+static void hp100_hwinit( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4202, TRACE );
+ printk("hp100: %s: hwinit\n", dev->name);
+#endif
+
+ /* Initialise the card. -------------------------------------------- */
+
+ /* Clear all pending Ints and disable Ints */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* clear all pending ints */
+
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB, OPTION_LSW );
+ hp100_outw( HP100_TRI_INT | HP100_SET_HB, OPTION_LSW );
+
+ if(lp->mode==1)
+ {
+ hp100_BM_shutdown( dev ); /* disables BM, puts cascade in reset */
+ wait();
+ }
+ else
+ {
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB, OPTION_LSW );
+ hp100_cascade_reset( dev, TRUE );
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN|HP100_TX_EN), MAC_CFG_1);
+ }
+
+ /* Initiate EEPROM reload */
+ hp100_load_eeprom( dev, 0 );
+
+ wait();
+
+ /* Go into reset again. */
+ hp100_cascade_reset( dev, TRUE );
+
+ /* Set Option Registers to a safe state */
+ hp100_outw( HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN |
+ HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB |
+ HP100_FAKE_INT |
+ HP100_INT_EN |
+ HP100_MEM_EN |
+ HP100_IO_EN | HP100_RESET_LB, OPTION_LSW);
+
+ hp100_outw( HP100_TRI_INT |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW );
+
+ hp100_outb( HP100_PRIORITY_TX |
+ HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW );
+
+ /* TODO: Configure MMU for Ram Test. */
+ /* TODO: Ram Test. */
+
+  /* Re-check if the adapter is still at the same i/o location */
+  /* (If the base i/o address in the eeprom has been changed but   */
+  /* the registers have not, a reload of the eeprom would move the */
+  /* adapter to the address stored in the eeprom.)                 */
+
+ /* TODO: Code to implement. */
+
+ /* Until here it was code from HWdiscover procedure. */
+ /* Next comes code from mmuinit procedure of SCO BM driver which is
+ * called from HWconfigure in the SCO driver. */
+
+  /* Initialise the MMU, switch on Busmaster Mode if requested, and
+   * initialise the multicast filter...
+ */
+ hp100_mmuinit( dev );
+
+ /* We don't turn the interrupts on here - this is done by start_interface. */
+ wait(); /* TODO: Do we really need this? */
+
+ /* Enable Hardware (e.g. unreset) */
+ hp100_cascade_reset( dev, FALSE );
+
+ /* ------- initialisation complete ----------- */
+
+ /* Finally try to log in the Hub if there may be a VG connection. */
+ if( lp->lan_type != HP100_LAN_10 )
+ hp100_login_to_vg_hub( dev, FALSE ); /* relogin */
+}
+
+
+/*
+ * mmuinit - Reinitialise Cascade MMU and MAC settings.
+ * Note: Must already be in reset and leaves card in reset.
+ */
+static void hp100_mmuinit( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int i;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4203, TRACE );
+ printk("hp100: %s: mmuinit\n",dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ if( 0!=(hp100_inw(OPTION_LSW)&HP100_HW_RST) )
+ {
+ printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n",dev->name);
+ return;
+ }
+#endif
+
+ /* Make sure IRQs are masked off and ack'ed. */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack IRQ */
+
+ /*
+ * Enable Hardware
+ * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En
+ * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable
+ * - Clear Priority, Advance Pkt and Xmit Cmd
+ */
+
+ hp100_outw( HP100_DEBUG_EN |
+ HP100_RX_HDR |
+ HP100_EE_EN | HP100_RESET_HB |
+ HP100_IO_EN |
+ HP100_FAKE_INT |
+ HP100_INT_EN | HP100_RESET_LB, OPTION_LSW );
+
+ hp100_outw( HP100_TRI_INT | HP100_SET_HB, OPTION_LSW);
+
+ if(lp->mode==1) /* busmaster */
+ {
+ hp100_outw( HP100_BM_WRITE |
+ HP100_BM_READ |
+ HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW );
+ }
+ else if(lp->mode==2) /* memory mapped */
+ {
+ hp100_outw( HP100_BM_WRITE |
+ HP100_BM_READ | HP100_RESET_HB, OPTION_LSW );
+ hp100_outw( HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW );
+ hp100_outw( HP100_MEM_EN | HP100_SET_LB, OPTION_LSW );
+ hp100_outw( HP100_IO_EN | HP100_SET_LB, OPTION_LSW );
+ }
+ else if( lp->mode==3 ) /* i/o mapped mode */
+ {
+ hp100_outw( HP100_MMAP_DIS | HP100_SET_HB |
+ HP100_IO_EN | HP100_SET_LB, OPTION_LSW );
+ }
+
+ hp100_page( HW_MAP );
+ hp100_outb( 0, EARLYRXCFG );
+ hp100_outw( 0, EARLYTXCFG );
+
+ /*
+ * Enable Bus Master mode
+ */
+ if(lp->mode==1) /* busmaster */
+ {
+ /* Experimental: Set some PCI configuration bits */
+ hp100_page( HW_MAP );
+ hp100_andb( ~HP100_PDL_USE3, MODECTRL1 ); /* BM engine read maximum */
+ hp100_andb( ~HP100_TX_DUALQ, MODECTRL1 ); /* No Queue for Priority TX */
+
+ /* PCI Bus failures should result in a Misc. Interrupt */
+ hp100_orb( HP100_EN_BUS_FAIL, MODECTRL2);
+
+ hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW );
+ hp100_page( HW_MAP );
+ /* Use Burst Mode and switch on PAGE_CK */
+ hp100_orb( HP100_BM_BURST_RD |
+ HP100_BM_BURST_WR, BM);
+ if((lp->chip==HP100_CHIPID_RAINIER)||(lp->chip==HP100_CHIPID_SHASTA))
+ hp100_orb( HP100_BM_PAGE_CK, BM );
+ hp100_orb( HP100_BM_MASTER, BM );
+ }
+ else /* not busmaster */
+ {
+ hp100_page(HW_MAP);
+ hp100_andb(~HP100_BM_MASTER, BM );
+ }
+
+ /*
+ * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs
+ */
+ hp100_page( MMU_CFG );
+ if(lp->mode==1) /* only needed for Busmaster */
+ {
+ int xmit_stop, recv_stop;
+
+ if((lp->chip==HP100_CHIPID_RAINIER)||(lp->chip==HP100_CHIPID_SHASTA))
+ {
+ int pdl_stop;
+
+ /*
+	   * Each pdl is 508 bytes long (63 frags * 8 bytes for address and
+	   * length, plus 4 bytes for the header). We will leave NUM_RXPDLS * 508 (rounded
+ * to the next higher 1k boundary) bytes for the rx-pdl's
+ * Note: For non-etr chips the transmit stop register must be
+ * programmed on a 1k boundary, i.e. bits 9:0 must be zero.
+ */
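+	  /* For illustration (hypothetical values: a 128k SRAM, MAX_RX_PDL
+	   * of 8 and an rx_ratio of 75):
+	   *   pdl_stop  = 0x20000
+	   *   xmit_stop = (0x20000 - 508*8 - 16) & ~0x03ff = 0x1f000
+	   *   recv_stop = (0x1f000 * 75 / 100)   & ~0x03ff = 0x17400
+	   */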
+ pdl_stop = lp->memory_size;
+ xmit_stop = ( pdl_stop-508*(MAX_RX_PDL)-16 )& ~(0x03ff);
+ recv_stop = ( xmit_stop * (lp->rx_ratio)/100 ) &~(0x03ff);
+ hp100_outw( (pdl_stop>>4)-1, PDL_MEM_STOP );
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop);
+#endif
+ }
+ else /* ETR chip (Lassen) in busmaster mode */
+ {
+ xmit_stop = ( lp->memory_size ) - 1;
+ recv_stop = ( ( lp->memory_size * lp->rx_ratio ) / 100 ) & ~(0x03ff);
+ }
+
+ hp100_outw( xmit_stop>>4 , TX_MEM_STOP );
+ hp100_outw( recv_stop>>4 , RX_MEM_STOP );
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: TX_STOP = 0x%x\n",dev->name,xmit_stop>>4);
+ printk("hp100: %s: RX_STOP = 0x%x\n",dev->name,recv_stop>>4);
+#endif
+ }
+ else /* Slave modes (memory mapped and programmed io) */
+ {
+ hp100_outw( (((lp->memory_size*lp->rx_ratio)/100)>>4), RX_MEM_STOP );
+ hp100_outw( ((lp->memory_size - 1 )>>4), TX_MEM_STOP );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name,hp100_inw(TX_MEM_STOP));
+ printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name,hp100_inw(RX_MEM_STOP));
+#endif
+ }
+
+ /* Write MAC address into page 1 */
+ hp100_page( MAC_ADDRESS );
+ for ( i = 0; i < 6; i++ )
+ hp100_outb( dev->dev_addr[ i ], MAC_ADDR + i );
+
+ /* Zero the multicast hash registers */
+ for ( i = 0; i < 8; i++ )
+ hp100_outb( 0x0, HASH_BYTE0 + i );
+
+ /* Set up MAC defaults */
+ hp100_page( MAC_CTRL );
+
+ /* Go to LAN Page and zero all filter bits */
+ /* Zero accept error, accept multicast, accept broadcast and accept */
+ /* all directed packet bits */
+ hp100_andb( ~(HP100_RX_EN|
+ HP100_TX_EN|
+ HP100_ACC_ERRORED|
+ HP100_ACC_MC|
+ HP100_ACC_BC|
+ HP100_ACC_PHY), MAC_CFG_1 );
+
+ hp100_outb( 0x00, MAC_CFG_2 );
+
+ /* Zero the frame format bit. This works around a training bug in the */
+ /* new hubs. */
+ hp100_outb( 0x00, VG_LAN_CFG_2); /* (use 802.3) */
+
+ if(lp->priority_tx)
+ hp100_outb( HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW );
+ else
+ hp100_outb( HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW );
+
+ hp100_outb( HP100_ADV_NXT_PKT |
+ HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW );
+
+ /* If busmaster, initialize the PDLs */
+ if(lp->mode==1)
+ hp100_init_pdls( dev );
+
+  /* Go to performance page and initialize isr and imr registers */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack IRQ */
+}
+
+
+/*
+ * open/close functions
+ */
+
+static int hp100_open( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+#ifdef HP100_DEBUG_B
+ int ioaddr=dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4204, TRACE );
+ printk("hp100: %s: open\n",dev->name);
+#endif
+
+ /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
+ if ( request_irq(dev->irq, hp100_interrupt,
+ lp->bus==HP100_BUS_PCI||lp->bus==HP100_BUS_EISA?SA_SHIRQ:SA_INTERRUPT,
+ lp->id->name, dev))
+ {
+ printk( "hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq );
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ lp->lan_type = hp100_sense_lan( dev );
+ lp->mac1_mode = HP100_MAC1MODE3;
+ lp->mac2_mode = HP100_MAC2MODE3;
+ memset( &lp->hash_bytes, 0x00, 8 );
+
+ hp100_stop_interface( dev );
+
+ hp100_hwinit( dev );
+
+ hp100_start_interface( dev ); /* sets mac modes, enables interrupts */
+
+ return 0;
+}
+
+
+/* The close function is called when the interface is to be brought down */
+static int hp100_close( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4205, TRACE );
+ printk("hp100: %s: close\n", dev->name);
+#endif
+
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all IRQs */
+
+ hp100_stop_interface( dev );
+
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status=hp100_login_to_vg_hub( dev, FALSE );
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ free_irq( dev->irq, dev );
+
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: close LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW) );
+#endif
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+
+/*
+ * Configure the PDL Rx rings and LAN
+ */
+static void hp100_init_pdls( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ringptr;
+ u_int *pageptr;
+ int i;
+
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4206, TRACE );
+ printk("hp100: %s: init pdls\n", dev->name);
+#endif
+
+ if(0==lp->page_vaddr_algn)
+ printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n",dev->name);
+ else
+ {
+ /* pageptr shall point into the DMA accessible memory region */
+      /* we use this pointer to mark the upper limit of allocated   */
+ /* memory in the allocated page. */
+ /* note: align the pointers to the pci cache line size */
+ memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE); /* Zero Rx/Tx ring page */
+ pageptr=lp->page_vaddr_algn;
+
+ lp->rxrcommit =0;
+ ringptr = lp->rxrhead = lp-> rxrtail = &(lp->rxring[0]);
+
+ /* Initialise Rx Ring */
+ for (i=MAX_RX_PDL-1; i>=0; i--)
+ {
+ lp->rxring[i].next = ringptr;
+ ringptr=&(lp->rxring[i]);
+ pageptr+=hp100_init_rxpdl(dev, ringptr, pageptr);
+ }
+
+ /* Initialise Tx Ring */
+ lp->txrcommit = 0;
+ ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]);
+ for (i=MAX_TX_PDL-1; i>=0; i--)
+ {
+ lp->txring[i].next = ringptr;
+ ringptr=&(lp->txring[i]);
+ pageptr+=hp100_init_txpdl(dev, ringptr, pageptr);
+ }
+ }
+}
+
+
+/* These functions "format" the entries in the pdl structure.  */
+/* They return the size of the PDL in 32-bit words (rounded up */
+/* to a 16-byte boundary) so the caller can advance its page   */
+/* pointer past it.                                            */
+static int hp100_init_rxpdl( struct device *dev, register hp100_ring_t *ringptr, register u32 *pdlptr )
+{
+  /* pdlptr is the starting address for this pdl */
+
+ if( 0!=( ((unsigned)pdlptr) & 0xf) )
+ printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%x.\n",dev->name,(unsigned)pdlptr);
+
+ ringptr->pdl = pdlptr+1;
+ ringptr->pdl_paddr = virt_to_bus(pdlptr+1);
+ ringptr->skb = (void *) NULL;
+
+ /*
+ * Write address and length of first PDL Fragment (which is used for
+   * storing the RX-Header).
+ * We use the 4 bytes _before_ the PDH in the pdl memory area to
+ * store this information. (PDH is at offset 0x04)
+ */
+ /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */
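+  /* Resulting layout of one rx PDL (derived from the code below and from
+   * hp100_build_rx_pdl / hp100_rx_bm):
+   *   pdlptr[0]  rx header word, written here by the card (read back as
+   *              *(ringptr->pdl - 1) in hp100_rx_bm)
+   *   pdlptr[1]  PDH             (ringptr->pdl points here)
+   *   pdlptr[2]  frag 1 address  = virt_to_bus(pdlptr)
+   *   pdlptr[3]  frag 1 length   = 4
+   *   pdlptr[4]  frag 2 address  = skb data  (set in hp100_build_rx_pdl)
+   *   pdlptr[5]  frag 2 length   = MAX_ETHER_SIZE
+   */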
+
+ *(pdlptr+2) =(u_int) virt_to_bus(pdlptr); /* Address Frag 1 */
+ *(pdlptr+3) = 4; /* Length Frag 1 */
+
+ return( ( ((MAX_RX_FRAG*2+2)+3) /4)*4 );
+}
+
+
+static int hp100_init_txpdl( struct device *dev, register hp100_ring_t *ringptr, register u32 *pdlptr )
+{
+ if( 0!=( ((unsigned)pdlptr) & 0xf) )
+ printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%x.\n",dev->name,(unsigned) pdlptr);
+
+ ringptr->pdl = pdlptr; /* +1; */
+ ringptr->pdl_paddr = virt_to_bus(pdlptr); /* +1 */
+ ringptr->skb = (void *) NULL;
+
+ return((((MAX_TX_FRAG*2+2)+3)/4)*4);
+}
+
+
+/*
+ * hp100_build_rx_pdl allocates an sk_buff of maximum size (plus two bytes
+ * for possible odd word alignment, rounded up to the next dword) and sets
+ * the PDL address and length for fragment #2.
+ * Returns: 0 if unable to allocate skb_buff
+ * 1 if successful
+ */
+int hp100_build_rx_pdl( hp100_ring_t *ringptr, struct device *dev )
+{
+#ifdef HP100_DEBUG_B
+ int ioaddr = dev->base_addr;
+#endif
+#ifdef HP100_DEBUG_BM
+ u_int *p;
+#endif
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4207, TRACE );
+ printk("hp100: %s: build rx pdl\n", dev->name);
+#endif
+
+ /* Allocate skb buffer of maximum size */
+ /* Note: This depends on the alloc_skb functions allocating more
+ * space than requested, i.e. aligning to 16bytes */
+
+ ringptr->skb = dev_alloc_skb( ((MAX_ETHER_SIZE+2+3)/4)*4 );
+
+ if(NULL!=ringptr->skb)
+ {
+ /*
+ * Reserve 2 bytes at the head of the buffer to land the IP header
+ * on a long word boundary (According to the Network Driver section
+ * in the Linux KHG, this should help to increase performance.)
+ */
+ skb_reserve(ringptr->skb, 2);
+
+ ringptr->skb->dev=dev;
+ ringptr->skb->data=(u_char *)skb_put(ringptr->skb, MAX_ETHER_SIZE );
+
+ /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */
+ /* Note: 1st Fragment is used for the 4 byte packet status
+ * (receive header). Its PDL entries are set up by init_rxpdl. So
+ * here we only have to set up the PDL fragment entries for the data
+ * part. Those 4 bytes will be stored in the DMA memory region
+ * directly before the PDL.
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
+ dev->name,
+ (u_int) ringptr->pdl,
+ ((MAX_ETHER_SIZE+2+3)/4)*4,
+ (unsigned int) ringptr->skb->data);
+#endif
+
+ ringptr->pdl[0] = 0x00020000; /* Write PDH */
+ ringptr->pdl[3] = ((u_int)virt_to_bus(ringptr->skb->data));
+ ringptr->pdl[4] = MAX_ETHER_SIZE; /* Length of Data */
+
+#ifdef HP100_DEBUG_BM
+ for(p=(ringptr->pdl); p<(ringptr->pdl+5); p++)
+ printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n",dev->name,(u_int) p,(u_int) *p );
+#endif
+ return(1);
+ }
+ /* else: */
+ /* alloc_skb failed (no memory) -> still can receive the header
+ * fragment into PDL memory. make PDL safe by clearing msgptr and
+ * making the PDL only 1 fragment (i.e. the 4 byte packet status)
+ */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n",
+ dev->name,
+ (u_int) ringptr->pdl);
+#endif
+
+ ringptr->pdl[0]=0x00010000; /* PDH: Count=1 Fragment */
+
+ return(0);
+}
+
+
+/*
+ * hp100_rxfill - attempt to fill the Rx Ring with empty skb's
+ *
+ * Makes assumption that skb's are always contiguous memory areas and
+ * therefore PDLs contain only 2 physical fragments.
+ * - While the number of Rx PDLs with buffers is less than maximum
+ * a. Get a maximum packet size skb
+ * b. Put the physical address of the buffer into the PDL.
+ * c. Output physical address of PDL to adapter.
+ */
+static void hp100_rxfill( struct device *dev )
+{
+ int ioaddr=dev->base_addr;
+
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4208, TRACE );
+ printk("hp100: %s: rxfill\n",dev->name);
+#endif
+
+ hp100_page( PERFORMANCE );
+
+ while (lp->rxrcommit < MAX_RX_PDL)
+ {
+ /*
+ ** Attempt to get a buffer and build a Rx PDL.
+ */
+ ringptr = lp->rxrtail;
+ if (0 == hp100_build_rx_pdl( ringptr, dev ))
+ {
+ return; /* None available, return */
+ }
+
+ /* Hand this PDL over to the card */
+ /* Note: This needs performance page selected! */
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n",
+ dev->name,
+ lp->rxrcommit,
+ (u_int)ringptr->pdl,
+ (u_int)ringptr->pdl_paddr,
+ (u_int)ringptr->pdl[3]);
+#endif
+
+ hp100_outl( (u32)ringptr->pdl_paddr, RX_PDA);
+
+ lp->rxrcommit += 1;
+ lp->rxrtail = ringptr->next;
+ }
+}
+
+
+/*
+ * BM_shutdown - shutdown bus mastering and leave chip in reset state
+ */
+
+static void hp100_BM_shutdown( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ unsigned long time;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4209, TRACE );
+ printk("hp100: %s: bm shutdown\n",dev->name);
+#endif
+
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* Ack all ints */
+
+ /* Ensure Interrupts are off */
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB , OPTION_LSW );
+
+ /* Disable all MAC activity */
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 ); /* stop rx/tx */
+
+ /* If cascade MMU is not already in reset */
+ if (0 != (hp100_inw(OPTION_LSW)&HP100_HW_RST) )
+ {
+ /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so
+ * MMU pointers will not be reset out from underneath
+ */
+ hp100_page( MAC_CTRL );
+ for(time=0; time<5000; time++)
+ {
+ if( (hp100_inb(MAC_CFG_1)&(HP100_TX_IDLE|HP100_RX_IDLE))==
+ (HP100_TX_IDLE|HP100_RX_IDLE) ) break;
+ }
+
+ /* Shutdown algorithm depends on the generation of Cascade */
+ if( lp->chip==HP100_CHIPID_LASSEN )
+ { /* ETR shutdown/reset */
+ /* Disable Busmaster mode and wait for bit to go to zero. */
+ hp100_page(HW_MAP);
+ hp100_andb( ~HP100_BM_MASTER, BM );
+ /* 100 ms timeout */
+ for(time=0; time<32000; time++)
+ {
+ if ( 0 == (hp100_inb( BM ) & HP100_BM_MASTER) ) break;
+ }
+ }
+ else
+ { /* Shasta or Rainier Shutdown/Reset */
+ /* To ensure all bus master inloading activity has ceased,
+ * wait for no Rx PDAs or no Rx packets on card.
+ */
+ hp100_page( PERFORMANCE );
+ /* 100 ms timeout */
+ for(time=0; time<10000; time++)
+ {
+ /* RX_PDL: PDLs not executed. */
+ /* RX_PKT_CNT: RX'd packets on card. */
+ if ( (hp100_inb( RX_PDL ) == 0) &&
+ (hp100_inb( RX_PKT_CNT ) == 0) ) break;
+ }
+
+ if(time>=10000)
+ printk("hp100: %s: BM shutdown error.\n", dev->name);
+
+ /* To ensure all bus master outloading activity has ceased,
+ * wait until the Tx PDA count goes to zero or no more Tx space
+ * available in the Tx region of the card.
+ */
+ /* 100 ms timeout */
+ for(time=0; time<10000; time++) {
+ if ( (0 == hp100_inb( TX_PKT_CNT )) &&
+ (0 != (hp100_inb( TX_MEM_FREE )&HP100_AUTO_COMPARE))) break;
+ }
+
+ /* Disable Busmaster mode */
+ hp100_page(HW_MAP);
+ hp100_andb( ~HP100_BM_MASTER, BM );
+ } /* end of shutdown procedure for non-etr parts */
+
+ hp100_cascade_reset( dev, TRUE );
+ }
+ hp100_page( PERFORMANCE );
+ /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */
+ /* Busmaster mode should be shut down now. */
+}
+
+
+
+/*
+ * transmit functions
+ */
+
+/* tx function for busmaster mode */
+static int hp100_start_xmit_bm( struct sk_buff *skb, struct device *dev )
+{
+ unsigned long flags;
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ringptr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4210, TRACE );
+ printk("hp100: %s: start_xmit_bm\n",dev->name);
+#endif
+
+ if ( skb==NULL )
+ {
+#ifndef LINUX_2_1
+ dev_tint( dev );
+#endif
+ return 0;
+ }
+
+ if ( skb->len <= 0 ) return 0;
+
+ /* Get Tx ring tail pointer */
+ if( lp->txrtail->next==lp->txrhead )
+ {
+ /* No memory. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
+#endif
+ /* not waited long enough since last tx? */
+ if ( jiffies - dev->trans_start < HZ ) return -EAGAIN;
+
+ if ( lp->lan_type < 0 ) /* no LAN type detected yet? */
+ {
+ hp100_stop_interface( dev );
+ if ( ( lp->lan_type = hp100_sense_lan( dev ) ) < 0 )
+ {
+ printk( "hp100: %s: no connection found - check wire\n", dev->name );
+ hp100_start_interface( dev ); /* 10Mb/s RX pkts maybe handled */
+ return -EIO;
+ }
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE ); /* relogin */
+ hp100_start_interface( dev );
+ }
+
+ if ( lp->lan_type == HP100_LAN_100 && lp->hub_status < 0 )
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ {
+ printk( "hp100: %s: login to 100Mb/s hub retry\n", dev->name );
+ hp100_stop_interface( dev );
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ hp100_ints_off();
+ i = hp100_sense_lan( dev );
+ hp100_ints_on();
+ if ( i == HP100_LAN_ERR )
+ printk( "hp100: %s: link down detected\n", dev->name );
+ else
+ if ( lp->lan_type != i ) /* cable change! */
+ {
+	    /* it's very hard - all network settings must be changed!!! */
+ printk( "hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name );
+ lp->lan_type = i;
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ printk( "hp100: %s: interface reset\n", dev->name );
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ }
+
+ dev->trans_start = jiffies;
+ return -EAGAIN;
+ }
+
+ /*
+ * we have to turn int's off before modifying this, otherwise
+ * a tx_pdl_cleanup could occur at the same time
+ */
+ save_flags( flags );
+ cli();
+ ringptr=lp->txrtail;
+ lp->txrtail=ringptr->next;
+
+ /* Check whether packet has minimal packet size */
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
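+  /* Build a single-fragment tx PDL: word 0 is the PDH (fragment count, here
+   * 1, in bits 16 and up, padded length in the lower 16 bits), word 1 the
+   * bus address of the data, word 2 the fragment length (set below). */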
+ ringptr->skb=skb;
+ ringptr->pdl[0]=((1<<16) | i); /* PDH: 1 Fragment & length */
+ ringptr->pdl[1]=(u32)virt_to_bus(skb->data); /* 1st Frag: Adr. of data */
+ if(lp->chip==HP100_CHIPID_SHASTA)
+ {
+ /* TODO:Could someone who has the EISA card please check if this works? */
+ ringptr->pdl[2]=i;
+ }
+ else /* Lassen */
+ {
+ /* In the PDL, don't use the padded size but the real packet size: */
+ ringptr->pdl[2]=skb->len; /* 1st Frag: Length of frag */
+ }
+
+ /* Hand this PDL to the card. */
+ hp100_outl( ringptr->pdl_paddr, TX_PDA_L ); /* Low Prio. Queue */
+
+ lp->txrcommit++;
+ restore_flags( flags );
+
+ /* Update statistics */
+ lp->stats.tx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.tx_bytes += skb->len;
+#endif
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+
+/* clean_txring checks if packets have been sent by the card by reading
+ * the TX_PDL register from the performance page and comparing it to the
+ * number of committed packets. It then frees the skb's of the packets that
+ * obviously have been sent to the network.
+ *
+ * Needs the PERFORMANCE page selected.
+ */
+static void hp100_clean_txring( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int donecount;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4211, TRACE );
+ printk("hp100: %s: clean txring\n", dev->name);
+#endif
+
+ /* How many PDLs have been transmitted? */
+ donecount=(lp->txrcommit)-hp100_inb(TX_PDL);
+
+#ifdef HP100_DEBUG
+ if(donecount>MAX_TX_PDL)
+    printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n",dev->name);
+#endif
+
+ for( ; 0!=donecount; donecount-- )
+ {
+#ifdef HP100_DEBUG_BM
+ printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n",
+ dev->name,
+ (u_int) lp->txrhead->skb->data,
+ lp->txrcommit,
+ hp100_inb(TX_PDL),
+ donecount);
+#endif
+#ifdef LINUX_2_1
+ dev_kfree_skb( lp->txrhead->skb );
+#else
+ dev_kfree_skb( lp->txrhead->skb, FREE_WRITE );
+#endif
+ lp->txrhead->skb=(void *)NULL;
+ lp->txrhead=lp->txrhead->next;
+ lp->txrcommit--;
+ }
+}
+
+
+/* tx function for slave modes */
+static int hp100_start_xmit( struct sk_buff *skb, struct device *dev )
+{
+ int i, ok_flag;
+ int ioaddr = dev->base_addr;
+ u_short val;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4212, TRACE );
+ printk("hp100: %s: start_xmit\n", dev->name);
+#endif
+
+ if ( skb==NULL )
+ {
+#ifndef LINUX_2_1
+ dev_tint( dev );
+#endif
+ return 0;
+ }
+
+ if ( skb->len <= 0 ) return 0;
+
+ if ( lp->lan_type < 0 ) /* no LAN type detected yet? */
+ {
+ hp100_stop_interface( dev );
+ if ( ( lp->lan_type = hp100_sense_lan( dev ) ) < 0 )
+ {
+ printk( "hp100: %s: no connection found - check wire\n", dev->name );
+ hp100_start_interface( dev ); /* 10Mb/s RX packets maybe handled */
+ return -EIO;
+ }
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE ); /* relogin */
+ hp100_start_interface( dev );
+ }
+
+ /* If there is not enough free memory on the card... */
+ i=hp100_inl(TX_MEM_FREE)&0x7fffffff;
+ if ( !(((i/2)-539)>(skb->len+16) && (hp100_inb(TX_PKT_CNT)<255)) )
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i );
+#endif
+ /* not waited long enough since last failed tx try? */
+ if ( jiffies - dev->trans_start < HZ )
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: trans_start timing problem\n", dev->name);
+#endif
+ return -EAGAIN;
+ }
+ if ( lp->lan_type == HP100_LAN_100 && lp->hub_status < 0 )
+ /* we have a 100Mb/s adapter but it isn't connected to hub */
+ {
+ printk( "hp100: %s: login to 100Mb/s hub retry\n", dev->name );
+ hp100_stop_interface( dev );
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ hp100_ints_off();
+ i = hp100_sense_lan( dev );
+ hp100_ints_on();
+ if ( i == HP100_LAN_ERR )
+ printk( "hp100: %s: link down detected\n", dev->name );
+ else
+ if ( lp->lan_type != i ) /* cable change! */
+ {
+	  /* it's very hard - all network settings must be changed!!! */
+ printk( "hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name );
+ lp->lan_type = i;
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ }
+ else
+ {
+ printk( "hp100: %s: interface reset\n", dev->name );
+ hp100_stop_interface( dev );
+ if ( lp->lan_type == HP100_LAN_100 )
+ lp->hub_status = hp100_login_to_vg_hub( dev, FALSE );
+ hp100_start_interface( dev );
+ udelay(1000);
+ }
+ }
+ dev->trans_start = jiffies;
+ return -EAGAIN;
+ }
+
+ for ( i=0; i<6000 && ( hp100_inb( OPTION_MSW ) & HP100_TX_CMD ); i++ )
+ {
+#ifdef HP100_DEBUG_TX
+ printk( "hp100: %s: start_xmit: busy\n", dev->name );
+#endif
+ }
+
+ hp100_ints_off();
+ val = hp100_inw( IRQ_STATUS );
+  /* Ack / clear the TX_COMPLETE interrupt - this interrupt is set
+ * when the current packet being transmitted on the wire is completed. */
+ hp100_outw( HP100_TX_COMPLETE, IRQ_STATUS );
+#ifdef HP100_DEBUG_TX
+ printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n",dev->name,val,hp100_inw(IRQ_MASK),(int)skb->len );
+#endif
+
+ ok_flag = skb->len >= HP100_MIN_PACKET_SIZE;
+ i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE;
+
+ hp100_outw( i, DATA32 ); /* tell card the total packet length */
+ hp100_outw( i, FRAGMENT_LEN ); /* and first/only fragment length */
+
+ if ( lp->mode==2 ) /* memory mapped */
+ {
+ if ( lp->mem_ptr_virt ) /* high pci memory was remapped */
+ {
+ /* Note: The J2585B needs alignment to 32bits here! */
+ memcpy( lp->mem_ptr_virt, skb->data, ( skb->len + 3 ) & ~3 );
+ if ( !ok_flag )
+ memset( lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len );
+ }
+ else
+ {
+ /* Note: The J2585B needs alignment to 32bits here! */
+ memcpy_toio( lp->mem_ptr_phys, skb->data, (skb->len + 3) & ~3 );
+ if ( !ok_flag )
+ memset_io( lp->mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb->len );
+ }
+ }
+ else /* programmed i/o */
+ {
+ outsl( ioaddr + HP100_REG_DATA32, skb->data, ( skb->len + 3 ) >> 2 );
+ if ( !ok_flag )
+ for ( i = ( skb->len + 3 ) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4 )
+ hp100_outl( 0, DATA32 );
+ }
+
+ hp100_outb( HP100_TX_CMD | HP100_SET_LB, OPTION_MSW ); /* send packet */
+
+ lp->stats.tx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.tx_bytes += skb->len;
+#endif
+ dev->trans_start=jiffies;
+ hp100_ints_on();
+
+#ifdef LINUX_2_1
+ dev_kfree_skb( skb );
+#else
+ dev_kfree_skb( skb, FREE_WRITE );
+#endif
+
+#ifdef HP100_DEBUG_TX
+ printk( "hp100: %s: start_xmit: end\n", dev->name );
+#endif
+
+ return 0;
+}
+
+
+/*
+ * Receive Function (Non-Busmaster mode)
+ * Called when a "Receive Packet" interrupt occurs, i.e. the receive
+ * packet counter is non-zero.
+ * For non-busmaster, this function does the whole work of transferring
+ * the packet to the host memory and then up to higher layers via skb
+ * and netif_rx.
+ */
+
+static void hp100_rx( struct device *dev )
+{
+ int packets, pkt_len;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ u_int header;
+ struct sk_buff *skb;
+
+#ifdef DEBUG_B
+ hp100_outw( 0x4213, TRACE );
+ printk("hp100: %s: rx\n", dev->name);
+#endif
+
+ /* First get indication of received lan packet */
+  /* RX_PKT_CNT indicates the number of packets which have been fully */
+  /* received onto the card but have not yet been fully transferred off the card */
+ packets = hp100_inb( RX_PKT_CNT );
+#ifdef HP100_DEBUG_RX
+ if ( packets > 1 )
+ printk( "hp100: %s: rx: waiting packets = %d\n", dev->name,packets );
+#endif
+
+ while ( packets-- > 0 )
+ {
+ /* If ADV_NXT_PKT is still set, we have to wait until the card has */
+ /* really advanced to the next packet. */
+ for (pkt_len=0; pkt_len<6000 &&(hp100_inb(OPTION_MSW)&HP100_ADV_NXT_PKT);
+ pkt_len++ )
+ {
+#ifdef HP100_DEBUG_RX
+ printk( "hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets );
+#endif
+ }
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
+ if( lp->mode==2 ) /* memory mapped mode */
+ {
+ if ( lp->mem_ptr_virt ) /* if memory was remapped */
+ header = *(__u32 *)lp->mem_ptr_virt;
+ else
+ header = readl( lp->mem_ptr_phys );
+ }
+ else /* programmed i/o */
+ header = hp100_inl( DATA32 );
+
+ pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3;
+
+#ifdef HP100_DEBUG_RX
+ printk( "hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name,
+ header & HP100_PKT_LEN_MASK, (header>>16)&0xfff8,
+ (header>>16)&7);
+#endif
+
+ /* Now we allocate the skb and transfer the data into it. */
+ skb = dev_alloc_skb( pkt_len );
+ if ( skb == NULL ) /* Not enough memory->drop packet */
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: rx: couldn't allocate a sk_buff of size %d\n", dev->name, pkt_len );
+#endif
+ lp->stats.rx_dropped++;
+ }
+ else /* skb successfully allocated */
+ {
+ u_char *ptr;
+
+ skb->dev = dev;
+
+ /* ptr to start of the sk_buff data area */
+ ptr = (u_char *)skb_put( skb, pkt_len );
+
+ /* Now transfer the data from the card into that area */
+ if ( lp->mode==2 )
+ {
+ if ( lp->mem_ptr_virt )
+ memcpy( ptr, lp->mem_ptr_virt, pkt_len );
+ /* Note alignment to 32bit transfers */
+ else
+ memcpy_fromio( ptr, lp->mem_ptr_phys, pkt_len );
+ }
+ else /* io mapped */
+ insl( ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2 );
+
+ skb->protocol = eth_type_trans( skb, dev );
+
+ netif_rx( skb );
+ lp->stats.rx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.rx_bytes += skb->len;
+#endif
+
+#ifdef HP100_DEBUG_RX
+ printk( "hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ dev->name,
+ ptr[ 0 ], ptr[ 1 ], ptr[ 2 ], ptr[ 3 ], ptr[ 4 ], ptr[ 5 ],
+ ptr[ 6 ], ptr[ 7 ], ptr[ 8 ], ptr[ 9 ], ptr[ 10 ], ptr[ 11 ] );
+#endif
+ }
+
+      /* Indicate to the card that we have got the packet */
+ hp100_outb( HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW );
+
+ switch ( header & 0x00070000 ) {
+ case (HP100_MULTI_ADDR_HASH<<16):
+ case (HP100_MULTI_ADDR_NO_HASH<<16):
+ lp->stats.multicast++; break;
+ }
+ } /* end of while(there are packets) loop */
+#ifdef HP100_DEBUG_RX
+ printk( "hp100_rx: %s: end\n", dev->name );
+#endif
+}
+
+
+/*
+ * Receive Function for Busmaster Mode
+ */
+static void hp100_rx_bm( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ hp100_ring_t *ptr;
+ u_int header;
+ int pkt_len;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4214, TRACE );
+ printk("hp100: %s: rx_bm\n", dev->name);
+#endif
+
+#ifdef HP100_DEBUG
+ if(0==lp->rxrcommit)
+ {
+ printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name);
+ return;
+ }
+ else
+
+ /* RX_PKT_CNT states how many PDLs are currently formatted and available to
+   * the card's BM engine */
+ if( (hp100_inw(RX_PKT_CNT)&0x00ff) >= lp->rxrcommit)
+ {
+      printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n", dev->name, hp100_inw(RX_PKT_CNT)&0x00ff, lp->rxrcommit);
+ return;
+ }
+#endif
+
+ while( (lp->rxrcommit > hp100_inb(RX_PDL)) )
+ {
+ /*
+       * The packet was received into the pdl pointed to by lp->rxrhead
+       * (the oldest pdl in the ring).
+ */
+
+ /* First we get the header, which contains information about the */
+ /* actual length of the received packet. */
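+      /* The header is the word that the card DMA'd in front of the PDH  */
+      /* (fragment 1, as set up in hp100_init_rxpdl); its low bits hold  */
+      /* the packet length.                                              */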
+
+ ptr=lp->rxrhead;
+
+ header = *(ptr->pdl-1);
+ pkt_len = (header & HP100_PKT_LEN_MASK);
+
+#ifdef HP100_DEBUG_BM
+ printk( "hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n",
+ dev->name,
+ (u_int) (ptr->pdl-1),(u_int) header,
+ pkt_len,
+ (header>>16)&0xfff8,
+ (header>>16)&7);
+ printk( "hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n",
+ dev->name,
+ hp100_inb( RX_PDL ),
+ hp100_inb( TX_PDL ),
+ hp100_inb( RX_PKT_CNT ),
+ (u_int) *(ptr->pdl),
+ (u_int) *(ptr->pdl+3),
+ (u_int) *(ptr->pdl+4));
+#endif
+
+ if( (pkt_len>=MIN_ETHER_SIZE) &&
+ (pkt_len<=MAX_ETHER_SIZE) )
+ {
+ if(ptr->skb==NULL)
+ {
+ printk("hp100: %s: rx_bm: skb null\n", dev->name);
+ /* can happen if we only allocated room for the pdh due to memory shortage. */
+ lp->stats.rx_dropped++;
+ }
+ else
+ {
+ skb_trim( ptr->skb, pkt_len ); /* Shorten it */
+ ptr->skb->protocol = eth_type_trans( ptr->skb, dev );
+
+ netif_rx( ptr->skb ); /* Up and away... */
+
+ lp->stats.rx_packets++;
+#ifdef LINUX_2_1
+ lp->stats.rx_bytes += ptr->skb->len;
+#endif
+ }
+
+ switch ( header & 0x00070000 ) {
+ case (HP100_MULTI_ADDR_HASH<<16):
+ case (HP100_MULTI_ADDR_NO_HASH<<16):
+ lp->stats.multicast++; break;
+ }
+ }
+ else
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n",dev->name,pkt_len);
+#endif
+ if(ptr->skb!=NULL)
+#ifdef LINUX_2_1
+ dev_kfree_skb( ptr->skb );
+#else
+ dev_kfree_skb( ptr->skb, FREE_READ );
+#endif
+ lp->stats.rx_errors++;
+ }
+
+ lp->rxrhead=lp->rxrhead->next;
+
+ /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */
+ if (0 == hp100_build_rx_pdl( lp->rxrtail, dev ))
+ {
+ /* No space for skb, header can still be received. */
+#ifdef HP100_DEBUG
+ printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name);
+#endif
+ return;
+ }
+ else
+ { /* successfully allocated new PDL - put it in ringlist at tail. */
+ hp100_outl((u32)lp->rxrtail->pdl_paddr, RX_PDA);
+ lp->rxrtail=lp->rxrtail->next;
+ }
+
+ }
+}
+
+
+
+/*
+ * statistics
+ */
+static hp100_stats_t *hp100_get_stats( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4215, TRACE );
+#endif
+
+ hp100_ints_off();
+ hp100_update_stats( dev );
+ hp100_ints_on();
+ return &((struct hp100_private *)dev->priv)->stats;
+}
+
+static void hp100_update_stats( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ u_short val;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4216, TRACE );
+ printk("hp100: %s: update-stats\n", dev->name);
+#endif
+
+ /* Note: Statistics counters clear when read. */
+ hp100_page( MAC_CTRL );
+ val = hp100_inw( DROPPED ) & 0x0fff;
+ lp->stats.rx_errors += val;
+ lp->stats.rx_over_errors += val;
+ val = hp100_inb( CRC );
+ lp->stats.rx_errors += val;
+ lp->stats.rx_crc_errors += val;
+ val = hp100_inb( ABORT );
+ lp->stats.tx_errors += val;
+ lp->stats.tx_aborted_errors += val;
+ hp100_page( PERFORMANCE );
+}
+
+static void hp100_misc_interrupt( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4216, TRACE );
+ printk("hp100: %s: misc_interrupt\n", dev->name);
+#endif
+
+ /* Note: Statistics counters clear when read. */
+ lp->stats.rx_errors++;
+ lp->stats.tx_errors++;
+}
+
+static void hp100_clear_stats( int ioaddr )
+{
+ unsigned long flags;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4217, TRACE );
+  printk("hp100: clear_stats\n");
+#endif
+
+ save_flags( flags );
+ cli();
+ hp100_page( MAC_CTRL ); /* get all statistics bytes */
+ hp100_inw( DROPPED );
+ hp100_inb( CRC );
+ hp100_inb( ABORT );
+ hp100_page( PERFORMANCE );
+ restore_flags( flags );
+}
+
+
+/*
+ * multicast setup
+ */
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ */
+
+static void hp100_set_multicast_list( struct device *dev )
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4218, TRACE );
+ printk("hp100: %s: set_mc_list\n", dev->name);
+#endif
+
+ save_flags( flags );
+ cli();
+ hp100_ints_off();
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 ); /* stop rx/tx */
+
+ if ( dev->flags & IFF_PROMISC )
+ {
+ lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
+ lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
+ memset( &lp->hash_bytes, 0xff, 8 );
+ }
+ else if ( dev->mc_count || (dev->flags&IFF_ALLMULTI) )
+ {
+ lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
+ lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
+#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
+ if ( dev -> flags & IFF_ALLMULTI )
+ {
+ /* set hash filter to receive all multicast packets */
+ memset( &lp->hash_bytes, 0xff, 8 );
+ }
+ else
+ {
+ int i, j, idx;
+ u_char *addrs;
+ struct dev_mc_list *dmi;
+
+ memset( &lp->hash_bytes, 0x00, 8 );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: computing hash filter - mc_count = %i\n", dev -> name, dev -> mc_count );
+#endif
+ for ( i = 0, dmi = dev -> mc_list; i < dev -> mc_count; i++, dmi = dmi -> next )
+ {
+ addrs = dmi -> dmi_addr;
+ if ( ( *addrs & 0x01 ) == 0x01 ) /* multicast address? */
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast = %02x:%02x:%02x:%02x:%02x:%02x, ",
+ dev -> name,
+ addrs[ 0 ], addrs[ 1 ], addrs[ 2 ],
+ addrs[ 3 ], addrs[ 4 ], addrs[ 5 ] );
+#endif
+ for ( j = idx = 0; j < 6; j++ )
+ {
+ idx ^= *addrs++ & 0x3f;
+ printk( ":%02x:", idx );
+ }
+#ifdef HP100_DEBUG
+ printk("idx = %i\n", idx );
+#endif
+ lp->hash_bytes[ idx >> 3 ] |= ( 1 << ( idx & 7 ) );
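+	      /* Example: for 01:00:5e:00:00:01 the bytes masked with 0x3f are
+	       * 01 00 1e 00 00 01, which XOR to idx = 0x1e = 30, so bit 6 of
+	       * hash_bytes[3] gets set. */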
+ }
+ }
+ }
+#else
+ memset( &lp->hash_bytes, 0xff, 8 );
+#endif
+ }
+ else
+ {
+ lp->mac2_mode = HP100_MAC2MODE3; /* normal mode = get packets for me */
+ lp->mac1_mode = HP100_MAC1MODE3; /* and broadcasts */
+ memset( &lp->hash_bytes, 0x00, 8 );
+ }
+
+ if ( ( (hp100_inb(MAC_CFG_1) & 0x0f)!=lp->mac1_mode ) ||
+ ( hp100_inb(MAC_CFG_2)!=lp->mac2_mode ) )
+ {
+ int i;
+
+ hp100_outb( lp->mac2_mode, MAC_CFG_2 );
+ hp100_andb( HP100_MAC1MODEMASK, MAC_CFG_1 ); /* clear mac1 mode bits */
+ hp100_orb( lp->mac1_mode, MAC_CFG_1 ); /* and set the new mode */
+
+ hp100_page( MAC_ADDRESS );
+ for ( i = 0; i < 8; i++ )
+ hp100_outb( lp->hash_bytes[ i ], HASH_BYTE0 + i );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, lp->mac1_mode, lp->mac2_mode,
+ lp->hash_bytes[ 0 ], lp->hash_bytes[ 1 ],
+ lp->hash_bytes[ 2 ], lp->hash_bytes[ 3 ],
+ lp->hash_bytes[ 4 ], lp->hash_bytes[ 5 ],
+ lp->hash_bytes[ 6 ], lp->hash_bytes[ 7 ]
+ );
+#endif
+
+ if(lp->lan_type==HP100_LAN_100)
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status=hp100_login_to_vg_hub( dev, TRUE ); /* force a relogin to the hub */
+ }
+ }
+ else
+ {
+ int i;
+ u_char old_hash_bytes[ 8 ];
+
+ hp100_page( MAC_ADDRESS );
+ for ( i = 0; i < 8; i++ )
+ old_hash_bytes[ i ] = hp100_inb( HASH_BYTE0 + i );
+ if ( memcmp( old_hash_bytes, &lp->hash_bytes, 8 ) )
+ {
+ for ( i = 0; i < 8; i++ )
+ hp100_outb( lp->hash_bytes[ i ], HASH_BYTE0 + i );
+#ifdef HP100_DEBUG
+ printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ lp->hash_bytes[ 0 ], lp->hash_bytes[ 1 ],
+ lp->hash_bytes[ 2 ], lp->hash_bytes[ 3 ],
+ lp->hash_bytes[ 4 ], lp->hash_bytes[ 5 ],
+ lp->hash_bytes[ 6 ], lp->hash_bytes[ 7 ]
+ );
+#endif
+
+ if(lp->lan_type==HP100_LAN_100)
+ {
+#ifdef HP100_DEBUG
+ printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name);
+#endif
+ lp->hub_status=hp100_login_to_vg_hub( dev, TRUE ); /* force a relogin to the hub */
+ }
+ }
+ }
+
+ hp100_page( MAC_CTRL );
+ hp100_orb( HP100_RX_EN | HP100_RX_IDLE | /* enable rx */
+ HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1 ); /* enable tx */
+
+ hp100_page( PERFORMANCE );
+ hp100_ints_on();
+ restore_flags( flags );
+}
+
+
+/*
+ * hardware interrupt handling
+ */
+
+static void hp100_interrupt( int irq, void *dev_id, struct pt_regs *regs )
+{
+ struct device *dev = (struct device *)dev_id;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+ int ioaddr;
+ u_int val;
+
+ if ( dev == NULL ) return;
+ ioaddr = dev->base_addr;
+
+ if ( dev->interrupt )
+ printk( "hp100: %s: re-entering the interrupt handler\n", dev->name );
+ hp100_ints_off();
+ dev->interrupt = 1; /* mark that we are inside the handler */
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4219, TRACE );
+#endif
+
+ /* hp100_page( PERFORMANCE ); */
+ val = hp100_inw( IRQ_STATUS );
+#ifdef HP100_DEBUG_IRQ
+ printk( "hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x TXPKTCNT=0x%.2x TXPDL=0x%.2x\n",
+ dev->name,
+ lp->mode,
+ (u_int)val,
+ hp100_inb( RX_PKT_CNT ),
+ hp100_inb( RX_PDL ),
+ hp100_inb( TX_PKT_CNT ),
+ hp100_inb( TX_PDL )
+ );
+#endif
+
+ if(val==0) /* might be a shared interrupt */
+ {
+ dev->interrupt=0;
+ hp100_ints_on();
+ return;
+ }
+ /* We're only interested in those interrupts we really enabled. */
+ /* val &= hp100_inw( IRQ_MASK ); */
+
+ /*
+ * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL
+ * is considered executed whenever the RX_PDL data structure is no longer
+ * needed.
+ */
+ if ( val & HP100_RX_PDL_FILL_COMPL )
+ {
+ if(lp->mode==1)
+ hp100_rx_bm( dev );
+ else
+ {
+ printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name);
+ }
+ }
+
+ /*
+   * The RX_PACKET interrupt is set when the receive packet counter is
+   * non-zero. We use this interrupt for receiving in slave mode. In
+ * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill
+ * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then
+ * we somehow have missed a rx_pdl_fill_compl interrupt.
+ */
+
+ if ( val & HP100_RX_PACKET ) /* Receive Packet Counter is non zero */
+ {
+ if(lp->mode!=1) /* non busmaster */
+ hp100_rx( dev );
+ else if ( !(val & HP100_RX_PDL_FILL_COMPL ))
+ {
+	  /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt?  */
+ hp100_rx_bm( dev );
+ }
+ }
+
+ /*
+ * Ack. that we have noticed the interrupt and thereby allow next one.
+ * Note that this is now done after the slave rx function, since first
+ * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt
+ * on the J2573.
+ */
+ hp100_outw( val, IRQ_STATUS );
+
+ /*
+ * RX_ERROR is set when a packet is dropped due to no memory resources on
+ * the card or when a RCV_ERR occurs.
+   * TX_ERROR is set when a TX_ABORT condition occurs in the MAC; this exists
+   * only in the 802.3 MAC and happens when 16 collisions occur during a TX.
+ */
+ if ( val & ( HP100_TX_ERROR | HP100_RX_ERROR ) )
+ {
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: TX/RX Error IRQ\n", dev->name);
+#endif
+ hp100_update_stats( dev );
+ if(lp->mode==1)
+ {
+ hp100_rxfill( dev );
+ hp100_clean_txring( dev );
+ }
+ }
+
+ /*
+ * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero.
+ */
+ if ( (lp->mode==1)&&(val &(HP100_RX_PDA_ZERO)) )
+ hp100_rxfill( dev );
+
+ /*
+ * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire
+ * is completed
+ */
+ if ( (lp->mode==1) && ( val & ( HP100_TX_COMPLETE )) )
+ hp100_clean_txring( dev );
+
+ /*
+   * MISC_ERROR is set when either the LAN link goes down or a bus error
+   * is detected.
+ */
+ if ( val & HP100_MISC_ERROR ) /* New for J2585B */
+ {
+#ifdef HP100_DEBUG_IRQ
+ printk("hp100: %s: Misc. Error Interrupt - Check cabling.\n", dev->name);
+#endif
+ if(lp->mode==1)
+ {
+ hp100_clean_txring( dev );
+ hp100_rxfill( dev );
+ }
+ hp100_misc_interrupt( dev );
+ }
+
+ dev->interrupt = 0;
+ hp100_ints_on();
+}
+
+
+/*
+ * some misc functions
+ */
+
+static void hp100_start_interface( struct device *dev )
+{
+ unsigned long flags;
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4220, TRACE );
+ printk("hp100: %s: hp100_start_interface\n",dev->name);
+#endif
+
+ save_flags( flags );
+ cli();
+
+ /* Ensure the adapter does not want to request an interrupt when */
+ /* enabling the IRQ line to be active on the bus (i.e. not tri-stated) */
+ hp100_page( PERFORMANCE );
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack all IRQs */
+ hp100_outw( HP100_FAKE_INT|HP100_INT_EN|HP100_RESET_LB, OPTION_LSW);
+ /* Un Tri-state int. TODO: Check if shared interrupts can be realised? */
+ hp100_outw( HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW );
+
+ if(lp->mode==1)
+ {
+ /* Make sure BM bit is set... */
+ hp100_page(HW_MAP);
+ hp100_orb( HP100_BM_MASTER, BM );
+ hp100_rxfill( dev );
+ }
+ else if(lp->mode==2)
+ {
+ /* Enable memory mapping. Note: Don't do this when busmaster. */
+ hp100_outw( HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW );
+ }
+
+ hp100_page(PERFORMANCE);
+ hp100_outw( 0xfefe, IRQ_MASK ); /* mask off all ints */
+ hp100_outw( 0xffff, IRQ_STATUS ); /* ack IRQ */
+
+ /* enable a few interrupts: */
+ if(lp->mode==1) /* busmaster mode */
+ {
+ hp100_outw( HP100_RX_PDL_FILL_COMPL |
+ HP100_RX_PDA_ZERO |
+ HP100_RX_ERROR |
+ /* HP100_RX_PACKET | */
+ /* HP100_RX_EARLY_INT | */ HP100_SET_HB |
+ /* HP100_TX_PDA_ZERO | */
+ HP100_TX_COMPLETE |
+ /* HP100_MISC_ERROR | */
+ HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK );
+ }
+ else
+ {
+ hp100_outw( HP100_RX_PACKET |
+ HP100_RX_ERROR | HP100_SET_HB |
+ HP100_TX_ERROR | HP100_SET_LB , IRQ_MASK );
+ }
+
+ /* Enable MAC Tx and RX, set MAC modes, ... */
+ hp100_set_multicast_list( dev );
+
+ restore_flags( flags );
+}
+
+
+static void hp100_stop_interface( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ u_int val;
+
+#ifdef HP100_DEBUG_B
+ printk("hp100: %s: hp100_stop_interface\n",dev->name);
+ hp100_outw( 0x4221, TRACE );
+#endif
+
+ if (lp->mode==1)
+ hp100_BM_shutdown( dev );
+ else
+ {
+ /* Note: MMAP_DIS will be reenabled by start_interface */
+ hp100_outw( HP100_INT_EN | HP100_RESET_LB |
+ HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW );
+ val = hp100_inw( OPTION_LSW );
+
+ hp100_page( MAC_CTRL );
+ hp100_andb( ~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1 );
+
+ if ( !(val & HP100_HW_RST) ) return; /* If reset, imm. return ... */
+ /* ... else: busy wait until idle */
+ for ( val = 0; val < 6000; val++ )
+ if ( ( hp100_inb( MAC_CFG_1 ) & (HP100_TX_IDLE | HP100_RX_IDLE) ) ==
+ (HP100_TX_IDLE | HP100_RX_IDLE) )
+ {
+ hp100_page(PERFORMANCE);
+ return;
+ }
+ printk( "hp100: %s: hp100_stop_interface - timeout\n", dev->name );
+ hp100_page(PERFORMANCE);
+ }
+}
+
+
+static void hp100_load_eeprom( struct device *dev, u_short probe_ioaddr )
+{
+ int i;
+ int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4222, TRACE );
+#endif
+
+ hp100_page( EEPROM_CTRL );
+ hp100_andw( ~HP100_EEPROM_LOAD, EEPROM_CTRL );
+ hp100_orw( HP100_EEPROM_LOAD, EEPROM_CTRL );
+ for ( i = 0; i < 10000; i++ )
+ if ( !( hp100_inb( OPTION_MSW ) & HP100_EE_LOAD ) ) return;
+ printk( "hp100: %s: hp100_load_eeprom - timeout\n", dev->name );
+}
+
+
+/* Sense connection status.
+ * return values: LAN_10 - Connected to 10Mbit/s network
+ * LAN_100 - Connected to 100Mbit/s network
+ * LAN_ERR - not connected or 100Mbit/s Hub down
+ */
+static int hp100_sense_lan( struct device *dev )
+{
+ int ioaddr = dev->base_addr;
+ u_short val_VG, val_10;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4223, TRACE );
+#endif
+
+ hp100_page( MAC_CTRL );
+ val_10 = hp100_inb( 10_LAN_CFG_1 );
+ val_VG = hp100_inb( VG_LAN_CFG_1 );
+ hp100_page( PERFORMANCE );
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n", dev->name, val_VG, val_10 );
+#endif
+
+ if ( val_10 & HP100_LINK_BEAT_ST ) /* 10Mb connection is active */
+ return HP100_LAN_10;
+
+  if ( val_10 & HP100_AUI_ST )	/* do we have BNC or AUI onboard? */
+ {
+ val_10 |= HP100_AUI_SEL | HP100_LOW_TH;
+ hp100_page( MAC_CTRL );
+ hp100_outb( val_10, 10_LAN_CFG_1 );
+ hp100_page( PERFORMANCE );
+ return HP100_LAN_10;
+ }
+
+ if ( (lp->id->id == 0x02019F022) ||
+ (lp->id->id == 0x01042103c) ||
+ (lp->id->id == 0x01040103c) )
+ return HP100_LAN_ERR; /* Those cards don't have a 100 Mbit connector */
+
+  if ( val_VG & HP100_LINK_CABLE_ST ) /* Can hear the hub's tones. */
+ return HP100_LAN_100;
+ return HP100_LAN_ERR;
+}
+
+
+
+static int hp100_down_vg_link( struct device *dev )
+{
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ unsigned long time;
+ long savelan, newlan;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4224, TRACE );
+ printk("hp100: %s: down_vg_link\n", dev->name);
+#endif
+
+ hp100_page( MAC_CTRL );
+ time=jiffies+(HZ/4);
+ do{
+ if ( hp100_inb( VG_LAN_CFG_1 ) & HP100_LINK_CABLE_ST ) break;
+ } while (time>jiffies);
+
+ if ( jiffies >= time ) /* no signal->no logout */
+ return 0;
+
+ /* Drop the VG Link by clearing the link up cmd and load addr.*/
+
+ hp100_andb( ~( HP100_LOAD_ADDR| HP100_LINK_CMD), VG_LAN_CFG_1);
+ hp100_orb( HP100_VG_SEL, VG_LAN_CFG_1);
+
+ /* Conditionally stall for >250ms on Link-Up Status (to go down) */
+ time=jiffies+(HZ/2);
+ do{
+ if ( !(hp100_inb( VG_LAN_CFG_1) & HP100_LINK_UP_ST) ) break;
+ } while(time>jiffies);
+
+#ifdef HP100_DEBUG
+ if (jiffies>=time)
+ printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name);
+#endif
+
+ /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */
+ /* logout under traffic (even though all the status bits are cleared), */
+ /* do this workaround to get the Rev 1 MAC in its idle state */
+ if ( lp->chip==HP100_CHIPID_LASSEN )
+ {
+      /* Reset VG MAC to ensure it leaves the logoff state even if */
+ /* the Hub is still emitting tones */
+ hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1);
+ udelay(1500); /* wait for >1ms */
+ hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); /* Release Reset */
+ udelay(1500);
+ }
+
+ /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */
+  /* to get the VG mac to full reset. This is not required with later chips. */
+  /* Note: It will take between 1 and 2 seconds for the VG mac to be */
+ /* selected again! This will be left to the connect hub function to */
+ /* perform if desired. */
+ if (lp->chip==HP100_CHIPID_LASSEN)
+ {
+ /* Have to write to 10 and 100VG control registers simultaneously */
+ savelan=newlan=hp100_inl(10_LAN_CFG_1); /* read 10+100 LAN_CFG regs */
+ newlan &= ~(HP100_VG_SEL<<16);
+ newlan |= (HP100_DOT3_MAC)<<8;
+ hp100_andb( ~HP100_AUTO_MODE, MAC_CFG_3); /* Autosel off */
+ hp100_outl(newlan, 10_LAN_CFG_1);
+
+ /* Conditionally stall for 5sec on VG selected. */
+ time=jiffies+(HZ*5);
+ do{
+ if( !(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST) ) break;
+ } while(time>jiffies);
+
+ hp100_orb( HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */
+ hp100_outl(savelan, 10_LAN_CFG_1);
+ }
+
+ time=jiffies+(3*HZ); /* Timeout 3s */
+ do {
+ if ( (hp100_inb( VG_LAN_CFG_1 )&HP100_LINK_CABLE_ST) == 0) break;
+ } while (time>jiffies);
+
+ if(time<=jiffies)
+ {
+#ifdef HP100_DEBUG
+ printk( "hp100: %s: down_vg_link: timeout\n", dev->name );
+#endif
+ return -EIO;
+ }
+
+ time=jiffies+(2*HZ); /* This seems to take a while.... */
+ do {} while (time>jiffies);
+
+ return 0;
+}
+
+
+static int hp100_login_to_vg_hub( struct device *dev, u_short force_relogin )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ u_short val=0;
+ unsigned long time;
+ int startst;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4225, TRACE );
+ printk("hp100: %s: login_to_vg_hub\n", dev->name);
+#endif
+
+ /* Initiate a login sequence iff VG MAC is enabled and either Load Address
+ * bit is zero or the force relogin flag is set (e.g. due to MAC address or
+ * promiscuous mode change)
+ */
+ hp100_page( MAC_CTRL );
+ startst=hp100_inb( VG_LAN_CFG_1 );
+ if((force_relogin==TRUE)||(hp100_inb( MAC_CFG_4 )&HP100_MAC_SEL_ST))
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Start training\n", dev->name);
+#endif
+
+ /* Ensure VG Reset bit is 1 (i.e., do not reset)*/
+ hp100_orb( HP100_VG_RESET , VG_LAN_CFG_1 );
+
+ /* If Lassen AND auto-select-mode AND VG tones were sensed on */
+ /* entry, then temporarily put it into forced 100 Mbit mode */
+ if((lp->chip==HP100_CHIPID_LASSEN)&&( startst & HP100_LINK_CABLE_ST ) )
+ hp100_andb( ~HP100_DOT3_MAC, 10_LAN_CFG_2 );
+
+ /* Drop the VG link by zeroing Link Up Command and Load Address */
+ hp100_andb( ~(HP100_LINK_CMD/* |HP100_LOAD_ADDR */), VG_LAN_CFG_1);
+
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: Bring down the link\n", dev->name);
+#endif
+
+ /* Wait for link to drop */
+ time = jiffies + (HZ/10);
+ do {
+ if ( !(hp100_inb( VG_LAN_CFG_1 ) & HP100_LINK_UP_ST) ) break;
+ } while (time>jiffies);
+
+ /* Start an addressed training and optionally request promiscuous port */
+ if ( (dev->flags) & IFF_PROMISC )
+ {
+ hp100_orb( HP100_PROM_MODE, VG_LAN_CFG_2);
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ hp100_orw( HP100_MACRQ_PROMSC, TRAIN_REQUEST );
+ }
+ else
+ {
+ hp100_andb( ~HP100_PROM_MODE, VG_LAN_CFG_2);
+ /* For ETR parts we need to reset the prom. bit in the training
+ * register, otherwise promiscuous mode won't be disabled.
+ */
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ hp100_andw( ~HP100_MACRQ_PROMSC, TRAIN_REQUEST );
+ }
+ }
+
+ /* With ETR parts, frame format request bits can be set. */
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ hp100_orb( HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST);
+
+ hp100_orb( HP100_LINK_CMD|HP100_LOAD_ADDR|HP100_VG_RESET, VG_LAN_CFG_1);
+
+ /* Note: Next wait could be omitted for Hood and earlier chips under */
+ /* certain circumstances */
+ /* TODO: check if hood/earlier and skip wait. */
+
+ /* Wait for either short timeout for VG tones or long for login */
+ /* Wait for the card hardware to signal that link cable status is ok... */
+ hp100_page( MAC_CTRL );
+ time = jiffies + ( 1*HZ ); /* 1 sec timeout for cable st */
+ do {
+ if ( hp100_inb( VG_LAN_CFG_1 ) & HP100_LINK_CABLE_ST ) break;
+ } while ( jiffies < time );
+
+ if ( jiffies >= time )
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: Link cable status not ok? Training aborted.\n", dev->name );
+#endif
+ }
+ else
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: HUB tones detected. Trying to train.\n", dev->name);
+#endif
+
+ time = jiffies + ( 2*HZ ); /* again a timeout */
+ do {
+ val = hp100_inb( VG_LAN_CFG_1 );
+ if ( (val & ( HP100_LINK_UP_ST )) )
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: Passed training.\n", dev->name);
+#endif
+ break;
+ }
+ } while ( time > jiffies );
+ }
+
+ /* If LINK_UP_ST is set, then we are logged into the hub. */
+ if ( (jiffies<=time) && (val & HP100_LINK_UP_ST) )
+ {
+#ifdef HP100_DEBUG_TRAINING
+ printk( "hp100: %s: Successfully logged into the HUB.\n", dev->name);
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ val = hp100_inw(TRAIN_ALLOW);
+ printk( "hp100: %s: Card supports 100VG MAC Version \"%s\" ",
+ dev->name,(hp100_inw(TRAIN_REQUEST)&HP100_CARD_MACVER) ? "802.12" : "Pre");
+ printk( "Driver will use MAC Version \"%s\"\n",
+ ( val & HP100_HUB_MACVER) ? "802.12" : "Pre" );
+ printk( "hp100: %s: Frame format is %s.\n",dev->name,(val&HP100_MALLOW_FRAMEFMT)?"802.5":"802.3");
+ }
+#endif
+ }
+ else
+ {
+ /* If LINK_UP_ST is not set, login was not successful */
+ printk("hp100: %s: Problem logging into the HUB.\n",dev->name);
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ /* Check allowed Register to find out why there is a problem. */
+ val = hp100_inw( TRAIN_ALLOW ); /* won't work on non-ETR cards */
+#ifdef HP100_DEBUG_TRAINING
+ printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val);
+#endif
+ if ( val & HP100_MALLOW_ACCDENIED )
+ printk("hp100: %s: HUB access denied.\n", dev->name);
+ if ( val & HP100_MALLOW_CONFIGURE )
+ printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name);
+ if ( val & HP100_MALLOW_DUPADDR )
+ printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name);
+ }
+ }
+
+ /* If we have put the chip into forced 100 Mbit mode earlier, go back */
+ /* to auto-select mode */
+
+ if( (lp->chip==HP100_CHIPID_LASSEN)&&(startst & HP100_LINK_CABLE_ST) )
+ {
+ hp100_page( MAC_CTRL );
+ hp100_orb( HP100_DOT3_MAC, 10_LAN_CFG_2 );
+ }
+
+ val=hp100_inb(VG_LAN_CFG_1);
+
+ /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */
+ hp100_page(PERFORMANCE);
+ hp100_outw( HP100_MISC_ERROR, IRQ_STATUS);
+
+ if (val&HP100_LINK_UP_ST)
+ return(0); /* login was ok */
+ else
+ {
+ printk("hp100: %s: Training failed.\n", dev->name);
+ hp100_down_vg_link( dev );
+ return -EIO;
+ }
+ }
+ /* no forced relogin & link already up -> no training needed. */
+ return -EIO;
+}
+
+
+static void hp100_cascade_reset( struct device *dev, u_short enable )
+{
+ int ioaddr = dev->base_addr;
+ struct hp100_private *lp = (struct hp100_private *)dev->priv;
+ int i;
+
+#ifdef HP100_DEBUG_B
+ hp100_outw( 0x4226, TRACE );
+ printk("hp100: %s: cascade_reset\n", dev->name);
+#endif
+
+ if (enable==TRUE)
+ {
+ hp100_outw( HP100_HW_RST | HP100_RESET_LB, OPTION_LSW );
+ if(lp->chip==HP100_CHIPID_LASSEN)
+ {
+ /* Lassen requires a PCI transmit fifo reset */
+ hp100_page( HW_MAP );
+ hp100_andb( ~HP100_PCI_RESET, PCICTRL2 );
+ hp100_orb( HP100_PCI_RESET, PCICTRL2 );
+ /* Wait for min. 300 ns */
+ /* we can't use jiffies here, because the */
+ /* timer may have been disabled... */
+ for (i=0; i<0xffff; i++);
+ hp100_andb( ~HP100_PCI_RESET, PCICTRL2 );
+ hp100_page( PERFORMANCE );
+ }
+ }
+ else
+ { /* bring out of reset */
+ hp100_outw(HP100_HW_RST|HP100_SET_LB, OPTION_LSW);
+ for (i=0; i<0xffff; i++ );
+ hp100_page(PERFORMANCE);
+ }
+}
+
+#ifdef HP100_DEBUG
+void hp100_RegisterDump( struct device *dev )
+{
+ int ioaddr=dev->base_addr;
+ int Page;
+ int Register;
+
+ /* Dump common registers */
+ printk("hp100: %s: Cascade Register Dump\n", dev->name);
+ printk("hardware id #1: 0x%.2x\n",hp100_inb(HW_ID));
+ printk("hardware id #2/paging: 0x%.2x\n",hp100_inb(PAGING));
+ printk("option #1: 0x%.4x\n",hp100_inw(OPTION_LSW));
+ printk("option #2: 0x%.4x\n",hp100_inw(OPTION_MSW));
+
+ /* Dump paged registers */
+ for (Page = 0; Page < 8; Page++)
+ {
+ /* Dump registers */
+ printk("page: 0x%.2x\n",Page);
+ outw( Page, ioaddr+0x02);
+ for (Register = 0x8; Register < 0x22; Register += 2)
+ {
+ /* Display Register contents except data port */
+ if (((Register != 0x10) && (Register != 0x12)) || (Page > 0))
+ {
+ printk("0x%.2x = 0x%.4x\n",Register,inw(ioaddr+Register));
+ }
+ }
+ }
+ hp100_page(PERFORMANCE);
+}
+#endif
+
+
+
+/*
+ * module section
+ */
+
+#ifdef MODULE
+
+/* Parameters set by insmod */
+int hp100_port[5] = { 0, -1, -1, -1, -1 };
+#ifdef LINUX_2_1
+MODULE_PARM(hp100_port, "1-5i");
+#endif
+
+#ifdef LINUX_2_1
+char hp100_name[5][IFNAMSIZ] = { "", "", "", "", "" };
+MODULE_PARM(hp100_name, "1-5c" __MODULE_STRING(IFNAMSIZ));
+#else
+static char devname[5][IFNAMSIZ] = { "", "", "", "", "" };
+static char *hp100_name[5] = { devname[0], devname[1],
+ devname[2], devname[3],
+ devname[4] };
+#endif
+
+/* List of devices */
+static struct device *hp100_devlist[5] = { NULL, NULL, NULL, NULL, NULL };
+
+/*
+ * Note: if you have more than five 100vg cards in your pc, feel free to
+ * increase this value
+ */
+
+/*
+ * Note: to register three eisa or pci devices, use:
+ * option hp100 hp100_port=0,0,0
+ * to register one card at io 0x280 as eth239, use:
+ * option hp100 hp100_port=0x280 hp100_name=eth239
+ */
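+/*
+ * The same parameters can also be passed directly on the insmod command
+ * line, e.g. (illustrative only):
+ * insmod hp100.o hp100_port=0x280 hp100_name=eth239
+ */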
+
+int init_module( void )
+{
+ int i, cards;
+
+ if (hp100_port[0] == 0 && !EISA_bus && !pcibios_present())
+ printk("hp100: You should not use auto-probing with insmod!\n");
+
+ /* Loop on all possible base addresses */
+ i = -1; cards = 0;
+ while((hp100_port[++i] != -1) && (i < 5))
+ {
+ /* Create device and set basics args */
+ hp100_devlist[i] = kmalloc(sizeof(struct device), GFP_KERNEL);
+ memset(hp100_devlist[i], 0x00, sizeof(struct device));
+ hp100_devlist[i]->name = hp100_name[i];
+ hp100_devlist[i]->base_addr = hp100_port[i];
+ hp100_devlist[i]->init = &hp100_probe;
+
+ /* Try to create the device */
+ if(register_netdev(hp100_devlist[i]) != 0)
+ {
+ /* DeAllocate everything */
+ /* Note: if dev->priv is mallocated, there is no way to fail */
+ kfree_s(hp100_devlist[i], sizeof(struct device));
+ hp100_devlist[i] = (struct device *) NULL;
+ }
+ else
+ cards++;
+ } /* Loop over all devices */
+
+ return cards > 0 ? 0 : -ENODEV;
+}
+
+void cleanup_module( void )
+{
+ int i;
+
+ /* TODO: Check if all skb's are released/freed. */
+ for(i = 0; i < 5; i++)
+ if(hp100_devlist[i] != (struct device *) NULL)
+ {
+ unregister_netdev( hp100_devlist[i] );
+ release_region( hp100_devlist[i]->base_addr, HP100_REGION_SIZE );
+ if( ((struct hp100_private *)hp100_devlist[i]->priv)->mode==1 ) /* busmaster */
+ kfree_s( ((struct hp100_private *)hp100_devlist[i]->priv)->page_vaddr, MAX_RINGSIZE+0x0f);
+ if ( ((struct hp100_private *)hp100_devlist[i]->priv) -> mem_ptr_virt )
+ iounmap( ((struct hp100_private *)hp100_devlist[i]->priv) -> mem_ptr_virt );
+ kfree_s( hp100_devlist[i]->priv, sizeof( struct hp100_private ) );
+ hp100_devlist[i]->priv = NULL;
+ kfree_s(hp100_devlist[i], sizeof(struct device));
+ hp100_devlist[i] = (struct device *) NULL;
+ }
+}
+
+#endif /* MODULE */
+
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp100.c"
+ * c-indent-level: 2
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/net/hp100.h b/linux/src/drivers/net/hp100.h
new file mode 100644
index 0000000..e1884aa
--- /dev/null
+++ b/linux/src/drivers/net/hp100.h
@@ -0,0 +1,626 @@
+/*
+ * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux.
+ *
+ * $Id: hp100.h,v 1.1 1999/04/26 05:52:20 tb Exp $
+ *
+ * Authors: Jaroslav Kysela, <perex@pf.jcu.cz>
+ * Siegfried Loeffler <floeff@tunix.mathematik.uni-stuttgart.de>
+ *
+ * This driver is based on the 'hpfepkt' crynwr packet driver.
+ *
+ * This source/code is public free; you can distribute it and/or modify
+ * it under terms of the GNU General Public License (published by the
+ * Free Software Foundation) either version two of this License, or any
+ * later version.
+ */
+
+/****************************************************************************
+ * Hardware Constants
+ ****************************************************************************/
+
+/*
+ * Page Identifiers
+ * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02)
+ */
+
+#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */
+#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */
+#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */
+#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */
+#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */
+#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */
+#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */
+#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */
+
+
+/* Registers that are present on all pages */
+
+#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */
+#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */
+#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */
+ /* W: (16),3:0 Switch pages */
+#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */
+#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */
+
+/* Page 0 - Performance */
+
+#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */
+#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */
+#define HP100_REG_FRAGMENT_LEN 0x0c /* W: (16)12:0 Current fragment len */
+/* Note: For 32 bit systems, fragment len and offset registers are available */
+/* at offset 0x28 and 0x2c, where they can be written as 32bit values. */
+#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */
+#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */
+#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */
+#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */
+#define HP100_REG_TX_PDA_L 0x14 /* W: (32) BM: Ptr to PDL, Low Pri */
+#define HP100_REG_TX_PDA_H 0x1c /* W: (32) BM: Ptr to PDL, High Pri */
+#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */
+#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */
+#define HP100_REG_RX_PDL 0x1a /* R: (8) BM: # rx pdl not executed */
+#define HP100_REG_TX_PDL 0x1b /* R: (8) BM: # tx pdl not executed */
+#define HP100_REG_RX_PDA 0x18 /* W: (32) BM: Up to 31 addresses */
+ /* which point to a PDL */
+#define HP100_REG_SL_EARLY 0x1c /* (32) Enhanced Slave Early Rx */
+#define HP100_REG_STAT_DROPPED 0x20 /* R (12) Dropped Packet Counter */
+#define HP100_REG_STAT_ERRORED 0x22 /* R (8) Errored Packet Counter */
+#define HP100_REG_STAT_ABORT 0x23 /* R (8) Abort Counter/OW Coll. Flag */
+#define HP100_REG_RX_RING 0x24 /* W (32) Slave: RX Ring Pointers */
+#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */
+#define HP100_REG_32_OFFSET 0x2c /* W (16) Slave: Offset Register */
+
+/* Page 1 - MAC Address/Hash Table */
+
+#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */
+#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */
+
+/* Page 2 - Hardware Mapping */
+
+#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */
+#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */
+#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */
+#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */
+#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */
+#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */
+
+/* New on Page 2 for ETR chips: */
+#define HP100_REG_MODECTRL1 0x10 /* RW: (8) Mode Control 1 */
+#define HP100_REG_MODECTRL2 0x11 /* RW: (8) Mode Control 2 */
+#define HP100_REG_PCICTRL1 0x12 /* RW: (8) PCI Cfg 1 */
+#define HP100_REG_PCICTRL2 0x13 /* RW: (8) PCI Cfg 2 */
+#define HP100_REG_PCIBUSMLAT 0x15 /* RW: (8) PCI Bus Master Latency */
+#define HP100_REG_EARLYTXCFG 0x16 /* RW: (16) Early TX Cfg/Cntrl Reg */
+#define HP100_REG_EARLYRXCFG 0x18 /* RW: (8) Early RX Cfg/Cntrl Reg */
+#define HP100_REG_ISAPNPCFG1 0x1a /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */
+#define HP100_REG_ISAPNPCFG2 0x1b /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */
+
+/* Page 3 - EEPROM/Boot ROM */
+
+#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */
+#define HP100_REG_BOOTROM_CTRL 0x0a
+
+/* Page 4 - LAN Configuration (MAC_CTRL) */
+
+#define HP100_REG_10_LAN_CFG_1 0x08 /* RW: (8) Set 10M XCVR functions */
+#define HP100_REG_10_LAN_CFG_2 0x09 /* RW: (8) 10M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_1 0x0a /* RW: (8) Set 100M XCVR functions */
+#define HP100_REG_VG_LAN_CFG_2 0x0b /* RW: (8) 100M LAN Training cfgregs */
+#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */
+#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */
+#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */
+#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts can't fit in mem */
+#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */
+#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */
+#define HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register.*/
+#define HP100_REG_TRAIN_ALLOW 0x16 /* R: (16) Hub allowed register */
+
+/* Page 5 - MMU */
+
+#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */
+#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */
+#define HP100_REG_PDL_MEM_STOP 0x10 /* Not used by 802.12 devices */
+#define HP100_REG_ECB_MEM_STOP 0x14 /* I've no idea what this is */
+
+/* Page 6 - Card ID/Physical LAN Address */
+
+#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */
+#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */
+#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */
+#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */
+#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */
+
+/* Page 7 - MMU Current Pointers */
+
+#define HP100_REG_PTR_RXSTART 0x08 /* R: (16) Current begin of Rx ring */
+#define HP100_REG_PTR_RXEND 0x0a /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_TXSTART 0x0c /* R: (16) Current begin of Tx ring */
+#define HP100_REG_PTR_TXEND 0x0e /* R: (16) Current end of Rx ring */
+#define HP100_REG_PTR_RPDLSTART 0x10
+#define HP100_REG_PTR_RPDLEND 0x12
+#define HP100_REG_PTR_RINGPTRS 0x14
+#define HP100_REG_PTR_MEMDEBUG 0x1a
+/* ------------------------------------------------------------------------ */
+
+
+/*
+ * Hardware ID Register I (Always available, HW_ID, Offset 0x00)
+ */
+#define HP100_HW_ID_CASCADE 0x4850 /* Identifies Cascade Chip */
+
+/*
+ * Hardware ID Register 2 & Paging Register
+ * (Always available, PAGING, Offset 0x02)
+ * Bits 15:4 are for the Chip ID
+ */
+#define HP100_CHIPID_MASK 0xFFF0
+#define HP100_CHIPID_SHASTA 0x5350 /* Not 802.12 compliant */
+ /* EISA BM/SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_RAINIER 0x5360 /* Not 802.12 compliant EISA BM,*/
+ /* PCI SL, MCA16/32 SL, ISA SL */
+#define HP100_CHIPID_LASSEN 0x5370 /* 802.12 compliant PCI BM, PCI SL */
+ /* LRF supported */
+
+/*
+ * Option Registers I and II
+ * (Always available, OPTION_LSW, Offset 0x04-0x05)
+ */
+#define HP100_DEBUG_EN 0x8000 /* 0:Dis., 1:Enable Debug Dump Ptr. */
+#define HP100_RX_HDR 0x4000 /* 0:Dis., 1:Enable putting pkt into */
+ /* system mem. before Rx interrupt */
+#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable mem.mapping. */
+ /* MMAP_DIS must be 0 and MEM_EN */
+ /* must be 1 for memory-mapped */
+ /* mode to be enabled */
+#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */
+#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */
+#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */
+#define HP100_TRI_INT 0x0200 /* 0:Don't, 1:Do tri-state the int */
+#define HP100_MEM_EN 0x0040 /* Config program set this to */
+ /* 0:Disable, 1:Enable mem map. */
+ /* See MMAP_DIS. */
+#define HP100_IO_EN 0x0020 /* 1:Enable I/O transfers */
+#define HP100_BOOT_EN 0x0010 /* 1:Enable boot ROM access */
+#define HP100_FAKE_INT 0x0008 /* 1:int */
+#define HP100_INT_EN 0x0004 /* 1:Enable ints from card */
+#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */
+ /* NIC reset on 0 to 1 transition */
+
+/*
+ * Option Register III
+ * (Always available, OPTION_MSW, Offset 0x06)
+ */
+#define HP100_PRIORITY_TX 0x0080 /* 1:Do all Tx pkts as priority */
+#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */
+#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue */
+ /* h/w will set to 0 when done */
+#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w */
+ /* will set to 0 when done */
+
+/*
+ * Interrupt Status Registers I and II
+ * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09)
+ * Note: With old chips, these Registers will clear when 1 is written to them
+ * with new chips this depends on setting of CLR_ISMODE
+ */
+#define HP100_RX_EARLY_INT 0x2000
+#define HP100_RX_PDA_ZERO 0x1000
+#define HP100_RX_PDL_FILL_COMPL 0x0800
+#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */
+#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */
+#define HP100_TX_PDA_ZERO 0x0020 /* 1 when PDA count goes to zero */
+#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */
+#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */
+#define HP100_MISC_ERROR 0x0004 /* 0:No, 1:Lan Link down or bus error*/
+#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */
+
+/*
+ * Xmit Memory Free Count
+ * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit)
+ */
+#define HP100_AUTO_COMPARE 0x80000000 /* Tx Space avail & pkts<255 */
+#define HP100_FREE_SPACE 0x7fffffe0 /* Tx free memory */
+
+/*
+ * IRQ Channel
+ * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d)
+ */
+#define HP100_ZERO_WAIT_EN 0x80 /* 0:No, 1:Yes asserts NOWS signal */
+#define HP100_IRQ_SCRAMBLE 0x40
+#define HP100_BOND_HP 0x20
+#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */
+ /* (Only valid on EISA cards) */
+#define HP100_IRQMASK 0x0F /* Isolate the IRQ bits */
+
+/*
+ * SRAM Parameters
+ * (Page HW_MAP, SRAM, Offset 0x0e)
+ */
+#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */
+#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count(put index in lwr bits)*/
+
+/*
+ * Bus Master Register
+ * (Page HW_MAP, BM, Offset 0x0f)
+ */
+#define HP100_BM_BURST_RD 0x01 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (tx) */
+#define HP100_BM_BURST_WR 0x02 /* EISA only: 1=Use burst trans. fm system */
+ /* memory to chip (rx) */
+#define HP100_BM_MASTER 0x04 /* 0:Slave, 1:BM mode */
+#define HP100_BM_PAGE_CK 0x08 /* This bit should be set whenever in*/
+ /* an EISA system */
+#define HP100_BM_PCI_8CLK 0x40 /* ... cycles 8 clocks apart */
+
+
+/*
+ * Mode Control Register I
+ * (Page HW_MAP, MODECTRL1, Offset0x10)
+ */
+#define HP100_TX_DUALQ 0x10
+ /* If set and BM -> dual tx pda queues*/
+#define HP100_ISR_CLRMODE 0x02 /* If set ISR will clear all pending */
+ /* interrupts on read (etr only?) */
+#define HP100_EE_NOLOAD 0x04 /* Status whether res will be loaded */
+ /* from the eeprom */
+#define HP100_TX_CNT_FLG 0x08 /* Controls Early TX Reg Cnt Field */
+#define HP100_PDL_USE3 0x10 /* If set BM engine will read only */
+ /* first three data elements of a PDL */
+ /* on the first access. */
+#define HP100_BUSTYPE_MASK 0xe0 /* Three bit bus type info */
+
+/*
+ * Mode Control Register II
+ * (Page HW_MAP, MODECTRL2, Offset0x11)
+ */
+#define HP100_EE_MASK 0x0f /* Tell EEPROM circuit not to load */
+ /* certain resources */
+#define HP100_DIS_CANCEL 0x20 /* For tx dualq mode operation */
+#define HP100_EN_PDL_WB 0x40 /* 1: Status of PDL completion may be */
+ /* written back to system mem */
+#define HP100_EN_BUS_FAIL 0x80 /* Enables bus-fail portion of misc */
+ /* interrupt */
+
+/*
+ * PCI Configuration and Control Register I
+ * (Page HW_MAP, PCICTRL1, Offset 0x12)
+ */
+#define HP100_LO_MEM 0x01 /* 1: Mapped Mem requested below 1MB */
+#define HP100_NO_MEM 0x02 /* 1: Disables Req for sysmem to PCI */
+ /* bios */
+#define HP100_USE_ISA 0x04 /* 1: isa type decodes will occur */
+ /* simultaneously with PCI decodes */
+#define HP100_IRQ_HI_MASK 0xf0 /* pgmed by pci bios */
+#define HP100_PCI_IRQ_HI_MASK 0x78 /* Isolate 4 bits for PCI IRQ */
+
+/*
+ * PCI Configuration and Control Register II
+ * (Page HW_MAP, PCICTRL2, Offset 0x13)
+ */
+#define HP100_RD_LINE_PDL 0x01 /* 1: PCI command Memory Read Line en */
+#define HP100_RD_TX_DATA_MASK 0x06 /* choose PCI memread cmds for TX */
+#define HP100_MWI 0x08 /* 1: en. PCI memory write invalidate */
+#define HP100_ARB_MODE 0x10 /* Select PCI arbitor type */
+#define HP100_STOP_EN 0x20 /* Enables PCI state machine to issue */
+ /* pci stop if cascade not ready */
+#define HP100_IGNORE_PAR 0x40 /* 1: PCI state machine ignores parity*/
+#define HP100_PCI_RESET 0x80 /* 0->1: Reset PCI block */
+
+/*
+ * Early TX Configuration and Control Register
+ * (Page HW_MAP, EARLYTXCFG, Offset 0x16)
+ */
+#define HP100_EN_EARLY_TX 0x8000 /* 1=Enable Early TX */
+#define HP100_EN_ADAPTIVE 0x4000 /* 1=Enable adaptive mode */
+#define HP100_EN_TX_UR_IRQ 0x2000 /* reserved, must be 0 */
+#define HP100_EN_LOW_TX 0x1000 /* reserved, must be 0 */
+#define HP100_ET_CNT_MASK 0x0fff /* bits 11..0: ET counters */
+
+/*
+ * Early RX Configuration and Control Register
+ * (Page HW_MAP, EARLYRXCFG, Offset 0x18)
+ */
+#define HP100_EN_EARLY_RX 0x80 /* 1=Enable Early RX */
+#define HP100_EN_LOW_RX 0x40 /* reserved, must be 0 */
+#define HP100_RX_TRIP_MASK 0x1f /* bits 4..0: threshold at which the
+ * early rx circuit will start the
+ * dma of received packet into system
+ * memory for BM */
+
+/*
+ * Serial Devices Control Register
+ * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08)
+ */
+#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads EEPROM into registers. */
+ /* When it goes back to 0, load is */
+ /* complete. This should take ~600us.*/
+
+/*
+ * 10MB LAN Control and Configuration Register I
+ * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08)
+ */
+#define HP100_MAC10_SEL 0xc0 /* Get bits to indicate MAC */
+#define HP100_AUI_SEL 0x20 /* Status of AUI selection */
+#define HP100_LOW_TH 0x10 /* 0:No, 1:Yes allow better cabling */
+#define HP100_LINK_BEAT_DIS 0x08 /* 0:Enable, 1:Disable link beat */
+#define HP100_LINK_BEAT_ST 0x04 /* 0:No, 1:Yes link beat being Rx */
+#define HP100_R_ROL_ST 0x02 /* 0:No, 1:Yes Rx twisted pair has */
+ /* been reversed */
+#define HP100_AUI_ST 0x01 /* 0:No, 1:Yes use AUI on TP card */
+
+/*
+ * 10 MB LAN Control and Configuration Register II
+ * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09)
+ */
+#define HP100_SQU_ST 0x01 /* 0:No, 1:Yes collision signal sent */
+ /* after Tx.Only used for AUI. */
+#define HP100_FULLDUP 0x02 /* 1: LXT901 XCVR fullduplx enabled */
+#define HP100_DOT3_MAC 0x04 /* 1: DOT 3 Mac sel. unless Autosel */
+
+/*
+ * MAC Selection, use with MAC10_SEL bits
+ */
+#define HP100_AUTO_SEL_10 0x0 /* Auto select */
+#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */
+#define HP100_XCVR_7213 0x2 /* 7213 transceiver */
+#define HP100_XCVR_82503 0x3 /* 82503 transceiver */
+
+/*
+ * 100MB LAN Training Register
+ * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12)
+ */
+#define HP100_FRAME_FORMAT 0x08 /* 0:802.3, 1:802.5 frames */
+#define HP100_BRIDGE 0x04 /* 0:No, 1:Yes tell hub i am a bridge */
+#define HP100_PROM_MODE 0x02 /* 0:No, 1:Yes tell hub card is */
+ /* promiscuous */
+#define HP100_REPEATER 0x01 /* 0:No, 1:Yes tell hub MAC wants to */
+ /* be a cascaded repeater */
+
+/*
+ * 100MB LAN Control and Configuration Register
+ * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a)
+ */
+#define HP100_VG_SEL 0x80 /* 0:No, 1:Yes use 100 Mbit MAC */
+#define HP100_LINK_UP_ST 0x40 /* 0:No, 1:Yes endnode logged in */
+#define HP100_LINK_CABLE_ST 0x20 /* 0:No, 1:Yes cable can hear tones */
+ /* from hub */
+#define HP100_LOAD_ADDR 0x10 /* 0->1 card addr will be sent */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_LINK_CMD 0x08 /* 0->1 link will attempt to log in. */
+ /* 100ms later the link status */
+ /* bits are valid */
+#define HP100_TRN_DONE 0x04 /* NEW ETR-Chips only: Will be reset */
+ /* after LinkUp Cmd is given and set */
+ /* when training has completed. */
+#define HP100_LINK_GOOD_ST 0x02 /* 0:No, 1:Yes cable passed training */
+#define HP100_VG_RESET 0x01 /* 0:Yes, 1:No reset the 100VG MAC */
+
+
+/*
+ * MAC Configuration Register I
+ * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c)
+ */
+#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */
+#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */
+#define HP100_RX_EN 0x20 /* 1: allow receiving of pkts */
+#define HP100_TX_EN 0x10 /* 1: allow transmitting of pkts */
+#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */
+#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */
+#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */
+#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL phys. pkts */
+#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */
+#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */
+#define HP100_MAC1MODE2 0x00
+#define HP100_MAC1MODE3 HP100_MAC1MODE2 | HP100_ACC_BC
+#define HP100_MAC1MODE4 HP100_MAC1MODE3 | HP100_ACC_MC
+#define HP100_MAC1MODE5 HP100_MAC1MODE4 /* set mc hash to all ones also */
+#define HP100_MAC1MODE6 HP100_MAC1MODE5 | HP100_ACC_PHY /* Promiscuous */
+/* Note MODE6 will receive all GOOD packets on the LAN. This really needs
+ a mode 7 defined to be LAN Analyzer mode, which will receive errored and
+ runt packets, and keep the CRC bytes. */
+#define HP100_MAC1MODE7 HP100_MAC1MODE6 | HP100_ACC_ERRORED
+
+/*
+ * MAC Configuration Register II
+ * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d)
+ */
+#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */
+#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */
+#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */
+ /* transceiver */
+#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */
+#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */
+#define HP100_ACCNA 0x04 /* 1: For 802.5: Accept only token ring
+ * group addr that matches NA mask */
+#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. */
+ /* The length will reflect this. */
+#define HP100_ACCFA 0x01 /* 1: For 802.5: Accept only functional
+ * addrs that match FA mask (page1) */
+#define HP100_MAC2MODEMASK 0x02
+#define HP100_MAC2MODE1 0x00
+#define HP100_MAC2MODE2 0x00
+#define HP100_MAC2MODE3 0x00
+#define HP100_MAC2MODE4 0x00
+#define HP100_MAC2MODE5 0x00
+#define HP100_MAC2MODE6 0x00
+#define HP100_MAC2MODE7 HP100_KEEP_CRC
+
+/*
+ * MAC Configuration Register III
+ * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e)
+ */
+#define HP100_PACKET_PACE 0x03 /* Packet Pacing:
+ * 00: No packet pacing
+ * 01: 8 to 16 uS delay
+ * 10: 16 to 32 uS delay
+ * 11: 32 to 64 uS delay
+ */
+#define HP100_LRF_EN 0x04 /* 1: External LAN Rcv Filter and
+ * TCP/IP Checksumming enabled. */
+#define HP100_AUTO_MODE 0x10 /* 1: AutoSelect between 10/100 */
+
+/*
+ * MAC Configuration Register IV
+ * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f)
+ */
+#define HP100_MAC_SEL_ST 0x01 /* (R): Status of external VGSEL
+ * Signal, 1=100VG, 0=10Mbit sel. */
+#define HP100_LINK_FAIL_ST 0x02 /* (R): Status of Link Fail portion
+ * of the Misc. Interrupt */
+
+/*
+ * 100 MB LAN Training Request/Allowed Registers
+ * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only)
+ */
+#define HP100_MACRQ_REPEATER 0x0001 /* 1: MAC tells HUB it wants to be
+ * a cascaded repeater
+ * 0: ... wants to be a DTE */
+#define HP100_MACRQ_PROMSC 0x0006 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MACRQ_FRAMEFMT_EITHER 0x0018 /* 11: either format allowed */
+#define HP100_MACRQ_FRAMEFMT_802_3 0x0000 /* 00: 802.3 is requested */
+#define HP100_MACRQ_FRAMEFMT_802_5 0x0010 /* 10: 802.5 format is requested */
+#define HP100_CARD_MACVER 0xe000 /* R: 3 bit Cards 100VG MAC version */
+#define HP100_MALLOW_REPEATER 0x0001 /* If reset, requested access as an
+ * end node is allowed */
+#define HP100_MALLOW_PROMSC 0x0004 /* 2 bits: Promiscuous mode
+ * 00: Rcv only unicast packets
+ * specifically addr to this
+ * endnode
+ * 10: Rcv all pckts fwded by
+ * the local repeater */
+#define HP100_MALLOW_FRAMEFMT 0x00e0 /* 2 bits: Frame Format
+ * 00: 802.3 format will be used
+ * 10: 802.5 format will be used */
+#define HP100_MALLOW_ACCDENIED 0x0400 /* N bit */
+#define HP100_MALLOW_CONFIGURE 0x0f00 /* C bit */
+#define HP100_MALLOW_DUPADDR 0x1000 /* D bit */
+#define HP100_HUB_MACVER 0xe000 /* R: 3 bit 802.12 MAC/RMAC training */
+ /* protocol of repeater */
+
+/* ****************************************************************************** */
+
+/*
+ * Set/Reset bits
+ */
+#define HP100_SET_HB 0x0100 /* 0:Set fields to 0 whose mask is 1 */
+#define HP100_SET_LB 0x0001 /* HB sets upper byte, LB sets lower byte */
+#define HP100_RESET_HB 0x0000 /* For readability when resetting bits */
+#define HP100_RESET_LB 0x0000 /* For readability when resetting bits */
+
+/*
+ * Misc. Constants
+ */
+#define HP100_LAN_100 100 /* lan_type value for VG */
+#define HP100_LAN_10 10 /* lan_type value for 10BaseT */
+#define HP100_LAN_ERR (-1) /* lan_type value for link down */
+
+#define TRUE 1
+#define FALSE 0
+
+
+/*
+ * Bus Master Data Structures ----------------------------------------------
+ */
+
+#define MAX_RX_PDL 30 /* Card limit = 31 */
+#define MAX_RX_FRAG 2 /* Don't need more... */
+#define MAX_TX_PDL 29
+#define MAX_TX_FRAG 2 /* Limit = 31 */
+
+/* Define total PDL area size in bytes (should be 4096) */
+/* This is the size of kernel (dma) memory that will be allocated. */
+#define MAX_RINGSIZE ((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16
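+/* With the limits above this works out to (2*8+4+4)*30 + (2*8+4+4)*29 + 16 */
+/* = 720 + 696 + 16 = 1432 bytes, i.e. it fits comfortably within a single */
+/* 4096-byte page (presumably what the "should be 4096" note above refers to). */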
+
+/* Ethernet Packet Sizes */
+#define MIN_ETHER_SIZE 60
+#define MAX_ETHER_SIZE 1514 /* Needed for preallocation of */
+ /* skb buffer when busmastering */
+
+/* Tx or Rx Ring Entry */
+typedef struct hp100_ring {
+ u_int *pdl; /* Address of PDLs PDH, dword before
+ * this address is used for rx hdr */
+ u_int pdl_paddr; /* Physical address of PDL */
+ struct sk_buff *skb;
+ struct hp100_ring *next;
+} hp100_ring_t;
+
+
+
+/* Mask for Header Descriptor */
+#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length */
+
+
+/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED
+ bit in the MAC Configuration Register 1 is set. */
+#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */
+#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */
+#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */
+#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */
+#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */
+ /* marker */
+#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */
+#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */
+#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */
+#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */
+ /* Length Reg. */
+#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */
+#define HP100_CRC_ERR 0x0008 /* 0:No, 1:Yes CRC occurred. */
+
+/* The last three bits indicate the type of destination address */
+
+#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */
+#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */
+#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */
+#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */
+#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */
+
+/*
+ * macros
+ */
+
+#define hp100_inb( reg ) \
+ inb( ioaddr + HP100_REG_##reg )
+#define hp100_inw( reg ) \
+ inw( ioaddr + HP100_REG_##reg )
+#define hp100_inl( reg ) \
+ inl( ioaddr + HP100_REG_##reg )
+#define hp100_outb( data, reg ) \
+ outb( data, ioaddr + HP100_REG_##reg )
+#define hp100_outw( data, reg ) \
+ outw( data, ioaddr + HP100_REG_##reg )
+#define hp100_outl( data, reg ) \
+ outl( data, ioaddr + HP100_REG_##reg )
+#define hp100_orb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_orw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg )
+#define hp100_andb( data, reg ) \
+ outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+#define hp100_andw( data, reg ) \
+ outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg )
+
+#define hp100_page( page ) \
+ outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING )
+#define hp100_ints_off() \
+ outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_ints_on() \
+ outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_enable() \
+ outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW )
+#define hp100_mem_map_disable() \
+ outw( HP100_MMAP_DIS | HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW )
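+
+/* Illustrative usage (a minimal sketch): all of the macros above expect a
+ * local variable `ioaddr' holding dev->base_addr to be in scope at the call
+ * site, as the functions in hp100.c declare. For example,
+ *
+ *   int ioaddr = dev->base_addr;
+ *   hp100_page( MAC_CTRL );
+ *   val = hp100_inb( VG_LAN_CFG_1 );
+ *
+ * expands to
+ *
+ *   outw( HP100_PAGE_MAC_CTRL, ioaddr + HP100_REG_PAGING );
+ *   val = inb( ioaddr + HP100_REG_VG_LAN_CFG_1 );
+ */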
+
+
+/*
+ * Local variables:
+ * c-indent-level: 2
+ * tab-width: 8
+ * End:
+*/
diff --git a/linux/src/drivers/net/i82586.h b/linux/src/drivers/net/i82586.h
new file mode 100644
index 0000000..d41702e
--- /dev/null
+++ b/linux/src/drivers/net/i82586.h
@@ -0,0 +1,413 @@
+/*
+ * Intel 82586 IEEE 802.3 Ethernet LAN Coprocessor.
+ *
+ * See:
+ * Intel Microcommunications 1991
+ * p1-1 to p1-37
+ * Intel order No. 231658
+ * ISBN 1-55512-119-5
+ *
+ * Unfortunately, the above chapter mentions neither
+ * the System Configuration Pointer (SCP) nor the
+ * Intermediate System Configuration Pointer (ISCP),
+ * so we probably need to look elsewhere for the
+ * whole story -- some recommend the "Intel LAN
+ * Components manual" but I have neither a copy
+ * nor a full reference. But "elsewhere" may be
+ * in the same publication...
+ * The description of a later device, the
+ * "82596CA High-Performance 32-Bit Local Area Network
+ * Coprocessor", (ibid. p1-38 to p1-109) does mention
+ * the SCP and ISCP and also has an i82586 compatibility
+ * mode. Even more useful is "AP-235 An 82586 Data Link
+ * Driver" (ibid. p1-337 to p1-417).
+ */
+
+#define I82586_MEMZ (64 * 1024)
+
+#define I82586_SCP_ADDR (I82586_MEMZ - sizeof(scp_t))
+
+#define ADDR_LEN 6
+#define I82586NULL 0xFFFF
+
+#define toff(t,p,f) (unsigned short)((void *)(&((t *)((void *)0 + (p)))->f) - (void *)0)
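+/* toff(t,p,f) is effectively p + offsetof(t, f): for example, toff(scb_t, 0,
+ * scb_command) gives the byte offset of the scb_command field of an scb_t
+ * placed at on-board offset 0. The scboff()/acoff()/rbdoff()/fdoff()
+ * wrappers below merely fix the structure type. */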
+
+/*
+ * System Configuration Pointer (SCP).
+ */
+typedef struct scp_t scp_t;
+struct scp_t
+{
+ unsigned short scp_sysbus; /* 82586 bus width: */
+#define SCP_SY_16BBUS (0x0 << 0) /* 16 bits */
+#define SCP_SY_8BBUS (0x1 << 0) /* 8 bits. */
+ unsigned short scp_junk[2]; /* Unused */
+ unsigned short scp_iscpl; /* lower 16 bits of ISCP_ADDR */
+ unsigned short scp_iscph; /* upper 16 bits of ISCP_ADDR */
+};
+
+/*
+ * Intermediate System Configuration Pointer (ISCP).
+ */
+typedef struct iscp_t iscp_t;
+struct iscp_t
+{
+ unsigned short iscp_busy; /* set by CPU before first CA, */
+ /* cleared by 82586 after read. */
+ unsigned short iscp_offset; /* offset of SCB */
+ unsigned short iscp_basel; /* base of SCB */
+ unsigned short iscp_baseh; /* " */
+};
+
+/*
+ * System Control Block (SCB).
+ * The 82586 writes its status to scb_status and then
+ * raises an interrupt to alert the CPU.
+ * The CPU writes a command to scb_command and
+ * then issues a Channel Attention (CA) to alert the 82586.
+ */
+typedef struct scb_t scb_t;
+struct scb_t
+{
+ unsigned short scb_status; /* Status of 82586 */
+#define SCB_ST_INT (0xF << 12) /* Some of: */
+#define SCB_ST_CX (0x1 << 15) /* Cmd completed */
+#define SCB_ST_FR (0x1 << 14) /* Frame received */
+#define SCB_ST_CNA (0x1 << 13) /* Cmd unit not active */
+#define SCB_ST_RNR (0x1 << 12) /* Rcv unit not ready */
+#define SCB_ST_JUNK0 (0x1 << 11) /* 0 */
+#define SCB_ST_CUS (0x7 << 8) /* Cmd unit status */
+#define SCB_ST_CUS_IDLE (0 << 8) /* Idle */
+#define SCB_ST_CUS_SUSP (1 << 8) /* Suspended */
+#define SCB_ST_CUS_ACTV (2 << 8) /* Active */
+#define SCB_ST_JUNK1 (0x1 << 7) /* 0 */
+#define SCB_ST_RUS (0x7 << 4) /* Rcv unit status */
+#define SCB_ST_RUS_IDLE (0 << 4) /* Idle */
+#define SCB_ST_RUS_SUSP (1 << 4) /* Suspended */
+#define SCB_ST_RUS_NRES (2 << 4) /* No resources */
+#define SCB_ST_RUS_RDY (4 << 4) /* Ready */
+ unsigned short scb_command; /* Next command */
+#define SCB_CMD_ACK_CX (0x1 << 15) /* Ack cmd completion */
+#define SCB_CMD_ACK_FR (0x1 << 14) /* Ack frame received */
+#define SCB_CMD_ACK_CNA (0x1 << 13) /* Ack CU not active */
+#define SCB_CMD_ACK_RNR (0x1 << 12) /* Ack RU not ready */
+#define SCB_CMD_JUNKX (0x1 << 11) /* Unused */
+#define SCB_CMD_CUC (0x7 << 8) /* Command Unit command */
+#define SCB_CMD_CUC_NOP (0 << 8) /* Nop */
+#define SCB_CMD_CUC_GO (1 << 8) /* Start cbl_offset */
+#define SCB_CMD_CUC_RES (2 << 8) /* Resume execution */
+#define SCB_CMD_CUC_SUS (3 << 8) /* Suspend " */
+#define SCB_CMD_CUC_ABT (4 << 8) /* Abort " */
+#define SCB_CMD_RESET (0x1 << 7) /* Reset chip (hardware) */
+#define SCB_CMD_RUC (0x7 << 4) /* Receive Unit command */
+#define SCB_CMD_RUC_NOP (0 << 4) /* Nop */
+#define SCB_CMD_RUC_GO (1 << 4) /* Start rfa_offset */
+#define SCB_CMD_RUC_RES (2 << 4) /* Resume reception */
+#define SCB_CMD_RUC_SUS (3 << 4) /* Suspend " */
+#define SCB_CMD_RUC_ABT (4 << 4) /* Abort " */
+ unsigned short scb_cbl_offset; /* Offset of first command unit */
+ /* Action Command */
+ unsigned short scb_rfa_offset; /* Offset of first Receive */
+ /* Frame Descriptor in the */
+ /* Receive Frame Area */
+ unsigned short scb_crcerrs; /* Properly aligned frames */
+ /* received with a CRC error */
+ unsigned short scb_alnerrs; /* Misaligned frames received */
+ /* with a CRC error */
+ unsigned short scb_rscerrs; /* Frames lost due to no space */
+ unsigned short scb_ovrnerrs; /* Frames lost due to slow bus */
+};
+
+#define scboff(p,f) toff(scb_t, p, f)
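+
+/* A minimal sketch of the CPU side of the handshake described above, assuming
+ * hypothetical board_read16()/board_write16() shared-memory accessors and a
+ * board-specific channel-attention strobe ca() (none of which are defined in
+ * this header):
+ *
+ *   status = board_read16(scb_addr + scboff(0, scb_status));
+ *   // acknowledge the reported interrupt causes and start the command unit
+ *   board_write16(scb_addr + scboff(0, scb_command),
+ *                 (status & SCB_ST_INT) | SCB_CMD_CUC_GO);
+ *   ca();    // board-specific channel attention
+ */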
+
+/*
+ * The eight Action Commands.
+ */
+typedef enum acmd_e acmd_e;
+enum acmd_e
+{
+ acmd_nop = 0, /* Do nothing */
+ acmd_ia_setup = 1, /* Load an (ethernet) address into the */
+ /* 82586 */
+ acmd_configure = 2, /* Update the 82586 operating parameters */
+ acmd_mc_setup = 3, /* Load a list of (ethernet) multicast */
+ /* addresses into the 82586 */
+ acmd_transmit = 4, /* Transmit a frame */
+ acmd_tdr = 5, /* Perform a Time Domain Reflectometer */
+ /* test on the serial link */
+ acmd_dump = 6, /* Copy 82586 registers to memory */
+ acmd_diagnose = 7, /* Run an internal self test */
+};
+
+/*
+ * Generic Action Command header.
+ */
+typedef struct ach_t ach_t;
+struct ach_t
+{
+ unsigned short ac_status; /* Command status: */
+#define AC_SFLD_C (0x1 << 15) /* Command completed */
+#define AC_SFLD_B (0x1 << 14) /* Busy executing */
+#define AC_SFLD_OK (0x1 << 13) /* Completed error free */
+#define AC_SFLD_A (0x1 << 12) /* Command aborted */
+#define AC_SFLD_FAIL (0x1 << 11) /* Selftest failed */
+#define AC_SFLD_S10 (0x1 << 10) /* No carrier sense */
+ /* during transmission */
+#define AC_SFLD_S9 (0x1 << 9) /* Tx unsuccessful: */
+ /* (stopped) lost CTS */
+#define AC_SFLD_S8 (0x1 << 8) /* Tx unsuccessful: */
+ /* (stopped) slow DMA */
+#define AC_SFLD_S7 (0x1 << 7) /* Tx deferred: */
+ /* other link traffic */
+#define AC_SFLD_S6 (0x1 << 6) /* Heart Beat: collision */
+ /* detect after last tx */
+#define AC_SFLD_S5 (0x1 << 5) /* Tx stopped: */
+ /* excessive collisions */
+#define AC_SFLD_MAXCOL (0xF << 0) /* Collision count */
+ unsigned short ac_command; /* Command specifier: */
+#define AC_CFLD_EL (0x1 << 15) /* End of command list */
+#define AC_CFLD_S (0x1 << 14) /* Suspend on completion */
+#define AC_CFLD_I (0x1 << 13) /* Interrupt on completion */
+#define AC_CFLD_CMD (0x7 << 0) /* acmd_e */
+ unsigned short ac_link; /* Next Action Command */
+};
+
+#define acoff(p,f) toff(ach_t, p, f)
+
+/*
+ * The Nop Action Command.
+ */
+typedef struct ac_nop_t ac_nop_t;
+struct ac_nop_t
+{
+ ach_t nop_h;
+};
+
+/*
+ * The IA-Setup Action Command.
+ */
+typedef struct ac_ias_t ac_ias_t;
+struct ac_ias_t
+{
+ ach_t ias_h;
+ unsigned char ias_addr[ADDR_LEN]; /* The (ethernet) address */
+};
+
+/*
+ * The Configure Action Command.
+ */
+typedef struct ac_cfg_t ac_cfg_t;
+struct ac_cfg_t
+{
+ ach_t cfg_h;
+ unsigned char cfg_byte_cnt; /* Size foll data: 4-12 */
+#define AC_CFG_BYTE_CNT(v) (((v) & 0xF) << 0)
+ unsigned char cfg_fifolim; /* FIFO threshold */
+#define AC_CFG_FIFOLIM(v) (((v) & 0xF) << 0)
+ unsigned char cfg_byte8;
+#define AC_CFG_SAV_BF(v) (((v) & 0x1) << 7) /* Save rxd bad frames */
+#define AC_CFG_SRDY(v) (((v) & 0x1) << 6) /* SRDY/ARDY pin means */
+ /* external sync. */
+ unsigned char cfg_byte9;
+#define AC_CFG_ELPBCK(v) (((v) & 0x1) << 7) /* External loopback */
+#define AC_CFG_ILPBCK(v) (((v) & 0x1) << 6) /* Internal loopback */
+#define AC_CFG_PRELEN(v) (((v) & 0x3) << 4) /* Preamble length */
+#define AC_CFG_PLEN_2 0 /* 2 bytes */
+#define AC_CFG_PLEN_4 1 /* 4 bytes */
+#define AC_CFG_PLEN_8 2 /* 8 bytes */
+#define AC_CFG_PLEN_16 3 /* 16 bytes */
+#define AC_CFG_ALOC(v) (((v) & 0x1) << 3) /* Addr/len data is */
+ /* explicit in buffers */
+#define AC_CFG_ADDRLEN(v) (((v) & 0x7) << 0) /* Bytes per address */
+ unsigned char cfg_byte10;
+#define AC_CFG_BOFMET(v) (((v) & 0x1) << 7) /* Use alternate expo. */
+ /* backoff method */
+#define AC_CFG_ACR(v) (((v) & 0x7) << 4) /* Accelerated cont. res. */
+#define AC_CFG_LINPRIO(v) (((v) & 0x7) << 0) /* Linear priority */
+ unsigned char cfg_ifs; /* Interframe spacing */
+ unsigned char cfg_slotl; /* Slot time (low byte) */
+ unsigned char cfg_byte13;
+#define AC_CFG_RETRYNUM(v) (((v) & 0xF) << 4) /* Max. collision retry */
+#define AC_CFG_SLTTMHI(v) (((v) & 0x7) << 0) /* Slot time (high bits) */
+ unsigned char cfg_byte14;
+#define AC_CFG_FLGPAD(v) (((v) & 0x1) << 7) /* Pad with HDLC flags */
+#define AC_CFG_BTSTF(v) (((v) & 0x1) << 6) /* Do HDLC bitstuffing */
+#define AC_CFG_CRC16(v) (((v) & 0x1) << 5) /* 16 bit CCITT CRC */
+#define AC_CFG_NCRC(v) (((v) & 0x1) << 4) /* Insert no CRC */
+#define AC_CFG_TNCRS(v) (((v) & 0x1) << 3) /* Tx even if no carrier */
+#define AC_CFG_MANCH(v) (((v) & 0x1) << 2) /* Manchester coding */
+#define AC_CFG_BCDIS(v) (((v) & 0x1) << 1) /* Disable broadcast */
+#define AC_CFG_PRM(v) (((v) & 0x1) << 0) /* Promiscuous mode */
+ unsigned char cfg_byte15;
+#define AC_CFG_ICDS(v) (((v) & 0x1) << 7) /* Internal collision */
+ /* detect source */
+#define AC_CFG_CDTF(v) (((v) & 0x7) << 4) /* Collision detect */
+ /* filter in bit times */
+#define AC_CFG_ICSS(v) (((v) & 0x1) << 3) /* Internal carrier */
+ /* sense source */
+#define AC_CFG_CSTF(v) (((v) & 0x7) << 0) /* Carrier sense */
+ /* filter in bit times */
+ unsigned short cfg_min_frm_len;
+#define AC_CFG_MNFRM(v) (((v) & 0xFF) << 0) /* Min. bytes/frame (<= 255) */
+};
+
+/*
+ * The MC-Setup Action Command.
+ */
+typedef struct ac_mcs_t ac_mcs_t;
+struct ac_mcs_t
+{
+ ach_t mcs_h;
+ unsigned short mcs_cnt; /* No. of bytes of MC addresses */
+#if 0
+ unsigned char mcs_data[ADDR_LEN]; /* The first MC address .. */
+ ...
+#endif
+};
+
+#define I82586_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
+
+/*
+ * The Transmit Action Command.
+ */
+typedef struct ac_tx_t ac_tx_t;
+struct ac_tx_t
+{
+ ach_t tx_h;
+ unsigned short tx_tbd_offset; /* Address of list of buffers. */
+#if 0
+Linux packets are passed down with the destination MAC address
+and length/type field already prepended to the data,
+so we do not need to insert it. Consistent with this
+we must also set the AC_CFG_ALOC(..) flag during the
+ac_cfg_t action command.
+ unsigned char tx_addr[ADDR_LEN]; /* The frame dest. address */
+ unsigned short tx_length; /* The frame length */
+#endif /* 0 */
+};
+
+/*
+ * The Time Domain Reflectometer Action Command.
+ */
+typedef struct ac_tdr_t ac_tdr_t;
+struct ac_tdr_t
+{
+ ach_t tdr_h;
+ unsigned short tdr_result; /* Result. */
+#define AC_TDR_LNK_OK (0x1 << 15) /* No link problem */
+#define AC_TDR_XCVR_PRB (0x1 << 14) /* Txcvr cable problem */
+#define AC_TDR_ET_OPN (0x1 << 13) /* Open on the link */
+#define AC_TDR_ET_SRT (0x1 << 12) /* Short on the link */
+#define AC_TDR_TIME (0x7FF << 0) /* Distance to problem */
+ /* site in transmit */
+ /* clock cycles */
+};
+
+/*
+ * The Dump Action Command.
+ */
+typedef struct ac_dmp_t ac_dmp_t;
+struct ac_dmp_t
+{
+ ach_t dmp_h;
+ unsigned short dmp_offset; /* Result. */
+};
+
+/*
+ * Size of the result of the dump command.
+ */
+#define DUMPBYTES 170
+
+/*
+ * The Diagnose Action Command.
+ */
+typedef struct ac_dgn_t ac_dgn_t;
+struct ac_dgn_t
+{
+ ach_t dgn_h;
+};
+
+/*
+ * Transmit Buffer Descriptor (TBD).
+ */
+typedef struct tbd_t tbd_t;
+struct tbd_t
+{
+ unsigned short tbd_status; /* Written by the CPU */
+#define TBD_STATUS_EOF (0x1 << 15) /* This TBD is the */
+ /* last for this frame */
+#define TBD_STATUS_ACNT (0x3FFF << 0) /* Actual count of data */
+ /* bytes in this buffer */
+ unsigned short tbd_next_bd_offset; /* Next in list */
+ unsigned short tbd_bufl; /* Buffer address (low) */
+ unsigned short tbd_bufh; /* " " (high) */
+};
+
+/*
+ * Receive Buffer Descriptor (RBD).
+ */
+typedef struct rbd_t rbd_t;
+struct rbd_t
+{
+ unsigned short rbd_status; /* Written by the 82586 */
+#define RBD_STATUS_EOF (0x1 << 15) /* This RBD is the */
+ /* last for this frame */
+#define RBD_STATUS_F (0x1 << 14) /* ACNT field is valid */
+#define RBD_STATUS_ACNT (0x3FFF << 0) /* Actual no. of data */
+ /* bytes in this buffer */
+ unsigned short rbd_next_rbd_offset; /* Next rbd in list */
+ unsigned short rbd_bufl; /* Data pointer (low) */
+ unsigned short rbd_bufh; /* " " (high) */
+ unsigned short rbd_el_size; /* EL+Data buf. size */
+#define RBD_EL (0x1 << 15) /* This BD is the */
+ /* last in the list */
+#define RBD_SIZE (0x3FFF << 0) /* No. of bytes the */
+ /* buffer can hold */
+};
+
+#define rbdoff(p,f) toff(rbd_t, p, f)
+
+/*
+ * Frame Descriptor (FD).
+ */
+typedef struct fd_t fd_t;
+struct fd_t
+{
+ unsigned short fd_status; /* Written by the 82586 */
+#define FD_STATUS_C (0x1 << 15) /* Completed storing frame */
+#define FD_STATUS_B (0x1 << 14) /* FD was consumed by RU */
+#define FD_STATUS_OK (0x1 << 13) /* Frame rxd successfully */
+#define FD_STATUS_S11 (0x1 << 11) /* CRC error */
+#define FD_STATUS_S10 (0x1 << 10) /* Alignment error */
+#define FD_STATUS_S9 (0x1 << 9) /* Ran out of resources */
+#define FD_STATUS_S8 (0x1 << 8) /* Rx DMA overrun */
+#define FD_STATUS_S7 (0x1 << 7) /* Frame too short */
+#define FD_STATUS_S6 (0x1 << 6) /* No EOF flag */
+ unsigned short fd_command; /* Command */
+#define FD_COMMAND_EL (0x1 << 15) /* Last FD in list */
+#define FD_COMMAND_S (0x1 << 14) /* Suspend RU after rx */
+ unsigned short fd_link_offset; /* Next FD */
+ unsigned short fd_rbd_offset; /* First RBD (data) */
+ /* Prepared by CPU, */
+ /* updated by 82586 */
+#if 0
+I think the rest is unused since we
+have set AC_CFG_ALOC(..). However, just
+in case, we leave the space.
+#endif /* 0 */
+ unsigned char fd_dest[ADDR_LEN]; /* Destination address */
+ /* Written by 82586 */
+ unsigned char fd_src[ADDR_LEN]; /* Source address */
+ /* Written by 82586 */
+ unsigned short fd_length; /* Frame length or type */
+ /* Written by 82586 */
+};
+
+#define fdoff(p,f) toff(fd_t, p, f)
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/linux/src/drivers/net/intel-gige.c b/linux/src/drivers/net/intel-gige.c
new file mode 100644
index 0000000..5884ffb
--- /dev/null
+++ b/linux/src/drivers/net/intel-gige.c
@@ -0,0 +1,1450 @@
+/* intel-gige.c: A Linux device driver for Intel Gigabit Ethernet adapters. */
+/*
+ Written 2000-2002 by Donald Becker.
+ Copyright Scyld Computing Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ You should have received a copy of the GPL with this file.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/ethernet.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"intel-gige.c:v0.14 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/ethernet.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: igige_probe
+config-in: tristate 'Intel PCI Gigabit Ethernet support' CONFIG_IGIGE
+
+c-help-name: Intel PCI Gigabit Ethernet support
+c-help-symbol: CONFIG_IGIGE
+c-help: This driver is for the Intel PCI Gigabit Ethernet
+c-help: adapter series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip has a 16 element perfect filter, and an unusual 4096 bit
+ hash filter based directly on address bits, not the Ethernet CRC.
+ It is costly to recalculate a large, frequently changing table.
+ However, even a large table may be useful in some nearly-static environments.
+*/
+static int multicast_filter_limit = 15;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ The media type is passed in 'options[]'. The full_duplex[] table only
+ allows the duplex to be forced on, implicitly disabling autonegotiation.
+ Setting the entry to zero still allows a link to autonegotiate to full
+ duplex.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* The delay before announcing a Rx or Tx has completed. */
+static int rx_intr_holdoff = 0;
+static int tx_intr_holdoff = 128;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two to avoid divides.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#if ! defined(final_version) /* Stress the driver. */
+#define TX_RING_SIZE 8
+#define TX_QUEUE_LEN 5
+#define RX_RING_SIZE 4
+#else
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+#endif
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Intel Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to set forced full duplex (deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Intel Gigabit Ethernet adapter.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Descriptor Rings
+
+This driver uses two statically allocated fixed-size descriptor arrays
+treated as rings by the hardware. The ring sizes are set at compile time
+by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
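+
+In outline, the per-packet decision made in netdev_rx() below is (the copy
+case keeps the ring skbuff in place; the no-copy case hands the ring skbuff
+up and leaves the slot to be refilled with a fresh skbuff later):
+
+    if (pkt_len < np->rx_copybreak
+        && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+        skb_reserve(skb, 2);
+        memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail, pkt_len);
+    } else {
+        skb = np->rx_skbuff[entry];
+        skb_put(skb, pkt_len);
+        np->rx_skbuff[entry] = NULL;
+    }
+    skb->protocol = eth_type_trans(skb, dev);
+    netif_rx(skb);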
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+When unaligned buffers are permitted by the hardware (and always on copies)
+frames are put into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.
+One is the send-packet routine which is single-threaded by the queue
+layer. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring. At the
+start of a transmit attempt netif_pause_tx_queue(dev) is called. If the
+transmit attempt fills the Tx queue controlled by the chip, the driver
+informs the software queue layer by not calling
+netif_unpause_tx_queue(dev) on exit.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IIIe. SMP semantics
+
+The following are serialized with respect to each other via the "xmit_lock".
+ dev->hard_start_xmit() Transmit a packet
+ dev->tx_timeout() Transmit watchdog for stuck Tx
+ dev->set_multicast_list() Set the receive filter.
+Note: The Tx timeout watchdog code is implemented by the timer routine in
+kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
+driver interface.
+
+The following fall under the global kernel lock. The module will not be
+unloaded during the call, unless a call that may reschedule, e.g.
+kmalloc(), is made. No other synchronization assertion is made.
+ dev->open()
+ dev->do_ioctl()
+ dev->get_stats()
+Caution: The lock for dev->open() is commonly broken with request_irq() or
+kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
+get_stats(), or additional module locking code must be implemented.
+
+The following is self-serialized (no simultaneous entry):
+ A handler registered with request_irq().
+
+IV. Notes
+
+IVb. References
+
+Intel has also released a Linux driver for this product, "e1000".
+
+IVc. Errata
+
+*/
+
+
+
+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int netdev_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags { CanHaveMII=1, };
+#define PCI_IOTYPE ()
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Intel Gigabit Ethernet adapter", {0x10008086, 0xffffffff, },
+ PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0, 0x1ffff, 0},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info igige_drv_id = {
+ "intel-gige", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ igige_probe1, netdev_pwr_event };
+
+/* This hardware only has a PCI memory space BAR, not I/O space. */
+#ifdef USE_IO_OPS
+#error This driver only works with PCI memory space access.
+#endif
+
+/* Offsets to the device registers.
+*/
+enum register_offsets {
+ ChipCtrl=0x00, ChipStatus=0x08, EECtrl=0x10,
+ FlowCtrlAddrLo=0x028, FlowCtrlAddrHi=0x02c, FlowCtrlType=0x030,
+ VLANetherType=0x38,
+
+ RxAddrCAM=0x040,
+ IntrStatus=0x0C0, /* Interrupt, Clear on Read, AKA ICR */
+ IntrEnable=0x0D0, /* Set enable mask when '1' AKA IMS */
+ IntrDisable=0x0D8, /* Clear enable mask when '1' */
+
+ RxControl=0x100,
+ RxQ0IntrDelay=0x108, /* Rx list #0 interrupt delay timer. */
+ RxRingPtr=0x110, /* Rx Desc. list #0 base address, 64bits */
+ RxRingLen=0x118, /* Num bytes of Rx descriptors in ring. */
+ RxDescHead=0x120,
+ RxDescTail=0x128,
+
+ RxQ1IntrDelay=0x130, /* Rx list #1 interrupt delay timer. */
+ RxRing1Ptr=0x138, /* Rx Desc. list #1 base address, 64bits */
+ RxRing1Len=0x140, /* Num bytes of Rx descriptors in ring. */
+ RxDesc1Head=0x148,
+ RxDesc1Tail=0x150,
+
+ FlowCtrlTimer=0x170, FlowCtrlThrshHi=0x160, FlowCtrlThrshLo=0x168,
+ TxConfigReg=0x178,
+ RxConfigReg=0x180,
+ MulticastArray=0x200,
+
+ TxControl=0x400,
+ TxQState=0x408, /* 64 bit queue state */
+ TxIPG=0x410, /* Inter-Packet Gap */
+ TxRingPtr=0x420, TxRingLen=0x428,
+ TxDescHead=0x430, TxDescTail=0x438, TxIntrDelay=0x440,
+
+ RxCRCErrs=0x4000, RxMissed=0x4010,
+
+ TxStatus=0x408,
+ RxStatus=0x180,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrTxDone=0x0001, /* Tx packet queued */
+ IntrLinkChange=0x0004, /* Link Status Change */
+ IntrRxSErr=0x0008, /* Rx Symbol/Sequence error */
+ IntrRxEmpty=0x0010, /* Rx queue 0 Empty */
+ IntrRxQ1Empty=0x0020, /* Rx queue 1 Empty */
+ IntrRxDone=0x0080, /* Rx Done, Queue 0*/
+ IntrRxDoneQ1=0x0100, /* Rx Done, Queue 1 */
+ IntrPCIErr=0x0200, /* PCI Bus Error */
+
+ IntrTxEmpty=0x0002, /* Guess */
+ StatsMax=0x1000, /* Unknown */
+};
+
+/* Bits in the RxFilterMode register. */
+enum rx_mode_bits {
+ RxCtrlReset=0x01, RxCtrlEnable=0x02, RxCtrlAllUnicast=0x08,
+ RxCtrlAllMulticast=0x10,
+ RxCtrlLoopback=0xC0, /* We never configure loopback */
+ RxCtrlAcceptBroadcast=0x8000,
+ /* Aliased names.*/
+ AcceptAllPhys=0x08, AcceptAllMulticast=0x10, AcceptBroadcast=0x8000,
+ AcceptMyPhys=0,
+ AcceptMulticast=0,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+ u32 buf_addr;
+ u32 buf_addr_hi;
+ u32 csum_length; /* Checksum and length */
+ u32 status; /* Errors and status. */
+};
+
+struct tx_desc {
+ u32 buf_addr;
+ u32 buf_addr_hi;
+ u32 cmd_length;
+ u32 status; /* And errors */
+};
+
+/* Bits in tx_desc.cmd_length */
+enum tx_cmd_bits {
+ TxDescEndPacket=0x02000000, TxCmdIntrDelay=0x80000000,
+ TxCmdAddCRC=0x02000000, TxCmdDoTx=0x13000000,
+};
+enum tx_status_bits {
+ TxDescDone=0x0001, TxDescEndPkt=0x0002,
+};
+
+/* Bits in rx_desc.status */
+enum rx_status_bits {
+ RxDescDone=0x0001, RxDescEndPkt=0x0002,
+};
+
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Keep frequently used values adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int max_interrupt_work;
+ int intr_enable;
+ long in_interrupt; /* Word-long for SMP locks. */
+
+ struct rx_desc *rx_ring;
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ struct tx_desc *tx_ring;
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ unsigned int rx_mode;
+ unsigned int tx_config;
+ int multicast_filter_limit;
+ /* These values track the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int netdev_open(struct net_device *dev);
+static int change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int igige_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&igige_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ for (i = 0; i < 3; i++)
+ ((u16*)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* Do bogusness checks before this point.
+ We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(0x04000000, ioaddr + ChipCtrl);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
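+ /* debug=N enables every NETIF_MSG_* bit below (1 << N); the default
+    debug=2 thus enables only the DRV and PROBE messages. */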
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x2220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3330;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex)
+ np->duplex_lock = 1;
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+ if (np->msg_level & NETIF_MSG_MISC) {
+ int sum = 0;
+ for (i = 0; i < 0x40; i++) {
+ int eeval = eeprom_read(ioaddr, i);
+ printk("%4.4x%s", eeval, i % 16 != 15 ? " " : "\n");
+ sum += eeval;
+ }
+ printk(KERN_DEBUG "%s: EEPROM checksum %4.4X (expected value 0xBABA).\n",
+ dev->name, sum & 0xffff);
+ }
+#endif
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+ dev->change_mtu = &change_mtu;
+
+ /* Turn off VLAN and clear the VLAN filter. */
+ writel(0x04000000, ioaddr + VLANetherType);
+ for (i = 0x600; i < 0x800; i+=4)
+ writel(0, ioaddr + i);
+ np->tx_config = 0x80000020;
+ writel(np->tx_config, ioaddr + TxConfigReg);
+ {
+ int eeword10 = eeprom_read(ioaddr, 10);
+ writel(((eeword10 & 0x01e0) << 17) | ((eeword10 & 0x0010) << 3),
+ ioaddr + ChipCtrl);
+ }
+
+ return dev;
+}
+
+
+/* Read the EEPROM through its serial interface, with the bit stream
+ generated by the host processor.
+ The code below is for the common 93c46 EEPROM, 64 16-bit words. */
+
+/* Delay between EEPROM clock transitions.
+ This effectively flushes the write cache to prevent quick double-writes.
+*/
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x01, EE_ChipSelect=0x02, EE_DataIn=0x08, EE_DataOut=0x04,
+};
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataOut)
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
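+
+/* For a read, eeprom_read() below shifts out the always-set start bit, the
+   two opcode bits, and the six address bits MSB first, then issues sixteen
+   more clocks while the selected data word is returned on EE_DataIn. */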
+
+static int eeprom_read(long addr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = addr + EECtrl;
+ int read_cmd = ((EE_ReadCmd<<6) | location) << 16 ;
+ int cmd_len = 2+6+16;
+ u32 baseval = readl(ee_addr) & ~0x0f;
+
+ writel(EE_Write0 | baseval, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = cmd_len; i >= 0; i--) {
+ int dataval = baseval |
+ ((read_cmd & (1 << i)) ? EE_Write1 : EE_Write0);
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(baseval | EE_Write0, ee_addr);
+ writel(baseval & ~EE_ChipSelect, ee_addr);
+ return retval;
+}
+
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Some chips may need to be reset. */
+
+ MOD_INC_USE_COUNT;
+
+ if (np->tx_ring == 0)
+ np->tx_ring = (void *)get_free_page(GFP_KERNEL);
+ if (np->tx_ring == 0)
+ return -ENOMEM;
+ if (np->rx_ring == 0)
+ np->rx_ring = (void *)get_free_page(GFP_KERNEL);
+ if (np->rx_ring == 0) {
+ free_page((long)np->tx_ring);
+ return -ENOMEM;
+ }
+
+ /* Note that both request_irq() and init_ring() call kmalloc(), which
+ break the global kernel lock protecting this routine. */
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ writel(0, ioaddr + RxControl);
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+#if ADDRLEN == 64
+ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtr + 4);
+#else
+ writel(0, ioaddr + RxRingPtr + 4);
+#endif
+
+ writel(RX_RING_SIZE * sizeof(struct rx_desc), ioaddr + RxRingLen);
+ writel(0x80000000 | rx_intr_holdoff, ioaddr + RxQ0IntrDelay);
+ writel(0, ioaddr + RxDescHead);
+ writel(np->dirty_rx + RX_RING_SIZE, ioaddr + RxDescTail);
+
+ /* Zero the unused Rx ring #1. */
+ writel(0, ioaddr + RxQ1IntrDelay);
+ writel(0, ioaddr + RxRing1Ptr);
+ writel(0, ioaddr + RxRing1Ptr + 4);
+ writel(0, ioaddr + RxRing1Len);
+ writel(0, ioaddr + RxDesc1Head);
+ writel(0, ioaddr + RxDesc1Tail);
+
+ /* Use 0x002000FA for half duplex. */
+ writel(0x000400FA, ioaddr + TxControl);
+
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+#if ADDRLEN == 64
+ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtr + 4);
+#else
+ writel(0, ioaddr + TxRingPtr + 4);
+#endif
+
+ writel(TX_RING_SIZE * sizeof(struct tx_desc), ioaddr + TxRingLen);
+ writel(0, ioaddr + TxDescHead);
+ writel(0, ioaddr + TxDescTail);
+ writel(0, ioaddr + TxQState);
+ writel(0, ioaddr + TxQState + 4);
+
+ /* Set IPG register with Ethernet standard values. */
+ writel(0x00A0080A, ioaddr + TxIPG);
+ /* The delay before announcing a Tx has completed. */
+ writel(tx_intr_holdoff, ioaddr + TxIntrDelay);
+
+ writel(((u32*)dev->dev_addr)[0], ioaddr + RxAddrCAM);
+ writel(0x80000000 | ((((u32*)dev->dev_addr)[1]) & 0xffff),
+ ioaddr + RxAddrCAM + 4);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ np->in_interrupt = 0;
+
+ np->rx_mode = RxCtrlEnable;
+ set_rx_mode(dev);
+
+ /* Tx mode */
+ np->tx_config = 0x80000020;
+ writel(np->tx_config, ioaddr + TxConfigReg);
+
+ /* Flow control */
+ writel(0x00C28001, ioaddr + FlowCtrlAddrLo);
+ writel(0x00000100, ioaddr + FlowCtrlAddrHi);
+ writel(0x8808, ioaddr + FlowCtrlType);
+ writel(0x0100, ioaddr + FlowCtrlTimer);
+ writel(0x8000, ioaddr + FlowCtrlThrshHi);
+ writel(0x4000, ioaddr + FlowCtrlThrshLo);
+
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writel(IntrTxDone | IntrLinkChange | IntrRxDone | IntrPCIErr
+ | IntrRxEmpty | IntrRxSErr, ioaddr + IntrEnable);
+
+ /* writel(1, dev->base_addr + RxCmd);*/
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x Rx %x Tx %x.\n",
+ dev->name, (int)readl(ioaddr + ChipStatus),
+ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + TxStatus));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+/* Update for jumbo frames...
+ Changing the MTU while active is not allowed.
+ */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int chip_ctrl = readl(ioaddr + ChipCtrl);
+ int rx_cfg = readl(ioaddr + RxConfigReg);
+ int tx_cfg = readl(ioaddr + TxConfigReg);
+#if 0
+ int chip_status = readl(ioaddr + ChipStatus);
+#endif
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Link changed status. Ctrl %x rxcfg %8.8x "
+ "txcfg %8.8x.\n",
+ dev->name, chip_ctrl, rx_cfg, tx_cfg);
+ if (np->medialock) {
+ if (np->full_duplex)
+ ;
+ }
+ /* writew(new_tx_mode, ioaddr + TxMode); */
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x, "
+ "Tx %x Rx %x.\n",
+ dev->name, (int)readl(ioaddr + ChipStatus),
+ (int)readl(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
+ }
+ /* This will either have a small false-trigger window or will not catch
+ tbusy incorrectly set when the queue is empty. */
+ if ((jiffies - dev->trans_start) > TX_TIMEOUT &&
+ (np->cur_tx - np->dirty_tx > 0 ||
+ netif_queue_paused(dev)) ) {
+ tx_timeout(dev);
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + ChipStatus));
+
+#ifndef __alpha__
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Tx registers: ");
+ for (i = 0x400; i < 0x444; i += 8)
+ printk(" %8.8x", (int)readl(ioaddr + i));
+ printk("\n"KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", np->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes . */
+
+ /* Trigger an immediate transmit demand. */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_skbuff[i] = 0;
+ }
+
+ /* The number of ring descriptors is set by the ring length register,
+ thus the chip does not use 'next_desc' chains. */
+
+ /* Fill in the Rx buffers. Allocation failures are acceptable. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
+ np->rx_ring[i].buf_addr_hi = 0;
+ np->rx_ring[i].status = 0;
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ /* Note: Descriptors may be uncached. Write each field only once. */
+ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
+ np->tx_ring[entry].buf_addr_hi = 0;
+ np->tx_ring[entry].cmd_length = cpu_to_le32(TxCmdDoTx | skb->len);
+ np->tx_ring[entry].status = 0;
+
+ /* Non-CC architectures: explicitly flush descriptor and packet.
+ cache_flush(np->tx_ring[entry], sizeof np->tx_ring[entry]);
+ cache_flush(skb->data, skb->len);
+ */
+
+ np->cur_tx++;
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile int)np->dirty_tx < TX_QUEUE_LEN - 2) {
+ netif_unpause_tx_queue(dev);
+ np->tx_full = 0;
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ /* Inform the chip we have another Tx. */
+ if (np->msg_level & NETIF_MSG_TX_QUEUED)
+ printk(KERN_DEBUG "%s: Tx queued to slot %d, desc tail now %d "
+ "writing %d.\n",
+ dev->name, entry, (int)readl(dev->base_addr + TxDescTail),
+ np->cur_tx % TX_RING_SIZE);
+ writel(np->cur_tx % TX_RING_SIZE, dev->base_addr + TxDescTail);
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d (%x) queued in slot %d.\n",
+ dev->name, np->cur_tx, (int)virt_to_bus(&np->tx_ring[entry]),
+ entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int work_limit;
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ work_limit = np->max_interrupt_work;
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#endif
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0 || intr_status == 0xffffffff)
+ break;
+
+ if (intr_status & IntrRxDone)
+ netdev_rx(dev);
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if (np->tx_ring[entry].status == 0)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, np->tx_ring[entry].status);
+ np->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis to mark the queue non-full. */
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | IntrLinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+
+ if (--work_limit < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ clear_bit(0, (void*)&dev->interrupt);
+#endif
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (np->rx_head_desc->status & cpu_to_le32(RxDescDone)) {
+ struct rx_desc *desc = np->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->status);
+ int data_size = le32_to_cpu(desc->csum_length);
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RxDescEndPkt)) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, np->cur_rx, data_size, desc_status);
+ np->stats.rx_length_errors++;
+ } else {
+ struct sk_buff *skb;
+ /* Reported length should omit the CRC. */
+ int pkt_len = (data_size & 0xffff) - 4;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (le32desc_to_virt(np->rx_ring[entry].buf_addr) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in netdev_rx: %p vs. %p / %p.\n",
+ dev->name,
+ le32desc_to_virt(np->rx_ring[entry].buf_addr),
+ skb->head, temp);
+#endif
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (np->msg_level & NETIF_MSG_PKTDATA)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+ "%d.%d.%d.%d.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13],
+ skb->data[14], skb->data[15], skb->data[16],
+ skb->data[17]);
+#endif
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
+ }
+ np->rx_ring[entry].status = 0;
+ }
+
+ /* Restart Rx engine if stopped. */
+ /* writel(1, dev->base_addr + RxCmd); */
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ if (intr_status & IntrLinkChange) {
+ int chip_ctrl = readl(ioaddr + ChipCtrl);
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_ERR "%s: Link changed: Autonegotiation on-going.\n",
+ dev->name);
+ if (chip_ctrl & 1)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if ((intr_status & ~(IntrLinkChange|StatsMax))
+ && (np->msg_level & NETIF_MSG_DRV))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr)
+ np->stats.tx_fifo_errors++;
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int crc_errs = readl(ioaddr + RxCRCErrs);
+
+ if (crc_errs != 0xffffffff) {
+ /* We need not lock this segment of code for SMP.
+ The non-atomic-add vulnerability is very small
+ and statistics are non-critical. */
+ np->stats.rx_crc_errors += crc_errs;
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+ }
+
+ return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+ A big-endian version is also available.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c.
+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
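+
+/* Typical use in other drivers, for reference only -- this chip's 4096-bit
+   hash is indexed by address bits directly, so set_rx_mode() below does not
+   call ether_crc_le():
+     filter_bit = ether_crc_le(ETH_ALEN, addr) & 0x3f;
+*/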
+
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u32 new_mc_filter[128]; /* Multicast filter table */
+ u32 new_rx_mode = np->rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ new_rx_mode |=
+ RxCtrlAcceptBroadcast | RxCtrlAllMulticast | RxCtrlAllUnicast;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ new_rx_mode &= ~RxCtrlAllUnicast;
+ new_rx_mode |= RxCtrlAcceptBroadcast | RxCtrlAllMulticast;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(new_mc_filter, 0, sizeof(new_mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < 15;
+ i++, mclist = mclist->next) {
+ writel(((u32*)mclist->dmi_addr)[0], ioaddr + RxAddrCAM + 8 + i*8);
+ writel((((u32*)mclist->dmi_addr)[1] & 0xffff) | 0x80000000,
+ ioaddr + RxAddrCAM + 12 + i*8);
+ }
+ for (; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ set_bit(((u32*)mclist->dmi_addr)[1] & 0xfff,
+ new_mc_filter);
+ }
+ new_rx_mode &= ~(RxCtrlAllUnicast | RxCtrlAllMulticast);
+ new_rx_mode |= RxCtrlAcceptBroadcast;
+ if (dev->mc_count > 15)
+ for (i = 0; i < 128; i++)
+ writel(new_mc_filter[i], ioaddr + MulticastArray + (i<<2));
+ }
+ if (np->rx_mode != new_rx_mode)
+ writel(np->rx_mode = new_rx_mode, ioaddr + RxControl);
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
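+
+/* A hedged user-space sketch for the private ioctls above (assumes the
+   SIOCGPARAMS value from kern_compat.h, i.e. SIOCDEVPRIVATE+3; includes
+   and error handling omitted):
+
+     struct ifreq ifr;
+     unsigned int *data = (unsigned int *)&ifr.ifr_data;
+     int s = socket(AF_INET, SOCK_DGRAM, 0);
+     strcpy(ifr.ifr_name, "eth0");
+     if (ioctl(s, SIOCDEVPRIVATE + 3, &ifr) == 0)
+         printf("msg %u mc-limit %u max-work %u copybreak %u\n",
+                data[0], data[1], data[2], data[3]);
+*/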
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, (int)readl(ioaddr + TxStatus),
+ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(~0, ioaddr + IntrDisable);
+ readl(ioaddr + IntrStatus);
+
+ /* Reset everything. */
+ writel(0x04000000, ioaddr + ChipCtrl);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. buf %8.8x, length %8.8x, status %8.8x.\n",
+ i, np->tx_ring[i].buf_addr, np->tx_ring[i].cmd_length,
+ np->tx_ring[i].status);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].csum_length,
+ np->rx_ring[i].status, np->rx_ring[i].buf_addr);
+ if (np->rx_ring[i].buf_addr) {
+ if (*(u8*)np->rx_skbuff[i]->tail != 0x69) {
+ u16 *pkt_buf = (void *)np->rx_skbuff[i]->tail;
+ int j;
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x", pkt_buf[j]);
+ printk("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int netdev_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writel(~0, ioaddr + IntrDisable);
+ /* writel(2, ioaddr + RxCmd); */
+ /* writew(2, ioaddr + TxCmd); */
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ set_rx_mode(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+ iounmap((char *)dev->base_addr);
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&igige_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&igige_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+ iounmap((char *)(root_net_dev->base_addr));
+ next_dev = np->next_module;
+ if (np->tx_ring)
+ free_page((long)np->tx_ring);
+ if (np->rx_ring)
+ free_page((long)np->rx_ring);
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` intel-gige.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c intel-gige.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c intel-gige.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/iow.h b/linux/src/drivers/net/iow.h
new file mode 100644
index 0000000..6e15688
--- /dev/null
+++ b/linux/src/drivers/net/iow.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IOW_H
+#define _ASM_IOW_H
+
+/* no longer used */
+
+#endif
diff --git a/linux/src/drivers/net/kern_compat.h b/linux/src/drivers/net/kern_compat.h
new file mode 100644
index 0000000..39e1934
--- /dev/null
+++ b/linux/src/drivers/net/kern_compat.h
@@ -0,0 +1,285 @@
+#ifndef _KERN_COMPAT_H
+#define _KERN_COMPAT_H
+/* kern_compat.h: Linux PCI network adapter backward compatibility code. */
+/*
+ $Revision: 1.1.2.2 $ $Date: 2007/08/04 21:02:21 $
+
+ Kernel compatibility defines.
+ This file provides macros to mask the difference between kernel versions.
+ It is designed primarily to allow device drivers to be written so that
+ they work with a range of kernel versions.
+
+ Written 1999-2003 Donald Becker, Scyld Computing Corporation
+ This software may be used and distributed according to the terms
+ of the GNU General Public License (GPL), incorporated herein by
+ reference. Drivers interacting with these functions are derivative
+ works and thus are covered the GPL. They must include an explicit
+ works and thus are covered by the GPL. They must include an explicit
+
+ This code also provides inline scan and activate functions for PCI network
+ interfaces. It has an interface identical to pci-scan.c, but is
+ intended as an include file to simplify using updated drivers with older
+ kernel versions.
+ This code version matches pci-scan.c:v0.05 9/16/99
+
+ The author may be reached as becker@scyld.com, or
+ Donald Becker
+ Penguin Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Other contributors:
+ <none>
+*/
+
+/* We try to use defined values to decide when an interface has changed or
+ added features, but we must have the kernel version number for a few. */
+#if ! defined(LINUX_VERSION_CODE) || (LINUX_VERSION_CODE < 0x10000)
+#include <linux/version.h>
+#endif
+/* Older kernel versions didn't include modversions automatically. */
+#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+/* There was no support for PCI address space mapping in 2.0, but the
+ Alpha needed it. See the 2.2 documentation. */
+#if LINUX_VERSION_CODE < 0x20100 && ! defined(__alpha__)
+#define ioremap(a,b)\
+ (((unsigned long)(a) >= 0x100000) ? vremap(a,b) : (void*)(a))
+#define iounmap(v)\
+ do { if ((unsigned long)(v) >= 0x100000) vfree(v);} while (0)
+#endif
+
+/* Support for adding info about the purpose of and parameters for kernel
+ modules was added in 2.1. */
+#if LINUX_VERSION_CODE < 0x20115
+#define MODULE_AUTHOR(name) extern int nonesuch
+#define MODULE_DESCRIPTION(string) extern int nonesuch
+#define MODULE_PARM(varname, typestring) extern int nonesuch
+#define MODULE_PARM_DESC(var,desc) extern int nonesuch
+#endif
+#if !defined(MODULE_LICENSE)
+#define MODULE_LICENSE(license) \
+static const char __module_license[] __attribute__((section(".modinfo"))) = \
+"license=" license
+#endif
+#if !defined(MODULE_PARM_DESC)
+#define MODULE_PARM_DESC(var,desc) \
+const char __module_parm_desc_##var[] \
+__attribute__((section(".modinfo"))) = \
+"parm_desc_" __MODULE_STRING(var) "=" desc
+#endif
+
+/* SMP and better multiarchitecture support were added.
+ Using an older kernel means we assume a little-endian uniprocessor.
+*/
+#if LINUX_VERSION_CODE < 0x20123
+#define hard_smp_processor_id() smp_processor_id()
+//#define test_and_set_bit(val, addr) set_bit(val, addr)
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#define le16_to_cpu(val) (val)
+#define le16_to_cpus(val) /* In-place conversion. */
+#define le32_to_cpu(val) (val)
+#define cpu_to_be16(val) ((((val) & 0xff) << 8) + (((val) >> 8) & 0xff))
+#define cpu_to_be32(val) ((cpu_to_be16(val) << 16) + cpu_to_be16((val) >> 16))
+typedef long spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+#define spin_lock(lock)
+#define spin_unlock(lock)
+#define spin_lock_irqsave(lock, flags) do {save_flags(flags); cli();} while(0)
+#define spin_unlock_irqrestore(lock, flags) restore_flags(flags)
+#endif
+
+#if LINUX_VERSION_CODE <= 0x20139
+#define net_device_stats enet_statistics
+#else
+#define NETSTATS_VER2
+#endif
+
+/* These are used by the netdrivers to report values from the
+ MII (Media Independent Interface) management registers.
+*/
+#ifndef SIOCGMIIPHY
+#define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Get the PHY in use. */
+#define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read a PHY register. */
+#define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write a PHY register. */
+#endif
+#ifndef SIOCGPARAMS
+#define SIOCGPARAMS (SIOCDEVPRIVATE+3) /* Read operational parameters. */
+#define SIOCSPARAMS (SIOCDEVPRIVATE+4) /* Set operational parameters. */
+#endif
+
+#if !defined(HAVE_NETIF_MSG)
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ /* 2000 is reserved. */
+ NETIF_MSG_WOL = 0x4000,
+ NETIF_MSG_MISC = 0x8000,
+ NETIF_MSG_RXFILTER = 0x10000,
+};
+#define NETIF_MSG_MAX 0x10000
+#endif
+
+#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x8000
+#define NETIF_MSG_MISC 0x8000
+#endif
+#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x10000
+#define NETIF_MSG_RXFILTER 0x10000
+#endif
+
+#if LINUX_VERSION_CODE < 0x20155
+#include <linux/bios32.h>
+#define PCI_SUPPORT_VER1
+/* A minimal version of the 2.2.* PCI support that handles configuration
+ space access.
+ Drivers that actually use pci_dev fields must do explicit compatibility.
+ Note that the struct pci_dev * "pointer" is actually a byte mapped integer!
+*/
+#if LINUX_VERSION_CODE < 0x20014
+struct pci_dev { int not_used; };
+#endif
+
+#define pci_find_slot(bus, devfn) (struct pci_dev*)((bus<<8) | devfn | 0xf0000)
+#define bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
+#define devfn_number(pci_dev) (((int)(pci_dev)) & 0xff)
+#define pci_bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
+#define pci_devfn(pci_dev) (((int)(pci_dev)) & 0xff)
+
+#ifndef CONFIG_PCI
+extern inline int pci_present(void) { return 0; }
+#else
+#define pci_present pcibios_present
+#endif
+
+#define pci_read_config_byte(pdev, where, valp)\
+ pcibios_read_config_byte(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_word(pdev, where, valp)\
+ pcibios_read_config_word(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_read_config_dword(pdev, where, valp)\
+ pcibios_read_config_dword(bus_number(pdev), devfn_number(pdev), where, valp)
+#define pci_write_config_byte(pdev, where, val)\
+ pcibios_write_config_byte(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_word(pdev, where, val)\
+ pcibios_write_config_word(bus_number(pdev), devfn_number(pdev), where, val)
+#define pci_write_config_dword(pdev, where, val)\
+ pcibios_write_config_dword(bus_number(pdev), devfn_number(pdev), where, val)
+#else
+#define PCI_SUPPORT_VER2
+#define pci_bus_number(pci_dev) ((pci_dev)->bus->number)
+#define pci_devfn(pci_dev) ((pci_dev)->devfn)
+#endif
+
+/* The arg count changed, but function name did not.
+ We cover that bad choice by defining a new name.
+*/
+#if LINUX_VERSION_CODE < 0x20159
+#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE)
+#define dev_free_skb_irq(skb) dev_kfree_skb(skb, FREE_WRITE)
+#elif LINUX_VERSION_CODE < 0x20400
+#define dev_free_skb(skb) dev_kfree_skb(skb)
+#define dev_free_skb_irq(skb) dev_kfree_skb(skb)
+#else
+#define dev_free_skb(skb) dev_kfree_skb(skb)
+#define dev_free_skb_irq(skb) dev_kfree_skb_irq(skb)
+#endif
+
+/* Added at the suggestion of Jes Sorensen. */
+#if LINUX_VERSION_CODE > 0x20153
+#include <linux/init.h>
+#else
+#define __init
+#define __initdata
+#define __initfunc(__arginit) __arginit
+#endif
+
+/* The old 'struct device' used a too-generic name. */
+#if LINUX_VERSION_CODE < 0x2030d
+#define net_device device
+#endif
+
+/* More changes for the 2.4 kernel, some in the zillion 2.3.99 releases. */
+#if LINUX_VERSION_CODE < 0x20363
+#define DECLARE_MUTEX(name) struct semaphore (name) = MUTEX;
+#define down_write(semaphore_p) down(semaphore_p)
+#define down_read(semaphore_p) down(semaphore_p)
+#define up_write(semaphore_p) up(semaphore_p)
+#define up_read(semaphore_p) up(semaphore_p)
+/* Note that the kernel version has a broken time_before()! */
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_before(a,b) ((long)(a) - (long)(b) < 0)
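+/* These compare jiffies values modulo 2^32, so they remain correct across a
+   jiffies wrap as long as the two times are less than 2^31 ticks apart. */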
+#else
+#define get_free_page get_zeroed_page
+#endif
+
+/* The 2.2 kernels added the start of capability-based security for operations
+ that formerly could only be done by root.
+*/
+#if ! defined(CAP_NET_ADMIN)
+#define capable(CAP_XXX) (suser())
+#endif
+
+#if ! defined(HAVE_NETIF_QUEUE)
+#define netif_wake_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
+#define netif_start_tx_queue(dev) do { (dev)->tbusy = 0; dev->start = 1; } while (0)
+#define netif_stop_tx_queue(dev) do { (dev)->tbusy = 1; dev->start = 0; } while (0)
+#define netif_queue_paused(dev) ((dev)->tbusy != 0)
+/* Splitting these lines exposes a bug in some preprocessors. */
+#define netif_pause_tx_queue(dev) (test_and_set_bit( 0, (void*)&(dev)->tbusy))
+#define netif_unpause_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); } while (0)
+#define netif_resume_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
+
+#define netif_running(dev) ((dev)->start != 0)
+#define netif_device_attach(dev) do {; } while (0)
+#define netif_device_detach(dev) do {; } while (0)
+#define netif_device_present(dev) (1)
+#define netif_set_tx_timeout(dev, func, deltajiffs) do {; } while (0)
+#define netif_link_down(dev) (dev)->flags &= ~IFF_RUNNING
+#define netif_link_up(dev) (dev)->flags |= IFF_RUNNING
+
+#else
+
+#define netif_start_tx_queue(dev) netif_start_queue(dev)
+#define netif_stop_tx_queue(dev) netif_stop_queue(dev)
+#define netif_queue_paused(dev) netif_queue_stopped(dev)
+#define netif_resume_tx_queue(dev) netif_wake_queue(dev)
+/* Only used in transmit path. No function in 2.4. */
+#define netif_pause_tx_queue(dev) 0
+#define netif_unpause_tx_queue(dev) do {; } while (0)
+
+#ifdef __LINK_STATE_NOCARRIER
+#define netif_link_down(dev) netif_carrier_off(dev)
+#define netif_link_up(dev) netif_carrier_on(dev)
+#else
+#define netif_link_down(dev) (dev)->flags &= ~IFF_RUNNING
+#define netif_link_up(dev) (dev)->flags |= IFF_RUNNING
+#endif
+
+#endif
+#ifndef PCI_DMA_BUS_IS_PHYS
+#define pci_dma_sync_single(pci_dev, base_addr, extent, tofrom) do {; } while (0)
+#define pci_map_single(pci_dev, base_addr, extent, dir) virt_to_bus(base_addr)
+#define pci_unmap_single(pci_dev, base_addr, extent, dir) do {; } while (0)
+#endif
+
+#endif
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/lance.c b/linux/src/drivers/net/lance.c
new file mode 100644
index 0000000..fe3cf68
--- /dev/null
+++ b/linux/src/drivers/net/lance.c
@@ -0,0 +1,1293 @@
+/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
+/*
+ Written/copyright 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
+ with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ Fixing alignment problem with 1.3.* kernel and some minor changes
+ by Andrey V. Savochkin, 1996.
+
+ Problems or questions may be sent to Donald Becker (see above) or to
+ Andrey Savochkin -- saw@shade.msu.ru or
+ Laboratory of Computation Methods,
+ Department of Mathematics and Mechanics,
+ Moscow State University,
+ Leninskye Gory, Moscow 119899
+
+ But I should inform you that I'm not an expert on the LANCE card,
+ and it may happen that you receive no answer to your mail
+ to Donald Becker. I didn't receive any answer to any of my letters
+ to him. Who knows why... But maybe you are more lucky? ;->
+ SAW
+
+ Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
+ - added support for Linux/Alpha, but removed most of it, because
+ it worked only for the PCI chip.
+ - added hook for the 32bit lance driver
+ - added PCnetPCI II (79C970A) to chip table
+ Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
+ - hopefully fix above so Linux/Alpha can use ISA cards too.
+ 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
+ v1.12 10/27/97 Module support -djb
+ v1.14 2/3/98 Module support modified, made PCI support optional -djb
+*/
+
+static const char *version = "lance.c:v1.14 2/3/1998 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
+
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+static unsigned int lance_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0};
+int lance_probe(struct device *dev);
+int lance_probe1(struct device *dev, int ioaddr, int irq, int options);
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry lance_drv =
+{"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
+#endif
+
+#ifdef LANCE_DEBUG
+int lance_debug = LANCE_DEBUG;
+#else
+int lance_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the AMD 79C960, the "PCnet-ISA
+single-chip ethernet controller for ISA". This chip is used in a wide
+variety of boards from vendors such as Allied Telesis, HP, Kingston,
+and Boca. This driver is also intended to work with older AMD 7990
+designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
+I use the name LANCE to refer to all of the AMD chips, even though it properly
+refers only to the original 7990.
+
+II. Board-specific settings
+
+The driver is designed to work with the boards that use the faster
+bus-master mode, rather than shared memory mode. (Only older designs
+have on-board buffer memory needed to support the slower shared memory mode.)
+
+Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
+channel. This driver probes the likely base addresses:
+{0x300, 0x320, 0x340, 0x360}.
+After the board is found it generates a DMA-timeout interrupt and uses
+autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
+of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
+probed for by enabling each free DMA channel in turn and checking if
+initialization succeeds.
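+
+For example (hypothetical values), booting with
+    ether=0,0x300,5,eth0
+asks for a probe at I/O base 0x300 with autoIRQ, and passes 5 as PARAM1 to
+select DMA channel 5.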
+
+The HP-J2405A board is an exception: with this board it is easy to read the
+EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
+_know_ the base address -- that field is for writing the EEPROM.)
+
+III. Driver operation
+
+IIIa. Ring buffers
+The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
+the base and length of the data buffer, along with status bits. The length
+of these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is the log_2() of
+the ring length (rather than the ring length itself) for
+implementation ease. The values set below are 4 (Tx) and 4 (Rx), which leads to
+ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
+needlessly uses extra space and reduces the chance that an upper layer will
+be able to reorder queued Tx packets based on priority. Decreasing the number
+of entries makes it more difficult to achieve back-to-back packet transmission
+and increases the chance that the Rx ring will overflow. (Consider the worst case
+of receiving back-to-back minimum-sized packets.)
+
+The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
+statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
+avoid the administrative overhead. For the Rx side this avoids dynamically
+allocating full-sized buffers "just in case", at the expense of a
+memory-to-memory data copy for each packet received. For most systems this
+is a good tradeoff: the Rx buffer will always be in low memory, the copy
+is inexpensive, and it primes the cache for later packet processing. For Tx
+the buffers are only used when needed as low-memory bounce buffers.
+
+IIIb. 16M memory limitations.
+For the ISA bus master mode all structures used directly by the LANCE,
+the initialization block, Rx and Tx rings, and data buffers, must be
+accessible from the ISA bus, i.e. in the lower 16M of real memory.
+This is a problem for current Linux kernels on >16M machines. The network
+devices are initialized after memory initialization, and the kernel doles out
+memory from the top of memory downward. The current solution is to have a
+special network initialization routine that's called before memory
+initialization; this will eventually be generalized for all network devices.
+As mentioned before, low-memory "bounce-buffers" are used when needed.
+
+IIIc. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+*/
+
+/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
+ That translates to 4 and 4 (16 == 2^^4).
+ This is a compile-time option for efficiency.
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
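+
+/* For example, with the default LANCE_LOG_TX_BUFFERS of 4, TX_RING_SIZE is
+   1 << 4 = 16 and TX_RING_LEN_BITS is 4 << 29 = 0x80000000; that value is
+   OR'd with the low 24 bits of the ring's bus address to form the
+   init-block ring words (see lance_init_ring() below). */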
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define LANCE_DATA 0x10
+#define LANCE_ADDR 0x12
+#define LANCE_RESET 0x14
+#define LANCE_BUS_IF 0x16
+#define LANCE_TOTAL_SIZE 0x18
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ s32 base;
+ s16 buf_length; /* This length is 2s complement (negative)! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ s32 base;
+ s16 length; /* Length is 2s complement (negative)! */
+ s16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring; /* Tx and Rx ring base pointers */
+ u32 tx_ring;
+};
+
+struct lance_private {
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_head rx_ring[RX_RING_SIZE];
+ struct lance_tx_head tx_ring[TX_RING_SIZE];
+ struct lance_init_block init_block;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ /* Tx low-memory "bounce buffer" address. */
+ char (*tx_bounce_buffs)[PKT_BUF_SZ];
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ int dma;
+ struct enet_statistics stats;
+ unsigned char chip_version; /* See lance_chip_type. */
+ char tx_full;
+ unsigned long lock;
+};
+
+#define LANCE_MUST_PAD 0x00000001
+#define LANCE_ENABLE_AUTOSELECT 0x00000002
+#define LANCE_MUST_REINIT_RING 0x00000004
+#define LANCE_MUST_UNRESET 0x00000008
+#define LANCE_HAS_MISSED_FRAME 0x00000010
+
+/* A mapping from the chip ID number to the part number and features.
+ These are from the datasheets -- in real life the '970 version
+ reportedly has the same ID as the '965. */
+static struct lance_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x0000, "LANCE 7990", /* Ancient lance chip. */
+ LANCE_MUST_PAD + LANCE_MUST_UNRESET},
+ {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
+ it the PCnet32. */
+ {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCnet/PCI II. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x0, "PCnet (unknown)",
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+};
+
+enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
+
+/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
+static unsigned char pci_irq_line = 0;
+
+/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
+ Assume yes until we know the memory size. */
+static unsigned char lance_need_isa_bounce_buffers = 1;
+
+static int lance_open(struct device *dev);
+static int lance_open_fail(struct device *dev);
+static void lance_init_ring(struct device *dev, int mode);
+static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
+static int lance_rx(struct device *dev);
+static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int lance_close(struct device *dev);
+static struct enet_statistics *lance_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+
+
+#ifdef MODULE
+#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
+#define IF_NAMELEN 8 /* # of chars for storing dev->name */
+
+static int io[MAX_CARDS] = { 0, };
+static int dma[MAX_CARDS] = { 0, };
+static int irq[MAX_CARDS] = { 0, };
+
+static char ifnames[MAX_CARDS][IF_NAMELEN] = { {0, }, };
+static struct device dev_lance[MAX_CARDS] =
+{{
+ 0, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL}};
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ struct device *dev = &dev_lance[this_dev];
+ dev->name = ifnames[this_dev];
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->dma = dma[this_dev];
+ dev->init = lance_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only complain once */
+ printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
+ return -EPERM;
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "lance.c: No PCnet/LANCE card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ struct device *dev = &dev_lance[this_dev];
+ if (dev->priv != NULL) {
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_dma(dev->dma);
+ release_region(dev->base_addr, LANCE_TOTAL_SIZE);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
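+
+/* Typical module usage (illustrative only -- the exact insmod syntax depends
+   on the modutils in use): the io= value is required, e.g.
+       insmod lance.o io=0x300 irq=10 dma=5
+   Multiple cards can be configured by giving each parameter a
+   comma-separated list of values, depending on the insmod version. */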
+
+/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
+ board probes, now that kmalloc() can allocate ISA DMA-able regions.
+ This also allows the LANCE driver to be used as a module.
+ */
+int lance_probe(struct device *dev)
+{
+ int *port, result;
+
+ if (high_memory <= 16*1024*1024)
+ lance_need_isa_bounce_buffers = 0;
+
+#if defined(CONFIG_PCI)
+ if (pcibios_present()) {
+ int pci_index;
+ if (lance_debug > 1)
+ printk("lance.c: PCI bios is present, checking for devices...\n");
+ for (pci_index = 0; pci_index < 8; pci_index++) {
+ unsigned char pci_bus, pci_device_fn;
+ unsigned int pci_ioaddr;
+ unsigned short pci_command;
+
+ if (pcibios_find_device (PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_LANCE, pci_index,
+ &pci_bus, &pci_device_fn) != 0)
+ break;
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+ /* PCI Spec 2.1 states that it is either the driver or PCI card's
+ * responsibility to set the PCI Master Enable Bit if needed.
+ * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
+ */
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ if ( ! (pci_command & PCI_COMMAND_MASTER)) {
+ printk("PCI Master Bit has not been set. Setting...\n");
+ pci_command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, pci_command);
+ }
+ printk("Found PCnet/PCI at %#x, irq %d.\n",
+ pci_ioaddr, pci_irq_line);
+ result = lance_probe1(dev, pci_ioaddr, pci_irq_line, 0);
+ pci_irq_line = 0;
+ if (!result) return 0;
+ }
+ }
+#endif /* defined(CONFIG_PCI) */
+
+ for (port = lance_portlist; *port; port++) {
+ int ioaddr = *port;
+
+ if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
+ /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
+ signatures w/ minimal I/O reads */
+ char offset15, offset14 = inb(ioaddr + 14);
+
+ if ((offset14 == 0x52 || offset14 == 0x57) &&
+ ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44)) {
+ result = lance_probe1(dev, ioaddr, 0, 0);
+ if ( !result ) return 0;
+ }
+ }
+ }
+ return -ENODEV;
+}
+
+int lance_probe1(struct device *dev, int ioaddr, int irq, int options)
+{
+ struct lance_private *lp;
+ short dma_channels; /* Mark spuriously-busy DMA channels */
+ int i, reset_val, lance_version;
+ const char *chipname;
+ /* Flags for specific chips or boards. */
+ unsigned char hpJ2405A = 0; /* HP ISA adaptor */
+ int hp_builtin = 0; /* HP on-board ethernet. */
+ static int did_version = 0; /* Already printed version info. */
+
+ /* First we look for special cases.
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
+ There are two HP versions, check the BIOS for the configuration port.
+ This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
+ */
+ if (readw(0x000f0102) == 0x5048) {
+ static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
+ int hp_port = (readl(0x000f00f1) & 1) ? 0x499 : 0x99;
+ /* We can have boards other than the built-in! Verify this is on-board. */
+ if ((inb(hp_port) & 0xc0) == 0x80
+ && ioaddr_table[inb(hp_port) & 3] == ioaddr)
+ hp_builtin = hp_port;
+ }
+ /* We also recognize the HP Vectra on-board here, but check below. */
+ hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
+ && inb(ioaddr+2) == 0x09);
+
+ /* Reset the LANCE. */
+ reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
+
+ /* The Un-Reset is only needed for the real NE2100, and will
+ confuse the HP board. */
+ if (!hpJ2405A)
+ outw(reset_val, ioaddr+LANCE_RESET);
+
+ outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+LANCE_DATA) != 0x0004)
+ return -ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+LANCE_ADDR);
+ if (inw(ioaddr+LANCE_ADDR) != 88) {
+ lance_version = 0;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+LANCE_DATA);
+ outw(89, ioaddr+LANCE_ADDR);
+ chip_version |= inw(ioaddr+LANCE_DATA) << 16;
+ if (lance_debug > 2)
+ printk(" LANCE chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
+ if (chip_table[lance_version].id_number == chip_version)
+ break;
+ }
+ }
+
+ /* We can't use init_etherdev() to allocate dev->priv because it must
+ be an ISA DMA-able region. */
+ dev = init_etherdev(dev, 0);
+ dev->open = lance_open_fail;
+ chipname = chip_table[lance_version].name;
+ printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ dev->base_addr = ioaddr;
+ request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
+
+ /* Make certain the data structures used by the LANCE are aligned and DMAble. */
+
+ lp = (struct lance_private *)(((unsigned long)kmalloc(sizeof(*lp)+7,
+ GFP_DMA | GFP_KERNEL)+7) & ~7);
+ if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
+ memset(lp, 0, sizeof(*lp));
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (lance_need_isa_bounce_buffers)
+ lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ else
+ lp->tx_bounce_buffs = NULL;
+
+ lp->chip_version = lance_version;
+
+ lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+
+ if (irq) { /* Set iff PCI card. */
+ dev->dma = 4; /* Native bus-master, no DMA channel needed. */
+ dev->irq = irq;
+ } else if (hp_builtin) {
+ static const char dma_tbl[4] = {3, 5, 6, 0};
+ static const char irq_tbl[4] = {3, 4, 5, 9};
+ unsigned char port_val = inb(hp_builtin);
+ dev->dma = dma_tbl[(port_val >> 4) & 3];
+ dev->irq = irq_tbl[(port_val >> 2) & 3];
+ printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (hpJ2405A) {
+ static const char dma_tbl[4] = {3, 5, 6, 7};
+ static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
+ short reset_val = inw(ioaddr+LANCE_RESET);
+ dev->dma = dma_tbl[(reset_val >> 2) & 3];
+ dev->irq = irq_tbl[(reset_val >> 4) & 7];
+ printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
+ short bus_info;
+ outw(8, ioaddr+LANCE_ADDR);
+ bus_info = inw(ioaddr+LANCE_BUS_IF);
+ dev->dma = bus_info & 0x07;
+ dev->irq = (bus_info >> 4) & 0x0F;
+ } else {
+ /* The DMA channel may be passed in PARAM1. */
+ if (dev->mem_start & 0x07)
+ dev->dma = dev->mem_start & 0x07;
+ }
+
+ if (dev->dma == 0) {
+ /* Read the DMA channel status register, so that we can avoid
+ stuck DMA channels in the DMA detection below. */
+ dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ }
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d", dev->irq);
+ else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ boards will work. */
+ autoirq_setup(0);
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ dev->irq = autoirq_report(2);
+ if (dev->irq)
+ printk(", probed IRQ %d", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ return -ENODEV;
+ }
+
+ /* Check for the initialization done bit, 0x0100, which means
+ that we don't need a DMA channel. */
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ dev->dma = 4;
+ }
+
+ if (dev->dma == 4) {
+ printk(", no DMA needed.\n");
+ } else if (dev->dma) {
+ if (request_dma(dev->dma, chipname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ return -ENODEV;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else { /* OK, we have to auto-DMA. */
+ for (i = 0; i < 4; i++) {
+ static const char dmas[] = { 5, 6, 7, 3 };
+ int dma = dmas[i];
+ int boguscnt;
+
+ /* Don't enable a permanently busy DMA channel, or the machine
+ will hang. */
+ if (test_bit(dma, &dma_channels))
+ continue;
+ outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
+ if (request_dma(dma, chipname))
+ continue;
+ set_dma_mode(dma, DMA_MODE_CASCADE);
+ enable_dma(dma);
+
+ /* Trigger an initialization. */
+ outw(0x0001, ioaddr+LANCE_DATA);
+ for (boguscnt = 100; boguscnt > 0; --boguscnt)
+ if (inw(ioaddr+LANCE_DATA) & 0x0900)
+ break;
+ if (inw(ioaddr+LANCE_DATA) & 0x0100) {
+ dev->dma = dma;
+ printk(", DMA %d.\n", dev->dma);
+ break;
+ } else {
+ disable_dma(dma);
+ free_dma(dma);
+ }
+ }
+ if (i == 4) { /* Failure: bail. */
+ printk("DMA detection failed.\n");
+ return -ENODEV;
+ }
+ }
+
+ if (lance_version == 0 && dev->irq == 0) {
+ /* We may auto-IRQ now that we have a DMA channel. */
+ /* Trigger an initialization just for the interrupt. */
+ autoirq_setup(0);
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ dev->irq = autoirq_report(4);
+ if (dev->irq == 0) {
+ printk(" Failed to detect the 7990 IRQ line.\n");
+ return -ENODEV;
+ }
+ printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
+ }
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* Turn on auto-select of media (10baseT or BNC) so that the user
+ can watch the LEDs even if the board isn't opened. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Don't touch 10base2 power bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 0 && did_version++ == 0)
+ printk("%s", version);
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->open = lance_open;
+ dev->hard_start_xmit = lance_start_xmit;
+ dev->stop = lance_close;
+ dev->get_stats = lance_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+
+ return 0;
+}
+
+static int
+lance_open_fail(struct device *dev)
+{
+ return -ENODEV;
+}
+
+
+
+static int
+lance_open(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ /* We used to allocate DMA here, but that was silly.
+ DMA lines can't be shared! We now permanently allocate them. */
+
+ /* Reset the LANCE */
+ inw(ioaddr+LANCE_RESET);
+
+ /* The DMA controller is used as a no-operation slave, "cascade mode". */
+ if (dev->dma != 4) {
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ }
+
+ /* Un-Reset the LANCE, needed only for the NE2100. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
+ outw(0, ioaddr+LANCE_RESET);
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Only touch autoselect bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 1)
+ printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, dev->dma,
+ (u32) virt_to_bus(lp->tx_ring),
+ (u32) virt_to_bus(lp->rx_ring),
+ (u32) virt_to_bus(&lp->init_block));
+
+ lance_init_ring(dev, GFP_KERNEL);
+ /* Re-initialize the LANCE, and start it when done. */
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+
+ outw(0x0004, ioaddr+LANCE_ADDR);
+ outw(0x0915, ioaddr+LANCE_DATA);
+
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0001, ioaddr+LANCE_DATA);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+LANCE_DATA);
+
+ if (lance_debug > 2)
+ printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/* The LANCE has been halted for one reason or another (busmaster memory
+ arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ etc.). Modern LANCE variants always reload their ring-buffer
+ configuration when restarted, so we must reinitialize our ring
+ context before restarting. As part of this reinitialization,
+ find all packets still on the Tx ring and pretend that they had been
+ sent (in effect, drop the packets on the floor) - the higher-level
+ protocols will time out and retransmit. It'd be better to shuffle
+ these skbs to a temp list and then actually re-Tx them after
+ restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+*/
+
+static void
+lance_purge_tx_ring(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+static void
+lance_init_ring(struct device *dev, int gfp)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ lp->lock = 0, lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ void *rx_buff;
+
+ skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
+ lp->rx_skbuff[i] = skb;
+ if (skb) {
+ skb->dev = dev;
+ rx_buff = skb->tail;
+ } else
+ rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
+ if (rx_buff == NULL)
+ lp->rx_ring[i].base = 0;
+ else
+ lp->rx_ring[i].base = (u32)virt_to_bus(rx_buff) | 0x80000000;
+ lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_skbuff[i] = 0;
+ lp->tx_ring[i].base = 0;
+ }
+
+ lp->init_block.mode = 0x0000;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+}
+
+static void
+lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+
+ if (must_reinit ||
+ (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
+ lance_purge_tx_ring(dev);
+ lance_init_ring(dev, GFP_ATOMIC);
+ }
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(csr0_bits, dev->base_addr + LANCE_DATA);
+}
+
+static int
+lance_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 20)
+ return 1;
+ outw(0, ioaddr+LANCE_ADDR);
+ printk("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+ outw(0x0004, ioaddr+LANCE_DATA);
+ lp->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0 ; i < RX_RING_SIZE; i++)
+ printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length);
+ for (i = 0 ; i < TX_RING_SIZE; i++)
+ printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc);
+ printk("\n");
+ }
+#endif
+ lance_restart(dev, 0x0043, 1);
+
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+ if (lance_debug > 3) {
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+LANCE_DATA));
+ outw(0x0000, ioaddr+LANCE_DATA);
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ if (set_bit(0, (void*)&lp->lock) != 0) {
+ if (lance_debug > 0)
+ printk("%s: tx queue lock!.\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return 1;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* The old LANCE chips don't automatically pad buffers to min. size. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
+ lp->tx_ring[entry].length =
+ -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
+ } else
+ lp->tx_ring[entry].length = -skb->len;
+
+ lp->tx_ring[entry].misc = 0x0000;
+
+ /* If any part of this buffer is >16M we must copy it to a low-memory
+ buffer. */
+ if ((u32)virt_to_bus(skb->data) + skb->len > 0x01000000) {
+ if (lance_debug > 5)
+ printk("%s: bouncing a high-memory packet (%#x).\n",
+ dev->name, (u32)virt_to_bus(skb->data));
+ memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+ dev_kfree_skb (skb, FREE_WRITE);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+ }
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0048, ioaddr+LANCE_DATA);
+
+ dev->trans_start = jiffies;
+
+ save_flags(flags);
+ cli();
+ lp->lock = 0;
+ if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
+ dev->tbusy=0;
+ else
+ lp->tx_full = 1;
+ restore_flags(flags);
+
+ return 0;
+}
+
+/* The LANCE interrupt handler. */
+static void
+lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)dev_id;
+ struct lance_private *lp;
+ int csr0, ioaddr, boguscnt=10;
+ int must_restart;
+
+ if (dev == NULL) {
+ printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct lance_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ outw(0x00, dev->base_addr + LANCE_ADDR);
+ while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
+ && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
+
+ must_restart = 0;
+
+ if (lance_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ lance_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = lp->tx_ring[entry].base;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x40000000) {
+ /* There was a major error; log it. */
+ int err_status = lp->tx_ring[entry].misc;
+ lp->stats.tx_errors++;
+ if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x1000) lp->stats.tx_window_errors++;
+ if (err_status & 0x4000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x18000000)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb if it's not a data-only copy
+ in the bounce buffer. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x0004, dev->base_addr + LANCE_DATA);
+ lance_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x7940, dev->base_addr + LANCE_DATA);
+
+ if (lance_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + LANCE_ADDR),
+ inw(dev->base_addr + LANCE_DATA));
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+lance_rx(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (lp->rx_ring[entry].base >= 0) {
+ int status = lp->rx_ring[entry].base >> 24;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].base &= 0x03ffffff;
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net3. */
+ short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len<60)
+ {
+ printk("%s: Runt packet!\n",dev->name);
+ lp->stats.rx_errors++;
+ }
+ else
+ {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].base |= 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
+ pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
+ lp->rx_ring[entry].base |= 0x80000000;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+static int
+lance_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ int i;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ }
+ outw(0, ioaddr+LANCE_ADDR);
+
+ if (lance_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+
+ if (dev->dma != 4)
+ disable_dma(dev->dma);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = lp->rx_skbuff[i];
+ lp->rx_skbuff[i] = 0;
+ lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
+ if (skb) {
+ skb->free = 1;
+ dev_kfree_skb(skb, FREE_WRITE);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i])
+ dev_kfree_skb(lp->tx_skbuff[i], FREE_WRITE);
+ lp->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static struct enet_statistics *
+lance_get_stats(struct device *dev)
+{
+ struct lance_private *lp = (struct lance_private *)dev->priv;
+ short ioaddr = dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ save_flags(flags);
+ cli();
+ saved_addr = inw(ioaddr+LANCE_ADDR);
+ outw(112, ioaddr+LANCE_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ outw(saved_addr, ioaddr+LANCE_ADDR);
+ restore_flags(flags);
+ }
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outw(0, ioaddr+LANCE_ADDR);
+ outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int i;
+ int num_addrs=dev->mc_count;
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ outw(8 + i, ioaddr+LANCE_ADDR);
+ outw(multicast_table[i], ioaddr+LANCE_DATA);
+ }
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
+ }
+
+ lance_restart(dev, 0x0142, 0); /* Resume normal operation */
+
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/myson803.c b/linux/src/drivers/net/myson803.c
new file mode 100644
index 0000000..545d124
--- /dev/null
+++ b/linux/src/drivers/net/myson803.c
@@ -0,0 +1,1650 @@
+/* myson803.c: A Linux device driver for the Myson mtd803 Ethernet chip. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/myson803.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"myson803.c:v1.05 3/10/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/drivers.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: myson803_probe
+config-in: tristate 'Myson MTD803 series Ethernet support' CONFIG_MYSON_ETHER
+
+c-help-name: Myson MTD803 PCI Ethernet support
+c-help-symbol: CONFIG_MYSON_ETHER
+c-help: This driver is for the Myson MTD803 Ethernet adapter series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 40;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit Tx ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/* Kernels before 2.1.0 cannot map the high addrs assigned by some BIOSes. */
+#if (LINUX_VERSION_CODE < 0x20100) || ! defined(MODULE)
+#define USE_IO_OPS
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Myson mtd803 Ethernet driver");
+MODULE_LICENSE("GPL");
+/* List in order of common use. */
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(full_duplex, "Non-zero to force full duplex, "
+ "non-negotiated link (deprecated).");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Maximum events handled per interrupt");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Myson mtd803 chip.
+It should work with other Myson 800 series chips.
+
+II. Board-specific settings
+
+None.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+When unaligned buffers are permitted by the hardware (and always on copies)
+frames are put into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IIIe. SMP semantics
+
+The following are serialized with respect to each other via the "xmit_lock".
+ dev->hard_start_xmit() Transmit a packet
+ dev->tx_timeout() Transmit watchdog for stuck Tx
+ dev->set_multicast_list() Set the receive filter.
+Note: The Tx timeout watchdog code is implemented by the timer routine in
+kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
+driver interface.
+
+The following fall under the global kernel lock. The module will not be
+unloaded during the call, unless a call with a potential reschedule e.g.
+kmalloc() is called. No other synchronization assertion is made.
+ dev->open()
+ dev->do_ioctl()
+ dev->get_stats()
+Caution: The lock for dev->open() is commonly broken with request_irq() or
+kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
+get_stats(), or additional module locking code must be implemented.
+
+The following is self-serialized (no simultaneous entry)
+ A handler registered with request_irq().
+
+IV. Notes
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://scyld.com/expert/NWay.html
+http://www.myson.com.hk/mtd/datasheet/mtd803.pdf
+ Myson does not require an NDA to read the datasheet.
+
+IVc. Errata
+
+No undocumented errata.
+*/
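+
+/* A minimal sketch of the copy-break decision described in section IIIb/c
+   above, assuming locals named 'entry' and 'pkt_len'.  It is illustrative
+   only -- the driver's netdev_rx() below is authoritative, and with the
+   default rx_copybreak of 0 no copy is ever made:
+
+	if (pkt_len < np->rx_copybreak
+		&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		skb->dev = dev;
+		skb_reserve(skb, 2);		// 16-byte align the IP header
+		eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+		skb_put(skb, pkt_len);
+		// The full-sized ring skbuff stays in place for reuse.
+	} else {
+		skb = np->rx_skbuff[entry];	// Pass the ring skbuff up directly;
+		np->rx_skbuff[entry] = NULL;	// it is replaced in a later pass.
+		skb_put(skb, pkt_len);
+	}
+	skb->protocol = eth_type_trans(skb, dev);
+	netif_rx(skb);
+*/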
+
+
+
+/* PCI probe routines. */
+
+static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int netdev_pwr_event(void *dev_instance, int event);
+
+/* Chips prior to the 803 have an external MII transceiver. */
+enum chip_capability_flags { HasMIIXcvr=1, HasChipXcvr=2 };
+
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#define PCI_IOSIZE 256
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#define PCI_IOSIZE 1024
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Myson mtd803 Fast Ethernet", {0x08031516, 0xffffffff, },
+ PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
+ {"Myson mtd891 Gigabit Ethernet", {0x08911516, 0xffffffff, },
+ PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info myson803_drv_id = {
+ "myson803", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, myson_probe1,
+ netdev_pwr_event };
+
+/* This driver was written to use PCI memory space; however, x86-oriented
+ hardware sometimes works only with I/O space accesses. */
+#ifdef USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the various registers.
+ Most accesses must be longword aligned. */
+enum register_offsets {
+ StationAddr=0x00, MulticastFilter0=0x08, MulticastFilter1=0x0C,
+ FlowCtrlAddr=0x10, RxConfig=0x18, TxConfig=0x1a, PCIBusCfg=0x1c,
+ TxStartDemand=0x20, RxStartDemand=0x24,
+ RxCurrentPtr=0x28, TxRingPtr=0x2c, RxRingPtr=0x30,
+ IntrStatus=0x34, IntrEnable=0x38,
+ FlowCtrlThreshold=0x3c,
+ MIICtrl=0x40, EECtrl=0x40, RxErrCnts=0x44, TxErrCnts=0x48,
+ PHYMgmt=0x4c,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxErr=0x0002, IntrRxDone=0x0004, IntrTxDone=0x0008,
+ IntrTxEmpty=0x0010, IntrRxEmpty=0x0020, StatsMax=0x0040, RxEarly=0x0080,
+ TxEarly=0x0100, RxOverflow=0x0200, TxUnderrun=0x0400,
+ IntrPCIErr=0x2000, NWayDone=0x4000, LinkChange=0x8000,
+};
+
+/* Bits in the RxMode (np->txrx_config) register. */
+enum rx_mode_bits {
+ RxEnable=0x01, RxFilter=0xfe,
+ AcceptErr=0x02, AcceptRunt=0x08, AcceptBroadcast=0x40,
+ AcceptMulticast=0x20, AcceptAllPhys=0x80, AcceptMyPhys=0x00,
+ RxFlowCtrl=0x2000,
+ TxEnable=0x40000, TxModeFDX=0x00100000, TxThreshold=0x00e00000,
+};
+
+/* Misc. bits. */
+enum misc_bits {
+ BCR_Reset=1, /* PCIBusCfg */
+ TxThresholdInc=0x200000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ u32 status;
+ u32 ctrl_length;
+ u32 buf_addr;
+ u32 next_desc;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000,
+ RxDescStartPacket=0x0800, RxDescEndPacket=0x0400, RxDescWholePkt=0x0c00,
+ RxDescErrSum=0x80, RxErrRunt=0x40, RxErrLong=0x20, RxErrFrame=0x10,
+ RxErrCRC=0x08, RxErrCode=0x04,
+ TxErrAbort=0x2000, TxErrCarrier=0x1000, TxErrLate=0x0800,
+ TxErr16Colls=0x0400, TxErrDefer=0x0200, TxErrHeartbeat=0x0100,
+ TxColls=0x00ff,
+};
+/* Bits in network_desc.ctrl_length */
+enum ctrl_length_bits {
+ TxIntrOnDone=0x80000000, TxIntrOnFIFO=0x40000000,
+ TxDescEndPacket=0x20000000, TxDescStartPacket=0x10000000,
+ TxAppendCRC=0x08000000, TxPadTo64=0x04000000, TxNormalPkt=0x3C000000,
+};
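+/* Note that TxNormalPkt (0x3C000000) is simply the OR of TxDescEndPacket,
+   TxDescStartPacket, TxAppendCRC and TxPadTo64. */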
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc rx_ring[RX_RING_SIZE];
+ struct netdev_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int max_interrupt_work;
+ int intr_enable;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+
+ struct netdev_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int rx_died:1;
+ unsigned int txrx_config;
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+
+ unsigned int mcast_filter[2];
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id,
+ unsigned int location);
+static void mdio_write(struct net_device *dev, int phy_id,
+ unsigned int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int myson803_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&myson803_drv_id, dev) < 0)
+ return -ENODEV;
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i + 8));
+ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+ /* Fill a temp addr with the "locally administered" bit set. */
+ memcpy(dev->dev_addr, ">Linux", 6);
+ }
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
+ if (debug > 4)
+ for (i = 0; i < 0x40; i++)
+ printk("%4.4x%s",
+ eeprom_read(ioaddr, i), i % 16 != 15 ? " " : "\n");
+#endif
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* Do bogusness checks before this point.
+ We do a request_region() only to register /proc/ioports info. */
+#ifdef USE_IO_OPS
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(BCR_Reset, ioaddr + PCIBusCfg);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ if (np->drv_flags & HasMIIXcvr) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+ if (np->drv_flags & HasChipXcvr) {
+ np->phys[np->mii_cnt++] = 32;
+ printk(KERN_INFO "%s: Internal PHY status 0x%4.4x"
+ " advertising %4.4x.\n",
+ dev->name, mdio_read(dev, 32, 1), mdio_read(dev, 32, 4));
+ }
+ /* Allow forcing the media type. */
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (option & 0x220)
+ np->full_duplex = 1;
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
+ often serial bit streams generated by the host processor.
+ The example below is for the common 93c46 EEPROM, 64 16 bit words. */
+
+/* This "delay" forces out buffered PCI writes.
+ The udelay() is unreliable for timing, but some Myson NICs shipped with
+ absurdly slow EEPROMs.
+ */
+#define eeprom_delay(ee_addr) readl(ee_addr); udelay(2); readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x04<<16, EE_ChipSelect=0x88<<16,
+ EE_DataOut=0x02<<16, EE_DataIn=0x01<<16,
+ EE_Write0=0x88<<16, EE_Write1=0x8a<<16,
+};
+
+/* The EEPROM commands always start with 01.. preamble bits.
+ Commands are prepended to the variable-length address. */
+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
+
+static int eeprom_read(long addr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = addr + EECtrl;
+ int read_cmd = location | (EE_ReadCmd<<6);
+
+ writel(EE_ChipSelect, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_ChipSelect, ee_addr);
+ writel(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+ The maximum data clock rate is 2.5 Mhz.
+ The timing is decoupled from the processor clock by flushing the write
+ from the CPU write buffer with a following read, and using PCI
+ transaction timing. */
+#define mdio_in(mdio_addr) readl(mdio_addr)
+#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
+#define mdio_delay(mdio_addr) readl(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+ This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ mdio_out(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
+{
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (location >= 32)
+ return 0xffff;
+ if (phy_id >= 32) {
+ if (location < 6)
+ return readw(ioaddr + PHYMgmt + location*2);
+ else if (location == 16)
+ return readw(ioaddr + PHYMgmt + 6*2);
+ else if (location == 17)
+ return readw(ioaddr + PHYMgmt + 7*2);
+ else if (location == 18)
+ return readw(ioaddr + PHYMgmt + 10*2);
+ else
+ return 0;
+ }
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Read the two transition bits, the 16 data bits, and the wire-idle bit. */
+ for (i = 19; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id,
+ unsigned int location, int value)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (location == 4 && phy_id == np->phys[0])
+ np->advertising = value;
+ else if (location >= 32)
+ return;
+
+ if (phy_id == 32) {
+ if (location < 6)
+ writew(value, ioaddr + PHYMgmt + location*2);
+ else if (location == 16)
+ writew(value, ioaddr + PHYMgmt + 6*2);
+ else if (location == 17)
+ writew(value, ioaddr + PHYMgmt + 7*2);
+ return;
+ }
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Some chips may need to be reset. */
+
+ MOD_INC_USE_COUNT;
+
+ writel(~0, ioaddr + IntrStatus);
+
+ /* Note that both request_irq() and init_ring() call kmalloc(), which
+ can break the global kernel lock protecting this routine. */
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ /* Address register must be written as words. */
+ writel(cpu_to_le32(cpu_to_le32(get_unaligned((u32 *)dev->dev_addr))),
+ ioaddr + StationAddr);
+ writel(cpu_to_le16(cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)))),
+ ioaddr + StationAddr + 4);
+ /* Set the flow control address, 01:80:c2:00:00:01. */
+ writel(0x00c28001, ioaddr + FlowCtrlAddr);
+ writel(0x00000100, ioaddr + FlowCtrlAddr + 4);
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ writel(0x01f8, ioaddr + PCIBusCfg);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ np->txrx_config = TxEnable | RxEnable | RxFlowCtrl | 0x00600000;
+ np->mcast_filter[0] = np->mcast_filter[1] = 0;
+ np->rx_died = 0;
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty | IntrTxDone
+ | IntrTxEmpty | StatsMax | RxOverflow | TxUnderrun | IntrPCIErr
+ | NWayDone | LinkChange;
+ writel(np->intr_enable, ioaddr + IntrEnable);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), PHY status: %x %x.\n",
+ dev->name, (int)readw(ioaddr + PHYMgmt),
+ (int)readw(ioaddr + PHYMgmt + 2));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_tx_mode = np->txrx_config;
+
+ if (!np->medialock) {
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
+ if (duplex)
+ new_tx_mode |= TxModeFDX;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+ " negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ }
+ }
+ if (np->txrx_config != new_tx_mode)
+ writel(new_tx_mode, ioaddr + RxConfig);
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
+ dev->name, (int)readw(ioaddr + PHYMgmt + 10));
+ }
+ /* This will either have a small false-trigger window or will not catch
+ tbusy incorrectly set when the queue is empty. */
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ tx_timeout(dev);
+ }
+ /* It's dead Jim, no race condition. */
+ if (np->rx_died)
+ netdev_rx(dev);
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", np->tx_ring[i].status);
+ printk("\n");
+ }
+
+ /* Stop and restart the chip's Tx processes. */
+ writel(np->txrx_config & ~TxEnable, ioaddr + RxConfig);
+ writel(virt_to_bus(np->tx_ring + (np->dirty_tx%TX_RING_SIZE)),
+ ioaddr + TxRingPtr);
+ writel(np->txrx_config, ioaddr + RxConfig);
+ /* Trigger an immediate transmit demand. */
+ writel(0, dev->base_addr + TxStartDemand);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 4);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].ctrl_length = cpu_to_le32(np->rx_buf_sz);
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
+ np->rx_ring[i].status = cpu_to_le32(DescOwn);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
+ }
+ np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
+ np->tx_ring[entry].ctrl_length =
+ cpu_to_le32(TxIntrOnDone | TxNormalPkt | (skb->len << 11) | skb->len);
+ np->tx_ring[entry].status = cpu_to_le32(DescOwn);
+ np->cur_tx++;
+
+ /* On some architectures, explicitly flushing the cache lines here
+ speeds operation. */
+
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_QUEUE_LEN - 2) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ /* Wake the potentially-idle transmit channel. */
+ writel(0, dev->base_addr + TxStartDemand);
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+ "device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#endif
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ writel(intr_status, ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & IntrRxDone)
+ netdev_rx(dev);
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ int tx_status = le32_to_cpu(np->tx_ring[entry].status);
+ if (tx_status & DescOwn)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
+ | TxErr16Colls | TxErrHeartbeat)) {
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ np->stats.tx_errors++;
+ if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
+ if (tx_status & TxErrLate) np->stats.tx_window_errors++;
+ if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+ if (tx_status & TxErr16Colls) np->stats.collisions16++;
+ if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
+#else
+ if (tx_status & (TxErr16Colls|TxErrAbort))
+ np->stats.tx_aborted_errors++;
+#endif
+ } else {
+ np->stats.tx_packets++;
+ np->stats.collisions += tx_status & TxColls;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+#ifdef ETHER_STATS
+ if (tx_status & TxErrDefer) np->stats.tx_deferred++;
+#endif
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis to mark the queue non-full. */
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrRxErr | IntrRxEmpty | StatsMax | RxOverflow
+ | TxUnderrun | IntrPCIErr | NWayDone | LinkChange))
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ clear_bit(0, (void*)&dev->interrupt);
+#endif
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ int refilled = 0;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ( ! (np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
+ struct netdev_desc *desc = np->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->status);
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, np->cur_rx, desc_status >> 16, desc_status);
+ np->stats.rx_length_errors++;
+ } else if (desc_status & RxDescErrSum) {
+ /* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & (RxErrLong|RxErrRunt))
+ np->stats.rx_length_errors++;
+ if (desc_status & (RxErrFrame|RxErrCode))
+ np->stats.rx_frame_errors++;
+ if (desc_status & RxErrCRC)
+ np->stats.rx_crc_errors++;
+ } else {
+ struct sk_buff *skb;
+ /* Reported length should omit the CRC. */
+ u16 pkt_len = ((desc_status >> 16) & 0xfff) - 4;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ } else {
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (np->msg_level & NETIF_MSG_PKTDATA)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+ "%d.%d.%d.%d.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13],
+ skb->data[14], skb->data[15], skb->data[16],
+ skb->data[17]);
+#endif
+ skb->mac.raw = skb->data;
+ /* Protocol lookup disabled until verified with all kernels. */
+ if (0 && ntohs(skb->mac.ethernet->h_proto) >= 0x0800) {
+ struct ethhdr *eth = skb->mac.ethernet;
+ skb->protocol = eth->h_proto;
+ if (desc_status & 0x1000) {
+ if ((dev->flags & IFF_PROMISC) &&
+ memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+ skb->pkt_type = PACKET_OTHERHOST;
+ } else if (desc_status & 0x2000)
+ skb->pkt_type = PACKET_BROADCAST;
+ else if (desc_status & 0x4000)
+ skb->pkt_type = PACKET_MULTICAST;
+ } else
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
+ }
+ np->rx_ring[entry].ctrl_length = cpu_to_le32(np->rx_buf_sz);
+ np->rx_ring[entry].status = cpu_to_le32(DescOwn);
+ refilled++;
+ }
+
+ /* Restart Rx engine if stopped. */
+ if (refilled) { /* Perhaps "&& np->rx_died" */
+ writel(0, dev->base_addr + RxStartDemand);
+ np->rx_died = 0;
+ }
+ return refilled;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & (LinkChange | NWayDone)) {
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], 4),
+ mdio_read(dev, np->phys[0], 5));
+ /* Clear sticky bit first. */
+ readw(ioaddr + PHYMgmt + 2);
+ if (readw(ioaddr + PHYMgmt + 2) & 0x0004)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ if ((intr_status & TxUnderrun)
+ && (np->txrx_config & TxThreshold) != TxThreshold) {
+ np->txrx_config += TxThresholdInc;
+ writel(np->txrx_config, ioaddr + RxConfig);
+ np->stats.tx_fifo_errors++;
+ }
+ if (intr_status & IntrRxEmpty) {
+ printk(KERN_WARNING "%s: Out of receive buffers: no free memory.\n",
+ dev->name);
+ /* Refill Rx descriptors */
+ np->rx_died = 1;
+ netdev_rx(dev);
+ }
+ if (intr_status & RxOverflow) {
+ printk(KERN_WARNING "%s: Receiver overflow.\n", dev->name);
+ np->stats.rx_over_errors++;
+ netdev_rx(dev); /* Refill Rx descriptors */
+ get_stats(dev); /* Empty dropped counter. */
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if ((intr_status & ~(LinkChange|NWayDone|StatsMax|TxUnderrun|RxOverflow
+ |TxEarly|RxEarly|0x001e))
+ && (np->msg_level & NETIF_MSG_DRV))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr) {
+ const char *const pcierr[4] =
+ { "Parity Error", "Master Abort", "Target Abort", "Unknown Error" };
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_WARNING "%s: PCI Bus %s, %x.\n",
+ dev->name, pcierr[(intr_status>>11) & 3], intr_status);
+ }
+}
+
+/* We do not bother to spinlock statistics.
+ A race window only exists if we have non-atomic adds; the error counts are
+ typically zero, and statistics are non-critical. */
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned int rxerrs = readl(ioaddr + RxErrCnts);
+ unsigned int txerrs = readl(ioaddr + TxErrCnts);
+
+ /* The chip need only report frames silently dropped. */
+ np->stats.rx_crc_errors += rxerrs >> 16;
+ np->stats.rx_missed_errors += rxerrs & 0xffff;
+
+ /* These stats are required when the descriptor is closed before Tx. */
+ np->stats.tx_aborted_errors += txerrs >> 24;
+ np->stats.tx_window_errors += (txerrs >> 16) & 0xff;
+ np->stats.collisions += txerrs & 0xffff;
+
+ return &np->stats;
+}
+
+/* Big-endian AUTODIN II ethernet CRC calculations.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and may be in the kernel with Linux 2.5+.
+*/
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ u32 crc = ~0;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((((crc >> 31) ^ current_octet) & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
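+
+/* Illustrative sketch only, not used by this driver: the comment above
+ suggests a table-based routine for bulk data. The names ether_crc_tbl,
+ ether_crc_tbl_init, bit_reverse8 and ether_crc_bytab are hypothetical.
+ Each byte is bit-reversed and fed to the standard MSB-first
+ byte-at-a-time update, which matches the serial routine above, where the
+ data bits are shifted in LSB first. */
+#if 0
+static u32 ether_crc_tbl[256];
+
+static void ether_crc_tbl_init(void)
+{
+ int i, k;
+ for (i = 0; i < 256; i++) {
+ u32 c = (u32)i << 24;
+ for (k = 0; k < 8; k++)
+ c = (c & 0x80000000) ? (c << 1) ^ ethernet_polynomial : c << 1;
+ ether_crc_tbl[i] = c;
+ }
+}
+
+static inline unsigned char bit_reverse8(unsigned char b)
+{
+ b = (b >> 4) | (b << 4);
+ b = ((b & 0xcc) >> 2) | ((b & 0x33) << 2);
+ b = ((b & 0xaa) >> 1) | ((b & 0x55) << 1);
+ return b;
+}
+
+static u32 ether_crc_bytab(int length, unsigned char *data)
+{
+ u32 crc = ~0;
+ while (--length >= 0)
+ crc = (crc << 8)
+ ^ ether_crc_tbl[((crc >> 24) ^ bit_reverse8(*data++)) & 0xff];
+ return crc;
+}
+#endif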
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ mc_filter[1] = mc_filter[0] = ~0;
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+ | AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ mc_filter[1] = mc_filter[0] = ~0;
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) & 0x3f,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ }
+ if (mc_filter[0] != np->mcast_filter[0] ||
+ mc_filter[1] != np->mcast_filter[1]) {
+ writel(mc_filter[0], ioaddr + MulticastFilter0);
+ writel(mc_filter[1], ioaddr + MulticastFilter1);
+ np->mcast_filter[0] = mc_filter[0];
+ np->mcast_filter[1] = mc_filter[1];
+ }
+ if ((np->txrx_config & RxFilter) != rx_mode) {
+ np->txrx_config &= ~RxFilter;
+ np->txrx_config |= rx_mode;
+ writel(np->txrx_config, ioaddr + RxConfig);
+ }
+}
+
+/*
+ Handle user-level ioctl() calls.
+ We must use two numeric constants as the key because some clueless person
+ changed the value for the symbolic name.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0];
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0], data[1]);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(dev, data[0], data[1], data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x.\n",
+ dev->name, (int)readl(ioaddr + RxConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ np->txrx_config = 0;
+ writel(0, ioaddr + RxConfig);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %x %x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].ctrl_length,
+ np->tx_ring[i].buf_addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].ctrl_length,
+ np->rx_ring[i].buf_addr);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int netdev_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writel(0, ioaddr + IntrEnable);
+ writel(0, ioaddr + RxConfig);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ set_rx_mode(dev);
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&myson803_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&myson803_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)(root_net_dev->base_addr));
+#endif
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` myson803.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c myson803.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c myson803.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/natsemi.c b/linux/src/drivers/net/natsemi.c
new file mode 100644
index 0000000..0d98bea
--- /dev/null
+++ b/linux/src/drivers/net/natsemi.c
@@ -0,0 +1,1448 @@
+/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP83810 series. */
+/*
+ Written/copyright 1999-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL. License for under other terms may be
+ available. Contact the original author for details.
+
+ The original author may be reached as becker@scyld.com, or at
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/natsemi.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"natsemi.c:v1.17a 8/09/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/natsemi.html\n";
+/* Updated to recommendations in pci-skeleton v2.11. */
+
+/* Automatically extracted configuration info:
+probe-func: natsemi_probe
+config-in: tristate 'National Semiconductor DP8381x series PCI Ethernet support' CONFIG_NATSEMI
+
+c-help-name: National Semiconductor DP8381x series PCI Ethernet support
+c-help-symbol: CONFIG_NATSEMI
+c-help: This driver is for the National Semiconductor DP83810 series,
+c-help: including the 83815 chip.
+c-help: Usage information and updates are available from
+c-help: http://www.scyld.com/network/natsemi.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 512 element hash table based on the Ethernet CRC.
+ Some chip versions are reported to have unreliable multicast filter
+ circuitry. To work around an observed problem set this value to '0',
+ which will immediately switch to Rx-all-multicast.
+*/
+static int multicast_filter_limit = 100;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can only receive into aligned buffers, so architectures such
+ as the Alpha AXP might benefit from a copy-align.
+*/
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability; however, setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
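+
+/* Illustrative examples only: loading the module as
+ 'insmod natsemi.o options=0x200' forces 100Mbps full duplex on the
+ first card, and 'insmod natsemi.o debug=5' raises the message level.
+ Parameter names match the MODULE_PARM declarations below. */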
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ Understand the implications before changing these settings!
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Too-large receive rings waste memory and confound network buffer limits. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung.
+ Re-autonegotiation may take up to 3 seconds.
+ */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("National Semiconductor DP83810 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
+It also works with other chips in the DP83810 series.
+The most common board is the Netgear FA311 using the 83815.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be valid.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list, thus rings can be arbitrarily sized. Before changing the
+ring sizes you should understand the flow and cache effects of the
+full/available/empty hysteresis.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. On copies frames are put into the
+skbuff at an offset of "+2", 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+The older dp83810 chips are so uncommon that support is not relevant.
+No NatSemi datasheet was publicly available at the initial release date,
+but the dp83815 datasheet has since been published.
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+
+IVc. Errata
+
+Questionable multicast filter implementation.
+The EEPROM format is obviously the result of a chip bug.
+*/
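+
+/* Illustrative sketch, not part of the driver: the ring linkage described
+ in IIIa above. 'ring' and 'i' are hypothetical; each descriptor points at
+ the next one, and the final entry branches back to the first, closing the
+ list into a ring. */
+#if 0
+ for (i = 0; i < RX_RING_SIZE; i++)
+ ring[i].next_desc = virt_to_le32desc(&ring[(i + 1) % RX_RING_SIZE]);
+#endif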
+
+
+
+static void *natsemi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int power_event(void *dev_instance, int event);
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Netgear FA311 (NatSemi DP83815)",
+ { 0x0020100B, 0xffffffff, 0xf3111385, 0xffffffff, },
+ PCI_IOTYPE, 256, 0},
+ {"NatSemi DP83815", { 0x0020100B, 0xffffffff },
+ PCI_IOTYPE, 256, 0},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info natsemi_drv_id = {
+ "natsemi", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ natsemi_probe1, power_event };
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. Please do not change these names without good reason.
+*/
+enum register_offsets {
+ ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
+ IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18,
+ TxRingPtr=0x20, TxConfig=0x24,
+ RxRingPtr=0x30, RxConfig=0x34, ClkRunCtrl=0x3C,
+ WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
+ BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
+ StatsCtrl=0x5C, StatsData=0x60,
+ RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
+ NS_Xcvr_Mgmt = 0x80, NS_MII_BMCR=0x80, NS_MII_BMSR=0x84,
+ NS_MII_Advert=0x90, NS_MIILinkPartner=0x94,
+};
+
+/* Bits in ChipCmd. */
+enum ChipCmdBits {
+ ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
+ RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
+};
+
+/* Bits in ChipConfig. */
+enum ChipConfigBits {
+ CfgLinkGood=0x80000000, CfgFDX=0x20000000,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
+ IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
+ IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
+ IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
+ StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
+ RxStatusOverrun=0x10000,
+ RxResetDone=0x1000000, TxResetDone=0x2000000,
+ IntrPCIErr=0x00f00000,
+ IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptErr=0x20, AcceptRunt=0x10,
+ AcceptBroadcast=0xC0000000,
+ AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
+ AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ u32 next_desc;
+ s32 cmd_status;
+ u32 buf_addr;
+ u32 software_use;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+ DescNoCRC=0x10000000,
+ DescPktOK=0x08000000, RxTooLong=0x00400000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc rx_ring[RX_RING_SIZE];
+ struct netdev_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ long in_interrupt; /* Word-long for SMP locks. */
+ int max_interrupt_work;
+ int intr_enable;
+ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
+ unsigned int rx_q_empty:1; /* Set out-of-skbuffs. */
+
+ struct netdev_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* Rx filter. */
+ u32 cur_rx_mode;
+ u16 rx_filter[32];
+ int multicast_filter_limit;
+ /* FIFO and PCI burst thresholds. */
+ int tx_config, rx_config;
+ /* MII transceiver section. */
+ u16 advertising; /* NWay media advertisement */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static int rx_ring_fill(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int natsemi_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&natsemi_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *natsemi_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+ int prev_eedata;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ /* Perhaps NETIF_MSG_PROBE */
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ /* Work around the dropped serial bit. */
+ prev_eedata = eeprom_read(ioaddr, 6);
+ for (i = 0; i < 3; i++) {
+ int eedata = eeprom_read(ioaddr, i + 7);
+ dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ dev->dev_addr[i*2+1] = eedata >> 7;
+ prev_eedata = eedata;
+ }
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(ChipReset, ioaddr + ChipCmd);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* 0x10/0x20/0x100/0x200 set forced speed&duplex modes. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ writew(((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0), /* Full duplex? */
+ ioaddr + NS_MII_BMCR);
+ }
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ /* Override the PME enable from the EEPROM. */
+ writel(0x8000, ioaddr + ClkRunCtrl);
+
+ if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000) {
+ u32 chip_config = readl(ioaddr + ChipConfig);
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
+ "10%s %s duplex.\n",
+ dev->name, chip_config & 0x2000 ? "enabled, advertise"
+ : "disabled, force", chip_config & 0x4000 ? "0" : "",
+ chip_config & 0x8000 ? "full" : "half");
+ }
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Transceiver status 0x%4.4x partner %4.4x.\n",
+ dev->name, (int)readl(ioaddr + NS_MII_BMSR),
+ (int)readl(ioaddr + NS_MIILinkPartner));
+
+ return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
+ For EEPROMs with 8/10 bit addresses, see the code in other drivers.
+*/
+
+/* Delay between EEPROM clock transitions.
+ This "delay" forces out buffered PCI writes, which is sufficient to meet
+ the timing requirements of most EEPROMs.
+*/
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
+};
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the preamble. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+static int eeprom_read(long addr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = addr + EECtrl;
+ int read_cmd = location | EE_ReadCmd;
+ writel(EE_Write0, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ for (i = 0; i < 16; i++) {
+ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_Write0, ee_addr);
+ writel(0, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ The 83815 series has an internal, directly accessible transceiver.
+ We present the management registers as if they were MII connected. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ if (phy_id == 1 && location < 32)
+ return readw(dev->base_addr + NS_Xcvr_Mgmt + (location<<2));
+ else
+ return 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ if (phy_id == 1 && location < 32)
+ writew(value, dev->base_addr + NS_Xcvr_Mgmt + (location<<2));
+}
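+
+/* Illustrative sketch (not in the original driver): since the internal
+   transceiver is presented as MII PHY 1, a generic autonegotiation restart
+   could be issued through mdio_write() with the standard BMCR bits
+   (0x1000 enable autonegotiation, 0x0200 restart it).  Shown disabled. */
+#if 0
+static void restart_autoneg(struct net_device *dev)
+{
+	mdio_write(dev, 1, 0, 0x1200);	/* BMCR: enable + restart autoneg. */
+}
+#endif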
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* We do not need to reset the '815 chip. */
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ for (i = 0; i < 6; i += 2) {
+ writel(i, ioaddr + RxFilterAddr);
+ writel(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
+ ioaddr + RxFilterData);
+ }
+
+ /* Initialize other registers. */
+ /* See the datasheet for this correction. */
+ if (readl(ioaddr + ChipRevReg) == 0x0203) {
+ writew(0x0001, ioaddr + 0xCC);
+ writew(0x18C9, ioaddr + 0xE4);
+ writew(0x0000, ioaddr + 0xFC);
+ writew(0x5040, ioaddr + 0xF4);
+ writew(0x008C, ioaddr + 0xF8);
+ }
+
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ /* Configure for standard, in-spec Ethernet. */
+
+ if (readl(ioaddr + ChipConfig) & CfgFDX) { /* Full duplex */
+ np->tx_config = 0xD0801002;
+ np->rx_config = 0x10000020;
+ } else {
+ np->tx_config = 0x10801002;
+ np->rx_config = 0x0020;
+ }
+ if (dev->mtu > 1500)
+ np->rx_config |= 0x08000000;
+ writel(np->tx_config, ioaddr + TxConfig);
+ writel(np->rx_config, ioaddr + RxConfig);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ np->in_interrupt = 0;
+
+ check_duplex(dev);
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
+ writel(np->intr_enable, ioaddr + IntrMask);
+ writel(1, ioaddr + IntrEnable);
+
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ writel(4, ioaddr + StatsCtrl); /* Clear Stats */
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int duplex;
+
+ if (np->duplex_lock)
+ return;
+ duplex = readl(ioaddr + ChipConfig) & 0x20000000 ? 1 : 0;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
+ " capability.\n", dev->name,
+ duplex ? "full" : "half");
+ if (duplex) {
+ np->rx_config |= 0x10000000;
+ np->tx_config |= 0xC0000000;
+ } else {
+ np->rx_config &= ~0x10000000;
+ np->tx_config &= ~0xC0000000;
+ }
+ writel(np->tx_config, ioaddr + TxConfig);
+ writel(np->rx_config, ioaddr + RxConfig);
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+ if (np->rx_q_empty) {
+ /* Trigger an interrupt to refill. */
+ writel(SoftIntr, ioaddr + ChipCmd);
+ }
+ /* This will either have a small false-trigger window or will not catch
+ tbusy incorrectly set when the queue is empty. */
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ tx_timeout(dev);
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
+
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", np->tx_ring[i].cmd_status);
+ printk("\n");
+ }
+
+ /* Reinitialize the hardware here. */
+ /* Stop and restart the chip's Tx processes. */
+
+ /* Trigger an immediate transmit demand. */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+/* Refill the Rx ring buffers, returning non-zero if not full. */
+static int rx_ring_fill(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned int entry;
+
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ return 1; /* Better luck next time. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
+ }
+ np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
+ }
+ return 0;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ /* MAX(PKT_BUF_SZ, dev->mtu + 8); */
+ /* I know you _want_ to change this without understanding it. Don't. */
+ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
+ np->tx_ring[i].cmd_status = 0;
+ }
+ np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+
+ /* Fill in the Rx buffers.
+ Allocation failure just leaves a "negative" np->dirty_rx. */
+ np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
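+ /* With dirty_rx wrapped below zero, cur_rx - dirty_rx evaluates to
+    RX_RING_SIZE, so the rx_ring_fill() call below walks the whole ring. */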
+ rx_ring_fill(dev);
+
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned int entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx.
+ No spinlock is needed for either Tx or Rx.
+ */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
+ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
+ np->cur_tx++;
+
+ /* On some architectures, explicitly flushing the cache for np->tx_ring
+ (sizeof(tx_ring) bytes) and for skb->data (skb->len bytes) improves performance. */
+
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_QUEUE_LEN - 4) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ /* Wake the potentially-idle transmit channel. */
+ writel(TxOn, dev->base_addr + ChipCmd);
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+ "device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ if (intr_status == 0 || intr_status == 0xffffffff)
+ break;
+
+ /* Acknowledge all of the current interrupt sources ASAP.
+ Nominally the read above accomplishes this, but... */
+ writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
+ dev->name, intr_status);
+
+ if (intr_status & (IntrRxDone | IntrRxIntr)) {
+ netdev_rx(dev);
+ np->rx_q_empty = rx_ring_fill(dev);
+ }
+
+ if (intr_status & (IntrRxIdle | IntrDrv)) {
+ unsigned int old_dirty_rx = np->dirty_rx;
+ if (rx_ring_fill(dev) == 0)
+ np->rx_q_empty = 0;
+ /* Restart Rx engine iff we did add a buffer. */
+ if (np->dirty_rx != old_dirty_rx)
+ writel(RxOn, dev->base_addr + ChipCmd);
+ }
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
+ if (tx_status & DescOwn)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x08000000) {
+ np->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ } else { /* Various Tx errors */
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
+ if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
+ if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
+ if (tx_status & 0x00200000) np->stats.tx_window_errors++;
+ np->stats.tx_errors++;
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis to mark the queue non-full. */
+ if (np->tx_full
+ && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & IntrAbnormalSummary)
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ np->restore_intr_enable = 1;
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+
+ /* If the driver owns the next entry it's a new packet. Send it up. */
+ while (desc_status < 0) { /* e.g. & DescOwn */
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In netdev_rx() entry %d status was %8.8x.\n",
+ entry, desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
+ if (desc_status & DescMore) {
+ printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
+ "multiple buffers, entry %#x status %x.\n",
+ dev->name, np->cur_rx, desc_status);
+ np->stats.rx_length_errors++;
+ } else {
+ /* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & 0x06000000) np->stats.rx_over_errors++;
+ if (desc_status & 0x00600000) np->stats.rx_length_errors++;
+ if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
+ if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ int pkt_len = (desc_status & 0x0fff) - 4; /* Omit CRC size. */
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if defined(HAS_IP_COPYSUM) || (LINUX_VERSION_CODE >= 0x20100)
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+ }
+
+ /* Refill is now done in the main interrupt loop. */
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & LinkChange) {
+ int chip_config = readl(ioaddr + ChipConfig);
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ (int)readw(ioaddr + NS_MII_Advert),
+ (int)readw(ioaddr + NS_MIILinkPartner));
+ if (chip_config & CfgLinkGood)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ /* Increase the Tx threshold, 32 byte units. */
+ if ((np->tx_config & 0x3f) < 62)
+ np->tx_config += 2; /* +64 bytes */
+ writel(np->tx_config, ioaddr + TxConfig);
+ }
+ if (intr_status & WOLPkt) {
+ int wol_status = readl(ioaddr + WOLCmd);
+ printk(KERN_NOTICE "%s: Link wake-up event %8.8x",
+ dev->name, wol_status);
+ }
+ if (intr_status & (RxStatusOverrun | IntrRxOverrun)) {
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Rx overflow! ns815 %8.8x.\n",
+ dev->name, intr_status);
+ np->stats.rx_fifo_errors++;
+ }
+ if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
+ RxStatusOverrun|0xA7ff)) {
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Something Wicked happened! natsemi %8.8x.\n",
+ dev->name, intr_status);
+ }
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr) {
+ np->stats.tx_fifo_errors++;
+ np->stats.rx_fifo_errors++;
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int crc_errs = readl(ioaddr + RxCRCErrs);
+
+ if (crc_errs != 0xffffffff) {
+ /* We need not lock this segment of code for SMP.
+ There is no atomic-add vulnerability for most CPUs,
+ and statistics are non-critical. */
+ /* The chip only needs to report frames it silently dropped. */
+ np->stats.rx_crc_errors += crc_errs;
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+ }
+
+ return &np->stats;
+}
+
+/* The big-endian AUTODIN II ethernet CRC calculations.
+ See ns820.c for how to fill the table on new chips.
+ */
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
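+
+/* set_rx_mode() below uses only the low nine bits of this CRC
+   (filterbit & 0x1ff) to pick one of the 512 hash bits, then writes the
+   64-byte hash table to the chip's filter RAM sixteen bits at a time,
+   skipping words that have not changed. */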
+
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u8 mc_filter[64]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
+ | AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ int filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr);
+ set_bit(filterbit & 0x1ff, mc_filter);
+ if (np->msg_level & NETIF_MSG_RXFILTER)
+ printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x:%2.2x crc %8.8x bit %d.\n", dev->name,
+ mclist->dmi_addr[0], mclist->dmi_addr[1],
+ mclist->dmi_addr[2], mclist->dmi_addr[3],
+ mclist->dmi_addr[4], mclist->dmi_addr[5],
+ filterbit, filterbit & 0x1ff);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ for (i = 0; i < 64; i += 2) {
+ u16 filterword = (mc_filter[i+1]<<8) + mc_filter[i];
+ if (filterword != np->rx_filter[i>>2]) {
+ writel(0x200 + i, ioaddr + RxFilterAddr);
+ writel(filterword, ioaddr + RxFilterData);
+ np->rx_filter[i>>2] = filterword;
+ }
+ }
+ }
+ writel(rx_mode, ioaddr + RxFilterAddr);
+ np->cur_rx_mode = rx_mode;
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = 1;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == 1) {
+ u16 miireg = data[1] & 0x1f;
+ u16 value = data[2];
+ mdio_write(dev, 1, miireg, value);
+ switch (miireg) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (np->duplex_lock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ }
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
+ "Int %2.2x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd),
+ (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* We don't want the timer to re-start anything. */
+ del_timer(&np->timer);
+
+ /* Disable interrupts using the mask. */
+ writel(0, ioaddr + IntrMask);
+ writel(0, ioaddr + IntrEnable);
+ writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+
+ get_stats(dev);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %8.8x %8.8x.\n",
+ i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
+ i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].cmd_status = 0;
+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+#if 0
+ writel(0x0200, ioaddr + ChipConfig); /* Power down Xcvr. */
+#endif
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int power_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, freeze stats, stop Tx and Rx. */
+ writel(0, ioaddr + IntrEnable);
+ writel(2, ioaddr + StatsCtrl);
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the open() actions should be repeated. */
+ set_rx_mode(dev);
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ writel(1, ioaddr + IntrEnable);
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+ register_driver(&etherdev_ops);
+ return 0;
+#else
+ return pci_drv_register(&natsemi_drv_id, NULL);
+#endif
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&etherdev_ops);
+#else
+ pci_drv_unregister(&natsemi_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+ iounmap((char *)root_net_dev->base_addr);
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` natsemi.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c natsemi.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c natsemi.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/ne.c b/linux/src/drivers/net/ne.c
new file mode 100644
index 0000000..ea2f929
--- /dev/null
+++ b/linux/src/drivers/net/ne.c
@@ -0,0 +1,812 @@
+/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
+/*
+ Written 1992-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver should work with many programmed-I/O 8390-based ethernet
+ boards. Currently it supports the NE1000, NE2000, many clones,
+ and some Cabletron products.
+
+ Changelog:
+
+ Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made
+ sanity checks and bad clone support optional.
+ Paul Gortmaker : new reset code, reset card after probe at boot.
+ Paul Gortmaker : multiple card support for module users.
+ Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c
+ Paul Gortmaker : Allow users with bad cards to avoid full probe.
+ Paul Gortmaker : PCI probe changes, more PCI cards supported.
+
+*/
+
+/* Routines for the NatSemi-based designs (NE[12]000). */
+
+static const char *version =
+ "ne.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
+#define SUPPORT_NE_BAD_CLONES
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+#if defined(HAVE_DEVLIST) || !defined(MODULE)
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int netcard_portlist[] =
+{ 0x300, 0x280, 0x320, 0x340, 0x360, 0};
+#endif /* defined(HAVE_DEVLIST) || !defined(MODULE) */
+
+#ifdef CONFIG_PCI
+/* Ack! People are making PCI ne2000 clones! Oh the horror, the horror... */
+static struct { unsigned short vendor, dev_id;}
+pci_clone_list[] = {
+ {PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8029},
+ {PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940},
+ {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_RL2000},
+ {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_ET32P2},
+ {PCI_VENDOR_ID_NETVIN, PCI_DEVICE_ID_NETVIN_NV5000SC},
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C926},
+ {0,}
+};
+#endif
+
+#ifdef SUPPORT_NE_BAD_CLONES
+/* A list of bad clones that we nonetheless recognize. */
+static struct { const char *name8, *name16; unsigned char SAprefix[4];}
+bad_clone_list[] = {
+ {"DE100", "DE200", {0x00, 0xDE, 0x01,}},
+ {"DE120", "DE220", {0x00, 0x80, 0xc8,}},
+ {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */
+ {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}},
+ {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */
+ {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */
+ {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */
+ {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */
+ {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */
+ {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */
+ {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
+ {"RealTek 8029", "RealTek 8029", {0x00, 0x3e, 0x4d}}, /* RealTek PCI cards */
+ {0,}
+};
+#endif
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT 0x20
+
+#define NE1SM_START_PG 0x20 /* First page of TX buffer */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
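+
+/* Each 8390 "page" is 256 bytes of on-card packet memory.  The transmit
+   buffer starts at *_START_PG and occupies TX_PAGES pages; the receive ring
+   uses the remaining pages up to (but not including) *_STOP_PG, as set up in
+   ne_probe1() via ei_status.tx_start_page / rx_start_page / stop_page. */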
+
+/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
+static unsigned char pci_irq_line = 0;
+
+int ne_probe(struct device *dev);
+#ifdef CONFIG_PCI
+static int ne_probe_pci(struct device *dev);
+#endif
+static int ne_probe1(struct device *dev, int ioaddr);
+
+static int ne_open(struct device *dev);
+static int ne_close(struct device *dev);
+
+static void ne_reset_8390(struct device *dev);
+static void ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+/* Probe for various non-shared-memory ethercards.
+
+ NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+ buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+ the SAPROM, while other supposed NE2000 clones must be detected by their
+ SA prefix.
+
+ Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+ mode results in doubled values, which can be detected and compensated for.
+
+ The probe is also responsible for initializing the card and filling
+ in the 'dev' and 'ei_status' structures.
+
+ We use the minimum memory size for some ethercard product lines, iff we can't
+ distinguish models. You can increase the packet buffer size by setting
+ PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
+ E1010 starts at 0x100 and ends at 0x2000.
+ E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+ E2010 starts at 0x100 and ends at 0x4000.
+ E2010-x starts at 0x100 and ends at 0xffff. */
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+{"ne", ne_probe1, NE_IO_EXTENT, netcard_portlist};
+#else
+
+/* Note that this probe only picks up one card at a time, even for multiple
+ PCI ne2k cards. Use "ether=0,0,eth1" if you have a second PCI ne2k card.
+ This keeps things consistent regardless of the bus type of the card. */
+
+int ne_probe(struct device *dev)
+{
+#ifndef MODULE
+ int i;
+#endif /* MODULE */
+ int base_addr = dev ? dev->base_addr : 0;
+
+ /* First check any supplied i/o locations. User knows best. <cough> */
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ne_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+#ifdef CONFIG_PCI
+ /* Then look for any installed PCI clones */
+ if (pcibios_present() && (ne_probe_pci(dev) == 0))
+ return 0;
+#endif
+
+#ifndef MODULE
+ /* Last resort. The semi-risky ISA auto-probe. */
+ for (i = 0; netcard_portlist[i]; i++) {
+ int ioaddr = netcard_portlist[i];
+ if (check_region(ioaddr, NE_IO_EXTENT))
+ continue;
+ if (ne_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+#endif
+
+ return ENODEV;
+}
+#endif
+
+#ifdef CONFIG_PCI
+static int ne_probe_pci(struct device *dev)
+{
+ int i;
+
+ for (i = 0; pci_clone_list[i].vendor != 0; i++) {
+ unsigned char pci_bus, pci_device_fn;
+ unsigned int pci_ioaddr;
+ u16 pci_command, new_command;
+ int pci_index;
+
+ for (pci_index = 0; pci_index < 8; pci_index++) {
+ if (pcibios_find_device (pci_clone_list[i].vendor,
+ pci_clone_list[i].dev_id, pci_index,
+ &pci_bus, &pci_device_fn) != 0)
+ break; /* No more of these type of cards */
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Strip the I/O address out of the returned value */
+ pci_ioaddr &= PCI_BASE_ADDRESS_IO_MASK;
+ /* Avoid already found cards from previous calls */
+ if (check_region(pci_ioaddr, NE_IO_EXTENT))
+ continue;
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ break; /* Beauty -- got a valid card. */
+ }
+ if (pci_irq_line == 0) continue; /* Try next PCI ID */
+ printk("ne.c: PCI BIOS reports NE 2000 clone at i/o %#x, irq %d.\n",
+ pci_ioaddr, pci_irq_line);
+
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+
+ /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+ new_command = pci_command | PCI_COMMAND_IO;
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled this"
+ " NE2k clone! Updating PCI command %4.4x->%4.4x.\n",
+ pci_command, new_command);
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, new_command);
+ }
+
+ if (ne_probe1(dev, pci_ioaddr) != 0) { /* Shouldn't happen. */
+ printk(KERN_ERR "ne.c: Probe of PCI card at %#x failed.\n", pci_ioaddr);
+ pci_irq_line = 0;
+ return -ENXIO;
+ }
+ pci_irq_line = 0;
+ return 0;
+ }
+ return -ENODEV;
+}
+#endif /* CONFIG_PCI */
+
+static int ne_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ unsigned char SA_prom[32];
+ int wordlength = 2;
+ const char *name = NULL;
+ int start_page, stop_page;
+ int neX000, ctron, bad_card;
+ int reg0 = inb_p(ioaddr);
+ static unsigned version_printed = 0;
+
+ if (reg0 == 0xFF)
+ return ENODEV;
+
+ /* Do a preliminary verification that we have a 8390. */
+ { int regd;
+ outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb_p(ioaddr + 0x0d);
+ outb_p(0xff, ioaddr + 0x0d);
+ outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+ outb_p(reg0, ioaddr);
+ outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */
+ return ENODEV;
+ }
+ }
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk(KERN_ERR "ne.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("NE*000 ethercard probe at %#3x:", ioaddr);
+
+ /* A user with a poor card that fails to ack the reset, or that
+ does not have a valid 0x57,0x57 signature can still use this
+ without having to recompile. Specifying an i/o address along
+ with an otherwise unused dev->mem_end value of "0xBAD" will
+ cause the driver to skip these parts of the probe. */
+
+ bad_card = ((dev->base_addr != 0) && (dev->mem_end == 0xbad));
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ { unsigned long reset_start_time = jiffies;
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ if (bad_card) {
+ printk(" (warning: no reset ack)");
+ break;
+ } else {
+ printk(" not found (no reset ack).\n");
+ return ENODEV;
+ }
+ }
+
+ outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+ SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+ if (SA_prom[i] != SA_prom[i+1])
+ wordlength = 1;
+ }
+
+ /* At this point, wordlength *only* tells us if the SA_prom is doubled
+ up or not because some broken PCI cards don't respect the byte-wide
+ request in program_seq above, and hence don't have doubled up values.
+ These broken cards would otherwise be detected as an ne1000. */
+
+ if (wordlength == 2)
+ for (i = 0; i < 16; i++)
+ SA_prom[i] = SA_prom[i+i];
+
+ if (pci_irq_line || ioaddr >= 0x400)
+ wordlength = 2; /* Catch broken PCI cards mentioned above. */
+
+ if (wordlength == 2) {
+ /* We must set the 8390 for word mode. */
+ outb_p(0x49, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+ } else {
+ start_page = NE1SM_START_PG;
+ stop_page = NE1SM_STOP_PG;
+ }
+
+ neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
+ ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
+
+ /* Set up the rest of the parameters. */
+ if (neX000 || bad_card) {
+ name = (wordlength == 2) ? "NE2000" : "NE1000";
+ } else if (ctron) {
+ name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
+ start_page = 0x01;
+ stop_page = (wordlength == 2) ? 0x40 : 0x20;
+ } else {
+#ifdef SUPPORT_NE_BAD_CLONES
+ /* Ack! Well, there might be a *bad* NE*000 clone there.
+ Check for total bogus addresses. */
+ for (i = 0; bad_clone_list[i].name8; i++) {
+ if (SA_prom[0] == bad_clone_list[i].SAprefix[0] &&
+ SA_prom[1] == bad_clone_list[i].SAprefix[1] &&
+ SA_prom[2] == bad_clone_list[i].SAprefix[2]) {
+ if (wordlength == 2) {
+ name = bad_clone_list[i].name16;
+ } else {
+ name = bad_clone_list[i].name8;
+ }
+ break;
+ }
+ }
+ if (bad_clone_list[i].name8 == NULL) {
+ printk(" not found (invalid signature %2.2x %2.2x).\n",
+ SA_prom[14], SA_prom[15]);
+ return ENXIO;
+ }
+#else
+ printk(" not found.\n");
+ return ENXIO;
+#endif
+
+ }
+
+ if (pci_irq_line)
+ dev->irq = pci_irq_line;
+
+ if (dev->irq < 2) {
+ autoirq_setup(0);
+ outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */
+ outb_p(0x00, ioaddr + EN0_RCNTLO);
+ outb_p(0x00, ioaddr + EN0_RCNTHI);
+ outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */
+ outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
+ dev->irq = autoirq_report(0);
+ if (ei_debug > 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ or don't know which one to set. */
+ dev->irq = 9;
+
+ if (! dev->irq) {
+ printk(" failed to detect IRQ line.\n");
+ return EAGAIN;
+ }
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share (with ISA cards) and the board will usually be enabled. */
+ {
+ int irqval = request_irq(dev->irq, ei_interrupt,
+ pci_irq_line ? SA_SHIRQ : 0, name, dev);
+ if (irqval) {
+ printk (" unable to get IRQ %d (irqval=%d).\n", dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+
+ dev->base_addr = ioaddr;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ free_irq(dev->irq, NULL);
+ return -ENOMEM;
+ }
+
+ request_region(ioaddr, NE_IO_EXTENT, name);
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ printk(" %2.2x", SA_prom[i]);
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ printk("\n%s: %s found at %#x, using IRQ %d.\n",
+ dev->name, name, ioaddr, dev->irq);
+
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = (wordlength == 2);
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne_reset_8390;
+ ei_status.block_input = &ne_block_input;
+ ei_status.block_output = &ne_block_output;
+ ei_status.get_8390_hdr = &ne_get_8390_hdr;
+ dev->open = &ne_open;
+ dev->stop = &ne_close;
+ NS8390_init(dev, 0);
+ return 0;
+}
+
+static int
+ne_open(struct device *dev)
+{
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int
+ne_close(struct device *dev)
+{
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+static void
+ne_reset_8390(struct device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies);
+
+ /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2*HZ/100) {
+ printk("%s: ne_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb_p(0, nic_base + EN0_RCNTHI);
+ outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb_p(ring_page, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.word16)
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ else
+ insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+static void
+ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+#ifdef NE_SANITY_CHECK
+ int xfer_count = count;
+#endif
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+ xfer_count++;
+#endif
+ }
+ } else {
+ insb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. If you see
+ this message you either 1) have a slightly incompatible clone
+ or 2) have noise/speed problems with your bus. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+ -- it's broken for Rx on some cards! */
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if (((ring_offset + xfer_count) & 0xff) == low)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0)
+ printk("%s: RX transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, ring_offset + xfer_count, addr);
+ }
+#endif
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void
+ne_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+ int retries = 0;
+#endif
+
+ /* Round the count up for word writes. Do we need to do this?
+ What effect will an odd byte count have on the 8390?
+ I should check someday. */
+ if (ei_status.word16 && (count & 0x01))
+ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+ retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb_p(0x42, nic_base + EN0_RCNTLO);
+ outb_p(0x00, nic_base + EN0_RCNTHI);
+ outb_p(0x42, nic_base + EN0_RSARLO);
+ outb_p(0x00, nic_base + EN0_RSARHI);
+ outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ /* Make certain that the dummy read has occurred. */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+#endif
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb_p(count & 0xff, nic_base + EN0_RCNTLO);
+ outb_p(count >> 8, nic_base + EN0_RCNTHI);
+ outb_p(0x00, nic_base + EN0_RSARLO);
+ outb_p(start_page, nic_base + EN0_RSARHI);
+
+ outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.word16) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsb(NE_BASE + NE_DATAPORT, buf, count);
+ }
+
+ dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+ /* This was for the ALPHA version only, but enough people have
+ been encountering problems so it is still here. */
+ if (ei_debug > 1) { /* DMA termination address check... */
+ int addr, tries = 20;
+ do {
+ int high = inb_p(nic_base + EN0_RSARHI);
+ int low = inb_p(nic_base + EN0_RSARLO);
+ addr = (high << 8) + low;
+ if ((start_page << 8) + count == addr)
+ break;
+ } while (--tries > 0);
+ if (tries <= 0) {
+ printk("%s: Tx packet transfer address mismatch,"
+ "%#4.4x (expected) vs. %#4.4x (actual).\n",
+ dev->name, (start_page << 8) + count, addr);
+ if (retries++ == 0)
+ goto retry;
+ }
+ }
+#endif
+
+ while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne_reset_8390(dev);
+ NS8390_init(dev,1);
+ break;
+ }
+
+ outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+#ifdef MODULE
+#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_NE_CARDS] = { 0, };
+static struct device dev_ne[MAX_NE_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_NE_CARDS] = { 0, };
+static int irq[MAX_NE_CARDS] = { 0, };
+static int bad[MAX_NE_CARDS] = { 0, };
+
+/* This is set up so that no autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
+
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct device *dev = &dev_ne[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = ne_probe;
+ dev->mem_end = bad[this_dev];
+ if (register_netdev(dev) == 0) {
+ found++;
+ continue;
+ }
+ if (found != 0) /* Got at least one. */
+ return 0;
+ if (io[this_dev] != 0)
+ printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
+ else
+ printk(KERN_NOTICE "ne.c: No PCI cards found. Use \"io=0xNNN\" value(s) for ISA cards.\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+ struct device *dev = &dev_ne[this_dev];
+ if (dev->priv != NULL) {
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_irq(dev->irq, dev);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c ne.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/ne2k-pci.c b/linux/src/drivers/net/ne2k-pci.c
new file mode 100644
index 0000000..2b2b1f4
--- /dev/null
+++ b/linux/src/drivers/net/ne2k-pci.c
@@ -0,0 +1,647 @@
+/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */
+/*
+ A Linux device driver for PCI NE2000 clones.
+
+ Authors and other copyright holders:
+ 1992-2002 by Donald Becker, NE2000 core and various modifications.
+ 1995-1998 by Paul Gortmaker, core modifications and PCI support.
+ Copyright 1993 assigned to the United States Government as represented
+ by the Director, National Security Agency.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Issues remaining:
+ People are making PCI ne2000 clones! Oh the horror, the horror...
+ Limited full-duplex support.
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"ne2k-pci.c:v1.05 6/13/2002 D. Becker/P. Gortmaker\n";
+static const char version2[] =
+" http://www.scyld.com/network/ne2k-pci.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS] = {0, };
+static int options[MAX_UNITS] = {0, };
+
+/* Force a non std. amount of memory. Units are 256 byte pages. */
+/* #define PACKETBUF_MEMSIZE 0x40 */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#if LINUX_VERSION_CODE < 0x20200
+#define lock_8390_module()
+#define unlock_8390_module()
+#else
+#include <linux/init.h>
+#endif
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
+MODULE_DESCRIPTION("PCI NE2000 clone driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do #define LOAD_8390_BY_KERNELD to automatically load 8390 support. */
+#ifdef LOAD_8390_BY_KERNELD
+#include <linux/kerneld.h>
+#endif
+
+static void *ne2k_pci_probe1(struct pci_dev *pdev, void *dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt);
+/* Flags. We rename an existing ei_status field to store flags! */
+/* Thus only the low 8 bits are usable for non-init-time flags. */
+#define ne2k_flags reg0
+enum {
+ ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */
+ FORCE_FDX=0x20, /* User override. */
+ REALTEK_FDX=0x40, HOLTEK_FDX=0x80,
+ STOP_PG_0x60=0x100,
+};
+#define NE_IO_EXTENT 0x20
+#ifndef USE_MEMORY_OPS
+#define PCI_IOTYPE (PCI_USES_IO | PCI_ADDR0)
+#else
+#warning When using PCI memory mode the 8390 core must be compiled for memory
+#warning operations as well.
+#warning Not all PCI NE2000 clones support memory mode access.
+#define PCI_IOTYPE (PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"RealTek RTL-8029",{ 0x802910ec, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT,
+ REALTEK_FDX },
+ {"Winbond 89C940", { 0x09401050, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"Winbond w89c940", { 0x5a5a1050, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"KTI ET32P2", { 0x30008e2e, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"NetVin NV5000SC", { 0x50004a14, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"Via 86C926", { 0x09261106, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, ONLY_16BIT_IO},
+ {"SureCom NE34", { 0x0e3410bd, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {"Holtek HT80232", { 0x005812c3, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, ONLY_16BIT_IO | HOLTEK_FDX},
+ {"Holtek HT80229", { 0x559812c3, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60},
+ {"Compex RL2000",
+ { 0x140111f6, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
+ /* A mutant board: Winbond chip with a RTL format EEPROM. */
+ {"Winbond w89c940 (misprogrammed type 0x1980)", { 0x19808c4a, 0xffffffff},
+ PCI_IOTYPE, NE_IO_EXTENT, 0},
+ {0,}, /* 0 terminated list. */
+};
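+
+/* Note: the 32-bit match values above appear to pack the PCI device ID in
+   the high 16 bits and the vendor ID in the low 16 bits (for example
+   0x802910ec is device 0x8029, vendor 0x10ec for the RTL-8029), matching the
+   convention used by the pci-scan helpers that walk this table. */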
+
+struct drv_id_info ne2k_pci_drv_id = {
+ "ne2k-pci", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, ne2k_pci_probe1,
+};
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE (dev->base_addr)
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+int ne2k_pci_probe(struct net_device *dev);
+
+static int ne2k_pci_open(struct net_device *dev);
+static int ne2k_pci_close(struct net_device *dev);
+
+static void ne2k_pci_reset_8390(struct net_device *dev);
+static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ne2k_pci_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+
+/* There is no room in the standard 8390 structure for extra info we need,
+ so we build a meta/outer-wrapper structure.. */
+struct ne2k_pci_card {
+ struct ne2k_pci_card *next;
+ struct net_device *dev;
+ struct pci_dev *pci_dev;
+};
+/* A list of all installed devices, for removing the driver module. */
+static struct ne2k_pci_card *ne2k_card_list = NULL;
+
+#ifdef LOAD_8390_BY_KERNELD
+static int (*Lethdev_init)(struct net_device *dev);
+static void (*LNS8390_init)(struct net_device *dev, int startp);
+static int (*Lei_open)(struct net_device *dev);
+static int (*Lei_close)(struct net_device *dev);
+static void (*Lei_interrupt)(int irq, void *dev_id, struct pt_regs *regs);
+#else
+#define Lethdev_init ethdev_init
+#define LNS8390_init NS8390_init
+#define Lei_open ei_open
+#define Lei_close ei_close
+#define Lei_interrupt ei_interrupt
+#endif
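+
+/* Editorial note: in the usual built-in configuration the L-prefixed names
+   above are plain aliases for the 8390 core entry points; when
+   LOAD_8390_BY_KERNELD is defined they are instead function pointers that
+   ne2k_pci_probe1() resolves at init time via get_module_symbol(). */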
+
+#ifdef MODULE
+int init_module(void)
+{
+ int found_cnt;
+
+ if (debug) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ found_cnt = pci_drv_register(&ne2k_pci_drv_id, NULL);
+ if (found_cnt < 0) {
+		printk(KERN_NOTICE "ne2k-pci.c: No usable cards found, driver NOT installed.\n");
+ return -ENODEV;
+ }
+ lock_8390_module();
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ struct net_device *dev;
+ struct ne2k_pci_card *this_card;
+
+ pci_drv_unregister(&ne2k_pci_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (ne2k_card_list) {
+ dev = ne2k_card_list->dev;
+ unregister_netdev(dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+ kfree(dev);
+ this_card = ne2k_card_list;
+ ne2k_card_list = ne2k_card_list->next;
+ kfree(this_card);
+ }
+
+#ifdef LOAD_8390_BY_KERNELD
+ release_module("8390", 0);
+#else
+ unlock_8390_module();
+#endif
+}
+
+#else
+
+int ne2k_pci_probe(struct net_device *dev)
+{
+ int found_cnt = pci_drv_register(&ne2k_pci_drv_id, NULL);
+ if (found_cnt >= 0 && debug)
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return found_cnt;
+}
+#endif /* MODULE */
+
+static void *ne2k_pci_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt)
+{
+ struct net_device *dev;
+ int i;
+ unsigned char SA_prom[32];
+ int start_page, stop_page;
+ int reg0 = inb(ioaddr);
+ int flags = pci_id_tbl[chip_idx].drv_flags;
+ struct ne2k_pci_card *ne2k_card;
+
+ if (reg0 == 0xFF)
+ return 0;
+
+ /* Do a preliminary verification that we have a 8390. */
+ {
+ int regd;
+ outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ regd = inb(ioaddr + 0x0d);
+ outb(0xff, ioaddr + 0x0d);
+ outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+ if (inb(ioaddr + EN0_COUNTER0) != 0) {
+ outb(reg0, ioaddr);
+ outb(regd, ioaddr + 0x0d); /* Restore the old values. */
+ return 0;
+ }
+ }
+
+ dev = init_etherdev(init_dev, 0);
+
+ if (dev == NULL)
+ return 0;
+ ne2k_card = kmalloc(sizeof(struct ne2k_pci_card), GFP_KERNEL);
+ if (ne2k_card == NULL)
+ return 0;
+
+ ne2k_card->next = ne2k_card_list;
+ ne2k_card_list = ne2k_card;
+ ne2k_card->dev = dev;
+ ne2k_card->pci_dev = pdev;
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
+
+ outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
+
+ /* This looks like a horrible timing loop, but it should never take
+ more than a few cycles.
+ */
+ while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
+ /* Limit wait: '2' avoids jiffy roll-over. */
+ if (jiffies - reset_start_time > 2) {
+ printk("ne2k-pci: Card failure (no reset ack).\n");
+ return 0;
+ }
+
+ outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
+ }
+
+#if defined(LOAD_8390_BY_KERNELD)
+ /* We are now certain the 8390 module is required. */
+ if (request_module("8390")) {
+ printk("ne2k-pci: Failed to load the 8390 core module.\n");
+ return 0;
+ }
+ if ((Lethdev_init = (void*)get_module_symbol(0, "ethdev_init")) == 0 ||
+ (LNS8390_init = (void*)get_module_symbol(0, "NS8390_init")) == 0 ||
+ (Lei_open = (void*)get_module_symbol(0, "ei_open")) == 0 ||
+ (Lei_close = (void*)get_module_symbol(0, "ei_close")) == 0 ||
+ (Lei_interrupt = (void*)get_module_symbol(0, "ei_interrupt")) == 0 ) {
+ printk("ne2k-pci: Failed to resolve an 8390 symbol.\n");
+ release_module("8390", 0);
+ return 0;
+ }
+#endif
+
+ /* Read the 16 bytes of station address PROM.
+ We must first initialize registers, similar to NS8390_init(eifdev, 0).
+ We can't reliably read the SAPROM address without this.
+ (I learned the hard way!). */
+ {
+ struct {unsigned char value, offset; } program_seq[] = {
+ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+ {0x49, EN0_DCFG}, /* Set word-wide access. */
+ {0x00, EN0_RCNTLO}, /* Clear the count regs. */
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_IMR}, /* Mask completion irq. */
+ {0xFF, EN0_ISR},
+ {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
+ {32, EN0_RCNTLO},
+ {0x00, EN0_RCNTHI},
+ {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
+ {0x00, EN0_RSARHI},
+ {E8390_RREAD+E8390_START, E8390_CMD},
+ };
+ for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+ outb(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+ }
+
+ /* Note: all PCI cards have at least 16 bit access, so we don't have
+ to check for 8 bit cards. Most cards permit 32 bit access. */
+
+ if (flags & ONLY_32BIT_IO) {
+ for (i = 0; i < 8; i++)
+ ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT));
+ } else
+ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
+ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+
+ /* We always set the 8390 registers for word mode. */
+ outb(0x49, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+
+ stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG;
+
+ /* Set up the rest of the parameters. */
+ dev->irq = irq;
+ dev->base_addr = ioaddr;
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (Lethdev_init(dev)) {
+ printk ("%s: unable to get memory for dev->priv.\n", dev->name);
+ return 0;
+ }
+
+ request_region(ioaddr, NE_IO_EXTENT, dev->name);
+
+ printk("%s: %s found at %#lx, IRQ %d, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
+ for(i = 0; i < 6; i++) {
+ printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+ ei_status.name = pci_id_tbl[chip_idx].name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
+ ei_status.ne2k_flags = flags;
+ if (fnd_cnt < MAX_UNITS) {
+ if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) {
+ printk("%s: Full duplex set by user option.\n", dev->name);
+ ei_status.ne2k_flags |= FORCE_FDX;
+ }
+ }
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+ ei_status.reset_8390 = &ne2k_pci_reset_8390;
+ ei_status.block_input = &ne2k_pci_block_input;
+ ei_status.block_output = &ne2k_pci_block_output;
+ ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr;
+ dev->open = &ne2k_pci_open;
+ dev->stop = &ne2k_pci_close;
+ LNS8390_init(dev, 0);
+ return dev;
+}
+
+static int ne2k_pci_open(struct net_device *dev)
+{
+ MOD_INC_USE_COUNT;
+ if (request_irq(dev->irq, Lei_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+ /* Set full duplex for the chips that we know about. */
+ if (ei_status.ne2k_flags & FORCE_FDX) {
+ long ioaddr = dev->base_addr;
+ if (ei_status.ne2k_flags & REALTEK_FDX) {
+ outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */
+ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
+ } else if (ei_status.ne2k_flags & HOLTEK_FDX)
+ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
+ }
+ Lei_open(dev);
+ return 0;
+}
+
+static int ne2k_pci_close(struct net_device *dev)
+{
+ Lei_close(dev);
+ free_irq(dev->irq, dev);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/* Hard reset the card.  This used to pause for the same period that an
+   8390 reset command required, but that shouldn't be necessary. */
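+/* Editorial note: HZ is 100 on i386 kernels of this vintage, so the
+   "jiffies - start > 2" timeouts used here and in the probe/output paths
+   give the chip roughly 20-30 ms before the driver gives up. */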
+static void ne2k_pci_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (debug > 1) printk("%s: Resetting the 8390 t=%ld...",
+ dev->name, jiffies);
+
+ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2) {
+ printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
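+/* Editorial reference, repeated from the 8390 core's 8390.h of this vintage
+   only to make the 16/32-bit reads below easier to follow: the NIC deposits
+   this 4-byte header at the start of each ring page. */
+#if 0
+struct e8390_pkt_hdr {
+	unsigned char status;	/* status of the received frame */
+	unsigned char next;	/* ring page holding the next packet */
+	unsigned short count;	/* header + packet length in bytes */
+};
+#endif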
+
+static void ne2k_pci_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ long nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ (int)dev->interrupt);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
+ outb(0, nic_base + EN0_RCNTHI);
+ outb(0, nic_base + EN0_RSARLO); /* On page boundary */
+ outb(ring_page, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ } else {
+ *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
+ le16_to_cpus(&hdr->count);
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver. If you
+ are porting to a new ethercard, look at the packet driver source for hints.
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
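+/* Editorial summary of the remote-DMA handshake shared by the two routines
+   below (it only restates what the code does):
+     1. select page 0, no DMA, started: E8390_NODMA+E8390_PAGE0+E8390_START;
+     2. program the byte count (EN0_RCNTLO/HI) and ring address (EN0_RSARLO/HI);
+     3. issue E8390_RREAD or E8390_RWRITE and move the data through NE_DATAPORT;
+     4. ack the "remote DMA complete" bit by writing ENISR_RDC to EN0_ISR and
+        clear ei_status.dmaing. */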
+
+static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ long nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ (int)dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
+ outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+ if (count & 0x01) {
+ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+ }
+ } else {
+ insl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
+ buf += count & ~3;
+ if (count & 2) {
+ *((u16 *) buf) = le16_to_cpu(inw(NE_BASE + NE_DATAPORT));
+ buf = (void *) buf + sizeof (u16);
+ }
+ if (count & 1)
+ *buf = inb(NE_BASE + NE_DATAPORT);
+ }
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void
+ne2k_pci_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+
+ /* On little-endian it's always safe to round the count up for
+ word writes. */
+ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
+ count = (count + 3) & 0xFFFC;
+ else
+ if (count & 0x01)
+ count++;
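+	/* Editorial example: a 61-byte request becomes 62 bytes after the
+	   count++ above, or 64 bytes ((61 + 3) & 0xFFFC) on ONLY_32BIT_IO
+	   chips; the point of the comment above is that the extra pad bytes
+	   written to the dataport are harmless. */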
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+ printk("%s: DMAing conflict in ne2k_pci_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+ (int)dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+#ifdef NE8390_RW_BUGFIX
+ /* Handle the read-before-write bug the same way as the
+ Crynwr packet driver -- the NatSemi method doesn't work.
+ Actually this doesn't always work either, but if you have
+ problems with your NEx000 this is better than nothing! */
+ outb(0x42, nic_base + EN0_RCNTLO);
+ outb(0x00, nic_base + EN0_RCNTHI);
+ outb(0x42, nic_base + EN0_RSARLO);
+ outb(0x00, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+#endif
+ outb(ENISR_RDC, nic_base + EN0_ISR);
+
+ /* Now the normal output. */
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+ outb(0x00, nic_base + EN0_RSARLO);
+ outb(start_page, nic_base + EN0_RSARHI);
+ outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
+ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+ } else {
+ outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
+ if (count & 3) {
+ buf += count & ~3;
+ if (count & 2) {
+ outw(cpu_to_le16(*((u16 *) buf)), NE_BASE + NE_DATAPORT);
+ buf = (void *) buf + sizeof (u16);
+ }
+ }
+ }
+
+ dma_start = jiffies;
+
+ while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+ ne2k_pci_reset_8390(dev);
+ LNS8390_init(dev,1);
+ break;
+ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ return;
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ne2k-pci.c -I/usr/src/linux/drivers/net/"
+ * alt-compile-command: "gcc -DMODULE -O6 -c ne2k-pci.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * version-control: t
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/net_init.c b/linux/src/drivers/net/net_init.c
new file mode 100644
index 0000000..3d4c42d
--- /dev/null
+++ b/linux/src/drivers/net/net_init.c
@@ -0,0 +1,439 @@
+/* netdrv_init.c: Initialization for network devices. */
+/*
+ Written 1993,1994,1995 by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov or
+ C/O Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This file contains the initialization for the "pl14+" style ethernet
+ drivers. It should eventually replace most of drivers/net/Space.c.
+	Its primary advantage is that it's able to allocate low-memory buffers.
+ A secondary advantage is that the dangerous NE*000 netcards can reserve
+ their I/O port region before the SCSI probes start.
+
+ Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
+ ethdev_index[MAX_ETH_CARDS]
+ register_netdev() / unregister_netdev()
+
+ Modifications by Wolfgang Walter
+ Use dev_close cleanly so we always shut things down tidily.
+
+ Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
+
+ 14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function.
+
+ August 12, 1996 - Lawrence V. Stefani: Added fddi_change_mtu() and
+ fddi_setup() functions.
+ Sept. 10, 1996 - Lawrence V. Stefani: Increased hard_header_len to
+ include 3 pad bytes.
+*/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/malloc.h>
+#include <linux/if_ether.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/trdevice.h>
+#include <linux/if_arp.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+
+/* The network devices currently exist only in the socket namespace, so these
+ entries are unused. The only ones that make sense are
+ open start the ethercard
+ close stop the ethercard
+ ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
+ One can also imagine getting raw packets using
+ read & write
+ but this is probably better handled by a raw packet socket.
+
+ Given that almost all of these functions are handled in the current
+ socket-based scheme, putting ethercard devices in /dev/ seems pointless.
+
+ [Removed all support for /dev network devices. When someone adds
+ streams then by magic we get them, but otherwise they are un-needed
+ and a space waste]
+*/
+
+/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */
+#define MAX_ETH_CARDS 16	/* same as the number of irqs in irq2dev[] */
+static struct device *ethdev_index[MAX_ETH_CARDS];
+
+
+/* Fill in the fields of the device structure with ethernet-generic values.
+
+ If no device structure is passed, a new one is constructed, complete with
+ a SIZEOF_PRIVATE private data area.
+
+ If an empty string area is passed as dev->name, or a new structure is made,
+ a new name string is constructed. The passed string area should be 8 bytes
+ long.
+ */
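+#if 0
+/* Editorial usage sketch with a hypothetical driver (my_priv and my_probe1 do
+   not exist in this tree): a board probe normally lets init_etherdev()
+   allocate the struct device, pick the "ethN" name and carve out the private
+   area in one call. */
+struct my_priv { int whatever; };
+
+static struct device *my_probe1(long ioaddr)
+{
+	struct device *dev = init_etherdev(NULL, sizeof(struct my_priv));
+
+	dev->base_addr = ioaddr;	/* then fill in the board specifics */
+	return dev;
+}
+#endif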
+
+struct device *
+init_etherdev(struct device *dev, int sizeof_priv)
+{
+ int new_device = 0;
+ int i;
+
+ /* Use an existing correctly named device in Space.c:dev_base. */
+ if (dev == NULL) {
+ int alloc_size = sizeof(struct device) + sizeof("eth%d ")
+ + sizeof_priv + 3;
+ struct device *cur_dev;
+ char pname[8]; /* Putative name for the device. */
+
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(pname, "eth%d", i);
+ for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next)
+ if (strcmp(pname, cur_dev->name) == 0) {
+ dev = cur_dev;
+ dev->init = NULL;
+ sizeof_priv = (sizeof_priv + 3) & ~3;
+ dev->priv = sizeof_priv
+ ? kmalloc(sizeof_priv, GFP_KERNEL)
+ : NULL;
+ if (dev->priv) memset(dev->priv, 0, sizeof_priv);
+ goto found;
+ }
+ }
+
+ alloc_size &= ~3; /* Round to dword boundary. */
+
+ dev = (struct device *)kmalloc(alloc_size, GFP_KERNEL);
+ memset(dev, 0, alloc_size);
+ if (sizeof_priv)
+ dev->priv = (void *) (dev + 1);
+ dev->name = sizeof_priv + (char *)(dev + 1);
+ new_device = 1;
+ }
+
+ found: /* From the double loop above. */
+
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ ether_setup(dev); /* Hmmm, should this be called here? */
+
+ if (new_device) {
+ /* Append the device to the device queue. */
+ struct device **old_devp = &dev_base;
+ while ((*old_devp)->next)
+ old_devp = & (*old_devp)->next;
+ (*old_devp)->next = dev;
+ dev->next = 0;
+ }
+ return dev;
+}
+
+
+static int eth_mac_addr(struct device *dev, void *p)
+{
+ struct sockaddr *addr=p;
+ if(dev->start)
+ return -EBUSY;
+ memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ return 0;
+}
+
+static int eth_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef CONFIG_FDDI
+
+static int fddi_change_mtu(struct device *dev, int new_mtu)
+{
+ if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
+ return(-EINVAL);
+ dev->mtu = new_mtu;
+ return(0);
+}
+
+#endif
+
+void ether_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ /* register boot-defined "eth" devices */
+ if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) {
+ i = simple_strtoul(dev->name + 3, NULL, 0);
+ if (ethdev_index[i] == NULL) {
+ ethdev_index[i] = dev;
+ }
+ else if (dev != ethdev_index[i]) {
+ /* Really shouldn't happen! */
+ printk("ether_setup: Ouch! Someone else took %s\n",
+ dev->name);
+ }
+ }
+
+ dev->change_mtu = eth_change_mtu;
+ dev->hard_header = eth_header;
+ dev->rebuild_header = eth_rebuild_header;
+ dev->set_mac_address = eth_mac_addr;
+ dev->header_cache_bind = eth_header_cache_bind;
+ dev->header_cache_update= eth_header_cache_update;
+
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = 1500; /* eth_mtu */
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 100; /* Ethernet wants good queues */
+
+ memset(dev->broadcast,0xFF, ETH_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#ifdef CONFIG_TR
+
+void tr_setup(struct device *dev)
+{
+ int i;
+ /* Fill in the fields of the device structure with ethernet-generic values.
+ This should be in a common file instead of per-driver. */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->hard_header = tr_header;
+ dev->rebuild_header = tr_rebuild_header;
+
+ dev->type = ARPHRD_IEEE802;
+ dev->hard_header_len = TR_HLEN;
+ dev->mtu = 2000; /* bug in fragmenter...*/
+ dev->addr_len = TR_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on tr */
+
+ memset(dev->broadcast,0xFF, TR_ALEN);
+
+ /* New-style flags. */
+ dev->flags = IFF_BROADCAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+}
+
+#endif
+
+#ifdef CONFIG_FDDI
+
+void fddi_setup(struct device *dev)
+ {
+ int i;
+
+ /*
+ * Fill in the fields of the device structure with FDDI-generic values.
+ * This should be in a common file instead of per-driver.
+ */
+ for (i=0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->change_mtu = fddi_change_mtu;
+ dev->hard_header = fddi_header;
+ dev->rebuild_header = fddi_rebuild_header;
+
+ dev->type = ARPHRD_FDDI;
+ dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
+ dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
+ dev->addr_len = FDDI_K_ALEN;
+ dev->tx_queue_len = 100; /* Long queues on FDDI */
+
+ memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
+
+ /* New-style flags */
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+ dev->family = AF_INET;
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+ return;
+ }
+
+#endif
+
+int ether_config(struct device *dev, struct ifmap *map)
+{
+ if (map->mem_start != (u_long)(-1))
+ dev->mem_start = map->mem_start;
+ if (map->mem_end != (u_long)(-1))
+ dev->mem_end = map->mem_end;
+ if (map->base_addr != (u_short)(-1))
+ dev->base_addr = map->base_addr;
+ if (map->irq != (u_char)(-1))
+ dev->irq = map->irq;
+ if (map->dma != (u_char)(-1))
+ dev->dma = map->dma;
+ if (map->port != (u_char)(-1))
+ dev->if_port = map->port;
+ return 0;
+}
+
+int register_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i=MAX_ETH_CARDS;
+
+ save_flags(flags);
+ cli();
+
+ if (dev && dev->init) {
+ if (dev->name &&
+ ((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ if (ethdev_index[i] == NULL) {
+ sprintf(dev->name, "eth%d", i);
+/* printk("loading device '%s'...\n", dev->name);*/
+ ethdev_index[i] = dev;
+ break;
+ }
+ }
+
+ sti(); /* device probes assume interrupts enabled */
+ if (dev->init(dev) != 0) {
+ if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL;
+ restore_flags(flags);
+ return -EIO;
+ }
+ cli();
+
+ /* Add device to end of chain */
+ if (dev_base) {
+ while (d->next)
+ d = d->next;
+ d->next = dev;
+ }
+ else
+ dev_base = dev;
+ dev->next = NULL;
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+void unregister_netdev(struct device *dev)
+{
+ struct device *d = dev_base;
+ unsigned long flags;
+ int i;
+
+ save_flags(flags);
+ cli();
+
+ if (dev == NULL)
+ {
+ printk("was NULL\n");
+ restore_flags(flags);
+ return;
+ }
+ /* else */
+ if (dev->start)
+ printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name);
+
+ /*
+	 * We must jump over the main device and its aliases here; alias
+	 * devices are not unregistered directly, so that only the net_alias
+	 * module manages them.
+ */
+#ifdef CONFIG_NET_ALIAS
+ if (dev_base == dev)
+ dev_base = net_alias_nextdev(dev);
+ else
+ {
+ while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */
+ d = net_alias_nextdev(d);
+
+ if (d && (net_alias_nextdev(d) == dev))
+ {
+ /*
+ * Critical: Bypass by consider devices as blocks (maindev+aliases)
+ */
+ net_alias_nextdev_set(d, net_alias_nextdev(dev));
+ }
+#else
+ if (dev_base == dev)
+ dev_base = dev->next;
+ else
+ {
+ while (d && (d->next != dev))
+ d = d->next;
+
+ if (d && (d->next == dev))
+ {
+ d->next = dev->next;
+ }
+#endif
+ else
+ {
+ printk("unregister_netdev: '%s' not found\n", dev->name);
+ restore_flags(flags);
+ return;
+ }
+ }
+ for (i = 0; i < MAX_ETH_CARDS; ++i)
+ {
+ if (ethdev_index[i] == dev)
+ {
+ ethdev_index[i] = NULL;
+ break;
+ }
+ }
+
+ restore_flags(flags);
+
+ /*
+	 * You can e.g. use an interface in a route even though it is not up.
+	 * We call dev_close (which has been changed: it will take a device down
+	 * even if dev->flags == 0, but it will not call dev->stop unless IFF_UP
+	 * is set).
+ * This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev),
+ * dev_mc_discard(dev), ....
+ */
+
+ dev_close(dev);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/ni52.c b/linux/src/drivers/net/ni52.c
new file mode 100644
index 0000000..6d486e9
--- /dev/null
+++ b/linux/src/drivers/net/ni52.c
@@ -0,0 +1,1387 @@
+/*
+ * net-3-driver for the NI5210 card (i82586 Ethernet chip)
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same Gnu Public License that covers that work.
+ *
+ * Alphacode 0.80 (96/02/19) for Linux 1.3.66 (or later)
+ * Copyrights (c) 1994,1995,1996 by M.Hipp (Michael.Hipp@student.uni-tuebingen.de)
+ * [feel free to mail ....]
+ *
+ * when used as a module: (no autoprobing!)
+ *   compile with: gcc -D__KERNEL__ -DMODULE -O2 -c ni52.c
+ *   run with e.g.: insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
+ *
+ * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES TO ME!
+ *
+ * If you find a bug, please report to me:
+ *   the kernel panic output and any kmsg from the ni52 driver,
+ *   the ni5210 driver version and the Linux kernel version,
+ *   how much shared memory (memsize) is on the netcard,
+ *   bootprom: yes/no, base_addr, mem_start,
+ *   and maybe the ni5210 card revision and the i82586 version.
+ *
+ * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
+ * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,
+ * 0xd8000,0xcc000,0xce000,0xda000,0xdc000
+ *
+ * sources:
+ * skeleton.c from Donald Becker
+ *
+ * I have also taken a look at the following sources (mail me if you need them):
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's (fourth) i82586-driver for BSD
+ * (before getting an i82596 (yes 596 not 586) manual, the existing drivers helped
+ * me a lot to understand this tricky chip.)
+ *
+ * Known Problems:
+ * The internal sysbus seems to be slow. So we often lose packets because of
+ * overruns while receiving from a fast remote host.
+ * This can slow down TCP connections. Maybe the newer ni5210 cards are better.
+ *   My experience is that if a machine sends at more than about 500-600K/s,
+ *   the fifo/sysbus overflows.
+ *
+ * IMPORTANT NOTE:
+ * On fast networks, it's a (very) good idea to have 16K shared memory. With
+ * 8K, we can store only 4 receive frames, so it can (easily) happen that a remote
+ * machine 'overruns' our system.
+ *
+ * Known i82586/card problems (I'm sure there are many more!):
+ *   When running in NOP mode, the i82586 sometimes seems to forget to report
+ *   every xmit interrupt until we restart the CU.
+ *   Another MAJOR bug is that the RU sometimes seems to ignore the EL bit
+ *   in the RBD struct, which indicates the end of the RBD queue.
+ *   Instead, the RU fetches another (randomly selected and
+ *   usually used) RBD and begins to fill it. (Maybe this happens only if
+ *   the last buffer from the previous RFD fits exactly into the queue and
+ *   the next RFD can't fetch an initial RBD. Does anyone know more?)
+ *
+ * results from ftp performance tests with Linux 1.2.5
+ * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
+ * sending in NOP-mode: peak performance up to 530K/s (but better don't run this mode)
+ */
+
+/*
+ * 19.Feb.96: more Mcast changes, module support (MH)
+ *
+ * 18.Nov.95: Mcast changes (AC).
+ *
+ * 23.April.95: fixed(?) receiving problems by configuring a RFD more
+ * than the number of RBD's. Can maybe cause other problems.
+ * 18.April.95: Added MODULE support (MH)
+ * 17.April.95: MC related changes in init586() and set_multicast_list().
+ * removed use of 'jiffies' in init586() (MH)
+ *
+ * 19.Sep.94: Added Multicast support (not tested yet) (MH)
+ *
+ * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
+ * Now, every RFD has exact one RBD. (MH)
+ *
+ * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
+ *
+ * 19.Aug.94: changed request_irq() parameter (MH)
+ *
+ * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
+ *
+ * 19.July.94: lotsa cleanups .. (MH)
+ *
+ * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
+ *
+ * 4.July.94: patches for Linux 1.1.24 (MH)
+ *
+ * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
+ *
+ * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, too (MH)
+ *
+ * < 30.Sep.93: first versions
+ */
+
+static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */
+static int automatic_resume = 0; /* experimental .. better should be zero */
+static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
+static int fifo=0x8; /* don't change */
+
+/* #define REALLY_SLOW_IO */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "ni52.h"
+
+#define DEBUG /* debug on */
+#define SYSBUSVAL 1 /* 8 Bit */
+
+#define ni_attn586() {outb(0,dev->base_addr+NI52_ATTENTION);}
+#define ni_reset586() {outb(0,dev->base_addr+NI52_RESET);}
+#define ni_disint() {outb(0,dev->base_addr+NI52_INTDIS);}
+#define ni_enaint() {outb(0,dev->base_addr+NI52_INTENA);}
+
+#define make32(ptr16) (p->memtop + (short) (ptr16) )
+#define make24(ptr32) ((char *) (ptr32) - p->base)
+#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
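+
+/* Editorial worked example (addresses chosen only for illustration): with 8K
+   of shared memory at host address 0xd0000, memtop = 0xd2000 and
+   base = 0xd2000 - 0x01000000, i.e. the memory block ends at the top of the
+   i82586's 24-bit address space.  For the host pointer 0xd0400:
+     make24(0xd0400) = 0xd0400 - base          = 0xffe400  (24-bit bus address)
+     make16(0xd0400) = 0xd0400 - 0xd2000       = 0xe400    (16-bit offset)
+     make32(0xe400)  = 0xd2000 + (short)0xe400 = 0xd0400   (host pointer again) */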
+
+/******************* how to calculate the buffers *****************************
+
+ * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
+ * --------------- in a different (more stable?) mode. Only in this mode it's
+ * possible to configure the driver with 'NO_NOPCOMMANDS'
+
+sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
+sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
+sizeof(rfd) = 24; sizeof(rbd) = 12;
+sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
+sizeof(nop_cmd) = 8;
+
+ * If you don't know the driver, you had better not change these values: */
+
+#define RECV_BUFF_SIZE 1524 /* slightly oversized */
+#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
+#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
+#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
+#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
+#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
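+
+/* Editorial back-of-the-envelope check using the sizes listed above (rough:
+   it ignores alignment and the transient configure/TDR commands): with 8K of
+   shared memory, NUM_RECV_BUFFS_8 = 4 and NUM_XMIT_BUFFS = 1 the layout needs
+   about 36 (scp+iscp+scb) + 2*8 (nop cmds) + 4*24 (RFDs) + 4*12 (RBDs)
+   + 4*1524 (receive buffers) + 16 + 1524 + 8 (xmit cmd, buffer, TBD)
+   = 7840 bytes, which fits into the 8192 available. */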
+
+/**************************************************************************/
+
+/* different DELAYs */
+#define DELAY(x) __delay((loops_per_sec>>5)*(x));
+#define DELAY_16(); { __delay( (loops_per_sec>>16)+1 ); }
+#define DELAY_18(); { __delay( (loops_per_sec>>18)+1 ); }
+
+/* wait for command with timeout: */
+#define WAIT_4_SCB_CMD() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_cuc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
+ if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
+
+#define WAIT_4_SCB_CMD_RUC() { int i; \
+ for(i=0;i<16384;i++) { \
+ if(!p->scb->cmd_ruc) break; \
+ DELAY_18(); \
+ if(i == 16383) { \
+ printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
+ if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
+
+#define WAIT_4_STAT_COMPL(addr) { int i; \
+ for(i=0;i<32767;i++) { \
+ if((addr)->cmd_status & STAT_COMPL) break; \
+ DELAY_16(); DELAY_16(); } }
+
+#define NI52_TOTAL_SIZE 16
+#define NI52_ADDR0 0x02
+#define NI52_ADDR1 0x07
+#define NI52_ADDR2 0x01
+
+static int ni52_probe1(struct device *dev,int ioaddr);
+static void ni52_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr);
+static int ni52_open(struct device *dev);
+static int ni52_close(struct device *dev);
+static int ni52_send_packet(struct sk_buff *,struct device *);
+static struct enet_statistics *ni52_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+#if 0
+static void ni52_dump(struct device *,void *);
+#endif
+
+/* helper-functions */
+static int init586(struct device *dev);
+static int check586(struct device *dev,char *where,unsigned size);
+static void alloc586(struct device *dev);
+static void startrecv586(struct device *dev);
+static void *alloc_rfa(struct device *dev,void *ptr);
+static void ni52_rcv_int(struct device *dev);
+static void ni52_xmt_int(struct device *dev);
+static void ni52_rnr_int(struct device *dev);
+
+struct priv
+{
+ struct enet_statistics stats;
+ unsigned long base;
+ char *memtop;
+ int lock,reseted;
+ volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first;
+ volatile struct scp_struct *scp; /* volatile is important */
+ volatile struct iscp_struct *iscp; /* volatile is important */
+ volatile struct scb_struct *scb; /* volatile is important */
+ volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
+ volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
+#if (NUM_XMIT_BUFFS == 1)
+ volatile struct nop_cmd_struct *nop_cmds[2];
+#else
+ volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
+#endif
+ volatile int nop_point,num_recv_buffs;
+ volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
+ volatile int xmit_count,xmit_last;
+};
+
+/**********************************************
+ * close device
+ */
+static int ni52_close(struct device *dev)
+{
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+
+ ni_reset586(); /* the hard way to stop the receiver */
+
+ dev->start = 0;
+ dev->tbusy = 0;
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/**********************************************
+ * open device
+ */
+static int ni52_open(struct device *dev)
+{
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+
+ if(request_irq(dev->irq, &ni52_interrupt,0,"ni5210",NULL))
+ {
+ ni_reset586();
+ return -EAGAIN;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 1;
+
+ MOD_INC_USE_COUNT;
+
+ return 0; /* most done by init */
+}
+
+/**********************************************
+ * Check to see if there's an 82586 out there.
+ */
+static int check586(struct device *dev,char *where,unsigned size)
+{
+ struct priv pb;
+ struct priv *p = /* (struct priv *) dev->priv*/ &pb;
+ char *iscp_addrs[2];
+ int i;
+
+ p->base = (unsigned long) where + size - 0x01000000;
+ p->memtop = where + size;
+ p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
+ memset((char *)p->scp,0, sizeof(struct scp_struct));
+ for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
+ if(((char *)p->scp)[i])
+ return 0;
+ p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
+ if(p->scp->sysbus != SYSBUSVAL)
+ return 0;
+
+ iscp_addrs[0] = where;
+ iscp_addrs[1]= (char *) p->scp - sizeof(struct iscp_struct);
+
+ for(i=0;i<2;i++)
+ {
+ p->iscp = (struct iscp_struct *) iscp_addrs[i];
+ memset((char *)p->iscp,0, sizeof(struct iscp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->iscp->busy = 1;
+
+ ni_reset586();
+ ni_attn586();
+ DELAY(1); /* wait a while... */
+
+ if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
+ return 0;
+ }
+ return 1;
+}
+
+/******************************************************************
+ * set iscp at the right place, called by ni52_probe1 and open586.
+ */
+void alloc586(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ ni_reset586();
+ DELAY(1);
+
+ p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
+ p->scb = (struct scb_struct *) (dev->mem_start);
+ p->iscp = (struct iscp_struct *) ((char *)p->scp - sizeof(struct iscp_struct));
+
+ memset((char *) p->iscp,0,sizeof(struct iscp_struct));
+ memset((char *) p->scp ,0,sizeof(struct scp_struct));
+
+ p->scp->iscp = make24(p->iscp);
+ p->scp->sysbus = SYSBUSVAL;
+ p->iscp->scb_offset = make16(p->scb);
+
+ p->iscp->busy = 1;
+ ni_reset586();
+ ni_attn586();
+
+ DELAY(1);
+
+ if(p->iscp->busy)
+ printk("%s: Init-Problems (alloc).\n",dev->name);
+
+ p->reseted = 0;
+
+ memset((char *)p->scb,0,sizeof(struct scb_struct));
+}
+
+/**********************************************
+ * probe the ni5210-card
+ */
+int ni52_probe(struct device *dev)
+{
+#ifndef MODULE
+ int *port;
+ static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
+#endif
+ int base_addr = dev->base_addr;
+
+	if (base_addr > 0x1ff) {	/* Check a single specified location. */
+		if( (inb(base_addr+NI52_MAGIC1) == NI52_MAGICVAL1) &&
+		    (inb(base_addr+NI52_MAGIC2) == NI52_MAGICVAL2))
+			return ni52_probe1(dev, base_addr);
+		return ENXIO;		/* specified address, but no NI5210 there */
+	}
+	else if (base_addr > 0)		/* Don't probe at all. */
+		return ENXIO;
+
+#ifdef MODULE
+ printk("%s: no autoprobing allowed for modules.\n",dev->name);
+#else
+ for (port = ports; *port; port++) {
+ int ioaddr = *port;
+ if (check_region(ioaddr, NI52_TOTAL_SIZE))
+ continue;
+ if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
+ !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2))
+ continue;
+
+ dev->base_addr = ioaddr;
+ if (ni52_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+#ifdef FULL_IO_PROBE
+ for(dev->base_addr=0x200;dev->base_addr<0x400;dev->base_addr+=8)
+ {
+ int ioaddr = dev->base_addr;
+ if (check_region(ioaddr, NI52_TOTAL_SIZE))
+ continue;
+ if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
+ !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2))
+ continue;
+ if (ni52_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+#endif
+
+#endif
+
+ dev->base_addr = base_addr;
+ return ENODEV;
+}
+
+static int ni52_probe1(struct device *dev,int ioaddr)
+{
+ int i,size;
+
+ for(i=0;i<ETH_ALEN;i++)
+ dev->dev_addr[i] = inb(dev->base_addr+i);
+
+ if(dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
+ || dev->dev_addr[2] != NI52_ADDR2)
+ return ENODEV;
+
+ printk("%s: NI5210 found at %#3lx, ",dev->name,dev->base_addr);
+
+ request_region(ioaddr,NI52_TOTAL_SIZE,"ni5210");
+
+ /*
+ * check (or search) IO-Memory, 8K and 16K
+ */
+#ifdef MODULE
+ size = dev->mem_end - dev->mem_start;
+ if(size != 0x2000 && size != 0x4000)
+ {
+ printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n",dev->name,size);
+ return ENODEV;
+ }
+ if(!check586(dev,(char *) dev->mem_start,size))
+ {
+ printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size);
+ return ENODEV;
+ }
+#else
+ if(dev->mem_start != 0) /* no auto-mem-probe */
+ {
+ size = 0x4000; /* check for 16K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ size = 0x2000; /* check for 8K mem */
+ if(!check586(dev,(char *) dev->mem_start,size)) {
+ printk("?memprobe, Can't find memory at 0x%lx!\n",dev->mem_start);
+ return ENODEV;
+ }
+ }
+ }
+ else
+ {
+ static long memaddrs[] = { 0xc8000,0xca000,0xcc000,0xce000,0xd0000,0xd2000,
+ 0xd4000,0xd6000,0xd8000,0xda000,0xdc000, 0 };
+ for(i=0;;i++)
+ {
+ if(!memaddrs[i]) {
+ printk("?memprobe, Can't find io-memory!\n");
+ return ENODEV;
+ }
+ dev->mem_start = memaddrs[i];
+ size = 0x2000; /* check for 8K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 8K-check */
+ break;
+ size = 0x4000; /* check for 16K mem */
+ if(check586(dev,(char *)dev->mem_start,size)) /* 16K-check */
+ break;
+ }
+ }
+ dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */
+#endif
+
+ dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
+ if(dev->priv == NULL)
+ {
+ printk("%s: Ooops .. can't allocate private driver memory.\n",dev->name);
+ return -ENOMEM;
+ }
+ /* warning: we don't free it on errors */
+ memset((char *) dev->priv,0,sizeof(struct priv));
+
+ ((struct priv *) (dev->priv))->memtop = (char *) dev->mem_start + size;
+ ((struct priv *) (dev->priv))->base = dev->mem_start + size - 0x01000000;
+ alloc586(dev);
+
+ /* set number of receive-buffs according to memsize */
+ if(size == 0x2000)
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8;
+ else
+ ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16;
+
+ printk("Memaddr: 0x%lx, Memsize: %d, ",dev->mem_start,size);
+
+ if(dev->irq < 2)
+ {
+ autoirq_setup(0);
+ ni_reset586();
+ ni_attn586();
+ if(!(dev->irq = autoirq_report(2)))
+ {
+ printk("?autoirq, Failed to detect IRQ line!\n");
+ return 1;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else {
+ if(dev->irq == 2)
+ dev->irq = 9;
+ printk("IRQ %d (assigned and not checked!).\n",dev->irq);
+ }
+
+ dev->open = &ni52_open;
+ dev->stop = &ni52_close;
+ dev->get_stats = &ni52_get_stats;
+ dev->hard_start_xmit = &ni52_send_packet;
+ dev->set_multicast_list = &set_multicast_list;
+
+ dev->if_port = 0;
+
+ ether_setup(dev);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 0;
+
+ return 0;
+}
+
+/**********************************************
+ * init the chip (ni52-interrupt should be disabled?!)
+ * needs a correct 'allocated' memory
+ */
+
+static int init586(struct device *dev)
+{
+ void *ptr;
+ int i,result=0;
+ struct priv *p = (struct priv *) dev->priv;
+ volatile struct configure_cmd_struct *cfg_cmd;
+ volatile struct iasetup_cmd_struct *ias_cmd;
+ volatile struct tdr_cmd_struct *tdr_cmd;
+ volatile struct mcsetup_cmd_struct *mc_cmd;
+ struct dev_mc_list *dmi=dev->mc_list;
+ int num_addrs=dev->mc_count;
+
+ ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
+
+ cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
+ cfg_cmd->cmd_status = 0;
+ cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
+ cfg_cmd->cmd_link = 0xffff;
+
+ cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
+ cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */
+ cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
+ cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
+ cfg_cmd->priority = 0x00;
+ cfg_cmd->ifs = 0x60;
+ cfg_cmd->time_low = 0x00;
+ cfg_cmd->time_high = 0xf2;
+ cfg_cmd->promisc = 0;
+ if(dev->flags & IFF_ALLMULTI) {
+ int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
+ if(num_addrs > len) {
+ printk("%s: switching to promisc. mode\n",dev->name);
+ dev->flags|=IFF_PROMISC;
+ }
+ }
+ if(dev->flags&IFF_PROMISC)
+ {
+ cfg_cmd->promisc=1;
+ dev->flags|=IFF_PROMISC;
+ }
+ cfg_cmd->carr_coll = 0x00;
+
+ p->scb->cbl_offset = make16(cfg_cmd);
+ p->scb->cmd_ruc = 0;
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(cfg_cmd);
+
+ if((cfg_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
+ {
+ printk("%s: configure command failed: %x\n",dev->name,cfg_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+ * individual address setup
+ */
+ ias_cmd = (struct iasetup_cmd_struct *)ptr;
+
+ ias_cmd->cmd_status = 0;
+ ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
+ ias_cmd->cmd_link = 0xffff;
+
+ memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+
+ p->scb->cbl_offset = make16(ias_cmd);
+
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(ias_cmd);
+
+ if((ias_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
+ printk("%s (ni52): individual address setup command failed: %04x\n",dev->name,ias_cmd->cmd_status);
+ return 1;
+ }
+
+ /*
+ * TDR, wire check .. e.g. no resistor e.t.c
+ */
+ tdr_cmd = (struct tdr_cmd_struct *)ptr;
+
+ tdr_cmd->cmd_status = 0;
+ tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
+ tdr_cmd->cmd_link = 0xffff;
+ tdr_cmd->status = 0;
+
+ p->scb->cbl_offset = make16(tdr_cmd);
+ p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(tdr_cmd);
+
+ if(!(tdr_cmd->cmd_status & STAT_COMPL))
+ {
+ printk("%s: Problems while running the TDR.\n",dev->name);
+ }
+ else
+ {
+ DELAY_16(); /* wait for result */
+ result = tdr_cmd->status;
+
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ ni_attn586(); /* ack the interrupts */
+
+ if(result & TDR_LNK_OK)
+ ;
+ else if(result & TDR_XCVR_PRB)
+ printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
+ else if(result & TDR_ET_OPN)
+ printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ else if(result & TDR_ET_SRT)
+ {
+ if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
+ printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
+ }
+ else
+ printk("%s: TDR: Unknown status %04x\n",dev->name,result);
+ }
+
+ /*
+ * Multicast setup
+ */
+ if(num_addrs && !(dev->flags & IFF_PROMISC) )
+ {
+ mc_cmd = (struct mcsetup_cmd_struct *) ptr;
+ mc_cmd->cmd_status = 0;
+ mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
+ mc_cmd->cmd_link = 0xffff;
+ mc_cmd->mc_cnt = num_addrs * 6;
+
+ for(i=0;i<num_addrs;i++,dmi=dmi->next)
+ memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+
+ p->scb->cbl_offset = make16(mc_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+
+ WAIT_4_STAT_COMPL(mc_cmd);
+
+ if( (mc_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't apply multicast-address-list.\n",dev->name);
+ }
+
+ /*
+ * alloc nop/xmit-cmds
+ */
+#if (NUM_XMIT_BUFFS == 1)
+ for(i=0;i<2;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#else
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
+ p->nop_cmds[i]->cmd_cmd = CMD_NOP;
+ p->nop_cmds[i]->cmd_status = 0;
+ p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
+ ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
+ }
+#endif
+
+ ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+
+ /*
+ * alloc xmit-buffs / init xmit_cmds
+ */
+ for(i=0;i<NUM_XMIT_BUFFS;i++)
+ {
+ p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
+ ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
+ p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
+ ptr = (char *) ptr + XMIT_BUFF_SIZE;
+ p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
+ ptr = (char *) ptr + sizeof(struct tbd_struct);
+ if((void *)ptr > (void *)p->iscp)
+ {
+ printk("%s: not enough shared-mem for your configuration!\n",dev->name);
+ return 1;
+ }
+ memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
+ memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
+ p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
+ p->xmit_cmds[i]->cmd_status = STAT_COMPL;
+ p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
+ p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
+ p->xmit_buffs[i]->next = 0xffff;
+ p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
+ }
+
+ p->xmit_count = 0;
+ p->xmit_last = 0;
+#ifndef NO_NOPCOMMANDS
+ p->nop_point = 0;
+#endif
+
+ /*
+ * 'start transmitter'
+ */
+#ifndef NO_NOPCOMMANDS
+ p->scb->cbl_offset = make16(p->nop_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+#else
+ p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
+ p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_SUSPEND | CMD_INT;
+#endif
+
+ /*
+ * ack. interrupts
+ */
+ p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
+ ni_attn586();
+ DELAY_16();
+
+ ni_enaint();
+
+ return 0;
+}
+
+/******************************************************
+ * This is a helper routine for ni52_rnr_int() and init586().
+ * It sets up the Receive Frame Area (RFA).
+ */
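+/* Editorial sketch of the layout built below (derived from the code, not an
+   additional specification): a circular list of RFDs, then a circular list of
+   RBDs, then the actual receive buffers.  Only the first RFD gets a real
+   rbd_offset, the others carry 0xffff; the last RFD is marked RFD_SUSP so the
+   RU suspends once the ring is full. */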
+
+static void *alloc_rfa(struct device *dev,void *ptr)
+{
+ volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
+ volatile struct rbd_struct *rbd;
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
+ p->rfd_first = rfd;
+
+ for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
+ rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
+ rfd[i].rbd_offset = 0xffff;
+ }
+ rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
+
+ ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
+
+ rbd = (struct rbd_struct *) ptr;
+ ptr = (void *) (rbd + p->num_recv_buffs);
+
+ /* clr descriptors */
+ memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
+
+ for(i=0;i<p->num_recv_buffs;i++)
+ {
+ rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
+ rbd[i].size = RECV_BUFF_SIZE;
+ rbd[i].buffer = make24(ptr);
+ ptr = (char *) ptr + RECV_BUFF_SIZE;
+ }
+
+ p->rfd_top = p->rfd_first;
+ p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
+
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->rfd_first->rbd_offset = make16(rbd);
+
+ return ptr;
+}
+
+
+/**************************************************
+ * Interrupt Handler ...
+ */
+
+static void ni52_interrupt(int irq,void *dev_id,struct pt_regs *reg_ptr)
+{
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ unsigned short stat;
+ int cnt=0;
+ struct priv *p;
+
+ if (!dev) {
+ printk ("ni5210-interrupt: irq %d for unknown device.\n",irq);
+ return;
+ }
+ p = (struct priv *) dev->priv;
+
+ if(debuglevel > 1)
+ printk("I");
+
+ dev->interrupt = 1;
+
+ WAIT_4_SCB_CMD(); /* wait for last command */
+
+ while((stat=p->scb->cus & STAT_MASK))
+ {
+ p->scb->cmd_cuc = stat;
+ ni_attn586();
+
+ if(stat & STAT_FR) /* received a frame */
+ ni52_rcv_int(dev);
+
+ if(stat & STAT_RNR) /* RU went 'not ready' */
+ {
+ printk("(R)");
+ if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+ else
+ {
+ printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
+ ni52_rnr_int(dev);
+ }
+ }
+
+ if(stat & STAT_CX) /* command with I-bit set complete */
+ ni52_xmt_int(dev);
+
+#ifndef NO_NOPCOMMANDS
+ if(stat & STAT_CNA) /* CU went 'not ready' */
+ {
+ if(dev->start)
+ printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
+ }
+#endif
+
+ if(debuglevel > 1)
+ printk("%d",cnt++);
+
+ WAIT_4_SCB_CMD(); /* wait for ack. (ni52_xmt_int can be faster than ack!!) */
+ if(p->scb->cmd_cuc) /* timed out? */
+ {
+ printk("%s: Acknowledge timed out.\n",dev->name);
+ ni_disint();
+ break;
+ }
+ }
+
+ if(debuglevel > 1)
+ printk("i");
+
+ dev->interrupt = 0;
+}
+
+/*******************************************************
+ * receive-interrupt
+ */
+
+static void ni52_rcv_int(struct device *dev)
+{
+ int status,cnt=0;
+ unsigned short totlen;
+ struct sk_buff *skb;
+ struct rbd_struct *rbd;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("R");
+
+ for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
+ {
+ rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
+
+ if(status & RFD_OK) /* frame received without error? */
+ {
+ if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */
+ {
+ totlen &= RBD_MASK; /* length of this frame */
+ rbd->status = 0;
+ skb = (struct sk_buff *) dev_alloc_skb(totlen+2);
+ if(skb != NULL)
+ {
+ skb->dev = dev;
+ skb_reserve(skb,2);
+ skb_put(skb,totlen);
+ eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ p->stats.rx_packets++;
+ }
+ else
+ p->stats.rx_dropped++;
+ }
+ else
+ {
+ int rstat;
+ /* free all RBD's until RBD_LAST is set */
+ totlen = 0;
+ while(!((rstat=rbd->status) & RBD_LAST))
+ {
+ totlen += rstat & RBD_MASK;
+ if(!rstat)
+ {
+ printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
+ break;
+ }
+ rbd->status = 0;
+ rbd = (struct rbd_struct *) make32(rbd->next);
+ }
+ totlen += rstat & RBD_MASK;
+ rbd->status = 0;
+ printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
+ p->stats.rx_dropped++;
+ }
+ }
+ else /* frame !(ok), only with 'save-bad-frames' */
+ {
+ printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
+ p->stats.rx_errors++;
+ }
+ p->rfd_top->stat_high = 0;
+ p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
+ p->rfd_top->rbd_offset = 0xffff;
+ p->rfd_last->last = 0; /* delete RFD_SUSP */
+ p->rfd_last = p->rfd_top;
+ p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
+ p->scb->rfa_offset = make16(p->rfd_top);
+
+ if(debuglevel > 0)
+ printk("%d",cnt++);
+ }
+
+ if(automatic_resume)
+ {
+ WAIT_4_SCB_CMD();
+ p->scb->cmd_ruc = RUC_RESUME;
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC();
+ }
+
+#ifdef WAIT_4_BUSY
+ {
+ int i;
+ for(i=0;i<1024;i++)
+ {
+ if(p->rfd_top->status)
+ break;
+ DELAY_16();
+ if(i == 1023)
+ printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
+ }
+ }
+#endif
+
+#if 0
+ if(!at_least_one)
+ {
+ int i;
+ volatile struct rfd_struct *rfds=p->rfd_top;
+ volatile struct rbd_struct *rbds;
+ printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
+ for(i=0;i< (p->num_recv_buffs+4);i++)
+ {
+ rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
+ printk("%04x:%04x ",rfds->status,rbds->status);
+ rfds = (struct rfd_struct *) make32(rfds->next);
+ }
+ printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
+ printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
+ }
+ old_at_least = at_least_one;
+#endif
+
+ if(debuglevel > 0)
+ printk("r");
+}
+
+/**********************************************************
+ * handle 'Receiver went not ready'.
+ */
+
+static void ni52_rnr_int(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->stats.rx_errors++;
+
+ WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
+ p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
+ ni_attn586();
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */
+
+ alloc_rfa(dev,(char *)p->rfd_first);
+/* maybe add a check here, before restarting the RU */
+ startrecv586(dev); /* restart RU */
+
+ printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
+
+}
+
+/**********************************************************
+ * handle xmit - interrupt
+ */
+
+static void ni52_xmt_int(struct device *dev)
+{
+ int status;
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(debuglevel > 0)
+ printk("X");
+
+ status = p->xmit_cmds[p->xmit_last]->cmd_status;
+ if(!(status & STAT_COMPL))
+ printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
+
+ if(status & STAT_OK)
+ {
+ p->stats.tx_packets++;
+ p->stats.collisions += (status & TCMD_MAXCOLLMASK);
+ }
+ else
+ {
+ p->stats.tx_errors++;
+ if(status & TCMD_LATECOLL) {
+ printk("%s: late collision detected.\n",dev->name);
+ p->stats.collisions++;
+ }
+ else if(status & TCMD_NOCARRIER) {
+ p->stats.tx_carrier_errors++;
+ printk("%s: no carrier detected.\n",dev->name);
+ }
+ else if(status & TCMD_LOSTCTS)
+ printk("%s: loss of CTS detected.\n",dev->name);
+ else if(status & TCMD_UNDERRUN) {
+ p->stats.tx_fifo_errors++;
+ printk("%s: DMA underrun detected.\n",dev->name);
+ }
+ else if(status & TCMD_MAXCOLL) {
+ printk("%s: Max. collisions exceeded.\n",dev->name);
+ p->stats.collisions += 16;
+ }
+ }
+
+#if (NUM_XMIT_BUFFS > 1)
+ if( (++p->xmit_last) == NUM_XMIT_BUFFS)
+ p->xmit_last = 0;
+#endif
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+}
+
+/***********************************************************
+ * (re)start the receiver
+ */
+
+static void startrecv586(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+ p->scb->rfa_offset = make16(p->rfd_first);
+ p->scb->cmd_ruc = RUC_START;
+ ni_attn586(); /* start cmd. */
+ WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
+}
+
+/******************************************************
+ * send frame
+ */
+
+static int ni52_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int len,i;
+#ifndef NO_NOPCOMMANDS
+ int next_nop;
+#endif
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(dev->tbusy)
+ {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+
+#ifndef NO_NOPCOMMANDS
+ if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
+ {
+ dev->tbusy = 0;
+#ifdef DEBUG
+ printk("%s: strange ... timeout with CU active?!?\n",dev->name);
+ printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)p->xmit_cmds[0]->cmd_status,(int)p->nop_cmds[0]->cmd_status,(int)p->nop_cmds[1]->cmd_status,(int)p->nop_point);
+#endif
+ p->scb->cmd_cuc = CUC_ABORT;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+ }
+ else
+#endif
+ {
+#ifdef DEBUG
+ printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+ printk("%s: command-stats: %04x %04x\n",dev->name,p->xmit_cmds[0]->cmd_status,p->xmit_cmds[1]->cmd_status);
+      printk("%s: check whether you set the right interrupt number!\n",dev->name);
+#endif
+ ni52_close(dev);
+ ni52_open(dev);
+ }
+ dev->trans_start = jiffies;
+ return 0;
+ }
+
+ if(skb == NULL)
+ {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+ if(skb->len > XMIT_BUFF_SIZE)
+ {
+    printk("%s: Sorry, max. frame length is %d bytes. The length of your frame is %ld bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
+ return 0;
+ }
+
+ if (set_bit(0, (void*)&dev->tbusy)) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+#if(NUM_XMIT_BUFFS > 1)
+ else if(set_bit(0,(void *) &p->lock)) {
+ printk("%s: Queue was locked\n",dev->name);
+ return 1;
+ }
+#endif
+ else
+ {
+ memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len);
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+#if (NUM_XMIT_BUFFS == 1)
+# ifdef NO_NOPCOMMANDS
+
+#ifdef DEBUG
+ if(p->scb->cus & CU_ACTIVE)
+ {
+      printk("%s: Hmmm .. CU is still running and we want to send a new packet.\n",dev->name);
+ printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,p->xmit_cmds[0]->cmd_status);
+ }
+#endif
+
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+ for(i=0;i<16;i++)
+ {
+ p->xmit_cmds[0]->cmd_status = 0;
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
+ p->scb->cmd_cuc = CUC_RESUME;
+ else
+ {
+ p->scb->cbl_offset = make16(p->xmit_cmds[0]);
+ p->scb->cmd_cuc = CUC_START;
+ }
+
+ ni_attn586();
+ dev->trans_start = jiffies;
+ if(!i)
+ dev_kfree_skb(skb,FREE_WRITE);
+ WAIT_4_SCB_CMD();
+ if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
+ break;
+ if(p->xmit_cmds[0]->cmd_status)
+ break;
+ if(i==15)
+ printk("%s: Can't start transmit-command.\n",dev->name);
+ }
+# else
+ next_nop = (p->nop_point + 1) & 0x1;
+ p->xmit_buffs[0]->size = TBD_LAST | len;
+
+ p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
+ = make16((p->nop_cmds[next_nop]));
+ p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
+ dev->trans_start = jiffies;
+ p->nop_point = next_nop;
+ dev_kfree_skb(skb,FREE_WRITE);
+# endif
+#else
+ p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
+ if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
+ next_nop = 0;
+
+ p->xmit_cmds[p->xmit_count]->cmd_status = 0;
+ /* linkpointer of xmit-command already points to next nop cmd */
+ p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
+ p->nop_cmds[next_nop]->cmd_status = 0;
+
+ p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
+ dev->trans_start = jiffies;
+ p->xmit_count = next_nop;
+
+ {
+ long flags;
+ save_flags(flags);
+ cli();
+ if(p->xmit_count != p->xmit_last)
+ dev->tbusy = 0;
+ p->lock = 0;
+ restore_flags(flags);
+ }
+ dev_kfree_skb(skb,FREE_WRITE);
+#endif
+ }
+ return 0;
+}
+
+/*******************************************
+ * Someone wants the statistics
+ */
+
+static struct enet_statistics *ni52_get_stats(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ unsigned short crc,aln,rsc,ovrn;
+
+ crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
+ p->scb->crc_errs = 0;
+ aln = p->scb->aln_errs;
+ p->scb->aln_errs = 0;
+ rsc = p->scb->rsc_errs;
+ p->scb->rsc_errs = 0;
+ ovrn = p->scb->ovrn_errs;
+ p->scb->ovrn_errs = 0;
+
+ p->stats.rx_crc_errors += crc;
+ p->stats.rx_fifo_errors += ovrn;
+ p->stats.rx_frame_errors += aln;
+ p->stats.rx_dropped += rsc;
+
+ return &p->stats;
+}
+
+/********************************************************
+ * Set MC list ..
+ */
+static void set_multicast_list(struct device *dev)
+{
+ if(!dev->start)
+ {
+    printk("%s: Can't apply promiscuous/multicast mode to an interface that is not running.\n",dev->name);
+ return;
+ }
+
+ dev->start = 0;
+
+ ni_disint();
+ alloc586(dev);
+ init586(dev);
+ startrecv586(dev);
+ ni_enaint();
+
+ dev->start = 1;
+}
+
+#ifdef MODULE
+static struct device dev_ni52 = {
+ " ", /* "ni5210": device name inserted by net_init.c */
+ 0, 0, 0, 0,
+ 0x300, 9, /* I/O address, IRQ */
+ 0, 0, 0, NULL, ni52_probe };
+
+/* set: io,irq,memstart,memend or set it when calling insmod */
+int irq=9;
+int io=0x300;
+long memstart=0; /* e.g 0xd0000 */
+long memend=0; /* e.g 0xd4000 */
+
+int init_module(void)
+{
+ if(io <= 0x0 || !memend || !memstart || irq < 2) {
+ printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
+ return -ENODEV;
+ }
+ dev_ni52.irq = irq;
+ dev_ni52.base_addr = io;
+ dev_ni52.mem_end = memend;
+ dev_ni52.mem_start = memstart;
+ if (register_netdev(&dev_ni52) != 0)
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ release_region(dev_ni52.base_addr, NI52_TOTAL_SIZE);
+ kfree(dev_ni52.priv);
+ dev_ni52.priv = NULL;
+ unregister_netdev(&dev_ni52);
+}
+#endif /* MODULE */
+
+#if 0
+/*
+ * DUMP .. we expect a CMD unit that is not running, and enough space
+ */
+void ni52_dump(struct device *dev,void *ptr)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
+ int i;
+
+ p->scb->cmd_cuc = CUC_ABORT;
+ ni_attn586();
+ WAIT_4_SCB_CMD();
+ WAIT_4_SCB_CMD_RUC();
+
+ dump_cmd->cmd_status = 0;
+ dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
+ dump_cmd->dump_offset = make16((dump_cmd + 1));
+ dump_cmd->cmd_link = 0xffff;
+
+ p->scb->cbl_offset = make16(dump_cmd);
+ p->scb->cmd_cuc = CUC_START;
+ ni_attn586();
+ WAIT_4_STAT_COMPL(dump_cmd);
+
+ if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
+ printk("%s: Can't get dump information.\n",dev->name);
+
+ for(i=0;i<170;i++) {
+ printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
+ if(i % 24 == 23)
+ printk("\n");
+ }
+ printk("\n");
+}
+#endif
+
+/*
+ * END: linux/drivers/net/ni52.c
+ */
+
+
diff --git a/linux/src/drivers/net/ni52.h b/linux/src/drivers/net/ni52.h
new file mode 100644
index 0000000..b3dfdd2
--- /dev/null
+++ b/linux/src/drivers/net/ni52.h
@@ -0,0 +1,310 @@
+/*
+ * Intel i82586 Ethernet definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers that work.
+ *
+ * copyrights (c) 1994 by Michael Hipp (mhipp@student.uni-tuebingen.de)
+ *
+ * I have done a look in the following sources:
+ * crynwr-packet-driver by Russ Nelson
+ * Garret A. Wollman's i82586-driver for BSD
+ */
+
+
+#define NI52_RESET 0 /* writing to this address, resets the i82586 */
+#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
+#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
+#define NI52_TDIS 2 /* Xmit disable */
+#define NI52_INTENA 5 /* Interrupt enable */
+#define NI52_INTDIS 4 /* Interrupt disable */
+#define NI52_MAGIC1     6       /* exact function unknown */
+#define NI52_MAGIC2     7       /* exact function unknown */
+
+#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
+#define NI52_MAGICVAL2 0x55
+
+/*
+ * where to find the System Configuration Pointer (SCP)
+ */
+#define SCP_DEFAULT_ADDRESS 0xfffff4
+
+
+/*
+ * System Configuration Pointer Struct
+ */
+
+struct scp_struct
+{
+ unsigned short zero_dum0; /* has to be zero */
+ unsigned char sysbus; /* 0=16Bit,1=8Bit */
+ unsigned char zero_dum1; /* has to be zero for 586 */
+ unsigned short zero_dum2;
+ unsigned short zero_dum3;
+ char *iscp; /* pointer to the iscp-block */
+};
+
+
+/*
+ * Intermediate System Configuration Pointer (ISCP)
+ */
+struct iscp_struct
+{
+ unsigned char busy; /* 586 clears after successful init */
+ unsigned char zero_dummy; /* has to be zero */
+ unsigned short scb_offset; /* pointeroffset to the scb_base */
+ char *scb_base; /* base-address of all 16-bit offsets */
+};
+
+/*
+ * System Control Block (SCB)
+ */
+struct scb_struct
+{
+ unsigned char rus;
+ unsigned char cus;
+ unsigned char cmd_ruc; /* command word: RU part */
+ unsigned char cmd_cuc; /* command word: CU part & ACK */
+ unsigned short cbl_offset; /* pointeroffset, command block list */
+ unsigned short rfa_offset; /* pointeroffset, receive frame area */
+ unsigned short crc_errs; /* CRC-Error counter */
+  unsigned short aln_errs;      /* alignment error counter */
+  unsigned short rsc_errs;      /* resource error counter */
+  unsigned short ovrn_errs;     /* overrun error counter */
+};
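+
+/*
+ * Note: the 16-bit *_offset fields above are offsets relative to
+ * iscp->scb_base (the "base-address of all 16-bit offsets"); the
+ * make16()/make32() helpers used throughout ni52.c convert between
+ * such offsets and real pointers.
+ */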
+
+/*
+ * possible command values for the command word
+ */
+#define RUC_MASK 0x0070 /* mask for RU commands */
+#define RUC_NOP 0x0000 /* NOP-command */
+#define RUC_START 0x0010 /* start RU */
+#define RUC_RESUME 0x0020 /* resume RU after suspend */
+#define RUC_SUSPEND 0x0030 /* suspend RU */
+#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
+
+#define CUC_MASK 0x07 /* mask for CU command */
+#define CUC_NOP 0x00 /* NOP-command */
+#define CUC_START       0x01    /* start execution of the first cmd on the CBL */
+#define CUC_RESUME 0x02 /* resume after suspend */
+#define CUC_SUSPEND 0x03 /* Suspend CU */
+#define CUC_ABORT 0x04 /* abort command operation immediately */
+
+#define ACK_MASK 0xf0 /* mask for ACK command */
+#define ACK_CX 0x80 /* acknowledges STAT_CX */
+#define ACK_FR 0x40 /* ack. STAT_FR */
+#define ACK_CNA 0x20 /* ack. STAT_CNA */
+#define ACK_RNR 0x10 /* ack. STAT_RNR */
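+
+/*
+ * Illustrative sketch only (mirrors startrecv586() and ni52_rnr_int()
+ * in ni52.c): a command value is written into the SCB, the 586 is kicked
+ * with a channel attention, and the driver waits until the chip has
+ * accepted (cleared) the command field again:
+ *
+ *   p->scb->rfa_offset = make16(p->rfd_first);
+ *   p->scb->cmd_ruc    = RUC_START;
+ *   ni_attn586();            <- channel attention, kick the 586
+ *   WAIT_4_SCB_CMD_RUC();    <- wait until cmd_ruc has been cleared
+ */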
+
+/*
+ * possible status values for the status word
+ */
+#define STAT_MASK 0xf0 /* mask for cause of interrupt */
+#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
+#define STAT_FR 0x40 /* RU finished receiving a frame */
+#define STAT_CNA 0x20 /* CU left active state */
+#define STAT_RNR 0x10 /* RU left ready state */
+
+#define CU_STATUS 0x7 /* CU status, 0=idle */
+#define CU_SUSPEND 0x1 /* CU is suspended */
+#define CU_ACTIVE 0x2 /* CU is active */
+
+#define RU_STATUS 0x70 /* RU status, 0=idle */
+#define RU_SUSPEND 0x10 /* RU suspended */
+#define RU_NOSPACE 0x20 /* RU no resources */
+#define RU_READY 0x40 /* RU is ready */
+
+/*
+ * Receive Frame Descriptor (RFD)
+ */
+struct rfd_struct
+{
+ unsigned char stat_low; /* status word */
+ unsigned char stat_high; /* status word */
+ unsigned char rfd_sf; /* 82596 mode only */
+ unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
+ unsigned short next; /* linkoffset to next RFD */
+ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
+ unsigned char dest[6]; /* ethernet-address, destination */
+ unsigned char source[6]; /* ethernet-address, source */
+ unsigned short length; /* 802.3 frame-length */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RFD_LAST 0x80 /* last: last rfd in the list */
+#define RFD_SUSP 0x40 /* last: suspend RU after */
+#define RFD_COMPL 0x80
+#define RFD_OK 0x20
+#define RFD_BUSY 0x40
+#define RFD_ERR_LEN     0x10    /* Length error (if length-checking is enabled) */
+#define RFD_ERR_CRC 0x08 /* CRC error */
+#define RFD_ERR_ALGN 0x04 /* Alignment error */
+#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
+#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
+
+#define RFD_ERR_FTS     0x0080  /* Frame too short */
+#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
+#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
+#define RFD_MATCHADD    0x0002  /* status: destination address does not match IA (82596 only) */
+#define RFD_COLLDET 0x0001 /* Detected collision during reception */
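+
+/*
+ * Note (based on the receive-interrupt tail in ni52.c): stat_high carries
+ * the RFD_COMPL/RFD_OK and RFD_ERR_* bits. After a frame has been handled
+ * the driver clears stat_high, sets rbd_offset to 0xffff, marks the RFD
+ * with RFD_SUSP and appends it to the end of the list again.
+ */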
+
+/*
+ * Receive Buffer Descriptor (RBD)
+ */
+struct rbd_struct
+{
+ unsigned short status; /* status word,number of used bytes in buff */
+ unsigned short next; /* pointeroffset to next RBD */
+ char *buffer; /* receive buffer address pointer */
+ unsigned short size; /* size of this buffer */
+ unsigned short zero_dummy; /* dummy */
+};
+
+#define RBD_LAST 0x8000 /* last buffer */
+#define RBD_USED 0x4000 /* this buffer has data */
+#define RBD_MASK 0x3fff /* size-mask for length */
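+
+/*
+ * Note: once the 586 has filled a buffer it sets RBD_USED in the RBD
+ * status word; the low 14 bits (RBD_MASK) then give the number of bytes
+ * actually placed in that buffer (see the rbd_struct comment above).
+ */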
+
+/*
+ * Status values for Commands/RFD
+ */
+#define STAT_COMPL 0x8000 /* status: frame/command is complete */
+#define STAT_BUSY 0x4000 /* status: frame/command is busy */
+#define STAT_OK 0x2000 /* status: frame/command is ok */
+
+/*
+ * Action-Commands
+ */
+#define CMD_NOP 0x0000 /* NOP */
+#define CMD_IASETUP 0x0001 /* initial address setup command */
+#define CMD_CONFIGURE 0x0002 /* configure command */
+#define CMD_MCSETUP 0x0003 /* MC setup command */
+#define CMD_XMIT 0x0004 /* transmit command */
+#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
+#define CMD_DUMP 0x0006 /* dump command */
+#define CMD_DIAGNOSE 0x0007 /* diagnose command */
+
+/*
+ * Action command bits
+ */
+#define CMD_LAST 0x8000 /* indicates last command in the CBL */
+#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
+#define CMD_INT 0x2000 /* generate interrupt after execution */
+
+/*
+ * NOP - command
+ */
+struct nop_cmd_struct
+{
+ unsigned short cmd_status; /* status of this command */
+ unsigned short cmd_cmd; /* the command itself (+bits) */
+ unsigned short cmd_link; /* offsetpointer to next command */
+};
+
+/*
+ * IA Setup command
+ */
+struct iasetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char iaddr[6];
+};
+
+/*
+ * Configure command
+ */
+struct configure_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned char byte_cnt; /* size of the config-cmd */
+ unsigned char fifo; /* fifo/recv monitor */
+ unsigned char sav_bf; /* save bad frames (bit7=1)*/
+ unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
+ unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
+ unsigned char ifs; /* inter frame spacing */
+ unsigned char time_low; /* slot time low */
+ unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
+ unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
+ unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
+ unsigned char fram_len; /* minimal frame len */
+ unsigned char dummy; /* dummy */
+};
+
+/*
+ * Multicast Setup command
+ */
+struct mcsetup_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short mc_cnt; /* number of bytes in the MC-List */
+  unsigned char mc_list[0][6];  /* array of 6-byte entries */
+};
+
+/*
+ * DUMP command
+ */
+struct dump_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short dump_offset; /* pointeroffset to DUMP space */
+};
+
+/*
+ * transmit command
+ */
+struct transmit_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short tbd_offset; /* pointeroffset to TBD */
+ unsigned char dest[6]; /* destination address of the frame */
+ unsigned short length; /* user defined: 802.3 length / Ether type */
+};
+
+#define TCMD_ERRMASK 0x0fa0
+#define TCMD_MAXCOLLMASK 0x000f
+#define TCMD_MAXCOLL 0x0020
+#define TCMD_HEARTBEAT 0x0040
+#define TCMD_DEFERRED 0x0080
+#define TCMD_UNDERRUN 0x0100
+#define TCMD_LOSTCTS 0x0200
+#define TCMD_NOCARRIER 0x0400
+#define TCMD_LATECOLL 0x0800
+
+struct tdr_cmd_struct
+{
+ unsigned short cmd_status;
+ unsigned short cmd_cmd;
+ unsigned short cmd_link;
+ unsigned short status;
+};
+
+#define TDR_LNK_OK 0x8000 /* No link problem identified */
+#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
+#define TDR_ET_OPN 0x2000 /* open, no correct termination */
+#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
+#define TDR_TIMEMASK 0x07ff /* mask for the time field */
+
+/*
+ * Transmit Buffer Descriptor (TBD)
+ */
+struct tbd_struct
+{
+ unsigned short size; /* size + EOF-Flag(15) */
+ unsigned short next; /* pointeroffset to next TBD */
+ char *buffer; /* pointer to buffer */
+};
+
+#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
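+
+/*
+ * Sketch (mirrors ni52_send_packet() in ni52.c): a transmit buffer
+ * descriptor gets its byte count and end-of-frame flag in a single store:
+ *
+ *   p->xmit_buffs[0]->size = TBD_LAST | len;     with len <= XMIT_BUFF_SIZE
+ */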
+
+
+
+
diff --git a/linux/src/drivers/net/ni65.c b/linux/src/drivers/net/ni65.c
new file mode 100644
index 0000000..75e8914
--- /dev/null
+++ b/linux/src/drivers/net/ni65.c
@@ -0,0 +1,1228 @@
+/*
+ * ni6510 (am7990 'lance' chip) driver for Linux-net-3
+ * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
+ * copyrights (c) 1994,1995,1996 by M.Hipp
+ *
+ * This driver can handle the old ni6510 board and the newer ni6510
+ * EtherBlaster. (It probably also works with any fully NE2100-
+ * compatible card.)
+ *
+ * To compile as module, type:
+ * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni65.c
+ * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers the Linux kernel.
+ *
+ * comments/bugs/suggestions can be sent to:
+ * Michael Hipp
+ * email: Michael.Hipp@student.uni-tuebingen.de
+ *
+ * sources:
+ * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
+ * and from the original drivers by D.Becker
+ *
+ * known problems:
+ * - on some PCI boards (including my own) the card/board/ISA-bridge has
+ *     problems with bus master DMA. This results in lots of overruns.
+ *     It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
+ *     the XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
+ *     Or just play with your BIOS options to optimize ISA-DMA access.
+ *     Maybe you also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
+ *     defines -> please report your experience then.
+ * - Harald reported for ASUS SP3G mainboards, that you should use
+ * the 'optimal settings' from the user's manual on page 3-12!
+ *
+ * credits:
+ * thanx to Jason Sullivan for sending me a ni6510 card!
+ * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
+ *
+ * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
+ * average: FTP -> 8384421 bytes received in 8.5 seconds
+ * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
+ * peak: FTP -> 8384421 bytes received in 7.5 seconds
+ * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
+ */
+
+/*
+ * 96.Sept.29: virt_to_bus stuff added for new memory model
+ * 96.April.29: Added Harald Koenig's Patches (MH)
+ * 96.April.13: enhanced error handling .. more tests (MH)
+ * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
+ * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
+ * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
+ * hopefully no more 16MB limit
+ *
+ * 95.Nov.18: multicast tweaked (AC).
+ *
+ * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
+ *
+ * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include "ni65.h"
+
+/*
+ * the current settings allow acceptable performance.
+ * For 'RCV_PARANOIA_CHECK' read the 'known problems' part in
+ * the header of this file.
+ * 'Invert' the defines for max. performance. This may cause DMA problems
+ * on some boards (e.g. on my ASUS SP3G)
+ */
+#undef XMT_VIA_SKB
+#undef RCV_VIA_SKB
+#define RCV_PARANOIA_CHECK
+
+#define MID_PERFORMANCE
+
+#if defined( LOW_PERFORMANCE )
+ static int isa0=7,isa1=7,csr80=0x0c10;
+#elif defined( MID_PERFORMANCE )
+ static int isa0=5,isa1=5,csr80=0x2810;
+#else /* high performance */
+ static int isa0=4,isa1=4,csr80=0x0017;
+#endif
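+
+/*
+ * Note: isa0/isa1 are the ISA bus-interface DMA read/write timings
+ * (in units of 50ns) and csr80 holds the FIFO watermarks; all three are
+ * written to the chip by ni65_set_performance() below.
+ */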
+
+/*
+ * a few card/vendor specific defines
+ */
+#define NI65_ID0 0x00
+#define NI65_ID1 0x55
+#define NI65_EB_ID0 0x52
+#define NI65_EB_ID1 0x44
+#define NE2100_ID0 0x57
+#define NE2100_ID1 0x57
+
+#define PORT p->cmdr_addr
+
+/*
+ * buffer configuration
+ */
+#if 1
+#define RMDNUM 16
+#define RMDNUMMASK 0x80000000
+#else
+#define RMDNUM 8
+#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
+#endif
+
+#if 0
+#define TMDNUM 1
+#define TMDNUMMASK 0x00000000
+#else
+#define TMDNUM 4
+#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
+#endif
+
+/* slightly oversized */
+#define R_BUF_SIZE 1544
+#define T_BUF_SIZE 1544
+
+/*
+ * lance register defines
+ */
+#define L_DATAREG 0x00
+#define L_ADDRREG 0x02
+#define L_RESET 0x04
+#define L_CONFIG 0x05
+#define L_BUSIF 0x06
+
+/*
+ * to access the lance/am7990-regs, you have to write
+ * reg-number into L_ADDRREG, then you can access it using L_DATAREG
+ */
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+#define INIT_RING_BEFORE_START 0x1
+#define FULL_RESET_ON_ERROR 0x2
+
+#if 0
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
+ outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
+ inw(PORT+L_DATAREG))
+#if 0
+#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#else
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+#else
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
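+
+/*
+ * Example (sketch of what the macros above expand to): reading CSR0 means
+ * selecting the register via the address port and then reading the data
+ * port, i.e. readreg(CSR0) is essentially
+ *
+ *   outw(CSR0, PORT + L_ADDRREG);      select CSR0
+ *   csr0 = inw(PORT + L_DATAREG);      read its contents
+ */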
+
+static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
+
+static struct card {
+ unsigned char id0,id1;
+ short id_offset;
+ short total_size;
+ short cmd_offset;
+ short addr_offset;
+ unsigned char *vendor_id;
+ char *cardname;
+ unsigned char config;
+} cards[] = {
+ { NI65_ID0,NI65_ID1,0x0e,0x10,0x0,0x8,ni_vendor,"ni6510", 0x1 } ,
+ { NI65_EB_ID0,NI65_EB_ID1,0x0e,0x18,0x10,0x0,ni_vendor,"ni6510 EtherBlaster", 0x2 } ,
+ { NE2100_ID0,NE2100_ID1,0x0e,0x18,0x10,0x0,NULL,"generic NE2100", 0x0 }
+};
+#define NUM_CARDS 3
+
+struct priv
+{
+ struct rmd rmdhead[RMDNUM];
+ struct tmd tmdhead[TMDNUM];
+ struct init_block ib;
+ int rmdnum;
+ int tmdnum,tmdlast;
+#ifdef RCV_VIA_SKB
+ struct sk_buff *recv_skb[RMDNUM];
+#else
+ void *recvbounce[RMDNUM];
+#endif
+#ifdef XMT_VIA_SKB
+ struct sk_buff *tmd_skb[TMDNUM];
+#endif
+ void *tmdbounce[TMDNUM];
+ int tmdbouncenum;
+ int lock,xmit_queued;
+ struct enet_statistics stats;
+ void *self;
+ int cmdr_addr;
+ int cardno;
+ int features;
+};
+
+static int ni65_probe1(struct device *dev,int);
+static void ni65_interrupt(int irq, void * dev_id, struct pt_regs *regs);
+static void ni65_recv_intr(struct device *dev,int);
+static void ni65_xmit_intr(struct device *dev,int);
+static int ni65_open(struct device *dev);
+static int ni65_lance_reinit(struct device *dev);
+static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
+static int ni65_close(struct device *dev);
+static int ni65_alloc_buffer(struct device *dev);
+static void ni65_free_buffer(struct priv *p);
+static struct enet_statistics *ni65_get_stats(struct device *);
+static void set_multicast_list(struct device *dev);
+
+static int irqtab[] = { 9,12,15,5 }; /* irq config-translate */
+static int dmatab[] = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
+
+static int debuglevel = 1;
+
+/*
+ * set 'performance' registers .. we must STOP lance for that
+ */
+static void ni65_set_performance(struct priv *p)
+{
+ writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
+
+ if( !(cards[p->cardno].config & 0x02) )
+ return;
+
+ outw(80,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) != 80)
+ return;
+
+ writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
+ outw(0,PORT+L_ADDRREG);
+ outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
+ outw(1,PORT+L_ADDRREG);
+ outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
+
+ outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
+}
+
+/*
+ * open interface (up)
+ */
+static int ni65_open(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ int irqval = request_irq(dev->irq, &ni65_interrupt,0,
+ cards[p->cardno].cardname,NULL);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name,dev->irq, irqval);
+ return -EAGAIN;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ if(ni65_lance_reinit(dev))
+ {
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+ else
+ {
+ irq2dev_map[dev->irq] = NULL;
+ free_irq(dev->irq,NULL);
+ dev->start = 0;
+ return -EAGAIN;
+ }
+}
+
+/*
+ * close interface (down)
+ */
+static int ni65_close(struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
+
+#ifdef XMT_VIA_SKB
+ {
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ {
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i],FREE_WRITE);
+ p->tmd_skb[i] = NULL;
+ }
+ }
+ }
+#endif
+ irq2dev_map[dev->irq] = NULL;
+ free_irq(dev->irq,NULL);
+ dev->tbusy = 1;
+ dev->start = 0;
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Probe The Card (not the lance-chip)
+ */
+#ifdef MODULE
+static
+#endif
+int ni65_probe(struct device *dev)
+{
+ int *port;
+ static int ports[] = {0x360,0x300,0x320,0x340, 0};
+
+ if (dev->base_addr > 0x1ff) /* Check a single specified location. */
+ return ni65_probe1(dev, dev->base_addr);
+ else if (dev->base_addr > 0) /* Don't probe at all. */
+ return -ENXIO;
+
+ for (port = ports; *port; port++)
+ {
+ if (ni65_probe1(dev, *port) == 0)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/*
+ * this is the real card probe ..
+ */
+static int ni65_probe1(struct device *dev,int ioaddr)
+{
+ int i,j;
+ struct priv *p;
+
+ for(i=0;i<NUM_CARDS;i++) {
+ if(check_region(ioaddr, cards[i].total_size))
+ continue;
+ if(cards[i].id_offset >= 0) {
+ if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
+ inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
+ continue;
+ }
+ }
+ if(cards[i].vendor_id) {
+ for(j=0;j<3;j++)
+ if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
+ continue;
+ }
+ break;
+ }
+ if(i == NUM_CARDS)
+ return -ENODEV;
+
+ for(j=0;j<6;j++)
+ dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+
+ if( (j=ni65_alloc_buffer(dev)) < 0)
+ return j;
+ p = (struct priv *) dev->priv;
+ p->cmdr_addr = ioaddr + cards[i].cmd_offset;
+ p->cardno = i;
+
+ printk("%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (j=readreg(CSR0)) != 0x4) {
+ printk(KERN_ERR "can't RESET card: %04x\n",j);
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+
+ outw(88,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) == 88) {
+ unsigned long v;
+ v = inw(PORT+L_DATAREG);
+ v <<= 16;
+ outw(89,PORT+L_ADDRREG);
+ v |= inw(PORT+L_DATAREG);
+ printk("Version %#08lx, ",v);
+ p->features = INIT_RING_BEFORE_START;
+ }
+ else {
+ printk("ancient LANCE, ");
+ p->features = 0x0;
+ }
+
+ if(test_bit(0,&cards[i].config)) {
+ dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
+ dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
+ printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
+ }
+ else {
+ if(dev->dma == 0) {
+ /* 'stuck test' from lance.c */
+ int dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) | (inb(DMA2_STAT_REG) & 0xf0);
+ for(i=1;i<5;i++) {
+ int dma = dmatab[i];
+ if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
+ continue;
+ disable_dma(dma);
+ set_dma_mode(dma,DMA_MODE_CASCADE);
+ enable_dma(dma);
+ ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
+ disable_dma(dma);
+ free_dma(dma);
+ if(readreg(CSR0) & CSR0_IDON)
+ break;
+ }
+ if(i == 5) {
+ printk("Can't detect DMA channel!\n");
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+ dev->dma = dmatab[i];
+ printk("DMA %d (autodetected), ",dev->dma);
+ }
+ else
+ printk("DMA %d (assigned), ",dev->dma);
+
+ if(dev->irq < 2)
+ {
+ ni65_init_lance(p,dev->dev_addr,0,0);
+ autoirq_setup(0);
+ writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
+
+ if(!(dev->irq = autoirq_report(2)))
+ {
+ printk("Failed to detect IRQ line!\n");
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else
+ printk("IRQ %d (assigned).\n",dev->irq);
+ }
+
+ if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
+ {
+ printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
+ ni65_free_buffer(p);
+ return -EAGAIN;
+ }
+
+ /*
+ * Grab the region so we can find another board.
+ */
+ request_region(ioaddr,cards[p->cardno].total_size,cards[p->cardno].cardname);
+
+ dev->base_addr = ioaddr;
+
+ dev->open = ni65_open;
+ dev->stop = ni65_close;
+ dev->hard_start_xmit = ni65_send_packet;
+ dev->get_stats = ni65_get_stats;
+ dev->set_multicast_list = set_multicast_list;
+
+ ether_setup(dev);
+
+ dev->interrupt = 0;
+ dev->tbusy = 0;
+ dev->start = 0;
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * set lance register and trigger init
+ */
+static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+{
+ int i;
+ u32 pib;
+
+ writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
+
+ for(i=0;i<6;i++)
+ p->ib.eaddr[i] = daddr[i];
+
+ for(i=0;i<8;i++)
+ p->ib.filter[i] = filter;
+ p->ib.mode = mode;
+
+ p->ib.trp = (u32) virt_to_bus(p->tmdhead) | TMDNUMMASK;
+ p->ib.rrp = (u32) virt_to_bus(p->rmdhead) | RMDNUMMASK;
+ writereg(0,CSR3); /* busmaster/no word-swap */
+ pib = (u32) virt_to_bus(&p->ib);
+ writereg(pib & 0xffff,CSR1);
+ writereg(pib >> 16,CSR2);
+
+ writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
+
+ for(i=0;i<32;i++)
+ {
+ udelay(4000);
+ if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
+ break; /* init ok ? */
+ }
+}
+
+/*
+ * allocate memory area and check the 16MB border
+ */
+static void *ni65_alloc_mem(struct device *dev,char *what,int size,int type)
+{
+ struct sk_buff *skb=NULL;
+ unsigned char *ptr;
+ void *ret;
+
+ if(type) {
+ ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
+ if(!skb) {
+ printk("%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2+16);
+ skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
+ ptr = skb->data;
+ }
+ else {
+ ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
+ if(!ret) {
+ printk("%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ }
+ if( (u32) virt_to_bus(ptr+size) > 0x1000000) {
+ printk("%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
+ if(type)
+ kfree_skb(skb,FREE_WRITE);
+ else
+ kfree(ptr);
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * allocate all memory structures .. send/recv buffers etc ...
+ */
+static int ni65_alloc_buffer(struct device *dev)
+{
+ unsigned char *ptr;
+ struct priv *p;
+ int i;
+
+ /*
+ * we need 8-aligned memory ..
+ */
+ ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
+ if(!ptr)
+ return -ENOMEM;
+
+ p = dev->priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
+ memset((char *) dev->priv,0,sizeof(struct priv));
+ p->self = ptr;
+
+ for(i=0;i<TMDNUM;i++)
+ {
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = NULL;
+#endif
+ p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
+ if(!p->tmdbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
+ if(!p->recv_skb[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#else
+ p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
+ if(!p->recvbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#endif
+ }
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * free buffers and private struct
+ */
+static void ni65_free_buffer(struct priv *p)
+{
+ int i;
+
+ if(!p)
+ return;
+
+ for(i=0;i<TMDNUM;i++) {
+ if(p->tmdbounce[i])
+ kfree(p->tmdbounce[i]);
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i])
+ dev_kfree_skb(p->tmd_skb[i],FREE_WRITE);
+#endif
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ if(p->recv_skb[i])
+ dev_kfree_skb(p->recv_skb[i],FREE_WRITE);
+#else
+ if(p->recvbounce[i])
+ kfree(p->recvbounce[i]);
+#endif
+ }
+ if(p->self)
+ kfree(p->self);
+}
+
+
+/*
+ * stop and (re)start lance .. e.g after an error
+ */
+static void ni65_stop_start(struct device *dev,struct priv *p)
+{
+ int csr0 = CSR0_INEA;
+
+ writedatareg(CSR0_STOP);
+
+ if(debuglevel > 1)
+ printk("ni65_stop_start\n");
+
+ if(p->features & INIT_RING_BEFORE_START) {
+ int i;
+#ifdef XMT_VIA_SKB
+ struct sk_buff *skb_save[TMDNUM];
+#endif
+ unsigned long buffer[TMDNUM];
+ short blen[TMDNUM];
+
+ if(p->xmit_queued) {
+ while(1) {
+ if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
+ break;
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ break;
+ }
+ }
+
+ for(i=0;i<TMDNUM;i++) {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ skb_save[i] = p->tmd_skb[i];
+#endif
+ buffer[i] = (u32) bus_to_virt(tmdp->u.buffer);
+ blen[i] = tmdp->blen;
+ tmdp->u.s.status = 0x0;
+ }
+
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + i;
+ rmdp->u.s.status = RCV_OWN;
+ }
+ p->tmdnum = p->xmit_queued = 0;
+ writedatareg(CSR0_STRT | csr0);
+
+ for(i=0;i<TMDNUM;i++) {
+ int num = (i + p->tmdlast) & (TMDNUM-1);
+ p->tmdhead[i].u.buffer = (u32) virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
+ p->tmdhead[i].blen = blen[num];
+ if(p->tmdhead[i].u.s.status & XMIT_OWN) {
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+ p->xmit_queued = 1;
+ writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
+ }
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = skb_save[num];
+#endif
+ }
+ p->rmdnum = p->tmdlast = 0;
+ if(!p->lock)
+ dev->tbusy = (p->tmdnum || !p->xmit_queued) ? 0 : 1;
+ dev->trans_start = jiffies;
+ }
+ else
+ writedatareg(CSR0_STRT | csr0);
+}
+
+/*
+ * init lance (write init-values .. init-buffers) (open-helper)
+ */
+static int ni65_lance_reinit(struct device *dev)
+{
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ p->lock = 0;
+ p->xmit_queued = 0;
+
+ disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
+ set_dma_mode(dev->dma,DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (i=readreg(CSR0) ) != 0x4)
+ {
+ printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
+ cards[p->cardno].cardname,(int) i);
+ disable_dma(dev->dma);
+ return 0;
+ }
+
+ p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
+ for(i=0;i<TMDNUM;i++)
+ {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i],FREE_WRITE);
+ p->tmd_skb[i] = NULL;
+ }
+#endif
+ tmdp->u.buffer = 0x0;
+ tmdp->u.s.status = XMIT_START | XMIT_END;
+ tmdp->blen = tmdp->status2 = 0;
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+ struct rmd *rmdp = p->rmdhead + i;
+#ifdef RCV_VIA_SKB
+ rmdp->u.buffer = (u32) virt_to_bus(p->recv_skb[i]->data);
+#else
+ rmdp->u.buffer = (u32) virt_to_bus(p->recvbounce[i]);
+#endif
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN;
+ }
+
+ if(dev->flags & IFF_PROMISC)
+ ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
+ else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ ni65_init_lance(p,dev->dev_addr,0xff,0x0);
+ else
+ ni65_init_lance(p,dev->dev_addr,0x00,0x00);
+
+ /*
+ * ni65_set_lance_mem() sets L_ADDRREG to CSR0
+ * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
+ */
+
+ if(inw(PORT+L_DATAREG) & CSR0_IDON) {
+ ni65_set_performance(p);
+ /* init OK: start lance , enable interrupts */
+ writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
+ return 1; /* ->OK */
+ }
+ printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ disable_dma(dev->dma);
+ return 0; /* ->Error */
+}
+
+/*
+ * interrupt handler
+ */
+static void ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+ int csr0;
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ struct priv *p;
+ int bcnt = 32;
+
+ if (dev == NULL) {
+ printk (KERN_ERR "ni65_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if(set_bit(0,(int *) &dev->interrupt)) {
+    printk("ni65: oops .. interrupt while processing interrupt\n");
+ return;
+ }
+ p = (struct priv *) dev->priv;
+
+ while(--bcnt) {
+ csr0 = inw(PORT+L_DATAREG);
+
+#if 0
+ writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
+#else
+ writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
+#endif
+
+ if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
+ break;
+
+ if(csr0 & CSR0_RINT) /* RECV-int? */
+ ni65_recv_intr(dev,csr0);
+ if(csr0 & CSR0_TINT) /* XMIT-int? */
+ ni65_xmit_intr(dev,csr0);
+
+ if(csr0 & CSR0_ERR)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+ if(debuglevel > 1)
+ printk("%s: general error: %04x.\n",dev->name,csr0);
+ if(csr0 & CSR0_BABL)
+ p->stats.tx_errors++;
+ if(csr0 & CSR0_MISS) {
+ int i;
+ for(i=0;i<RMDNUM;i++)
+ printk("%02x ",p->rmdhead[i].u.s.status);
+ printk("\n");
+ p->stats.rx_errors++;
+ }
+ if(csr0 & CSR0_MERR) {
+ if(debuglevel > 1)
+ printk("%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
+ ni65_stop_start(dev,p);
+ }
+ }
+ }
+
+#ifdef RCV_PARANOIA_CHECK
+{
+ int j;
+ for(j=0;j<RMDNUM;j++)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+ int i,k,num1,num2;
+ for(i=RMDNUM-1;i>0;i--) {
+ num2 = (p->rmdnum + i) & (RMDNUM-1);
+ if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
+ break;
+ }
+
+ if(i) {
+ for(k=0;k<RMDNUM;k++) {
+ num1 = (p->rmdnum + k) & (RMDNUM-1);
+ if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
+ break;
+ }
+ if(!k)
+ break;
+
+ if(debuglevel > 0)
+ {
+ char buf[256],*buf1;
+ int k;
+ buf1 = buf;
+ for(k=0;k<RMDNUM;k++) {
+ sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
+ buf1 += 3;
+ }
+ *buf1 = 0;
+ printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
+ }
+
+ p->rmdnum = num1;
+ ni65_recv_intr(dev,csr0);
+ if((p->rmdhead[num2].u.s.status & RCV_OWN))
+ break; /* ok, we are 'in sync' again */
+ }
+ else
+ break;
+ }
+}
+#endif
+
+ if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
+ printk("%s: RX or TX was offline -> restart\n",dev->name);
+ ni65_stop_start(dev,p);
+ }
+ else
+ writedatareg(CSR0_INEA);
+
+ dev->interrupt = 0;
+
+ return;
+}
+
+/*
+ * We have received an Xmit-Interrupt ..
+ * send a new packet if necessary
+ */
+static void ni65_xmit_intr(struct device *dev,int csr0)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ while(p->xmit_queued)
+ {
+ struct tmd *tmdp = p->tmdhead + p->tmdlast;
+ int tmdstat = tmdp->u.s.status;
+
+ if(tmdstat & XMIT_OWN)
+ break;
+
+ if(tmdstat & XMIT_ERR)
+ {
+#if 0
+ if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
+ printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
+#endif
+ /* checking some errors */
+ if(tmdp->status2 & XMIT_RTRY)
+ p->stats.tx_aborted_errors++;
+ if(tmdp->status2 & XMIT_LCAR)
+ p->stats.tx_carrier_errors++;
+ if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
+ /* this stops the xmitter */
+ p->stats.tx_fifo_errors++;
+ if(debuglevel > 0)
+ printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
+ if(p->features & INIT_RING_BEFORE_START) {
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
+ ni65_stop_start(dev,p);
+ break; /* no more Xmit processing .. */
+ }
+ else
+ ni65_stop_start(dev,p);
+ }
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
+ if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
+ p->stats.tx_errors++;
+ tmdp->status2 = 0;
+ }
+ else
+ p->stats.tx_packets++;
+
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[p->tmdlast]) {
+ dev_kfree_skb(p->tmd_skb[p->tmdlast],FREE_WRITE);
+ p->tmd_skb[p->tmdlast] = NULL;
+ }
+#endif
+
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ p->xmit_queued = 0;
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+}
+
+/*
+ * We have received a packet
+ */
+static void ni65_recv_intr(struct device *dev,int csr0)
+{
+ struct rmd *rmdp;
+ int rmdstat,len;
+ int cnt=0;
+ struct priv *p = (struct priv *) dev->priv;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
+ {
+ cnt++;
+ if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
+ {
+ if(!(rmdstat & RCV_ERR)) {
+ if(rmdstat & RCV_START)
+ {
+ p->stats.rx_length_errors++;
+ printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
+ }
+ }
+ else {
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
+ dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
+ if(rmdstat & RCV_FRAM)
+ p->stats.rx_frame_errors++;
+ if(rmdstat & RCV_OFLO)
+ p->stats.rx_over_errors++;
+ if(rmdstat & RCV_CRC)
+ p->stats.rx_crc_errors++;
+ if(rmdstat & RCV_BUF_ERR)
+ p->stats.rx_fifo_errors++;
+ }
+ if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
+ p->stats.rx_errors++;
+ }
+ else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
+ {
+#ifdef RCV_VIA_SKB
+ struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
+ if (skb)
+ skb_reserve(skb,16);
+#else
+ struct sk_buff *skb = dev_alloc_skb(len+2);
+#endif
+ if(skb)
+ {
+ skb_reserve(skb,2);
+ skb->dev = dev;
+#ifdef RCV_VIA_SKB
+ if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
+ skb_put(skb,len);
+ eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
+ }
+ else {
+ struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
+ skb_put(skb,R_BUF_SIZE);
+ p->recv_skb[p->rmdnum] = skb;
+ rmdp->u.buffer = (u32) virt_to_bus(skb->data);
+ skb = skb1;
+ skb_trim(skb,len);
+ }
+#else
+ skb_put(skb,len);
+ eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
+#endif
+ p->stats.rx_packets++;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+ else
+ {
+ printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
+ p->stats.rx_dropped++;
+ }
+ }
+ else {
+ printk(KERN_INFO "%s: received runt packet\n",dev->name);
+ p->stats.rx_errors++;
+ }
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN; /* change owner */
+ p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+}
+
+/*
+ * kick xmitter ..
+ */
+static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+
+ if(dev->tbusy)
+ {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 50)
+ return 1;
+
+ printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
+{
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ printk("%02x ",p->tmdhead[i].u.s.status);
+ printk("\n");
+}
+ ni65_lance_reinit(dev);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+ if(skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ if (skb->len <= 0)
+ return 0;
+
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk(KERN_ERR "%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+ if (set_bit(0, (void*)&p->lock)) {
+ printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
+ return 1;
+ }
+
+ {
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ struct tmd *tmdp;
+ long flags;
+
+#ifdef XMT_VIA_SKB
+ if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
+#endif
+
+ memcpy((char *) p->tmdbounce[p->tmdbouncenum] ,(char *)skb->data,
+ (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ save_flags(flags);
+ cli();
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
+ p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
+
+#ifdef XMT_VIA_SKB
+ }
+ else {
+ save_flags(flags);
+ cli();
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) virt_to_bus(skb->data);
+ p->tmd_skb[p->tmdnum] = skb;
+ }
+#endif
+ tmdp->blen = -len;
+
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
+ writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
+
+ p->xmit_queued = 1;
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+
+ dev->tbusy = (p->tmdnum == p->tmdlast) ? 1 : 0;
+ p->lock = 0;
+ dev->trans_start = jiffies;
+
+ restore_flags(flags);
+ }
+
+ return 0;
+}
+
+static struct enet_statistics *ni65_get_stats(struct device *dev)
+{
+
+#if 0
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
+ printk("%02x ",rmdp->u.s.status);
+ }
+ printk("\n");
+#endif
+
+ return &((struct priv *) dev->priv)->stats;
+}
+
+static void set_multicast_list(struct device *dev)
+{
+ if(!ni65_lance_reinit(dev))
+ printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
+ dev->tbusy = 0;
+}
+
+#ifdef MODULE
+static struct device dev_ni65 = {
+ " ", /* "ni6510": device name inserted by net_init.c */
+ 0, 0, 0, 0,
+ 0x360, 9, /* I/O address, IRQ */
+ 0, 0, 0, NULL, ni65_probe };
+
+/* set: io,irq,dma or set it when calling insmod */
+static int irq=0;
+static int io=0;
+static int dma=0;
+
+int init_module(void)
+{
+#if 0
+ if(io <= 0x0 || irq < 2) {
+ printk("ni65: Autoprobing not allowed for modules.\n");
+ printk("ni65: Set symbols 'io' 'irq' and 'dma'\n");
+ return -ENODEV;
+ }
+#endif
+ dev_ni65.irq = irq;
+ dev_ni65.dma = dma;
+ dev_ni65.base_addr = io;
+ if (register_netdev(&dev_ni65) != 0)
+ return -EIO;
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ struct priv *p;
+ p = (struct priv *) dev_ni65.priv;
+ if(!p) {
+    printk("Ooops .. no private struct\n");
+ return;
+ }
+ disable_dma(dev_ni65.dma);
+ free_dma(dev_ni65.dma);
+ release_region(dev_ni65.base_addr,cards[p->cardno].total_size);
+ ni65_free_buffer(p);
+ dev_ni65.priv = NULL;
+ unregister_netdev(&dev_ni65);
+}
+#endif /* MODULE */
+
+/*
+ * END of ni65.c
+ */
+
+
diff --git a/linux/src/drivers/net/ni65.h b/linux/src/drivers/net/ni65.h
new file mode 100644
index 0000000..6438095
--- /dev/null
+++ b/linux/src/drivers/net/ni65.h
@@ -0,0 +1,130 @@
+/* am7990 (lance) definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by
+ * the same GNU General Public License that covers that work.
+ *
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources: (mail me or ask archie if you need them)
+ * crynwr-packet-driver
+ */
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define M_PROM 0x8000 /* Promiscuous Mode */
+#define M_INTL 0x0040 /* Internal Loopback */
+#define M_DRTY 0x0020 /* Disable Retry */
+#define M_COLL 0x0010 /* Force Collision */
+#define M_DTCR          0x0008  /* Disable Transmit CRC */
+#define M_LOOP 0x0004 /* Loopback */
+#define M_DTX 0x0002 /* Disable the Transmitter */
+#define M_DRX 0x0001 /* Disable the Receiver */
+
+
+/*
+ * Receive message descriptor bit definitions.
+ */
+
+#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define RCV_ERR 0x40 /* Error Summary */
+#define RCV_FRAM 0x20 /* Framing Error */
+#define RCV_OFLO 0x10 /* Overflow Error */
+#define RCV_CRC 0x08 /* CRC Error */
+#define RCV_BUF_ERR 0x04 /* Buffer Error */
+#define RCV_START 0x02 /* Start of Packet */
+#define RCV_END 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor bit definitions.
+ */
+
+#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define XMIT_ERR 0x40 /* Error Summary */
+#define XMIT_RETRY      0x10    /* more than 1 retry needed to Xmit */
+#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
+#define XMIT_DEF 0x04 /* Deferred */
+#define XMIT_START 0x02 /* Start of Packet */
+#define XMIT_END 0x01 /* End of Packet */
+
+/*
+ * transmit status (2) (valid if XMIT_ERR == 1)
+ */
+
+#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
+#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
+#define XMIT_LCAR 0x0800 /* Loss of Carrier */
+#define XMIT_LCOL 0x1000 /* Late collision */
+#define XMIT_RESERV 0x2000 /* Reserved */
+#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
+#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
+
+struct init_block
+{
+ unsigned short mode;
+ unsigned char eaddr[6];
+ unsigned char filter[8];
+ /* bit 29-31: number of rmd's (power of 2) */
+ u32 rrp; /* receive ring pointer (align 8) */
+ /* bit 29-31: number of tmd's (power of 2) */
+ u32 trp; /* transmit ring pointer (align 8) */
+};
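+
+/*
+ * Sketch (see ni65_init_lance() in ni65.c): rrp/trp hold the bus address
+ * of the descriptor ring OR'ed with log2(ring size) in bits 29-31, e.g.
+ * for the default RMDNUM of 16:
+ *
+ *   p->ib.rrp = (u32) virt_to_bus(p->rmdhead) | RMDNUMMASK;   with RMDNUMMASK == 4<<29
+ */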
+
+struct rmd /* Receive Message Descriptor */
+{
+ union
+ {
+ volatile u32 buffer;
+ struct
+ {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile short blen;
+ volatile unsigned short mlen;
+};
+
+struct tmd
+{
+ union
+ {
+ volatile u32 buffer;
+ struct
+ {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile unsigned short blen;
+ volatile unsigned short status2;
+};
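+
+/*
+ * Note: in both rmd and tmd the ownership/status byte (RCV_OWN, XMIT_OWN,
+ * ...) overlays the high byte of the 32-bit 'buffer' word -- hence the
+ * union with dummy[3] + status. ni65.c relies on this; see the
+ * "status is part of buffer field" remark in ni65_stop_start().
+ */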
+
+
diff --git a/linux/src/drivers/net/ns820.c b/linux/src/drivers/net/ns820.c
new file mode 100644
index 0000000..968f3ac
--- /dev/null
+++ b/linux/src/drivers/net/ns820.c
@@ -0,0 +1,1547 @@
+/* ns820.c: A Linux Gigabit Ethernet driver for the NatSemi DP83820 series. */
+/*
+ Written/copyright 1999-2003 by Donald Becker.
+ Copyright 2002-2003 by Scyld Computing Corporation.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+  system is licensed under the GPL. A license under other terms may be
+ available. Contact the original author for details.
+
+ The original author may be reached as becker@scyld.com, or at
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/natsemi.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"ns820.c:v1.03a 8/09/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/natsemi.html\n";
+/* Updated to recommendations in pci-skeleton v2.13. */
+
+/* Automatically extracted configuration info:
+probe-func: ns820_probe
+config-in: tristate 'National Semiconductor DP8382x series PCI Ethernet support' CONFIG_NATSEMI820
+
+c-help-name: National Semiconductor DP8382x series PCI Ethernet support
+c-help-symbol: CONFIG_NATSEMI820
+c-help: This driver is for the National Semiconductor DP83820 Gigabit Ethernet
+c-help: adapter series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/natsemi.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ This chip uses a 2048 element hash table based on the Ethernet CRC.
+ Previous natsemi chips had unreliable multicast filter circuitry.
+ To work around an observed problem set this value to '0',
+ which will immediately switch to Rx-all-multicast.
+ */
+static int multicast_filter_limit = 100;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can only receive into aligned buffers, so architectures such
+ as the Alpha AXP might benefit from a copy-align.
+*/
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability, however setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+   The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+ Use 0x1000 or 0x2000 for gigabit.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ Understand the implications before changing these settings!
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, confuses the system network buffer limits,
+ and wastes memory.
+ Too-large receive rings waste memory and confound network buffer limits.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 64
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung.
+ Re-autonegotiation may take up to 3 seconds.
+ */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("National Semiconductor DP83820 series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the National Semiconductor DP83820 10/100/1000
+Ethernet NIC. It is superficially similar to the 810 series "natsemi.c"
+driver; however, the register layout, descriptor layout and element
+length of the new chip series are different.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be configured.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list, thus rings can be arbitrarily sized. Before changing the
+ring sizes you should understand the flow and cache effects of the
+full/available/empty hysteresis.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
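+For example (a value chosen only for illustration), setting rx_copybreak to
+256 would copy any received frame shorter than 256 bytes into a freshly
+allocated skbuff and keep the original ring buffer in place, while larger
+frames would still be passed up without copying.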
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. On copies, frames are put into the
+skbuff at an offset of "+2", 16-byte aligning the IP header.
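+For example, after the two byte pad the 14 byte Ethernet header occupies
+offsets 2..15 of the new skbuff, so the IP header that follows begins at
+offset 16 and is longword aligned.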
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it queues a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+The NatSemi 820 series PCI gigabit chips are very common on low-cost NICs.
+The '821 appears to be the same as the '820 chip, only with pins for the upper
+32 bits marked "N/C".
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+The NatSemi dp83820 datasheet is available: search www.natsemi.com
+
+IVc. Errata
+
+None characterised.
+
+*/
+
+
+
+static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int power_event(void *dev_instance, int event);
+enum chip_capability_flags {FDXActiveLow=1, InvertGbXcvrPwr=2, };
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ { "D-Link DGE-500T (DP83820)",
+ { 0x0022100B, 0xffffffff, 0x49001186, 0xffffffff, },
+ PCI_IOTYPE, 256, FDXActiveLow},
+ {"NatSemi DP83820", { 0x0022100B, 0xffffffff },
+ PCI_IOTYPE, 256, 0},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info ns820_drv_id = {
+ "ns820", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ ns820_probe1, power_event };
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. Please do not change these names without good reason.
+*/
+enum register_offsets {
+ ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
+ IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18, IntrHoldoff=0x1C,
+ TxRingPtr=0x20, TxRingPtrHi=0x24, TxConfig=0x28,
+ RxRingPtr=0x30, RxRingPtrHi=0x34, RxConfig=0x38,
+ WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
+ BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
+ StatsCtrl=0x5C, RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
+};
+
+/* Bits in ChipCmd. */
+enum ChipCmdBits {
+ ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
+ RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
+};
+
+/* Bits in ChipConfig. */
+enum ChipConfigBits {
+ CfgLinkGood=0x80000000, CfgFDX=0x10000000,
+ CfgXcrReset=0x0400, CfgXcrOff=0x0200,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
+ IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
+ IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
+ IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
+ StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
+ RxStatusOverrun=0x10000,
+ RxResetDone=0x00200000, TxResetDone=0x00400000,
+ IntrPCIErr=0x001E0000,
+ IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
+};
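+/* Note: IntrNormalSummary (0x0251) is IntrRxDone|IntrRxIdle|IntrTxDone|
+   IntrTxIdle from the table above.  The two summary masks are used to build
+   the interrupt mask in netdev_open() and to decide when intr_handler()
+   should call netdev_error(). */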
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptErr=0x20, AcceptRunt=0x10,
+ AcceptBroadcast=0xC0000000,
+ AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
+ AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+#if ADDRLEN == 64
+ u64 next_desc;
+ u64 buf_addr;
+#else
+ u32 next_desc;
+ u32 buf_addr;
+#endif
+ s32 cmd_status;
+ u32 vlan_status;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+ DescNoCRC=0x10000000,
+ DescPktOK=0x08000000, RxTooLong=0x00400000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc rx_ring[RX_RING_SIZE];
+ struct netdev_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ long in_interrupt; /* Word-long for SMP locks. */
+ int max_interrupt_work;
+ int intr_enable;
+ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
+ unsigned int rx_q_empty:1; /* Set when out of Rx skbuffs. */
+
+ struct netdev_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* Rx filter. */
+ u32 cur_rx_mode;
+ u32 rx_filter[16];
+ int multicast_filter_limit;
+ /* FIFO and PCI burst thresholds. */
+ int tx_config, rx_config;
+ /* MII transceiver section. */
+ u16 advertising; /* NWay media advertisement */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static void mdio_sync(long mdio_addr);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static int rx_ring_fill(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int ns820_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&ns820_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ /* Perhaps NETIF_MSG_PROBE */
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, 12 - i));
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(ChipReset, ioaddr + ChipCmd);
+ /* Power up Xcvr. */
+ writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+
+ /* Make certain elements e.g. descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
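+ /* Round the kmalloc'ed block up to the next 16 byte boundary so that the
+ descriptor rings at the start of netdev_private are aligned; the original
+ unaligned pointer is kept in priv_addr for the later kfree(). */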
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x33ff;
+ if (np->default_port & 0x330)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ /* Allow forcing the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ mdio_write(dev, 1, 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
+ Update to the code in other drivers for 8/10 bit addresses.
+*/
+
+/* Delay between EEPROM clock transitions.
+ This "delay" forces out buffered PCI writes, which is sufficient to meet
+ the timing requirements of most EEPROMs.
+*/
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
+};
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the 01 preamble. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7,
+};
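+/* Example: eeprom_read(ioaddr, 5) shifts out (EE_ReadCmd << 6) | 5 = 0x185,
+   most significant bit first over 11 clock cycles, then clocks in the
+   16 data bits of that EEPROM word. */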
+
+static int eeprom_read(long addr, int location)
+{
+ long eeprom_addr = addr + EECtrl;
+ int read_cmd = (EE_ReadCmd << 6) | location;
+ int retval = 0;
+ int i;
+
+ writel(EE_Write0, eeprom_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ writel(dataval | EE_ShiftClk, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ }
+ writel(EE_ChipSelect, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+
+ for (i = 15; i >= 0; i--) {
+ writel(EE_ChipSelect | EE_ShiftClk, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ retval |= (readl(eeprom_addr) & EE_DataOut) ? 1 << i : 0;
+ writel(EE_ChipSelect, eeprom_addr);
+ eeprom_delay(eeprom_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(EE_Write0, eeprom_addr);
+ writel(0, eeprom_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write MII registers using software-generated serial MDIO
+ protocol. See the MII specifications or DP83840A data sheet for details.
+
+ The maximum data clock rate is 2.5 MHz. To meet minimum timing we
+ must flush writes to the PCI bus with a PCI read. */
+#define mdio_delay(mdio_addr) readl(mdio_addr)
+
+/* Set iff an MII transceiver on any interface requires the mdio preamble.
+ This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0040, MDIO_Data=0x0010, MDIO_EnbOutput=0x0020,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ writel(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long mdio_addr = dev->base_addr + EECtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ writel(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ writel(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ writel(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((readl(mdio_addr) & MDIO_Data) ? 1 : 0);
+ writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ long mdio_addr = dev->base_addr + EECtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ writel(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ writel(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ writel(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ /* We have not yet encountered a case where we need to reset the chip. */
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ /* Power up Xcvr. */
+ writel((~CfgXcrOff & readl(ioaddr + ChipConfig)) | 0x00400000,
+ ioaddr + ChipConfig);
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d intr_status %8.8x.\n",
+ dev->name, dev->irq, intr_status);
+
+ init_ring(dev);
+
+#if defined(ADDR_64BITS) && defined(__alpha__)
+ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtrHi);
+ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtrHi);
+#else
+ writel(0, ioaddr + RxRingPtrHi);
+ writel(0, ioaddr + TxRingPtrHi);
+#endif
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ for (i = 0; i < 6; i += 2) {
+ writel(i, ioaddr + RxFilterAddr);
+ writel(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
+ ioaddr + RxFilterData);
+ }
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ /* Configure for standard, in-spec Ethernet. */
+
+ if (np->full_duplex ||
+ ((readl(ioaddr + ChipConfig) & CfgFDX) == 0) ^
+ ((np->drv_flags & FDXActiveLow) != 0)) {
+ np->tx_config = 0xD0801002;
+ np->rx_config = 0x10000020;
+ } else {
+ np->tx_config = 0x10801002;
+ np->rx_config = 0x0020;
+ }
+ if (dev->mtu > 1500)
+ np->rx_config |= 0x08000000;
+ writel(np->tx_config, ioaddr + TxConfig);
+ writel(np->rx_config, ioaddr + RxConfig);
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x.\n",
+ dev->name, (int)readl(ioaddr + TxConfig));
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ np->in_interrupt = 0;
+
+ check_duplex(dev);
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
+ writel(np->intr_enable, ioaddr + IntrMask);
+ writel(1, ioaddr + IntrEnable);
+
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ writel(4, ioaddr + StatsCtrl); /* Clear Stats */
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int duplex;
+
+ if (np->duplex_lock)
+ return;
+ duplex = readl(ioaddr + ChipConfig) & CfgFDX ? 1 : 0;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
+ " capability.\n", dev->name,
+ duplex ? "full" : "half");
+ if (duplex) {
+ np->rx_config |= 0x10000000;
+ np->tx_config |= 0xC0000000;
+ } else {
+ np->rx_config &= ~0x10000000;
+ np->tx_config &= ~0xC0000000;
+ }
+ writel(np->tx_config, ioaddr + TxConfig);
+ writel(np->rx_config, ioaddr + RxConfig);
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x (%8.8x).\n",
+ dev->name, np->tx_config, (int)readl(ioaddr + TxConfig));
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
+ dev->name, (int)readl(ioaddr + ChipConfig));
+ if (np->rx_q_empty) {
+ /* Trigger an interrupt to refill. */
+ writel(SoftIntr, ioaddr + ChipCmd);
+ }
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ tx_timeout(dev);
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
+
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", np->tx_ring[i].cmd_status);
+ printk("\n");
+ }
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes. */
+
+ /* Trigger an immediate transmit demand. */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+/* Refill the Rx ring buffers, returning non-zero if not full. */
+static int rx_ring_fill(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned int entry;
+
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ return 1; /* Better luck next time. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].buf_addr = virt_to_bus(skb->tail);
+ }
+ np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
+ }
+ return 0;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ /* MAX(PKT_BUF_SZ, dev->mtu + 8); */
+ /* I know you _want_ to change this without understanding it. Don't. */
+ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i+1]);
+ np->tx_ring[i].cmd_status = 0;
+ }
+ np->tx_ring[i-1].next_desc = virt_to_bus(&np->tx_ring[0]);
+
+ /* Fill in the Rx buffers.
+ Allocation failure just leaves a "negative" np->dirty_rx. */
+ np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
+ rx_ring_fill(dev);
+
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned int entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx.
+ No spinlock is needed for either Tx or Rx.
+ */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].buf_addr = virt_to_bus(skb->data);
+ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
+ np->cur_tx++;
+
+ /* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */
+
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_QUEUE_LEN - 4) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ /* Wake the potentially-idle transmit channel. */
+ writel(TxOn, dev->base_addr + ChipCmd);
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+ "device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#endif
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0 || intr_status == 0xffffffff)
+ break;
+
+ /* Acknowledge all of the current interrupt sources ASAP.
+ Nominally the read above accomplishes this, but... */
+ writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
+
+ if (intr_status & (IntrRxDone | IntrRxIntr)) {
+ netdev_rx(dev);
+ np->rx_q_empty = rx_ring_fill(dev);
+ }
+
+ if (intr_status & (IntrRxIdle | IntrDrv)) {
+ unsigned int old_dirty_rx = np->dirty_rx;
+ if (rx_ring_fill(dev) == 0)
+ np->rx_q_empty = 0;
+ /* Restart Rx engine iff we did add a buffer. */
+ if (np->dirty_rx != old_dirty_rx)
+ writel(RxOn, dev->base_addr + ChipCmd);
+ }
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Tx entry %d @%p status %8.8x.\n",
+ dev->name, entry, &np->tx_ring[entry],
+ np->tx_ring[entry].cmd_status);
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
+ break;
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, np->tx_ring[entry].cmd_status);
+ np->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ } else { /* Various Tx errors */
+ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
+ if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
+ if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
+ if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
+ if (tx_status & 0x00200000) np->stats.tx_window_errors++;
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ np->stats.tx_errors++;
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis to mark the queue non-full. */
+ if (np->tx_full
+ && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & IntrAbnormalSummary)
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ np->restore_intr_enable = 1;
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
+ clear_bit(0, (void*)&dev->interrupt);
+#endif
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+
+ /* If the driver owns the next entry it's a new packet. Send it up. */
+ while (desc_status < 0) { /* e.g. & DescOwn */
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In netdev_rx() entry %d status was %8.8x.\n",
+ entry, desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
+ if (desc_status & DescMore) {
+ printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
+ "multiple buffers, entry %#x status %x.\n",
+ dev->name, np->cur_rx, desc_status);
+ np->stats.rx_length_errors++;
+ } else {
+ /* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & 0x06000000) np->stats.rx_over_errors++;
+ if (desc_status & 0x00600000) np->stats.rx_length_errors++;
+ if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
+ if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ int pkt_len = (desc_status & 0x0fff) - 4; /* Omit CRC size. */
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (np->msg_level & NETIF_MSG_PKTDATA)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+ "%d.%d.%d.%d.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13],
+ skb->data[14], skb->data[15], skb->data[16],
+ skb->data[17]);
+#endif
+ skb->protocol = eth_type_trans(skb, dev);
+ /* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+ }
+
+ /* Refill is now done in the main interrupt loop. */
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & LinkChange) {
+ int chip_config = readl(ioaddr + ChipConfig);
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ (int)readl(ioaddr + 0x90), (int)readl(ioaddr + 0x94));
+ if (chip_config & CfgLinkGood)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ /* Increase the Tx threshold, 32 byte units. */
+ if ((np->tx_config & 0x3f) < 62)
+ np->tx_config += 2; /* +64 bytes */
+ writel(np->tx_config, ioaddr + TxConfig);
+ }
+ if (intr_status & WOLPkt) {
+ int wol_status = readl(ioaddr + WOLCmd);
+ printk(KERN_NOTICE "%s: Link wake-up event %8.8x",
+ dev->name, wol_status);
+ }
+ if (intr_status & (RxStatusOverrun | IntrRxOverrun)) {
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Rx overflow! ns820 %8.8x.\n",
+ dev->name, intr_status);
+ np->stats.rx_fifo_errors++;
+ }
+ if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
+ RxStatusOverrun|0xA7ff)) {
+ if (np->msg_level & NETIF_MSG_DRV)
+ printk(KERN_ERR "%s: Something Wicked happened! ns820 %8.8x.\n",
+ dev->name, intr_status);
+ }
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrPCIErr) {
+ np->stats.tx_fifo_errors++;
+ np->stats.rx_fifo_errors++;
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int crc_errs = readl(ioaddr + RxCRCErrs);
+
+ if (crc_errs != 0xffffffff) {
+ /* We need not lock this segment of code for SMP.
+ There is no atomic-add vulnerability for most CPUs,
+ and statistics are non-critical. */
+ /* The chip only needs to report frames that were silently dropped. */
+ np->stats.rx_crc_errors += crc_errs;
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+ }
+
+ return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+ A big-endian version is also available.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c.
+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
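+/* set_rx_mode() below uses the low 11 bits of this CRC,
+   ether_crc_le(ETH_ALEN, addr) & 0x7ff, as the bit index into the chip's
+   2048-entry multicast hash filter. */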
+
+static void set_rx_mode(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u8 mc_filter[64]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
+ | AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x7ff,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ for (i = 0; i < 64; i += 2) {
+ writel(rx_mode + 0x200 + i, ioaddr + RxFilterAddr);
+ writel((mc_filter[i+1]<<8) + mc_filter[i], ioaddr + RxFilterData);
+ }
+ }
+ writel(rx_mode, ioaddr + RxFilterAddr);
+ np->cur_rx_mode = rx_mode;
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = 1;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == 1) {
+ u16 miireg = data[1] & 0x1f;
+ u16 value = data[2];
+ switch (miireg) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (np->duplex_lock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
+ "Int %2.2x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd),
+ (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* We don't want the timer to re-start anything. */
+ del_timer(&np->timer);
+
+ /* Disable interrupts using the mask. */
+ writel(0, ioaddr + IntrMask);
+ writel(0, ioaddr + IntrEnable);
+ writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+
+ get_stats(dev);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %8.8x %8.8x.\n",
+ i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
+ i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].cmd_status = 0;
+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ /* Power down Xcvr. */
+ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int power_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, freeze stats, stop Tx and Rx. */
+ writel(0, ioaddr + IntrEnable);
+ writel(2, ioaddr + StatsCtrl);
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the open() actions should be repeated. */
+ writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
+ set_rx_mode(dev);
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ writel(1, ioaddr + IntrEnable);
+ writel(RxOn | TxOn, ioaddr + ChipCmd);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+ register_driver(&etherdev_ops);
+ return 0;
+#else
+ return pci_drv_register(&ns820_drv_id, NULL);
+#endif
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&etherdev_ops);
+#else
+ pci_drv_unregister(&ns820_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+ iounmap((char *)root_net_dev->base_addr);
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` ns820.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ns820.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c ns820.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/pci-scan.c b/linux/src/drivers/net/pci-scan.c
new file mode 100644
index 0000000..ffb7b12
--- /dev/null
+++ b/linux/src/drivers/net/pci-scan.c
@@ -0,0 +1,659 @@
+/* pci-scan.c: Linux PCI network adapter support code. */
+/*
+ Originally written 1999-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License (GPL), incorporated herein by
+ reference. Drivers interacting with these functions are derivative
+ works and thus also must be licensed under the GPL and include an explicit
+ GPL notice.
+
+ This code provides common scan and activate functions for PCI network
+ interfaces.
+
+ The author may be reached as becker@scyld.com, or
+ Donald Becker
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Other contributors:
+*/
+static const char version[] =
+"pci-scan.c:v1.12 7/30/2003 Donald Becker <becker@scyld.com>"
+" http://www.scyld.com/linux/drivers.html\n";
+
+/* A few user-configurable values that may be modified when a module. */
+
+static int msg_level = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+static int min_pci_latency = 32;
+
+#if ! defined(__KERNEL__)
+#define __KERNEL__ 1
+#endif
+#if !defined(__OPTIMIZE__) && /* Mach glue, we think this is ok now: */ 0
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with the proper options, including "-O".
+#endif
+
+#if defined(MODULE) && ! defined(EXPORT_SYMTAB)
+#define EXPORT_SYMTAB
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#if LINUX_VERSION_CODE < 0x20500 && defined(MODVERSIONS)
+/* Another interface semantics screw-up. */
+#include <linux/module.h>
+#include <linux/modversions.h>
+#else
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20300
+/* Bogus change in the middle of a "stable" kernel series.
+ Also, in 2.4.7+ slab must come before interrupt.h to avoid breakage. */
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <asm/io.h>
+#include "pci-scan.h"
+#include "kern_compat.h"
+#if defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+#include <linux/apm_bios.h>
+#endif
+#ifdef CONFIG_PM
+/* New in 2.4 kernels, pointlessly incompatible with earlier APM. */
+#include <linux/pm.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+#if (LINUX_VERSION_CODE < 0x20100)
+#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
+#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
+#define PCI_CAP_ID_PM 0x01 /* Power Management */
+#endif
+
+int (*register_hotswap_hook)(struct drv_id_info *did);
+void (*unregister_hotswap_hook)(struct drv_id_info *did);
+
+#if LINUX_VERSION_CODE > 0x20118 && defined(MODULE)
+MODULE_LICENSE("GPL");
+MODULE_PARM(msg_level, "i");
+MODULE_PARM(min_pci_latency, "i");
+MODULE_PARM_DESC(msg_level, "Enable additional status messages (0-7)");
+MODULE_PARM_DESC(min_pci_latency,
+ "Minimum value for the PCI Latency Timer settings");
+#if defined(EXPORT_SYMTAB)
+EXPORT_SYMBOL_NOVERS(pci_drv_register);
+EXPORT_SYMBOL_NOVERS(pci_drv_unregister);
+EXPORT_SYMBOL_NOVERS(acpi_wake);
+EXPORT_SYMBOL_NOVERS(acpi_set_pwr_state);
+EXPORT_SYMBOL_NOVERS(register_hotswap_hook);
+EXPORT_SYMBOL_NOVERS(unregister_hotswap_hook);
+#endif
+#endif
+
+/* List of registered drivers. */
+static struct drv_id_info *drv_list;
+/* List of detected PCI devices, for APM events. */
+static struct dev_info {
+ struct dev_info *next;
+ void *dev;
+ struct drv_id_info *drv_id;
+ int flags;
+} *dev_list;
+
+/*
+ This code is not intended to support every configuration.
+ It is intended to minimize duplicated code by providing the functions
+ needed in almost every PCI driver.
+
+ The "no kitchen sink" policy:
+ Additional features and code will be added to this module only if more
+ than half of the drivers for common hardware would benefit from the feature.
+*/
+
+/*
+ Ideally we would detect and number all cards of a type (e.g. network) in
+ PCI slot order.
+ But that does not work with hot-swap cards, CardBus cards and added drivers.
+ So instead we detect just each chip table in slot order.
+
+ This routine takes a PCI ID table, scans the PCI bus, and calls the
+ associated attach/probe1 routine with the hardware already activated and
+ single I/O or memory address already mapped.
+
+ This routine will later be supplemented with CardBus and hot-swap PCI
+ support using the same table. Thus the pci_chip_tbl[] should not be
+ marked as __initdata.
+*/
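+/* Typical use, as in ns820.c earlier in this series: a driver fills in a
+   struct drv_id_info with its pci_id_info table and probe1 routine, calls
+   pci_drv_register(&foo_drv_id, NULL) from init_module() and
+   pci_drv_unregister(&foo_drv_id) from cleanup_module().  ('foo_drv_id' is
+   only a placeholder name.) */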
+
+#if LINUX_VERSION_CODE >= 0x20200
+/* Grrrr.. complex abstraction layers with negative benefit. */
+int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
+{
+ int chip_idx, cards_found = 0;
+ struct pci_dev *pdev = NULL;
+ struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
+ struct drv_id_info *drv;
+ void *newdev;
+
+
+ /* Ignore a double-register attempt. */
+ for (drv = drv_list; drv; drv = drv->next)
+ if (drv == drv_id)
+ return -EBUSY;
+
+ while ((pdev = pci_find_class(drv_id->pci_class, pdev)) != 0) {
+ u32 pci_id, pci_subsys_id, pci_class_rev;
+ u16 pci_command, new_command;
+ int pci_flags;
+ long pciaddr; /* Bus address. */
+ long ioaddr; /* Mapped address for this processor. */
+
+ pci_read_config_dword(pdev, PCI_VENDOR_ID, &pci_id);
+ /* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
+ pci_read_config_dword(pdev, 0x2c, &pci_subsys_id);
+ pci_read_config_dword(pdev, PCI_REVISION_ID, &pci_class_rev);
+
+ if (msg_level > 3)
+ printk(KERN_DEBUG "PCI ID %8.8x subsystem ID is %8.8x.\n",
+ pci_id, pci_subsys_id);
+ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+ struct pci_id_info *chip = &pci_tbl[chip_idx];
+ if ((pci_id & chip->id.pci_mask) == chip->id.pci
+ && (pci_subsys_id&chip->id.subsystem_mask) == chip->id.subsystem
+ && (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
+ break;
+ }
+ if (pci_tbl[chip_idx].name == 0) /* Compiled out! */
+ continue;
+
+ pci_flags = pci_tbl[chip_idx].pci_flags;
+#if LINUX_VERSION_CODE >= 0x2030C
+ /* Wow. An oversized, hard-to-use abstraction. Bogus. */
+ pciaddr = pdev->resource[(pci_flags >> 4) & 7].start;
+#else
+ pciaddr = pdev->base_address[(pci_flags >> 4) & 7];
+#if defined(__alpha__) /* Really any machine with 64 bit addressing. */
+ if (pci_flags & PCI_ADDR_64BITS)
+ pciaddr |= ((long)pdev->base_address[((pci_flags>>4)&7)+ 1]) << 32;
+#endif
+#endif
+ if (msg_level > 2)
+ printk(KERN_INFO "Found %s at PCI address %#lx, mapped IRQ %d.\n",
+ pci_tbl[chip_idx].name, pciaddr, pdev->irq);
+
+ if ( ! (pci_flags & PCI_UNUSED_IRQ) &&
+ (pdev->irq == 0 || pdev->irq == 255)) {
+ if (pdev->bus->number == 32) /* Broken CardBus activation. */
+ printk(KERN_WARNING "Resources for CardBus device '%s' have"
+ " not been allocated.\n"
+ KERN_WARNING "Activation has been delayed.\n",
+ pci_tbl[chip_idx].name);
+ else
+ printk(KERN_WARNING "PCI device '%s' was not assigned an "
+ "IRQ.\n"
+ KERN_WARNING "It will not be activated.\n",
+ pci_tbl[chip_idx].name);
+ continue;
+ }
+ if ((pci_flags & PCI_BASE_ADDRESS_SPACE_IO)) {
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+ if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+ continue;
+ } else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size)) == 0) {
+ printk(KERN_INFO "Failed to map PCI address %#lx for device "
+ "'%s'.\n", pciaddr, pci_tbl[chip_idx].name);
+ continue;
+ }
+ if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
+ acpi_wake(pdev);
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ new_command = pci_command | (pci_flags & 7);
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled the"
+ " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
+ pdev->bus->number, pdev->devfn, pci_command, new_command);
+ pci_write_config_word(pdev, PCI_COMMAND, new_command);
+ }
+
+ newdev = drv_id->probe1(pdev, initial_device,
+ ioaddr, pdev->irq, chip_idx, cards_found);
+ if (newdev == NULL)
+ continue;
+ initial_device = 0;
+ cards_found++;
+ if (pci_flags & PCI_COMMAND_MASTER) {
+ pci_set_master(pdev);
+ if ( ! (pci_flags & PCI_NO_MIN_LATENCY)) {
+ u8 pci_latency;
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < min_pci_latency) {
+ printk(KERN_INFO " PCI latency timer (CFLT) is "
+ "unreasonably low at %d. Setting to %d clocks.\n",
+ pci_latency, min_pci_latency);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
+ min_pci_latency);
+ }
+ }
+ }
+ {
+ struct dev_info *devp =
+ kmalloc(sizeof(struct dev_info), GFP_KERNEL);
+ if (devp == 0)
+ continue;
+ devp->next = dev_list;
+ devp->dev = newdev;
+ devp->drv_id = drv_id;
+ dev_list = devp;
+ }
+ }
+
+ if (((drv_id->flags & PCI_HOTSWAP)
+ && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
+ || cards_found) {
+ MOD_INC_USE_COUNT;
+ drv_id->next = drv_list;
+ drv_list = drv_id;
+ return 0;
+ } else
+ return -ENODEV;
+}
+#else
+int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
+{
+ int pci_index, cards_found = 0;
+ unsigned char pci_bus, pci_device_fn;
+ struct pci_dev *pdev;
+ struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
+ void *newdev;
+
+ if ( ! pcibios_present())
+ return -ENODEV;
+
+ for (pci_index = 0; pci_index < 0xff; pci_index++) {
+ u32 pci_id, subsys_id, pci_class_rev;
+ u16 pci_command, new_command;
+ int chip_idx, irq, pci_flags;
+ long pciaddr;
+ long ioaddr;
+ u32 pci_busaddr;
+ u8 pci_irq_line;
+
+ if (pcibios_find_class (drv_id->pci_class, pci_index,
+ &pci_bus, &pci_device_fn)
+ != PCIBIOS_SUCCESSFUL)
+ break;
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_VENDOR_ID, &pci_id);
+ /* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
+ pcibios_read_config_dword(pci_bus, pci_device_fn, 0x2c, &subsys_id);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_REVISION_ID, &pci_class_rev);
+
+ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+ struct pci_id_info *chip = &pci_tbl[chip_idx];
+ if ((pci_id & chip->id.pci_mask) == chip->id.pci
+ && (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
+ && (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
+ break;
+ }
+ if (pci_tbl[chip_idx].name == 0) /* Compiled out! */
+ continue;
+
+ pci_flags = pci_tbl[chip_idx].pci_flags;
+ pdev = pci_find_slot(pci_bus, pci_device_fn);
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ irq = pci_irq_line;
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ ((pci_flags >> 2) & 0x1C) + 0x10,
+ &pci_busaddr);
+ pciaddr = pci_busaddr;
+#if defined(__alpha__)
+ if (pci_flags & PCI_ADDR_64BITS) {
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ ((pci_flags >> 2) & 0x1C) + 0x14,
+ &pci_busaddr);
+ pciaddr |= ((long)pci_busaddr)<<32;
+ }
+#endif
+
+ if (msg_level > 2)
+ printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n",
+ pci_tbl[chip_idx].name, pciaddr, irq);
+
+ if ( ! (pci_flags & PCI_UNUSED_IRQ) &&
+ (irq == 0 || irq >= 16)) {
+ if (pci_bus == 32) /* Broken CardBus activation. */
+ printk(KERN_WARNING "Resources for CardBus device '%s' have"
+ " not been allocated.\n"
+ KERN_WARNING "It will not be activated.\n",
+ pci_tbl[chip_idx].name);
+ else
+ printk(KERN_WARNING "PCI device '%s' was not assigned an "
+ "IRQ.\n"
+ KERN_WARNING "It will not be activated.\n",
+ pci_tbl[chip_idx].name);
+ continue;
+ }
+
+ if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+ if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+ continue;
+ } else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size)) == 0) {
+ printk(KERN_INFO "Failed to map PCI address %#lx.\n",
+ pciaddr);
+ continue;
+ }
+
+ if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
+ acpi_wake(pdev);
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ new_command = pci_command | (pci_flags & 7);
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled the"
+ " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
+ pci_bus, pci_device_fn, pci_command, new_command);
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, new_command);
+ }
+
+ newdev = drv_id->probe1(pdev, initial_device,
+ ioaddr, irq, chip_idx, cards_found);
+
+ if (newdev && (pci_flags & PCI_COMMAND_MASTER) &&
+ ! (pci_flags & PCI_NO_MIN_LATENCY)) {
+ u8 pci_latency;
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, &pci_latency);
+ if (pci_latency < min_pci_latency) {
+ printk(KERN_INFO " PCI latency timer (CFLT) is "
+ "unreasonably low at %d. Setting to %d clocks.\n",
+ pci_latency, min_pci_latency);
+ pcibios_write_config_byte(pci_bus, pci_device_fn,
+ PCI_LATENCY_TIMER, min_pci_latency);
+ }
+ }
+ if (newdev) {
+ struct dev_info *devp =
+ kmalloc(sizeof(struct dev_info), GFP_KERNEL);
+ if (devp) {
+ devp->next = dev_list;
+ devp->dev = newdev;
+ devp->drv_id = drv_id;
+ dev_list = devp;
+ }
+ }
+ initial_device = 0;
+ cards_found++;
+ }
+
+ if (((drv_id->flags & PCI_HOTSWAP)
+ && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
+ || cards_found) {
+ MOD_INC_USE_COUNT;
+ drv_id->next = drv_list;
+ drv_list = drv_id;
+ return 0;
+ } else
+ return -ENODEV;
+}
+#endif
+
+void pci_drv_unregister(struct drv_id_info *drv_id)
+{
+ struct drv_id_info **drvp;
+ struct dev_info **devip = &dev_list;
+
+ if (unregister_hotswap_hook)
+ (*unregister_hotswap_hook)(drv_id);
+
+ for (drvp = &drv_list; *drvp; drvp = &(*drvp)->next)
+ if (*drvp == drv_id) {
+ *drvp = (*drvp)->next;
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ while (*devip) {
+ struct dev_info *thisdevi = *devip;
+ if (thisdevi->drv_id == drv_id) {
+ *devip = thisdevi->next;
+ kfree(thisdevi);
+ } else
+ devip = &(*devip)->next;
+ }
+
+ return;
+}
+
+#if LINUX_VERSION_CODE < 0x20400
+/*
+ Search PCI configuration space for the specified capability registers.
+ Return the index, or 0 on failure.
+ The 2.4 kernel now includes this function.
+*/
+int pci_find_capability(struct pci_dev *pdev, int findtype)
+{
+ u16 pci_status, cap_type;
+ u8 pci_cap_idx;
+ int cap_idx;
+
+ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+ if ( ! (pci_status & PCI_STATUS_CAP_LIST))
+ return 0;
+ pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pci_cap_idx);
+ for (cap_idx = pci_cap_idx; cap_idx; cap_idx = (cap_type >> 8) & 0xff) {
+ pci_read_config_word(pdev, cap_idx, &cap_type);
+ if ((cap_type & 0xff) == findtype)
+ return cap_idx;
+ }
+ return 0;
+}
+#endif
+
+/* Change a device from D3 (sleep) to D0 (active).
+ Return the old power state.
+ This is more complicated than you might first expect since most cards
+ forget all PCI config info during the transition! */
+int acpi_wake(struct pci_dev *pdev)
+{
+ u32 base[5], romaddr;
+ u16 pci_command, pwr_command;
+ u8 pci_latency, pci_cacheline, irq;
+ int i, pwr_cmd_idx = pci_find_capability(pdev, PCI_CAP_ID_PM);
+
+ if (pwr_cmd_idx == 0)
+ return 0;
+ pci_read_config_word(pdev, pwr_cmd_idx + 4, &pwr_command);
+ if ((pwr_command & 3) == 0)
+ return 0;
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ for (i = 0; i < 5; i++)
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
+ &base[i]);
+ pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &romaddr);
+ pci_read_config_byte( pdev, PCI_LATENCY_TIMER, &pci_latency);
+ pci_read_config_byte( pdev, PCI_CACHE_LINE_SIZE, &pci_cacheline);
+ pci_read_config_byte( pdev, PCI_INTERRUPT_LINE, &irq);
+
+ pci_write_config_word(pdev, pwr_cmd_idx + 4, 0x0000);
+ for (i = 0; i < 5; i++)
+ if (base[i])
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
+ base[i]);
+ pci_write_config_dword(pdev, PCI_ROM_ADDRESS, romaddr);
+ pci_write_config_byte( pdev, PCI_INTERRUPT_LINE, irq);
+ pci_write_config_byte( pdev, PCI_CACHE_LINE_SIZE, pci_cacheline);
+ pci_write_config_byte( pdev, PCI_LATENCY_TIMER, pci_latency);
+ pci_write_config_word( pdev, PCI_COMMAND, pci_command | 5);
+ return pwr_command & 3;
+}
+
+int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state new_state)
+{
+ u16 pwr_command;
+ int pwr_cmd_idx = pci_find_capability(pdev, PCI_CAP_ID_PM);
+
+ if (pwr_cmd_idx == 0)
+ return 0;
+ pci_read_config_word(pdev, pwr_cmd_idx + 4, &pwr_command);
+ if ((pwr_command & 3) == ACPI_D3 && new_state != ACPI_D3)
+ acpi_wake(pdev); /* The complicated sequence. */
+ pci_write_config_word(pdev, pwr_cmd_idx + 4,
+ (pwr_command & ~3) | new_state);
+ return pwr_command & 3;
+}
+
+#if defined(CONFIG_PM)
+static int handle_pm_event(struct pm_dev *dev, int event, void *data)
+{
+ static int down = 0;
+ struct dev_info *devi;
+ int pwr_cmd = -1;
+
+ if (msg_level > 1)
+ printk(KERN_DEBUG "pci-scan: Handling power event %d for driver "
+ "list %s...\n",
+ event, drv_list->name);
+ switch (event) {
+ case PM_SUSPEND:
+ if (down) {
+ printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
+ break;
+ }
+ down = 1;
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
+ break;
+ case PM_RESUME:
+ if (!down) {
+ printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
+ break;
+ }
+ for (devi = dev_list; devi; devi = devi->next) {
+ if (devi->drv_id->pwr_event) {
+ if (msg_level > 3)
+ printk(KERN_DEBUG "pci-scan: Calling resume for %s "
+ "device.\n", devi->drv_id->name);
+ devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
+ }
+ }
+ down = 0;
+ break;
+ case PM_SET_WAKEUP: pwr_cmd = DRV_PWR_WakeOn; break;
+ case PM_EJECT: pwr_cmd = DRV_DETACH; break;
+ default:
+ printk(KERN_DEBUG "pci-scan: Unknown power management event %d.\n",
+ event);
+ }
+ if (pwr_cmd >= 0)
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, pwr_cmd);
+
+ return 0;
+}
+
+#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+static int handle_apm_event(apm_event_t event)
+{
+ static int down = 0;
+ struct dev_info *devi;
+
+ if (msg_level > 1)
+ printk(KERN_DEBUG "pci-scan: Handling APM event %d for driver "
+ "list %s...\n",
+ event, drv_list->name);
+ return 0;
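+ /* Note: the unconditional return above makes the APM event handling
+ below unreachable. */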
+ switch (event) {
+ case APM_SYS_SUSPEND:
+ case APM_USER_SUSPEND:
+ if (down) {
+ printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
+ break;
+ }
+ down = 1;
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
+ break;
+ case APM_NORMAL_RESUME:
+ case APM_CRITICAL_RESUME:
+ if (!down) {
+ printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
+ break;
+ }
+ for (devi = dev_list; devi; devi = devi->next)
+ if (devi->drv_id->pwr_event)
+ devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
+ down = 0;
+ break;
+ }
+ return 0;
+}
+#endif /* CONFIG_APM */
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (msg_level) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s", version);
+
+#if defined(CONFIG_PM)
+ pm_register(PM_PCI_DEV, 0, &handle_pm_event);
+#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+ apm_register_callback(&handle_apm_event);
+#endif
+ return 0;
+}
+void cleanup_module(void)
+{
+#if defined(CONFIG_PM)
+ pm_unregister_all(&handle_pm_event);
+#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
+ apm_unregister_callback(&handle_apm_event);
+#endif
+ if (dev_list != NULL)
+ printk(KERN_WARNING "pci-scan: Unfreed device references.\n");
+ return;
+}
+#endif
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -DEXPORT_SYMTAB -Wall -Wstrict-prototypes -O6 -c pci-scan.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/pci-scan.h b/linux/src/drivers/net/pci-scan.h
new file mode 100644
index 0000000..649b34b
--- /dev/null
+++ b/linux/src/drivers/net/pci-scan.h
@@ -0,0 +1,90 @@
+#ifndef _PCI_SCAN_H
+#define _PCI_SCAN_H
+/*
+ version 1.02 $Version:$ $Date: 2006/01/22 15:54:41 $
+ Copyright 1999-2001 Donald Becker / Scyld Computing Corporation
+ This software is part of the Linux kernel. It may be used and
+ distributed according to the terms of the GNU Public License,
+ incorporated herein by reference.
+*/
+
+/*
+ These are the structures in the table that drives the PCI probe routines.
+ Note the matching code uses a bitmask: more specific table entries should
+ be placed before "catch-all" entries.
+
+ The table must be zero terminated.
+*/
+enum pci_id_flags_bits {
+ /* Set PCI command register bits before calling probe1(). */
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ /* Read and map the single following PCI BAR. */
+ PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
+ PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
+ PCI_UNUSED_IRQ=0x800,
+};
+
+struct pci_id_info {
+ const char *name;
+ struct match_info {
+ int pci, pci_mask, subsystem, subsystem_mask;
+ int revision, revision_mask; /* Only 8 bits. */
+ } id;
+ enum pci_id_flags_bits pci_flags;
+ int io_size; /* Needed for I/O region check or ioremap(). */
+ int drv_flags; /* Driver use, intended as capability flags. */
+};
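+
+/* A sketch of a typical, zero-terminated table (modeled on the rtl8139.c
+   table later in this patch): match the 32-bit vendor/device ID exactly,
+   ignore the subsystem and revision fields, and reserve 0x80 bytes of I/O
+   space.
+
+     static struct pci_id_info example_tbl[] = {
+       {"RealTek RTL8139 Fast Ethernet", { 0x813910ec, 0xffffffff,},
+        PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0, 0x80, 0, },
+       {0,},
+     };
+*/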
+
+enum drv_id_flags {
+ PCI_HOTSWAP=1, /* Leave module loaded for Cardbus-like chips. */
+};
+enum drv_pwr_action {
+ DRV_NOOP, /* No action. */
+ DRV_ATTACH, /* The driver may expect power ops. */
+ DRV_SUSPEND, /* Machine suspending, next event RESUME or DETACH. */
+ DRV_RESUME, /* Resume from previous SUSPEND */
+ DRV_DETACH, /* Card will-be/is gone. Valid from SUSPEND! */
+ DRV_PWR_WakeOn, /* Put device in e.g. Wake-On-LAN mode. */
+ DRV_PWR_DOWN, /* Go to lowest power mode. */
+ DRV_PWR_UP, /* Go to normal power mode. */
+};
+
+struct drv_id_info {
+ const char *name; /* Single-word driver name. */
+ int flags;
+ int pci_class; /* Typically PCI_CLASS_NETWORK_ETHERNET<<8. */
+ struct pci_id_info *pci_dev_tbl;
+ void *(*probe1)(struct pci_dev *pdev, void *dev_ptr,
+ long ioaddr, int irq, int table_idx, int fnd_cnt);
+ /* Optional, called for suspend, resume and detach. */
+ int (*pwr_event)(void *dev, int event);
+ /* Internal values. */
+ struct drv_id_info *next;
+ void *cb_ops;
+};
+
+/* PCI scan and activate.
+ Scan PCI-like hardware, calling probe1(..,dev,..) on devices that match.
+ Returns -ENODEV, a negative number, if no cards are found. */
+
+extern int pci_drv_register(struct drv_id_info *drv_id, void *initial_device);
+extern void pci_drv_unregister(struct drv_id_info *drv_id);
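+
+/* Usage sketch (see rtl8139.c later in this patch for a real instance):
+   a driver fills in a drv_id_info with its name, flags, PCI class, device
+   table and probe1() routine, then registers it.  The "foo" names below are
+   placeholders, not real symbols:
+
+     static struct drv_id_info foo_drv_id = {
+       "foo", 0, PCI_CLASS_NETWORK_ETHERNET<<8, foo_pci_tbl,
+       foo_probe1, NULL };
+
+     if (pci_drv_register(&foo_drv_id, dev) < 0)
+       return -ENODEV;
+*/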
+
+
+/* ACPI routines.
+ Wake (change to ACPI D0 state) or set the ACPI power level of a sleeping
+ ACPI device. Returns the old power state. */
+
+int acpi_wake(struct pci_dev *pdev);
+enum acpi_pwr_state {ACPI_D0, ACPI_D1, ACPI_D2, ACPI_D3};
+int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state state);
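+
+/* For example (a sketch, not a fixed contract): a driver's pwr_event handler
+   might call acpi_set_pwr_state(pdev, ACPI_D3) when told to power down and
+   acpi_wake(pdev) to bring the card back to D0 before resuming. */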
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
+#endif
diff --git a/linux/src/drivers/net/pcnet32.c b/linux/src/drivers/net/pcnet32.c
new file mode 100644
index 0000000..da0e870
--- /dev/null
+++ b/linux/src/drivers/net/pcnet32.c
@@ -0,0 +1,970 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996,97 Thomas Bogendoerfer, 1993-1995,1998 Donald Becker
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * Derived from the lance driver written 1993-1995 by Donald Becker.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * This driver is for AMD PCnet-PCI based ethercards
+ */
+
+static const char *version = "pcnet32.c:v0.99B 4/4/98 DJBecker/TSBogend.\n";
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
+ * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
+ */
+#define PCNET_LOG_TX_BUFFERS 4
+#define PCNET_LOG_RX_BUFFERS 4
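+/* With both log values set to 4 here, TX_RING_SIZE and RX_RING_SIZE below
+   work out to 1 << 4 = 16 descriptors each. */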
+
+#ifdef MODULE
+#ifdef MODVERSIONS
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+/* Driver verbosity level. 0 = no messages, 7 = wordy death.
+ Modify here, or when loading as a module. */
+static int pcnet32_debug = 1;
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver, so look for a verbose description in lance.c. The difference
+ * from the normal lance driver is the use of the 32bit mode of the
+ * PCnet32 and PCnet-PCI chips. Because these are 32bit chips, there is
+ * no 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * History:
+ * v0.01: Initial version
+ * only tested on Alpha Noname Board
+ * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
+ * tested on a ASUS SP3G
+ * v0.10: fixed an odd problem with the 79C794 in a Compaq Deskpro XL
+ * looks like the 974 doesn't like stopping and restarting in a
+ * short period of time; now we do a reinit of the lance; the
+ * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
+ * and hangs the machine (thanks to Klaus Liedl for debugging)
+ * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
+ * made it standalone (no need for lance.c)
+ * v0.13: added additional PCI detecting for special PCI devices (Compaq)
+ * v0.14: stripped down additional PCI probe (thanks to David C Niemi
+ * and sveneric@xs4all.nl for testing this on their Compaq boxes)
+ * v0.15: added 79C965 (VLB) probe
+ * added interrupt sharing for PCI chips
+ * v0.16: fixed set_multicast_list on Alpha machines
+ * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
+ * v0.19: changed setting of autoselect bit
+ * v0.20: removed additional Compaq PCI probe; there is now a working one
+ * in arch/i386/bios32.c
+ * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
+ * v0.22: added printing of status to ring dump
+ * v0.23: changed enet_statistics to net_device_stats
+ * v0.99: Changes for 2.0.34 final release. -djb
+ */
+
+
+#ifndef __powerpc__
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#endif
+#if (LINUX_VERSION_CODE < 0x20123)
+//#define test_and_set_bit(val, addr) set_bit(val, addr)
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((PCNET_LOG_TX_BUFFERS) << 12)
+
+#define RX_RING_SIZE (1 << (PCNET_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((PCNET_LOG_RX_BUFFERS) << 4)
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+enum pcnet_offsets { PCNET32_DATA=0x10, PCNET32_ADDR=0x12, PCNET32_RESET=0x14,
+ PCNET32_BUS_IF=0x16,};
+#define PCNET32_TOTAL_SIZE 0x20
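+
+/* Register access follows the usual LANCE convention: write the register
+   number to PCNET32_ADDR (the address/RAP port), then read or write the
+   value through PCNET32_DATA (for CSRs) or PCNET32_BUS_IF (for bus
+   configuration registers), as the code below does for CSR0 and the
+   32-bit mode switch. */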
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ u32 base;
+ s16 buf_length;
+ s16 status;
+ u32 msg_length;
+ u32 reserved;
+};
+
+struct pcnet32_tx_head {
+ u32 base;
+ s16 length;
+ s16 status;
+ u32 misc;
+ u32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ u16 mode;
+ u16 tlen_rlen;
+ u8 phys_addr[6];
+ u16 reserved;
+ u32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring;
+ u32 tx_ring;
+};
+
+struct pcnet32_private {
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries
+ in 32bit mode. */
+ struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
+ struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
+ struct pcnet32_init_block init_block;
+ const char *name;
+ struct device *next_module;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ struct enet_statistics stats;
+ char tx_full;
+ unsigned long lock;
+};
+
+static struct pcnet_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x2420, "PCnet/PCI 79C970", 0},
+ {0x2430, "PCnet32", 0},
+ {0x2621, "PCnet/PCI II 79C970A", 0},
+ {0x2623, "PCnet/FAST 79C971", 0},
+ {0x2624, "PCnet/FAST+ 79C972", 0},
+ {0x0, "PCnet32 (unknown)", 0},
+};
+
+/* Index of functions. */
+int pcnet32_probe(struct device *dev);
+static int pcnet32_probe1(struct device *dev, unsigned int ioaddr, unsigned char irq_line);
+static int pcnet32_open(struct device *dev);
+static void pcnet32_init_ring(struct device *dev);
+static int pcnet32_start_xmit(struct sk_buff *skb, struct device *dev);
+static int pcnet32_rx(struct device *dev);
+static void pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int pcnet32_close(struct device *dev);
+static struct enet_statistics *pcnet32_get_stats(struct device *dev);
+static void pcnet32_set_multicast_list(struct device *dev);
+
+
+/* A list of all installed PCnet32 devices, for removing the driver module. */
+static struct device *root_pcnet32_dev = NULL;
+
+int pcnet32_probe (struct device *dev)
+{
+ static int pci_index = 0; /* Static, for multiple probe calls. */
+ int cards_found = 0;
+
+ if ( ! pcibios_present())
+ return ENODEV;
+
+ for (;pci_index < 0xff; pci_index++) {
+ u8 irq_line;
+ u16 pci_command, new_command;
+ unsigned char pci_bus, pci_device_fn;
+ u32 pci_ioaddr;
+
+ if (pcibios_find_device (PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
+ pci_index, &pci_bus, &pci_device_fn)
+ != PCIBIOS_SUCCESSFUL)
+ break;
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ /* Remove I/O space marker in bit 0. */
+ pci_ioaddr &= ~3;
+
+ /* Avoid already found cards from previous pcnet32_probe() calls */
+ if (check_region(pci_ioaddr, PCNET32_TOTAL_SIZE))
+ continue;
+
+ /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+ new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+ if (pci_command != new_command) {
+ printk(KERN_INFO " The PCI BIOS has not enabled the AMD Ethernet"
+ " device at %2x-%2x."
+ " Updating PCI command %4.4x->%4.4x.\n",
+ pci_bus, pci_device_fn, pci_command, new_command);
+ pcibios_write_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, new_command);
+ }
+
+ if (pcnet32_probe1(dev, pci_ioaddr, irq_line) != 0) {
+ /* Should never happen. */
+ printk(KERN_ERR "pcnet32.c: Probe of PCI card at %#x failed.\n",
+ pci_ioaddr);
+ } else
+ dev = 0;
+ cards_found++;
+ }
+
+ return cards_found ? 0 : -ENODEV;
+}
+
+
+/* pcnet32_probe1 */
+static int pcnet32_probe1(struct device *dev, unsigned int ioaddr, unsigned char irq_line)
+{
+ struct pcnet32_private *lp;
+ int i;
+ const char *chipname;
+
+ /* check if there is really a pcnet chip on that ioaddr */
+ if ((inb(ioaddr + 14) != 0x57) || (inb(ioaddr + 15) != 0x57))
+ return ENODEV;
+
+ inw(ioaddr+PCNET32_RESET); /* Reset the PCNET32 */
+
+ outw(0x0000, ioaddr+PCNET32_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+PCNET32_DATA) != 0x0004)
+ return ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+PCNET32_ADDR);
+ if (inw(ioaddr+PCNET32_ADDR) != 88) {
+ /* should never happen */
+ return ENODEV;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+PCNET32_DATA);
+ outw(89, ioaddr+PCNET32_ADDR);
+ chip_version |= inw(ioaddr+PCNET32_DATA) << 16;
+ if (pcnet32_debug > 2)
+ printk(" PCnet chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (i = 0; chip_table[i].id_number; i++)
+ if (chip_table[i].id_number == chip_version)
+ break;
+ chipname = chip_table[i].name;
+ }
+
+ dev = init_etherdev(dev, 0);
+
+ printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
+
+ printk("\n");
+
+ dev->base_addr = ioaddr;
+ request_region(ioaddr, PCNET32_TOTAL_SIZE, dev->name);
+
+ /* Data structures used by the PCnet32 are 16byte aligned and DMAble. */
+ lp = (struct pcnet32_private *)
+ (((unsigned long)kmalloc(sizeof(*lp)+15, GFP_DMA | GFP_KERNEL)+15) & ~15);
+
+ memset(lp, 0, sizeof(*lp));
+ dev->priv = lp;
+
+ lp->next_module = root_pcnet32_dev;
+ root_pcnet32_dev = dev;
+
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long) kmalloc(PKT_BUF_SZ*RX_RING_SIZE, GFP_DMA | GFP_KERNEL);
+
+ lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+ lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (u32)le32_to_cpu(virt_to_bus(lp->rx_ring));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(virt_to_bus(lp->tx_ring));
+
+ /* switch pcnet32 to 32bit mode */
+ outw(0x0014, ioaddr+PCNET32_ADDR);
+ outw(0x0002, ioaddr+PCNET32_BUS_IF);
+
+ outw(0x0001, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) & 0xffff, ioaddr+PCNET32_DATA);
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) >> 16, ioaddr+PCNET32_DATA);
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ inw(ioaddr+PCNET32_ADDR);
+
+ dev->irq = irq_line;
+
+ if (pcnet32_debug > 0)
+ printk("%s", version);
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->open = &pcnet32_open;
+ dev->hard_start_xmit = &pcnet32_start_xmit;
+ dev->stop = &pcnet32_close;
+ dev->get_stats = &pcnet32_get_stats;
+ dev->set_multicast_list = &pcnet32_set_multicast_list;
+
+ /* Fill in the generic fields of the device structure. */
+ ether_setup(dev);
+ return 0;
+}
+
+
+static int
+pcnet32_open(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &pcnet32_interrupt, SA_SHIRQ,
+ dev->name, (void *)dev)) {
+ return -EAGAIN;
+ }
+ MOD_INC_USE_COUNT;
+
+ /* Reset the PCNET32 */
+ inw(ioaddr+PCNET32_RESET);
+
+ /* switch pcnet32 to 32bit mode */
+ outw(0x0014, ioaddr+PCNET32_ADDR);
+ outw(0x0002, ioaddr+PCNET32_BUS_IF);
+
+ /* Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ /* only touch autoselect bit */
+ outw(inw(ioaddr+PCNET32_BUS_IF) | 0x0002, ioaddr+PCNET32_BUS_IF);
+
+ if (pcnet32_debug > 1)
+ printk("%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq,
+ (u32) virt_to_bus(lp->tx_ring),
+ (u32) virt_to_bus(lp->rx_ring),
+ (u32) virt_to_bus(&lp->init_block));
+
+ /* check for ATLAS T1/E1 LAW card */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && dev->dev_addr[2] == 0x75) {
+ /* select GPSI mode */
+ lp->init_block.mode = 0x0100;
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ outw(inw(ioaddr+PCNET32_BUS_IF) & ~2, ioaddr+PCNET32_BUS_IF);
+ /* switch full duplex on */
+ outw(0x0009, ioaddr+PCNET32_ADDR);
+ outw(inw(ioaddr+PCNET32_BUS_IF) | 1, ioaddr+PCNET32_BUS_IF);
+ } else
+ lp->init_block.mode = 0x0000;
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ pcnet32_init_ring(dev);
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ outw(0x0001, ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) &0xffff, ioaddr+PCNET32_DATA);
+ outw(0x0002, ioaddr+PCNET32_ADDR);
+ outw(virt_to_bus(&lp->init_block) >> 16, ioaddr+PCNET32_DATA);
+
+ outw(0x0004, ioaddr+PCNET32_ADDR);
+ outw(0x0915, ioaddr+PCNET32_DATA);
+
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ outw(0x0001, ioaddr+PCNET32_DATA);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+PCNET32_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+PCNET32_DATA);
+
+ if (pcnet32_debug > 2)
+ printk("%s: PCNET32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) virt_to_bus(&lp->init_block), inw(ioaddr+PCNET32_DATA));
+
+ return 0; /* Always succeed */
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+static void
+pcnet32_purge_tx_ring(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int i;
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb(lp->tx_skbuff[i], FREE_WRITE);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the PCNET32 Rx and Tx rings. */
+static void
+pcnet32_init_ring(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int i;
+
+ lp->lock = 0, lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_ring[i].base = (u32)le32_to_cpu(virt_to_bus((char *)lp->rx_buffs + i*PKT_BUF_SZ));
+ lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+ lp->rx_ring[i].status = le16_to_cpu(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_ring[i].base = 0;
+ lp->tx_ring[i].status = 0;
+ }
+
+ lp->init_block.tlen_rlen = TX_RING_LEN_BITS | RX_RING_LEN_BITS;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.rx_ring = (u32)le32_to_cpu(virt_to_bus(lp->rx_ring));
+ lp->init_block.tx_ring = (u32)le32_to_cpu(virt_to_bus(lp->tx_ring));
+}
+
+static void
+pcnet32_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ int i;
+ unsigned int ioaddr = dev->base_addr;
+
+ pcnet32_purge_tx_ring(dev);
+ pcnet32_init_ring(dev);
+
+ outw(0x0000, ioaddr + PCNET32_ADDR);
+ /* ReInit Ring */
+ outw(0x0001, ioaddr + PCNET32_DATA);
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+PCNET32_DATA) & 0x0100)
+ break;
+
+ outw(csr0_bits, ioaddr + PCNET32_DATA);
+}
+
+static int
+pcnet32_start_xmit(struct sk_buff *skb, struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ /* Transmitter timeout, serious problems. */
+ if (dev->tbusy) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 20)
+ return 1;
+ outw(0, ioaddr+PCNET32_ADDR);
+ printk("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw(ioaddr+PCNET32_DATA));
+ outw(0x0004, ioaddr+PCNET32_DATA);
+ lp->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0 ; i < RX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length, (unsigned)lp->rx_ring[i].status);
+ for (i = 0 ; i < TX_RING_SIZE; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc, (unsigned)lp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+ pcnet32_restart(dev, 0x0042, 1);
+
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+ if (pcnet32_debug > 3) {
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ printk("%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+PCNET32_DATA));
+ outw(0x0000, ioaddr+PCNET32_DATA);
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ if (test_and_set_bit(0, (void*)&lp->lock) != 0) {
+ if (pcnet32_debug > 0)
+ printk("%s: tx queue lock!.\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return 1;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+
+ lp->tx_ring[entry].misc = 0x00000000;
+
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = (u32)le32_to_cpu(virt_to_bus(skb->data));
+ lp->tx_ring[entry].status = le16_to_cpu(0x8300);
+
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+PCNET32_ADDR);
+ outw(0x0048, ioaddr+PCNET32_DATA);
+
+ dev->trans_start = jiffies;
+
+ save_flags(flags);
+ cli();
+ lp->lock = 0;
+ if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
+ clear_bit(0, (void*)&dev->tbusy);
+ else
+ lp->tx_full = 1;
+ restore_flags(flags);
+
+ return 0;
+}
+
+/* The PCNET32 interrupt handler. */
+static void
+pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)dev_id;
+ struct pcnet32_private *lp;
+ unsigned int csr0, ioaddr;
+ int boguscnt = max_interrupt_work;
+ int must_restart;
+
+ if (dev == NULL) {
+ printk ("pcnet32_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ ioaddr = dev->base_addr;
+ lp = (struct pcnet32_private *)dev->priv;
+ if (dev->interrupt)
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+
+ dev->interrupt = 1;
+
+ outw(0x00, dev->base_addr + PCNET32_ADDR);
+ while ((csr0 = inw(dev->base_addr + PCNET32_DATA)) & 0x8600
+ && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + PCNET32_DATA);
+
+ must_restart = 0;
+
+ if (pcnet32_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + PCNET32_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ pcnet32_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+ /* There was a major error, log it. */
+ int err_status = le16_to_cpu(lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000) lp->stats.tx_window_errors++;
+ if (err_status & 0x40000000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ lp->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x1800)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+ lp->tx_skbuff[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && dev->tbusy
+ && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ clear_bit(0, (void*)&dev->tbusy);
+ mark_bh(NET_BH);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * this happens when our receive ring is full. This
+ * shouldn't be a problem as we will see normal rx
+ * interrupts for the frames in the receive ring. But
+ * there are some PCI chipsets (I can reproduce this
+ * on SP3G with Intel saturn chipset) which have some-
+ * times problems and will fill up the receive ring
+ * with error descriptors. In this situation we don't
+ * get a rx interrupt, but a missed frame interrupt
+ * sooner or later. So we try to clean up our receive
+ * ring here.
+ */
+ pcnet32_rx(dev);
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + PCNET32_ADDR);
+ outw(0x0004, dev->base_addr + PCNET32_DATA);
+ pcnet32_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + PCNET32_ADDR);
+ outw(0x7940, dev->base_addr + PCNET32_DATA);
+
+ if (pcnet32_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + PCNET32_ADDR),
+ inw(dev->base_addr + PCNET32_DATA));
+
+ dev->interrupt = 0;
+ return;
+}
+
+static int
+pcnet32_rx(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+ int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20) lp->stats.rx_frame_errors++;
+ if (status & 0x10) lp->stats.rx_over_errors++;
+ if (status & 0x08) lp->stats.rx_crc_errors++;
+ if (status & 0x04) lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+ }
+ else
+ {
+ /* Malloc up new buffer, compatible with net-2e. */
+ short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len < 60) {
+ printk("%s: Runt packet!\n",dev->name);
+ lp->stats.rx_errors++;
+ } else {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ for (i=0; i < RX_RING_SIZE; i++)
+ if ((short)le16_to_cpu(lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].status) < 0)
+ break;
+
+ if (i > RX_RING_SIZE -2)
+ {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ eth_copy_and_sum(skb,
+ (unsigned char *)bus_to_virt(le32_to_cpu(lp->rx_ring[entry].base)),
+ pkt_len,0);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+static int
+pcnet32_close(struct device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+
+ dev->start = 0;
+ set_bit(0, (void*)&dev->tbusy);
+
+ outw(112, ioaddr+PCNET32_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+PCNET32_DATA);
+
+ outw(0, ioaddr+PCNET32_ADDR);
+
+ if (pcnet32_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+PCNET32_DATA));
+
+ /* We stop the PCNET32 here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+PCNET32_DATA);
+
+ free_irq(dev->irq, dev);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct enet_statistics *pcnet32_get_stats(struct device *dev)
+{
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+ unsigned int ioaddr = dev->base_addr;
+ unsigned short saved_addr;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ saved_addr = inw(ioaddr+PCNET32_ADDR);
+ outw(112, ioaddr+PCNET32_ADDR);
+ lp->stats.rx_missed_errors = inw(ioaddr+PCNET32_DATA);
+ outw(saved_addr, ioaddr+PCNET32_ADDR);
+ restore_flags(flags);
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void pcnet32_set_multicast_list(struct device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = (struct pcnet32_private *)dev->priv;
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ lp->init_block.mode |= 0x8000;
+ } else {
+ int num_addrs=dev->mc_count;
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
+ memset(lp->init_block.filter , (num_addrs == 0) ? 0 : -1, sizeof(lp->init_block.filter));
+ lp->init_block.mode &= ~0x8000;
+ }
+
+ outw(0, ioaddr+PCNET32_ADDR);
+ outw(0x0004, ioaddr+PCNET32_DATA); /* Temporarily stop the lance. */
+
+ pcnet32_restart(dev, 0x0042, 0); /* Resume normal operation */
+
+}
+
+
+#ifdef MODULE
+#if LINUX_VERSION_CODE > 0x20118
+MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+MODULE_DESCRIPTION("AMD PCnet/PCI ethernet driver");
+MODULE_PARM(debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+#endif
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+
+int
+init_module(void)
+{
+ if (debug >= 0)
+ pcnet32_debug = debug;
+
+#ifdef CARDBUS
+ register_driver(&pcnet32_ops);
+ return 0;
+#else
+ return pcnet32_probe(NULL);
+#endif
+}
+
+void
+cleanup_module(void)
+{
+ struct device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&pcnet32_ops);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_pcnet32_dev) {
+ next_dev = ((struct pcnet32_private *)root_pcnet32_dev->priv)->next_module;
+ unregister_netdev(root_pcnet32_dev);
+ release_region(root_pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ kfree(root_pcnet32_dev);
+ root_pcnet32_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c pcnet32.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/rtl8139.c b/linux/src/drivers/net/rtl8139.c
new file mode 100644
index 0000000..e97c905
--- /dev/null
+++ b/linux/src/drivers/net/rtl8139.c
@@ -0,0 +1,1737 @@
+/* rtl8139.c: A RealTek RTL8129/8139 Fast Ethernet driver for Linux. */
+/*
+ Written and Copyright 1997-2003 by Donald Becker.
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139 PCI ethernet
+ chips.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/rtl8139.html
+
+ Twister-tuning table provided by Kinston <shangh@realtek.com.tw>.
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char versionA[] =
+"rtl8139.c:v1.23a 8/24/2003 Donald Becker, becker@scyld.com.\n";
+static const char versionB[] =
+" http://www.scyld.com/network/rtl8139.html\n";
+
+#ifndef USE_MEM_OPS
+/* Note: Register access width and timing restrictions apply in MMIO mode.
+ This updated driver should nominally work, but I/O mode is better tested. */
+#define USE_IO_OPS
+#endif
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. It
+ is efficient to update the hardware filter, but recalculating the table
+ for a long filter list is painful. */
+static int multicast_filter_limit = 32;
+
+/* Used to pass the full-duplex flag, etc. */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Maximum size of the in-memory receive ring (smaller if no memory). */
+#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
+/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+#define TX_BUF_SIZE 1536
+
+/* PCI Tuning Parameters
+ Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024. */
+#define RX_FIFO_THRESH 4 /* Rx buffer level before first PCI xfer. */
+#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 bytes */
+#define TX_DMA_BURST 4 /* Calculate as 16<<val. */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with full-sized Ethernet frames.
+ This is a cross-driver value that is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the RealTek RTL8129 series, the RealTek
+Fast Ethernet controllers for PCI and CardBus. This chip is used on many
+low-end boards, sometimes with custom chip labels.
+
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS will assign the
+PCI INTA signal to a (preferably otherwise unused) system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Rx Ring buffers
+
+The receive unit uses a single linear ring buffer rather than the more
+common (and more efficient) descriptor-based architecture. Incoming frames
+are sequentially stored into the Rx region, and the host copies them into
+skbuffs.
+
+Comment: While it is theoretically possible to process many frames in place,
+any delay in Rx processing would block the Rx ring and cause us to drop
+frames. It would be difficult to design a protocol stack where the data
+buffer could be recalled by the device driver.
+
+IIIb. Tx operation
+
+The RTL8129 uses a fixed set of four Tx descriptors in register space. Tx
+frames must be 32 bit aligned. Linux aligns the IP header on word
+boundaries, and the 14 byte ethernet header means that almost all frames will
+need to be copied to an alignment buffer. The driver statically allocates
+the four alignment buffers at open() time.
+
+IVb. References
+
+http://www.realtek.com.tw/cn/cn.html
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+
+IVc. Errata
+
+*/
+
+
+static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int rtl_pwr_event(void *dev_instance, int event);
+
+enum chip_capability_flags {HAS_MII_XCVR=0x01, HAS_CHIP_XCVR=0x02,
+ HAS_LNK_CHNG=0x04, HAS_DESC=0x08};
+#ifdef USE_IO_OPS
+#define RTL8139_IOTYPE PCI_USES_MASTER|PCI_USES_IO |PCI_ADDR0
+#else
+#define RTL8139_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
+#endif
+#define RTL8129_CAPS HAS_MII_XCVR
+#define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
+#define RTL8139D_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG|HAS_DESC
+
+/* Note: Update the marked constant in _attach() if the RTL8139B entry moves.*/
+static struct pci_id_info pci_tbl[] = {
+ {"RealTek RTL8139C+, 64 bit high performance",
+ { 0x813910ec, 0xffffffff, 0,0, 0x20, 0xff},
+ RTL8139_IOTYPE, 0x80, RTL8139D_CAPS, },
+ {"RealTek RTL8139C Fast Ethernet",
+ { 0x813910ec, 0xffffffff, 0,0, 0x10, 0xff},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"RealTek RTL8129 Fast Ethernet", { 0x812910ec, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8129_CAPS, },
+ {"RealTek RTL8139 Fast Ethernet", { 0x813910ec, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"RealTek RTL8139B PCI/CardBus", { 0x813810ec, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"SMC1211TX EZCard 10/100 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Accton MPX5030 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"D-Link DFE-530TX+ (RealTek RTL8139C)",
+ { 0x13001186, 0xffffffff, 0x13011186, 0xffffffff,},
+ RTL8139_IOTYPE, 0x100, RTL8139_CAPS, },
+ {"D-Link DFE-538TX (RealTek RTL8139)", { 0x13001186, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"LevelOne FPC-0106Tx (RealTek RTL8139)", { 0x0106018a, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Compaq HNE-300 (RealTek RTL8139c)", { 0x8139021b, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Edimax EP-4103DL CardBus (RealTek RTL8139c)", { 0xab0613d1, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {"Siemens 1012v2 CardBus (RealTek RTL8139c)", { 0x101202ac, 0xffffffff,},
+ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info rtl8139_drv_id = {
+ "realtek", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
+ rtl8139_probe1, rtl_pwr_event };
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/* The rest of these values should never change. */
+#define NUM_TX_DESC 4 /* Number of Tx descriptor registers. */
+
+/* Symbolic offsets to registers. */
+enum RTL8129_registers {
+ MAC0=0, /* Ethernet hardware address. */
+ MAR0=8, /* Multicast filter. */
+ TxStatus0=0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0=0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf=0x30, RxEarlyCnt=0x34, RxEarlyStatus=0x36,
+ ChipCmd=0x37, RxBufPtr=0x38, RxBufAddr=0x3A,
+ IntrMask=0x3C, IntrStatus=0x3E,
+ TxConfig=0x40, RxConfig=0x44,
+ Timer=0x48, /* A general-purpose counter. */
+ RxMissed=0x4C, /* 24 bits valid, write clears. */
+ Cfg9346=0x50, Config0=0x51, Config1=0x52,
+ FlashReg=0x54, GPPinData=0x58, GPPinDir=0x59, MII_SMI=0x5A, HltClk=0x5B,
+ MultiIntr=0x5C, TxSummary=0x60,
+ MII_BMCR=0x62, MII_BMSR=0x64, NWayAdvert=0x66, NWayLPAR=0x68,
+ NWayExpansion=0x6A,
+ /* Undocumented registers, but required for proper operation. */
+ FIFOTMS=0x70, /* FIFO Control and test. */
+ CSCR=0x74, /* Chip Status and Configuration Register. */
+ PARA78=0x78, PARA7c=0x7c, /* Magic transceiver parameter register. */
+};
+
+enum ChipCmdBits {
+ CmdReset=0x10, CmdRxEnb=0x08, CmdTxEnb=0x04, RxBufEmpty=0x01, };
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+ PCIErr=0x8000, PCSTimeout=0x4000,
+ RxFIFOOver=0x40, RxUnderrun=0x20, RxOverflow=0x10,
+ TxErr=0x08, TxOK=0x04, RxErr=0x02, RxOK=0x01,
+};
+enum TxStatusBits {
+ TxHostOwns=0x2000, TxUnderrun=0x4000, TxStatOK=0x8000,
+ TxOutOfWindow=0x20000000, TxAborted=0x40000000, TxCarrierLost=0x80000000,
+};
+enum RxStatusBits {
+ RxMulticast=0x8000, RxPhysical=0x4000, RxBroadcast=0x2000,
+ RxBadSymbol=0x0020, RxRunt=0x0010, RxTooLong=0x0008, RxCRCErr=0x0004,
+ RxBadAlign=0x0002, RxStatusOK=0x0001,
+};
+
+/* Twister tuning parameters from RealTek.
+ Completely undocumented, but required to tune bad links. */
+enum CSCRBits {
+ CSCR_LinkOKBit=0x0400, CSCR_LinkChangeBit=0x0800,
+ CSCR_LinkStatusBits=0x0f000, CSCR_LinkDownOffCmd=0x003c0,
+ CSCR_LinkDownCmd=0x0f3c0,
+};
+#define PARA78_default 0x78fa8388
+#define PARA7c_default 0xcb38de43 /* param[0][3] */
+#define PARA7c_xxx 0xcb38de43
+unsigned long param[4][4]={
+ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};
+
+#define PRIV_ALIGN 15 /* Desired alignment mask */
+struct rtl8129_private {
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
+ int msg_level;
+ int max_interrupt_work;
+
+ /* Receive state. */
+ unsigned char *rx_ring;
+ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
+ unsigned int rx_buf_len; /* Size (8K 16K 32K or 64KB) of the Rx ring */
+
+ /* Transmit state. */
+ unsigned int cur_tx, dirty_tx, tx_flag;
+ unsigned long tx_full; /* The Tx queue is full. */
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[NUM_TX_DESC];
+ unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+
+ /* Receive filter state. */
+ unsigned int rx_config;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int cur_rx_mode;
+ int multicast_filter_limit;
+
+ /* Transceiver state. */
+ char phys[4]; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ char twistie, twist_row, twist_col; /* Twister tune state. */
+ u8 config1;
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int default_port; /* Last dev->if_port value. */
+};
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("RealTek RTL8129/8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+
+static int rtl8129_open(struct net_device *dev);
+static void rtl_hw_start(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location, int addr_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int val);
+static void rtl8129_timer(unsigned long data);
+static void rtl8129_tx_timeout(struct net_device *dev);
+static void rtl8129_init_ring(struct net_device *dev);
+static int rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int rtl8129_rx(struct net_device *dev);
+static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static void rtl_error(struct net_device *dev, int status, int link_status);
+static int rtl8129_close(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static struct net_device_stats *rtl8129_get_stats(struct net_device *dev);
+static inline u32 ether_crc(int length, unsigned char *data);
+static void set_rx_mode(struct net_device *dev);
+
+
+/* A list of all installed RTL8129 devices, for removing the driver module. */
+static struct net_device *root_rtl8129_dev = NULL;
+
+#ifndef MODULE
+int rtl8139_probe(struct net_device *dev)
+{
+ static int did_version = 0; /* Already printed version info. */
+
+ if (debug >= NETIF_MSG_DRV /* Emit version even if no cards detected. */
+ && did_version++ == 0)
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ return pci_drv_register(&rtl8139_drv_id, dev);
+}
+#endif
+
+static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int found_cnt)
+{
+ struct net_device *dev;
+ struct rtl8129_private *np;
+ void *priv_mem;
+ int i, option = found_cnt < MAX_UNITS ? options[found_cnt] : 0;
+ int config1;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
+ dev->name, pci_tbl[chip_idx].name, ioaddr, irq);
+
+ /* Bring the chip out of low-power mode. */
+ config1 = inb(ioaddr + Config1);
+ if (pci_tbl[chip_idx].drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
+ outb(config1 & ~0x03, ioaddr + Config1);
+
+ {
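+		/* Heuristic (assumption): word 0 reads back 0x8129 on boards with the
+		   larger serial EEPROM, so use 8 address bits (93C56/66-style parts)
+		   there and 6 address bits (93C46-style) otherwise. */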
+ int addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ for (i = 0; i < 3; i++)
+ ((u16 *)(dev->dev_addr))[i] =
+ le16_to_cpu(read_eeprom(ioaddr, i+7, addr_len));
+ }
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+	/* Make certain that elements such as descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
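+	/* Round the kmalloc()ed block up to the next PRIV_ALIGN boundary
+	   (PRIV_ALIGN is presumably a power of two minus one) so the private
+	   state is nicely aligned; the raw pointer is kept in priv_addr for
+	   the eventual kfree(). */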
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_rtl8129_dev;
+ root_rtl8129_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ np->config1 = config1;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+ if (np->drv_flags & HAS_MII_XCVR) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < sizeof(np->phys); phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
+ "advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceivers found! Assuming SYM "
+ "transceiver.\n",
+ dev->name);
+ np->phys[0] = 32;
+ }
+ } else
+ np->phys[0] = 32;
+
+ /* Put the chip into low-power mode. */
+ outb(0xC0, ioaddr + Cfg9346);
+ if (np->drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
+ outb(0x03, ioaddr + Config1);
+
+ outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ np->full_duplex = (option & 0x220) ? 1 : 0;
+ np->default_port = option & 0x330;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+
+ if (found_cnt < MAX_UNITS && full_duplex[found_cnt] > 0)
+ np->full_duplex = full_duplex[found_cnt];
+
+ if (np->full_duplex) {
+ printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
+ /* Changing the MII-advertised media might prevent re-connection. */
+ np->duplex_lock = 1;
+ }
+ if (np->default_port) {
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (option & 0x220 ? "full" : "half"));
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+ /* The rtl81x9-specific entries in the device structure. */
+ dev->open = &rtl8129_open;
+ dev->hard_start_xmit = &rtl8129_start_xmit;
+ dev->stop = &rtl8129_close;
+ dev->get_stats = &rtl8129_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ return dev;
+}
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
+ */
+
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
+static int read_eeprom(long ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ long ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
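+	/* read_cmd packs the read opcode (which carries the start bit) above
+	   addr_len address bits; the loop below clocks it out MSB first.  Any
+	   leading zero bits are presumably ignored by the serial EEPROM. */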
+
+ outb(EE_ENB & ~EE_CS, ee_addr);
+ outb(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ }
+ outb(EE_ENB, ee_addr);
+ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ outb(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outb(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outb(~EE_CS, ee_addr);
+ return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol.
+   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues. */
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay(mdio_addr) inb(mdio_addr)
+
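+/* Translation from generic MII register numbers to the 8139's built-in
+   transceiver registers; a zero entry means there is no equivalent, so
+   reads of those locations return 0 and writes are dropped. */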
+static char mii_2_8139_map[8] = {MII_BMCR, MII_BMSR, 0, 0, NWayAdvert,
+ NWayLPAR, NWayExpansion, 0 };
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync(long mdio_addr)
+{
+ int i;
+
+ for (i = 32; i >= 0; i--) {
+ outb(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long mdio_addr = dev->base_addr + MII_SMI;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
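+	/* The low 16 bits clocked out below appear to be: two extra preamble
+	   ones, the 01 start field, the 10 read opcode, then the 5-bit PHY
+	   and 5-bit register addresses. */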
+ int retval = 0;
+ int i;
+
+	if (phy_id > 31) {	/* Really an 8139.  Use internal registers. */
+ return location < 8 && mii_2_8139_map[location] ?
+ inw(dev->base_addr + mii_2_8139_map[location]) : 0;
+ }
+ mdio_sync(mdio_addr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ outb(MDIO_DIR | dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((inb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ outb(MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value)
+{
+ long mdio_addr = dev->base_addr + MII_SMI;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
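+	/* 32-bit write frame, clocked out MSB first below: 01 start, 01 write
+	   opcode, 5-bit PHY address, 5-bit register, 10 turnaround, then the
+	   16 data bits (this matches the standard MII management frame layout). */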
+ int i;
+
+	if (phy_id > 31) {	/* Really an 8139.  Use internal registers. */
+ long ioaddr = dev->base_addr;
+ if (location == 0) {
+ outb(0xC0, ioaddr + Cfg9346);
+ outw(value, ioaddr + MII_BMCR);
+ outb(0x00, ioaddr + Cfg9346);
+ } else if (location < 8 && mii_2_8139_map[location])
+ outw(value, ioaddr + mii_2_8139_map[location]);
+ return;
+ }
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ outb(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(dataval | MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay(mdio_addr);
+ outb(MDIO_CLK, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+
+static int rtl8129_open(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int rx_buf_len_idx;
+
+ MOD_INC_USE_COUNT;
+
+ /* The Rx ring allocation size is 2^N + delta, which is worst-case for
+ the kernel binary-buddy allocation. We allocate the Tx bounce buffers
+ at the same time to use some of the otherwise wasted space.
+ The delta of +16 is required for dribble-over because the receiver does
+ not wrap when the packet terminates just beyond the end of the ring. */
+ rx_buf_len_idx = RX_BUF_LEN_IDX;
+ do {
+ tp->rx_buf_len = 8192 << rx_buf_len_idx;
+ tp->rx_ring = kmalloc(tp->rx_buf_len + 16 +
+ (TX_BUF_SIZE * NUM_TX_DESC), GFP_KERNEL);
+ } while (tp->rx_ring == NULL && --rx_buf_len_idx >= 0);
+
+ if (tp->rx_ring == NULL) {
+ if (debug > 0)
+ printk(KERN_ERR "%s: Couldn't allocate a %d byte receive ring.\n",
+ dev->name, tp->rx_buf_len);
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+ tp->tx_bufs = tp->rx_ring + tp->rx_buf_len + 16;
+
+ rtl8129_init_ring(dev);
+ tp->full_duplex = tp->duplex_lock;
+ tp->tx_flag = (TX_FIFO_THRESH<<11) & 0x003f0000;
+ tp->rx_config =
+ (RX_FIFO_THRESH << 13) | (rx_buf_len_idx << 11) | (RX_DMA_BURST<<8);
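+	/* rx_config packs the Rx FIFO threshold, the ring-size index and the
+	   Rx DMA burst length into the RxConfig bit fields; the accept-mode
+	   bits are ORed in later by set_rx_mode(). */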
+
+ if (request_irq(dev->irq, &rtl8129_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ rtl_hw_start(dev);
+ netif_start_tx_queue(dev);
+
+ if (tp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG"%s: rtl8129_open() ioaddr %#lx IRQ %d"
+ " GP Pins %2.2x %s-duplex.\n",
+ dev->name, ioaddr, dev->irq, inb(ioaddr + GPPinData),
+ tp->full_duplex ? "full" : "half");
+
+	/* Set the timer to check for link beat and perhaps switch to an
+	   alternate media type. */
+ init_timer(&tp->timer);
+ tp->timer.expires = jiffies + 3*HZ;
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = &rtl8129_timer;
+ add_timer(&tp->timer);
+
+ return 0;
+}
+
+/* Start the hardware at open or resume. */
+static void rtl_hw_start(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Soft reset the chip. */
+ outb(CmdReset, ioaddr + ChipCmd);
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--)
+ if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
+ break;
+ /* Restore our idea of the MAC address. */
+ outb(0xC0, ioaddr + Cfg9346);
+ outl(cpu_to_le32(*(u32*)(dev->dev_addr + 0)), ioaddr + MAC0 + 0);
+ outl(cpu_to_le32(*(u32*)(dev->dev_addr + 4)), ioaddr + MAC0 + 4);
+
+ /* Hmmm, do these belong here? */
+ tp->cur_rx = 0;
+
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+ outl(tp->rx_config, ioaddr + RxConfig);
+	/* Check this value: the documentation contradicts itself.  Is the
+	   IFG correct with bits 28:27 zero, or with |0x03000000 ? */
+ outl((TX_DMA_BURST<<8), ioaddr + TxConfig);
+
+ /* This is check_duplex() */
+ if (tp->phys[0] >= 0 || (tp->drv_flags & HAS_MII_XCVR)) {
+ u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+ if (mii_reg5 == 0xffff)
+ ; /* Not there */
+ else if ((mii_reg5 & 0x0100) == 0x0100
+ || (mii_reg5 & 0x00C0) == 0x0040)
+ tp->full_duplex = 1;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: Setting %s%s-duplex based on"
+ " auto-negotiated partner ability %4.4x.\n", dev->name,
+ mii_reg5 == 0 ? "" :
+ (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
+ tp->full_duplex ? "full" : "half", mii_reg5);
+ }
+
+ if (tp->drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
+ outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+
+ outl(virt_to_bus(tp->rx_ring), ioaddr + RxBuf);
+ /* Start the chip's Tx and Rx process. */
+ outl(0, ioaddr + RxMissed);
+ set_rx_mode(dev);
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
+ | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
+
+}
+
+static void rtl8129_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+
+ if (! np->duplex_lock && mii_reg5 != 0xffff) {
+ int duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ printk(KERN_INFO "%s: Using %s-duplex based on MII #%d link"
+ " partner ability of %4.4x.\n", dev->name,
+ np->full_duplex ? "full" : "half", np->phys[0], mii_reg5);
+ if (np->drv_flags & HAS_MII_XCVR) {
+ outb(0xC0, ioaddr + Cfg9346);
+ outb(np->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+ }
+ }
+ }
+#if LINUX_VERSION_CODE < 0x20300
+ /* Check for bogusness. */
+ if (inw(ioaddr + IntrStatus) & (TxOK | RxOK)) {
+ int status = inw(ioaddr + IntrStatus); /* Double check */
+ if (status & (TxOK | RxOK) && ! dev->interrupt) {
+ printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
+ dev->name, status);
+ rtl8129_interrupt(dev->irq, dev, 0);
+ }
+ }
+ if (dev->tbusy && jiffies - dev->trans_start >= 2*TX_TIMEOUT)
+ rtl8129_tx_timeout(dev);
+#else
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ rtl8129_tx_timeout(dev);
+ }
+#endif
+
+#if defined(RTL_TUNE_TWISTER)
+ /* This is a complicated state machine to configure the "twister" for
+ impedance/echos based on the cable length.
+ All of this is magic and undocumented.
+ */
+ if (np->twistie) switch(np->twistie) {
+ case 1: {
+ if (inw(ioaddr + CSCR) & CSCR_LinkOKBit) {
+ /* We have link beat, let us tune the twister. */
+ outw(CSCR_LinkDownOffCmd, ioaddr + CSCR);
+ np->twistie = 2; /* Change to state 2. */
+ next_tick = HZ/10;
+ } else {
+ /* Just put in some reasonable defaults for when beat returns. */
+ outw(CSCR_LinkDownCmd, ioaddr + CSCR);
+ outl(0x20,ioaddr + FIFOTMS); /* Turn on cable test mode. */
+ outl(PARA78_default ,ioaddr + PARA78);
+ outl(PARA7c_default ,ioaddr + PARA7c);
+ np->twistie = 0; /* Bail from future actions. */
+ }
+ } break;
+ case 2: {
+ /* Read how long it took to hear the echo. */
+ int linkcase = inw(ioaddr + CSCR) & CSCR_LinkStatusBits;
+ if (linkcase == 0x7000) np->twist_row = 3;
+ else if (linkcase == 0x3000) np->twist_row = 2;
+ else if (linkcase == 0x1000) np->twist_row = 1;
+ else np->twist_row = 0;
+ np->twist_col = 0;
+		np->twistie = 3;			/* Change to state 3. */
+ next_tick = HZ/10;
+ } break;
+ case 3: {
+ /* Put out four tuning parameters, one per 100msec. */
+ if (np->twist_col == 0) outw(0, ioaddr + FIFOTMS);
+ outl(param[(int)np->twist_row][(int)np->twist_col], ioaddr + PARA7c);
+ next_tick = HZ/10;
+ if (++np->twist_col >= 4) {
+ /* For short cables we are done.
+ For long cables (row == 3) check for mistune. */
+ np->twistie = (np->twist_row == 3) ? 4 : 0;
+ }
+ } break;
+ case 4: {
+ /* Special case for long cables: check for mistune. */
+ if ((inw(ioaddr + CSCR) & CSCR_LinkStatusBits) == 0x7000) {
+ np->twistie = 0;
+ break;
+ } else {
+ outl(0xfb38de03, ioaddr + PARA7c);
+ np->twistie = 5;
+ next_tick = HZ/10;
+ }
+ } break;
+ case 5: {
+ /* Retune for shorter cable (column 2). */
+ outl(0x20,ioaddr + FIFOTMS);
+ outl(PARA78_default, ioaddr + PARA78);
+ outl(PARA7c_default, ioaddr + PARA7c);
+ outl(0x00,ioaddr + FIFOTMS);
+ np->twist_row = 2;
+ np->twist_col = 0;
+ np->twistie = 3;
+ next_tick = HZ/10;
+ } break;
+ }
+#endif
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ if (np->drv_flags & HAS_MII_XCVR)
+ printk(KERN_DEBUG"%s: Media selection tick, GP pins %2.2x.\n",
+ dev->name, inb(ioaddr + GPPinData));
+ else
+ printk(KERN_DEBUG"%s: Media selection tick, Link partner %4.4x.\n",
+ dev->name, inw(ioaddr + NWayLPAR));
+ printk(KERN_DEBUG"%s: Other registers are IntMask %4.4x "
+ "IntStatus %4.4x RxStatus %4.4x.\n",
+ dev->name, inw(ioaddr + IntrMask), inw(ioaddr + IntrStatus),
+ (int)inl(ioaddr + RxEarlyStatus));
+ printk(KERN_DEBUG"%s: Chip config %2.2x %2.2x.\n",
+ dev->name, inb(ioaddr + Config0), inb(ioaddr + Config1));
+ }
+
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void rtl8129_tx_timeout(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int status = inw(ioaddr + IntrStatus);
+ int mii_reg, i;
+
+ /* Could be wrapped with if (tp->msg_level & NETIF_MSG_TX_ERR) */
+ printk(KERN_ERR "%s: Transmit timeout, status %2.2x %4.4x "
+ "media %2.2x.\n",
+ dev->name, inb(ioaddr + ChipCmd), status, inb(ioaddr + GPPinData));
+
+ if (status & (TxOK | RxOK)) {
+ printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
+ dev->name, status);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outw(0x0000, ioaddr + IntrMask);
+ /* Emit info to figure out what went wrong. */
+ printk(KERN_DEBUG "%s: Tx queue start entry %d dirty entry %d%s.\n",
+ dev->name, tp->cur_tx, tp->dirty_tx, tp->tx_full ? ", full" : "");
+ for (i = 0; i < NUM_TX_DESC; i++)
+ printk(KERN_DEBUG "%s: Tx descriptor %d is %8.8x.%s\n",
+ dev->name, i, (int)inl(ioaddr + TxStatus0 + i*4),
+ i == tp->dirty_tx % NUM_TX_DESC ? " (queue head)" : "");
+ printk(KERN_DEBUG "%s: MII #%d registers are:", dev->name, tp->phys[0]);
+ for (mii_reg = 0; mii_reg < 8; mii_reg++)
+ printk(" %4.4x", mdio_read(dev, tp->phys[0], mii_reg));
+ printk(".\n");
+
+	/* Stop a shared interrupt from scavenging while we are cleaning up. */
+ tp->dirty_tx = tp->cur_tx = 0;
+ /* Dump the unsent Tx packets. */
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (tp->tx_skbuff[i]) {
+ dev_free_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = 0;
+ tp->stats.tx_dropped++;
+ }
+ }
+ rtl_hw_start(dev);
+ netif_unpause_tx_queue(dev);
+ tp->tx_full = 0;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+rtl8129_init_ring(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int i;
+
+ tp->tx_full = 0;
+ tp->dirty_tx = tp->cur_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ tp->tx_skbuff[i] = 0;
+ tp->tx_buf[i] = &tp->tx_bufs[i*TX_BUF_SIZE];
+ }
+}
+
+static int
+rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int entry;
+
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ rtl8129_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % NUM_TX_DESC;
+
+ tp->tx_skbuff[entry] = skb;
+ if ((long)skb->data & 3) { /* Must use alignment buffer. */
+ memcpy(tp->tx_buf[entry], skb->data, skb->len);
+ outl(virt_to_bus(tp->tx_buf[entry]), ioaddr + TxAddr0 + entry*4);
+ } else
+ outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + entry*4);
+ /* Note: the chip doesn't have auto-pad! */
+ outl(tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
+ ioaddr + TxStatus0 + entry*4);
+
+ /* There is a race condition here -- we might read dirty_tx, take an
+ interrupt that clears the Tx queue, and only then set tx_full.
+ So we do this in two phases. */
+ if (++tp->cur_tx - tp->dirty_tx >= NUM_TX_DESC) {
+ set_bit(0, &tp->tx_full);
+ if (tp->cur_tx - (volatile unsigned int)tp->dirty_tx < NUM_TX_DESC) {
+ clear_bit(0, &tp->tx_full);
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev);
+
+ dev->trans_start = jiffies;
+ if (tp->msg_level & NETIF_MSG_TX_QUEUED)
+ printk(KERN_DEBUG"%s: Queued Tx packet at %p size %d to slot %d.\n",
+ dev->name, skb->data, (int)skb->len, entry);
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+ struct rtl8129_private *tp = np;
+ int boguscnt = np->max_interrupt_work;
+ long ioaddr = dev->base_addr;
+ int link_changed = 0; /* Grrr, avoid bogus "uninitialized" warning */
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x20123
+	/* A lock to prevent a simultaneous-entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+ dev->name);
+ dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#endif
+
+ do {
+ int status = inw(ioaddr + IntrStatus);
+		/* Acknowledge all of the current interrupt sources ASAP, but
+		   first get an additional status bit from CSCR. */
+ if (status & RxUnderrun)
+ link_changed = inw(ioaddr+CSCR) & CSCR_LinkChangeBit;
+ outw(status, ioaddr + IntrStatus);
+
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG"%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
+ dev->name, status, inw(ioaddr + IntrStatus));
+
+ if ((status & (PCIErr|PCSTimeout|RxUnderrun|RxOverflow|RxFIFOOver
+ |TxErr|TxOK|RxErr|RxOK)) == 0)
+ break;
+
+ if (status & (RxOK|RxUnderrun|RxOverflow|RxFIFOOver))/* Rx interrupt */
+ rtl8129_rx(dev);
+
+ if (status & (TxOK | TxErr)) {
+ unsigned int dirty_tx = tp->dirty_tx;
+
+ while (tp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % NUM_TX_DESC;
+ int txstatus = inl(ioaddr + TxStatus0 + entry*4);
+
+ if ( ! (txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+ break; /* It still hasn't been Txed */
+
+ /* Note: TxCarrierLost is always asserted at 100mbps. */
+ if (txstatus & (TxOutOfWindow | TxAborted)) {
+				/* There was a major error; log it. */
+ if (tp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_NOTICE"%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+ tp->stats.tx_errors++;
+ if (txstatus&TxAborted) {
+ tp->stats.tx_aborted_errors++;
+ outl(TX_DMA_BURST << 8, ioaddr + TxConfig);
+ }
+ if (txstatus&TxCarrierLost) tp->stats.tx_carrier_errors++;
+ if (txstatus&TxOutOfWindow) tp->stats.tx_window_errors++;
+#ifdef ETHER_STATS
+ if ((txstatus & 0x0f000000) == 0x0f000000)
+ tp->stats.collisions16++;
+#endif
+ } else {
+ if (tp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status"
+ " %8.8x.\n", dev->name, txstatus);
+ if (txstatus & TxUnderrun) {
+ /* Add 64 to the Tx FIFO threshold. */
+ if (tp->tx_flag < 0x00300000)
+ tp->tx_flag += 0x00020000;
+ tp->stats.tx_fifo_errors++;
+ }
+ tp->stats.collisions += (txstatus >> 24) & 15;
+#if LINUX_VERSION_CODE > 0x20119
+ tp->stats.tx_bytes += txstatus & 0x7ff;
+#endif
+ tp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dev_free_skb_irq(tp->tx_skbuff[entry]);
+ tp->tx_skbuff[entry] = 0;
+ if (test_bit(0, &tp->tx_full)) {
+ /* The ring is no longer full, clear tbusy. */
+ clear_bit(0, &tp->tx_full);
+ netif_resume_tx_queue(dev);
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+ printk(KERN_ERR"%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, tp->cur_tx, (int)tp->tx_full);
+ dirty_tx += NUM_TX_DESC;
+ }
+#endif
+ tp->dirty_tx = dirty_tx;
+ }
+
+ /* Check uncommon events with one test. */
+ if (status & (PCIErr|PCSTimeout |RxUnderrun|RxOverflow|RxFIFOOver
+ |TxErr|RxErr)) {
+ if (status == 0xffff) /* Missing chip! */
+ break;
+ rtl_error(dev, status, link_changed);
+ }
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING"%s: Too much work at interrupt, "
+ "IntrStatus=0x%4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupt sources. */
+ outw(0xffff, ioaddr + IntrStatus);
+ break;
+ }
+ } while (1);
+
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG"%s: exiting interrupt, intr_status=%#4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+#if defined(__i386__) && LINUX_VERSION_CODE < 0x20123
+ clear_bit(0, (void*)&dev->interrupt);
+#endif
+ return;
+}
+
+/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
+ field alignments and semantics. */
+static int rtl8129_rx(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ unsigned char *rx_ring = tp->rx_ring;
+ u16 cur_rx = tp->cur_rx;
+
+ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG"%s: In rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n",
+ dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+ inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
+
+ while ((inb(ioaddr + ChipCmd) & RxBufEmpty) == 0) {
+ int ring_offset = cur_rx % tp->rx_buf_len;
+ u32 rx_status = le32_to_cpu(*(u32*)(rx_ring + ring_offset));
+ int rx_size = rx_status >> 16; /* Includes the CRC. */
+
+ if (tp->msg_level & NETIF_MSG_RX_STATUS) {
+ int i;
+ printk(KERN_DEBUG"%s: rtl8129_rx() status %4.4x, size %4.4x,"
+ " cur %4.4x.\n",
+ dev->name, rx_status, rx_size, cur_rx);
+ printk(KERN_DEBUG"%s: Frame contents ", dev->name);
+ for (i = 0; i < 70; i++)
+ printk(" %2.2x", rx_ring[ring_offset + i]);
+ printk(".\n");
+ }
+ if (rx_status & (RxBadSymbol|RxRunt|RxTooLong|RxCRCErr|RxBadAlign)) {
+ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG"%s: Ethernet frame had errors,"
+ " status %8.8x.\n", dev->name, rx_status);
+ if (rx_status == 0xffffffff) {
+ printk(KERN_NOTICE"%s: Invalid receive status at ring "
+ "offset %4.4x\n", dev->name, ring_offset);
+ rx_status = 0;
+ }
+ if (rx_status & RxTooLong) {
+ if (tp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_NOTICE"%s: Oversized Ethernet frame, status"
+ " %4.4x!\n",
+ dev->name, rx_status);
+ /* A.C.: The chip hangs here.
+ This should never occur, which means that we are screwed
+ when it does.
+ */
+ }
+ tp->stats.rx_errors++;
+ if (rx_status & (RxBadSymbol|RxBadAlign))
+ tp->stats.rx_frame_errors++;
+ if (rx_status & (RxRunt|RxTooLong)) tp->stats.rx_length_errors++;
+ if (rx_status & RxCRCErr) tp->stats.rx_crc_errors++;
+ /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+ tp->cur_rx = 0;
+ outb(CmdTxEnb, ioaddr + ChipCmd);
+ /* A.C.: Reset the multicast list. */
+ set_rx_mode(dev);
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+ struct sk_buff *skb;
+ int pkt_size = rx_size - 4;
+
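+			/* The chip prepends a 32-bit status/length word to each frame,
+			   so the packet data starts at ring_offset + 4; rx_size already
+			   includes the trailing CRC, which pkt_size strips off. */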
+ /* Allocate a common-sized skbuff if we are close. */
+ skb = dev_alloc_skb(1400 < pkt_size && pkt_size < PKT_BUF_SZ-2 ?
+ PKT_BUF_SZ : pkt_size + 2);
+ if (skb == NULL) {
+ printk(KERN_WARNING"%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ /* We should check that some rx space is free.
+ If not, free one and mark stats->rx_dropped++. */
+ tp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP fields. */
+ if (ring_offset + rx_size > tp->rx_buf_len) {
+ int semi_count = tp->rx_buf_len - ring_offset - 4;
+ /* This could presumably use two calls to copy_and_sum()? */
+ memcpy(skb_put(skb, semi_count), &rx_ring[ring_offset + 4],
+ semi_count);
+ memcpy(skb_put(skb, pkt_size-semi_count), rx_ring,
+ pkt_size-semi_count);
+ if (tp->msg_level & NETIF_MSG_PKTDATA) {
+ int i;
+ printk(KERN_DEBUG"%s: Frame wrap @%d",
+ dev->name, semi_count);
+ for (i = 0; i < 16; i++)
+ printk(" %2.2x", rx_ring[i]);
+ printk(".\n");
+ memset(rx_ring, 0xcc, 16);
+ }
+ } else {
+ eth_copy_and_sum(skb, &rx_ring[ring_offset + 4],
+ pkt_size, 0);
+ skb_put(skb, pkt_size);
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+#if LINUX_VERSION_CODE > 0x20119
+ tp->stats.rx_bytes += pkt_size;
+#endif
+ tp->stats.rx_packets++;
+ }
+
+ cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+ outw(cur_rx - 16, ioaddr + RxBufPtr);
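+		/* cur_rx advances past the 4-byte header plus the frame, rounded up
+		   to a dword boundary; writing it back minus 16 keeps RxBufPtr a
+		   little behind the hardware write pointer (apparently a RealTek
+		   requirement to avoid overrunning the ring). */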
+ }
+ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG"%s: Done rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n",
+ dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+ inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
+ tp->cur_rx = cur_rx;
+ return 0;
+}
+
+/* Error and abnormal or uncommon events handlers. */
+static void rtl_error(struct net_device *dev, int status, int link_changed)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE"%s: Abnormal interrupt, status %8.8x.\n",
+ dev->name, status);
+
+ /* Update the error count. */
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+
+ if (status & RxUnderrun){
+ /* This might actually be a link change event. */
+ if ((tp->drv_flags & HAS_LNK_CHNG) && link_changed) {
+ /* Really link-change on new chips. */
+ int lpar = inw(ioaddr + NWayLPAR);
+ int duplex = (lpar&0x0100) || (lpar & 0x01C0) == 0x0040
+ || tp->duplex_lock;
+ /* Do not use MII_BMSR as that clears sticky bit. */
+ if (inw(ioaddr + GPPinData) & 0x0004) {
+ netif_link_down(dev);
+ } else
+ netif_link_up(dev);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Link changed, link partner "
+ "%4.4x new duplex %d.\n",
+ dev->name, lpar, duplex);
+ tp->full_duplex = duplex;
+ /* Only count as errors with no link change. */
+ status &= ~RxUnderrun;
+ } else {
+ /* If this does not work, we will do rtl_hw_start(dev); */
+ outb(CmdTxEnb, ioaddr + ChipCmd);
+ set_rx_mode(dev); /* Reset the multicast list. */
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+
+ tp->stats.rx_errors++;
+ tp->stats.rx_fifo_errors++;
+ }
+ }
+
+ if (status & (RxOverflow | RxErr | RxFIFOOver)) tp->stats.rx_errors++;
+ if (status & (PCSTimeout)) tp->stats.rx_length_errors++;
+ if (status & RxFIFOOver) tp->stats.rx_fifo_errors++;
+ if (status & RxOverflow) {
+ tp->stats.rx_over_errors++;
+ tp->cur_rx = inw(ioaddr + RxBufAddr) % tp->rx_buf_len;
+ outw(tp->cur_rx - 16, ioaddr + RxBufPtr);
+ }
+ if (status & PCIErr) {
+ u32 pci_cmd_status;
+ pci_read_config_dword(tp->pci_dev, PCI_COMMAND, &pci_cmd_status);
+
+ printk(KERN_ERR "%s: PCI Bus error %4.4x.\n",
+ dev->name, pci_cmd_status);
+ }
+}
+
+static int
+rtl8129_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (tp->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG"%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outw(0x0000, ioaddr + IntrMask);
+
+ /* Stop the chip's Tx and Rx DMA processes. */
+ outb(0x00, ioaddr + ChipCmd);
+
+ /* Update the error counts. */
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+
+ del_timer(&tp->timer);
+
+ free_irq(dev->irq, dev);
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (tp->tx_skbuff[i])
+ dev_free_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = 0;
+ }
+ kfree(tp->rx_ring);
+ tp->rx_ring = 0;
+
+ /* Green! Put the chip in low-power mode. */
+ outb(0xC0, ioaddr + Cfg9346);
+ outb(tp->config1 | 0x03, ioaddr + Config1);
+ outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+ Handle user-level ioctl() calls.
+ We must use two numeric constants as the key because some clueless person
+  changed the value of the symbolic name.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x3f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0], data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ }
+ mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = 0; /* No rx_copybreak, always copy. */
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct net_device_stats *
+rtl8129_get_stats(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (netif_running(dev)) {
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+ }
+
+ return &tp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ This routine is not state sensitive and need not be SMP locked. */
+
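+/* Bit-serial CRC-32 over the station address; set_rx_mode() uses the top
+   six bits of the result to pick one of the 64 bits in the MAR0..MAR7
+   multicast hash registers. */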
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while (--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
+
+/* Bits in RxConfig. */
+enum rx_mode_bits {
+ AcceptErr=0x20, AcceptRunt=0x10, AcceptBroadcast=0x08,
+ AcceptMulticast=0x04, AcceptMyPhys=0x02, AcceptAllPhys=0x01,
+};
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int i, rx_mode;
+
+ if (tp->msg_level & NETIF_MSG_RXFILTER)
+ printk(KERN_DEBUG"%s: set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
+ dev->name, dev->flags, (int)inl(ioaddr + RxConfig));
+
+ /* Note: do not reorder, GCC is clever about common statements. */
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE"%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = AcceptBroadcast|AcceptMulticast|AcceptMyPhys|AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((dev->mc_count > tp->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct dev_mc_list *mclist;
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
+ }
+ /* We can safely update without stopping the chip. */
+ outl(tp->rx_config | rx_mode, ioaddr + RxConfig);
+ tp->mc_filter[0] = mc_filter[0];
+ tp->mc_filter[1] = mc_filter[1];
+ outl(mc_filter[0], ioaddr + MAR0 + 0);
+ outl(mc_filter[1], ioaddr + MAR0 + 4);
+ return;
+}
+
+
+static int rtl_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk("%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ netif_device_detach(dev);
+ /* Disable interrupts, stop Tx and Rx. */
+ outw(0x0000, ioaddr + IntrMask);
+ outb(0x00, ioaddr + ChipCmd);
+ /* Update the error counts. */
+ np->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+ break;
+ case DRV_RESUME:
+ netif_device_attach(dev);
+ rtl_hw_start(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_rtl8129_dev; *devp; devp = next) {
+ next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+static dev_node_t *rtl8139_attach(dev_locator_t *loc)
+{
+ struct net_device *dev;
+ u16 dev_id;
+ u32 pciaddr;
+ u8 bus, devfn, irq;
+ long hostaddr;
+ /* Note: the chip index should match the 8139B pci_tbl[] entry. */
+ int chip_idx = 2;
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ printk(KERN_DEBUG "rtl8139_attach(bus %d, function %d)\n", bus, devfn);
+#ifdef USE_IO_OPS
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &pciaddr);
+ hostaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &pciaddr);
+ hostaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_tbl[chip_idx].io_size);
+#endif
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+ if (hostaddr == 0 || irq == 0) {
+ printk(KERN_ERR "The %s interface at %d/%d was not assigned an %s.\n"
+ KERN_ERR " It will not be activated.\n",
+ pci_tbl[chip_idx].name, bus, devfn,
+ hostaddr == 0 ? "address" : "IRQ");
+ return NULL;
+ }
+ dev = rtl8139_probe1(pci_find_slot(bus, devfn), NULL,
+ hostaddr, irq, chip_idx, 0);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+ node->major = node->minor = 0;
+ node->next = NULL;
+ MOD_INC_USE_COUNT;
+ return node;
+ }
+ return NULL;
+}
+
+static void rtl8139_detach(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "rtl8139_detach(%s)\n", node->dev_name);
+ for (devp = &root_rtl8129_dev; *devp; devp = next) {
+ next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ struct rtl8129_private *np =
+ (struct rtl8129_private *)(*devp)->priv;
+ unregister_netdev(*devp);
+ release_region((*devp)->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)(*devp)->base_addr);
+#endif
+ kfree(*devp);
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ *devp = *next;
+ kfree(node);
+ MOD_DEC_USE_COUNT;
+ }
+}
+
+struct driver_operations realtek_ops = {
+ "realtek_cb",
+ rtl8139_attach, /*rtl8139_suspend*/0, /*rtl8139_resume*/0, rtl8139_detach
+};
+
+#endif /* Cardbus support */
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+#ifdef CARDBUS
+ register_driver(&realtek_ops);
+ return 0;
+#else
+ return pci_drv_register(&rtl8139_drv_id, NULL);
+#endif
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&realtek_ops);
+#else
+ pci_drv_unregister(&rtl8139_drv_id);
+#endif
+
+ while (root_rtl8129_dev) {
+ struct rtl8129_private *np = (void *)(root_rtl8129_dev->priv);
+ unregister_netdev(root_rtl8129_dev);
+ release_region(root_rtl8129_dev->base_addr,
+ pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)(root_rtl8129_dev->base_addr));
+#endif
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_rtl8129_dev);
+ root_rtl8129_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` rtl8139.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c -o realtek_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/seeq8005.c b/linux/src/drivers/net/seeq8005.c
new file mode 100644
index 0000000..4adebde
--- /dev/null
+++ b/linux/src/drivers/net/seeq8005.c
@@ -0,0 +1,760 @@
+/* seeq8005.c: A network driver for linux. */
+/*
+ Based on skeleton.c,
+ Written 1993-94 by Donald Becker.
+ See the skeleton.c file for further copyright information.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as hamish@zot.apana.org.au
+
+ This file is a network device driver for the SEEQ 8005 chipset and
+ the Linux operating system.
+
+*/
+
+static const char *version =
+ "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
+
+/*
+ Sources:
+ SEEQ 8005 databook
+
+ Version history:
+ 1.00 Public release. cosmetic changes (no warnings now)
+	0.68	Turning per-packet/interrupt debug messages off - testing for release.
+ 0.67 timing problems/bad buffer reads seem to be fixed now
+ 0.63 *!@$ protocol=eth_type_trans -- now packets flow
+ 0.56 Send working
+ 0.48 Receive working
+*/
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "seeq8005.h"
+
+/* First, a few definitions that the brave might change. */
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int seeq8005_portlist[] =
+ { 0x300, 0x320, 0x340, 0x360, 0};
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#ifndef NET_DEBUG
+#define NET_DEBUG 1
+#endif
+static unsigned int net_debug = NET_DEBUG;
+
+/* Information that needs to be kept for each board. */
+struct net_local {
+ struct enet_statistics stats;
+ unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */
+ long open_time; /* Useless example local info. */
+};
+
+/* The station (ethernet) address prefix, used for IDing the board. */
+#define SA_ADDR0 0x00
+#define SA_ADDR1 0x80
+#define SA_ADDR2 0x4b
+
+/* Index to functions, as function prototypes. */
+
+extern int seeq8005_probe(struct device *dev);
+
+static int seeq8005_probe1(struct device *dev, int ioaddr);
+static int seeq8005_open(struct device *dev);
+static int seeq8005_send_packet(struct sk_buff *skb, struct device *dev);
+static void seeq8005_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void seeq8005_rx(struct device *dev);
+static int seeq8005_close(struct device *dev);
+static struct enet_statistics *seeq8005_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+
+/* Example routines you must write ;->. */
+#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
+extern void hardware_send_packet(struct device *dev, char *buf, int length);
+extern void seeq8005_init(struct device *dev, int startp);
+inline void wait_for_buffer(struct device *dev);
+
+
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ If dev->base_addr == 0, probe all likely locations.
+ If dev->base_addr == 1, always return failure.
+ If dev->base_addr == 2, allocate space for the device and return success
+ (detachable devices only).
+ */
+#ifdef HAVE_DEVLIST
+/* Support for an alternate probe manager, which will eliminate the
+ boilerplate below. */
+struct netdev_entry seeq8005_drv =
+{"seeq8005", seeq8005_probe1, SEEQ8005_IO_EXTENT, seeq8005_portlist};
+#else
+int
+seeq8005_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return seeq8005_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; seeq8005_portlist[i]; i++) {
+ int ioaddr = seeq8005_portlist[i];
+ if (check_region(ioaddr, SEEQ8005_IO_EXTENT))
+ continue;
+ if (seeq8005_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+/* This is the real probe routine. Linux has a history of friendly device
+   probes on the ISA bus.  A good device probe avoids doing writes, and
+ verifies that the correct device exists and functions. */
+
+static int seeq8005_probe1(struct device *dev, int ioaddr)
+{
+ static unsigned version_printed = 0;
+ int i,j;
+ unsigned char SA_prom[32];
+ int old_cfg1;
+ int old_cfg2;
+ int old_stat;
+ int old_dmaar;
+ int old_rear;
+
+ if (net_debug>1)
+ printk("seeq8005: probing at 0x%x\n",ioaddr);
+
+ old_stat = inw(SEEQ_STATUS); /* read status register */
+ if (old_stat == 0xffff)
+ return ENODEV; /* assume that 0xffff == no device */
+ if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
+ if (net_debug>1) {
+ printk("seeq8005: reserved stat bits != 0x1800\n");
+ printk(" == 0x%04x\n",old_stat);
+ }
+ return ENODEV;
+ }
+
+ old_rear = inw(SEEQ_REA);
+ if (old_rear == 0xffff) {
+ outw(0,SEEQ_REA);
+ if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
+ return ENODEV;
+ }
+ } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
+ if (net_debug>1) {
+ printk("seeq8005: unused rear bits != 0xff00\n");
+ printk(" == 0x%04x\n",old_rear);
+ }
+ return ENODEV;
+ }
+
+ old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
+ old_cfg1 = inw(SEEQ_CFG1);
+ old_dmaar = inw(SEEQ_DMAAR);
+
+ if (net_debug>4) {
+ printk("seeq8005: stat = 0x%04x\n",old_stat);
+ printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
+ printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
+ printk("seeq8005: raer = 0x%04x\n",old_rear);
+ printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
+ }
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
+ outw( 0, SEEQ_DMAAR); /* set starting PROM address */
+ outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
+
+
+ j=0;
+ for(i=0; i <32; i++) {
+ j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
+ }
+
+#if 0
+ /* untested because I only have the one card */
+ if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
+ if (net_debug>1) { /* check this before deciding that we have a card */
+ printk("seeq8005: prom sum error\n");
+ }
+ outw( old_stat, SEEQ_STATUS);
+ outw( old_dmaar, SEEQ_DMAAR);
+ outw( old_cfg1, SEEQ_CFG1);
+ return ENODEV;
+ }
+#endif
+
+ outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
+ SLOW_DOWN_IO; /* have to wait 4us after a reset - should be fixed */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ if (net_debug) {
+ printk("seeq8005: prom sum = 0x%08x\n",j);
+ for(j=0; j<32; j+=16) {
+ printk("seeq8005: prom %02x: ",j);
+ for(i=0;i<16;i++) {
+ printk("%02x ",SA_prom[j|i]);
+ }
+ printk(" ");
+ for(i=0;i<16;i++) {
+ if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
+ printk("%c", SA_prom[j|i]);
+ } else {
+ printk(" ");
+ }
+ }
+ printk("\n");
+ }
+ }
+
+#if 0
+ /*
+ * testing the packet buffer memory doesn't work yet
+ * but all other buffer accesses do
+ * - fixing is not a priority
+ */
+ if (net_debug>1) { /* test packet buffer memory */
+ printk("seeq8005: testing packet buffer ... ");
+ outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0 , SEEQ_DMAAR);
+ for(i=0;i<32768;i++) {
+ outw(0x5a5a, SEEQ_BUFFER);
+ }
+ j=jiffies+HZ;
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && jiffies < j )
+ mb();
+ outw( 0 , SEEQ_DMAAR);
+ while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && jiffies < j+HZ)
+ mb();
+ if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ j=0;
+ for(i=0;i<32768;i++) {
+ if (inw(SEEQ_BUFFER) != 0x5a5a)
+ j++;
+ }
+ if (j) {
+ printk("%i\n",j);
+ } else {
+ printk("ok.\n");
+ }
+ }
+#endif
+
+ /* Allocate a new 'dev' if needed. */
+ if (dev == NULL)
+ dev = init_etherdev(0, sizeof(struct net_local));
+
+ if (net_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
+
+ /* Fill in the 'dev' fields. */
+ dev->base_addr = ioaddr;
+
+ /* Retrieve and print the ethernet address. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = SA_prom[i+6]);
+
+ if (dev->irq == 0xff)
+ ; /* Do nothing: a user-level program will set it. */
+ else if (dev->irq < 2) { /* "Auto-IRQ" */
+ autoirq_setup(0);
+
+ outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
+
+ dev->irq = autoirq_report(0);
+
+ if (net_debug >= 2)
+ printk(" autoirq is %d\n", dev->irq);
+ } else if (dev->irq == 2)
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ * or don't know which one to set.
+ */
+ dev->irq = 9;
+
+#if 0
+ {
+ int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", NULL);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+#endif
+
+ /* Grab the region so we can find another board if autoIRQ fails. */
+ request_region(ioaddr, SEEQ8005_IO_EXTENT,"seeq8005");
+
+ /* Initialize the device structure. */
+ dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_local));
+
+ dev->open = seeq8005_open;
+ dev->stop = seeq8005_close;
+ dev->hard_start_xmit = seeq8005_send_packet;
+ dev->get_stats = seeq8005_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->flags &= ~IFF_MULTICAST;
+
+ return 0;
+}
+
+
+/* Open/initialize the board. This is called (in the current kernel)
+ sometime after booting when the 'ifconfig' program is run.
+
+ This routine should set everything up anew at each open, even
+ registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+ */
+static int
+seeq8005_open(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ {
+ int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", NULL);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+ return EAGAIN;
+ }
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Reset the hardware here. Don't forget to set the station address. */
+ seeq8005_init(dev, 1);
+
+ lp->open_time = jiffies;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+ return 0;
+}
+
+static int
+seeq8005_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk("%s: transmit timed out, %s?\n", dev->name,
+ tx_done(dev) ? "IRQ conflict" : "network cable problem");
+ /* Try to restart the adaptor. */
+ seeq8005_init(dev, 1);
+ dev->tbusy=0;
+ dev->trans_start = jiffies;
+ }
+
+	/* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = skb->data;
+
+ hardware_send_packet(dev, buf, length);
+ dev->trans_start = jiffies;
+ }
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ /* You might need to clean up and record Tx statistics here. */
+
+ return 0;
+}
+
+/* The typical workload of the driver:
+ Handle the network interface interrupts. */
+static void
+seeq8005_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ struct net_local *lp;
+ int ioaddr, status, boguscount = 0;
+
+ if (dev == NULL) {
+ printk ("net_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ if (dev->interrupt)
+ printk ("%s: Re-entering the interrupt handler.\n", dev->name);
+ dev->interrupt = 1;
+
+ ioaddr = dev->base_addr;
+ lp = (struct net_local *)dev->priv;
+
+ status = inw(SEEQ_STATUS);
+ do {
+ if (net_debug >2) {
+ printk("%s: int, status=0x%04x\n",dev->name,status);
+ }
+
+ if (status & SEEQSTAT_WINDOW_INT) {
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ if (net_debug) {
+ printk("%s: window int!\n",dev->name);
+ }
+ }
+ if (status & SEEQSTAT_TX_INT) {
+ outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ lp->stats.tx_packets++;
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+ if (status & SEEQSTAT_RX_INT) {
+ /* Got a packet(s). */
+ seeq8005_rx(dev);
+ }
+ status = inw(SEEQ_STATUS);
+ } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
+
+ if(net_debug>2) {
+ printk("%s: eoi\n",dev->name);
+ }
+ dev->interrupt = 0;
+ return;
+}
+
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+seeq8005_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int boguscount = 10;
+ int pkt_hdr;
+ int ioaddr = dev->base_addr;
+
+ do {
+ int next_packet;
+ int pkt_len;
+ int i;
+ int status;
+
+ status = inw(SEEQ_STATUS);
+ outw( lp->receive_ptr, SEEQ_DMAAR);
+ outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ wait_for_buffer(dev);
+ next_packet = ntohs(inw(SEEQ_BUFFER));
+ pkt_hdr = inw(SEEQ_BUFFER);
+
+ if (net_debug>2) {
+ printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
+ }
+
+ if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
+ return; /* Done for now */
+ }
+
+ if ((pkt_hdr & SEEQPKTS_DONE)==0)
+ break;
+
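+		/* The receive area runs from (DEFAULT_TEA+1)<<8 up to the top of the
+		   64K buffer space, so when the next-packet pointer has wrapped the
+		   usable length is computed modulo that window; the extra 4 bytes
+		   account for the per-packet header just read. */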
+ if (next_packet < lp->receive_ptr) {
+ pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
+ } else {
+ pkt_len = next_packet - lp->receive_ptr - 4;
+ }
+
+ if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
+ printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
+ seeq8005_init(dev,1);
+ return;
+ }
+
+ lp->receive_ptr = next_packet;
+
+ if (net_debug>2) {
+ printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
+ }
+
+ if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (pkt_hdr & SEEQPKTS_SHORT) lp->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_DRIB) lp->stats.rx_frame_errors++;
+ if (pkt_hdr & SEEQPKTS_OVERSIZE) lp->stats.rx_over_errors++;
+ if (pkt_hdr & SEEQPKTS_CRC_ERR) lp->stats.rx_crc_errors++;
+ /* skip over this packet */
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+ unsigned char *buf;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+		skb_reserve(skb, 2);	/* align the IP fields on a 16-byte boundary */
+ buf = skb_put(skb,pkt_len);
+
+ insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
+
+ if (net_debug>2) {
+ char * p = buf;
+ printk("%s: recv ",dev->name);
+ for(i=0;i<14;i++) {
+ printk("%02x ",*(p++)&0xff);
+ }
+ printk("\n");
+ }
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
+
+ /* If any worth-while packets have been received, netif_rx()
+ has done a mark_bh(NET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ return;
+}
+
+/* The inverse routine to net_open(). */
+static int
+seeq8005_close(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+
+ lp->open_time = 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* Flush the Tx and disable Rx here. */
+ outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+
+ free_irq(dev->irq, NULL);
+
+ irq2dev_map[dev->irq] = 0;
+
+ /* Update the statistics here. */
+
+ return 0;
+
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *
+seeq8005_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+static void
+set_multicast_list(struct device *dev)
+{
+/*
+ * I _could_ do up to 6 addresses here, but won't (yet?)
+ */
+
+#if 0
+ int ioaddr = dev->base_addr;
+/*
+ * hmm, not even sure if my matching works _anyway_ - seem to be receiving
+ * _everything_ . . .
+ */
+
+ if (num_addrs) { /* Enable promiscuous mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
+ dev->flags|=IFF_PROMISC;
+ } else { /* Disable promiscuous mode, use normal mode */
+ outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
+ }
+#endif
+}
+
+void seeq8005_init(struct device *dev, int startp)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */
+ SLOW_DOWN_IO; /* have to wait 4us after a reset - should be fixed */
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+ SLOW_DOWN_IO;
+
+ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */
+/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) { /* set Station address */
+ outb(dev->dev_addr[i], SEEQ_BUFFER);
+ SLOW_DOWN_IO;
+ }
+
+ outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */
+ outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */
+
+ lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */
+ outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */
+
+ outw( 0x00ff, SEEQ_REA); /* Receive Area End */
+
+ if (net_debug>4) {
+ printk("%s: SA0 = ",dev->name);
+
+ outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
+ outw( 0, SEEQ_DMAAR);
+ outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
+
+ for(i=0;i<6;i++) {
+ printk("%02x ",inb(SEEQ_BUFFER));
+ }
+ printk("\n");
+ }
+
+ outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
+ outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
+ outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
+
+ if (net_debug>4) {
+ int old_cfg1;
+ old_cfg1 = inw(SEEQ_CFG1);
+ printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
+ printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
+ printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
+ printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
+ printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
+
+ }
+}
+
+
+void hardware_send_packet(struct device * dev, char *buf, int length)
+{
+ int ioaddr = dev->base_addr;
+ int status = inw(SEEQ_STATUS);
+ int transmit_ptr = 0;
+ int tmp;
+
+ if (net_debug>4) {
+ printk("%s: send 0x%04x\n",dev->name,length);
+ }
+
+ /* Set FIFO to writemode and set packet-buffer address */
+ outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+ outw( transmit_ptr, SEEQ_DMAAR);
+
+ /* output SEEQ Packet header barfage */
+ outw( htons(length + 4), SEEQ_BUFFER);
+ outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
+
+ /* blat the buffer */
+ outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
+ /* paranoia !! */
+ outw( 0, SEEQ_BUFFER);
+ outw( 0, SEEQ_BUFFER);
+
+ /* set address of start of transmit chain */
+ outw( transmit_ptr, SEEQ_TPR);
+
+ /* drain FIFO */
+ tmp = jiffies;
+ while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && (jiffies < tmp + HZ))
+ mb();
+
+ /* doit ! */
+ outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+
+}
+
+
+/*
+ * wait_for_buffer
+ *
+ * This routine waits for the SEEQ chip to assert that the FIFO is ready
+ * by checking for a window interrupt, and then clearing it
+ */
+inline void wait_for_buffer(struct device * dev)
+{
+ int ioaddr = dev->base_addr;
+ int tmp;
+ int status;
+
+ tmp = jiffies + HZ;
+ while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && jiffies < tmp)
+ mb();
+
+ if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
+ outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
+}
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c skeleton.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/seeq8005.h b/linux/src/drivers/net/seeq8005.h
new file mode 100644
index 0000000..809ba6d
--- /dev/null
+++ b/linux/src/drivers/net/seeq8005.h
@@ -0,0 +1,156 @@
+/*
+ * defines, etc for the seeq8005
+ */
+
+/*
+ * This file is distributed under GPL.
+ *
+ * The style and layout of this file are also copied
+ * from many of the other Linux network device drivers.
+ */
+
+/* The number of low I/O ports used by the ethercard. */
+#define SEEQ8005_IO_EXTENT 16
+
+#define SEEQ_B (ioaddr)
+
+#define SEEQ_CMD (SEEQ_B) /* Write only */
+#define SEEQ_STATUS (SEEQ_B) /* Read only */
+#define SEEQ_CFG1 (SEEQ_B + 2)
+#define SEEQ_CFG2 (SEEQ_B + 4)
+#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
+#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
+#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
+#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
+#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
+
+#define DEFAULT_TEA (0x3f)
+
+#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
+#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
+#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
+#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
+#define SEEQCMD_INT_MASK (0x000f)
+
+#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
+#define SEEQCMD_RX_INT_ACK (0x0020)
+#define SEEQCMD_TX_INT_ACK (0x0040)
+#define SEEQCMD_WINDOW_INT_ACK (0x0080)
+#define SEEQCMD_ACK_ALL (0x00f0)
+
+#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
+#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
+#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
+#define SEEQCMD_SET_DMA_OFF (0x0800)
+#define SEEQCMD_SET_RX_OFF (0x1000)
+#define SEEQCMD_SET_TX_OFF (0x2000)
+#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
+
+#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
+#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
+
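+/*
+ * Illustrative helper (a sketch, not part of the original driver): any
+ * write to SEEQ_CMD also rewrites the four interrupt-enable bits, so the
+ * driver always ORs in (status & SEEQCMD_INT_MASK) to carry over whatever
+ * interrupts are currently enabled.  Like the SEEQ_* register macros above,
+ * this assumes a local `ioaddr' variable in scope at the call site.
+ */
+#define SEEQ_CMD_KEEP_INTS(bits, status) \
+ outw((bits) | ((status) & SEEQCMD_INT_MASK), SEEQ_CMD)
+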
+#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
+#define SEEQSTAT_RX_INT_EN (0x0002)
+#define SEEQSTAT_TX_INT_EN (0x0004)
+#define SEEQSTAT_WINDOW_INT_EN (0x0008)
+
+#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
+#define SEEQSTAT_RX_INT (0x0020)
+#define SEEQSTAT_TX_INT (0x0040)
+#define SEEQSTAT_WINDOW_INT (0x0080)
+#define SEEQSTAT_ANY_INT (0x00f0)
+
+#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
+#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
+#define SEEQSTAT_TX_ON (0x0400) /* TX running */
+
+#define SEEQSTAT_FIFO_FULL (0x2000)
+#define SEEQSTAT_FIFO_EMPTY (0x4000)
+#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
+
+#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */
+#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
+#define SEEQCFG1_BUFFER_MAC1 (0x0001)
+#define SEEQCFG1_BUFFER_MAC2 (0x0002)
+#define SEEQCFG1_BUFFER_MAC3 (0x0003)
+#define SEEQCFG1_BUFFER_MAC4 (0x0004)
+#define SEEQCFG1_BUFFER_MAC5 (0x0005)
+#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
+#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
+#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
+#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
+
+#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
+#define SEEQCFG1_DMA_CONT (0x0000)
+#define SEEQCFG1_DMA_800ns (0x0010)
+#define SEEQCFG1_DMA_1600ns (0x0020)
+#define SEEQCFG1_DMA_3200ns (0x0030)
+
+#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
+#define SEEQCFG1_DMA_LEN1 (0x0000)
+#define SEEQCFG1_DMA_LEN2 (0x0040)
+#define SEEQCFG1_DMA_LEN4 (0x0080)
+#define SEEQCFG1_DMA_LEN8 (0x00c0)
+
+#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
+#define SEEQCFG1_MAC0_EN (0x0100)
+#define SEEQCFG1_MAC1_EN (0x0200)
+#define SEEQCFG1_MAC2_EN (0x0400)
+#define SEEQCFG1_MAC3_EN (0x0800)
+#define SEEQCFG1_MAC4_EN (0x1000)
+#define SEEQCFG1_MAC5_EN (0x2000)
+
+#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
+#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
+#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
+#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
+#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
+
+#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
+
+#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
+#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
+
+#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
+#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
+#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
+
+#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
+#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
+#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
+#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
+#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */
+#define SEEQCFG2_LOOPBACK (0x0800)
+#define SEEQCFG2_CTRLO (0x1000)
+#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
+
+struct seeq_pkt_hdr {
+ unsigned short next; /* address of next packet header */
+ unsigned char babble_int:1, /* enable int on >1514 byte packet */
+ coll_int:1, /* enable int on collision */
+ coll_16_int:1, /* enable int on >15 collision */
+ xmit_int:1, /* enable int on success (or xmit with <15 collision) */
+ unused:1,
+ data_follows:1, /* if not set, process this as a header and pointer only */
+ chain_cont:1, /* if set, more headers in chain only cmd bit valid in recv header */
+ xmit_recv:1; /* if set, a xmit packet, else a receive packet.*/
+ unsigned char status;
+};
+
+#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
+#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
+#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
+#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
+#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
+#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
+#define SEEQPKTH_XMIT (0x80)
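+
+/*
+ * How seeq8005.c uses these bits (descriptive note, added for clarity):
+ * each packet in chip buffer memory starts with a big-endian word holding
+ * the address of the next packet header, followed by a word of SEEQPKTH_*
+ * flags.  hardware_send_packet() builds a transmit header with
+ *
+ *   outw( htons(length + 4), SEEQ_BUFFER);
+ *   outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN,
+ *         SEEQ_BUFFER );
+ *
+ * and seeq8005_rx() reads the next-header word back as
+ * ntohs(inw(SEEQ_BUFFER)) before fetching the header/status word.
+ */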
+
+#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
+#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
+#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
+#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
+#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
+#define SEEQPKTS_DRIB (0x0400) /* recv only */
+#define SEEQPKTS_SHORT (0x0800) /* recv only */
+#define SEEQPKTS_DONE (0x8000)
+#define SEEQPKTS_ANY_ERROR (0x0f00)
diff --git a/linux/src/drivers/net/sis900.c b/linux/src/drivers/net/sis900.c
new file mode 100644
index 0000000..d9e5f63
--- /dev/null
+++ b/linux/src/drivers/net/sis900.c
@@ -0,0 +1,1803 @@
+/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
+ Copyright 1999 Silicon Integrated System Corporation
+ Revision: 1.06.11 Apr. 30 2002
+
+ Modified from the driver which is originally written by Donald Becker.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License (GPL), incorporated herein by reference.
+ Drivers based on this skeleton fall under the GPL and must retain
+ the authorship (implicit copyright) notice.
+
+ References:
+ SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ preliminary Rev. 1.0 Jan. 14, 1998
+ SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ preliminary Rev. 1.0 Nov. 10, 1998
+ SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ preliminary Rev. 1.0 Jan. 18, 1998
+ http://www.sis.com.tw/support/databook.htm
+
+ Rev 1.06.11 Apr. 25 2002 Mufasa Yang (mufasa@sis.com.tw) added SiS962 support
+ Rev 1.06.10 Dec. 18 2001 Hui-Fen Hsu workaround for EDB & RTL8201 PHY
+ Rev 1.06.09 Sep. 28 2001 Hui-Fen Hsu update for 630ET & workaround for ICS1893 PHY
+ Rev 1.06.08 Mar. 2 2001 Hui-Fen Hsu (hfhsu@sis.com.tw) some bug fix & 635M/B support
+ Rev 1.06.07 Jan. 8 2001 Lei-Chun Chang added RTL8201 PHY support
+ Rev 1.06.06 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support
+ Rev 1.06.05 Aug. 22 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalier workaroung rule
+ Rev 1.06.03 Dec. 23 1999 Ollie Lho Third release
+ Rev 1.06.02 Nov. 23 1999 Ollie Lho bug in mac probing fixed
+ Rev 1.06.01 Nov. 16 1999 Ollie Lho CRC calculation provide by Joseph Zbiciak (im14u2c@primenet.com)
+ Rev 1.06 Nov. 4 1999 Ollie Lho (ollie@sis.com.tw) Second release
+ Rev 1.05.05 Oct. 29 1999 Ollie Lho (ollie@sis.com.tw) Single buffer Tx/Rx
+ Chin-Shan Li (lcs@sis.com.tw) Added AMD Am79c901 HomePNA PHY support
+ Rev 1.05 Aug. 7 1999 Jim Huang (cmhuang@sis.com.tw) Initial release
+*/
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/bios32.h>
+#include <linux/compatmac.h>
+
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <asm/types.h>
+#include "sis900.h"
+
+
+#if LINUX_VERSION_CODE < 0x20159
+#define dev_free_skb(skb) dev_kfree_skb (skb, FREE_WRITE);
+#else /* Grrr, incompatible changes should change the name. */
+#define dev_free_skb(skb) dev_kfree_skb(skb);
+#endif
+
+static const char *version =
+"sis900.c: modified v1.06.11 4/30/2002";
+
+static int max_interrupt_work = 20;
+static int multicast_filter_limit = 128;
+
+#define sis900_debug debug
+static int sis900_debug = 0;
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+
+enum pci_flags_bit {
+ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+ PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+struct mac_chip_info {
+ const char *name;
+ u16 vendor_id, device_id, flags;
+ int io_size;
+ struct device *(*probe) (struct mac_chip_info *mac, long ioaddr, int irq,
+ int pci_index, unsigned char pci_device_fn, unsigned char pci_bus, struct device * net_dev);
+};
+static struct device * sis900_mac_probe (struct mac_chip_info * mac, long ioaddr, int irq,
+ int pci_index, unsigned char pci_device_fn,
+ unsigned char pci_bus, struct device * net_dev);
+static struct mac_chip_info mac_chip_table[] = {
+ { "SiS 900 PCI Fast Ethernet", PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
+ PCI_COMMAND_IO|PCI_COMMAND_MASTER, SIS900_TOTAL_SIZE, sis900_mac_probe},
+ { "SiS 7016 PCI Fast Ethernet",PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
+ PCI_COMMAND_IO|PCI_COMMAND_MASTER, SIS900_TOTAL_SIZE, sis900_mac_probe},
+ {0,}, /* 0 terminated list. */
+};
+
+static void sis900_read_mode(struct device *net_dev, int *speed, int *duplex);
+
+static struct mii_chip_info {
+ const char * name;
+ u16 phy_id0;
+ u16 phy_id1;
+ u8 phy_types;
+#define HOME 0x0001
+#define LAN 0x0002
+#define MIX 0x0003
+} mii_chip_table[] = {
+ { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
+ { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
+ { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
+ { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
+ { "ICS LAN PHY", 0x0015, 0xF440, LAN },
+ { "NS 83851 PHY", 0x2000, 0x5C20, MIX },
+ { "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
+ {0,},
+};
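+
+/*
+ * Descriptive note (added for clarity): sis900_mii_probe() matches a
+ * detected PHY against this table by comparing phy_id0 exactly and only
+ * the upper bits of phy_id1:
+ *
+ *   (mii_phy->phy_id0 == mii_chip_table[i].phy_id0) &&
+ *   ((mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1)
+ *
+ * so the low four bits of the second PHY ID register (the revision field)
+ * are ignored.
+ */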
+
+struct mii_phy {
+ struct mii_phy * next;
+ int phy_addr;
+ u16 phy_id0;
+ u16 phy_id1;
+ u16 status;
+ u8 phy_types;
+};
+
+typedef struct _BufferDesc {
+ u32 link;
+ u32 cmdsts;
+ u32 bufptr;
+} BufferDesc;
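+
+/*
+ * Descriptor usage in this driver (a descriptive sketch of the code below,
+ * not an addition to it): `link' holds the bus address of the next
+ * descriptor in the ring, `bufptr' the bus address of the packet data and
+ * `cmdsts' the OWN bit plus the buffer/packet size.  Handing a Tx buffer
+ * to the chip therefore looks like
+ *
+ *   sis_priv->tx_ring[entry].bufptr = virt_to_bus(skb->data);
+ *   sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ *   outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+ *
+ * as done in sis900_start_xmit() below.
+ */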
+
+struct sis900_private {
+ struct device *next_module;
+ struct enet_statistics stats;
+
+ /* struct pci_dev * pci_dev;*/
+ unsigned char pci_bus;
+ unsigned char pci_device_fn;
+ int pci_index;
+
+ struct mac_chip_info * mac;
+ struct mii_phy * mii;
+ struct mii_phy * first_mii; /* record the first mii structure */
+ unsigned int cur_phy;
+
+ struct timer_list timer; /* Link status detection timer. */
+ u8 autong_complete; /* 1: auto-negotiate complete */
+
+ unsigned int cur_rx, dirty_rx; /* producer/consumer pointers for Tx/Rx ring */
+ unsigned int cur_tx, dirty_tx;
+
+ /* The saved address of a sent/receive-in-place packet buffer */
+ struct sk_buff *tx_skbuff[NUM_TX_DESC];
+ struct sk_buff *rx_skbuff[NUM_RX_DESC];
+ BufferDesc tx_ring[NUM_TX_DESC];
+ BufferDesc rx_ring[NUM_RX_DESC];
+
+ unsigned int tx_full; /* The Tx queue is full. */
+ int LinkOn;
+};
+
+#ifdef MODULE
+#if LINUX_VERSION_CODE > 0x20115
+MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
+MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+#endif
+#endif
+
+static int sis900_open(struct device *net_dev);
+static int sis900_mii_probe (unsigned char pci_bus, unsigned char pci_device_fn, struct device * net_dev);
+static void sis900_init_rxfilter (struct device * net_dev);
+static u16 read_eeprom(long ioaddr, int location);
+static u16 mdio_read(struct device *net_dev, int phy_id, int location);
+static void mdio_write(struct device *net_dev, int phy_id, int location, int val);
+static void sis900_timer(unsigned long data);
+static void sis900_check_mode (struct device *net_dev, struct mii_phy *mii_phy);
+static void sis900_tx_timeout(struct device *net_dev);
+static void sis900_init_tx_ring(struct device *net_dev);
+static void sis900_init_rx_ring(struct device *net_dev);
+static int sis900_start_xmit(struct sk_buff *skb, struct device *net_dev);
+static int sis900_rx(struct device *net_dev);
+static void sis900_finish_xmit (struct device *net_dev);
+static void sis900_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int sis900_close(struct device *net_dev);
+static int mii_ioctl(struct device *net_dev, struct ifreq *rq, int cmd);
+static struct enet_statistics *sis900_get_stats(struct device *net_dev);
+static u16 sis900_compute_hashtable_index(u8 *addr, u8 revision);
+static void set_rx_mode(struct device *net_dev);
+static void sis900_reset(struct device *net_dev);
+static void sis630_set_eq(struct device *net_dev, u8 revision);
+static u16 sis900_default_phy(struct device * net_dev);
+static void sis900_set_capability( struct device *net_dev ,struct mii_phy *phy);
+static u16 sis900_reset_phy(struct device *net_dev, int phy_addr);
+static void sis900_auto_negotiate(struct device *net_dev, int phy_addr);
+static void sis900_set_mode (long ioaddr, int speed, int duplex);
+
+/* A list of all installed SiS900 devices, for removing the driver module. */
+static struct device *root_sis900_dev = NULL;
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry netcard_drv =
+ {"sis900", sis900_probe, SIS900_TOTAL_SIZE, NULL};
+#endif
+
+/* walk through every Ethernet PCI device to see whether any of them match our card list */
+int sis900_probe (struct device * net_dev)
+{
+ int found = 0;
+ int pci_index = 0;
+ unsigned char pci_bus, pci_device_fn;
+ long ioaddr;
+ int irq;
+
+ if (!pcibios_present())
+ return -ENODEV;
+
+ for (; pci_index < 0xff; pci_index++)
+ {
+ u16 vendor, device, pci_command;
+ struct mac_chip_info *mac;
+
+ if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
+ &pci_bus, &pci_device_fn) != PCIBIOS_SUCCESSFUL)
+ break;
+
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_DEVICE_ID, &device);
+
+ for (mac = mac_chip_table; mac->vendor_id; mac++)
+ {
+ if (vendor == mac->vendor_id && device == mac->device_id) break;
+ }
+
+ /* pci_dev does not match any of our cards */
+ if (mac->vendor_id == 0)
+ continue;
+
+ {
+ u32 pci_ioaddr;
+ u8 pci_irq_line;
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &pci_irq_line);
+ pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &pci_ioaddr);
+ ioaddr = pci_ioaddr & ~3;
+ irq = pci_irq_line;
+
+ if ((mac->flags & PCI_USES_IO) &&
+ check_region (pci_ioaddr, mac->io_size))
+ continue;
+
+ pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_COMMAND, &pci_command);
+
+ {
+ u8 lat;
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, &lat);
+ if (lat < 16) {
+ printk("PCI: Increasing latency timer of device %02x:%02x to 64\n",
+ pci_bus, pci_device_fn);
+ pcibios_write_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, 64);
+ }
+ }
+ net_dev = mac->probe (mac, ioaddr, irq, pci_index, pci_device_fn, pci_bus, net_dev);
+ if (net_dev != NULL)
+ {
+ found++;
+ }
+ net_dev = NULL;
+ }
+ }
+ return found ? 0 : -ENODEV;
+
+}
+
+/* older SiS900 and friends, use EEPROM to store MAC address */
+static int
+sis900_get_mac_addr(long ioaddr, struct device *net_dev)
+{
+ u16 signature;
+ int i;
+
+ /* check to see if we have sane EEPROM */
+ signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
+ if (signature == 0xffff || signature == 0x0000) {
+ printk (KERN_INFO "%s: Error EEPROM read %x\n",
+ net_dev->name, signature);
+ return 0;
+ }
+
+ /* get MAC address from EEPROM */
+ for (i = 0; i < 3; i++)
+ ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ return 1;
+}
+
+/* SiS630E model, use APC CMOS RAM to store MAC address */
+static int sis630e_get_mac_addr(long ioaddr, int pci_index, struct device *net_dev)
+{
+ u8 reg;
+ int i;
+ u8 pci_bus, pci_dfn;
+ int not_found;
+
+ not_found = pcibios_find_device(0x1039, 0x0008,
+ pci_index,
+ &pci_bus,
+ &pci_dfn);
+ if (not_found) {
+ printk("%s: Can not find ISA bridge\n", net_dev->name);
+ return 0;
+ }
+ pcibios_read_config_byte(pci_bus, pci_dfn, 0x48, &reg);
+ pcibios_write_config_byte(pci_bus, pci_dfn, 0x48, reg | 0x40);
+
+ for (i = 0; i < 6; i++) {
+ outb(0x09 + i, 0x70);
+ ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+ }
+ pcibios_write_config_byte(pci_bus, pci_dfn, 0x48, reg & ~0x40);
+
+ return 1;
+}
+
+/* 635 model : set Mac reload bit and get mac address from rfdr */
+static int sis635_get_mac_addr(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ outl(rfcrSave | RELOAD, ioaddr + cr);
+ outl(0, ioaddr + cr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
+ }
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+
+ return 1;
+}
+
+
+/**
+ * sis962_get_mac_addr: - Get MAC address for SiS962 model
+ * @net_dev: the net device to get the address for
+ *
+ * The SiS962 model stores its MAC address in an EEPROM that is shared by
+ * the LAN and 1394 controllers. To access the EEPROM, first send the EEREQ
+ * signal to the hardware and wait for EEGNT. If EEGNT is asserted, the LAN
+ * is permitted to access the EEPROM; otherwise it is not. After the MAC
+ * address has been read from the EEPROM, send the EEDONE signal to release
+ * EEPROM access by the LAN.
+ * The MAC address is read into @net_dev->dev_addr.
+ */
+
+static int sis962_get_mac_addr(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ long ee_addr = ioaddr + mear;
+ u32 waittime = 0;
+ int i;
+
+ outl(EEREQ, ee_addr);
+ while(waittime < 2000) {
+ if(inl(ee_addr) & EEGNT) {
+ /* get MAC address from EEPROM */
+ for (i = 0; i < 3; i++)
+ ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ outl(EEDONE, ee_addr);
+ return 1;
+ } else {
+ udelay(1);
+ waittime ++;
+ }
+ }
+ outl(EEDONE, ee_addr);
+ return 0;
+}
+
+struct device *
+sis900_mac_probe (struct mac_chip_info *mac, long ioaddr, int irq, int pci_index,
+ unsigned char pci_device_fn, unsigned char pci_bus, struct device * net_dev)
+{
+ struct sis900_private *sis_priv;
+ static int did_version = 0;
+
+ u8 revision;
+ int i, ret = 0;
+
+ if (did_version++ == 0)
+ printk(KERN_INFO "%s\n", version);
+
+ if ((net_dev = init_etherdev(net_dev, 0)) == NULL)
+ return NULL;
+
+ if ((net_dev->priv = kmalloc(sizeof(struct sis900_private), GFP_KERNEL)) == NULL) {
+ unregister_netdev(net_dev);
+ return NULL;
+ }
+
+ sis_priv = net_dev->priv;
+ memset(sis_priv, 0, sizeof(struct sis900_private));
+
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, mac->io_size, net_dev->name);
+ net_dev->base_addr = ioaddr;
+ net_dev->irq = irq;
+
+ sis_priv->mac = mac;
+ sis_priv->pci_bus = pci_bus;
+ sis_priv->pci_device_fn = pci_device_fn;
+ sis_priv->pci_index = pci_index;
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &revision);
+
+ if ( revision == SIS630E_900_REV )
+ ret = sis630e_get_mac_addr(ioaddr, pci_index, net_dev);
+ else if ((revision > 0x81) && (revision <= 0x90))
+ ret = sis635_get_mac_addr(net_dev);
+ else if (revision == SIS962_900_REV)
+ ret = sis962_get_mac_addr(net_dev);
+ else
+ ret = sis900_get_mac_addr(ioaddr, net_dev);
+
+ if (ret == 0) {
+ unregister_netdev(net_dev);
+ return NULL;
+ }
+
+ /* print some information about our NIC */
+ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", net_dev->name, mac->name,
+ ioaddr, irq);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", (u8)net_dev->dev_addr[i]);
+ printk("%2.2x.\n", net_dev->dev_addr[i]);
+
+ /* 630ET : set the mii access mode as software-mode */
+ if (revision == SIS630ET_900_REV)
+ outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr);
+
+ /* probe for mii transceiver */
+ if (sis900_mii_probe(pci_bus, pci_device_fn, net_dev) == 0) {
+ unregister_netdev(net_dev);
+ kfree(sis_priv);
+ release_region(ioaddr, mac->io_size);
+ return NULL;
+ }
+
+ sis_priv->next_module = root_sis900_dev;
+ root_sis900_dev = net_dev;
+
+ /* The SiS900-specific entries in the device structure. */
+ net_dev->open = &sis900_open;
+ net_dev->hard_start_xmit = &sis900_start_xmit;
+ net_dev->stop = &sis900_close;
+ net_dev->get_stats = &sis900_get_stats;
+ net_dev->set_multicast_list = &set_rx_mode;
+ net_dev->do_ioctl = &mii_ioctl;
+
+ return net_dev;
+}
+
+/* sis900_mii_probe: - Probe MII PHY for sis900 */
+static int sis900_mii_probe (unsigned char pci_bus, unsigned char pci_device_fn, struct device * net_dev)
+{
+ struct sis900_private * sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 poll_bit = MII_STAT_LINK, status = 0;
+ unsigned int timeout = jiffies + 5 * HZ;
+ int phy_addr;
+ u8 revision;
+
+ sis_priv->mii = NULL;
+
+ /* search for total of 32 possible mii phy addresses */
+ for (phy_addr = 0; phy_addr < 32; phy_addr++) {
+ struct mii_phy * mii_phy = NULL;
+ u16 mii_status;
+ int i;
+
+ for(i=0; i<2; i++)
+ mii_status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (mii_status == 0xffff || mii_status == 0x0000)
+ /* the mii is not accessible, try next one */
+ continue;
+
+ if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
+ printk(KERN_INFO "Cannot allocate mem for struct mii_phy\n");
+ return 0;
+ }
+
+ mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
+ mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
+ mii_phy->phy_addr = phy_addr;
+ mii_phy->status = mii_status;
+ mii_phy->next = sis_priv->mii;
+ sis_priv->mii = mii_phy;
+ sis_priv->first_mii = mii_phy;
+
+ for (i=0; mii_chip_table[i].phy_id1; i++)
+ if ( ( mii_phy->phy_id0 == mii_chip_table[i].phy_id0 ) &&
+ ( (mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1 )){
+
+ mii_phy->phy_types = mii_chip_table[i].phy_types;
+ if(mii_chip_table[i].phy_types == MIX)
+ mii_phy->phy_types =
+ (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX))?LAN:HOME;
+ printk(KERN_INFO "%s: %s transceiver found at address %d.\n",
+ net_dev->name, mii_chip_table[i].name, phy_addr);
+ break;
+ }
+
+ if( !mii_chip_table[i].phy_id1 )
+ printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n",
+ net_dev->name, phy_addr);
+ }
+
+ if (sis_priv->mii == NULL) {
+ printk(KERN_INFO "%s: No MII transceivers found!\n",
+ net_dev->name);
+ return 0;
+ }
+
+ /* Select the default PHY to put in sis_priv->mii & sis_priv->cur_phy */
+ sis_priv->mii = NULL;
+ sis900_default_phy( net_dev );
+
+ /* Reset PHY if default PHY is internal sis900 */
+ if( (sis_priv->mii->phy_id0 == 0x001D) &&
+ ( (sis_priv->mii->phy_id1&0xFFF0) == 0x8000) )
+ status = sis900_reset_phy( net_dev, sis_priv->cur_phy );
+
+ /* workaround for ICS1893 PHY */
+ if ((sis_priv->mii->phy_id0 == 0x0015) &&
+ ((sis_priv->mii->phy_id1&0xFFF0) == 0xF440))
+ mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);
+
+ if( status & MII_STAT_LINK ){
+ while (poll_bit)
+ {
+ poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit);
+ if (jiffies >= timeout)
+ {
+ printk(KERN_WARNING "%s: reset phy and link down now\n", net_dev->name);
+ return -ETIME;
+ }
+ }
+ }
+
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &revision);
+ if (revision == SIS630E_900_REV) {
+ /* SiS 630E has some bugs on default value of PHY registers */
+ mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0);
+ //mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, 0x1000);
+ }
+
+ if (sis_priv->mii->status & MII_STAT_LINK)
+ sis_priv->LinkOn = TRUE;
+ else
+ sis_priv->LinkOn = FALSE;
+
+ return 1;
+}
+
+
+/* sis900_default_phy : Select one default PHY for sis900 mac */
+static u16 sis900_default_phy(struct device * net_dev)
+{
+ struct sis900_private * sis_priv = (struct sis900_private *)net_dev->priv;
+ struct mii_phy *phy = NULL, *phy_home = NULL, *default_phy = NULL;
+ u16 status;
+
+ for( phy=sis_priv->first_mii; phy; phy=phy->next ){
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ /* Link ON & default PHY not yet selected */
+ if ( (status & MII_STAT_LINK) && !(default_phy) )
+ default_phy = phy;
+ else{
+ status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
+ mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
+ status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
+ if( phy->phy_types == HOME )
+ phy_home = phy;
+ }
+ }
+
+ if( (!default_phy) && phy_home )
+ default_phy = phy_home;
+ else if(!default_phy)
+ default_phy = sis_priv->first_mii;
+
+ if( sis_priv->mii != default_phy ){
+ sis_priv->mii = default_phy;
+ sis_priv->cur_phy = default_phy->phy_addr;
+ printk(KERN_INFO "%s: Using transceiver found at address %d as default\n", net_dev->name,sis_priv->cur_phy);
+ }
+
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
+ status &= (~MII_CNTL_ISOLATE);
+
+ mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ return status;
+}
+
+
+/* sis900_set_capability : set the media capability of network adapter */
+static void sis900_set_capability( struct device *net_dev , struct mii_phy *phy )
+{
+ u16 cap;
+ u16 status;
+
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+
+ cap = MII_NWAY_CSMA_CD |
+ ((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
+ ((phy->status & MII_STAT_CAN_TX) ? MII_NWAY_TX:0) |
+ ((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)|
+ ((phy->status & MII_STAT_CAN_T) ? MII_NWAY_T:0);
+
+ mdio_write( net_dev, phy->phy_addr, MII_ANADV, cap );
+}
+
+
+/* Delay between EEPROM clock transitions. */
+#define eeprom_delay() inl(ee_addr)
+
+/* Read the serial EEPROM through the EEPROM Access Register. Note that the
+   location is in word (16-bit) units */
+static u16 read_eeprom(long ioaddr, int location)
+{
+ int i;
+ u16 retval = 0;
+ long ee_addr = ioaddr + mear;
+ u32 read_cmd = location | EEread;
+
+ outl(0, ee_addr);
+ eeprom_delay();
+ outl(EECS, ee_addr);
+ eeprom_delay();
+
+ /* Shift the read command (9) bits out. */
+ for (i = 8; i >= 0; i--) {
+ u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
+ outl(dataval, ee_addr);
+ eeprom_delay();
+ outl(dataval | EECLK, ee_addr);
+ eeprom_delay();
+ }
+ outb(EECS, ee_addr);
+ eeprom_delay();
+
+ /* read the 16-bits data in */
+ for (i = 16; i > 0; i--) {
+ outl(EECS, ee_addr);
+ eeprom_delay();
+ outl(EECS | EECLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(0, ee_addr);
+ eeprom_delay();
+// outl(EECLK, ee_addr);
+
+ return (retval);
+}
+
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol. Note that the command bits and data bits are
+   sent out separately */
+#define mdio_delay() inl(mdio_addr)
+
+static void mdio_idle(long mdio_addr)
+{
+ outl(MDIO | MDDIR, mdio_addr);
+ mdio_delay();
+ outl(MDIO | MDDIR | MDC, mdio_addr);
+}
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(long mdio_addr)
+{
+ int i;
+
+ for (i = 31; i >= 0; i--) {
+ outl(MDDIR | MDIO, mdio_addr);
+ mdio_delay();
+ outl(MDDIR | MDIO | MDC, mdio_addr);
+ mdio_delay();
+ }
+ return;
+}
+
+static u16 mdio_read(struct device *net_dev, int phy_id, int location)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ u16 retval = 0;
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+
+ /* Read the 16 data bits. */
+ for (i = 16; i > 0; i--) {
+ outl(0, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0);
+ outl(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+
+ return retval;
+}
+
+static void mdio_write(struct device *net_dev, int phy_id, int location, int value)
+{
+ long mdio_addr = net_dev->base_addr + mear;
+ int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+ int i;
+
+ mdio_reset(mdio_addr);
+ mdio_idle(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outb(dataval, mdio_addr);
+ mdio_delay();
+ outb(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Shift the value bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
+ outl(dataval, mdio_addr);
+ mdio_delay();
+ outl(dataval | MDC, mdio_addr);
+ mdio_delay();
+ }
+ mdio_delay();
+
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outb(0, mdio_addr);
+ mdio_delay();
+ outb(MDC, mdio_addr);
+ mdio_delay();
+ }
+ outl(0x00, mdio_addr);
+
+ return;
+}
+
+static u16 sis900_reset_phy(struct device *net_dev, int phy_addr)
+{
+ int i = 0;
+ u16 status;
+
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
+
+ return status;
+}
+
+static int
+sis900_open(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ u8 revision;
+
+ /* Soft reset the chip. */
+ sis900_reset(net_dev);
+
+ /* Equalizer workaround rule */
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+
+ if (request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ, net_dev->name, net_dev)) {
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ sis900_init_rxfilter(net_dev);
+
+ sis900_init_tx_ring(net_dev);
+ sis900_init_rx_ring(net_dev);
+
+ set_rx_mode(net_dev);
+
+ net_dev->tbusy = 0;
+ net_dev->interrupt = 0;
+ net_dev->start = 1;
+
+ /* Workaround for EDB */
+ sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
+ outl(IE, ioaddr + ier);
+
+ sis900_check_mode(net_dev, sis_priv->mii);
+
+ /* Set the timer to check for link beat and perhaps switch
+    to an alternate media type. */
+ init_timer(&sis_priv->timer);
+ sis_priv->timer.expires = jiffies + HZ;
+ sis_priv->timer.data = (unsigned long)net_dev;
+ sis_priv->timer.function = &sis900_timer;
+ add_timer(&sis_priv->timer);
+
+ return 0;
+}
+
+/* set receive filter address to our MAC address */
+static void
+sis900_init_rxfilter (struct device * net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ u32 rfcrSave;
+ u32 i;
+
+ rfcrSave = inl(rfcr + ioaddr);
+
+ /* disable packet filtering before setting filter */
+ outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+
+ /* load MAC addr to filter data register */
+ for (i = 0 ; i < 3 ; i++) {
+ u32 w;
+
+ w = (u32) *((u16 *)(net_dev->dev_addr)+i);
+ outl((i << RFADDR_shift), ioaddr + rfcr);
+ outl(w, ioaddr + rfdr);
+
+ if (sis900_debug > 2) {
+ printk(KERN_INFO "%s: Receive Filter Address[%d]=%x\n",
+ net_dev->name, i, inl(ioaddr + rfdr));
+ }
+ }
+
+ /* enable packet filtering */
+ outl(rfcrSave | RFEN, rfcr + ioaddr);
+}
+
+/* Initialize the Tx ring. */
+static void
+sis900_init_tx_ring(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->tx_full = 0;
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ sis_priv->tx_skbuff[i] = NULL;
+
+ sis_priv->tx_ring[i].link = (u32) virt_to_bus(&sis_priv->tx_ring[i+1]);
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ }
+ sis_priv->tx_ring[i-1].link = (u32) virt_to_bus(&sis_priv->tx_ring[0]);
+
+ /* load Transmit Descriptor Register */
+ outl(virt_to_bus(&sis_priv->tx_ring[0]), ioaddr + txdp);
+ if (sis900_debug > 2)
+ printk(KERN_INFO "%s: TX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + txdp));
+}
+
+/* Initialize the Rx descriptor ring, pre-allocate receive buffers */
+static void
+sis900_init_rx_ring(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ sis_priv->cur_rx = 0;
+ sis_priv->dirty_rx = 0;
+
+ /* init RX descriptor */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ sis_priv->rx_skbuff[i] = NULL;
+
+ sis_priv->rx_ring[i].link = (u32) virt_to_bus(&sis_priv->rx_ring[i+1]);
+ sis_priv->rx_ring[i].cmdsts = 0;
+ sis_priv->rx_ring[i].bufptr = 0;
+ }
+ sis_priv->rx_ring[i-1].link = (u32) virt_to_bus(&sis_priv->rx_ring[0]);
+
+ /* allocate sock buffers */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for an skbuff; this leaves a "hole"
+    in the buffer ring, and it is not clear how the
+    hardware will react to this kind of degenerate buffer */
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[i] = skb;
+ sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[i].bufptr = virt_to_bus(skb->tail);
+ }
+ sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
+
+ /* load Receive Descriptor Register */
+ outl(virt_to_bus(&sis_priv->rx_ring[0]), ioaddr + rxdp);
+ if (sis900_debug > 2)
+ printk(KERN_INFO "%s: RX descriptor register loaded with: %8.8x\n",
+ net_dev->name, inl(ioaddr + rxdp));
+}
+
+/**
+ * sis630_set_eq: - set phy equalizer value for 630 LAN
+ * @net_dev: the net device to set equalizer value
+ * @revision: 630 LAN revision number
+ *
+ * 630E equalizer workaround rule(Cyrus Huang 08/15)
+ * PHY register 14h(Test)
+ * Bit 14: 0 -- Automatically detect (default)
+ * 1 -- Manually set Equalizer filter
+ * Bit 13: 0 -- (Default)
+ * 1 -- Speed up convergence of equalizer setting
+ * Bit 9 : 0 -- (Default)
+ * 1 -- Disable Baseline Wander
+ * Bit 3~7 -- Equalizer filter setting
+ * Link ON: Set Bit 9, 13 to 1, Bit 14 to 0
+ * Then calculate equalizer value
+ * Then set equalizer value, and set Bit 14 to 1, Bit 9 to 0
+ * Link Off:Set Bit 13 to 1, Bit 14 to 0
+ * Calculate Equalizer value:
+ * When Link is ON and Bit 14 is 0, the SIS900 PHY will auto-detect the proper equalizer value.
+ * When the equalizer is stable, this value is not fixed. It will be within
+ * a small range (e.g. 7~9). Then we get a minimum and a maximum value (e.g. min=7, max=9)
+ * 0 <= max <= 4 --> set equalizer to max
+ * 5 <= max <= 14 --> set equalizer to max+1 or set equalizer to max+2 if max == min
+ * max >= 15 --> set equalizer to max+5 or set equalizer to max+6 if max == min
+ */
+
+static void sis630_set_eq(struct device *net_dev, u8 revision)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 reg14h, eq_value, max_value=0, min_value=0;
+ u8 host_bridge_rev;
+ int i, maxcount=10;
+ int not_found;
+ u8 pci_bus, pci_device_fn;
+
+ if ( !(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630A_900_REV || revision == SIS630ET_900_REV) )
+ return;
+ not_found = pcibios_find_device(SIS630_VENDOR_ID, SIS630_DEVICE_ID,
+ sis_priv->pci_index,
+ &pci_bus,
+ &pci_device_fn);
+ if (not_found)
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &host_bridge_rev);
+
+ if (sis_priv->LinkOn) {
+ reg14h=mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, (0x2200 | reg14h) & 0xBFFF);
+ for (i=0; i < maxcount; i++) {
+ eq_value=(0x00F8 & mdio_read(net_dev, sis_priv->cur_phy, MII_RESV)) >> 3;
+ if (i == 0)
+ max_value=min_value=eq_value;
+ max_value=(eq_value > max_value) ? eq_value : max_value;
+ min_value=(eq_value < min_value) ? eq_value : min_value;
+ }
+ /* 630E rule to determine the equalizer value */
+ if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
+ revision == SIS630ET_900_REV) {
+ if (max_value < 5)
+ eq_value=max_value;
+ else if (max_value >= 5 && max_value < 15)
+ eq_value=(max_value == min_value) ? max_value+2 : max_value+1;
+ else if (max_value >= 15)
+ eq_value=(max_value == min_value) ? max_value+6 : max_value+5;
+ }
+ /* 630B0&B1 rule to determine the equalizer value */
+ if (revision == SIS630A_900_REV &&
+ (host_bridge_rev == SIS630B0 || host_bridge_rev == SIS630B1)) {
+ if (max_value == 0)
+ eq_value=3;
+ else
+ eq_value=(max_value+min_value+1)/2;
+ }
+ /* write equalizer value and setting */
+ reg14h=mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ reg14h=(reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8);
+ reg14h=(reg14h | 0x6000) & 0xFDFF;
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h);
+ }
+ else {
+ reg14h=mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
+ if (revision == SIS630A_900_REV &&
+ (host_bridge_rev == SIS630B0 || host_bridge_rev == SIS630B1))
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, (reg14h | 0x2200) & 0xBFFF);
+ else
+ mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, (reg14h | 0x2000) & 0xBFFF);
+ }
+ return;
+}
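+
+/*
+ * A worked example of the 630E rule above (illustrative only): if the ten
+ * reads of PHY register 14h yield equalizer values between min = 7 and
+ * max = 9, then 5 <= max < 15 and max != min, so eq_value = max + 1 = 10,
+ * which is then shifted into bits 3..7 of register 14h and written back.
+ */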
+
+
+/* on each timer tick we check two things, Link Status (ON/OFF) and
+ Link Mode (10/100/Full/Half)
+*/
+static void sis900_timer(unsigned long data)
+{
+ struct device *net_dev = (struct device *)data;
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ struct mii_phy *mii_phy = sis_priv->mii;
+ static int next_tick = 5*HZ;
+ u16 status;
+ u8 revision;
+
+ if(!sis_priv->autong_complete){
+ int speed, duplex = 0;
+
+ sis900_read_mode(net_dev, &speed, &duplex);
+ if(duplex){
+ sis900_set_mode(net_dev->base_addr, speed, duplex);
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+ }
+
+ sis_priv->timer.expires = jiffies + HZ;
+ add_timer(&sis_priv->timer);
+ return;
+ }
+
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+ status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
+
+ /* Link OFF -> ON */
+ if ( !sis_priv->LinkOn ) {
+LookForLink:
+ /* Search for new PHY */
+ status = sis900_default_phy( net_dev );
+ mii_phy = sis_priv->mii;
+
+ if( status & MII_STAT_LINK ){
+ sis900_check_mode(net_dev, mii_phy);
+ sis_priv->LinkOn = TRUE;
+ }
+ }
+ /* Link ON -> OFF */
+ else{
+ if( !(status & MII_STAT_LINK) ){
+ sis_priv->LinkOn = FALSE;
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+
+ /* Change mode issue */
+ if( (mii_phy->phy_id0 == 0x001D) &&
+ ( (mii_phy->phy_id1 & 0xFFF0) == 0x8000 ))
+ sis900_reset_phy( net_dev, sis_priv->cur_phy );
+
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+
+ goto LookForLink;
+ }
+ }
+
+ sis_priv->timer.expires = jiffies + next_tick;
+ add_timer(&sis_priv->timer);
+}
+
+static void sis900_check_mode (struct device *net_dev, struct mii_phy *mii_phy)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int speed, duplex;
+
+ if( mii_phy->phy_types == LAN ){
+ outl( ~EXD & inl( ioaddr + cfg ), ioaddr + cfg);
+ sis900_set_capability(net_dev , mii_phy);
+ sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
+ }else{
+ outl(EXD | inl( ioaddr + cfg ), ioaddr + cfg);
+ speed = HW_SPEED_HOME;
+ duplex = FDX_CAPABLE_HALF_SELECTED;
+ sis900_set_mode(net_dev->base_addr, speed, duplex);
+ sis_priv->autong_complete = 1;
+ }
+}
+
+static void sis900_set_mode (long ioaddr, int speed, int duplex)
+{
+ u32 tx_flags = 0, rx_flags = 0;
+
+ if( inl(ioaddr + cfg) & EDB_MASTER_EN ){
+ tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) | (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_64 << RxMXDMA_shift;
+ }
+ else{
+ tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) | (TX_FILL_THRESH << TxFILLT_shift);
+ rx_flags = DMA_BURST_512 << RxMXDMA_shift;
+ }
+
+ if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS ) {
+ rx_flags |= (RxDRNT_10 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_10 << TxDRNT_shift);
+ }
+ else {
+ rx_flags |= (RxDRNT_100 << RxDRNT_shift);
+ tx_flags |= (TxDRNT_100 << TxDRNT_shift);
+ }
+
+ if (duplex == FDX_CAPABLE_FULL_SELECTED) {
+ tx_flags |= (TxCSI | TxHBI);
+ rx_flags |= RxATX;
+ }
+
+ outl (tx_flags, ioaddr + txcfg);
+ outl (rx_flags, ioaddr + rxcfg);
+}
+
+
+static void sis900_auto_negotiate(struct device *net_dev, int phy_addr)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ int i = 0;
+ u32 status;
+
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK)){
+ printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
+ sis_priv->autong_complete = 1;
+ sis_priv->LinkOn = FALSE;
+ return;
+ }
+
+ /* (Re)start AutoNegotiate */
+ mdio_write(net_dev, phy_addr, MII_CONTROL,
+ MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
+ sis_priv->autong_complete = 0;
+}
+
+
+static void sis900_read_mode(struct device *net_dev, int *speed, int *duplex)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ struct mii_phy *phy = sis_priv->mii;
+ int phy_addr = sis_priv->cur_phy;
+ u32 status;
+ u16 autoadv, autorec;
+ int i = 0;
+
+ while (i++ < 2)
+ status = mdio_read(net_dev, phy_addr, MII_STATUS);
+
+ if (!(status & MII_STAT_LINK)) return;
+
+ /* AutoNegotiate completed */
+ autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
+ autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
+ status = autoadv & autorec;
+
+ *speed = HW_SPEED_10_MBPS;
+ *duplex = FDX_CAPABLE_HALF_SELECTED;
+
+ if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
+ *speed = HW_SPEED_100_MBPS;
+ if (status & ( MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+
+ sis_priv->autong_complete = 1;
+
+ /* Workaround for Realtek RTL8201 PHY issue */
+ if((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)){
+ if(mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+ if(mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
+ *speed = HW_SPEED_100_MBPS;
+ }
+
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
+ net_dev->name,
+ *speed == HW_SPEED_100_MBPS ?
+ "100mbps" : "10mbps",
+ *duplex == FDX_CAPABLE_FULL_SELECTED ?
+ "full" : "half");
+}
+
+
+static void sis900_tx_timeout(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ int i;
+
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n",
+ net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+
+ /* discard unsent packets, should this code section be protected by
+ cli(), sti() ?? */
+ sis_priv->dirty_tx = sis_priv->cur_tx = 0;
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (sis_priv->tx_skbuff[i] != NULL) {
+ dev_free_skb(sis_priv->tx_skbuff[i]);
+ sis_priv->tx_skbuff[i] = 0;
+ sis_priv->tx_ring[i].cmdsts = 0;
+ sis_priv->tx_ring[i].bufptr = 0;
+ sis_priv->stats.tx_dropped++;
+ }
+ }
+ net_dev->trans_start = jiffies;
+ net_dev->tbusy = sis_priv->tx_full = 0;
+
+ /* FIXME: Should we restart the transmission thread here ?? */
+ outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+ return;
+}
+
+static int
+sis900_start_xmit(struct sk_buff *skb, struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry;
+
+ /* test tbusy to see if we have timeout situation then set it */
+ if (test_and_set_bit(0, (void*)&net_dev->tbusy) != 0) {
+ if (jiffies - net_dev->trans_start > TX_TIMEOUT)
+ sis900_tx_timeout(net_dev);
+ return 1;
+ }
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = sis_priv->cur_tx % NUM_TX_DESC;
+ sis_priv->tx_skbuff[entry] = skb;
+
+ /* set the transmit buffer descriptor and enable Transmit State Machine */
+ sis_priv->tx_ring[entry].bufptr = virt_to_bus(skb->data);
+ sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+
+ if (++sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC) {
+ /* Typical path, clear tbusy to indicate more
+ transmission is possible */
+ clear_bit(0, (void*)&net_dev->tbusy);
+ } else {
+ /* no more transmit descriptors available, tbusy remains set */
+ sis_priv->tx_full = 1;
+ }
+
+ net_dev->trans_start = jiffies;
+
+ {
+ int i;
+ for (i = 0; i < 100000; i++); /* GRUIIIIIK */
+ }
+
+ if (sis900_debug > 3)
+ printk(KERN_INFO "%s: Queued Tx packet at %p size %d "
+ "to slot %d.\n",
+ net_dev->name, skb->data, (int)skb->len, entry);
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void sis900_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct device *net_dev = (struct device *)dev_instance;
+ int boguscnt = max_interrupt_work;
+ long ioaddr = net_dev->base_addr;
+ u32 status;
+
+#if defined(__i386__)
+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+ if (test_and_set_bit(0, (void*)&net_dev->interrupt)) {
+ printk(KERN_INFO "%s: SMP simultaneous entry of "
+ "an interrupt handler.\n", net_dev->name);
+ net_dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+#else
+ if (net_dev->interrupt) {
+ printk(KERN_INFO "%s: Re-entering the interrupt handler.\n",
+ net_dev->name);
+ return;
+ }
+ net_dev->interrupt = 1;
+#endif
+
+ do {
+ status = inl(ioaddr + isr);
+
+ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+ /* nothing interesting happened */
+ break;
+
+ /* why don't we break after the Tx/Rx case ?? keyword: full-duplex */
+ if (status & (RxORN | RxERR | RxOK))
+ /* Rx interrupt */
+ sis900_rx(net_dev);
+
+ if (status & (TxURN | TxERR | TxIDLE))
+ /* Tx interrupt */
+ sis900_finish_xmit(net_dev);
+
+ /* something strange happened !!! */
+ if (status & HIBERR) {
+ printk(KERN_INFO "%s: Abnormal interrupt, "
+ "status %#8.8x.\n", net_dev->name, status);
+ break;
+ }
+ if (--boguscnt < 0) {
+ printk(KERN_INFO "%s: Too much work at interrupt, "
+ "interrupt status = %#8.8x.\n",
+ net_dev->name, status);
+ break;
+ }
+ } while (1);
+
+ if (sis900_debug > 4)
+ printk(KERN_INFO "%s: exiting interrupt, "
+ "interrupt status = %#8.8x.\n",
+ net_dev->name, inl(ioaddr + isr));
+
+#if defined(__i386__)
+ clear_bit(0, (void*)&net_dev->interrupt);
+#else
+ net_dev->interrupt = 0;
+#endif
+ return;
+}
+
+/* Process receive interrupt events, pass buffers to the higher layer and refill the buffer pool.
+   Note: this function is called by the interrupt handler, don't do "too much" work here */
+static int sis900_rx(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ long ioaddr = net_dev->base_addr;
+ unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
+ u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
+
+ if (sis900_debug > 4)
+ printk(KERN_INFO "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
+ "status:0x%8.8x\n",
+ sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
+
+ while (rx_status & OWN) {
+ unsigned int rx_size;
+
+ rx_size = (rx_status & DSIZE) - CRC_SIZE;
+
+ if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
+ /* corrupted packet received */
+ if (sis900_debug > 4)
+ printk(KERN_INFO "%s: Corrupted packet "
+ "received, buffer status = 0x%8.8x.\n",
+ net_dev->name, rx_status);
+ sis_priv->stats.rx_errors++;
+ if (rx_status & OVERRUN)
+ sis_priv->stats.rx_over_errors++;
+ if (rx_status & (TOOLONG|RUNT))
+ sis_priv->stats.rx_length_errors++;
+ if (rx_status & (RXISERR | FAERR))
+ sis_priv->stats.rx_frame_errors++;
+ if (rx_status & CRCERR)
+ sis_priv->stats.rx_crc_errors++;
+ /* reset buffer descriptor state */
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ } else {
+ struct sk_buff * skb;
+
+ /* This situation should never happen, but due to
+    some unknown bugs, it is possible that
+    we are working on a NULL sk_buff :-( */
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ printk(KERN_INFO "%s: NULL pointer "
+ "encountered in Rx ring, skipping\n",
+ net_dev->name);
+ break;
+ }
+
+ /* give the socket buffer to the upper layers */
+ skb = sis_priv->rx_skbuff[entry];
+ skb_put(skb, rx_size);
+ skb->protocol = eth_type_trans(skb, net_dev);
+ netif_rx(skb);
+
+ /* some network statistics */
+ if ((rx_status & BCAST) == MCAST)
+ sis_priv->stats.multicast++;
+ net_dev->last_rx = jiffies;
+ /* sis_priv->stats.rx_bytes += rx_size;*/
+ sis_priv->stats.rx_packets++;
+
+ /* refill the Rx buffer; what if there is not enough memory for
+    a new socket buffer ?? */
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for an skbuff; this leaves a "hole"
+    in the buffer ring, and it is not clear how the
+    hardware will react to this kind of degenerate buffer */
+ printk(KERN_INFO "%s: Memory squeeze, "
+ "deferring packet.\n",
+ net_dev->name);
+ sis_priv->rx_skbuff[entry] = NULL;
+ /* reset buffer descriptor state */
+ sis_priv->rx_ring[entry].cmdsts = 0;
+ sis_priv->rx_ring[entry].bufptr = 0;
+ sis_priv->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr = virt_to_bus(skb->tail);
+ sis_priv->dirty_rx++;
+ }
+ sis_priv->cur_rx++;
+ entry = sis_priv->cur_rx % NUM_RX_DESC;
+ rx_status = sis_priv->rx_ring[entry].cmdsts;
+ } // while
+
+ /* refill the Rx buffer, what if the rate of refilling is slower than
+ the rate of consumption ?? */
+ for (;sis_priv->cur_rx - sis_priv->dirty_rx > 0; sis_priv->dirty_rx++) {
+ struct sk_buff *skb;
+
+ entry = sis_priv->dirty_rx % NUM_RX_DESC;
+
+ if (sis_priv->rx_skbuff[entry] == NULL) {
+ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) {
+ /* not enough memory for skbuff, this makes a "hole"
+ in the buffer ring, it is not clear how the
+ hardware will react to this kind of degenerate
+ buffer */
+ printk(KERN_INFO "%s: Memory squeeze, "
+ "deferring packet.\n",
+ net_dev->name);
+ sis_priv->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = net_dev;
+ sis_priv->rx_skbuff[entry] = skb;
+ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
+ sis_priv->rx_ring[entry].bufptr = virt_to_bus(skb->tail);
+ }
+ }
+
+ /* re-enable the potentially idle receive state machine */
+ outl(RxENA | inl(ioaddr + cr), ioaddr + cr );
+
+ return 0;
+}
+
+/* finish up the transmission of packets, check for error conditions and free the skbuff, etc.
+ Note: this function is called from the interrupt handler, don't do "too much" work here */
+static void sis900_finish_xmit (struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+
+ for (; sis_priv->dirty_tx < sis_priv->cur_tx; sis_priv->dirty_tx++) {
+ unsigned int entry;
+ u32 tx_status;
+
+ entry = sis_priv->dirty_tx % NUM_TX_DESC;
+ tx_status = sis_priv->tx_ring[entry].cmdsts;
+
+ if (tx_status & OWN) {
+ /* The packet is not transmitted yet (owned by hardware) !
+ Note: the interrupt is generated only when Tx Machine
+ is idle, so this is an almost impossible case */
+ break;
+ }
+
+ if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
+ /* packet unsuccessfully transmitted */
+ if (sis900_debug > 4)
+ printk(KERN_INFO "%s: Transmit "
+ "error, Tx status %8.8x.\n",
+ net_dev->name, tx_status);
+ sis_priv->stats.tx_errors++;
+ if (tx_status & UNDERRUN)
+ sis_priv->stats.tx_fifo_errors++;
+ if (tx_status & ABORT)
+ sis_priv->stats.tx_aborted_errors++;
+ if (tx_status & NOCARRIER)
+ sis_priv->stats.tx_carrier_errors++;
+ if (tx_status & OWCOLL)
+ sis_priv->stats.tx_window_errors++;
+ } else {
+ /* packet successfully transmitted */
+ sis_priv->stats.collisions += (tx_status & COLCNT) >> 16;
+ /* sis_priv->stats.tx_bytes += tx_status & DSIZE;*/
+ sis_priv->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ dev_free_skb(sis_priv->tx_skbuff[entry]);
+ sis_priv->tx_skbuff[entry] = NULL;
+ sis_priv->tx_ring[entry].bufptr = 0;
+ sis_priv->tx_ring[entry].cmdsts = 0;
+ }
+
+ if (sis_priv->tx_full && net_dev->tbusy &&
+ sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
+ /* The ring is no longer full, clear tbusy, tx_full and
+ schedule more transmission by marking NET_BH */
+ sis_priv->tx_full = 0;
+ clear_bit(0, (void *)&net_dev->tbusy);
+ mark_bh(NET_BH);
+ }
+}
+
+static int
+sis900_close(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ int i;
+
+ net_dev->start = 0;
+ net_dev->tbusy = 1;
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x0000, ioaddr + imr);
+ outl(0x0000, ioaddr + ier);
+
+ /* Stop the chip's Tx and Rx Status Machine */
+ outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+
+ del_timer(&sis_priv->timer);
+
+ free_irq(net_dev->irq, net_dev);
+
+ /* Free Tx and RX skbuff */
+ for (i = 0; i < NUM_RX_DESC; i++) {
+ if (sis_priv->rx_skbuff[i] != NULL)
+ dev_free_skb(sis_priv->rx_skbuff[i]);
+ sis_priv->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (sis_priv->tx_skbuff[i] != NULL)
+ dev_free_skb(sis_priv->tx_skbuff[i]);
+ sis_priv->tx_skbuff[i] = 0;
+ }
+
+ /* Green! Put the chip in low-power mode. */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int mii_ioctl(struct device *net_dev, struct ifreq *rq, int cmd)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+ data[0] = sis_priv->mii->phy_addr;
+ /* Fall Through */
+ case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+ data[3] = mdio_read(net_dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+ if (!suser())
+ return -EPERM;
+ mdio_write(net_dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct enet_statistics *
+sis900_get_stats(struct device *net_dev)
+{
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+
+ return &sis_priv->stats;
+}
+
+
+/* SiS 900 uses the most significant 7 bits to index a 128-bit multicast
+ * hash table, which makes this function a little bit different from other drivers.
+ * SiS 900 B0 & 635 M/B use the most significant 8 bits to index a 256-bit
+ * multicast hash table.
+ */
+static u16 sis900_compute_hashtable_index(u8 *addr, u8 revision)
+{
+
+/* what is the correct value of the POLYNOMIAL ??
+ Donald Becker uses 0x04C11DB7U
+ Joseph Zbiciak im14u2c@primenet.com gave me the
+ correct answer, thank you Joe !! */
+#define POLYNOMIAL 0x04C11DB7L
+ u32 crc = 0xffffffff, msb;
+ int i, j;
+ u32 byte;
+
+ for (i = 0; i < 6; i++) {
+ byte = *addr++;
+ for (j = 0; j < 8; j++) {
+ msb = crc >> 31;
+ crc <<= 1;
+ if (msb ^ (byte & 1)) {
+ crc ^= POLYNOMIAL;
+ }
+ byte >>= 1;
+ }
+ }
+
+ /* keep the 8 or 7 most significant bits */
+ if((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
+ return ((int)(crc >> 24));
+ else
+ return ((int)(crc >> 25));
+}
+
+static void set_rx_mode(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private * sis_priv = (struct sis900_private *)net_dev->priv;
+ u16 mc_filter[16] = {0}; /* 256/128-bit multicast hash table */
+ int i, table_entries;
+ u32 rx_mode;
+ u8 revision;
+
+ /* the 635 hash table has 256 bits (16 16-bit entries) */
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ if((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
+ table_entries = 16;
+ else
+ table_entries = 8;
+
+ if (net_dev->flags & IFF_PROMISC) {
+ /* Accept any kinds of packets */
+ rx_mode = RFPromiscuous;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ (net_dev->flags & IFF_ALLMULTI)) {
+ /* too many multicast addresses or accept all multicast packets */
+ rx_mode = RFAAB | RFAAM;
+ for (i = 0; i < table_entries; i++)
+ mc_filter[i] = 0xffff;
+ } else {
+ /* Accept broadcast packets and packets whose destination address
+ matches our MAC address; use the Receive Filter to reject unwanted MCAST packets */
+ struct dev_mc_list *mclist;
+ rx_mode = RFAAB;
+ for (i = 0, mclist = net_dev->mc_list; mclist && i < net_dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(sis900_compute_hashtable_index(mclist->dmi_addr, revision),
+ mc_filter);
+ }
+
+ /* update Multicast Hash Table in Receive Filter */
+ for (i = 0; i < table_entries; i++) {
+ /* why plus 0x04 ?? That makes the correct value for the hash table. */
+ outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr);
+ outl(mc_filter[i], ioaddr + rfdr);
+ }
+
+ outl(RFEN | rx_mode, ioaddr + rfcr);
+
+ /* the sis900 is capable of looping back packets at the MAC level for debugging purposes */
+ if (net_dev->flags & IFF_LOOPBACK) {
+ u32 cr_saved;
+ /* We must disable Tx/Rx before setting loopback mode */
+ cr_saved = inl(ioaddr + cr);
+ outl(cr_saved | TxDIS | RxDIS, ioaddr + cr);
+ /* enable loopback */
+ outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg);
+ outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg);
+ /* restore cr */
+ outl(cr_saved, ioaddr + cr);
+ }
+
+ return;
+}
+
+static void sis900_reset(struct device *net_dev)
+{
+ long ioaddr = net_dev->base_addr;
+ struct sis900_private *sis_priv = (struct sis900_private *)net_dev->priv;
+ int i = 0;
+ u8 revision;
+ u32 status = TxRCMP | RxRCMP;
+
+ outl(0, ioaddr + ier);
+ outl(0, ioaddr + imr);
+ outl(0, ioaddr + rfcr);
+
+ outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr);
+
+ /* Check that the chip has finished the reset. */
+ while (status && (i++ < 1000)) {
+ status ^= (inl(isr + ioaddr) & status);
+ }
+
+ pcibios_read_config_byte(sis_priv->pci_bus, sis_priv->pci_device_fn, PCI_CLASS_REVISION, &revision);
+ if( (revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV) )
+ outl(PESEL | RND_CNT, ioaddr + cfg);
+ else
+ outl(PESEL, ioaddr + cfg);
+}
+
+#ifdef MODULE
+int init_module(void)
+{
+ return sis900_probe(NULL);
+}
+
+void
+cleanup_module(void)
+{
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_sis900_dev) {
+ struct sis900_private *sis_priv =
+ (struct sis900_private *)root_sis900_dev->priv;
+ struct device *next_dev = sis_priv->next_module;
+ struct mii_phy *phy = NULL;
+
+ while(sis_priv->first_mii){
+ phy = sis_priv->first_mii;
+ sis_priv->first_mii = phy->next;
+ kfree(phy);
+ }
+
+ unregister_netdev(root_sis900_dev);
+ release_region(root_sis900_dev->base_addr,
+ sis_priv->mac->io_size);
+ kfree(sis_priv);
+ kfree(root_sis900_dev);
+
+ root_sis900_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
diff --git a/linux/src/drivers/net/sis900.h b/linux/src/drivers/net/sis900.h
new file mode 100644
index 0000000..2153625
--- /dev/null
+++ b/linux/src/drivers/net/sis900.h
@@ -0,0 +1,284 @@
+/* sis900.h Definitions for SiS ethernet controllers including 7014/7016 and 900
+ * Copyright 1999 Silicon Integrated System Corporation
+ * References:
+ * SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
+ * preliminary Rev. 1.0 Jan. 14, 1998
+ * SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
+ * preliminary Rev. 1.0 Nov. 10, 1998
+ * SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
+ * preliminary Rev. 1.0 Jan. 18, 1998
+ * http://www.sis.com.tw/support/databook.htm
+ */
+
+/* MAC operational registers of the SiS 7016 and SiS 900 ethernet controllers */
+/* The I/O extent, the SiS 900 needs 256 bytes of I/O address space */
+#define SIS900_TOTAL_SIZE 0x100
+
+/* Symbolic offsets to registers. */
+enum sis900_registers {
+ cr=0x0, //Command Register
+ cfg=0x4, //Configuration Register
+ mear=0x8, //EEPROM Access Register
+ ptscr=0xc, //PCI Test Control Register
+ isr=0x10, //Interrupt Status Register
+ imr=0x14, //Interrupt Mask Register
+ ier=0x18, //Interrupt Enable Register
+ epar=0x18, //Enhanced PHY Access Register
+ txdp=0x20, //Transmit Descriptor Pointer Register
+ txcfg=0x24, //Transmit Configuration Register
+ rxdp=0x30, //Receive Descriptor Pointer Register
+ rxcfg=0x34, //Receive Configuration Register
+ flctrl=0x38, //Flow Control Register
+ rxlen=0x3c, //Receive Packet Length Register
+ rfcr=0x48, //Receive Filter Control Register
+ rfdr=0x4C, //Receive Filter Data Register
+ pmctrl=0xB0, //Power Management Control Register
+ pmer=0xB4 //Power Management Wake-up Event Register
+};
+
+/* Symbolic names for bits in various registers */
+enum sis900_command_register_bits {
+ RELOAD = 0x00000400, ACCESSMODE = 0x00000200,/* ET */
+ RESET = 0x00000100, SWI = 0x00000080, RxRESET = 0x00000020,
+ TxRESET = 0x00000010, RxDIS = 0x00000008, RxENA = 0x00000004,
+ TxDIS = 0x00000002, TxENA = 0x00000001
+};
+
+enum sis900_configuration_register_bits {
+ DESCRFMT = 0x00000100 /* 7016 specific */, REQALG = 0x00000080,
+ SB = 0x00000040, POW = 0x00000020, EXD = 0x00000010,
+ PESEL = 0x00000008, LPM = 0x00000004, BEM = 0x00000001,
+ /* 635 & 900B Specific */
+ RND_CNT = 0x00000400, FAIR_BACKOFF = 0x00000200,
+ EDB_MASTER_EN = 0x00002000
+};
+
+enum sis900_eeprom_access_reigster_bits {
+ MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */
+ EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002,
+ EEDI = 0x00000001
+};
+
+enum sis900_interrupt_register_bits {
+ WKEVT = 0x10000000, TxPAUSEEND = 0x08000000, TxPAUSE = 0x04000000,
+ TxRCMP = 0x02000000, RxRCMP = 0x01000000, DPERR = 0x00800000,
+ SSERR = 0x00400000, RMABT = 0x00200000, RTABT = 0x00100000,
+ RxSOVR = 0x00010000, HIBERR = 0x00008000, SWINT = 0x00001000,
+ MIBINT = 0x00000800, TxURN = 0x00000400, TxIDLE = 0x00000200,
+ TxERR = 0x00000100, TxDESC = 0x00000080, TxOK = 0x00000040,
+ RxORN = 0x00000020, RxIDLE = 0x00000010, RxEARLY = 0x00000008,
+ RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001
+};
+
+enum sis900_interrupt_enable_reigster_bits {
+ IE = 0x00000001
+};
+
+/* maximum dma burst for transmit and receive */
+#define MAX_DMA_RANGE 7 /* actually 0 means MAXIMUM !! */
+#define TxMXDMA_shift 20
+#define RxMXDMA_shift 20
+
+enum sis900_tx_rx_dma{
+ DMA_BURST_512 = 0, DMA_BURST_64 = 5
+};
+
+/* transmit FIFO thresholds */
+#define TX_FILL_THRESH 16 /* 1/4 FIFO size */
+#define TxFILLT_shift 8
+#define TxDRNT_shift 0
+#define TxDRNT_100 48 /* 3/4 FIFO size */
+#define TxDRNT_10 16 /* 1/2 FIFO size */
+
+enum sis900_transmit_config_register_bits {
+ TxCSI = 0x80000000, TxHBI = 0x40000000, TxMLB = 0x20000000,
+ TxATP = 0x10000000, TxIFG = 0x0C000000, TxFILLT = 0x00003F00,
+ TxDRNT = 0x0000003F
+};
+
+/* receive FIFO thresholds */
+#define RxDRNT_shift 1
+#define RxDRNT_100 16 /* 1/2 FIFO size */
+#define RxDRNT_10 24 /* 3/4 FIFO size */
+
+enum sis900_reveive_config_register_bits {
+ RxAEP = 0x80000000, RxARP = 0x40000000, RxATX = 0x10000000,
+ RxAJAB = 0x08000000, RxDRNT = 0x0000007F
+};
+
+#define RFAA_shift 28
+#define RFADDR_shift 16
+
+enum sis900_receive_filter_control_register_bits {
+ RFEN = 0x80000000, RFAAB = 0x40000000, RFAAM = 0x20000000,
+ RFAAP = 0x10000000, RFPromiscuous = (RFAAB|RFAAM|RFAAP)
+};
+
+enum sis900_reveive_filter_data_mask {
+ RFDAT = 0x0000FFFF
+};
+
+/* EEPROM Addresses */
+enum sis900_eeprom_address {
+ EEPROMSignature = 0x00, EEPROMVendorID = 0x02, EEPROMDeviceID = 0x03,
+ EEPROMMACAddr = 0x08, EEPROMChecksum = 0x0b
+};
+
+/* The EEPROM commands include the always-set leading bit. Refer to the NM93Cxx datasheet */
+enum sis900_eeprom_command {
+ EEread = 0x0180, EEwrite = 0x0140, EEerase = 0x01C0,
+ EEwriteEnable = 0x0130, EEwriteDisable = 0x0100,
+ EEeraseAll = 0x0120, EEwriteAll = 0x0110,
+ EEaddrMask = 0x013F, EEcmdShift = 16
+};
+
+/* For the SiS962, request EEPROM software access */
+enum sis962_eeprom_command {
+ EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100
+};
+
+/* Management Data I/O (mdio) frame */
+#define MIIread 0x6000
+#define MIIwrite 0x5002
+#define MIIpmdShift 7
+#define MIIregShift 2
+#define MIIcmdLen 16
+#define MIIcmdShift 16
+
+/* Buffer Descriptor Status */
+enum sis900_buffer_status {
+ OWN = 0x80000000, MORE = 0x40000000, INTR = 0x20000000,
+ SUPCRC = 0x10000000, INCCRC = 0x10000000,
+ OK = 0x08000000, DSIZE = 0x00000FFF
+};
+/* Status for TX Buffers */
+enum sis900_tx_buffer_status {
+ ABORT = 0x04000000, UNDERRUN = 0x02000000, NOCARRIER = 0x01000000,
+ DEFERD = 0x00800000, EXCDEFER = 0x00400000, OWCOLL = 0x00200000,
+ EXCCOLL = 0x00100000, COLCNT = 0x000F0000
+};
+
+enum sis900_rx_bufer_status {
+ OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000,
+ MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
+ RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000,
+ FAERR = 0x00040000, LOOPBK = 0x00020000, RXCOL = 0x00010000
+};
+
+/* MII register offsets */
+enum mii_registers {
+ MII_CONTROL = 0x0000, MII_STATUS = 0x0001, MII_PHY_ID0 = 0x0002,
+ MII_PHY_ID1 = 0x0003, MII_ANADV = 0x0004, MII_ANLPAR = 0x0005,
+ MII_ANEXT = 0x0006
+};
+
+/* mii registers specific to SiS 900 */
+enum sis_mii_registers {
+ MII_CONFIG1 = 0x0010, MII_CONFIG2 = 0x0011, MII_STSOUT = 0x0012,
+ MII_MASK = 0x0013, MII_RESV = 0x0014
+};
+
+/* mii registers specific to ICS 1893 */
+enum ics_mii_registers {
+ MII_EXTCTRL = 0x0010, MII_QPDSTS = 0x0011, MII_10BTOP = 0x0012,
+ MII_EXTCTRL2 = 0x0013
+};
+
+/* mii registers specific to AMD 79C901 */
+enum amd_mii_registers {
+ MII_STATUS_SUMMARY = 0x0018
+};
+
+/* MII Control register bit definitions. */
+enum mii_control_register_bits {
+ MII_CNTL_FDX = 0x0100, MII_CNTL_RST_AUTO = 0x0200,
+ MII_CNTL_ISOLATE = 0x0400, MII_CNTL_PWRDWN = 0x0800,
+ MII_CNTL_AUTO = 0x1000, MII_CNTL_SPEED = 0x2000,
+ MII_CNTL_LPBK = 0x4000, MII_CNTL_RESET = 0x8000
+};
+
+/* MII Status register bit */
+enum mii_status_register_bits {
+ MII_STAT_EXT = 0x0001, MII_STAT_JAB = 0x0002,
+ MII_STAT_LINK = 0x0004, MII_STAT_CAN_AUTO = 0x0008,
+ MII_STAT_FAULT = 0x0010, MII_STAT_AUTO_DONE = 0x0020,
+ MII_STAT_CAN_T = 0x0800, MII_STAT_CAN_T_FDX = 0x1000,
+ MII_STAT_CAN_TX = 0x2000, MII_STAT_CAN_TX_FDX = 0x4000,
+ MII_STAT_CAN_T4 = 0x8000
+};
+
+#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */
+#define MII_ID1_MODEL 0x03F0 /* model number */
+#define MII_ID1_REV 0x000F /* revision number */
+
+/* MII NWAY Register Bits ...
+ valid for the ANAR (Auto-Negotiation Advertisement) and
+ ANLPAR (Auto-Negotiation Link Partner) registers */
+enum mii_nway_register_bits {
+ MII_NWAY_NODE_SEL = 0x001f, MII_NWAY_CSMA_CD = 0x0001,
+ MII_NWAY_T = 0x0020, MII_NWAY_T_FDX = 0x0040,
+ MII_NWAY_TX = 0x0080, MII_NWAY_TX_FDX = 0x0100,
+ MII_NWAY_T4 = 0x0200, MII_NWAY_PAUSE = 0x0400,
+ MII_NWAY_RF = 0x2000, MII_NWAY_ACK = 0x4000,
+ MII_NWAY_NP = 0x8000
+};
+
+enum mii_stsout_register_bits {
+ MII_STSOUT_LINK_FAIL = 0x4000,
+ MII_STSOUT_SPD = 0x0080, MII_STSOUT_DPLX = 0x0040
+};
+
+enum mii_stsics_register_bits {
+ MII_STSICS_SPD = 0x8000, MII_STSICS_DPLX = 0x4000,
+ MII_STSICS_LINKSTS = 0x0001
+};
+
+enum mii_stssum_register_bits {
+ MII_STSSUM_LINK = 0x0008, MII_STSSUM_DPLX = 0x0004,
+ MII_STSSUM_AUTO = 0x0002, MII_STSSUM_SPD = 0x0001
+};
+
+enum sis900_revision_id {
+ SIS630A_900_REV = 0x80, SIS630E_900_REV = 0x81,
+ SIS630S_900_REV = 0x82, SIS630EA1_900_REV = 0x83,
+ SIS630ET_900_REV = 0x84, SIS635A_900_REV = 0x90,
+ SIS962_900_REV = 0X91, SIS900B_900_REV = 0x03
+};
+
+enum sis630_revision_id {
+ SIS630A0 = 0x00, SIS630A1 = 0x01,
+ SIS630B0 = 0x10, SIS630B1 = 0x11
+};
+
+#define FDX_CAPABLE_DUPLEX_UNKNOWN 0
+#define FDX_CAPABLE_HALF_SELECTED 1
+#define FDX_CAPABLE_FULL_SELECTED 2
+
+#define HW_SPEED_UNCONFIG 0
+#define HW_SPEED_HOME 1
+#define HW_SPEED_10_MBPS 10
+#define HW_SPEED_100_MBPS 100
+#define HW_SPEED_DEFAULT (HW_SPEED_100_MBPS)
+
+#define CRC_SIZE 4
+#define MAC_HEADER_SIZE 14
+
+#define TX_BUF_SIZE 1536
+#define RX_BUF_SIZE 1536
+
+#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */
+#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */
+
+#define TRUE 1
+#define FALSE 0
+
+/* PCI stuff, should be moved to pci.h */
+#define PCI_DEVICE_ID_SI_900 0x900
+#define PCI_DEVICE_ID_SI_7016 0x7016
+#define SIS630_VENDOR_ID 0x1039
+#define SIS630_DEVICE_ID 0x0630
+
+/* ioctl for accessing the MII transceiver */
+#define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Get the PHY in use. */
+#define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read a PHY register. */
+#define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write a PHY register */
diff --git a/linux/src/drivers/net/sk_g16.c b/linux/src/drivers/net/sk_g16.c
new file mode 100644
index 0000000..13ebb3e
--- /dev/null
+++ b/linux/src/drivers/net/sk_g16.c
@@ -0,0 +1,2110 @@
+/*-
+ * Copyright (C) 1994 by PJD Weichmann & SWS Bern, Switzerland
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.c
+ *
+ * Version : $Revision: 1.1 $
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/26
+ * Last Updated : $Date: 1999/04/26 05:52:37 $
+ *
+ * Description : Schneider & Koch G16 Ethernet Device Driver for
+ * Linux Kernel >= 1.1.22
+ * Update History :
+ *
+-*/
+
+static const char *rcsid = "$Id: sk_g16.c,v 1.1 1999/04/26 05:52:37 tb Exp $";
+
+/*
+ * The Schneider & Koch (SK) G16 Network device driver is based
+ * on the 'ni6510' driver from Michael Hipp which can be found at
+ * ftp://sunsite.unc.edu/pub/Linux/system/Network/drivers/nidrivers.tar.gz
+ *
+ * Sources: 1) ni6510.c by M. Hipp
+ * 2) depca.c by D.C. Davies
+ * 3) skeleton.c by D. Becker
+ * 4) Am7990 Local Area Network Controller for Ethernet (LANCE),
+ * AMD, Pub. #05698, June 1989
+ *
+ * Many Thanks for helping me to get things working to:
+ *
+ * A. Cox (A.Cox@swansea.ac.uk)
+ * M. Hipp (mhipp@student.uni-tuebingen.de)
+ * R. Bolz (Schneider & Koch, Germany)
+ *
+ * See README.sk_g16 for details about limitations and bugs for the
+ * current version.
+ *
+ * To Do:
+ * - Support of SK_G8 and other SK Network Cards.
+ * - Autoset memory mapped RAM. Check for free memory and then
+ * configure RAM correctly.
+ * - SK_close should really set card in to initial state.
+ * - Test if IRQ 3 is not switched off. Use autoirq() functionality.
+ * (as in /drivers/net/skeleton.c)
+ * - Implement Multicast addressing. At minimum something like
+ * in depca.c.
+ * - Redo the statistics part.
+ * - Try to find out if the board is in 8 Bit or 16 Bit slot.
+ * If in 8 Bit mode don't use IRQ 11.
+ * - (Try to make it slightly faster.)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/fcntl.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "sk_g16.h"
+
+/*
+ * Schneider & Koch Card Definitions
+ * =================================
+ */
+
+#define SK_NAME "SK_G16"
+
+/*
+ * SK_G16 Configuration
+ * --------------------
+ */
+
+/*
+ * Abbreviations
+ * -------------
+ *
+ * RAM - used for the 16KB shared memory
+ * Boot_ROM, ROM - are used for referencing the BootEPROM
+ *
+ * SK_BOOT_ROM and SK_ADDR are symbolic constants used to configure
+ * the behaviour of the driver and the SK_G16.
+ *
+ * ! See sk_g16.install on how to install and configure the driver !
+ *
+ * SK_BOOT_ROM defines if the Boot_ROM should be switched off or not.
+ *
+ * SK_ADDR defines the address where the RAM will be mapped into the real
+ * host memory.
+ * valid addresses are from 0xa0000 to 0xfc000 in 16Kbyte steps.
+ */
+
+#define SK_BOOT_ROM 1 /* 1=BootROM on 0=off */
+
+#define SK_ADDR 0xcc000
+
+/*
+ * POS3 holds bits A14-A19 of the address bus. These bits can be set
+ * to choose the RAM address; that's why we can only choose the RAM address
+ * in 16KB steps.
+ */
+
+#define POS_ADDR (rom_addr>>14) /* Do not change this line */
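+
+/*
+ * For illustration: with the default SK_ADDR of 0xcc000 the value written
+ * to POS3 is 0xcc000 >> 14 == 0x33, i.e. address bits A14-A19 of the
+ * chosen RAM address.
+ */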
+
+/*
+ * SK_G16 I/O PORT's + IRQ's + Boot_ROM locations
+ * ----------------------------------------------
+ */
+
+/*
+ * Like nearly every other card, the SK_G16 has a specified I/O Port
+ * region and only a few possible IRQ's.
+ * The Installation Guide from Schneider & Koch lists IRQ2 as a possible
+ * interrupt. IRQ2 is always IRQ9 on boards with two cascaded interrupt
+ * controllers, so we use IRQ9 in SK_IRQS.
+ */
+
+/* Don't touch any of the following #defines. */
+
+#define SK_IO_PORTS { 0x100, 0x180, 0x208, 0x220, 0x288, 0x320, 0x328, 0x390, 0 }
+
+#define SK_IRQS { 3, 5, 9, 11, 0 }
+
+#define SK_BOOT_ROM_LOCATIONS { 0xc0000, 0xc4000, 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xdc000, 0 }
+
+#define SK_BOOT_ROM_ID { 0x55, 0xaa, 0x10, 0x50, 0x06, 0x33 }
+
+/*
+ * SK_G16 POS REGISTERS
+ * --------------------
+ */
+
+/*
+ * SK_G16 has a Programmable Option Select (POS) Register.
+ * The POS is composed of 8 separate registers (POS0-7) which
+ * are I/O mapped on an address set by the W1 switch.
+ *
+ */
+
+#define SK_POS_SIZE 8 /* 8 I/O Ports are used by SK_G16 */
+
+#define SK_POS0 ioaddr /* Card-ID Low (R) */
+#define SK_POS1 ioaddr+1 /* Card-ID High (R) */
+#define SK_POS2 ioaddr+2 /* Card-Enable, Boot-ROM Disable (RW) */
+#define SK_POS3 ioaddr+3 /* Base address of RAM */
+#define SK_POS4 ioaddr+4 /* IRQ */
+
+/* POS5 - POS7 are unused */
+
+/*
+ * SK_G16 MAC PREFIX
+ * -----------------
+ */
+
+/*
+ * Scheider & Koch manufacturer code (00:00:a5).
+ * This must be checked, that we are sure it is a SK card.
+ */
+
+#define SK_MAC0 0x00
+#define SK_MAC1 0x00
+#define SK_MAC2 0x5a
+
+/*
+ * SK_G16 ID
+ * ---------
+ */
+
+/*
+ * If POS0,POS1 contain the following ID, then we know
+ * at which I/O Port Address we are.
+ */
+
+#define SK_IDLOW 0xfd
+#define SK_IDHIGH 0x6a
+
+
+/*
+ * LANCE POS Bit definitions
+ * -------------------------
+ */
+
+#define SK_ROM_RAM_ON (POS2_CARD)
+#define SK_ROM_RAM_OFF (POS2_EPROM)
+#define SK_ROM_ON (inb(SK_POS2) & POS2_CARD)
+#define SK_ROM_OFF (inb(SK_POS2) | POS2_EPROM)
+#define SK_RAM_ON (inb(SK_POS2) | POS2_CARD)
+#define SK_RAM_OFF (inb(SK_POS2) & POS2_EPROM)
+
+#define POS2_CARD 0x0001 /* 1 = SK_G16 on 0 = off */
+#define POS2_EPROM 0x0002 /* 1 = Boot EPROM off 0 = on */
+
+/*
+ * SK_G16 Memory mapped Registers
+ * ------------------------------
+ *
+ */
+
+#define SK_IOREG (board->ioreg) /* LANCE data registers. */
+#define SK_PORT (board->port) /* Control, Status register */
+#define SK_IOCOM (board->iocom) /* I/O Command */
+
+/*
+ * SK_G16 Status/Control Register bits
+ * -----------------------------------
+ *
+ * (C) Controlreg (S) Statusreg
+ */
+
+/*
+ * Register transfer: 0 = no transfer
+ * 1 = transferring data between LANCE and I/O reg
+ */
+#define SK_IORUN 0x20
+
+/*
+ * LANCE interrupt: 0 = LANCE interrupt occurred
+ * 1 = no LANCE interrupt occurred
+ */
+#define SK_IRQ 0x10
+
+#define SK_RESET 0x08 /* Reset SK_CARD: 0 = RESET 1 = normal */
+#define SK_RW 0x02 /* 0 = write to 1 = read from */
+#define SK_ADR 0x01 /* 0 = REG DataPort 1 = RAP Reg addr port */
+
+
+#define SK_RREG SK_RW /* Transfer direction: read from the LANCE */
+#define SK_WREG 0 /* Transfer direction: write to the LANCE */
+#define SK_RAP SK_ADR /* Destination Register RAP */
+#define SK_RDATA 0 /* Destination Register REG DataPort */
+
+/*
+ * SK_G16 I/O Command
+ * ------------------
+ */
+
+/*
+ * Any bit combination written to the I/O Command register sets the
+ * internal I/O bit (the transfer will start)
+ */
+
+#define SK_DOIO 0x80 /* Do Transfer */
+
+/*
+ * LANCE RAP (Register Address Port).
+ * ---------------------------------
+ */
+
+/*
+ * The LANCE internal registers are selected through the RAP.
+ * The Registers are:
+ *
+ * CSR0 - Status and Control flags
+ * CSR1 - Low order bits of initialize block (bits 15:00)
+ * CSR2 - High order bits of initialize block (bits 07:00, 15:08 are reserved)
+ * CSR3 - Allows redefinition of the Bus Master Interface.
+ * This register must be set to 0x0002, which means BSWAP = 0,
+ * ACON = 1, BCON = 0;
+ *
+ */
+
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+/*
+ * General Definitions
+ * ===================
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * We have 16KB RAM which can be accessed by the LANCE. This memory
+ * holds not only the buffers but also the ring descriptors and
+ * the initialize block.
+ * Don't change anything unless you really know what you are doing.
+ */
+
+#define LC_LOG_TX_BUFFERS 1 /* (2 == 2^^1) 2 Transmit buffers */
+#define LC_LOG_RX_BUFFERS 3 /* (8 == 2^^3) 8 Receive buffers */
+
+/* Descriptor ring sizes */
+
+#define TMDNUM (1 << (LC_LOG_TX_BUFFERS)) /* 2 Transmit descriptors */
+#define RMDNUM (1 << (LC_LOG_RX_BUFFERS)) /* 8 Receive Buffers */
+
+/* Define Mask for setting RMD, TMD length in the LANCE init_block */
+
+#define TMDNUMMASK (LC_LOG_TX_BUFFERS << 29)
+#define RMDNUMMASK (LC_LOG_RX_BUFFERS << 29)
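+
+/*
+ * For illustration: with LC_LOG_TX_BUFFERS == 1 and LC_LOG_RX_BUFFERS == 3
+ * this yields TMDNUM == 2, RMDNUM == 8, TMDNUMMASK == 0x20000000 and
+ * RMDNUMMASK == 0x60000000; the masks carry the log2 ring sizes in the
+ * upper three bits that are OR'ed into the rdrp/tdrp fields of the
+ * initialize block in SK_lance_init().
+ */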
+
+/*
+ * Data Buffer size is set to maximum packet length.
+ */
+
+#define PKT_BUF_SZ 1518
+
+/*
+ * The number of low I/O ports used by the ethercard.
+ */
+
+#define ETHERCARD_TOTAL_SIZE SK_POS_SIZE
+
+/*
+ * Portreserve is there to mark the Card I/O Port region as used.
+ * Check_region is to check if the region at ioaddr with the size "size"
+ * is free or not.
+ * Snarf_region allocates the I/O Port region.
+ */
+
+#ifndef HAVE_PORTRESERVE
+
+#define check_region(ioaddr, size) 0
+#define request_region(ioaddr, size,name) do ; while (0)
+
+#endif
+
+/*
+ * SK_DEBUG
+ *
+ * Here you can choose what level of debugging wanted.
+ *
+ * If SK_DEBUG and SK_DEBUG2 are undefined, then only the
+ * necessary messages will be printed.
+ *
+ * If SK_DEBUG is defined, there will be many debugging prints
+ * which can help to find some mistakes in configuration or even
+ * in the driver code.
+ *
+ * If SK_DEBUG2 is defined, many many messages will be printed
+ * which normally you don't need. I used this to check the interrupt
+ * routine.
+ *
+ * (If you define only SK_DEBUG2 then only the messages for
+ * checking interrupts will be printed!)
+ *
+ * The normal way of life is:
+ *
+ * To get the whole thing going, leave both symbolic constants
+ * undefined. If you face any problems and you know what's going
+ * on (you know something about the card and you can interpret some
+ * hex LANCE register output) then define SK_DEBUG
+ *
+ */
+
+#undef SK_DEBUG /* debugging */
+#undef SK_DEBUG2 /* debugging with more verbose report */
+
+#ifdef SK_DEBUG
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x) /**/
+#endif
+
+#ifdef SK_DEBUG2
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x) /**/
+#endif
+
+/*
+ * SK_G16 RAM
+ *
+ * The components are memory mapped and can be set in a region from
+ * 0x00000 through 0xfc000 in 16KB steps.
+ *
+ * The Network components are: dual ported RAM, Prom, I/O Reg, Status-,
+ * Controlregister and I/O Command.
+ *
+ * dual ported RAM: This is the only memory region which the LANCE chip
+ * has access to. From the Lance it is addressed from 0x0000 to
+ * 0x3fbf. The host accesses it normally.
+ *
+ * PROM: The PROM holds the Ethernet MAC address. It is realised as an
+ * 8-bit PROM; this means only the 16 even addresses of the
+ * 32-byte address region are used. Access to an odd address results
+ * in invalid data.
+ *
+ * LANCE I/O Reg: The I/O Reg is built of 4 single registers: Low-Byte Write,
+ * Hi-Byte Write, Low-Byte Read, Hi-Byte Read.
+ * Transfers from or to the LANCE are always 16 bits wide, so the Low and
+ * High registers are always relevant.
+ *
+ * The data in the read register is not the data in the write register!!
+ *
+ * Port: Status and Control register.
+ * Two different registers which share the same address; Status is
+ * read-only, Control is write-only.
+ *
+ * I/O Command:
+ * Any bit combination written here starts the transfer between
+ * host and LANCE.
+ */
+
+typedef struct
+{
+ unsigned char ram[0x3fc0]; /* 16KB dual ported ram */
+ unsigned char rom[0x0020]; /* 32Byte PROM containing 6Byte MAC */
+ unsigned char res1[0x0010]; /* reserved */
+ unsigned volatile short ioreg;/* LANCE I/O Register */
+ unsigned volatile char port; /* Statusregister and Controlregister */
+ unsigned char iocom; /* I/O Command Register */
+} SK_RAM;
+
+/* struct */
+
+/*
+ * This is the structure for the dual ported ram. We
+ * have exactly 16320 bytes. In here there must be:
+ *
+ * - Initialize Block (starting at a word boundary)
+ * - Receive and Transmit Descriptor Rings (quadword boundary)
+ * - Data Buffers (arbitrary boundary)
+ *
+ * This is because LANCE has on SK_G16 only access to the dual ported
+ * RAM and nowhere else.
+ */
+
+struct SK_ram
+{
+ struct init_block ib;
+ struct tmd tmde[TMDNUM];
+ struct rmd rmde[RMDNUM];
+ char tmdbuf[TMDNUM][PKT_BUF_SZ];
+ char rmdbuf[RMDNUM][PKT_BUF_SZ];
+};
+
+/*
+ * Structure where all necessary information is for ring buffer
+ * management and statistics.
+ */
+
+struct priv
+{
+ struct SK_ram *ram; /* dual ported ram structure */
+ struct rmd *rmdhead; /* start of receive ring descriptors */
+ struct tmd *tmdhead; /* start of transmit ring descriptors */
+ int rmdnum; /* currently used receive ring descriptor */
+ int tmdnum; /* current transmit descriptor used for transmitting data */
+ int tmdlast; /* last sent descriptor, used for error handling, etc. */
+ void *rmdbufs[RMDNUM]; /* pointer to the receive buffers */
+ void *tmdbufs[TMDNUM]; /* pointer to the transmit buffers */
+ struct enet_statistics stats; /* Device driver statistics */
+};
+
+/* global variable declaration */
+
+/* IRQ map used to reserve an IRQ (see SK_open()) */
+
+/* extern void *irq2dev_map[16]; */ /* Declared in <linux/ioport.h> */
+
+/* static variables */
+
+static SK_RAM *board; /* pointer to our memory mapped board components */
+
+/* Macros */
+
+
+/* Function Prototypes */
+
+/*
+ * Device Driver functions
+ * -----------------------
+ * See for short explanation of each function its definitions header.
+ */
+
+int SK_init(struct device *dev);
+static int SK_probe(struct device *dev, short ioaddr);
+
+static int SK_open(struct device *dev);
+static int SK_send_packet(struct sk_buff *skb, struct device *dev);
+static void SK_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static void SK_rxintr(struct device *dev);
+static void SK_txintr(struct device *dev);
+static int SK_close(struct device *dev);
+
+static struct enet_statistics *SK_get_stats(struct device *dev);
+
+unsigned int SK_rom_addr(void);
+
+static void set_multicast_list(struct device *dev);
+
+/*
+ * LANCE Functions
+ * ---------------
+ */
+
+static int SK_lance_init(struct device *dev, unsigned short mode);
+void SK_reset_board(void);
+void SK_set_RAP(int reg_number);
+int SK_read_reg(int reg_number);
+int SK_rread_reg(void);
+void SK_write_reg(int reg_number, int value);
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+void SK_print_pos(struct device *dev, char *text);
+void SK_print_dev(struct device *dev, char *text);
+void SK_print_ram(struct device *dev);
+
+
+/*-
+ * Function : SK_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Check for an SK_G16 network adaptor and initialize it.
+ * This function gets called by dev_init which initializes
+ * all Network devices.
+ *
+ * Parameters : I : struct device *dev - structure preconfigured
+ * from Space.c
+ * Return Value : 0 = Driver Found and initialized
+ * Errors : ENODEV - no device found
+ * ENXIO - not probed
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+/*
+ * Check for a network adaptor of this type, and return '0' if one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ * If dev->base_addr == 2, allocate space for the device and return success
+ * (detachable devices only).
+ */
+
+int SK_init(struct device *dev)
+{
+ int ioaddr = 0; /* I/O port address used for POS regs */
+ int *port, ports[] = SK_IO_PORTS; /* SK_G16 supported ports */
+
+ /* get preconfigured base_addr from dev which is done in Space.c */
+ int base_addr = dev->base_addr;
+
+ PRINTK(("%s: %s", SK_NAME, rcsid));
+ rcsid = NULL; /* We do not want to use this further */
+
+ if (base_addr > 0x0ff) /* Check a single specified address */
+ {
+ /* Check if on specified address is a SK_G16 */
+
+ if ( (inb(SK_POS0) == SK_IDLOW) ||
+ (inb(SK_POS1) == SK_IDHIGH) )
+ {
+ return SK_probe(dev, base_addr);
+ }
+
+ return ENODEV; /* Sorry, but on specified address NO SK_G16 */
+ }
+ else if (base_addr > 0) /* Don't probe at all */
+ {
+ return ENXIO;
+ }
+
+ /* Autoprobe base_addr */
+
+ for (port = &ports[0]; *port; port++)
+ {
+ ioaddr = *port; /* we need ioaddr for accessing POS regs */
+
+ /* Check if I/O Port region is used by another board */
+
+ if (check_region(ioaddr, ETHERCARD_TOTAL_SIZE))
+ {
+ continue; /* Try next Port address */
+ }
+
+ /* Check if at ioaddr is a SK_G16 */
+
+ if ( !(inb(SK_POS0) == SK_IDLOW) ||
+ !(inb(SK_POS1) == SK_IDHIGH) )
+ {
+ continue; /* Try next Port address */
+ }
+
+ dev->base_addr = ioaddr; /* Set I/O Port Address */
+
+ if (SK_probe(dev, ioaddr) == 0)
+ {
+ return 0; /* Card found and initialized */
+ }
+ }
+
+ dev->base_addr = base_addr; /* Write back original base_addr */
+
+ return ENODEV; /* Failed to find or init driver */
+
+} /* End of SK_init */
+
+
+/*-
+ * Function : SK_probe
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called by SK_init and
+ * does the main part of initialization.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : short ioaddr - I/O Port address where POS is.
+ * Return Value : 0 = Initialization done
+ * Errors : ENODEV - No SK_G16 found
+ * -1 - Configuration problem
+ * Globals : irq2dev_map - Which device uses which IRQ
+ * : board - pointer to SK_RAM
+ * Update History :
+ * YY/MM/DD uid Description
+ * 94/06/30 pwe SK_ADDR now checked and at the correct place
+-*/
+
+int SK_probe(struct device *dev, short ioaddr)
+{
+ int i,j; /* Counters */
+ int sk_addr_flag = 0; /* SK ADDR correct? 1 - no, 0 - yes */
+ unsigned int rom_addr; /* used to store RAM address used for POS_ADDR */
+
+ struct priv *p; /* SK_G16 private structure */
+
+ if (SK_ADDR & 0x3fff || SK_ADDR < 0xa0000)
+ {
+
+ sk_addr_flag = 1;
+
+ /*
+ * Now here we could use a routine which searches for a free
+ * place in the ram and set SK_ADDR if found. TODO.
+ */
+ }
+
+ if (SK_BOOT_ROM) /* Shall we keep Boot_ROM on ? */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is set.\n", SK_NAME));
+
+ rom_addr = SK_rom_addr();
+
+ if (rom_addr == 0) /* No Boot_ROM found */
+ {
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR; /* assign predefined address */
+
+ PRINTK(("## %s: NO Bootrom found \n", SK_NAME));
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else if (rom_addr == SK_ADDR)
+ {
+ printk("%s: RAM + ROM are set to the same address %#08x\n"
+ " Check configuration. Now switching off Boot_ROM\n",
+ SK_NAME, rom_addr);
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off*/
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+ else
+ {
+ PRINTK(("## %s: Found ROM at %#08x\n", SK_NAME, rom_addr));
+ PRINTK(("## %s: Keeping Boot_ROM on\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ outb(SK_ROM_RAM_ON, SK_POS2); /* RAM on, BOOT_ROM on */
+ }
+ }
+ else /* Don't keep Boot_ROM */
+ {
+ PRINTK(("## %s: SK_BOOT_ROM is not set.\n", SK_NAME));
+
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_rom_addr(); /* Try to find a Boot_ROM */
+
+ /* IF we find a Boot_ROM disable it */
+
+ outb(SK_ROM_RAM_OFF, SK_POS2); /* Boot_ROM + RAM off */
+
+ /* We found a Boot_ROM and it's gone. Set RAM address on
+ * Boot_ROM address.
+ */
+
+ if (rom_addr)
+ {
+ printk("%s: We found Boot_ROM at %#08x. Now setting RAM on"
+ "that address\n", SK_NAME, rom_addr);
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM on Boot_ROM address */
+ }
+ else /* We did not find a Boot_ROM, use predefined SK_ADDR for ram */
+ {
+ if (sk_addr_flag) /* No or Invalid SK_ADDR is defined */
+ {
+ printk("%s: SK_ADDR %#08x is not valid. Check configuration.\n",
+ dev->name, SK_ADDR);
+ return -1;
+ }
+
+ rom_addr = SK_ADDR;
+
+ outb(POS_ADDR, SK_POS3); /* Set RAM address */
+ }
+ outb(SK_RAM_ON, SK_POS2); /* enable RAM */
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "POS registers after ROM, RAM config");
+#endif
+
+ board = (SK_RAM *) rom_addr;
+
+ /* Read in station address */
+ for (i = 0, j = 0; i < ETH_ALEN; i++, j+=2)
+ {
+ dev->dev_addr[i] = board->rom[j];
+ }
+
+ /* Check for manufacturer code */
+ if (!(dev->dev_addr[0] == SK_MAC0 &&
+ dev->dev_addr[1] == SK_MAC1 &&
+ dev->dev_addr[2] == SK_MAC2) )
+ {
+ PRINTK(("## %s: We did not find SK_G16 at RAM location.\n",
+ SK_NAME));
+ return ENODEV; /* NO SK_G16 found */
+ }
+
+ printk("%s: %s found at %#3x, HW addr: %#04x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name,
+ "Schneider & Koch Netcard",
+ (unsigned int) dev->base_addr,
+ dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5]);
+
+ /* Allocate memory for private structure */
+ p = dev->priv = (void *) kmalloc(sizeof(struct priv), GFP_KERNEL);
+ if (p == NULL) {
+ printk("%s: ERROR - no memory for driver data!\n", dev->name);
+ return -ENOMEM;
+ }
+ memset((char *) dev->priv, 0, sizeof(struct priv)); /* clear memory */
+
+ /* Grab the I/O Port region */
+ request_region(ioaddr, ETHERCARD_TOTAL_SIZE,"sk_g16");
+
+ /* Assign our Device Driver functions */
+
+ dev->open = &SK_open;
+ dev->stop = &SK_close;
+ dev->hard_start_xmit = &SK_send_packet;
+ dev->get_stats = &SK_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+
+ /* Set the generic fields of the device structure */
+
+ ether_setup(dev);
+
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* Initialize private structure */
+
+ p->ram = (struct SK_ram *) rom_addr; /* Set dual ported RAM addr */
+ p->tmdhead = &(p->ram)->tmde[0]; /* Set TMD head */
+ p->rmdhead = &(p->ram)->rmde[0]; /* Set RMD head */
+
+ /* Initialize buffer pointers */
+
+ for (i = 0; i < TMDNUM; i++)
+ {
+ p->tmdbufs[i] = &(p->ram)->tmdbuf[i];
+ }
+
+ for (i = 0; i < RMDNUM; i++)
+ {
+ p->rmdbufs[i] = &(p->ram)->rmdbuf[i];
+ }
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "End of SK_probe");
+ SK_print_ram(dev);
+#endif
+
+ return 0; /* Initialization done */
+
+} /* End of SK_probe() */
+
+
+/*-
+ * Function : SK_open
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function is called some time after booting,
+ * when the ifconfig program is run.
+ *
+ * This function requests an IRQ and sets the correct
+ * IRQ in the card. It then calls SK_lance_init() to
+ * init and start the LANCE chip. If everything is
+ * ok it returns 0 (OK), which means the SK_G16 is now
+ * open and operational.
+ *
+ * (Called by dev_open() /net/inet/dev.c)
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : 0 - Device opened
+ * Errors : -EAGAIN - Open failed
+ * Globals : irq2dev_map - which device uses which irq
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_open(struct device *dev)
+{
+ int i = 0;
+ int irqval = 0;
+ int ioaddr = dev->base_addr;
+
+ int irqtab[] = SK_IRQS;
+
+ struct priv *p = (struct priv *)dev->priv;
+
+ PRINTK(("## %s: At beginning of SK_open(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev->irq == 0) /* Autoirq */
+ {
+ i = 0;
+
+ /*
+ * Check if one IRQ out of SK_IRQS is free and install
+ * interrupt handler.
+ * Most done by request_irq().
+ * irqval: 0 - interrupt handler installed for IRQ irqtab[i]
+ * -EBUSY - interrupt busy
+ * -EINVAL - irq > 15 or handler = NULL
+ */
+
+ do
+ {
+ irqval = request_irq(irqtab[i], &SK_interrupt, 0, "sk_g16", NULL);
+ i++;
+ } while (irqval && irqtab[i]);
+
+ if (irqval) /* We tried every possible IRQ but no success */
+ {
+ printk("%s: unable to get an IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ dev->irq = irqtab[--i];
+
+ outb(i<<2, SK_POS4); /* Set Card on probed IRQ */
+
+ }
+ else if (dev->irq == 2) /* IRQ2 is always IRQ9 */
+ {
+ if (request_irq(9, &SK_interrupt, 0, "sk_g16", NULL))
+ {
+ printk("%s: unable to get IRQ 9\n", dev->name);
+ return -EAGAIN;
+ }
+ dev->irq = 9;
+
+ /*
+ * Now we set card on IRQ2.
+ * This can be confusing, but remember that IRQ2 on the network
+ * card is in reality IRQ9
+ */
+ outb(0x08, SK_POS4); /* set card to IRQ2 */
+
+ }
+ else /* Check IRQ as defined in Space.c */
+ {
+ int i = 0;
+
+ /* check if IRQ free and valid. Then install Interrupt handler */
+
+ if (request_irq(dev->irq, &SK_interrupt, 0, "sk_g16", NULL))
+ {
+ printk("%s: unable to get selected IRQ\n", dev->name);
+ return -EAGAIN;
+ }
+
+ switch(dev->irq)
+ {
+ case 3: i = 0;
+ break;
+ case 5: i = 1;
+ break;
+ case 2: i = 2;
+ break;
+ case 11:i = 3;
+ break;
+ default:
+ printk("%s: Preselected IRQ %d is invalid for %s boards",
+ dev->name,
+ dev->irq,
+ SK_NAME);
+ return -EAGAIN;
+ }
+
+ outb(i<<2, SK_POS4); /* Set IRQ on card */
+ }
+
+ irq2dev_map[dev->irq] = dev; /* Set IRQ as used by us */
+
+ printk("%s: Schneider & Koch G16 at %#3x, IRQ %d, shared mem at %#08x\n",
+ dev->name, (unsigned int)dev->base_addr,
+ (int) dev->irq, (unsigned int) p->ram);
+
+ if (!(i = SK_lance_init(dev, 0))) /* LANCE init OK? */
+ {
+
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+#ifdef SK_DEBUG
+
+ /*
+ * This debug block tries to stop LANCE,
+ * reinit LANCE with transmitter and receiver disabled,
+ * then stop again and reinit with NORMAL_MODE
+ */
+
+ printk("## %s: After lance init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_DTX | MODE_DRX);
+ printk("## %s: Reinit with DTX + DRX off. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_write_reg(CSR0, CSR0_STOP);
+ printk("## %s: LANCE stopped. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_lance_init(dev, MODE_NORMAL);
+ printk("## %s: LANCE back to normal mode. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0));
+ SK_print_pos(dev, "POS regs before returning OK");
+
+#endif /* SK_DEBUG */
+
+ return 0; /* SK_open() is successful */
+ }
+ else /* LANCE init failed */
+ {
+
+ PRINTK(("## %s: LANCE init failed: CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ dev->start = 0; /* Device not ready */
+ return -EAGAIN;
+ }
+
+} /* End of SK_open() */
+
+
+/*-
+ * Function : SK_lance_init
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Reset LANCE chip, fill RMD, TMD structures with
+ * start values and Start LANCE.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : int mode - put LANCE into "mode" see data-sheet for
+ * more info.
+ * Return Value : 0 - Init done
+ * Errors : -1 - Init failed
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_lance_init(struct device *dev, unsigned short mode)
+{
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+ struct tmd *tmdp;
+ struct rmd *rmdp;
+
+ PRINTK(("## %s: At beginning of LANCE init. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Reset LANCE */
+ SK_reset_board();
+
+ /* Initialize TMD's with start values */
+ p->tmdnum = 0; /* First descriptor for transmitting */
+ p->tmdlast = 0; /* First descriptor for reading stats */
+
+ for (i = 0; i < TMDNUM; i++) /* Init all TMD's */
+ {
+ tmdp = p->tmdhead + i;
+
+ tmdp->u.buffer = (unsigned long) p->tmdbufs[i]; /* assign buffer */
+
+ /* Mark TMD as start and end of packet */
+ tmdp->u.s.status = TX_STP | TX_ENP;
+ }
+
+
+ /* Initialize RMD's with start values */
+
+ p->rmdnum = 0; /* First RMD which will be used */
+
+ for (i = 0; i < RMDNUM; i++) /* Init all RMD's */
+ {
+ rmdp = p->rmdhead + i;
+
+
+ rmdp->u.buffer = (unsigned long) p->rmdbufs[i]; /* assign buffer */
+
+ /*
+ * The LANCE must be the owner at the beginning so that it can fill in
+ * received packets, set the status and release the RMD
+ */
+
+ rmdp->u.s.status = RX_OWN;
+
+ rmdp->blen = -PKT_BUF_SZ; /* Buffer Size in a two's complement */
+
+ rmdp->mlen = 0; /* init message length */
+
+ }
+
+ /* Fill LANCE Initialize Block */
+
+ (p->ram)->ib.mode = mode; /* Set operation mode */
+
+ for (i = 0; i < ETH_ALEN; i++) /* Set physical address */
+ {
+ (p->ram)->ib.paddr[i] = dev->dev_addr[i];
+ }
+
+ for (i = 0; i < 8; i++) /* Set multicast, logical address */
+ {
+ (p->ram)->ib.laddr[i] = 0; /* We do not use logical addressing */
+ }
+
+ /* Set ring descriptor pointers and set number of descriptors */
+
+ (p->ram)->ib.rdrp = (int) p->rmdhead | RMDNUMMASK;
+ (p->ram)->ib.tdrp = (int) p->tmdhead | TMDNUMMASK;
+
+ /* Prepare LANCE Control and Status Registers */
+
+ cli();
+
+ SK_write_reg(CSR3, CSR3_ACON); /* ALE Control !!!THIS MUST BE SET!!!! */
+
+ /*
+ * LANCE addresses the RAM from 0x0000 to 0x3fbf and has no access to
+ * PC Memory locations.
+ *
+ * Structure SK_ram defines the initialization block as the first
+ * thing in RAM, so its address as seen by the LANCE is always
+ * 0x0000
+ *
+ * CSR1 contains low order bits 15:0 of initialization block address
+ * CSR2 is built of:
+ * 7:0 High order bits 23:16 of initialization block address
+ * 15:8 reserved, must be 0
+ */
+
+ /* Set initialization block address (must be on word boundary) */
+ SK_write_reg(CSR1, 0); /* Set low order bits 15:0 */
+ SK_write_reg(CSR2, 0); /* Set high order bits 23:16 */
+
+
+ PRINTK(("## %s: After setting CSR1-3. CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ /* Initialize LANCE */
+
+ /*
+ * INIT = Initialize, when set, causes the LANCE to begin the
+ * initialization procedure and access the Init Block.
+ */
+
+ SK_write_reg(CSR0, CSR0_INIT);
+
+ sti();
+
+ /* Wait until LANCE finished initialization */
+
+ SK_set_RAP(CSR0); /* Register Address Pointer to CSR0 */
+
+ for (i = 0; (i < 100) && !(SK_rread_reg() & CSR0_IDON); i++)
+ ; /* Wait until init done or go ahead if problems (i>=100) */
+
+ if (i >= 100) /* Something is wrong ! */
+ {
+ printk("%s: can't init am7990, status: %04x "
+ "init_block: %#08x\n",
+ dev->name, (int) SK_read_reg(CSR0),
+ (unsigned int) &(p->ram)->ib);
+
+#ifdef SK_DEBUG
+ SK_print_pos(dev, "LANCE INIT failed");
+ SK_print_dev(dev,"Device Structure:");
+#endif
+
+ return -1; /* LANCE init failed */
+ }
+
+ PRINTK(("## %s: init done after %d ticks\n", SK_NAME, i));
+
+ /* Clear Initialize done, enable Interrupts, start LANCE */
+
+ SK_write_reg(CSR0, CSR0_IDON | CSR0_INEA | CSR0_STRT);
+
+ PRINTK(("## %s: LANCE started. CSR0: %#06x\n", SK_NAME,
+ SK_read_reg(CSR0)));
+
+ return 0; /* LANCE is up and running */
+
+} /* End of SK_lance_init() */
+
+
+
+/*-
+ * Function : SK_send_packet
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : Writes a socket buffer into a transmit descriptor
+ * and starts transmission.
+ *
+ * Parameters : I : struct sk_buff *skb - packet to transfer
+ * I : struct device *dev - SK_G16 device structure
+ * Return Value : 0 - OK
+ * 1 - Could not transmit (dev_queue_xmit will queue it
+ * and try to send it later)
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static int SK_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ struct priv *p = (struct priv *) dev->priv;
+ struct tmd *tmdp;
+
+ if (dev->tbusy)
+ {
+ /* if Transmitter more than 150ms busy -> time_out */
+
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 15)
+ {
+ return 1; /* We have to try transmit later */
+ }
+
+ printk("%s: xmitter timed out, try to restart!\n", dev->name);
+
+ SK_lance_init(dev, MODE_NORMAL); /* Reinit LANCE */
+
+ dev->tbusy = 0; /* Clear Transmitter flag */
+
+ dev->trans_start = jiffies; /* Mark Start of transmission */
+
+ }
+
+ /*
+ * If some upper layer thinks we missed a transmit-done interrupt
+ * we are passed NULL.
+ * (dev_queue_xmit, net/inet/dev.c)
+ */
+
+ if (skb == NULL)
+ {
+ /*
+ * Dequeue packets from transmit queue and send them.
+ */
+ dev_tint(dev);
+
+ return 0;
+ }
+
+ PRINTK2(("## %s: SK_send_packet() called, CSR0 %#04x.\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ * This means check if we are already in.
+ */
+
+ if (set_bit(0, (void *) &dev->tbusy) != 0) /* dev->tbusy already set ? */
+ {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ }
+ else
+ {
+ /* Evaluate Packet length */
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ tmdp = p->tmdhead + p->tmdnum; /* Which descriptor for transmitting */
+
+ /* Fill in Transmit Message Descriptor */
+
+ /* Copy data into dual ported ram */
+
+ memcpy((char *) (tmdp->u.buffer & 0x00ffffff), (char *)skb->data,
+ skb->len);
+
+ tmdp->blen = -len; /* set length to transmit */
+
+ /*
+ * Packet start and end are always set because we use the maximum
+ * packet length as buffer length.
+ * Relinquish ownership to LANCE
+ */
+
+ tmdp->u.s.status = TX_OWN | TX_STP | TX_ENP;
+
+ /* Start Demand Transmission */
+ SK_write_reg(CSR0, CSR0_TDMD | CSR0_INEA);
+
+ dev->trans_start = jiffies; /* Mark start of transmission */
+
+ /* Set pointer to next transmit buffer */
+ p->tmdnum++;
+ p->tmdnum &= TMDNUM-1;
+
+ /* Do we own the next transmit buffer ? */
+ if (! ((p->tmdhead + p->tmdnum)->u.s.status & TX_OWN) )
+ {
+ /*
+ * We own next buffer and are ready to transmit, so
+ * clear busy flag
+ */
+ dev->tbusy = 0;
+ }
+ }
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+} /* End of SK_send_packet */
+
+
+/*-
+ * Function : SK_interrupt
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : SK_G16 interrupt handler which checks for LANCE
+ * Errors, handles transmit and receive interrupts
+ *
+ * Parameters : I : int irq, void *dev_id, struct pt_regs * regs -
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ int csr0;
+ struct device *dev = (struct device *) irq2dev_map[irq];
+ struct priv *p = (struct priv *) dev->priv;
+
+
+ PRINTK2(("## %s: SK_interrupt(). status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ if (dev == NULL)
+ {
+ printk("SK_interrupt(): IRQ %d for unknown device.\n", irq);
+ }
+
+
+ if (dev->interrupt)
+ {
+ printk("%s: Re-entering the interrupt handler.\n", dev->name);
+ }
+
+ csr0 = SK_read_reg(CSR0); /* store register for checking */
+
+ dev->interrupt = 1; /* We are handling an interrupt */
+
+ /*
+ * Acknowledge all of the current interrupt sources, disable
+ * Interrupts (INEA = 0)
+ */
+
+ SK_write_reg(CSR0, csr0 & CSR0_CLRALL);
+
+ if (csr0 & CSR0_ERR) /* LANCE Error */
+ {
+ printk("%s: error: %04x\n", dev->name, csr0);
+
+ if (csr0 & CSR0_MISS) /* No place to store packet ? */
+ {
+ p->stats.rx_dropped++;
+ }
+ }
+
+ if (csr0 & CSR0_RINT) /* Receive Interrupt (packet arrived) */
+ {
+ SK_rxintr(dev);
+ }
+
+ if (csr0 & CSR0_TINT) /* Transmit interrupt (packet sent) */
+ {
+ SK_txintr(dev);
+ }
+
+ SK_write_reg(CSR0, CSR0_INEA); /* Enable Interrupts */
+
+ dev->interrupt = 0; /* We are out */
+} /* End of SK_interrupt() */
+
+
+/*-
+ * Function : SK_txintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : After sending a packet we check status, update
+ * statistics and relinquish ownership of transmit
+ * descriptor ring.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_txintr(struct device *dev)
+{
+ int tmdstat;
+ struct tmd *tmdp;
+ struct priv *p = (struct priv *) dev->priv;
+
+
+ PRINTK2(("## %s: SK_txintr() status: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ tmdp = p->tmdhead + p->tmdlast; /* Which buffer we sent at last ? */
+
+ /* Set next buffer */
+ p->tmdlast++;
+ p->tmdlast &= TMDNUM-1;
+
+ tmdstat = tmdp->u.s.status & 0xff00; /* filter out status bits 15:08 */
+
+ /*
+ * We check status of transmitted packet.
+ * see LANCE data-sheet for error explanation
+ */
+ if (tmdstat & TX_ERR) /* Error occurred */
+ {
+ printk("%s: TX error: %04x %04x\n", dev->name, (int) tmdstat,
+ (int) tmdp->status2);
+
+ if (tmdp->status2 & TX_TDR) /* TDR problems? */
+ {
+ printk("%s: tdr-problems \n", dev->name);
+ }
+
+ if (tmdp->status2 & TX_RTRY) /* Failed in 16 attempts to transmit ? */
+ p->stats.tx_aborted_errors++;
+ if (tmdp->status2 & TX_LCOL) /* Late collision ? */
+ p->stats.tx_window_errors++;
+ if (tmdp->status2 & TX_LCAR) /* Loss of Carrier ? */
+ p->stats.tx_carrier_errors++;
+ if (tmdp->status2 & TX_UFLO) /* Underflow error ? */
+ {
+ p->stats.tx_fifo_errors++;
+
+ /*
+ * If UFLO error occurs it will turn transmitter of.
+ * So we must reinit LANCE
+ */
+
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+
+ p->stats.tx_errors++;
+
+ tmdp->status2 = 0; /* Clear error flags */
+ }
+ else if (tmdstat & TX_MORE) /* Collisions occurred ? */
+ {
+ /*
+ * Here I have a problem.
+ * I only know that there must be one or up to 15 collisions.
+ * That's why TX_MORE is set, because after 16 attempts TX_RTRY
+ * will be set which means couldn't send packet aborted transfer.
+ *
+ * First I did not have this in but then I thought at minimum
+ * we see that something was not ok.
+ * If anyone knows something better than this to handle this
+ * please report it. (see Email addresses in the README file)
+ */
+
+ p->stats.collisions++;
+ }
+ else /* Packet sent without any problems */
+ {
+ p->stats.tx_packets++;
+ }
+
+ /*
+ * We mark the transmitter as not busy anymore, because we now have a
+ * free transmit descriptor which can be filled by SK_send_packet and
+ * afterwards sent out by the LANCE.
+ */
+
+ dev->tbusy = 0;
+
+ /*
+ * mark_bh(NET_BH);
+ * This will cause net_bh() to run after this interrupt handler.
+ *
+ * The function which handles the slow IRQ parts is do_bottom_half(),
+ * which runs at normal kernel priority, which means all interrupts are
+ * enabled. (see kernel/irq.c) A short sketch of this mechanism follows
+ * this function.
+ *
+ * net_bh does something like this:
+ * - check if already in net_bh
+ * - try to transmit something from the send queue
+ * - if something is in the receive queue send it up to higher
+ * levels if it is a known protocol
+ * - try to transmit something from the send queue
+ */
+
+ mark_bh(NET_BH);
+
+} /* End of SK_txintr() */
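+
+/*
+ * A minimal sketch of the deferral mechanism mentioned above (conceptual
+ * only, using the 2.0.x kernel names bh_active, bh_mask and bh_base from
+ * kernel/softirq.c; this is not part of this driver):
+ *
+ *   mark_bh(NET_BH);                    // sets bit NET_BH in bh_active
+ *
+ *   // later, with interrupts enabled, do_bottom_half() roughly does:
+ *   for (i = 0; i < 32; i++)
+ *       if (bh_active & bh_mask & (1 << i))
+ *           (*bh_base[i])();            // for NET_BH this calls net_bh()
+ */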
+
+
+/*-
+ * Function : SK_rxintr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/27
+ *
+ * Description : A packet has been received; check it for errors and
+ * relinquish ownership of the receive message descriptor.
+ *
+ * Parameters : I : SK_G16 device structure
+ * Return Value : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static void SK_rxintr(struct device *dev)
+{
+
+ struct rmd *rmdp;
+ int rmdstat;
+ struct priv *p = (struct priv *) dev->priv;
+
+ PRINTK2(("## %s: SK_rxintr(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ rmdp = p->rmdhead + p->rmdnum;
+
+ /* As long as we own the next entry, check status and send
+ * it up to higher layer
+ */
+
+ while (!( (rmdstat = rmdp->u.s.status) & RX_OWN))
+ {
+ /*
+ * Start and end of packet must be set, because we use
+ * the ethernet maximum packet length (1518) as buffer size.
+ *
+ * Because our buffers are of maximum size, OFLO and BUFF errors
+ * need not concern us (see the data sheet).
+ */
+
+ if ((rmdstat & (RX_STP | RX_ENP)) != (RX_STP | RX_ENP))
+ {
+ /* Start of a frame > 1518 Bytes ? */
+
+ if (rmdstat & RX_STP)
+ {
+ p->stats.rx_errors++; /* bad packet received */
+ p->stats.rx_length_errors++; /* packet too long */
+
+ printk("%s: packet too long\n", dev->name);
+ }
+
+ /*
+ * All other packets will be ignored until a new frame with
+ * start (RX_STP) set follows.
+ *
+ * What we do is simply hand the descriptor back, freeing it
+ * for new incoming packets.
+ */
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+
+ }
+ else if (rmdstat & RX_ERR) /* Receive Error ? */
+ {
+ printk("%s: RX error: %04x\n", dev->name, (int) rmdstat);
+
+ p->stats.rx_errors++;
+
+ if (rmdstat & RX_FRAM) p->stats.rx_frame_errors++;
+ if (rmdstat & RX_CRC) p->stats.rx_crc_errors++;
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+
+ }
+ else /* We have a packet which can be queued for the upper layers */
+ {
+
+ int len = (rmdp->mlen & 0x0fff); /* extract message length from receive buffer */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(len+2); /* allocate socket buffer */
+
+ if (skb == NULL) /* Could not get mem ? */
+ {
+
+ /*
+ * Couldn't allocate an sk_buff, so we give the descriptor back
+ * to the LANCE, update statistics and carry on.
+ */
+
+ rmdp->u.s.status = RX_OWN; /* Relinquish ownership to LANCE */
+ printk("%s: Couldn't allocate sk_buff, deferring packet.\n",
+ dev->name);
+ p->stats.rx_dropped++;
+
+ break; /* Jump out */
+ }
+
+ /* Prepare sk_buff to queue for upper layers */
+
+ skb->dev = dev;
+ skb_reserve(skb,2); /* Align IP header on 16 byte boundary */
+
+ /*
+ * Copy data out of our receive descriptor into sk_buff.
+ *
+ * (rmdp->u.buffer & 0x00ffffff) -> get the address of the buffer
+ * and ignore the status byte held in the top 8 bits
+ */
+
+ memcpy(skb_put(skb,len), (unsigned char *) (rmdp->u.buffer & 0x00ffffff),
+ len);
+
+
+ /*
+ * Notify the upper protocol layers that there is another packet
+ * to handle
+ *
+ * netif_rx() always succeeds. see /net/inet/dev.c for more.
+ */
+
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb); /* queue packet and mark it for processing */
+
+ /*
+ * Packet is queued and marked for processing so we
+ * free our descriptor and update statistics
+ */
+
+ rmdp->u.s.status = RX_OWN;
+ p->stats.rx_packets++;
+
+
+ p->rmdnum++;
+ p->rmdnum %= RMDNUM;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+ }
+} /* End of SK_rxintr() */
+
+
+/*-
+ * Function : SK_close
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : close gets called from dev_close() and should
+ * deinstall the card (free_irq, mem etc).
+ *
+ * Parameters : I : struct device *dev - our device structure
+ * Return Value : 0 - closed device driver
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+/* I have tried to set BOOT_ROM on and RAM off, but then, after an 'ifconfig
+ * down', the system stops. So I do not set the card back to its init state.
+ */
+
+static int SK_close(struct device *dev)
+{
+
+ PRINTK(("## %s: SK_close(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ dev->tbusy = 1; /* Transmitter busy */
+ dev->start = 0; /* Card down */
+
+ printk("%s: Shutting %s down CSR0 %#06x\n", dev->name, SK_NAME,
+ (int) SK_read_reg(CSR0));
+
+ SK_write_reg(CSR0, CSR0_STOP); /* STOP the LANCE */
+
+ free_irq(dev->irq, NULL); /* Free IRQ */
+ irq2dev_map[dev->irq] = 0; /* Mark IRQ as unused */
+
+ return 0; /* always succeed */
+
+} /* End of SK_close() */
+
+
+/*-
+ * Function : SK_get_stats
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : Return current status structure to upper layers.
+ * It is called by sprintf_stats (dev.c).
+ *
+ * Parameters : I : struct device *dev - our device structure
+ * Return Value : struct enet_statistics * - our current statistics
+ * Errors : None
+ * Side Effects : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+static struct enet_statistics *SK_get_stats(struct device *dev)
+{
+
+ struct priv *p = (struct priv *) dev->priv;
+
+ PRINTK(("## %s: SK_get_stats(). CSR0: %#06x\n",
+ SK_NAME, SK_read_reg(CSR0)));
+
+ return &p->stats; /* Return Device status */
+
+} /* End of SK_get_stats() */
+
+
+/*-
+ * Function : set_multicast_list
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/26
+ *
+ * Description : This function gets called when a program performs
+ * a SIOCSIFFLAGS call. Ifconfig does this, for example, if you
+ * call 'ifconfig [-]promisc', which enables or disables
+ * promiscuous mode.
+ * Promiscuous mode is when the network card accepts all
+ * packets, not only the packets which match our MAC
+ * address. It is useful for writing a network monitor,
+ * but it is also a security problem: you have to remember
+ * that information on the net is not encrypted.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device Structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+ * 95/10/18 ACox New multicast calling scheme
+-*/
+
+
+/* Set or clear the multicast filter for SK_G16.
+ */
+
+static void set_multicast_list(struct device *dev)
+{
+
+ if (dev->flags&IFF_PROMISC)
+ {
+ /* Reinitialize LANCE with MODE_PROM set */
+ SK_lance_init(dev, MODE_PROM);
+ }
+ else if (dev->mc_count==0 && !(dev->flags&IFF_ALLMULTI))
+ {
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+ }
+ else
+ {
+ /* Multicast with logical address filter on */
+ /* Reinitialize LANCE without MODE_PROM */
+ SK_lance_init(dev, MODE_NORMAL);
+
+ /* Not implemented yet. */
+ }
+} /* End of set_multicast_list() */
+
+
+
+/*-
+ * Function : SK_rom_addr
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/01
+ *
+ * Description : Try to find a Boot_ROM at all possible locations
+ *
+ * Parameters : None
+ * Return Value : Address where Boot_ROM is
+ * Errors : 0 - Did not find Boot_ROM
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+unsigned int SK_rom_addr(void)
+{
+ int i,j;
+ int rom_found = 0;
+ unsigned int rom_location[] = SK_BOOT_ROM_LOCATIONS;
+ unsigned char rom_id[] = SK_BOOT_ROM_ID;
+ unsigned char *test_byte;
+
+ /* Autodetect Boot_ROM */
+ PRINTK(("## %s: Autodetection of Boot_ROM\n", SK_NAME));
+
+ for (i = 0; (rom_location[i] != 0) && (rom_found == 0); i++)
+ {
+
+ PRINTK(("## Trying ROM location %#08x", rom_location[i]));
+
+ rom_found = 1;
+ for (j = 0; j < 6; j++)
+ {
+ test_byte = (unsigned char *) (rom_location[i]+j);
+ PRINTK((" %02x ", *test_byte));
+
+ if(!(*test_byte == rom_id[j]))
+ {
+ rom_found = 0;
+ }
+ }
+ PRINTK(("\n"));
+ }
+
+ if (rom_found == 1)
+ {
+ PRINTK(("## %s: Boot_ROM found at %#08x\n",
+ SK_NAME, rom_location[(i-1)]));
+
+ return (rom_location[--i]);
+ }
+ else
+ {
+ PRINTK(("%s: No Boot_ROM found\n", SK_NAME));
+ return 0;
+ }
+} /* End of SK_rom_addr() */
+
+
+
+/* LANCE access functions
+ *
+ * ! CSR1-3 can only be accessed while the STOP bit in CSR0 is set !
+ */
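+
+/*
+ * A minimal sketch of that access rule, using this driver's own helpers
+ * (illustrative only; the driver's SK_lance_init() does the real work):
+ *
+ *   SK_write_reg(CSR0, CSR0_STOP);              // STOP the LANCE first
+ *   SK_write_reg(CSR3, CSR3_ACON | CSR3_BCON);  // CSR1-3 writable now
+ *   SK_write_reg(CSR0, CSR0_INIT | CSR0_INEA);  // restart afterwards
+ */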
+
+
+/*-
+ * Function : SK_reset_board
+ *
+ * Author : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : This function resets SK_G16 and all components, but
+ * POS registers are not changed
+ *
+ * Parameters : None
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ *
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_reset_board(void)
+{
+ int i;
+
+ SK_PORT = 0x00; /* Reset active */
+ for (i = 0; i < 10 ; i++) /* Delay min 5ms */
+ ;
+ SK_PORT = SK_RESET; /* Set back to normal operation */
+
+} /* End of SK_reset_board() */
+
+
+/*-
+ * Function : SK_set_RAP
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set LANCE Register Address Port to register
+ * for later data transfer.
+ *
+ * Parameters : I : reg_number - which CSR to read/write from/to
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_set_RAP(int reg_number)
+{
+ SK_IOREG = reg_number;
+ SK_PORT = SK_RESET | SK_RAP | SK_WREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+} /* End of SK_set_RAP() */
+
+
+/*-
+ * Function : SK_read_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : Set RAP and read data from a LANCE CSR register
+ *
+ * Parameters : I : reg_number - which CSR to read from
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_read_reg(int reg_number)
+{
+ SK_set_RAP(reg_number);
+
+ SK_PORT = SK_RESET | SK_RDATA | SK_RREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+ return (SK_IOREG);
+
+} /* End of SK_read_reg() */
+
+
+/*-
+ * Function : SK_rread_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/28
+ *
+ * Description : Read data from the previously selected register.
+ * This function requires that you know which
+ * register is actually selected. Be aware that CSR1-3
+ * can only be accessed while the STOP bit in CSR0 is set.
+ *
+ * Return Value : Register contents
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+int SK_rread_reg(void)
+{
+ SK_PORT = SK_RESET | SK_RDATA | SK_RREG;
+
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+ return (SK_IOREG);
+
+} /* End of SK_rread_reg() */
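+
+/*
+ * Typical polling pattern for these helpers (an illustrative sketch, not a
+ * quote from this driver):
+ *
+ *   SK_set_RAP(CSR0);                     // select CSR0 once
+ *   while (!(SK_rread_reg() & CSR0_IDON))
+ *       ;                                 // wait for initialization done
+ */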
+
+
+/*-
+ * Function : SK_write_reg
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function sets the RAP then fills in the
+ * LANCE I/O Reg and starts Transfer to LANCE.
+ * It waits until the transfer has ended, which takes at most 7 ms,
+ * and then it returns.
+ *
+ * Parameters : I : reg_number - which CSR to write to
+ * I : value - what value to fill into register
+ * Return Value : None
+ * Errors : None
+ * Globals : SK_RAM *board - SK_RAM structure pointer
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_write_reg(int reg_number, int value)
+{
+ SK_set_RAP(reg_number);
+
+ SK_IOREG = value;
+ SK_PORT = SK_RESET | SK_RDATA | SK_WREG;
+ SK_IOCOM = SK_DOIO;
+
+ while (SK_PORT & SK_IORUN)
+ ;
+} /* End of SK_write_reg */
+
+
+
+/*
+ * Debugging functions
+ * -------------------
+ */
+
+/*-
+ * Function : SK_print_pos
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function prints out the POS (Programmable
+ * Option Select) registers. Used mainly to debug operation.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : char * - Text which will be printed as title
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_pos(struct device *dev, char *text)
+{
+ int ioaddr = dev->base_addr;
+
+ unsigned char pos0 = inb(SK_POS0),
+ pos1 = inb(SK_POS1),
+ pos2 = inb(SK_POS2),
+ pos3 = inb(SK_POS3),
+ pos4 = inb(SK_POS4);
+
+
+ printk("## %s: %s.\n"
+ "## pos0=%#4x pos1=%#4x pos2=%#04x pos3=%#08x pos4=%#04x\n",
+ SK_NAME, text, pos0, pos1, pos2, (pos3<<14), pos4);
+
+} /* End of SK_print_pos() */
+
+
+
+/*-
+ * Function : SK_print_dev
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/05/25
+ *
+ * Description : This function simply prints out the important fields
+ * of the device structure.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * I : char *text - Title for printing
+ * Return Value : None
+ * Errors : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_dev(struct device *dev, char *text)
+{
+ if (dev == NULL)
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## DEVICE == NULL\n");
+ }
+ else
+ {
+ printk("## %s: Device Structure. %s\n", SK_NAME, text);
+ printk("## Device Name: %s Base Address: %#06lx IRQ: %d\n",
+ dev->name, dev->base_addr, dev->irq);
+
+ printk("## FLAGS: start: %d tbusy: %ld int: %d\n",
+ dev->start, dev->tbusy, dev->interrupt);
+
+ printk("## next device: %#08x init function: %#08x\n",
+ (int) dev->next, (int) dev->init);
+ }
+
+} /* End of SK_print_dev() */
+
+
+
+/*-
+ * Function : SK_print_ram
+ * Author : Patrick J.D. Weichmann
+ * Date Created : 94/06/02
+ *
+ * Description : This function is used to check how things are set up
+ * in the 16KB RAM, including the pointers to the receive and
+ * transmit descriptor rings and the rx and tx buffer locations.
+ * It contains a minor printing bug, but this has no effect on the values;
+ * only the newlines are not correct.
+ *
+ * Parameters : I : struct device *dev - SK_G16 device structure
+ * Return Value : None
+ * Errors : None
+ * Globals : None
+ * Update History :
+ * YY/MM/DD uid Description
+-*/
+
+void SK_print_ram(struct device *dev)
+{
+
+ int i;
+ struct priv *p = (struct priv *) dev->priv;
+
+ printk("## %s: RAM Details.\n"
+ "## RAM at %#08x tmdhead: %#08x rmdhead: %#08x initblock: %#08x\n",
+ SK_NAME,
+ (unsigned int) p->ram,
+ (unsigned int) p->tmdhead,
+ (unsigned int) p->rmdhead,
+ (unsigned int) &(p->ram)->ib);
+
+ printk("## ");
+
+ for(i = 0; i < TMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("tmdbufs%d: %#08x ", (i+1), (int) p->tmdbufs[i]);
+ }
+ printk("## ");
+
+ for(i = 0; i < RMDNUM; i++)
+ {
+ if (!(i % 3)) /* Every third line do a newline */
+ {
+ printk("\n## ");
+ }
+ printk("rmdbufs%d: %#08x ", (i+1), (int) p->rmdbufs[i]);
+ }
+ printk("\n");
+
+} /* End of SK_print_ram() */
+
diff --git a/linux/src/drivers/net/sk_g16.h b/linux/src/drivers/net/sk_g16.h
new file mode 100644
index 0000000..31ae19a
--- /dev/null
+++ b/linux/src/drivers/net/sk_g16.h
@@ -0,0 +1,164 @@
+/*-
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ * Module : sk_g16.h
+ * Version : $Revision: 1.1.4.1 $
+ *
+ * Author : M.Hipp (mhipp@student.uni-tuebingen.de)
+ * changes by : Patrick J.D. Weichmann
+ *
+ * Date Created : 94/05/25
+ *
+ * Description : This file contains all the definitions of the
+ * am7990 (LANCE) chip that are needed for writing a
+ * network device driver which uses this chip.
+ *
+-*/
+
+#ifndef SK_G16_H
+
+#define SK_G16_H
+
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ *
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+
+/*
+ * Control and Status Register 3 (CSR3) bit definitions
+ *
+ */
+
+#define CSR3_BSWAP 0x0004 /* Byte Swap (RW) */
+#define CSR3_ACON 0x0002 /* ALE Control (RW) */
+#define CSR3_BCON 0x0001 /* Byte Control (RW) */
+
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define MODE_PROM 0x8000 /* Promiscuous Mode */
+#define MODE_INTL 0x0040 /* Internal Loopback */
+#define MODE_DRTY 0x0020 /* Disable Retry */
+#define MODE_COLL 0x0010 /* Force Collision */
+#define MODE_DTCR 0x0008 /* Disable Transmit CRC */
+#define MODE_LOOP 0x0004 /* Loopback */
+#define MODE_DTX 0x0002 /* Disable the Transmitter */
+#define MODE_DRX 0x0001 /* Disable the Receiver */
+
+#define MODE_NORMAL 0x0000 /* Normal operation mode */
+
+/*
+ * Receive message descriptor status bit definitions.
+ */
+
+#define RX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define RX_ERR 0x40 /* Error Summary */
+#define RX_FRAM 0x20 /* Framing Error */
+#define RX_OFLO 0x10 /* Overflow Error */
+#define RX_CRC 0x08 /* CRC Error */
+#define RX_BUFF 0x04 /* Buffer Error */
+#define RX_STP 0x02 /* Start of Packet */
+#define RX_ENP 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor status bit definitions.
+ */
+
+#define TX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
+#define TX_ERR 0x40 /* Error Summary */
+#define TX_MORE 0x10 /* More than 1 retry needed to Xmit */
+#define TX_ONE 0x08 /* One retry needed to Xmit */
+#define TX_DEF 0x04 /* Deferred */
+#define TX_STP 0x02 /* Start of Packet */
+#define TX_ENP 0x01 /* End of Packet */
+
+/*
+ * Transmit status (2) (valid if TX_ERR == 1)
+ */
+
+#define TX_BUFF 0x8000 /* Buffering error (no ENP) */
+#define TX_UFLO 0x4000 /* Underflow (late memory) */
+#define TX_LCOL 0x1000 /* Late collision */
+#define TX_LCAR 0x0400 /* Loss of Carrier */
+#define TX_RTRY 0x0200 /* Failed after 16 retransmissions */
+#define TX_TDR 0x003f /* Time-domain-reflectometer-value */
+
+
+/*
+ * Structures used for Communication with the LANCE
+ */
+
+/* LANCE Initialize Block */
+
+struct init_block
+{
+ unsigned short mode; /* Mode Register */
+ unsigned char paddr[6]; /* Physical Address (MAC) */
+ unsigned char laddr[8]; /* Logical Filter Address (not used) */
+ unsigned int rdrp; /* Receive Descriptor Ring pointer */
+ unsigned int tdrp; /* Transmit Descriptor Ring pointer */
+};
+
+
+/* Receive Message Descriptor Entry */
+
+struct rmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ volatile short blen; /* Buffer Length (two's complement) */
+ unsigned short mlen; /* Message Byte Count */
+};
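+
+/*
+ * A minimal sketch of how such a descriptor is typically handed to the
+ * LANCE (illustrative only; PKT_BUF_SZ stands for whatever buffer size the
+ * driver really uses):
+ *
+ *   rmdp->u.buffer = (unsigned long) rx_buffer; // 24-bit buffer address
+ *   rmdp->blen = -PKT_BUF_SZ;                   // length, two's complement
+ *   rmdp->mlen = 0;
+ *   rmdp->u.s.status = RX_OWN;                  // give it to the LANCE
+ */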
+
+
+/* Transmit Message Descriptor Entry */
+
+struct tmd
+{
+ union
+ {
+ unsigned long buffer; /* Address of buffer */
+ struct
+ {
+ unsigned char unused[3];
+ unsigned volatile char status; /* Status Bits */
+ } s;
+ } u;
+ unsigned short blen; /* Buffer Length (two's complement) */
+ unsigned volatile short status2; /* Error Status Bits */
+};
+
+#endif /* End of SK_G16_H */
diff --git a/linux/src/drivers/net/smc-ultra.c b/linux/src/drivers/net/smc-ultra.c
new file mode 100644
index 0000000..f593aeb
--- /dev/null
+++ b/linux/src/drivers/net/smc-ultra.c
@@ -0,0 +1,496 @@
+/* smc-ultra.c: A SMC Ultra ethernet driver for linux. */
+/*
+ This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards.
+
+ Written 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This driver uses the cards in the 8390-compatible mode.
+ Most of the run-time complexity is handled by the generic code in
+ 8390.c. The code in this file is responsible for
+
+ ultra_probe() Detecting and initializing the card.
+ ultra_probe1()
+
+ ultra_open() The card-specific details of starting, stopping
+ ultra_reset_8390() and resetting the 8390 NIC core.
+ ultra_close()
+
+ ultra_block_input() Routines for reading and writing blocks of
+ ultra_block_output() packet buffer memory.
+ ultra_pio_input()
+ ultra_pio_output()
+
+ This driver enables the shared memory only when doing the actual data
+ transfers to avoid a bug in early versions of the card that corrupted
+ data transferred by an AHA1542.
+
+ This driver now supports the programmed-I/O (PIO) data transfer mode of
+ the EtherEZ. It does not use the non-8390-compatible "Altego" mode.
+ That support (if available) is in smc-ez.c.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users.
+ Donald Becker : 4/17/96 PIO support, minor potential problems avoided.
+ Donald Becker : 6/6/96 correctly set auto-wrap bit.
+*/
+
+static const char *version =
+ "smc-ultra.c:v2.02 2/3/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ultra_portlist[] =
+{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0};
+
+int ultra_probe(struct device *dev);
+int ultra_probe1(struct device *dev, int ioaddr);
+
+static int ultra_open(struct device *dev);
+static void ultra_reset_8390(struct device *dev);
+static void ultra_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static void ultra_pio_get_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra_pio_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra_pio_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int ultra_close_card(struct device *dev);
+
+
+#define START_PG 0x00 /* First page of TX buffer */
+
+#define ULTRA_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA_RESET 0x80 /* Board reset, in ULTRA_CMDREG. */
+#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */
+#define IOPD 0x02 /* I/O Pipe Data (16 bits), PIO operation. */
+#define IOPA 0x07 /* I/O Pipe Address for PIO operation. */
+#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+/* Probe for the Ultra. This looks like a 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+#ifdef HAVE_DEVLIST
+struct netdev_entry ultra_drv =
+{"ultra", ultra_probe1, NETCARD_IO_EXTENT, netcard_portlist};
+#else
+
+int ultra_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return ultra_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; ultra_portlist[i]; i++) {
+ int ioaddr = ultra_portlist[i];
+ if (check_region(ioaddr, ULTRA_IO_EXTENT))
+ continue;
+ if (ultra_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+int ultra_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ const char *model_name;
+ unsigned char eeprom_irq = 0;
+ static unsigned version_printed = 0;
+ /* Values from various config regs. */
+ unsigned char num_pages, irqreg, addr, piomode;
+ unsigned char idreg = inb(ioaddr + 7);
+ unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xF0) != 0x20 /* SMC Ultra */
+ && (idreg & 0xF0) != 0x40) /* SMC EtherEZ */
+ return ENODEV;
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xFF)
+ return ENODEV;
+
+ if (dev == NULL)
+ dev = init_etherdev(0, 0);
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
+
+ printk("%s: %s at %#3x,", dev->name, model_name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches at reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+ piomode = inb(ioaddr + 0x8);
+ addr = inb(ioaddr + 0xb);
+ irqreg = inb(ioaddr + 0xd);
+
+ /* Switch back to the station address register set so that the MS-DOS driver
+ can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq;
+
+ /* The IRQ bits are split. */
+ irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)];
+
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ return -EAGAIN;
+ }
+ dev->irq = irq;
+ eeprom_irq = 1;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (", no memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* OK, we are certain this is going to work. Setup the device. */
+ request_region(ioaddr, ULTRA_IO_EXTENT, model_name);
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
+
+ {
+ int addr_tbl[4] = {0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000};
+ short num_pages_tbl[4] = {0x20, 0x40, 0x80, 0xff};
+
+ dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
+ num_pages = num_pages_tbl[(addr >> 4) & 3];
+ }
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = START_PG;
+ ei_status.rx_start_page = START_PG + TX_PAGES;
+ ei_status.stop_page = num_pages;
+
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end
+ = dev->mem_start + (ei_status.stop_page - START_PG)*256;
+
+ if (piomode) {
+ printk(",%s IRQ %d programmed-I/O mode.\n",
+ eeprom_irq ? "EEPROM" : "assigned ", dev->irq);
+ ei_status.block_input = &ultra_pio_input;
+ ei_status.block_output = &ultra_pio_output;
+ ei_status.get_8390_hdr = &ultra_pio_get_hdr;
+ } else {
+ printk(",%s IRQ %d memory %#lx-%#lx.\n", eeprom_irq ? "" : "assigned ",
+ dev->irq, dev->mem_start, dev->mem_end-1);
+ ei_status.block_input = &ultra_block_input;
+ ei_status.block_output = &ultra_block_output;
+ ei_status.get_8390_hdr = &ultra_get_8390_hdr;
+ }
+ ei_status.reset_8390 = &ultra_reset_8390;
+ dev->open = &ultra_open;
+ dev->stop = &ultra_close_card;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static int
+ultra_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40,
+ 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, };
+
+ if (request_irq(dev->irq, ei_interrupt, 0, ei_status.name, dev))
+ return -EAGAIN;
+
+ outb(0x00, ioaddr); /* Disable shared memory for safety. */
+ outb(0x80, ioaddr + 5);
+ /* Set the IRQ line. */
+ outb(inb(ioaddr + 4) | 0x80, ioaddr + 4);
+ outb((inb(ioaddr + 13) & ~0x4C) | irq2reg[dev->irq], ioaddr + 13);
+ outb(inb(ioaddr + 4) & 0x7f, ioaddr + 4);
+
+ if (ei_status.block_input == &ultra_pio_input) {
+ outb(0x11, ioaddr + 6); /* Enable interrupts and PIO. */
+ outb(0x01, ioaddr + 0x19); /* Enable ring read auto-wrap. */
+ } else
+ outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+ultra_reset_8390(struct device *dev)
+{
+ int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA_RESET, cmd_port);
+ if (ei_debug > 1) printk("resetting Ultra, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(0x00, cmd_port); /* Disable shared memory for safety. */
+ outb(0x80, cmd_port + 5);
+ if (ei_status.block_input == &ultra_pio_input)
+ outb(0x11, cmd_port + 6); /* Enable interrupts and PIO. */
+ else
+ outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+ultra_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ unsigned long hdr_start = dev->mem_start + ((ring_page - START_PG)<<8);
+
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */
+}
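+
+/* The single readl() above works because the 8390 packet header is exactly
+   four bytes; a sketch of its layout (as defined in 8390.h):
+
+     struct e8390_pkt_hdr {
+         unsigned char  status;    // copy of the receive status register
+         unsigned char  next;      // page number of the next packet
+         unsigned short count;     // packet length, header included
+     };
+*/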
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps. */
+
+static void
+ultra_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + ring_offset - (START_PG<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+static void
+ultra_block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ unsigned long shmem = dev->mem_start + ((start_page - START_PG)<<8);
+
+ /* Enable shared memory. */
+ outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET);
+
+ memcpy_toio(shmem, buf, count);
+
+ outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
+}
+
+/* The identical operations for programmed I/O cards.
+ The PIO model is trivial to use: the 16 bit start address is written
+ byte-sequentially to IOPA, with no intervening I/O operations, and the
+ data is read or written to the IOPD data port.
+ The only potential complication is that the address register is shared
+ and must always be rewritten between each read/write direction change.
+ This is no problem for us, as the 8390 code ensures that we are single
+ threaded. */
+static void ultra_pio_get_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_page, ioaddr + IOPA);
+ insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+}
+
+static void ultra_pio_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ char *buf = skb->data;
+
+ /* For now set the address again, although it should already be correct. */
+ outb(ring_offset, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(ring_offset >> 8, ioaddr + IOPA);
+ /* We know skbuffs are padded to at least word alignment. */
+ insw(ioaddr + IOPD, buf, (count+1)>>1);
+}
+
+static void ultra_pio_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */
+ outb(start_page, ioaddr + IOPA);
+ /* An extra odd byte is OK here as well. */
+ outsw(ioaddr + IOPD, buf, (count+1)>>1);
+}
+
+static int
+ultra_close_card(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+ irq2dev_map[dev->irq] = 0;
+
+ NS8390_init(dev, 0);
+
+ /* We should someday disable shared memory and change to 8-bit mode
+ "just in case"... */
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_ULTRA_CARDS] = { 0, };
+static struct device dev_ultra[MAX_ULTRA_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_ULTRA_CARDS] = { 0, };
+static int irq[MAX_ULTRA_CARDS] = { 0, };
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->init = ultra_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
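+
+/* A typical load line for the options above (an illustrative sketch; io=
+   and irq= are assumed to map onto the io[] and irq[] arrays):
+
+     insmod smc-ultra.o io=0x280 irq=10
+*/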
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: ultra_close_card() does free_irq + irq2dev */
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(ioaddr, ULTRA_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -Wall -O6 -I/usr/src/linux/net/inet -c smc-ultra.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/smc-ultra32.c b/linux/src/drivers/net/smc-ultra32.c
new file mode 100644
index 0000000..6cde4c2
--- /dev/null
+++ b/linux/src/drivers/net/smc-ultra32.c
@@ -0,0 +1,413 @@
+/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux.
+
+Sources:
+
+ This driver is based on (cloned from) the ISA SMC Ultra driver
+ written by Donald Becker. Modifications to support the EISA
+ version of the card by Paul Gortmaker and Leonard N. Zubkoff.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+Theory of Operation:
+
+ The SMC Ultra32C card uses the SMC 83c790 chip which is also
+ found on the ISA SMC Ultra cards. It has a shared memory mode of
+ operation that makes it similar to the ISA version of the card.
+ The main difference is that the EISA card has 32KB of RAM, but
+ only an 8KB window into that memory. The EISA card also can be
+ set for a bus-mastering mode of operation via the ECU, but that
+ is not (and probably will never be) supported by this driver.
+ The ECU should be run to enable shared memory and to disable the
+ bus-mastering feature for use with linux.
+
+ By programming the 8390 to use only 8KB RAM, the modifications
+ to the ISA driver can be limited to the probe and initialization
+ code. This allows easy integration of EISA support into the ISA
+ driver. However, the driver development kit from SMC provided the
+ register information for sliding the 8KB window, and hence the 8390
+ is programmed to use the full 32KB RAM.
+
+ Unfortunately this required code changes outside the probe/init
+ routines, and thus we decided to separate the EISA driver from
+ the ISA one. In this way, ISA users don't end up with a larger
+ driver due to the EISA code, and EISA users don't end up with a
+ larger driver due to the ISA EtherEZ PIO code. The driver is
+ similar to the 3c503/16 driver, in that the window must be set
+ back to the 1st 8KB of space for access to the two 8390 Tx slots.
+
+ In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to
+ be a limiting factor, since the EISA bus could get packets off
+ the card fast enough, but having the use of lots of RAM as Rx
+ space is extra insurance if interrupt latencies become excessive.
+
+*/
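+
+/*
+   A minimal sketch of the window arithmetic this implies (illustrative;
+   the real code is in the get_8390_hdr/block_input routines below):
+
+     window = (ring_page & 0x60) >> 5;      // 32 pages x 256 bytes = 8KB
+     offset = (ring_page & 0x1f) << 8;      // position inside that window
+     outb(ei_status.reg0 | window, RamReg); // slide the 8KB window
+     addr   = dev->mem_start + offset;      // CPU address of the page
+*/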
+
+static const char *version = "smc-ultra32.c: 06/97 v1.00\n";
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+int ultra32_probe(struct device *dev);
+int ultra32_probe1(struct device *dev, int ioaddr);
+static int ultra32_open(struct device *dev);
+static void ultra32_reset_8390(struct device *dev);
+static void ultra32_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void ultra32_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void ultra32_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int ultra32_close(struct device *dev);
+
+#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */
+#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */
+#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */
+#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
+#define ULTRA32_IO_EXTENT 32
+#define EN0_ERWCNT 0x08 /* Early receive warning count. */
+
+/*
+ * Defines that apply only to the Ultra32 EISA card. Note that
+ * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates
+ * into an EISA ID of 0x1080A34D
+ */
+#define ULTRA32_BASE 0xca0
+#define ULTRA32_ID 0x1080a34d
+#define ULTRA32_IDPORT (-0x20) /* 0xc80 */
+/* Config regs 1->7 from the EISA !SMC8010.CFG file. */
+#define ULTRA32_CFG1 0x04 /* 0xca4 */
+#define ULTRA32_CFG2 0x05 /* 0xca5 */
+#define ULTRA32_CFG3 (-0x18) /* 0xc88 */
+#define ULTRA32_CFG4 (-0x17) /* 0xc89 */
+#define ULTRA32_CFG5 (-0x16) /* 0xc8a */
+#define ULTRA32_CFG6 (-0x15) /* 0xc8b */
+#define ULTRA32_CFG7 0x0d /* 0xcad */
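+
+/*
+ * How that ID is derived (a worked sketch, assuming the usual EISA scheme
+ * of packing three letters into 5 bits each, with 'a' = 1):
+ *
+ *   's' = 19 = 10011b, 'm' = 13 = 01101b, 'c' = 3 = 00011b
+ *   -> 100110110100011b = 0x4da3 (manufacturer), product/rev = 0x8010
+ *   -> bytes 0x4d 0xa3 0x80 0x10 at the ID port are read little-endian
+ *      by inl() as 0x1080a34d, which is ULTRA32_ID above.
+ */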
+
+
+/* Probe for the Ultra32. This looks like a 8013 with the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+ following.
+*/
+
+int ultra32_probe(struct device *dev)
+{
+ const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
+ int ioaddr, edge, media;
+
+ if (!EISA_bus) return ENODEV;
+
+ /* EISA spec allows for up to 16 slots, but 8 is typical. */
+ for (ioaddr = 0x1000 + ULTRA32_BASE; ioaddr < 0x9000; ioaddr += 0x1000)
+ if (check_region(ioaddr, ULTRA32_IO_EXTENT) == 0 &&
+ inb(ioaddr + ULTRA32_IDPORT) != 0xff &&
+ inl(ioaddr + ULTRA32_IDPORT) == ULTRA32_ID) {
+ media = inb(ioaddr + ULTRA32_CFG7) & 0x03;
+ edge = inb(ioaddr + ULTRA32_CFG5) & 0x08;
+ printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n",
+ ioaddr >> 12, ifmap[media],
+ (edge ? "Edge Triggered" : "Level Sensitive"));
+ if (ultra32_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+ return ENODEV;
+}
+
+int ultra32_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ const char *model_name;
+ static unsigned version_printed = 0;
+ /* Values from various config regs. */
+ unsigned char idreg = inb(ioaddr + 7);
+ unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
+
+ /* Check the ID nibble. */
+ if ((idreg & 0xf0) != 0x20) /* SMC Ultra */
+ return ENODEV;
+
+ /* Select the station address register set. */
+ outb(reg4, ioaddr + 4);
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if ((checksum & 0xff) != 0xff)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("smc-ultra32.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ model_name = "SMC Ultra32";
+
+ printk("%s: %s at 0x%X,", dev->name, model_name, ioaddr);
+
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* Switch from the station address to the alternate register set and
+ read the useful registers there. */
+ outb(0x80 | reg4, ioaddr + 4);
+
+ /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
+ outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
+
+ /* Reset RAM addr. */
+ outb(0x00, ioaddr + 0x0b);
+
+ /* Switch back to the station address register set so that the
+ MS-DOS driver can find the card after a warm boot. */
+ outb(reg4, ioaddr + 4);
+
+ if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) {
+ printk("\nsmc-ultra32: Card RAM is disabled! "
+ "Run EISA config utility.\n");
+ return ENODEV;
+ }
+ if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0)
+ printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. "
+ "Run EISA config utility.\n");
+
+ if (dev->irq < 2) {
+ unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
+ int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07];
+ if (irq == 0) {
+ printk(", failed to detect IRQ line.\n");
+ return -EAGAIN;
+ }
+ dev->irq = irq;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (", no memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+
+ /* OK, we are certain this is going to work. Setup the device. */
+ request_region(ioaddr, ULTRA32_IO_EXTENT, model_name);
+
+ /* The 8390 isn't at the base address, so fake the offset */
+ dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET;
+
+ /* Save RAM address in the unused reg0 to avoid excess inb's. */
+ ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc;
+
+ dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11);
+
+ ei_status.name = model_name;
+ ei_status.word16 = 1;
+ ei_status.tx_start_page = 0;
+ ei_status.rx_start_page = TX_PAGES;
+ /* All Ultra32 cards have 32KB memory with an 8KB window. */
+ ei_status.stop_page = 128;
+
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+ dev->mem_end = dev->rmem_end = dev->mem_start + 0x1fff;
+
+ printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n",
+ dev->irq, dev->mem_start, dev->mem_end);
+ ei_status.block_input = &ultra32_block_input;
+ ei_status.block_output = &ultra32_block_output;
+ ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
+ ei_status.reset_8390 = &ultra32_reset_8390;
+ dev->open = &ultra32_open;
+ dev->stop = &ultra32_close;
+ NS8390_init(dev, 0);
+
+ return 0;
+}
+
+static int ultra32_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
+
+ if (request_irq(dev->irq, ei_interrupt, 0, ei_status.name, dev))
+ return -EAGAIN;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ /* Set the early receive warning level in window 0 high enough not
+ to receive ERW interrupts. */
+ outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
+ outb(0xff, dev->base_addr + EN0_ERWCNT);
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int ultra32_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+
+ outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */
+ outb(0x00, ioaddr + 6); /* Disable interrupts. */
+ free_irq(dev->irq, dev);
+ irq2dev_map[dev->irq] = 0;
+
+ NS8390_init(dev, 0);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static void ultra32_reset_8390(struct device *dev)
+{
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */
+
+ outb(ULTRA32_RESET, ioaddr);
+ if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies);
+ ei_status.txing = 0;
+
+ outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
+ outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
+ outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
+ outb(0x01, ioaddr + 6); /* Enable Interrupts. */
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void ultra32_get_8390_hdr(struct device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
+{
+ unsigned long hdr_start = dev->mem_start + ((ring_page & 0x1f) << 8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select correct 8KB Window. */
+ outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, the only
+ complication is when the ring buffer wraps, or in this case, when a
+ packet spans an 8KB boundary. Note that the current 8KB segment is
+ already set by the get_8390_hdr routine. */
+
+static void ultra32_block_input(struct device *dev,
+ int count,
+ struct sk_buff *skb,
+ int ring_offset)
+{
+ unsigned long xfer_start = dev->mem_start + (ring_offset & 0x1fff);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) {
+ int semi_count = 8192 - (ring_offset & 0x1FFF);
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ if (ring_offset < 96*256) {
+ /* Select next 8KB Window. */
+ ring_offset += semi_count;
+ outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg);
+ memcpy_fromio(skb->data + semi_count, dev->mem_start, count);
+ } else {
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ }
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+}
+
+static void ultra32_block_output(struct device *dev,
+ int count,
+ const unsigned char *buf,
+ int start_page)
+{
+ unsigned long xfer_start = dev->mem_start + (start_page<<8);
+ unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
+
+ /* Select first 8KB Window. */
+ outb(ei_status.reg0, RamReg);
+
+ memcpy_toio(xfer_start, buf, count);
+}
+
+#ifdef MODULE
+#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_ULTRA32_CARDS] = { 0, };
+static struct device dev_ultra[MAX_ULTRA32_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+int init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->init = ultra32_probe;
+ if (register_netdev(dev) != 0) {
+ if (found > 0) return 0; /* Got at least one. */
+ printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n");
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
+ struct device *dev = &dev_ultra[this_dev];
+ if (dev->priv != NULL) {
+ /* NB: ultra32_close_card() does free_irq + irq2dev */
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ release_region(ioaddr, ULTRA32_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
diff --git a/linux/src/drivers/net/smc9194.c b/linux/src/drivers/net/smc9194.c
new file mode 100644
index 0000000..e3d648d
--- /dev/null
+++ b/linux/src/drivers/net/smc9194.c
@@ -0,0 +1,1779 @@
+/*------------------------------------------------------------------------
+ . smc9194.c
+ . This is a driver for SMC's 9000 series of Ethernet cards.
+ .
+ . Copyright (C) 1996 by Erik Stahlman
+ . This software may be used and distributed according to the terms
+ . of the GNU Public License, incorporated herein by reference.
+ .
+ . "Features" of the SMC chip:
+ . 4608 byte packet memory. ( for the 91C92. Others have more )
+ . EEPROM for configuration
+ . AUI/TP selection ( mine has 10Base2/10BaseT select )
+ .
+ . Arguments:
+ . io = for the base address
+ . irq = for the IRQ
+ . ifport = 0 for autodetect, 1 for TP, 2 for AUI ( or 10base2 )
+ .
+ . author:
+ . Erik Stahlman ( erik@vt.edu )
+ .
+ . Hardware multicast code from Peter Cammaert ( pc@denkart.be )
+ .
+ . Sources:
+ . o SMC databook
+ . o skeleton.c by Donald Becker ( becker@cesdis.gsfc.nasa.gov )
+ . o ( a LOT of advice from Becker as well )
+ .
+ . History:
+ . 12/07/95 Erik Stahlman written, got receive/xmit handled
+ . 01/03/96 Erik Stahlman worked out some bugs, actually usable!!! :-)
+ . 01/06/96 Erik Stahlman cleaned up some, better testing, etc
+ . 01/29/96 Erik Stahlman fixed autoirq, added multicast
+ . 02/01/96 Erik Stahlman 1. disabled all interrupts in smc_reset
+ . 2. got rid of post-decrementing bug -- UGH.
+ . 02/13/96 Erik Stahlman Tried to fix autoirq failure. Added more
+ . descriptive error messages.
+ . 02/15/96 Erik Stahlman Fixed typo that caused detection failure
+ . 02/23/96 Erik Stahlman Modified it to fit into kernel tree
+ . Added support to change hardware address
+ . Cleared stats on opens
+ . 02/26/96 Erik Stahlman Trial support for Kernel 1.2.13
+ . Kludge for automatic IRQ detection
+ . 03/04/96 Erik Stahlman Fixed kernel 1.3.70 +
+ . Fixed bug reported by Gardner Buchanan in
+ . smc_enable, with outw instead of outb
+ . 03/06/96 Erik Stahlman Added hardware multicast from Peter Cammaert
+ ----------------------------------------------------------------------------*/
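+
+/*------------------------------------------------------------------------
+ . A typical invocation of the arguments listed above (an illustrative
+ . sketch only; it assumes the module exposes them under exactly these
+ . names):
+ .
+ . insmod smc9194.o io=0x300 irq=10 ifport=1
+ -------------------------------------------------------------------------*/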
+
+static const char *version =
+ "smc9194.c:v0.12 03/06/96 by Erik Stahlman (erik@vt.edu)\n";
+
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "smc9194.h"
+/*------------------------------------------------------------------------
+ .
+ . Configuration options, for the experienced user to change.
+ .
+ -------------------------------------------------------------------------*/
+
+/*
+ . this is for kernels > 1.2.70
+*/
+#define REALLY_NEW_KERNEL
+#ifndef REALLY_NEW_KERNEL
+#define free_irq( x, y ) free_irq( x )
+#define request_irq( x, y, z, u, v ) request_irq( x, y, z, u )
+#endif
+
+/*
+ . Do you want to use this with old kernels?
+ . WARNING: this is not well tested.
+#define SUPPORT_OLD_KERNEL
+*/
+
+
+/*
+ . Do you want to use 32 bit xfers? This should work on all chips, as
+ . the chipset is designed to accommodate them.
+*/
+#define USE_32_BIT 1
+
+/*
+ .The SMC9194 can be at any of the following port addresses. To support
+ .a slightly different card, you can add its address to the array. Keep in
+ .mind that the array must end in zero.
+*/
+static unsigned int smc_portlist[] =
+ { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0,
+ 0x300, 0x320, 0x340, 0x360, 0x380, 0x3A0, 0x3C0, 0x3E0, 0};
+
+/*
+ . Wait time for memory to be free. This probably shouldn't be
+ . tuned that much, as waiting for this means nothing else happens
+ . in the system
+*/
+#define MEMORY_WAIT_TIME 16
+
+/*
+ . DEBUGGING LEVELS
+ .
+ . 0 for normal operation
+ . 1 for slightly more details
+ . >2 for various levels of increasingly useless information
+ . 2 for interrupt tracking, status flags
+ . 3 for packet dumps, etc.
+*/
+#define SMC_DEBUG 0
+
+#if (SMC_DEBUG > 2 )
+#define PRINTK3(x) printk x
+#else
+#define PRINTK3(x)
+#endif
+
+#if SMC_DEBUG > 1
+#define PRINTK2(x) printk x
+#else
+#define PRINTK2(x)
+#endif
+
+#if SMC_DEBUG > 0
+#define PRINTK(x) printk x
+#else
+#define PRINTK(x)
+#endif
+
+
+/* the older versions of the kernel cannot support autoprobing */
+#ifdef SUPPORT_OLD_KERNEL
+#define NO_AUTOPROBE
+#endif
+
+
+/*------------------------------------------------------------------------
+ .
+ . The internal workings of the driver. If you are changing anything
+ . here with the SMC stuff, you should have the datasheet and know
+ . what you are doing.
+ .
+ -------------------------------------------------------------------------*/
+#define CARDNAME "SMC9194"
+
+#ifdef SUPPORT_OLD_KERNEL
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+/* store this information for the driver.. */
+struct smc_local {
+ /*
+ these are things that the kernel wants me to keep, so users
+ can find out semi-useless statistics of how well the card is
+ performing
+ */
+ struct enet_statistics stats;
+
+ /*
+ If I have to wait until memory is available to send
+ a packet, I will store the skbuff here, until I get the
+ desired memory. Then, I'll send it out and free it.
+ */
+ struct sk_buff * saved_skb;
+
+ /*
+ . This keeps track of how many packets I have
+ . sent out. When a TX_EMPTY interrupt comes, I know
+ . that all of these have been sent.
+ */
+ int packets_waiting;
+};
+
+
+/*-----------------------------------------------------------------
+ .
+ . The driver can be entered at any of the following entry points.
+ .
+ .------------------------------------------------------------------ */
+
+/*
+ . This is called by register_netdev(). It is responsible for
+ . checking the portlist for the SMC9000 series chipset. If it finds
+ . one, then it will initialize the device, find the hardware information,
+ . and set up the appropriate device parameters.
+ . NOTE: Interrupts are *OFF* when this procedure is called.
+ .
+ . NB:This shouldn't be static since it is referred to externally.
+*/
+int smc_init(struct device *dev);
+
+/*
+ . The kernel calls this function when someone wants to use the device,
+ . typically 'ifconfig ethX up'.
+*/
+static int smc_open(struct device *dev);
+
+/*
+ . This is called by the kernel to send a packet out into the net. It is
+ . responsible for doing a best-effort send, but if it is simply not possible
+ . to send it, the packet gets dropped.
+*/
+static int smc_send_packet(struct sk_buff *skb, struct device *dev);
+
+/*
+ . This is called by the kernel in response to 'ifconfig ethX down'. It
+ . is responsible for cleaning up everything that the open routine
+ . does, and maybe putting the card into a powerdown state.
+*/
+static int smc_close(struct device *dev);
+
+/*
+ . This routine allows the proc file system to query the driver's
+ . statistics.
+*/
+static struct enet_statistics * smc_query_statistics( struct device *dev);
+
+/*
+ . Finally, a call to set promiscuous mode ( for TCPDUMP and related
+ . programs ) and multicast modes.
+*/
+#ifdef SUPPORT_OLD_KERNEL
+static void smc_set_multicast_list(struct device *dev, int num_addrs,
+ void *addrs);
+#else
+static void smc_set_multicast_list(struct device *dev);
+#endif
+
+/*---------------------------------------------------------------
+ .
+ . Interrupt level calls..
+ .
+ ----------------------------------------------------------------*/
+
+/*
+ . Handles the actual interrupt
+*/
+#ifdef REALLY_NEW_KERNEL
+static void smc_interrupt(int irq, void *, struct pt_regs *regs);
+#else
+static void smc_interrupt(int irq, struct pt_regs *regs);
+#endif
+/*
+ . This is a separate procedure to handle the receipt of a packet, to
+ . leave the interrupt code looking slightly cleaner
+*/
+inline static void smc_rcv( struct device *dev );
+/*
+ . This handles a TX interrupt, which is only raised when a packet
+ . has been sent with an error.
+*/
+inline static void smc_tx( struct device * dev );
+
+/*
+ ------------------------------------------------------------
+ .
+ . Internal routines
+ .
+ ------------------------------------------------------------
+*/
+
+/*
+ . Test if a given location contains a chip, trying to cause as
+ . little damage as possible if it's not a SMC chip.
+*/
+static int smc_probe( int ioaddr );
+
+/*
+ . this routine initializes the card's hardware, prints out the configuration
+ . to the system log as well as the vanity message, and handles the setup
+ . of a device parameter.
+ . It will give an error if it can't initialize the card.
+*/
+static int smc_initcard( struct device *, int ioaddr );
+
+/*
+ . A rather simple routine to print out a packet for debugging purposes.
+*/
+#if SMC_DEBUG > 2
+static void print_packet( byte *, int );
+#endif
+
+#define tx_done(dev) 1
+
+/* this is called to actually send the packet to the chip */
+static void smc_hardware_send_packet( struct device * dev );
+
+/* Since I am not sure if I will have enough room in the chip's ram
+ . to store the packet, I call this routine, which either sends it
+ . now, or generates an interrupt when the card is ready for the
+ . packet */
+static int smc_wait_to_send_packet( struct sk_buff * skb, struct device *dev );
+
+/* this does a soft reset on the device */
+static void smc_reset( int ioaddr );
+
+/* Enable Interrupts, Receive, and Transmit */
+static void smc_enable( int ioaddr );
+
+/* this puts the device in an inactive state */
+static void smc_shutdown( int ioaddr );
+
+#ifndef NO_AUTOPROBE
+/* This routine will find the IRQ of the driver if one is not
+ . specified in the input to the device. */
+static int smc_findirq( int ioaddr );
+#endif
+
+/*
+ this routine will set the hardware multicast table to the specified
+ values given it by the higher level routines
+*/
+#ifndef SUPPORT_OLD_KERNEL
+static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * );
+static int crc32( char *, int );
+#endif
+
+#ifdef SUPPORT_OLD_KERNEL
+extern struct device *init_etherdev(struct device *dev, int sizeof_private,
+ unsigned long *mem_startp );
+#endif
+
+/*
+ . Function: smc_reset( int ioaddr )
+ . Purpose:
+ . This sets the SMC91xx chip to its normal state, hopefully from whatever
+ . mess that any other DOS driver has put it in.
+ .
+ . Maybe I should reset more registers to defaults in here? SOFTRESET should
+ . do that for me.
+ .
+ . Method:
+ . 1. send a SOFT RESET
+ . 2. wait for it to finish
+ . 3. enable autorelease mode
+ . 4. reset the memory management unit
+ . 5. clear all interrupts
+ .
+*/
+static void smc_reset( int ioaddr )
+{
+ /* This resets the registers mostly to defaults, but doesn't
+ affect EEPROM. That seems unnecessary */
+ SMC_SELECT_BANK( 0 );
+ outw( RCR_SOFTRESET, ioaddr + RCR );
+
+ /* this should pause enough for the chip to be happy */
+ SMC_DELAY( );
+
+ /* Set the transmit and receive configuration registers to
+ default values */
+ outw( RCR_CLEAR, ioaddr + RCR );
+ outw( TCR_CLEAR, ioaddr + TCR );
+
+ /* set the control register to automatically
+ release successfully transmitted packets, to make the best
+ use out of our limited memory */
+ SMC_SELECT_BANK( 1 );
+ outw( inw( ioaddr + CONTROL ) | CTL_AUTO_RELEASE , ioaddr + CONTROL );
+
+ /* Reset the MMU */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_RESET, ioaddr + MMU_CMD );
+
+ /* Note: It doesn't seem that waiting for the MMU busy is needed here,
+ but this is a place where future chipsets _COULD_ break. Be wary
+ of issuing another MMU command right after this */
+
+ outb( 0, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_enable
+ . Purpose: let the chip talk to the outside world
+ . Method:
+ . 1. Enable the transmitter
+ . 2. Enable the receiver
+ . 3. Enable interrupts
+*/
+static void smc_enable( int ioaddr )
+{
+ SMC_SELECT_BANK( 0 );
+ /* see the header file for options in TCR/RCR NORMAL*/
+ outw( TCR_NORMAL, ioaddr + TCR );
+ outw( RCR_NORMAL, ioaddr + RCR );
+
+ /* now, enable interrupts */
+ SMC_SELECT_BANK( 2 );
+ outb( SMC_INTERRUPT_MASK, ioaddr + INT_MASK );
+}
+
+/*
+ . Function: smc_shutdown
+ . Purpose: closes down the SMC91xxx chip.
+ . Method:
+ . 1. zero the interrupt mask
+ . 2. clear the enable receive flag
+ . 3. clear the enable xmit flags
+ .
+ . TODO:
+ . (1) maybe utilize power down mode.
+ . Why not yet? Because while the chip will go into power down mode,
+ . the manual says that it will wake up in response to any I/O requests
+ . in the register space. Empirical results do not show this working.
+*/
+static void smc_shutdown( int ioaddr )
+{
+ /* no more interrupts for me */
+ SMC_SELECT_BANK( 2 );
+ outb( 0, ioaddr + INT_MASK );
+
+ /* and tell the card to stay away from that nasty outside world */
+ SMC_SELECT_BANK( 0 );
+ outb( RCR_CLEAR, ioaddr + RCR );
+ outb( TCR_CLEAR, ioaddr + TCR );
+#if 0
+ /* finally, shut the chip down */
+ SMC_SELECT_BANK( 1 );
+	outw( inw( ioaddr + CONTROL ) | CTL_POWERDOWN, ioaddr + CONTROL );
+#endif
+}
+
+
+#ifndef SUPPORT_OLD_KERNEL
+/*
+ . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds )
+ . Purpose:
+ . This sets the internal hardware table to filter out unwanted multicast
+ . packets before they take up memory.
+ .
+ . The SMC chip uses a hash table where the high 6 bits of the CRC of
+ . address are the offset into the table. If that bit is 1, then the
+ . multicast packet is accepted. Otherwise, it's dropped silently.
+ .
+ . To use the 6 bits as an offset into the table, the high 3 bits are the
+ . number of the 8 bit register, while the low 3 bits are the bit within
+ . that register.
+ .
+ . This routine is based very heavily on the one provided by Peter Cammaert.
+*/
+
+
+static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * addrs ) {
+ int i;
+ unsigned char multicast_table[ 8 ];
+ struct dev_mc_list * cur_addr;
+ /* table for flipping the order of 3 bits */
+ unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+
+ /* start with a table of all zeros: reject all */
+ memset( multicast_table, 0, sizeof( multicast_table ) );
+
+ cur_addr = addrs;
+ for ( i = 0; i < count ; i ++, cur_addr = cur_addr->next ) {
+ int position;
+
+ /* do we have a pointer here? */
+ if ( !cur_addr )
+ break;
+ /* make sure this is a multicast address - shouldn't this
+ be a given if we have it here ? */
+ if ( !( *cur_addr->dmi_addr & 1 ) )
+ continue;
+
+ /* only use the low order bits */
+ position = crc32( cur_addr->dmi_addr, 6 ) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert3[position&7]] |=
+ (1<<invert3[(position>>3)&7]);
+
+ }
+ /* now, the table can be loaded into the chipset */
+ SMC_SELECT_BANK( 3 );
+
+ for ( i = 0; i < 8 ; i++ ) {
+ outb( multicast_table[i], ioaddr + MULTICAST1 + i );
+ }
+}
+
+/*
+ Finds the CRC32 of a set of bytes.
+ Again, from Peter Cammaert's code.
+*/
+static int crc32( char * s, int length ) {
+ /* indices */
+ int perByte;
+ int perBit;
+ /* crc polynomial for Ethernet */
+ const unsigned long poly = 0xedb88320;
+ /* crc value - preinitialized to all 1's */
+ unsigned long crc_value = 0xffffffff;
+
+ for ( perByte = 0; perByte < length; perByte ++ ) {
+ unsigned char c;
+
+ c = *(s++);
+ for ( perBit = 0; perBit < 8; perBit++ ) {
+ crc_value = (crc_value>>1)^
+ (((crc_value^c)&0x01)?poly:0);
+ c >>= 1;
+ }
+ }
+ return crc_value;
+}
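+
+/* Usage sketch (illustrative, mirroring the call in smc_setmulticast above):
+ . the hash position for a multicast address is the low 6 bits of this
+ . reflected Ethernet CRC over its 6 address bytes,
+ .
+ .	position = crc32( cur_addr->dmi_addr, 6 ) & 0x3f;
+ .
+ . which is then split into a register index and a bit index (via the
+ . 3-bit reversal table) to set one bit of the 64 bit hardware filter.
+*/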
+
+#endif
+
+
+/*
+ . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct device * )
+ . Purpose:
+ . Attempt to allocate memory for a packet, if chip-memory is not
+ . available, then tell the card to generate an interrupt when it
+ . is available.
+ .
+ . Algorithm:
+ .
+ . o if the saved_skb is not currently null, then drop this packet
+ . on the floor. This should never happen, because of TBUSY.
+ . o if the saved_skb is null, then replace it with the current packet,
+ .	o See if I can send it now.
+ . o (NO): Enable interrupts and let the interrupt handler deal with it.
+ . o (YES):Send it now.
+*/
+static int smc_wait_to_send_packet( struct sk_buff * skb, struct device * dev )
+{
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+ unsigned short ioaddr = dev->base_addr;
+ word length;
+ unsigned short numPages;
+ word time_out;
+
+ if ( lp->saved_skb) {
+ /* THIS SHOULD NEVER HAPPEN. */
+ lp->stats.tx_aborted_errors++;
+ printk(CARDNAME": Bad Craziness - sent packet while busy.\n" );
+ return 1;
+ }
+ lp->saved_skb = skb;
+
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+
+ /*
+ . the MMU wants the number of pages to be the number of 256 bytes
+ . 'pages', minus 1 ( since a packet can't ever have 0 pages :) )
+ */
+ numPages = length / 256;
+
+ if (numPages > 7 ) {
+ printk(CARDNAME": Far too big packet error. \n");
+ /* freeing the packet is a good thing here... but should
+ . any packets of this size get down here? */
+ dev_kfree_skb (skb, FREE_WRITE);
+ lp->saved_skb = NULL;
+ /* this IS an error, but, i don't want the skb saved */
+ return 0;
+ }
+ /* either way, a packet is waiting now */
+ lp->packets_waiting++;
+
+ /* now, try to allocate the memory */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_ALLOC | numPages, ioaddr + MMU_CMD );
+ /*
+ . Performance Hack
+ .
+ . wait a short amount of time.. if I can send a packet now, I send
+ . it now. Otherwise, I enable an interrupt and wait for one to be
+ . available.
+ .
+ . I could have handled this a slightly different way, by checking to
+ . see if any memory was available in the FREE MEMORY register. However,
+ . either way, I need to generate an allocation, and the allocation works
+ . no matter what, so I saw no point in checking free memory.
+ */
+ time_out = MEMORY_WAIT_TIME;
+ do {
+ word status;
+
+ status = inb( ioaddr + INTERRUPT );
+ if ( status & IM_ALLOC_INT ) {
+ /* acknowledge the interrupt */
+ outb( IM_ALLOC_INT, ioaddr + INTERRUPT );
+ break;
+ }
+ } while ( -- time_out );
+
+ if ( !time_out ) {
+ /* oh well, wait until the chip finds memory later */
+ SMC_ENABLE_INT( IM_ALLOC_INT );
+ PRINTK2((CARDNAME": memory allocation deferred. \n"));
+ /* it's deferred, but I'll handle it later */
+ return 0;
+ }
+ /* or YES! I can send the packet now.. */
+ smc_hardware_send_packet(dev);
+
+ return 0;
+}
+
+/*
+ . Function: smc_hardware_send_packet(struct device * )
+ . Purpose:
+ . This sends the actual packet to the SMC9xxx chip.
+ .
+ . Algorithm:
+ . First, see if a saved_skb is available.
+ .		( this should NOT be called if there is no 'saved_skb' )
+ . Now, find the packet number that the chip allocated
+ . Point the data pointers at it in memory
+ . Set the length word in the chip's memory
+ . Dump the packet to chip memory
+ . Check if a last byte is needed ( odd length packet )
+ . if so, set the control flag right
+ . Tell the card to send it
+ . Enable the transmit interrupt, so I know if it failed
+ . Free the kernel data if I actually sent it.
+*/
+static void smc_hardware_send_packet( struct device * dev )
+{
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+ byte packet_no;
+ struct sk_buff * skb = lp->saved_skb;
+ word length;
+ unsigned short ioaddr;
+ byte * buf;
+
+ ioaddr = dev->base_addr;
+
+ if ( !skb ) {
+ PRINTK((CARDNAME": In XMIT with no packet to send \n"));
+ return;
+ }
+ length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ buf = skb->data;
+
+ /* If I get here, I _know_ there is a packet slot waiting for me */
+ packet_no = inb( ioaddr + PNR_ARR + 1 );
+ if ( packet_no & 0x80 ) {
+ /* or isn't there? BAD CHIP! */
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n");
+ kfree(skb);
+ lp->saved_skb = NULL;
+ dev->tbusy = 0;
+ return;
+ }
+
+ /* we have a packet address, so tell the card to use it */
+ outb( packet_no, ioaddr + PNR_ARR );
+
+ /* point to the beginning of the packet */
+ outw( PTR_AUTOINC , ioaddr + POINTER );
+
+ PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length ));
+#if SMC_DEBUG > 2
+ print_packet( buf, length );
+#endif
+
+ /* send the packet length ( +6 for status, length and ctl byte )
+ and the status word ( set to zeros ) */
+#ifdef USE_32_BIT
+ outl( (length +6 ) << 16 , ioaddr + DATA_1 );
+#else
+ outw( 0, ioaddr + DATA_1 );
+	/* send the packet length ( +6 for status word, length, and ctl byte ) */
+ outb( (length+6) & 0xFF,ioaddr + DATA_1 );
+ outb( (length+6) >> 8 , ioaddr + DATA_1 );
+#endif
+
+ /* send the actual data
+ . I _think_ it's faster to send the longs first, and then
+ . mop up by sending the last word. It depends heavily
+ . on alignment, at least on the 486. Maybe it would be
+ . a good idea to check which is optimal? But that could take
+ . almost as much time as is saved?
+ */
+#ifdef USE_32_BIT
+ if ( length & 0x2 ) {
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+ outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
+ }
+ else
+ outsl(ioaddr + DATA_1, buf, length >> 2 );
+#else
+ outsw(ioaddr + DATA_1 , buf, (length ) >> 1);
+#endif
+ /* Send the last byte, if there is one. */
+
+ if ( (length & 1) == 0 ) {
+ outw( 0, ioaddr + DATA_1 );
+ } else {
+ outb( buf[length -1 ], ioaddr + DATA_1 );
+ outb( 0x20, ioaddr + DATA_1);
+ }
+
+ /* enable the interrupts */
+ SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) );
+
+ /* and let the chipset deal with it */
+ outw( MC_ENQUEUE , ioaddr + MMU_CMD );
+
+ PRINTK2((CARDNAME": Sent packet of length %d \n",length));
+
+ lp->saved_skb = NULL;
+ dev_kfree_skb (skb, FREE_WRITE);
+
+ dev->trans_start = jiffies;
+
+ /* we can send another packet */
+ dev->tbusy = 0;
+
+
+ return;
+}
+
+/*-------------------------------------------------------------------------
+ |
+ | smc_init( struct device * dev )
+ | Input parameters:
+ | dev->base_addr == 0, try to find all possible locations
+ | dev->base_addr == 1, return failure code
+ | dev->base_addr == 2, always allocate space, and return success
+ | dev->base_addr == <anything else> this is the address to check
+ |
+ | Output:
+ | 0 --> there is a device
+ | anything else, error
+ |
+ ---------------------------------------------------------------------------
+*/
+int smc_init(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ /* try a specific location */
+ if (base_addr > 0x1ff) {
+ int error;
+ error = smc_probe(base_addr);
+ if ( 0 == error ) {
+ return smc_initcard( dev, base_addr );
+ }
+ return error;
+ } else {
+ if ( 0 != base_addr ) {
+ return -ENXIO;
+ }
+ }
+
+ /* check every ethernet address */
+ for (i = 0; smc_portlist[i]; i++) {
+ int ioaddr = smc_portlist[i];
+
+ /* check if the area is available */
+ if (check_region( ioaddr , SMC_IO_EXTENT))
+ continue;
+
+ /* check this specific address */
+ if ( smc_probe( ioaddr ) == 0) {
+ return smc_initcard( dev, ioaddr );
+ }
+ }
+
+ /* couldn't find anything */
+ return -ENODEV;
+}
+
+#ifndef NO_AUTOPROBE
+/*----------------------------------------------------------------------
+ . smc_findirq
+ .
+ . This routine has a simple purpose -- make the SMC chip generate an
+ . interrupt, so an auto-detect routine can detect it, and find the IRQ,
+ ------------------------------------------------------------------------
+*/
+int smc_findirq( int ioaddr )
+{
+ int timeout = 20;
+
+
+ /* I have to do a STI() here, because this is called from
+	   a routine that does a CLI during this process, making it
+ rather difficult to get interrupts for auto detection */
+ sti();
+
+ autoirq_setup( 0 );
+
+ /*
+ * What I try to do here is trigger an ALLOC_INT. This is done
+ * by allocating a small chunk of memory, which will give an interrupt
+ * when done.
+ */
+
+
+ SMC_SELECT_BANK(2);
+ /* enable ALLOCation interrupts ONLY */
+ outb( IM_ALLOC_INT, ioaddr + INT_MASK );
+
+ /*
+ . Allocate 512 bytes of memory. Note that the chip was just
+ . reset so all the memory is available
+ */
+ outw( MC_ALLOC | 1, ioaddr + MMU_CMD );
+
+ /*
+ . Wait until positive that the interrupt has been generated
+ */
+ while ( timeout ) {
+ byte int_status;
+
+ int_status = inb( ioaddr + INTERRUPT );
+
+ if ( int_status & IM_ALLOC_INT )
+ break; /* got the interrupt */
+ timeout--;
+ }
+ /* there is really nothing that I can do here if timeout fails,
+ as autoirq_report will return a 0 anyway, which is what I
+ want in this case. Plus, the clean up is needed in both
+ cases. */
+
+ /* DELAY HERE!
+ On a fast machine, the status might change before the interrupt
+ is given to the processor. This means that the interrupt was
+ never detected, and autoirq_report fails to report anything.
+ This should fix autoirq_* problems.
+ */
+ SMC_DELAY();
+ SMC_DELAY();
+
+ /* and disable all interrupts again */
+ outb( 0, ioaddr + INT_MASK );
+
+ /* clear hardware interrupts again, because that's how it
+ was when I was called... */
+ cli();
+
+ /* and return what I found */
+ return autoirq_report( 0 );
+}
+#endif
+
+/*----------------------------------------------------------------------
+ . Function: smc_probe( int ioaddr )
+ .
+ . Purpose:
+ . Tests to see if a given ioaddr points to an SMC9xxx chip.
+ . Returns a 0 on success
+ .
+ . Algorithm:
+ . (1) see if the high byte of BANK_SELECT is 0x33
+ . (2) compare the ioaddr with the base register's address
+ . (3) see if I recognize the chip ID in the appropriate register
+ .
+ .---------------------------------------------------------------------
+ */
+
+static int smc_probe( int ioaddr )
+{
+ unsigned int bank;
+ word revision_register;
+ word base_address_register;
+
+ /* First, see if the high byte is 0x33 */
+ bank = inw( ioaddr + BANK_SELECT );
+ if ( (bank & 0xFF00) != 0x3300 ) {
+ return -ENODEV;
+ }
+ /* The above MIGHT indicate a device, but I need to write to further
+ test this. */
+ outw( 0x0, ioaddr + BANK_SELECT );
+ bank = inw( ioaddr + BANK_SELECT );
+ if ( (bank & 0xFF00 ) != 0x3300 ) {
+ return -ENODEV;
+ }
+ /* well, we've already written once, so hopefully another time won't
+ hurt. This time, I need to switch the bank register to bank 1,
+ so I can access the base address register */
+ SMC_SELECT_BANK(1);
+ base_address_register = inw( ioaddr + BASE );
+ if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) ) {
+		printk(CARDNAME ": IOADDR %x doesn't match configuration (%x). "
+ "Probably not a SMC chip\n",
+ ioaddr, base_address_register >> 3 & 0x3E0 );
+ /* well, the base address register didn't match. Must not have
+ been a SMC chip after all. */
+ return -ENODEV;
+ }
+
+ /* check if the revision register is something that I recognize.
+ These might need to be added to later, as future revisions
+ could be added. */
+ SMC_SELECT_BANK(3);
+ revision_register = inw( ioaddr + REVISION );
+ if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
+ /* I don't recognize this chip, so... */
+ printk(CARDNAME ": IO %x: Unrecognized revision register:"
+ " %x, Contact author. \n", ioaddr, revision_register );
+
+ return -ENODEV;
+ }
+
+ /* at this point I'll assume that the chip is an SMC9xxx.
+ It might be prudent to check a listing of MAC addresses
+ against the hardware address, or do some other tests. */
+ return 0;
+}
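+
+/* Worked example (illustrative): with the card at ioaddr 0x300, a BASE
+ . register readback of 0x1800 passes the check above, since
+ . ( 0x1800 >> 3 ) & 0x3E0 == 0x300. Any readback that does not decode to
+ . the probed ioaddr makes smc_probe() return -ENODEV.
+*/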
+
+/*---------------------------------------------------------------
+ . Here I do typical initialization tasks.
+ .
+ . o Initialize the structure if needed
+ . o print out my vanity message if not done so already
+ . o print out what type of hardware is detected
+ . o print out the ethernet address
+ . o find the IRQ
+ . o set up my private data
+ . o configure the dev structure with my subroutines
+ . o actually GRAB the irq.
+ . o GRAB the region
+ .-----------------------------------------------------------------
+*/
+static int smc_initcard(struct device *dev, int ioaddr)
+{
+ int i;
+
+ static unsigned version_printed = 0;
+
+ /* registers */
+ word revision_register;
+ word configuration_register;
+ word memory_info_register;
+ word memory_cfg_register;
+
+ const char * version_string;
+ const char * if_string;
+ int memory;
+
+ int irqval;
+
+ /* see if I need to initialize the ethernet card structure */
+ if (dev == NULL) {
+#ifdef SUPPORT_OLD_KERNEL
+#ifndef MODULE
+/* note: the old module interface does not support this call */
+ dev = init_etherdev( 0, sizeof( struct smc_local ), 0 );
+#endif
+#else
+ dev = init_etherdev(0, 0);
+#endif
+ if (dev == NULL)
+ return -ENOMEM;
+ }
+
+ if (version_printed++ == 0)
+ printk("%s", version);
+
+ /* fill in some of the fields */
+ dev->base_addr = ioaddr;
+
+ /*
+ . Get the MAC address ( bank 1, regs 4 - 9 )
+ */
+ SMC_SELECT_BANK( 1 );
+ for ( i = 0; i < 6; i += 2 ) {
+ word address;
+
+ address = inw( ioaddr + ADDR0 + i );
+ dev->dev_addr[ i + 1] = address >> 8;
+ dev->dev_addr[ i ] = address & 0xFF;
+ }
+
+ /* get the memory information */
+
+ SMC_SELECT_BANK( 0 );
+ memory_info_register = inw( ioaddr + MIR );
+ memory_cfg_register = inw( ioaddr + MCR );
+ memory = ( memory_cfg_register >> 9 ) & 0x7; /* multiplier */
+ memory *= 256 * ( memory_info_register & 0xFF );
+
+ /*
+ Now, I want to find out more about the chip. This is sort of
+ redundant, but it's cleaner to have it in both, rather than having
+ one VERY long probe procedure.
+ */
+ SMC_SELECT_BANK(3);
+ revision_register = inw( ioaddr + REVISION );
+ version_string = chip_ids[ ( revision_register >> 4 ) & 0xF ];
+ if ( !version_string ) {
+ /* I shouldn't get here because this call was done before.... */
+ return -ENODEV;
+ }
+
+ /* is it using AUI or 10BaseT ? */
+ if ( dev->if_port == 0 ) {
+ SMC_SELECT_BANK(1);
+ configuration_register = inw( ioaddr + CONFIG );
+ if ( configuration_register & CFG_AUI_SELECT )
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+ }
+ if_string = interfaces[ dev->if_port - 1 ];
+
+ /* now, reset the chip, and put it into a known state */
+ smc_reset( ioaddr );
+
+ /*
+ . If dev->irq is 0, then the device has to be banged on to see
+ . what the IRQ is.
+ .
+ . This banging doesn't always detect the IRQ, for unknown reasons.
+	 . A workaround is to reset the chip and try again.
+ .
+ . Interestingly, the DOS packet driver *SETS* the IRQ on the card to
+ . be what is requested on the command line. I don't do that, mostly
+ . because the card that I have uses a non-standard method of accessing
+ . the IRQs, and because this _should_ work in most configurations.
+ .
+ . Specifying an IRQ is done with the assumption that the user knows
+ . what (s)he is doing. No checking is done!!!!
+ .
+ */
+#ifndef NO_AUTOPROBE
+ if ( dev->irq < 2 ) {
+ int trials;
+
+ trials = 3;
+ while ( trials-- ) {
+ dev->irq = smc_findirq( ioaddr );
+ if ( dev->irq )
+ break;
+ /* kick the card and try again */
+ smc_reset( ioaddr );
+ }
+ }
+ if (dev->irq == 0 ) {
+ printk(CARDNAME": Couldn't autodetect your IRQ. Use irq=xx.\n");
+ return -ENODEV;
+ }
+#else
+ if (dev->irq == 0 ) {
+ printk(CARDNAME
+ ": Autoprobing IRQs is not supported for old kernels.\n");
+ return -ENODEV;
+ }
+#endif
+ if (dev->irq == 2) {
+ /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
+ * or don't know which one to set.
+ */
+ dev->irq = 9;
+ }
+
+ /* now, print out the card info, in a short format.. */
+
+ printk(CARDNAME ": %s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ",
+ version_string, revision_register & 0xF, ioaddr, dev->irq,
+ if_string, memory );
+ /*
+ . Print the Ethernet address
+ */
+ printk("ADDR: ");
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i] );
+ printk("%2.2x \n", dev->dev_addr[5] );
+
+
+ /* Initialize the private structure. */
+ if (dev->priv == NULL) {
+ dev->priv = kmalloc(sizeof(struct smc_local), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ }
+ /* set the private data to zero by default */
+ memset(dev->priv, 0, sizeof(struct smc_local));
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ /* Grab the IRQ */
+ irqval = request_irq(dev->irq, &smc_interrupt, 0, CARDNAME, NULL);
+ if (irqval) {
+ printk(CARDNAME": unable to get IRQ %d (irqval=%d).\n",
+ dev->irq, irqval);
+ return -EAGAIN;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Grab the region so that no one else tries to probe our ioports. */
+ request_region(ioaddr, SMC_IO_EXTENT, CARDNAME);
+
+ dev->open = smc_open;
+ dev->stop = smc_close;
+ dev->hard_start_xmit = smc_send_packet;
+ dev->get_stats = smc_query_statistics;
+#ifdef HAVE_MULTICAST
+ dev->set_multicast_list = &smc_set_multicast_list;
+#endif
+
+ return 0;
+}
+
+#if SMC_DEBUG > 2
+static void print_packet( byte * buf, int length )
+{
+#if 0
+ int i;
+ int remainder;
+ int lines;
+
+ printk("Packet of length %d \n", length );
+ lines = length / 16;
+ remainder = length % 16;
+
+ for ( i = 0; i < lines ; i ++ ) {
+ int cur;
+
+ for ( cur = 0; cur < 8; cur ++ ) {
+ byte a, b;
+
+ a = *(buf ++ );
+ b = *(buf ++ );
+ printk("%02x%02x ", a, b );
+ }
+ printk("\n");
+ }
+ for ( i = 0; i < remainder/2 ; i++ ) {
+ byte a, b;
+
+ a = *(buf ++ );
+ b = *(buf ++ );
+ printk("%02x%02x ", a, b );
+ }
+ printk("\n");
+#endif
+}
+#endif
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc ..
+ *
+ */
+static int smc_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ int i; /* used to set hw ethernet address */
+
+ /* clear out all the junk that was put here before... */
+ memset(dev->priv, 0, sizeof(struct smc_local));
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+#ifdef MODULE
+ MOD_INC_USE_COUNT;
+#endif
+
+ /* reset the hardware */
+
+ smc_reset( ioaddr );
+ smc_enable( ioaddr );
+
+ /* Select which interface to use */
+
+ SMC_SELECT_BANK( 1 );
+ if ( dev->if_port == 1 ) {
+ outw( inw( ioaddr + CONFIG ) & ~CFG_AUI_SELECT,
+ ioaddr + CONFIG );
+ }
+ else if ( dev->if_port == 2 ) {
+ outw( inw( ioaddr + CONFIG ) | CFG_AUI_SELECT,
+ ioaddr + CONFIG );
+ }
+
+ /*
+ According to Becker, I have to set the hardware address
+ at this point, because the (l)user can set it with an
+ ioctl. Easily done...
+ */
+ SMC_SELECT_BANK( 1 );
+ for ( i = 0; i < 6; i += 2 ) {
+ word address;
+
+ address = dev->dev_addr[ i + 1 ] << 8 ;
+ address |= dev->dev_addr[ i ];
+ outw( address, ioaddr + ADDR0 + i );
+ }
+ return 0;
+}
+
+/*--------------------------------------------------------
+ . Called by the kernel to send a packet out into the void
+ . of the net. This routine is largely based on
+ . skeleton.c, from Becker.
+ .--------------------------------------------------------
+*/
+static int smc_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ if (dev->tbusy) {
+ /* If we get here, some higher level has decided we are broken.
+ There should really be a "kick me" function call instead. */
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 5)
+ return 1;
+ printk(KERN_WARNING CARDNAME": transmit timed out, %s?\n",
+ tx_done(dev) ? "IRQ conflict" :
+ "network cable problem");
+ /* "kick" the adaptor */
+ smc_reset( dev->base_addr );
+ smc_enable( dev->base_addr );
+
+ dev->tbusy = 0;
+ dev->trans_start = jiffies;
+ /* clear anything saved */
+ ((struct smc_local *)dev->priv)->saved_skb = NULL;
+ }
+
+	/* If some higher layer thinks we've missed a tx-done interrupt
+ we are passed NULL. Caution: dev_tint() handles the cli()/sti()
+ itself. */
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0) {
+ printk(KERN_WARNING CARDNAME": Transmitter access conflict.\n");
+ dev_kfree_skb (skb, FREE_WRITE);
+ } else {
+ /* Well, I want to send the packet.. but I don't know
+ if I can send it right now... */
+ return smc_wait_to_send_packet( skb, dev );
+ }
+ return 0;
+}
+
+/*--------------------------------------------------------------------
+ .
+ . This is the main routine of the driver, to handle the device when
+ . it needs some attention.
+ .
+ . So:
+ . first, save state of the chipset
+ . branch off into routines to handle each case, and acknowledge
+ . each to the interrupt register
+ . and finally restore state.
+ .
+ ---------------------------------------------------------------------*/
+#ifdef REALLY_NEW_KERNEL
+static void smc_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+#else
+static void smc_interrupt(int irq, struct pt_regs * regs)
+#endif
+{
+ struct device *dev = (struct device *)(irq2dev_map[irq]);
+ int ioaddr = dev->base_addr;
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+
+ byte status;
+ word card_stats;
+ byte mask;
+ int timeout;
+ /* state registers */
+ word saved_bank;
+ word saved_pointer;
+
+
+
+ PRINTK3((CARDNAME": SMC interrupt started \n"));
+
+ if (dev == NULL) {
+ printk(KERN_WARNING CARDNAME": irq %d for unknown device.\n",
+ irq);
+ return;
+ }
+
+/* will Linux let this happen ?? If not, this costs some speed */
+ if ( dev->interrupt ) {
+ printk(KERN_WARNING CARDNAME": interrupt inside interrupt.\n");
+ return;
+ }
+
+ dev->interrupt = 1;
+
+ saved_bank = inw( ioaddr + BANK_SELECT );
+
+ SMC_SELECT_BANK(2);
+ saved_pointer = inw( ioaddr + POINTER );
+
+ mask = inb( ioaddr + INT_MASK );
+ /* clear all interrupts */
+ outb( 0, ioaddr + INT_MASK );
+
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 4;
+
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
+ do {
+ /* read the status flag, and mask it */
+ status = inb( ioaddr + INTERRUPT ) & mask;
+ if (!status )
+ break;
+
+ PRINTK3((KERN_WARNING CARDNAME
+ ": Handling interrupt status %x \n", status ));
+
+ if (status & IM_RCV_INT) {
+ /* Got a packet(s). */
+ PRINTK2((KERN_WARNING CARDNAME
+ ": Receive Interrupt\n"));
+ smc_rcv(dev);
+ } else if (status & IM_TX_INT ) {
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX ERROR handled\n"));
+ smc_tx(dev);
+ outb(IM_TX_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_TX_EMPTY_INT ) {
+ /* update stats */
+ SMC_SELECT_BANK( 0 );
+ card_stats = inw( ioaddr + COUNTER );
+ /* single collisions */
+ lp->stats.collisions += card_stats & 0xF;
+ card_stats >>= 4;
+ /* multiple collisions */
+ lp->stats.collisions += card_stats & 0xF;
+
+ /* these are for when linux supports these statistics */
+#if 0
+ card_stats >>= 4;
+ /* deferred */
+ card_stats >>= 4;
+ /* excess deferred */
+#endif
+ SMC_SELECT_BANK( 2 );
+ PRINTK2((KERN_WARNING CARDNAME
+ ": TX_BUFFER_EMPTY handled\n"));
+ outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT );
+ mask &= ~IM_TX_EMPTY_INT;
+ lp->stats.tx_packets += lp->packets_waiting;
+ lp->packets_waiting = 0;
+
+ } else if (status & IM_ALLOC_INT ) {
+ PRINTK2((KERN_DEBUG CARDNAME
+ ": Allocation interrupt \n"));
+ /* clear this interrupt so it doesn't happen again */
+ mask &= ~IM_ALLOC_INT;
+
+ smc_hardware_send_packet( dev );
+
+ /* enable xmit interrupts based on this */
+ mask |= ( IM_TX_EMPTY_INT | IM_TX_INT );
+
+ /* and let the card send more packets to me */
+ mark_bh( NET_BH );
+
+ PRINTK2((CARDNAME": Handoff done successfully.\n"));
+ } else if (status & IM_RX_OVRN_INT ) {
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
+ } else if (status & IM_EPH_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
+ } else if (status & IM_ERCV_INT ) {
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
+ outb( IM_ERCV_INT, ioaddr + INTERRUPT );
+ }
+ } while ( timeout -- );
+
+
+ /* restore state register */
+ SMC_SELECT_BANK( 2 );
+ outb( mask, ioaddr + INT_MASK );
+
+ PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
+ outw( saved_pointer, ioaddr + POINTER );
+
+ SMC_SELECT_BANK( saved_bank );
+
+ dev->interrupt = 0;
+ PRINTK3((CARDNAME ": Interrupt done\n"));
+ return;
+}
+
+/*-------------------------------------------------------------
+ .
+ . smc_rcv - receive a packet from the card
+ .
+ . There is ( at least ) a packet waiting to be read from
+ . chip-memory.
+ .
+ . o Read the status
+ . o If an error, record it
+ . o otherwise, read in the packet
+ --------------------------------------------------------------
+*/
+static void smc_rcv(struct device *dev)
+{
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int packet_number;
+ word status;
+ word packet_length;
+
+ /* assume bank 2 */
+
+ packet_number = inw( ioaddr + FIFO_PORTS );
+
+ if ( packet_number & FP_RXEMPTY ) {
+ /* we got called , but nothing was on the FIFO */
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n"));
+ /* don't need to restore anything */
+ return;
+ }
+
+ /* start reading from the start of the packet */
+ outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER );
+
+ /* First two words are status and packet_length */
+ status = inw( ioaddr + DATA_1 );
+ packet_length = inw( ioaddr + DATA_1 );
+
+ packet_length &= 0x07ff; /* mask off top bits */
+
+ PRINTK2(("RCV: STATUS %4x LENGTH %4x\n", status, packet_length ));
+ /*
+ . the packet length contains 3 extra words :
+	. status, length, and an extra word with an odd byte .
+ */
+ packet_length -= 6;
+
+ if ( !(status & RS_ERRORS ) ){
+ /* do stuff to make a new packet */
+ struct sk_buff * skb;
+ byte * data;
+
+ /* read one extra byte */
+ if ( status & RS_ODDFRAME )
+ packet_length++;
+
+ /* set multicast stats */
+ if ( status & RS_MULTICAST )
+ lp->stats.multicast++;
+
+#ifdef SUPPORT_OLD_KERNEL
+ skb = alloc_skb( packet_length + 5, GFP_ATOMIC );
+#else
+ skb = dev_alloc_skb( packet_length + 5);
+#endif
+
+		if ( skb == NULL ) {
+			printk(KERN_NOTICE CARDNAME
+				": Low memory, packet dropped.\n");
+			lp->stats.rx_dropped++;
+			/* still tell the card to discard the packet */
+			outw( MC_RELEASE, ioaddr + MMU_CMD );
+			return;
+		}
+
+ /*
+ ! This should work without alignment, but it could be
+	! a problem in the worst case
+ */
+#ifndef SUPPORT_OLD_KERNEL
+ /* TODO: Should I use 32bit alignment here ? */
+ skb_reserve( skb, 2 ); /* 16 bit alignment */
+#endif
+
+ skb->dev = dev;
+#ifdef SUPPORT_OLD_KERNEL
+ skb->len = packet_length;
+ data = skb->data;
+#else
+ data = skb_put( skb, packet_length);
+#endif
+#ifdef USE_32_BIT
+ /* QUESTION: Like in the TX routine, do I want
+ to send the DWORDs or the bytes first, or some
+ mixture. A mixture might improve already slow PIO
+ performance */
+ PRINTK3((" Reading %d dwords (and %d bytes) \n",
+ packet_length >> 2, packet_length & 3 ));
+ insl(ioaddr + DATA_1 , data, packet_length >> 2 );
+ /* read the left over bytes */
+ insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
+ packet_length & 0x3 );
+#else
+		PRINTK3((" Reading %d words and %d byte(s) \n",
+			(packet_length >> 1 ), packet_length & 1 ));
+		insw(ioaddr + DATA_1 , data, packet_length >> 1);
+		if ( packet_length & 1 ) {
+			/* read the trailing odd byte */
+			data += packet_length & ~1;
+			*(data++) = inb( ioaddr + DATA_1 );
+		}
+#endif
+#if SMC_DEBUG > 2
+ print_packet( data, packet_length );
+#endif
+
+#ifndef SUPPORT_OLD_KERNEL
+ skb->protocol = eth_type_trans(skb, dev );
+#endif
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ } else {
+ /* error ... */
+ lp->stats.rx_errors++;
+
+ if ( status & RS_ALGNERR ) lp->stats.rx_frame_errors++;
+ if ( status & (RS_TOOSHORT | RS_TOOLONG ) )
+ lp->stats.rx_length_errors++;
+ if ( status & RS_BADCRC) lp->stats.rx_crc_errors++;
+ }
+ /* error or good, tell the card to get rid of this packet */
+ outw( MC_RELEASE, ioaddr + MMU_CMD );
+
+
+ return;
+}
+
+
+/*************************************************************************
+ . smc_tx
+ .
+ . Purpose: Handle a transmit error message. This will only be called
+ .	   when an error occurs, because of the AUTO_RELEASE mode.
+ .
+ . Algorithm:
+ . Save pointer and packet no
+ . Get the packet no from the top of the queue
+ . check if it's valid ( if not, is this an error??? )
+ . read the status word
+ . record the error
+ . ( resend? Not really, since we don't want old packets around )
+ . Restore saved values
+ ************************************************************************/
+static void smc_tx( struct device * dev )
+{
+ int ioaddr = dev->base_addr;
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+ byte saved_packet;
+ byte packet_no;
+ word tx_status;
+
+
+ /* assume bank 2 */
+
+ saved_packet = inb( ioaddr + PNR_ARR );
+ packet_no = inw( ioaddr + FIFO_PORTS );
+ packet_no &= 0x7F;
+
+ /* select this as the packet to read from */
+ outb( packet_no, ioaddr + PNR_ARR );
+
+ /* read the first word from this packet */
+ outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
+
+ tx_status = inw( ioaddr + DATA_1 );
+ PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status ));
+
+ lp->stats.tx_errors++;
+ if ( tx_status & TS_LOSTCAR ) lp->stats.tx_carrier_errors++;
+ if ( tx_status & TS_LATCOL ) {
+ printk(KERN_DEBUG CARDNAME
+ ": Late collision occurred on last xmit.\n");
+ lp->stats.tx_window_errors++;
+ }
+#if 0
+ if ( tx_status & TS_16COL ) { ... }
+#endif
+
+ if ( tx_status & TS_SUCCESS ) {
+ printk(CARDNAME": Successful packet caused interrupt \n");
+ }
+ /* re-enable transmit */
+ SMC_SELECT_BANK( 0 );
+ outw( inw( ioaddr + TCR ) | TCR_ENABLE, ioaddr + TCR );
+
+ /* kill the packet */
+ SMC_SELECT_BANK( 2 );
+ outw( MC_FREEPKT, ioaddr + MMU_CMD );
+
+ /* one less packet waiting for me */
+ lp->packets_waiting--;
+
+ outb( saved_packet, ioaddr + PNR_ARR );
+ return;
+}
+
+/*----------------------------------------------------
+ . smc_close
+ .
+ . this makes the board clean up everything that it can
+ . and not talk to the outside world. Caused by
+ . an 'ifconfig ethX down'
+ .
+ -----------------------------------------------------*/
+static int smc_close(struct device *dev)
+{
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* clear everything */
+ smc_shutdown( dev->base_addr );
+
+ /* Update the statistics here. */
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+
+ return 0;
+}
+
+/*------------------------------------------------------------
+ . Get the current statistics.
+ . This may be called with the card open or closed.
+ .-------------------------------------------------------------*/
+static struct enet_statistics * smc_query_statistics(struct device *dev) {
+ struct smc_local *lp = (struct smc_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/*-----------------------------------------------------------
+ . smc_set_multicast_list
+ .
+ . This routine will, depending on the values passed to it,
+ . either make it accept multicast packets, go into
+ . promiscuous mode ( for TCPDUMP and cousins ) or accept
+ . a select set of multicast packets
+*/
+#ifdef SUPPORT_OLD_KERNEL
+static void smc_set_multicast_list( struct device * dev,
+ int num_addrs, void * addrs )
+#else
+static void smc_set_multicast_list(struct device *dev)
+#endif
+{
+ short ioaddr = dev->base_addr;
+
+ SMC_SELECT_BANK(0);
+#ifdef SUPPORT_OLD_KERNEL
+ if ( num_addrs < 0 )
+#else
+ if ( dev->flags & IFF_PROMISC )
+#endif
+ outw( inw(ioaddr + RCR ) | RCR_PROMISC, ioaddr + RCR );
+
+/* BUG? I never disable promiscuous mode if multicasting was turned on.
+ Now, I turn off promiscuous mode, but I don't do anything to multicasting
+ when promiscuous mode is turned on.
+*/
+
+ /* Here, I am setting this to accept all multicast packets.
+ I don't need to zero the multicast table, because the flag is
+	   checked before the table is used.
+ */
+#ifdef SUPPORT_OLD_KERNEL
+ else if ( num_addrs > 20 ) /* arbitrary constant */
+#else
+ else if (dev->flags & IFF_ALLMULTI)
+#endif
+ outw( inw(ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR );
+
+ /* We just get all multicast packets even if we only want them
+ . from one source. This will be changed at some future
+ . point. */
+#ifdef SUPPORT_OLD_KERNEL
+ else if (num_addrs > 0 ) {
+/* the old kernel support will not have hardware multicast support. It would
+ involve more kludges, and make the multicast setting code even worse.
+ Instead, just use the ALMUL method. This is reasonable, considering that
+ it is seldom used
+*/
+ outw( inw( ioaddr + RCR ) & ~RCR_PROMISC, ioaddr + RCR );
+	outw( inw( ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR );
+ }
+#else
+ else if (dev->mc_count ) {
+ /* support hardware multicasting */
+
+ /* be sure I get rid of flags I might have set */
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+ /* NOTE: this has to set the bank, so make sure it is the
+ last thing called. The bank is set to zero at the top */
+ smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ }
+#endif
+ else {
+ outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
+ ioaddr + RCR );
+
+ /*
+ since I'm disabling all multicast entirely, I need to
+ clear the multicast list
+ */
+ SMC_SELECT_BANK( 3 );
+ outw( 0, ioaddr + MULTICAST1 );
+ outw( 0, ioaddr + MULTICAST2 );
+ outw( 0, ioaddr + MULTICAST3 );
+ outw( 0, ioaddr + MULTICAST4 );
+ }
+}
+
+#ifdef MODULE
+
+static char devicename[9] = { 0, };
+static struct device devSMC9194 = {
+ devicename, /* device name is inserted by linux/drivers/net/net_init.c */
+ 0, 0, 0, 0,
+ 0, 0, /* I/O address, IRQ */
+ 0, 0, 0, NULL, smc_init };
+
+int io = 0;
+int irq = 0;
+int ifport = 0;
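+
+/* Example module load (illustrative; uses the parameters above):
+ .
+ .	insmod smc9194.o io=0x300 irq=10 ifport=1
+ .
+ . io and irq select the port and interrupt, and ifport picks the interface
+ . ( 1 = TP, 2 = AUI, 0 = use the card's own configuration ).
+*/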
+
+int init_module(void)
+{
+ int result;
+
+ if (io == 0)
+ printk(KERN_WARNING
+ CARDNAME": You shouldn't use auto-probing with insmod!\n" );
+
+ /* copy the parameters from insmod into the device structure */
+ devSMC9194.base_addr = io;
+ devSMC9194.irq = irq;
+ devSMC9194.if_port = ifport;
+ if ((result = register_netdev(&devSMC9194)) != 0)
+ return result;
+
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ unregister_netdev(&devSMC9194);
+
+ free_irq(devSMC9194.irq, NULL );
+ irq2dev_map[devSMC9194.irq] = NULL;
+ release_region(devSMC9194.base_addr, SMC_IO_EXTENT);
+
+ if (devSMC9194.priv)
+ kfree_s(devSMC9194.priv, sizeof(struct smc_local));
+}
+
+#endif /* MODULE */
+
diff --git a/linux/src/drivers/net/smc9194.h b/linux/src/drivers/net/smc9194.h
new file mode 100644
index 0000000..66f8b8c
--- /dev/null
+++ b/linux/src/drivers/net/smc9194.h
@@ -0,0 +1,240 @@
+/*------------------------------------------------------------------------
+ . smc9194.h
+ . Copyright (C) 1996 by Erik Stahlman
+ .
+ . This software may be used and distributed according to the terms
+ . of the GNU Public License, incorporated herein by reference.
+ .
+ . This file contains register information and access macros for
+ . the SMC91xxx chipset.
+ .
+ . Information contained in this file was obtained from the SMC91C94
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smc.com in the components division.
+ . ( this thanks to advice from Donald Becker ).
+ .
+ . Authors
+ . Erik Stahlman ( erik@vt.edu )
+ .
+ . History
+ . 01/06/96 Erik Stahlman moved definitions here from main .c file
+ . 01/19/96 Erik Stahlman polished this up some, and added better
+ . error handling
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC9194_H_
+#define _SMC9194_H_
+
+/* I want some simple types */
+
+typedef unsigned char byte;
+typedef unsigned short word;
+typedef unsigned long int dword;
+
+
+/* Because of bank switching, the SMC91xxx uses only 16 I/O ports */
+
+#define SMC_IO_EXTENT 16
+
+
+/*---------------------------------------------------------------
+ .
+ . A description of the SMC registers is probably in order here,
+ . although for details, the SMC datasheet is invaluable.
+ .
+ . Basically, the chip has 4 banks of registers ( 0 to 3 ), which
+ . are accessed by writing a number into the BANK_SELECT register
+ . ( I also use a SMC_SELECT_BANK macro for this ).
+ .
+ . The banks are configured so that for most purposes, bank 2 is all
+ . that is needed for simple run time tasks.
+ -----------------------------------------------------------------------*/
+
+/*
+ . Bank Select Register:
+ .
+ . yyyy yyyy 0000 00xx
+ . xx = bank number
+ . yyyy yyyy = 0x33, for identification purposes.
+*/
+#define BANK_SELECT 14
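+
+/* For example, after SMC_SELECT_BANK(2) a read of ioaddr + BANK_SELECT
+ . should return 0x3302 on a real chip; smc_probe() uses that 0x33 high
+ . byte as its first identity test. */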
+
+/* BANK 0 */
+
+#define TCR 0 /* transmit control register */
+#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */
+#define TCR_FDUPLX 0x0800 /* receive packets sent out */
+#define TCR_STP_SQET 0x1000 /* stop transmitting if Signal quality error */
+#define TCR_MON_CNS 0x0400 /* monitors the carrier status */
+#define TCR_PAD_ENABLE 0x0080 /* pads short packets to 64 bytes */
+
+#define TCR_CLEAR 0 /* do NOTHING */
+/* the normal settings for the TCR register : */
+/* QUESTION: do I want to enable padding of short packets ? */
+#define TCR_NORMAL TCR_ENABLE
+
+
+#define EPH_STATUS 2
+#define ES_LINK_OK 0x4000 /* is the link integrity ok ? */
+
+#define RCR 4
+#define RCR_SOFTRESET 0x8000 /* resets the chip */
+#define RCR_STRIP_CRC 0x200 /* strips CRC */
+#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */
+#define RCR_ALMUL 0x4 /* receive all multicast packets */
+#define RCR_PROMISC 0x2 /* enable promiscuous mode */
+
+/* the normal settings for the RCR register : */
+#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE)
+#define RCR_CLEAR 0x0 /* set it to a base state */
+
+#define COUNTER 6
+#define MIR 8
+#define MCR 10
+/* 12 is reserved */
+
+/* BANK 1 */
+#define CONFIG 0
+#define CFG_AUI_SELECT 0x100
+#define BASE 2
+#define ADDR0 4
+#define ADDR1 6
+#define ADDR2 8
+#define GENERAL 10
+#define CONTROL 12
+#define CTL_POWERDOWN 0x2000
+#define CTL_LE_ENABLE 0x80
+#define CTL_CR_ENABLE 0x40
+#define CTL_TE_ENABLE 0x0020
+#define CTL_AUTO_RELEASE 0x0800
+#define CTL_EPROM_ACCESS 0x0003 /* high if Eprom is being read */
+
+/* BANK 2 */
+#define MMU_CMD 0
+#define MC_BUSY 1 /* only readable bit in the register */
+#define MC_NOP 0
+#define MC_ALLOC 0x20 /* or with number of 256 byte packets */
+#define MC_RESET 0x40
+#define MC_REMOVE 0x60 /* remove the current rx packet */
+#define MC_RELEASE 0x80 /* remove and release the current rx packet */
+#define MC_FREEPKT 0xA0 /* Release packet in PNR register */
+#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */
+
+#define PNR_ARR 2
+#define FIFO_PORTS 4
+
+#define FP_RXEMPTY 0x8000
+#define FP_TXEMPTY 0x80
+
+#define POINTER 6
+#define PTR_READ 0x2000
+#define PTR_RCV 0x8000
+#define PTR_AUTOINC 0x4000
+#define PTR_AUTO_INC 0x0040
+
+#define DATA_1 8
+#define DATA_2 10
+#define INTERRUPT 12
+
+#define INT_MASK 13
+#define IM_RCV_INT 0x1
+#define IM_TX_INT 0x2
+#define IM_TX_EMPTY_INT 0x4
+#define IM_ALLOC_INT 0x8
+#define IM_RX_OVRN_INT 0x10
+#define IM_EPH_INT 0x20
+#define IM_ERCV_INT 0x40 /* not on SMC9192 */
+
+/* BANK 3 */
+#define MULTICAST1 0
+#define MULTICAST2 2
+#define MULTICAST3 4
+#define MULTICAST4 6
+#define MGMT 8
+#define REVISION 10 /* ( hi: chip id low: rev # ) */
+
+
+/* this is NOT on SMC9192 */
+#define ERCV 12
+
+#define CHIP_9190 3
+#define CHIP_9194 4
+#define CHIP_9195 5
+#define CHIP_91100 7
+
+static const char * chip_ids[ 15 ] = {
+ NULL, NULL, NULL,
+ /* 3 */ "SMC91C90/91C92",
+ /* 4 */ "SMC91C94",
+ /* 5 */ "SMC91C95",
+ NULL,
+ /* 7 */ "SMC91C100",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL};
+
+/*
+ . Transmit status bits
+*/
+#define TS_SUCCESS 0x0001
+#define TS_LOSTCAR 0x0400
+#define TS_LATCOL 0x0200
+#define TS_16COL 0x0010
+
+/*
+ . Receive status bits
+*/
+#define RS_ALGNERR 0x8000
+#define RS_BADCRC 0x2000
+#define RS_ODDFRAME 0x1000
+#define RS_TOOLONG 0x0800
+#define RS_TOOSHORT 0x0400
+#define RS_MULTICAST 0x0001
+#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
+
+static const char * interfaces[ 2 ] = { "TP", "AUI" };
+
+/*-------------------------------------------------------------------------
+ . I define some macros to make it easier to do somewhat common
+ . or slightly complicated, repeated tasks.
+ --------------------------------------------------------------------------*/
+
+/* select a register bank, 0 to 3 */
+
+#define SMC_SELECT_BANK(x) { outw( x, ioaddr + BANK_SELECT ); }
+
+/* define a small delay for the reset */
+#define SMC_DELAY() { inw( ioaddr + RCR );\
+ inw( ioaddr + RCR );\
+ inw( ioaddr + RCR ); }
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask |= (x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
+
+/* this disables an interrupt from the interrupt mask register */
+
+#define SMC_DISABLE_INT(x) {\
+ unsigned char mask;\
+ SMC_SELECT_BANK(2);\
+ mask = inb( ioaddr + INT_MASK );\
+ mask &= ~(x);\
+ outb( mask, ioaddr + INT_MASK ); \
+}
+
+/*----------------------------------------------------------------------
+ . Define the interrupts that I want to receive from the card
+ .
+ . I want:
+ . IM_EPH_INT, for nasty errors
+ . IM_RCV_INT, for happy received packets
+ . IM_RX_OVRN_INT, because I have to kick the receiver
+ --------------------------------------------------------------------------*/
+#define SMC_INTERRUPT_MASK (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT)
+
+#endif /* _SMC_9194_H_ */
+
diff --git a/linux/src/drivers/net/starfire.c b/linux/src/drivers/net/starfire.c
new file mode 100644
index 0000000..b8702a0
--- /dev/null
+++ b/linux/src/drivers/net/starfire.c
@@ -0,0 +1,1535 @@
+/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
+/*
+ Written/Copyright 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/starfire.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"starfire.c:v1.09 7/22/2003 Copyright by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" Updates and info at http://www.scyld.com/network/starfire.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Used for tuning interrupt latency vs. overhead. */
+static int interrupt_mitigation = 0x0;
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Starfire has a 512 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' exist for driver interoperability;
+   however, full_duplex[] should never be used in new configurations.
+   The media type is usually passed in 'options[]'.
+   The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
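+
+/* Example (illustrative): to force 100Mbps full duplex on the first card,
+   load the module with something like
+
+	insmod starfire.o debug=4 options=0x200
+
+   while leaving options at -1 keeps autonegotiation. */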
+
+/* Automatically extracted configuration info:
+probe-func: starfire_probe
+config-in: tristate 'Adaptec DuraLAN ("starfire") series PCI Ethernet support' CONFIG_DURLAN
+
+c-help-name: Adaptec DuraLAN ("starfire") series PCI Ethernet support
+c-help-symbol: CONFIG_DURALAN
+c-help: This driver is for the Adaptec DuraLAN series, the 6915, 62022
+c-help: and 62044 boards.
+c-help: Design information, usage details and updates are available from
+c-help: http://www.scyld.com/network/starfire.html
+*/
+
+/* Operational parameters that are set at compile time. */
+
+/* The "native" ring sizes are either 256 or 2048.
+ However in some modes a descriptor may be marked to wrap the ring earlier.
+ The driver allocates a single page for each descriptor ring, constraining
+ the maximum size in an architecture-dependent way.
+*/
+#define RX_RING_SIZE 256
+#define TX_RING_SIZE 32
+/* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB. */
+#define DONE_Q_SIZE 1024
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability.
+ Compatibility defines are in kern_compat.h */
+
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "Driver message enable level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to set forced full duplex (deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Adaptec 6915 DuraLAN "Starfire" 64 bit PCI Ethernet
+adapter, and the multiport boards using the same chip.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
+ring sizes are fixed by the hardware, but a ring may optionally be wrapped
+earlier by the END bit in a descriptor.
+This driver uses that hardware queue size for the Rx ring, where a large
+number of entries has no ill effect beyond increasing the potential backlog.
+The Tx ring is wrapped with the END bit, since a large hardware Tx queue
+disables the queue layer priority ordering and we have no mechanism to
+utilize the hardware two-level priority queue. When modifying the
+RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
+levels.
+
+IIIb/c. Transmit/Receive Structure
+
+See the Adaptec manual for the many possible structures, and options for
+each structure. There are far too many to document here.
+
+For transmit this driver uses type 1 transmit descriptors, and relies on
+automatic minimum-length padding. It does not use the completion queue
+consumer index, but instead checks for non-zero status entries.
+
+For receive this driver uses type 0 receive descriptors. The driver
+allocates full frame size skbuffs for the Rx ring buffers, so all frames
+should fit in a single descriptor. The driver does not use the completion
+queue consumer index, but instead checks for non-zero status entries.
+
+When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
+is allocated and the frame is copied to the new skbuff. When the incoming
+frame is larger, the skbuff is passed directly up the protocol stack.
+Buffers consumed this way are replaced by newly allocated skbuffs in a later
+phase of receive.
+
+A notable aspect of operation is that unaligned buffers are not permitted by
+the Starfire hardware. The IP header at offset 14 in an ethernet frame thus
+isn't longword aligned, which may cause problems on some machines,
+e.g. Alphas. Copied frames are put into the skbuff at an offset of "+2",
+16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Adaptec Starfire manuals, available only from Adaptec.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
+
+
+
+static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int starfire_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags {CanHaveMII=1, };
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
+/* And maps in 0.5MB(!) -- no I/O mapping here! */
+#define MEM_ADDR_SZ 0x80000
+
+#if 0 && (defined(__x86_64) || defined(__alpha__))
+/* Enable 64 bit address modes. */
+#define STARFIRE_ADDR_64BITS 1
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Adaptec Starfire 6915", { 0x69159004, 0xffffffff, },
+ PCI_IOTYPE, MEM_ADDR_SZ, CanHaveMII},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info starfire_drv_id = {
+ "starfire", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ starfire_probe1, starfire_pwr_event };
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum register_offsets {
+ PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
+ IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
+ MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
+ TxDescCtrl=0x50090,
+ TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
+ TxRingHiAddr=0x5009C, /* 64 bit address extension. */
+ TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
+ TxThreshold=0x500B0,
+ CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
+ RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
+ CompletionQConsumerIdx=0x500C4,
+ RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
+ RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
+ TxMode=0x55000,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrNormalSummary=0x8000, IntrAbnormalSummary=0x02000000,
+ IntrRxDone=0x0300, IntrRxEmpty=0x10040, IntrRxPCIErr=0x80000,
+ IntrTxDone=0x4000, IntrTxEmpty=0x1000, IntrTxPCIErr=0x80000,
+ StatsMax=0x08000000, LinkChange=0xf0000000,
+ IntrTxDataLow=0x00040000,
+ IntrPCIPin=0x01,
+};
+
+/* Bits in the RxFilterMode register. */
+enum rx_mode_bits {
+ AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
+ AcceptMulticast=0x10, AcceptMyPhys=0xE040,
+};
+
+/* Misc. bits. Symbolic names so that they may be searched for. */
+enum misc_bits {
+ ChipResetCmd=1, /* PCIDeviceConfig */
+ PCIIntEnb=0x00800000, /* PCIDeviceConfig */
+ TxEnable=0x0A, RxEnable=0x05, SoftIntr=0x100, /* GenCtrl */
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct starfire_rx_desc {
+ u32 rxaddr; /* Optionally 64 bits. */
+#if defined(STARFIRE_ADDR_64BITS)
+ u32 rxaddr_hi; /* Optionally 64 bits. */
+#endif
+};
+enum rx_desc_bits {
+ RxDescValid=1, RxDescEndRing=2,
+};
+
+/* Completion queue entry.
+ You must update the page allocation, init_ring and the shift count in rx()
+ if using a larger format. */
+struct rx_done_desc {
+ u32 status; /* Low 16 bits is length. */
+#ifdef full_rx_status
+ u32 status2;
+ u16 vlanid;
+ u16 csum; /* partial checksum */
+ u32 timestamp;
+#endif
+};
+enum rx_done_bits {
+ RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
+};
+
+/* Type 1 Tx descriptor. */
+struct starfire_tx_desc {
+ u32 status; /* Upper bits are status, lower 16 length. */
+ u32 addr;
+};
+enum tx_desc_bits {
+ TxDescID=0xB1010000, /* Also marks single fragment, add CRC. */
+ TxDescIntr=0x08000000, TxRingWrap=0x04000000,
+};
+struct tx_done_report {
+ u32 status; /* timestamp, index. */
+#if 0
+ u32 intrstatus; /* interrupt status */
+#endif
+};
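+
+/* Illustrative sketch, not part of the original driver: the ring-size
+   comments near the top of this file assume that each descriptor ring and
+   completion queue fits in the single page allocated by netdev_open().
+   With the default (non-full_rx_status) layouts that could be checked at
+   compile time with the negative-array-size idiom; PAGE_SIZE is assumed to
+   come from <asm/page.h>. */
+#if 0
+typedef char rx_ring_fits_one_page
+	[RX_RING_SIZE * sizeof(struct starfire_rx_desc) <= PAGE_SIZE ? 1 : -1];
+typedef char tx_ring_fits_one_page
+	[TX_RING_SIZE * sizeof(struct starfire_tx_desc) <= PAGE_SIZE ? 1 : -1];
+typedef char rx_done_q_fits_one_page
+	[DONE_Q_SIZE * sizeof(struct rx_done_desc) <= PAGE_SIZE ? 1 : -1];
+#endif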
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct starfire_rx_desc *rx_ring;
+ struct starfire_tx_desc *tx_ring;
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of rx/tx-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ u8 pad0[100]; /* Impact padding */
+ /* Pointers to completion queues (full pages). Cache line pad.. */
+ struct rx_done_desc *rx_done_q __attribute__((aligned (L1_CACHE_BYTES)));
+ unsigned int rx_done;
+ struct tx_done_report *tx_done_q __attribute__((aligned (L1_CACHE_BYTES)));
+ unsigned int tx_done;
+
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int max_interrupt_work;
+ int intr_enable;
+ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
+ unsigned int polling:1; /* Erk, IRQ err. */
+
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1, /* Full-duplex operation requested. */
+ medialock:1, /* Xcvr set to fixed speed/duplex. */
+ rx_flowctrl:1,
+ tx_flowctrl:1; /* Use 802.3x flow control. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ u32 tx_mode;
+ u8 tx_threshold;
+ u32 cur_rx_mode;
+ u16 mc_filter[32];
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value);
+static int netdev_open(struct net_device *dev);
+static int change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int starfire_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&starfire_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ /* Serial EEPROM reads are hidden by the hardware. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+	/* Make certain that elements, e.g. descriptor lists, are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
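+	/* Round the kmalloc() block up to the next 16-byte boundary (PRIV_ALIGN
+	   is a mask, not a size); the unaligned pointer is saved in priv_addr
+	   so it can be handed back to kfree() later. */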
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->medialock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+ dev->change_mtu = &change_mtu;
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ /* Force the media type after detecting the transceiver. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ return dev;
+}
+
+
+/* Read the MII Management Data I/O (MDIO) interfaces. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
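+	/* Each PHY is a 128-byte window of 32 longword registers, hence
+	   MIICtrl + phy_id*128 + location*4. */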
+ long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
+ int result, boguscnt=1000;
+ /* ??? Should we add a busy-wait here? */
+ do
+ result = readl(mdio_addr);
+ while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
+ return result & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
+ writel(value, mdio_addr);
+ /* The busy-wait will occur before a read. */
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ /* We have no reports that indicate we need to reset the chip.
+ But to be on the safe side... */
+ /* Disable the Rx and Tx, and reset the chip. */
+ writel(0, ioaddr + GenCtrl);
+ writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+ /* Allocate the various queues, failing gracefully. */
+ if (np->tx_done_q == 0)
+ np->tx_done_q = (struct tx_done_report *)get_free_page(GFP_KERNEL);
+ if (np->rx_done_q == 0)
+ np->rx_done_q = (struct rx_done_desc *)get_free_page(GFP_KERNEL);
+ if (np->tx_ring == 0)
+ np->tx_ring = (struct starfire_tx_desc *)get_free_page(GFP_KERNEL);
+ if (np->rx_ring == 0)
+ np->rx_ring = (struct starfire_rx_desc *)get_free_page(GFP_KERNEL);
+ if (np->tx_done_q == 0 || np->rx_done_q == 0
+ || np->rx_ring == 0 || np->tx_ring == 0) {
+ /* Retain the pages to increase our chances next time. */
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
+
+ init_ring(dev);
+ /* Set the size of the Rx buffers. */
+ writel((np->rx_buf_sz<<16) | 0xA000, ioaddr + RxDescQCtrl);
+
+ /* Set Tx descriptor to type 1 and padding to 0 bytes. */
+ writel(0x02000401, ioaddr + TxDescCtrl);
+
+#if defined(STARFIRE_ADDR_64BITS)
+ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxDescQHiAddr);
+ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingHiAddr);
+#else
+ writel(0, ioaddr + RxDescQHiAddr);
+ writel(0, ioaddr + TxRingHiAddr);
+ writel(0, ioaddr + CompletionHiAddr);
+#endif
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxDescQAddr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ writel(virt_to_bus(np->tx_done_q), ioaddr + TxCompletionAddr);
+ writel(virt_to_bus(np->rx_done_q), ioaddr + RxCompletionAddr);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
+
+ /* Fill both the unused Tx SA register and the Rx perfect filter. */
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
+ for (i = 0; i < 16; i++) {
+ u16 *eaddrs = (u16 *)dev->dev_addr;
+ long setup_frm = ioaddr + 0x56000 + i*16;
+ writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
+ writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
+ writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
+ }
+
+ /* Initialize other registers. */
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+ np->tx_mode = 0; /* Initialized when TxMode set. */
+ np->tx_threshold = 4;
+ writel(np->tx_threshold, ioaddr + TxThreshold);
+ writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
+ set_rx_mode(dev);
+
+ np->advertising = mdio_read(dev, np->phys[0], 4);
+ check_duplex(dev);
+ netif_start_tx_queue(dev);
+
+ /* Set the interrupt mask and enable PCI interrupts. */
+ np->intr_enable = IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
+ IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
+ StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
+ | 0x0010;
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ writel(PCIIntEnb | readl(ioaddr + PCIDeviceConfig),
+ ioaddr + PCIDeviceConfig);
+
+ /* Enable the Rx and Tx units. */
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n",
+ dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+/* The Starfire can handle frame sizes up to 64KB, but we arbitrarily
+ * limit the size.
+ */
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 17268))
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_tx_mode;
+
+ new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0)
+ | (np->rx_flowctrl ? 0x0400:0);
+ if (np->medialock) {
+ if (np->full_duplex)
+ new_tx_mode |= 2;
+ } else {
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
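+		/* MII advertisement bits: 0x0100 = 100baseTx-FD, 0x0080 = 100baseTx-HD,
+		   0x0040 = 10baseT-FD.  Full duplex is used when 100baseTx-FD is
+		   common, or when 10baseT-FD is the only one of those three in common. */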
+ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (duplex)
+ new_tx_mode |= 2;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
+ " negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ }
+ }
+ if (new_tx_mode != np->tx_mode) {
+ np->tx_mode = new_tx_mode;
+ writel(np->tx_mode | 0x8000, ioaddr + TxMode);
+ writel(np->tx_mode, ioaddr + TxMode);
+ }
+}
+
+/* Check for duplex changes, but mostly check for failures. */
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int status = readl(ioaddr + IntrStatus);
+ static long last_msg = 0;
+
+ /* Normally we check only every few seconds. */
+ np->timer.expires = jiffies + 60*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
+ dev->name, status);
+ }
+
+ /* Check for a missing chip or failed interrupt line.
+ * The latter may be falsely triggered, so we check twice. */
+ if (status == 0xffffffff) {
+ if (jiffies - last_msg > 10*HZ) {
+ last_msg = jiffies;
+ printk(KERN_ERR "%s: The Starfire chip is missing!\n",
+ dev->name);
+ }
+ } else if (np->polling) {
+ if (status & IntrPCIPin) {
+ intr_handler(dev->irq, dev, 0);
+ if (jiffies - last_msg > 10*HZ) {
+ printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
+ dev->name, dev->irq);
+ last_msg = jiffies;
+ }
+ } else if (jiffies - last_msg > 10*HZ)
+ np->polling = 0;
+ np->timer.expires = jiffies + 2;
+ } else if (status & IntrPCIPin) {
+ int new_status = readl(ioaddr + IntrStatus);
+ /* Bogus hardware IRQ mapping: Fake an interrupt handler call. */
+ if (new_status & IntrPCIPin) {
+ printk(KERN_ERR "%s: IRQ %d is not raising an interrupt! "
+ "Status %8.8x/%8.8x. \n",
+ dev->name, dev->irq, status, new_status);
+ intr_handler(dev->irq, dev, 0);
+ np->timer.expires = jiffies + 2;
+ np->polling = 1;
+ }
+ } else if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ /* This will not catch tbusy incorrectly set when the queue is empty,
+ * but that state should never occur. */
+ tx_timeout(dev);
+ }
+
+ check_duplex(dev);
+
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+
+#if defined(__i386__)
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x", np->tx_ring[i].status);
+ printk("\n" KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].rxaddr);
+ printk("\n");
+ }
+#endif
+
+ /* If a specific problem is reported, reinitialize the hardware here. */
+ dev->if_port = 0;
+	/* Stop and restart the chip's Tx processes. */
+ writel(0, ioaddr + GenCtrl);
+ /* Enable the Rx and Tx units. */
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ :
+ (dev->mtu + 14 + 3) & ~3); /* Round to word. */
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ /* Grrr, we cannot offset to correctly align the IP header. */
+ np->rx_ring[i].rxaddr =
+ virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
+ }
+ writew(i - 1, dev->base_addr + RxDescQIdx);
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* Clear the remainder of the Rx buffer ring. */
+ for ( ; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = 0;
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);
+
+ /* Clear the completion rings. */
+ for (i = 0; i < DONE_Q_SIZE; i++) {
+ np->rx_done_q[i].status = 0;
+ np->tx_done_q[i].status = 0;
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+	   packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the field
+ with the "ownership" bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ /* Add "| TxDescIntr" to generate Tx-done interrupts. */
+ np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
+#if 1
+ if (entry >= TX_RING_SIZE-1) { /* Wrap ring */
+ np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
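+		/* Set to -1 so that the ++entry below writes a producer index of 0. */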
+ entry = -1;
+ }
+#endif
+
+	/* On some architectures, better performance results from explicitly
+ flushing cache lines: pci_flush_virt(skb->data, skb->len); */
+
+ np->cur_tx++;
+ /* Update the producer index. */
+ writel(++entry, dev->base_addr + TxProducerIdx);
+
+ /* cf. using TX_QUEUE_LEN instead of TX_RING_SIZE here. */
+ if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
+ np->tx_full = 1;
+ /* Check for the rare case of a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_RING_SIZE - 2) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Tx frame #%d slot %d %8.8x %8.8x.\n",
+ dev->name, np->cur_tx, entry,
+ np->tx_ring[entry].status, np->tx_ring[entry].addr);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
+ "device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrClear);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0 || intr_status == 0xffffffff)
+ break;
+
+ if (intr_status & IntrRxDone)
+ netdev_rx(dev);
+
+ /* Scavenge the skbuff list based on the Tx-done queue.
+ There are redundant checks here that may be cleaned up
+ after the driver has proven to be reliable. */
+ {
+ int consumer = readl(ioaddr + TxConsumerIdx);
+ int tx_status;
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
+ dev->name, consumer);
+#if 0
+ if (np->tx_done >= 250 || np->tx_done == 0)
+ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
+ "%d is %8.8x.\n", dev->name,
+ np->tx_done, np->tx_done_q[np->tx_done].status,
+ (np->tx_done+1) & (DONE_Q_SIZE-1),
+ np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status);
+#endif
+ while ((tx_status = cpu_to_le32(np->tx_done_q[np->tx_done].status))
+ != 0) {
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
+ dev->name, np->tx_done, tx_status);
+ if ((tx_status & 0xe0000000) == 0xa0000000) {
+ np->stats.tx_packets++;
+ } else if ((tx_status & 0xe0000000) == 0x80000000) {
+ u16 entry = tx_status; /* Implicit truncate */
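+				/* The completion entry appears to carry the descriptor's byte
+				   offset; with the 8-byte Tx descriptors above, >>3 gives the
+				   ring index. */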
+ entry >>= 3;
+ /* Scavenge the descriptor. */
+ if (np->tx_skbuff[entry]) {
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ } else
+ printk(KERN_WARNING "%s: Null skbuff at entry %d!!!\n",
+ dev->name, entry);
+ np->tx_skbuff[entry] = 0;
+ np->dirty_tx++;
+ }
+ np->tx_done_q[np->tx_done].status = 0;
+ np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
+ }
+ writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
+ }
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & IntrAbnormalSummary)
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ writel(0x0021, ioaddr + IntrTimerCtrl);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+ u32 desc_status;
+
+ if (np->rx_done_q == 0) {
+ printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n",
+ dev->name, np->rx_done, np->tx_done_q);
+ return 0;
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status of %d was %8.8x.\n",
+ np->rx_done, desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RxOK)) {
+			/* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & RxFIFOErr)
+ np->stats.rx_fifo_errors++;
+ } else {
+ struct sk_buff *skb;
+			u16 pkt_len = desc_status;	/* Implicit truncate */
+ int entry = (desc_status >> 16) & 0x7ff;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (le32desc_to_virt(np->rx_ring[entry].rxaddr & ~3) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in netdev_rx: %p vs. %p / %p.\n",
+ dev->name,
+ le32desc_to_virt(np->rx_ring[entry].rxaddr),
+ skb->head, temp);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+#ifdef full_rx_status
+ if (np->rx_done_q[np->rx_done].status2 & cpu_to_le32(0x01000000))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+ }
+ np->cur_rx++;
+ np->rx_done_q[np->rx_done].status = 0;
+ np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
+ }
+ writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ int entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].rxaddr =
+ virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
+ }
+ if (entry == RX_RING_SIZE - 1)
+ np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
+ /* We could defer this until later... */
+ writew(entry, dev->base_addr + RxDescQIdx);
+ }
+
+ if ((np->msg_level & NETIF_MSG_RX_STATUS)
+ || memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1))
+ printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x %d.\n",
+ np->rx_done, desc_status,
+ memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1));
+
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ if (intr_status & LinkChange) {
+ int phy_num = np->phys[0];
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, phy_num, 4),
+ mdio_read(dev, phy_num, 5));
+ /* Clear sticky bit. */
+ mdio_read(dev, phy_num, 1);
+ /* If link beat has returned... */
+ if (mdio_read(dev, phy_num, 1) & 0x0004)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ /* Came close to underrunning the Tx FIFO, increase threshold. */
+ if (intr_status & IntrTxDataLow)
+ writel(++np->tx_threshold, dev->base_addr + TxThreshold);
+	/* Ignore expected normal events and abnormal events already handled above. */
+ if ((intr_status &
+ ~(IntrAbnormalSummary|LinkChange|StatsMax|IntrTxDataLow| 0xFF01))
+ && (np->msg_level & NETIF_MSG_DRV))
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
+ if (intr_status & IntrTxPCIErr)
+ np->stats.tx_fifo_errors++;
+ if (intr_status & IntrRxPCIErr)
+ np->stats.rx_fifo_errors++;
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ /* This adapter architecture needs no SMP locks. */
+#if LINUX_VERSION_CODE > 0x20119
+ np->stats.tx_bytes = readl(ioaddr + 0x57010);
+ np->stats.rx_bytes = readl(ioaddr + 0x57044);
+#endif
+ np->stats.tx_packets = readl(ioaddr + 0x57000);
+ np->stats.tx_aborted_errors =
+ readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
+ np->stats.tx_window_errors = readl(ioaddr + 0x57018);
+ np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
+
+	/* The chip need only report frames it silently dropped. */
+ np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
+ writew(0, ioaddr + RxDMAStatus);
+ np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
+ np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
+ np->stats.rx_length_errors = readl(ioaddr + 0x57058);
+ np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
+
+ return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+ A big-endian version is also available.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c.
+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = ~0; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
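+
+/* Illustrative note, not in the original source: the Starfire's multicast
+   hash filter is 512 bits wide, stored as 32 16-bit words (see the u16
+   mc_filter[32] in struct netdev_private and in set_rx_mode() below).  The
+   filter bit for an address is selected by the top nine bits of the
+   little-endian CRC, i.e. on a little-endian machine
+
+	bit = ether_crc_le(ETH_ALEN, addr) >> 23;	yielding 0..511
+	mc_filter[bit >> 4] |= 1 << (bit & 15);
+
+   which is what set_rx_mode() below does via set_bit(). */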
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 rx_mode;
+ struct dev_mc_list *mclist;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
+ } else if (dev->mc_count <= 15) {
+ /* Use the 16 element perfect filter. */
+ long filter_addr = ioaddr + 0x56000 + 1*16;
+ for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
+ i++, mclist = mclist->next) {
+ u16 *eaddrs = (u16 *)mclist->dmi_addr;
+ writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
+ writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
+ }
+ while (i++ < 16) {
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 8;
+ }
+ rx_mode = AcceptBroadcast | AcceptMyPhys;
+ } else {
+ /* Must use a multicast hash table. */
+ long filter_addr;
+ u16 mc_filter[32]; /* Multicast hash filter */
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
+ }
+ /* Clear the perfect filter list. */
+ filter_addr = ioaddr + 0x56000 + 1*16;
+ for (i = 1; i < 16; i++) {
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 4;
+ writew(0xffff, filter_addr); filter_addr += 8;
+ }
+ for (filter_addr=ioaddr + 0x56100, i=0; i < 32; filter_addr+= 16, i++){
+ np->mc_filter[i] = mc_filter[i];
+ writew(mc_filter[i], filter_addr);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ }
+ writel(rx_mode, ioaddr + RxFilterMode);
+}
+
+/*
+ Handle user-level ioctl() calls.
+ We must use two numeric constants as the key because some clueless person
+ changed the value for the symbolic name.
+*/
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ check_duplex(dev);
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(0, ioaddr + GenCtrl);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].addr,
+ np->tx_done_q[i].status);
+ printk(KERN_DEBUG " Rx ring at %8.8x -> %p:\n",
+ (int)virt_to_bus(np->rx_ring), np->rx_done_q);
+ if (np->rx_done_q)
+ for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
+ printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
+ i, np->rx_ring[i].rxaddr, np->rx_done_q[i].status);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rxaddr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+static int starfire_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writel(0x0000, ioaddr + IntrEnable);
+ writel(0, ioaddr + GenCtrl);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: we must factor start_chip() out of open(). */
+ writel(np->tx_threshold, ioaddr + TxThreshold);
+ writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
+ set_rx_mode(dev);
+ writel(np->intr_enable, ioaddr + IntrEnable);
+ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ if (pci_drv_register(&starfire_drv_id, NULL)) {
+ printk(KERN_INFO " No Starfire adapters detected, driver not loaded.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&starfire_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+ iounmap((char *)(root_net_dev->base_addr));
+ next_dev = np->next_module;
+ if (np->tx_done_q) free_page((long)np->tx_done_q);
+ if (np->rx_done_q) free_page((long)np->rx_done_q);
+ if (np->priv_addr) kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` starfire.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/sundance.c b/linux/src/drivers/net/sundance.c
new file mode 100644
index 0000000..3723164
--- /dev/null
+++ b/linux/src/drivers/net/sundance.c
@@ -0,0 +1,1556 @@
+/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
+/*
+ Written 1999-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/sundance.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"sundance.c:v1.11 2/4/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/sundance.html\n";
+/* Updated to recommendations in pci-skeleton v2.12. */
+
+/* Automatically extracted configuration info:
+probe-func: sundance_probe
+config-in: tristate 'Sundance ST201 "Alta" PCI Ethernet support' CONFIG_SUNDANCE
+c-help-name: Sundance ST201 "Alta" PCI Ethernet support
+c-help-symbol: CONFIG_SUNDANCE
+c-help: This driver is for the Sundance ST201 "Alta" and Kendin KS8723, as
+c-help: used on the D-Link DFE-550 and DFE-580.
+c-help: Design information, usage details and updates are available from
+c-help: http://www.scyld.com/network/sundance.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The sundance uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+   This chip can receive into buffers at any byte alignment, so word-oriented
+ archs do not need a copy-align of the IP header. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+   The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
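+
+/* Illustrative example, not from the original sources: with the option values
+   documented above, forcing 100 Mbps full duplex on the first card and
+   10 Mbps half duplex on the second might look like
+
+	insmod sundance.o options=0x200,0x10
+
+   with any further cards left at the autonegotiation default. */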
+
+/* Operational parameters that are set at compile time. */
+
+/* Ring sizes are a power of two only for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ There must be at least five Tx entries for the tx_full hysteresis, and
+ more than 31 requires modifying the Tx status handling error recovery.
+   Leave an inactive gap in the Tx ring for better cache behavior.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ Large receive rings waste memory and impact buffer accounting.
+   The driver needs to protect against interrupt latency and the kernel
+ not reserving enough available memory.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+/* Set iff an MII transceiver on any interface requires mdio preamble.
+   This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 0;
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE >= 0x20300
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= 0x20200
+#include <asm/spinlock.h>
+#endif
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to set forced full duplex (deprecated).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the Sundance Technologies "Alta" ST201 chip.
+The Kendin KS8723 is the same design with an integrated transceiver and
+new quirks.
+
+II. Board-specific settings
+
+This is an all-in-one chip, so there are no board-specific settings.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+Unaligned buffers are permitted by the Sundance hardware, so
+frames are received into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Sundance ST201 datasheet, preliminary version.
+The Kendin KS8723 datasheet, preliminary version.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
+
+
+
+/* Work-around for Kendin chip bugs. This will be reversed after tracking
+ down all of the chip access quirks in memory mode. */
+#ifndef USE_MEM_OPS
+#define USE_IO_OPS 1
+#endif
+
+static void *sundance_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int sundance_pwr_event(void *dev_instance, int event);
+
+enum chip_capability_flags {CanHaveMII=1, KendinPktDropBug=2, };
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"D-Link DFE-580TX (Kendin/Sundance ST201 Alta)",
+ {0x10021186, 0xffffffff, 0x10121186, 0xffffffff, 0x14, 0xff},
+ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
+ {"D-Link DFE-580TX (Sundance ST201)",
+ {0x10021186, 0xffffffff, 0x10121186, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
+ {"D-Link DFE-550FX 100baseFx (Sundance ST201)",
+ {0x10031186, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
+ {"OEM Sundance Technology ST201", {0x10021186, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII},
+ {"Sundance Technology Alta", {0x020113F0, 0xffffffff, },
+ PCI_IOTYPE, 128, CanHaveMII},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info sundance_drv_id = {
+ "sundance", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ sundance_probe1, sundance_pwr_event };
+
+/* This driver was written to use PCI memory space; however, x86-oriented
+ hardware often uses I/O space accesses. */
+#ifdef USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum alta_offsets {
+ DMACtrl=0x00, TxListPtr=0x04, TxDMACtrl=0x08, TxDescPoll=0x0a,
+ RxDMAStatus=0x0c, RxListPtr=0x10, RxDMACtrl=0x14, RxDescPoll=0x16,
+ LEDCtrl=0x1a, ASICCtrl=0x30,
+ EEData=0x34, EECtrl=0x36, TxThreshold=0x3c,
+ FlashAddr=0x40, FlashData=0x44, WakeEvent=0x45, TxStatus=0x46,
+ DownCounter=0x48, IntrClear=0x4a, IntrEnable=0x4c, IntrStatus=0x4e,
+ MACCtrl0=0x50, MACCtrl1=0x52, StationAddr=0x54,
+ MaxFrameSize=0x5A, RxMode=0x5c, MIICtrl=0x5e,
+ MulticastFilter0=0x60, MulticastFilter1=0x64,
+ RxOctetsLow=0x68, RxOctetsHigh=0x6a, TxOctetsLow=0x6c, TxOctetsHigh=0x6e,
+ TxFramesOK=0x70, RxFramesOK=0x72, StatsCarrierError=0x74,
+ StatsLateColl=0x75, StatsMultiColl=0x76, StatsOneColl=0x77,
+ StatsTxDefer=0x78, RxMissed=0x79, StatsTxXSDefer=0x7a, StatsTxAbort=0x7b,
+ StatsBcastTx=0x7c, StatsBcastRx=0x7d, StatsMcastTx=0x7e, StatsMcastRx=0x7f,
+ /* Aliased and bogus values! */
+ RxStatus=0x0c,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
+ IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
+ IntrDrvRqst=0x0040,
+ StatsMax=0x0080, LinkChange=0x0100,
+ IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
+ AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
+};
+/* Bits in MACCtrl. */
+enum mac_ctrl0_bits {
+ EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
+ EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
+};
+enum mac_ctrl1_bits {
+ StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
+ TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
+ RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
+};
+
+/* The Rx and Tx buffer descriptors.
+ Using only 32 bit fields simplifies software endian correction.
+ This structure must be aligned, and should avoid spanning cache lines.
+*/
+struct netdev_desc {
+ u32 next_desc;
+ u32 status;
+ struct desc_frag { u32 addr, length; } frag[1];
+};
+
+/* Bits in netdev_desc.status */
+enum desc_status_bits {
+ DescOwn=0x8000, DescEndPacket=0x4000, DescEndRing=0x2000,
+ DescTxDMADone=0x10000,
+ LastFrag=0x80000000, DescIntrOnTx=0x8000, DescIntrOnDMADone=0x80000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc rx_ring[RX_RING_SIZE];
+ struct netdev_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int max_interrupt_work;
+
+ /* Note: Group variables for cache line effect. */
+ struct netdev_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ spinlock_t txlock; /* Group with Tx control cache line. */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* Multicast and receive mode. */
+ spinlock_t mcastlock; /* SMP lock multicast updates. */
+ u16 mcast_filter[4];
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ int link_status;
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+/* The station address location in the EEPROM. */
+#define EEPROM_SA_OFFSET 0x10
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id,
+ unsigned int location);
+static void mdio_write(struct net_device *dev, int phy_id,
+ unsigned int location, int value);
+static int netdev_open(struct net_device *dev);
+static void sundance_start(struct net_device *dev);
+static int change_mtu(struct net_device *dev, int new_mtu);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int sundance_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&sundance_drv_id, dev) < 0)
+ return -ENODEV;
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *sundance_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ /* Perhaps NETIF_MSG_PROBE */
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
+
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] =
+ le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Make certain that elements such as the descriptor lists are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* All failure checks are done before this point.
+ We do a request_region() only to register /proc/ioports info. */
+#ifdef USE_IO_OPS
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex)
+ np->medialock = 1;
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+ dev->change_mtu = &change_mtu;
+
+ if (1) {
+ int phy, phy_idx = 0;
+ np->phys[0] = 1; /* Default setting */
+ mii_preamble_required++;
+ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ if ((mii_status & 0x0040) == 0)
+ mii_preamble_required++;
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ mii_preamble_required--;
+ np->mii_cnt = phy_idx;
+ if (phy_idx == 0)
+ printk(KERN_INFO "%s: No MII transceiver found!, ASIC status %x\n",
+ dev->name, (int)readl(ioaddr + ASICCtrl));
+ }
+
+ /* Allow forcing the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ if (np->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ /* Reset the chip to erase previous misconfiguration. */
+ if (np->msg_level & NETIF_MSG_MISC)
+ printk("ASIC Control is %x.\n", (int)readl(ioaddr + ASICCtrl));
+ writel(0x007f0000 | readl(ioaddr + ASICCtrl), ioaddr + ASICCtrl);
+ if (np->msg_level & NETIF_MSG_MISC)
+ printk("ASIC Control is now %x.\n", (int)readl(ioaddr + ASICCtrl));
+
+ return dev;
+}
+
+
+
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 8191)) /* Limited by RxDMAFrameLen */
+ return -EINVAL;
+ if (netif_running(dev))
+ return -EBUSY;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
+static int eeprom_read(long ioaddr, int location)
+{
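+ /* Start a read of 'location' and poll until the EEPROM busy bit
+    (0x8000 in EECtrl) clears, then return the data word; give up
+    and return 0 after 'boguscnt' polls. */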
+ int boguscnt = 2000; /* Typical 190 ticks. */
+ writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
+ do {
+ if (! (readw(ioaddr + EECtrl) & 0x8000)) {
+ return readw(ioaddr + EEData);
+ }
+ } while (--boguscnt > 0);
+ return 0;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+ The maximum data clock rate is 2.5 MHz.
+ The timing is decoupled from the processor clock by flushing the write
+ from the CPU write buffer with a following read, and using PCI
+ transaction time. */
+#define mdio_in(mdio_addr) readb(mdio_addr)
+#define mdio_out(value, mdio_addr) writeb(value, mdio_addr)
+#define mdio_delay(mdio_addr) readb(mdio_addr)
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ mdio_out(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
+{
+ long mdio_addr = dev->base_addr + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id,
+ unsigned int location, int value)
+{
+ long mdio_addr = dev->base_addr + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ np->full_duplex = np->duplex_lock;
+ np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+
+ sundance_start(dev);
+ netif_start_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
+ "MAC Control %x, %4.4x %4.4x.\n",
+ dev->name, (int)readl(ioaddr + RxStatus),
+ (int)readw(ioaddr + TxStatus), (int)readl(ioaddr + MACCtrl0),
+ (int)readw(ioaddr + MACCtrl1), (int)readw(ioaddr + MACCtrl0));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void sundance_start(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* No reports have indicated that we need to reset the chip. */
+
+ writel(virt_to_bus(&np->rx_ring[np->cur_rx % RX_RING_SIZE]),
+ ioaddr + RxListPtr);
+ /* The Tx list pointer is written as packets are queued. */
+
+ /* Station address must be written as 16 bit words with the Kendin chip. */
+ for (i = 0; i < 6; i += 2)
+ writew((dev->dev_addr[i + 1] << 8) + dev->dev_addr[i],
+ ioaddr + StationAddr + i);
+
+ np->link_status = readb(ioaddr + MIICtrl) & 0xE0;
+ writew((np->full_duplex || (np->link_status & 0x20)) ? 0x120 : 0,
+ ioaddr + MACCtrl0);
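+ /* The maximum frame length is the MTU plus the 14 byte Ethernet header. */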
+ writew(dev->mtu + 14, ioaddr + MaxFrameSize);
+ if (dev->mtu > 2047)
+ writel(readl(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
+
+ set_rx_mode(dev);
+ writew(0, ioaddr + DownCounter);
+ /* Set the chip to poll every N*320nsec. */
+ writeb(100, ioaddr + RxDescPoll);
+ writeb(127, ioaddr + TxDescPoll);
+#if 0
+ if (np->drv_flags & KendinPktDropBug)
+ writeb(0x01, ioaddr + DebugCtrl1);
+#endif
+
+ /* Enable interrupts by setting the interrupt mask. */
+ writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
+ | StatsMax | LinkChange, ioaddr + IntrEnable);
+ writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ int duplex;
+
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+ "negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ writew(duplex ? 0x20 : 0, ioaddr + MACCtrl0);
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
+ "Tx %x Rx %x.\n",
+ dev->name, (int)readw(ioaddr + IntrEnable),
+ (int)readw(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
+ }
+ /* Note: This does not catch a 0 or 1 element stuck queue. */
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ tx_timeout(dev);
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x,"
+ " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %8.8x: ", (int)np->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)np->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", np->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Stop and restart the chip's Tx processes. */
+
+ /* Trigger an immediate transmit demand. */
+ writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
+ | StatsMax | LinkChange, ioaddr + IntrEnable);
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ np->rx_buf_sz = dev->mtu + 20;
+ if (np->rx_buf_sz < PKT_BUF_SZ)
+ np->rx_buf_sz = PKT_BUF_SZ;
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag[0].length = 0;
+ np->rx_skbuff[i] = 0;
+ }
+ /* Wrap the ring. */
+ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ np->rx_ring[i].frag[0].addr = virt_to_le32desc(skb->tail);
+ np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_desc *txdesc;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here: set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+ np->tx_skbuff[entry] = skb;
+ txdesc = &np->tx_ring[entry];
+
+ txdesc->next_desc = 0;
+ /* Note: disable the interrupt generation here before releasing. */
+ txdesc->status =
+ cpu_to_le32((entry<<2) | DescIntrOnDMADone | DescIntrOnTx | 1);
+ txdesc->frag[0].addr = virt_to_le32desc(skb->data);
+ txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
+ if (np->last_tx)
+ np->last_tx->next_desc = virt_to_le32desc(txdesc);
+ np->last_tx = txdesc;
+ np->cur_tx++;
+
+ /* On some architectures: explicitly flush cache lines here. */
+
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_QUEUE_LEN - 2) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ /* Side effect: The read wakes the potentially-idle transmit channel. */
+ if (readl(dev->base_addr + TxListPtr) == 0)
+ writel(virt_to_bus(&np->tx_ring[entry]), dev->base_addr + TxListPtr);
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d len %ld queued in slot %u.\n",
+ dev->name, np->cur_tx, skb->len, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np;
+ long ioaddr;
+ int boguscnt;
+
+ ioaddr = dev->base_addr;
+ np = (struct netdev_private *)dev->priv;
+ boguscnt = np->max_interrupt_work;
+
+ do {
+ int intr_status = readw(ioaddr + IntrStatus);
+ if ((intr_status & ~IntrRxDone) == 0 || intr_status == 0xffff)
+ break;
+
+ writew(intr_status & (IntrRxDMADone | IntrPCIErr |
+ IntrDrvRqst |IntrTxDone|IntrTxDMADone |
+ StatsMax | LinkChange),
+ ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status & IntrRxDMADone)
+ netdev_rx(dev);
+
+ if (intr_status & IntrTxDone) {
+ int txboguscnt = 32;
+ int tx_status = readw(ioaddr + TxStatus);
+ while (tx_status & 0x80) {
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk("%s: Transmit status is %4.4x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x1e) {
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk("%s: Transmit error status %4.4x.\n",
+ dev->name, tx_status);
+ np->stats.tx_errors++;
+ if (tx_status & 0x10) np->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (tx_status & 0x08) np->stats.collisions16++;
+#else
+ if (tx_status & 0x08) np->stats.collisions++;
+#endif
+ if (tx_status & 0x04) np->stats.tx_fifo_errors++;
+ if (tx_status & 0x02) np->stats.tx_window_errors++;
+ /* This reset has not been verified! */
+ if (tx_status & 0x10) { /* Reset the Tx. */
+ writel(0x001c0000 | readl(ioaddr + ASICCtrl),
+ ioaddr + ASICCtrl);
+#if 0 /* Do we need to reset the Tx pointer here? */
+ writel(virt_to_bus(&np->tx_ring[np->dirty_tx]),
+ dev->base_addr + TxListPtr);
+#endif
+ }
+ if (tx_status & 0x1e) /* Restart the Tx. */
+ writew(TxEnable, ioaddr + MACCtrl1);
+ }
+ /* Yup, this is a documentation bug. It cost me *hours*. */
+ writew(0, ioaddr + TxStatus);
+ if (--txboguscnt < 0)
+ break;
+ tx_status = readw(ioaddr + TxStatus);
+ }
+ }
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if ( ! (np->tx_ring[entry].status & cpu_to_le32(DescTxDMADone)))
+ break;
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrDrvRqst | IntrPCIErr | LinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ int intr_clear = readw(ioaddr + IntrClear);
+ get_stats(dev);
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x / 0x%4.4x .. 0x%4.4x.\n",
+ dev->name, intr_status, intr_clear,
+ (int)readw(ioaddr + IntrClear));
+ /* Re-enable us in 3.2msec. */
+ writew(1000, ioaddr + DownCounter);
+ writew(IntrDrvRqst, ioaddr + IntrEnable);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readw(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (np->rx_head_desc->status & cpu_to_le32(DescOwn)) {
+ struct netdev_desc *desc = np->rx_head_desc;
+ u32 frame_status = le32_to_cpu(desc->status);
+ int pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if (frame_status & 0x001f4000) {
+ /* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ frame_status);
+ np->stats.rx_errors++;
+ if (frame_status & 0x00100000) np->stats.rx_length_errors++;
+ if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
+ if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
+ if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
+ if (frame_status & 0x00100000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame,"
+ " status %8.8x.\n",
+ dev->name, frame_status);
+ }
+ } else {
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ } else {
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ np->rx_ring[entry].frag[0].addr = virt_to_le32desc(skb->tail);
+ }
+ /* Perhaps we need not reset this field. */
+ np->rx_ring[entry].frag[0].length =
+ cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[entry].status = 0;
+ }
+
+ /* No need to restart Rx engine, it will poll. */
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ if (intr_status & IntrDrvRqst) {
+ /* Stop the down counter and turn interrupts back on. */
+ printk(KERN_WARNING "%s: Turning interrupts back on.\n", dev->name);
+ writew(0, ioaddr + DownCounter);
+ writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst |
+ IntrTxDone | StatsMax | LinkChange, ioaddr + IntrEnable);
+ }
+ if (intr_status & LinkChange) {
+ int new_status = readb(ioaddr + MIICtrl) & 0xE0;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
+ " %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], 4),
+ mdio_read(dev, np->phys[0], 5));
+ if ((np->link_status ^ new_status) & 0x80) {
+ if (new_status & 0x80)
+ netif_link_up(dev);
+ else
+ netif_link_down(dev);
+ }
+ np->link_status = new_status;
+ check_duplex(dev);
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* We must do a global reset of DMA to continue. */
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ if (readw(ioaddr + StationAddr) == 0xffff)
+ return &np->stats;
+
+ /* We do not spinlock statistics.
+ A race window exists only if the adds are non-atomic; the error counts
+ are typically zero, and the statistics are non-critical. */
+ np->stats.rx_missed_errors += readb(ioaddr + RxMissed);
+ np->stats.tx_packets += readw(ioaddr + TxFramesOK);
+ np->stats.rx_packets += readw(ioaddr + RxFramesOK);
+ np->stats.collisions += readb(ioaddr + StatsLateColl);
+ np->stats.collisions += readb(ioaddr + StatsMultiColl);
+ np->stats.collisions += readb(ioaddr + StatsOneColl);
+ readb(ioaddr + StatsCarrierError);
+ readb(ioaddr + StatsTxDefer);
+ for (i = StatsTxXSDefer; i <= StatsMcastRx; i++)
+ readb(ioaddr + i);
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
+ np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
+ np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
+ np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;
+#else
+ readw(ioaddr + TxOctetsLow);
+ readw(ioaddr + TxOctetsHigh);
+ readw(ioaddr + RxOctetsLow);
+ readw(ioaddr + RxOctetsHigh);
+#endif
+
+ return &np->stats;
+}
+
+/* The little-endian AUTODIN II ethernet CRC calculations.
+ A big-endian version is also available.
+ This is slow but compact code. Do not use this routine for bulk data,
+ use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c.
+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
+ them. Select the endian-ness that results in minimal calculations.
+*/
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = ~0; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
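+
+/* Usage sketch: set_rx_mode() below hashes each multicast address with
+   ether_crc_le() and uses the low six bits of the CRC as the bit index
+   into the chip's 64 bit multicast hash filter:
+       set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f, mc_filter);
+*/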
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 mc_filter[4]; /* Multicast hash filter */
+ u32 rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, ~0, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else if (dev->mc_count) {
+ struct dev_mc_list *mclist;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
+ } else {
+ writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ return;
+ }
+ for (i = 0; i < 4; i++)
+ writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
+ writeb(rx_mode, ioaddr + RxMode);
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
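+ /* Driver-private ioctls: SIOCGPARAMS reads and SIOCSPARAMS (with
+    CAP_NET_ADMIN) sets the msg_level, multicast_filter_limit,
+    max_interrupt_work and rx_copybreak tuning values. */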
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int sundance_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writew(0x0000, ioaddr + IntrEnable);
+ writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+ break;
+ case DRV_RESUME:
+ sundance_start(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
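+ /* Unlink this interface from the driver's list of installed devices. */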
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ case DRV_PWR_WakeOn:
+ writeb(readb(ioaddr + WakeEvent) | 2, ioaddr + WakeEvent);
+ /* Fall through. */
+ case DRV_PWR_DOWN:
+ case DRV_PWR_UP:
+ acpi_set_pwr_state(np->pci_dev, event==DRV_PWR_UP ? ACPI_D0:ACPI_D3);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, (int)readw(ioaddr + TxStatus),
+ (int)readl(ioaddr + RxStatus), (int)readw(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writew(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+
+ del_timer(&np->timer);
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
+ np->tx_ring[i].frag[0].length);
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
+ np->rx_ring[i].frag[0].length);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&sundance_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&sundance_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)root_net_dev->base_addr);
+#endif
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` sundance.o"
+ * compile-cmd1: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c sundance.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c sundance.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/tlan.c b/linux/src/drivers/net/tlan.c
new file mode 100644
index 0000000..fedc11f
--- /dev/null
+++ b/linux/src/drivers/net/tlan.c
@@ -0,0 +1,2863 @@
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.c
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1998 James Banks
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ ** This file is best viewed/edited with columns>=132.
+ *
+ ** Useful (if not required) reading:
+ *
+ * Texas Instruments, ThunderLAN Programmer's Guide,
+ * TI Literature Number SPWU013A
+ * available in PDF format from www.ti.com
+ * Level One, LXT901 and LXT970 Data Sheets
+ * available in PDF format from www.level1.com
+ * National Semiconductor, DP83840A Data Sheet
+ * available in PDF format from www.national.com
+ * Microchip Technology, 24C01A/02A/04A Data Sheet
+ * available in PDF format from www.microchip.com
+ *
+ ********************************************************************/
+
+
+#include <linux/module.h>
+
+#include "tlan.h"
+
+#include <linux/bios32.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+
+
+typedef u32 (TLanIntVectorFunc)( struct device *, u16 );
+
+
+#ifdef MODULE
+
+static struct device *TLanDevices = NULL;
+static int TLanDevicesInstalled = 0;
+
+#endif
+
+
+static int debug = 0;
+static int aui = 0;
+static int sa_int = 0;
+static int bbuf = 0;
+static int duplex = 0;
+static int speed = 0;
+static u8 *TLanPadBuffer;
+static char TLanSignature[] = "TLAN";
+static int TLanVersionMajor = 1;
+static int TLanVersionMinor = 0;
+
+
+static TLanAdapterEntry TLanAdapterList[] = {
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10,
+ "Compaq Netelligent 10 T PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100,
+ "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETFLEX_3P_INTEGRATED,
+ "Compaq Integrated NetFlex-3/P",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETFLEX_3P,
+ "Compaq NetFlex-3/P",
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETFLEX_3P_BNC,
+ "Compaq NetFlex-3/P",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100_PROLIANT,
+ "Compaq Netelligent Integrated 10/100 TX UTP",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100_DUAL,
+ "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_DESKPRO_4000_5233MMX,
+ "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { PCI_VENDOR_ID_OLICOM,
+ PCI_DEVICE_ID_OLICOM_OC2183,
+ "Olicom OC-2183/2185",
+ TLAN_ADAPTER_USE_INTERN_10,
+ 0xF8
+ },
+ { PCI_VENDOR_ID_OLICOM,
+ PCI_DEVICE_ID_OLICOM_OC2325,
+ "Olicom OC-2325",
+ TLAN_ADAPTER_UNMANAGED_PHY,
+ 0xF8
+ },
+ { PCI_VENDOR_ID_OLICOM,
+ PCI_DEVICE_ID_OLICOM_OC2326,
+ "Olicom OC-2326",
+ TLAN_ADAPTER_USE_INTERN_10,
+ 0xF8
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
+ "Compaq Netelligent 10/100 TX UTP",
+ TLAN_ADAPTER_ACTIVITY_LED,
+ 0x83
+ },
+ { PCI_VENDOR_ID_COMPAQ,
+ PCI_DEVICE_ID_NETELLIGENT_10_T2,
+ "Compaq Netelligent 10 T/2 PCI UTP/Coax",
+ TLAN_ADAPTER_NONE,
+ 0x83
+ },
+ { 0,
+ 0,
+ NULL,
+ 0,
+ 0
+ } /* End of List */
+};
+
+
+static int TLan_PciProbe( u8 *, u8 *, u8 *, u8 *, u32 *, u32 * );
+static int TLan_Init( struct device * );
+static int TLan_Open(struct device *dev);
+static int TLan_StartTx(struct sk_buff *, struct device *);
+static void TLan_HandleInterrupt(int, void *, struct pt_regs *);
+static int TLan_Close(struct device *);
+static struct net_device_stats *TLan_GetStats( struct device * );
+static void TLan_SetMulticastList( struct device * );
+
+static u32 TLan_HandleInvalid( struct device *, u16 );
+static u32 TLan_HandleTxEOF( struct device *, u16 );
+static u32 TLan_HandleStatOverflow( struct device *, u16 );
+static u32 TLan_HandleRxEOF( struct device *, u16 );
+static u32 TLan_HandleDummy( struct device *, u16 );
+static u32 TLan_HandleTxEOC( struct device *, u16 );
+static u32 TLan_HandleStatusCheck( struct device *, u16 );
+static u32 TLan_HandleRxEOC( struct device *, u16 );
+
+static void TLan_Timer( unsigned long );
+
+static void TLan_ResetLists( struct device * );
+static void TLan_FreeLists( struct device * );
+static void TLan_PrintDio( u16 );
+static void TLan_PrintList( TLanList *, char *, int );
+static void TLan_ReadAndClearStats( struct device *, int );
+static void TLan_ResetAdapter( struct device * );
+static void TLan_FinishReset( struct device * );
+static void TLan_SetMac( struct device *, int areg, char *mac );
+
+static void TLan_PhyPrint( struct device * );
+static void TLan_PhyDetect( struct device * );
+static void TLan_PhyPowerDown( struct device * );
+static void TLan_PhyPowerUp( struct device * );
+static void TLan_PhyReset( struct device * );
+static void TLan_PhyStartLink( struct device * );
+static void TLan_PhyFinishAutoNeg( struct device * );
+/*
+static int TLan_PhyNop( struct device * );
+static int TLan_PhyInternalCheck( struct device * );
+static int TLan_PhyInternalService( struct device * );
+static int TLan_PhyDp83840aCheck( struct device * );
+*/
+
+static int TLan_MiiReadReg( struct device *, u16, u16, u16 * );
+static void TLan_MiiSendData( u16, u32, unsigned );
+static void TLan_MiiSync( u16 );
+static void TLan_MiiWriteReg( struct device *, u16, u16, u16 );
+
+static void TLan_EeSendStart( u16 );
+static int TLan_EeSendByte( u16, u8, int );
+static void TLan_EeReceiveByte( u16, u8 *, int );
+static int TLan_EeReadByte( struct device *, u8, u8 * );
+
+
+static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+ TLan_HandleInvalid,
+ TLan_HandleTxEOF,
+ TLan_HandleStatOverflow,
+ TLan_HandleRxEOF,
+ TLan_HandleDummy,
+ TLan_HandleTxEOC,
+ TLan_HandleStatusCheck,
+ TLan_HandleRxEOC
+};
+
+static inline void
+TLan_SetTimer( struct device *dev, u32 ticks, u32 type )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+
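+ /* Arm the timer only if one is not already pending; the cli()/sti()
+    pair keeps the check-and-install of the handler atomic with respect
+    to interrupts. */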
+ cli();
+ if ( priv->timer.function != NULL ) {
+ sti();
+ return;
+ }
+ priv->timer.function = &TLan_Timer;
+ sti();
+
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + ticks;
+ priv->timerSetAt = jiffies;
+ priv->timerType = type;
+ add_timer( &priv->timer );
+
+} /* TLan_SetTimer */
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Primary Functions
+
+ These functions are more or less common to all Linux network drivers.
+
+******************************************************************************
+*****************************************************************************/
+
+
+#ifdef MODULE
+
+ /***************************************************************
+ * init_module
+ *
+ * Returns:
+ * 0 if module installed ok, non-zero if not.
+ * Parms:
+ * None
+ *
+ * This function begins the setup of the driver by creating a
+ * pad buffer, finding all TLAN devices (matching
+ * TLanAdapterList entries), and creating and initializing a
+ * device structure for each adapter.
+ *
+ **************************************************************/
+
+extern int init_module(void)
+{
+ TLanPrivateInfo *priv;
+ u8 bus;
+ struct device *dev;
+ size_t dev_size;
+ u8 dfn;
+ u32 index;
+ int failed;
+ int found;
+ u32 io_base;
+ u8 irq;
+ u8 rev;
+
+ printk( "TLAN driver, v%d.%d, (C) 1997-8 Caldera, Inc.\n",
+ TLanVersionMajor,
+ TLanVersionMinor
+ );
+ TLanPadBuffer = (u8 *) kmalloc( TLAN_MIN_FRAME_SIZE,
+ ( GFP_KERNEL | GFP_DMA )
+ );
+ if ( TLanPadBuffer == NULL ) {
+ printk( "TLAN: Could not allocate memory for pad buffer.\n" );
+ return -ENOMEM;
+ }
+
+ memset( TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE );
+
+ dev_size = sizeof(struct device) + sizeof(TLanPrivateInfo);
+
+ while ( ( found = TLan_PciProbe( &bus, &dfn, &irq, &rev, &io_base, &index ) ) ) {
+ dev = (struct device *) kmalloc( dev_size, GFP_KERNEL );
+ if ( dev == NULL ) {
+ printk( "TLAN: Could not allocate memory for device.\n" );
+ continue;
+ }
+ memset( dev, 0, dev_size );
+
+ dev->priv = priv = ( (void *) dev ) + sizeof(struct device);
+ dev->name = priv->devName;
+ strcpy( priv->devName, " " );
+ dev->base_addr = io_base;
+ dev->irq = irq;
+ dev->init = TLan_Init;
+
+ priv->adapter = &TLanAdapterList[index];
+ priv->adapterRev = rev;
+ priv->aui = aui;
+ if ( ( duplex != 1 ) && ( duplex != 2 ) ) {
+ duplex = 0;
+ }
+ priv->duplex = duplex;
+ if ( ( speed != 10 ) && ( speed != 100 ) ) {
+ speed = 0;
+ }
+ priv->speed = speed;
+ priv->sa_int = sa_int;
+ priv->debug = debug;
+
+ ether_setup( dev );
+
+ failed = register_netdev( dev );
+
+ if ( failed ) {
+ printk( "TLAN: Could not register device.\n" );
+ kfree( dev );
+ } else {
+ priv->nextDevice = TLanDevices;
+ TLanDevices = dev;
+ TLanDevicesInstalled++;
+ printk("TLAN: %s irq=%2d io=%04x, %s, Rev. %d\n",
+ dev->name,
+ (int) dev->irq,
+ (int) dev->base_addr,
+ priv->adapter->deviceLabel,
+ priv->adapterRev );
+ }
+ }
+
+ /* printk( "TLAN: Found %d device(s).\n", TLanDevicesInstalled ); */
+
+ return ( ( TLanDevicesInstalled > 0 ) ? 0 : -ENODEV );
+
+} /* init_module */
+
+
+
+
+ /***************************************************************
+ * cleanup_module
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * None
+ *
+ * Goes through the TLanDevices list and frees the device
+ * structs and memory associated with each device (lists
+ * and buffers). It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
+
+extern void cleanup_module(void)
+{
+ struct device *dev;
+ TLanPrivateInfo *priv;
+
+ while ( TLanDevicesInstalled ) {
+ dev = TLanDevices;
+ priv = (TLanPrivateInfo *) dev->priv;
+ if ( priv->dmaStorage ) {
+ kfree( priv->dmaStorage );
+ }
+ release_region( dev->base_addr, 0x10 );
+ unregister_netdev( dev );
+ TLanDevices = priv->nextDevice;
+ kfree( dev );
+ TLanDevicesInstalled--;
+ }
+ kfree( TLanPadBuffer );
+
+} /* cleanup_module */
+
+
+#else /* MODULE */
+
+
+
+
+ /***************************************************************
+ * tlan_probe
+ *
+ * Returns:
+ * 0 on success, error code on error
+ * Parms:
+ * dev device struct to use if adapter is
+ * found.
+ *
+ * The name is lower case to fit in with all the rest of
+ * the netcard_probe names. This function looks for a/
+ * another TLan based adapter, setting it up with the
+ * provided device struct if one is found.
+ *
+ **************************************************************/
+
+extern int tlan_probe( struct device *dev )
+{
+ TLanPrivateInfo *priv;
+ static int pad_allocated = 0;
+ int found;
+ u8 bus, dfn, irq, rev;
+ u32 io_base, index;
+
+ found = TLan_PciProbe( &bus, &dfn, &irq, &rev, &io_base, &index );
+
+ if ( ! found ) {
+ return -ENODEV;
+ }
+
+ dev->priv = kmalloc( sizeof(TLanPrivateInfo), GFP_KERNEL );
+
+ if ( dev->priv == NULL ) {
+ printk( "TLAN: Could not allocate memory for device.\n" );
+ return -ENOMEM;
+ }
+
+ memset( dev->priv, 0, sizeof(TLanPrivateInfo) );
+
+ if ( ! pad_allocated ) {
+ TLanPadBuffer = (u8 *) kmalloc( TLAN_MIN_FRAME_SIZE,
+// ( GFP_KERNEL | GFP_DMA )
+ ( GFP_KERNEL )
+ );
+ if ( TLanPadBuffer == NULL ) {
+ printk( "TLAN: Could not allocate memory for padding.\n" );
+ kfree( dev->priv );
+ return -ENOMEM;
+ } else {
+ pad_allocated = 1;
+ memset( TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE );
+ }
+ }
+
+ priv = (TLanPrivateInfo *) dev->priv;
+
+ dev->name = priv->devName;
+ strcpy( priv->devName, " " );
+
+ dev = init_etherdev( dev, sizeof(TLanPrivateInfo) );
+
+ dev->base_addr = io_base;
+ dev->irq = irq;
+
+
+ priv->adapter = &TLanAdapterList[index];
+ priv->adapterRev = rev;
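+ /* Decode the configuration bits passed in via dev->mem_start:
+    bit 0 = AUI, bit 1 = SA_INTERRUPT, bits 2-3 = duplex,
+    bits 4-5 = speed (mapped to 10/100 below); dev->mem_end
+    selects the debug level. */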
+ priv->aui = dev->mem_start & 0x01;
+ priv->duplex = ( ( dev->mem_start & 0x0C ) == 0x0C ) ? 0 : ( dev->mem_start & 0x0C ) >> 2;
+ priv->speed = ( ( dev->mem_start & 0x30 ) == 0x30 ) ? 0 : ( dev->mem_start & 0x30 ) >> 4;
+ if ( priv->speed == 0x1 ) {
+ priv->speed = TLAN_SPEED_10;
+ } else if ( priv->speed == 0x2 ) {
+ priv->speed = TLAN_SPEED_100;
+ }
+ priv->sa_int = dev->mem_start & 0x02;
+ priv->debug = dev->mem_end;
+
+
+ printk("TLAN %d.%d: %s irq=%2d io=%04x, %s, Rev. %d\n",
+ TLanVersionMajor,
+ TLanVersionMinor,
+ dev->name,
+ (int) irq,
+ io_base,
+ priv->adapter->deviceLabel,
+ priv->adapterRev );
+
+ TLan_Init( dev );
+
+ return 0;
+
+} /* tlan_probe */
+
+
+#endif /* MODULE */
+
+
+
+
+ /***************************************************************
+ * TLan_PciProbe
+ *
+ * Returns:
+ * 1 if another TLAN card was found, 0 if not.
+ * Parms:
+ * pci_bus The PCI bus the card was found
+ * on.
+ * pci_dfn The PCI whatever the card was
+ * found at.
+ * pci_irq The IRQ of the found adapter.
+ * pci_rev The revision of the adapter.
+ * pci_io_base The first IO port used by the
+ * adapter.
+ * dl_ix The index in the device list
+ * of the adapter.
+ *
+ * This function searches for an adapter with PCI vendor
+ * and device IDs matching those in the TLanAdapterList.
+ * The function 'remembers' the last device it found,
+ * and so finds a new device (if any more are to be found)
+ * each time the function is called. It then looks up
+ * pertinent PCI info and returns it to the caller.
+ *
+ **************************************************************/
+
+int TLan_PciProbe( u8 *pci_bus, u8 *pci_dfn, u8 *pci_irq, u8 *pci_rev, u32 *pci_io_base, u32 *dl_ix )
+{
+ static int dl_index = 0;
+ static int pci_index = 0;
+
+ int not_found;
+ u8 pci_latency;
+ u16 pci_command;
+ int reg;
+
+
+ if ( ! pcibios_present() ) {
+ printk( "TLAN: PCI Bios not present.\n" );
+ return 0;
+ }
+
+ for (; TLanAdapterList[dl_index].vendorId != 0; dl_index++) {
+
+ not_found = pcibios_find_device(
+ TLanAdapterList[dl_index].vendorId,
+ TLanAdapterList[dl_index].deviceId,
+ pci_index,
+ pci_bus,
+ pci_dfn
+ );
+
+ if ( ! not_found ) {
+
+ TLAN_DBG(
+ TLAN_DEBUG_GNRL,
+ "TLAN: found: Vendor Id = 0x%hx, Device Id = 0x%hx\n",
+ TLanAdapterList[dl_index].vendorId,
+ TLanAdapterList[dl_index].deviceId
+ );
+
+ pcibios_read_config_byte ( *pci_bus, *pci_dfn, PCI_REVISION_ID, pci_rev);
+ pcibios_read_config_byte ( *pci_bus, *pci_dfn, PCI_INTERRUPT_LINE, pci_irq);
+ pcibios_read_config_word ( *pci_bus, *pci_dfn, PCI_COMMAND, &pci_command);
+ pcibios_read_config_dword( *pci_bus, *pci_dfn, PCI_BASE_ADDRESS_0, pci_io_base);
+ pcibios_read_config_byte ( *pci_bus, *pci_dfn, PCI_LATENCY_TIMER, &pci_latency);
+
+ if (pci_latency < 0x10) {
+ pcibios_write_config_byte( *pci_bus, *pci_dfn, PCI_LATENCY_TIMER, 0xff);
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: Setting latency timer to max.\n");
+ }
+
+ for ( reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg +=4 ) {
+ pcibios_read_config_dword( *pci_bus, *pci_dfn, reg, pci_io_base);
+ if ((pci_command & PCI_COMMAND_IO) && (*pci_io_base & 0x3)) {
+ *pci_io_base &= PCI_BASE_ADDRESS_IO_MASK;
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: IO mapping is available at %x.\n", *pci_io_base);
+ break;
+ } else {
+ *pci_io_base = 0;
+ }
+ }
+
+ if ( *pci_io_base == 0 )
+ printk("TLAN: IO mapping not available, ignoring device.\n");
+
+ if ( ! ( pci_command & PCI_COMMAND_MASTER ) ) {
+ pcibios_write_config_word ( *pci_bus, *pci_dfn, PCI_COMMAND, pci_command | PCI_COMMAND_MASTER );
+ printk( "TLAN: Activating PCI bus mastering for this device.\n" );
+ }
+
+ pci_index++;
+
+ if ( *pci_io_base ) {
+ *dl_ix = dl_index;
+ return 1;
+ }
+
+ } else {
+ pci_index = 0;
+ }
+ }
+
+ return 0;
+
+} /* TLan_PciProbe */
+
+
+
+
+ /***************************************************************
+ * TLan_Init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ * and assigns the device's methods.
+ *
+ **************************************************************/
+
+int TLan_Init( struct device *dev )
+{
+ int dma_size;
+ int err;
+ int i;
+ TLanPrivateInfo *priv;
+
+ priv = (TLanPrivateInfo *) dev->priv;
+
+ err = check_region( dev->base_addr, 0x10 );
+ if ( err ) {
+ printk( "TLAN: %s: Io port region 0x%lx size 0x%x in use.\n",
+ dev->name,
+ dev->base_addr,
+ 0x10 );
+ return -EIO;
+ }
+ request_region( dev->base_addr, 0x10, TLanSignature );
+
+ if ( bbuf ) {
+ dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
+ * ( sizeof(TLanList) + TLAN_MAX_FRAME_SIZE );
+ } else {
+ dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
+ * ( sizeof(TLanList) );
+ }
+
+ priv->dmaStorage = kmalloc( dma_size, GFP_KERNEL | GFP_DMA );
+ if ( priv->dmaStorage == NULL ) {
+ printk( "TLAN: Could not allocate lists and buffers for %s.\n",
+ dev->name );
+ return -ENOMEM;
+ }
+ memset( priv->dmaStorage, 0, dma_size );
+ priv->rxList = (TLanList *)
+ ( ( ( (u32) priv->dmaStorage ) + 7 ) & 0xFFFFFFF8 );
+ priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
+
+ if ( bbuf ) {
+ priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
+ priv->txBuffer = priv->rxBuffer
+ + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
+ }
+
+ err = 0;
+ for ( i = 0; i < 6 ; i++ )
+ err |= TLan_EeReadByte( dev,
+ (u8) priv->adapter->addrOfs + i,
+ (u8 *) &dev->dev_addr[i] );
+ if ( err ) {
+ printk( "TLAN: %s: Error reading MAC from eeprom: %d\n",
+ dev->name,
+ err );
+ }
+
+ dev->addr_len = 6;
+
+ dev->open = &TLan_Open;
+ dev->hard_start_xmit = &TLan_StartTx;
+ dev->stop = &TLan_Close;
+ dev->get_stats = &TLan_GetStats;
+ dev->set_multicast_list = &TLan_SetMulticastList;
+
+
+ return 0;
+
+} /* TLan_Init */
+
+
+
+
+ /***************************************************************
+ * TLan_Open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
+
+int TLan_Open( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int err;
+
+ priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
+ if ( priv->sa_int ) {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: Using SA_INTERRUPT\n" );
+ err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ | SA_INTERRUPT, TLanSignature, dev );
+ } else {
+ err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ, TLanSignature, dev );
+ }
+ if ( err ) {
+ printk( "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq );
+ return -EAGAIN;
+ }
+
+ MOD_INC_USE_COUNT;
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ dev->start = 1;
+
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ TLan_ResetLists( dev );
+ TLan_ReadAndClearStats( dev, TLAN_IGNORE );
+ TLan_ResetAdapter( dev );
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Opened. TLAN Chip Rev: %x\n", dev->name, priv->tlanRev );
+
+ return 0;
+
+} /* TLan_Open */
+
+
+
+
+ /***************************************************************
+ * TLan_StartTx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+int TLan_StartTx( struct sk_buff *skb, struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ TLanList *tail_list;
+ u8 *tail_buffer;
+ int pad;
+
+ if ( ! priv->phyOnline ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: %s PHY is not ready\n", dev->name );
+ dev_kfree_skb( skb, FREE_WRITE );
+ return 0;
+ }
+
+ tail_list = priv->txList + priv->txTail;
+
+ if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail );
+ dev->tbusy = 1;
+ priv->txBusyCount++;
+ return 1;
+ }
+
+ tail_list->forward = 0;
+
+ if ( bbuf ) {
+ tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
+ memcpy( tail_buffer, skb->data, skb->len );
+ } else {
+ tail_list->buffer[0].address = virt_to_bus( skb->data );
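+		/* Stash the skb pointer in the last buffer descriptor so
+		   it can be freed when the Tx EOF interrupt is handled. */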
+ tail_list->buffer[9].address = (u32) skb;
+ }
+
+ pad = TLAN_MIN_FRAME_SIZE - skb->len;
+
+ if ( pad > 0 ) {
+ tail_list->frameSize = (u16) skb->len + pad;
+ tail_list->buffer[0].count = (u32) skb->len;
+ tail_list->buffer[1].count = TLAN_LAST_BUFFER | (u32) pad;
+ tail_list->buffer[1].address = virt_to_bus( TLanPadBuffer );
+ } else {
+ tail_list->frameSize = (u16) skb->len;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+ tail_list->buffer[1].count = 0;
+ tail_list->buffer[1].address = 0;
+ }
+
+ cli();
+ tail_list->cStat = TLAN_CSTAT_READY;
+ if ( ! priv->txInProgress ) {
+ priv->txInProgress = 1;
+ outw( 0x4, dev->base_addr + TLAN_HOST_INT );
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
+ outl( virt_to_bus( tail_list ), dev->base_addr + TLAN_CH_PARM );
+ outl( TLAN_HC_GO | TLAN_HC_ACK, dev->base_addr + TLAN_HOST_CMD );
+ } else {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail );
+ if ( priv->txTail == 0 ) {
+ ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = virt_to_bus( tail_list );
+ } else {
+ ( priv->txList + ( priv->txTail - 1 ) )->forward = virt_to_bus( tail_list );
+ }
+ }
+ sti();
+
+ CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+
+ if ( bbuf ) {
+ dev_kfree_skb( skb, FREE_WRITE );
+ }
+
+ dev->trans_start = jiffies;
+ return 0;
+
+} /* TLan_StartTx */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleInterrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+	 *		regs		The saved processor registers
+	 *				(unused by this handler).
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+	 *	acknowledges the interrupt to the adapter (thus
+	 *	re-enabling adapter interrupts).
+ *
+ **************************************************************/
+
+void TLan_HandleInterrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ u32 ack;
+ struct device *dev;
+ u32 host_cmd;
+ u16 host_int;
+ int type;
+
+ dev = (struct device *) dev_id;
+
+ cli();
+ if ( dev->interrupt ) {
+ printk( "TLAN: Re-entering interrupt handler for %s: %d.\n" , dev->name, dev->interrupt );
+ }
+ dev->interrupt++;
+
+ host_int = inw( dev->base_addr + TLAN_HOST_INT );
+ outw( host_int, dev->base_addr + TLAN_HOST_INT );
+
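+	/* The interrupt type field of HOST_INT selects the handler
+	   from the TLanIntVector table. */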
+ type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
+
+ ack = TLanIntVector[type]( dev, host_int );
+
+ if ( ack ) {
+ host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
+ outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+ }
+
+ dev->interrupt--;
+ sti();
+
+} /* TLan_HandleInterrupt */
+
+
+
+
+ /***************************************************************
+ * TLan_Close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+	 *	its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
+
+int TLan_Close(struct device *dev)
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+
+ dev->start = 0;
+ dev->tbusy = 1;
+
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ if ( priv->timer.function != NULL )
+ del_timer( &priv->timer );
+ free_irq( dev->irq, dev );
+ TLan_FreeLists( dev );
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: Device %s closed.\n", dev->name );
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+
+} /* TLan_Close */
+
+
+
+
+ /***************************************************************
+ * TLan_GetStats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+	 *	This function updates the device's statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
+
+struct net_device_stats *TLan_GetStats( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int i;
+
+ /* Should only read stats if open ? */
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: %s EOC count = %d\n", dev->name, priv->rxEocCount );
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: %s Busy count = %d\n", dev->name, priv->txBusyCount );
+ if ( debug & TLAN_DEBUG_GNRL ) {
+ TLan_PrintDio( dev->base_addr );
+ TLan_PhyPrint( dev );
+ }
+ if ( debug & TLAN_DEBUG_LIST ) {
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
+ TLan_PrintList( priv->rxList + i, "RX", i );
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
+ TLan_PrintList( priv->txList + i, "TX", i );
+ }
+
+	return &priv->stats;
+
+} /* TLan_GetStats */
+
+
+
+
+ /***************************************************************
+ * TLan_SetMulticastList
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+	 *	This function sets the TLAN adapter to various receive
+	 *	modes.  If the IFF_PROMISC flag is set, promiscuous
+	 *	mode is activated.  Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
+
+void TLan_SetMulticastList( struct device *dev )
+{
+ struct dev_mc_list *dmi = dev->mc_list;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ int i;
+ u32 offset;
+ u8 tmp;
+
+ if ( dev->flags & IFF_PROMISC ) {
+ tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+ } else {
+ tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
+ if ( dev->flags & IFF_ALLMULTI ) {
+ for ( i = 0; i < 3; i++ )
+ TLan_SetMac( dev, i + 1, NULL );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+ } else {
+ for ( i = 0; i < dev->mc_count; i++ ) {
+ if ( i < 3 ) {
+ TLan_SetMac( dev, i + 1, (char *) &dmi->dmi_addr );
+ } else {
+ offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
+ if ( offset < 32 )
+ hash1 |= ( 1 << offset );
+ else
+ hash2 |= ( 1 << ( offset - 32 ) );
+ }
+ dmi = dmi->next;
+ }
+ for ( ; i < 3; i++ )
+ TLan_SetMac( dev, i + 1, NULL );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
+ TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+ }
+ }
+
+} /* TLan_SetMulticastList */
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Interrupt Vectors and Table
+
+ Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
+	Programmer's Guide" for more information on handling interrupts
+ generated by TLAN based adapters.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_HandleInvalid
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles invalid interrupts. This should
+ * never happen unless some other adapter is trying to use
+ * the IRQ line assigned to the device.
+ *
+ **************************************************************/
+
+u32 TLan_HandleInvalid( struct device *dev, u16 host_int )
+{
+ host_int = 0;
+ /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */
+ return 0;
+
+} /* TLan_HandleInvalid */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleTxEOF
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+	 *	contents of a buffer.  It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+u32 TLan_HandleTxEOF( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int eoc = 0;
+ TLanList *head_list;
+ u32 ack = 1;
+
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", priv->txHead, priv->txTail );
+ host_int = 0;
+ head_list = priv->txList + priv->txHead;
+
+ if ( ! bbuf ) {
+ dev_kfree_skb( (struct sk_buff *) head_list->buffer[9].address, FREE_WRITE );
+ head_list->buffer[9].address = 0;
+ }
+
+ if ( head_list->cStat & TLAN_CSTAT_EOC )
+ eoc = 1;
+ if (!(head_list->cStat & TLAN_CSTAT_FRM_CMP)) {
+ printk( "TLAN: Received interrupt for uncompleted TX frame.\n" );
+ }
+
+#if LINUX_KERNEL_VERSION > 0x20100
+	priv->stats.tx_bytes += head_list->frameSize;
+#endif
+
+ head_list->cStat = TLAN_CSTAT_UNUSED;
+ dev->tbusy = 0;
+ CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
+ if ( eoc ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail );
+ head_list = priv->txList + priv->txHead;
+ if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->txInProgress = 0;
+ }
+ }
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
+ if ( priv->timer.function == NULL ) {
+ TLan_SetTimer( dev, TLAN_TIMER_ACT_DELAY, TLAN_TIMER_ACTIVITY );
+ } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
+ priv->timerSetAt = jiffies;
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleTxEOF */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleStatOverflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
+
+u32 TLan_HandleStatOverflow( struct device *dev, u16 host_int )
+{
+ host_int = 0;
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+
+ return 1;
+
+} /* TLan_HandleStatOverflow */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleRxEOF
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines the bounce buffer the frame has
+ * been loaded into, creates a new sk_buff big enough to
+ * hold the frame, and sends it to protocol stack. It
+ * then resets the used buffer and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+u32 TLan_HandleRxEOF( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 ack = 1;
+ int eoc = 0;
+ u8 *head_buffer;
+ TLanList *head_list;
+ struct sk_buff *skb;
+ TLanList *tail_list;
+ void *t;
+
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail );
+ host_int = 0;
+ head_list = priv->rxList + priv->rxHead;
+ tail_list = priv->rxList + priv->rxTail;
+
+ if ( head_list->cStat & TLAN_CSTAT_EOC ) {
+ eoc = 1;
+ }
+
+ if (!(head_list->cStat & TLAN_CSTAT_FRM_CMP)) {
+ printk( "TLAN: Received interrupt for uncompleted RX frame.\n" );
+ } else if ( bbuf ) {
+ skb = dev_alloc_skb( head_list->frameSize + 7 );
+ if ( skb == NULL ) {
+ printk( "TLAN: Couldn't allocate memory for received data.\n" );
+ } else {
+ head_buffer = priv->rxBuffer + ( priv->rxHead * TLAN_MAX_FRAME_SIZE );
+ skb->dev = dev;
+ skb_reserve( skb, 2 );
+ t = (void *) skb_put( skb, head_list->frameSize );
+
+#if LINUX_KERNEL_VERSION > 0x20100
+			priv->stats.rx_bytes += head_list->frameSize;
+#endif
+
+ memcpy( t, head_buffer, head_list->frameSize );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ }
+ } else {
+ skb = (struct sk_buff *) head_list->buffer[9].address;
+ head_list->buffer[9].address = 0;
+ skb_trim( skb, head_list->frameSize );
+
+#if LINUX_KERNEL_VERSION > 0x20100
+		priv->stats.rx_bytes += head_list->frameSize;
+#endif
+
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+
+ skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
+ if ( skb == NULL ) {
+ printk( "TLAN: Couldn't allocate memory for received data.\n" );
+ /* If this ever happened it would be a problem */
+ } else {
+ skb->dev = dev;
+ skb_reserve( skb, 2 );
+ t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
+ head_list->buffer[0].address = virt_to_bus( t );
+ head_list->buffer[9].address = (u32) skb;
+ }
+ }
+
+ head_list->forward = 0;
+ head_list->frameSize = TLAN_MAX_FRAME_SIZE;
+ head_list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ tail_list->forward = virt_to_bus( head_list );
+
+ CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
+ CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
+
+ if ( eoc ) {
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail );
+ head_list = priv->rxList + priv->rxHead;
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rxEocCount++;
+ }
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
+ if ( priv->timer.function == NULL ) {
+ TLan_SetTimer( dev, TLAN_TIMER_ACT_DELAY, TLAN_TIMER_ACTIVITY );
+ } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
+ priv->timerSetAt = jiffies;
+ }
+ }
+
+ dev->last_rx = jiffies;
+
+ return ack;
+
+} /* TLan_HandleRxEOF */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleDummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
+
+u32 TLan_HandleDummy( struct device *dev, u16 host_int )
+{
+ host_int = 0;
+ printk( "TLAN: Test interrupt on %s.\n", dev->name );
+ return 1;
+
+} /* TLan_HandleDummy */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleTxEOC
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+	 *	This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
+
+u32 TLan_HandleTxEOC( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ TLanList *head_list;
+ u32 ack = 1;
+
+ host_int = 0;
+ if ( priv->tlanRev < 0x30 ) {
+ TLAN_DBG( TLAN_DEBUG_TX, "TLAN TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail );
+ head_list = priv->txList + priv->txHead;
+ if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->txInProgress = 0;
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleTxEOC */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleStatusCheck
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
+
+u32 TLan_HandleStatusCheck( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 ack;
+ u32 error;
+ u8 net_sts;
+ u32 phy;
+ u16 tlphy_ctl;
+ u16 tlphy_sts;
+
+ ack = 1;
+ if ( host_int & TLAN_HI_IV_MASK ) {
+ error = inl( dev->base_addr + TLAN_CH_PARM );
+ printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
+ TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ TLan_FreeLists( dev );
+ TLan_ResetLists( dev );
+ TLan_ResetAdapter( dev );
+ dev->tbusy = 0;
+ ack = 0;
+ } else {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Status Check\n", dev->name );
+ phy = priv->phy[priv->phyNum];
+
+ net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
+ if ( net_sts ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Net_Sts = %x\n", dev->name, (unsigned) net_sts );
+ }
+ if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
+ if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ }
+
+ if (debug) {
+ TLan_PhyPrint( dev );
+ }
+ }
+ }
+
+ return ack;
+
+} /* TLan_HandleStatusCheck */
+
+
+
+
+ /***************************************************************
+ * TLan_HandleRxEOC
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+	 *	This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+	 *	CSTAT member or an INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
+
+u32 TLan_HandleRxEOC( struct device *dev, u16 host_int )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ TLanList *head_list;
+ u32 ack = 1;
+
+ host_int = 0;
+ if ( priv->tlanRev < 0x30 ) {
+ TLAN_DBG( TLAN_DEBUG_RX, "TLAN RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail );
+ head_list = priv->rxList + priv->rxHead;
+ outl( virt_to_bus( head_list ), dev->base_addr + TLAN_CH_PARM );
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rxEocCount++;
+ }
+
+ return ack;
+
+} /* TLan_HandleRxEOC */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Timer Function
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_Timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+	 *		data		A value given to the timer when
+	 *				add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+	 *	delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 10 jiffies produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+void TLan_Timer( unsigned long data )
+{
+ struct device *dev = (struct device *) data;
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 elapsed;
+
+ priv->timer.function = NULL;
+
+ switch ( priv->timerType ) {
+ case TLAN_TIMER_PHY_PDOWN:
+ TLan_PhyPowerDown( dev );
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ TLan_PhyPowerUp( dev );
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ TLan_PhyReset( dev );
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ TLan_PhyStartLink( dev );
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ TLan_PhyFinishAutoNeg( dev );
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ TLan_FinishReset( dev );
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ cli();
+ if ( priv->timer.function == NULL ) {
+ elapsed = jiffies - priv->timerSetAt;
+ if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ } else {
+ priv->timer.function = &TLan_Timer;
+ priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY;
+ sti();
+ add_timer( &priv->timer );
+ }
+ }
+ sti();
+ break;
+ default:
+ break;
+ }
+
+} /* TLan_Timer */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Adapter Related Routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_ResetLists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+	 *			structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+void TLan_ResetLists( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int i;
+ TLanList *list;
+ struct sk_buff *skb;
+ void *t = NULL;
+
+ priv->txHead = 0;
+ priv->txTail = 0;
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
+ list = priv->txList + i;
+ list->cStat = TLAN_CSTAT_UNUSED;
+ if ( bbuf ) {
+ list->buffer[0].address = virt_to_bus( priv->txBuffer + ( i * TLAN_MAX_FRAME_SIZE ) );
+ } else {
+ list->buffer[0].address = 0;
+ }
+ list->buffer[2].count = 0;
+ list->buffer[2].address = 0;
+ }
+
+ priv->rxHead = 0;
+ priv->rxTail = TLAN_NUM_RX_LISTS - 1;
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
+ list = priv->rxList + i;
+ list->cStat = TLAN_CSTAT_READY;
+ list->frameSize = TLAN_MAX_FRAME_SIZE;
+ list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ if ( bbuf ) {
+ list->buffer[0].address = virt_to_bus( priv->rxBuffer + ( i * TLAN_MAX_FRAME_SIZE ) );
+ } else {
+ skb = dev_alloc_skb( TLAN_MAX_FRAME_SIZE + 7 );
+ if ( skb == NULL ) {
+ printk( "TLAN: Couldn't allocate memory for received data.\n" );
+ /* If this ever happened it would be a problem */
+ } else {
+ skb->dev = dev;
+ skb_reserve( skb, 2 );
+ t = (void *) skb_put( skb, TLAN_MAX_FRAME_SIZE );
+ }
+ list->buffer[0].address = virt_to_bus( t );
+ list->buffer[9].address = (u32) skb;
+ }
+ list->buffer[1].count = 0;
+ list->buffer[1].address = 0;
+ if ( i < TLAN_NUM_RX_LISTS - 1 )
+ list->forward = virt_to_bus( list + 1 );
+ else
+ list->forward = 0;
+ }
+
+} /* TLan_ResetLists */
+
+
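+	/***************************************************************
+	 *	TLan_FreeLists
+	 *
+	 *	Returns:
+	 *		Nothing
+	 *	Parms:
+	 *		dev	The device structure with the list
+	 *			structures whose buffers are to be
+	 *			freed.
+	 *
+	 *	This routine frees the sk_buffs still attached to the
+	 *	Tx and Rx lists when bounce buffers are not in use.
+	 *
+	 **************************************************************/
+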
+void TLan_FreeLists( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int i;
+ TLanList *list;
+ struct sk_buff *skb;
+
+ if ( ! bbuf ) {
+ for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
+ list = priv->txList + i;
+ skb = (struct sk_buff *) list->buffer[9].address;
+ if ( skb ) {
+ dev_kfree_skb( skb, FREE_WRITE );
+ list->buffer[9].address = 0;
+ }
+ }
+
+ for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
+ list = priv->rxList + i;
+ skb = (struct sk_buff *) list->buffer[9].address;
+ if ( skb ) {
+ dev_kfree_skb( skb, FREE_READ );
+ list->buffer[9].address = 0;
+ }
+ }
+ }
+
+} /* TLan_FreeLists */
+
+
+
+
+ /***************************************************************
+ * TLan_PrintDio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
+
+void TLan_PrintDio( u16 io_base )
+{
+ u32 data0, data1;
+ int i;
+
+ printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", io_base );
+ printk( "TLAN: Off. +0 +4\n" );
+ for ( i = 0; i < 0x4C; i+= 8 ) {
+ data0 = TLan_DioRead32( io_base, i );
+ data1 = TLan_DioRead32( io_base, i + 0x4 );
+ printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
+ }
+
+} /* TLan_PrintDio */
+
+
+
+
+ /***************************************************************
+ * TLan_PrintList
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the TLanList structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
+
+void TLan_PrintList( TLanList *list, char *type, int num)
+{
+ int i;
+
+ printk( "TLAN: %s List %d at 0x%08x\n", type, num, (u32) list );
+ printk( "TLAN: Forward = 0x%08x\n", list->forward );
+ printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
+ printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
+ /* for ( i = 0; i < 10; i++ ) { */
+ for ( i = 0; i < 2; i++ ) {
+ printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", i, list->buffer[i].count, list->buffer[i].address );
+ }
+
+} /* TLan_PrintList */
+
+
+
+
+ /***************************************************************
+ * TLan_ReadAndClearStats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+	 *				from which to read stats.
+	 *		record		Flag indicating whether to add the
+	 *				values to the device's statistics
+	 *				structure.
+ *
+	 *	This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
+
+void TLan_ReadAndClearStats( struct device *dev, int record )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u32 tx_good, tx_under;
+ u32 rx_good, rx_over;
+ u32 def_tx, crc, code;
+ u32 multi_col, single_col;
+ u32 excess_col, late_col, loss;
+
+ outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ tx_good = inb( dev->base_addr + TLAN_DIO_DATA );
+ tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
+ tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ rx_good = inb( dev->base_addr + TLAN_DIO_DATA );
+ rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
+ rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
+ def_tx = inb( dev->base_addr + TLAN_DIO_DATA );
+ def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+ code = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+
+ outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ multi_col = inb( dev->base_addr + TLAN_DIO_DATA );
+ multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
+ single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+ single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
+
+ outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
+ excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
+ late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
+ loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+
+ if ( record ) {
+ priv->stats.rx_packets += rx_good;
+ priv->stats.rx_errors += rx_over + crc + code;
+ priv->stats.tx_packets += tx_good;
+ priv->stats.tx_errors += tx_under + loss;
+ priv->stats.collisions += multi_col + single_col + excess_col + late_col;
+
+ priv->stats.rx_over_errors += rx_over;
+ priv->stats.rx_crc_errors += crc;
+ priv->stats.rx_frame_errors += code;
+
+ priv->stats.tx_aborted_errors += tx_under;
+ priv->stats.tx_carrier_errors += loss;
+ }
+
+} /* TLan_ReadAndClearStats */
+
+
+
+
+ /***************************************************************
+	 *	TLan_ResetAdapter
+ *
+ * Returns:
+	 *		Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+	 *	This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
+
+void
+TLan_ResetAdapter( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ int i;
+ u32 addr;
+ u32 data;
+ u8 data8;
+
+ priv->tlanFullDuplex = FALSE;
+/* 1. Assert reset bit. */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_AD_RST;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+ udelay(1000);
+
+/* 2. Turn off interrupts. ( Probably isn't necessary ) */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_INT_OFF;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+/* 3. Clear AREGs and HASHs. */
+
+ for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
+ TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
+ }
+
+/* 4. Setup NetConfig register. */
+
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+
+/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
+
+ outl( TLAN_HC_LD_TMR | 0x0, dev->base_addr + TLAN_HOST_CMD );
+ outl( TLAN_HC_LD_THR | 0x1, dev->base_addr + TLAN_HOST_CMD );
+
+/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
+
+ outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+ addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+ TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+
+/* 7. Setup the remaining registers. */
+
+ if ( priv->tlanRev >= 0x30 ) {
+ data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
+ TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+ }
+ TLan_PhyDetect( dev );
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
+ if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+ data |= TLAN_NET_CFG_BIT;
+ if ( priv->aui == 1 ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
+ } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
+ priv->tlanFullDuplex = TRUE;
+ } else {
+ TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+ }
+ }
+ if ( priv->phyNum == 0 ) {
+ data |= TLAN_NET_CFG_PHY_EN;
+ }
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ TLan_FinishReset( dev );
+ } else {
+ TLan_PhyPowerDown( dev );
+ }
+
+} /* TLan_ResetAdapter */
+
+
+
+
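+	/***************************************************************
+	 *	TLan_FinishReset
+	 *
+	 *	Returns:
+	 *		Nothing
+	 *	Parms:
+	 *		dev	The device structure of the adapter
+	 *			whose reset is being completed.
+	 *
+	 *	This routine finishes the adapter reset begun by
+	 *	TLan_ResetAdapter.  It programs the net command, mask
+	 *	and max-rx registers, checks the PHY for link, and if
+	 *	the link is up (or forced), loads the MAC address,
+	 *	enables interrupts and starts the receive channel.
+	 *	Otherwise it sets a timer to try again later.
+	 *
+	 **************************************************************/
+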
+void
+TLan_FinishReset( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u8 data;
+ u32 phy;
+ u8 sio;
+ u16 status;
+ u16 tlphy_ctl;
+
+ phy = priv->phy[priv->phyNum];
+
+ data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
+ if ( priv->tlanFullDuplex ) {
+ data |= TLAN_NET_CMD_DUPLEX;
+ }
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+ data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
+ if ( priv->phyNum == 0 ) {
+ data |= TLAN_NET_MASK_MASK7;
+ }
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
+ TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, TLAN_MAX_FRAME_SIZE );
+
+ if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) {
+ status = MII_GS_LINK;
+ printk( "TLAN: %s: Link forced.\n", dev->name );
+ } else {
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ udelay( 1000 );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ if ( status & MII_GS_LINK ) {
+ printk( "TLAN: %s: Link active.\n", dev->name );
+ TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ }
+ }
+
+ if ( priv->phyNum == 0 ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
+ tlphy_ctl |= TLAN_TC_INTEN;
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
+ sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
+ sio |= TLAN_NET_SIO_MINTEN;
+ TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
+ }
+
+ if ( status & MII_GS_LINK ) {
+ TLan_SetMac( dev, 0, dev->dev_addr );
+ priv->phyOnline = 1;
+ outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
+ if ( debug >= 1 ) {
+ outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
+ }
+ outl( virt_to_bus( priv->rxList ), dev->base_addr + TLAN_CH_PARM );
+ outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+ } else {
+ printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name );
+ TLan_SetTimer( dev, 1000, TLAN_TIMER_FINISH_RESET );
+ return;
+ }
+
+} /* TLan_FinishReset */
+
+
+
+
+ /***************************************************************
+ * TLan_SetMac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+	 *			i.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
+
+void TLan_SetMac( struct device *dev, int areg, char *mac )
+{
+ int i;
+
+ areg *= 6;
+
+ if ( mac != NULL ) {
+ for ( i = 0; i < 6; i++ )
+ TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, mac[i] );
+ } else {
+ for ( i = 0; i < 6; i++ )
+ TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, 0 );
+ }
+
+} /* TLan_SetMac */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver PHY Layer Routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+ /*********************************************************************
+ * TLan_PhyPrint
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+	 *	This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
+
+void TLan_PhyPrint( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 i, data0, data1, data2, data3, phy;
+
+ phy = priv->phy[priv->phyNum];
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
+ } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
+ printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
+ printk( "TLAN: Off. +0 +1 +2 +3 \n" );
+ for ( i = 0; i < 0x20; i+= 4 ) {
+ printk( "TLAN: 0x%02x", i );
+ TLan_MiiReadReg( dev, phy, i, &data0 );
+ printk( " 0x%04hx", data0 );
+ TLan_MiiReadReg( dev, phy, i + 1, &data1 );
+ printk( " 0x%04hx", data1 );
+ TLan_MiiReadReg( dev, phy, i + 2, &data2 );
+ printk( " 0x%04hx", data2 );
+ TLan_MiiReadReg( dev, phy, i + 3, &data3 );
+ printk( " 0x%04hx\n", data3 );
+ }
+ } else {
+ printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
+ }
+
+} /* TLan_PhyPrint */
+
+
+
+
+ /*********************************************************************
+ * TLan_PhyDetect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+	 *		for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+	 *	(e.g., AUI/Thinnet).  This function finds out if this TLAN
+	 *	chip has an internal PHY, and then finds the first external
+	 *	PHY (starting from address 0) if it exists.
+ *
+ ********************************************************************/
+
+void TLan_PhyDetect( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 control;
+ u16 hi;
+ u16 lo;
+ u32 phy;
+
+ if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
+ priv->phyNum = 0xFFFF;
+ return;
+ }
+
+ TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+
+ if ( hi != 0xFFFF ) {
+ priv->phy[0] = TLAN_PHY_MAX_ADDR;
+ } else {
+ priv->phy[0] = TLAN_PHY_NONE;
+ }
+
+ priv->phy[1] = TLAN_PHY_NONE;
+ for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
+ TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
+ if ( ( control != 0xFFFF ) || ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: PHY found at %02x %04x %04x %04x\n", phy, control, hi, lo );
+ if ( ( priv->phy[1] == TLAN_PHY_NONE ) && ( phy != TLAN_PHY_MAX_ADDR ) ) {
+ priv->phy[1] = phy;
+ }
+ }
+ }
+
+ if ( priv->phy[1] != TLAN_PHY_NONE ) {
+ priv->phyNum = 1;
+ } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
+ priv->phyNum = 0;
+ } else {
+ printk( "TLAN: Cannot initialize device, no PHY was found!\n" );
+ }
+
+} /* TLan_PhyDetect */
+
+
+
+
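+	/***************************************************************
+	 *	TLan_PhyPowerDown
+	 *
+	 *	Returns:
+	 *		Nothing
+	 *	Parms:
+	 *		dev	The device structure of the adapter
+	 *			whose PHY(s) are to be powered down.
+	 *
+	 *	This routine powers down the PHY(s) by setting the
+	 *	power-down, loopback and isolate bits in the MII
+	 *	general control register, then sets a timer to power
+	 *	the PHY back up after a short delay.
+	 *
+	 **************************************************************/
+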
+void TLan_PhyPowerDown( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 value;
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Powering down PHY(s).\n", dev->name );
+ value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
+ TLan_MiiSync( dev->base_addr );
+ TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
+ if ( ( priv->phyNum == 0 ) && ( priv->phy[1] != TLAN_PHY_NONE ) && ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
+ TLan_MiiSync( dev->base_addr );
+ TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+ }
+
+ /* Wait for 5 jiffies (50 ms) and powerup
+	 * This is arbitrary.  It is intended to make sure the
+	 * transceiver settles.
+ */
+ TLan_SetTimer( dev, 5, TLAN_TIMER_PHY_PUP );
+
+} /* TLan_PhyPowerDown */
+
+
+
+
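+	/***************************************************************
+	 *	TLan_PhyPowerUp
+	 *
+	 *	Returns:
+	 *		Nothing
+	 *	Parms:
+	 *		dev	The device structure of the adapter
+	 *			whose PHY is to be powered up.
+	 *
+	 *	This routine powers the PHY back up (leaving loopback
+	 *	set) and sets a timer to reset the PHY once it has had
+	 *	time to settle.
+	 *
+	 **************************************************************/
+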
+void TLan_PhyPowerUp( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 value;
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Powering up PHY.\n", dev->name );
+ TLan_MiiSync( dev->base_addr );
+ value = MII_GC_LOOPBK;
+ TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
+
+ /* Wait for 50 jiffies (500 ms) and reset the
+	 * transceiver.  The TLAN docs say both 50 ms and
+ * 500 ms, so do the longer, just in case
+ */
+ TLan_SetTimer( dev, 50, TLAN_TIMER_PHY_RESET );
+
+} /* TLan_PhyPowerUp */
+
+
+
+
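+	/***************************************************************
+	 *	TLan_PhyReset
+	 *
+	 *	Returns:
+	 *		Nothing
+	 *	Parms:
+	 *		dev	The device structure of the adapter
+	 *			whose PHY is to be reset.
+	 *
+	 *	This routine resets the PHY through the MII general
+	 *	control register, waits for the reset bit to clear,
+	 *	then sets a timer to start establishing the link.
+	 *
+	 **************************************************************/
+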
+void TLan_PhyReset( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 phy;
+ u16 value;
+
+ phy = priv->phy[priv->phyNum];
+
+	TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Resetting PHY.\n", dev->name );
+ TLan_MiiSync( dev->base_addr );
+ value = MII_GC_LOOPBK | MII_GC_RESET;
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
+ while ( value & MII_GC_RESET ) {
+ TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
+ }
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0 );
+
+ /* Wait for 50 jiffies (500 ms) and initialize.
+ * I don't remember why I wait this long.
+ */
+ TLan_SetTimer( dev, 50, TLAN_TIMER_PHY_START_LINK );
+
+} /* TLan_PhyReset */
+
+
+
+
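+	/***************************************************************
+	 *	TLan_PhyStartLink
+	 *
+	 *	Returns:
+	 *		Nothing
+	 *	Parms:
+	 *		dev	The device structure of the adapter
+	 *			whose link is to be established.
+	 *
+	 *	This routine tries to activate the link.  If the PHY
+	 *	supports autonegotiation and no speed or duplex has
+	 *	been forced, it advertises the adapter's abilities and
+	 *	starts autonegotiation.  Otherwise it programs the PHY
+	 *	(and the AUI selection on the internal PHY) directly
+	 *	and sets a timer to finish the reset.
+	 *
+	 **************************************************************/
+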
+void TLan_PhyStartLink( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 ability;
+ u16 control;
+ u16 data;
+ u16 phy;
+ u16 status;
+ u16 tctl;
+
+ phy = priv->phy[priv->phyNum];
+
+ TLAN_DBG( TLAN_DEBUG_GNRL, "TLAN: %s: Trying to activate link.\n", dev->name );
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ if ( ( status & MII_GS_AUTONEG ) &&
+ ( priv->duplex == TLAN_DUPLEX_DEFAULT ) &&
+ ( priv->speed == TLAN_SPEED_DEFAULT ) &&
+ ( ! priv->aui ) ) {
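+		/* Bits 11-15 of the MII status register are the local
+		   ability bits; they are shifted down here and written
+		   into the autonegotiation advertisement register below. */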
+ ability = status >> 11;
+
+ if ( priv->speed == TLAN_SPEED_10 ) {
+ ability &= 0x0003;
+ } else if ( priv->speed == TLAN_SPEED_100 ) {
+ ability &= 0x001C;
+ }
+
+ if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ ability &= 0x000A;
+ } else if ( priv->duplex == TLAN_DUPLEX_HALF ) {
+ ability &= 0x0005;
+ }
+
+ TLan_MiiWriteReg( dev, phy, MII_AN_ADV, ( ability << 5 ) | 1 );
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+
+ /* Wait for 400 jiffies (4 sec) for autonegotiation
+ * to complete. The max spec time is less than this
+	 * but the card needs additional time to start AN.
+ * .5 sec should be plenty extra.
+ */
+ printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
+ TLan_SetTimer( dev, 400, TLAN_TIMER_PHY_FINISH_AN );
+ return;
+ }
+
+ if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
+ priv->phyNum = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
+ TLan_SetTimer( dev, 4, TLAN_TIMER_PHY_PDOWN );
+ return;
+ } else if ( priv->phyNum == 0 ) {
+ TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
+ if ( priv->aui ) {
+ tctl |= TLAN_TC_AUISEL;
+ } else {
+ tctl &= ~TLAN_TC_AUISEL;
+ control = 0;
+ if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ control |= MII_GC_DUPLEX;
+ priv->tlanFullDuplex = TRUE;
+ }
+ if ( priv->speed == TLAN_SPEED_100 ) {
+ control |= MII_GC_SPEEDSEL;
+ }
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
+ }
+ TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+ }
+
+	/* Wait for 100 jiffies (1 sec) to give the transceiver time
+ * to establish link.
+ */
+ TLan_SetTimer( dev, 100, TLAN_TIMER_FINISH_RESET );
+
+} /* TLan_PhyStartLink */
+
+
+
+
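+	/***************************************************************
+	 *	TLan_PhyFinishAutoNeg
+	 *
+	 *	Returns:
+	 *		Nothing
+	 *	Parms:
+	 *		dev	The device structure of the adapter
+	 *			whose autonegotiation is finishing.
+	 *
+	 *	This routine checks whether autonegotiation has
+	 *	completed, rescheduling itself if it has not.  Once
+	 *	complete, it determines the negotiated duplex, falls
+	 *	back to the internal 10 Mbps PHY if no common 100 Mbps
+	 *	mode was negotiated (on adapters that use the internal
+	 *	PHY for 10 Mbps), and sets a timer to finish the reset.
+	 *
+	 **************************************************************/
+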
+void TLan_PhyFinishAutoNeg( struct device *dev )
+{
+ TLanPrivateInfo *priv = (TLanPrivateInfo *) dev->priv;
+ u16 an_adv;
+ u16 an_lpa;
+ u16 data;
+ u16 mode;
+ u16 phy;
+ u16 status;
+
+ phy = priv->phy[priv->phyNum];
+
+ TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+ /* Wait for 800 jiffies (8 sec) to give the process
+ * more time. Perhaps we should fail after a while.
+ */
+ printk( "TLAN: Giving autonegotiation more time.\n" );
+ TLan_SetTimer( dev, 800, TLAN_TIMER_PHY_FINISH_AN );
+ return;
+ }
+
+ printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
+ TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
+ TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+ mode = an_adv & an_lpa & 0x03E0;
+ if ( mode & 0x0100 ) {
+ priv->tlanFullDuplex = TRUE;
+ } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
+ priv->tlanFullDuplex = TRUE;
+ }
+
+ if ( ( ! ( mode & 0x0180 ) ) && ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && ( priv->phyNum != 0 ) ) {
+ priv->phyNum = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
+ TLan_SetTimer( dev, 40, TLAN_TIMER_PHY_PDOWN );
+ return;
+ }
+
+ if ( priv->phyNum == 0 ) {
+ if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX );
+ printk( "TLAN: Starting internal PHY with DUPLEX\n" );
+ } else {
+ TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
+ printk( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ }
+ }
+
+	/* Wait for 10 jiffies (100 ms).  No reason in particular.
+ */
+ TLan_SetTimer( dev, 10, TLAN_TIMER_FINISH_RESET );
+
+} /* TLan_PhyFinishAutoNeg */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver MII Routines
+
+ These routines are based on the information in Chap. 2 of the
+ "ThunderLAN Programmer's Guide", pp. 15-24.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_MiiReadReg
+ *
+ * Returns:
+ * 0 if ack received ok
+ * 1 otherwise.
+ *
+ * Parms:
+ * dev The device structure containing
+ * The io address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+	 *				retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+	 *	This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+int TLan_MiiReadReg( struct device *dev, u16 phy, u16 reg, u16 *val )
+{
+ u8 nack;
+ u16 sio, tmp;
+ u32 i;
+ int err;
+ int minten;
+
+ err = FALSE;
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if ( dev->interrupt == 0 )
+ cli();
+ dev->interrupt++;
+
+ TLan_MiiSync(dev->base_addr);
+
+ minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
+ if ( minten )
+ TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */
+ TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
+ TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+
+
+ TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */
+
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */
+
+ nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */
+ if (nack) { /* No ACK, so fake it */
+ for (i = 0; i < 16; i++) {
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ }
+ tmp = 0xffff;
+ err = TRUE;
+ } else { /* ACK, so read data */
+ for (tmp = 0, i = 0x8000; i; i >>= 1) {
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
+ if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+ tmp |= i;
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ }
+ }
+
+
+ TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */
+ TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+
+ if ( minten )
+ TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+
+ *val = tmp;
+
+ dev->interrupt--;
+ if ( dev->interrupt == 0 )
+ sti();
+
+ return err;
+
+} /* TLan_MiiReadReg */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiSendData
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+	 *	This function sends a sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
+
+void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+{
+ u16 sio;
+ u32 i;
+
+ if ( num_bits == 0 )
+ return;
+
+ outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+ TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+
+ for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
+ TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ if ( data & i )
+ TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+ else
+ TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ }
+
+} /* TLan_MiiSendData */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiSync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+	 *	This function syncs all PHYs in terms of the MII configuration
+ * bus.
+ *
+ **************************************************************/
+
+void TLan_MiiSync( u16 base_port )
+{
+ int i;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
+ for ( i = 0; i < 32; i++ ) {
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ }
+
+} /* TLan_MiiSync */
+
+
+
+
+ /***************************************************************
+ * TLan_MiiWriteReg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value from the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
+
+void TLan_MiiWriteReg( struct device *dev, u16 phy, u16 reg, u16 val )
+{
+ u16 sio;
+ int minten;
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if ( dev->interrupt == 0 )
+ cli();
+ dev->interrupt++;
+
+ TLan_MiiSync( dev->base_addr );
+
+ minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
+ if ( minten )
+ TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */
+ TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
+ TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+
+ TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */
+ TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */
+
+ TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */
+ TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+
+ if ( minten )
+ TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+
+ dev->interrupt--;
+ if ( dev->interrupt == 0 )
+ sti();
+
+} /* TLan_MiiWriteReg */
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ ThunderLAN Driver Eeprom routines
+
+ The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+ EEPROM. These functions are based on information in Microchip's
+	data sheet.  I don't know how well these functions will work with
+ other EEPROMs.
+
+******************************************************************************
+*****************************************************************************/
+
+
+ /***************************************************************
+ * TLan_EeSendStart
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+void TLan_EeSendStart( u16 io_base )
+{
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+
+} /* TLan_EeSendStart */
+
+
+
+
+ /***************************************************************
+ * TLan_EeSendByte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte has been sent and the
+ * ack has been read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+{
+ int err;
+ u8 place;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* Assume clock is low, tx is enabled; */
+ for ( place = 0x80; place != 0; place >>= 1 ) {
+ if ( place & data )
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ else
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ }
+ TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+
+ if ( ( ! err ) && stop ) {
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ }
+
+ return ( err );
+
+} /* TLan_EeSendByte */
+
+
+
+
+ /***************************************************************
+ * TLan_EeReceiveByte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+{
+ u8 place;
+ u16 sio;
+
+ outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+ *data = 0;
+
+ /* Assume clock is low, tx is enabled; */
+ TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
+ for ( place = 0x80; place; place >>= 1 ) {
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+ *data |= place;
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ }
+
+ TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ if ( ! stop ) {
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ } else {
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */
+ TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
+ TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ }
+
+} /* TLan_EeReceiveByte */
+
+
+
+
+ /***************************************************************
+ * TLan_EeReadByte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occurred.
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+int TLan_EeReadByte( struct device *dev, u8 ee_addr, u8 *data )
+{
+ int err;
+ int ret = 0;
+
+ if ( dev->interrupt == 0 )
+ cli();
+ dev->interrupt++;
+
+ TLan_EeSendStart( dev->base_addr );
+ err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
+ if (err) {
+ ret = 1;
+ goto fail;
+ }
+ err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
+ if (err) {
+ ret = 2;
+ goto fail;
+ }
+ TLan_EeSendStart( dev->base_addr );
+ err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
+ if (err) {
+ ret = 3;
+ goto fail;
+ }
+ TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+
+fail:
+ /* Rebalance the interrupt nesting count and re-enable interrupts on
+ both the success and error paths before returning. */
+ dev->interrupt--;
+ if ( dev->interrupt == 0 )
+ sti();
+
+ return ret;
+
+} /* TLan_EeReadByte */
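+
+/* Illustrative sketch only (not part of the original driver): how a probe
+ routine might use TLan_EeReadByte to pull the six-byte station address
+ out of the EEPROM, starting at the adapter's address offset. The
+ function and parameter names below are invented for this example. */
+#if 0
+static int TLan_ReadStationAddressExample( struct device *dev, u8 addr_ofs )
+{
+ int i;
+ int err = 0;
+
+ for ( i = 0; i < 6; i++ )
+ err |= TLan_EeReadByte( dev, (u8) ( addr_ofs + i ),
+ (u8 *) &dev->dev_addr[i] );
+
+ return err; /* Non-zero if any EEPROM cycle failed to ack. */
+}
+#endif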
+
+
+
+
+
diff --git a/linux/src/drivers/net/tlan.h b/linux/src/drivers/net/tlan.h
new file mode 100644
index 0000000..a66e26c
--- /dev/null
+++ b/linux/src/drivers/net/tlan.h
@@ -0,0 +1,525 @@
+#ifndef TLAN_H
+#define TLAN_H
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.h
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ ** This file is best viewed/edited with tabstop=4, columns>=132
+ *
+ ********************************************************************/
+
+
+#include <asm/io.h>
+#include <asm/types.h>
+#include <linux/netdevice.h>
+
+#if LINUX_VERSION_CODE <= 0x20100
+#define net_device_stats enet_statistics
+#endif
+
+
+
+
+ /*****************************************************************
+ * TLan Definitions
+ *
+ ****************************************************************/
+
+#define FALSE 0
+#define TRUE 1
+
+#define TLAN_MIN_FRAME_SIZE 64
+#define TLAN_MAX_FRAME_SIZE 1600
+
+#define TLAN_NUM_RX_LISTS 4
+#define TLAN_NUM_TX_LISTS 8
+
+#define TLAN_IGNORE 0
+#define TLAN_RECORD 1
+
+#define TLAN_DBG(lvl, format, args...) if (debug&lvl) printk( format, ##args );
+#define TLAN_DEBUG_GNRL 0x0001
+#define TLAN_DEBUG_TX 0x0002
+#define TLAN_DEBUG_RX 0x0004
+#define TLAN_DEBUG_LIST 0x0008
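+/* Example use (illustrative only): TLAN_DBG( TLAN_DEBUG_TX, "%s: sending a %d byte frame.\n", dev->name, len ); */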
+
+
+
+
+ /*****************************************************************
+ * Device Identification Definitions
+ *
+ ****************************************************************/
+
+#define PCI_DEVICE_ID_NETELLIGENT_10 0xAE34
+#define PCI_DEVICE_ID_NETELLIGENT_10_100 0xAE32
+#define PCI_DEVICE_ID_NETFLEX_3P_INTEGRATED 0xAE35
+#define PCI_DEVICE_ID_NETFLEX_3P 0xF130
+#define PCI_DEVICE_ID_NETFLEX_3P_BNC 0xF150
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_PROLIANT 0xAE43
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_DUAL 0xAE40
+#define PCI_DEVICE_ID_DESKPRO_4000_5233MMX 0xB011
+#define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030
+#ifndef PCI_DEVICE_ID_OLICOM_OC2183
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2325
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2326
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#endif
+
+typedef struct tlan_adapter_entry {
+ u16 vendorId;
+ u16 deviceId;
+ char *deviceLabel;
+ u32 flags;
+ u16 addrOfs;
+} TLanAdapterEntry;
+
+#define TLAN_ADAPTER_NONE 0x00000000
+#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
+#define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002
+#define TLAN_ADAPTER_USE_INTERN_10 0x00000004
+#define TLAN_ADAPTER_ACTIVITY_LED 0x00000008
+
+#define TLAN_SPEED_DEFAULT 0
+#define TLAN_SPEED_10 10
+#define TLAN_SPEED_100 100
+
+#define TLAN_DUPLEX_DEFAULT 0
+#define TLAN_DUPLEX_HALF 1
+#define TLAN_DUPLEX_FULL 2
+
+
+
+
+ /*****************************************************************
+ * Rx/Tx List Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_BUFFERS_PER_LIST 10
+#define TLAN_LAST_BUFFER 0x80000000
+#define TLAN_CSTAT_UNUSED 0x8000
+#define TLAN_CSTAT_FRM_CMP 0x4000
+#define TLAN_CSTAT_READY 0x3000
+#define TLAN_CSTAT_EOC 0x0800
+#define TLAN_CSTAT_RX_ERROR 0x0400
+#define TLAN_CSTAT_PASS_CRC 0x0200
+#define TLAN_CSTAT_DP_PR 0x0100
+
+
+typedef struct tlan_buffer_ref_tag {
+ u32 count;
+ u32 address;
+} TLanBufferRef;
+
+
+typedef struct tlan_list_tag {
+ u32 forward;
+ u16 cStat;
+ u16 frameSize;
+ TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST];
+} TLanList;
+
+
+typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
+
+
+
+
+ /*****************************************************************
+ * PHY definitions
+ *
+ ****************************************************************/
+
+#define TLAN_PHY_MAX_ADDR 0x1F
+#define TLAN_PHY_NONE 0x20
+
+
+
+
+ /*****************************************************************
+ * TLAN Private Information Structure
+ *
+ ****************************************************************/
+
+typedef struct tlan_private_tag {
+ struct device *nextDevice;
+ void *dmaStorage;
+ u8 *padBuffer;
+ TLanList *rxList;
+ u8 *rxBuffer;
+ u32 rxHead;
+ u32 rxTail;
+ u32 rxEocCount;
+ TLanList *txList;
+ u8 *txBuffer;
+ u32 txHead;
+ u32 txInProgress;
+ u32 txTail;
+ u32 txBusyCount;
+ u32 phyOnline;
+ u32 timerSetAt;
+ u32 timerType;
+ struct timer_list timer;
+ struct net_device_stats stats;
+ TLanAdapterEntry *adapter;
+ u32 adapterRev;
+ u32 aui;
+ u32 debug;
+ u32 duplex;
+ u32 phy[2];
+ u32 phyNum;
+ u32 sa_int;
+ u32 speed;
+ u8 tlanRev;
+ u8 tlanFullDuplex;
+ char devName[8];
+} TLanPrivateInfo;
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Timer Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_TIMER_LINK 1
+#define TLAN_TIMER_ACTIVITY 2
+#define TLAN_TIMER_PHY_PDOWN 3
+#define TLAN_TIMER_PHY_PUP 4
+#define TLAN_TIMER_PHY_RESET 5
+#define TLAN_TIMER_PHY_START_LINK 6
+#define TLAN_TIMER_PHY_FINISH_AN 7
+#define TLAN_TIMER_FINISH_RESET 8
+
+#define TLAN_TIMER_ACT_DELAY 10
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Eeprom Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_EEPROM_ACK 0
+#define TLAN_EEPROM_STOP 1
+
+
+
+
+ /*****************************************************************
+ * Host Register Offsets and Contents
+ *
+ ****************************************************************/
+
+#define TLAN_HOST_CMD 0x00
+#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_STOP 0x40000000
+#define TLAN_HC_ACK 0x20000000
+#define TLAN_HC_CS_MASK 0x1FE00000
+#define TLAN_HC_EOC 0x00100000
+#define TLAN_HC_RT 0x00080000
+#define TLAN_HC_NES 0x00040000
+#define TLAN_HC_AD_RST 0x00008000
+#define TLAN_HC_LD_TMR 0x00004000
+#define TLAN_HC_LD_THR 0x00002000
+#define TLAN_HC_REQ_INT 0x00001000
+#define TLAN_HC_INT_OFF 0x00000800
+#define TLAN_HC_INT_ON 0x00000400
+#define TLAN_HC_AC_MASK 0x000000FF
+#define TLAN_CH_PARM 0x04
+#define TLAN_DIO_ADR 0x08
+#define TLAN_DA_ADR_INC 0x8000
+#define TLAN_DA_RAM_ADR 0x4000
+#define TLAN_HOST_INT 0x0A
+#define TLAN_HI_IV_MASK 0x1FE0
+#define TLAN_HI_IT_MASK 0x001C
+#define TLAN_DIO_DATA 0x0C
+
+
+/* ThunderLAN Internal Register DIO Offsets */
+
+#define TLAN_NET_CMD 0x00
+#define TLAN_NET_CMD_NRESET 0x80
+#define TLAN_NET_CMD_NWRAP 0x40
+#define TLAN_NET_CMD_CSF 0x20
+#define TLAN_NET_CMD_CAF 0x10
+#define TLAN_NET_CMD_NOBRX 0x08
+#define TLAN_NET_CMD_DUPLEX 0x04
+#define TLAN_NET_CMD_TRFRAM 0x02
+#define TLAN_NET_CMD_TXPACE 0x01
+#define TLAN_NET_SIO 0x01
+#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_ECLOK 0x40
+#define TLAN_NET_SIO_ETXEN 0x20
+#define TLAN_NET_SIO_EDATA 0x10
+#define TLAN_NET_SIO_NMRST 0x08
+#define TLAN_NET_SIO_MCLK 0x04
+#define TLAN_NET_SIO_MTXEN 0x02
+#define TLAN_NET_SIO_MDATA 0x01
+#define TLAN_NET_STS 0x02
+#define TLAN_NET_STS_MIRQ 0x80
+#define TLAN_NET_STS_HBEAT 0x40
+#define TLAN_NET_STS_TXSTOP 0x20
+#define TLAN_NET_STS_RXSTOP 0x10
+#define TLAN_NET_STS_RSRVD 0x0F
+#define TLAN_NET_MASK 0x03
+#define TLAN_NET_MASK_MASK7 0x80
+#define TLAN_NET_MASK_MASK6 0x40
+#define TLAN_NET_MASK_MASK5 0x20
+#define TLAN_NET_MASK_MASK4 0x10
+#define TLAN_NET_MASK_RSRVD 0x0F
+#define TLAN_NET_CONFIG 0x04
+#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_TCLK 0x4000
+#define TLAN_NET_CFG_BIT 0x2000
+#define TLAN_NET_CFG_RXCRC 0x1000
+#define TLAN_NET_CFG_PEF 0x0800
+#define TLAN_NET_CFG_1FRAG 0x0400
+#define TLAN_NET_CFG_1CHAN 0x0200
+#define TLAN_NET_CFG_MTEST 0x0100
+#define TLAN_NET_CFG_PHY_EN 0x0080
+#define TLAN_NET_CFG_MSMASK 0x007F
+#define TLAN_MAN_TEST 0x06
+#define TLAN_DEF_VENDOR_ID 0x08
+#define TLAN_DEF_DEVICE_ID 0x0A
+#define TLAN_DEF_REVISION 0x0C
+#define TLAN_DEF_SUBCLASS 0x0D
+#define TLAN_DEF_MIN_LAT 0x0E
+#define TLAN_DEF_MAX_LAT 0x0F
+#define TLAN_AREG_0 0x10
+#define TLAN_AREG_1 0x16
+#define TLAN_AREG_2 0x1C
+#define TLAN_AREG_3 0x22
+#define TLAN_HASH_1 0x28
+#define TLAN_HASH_2 0x2C
+#define TLAN_GOOD_TX_FRMS 0x30
+#define TLAN_TX_UNDERUNS 0x33
+#define TLAN_GOOD_RX_FRMS 0x34
+#define TLAN_RX_OVERRUNS 0x37
+#define TLAN_DEFERRED_TX 0x38
+#define TLAN_CRC_ERRORS 0x3A
+#define TLAN_CODE_ERRORS 0x3B
+#define TLAN_MULTICOL_FRMS 0x3C
+#define TLAN_SINGLECOL_FRMS 0x3E
+#define TLAN_EXCESSCOL_FRMS 0x40
+#define TLAN_LATE_COLS 0x41
+#define TLAN_CARRIER_LOSS 0x42
+#define TLAN_ACOMMIT 0x43
+#define TLAN_LED_REG 0x44
+#define TLAN_LED_ACT 0x10
+#define TLAN_LED_LINK 0x01
+#define TLAN_BSIZE_REG 0x45
+#define TLAN_MAX_RX 0x46
+#define TLAN_INT_DIS 0x48
+#define TLAN_ID_TX_EOC 0x04
+#define TLAN_ID_RX_EOF 0x02
+#define TLAN_ID_RX_EOC 0x01
+
+
+
+/* ThunderLAN Interrupt Codes */
+
+#define TLAN_INT_NUMBER_OF_INTS 8
+
+#define TLAN_INT_NONE 0x0000
+#define TLAN_INT_TX_EOF 0x0001
+#define TLAN_INT_STAT_OVERFLOW 0x0002
+#define TLAN_INT_RX_EOF 0x0003
+#define TLAN_INT_DUMMY 0x0004
+#define TLAN_INT_TX_EOC 0x0005
+#define TLAN_INT_STATUS_CHECK 0x0006
+#define TLAN_INT_RX_EOC 0x0007
+
+
+
+/* ThunderLAN MII Registers */
+
+/* Generic MII/PHY Registers */
+
+#define MII_GEN_CTL 0x00
+#define MII_GC_RESET 0x8000
+#define MII_GC_LOOPBK 0x4000
+#define MII_GC_SPEEDSEL 0x2000
+#define MII_GC_AUTOENB 0x1000
+#define MII_GC_PDOWN 0x0800
+#define MII_GC_ISOLATE 0x0400
+#define MII_GC_AUTORSRT 0x0200
+#define MII_GC_DUPLEX 0x0100
+#define MII_GC_COLTEST 0x0080
+#define MII_GC_RESERVED 0x007F
+#define MII_GEN_STS 0x01
+#define MII_GS_100BT4 0x8000
+#define MII_GS_100BTXFD 0x4000
+#define MII_GS_100BTXHD 0x2000
+#define MII_GS_10BTFD 0x1000
+#define MII_GS_10BTHD 0x0800
+#define MII_GS_RESERVED 0x07C0
+#define MII_GS_AUTOCMPLT 0x0020
+#define MII_GS_RFLT 0x0010
+#define MII_GS_AUTONEG 0x0008
+#define MII_GS_LINK 0x0004
+#define MII_GS_JABBER 0x0002
+#define MII_GS_EXTCAP 0x0001
+#define MII_GEN_ID_HI 0x02
+#define MII_GEN_ID_LO 0x03
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
+#define MII_AN_ADV 0x04
+#define MII_AN_LPA 0x05
+#define MII_AN_EXP 0x06
+
+/* ThunderLAN Specific MII/PHY Registers */
+
+#define TLAN_TLPHY_ID 0x10
+#define TLAN_TLPHY_CTL 0x11
+#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_SWAPOL 0x4000
+#define TLAN_TC_AUISEL 0x2000
+#define TLAN_TC_SQEEN 0x1000
+#define TLAN_TC_MTEST 0x0800
+#define TLAN_TC_RESERVED 0x07F8
+#define TLAN_TC_NFEW 0x0004
+#define TLAN_TC_INTEN 0x0002
+#define TLAN_TC_TINT 0x0001
+#define TLAN_TLPHY_STS 0x12
+#define TLAN_TS_MINT 0x8000
+#define TLAN_TS_PHOK 0x4000
+#define TLAN_TS_POLOK 0x2000
+#define TLAN_TS_TPENERGY 0x1000
+#define TLAN_TS_RESERVED 0x0FFF
+
+
+#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+
+/* Routines to access internal registers. */
+
+inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)));
+
+} /* TLan_DioRead8 */
+
+
+
+
+inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)));
+
+} /* TLan_DioRead16 */
+
+
+
+
+inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return (inl(base_addr + TLAN_DIO_DATA));
+
+} /* TLan_DioRead32 */
+
+
+
+
+inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
+
+}
+
+
+
+
+inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+
+
+
+inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+
+
+#if 0
+inline void TLan_ClearBit(u8 bit, u16 port)
+{
+ outb_p(inb_p(port) & ~bit, port);
+}
+
+
+
+
+inline int TLan_GetBit(u8 bit, u16 port)
+{
+ return ((int) (inb_p(port) & bit));
+}
+
+
+
+
+inline void TLan_SetBit(u8 bit, u16 port)
+{
+ outb_p(inb_p(port) | bit, port);
+}
+#endif
+
+#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port)
+#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit))
+#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port)
+
+
+inline u32 xor( u32 a, u32 b )
+{
+ return ( ( a && ! b ) || ( ! a && b ) );
+}
+#define XOR8( a, b, c, d, e, f, g, h ) xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
+#define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+
+inline u32 TLan_HashFunc( u8 *a )
+{
+ u32 hash;
+
+ hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30), DA(a,36), DA(a,42) );
+ hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31), DA(a,37), DA(a,43) ) << 1;
+ hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32), DA(a,38), DA(a,44) ) << 2;
+ hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33), DA(a,39), DA(a,45) ) << 3;
+ hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34), DA(a,40), DA(a,46) ) << 4;
+ hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35), DA(a,41), DA(a,47) ) << 5;
+
+ return hash;
+
+}
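+
+/* Illustrative sketch only (not part of the original driver): the 6-bit
+ hash above selects one bit in the chip's 64-bit multicast filter, which
+ is split across the TLAN_HASH_1 and TLAN_HASH_2 DIO registers. The
+ function name below is invented for this example. */
+#if 0
+static void TLan_HashAddExample( u16 io_base, u8 *addr, u32 *hash1, u32 *hash2 )
+{
+ u32 offset = TLan_HashFunc( addr );
+
+ if ( offset < 32 )
+ *hash1 |= ( (u32) 1 << offset );
+ else
+ *hash2 |= ( (u32) 1 << ( offset - 32 ) );
+
+ TLan_DioWrite32( io_base, TLAN_HASH_1, *hash1 );
+ TLan_DioWrite32( io_base, TLAN_HASH_2, *hash2 );
+}
+#endif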
+
+
+
+
+#endif
diff --git a/linux/src/drivers/net/tulip.c b/linux/src/drivers/net/tulip.c
new file mode 100644
index 0000000..2a20301
--- /dev/null
+++ b/linux/src/drivers/net/tulip.c
@@ -0,0 +1,3685 @@
+/* tulip.c: A DEC 21040 family ethernet driver for Linux. */
+/*
+ Written/copyright 1994-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Digital "Tulip" Ethernet adapter interface.
+ It should work with most DEC 21*4*-based chips/ethercards, as well as
+ with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and ASIX.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/tulip.html
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"tulip.c:v0.97 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/tulip.html\n";
+
+#define SMP_CHECK
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+static int debug = 2; /* Message enable: 0..31 = no..all messages. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 25;
+
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS] = {0, };
+static int options[MAX_UNITS] = {0, };
+static int mtu[MAX_UNITS] = {0, }; /* Jumbo MTU for interfaces. */
+
+/* The possible media types that can be set in options[] are: */
+#define MEDIA_MASK 31
+static const char * const medianame[32] = {
+ "10baseT", "10base2", "AUI", "100baseTx",
+ "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+ "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+ "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+ "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+ "","","","", "","","","", "","","","Transceiver reset",
+};
+
+/* Set if the PCI BIOS detects the chips on a multiport board backwards. */
+#ifdef REVERSE_PROBE_ORDER
+static int reverse_probe = 1;
+#else
+static int reverse_probe = 0;
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#ifdef __alpha__ /* Always copy to aligned IP headers. */
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+ Set the bus performance register.
+ Typical: Set 16 longword cache alignment, no burst limit.
+   Cache alignment bits 15:14          Burst length bits 13:8
+     0000  No alignment                0x00000000  unlimited      0800  8 longwords
+     4000  8 longwords                 0100  1 longword           1000  16 longwords
+     8000  16 longwords                0200  2 longwords          2000  32 longwords
+     C000  32 longwords                0400  4 longwords
+ Warning: many older 486 systems are broken and require setting 0x00A04800
+ 8 longword cache alignment, 8 longword burst.
+ ToDo: Non-Intel setting could be better.
+*/
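+/* Worked example of the encoding above (as I read the table): the x86
+ default below, 0x01A00000 | 0x8000, selects 16 longword cache alignment
+ (bits 15:14 = 10b) with no burst limit, while the 486 fallback value
+ 0x00A04800 selects 8 longword alignment and an 8 longword burst
+ (bits 13:8 = 0x08). */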
+
+#if defined(__alpha__) || defined(__x86_64) || defined(__ia64)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__) || defined(__sparc__)
+/* Do *not* rely on hardware endian correction for big-endian machines! */
+static int csr0 = 0x01A00000 | 0x8000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC.
+ This value does not apply to the 512 bit table chips.
+*/
+static int multicast_filter_limit = 32;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the descriptor ring sizes a power of two for efficiency.
+ The Tx queue length limits transmit packets to a portion of the available
+ ring entries. It should be at least one element less to allow multicast
+ filter setup frames to be queued. It must be at least four for hysteresis.
+ Making the Tx queue too long decreases the effectiveness of channel
+ bonding and packet priority.
+ Large receive rings waste memory and confound network buffer limits.
+ These values have been carefully studied: changing them might mask a
+ problem, but it won't fix it.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+/* Preferred skbuff allocation size. */
+#define PKT_BUF_SZ 1536
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+ to support a pre-NWay full-duplex signaling mechanism using short frames.
+ No one knows what it should be, but if left at its default value some
+ 10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC 0x6969
+
+/* The include file section. We start by doing checks and fix-ups for
+ missing compile flags. */
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(reverse_probe, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(csr0, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+#ifdef MODULE_PARM_DESC
+MODULE_PARM_DESC(debug, "Tulip driver message level (0-31)");
+MODULE_PARM_DESC(options,
+ "Tulip: force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Tulip driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Tulip: non-zero to set forced full duplex.");
+MODULE_PARM_DESC(rx_copybreak,
+ "Tulip breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Tulip breakpoint for switching to Rx-all-multicast");
+MODULE_PARM_DESC(reverse_probe, "Search PCI devices in reverse order to work "
+ "around misordered multiport NICS.");
+MODULE_PARM_DESC(csr0, "Special setting for the CSR0 PCI bus parameter "
+ "register.");
+#endif
+
+/* This driver was originally written to use I/O space access, but now
+ uses memory space by default. Override this with -DUSE_IO_OPS. */
+#if (LINUX_VERSION_CODE < 0x20100) || ! defined(MODULE)
+#define USE_IO_OPS
+#endif
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb readb
+#define inw readw
+#define inl readl
+#define outb writeb
+#define outw writew
+#define outl writel
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the DECchip "Tulip", Digital's
+single-chip ethernet controllers for PCI. Supported members of the family
+are the 21040, 21041, 21140, 21140A, 21142, and 21143. Similar work-alike
+chips from Lite-On, Macronics, ASIX, Compex and other listed below are also
+supported.
+
+These chips are used on at least 140 unique PCI board designs. The great
+number of chips and board designs supported is the reason for the
+driver size and complexity. Almost all of the increased complexity is in the
+board configuration and media selection code. There is very little
+increase in the operational critical path length.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+Some boards have EEPROM tables with a default media entry. The factory default
+is usually "autoselect". This should only be overridden when using
+transceiver connections without link beat e.g. 10base2 or AUI, or (rarely!)
+for forcing full-duplex when used with old link partners that do not do
+autonegotiation.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Tulip can use either ring buffers or lists of Tx and Rx descriptors.
+This driver uses statically allocated rings of Rx and Tx descriptors, set at
+compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
+for the Rx ring buffers at open() time and passes the skb->data field to the
+Tulip as receive data buffers. When an incoming frame is less than
+RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
+copied to the new skbuff. When the incoming frame is larger, the skbuff is
+passed directly up the protocol stack and replaced by a newly allocated
+skbuff.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data. A subtle aspect of this
+choice is that the Tulip only receives into longword aligned buffers, thus
+the IP header at offset 14 is not longword aligned for further processing.
+Copied frames are put into the new skbuff at an offset of "+2", thus copying
+has the beneficial effect of aligning the IP header and preloading the
+cache.
+
+IIIC. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it is queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'tp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can not be selectively turned off, so
+we cannot avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Duke Kamstra of SMC for long ago providing an EtherPower board.
+Greg LaPolla at Linksys provided PNIC and other Linksys boards.
+Znyx provided a four-port card for testing.
+
+IVb. References
+
+http://scyld.com/expert/NWay.html
+http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
+http://www.national.com/pf/DP/DP83840A.html
+http://www.asix.com.tw/pmac.htm
+http://www.admtek.com.tw/
+
+IVc. Errata
+
+The old DEC databooks were light on details.
+The 21040 databook claims that CSR13, CSR14, and CSR15 should each be the last
+register of the set CSR12-15 written. Hmmm, now how is that possible?
+
+The DEC SROM format is very badly designed, not precisely defined, leading to
+part of the media selection junkheap below. Some boards do not have EEPROM
+media tables and need to be patched up. Worse, other boards use the DEC
+design kit media table when it is not correct for their design.
+
+We cannot use MII interrupts because there is no defined GPIO pin to attach
+them. The MII transceiver status is polled using a kernel timer.
+
+*/
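+
+/* Illustrative sketch only of the rx_copybreak policy described above; the
+ real receive path is in tulip_rx() and differs in detail. The function
+ name below is invented for this example, and ring_skb stands in for the
+ skbuff currently owned by the Rx ring slot. */
+#if 0
+static struct sk_buff *tulip_rx_copybreak_example(struct net_device *dev,
+ struct sk_buff *ring_skb, int pkt_len, int copybreak)
+{
+ struct sk_buff *skb;
+
+ if (pkt_len < copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Leaves the IP header longword aligned. */
+ memcpy(skb_put(skb, pkt_len), ring_skb->tail, pkt_len);
+ /* ring_skb stays in the Rx ring for reuse. */
+ } else {
+ skb = ring_skb; /* Pass the full-sized skbuff up the stack. */
+ skb_put(skb, pkt_len);
+ }
+ return skb;
+}
+#endif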
+
+static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int tulip_pwr_event(void *dev_instance, int event);
+
+#ifdef USE_IO_OPS
+#define TULIP_IOTYPE PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0
+#define TULIP_SIZE 0x80
+#define TULIP_SIZE1 0x100
+#else
+#define TULIP_IOTYPE PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1
+#define TULIP_SIZE 0x400 /* New PCI v2.1 recommends 4K min mem size. */
+#define TULIP_SIZE1 0x400 /* New PCI v2.1 recommends 4K min mem size. */
+#endif
+
+/* This must match tulip_tbl[]! Note 21142 == 21143. */
+enum tulip_chips {
+ DC21040=0, DC21041=1, DC21140=2, DC21142=3, DC21143=3,
+ LC82C168, MX98713, MX98715, MX98725, AX88141, AX88140, PNIC2, COMET,
+ COMPEX9881, I21145, XIRCOM, CONEXANT,
+ /* These flags may be added to the chip type. */
+ HAS_VLAN=0x100,
+};
+
+static struct pci_id_info pci_id_tbl[] = {
+ { "Digital DC21040 Tulip", { 0x00021011, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21040 },
+ { "Digital DC21041 Tulip", { 0x00141011, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21041 },
+ { "Digital DS21140A Tulip", { 0x00091011, 0xffffffff, 0,0, 0x20,0xf0 },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Digital DS21140 Tulip", { 0x00091011, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Digital DS21143-xD Tulip", { 0x00191011, 0xffffffff, 0,0, 0x40,0xf0 },
+ TULIP_IOTYPE, TULIP_SIZE, DC21142 | HAS_VLAN },
+ { "Digital DS21143-xC Tulip", { 0x00191011, 0xffffffff, 0,0, 0x30,0xf0 },
+ TULIP_IOTYPE, TULIP_SIZE, DC21142 },
+ { "Digital DS21142 Tulip", { 0x00191011, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE, DC21142 },
+ { "Kingston KNE110tx (PNIC)",
+ { 0x000211AD, 0xffffffff, 0xf0022646, 0xffffffff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Linksys LNE100TX (82c168 PNIC)", /* w/SYM */
+ { 0x000211AD, 0xffffffff, 0xffff11ad, 0xffffffff, 17,0xff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Linksys LNE100TX (82c169 PNIC)", /* w/ MII */
+ { 0x000211AD, 0xffffffff, 0xf00311ad, 0xffffffff, 32,0xff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Lite-On 82c168 PNIC", { 0x000211AD, 0xffffffff },
+ TULIP_IOTYPE, 256, LC82C168 },
+ { "Macronix 98713 PMAC", { 0x051210d9, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98713 },
+ { "Macronix 98715 PMAC", { 0x053110d9, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98715 },
+ { "Macronix 98725 PMAC", { 0x053110d9, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98725 },
+ { "ASIX AX88141", { 0x1400125B, 0xffffffff, 0,0, 0x10, 0xf0 },
+ TULIP_IOTYPE, 128, AX88141 },
+ { "ASIX AX88140", { 0x1400125B, 0xffffffff },
+ TULIP_IOTYPE, 128, AX88140 },
+ { "Lite-On LC82C115 PNIC-II", { 0xc11511AD, 0xffffffff },
+ TULIP_IOTYPE, 256, PNIC2 },
+ { "ADMtek AN981 Comet", { 0x09811317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-P", { 0x09851317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C", { 0x19851317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "D-Link DFE-680TXD v1.0 (ADMtek Centaur-C)", { 0x15411186, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys v2)", { 0xab0213d1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys)", { 0xab0313d1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys)", { 0xab0813d1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys PCM200 v3)", { 0xab081737, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Centaur-C (Linksys PCM200 v3)", { 0xab091737, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "STMicro STE10/100 Comet", { 0x0981104a, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "STMicro STE10/100A Comet", { 0x2774104a, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Comet-II", { 0x95111317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Comet-II (9513)", { 0x95131317, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "SMC1255TX (ADMtek Comet)",
+ { 0x12161113, 0xffffffff, 0x125510b8, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "Accton EN1217/EN2242 (ADMtek Comet)", { 0x12161113, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "SMC1255TX (ADMtek Comet-II)", { 0x125510b8, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "ADMtek Comet-II (model 1020)", { 0x1020111a, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "Allied Telesyn A120 (ADMtek Comet)", { 0xa1201259, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { "Compex RL100-TX", { 0x988111F6, 0xffffffff },
+ TULIP_IOTYPE, 128, COMPEX9881 },
+ { "Intel 21145 Tulip", { 0x00398086, 0xffffffff },
+ TULIP_IOTYPE, 128, I21145 },
+ { "Xircom Tulip clone", { 0x0003115d, 0xffffffff },
+ TULIP_IOTYPE, 128, XIRCOM },
+ { "Davicom DM9102", { 0x91021282, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Davicom DM9100", { 0x91001282, 0xffffffff },
+ TULIP_IOTYPE, 0x80, DC21140 },
+ { "Macronix mxic-98715 (EN1217)", { 0x12171113, 0xffffffff },
+ TULIP_IOTYPE, 256, MX98715 },
+ { "Conexant LANfinity", { 0x180314f1, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, CONEXANT },
+ { "3Com 3cSOHO100B-TX (ADMtek Centaur)", { 0x930010b7, 0xffffffff },
+ TULIP_IOTYPE, TULIP_SIZE1, COMET },
+ { 0},
+};
+
+struct drv_id_info tulip_drv_id = {
+ "tulip", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ tulip_probe1, tulip_pwr_event };
+
+/* This table is used during operation for capabilities and media timer. */
+
+static void tulip_timer(unsigned long data);
+static void nway_timer(unsigned long data);
+static void mxic_timer(unsigned long data);
+static void pnic_timer(unsigned long data);
+static void comet_timer(unsigned long data);
+
+enum tbl_flag {
+ HAS_MII=1, HAS_MEDIA_TABLE=2, CSR12_IN_SROM=4, ALWAYS_CHECK_MII=8,
+ HAS_PWRDWN=0x10, MC_HASH_ONLY=0x20, /* Hash-only multicast filter. */
+ HAS_PNICNWAY=0x80, HAS_NWAY=0x40, /* Uses internal NWay xcvr. */
+ HAS_INTR_MITIGATION=0x100, IS_ASIX=0x200, HAS_8023X=0x400,
+ COMET_MAC_ADDR=0x0800,
+};
+
+/* Note: this table must match enum tulip_chips above. */
+static struct tulip_chip_table {
+ char *chip_name;
+ int io_size; /* Unused */
+ int valid_intrs; /* CSR7 interrupt enable settings */
+ int flags;
+ void (*media_timer)(unsigned long data);
+} tulip_tbl[] = {
+ { "Digital DC21040 Tulip", 128, 0x0001ebef, 0, tulip_timer },
+ { "Digital DC21041 Tulip", 128, 0x0001ebff,
+ HAS_MEDIA_TABLE | HAS_NWAY, tulip_timer },
+ { "Digital DS21140 Tulip", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, tulip_timer },
+ { "Digital DS21143 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY
+ | HAS_INTR_MITIGATION, nway_timer },
+ { "Lite-On 82c168 PNIC", 256, 0x0001ebef,
+ HAS_MII | HAS_PNICNWAY, pnic_timer },
+ { "Macronix 98713 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+ { "Macronix 98715 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+ { "Macronix 98725 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+ { "ASIX AX88140", 128, 0x0001fbff,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
+ { "ASIX AX88141", 128, 0x0001fbff,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
+ { "Lite-On PNIC-II", 256, 0x0801fbff,
+ HAS_MII | HAS_NWAY | HAS_8023X, nway_timer },
+ { "ADMtek Comet", 256, 0x0001abef,
+ HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+ { "Compex 9881 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+ { "Intel DS21145 Tulip", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
+ nway_timer },
+ { "Xircom tulip work-alike", 128, 0x0801fbff,
+ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
+ nway_timer },
+ { "Conexant LANfinity", 256, 0x0001ebef,
+ HAS_MII | HAS_PWRDWN, tulip_timer },
+ {0},
+};
+
+/* A full-duplex map for media types. */
+enum MediaIs {
+ MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
+ MediaIs100=16};
+static const char media_cap[32] =
+{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
+static u8 t21040_csr13[] = {2,0x0C,8,4, 4,0,0,0, 0,0,0,0, 4,0,0,0};
+
+/* 21041 transceiver register settings: 10-T, 10-2, AUI, 10-T, 10T-FD*/
+static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
+static u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+/* Offsets to the Command and Status Registers, "CSRs". All accesses
+ must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+ CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
+ CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
+ CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78 };
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+ TimerInt=0x800, TPLnkFail=0x1000, TPLnkPass=0x10,
+ NormalIntr=0x10000, AbnormalIntr=0x8000, PCIBusError=0x2000,
+ RxJabber=0x200, RxStopped=0x100, RxNoBuf=0x80, RxIntr=0x40,
+ TxFIFOUnderflow=0x20, TxJabber=0x08, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
+};
+
+/* The configuration bits in CSR6. */
+enum csr6_mode_bits {
+ TxOn=0x2000, RxOn=0x0002, FullDuplex=0x0200,
+ AcceptBroadcast=0x0100, AcceptAllMulticast=0x0080,
+ AcceptAllPhys=0x0040, AcceptRunt=0x0008,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2;
+};
+
+struct tulip_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2; /* We use only buffer 1. */
+};
+
+enum desc_status_bits {
+ DescOwned=0x80000000, RxDescFatalErr=0x8000, RxWholePkt=0x0300,
+};
+
+/* Ring-wrap flag in length field, use for last ring entry.
+ 0x01000000 means chain on buffer2 address,
+ 0x02000000 means use the ring start address in CSR2/3.
+ Note: Some work-alike chips do not function correctly in chained mode.
+ The ASIX chip works only in chained mode.
+ Thus we indicate ring mode, but always write the 'next' field for
+ chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
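+
+/* Illustrative sketch only of the note above: the last descriptor gets the
+ ring-wrap flag in its length field and its buffer2 chained back to the
+ ring start, so both ring-mode and chained-mode chips are satisfied. The
+ function name is invented for this example. */
+#if 0
+static void tulip_ring_wrap_example(struct tulip_rx_desc *rx_ring)
+{
+ rx_ring[RX_RING_SIZE - 1].length |= cpu_to_le32(DESC_RING_WRAP);
+ rx_ring[RX_RING_SIZE - 1].buffer2 = virt_to_le32desc(&rx_ring[0]);
+}
+#endif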
+
+#define EEPROM_SIZE 512 /* support 256*16 EEPROMs */
+
+struct medialeaf {
+ u8 type;
+ u8 media;
+ unsigned char *leafdata;
+};
+
+struct mediatable {
+ u16 defaultmedia;
+ u8 leafcount, csr12dir; /* General purpose pin directions. */
+ unsigned has_mii:1, has_nonmii:1, has_reset:6;
+ u32 csr15dir, csr15val; /* 21143 NWay setting. */
+ struct medialeaf mleaf[0];
+};
+
+struct mediainfo {
+ struct mediainfo *next;
+ int info_type;
+ int index;
+ unsigned char *info;
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct tulip_private {
+ struct tulip_rx_desc rx_ring[RX_RING_SIZE];
+ struct tulip_tx_desc tx_ring[TX_RING_SIZE];
+ /* The saved addresses of Rx/Tx-in-place packet buffers. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address of dev->priv for kfree */
+ /* Multicast filter control. */
+ u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int multicast_filter_limit;
+ struct pci_dev *pci_dev;
+ int chip_id, revision;
+ int flags;
+ int max_interrupt_work;
+ int msg_level;
+ unsigned int csr0, csr6; /* Current CSR0, CSR6 settings. */
+ /* Note: cache line pairing and isolation of Rx vs. Tx indices. */
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+ unsigned int rx_dead:1; /* We have no Rx buffers. */
+
+ struct net_device_stats stats;
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ /* Media selection state. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int full_duplex_lock:1;
+ unsigned int fake_addr:1; /* Multiport board faked address. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Do not sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
+ struct timer_list timer; /* Media selection timer. */
+ void (*link_change)(struct net_device *dev, int csr5);
+ u16 lpar; /* 21143 Link partner ability. */
+ u16 sym_advertise, mii_advertise; /* NWay to-advertise. */
+ u16 advertising[4]; /* MII advertise, from SROM table. */
+ signed char phys[4], mii_cnt; /* MII device addresses. */
+ spinlock_t mii_lock;
+ struct mediatable *mtable;
+ int cur_index; /* Current media index. */
+ int saved_if_port;
+};
+
+static void start_link(struct net_device *dev);
+static void parse_eeprom(struct net_device *dev);
+static int read_eeprom(long ioaddr, int location, int addr_len);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int tulip_open(struct net_device *dev);
+/* Chip-specific media selection (timer functions prototyped above). */
+static int check_duplex(struct net_device *dev);
+static void select_media(struct net_device *dev, int startup);
+static void init_media(struct net_device *dev);
+static void nway_lnk_change(struct net_device *dev, int csr5);
+static void nway_start(struct net_device *dev);
+static void pnic_lnk_change(struct net_device *dev, int csr5);
+static void pnic_do_nway(struct net_device *dev);
+
+static void tulip_tx_timeout(struct net_device *dev);
+static void tulip_init_ring(struct net_device *dev);
+static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int tulip_rx(struct net_device *dev);
+static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int tulip_close(struct net_device *dev);
+static struct net_device_stats *tulip_get_stats(struct net_device *dev);
+#ifdef HAVE_PRIVATE_IOCTL
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* A list of all installed Tulip devices. */
+static struct net_device *root_tulip_dev = NULL;
+
+static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int pci_tbl_idx, int find_cnt)
+{
+ struct net_device *dev;
+ struct tulip_private *tp;
+ void *priv_mem;
+ /* See note below on the multiport cards. */
+ static unsigned char last_phys_addr[6] = {0x02, 'L', 'i', 'n', 'u', 'x'};
+ static int last_irq = 0;
+ static int multiport_cnt = 0; /* For four-port boards w/one EEPROM */
+ u8 chip_rev;
+ int i, chip_idx = pci_id_tbl[pci_tbl_idx].drv_flags & 0xff;
+ unsigned short sum;
+ u8 ee_data[EEPROM_SIZE];
+
+ /* Bring the 21041/21143 out of sleep mode.
+ Caution: Snooze mode does not work with some boards! */
+ if (tulip_tbl[chip_idx].flags & HAS_PWRDWN)
+ pci_write_config_dword(pdev, 0x40, 0x00000000);
+
+ if (inl(ioaddr + CSR5) == 0xffffffff) {
+ printk(KERN_ERR "The Tulip chip at %#lx is not functioning.\n", ioaddr);
+ return 0;
+ }
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ /* Make certain the data structures are quadword aligned. */
+ priv_mem = kmalloc(sizeof(*tp) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+ dev->priv = tp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(tp, 0, sizeof(*tp));
+ tp->mii_lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+ tp->priv_addr = priv_mem;
+
+ tp->next_module = root_tulip_dev;
+ root_tulip_dev = dev;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
+
+ printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+ dev->name, pci_id_tbl[pci_tbl_idx].name, chip_rev, ioaddr);
+
+ /* Stop the Tx and Rx processes. */
+ outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
+ /* Clear the missed-packet counter. */
+ inl(ioaddr + CSR8);
+
+ if (chip_idx == DC21041 && inl(ioaddr + CSR9) & 0x8000) {
+ printk(" 21040 compatible mode,");
+ chip_idx = DC21040;
+ }
+
+ /* The SROM/EEPROM interface varies dramatically. */
+ sum = 0;
+ if (chip_idx == DC21040) {
+ outl(0, ioaddr + CSR9); /* Reset the pointer with a dummy write. */
+ for (i = 0; i < 6; i++) {
+ int value, boguscnt = 100000;
+ do
+ value = inl(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ dev->dev_addr[i] = value;
+ sum += value & 0xff;
+ }
+ } else if (chip_idx == LC82C168) {
+ for (i = 0; i < 3; i++) {
+ int value, boguscnt = 100000;
+ outl(0x600 | i, ioaddr + 0x98);
+ do
+ value = inl(ioaddr + CSR9);
+ while (value < 0 && --boguscnt > 0);
+ put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
+ sum += value & 0xffff;
+ }
+ } else if (chip_idx == COMET) {
+ /* No need to read the EEPROM. */
+ put_unaligned(le32_to_cpu(inl(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
+ put_unaligned(le16_to_cpu(inl(ioaddr + 0xA8)),
+ (u16 *)(dev->dev_addr + 4));
+ for (i = 0; i < 6; i ++)
+ sum += dev->dev_addr[i];
+ } else {
+ /* A serial EEPROM interface, we read now and sort it out later. */
+ int sa_offset = 0;
+ int ee_addr_size = read_eeprom(ioaddr, 0xff, 8) & 0x40000 ? 8 : 6;
+ int eeprom_word_cnt = 1 << ee_addr_size;
+
+ for (i = 0; i < eeprom_word_cnt; i++)
+ ((u16 *)ee_data)[i] =
+ le16_to_cpu(read_eeprom(ioaddr, i, ee_addr_size));
+
+ /* DEC now has a specification (see Notes) but early board makers
+ just put the address in the first EEPROM locations. */
+ /* This does memcmp(eedata, eedata+16, 8) */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+ if (chip_idx == CONEXANT) {
+ /* Check that the tuple type and length is correct. */
+ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
+ sa_offset = 0x19A;
+ } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
+ ee_data[2] == 0) {
+ sa_offset = 2; /* Grrr, damn Matrox boards. */
+ multiport_cnt = 4;
+ }
+ for (i = 0; i < 6; i ++) {
+ dev->dev_addr[i] = ee_data[i + sa_offset];
+ sum += ee_data[i + sa_offset];
+ }
+ }
+ /* Lite-On boards have the address byte-swapped. */
+ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0)
+ && dev->dev_addr[1] == 0x00)
+ for (i = 0; i < 6; i+=2) {
+ char tmp = dev->dev_addr[i];
+ dev->dev_addr[i] = dev->dev_addr[i+1];
+ dev->dev_addr[i+1] = tmp;
+ }
+ /* On the Zynx 315 Etherarray and other multiport boards only the
+ first Tulip has an EEPROM.
+ The addresses of the subsequent ports are derived from the first.
+ Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+ that here as well. */
+ if (sum == 0 || sum == 6*0xff) {
+ printk(" EEPROM not present,");
+ for (i = 0; i < 5; i++)
+ dev->dev_addr[i] = last_phys_addr[i];
+ dev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(__i386__) /* Patch up x86 BIOS bug. */
+ if (last_irq)
+ irq = last_irq;
+#endif
+ }
+
+ for (i = 0; i < 6; i++)
+ printk("%c%2.2X", i ? ':' : ' ', last_phys_addr[i] = dev->dev_addr[i]);
+ printk(", IRQ %d.\n", irq);
+ last_irq = irq;
+
+#ifdef USE_IO_OPS
+ /* We do a request_region() to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ tp->pci_dev = pdev;
+ tp->msg_level = (1 << debug) - 1;
+ tp->chip_id = chip_idx;
+ tp->revision = chip_rev;
+ tp->flags = tulip_tbl[chip_idx].flags
+ | (pci_id_tbl[pci_tbl_idx].drv_flags & 0xffffff00);
+ tp->rx_copybreak = rx_copybreak;
+ tp->max_interrupt_work = max_interrupt_work;
+ tp->multicast_filter_limit = multicast_filter_limit;
+ tp->csr0 = csr0;
+
+ /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles.
+ And the ASIX must have a burst limit or horrible things happen. */
+ if (chip_idx == DC21143 && chip_rev == 65)
+ tp->csr0 &= ~0x01000000;
+ else if (tp->flags & IS_ASIX)
+ tp->csr0 |= 0x2000;
+
+ /* We support a zillion ways to set the media type. */
+#ifdef TULIP_FULL_DUPLEX
+ tp->full_duplex = 1;
+ tp->full_duplex_lock = 1;
+#endif
+#ifdef TULIP_DEFAULT_MEDIA
+ tp->default_port = TULIP_DEFAULT_MEDIA;
+#endif
+#ifdef TULIP_NO_MEDIA_SWITCH
+ tp->medialock = 1;
+#endif
+
+ /* The lower four bits are the media type. */
+ if (find_cnt >= 0 && find_cnt < MAX_UNITS) {
+ if (options[find_cnt] & 0x1f)
+ tp->default_port = options[find_cnt] & 0x1f;
+ if ((options[find_cnt] & 0x200) || full_duplex[find_cnt] > 0)
+ tp->full_duplex = 1;
+ if (mtu[find_cnt] > 0)
+ dev->mtu = mtu[find_cnt];
+ }
+ if (dev->mem_start)
+ tp->default_port = dev->mem_start & 0x1f;
+ if (tp->default_port) {
+ printk(KERN_INFO "%s: Transceiver selection forced to %s.\n",
+ dev->name, medianame[tp->default_port & MEDIA_MASK]);
+ tp->medialock = 1;
+ if (media_cap[tp->default_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ }
+ if (tp->full_duplex)
+ tp->full_duplex_lock = 1;
+
+ if (media_cap[tp->default_port] & MediaIsMII) {
+ u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+ tp->mii_advertise = media2advert[tp->default_port - 9];
+ tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+ }
+
+ /* This is logically part of probe1(), but too complex to write inline. */
+ if (tp->flags & HAS_MEDIA_TABLE) {
+ memcpy(tp->eeprom, ee_data, sizeof(tp->eeprom));
+ parse_eeprom(dev);
+ }
+
+ /* The Tulip-specific entries in the device structure. */
+ dev->open = &tulip_open;
+ dev->hard_start_xmit = &tulip_start_xmit;
+ dev->stop = &tulip_close;
+ dev->get_stats = &tulip_get_stats;
+#ifdef HAVE_PRIVATE_IOCTL
+ dev->do_ioctl = &private_ioctl;
+#endif
+#ifdef HAVE_MULTICAST
+ dev->set_multicast_list = &set_rx_mode;
+#endif
+
+ if (tp->flags & HAS_NWAY)
+ tp->link_change = nway_lnk_change;
+ else if (tp->flags & HAS_PNICNWAY)
+ tp->link_change = pnic_lnk_change;
+ start_link(dev);
+ if (chip_idx == COMET) {
+ /* Set the Comet LED configuration. */
+ outl(0xf0000000, ioaddr + CSR9);
+ }
+
+ return dev;
+}
+
+/* Start the link, typically called at probe1() time but sometimes later with
+ multiport cards. */
+static void start_link(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ if ((tp->flags & ALWAYS_CHECK_MII) ||
+ (tp->mtable && tp->mtable->has_mii) ||
+ ( ! tp->mtable && (tp->flags & HAS_MII))) {
+ int phyn, phy_idx = 0;
+ if (tp->mtable && tp->mtable->has_mii) {
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == 11) {
+ tp->cur_index = i;
+ tp->saved_if_port = dev->if_port;
+ select_media(dev, 2);
+ dev->if_port = tp->saved_if_port;
+ break;
+ }
+ }
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later,
+ but takes much time. */
+ for (phyn = 1; phyn <= 32 && phy_idx < sizeof(tp->phys); phyn++) {
+ int phy = phyn & 0x1f;
+ int mii_status = mdio_read(dev, phy, 1);
+ if ((mii_status & 0x8301) == 0x8001 ||
+ ((mii_status & 0x8000) == 0 && (mii_status & 0x7800) != 0)) {
+ int mii_reg0 = mdio_read(dev, phy, 0);
+ int mii_advert = mdio_read(dev, phy, 4);
+ int to_advert;
+
+ if (tp->mii_advertise)
+ to_advert = tp->mii_advertise;
+ else if (tp->advertising[phy_idx])
+ to_advert = tp->advertising[phy_idx];
+ else /* Leave unchanged. */
+ tp->mii_advertise = to_advert = mii_advert;
+
+ tp->phys[phy_idx++] = phy;
+ printk(KERN_INFO "%s: MII transceiver #%d "
+ "config %4.4x status %4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_reg0, mii_status, mii_advert);
+ /* Fixup for DLink with miswired PHY. */
+ if (mii_advert != to_advert) {
+ printk(KERN_DEBUG "%s: Advertising %4.4x on PHY %d,"
+ " previously advertising %4.4x.\n",
+ dev->name, to_advert, phy, mii_advert);
+ mdio_write(dev, phy, 4, to_advert);
+ }
+ /* Enable autonegotiation: some boards default to off. */
+ mdio_write(dev, phy, 0, (mii_reg0 & ~0x3000) |
+ (tp->full_duplex ? 0x0100 : 0x0000) |
+ ((media_cap[tp->default_port] & MediaIs100) ?
+ 0x2000 : 0x1000));
+ }
+ }
+ tp->mii_cnt = phy_idx;
+ if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
+ printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
+ dev->name);
+ tp->phys[0] = 1;
+ }
+ }
+
+ /* Reset the xcvr interface and turn on heartbeat. */
+ switch (tp->chip_id) {
+ case DC21040:
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0x00000004, ioaddr + CSR13);
+ break;
+ case DC21041:
+ /* This is nway_start(). */
+ if (tp->sym_advertise == 0)
+ tp->sym_advertise = 0x0061;
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+ outl(inl(ioaddr + CSR6) | FullDuplex, ioaddr + CSR6);
+ outl(0x0000EF01, ioaddr + CSR13);
+ break;
+ case DC21140: default:
+ if (tp->mtable)
+ outl(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+ break;
+ case DC21142:
+ case PNIC2:
+ if (tp->mii_cnt || media_cap[dev->if_port] & MediaIsMII) {
+ outl(0x82020000, ioaddr + CSR6);
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ outl(0x820E0000, ioaddr + CSR6);
+ } else
+ nway_start(dev);
+ break;
+ case LC82C168:
+ if ( ! tp->mii_cnt) {
+ tp->nway = 1;
+ tp->nwayset = 0;
+ outl(0x00420000, ioaddr + CSR6);
+ outl(0x30, ioaddr + CSR12);
+ outl(0x0001F078, ioaddr + 0xB8);
+ outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ }
+ break;
+ case COMPEX9881:
+ outl(0x00000000, ioaddr + CSR6);
+ outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+ outl(0x00000001, ioaddr + CSR13);
+ break;
+ case MX98713: case MX98715: case MX98725:
+ outl(0x01a80000, ioaddr + CSR6);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00001000, ioaddr + CSR12);
+ break;
+ case COMET:
+ break;
+ }
+
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+}
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+ Search www.digital.com for "21X4 SROM" to get details.
+ This code is very complex, and will require changes to support
+ additional cards, so I will be verbose about what is going on.
+ */
+
+/* Known cards that have old-style EEPROMs.
+ Writing this table is described at
+ http://www.scyld.com/network/tulip-media.html
+*/
+static struct fixups {
+ char *name;
+ unsigned char addr0, addr1, addr2;
+ u16 newtable[32]; /* Max length below. */
+} eeprom_fixups[] = {
+ {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+ 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+ {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0903, 0x006D, /* 100baseTx */
+ 0x0905, 0x006D, /* 100baseTx-FD */ }},
+ {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+ 0x0107, 0x8021, /* 100baseFx */
+ 0x0108, 0x8021, /* 100baseFx-FD */
+ 0x0100, 0x009E, /* 10baseT */
+ 0x0104, 0x009E, /* 10baseT-FD */
+ 0x0103, 0x006D, /* 100baseTx */
+ 0x0105, 0x006D, /* 100baseTx-FD */ }},
+ {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+ 0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+ 0x0000, 0x009E, /* 10baseT */
+ 0x0004, 0x009E, /* 10baseT-FD */
+ 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+ 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+ {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+ 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */
+ 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */
+ 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+ 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+ 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+ }},
+ {0, 0, 0, 0, {}}};
+
+static const char * block_name[] = {"21140 non-MII", "21140 MII PHY",
+ "21142 Serial PHY", "21142 MII PHY", "21143 SYM PHY", "21143 reset method"};
+
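+/* Fetch a little-endian 16-bit value that may be unaligned: the i386 allows
+ unaligned loads, so a direct dereference is used there; other architectures
+ assemble the value byte by byte. */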
+#if defined(__i386__) /* AKA get_unaligned() */
+#define get_u16(ptr) (*(u16 *)(ptr))
+#else
+#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+#endif
+
+static void parse_eeprom(struct net_device *dev)
+{
+ /* The last media info list parsed, for multiport boards. */
+ static struct mediatable *last_mediatable = NULL;
+ static unsigned char *last_ee_data = NULL;
+ static int controller_index = 0;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ unsigned char *p, *ee_data = tp->eeprom;
+ int new_advertise = 0;
+ int i;
+
+ tp->mtable = 0;
+ /* Detect an old-style (SA only) EEPROM layout:
+ memcmp(eedata, eedata+16, 8). */
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ break;
+ if (i >= 8) {
+ if (ee_data[0] == 0xff) {
+ if (last_mediatable) {
+ controller_index++;
+ printk(KERN_INFO "%s: Controller %d of multiport board.\n",
+ dev->name, controller_index);
+ tp->mtable = last_mediatable;
+ ee_data = last_ee_data;
+ goto subsequent_board;
+ } else
+ printk(KERN_INFO "%s: Missing EEPROM, this interface may "
+ "not work correctly!\n",
+ dev->name);
+ return;
+ }
+ /* Do a fix-up based on the vendor half of the station address. */
+ for (i = 0; eeprom_fixups[i].name; i++) {
+ if (dev->dev_addr[0] == eeprom_fixups[i].addr0
+ && dev->dev_addr[1] == eeprom_fixups[i].addr1
+ && dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+ if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
+ i++; /* An Accton EN1207, not an outlaw Maxtech. */
+ memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+ sizeof(eeprom_fixups[i].newtable));
+ printk(KERN_INFO "%s: Old format EEPROM on '%s' board.\n"
+ KERN_INFO "%s: Using substitute media control info.\n",
+ dev->name, eeprom_fixups[i].name, dev->name);
+ break;
+ }
+ }
+ if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+ printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+ "information.\n",
+ dev->name);
+ return;
+ }
+ }
+
+ controller_index = 0;
+ if (ee_data[19] > 1) {
+ struct net_device *prev_dev;
+ struct tulip_private *otp;
+ /* This is a multiport board. The probe order may be "backwards", so
+ we patch up already found devices. */
+ last_ee_data = ee_data;
+ for (prev_dev = tp->next_module; prev_dev; prev_dev = otp->next_module) {
+ otp = (struct tulip_private *)prev_dev->priv;
+ if (otp->eeprom[0] == 0xff && otp->mtable == 0) {
+ parse_eeprom(prev_dev);
+ start_link(prev_dev);
+ } else
+ break;
+ }
+ controller_index = 0;
+ }
+subsequent_board:
+
+ p = (void *)ee_data + ee_data[27 + controller_index*3];
+ if (ee_data[27] == 0) { /* No valid media table. */
+ } else if (tp->chip_id == DC21041) {
+ int media = get_u16(p);
+ int count = p[2];
+ p += 3;
+
+ printk(KERN_INFO "%s: 21041 Media table, default media %4.4x (%s).\n",
+ dev->name, media,
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ unsigned char media_block = *p++;
+ int media_code = media_block & MEDIA_MASK;
+ if (media_block & 0x40)
+ p += 6;
+ switch(media_code) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+ }
+ printk(KERN_INFO "%s: 21041 media #%d, %s.\n",
+ dev->name, media_code, medianame[media_code]);
+ }
+ } else {
+ unsigned char csr12dir = 0;
+ int count;
+ struct mediatable *mtable;
+ u16 media = get_u16(p);
+
+ p += 2;
+ if (tp->flags & CSR12_IN_SROM)
+ csr12dir = *p++;
+ count = *p++;
+ mtable = (struct mediatable *)
+ kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf),
+ GFP_KERNEL);
+ if (mtable == NULL)
+ return; /* Horrible, impossible failure. */
+ last_mediatable = tp->mtable = mtable;
+ mtable->defaultmedia = media;
+ mtable->leafcount = count;
+ mtable->csr12dir = csr12dir;
+ mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+ mtable->csr15dir = mtable->csr15val = 0;
+
+ printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ struct medialeaf *leaf = &mtable->mleaf[i];
+
+ if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+ leaf->type = 0;
+ leaf->media = p[0] & 0x3f;
+ leaf->leafdata = p;
+ if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */
+ mtable->has_mii = 1;
+ p += 4;
+ } else {
+ switch(leaf->type = p[1]) {
+ case 5:
+ mtable->has_reset = i + 1; /* Assure non-zero */
+ /* Fall through */
+ case 6:
+ leaf->media = 31;
+ break;
+ case 1: case 3:
+ mtable->has_mii = 1;
+ leaf->media = 11;
+ break;
+ case 2:
+ if ((p[2] & 0x3f) == 0) {
+ u32 base15 = (p[2] & 0x40) ? get_u16(p + 7) : 0x0008;
+ u16 *p1 = (u16 *)(p + (p[2] & 0x40 ? 9 : 3));
+ mtable->csr15dir = (get_unaligned(p1 + 0)<<16) + base15;
+ mtable->csr15val = (get_unaligned(p1 + 1)<<16) + base15;
+ }
+ /* Fall through. */
+ case 0: case 4:
+ mtable->has_nonmii = 1;
+ leaf->media = p[2] & MEDIA_MASK;
+ switch (leaf->media) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+ case 3: new_advertise |= 0x0080; break;
+ case 5: new_advertise |= 0x0100; break;
+ case 6: new_advertise |= 0x0200; break;
+ }
+ break;
+ default:
+ leaf->media = 19;
+ }
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ }
+ if ((tp->msg_level & NETIF_MSG_LINK) &&
+ leaf->media == 11) {
+ unsigned char *bp = leaf->leafdata;
+ printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
+ "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+ dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ }
+ if (tp->msg_level & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
+ "by a %s (%d) block.\n",
+ dev->name, i, medianame[leaf->media], leaf->media,
+ leaf->type < 6 ? block_name[leaf->type] : "UNKNOWN",
+ leaf->type);
+ }
+ if (new_advertise)
+ tp->sym_advertise = new_advertise;
+ }
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */
+#define EE_CS 0x01 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0 0x01
+#define EE_WRITE_1 0x05
+#define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */
+#define EE_ENB (0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ Even at 33 MHz, current PCI implementations do not overrun the EEPROM clock.
+ We add a bus turn-around to ensure that this remains true. */
+#define eeprom_delay() inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_READ_CMD (6)
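+/* read_eeprom() below bit-bangs the Microwire-style serial protocol: with the
+ chip select raised it shifts the read command (the always-set start bit, the
+ READ opcode, then addr_len address bits) out MSB first on EE_DATA_WRITE,
+ clocking each bit with EE_SHIFT_CLK, and then clocks 16 data bits back in
+ on EE_DATA_READ. */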
+
+/* Note: this routine returns extra data bits for size detection. */
+static int read_eeprom(long ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ long ee_addr = ioaddr + CSR9;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ outl(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outl(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outl(EE_ENB, ee_addr);
+ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details. */
+
+/* The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues or future 66 MHz PCI. */
+#define mdio_delay() inl(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+ MDIO protocol. It is just different enough from the EEPROM protocol
+ to not share code. The maximum data clock rate is 2.5 MHz. */
+#define MDIO_SHIFT_CLK 0x10000
+#define MDIO_DATA_WRITE0 0x00000
+#define MDIO_DATA_WRITE1 0x20000
+#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN 0x40000
+#define MDIO_DATA_READ 0x80000
+
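+/* The Comet chip exposes some MII management registers directly as chip
+ registers; this table maps each MII register number to its register offset
+ (0 = not mapped) and is used below for reads/writes to phy_id 30. */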
+static const unsigned char comet_miireg2offset[32] = {
+ 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0,
+ 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+ int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+ int retval = 0;
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return 0xffff;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ return inl(ioaddr + comet_miireg2offset[location]);
+ return 0xffff;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
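+ /* The PNIC (LC82C168) has a built-in MDIO engine: write the read command to
+ register 0xA0 and poll until the busy bit (0x80000000) clears. */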
+ int i = 1000;
+ outl(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ while (--i > 0)
+ if ( ! ((retval = inl(ioaddr + 0xA0)) & 0x80000000))
+ break;
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return retval & 0xffff;
+ }
+
+ /* Establish sync by sending at least 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Read the two transition bits, 16 data bits, and the wire-idle bit. */
+ for (i = 19; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int val)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+ int cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | (val & 0xffff);
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
+ unsigned long flags;
+
+ if (location & ~0x1f)
+ return;
+
+ if (tp->chip_id == COMET && phy_id == 30) {
+ if (comet_miireg2offset[location])
+ outl(val, ioaddr + comet_miireg2offset[location]);
+ return;
+ }
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ int i = 1000;
+ outl(cmd, ioaddr + 0xA0);
+ do
+ if ( ! (inl(ioaddr + 0xA0) & 0x80000000))
+ break;
+ while (--i > 0);
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+ }
+
+ /* Establish sync by sending 32 logic ones. */
+ for (i = 32; i >= 0; i--) {
+ outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+ outl(MDIO_ENB | dataval, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ outl(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+}
+
+
+static int
+tulip_open(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 3*HZ;
+
+ /* Wake the chip from sleep/snooze mode. */
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0);
+
+ /* On some chip revs we must set the MII/SYM port before the reset!? */
+ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
+ outl(0x00040000, ioaddr + CSR6);
+
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ outl(0x00000001, ioaddr + CSR0);
+
+ MOD_INC_USE_COUNT;
+
+ /* This would be done after interrupts are initialized, but we do not want
+ to frob the transceiver only to fail later. */
+ if (request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ /* Deassert reset.
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+ outl(tp->csr0, ioaddr + CSR0);
+
+ if (tp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: tulip_open() irq %d.\n", dev->name, dev->irq);
+
+ tulip_init_ring(dev);
+
+ if (tp->chip_id == PNIC2) {
+ u32 addr_high = (dev->dev_addr[1]<<8) + (dev->dev_addr[0]<<0);
+ /* This address setting does not appear to impact chip operation?? */
+ outl((dev->dev_addr[5]<<8) + dev->dev_addr[4] +
+ (dev->dev_addr[3]<<24) + (dev->dev_addr[2]<<16),
+ ioaddr + 0xB0);
+ outl(addr_high + (addr_high<<16), ioaddr + 0xB8);
+ }
+ if (tp->flags & MC_HASH_ONLY) {
+ u32 addr_low = cpu_to_le32(get_unaligned((u32 *)dev->dev_addr));
+ u32 addr_high = cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)));
+ if (tp->flags & IS_ASIX) {
+ outl(0, ioaddr + CSR13);
+ outl(addr_low, ioaddr + CSR14);
+ outl(1, ioaddr + CSR13);
+ outl(addr_high, ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(addr_low, ioaddr + 0xA4);
+ outl(addr_high, ioaddr + 0xA8);
+ outl(0, ioaddr + 0xAC);
+ outl(0, ioaddr + 0xB0);
+ }
+ }
+
+ outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+ outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+
+ if ( ! tp->full_duplex_lock)
+ tp->full_duplex = 0;
+ init_media(dev);
+ if (media_cap[dev->if_port] & MediaIsMII)
+ check_duplex(dev);
+ set_rx_mode(dev);
+
+ /* Start the Tx to process setup frame. */
+ outl(tp->csr6, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn, ioaddr + CSR6);
+
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ outl(0, ioaddr + CSR2); /* Rx poll demand */
+
+ if (tp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done tulip_open(), CSR0 %8.8x, CSR5 %8.8x CSR6 "
+ "%8.8x.\n", dev->name, (int)inl(ioaddr + CSR0),
+ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6));
+
+ /* Set the timer to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&tp->timer);
+ tp->timer.expires = jiffies + next_tick;
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+ add_timer(&tp->timer);
+
+ return 0;
+}
+
+static void init_media(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ tp->saved_if_port = dev->if_port;
+ if (dev->if_port == 0)
+ dev->if_port = tp->default_port;
+
+ /* Allow selecting a default media. */
+ i = 0;
+ if (tp->mtable == NULL)
+ goto media_picked;
+ if (dev->if_port) {
+ int looking_for = media_cap[dev->if_port] & MediaIsMII ? 11 :
+ (dev->if_port == 12 ? 0 : dev->if_port);
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using user-specified media %s.\n",
+ dev->name, medianame[dev->if_port]);
+ goto media_picked;
+ }
+ }
+ if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+ int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+ dev->name, medianame[looking_for]);
+ goto media_picked;
+ }
+ }
+ /* Start sensing first non-full-duplex media. */
+ for (i = tp->mtable->leafcount - 1;
+ (media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+ ;
+media_picked:
+
+ tp->csr6 = 0;
+ tp->cur_index = i;
+ tp->nwayset = 0;
+
+ if (dev->if_port) {
+ if (tp->chip_id == DC21143 &&
+ (media_cap[dev->if_port] & MediaIsMII)) {
+ /* We must reset the media CSRs when we force-select MII mode. */
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ outl(0x0008, ioaddr + CSR15);
+ }
+ select_media(dev, 1);
+ return;
+ }
+ switch(tp->chip_id) {
+ case DC21041:
+ /* tp->nway = 1;*/
+ nway_start(dev);
+ break;
+ case DC21142:
+ if (tp->mii_cnt) {
+ select_media(dev, 1);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Using MII transceiver %d, status "
+ "%4.4x.\n",
+ dev->name, tp->phys[0], mdio_read(dev, tp->phys[0], 1));
+ outl(0x82020000, ioaddr + CSR6);
+ tp->csr6 = 0x820E0000;
+ dev->if_port = 11;
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ } else
+ nway_start(dev);
+ break;
+ case PNIC2:
+ nway_start(dev);
+ break;
+ case LC82C168:
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+ tp->csr6 = 0x814C0000 | (tp->full_duplex ? FullDuplex : 0);
+ outl(0x0001, ioaddr + CSR15);
+ } else if (inl(ioaddr + CSR5) & TPLnkPass)
+ pnic_do_nway(dev);
+ else {
+ /* Start with 10mbps to do autonegotiation. */
+ outl(0x32, ioaddr + CSR12);
+ tp->csr6 = 0x00420000;
+ outl(0x0001B078, ioaddr + 0xB8);
+ outl(0x0201B078, ioaddr + 0xB8);
+ }
+ break;
+ case MX98713: case COMPEX9881:
+ dev->if_port = 0;
+ tp->csr6 = 0x01880000 | (tp->full_duplex ? FullDuplex : 0);
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+ break;
+ case MX98715: case MX98725:
+ /* Provided by BOLO, Macronix - 12/10/1998. */
+ dev->if_port = 0;
+ tp->csr6 = 0x01a80000 | FullDuplex;
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+ outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
+ break;
+ case COMET: case CONEXANT:
+ /* Enable automatic Tx underrun recovery. */
+ outl(inl(ioaddr + 0x88) | 1, ioaddr + 0x88);
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ tp->csr6 = 0x00040000;
+ break;
+ case AX88140: case AX88141:
+ tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+ break;
+ default:
+ select_media(dev, 1);
+ }
+}
+
+/* Set up the transceiver control registers for the selected media type.
+ STARTUP indicates that the transceiver should be reset: it is set to '2' for
+ the initial card detection, and to '1' during resume or open().
+*/
+static void select_media(struct net_device *dev, int startup)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ struct mediatable *mtable = tp->mtable;
+ u32 new_csr6;
+ int i;
+
+ if (mtable) {
+ struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+ unsigned char *p = mleaf->leafdata;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media table type %d.\n",
+ dev->name, mleaf->type);
+ switch (mleaf->type) {
+ case 0: /* 21140 non-MII xcvr. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+ " with control setting %2.2x.\n",
+ dev->name, p[1]);
+ dev->if_port = p[0];
+ if (startup)
+ outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ outl(p[1], ioaddr + CSR12);
+ new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+ break;
+ case 2: case 4: {
+ u16 setup[5];
+ u32 csr13val, csr14val, csr15dir, csr15val;
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+ dev->if_port = p[0] & MEDIA_MASK;
+ if (media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+
+ if (startup && mtable->has_reset) {
+ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset-1];
+ unsigned char *rst = rleaf->leafdata;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+ dev->name);
+ for (i = 0; i < rst[0]; i++)
+ outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+ "%4.4x/%4.4x.\n",
+ dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
+ csr13val = setup[0];
+ csr14val = setup[1];
+ csr15dir = (setup[3]<<16) | setup[2];
+ csr15val = (setup[4]<<16) | setup[2];
+ outl(0, ioaddr + CSR13);
+ outl(csr14val, ioaddr + CSR14);
+ outl(csr15dir, ioaddr + CSR15); /* Direction */
+ outl(csr15val, ioaddr + CSR15); /* Data */
+ outl(csr13val, ioaddr + CSR13);
+ } else {
+ csr13val = 1;
+ csr14val = 0x0003FFFF;
+ csr15dir = (setup[0]<<16) | 0x0008;
+ csr15val = (setup[1]<<16) | 0x0008;
+ if (dev->if_port <= 4)
+ csr14val = t21142_csr14[dev->if_port];
+ if (startup) {
+ outl(0, ioaddr + CSR13);
+ outl(csr14val, ioaddr + CSR14);
+ }
+ outl(csr15dir, ioaddr + CSR15); /* Direction */
+ outl(csr15val, ioaddr + CSR15); /* Data */
+ if (startup) outl(csr13val, ioaddr + CSR13);
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
+ dev->name, csr15dir, csr15val);
+ if (mleaf->type == 4)
+ new_csr6 = 0x820A0000 | ((setup[2] & 0x71) << 18);
+ else
+ new_csr6 = 0x82420000;
+ break;
+ }
+ case 1: case 3: {
+ int phy_num = p[0];
+ int init_length = p[1];
+ u16 *misc_info;
+
+ dev->if_port = 11;
+ new_csr6 = 0x020E0000;
+ if (mleaf->type == 3) { /* 21142 */
+ u16 *init_sequence = (u16*)(p+2);
+ u16 *reset_sequence = &((u16*)(p+3))[init_length];
+ int reset_length = p[2 + init_length*2];
+ misc_info = reset_sequence + reset_length;
+ if (startup)
+ for (i = 0; i < reset_length; i++)
+ outl(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+ for (i = 0; i < init_length; i++)
+ outl(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+ } else {
+ u8 *init_sequence = p + 2;
+ u8 *reset_sequence = p + 3 + init_length;
+ int reset_length = p[2 + init_length];
+ misc_info = (u16*)(reset_sequence + reset_length);
+ if (startup) {
+ outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+ for (i = 0; i < reset_length; i++)
+ outl(reset_sequence[i], ioaddr + CSR12);
+ }
+ for (i = 0; i < init_length; i++)
+ outl(init_sequence[i], ioaddr + CSR12);
+ }
+ tp->advertising[phy_num] = get_u16(&misc_info[1]) | 1;
+ if (startup < 2) {
+ if (tp->mii_advertise == 0)
+ tp->mii_advertise = tp->advertising[phy_num];
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
+ dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
+ }
+ break;
+ }
+ default:
+ printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
+ dev->name, mleaf->type);
+ new_csr6 = 0x020E0000;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ (int)inl(ioaddr + CSR12) & 0xff);
+ } else if (tp->chip_id == DC21041) {
+ int port = dev->if_port <= 4 ? dev->if_port : 0;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21041 using media %s, CSR12 is %4.4x.\n",
+ dev->name, medianame[port == 3 ? 12: port],
+ (int)inl(ioaddr + CSR12));
+ outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+ outl(t21041_csr14[port], ioaddr + CSR14);
+ outl(t21041_csr15[port], ioaddr + CSR15);
+ outl(t21041_csr13[port], ioaddr + CSR13);
+ new_csr6 = 0x80020000;
+ } else if (tp->chip_id == LC82C168) {
+ if (startup && ! tp->medialock)
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+ dev->name, (int)inl(ioaddr + 0xB8),
+ medianame[dev->if_port]);
+ if (tp->mii_cnt) {
+ new_csr6 = 0x810C0000;
+ outl(0x0001, ioaddr + CSR15);
+ outl(0x0201B07A, ioaddr + 0xB8);
+ } else if (startup) {
+ /* Start with 10mbps to do autonegotiation. */
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x0001B078, ioaddr + 0xB8);
+ outl(0x0201B078, ioaddr + 0xB8);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ outl(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ /* Trigger autonegotiation. */
+ outl(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+ } else {
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x1F078, ioaddr + 0xB8);
+ }
+ } else if (tp->chip_id == DC21040) { /* 21040 */
+ /* Turn on the xcvr interface. */
+ int csr12 = inl(ioaddr + CSR12);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21040 media type is %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ if (media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+ new_csr6 = 0x20000;
+ /* Set the full duplex match frame. */
+ outl(FULL_DUPLEX_MAGIC, ioaddr + CSR11);
+ outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+ if (t21040_csr13[dev->if_port] & 8) {
+ outl(0x0705, ioaddr + CSR14);
+ outl(0x0006, ioaddr + CSR15);
+ } else {
+ outl(0xffff, ioaddr + CSR14);
+ outl(0x0000, ioaddr + CSR15);
+ }
+ outl(0x8f01 | t21040_csr13[dev->if_port], ioaddr + CSR13);
+ } else { /* Unknown chip type with no media table. */
+ if (tp->default_port == 0)
+ dev->if_port = tp->mii_cnt ? 11 : 3;
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ new_csr6 = 0x020E0000;
+ } else if (media_cap[dev->if_port] & MediaIsFx) {
+ new_csr6 = 0x02860000;
+ } else
+ new_csr6 = 0x038E0000;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: No media description table, assuming "
+ "%s transceiver, CSR12 %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+ (int)inl(ioaddr + CSR12));
+ }
+
+ tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) |
+ (tp->full_duplex ? FullDuplex : 0);
+ return;
+}
+
+/*
+ Check the MII negotiated duplex, and change the CSR6 setting if
+ required.
+ Return 0 if everything is OK.
+ Return < 0 if the transceiver is missing or has no link beat.
+ */
+static int check_duplex(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int mii_reg1, mii_reg5, negotiated, duplex;
+
+ if (tp->full_duplex_lock)
+ return 0;
+ mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+ negotiated = mii_reg5 & tp->mii_advertise;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO "%s: MII link partner %4.4x, negotiated %4.4x.\n",
+ dev->name, mii_reg5, negotiated);
+ if (mii_reg5 == 0xffff)
+ return -2;
+ if ((mii_reg5 & 0x4000) == 0 && /* No negotiation. */
+ ((mii_reg1 = mdio_read(dev, tp->phys[0], 1)) & 0x0004) == 0) {
+ int new_reg1 = mdio_read(dev, tp->phys[0], 1);
+ if ((new_reg1 & 0x0004) == 0) {
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO "%s: No link beat on the MII interface,"
+ " status %4.4x.\n", dev->name, new_reg1);
+ return -1;
+ }
+ }
+ duplex = ((negotiated & 0x0300) == 0x0100
+ || (negotiated & 0x00C0) == 0x0040);
+ /* 100baseTx-FD or 10T-FD, but not 100-HD */
+ if (tp->full_duplex != duplex) {
+ tp->full_duplex = duplex;
+ if (negotiated & 0x0380) /* 100mbps. */
+ tp->csr6 &= ~0x00400000;
+ if (tp->full_duplex) tp->csr6 |= FullDuplex;
+ else tp->csr6 &= ~FullDuplex;
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII "
+ "#%d link partner capability of %4.4x.\n",
+ dev->name, tp->full_duplex ? "full" : "half",
+ tp->phys[0], mii_reg5);
+ return 1;
+ }
+ return 0;
+}
+
+static void tulip_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 csr12 = inl(ioaddr + CSR12);
+ int next_tick = 2*HZ;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
+ " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
+ dev->name, medianame[dev->if_port], (int)inl(ioaddr + CSR5),
+ (int)inl(ioaddr + CSR6), csr12, (int)inl(ioaddr + CSR13),
+ (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
+
+ switch (tp->chip_id) {
+ case DC21040:
+ if (!tp->medialock && (csr12 & 0x0002)) { /* Network error */
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO "%s: No link beat found.\n",
+ dev->name);
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ select_media(dev, 0);
+ dev->trans_start = jiffies;
+ }
+ break;
+ case DC21041:
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: 21041 media tick CSR12 %8.8x.\n",
+ dev->name, csr12);
+ if (tp->medialock) break;
+ switch (dev->if_port) {
+ case 0: case 3: case 4:
+ if (csr12 & 0x0004) { /*LnkFail */
+ /* 10baseT is dead. Check for activity on alternate port. */
+ tp->mediasense = 1;
+ if (csr12 & 0x0200)
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: No 21041 10baseT link beat, Media "
+ "switched to %s.\n",
+ dev->name, medianame[dev->if_port]);
+ outl(0, ioaddr + CSR13); /* Reset */
+ outl(t21041_csr14[dev->if_port], ioaddr + CSR14);
+ outl(t21041_csr15[dev->if_port], ioaddr + CSR15);
+ outl(t21041_csr13[dev->if_port], ioaddr + CSR13);
+ next_tick = 10*HZ; /* 10 sec. */
+ } else
+ next_tick = 30*HZ;
+ break;
+ case 1: /* 10base2 */
+ case 2: /* AUI */
+ if (csr12 & 0x0100) {
+ next_tick = (30*HZ); /* 30 sec. */
+ tp->mediasense = 0;
+ } else if ((csr12 & 0x0004) == 0) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: 21041 media switched to 10baseT.\n",
+ dev->name);
+ dev->if_port = 0;
+ select_media(dev, 0);
+ next_tick = (24*HZ)/10; /* 2.4 sec. */
+ } else if (tp->mediasense || (csr12 & 0x0002)) {
+ dev->if_port = 3 - dev->if_port; /* Swap ports. */
+ select_media(dev, 0);
+ next_tick = 20*HZ;
+ } else {
+ next_tick = 20*HZ;
+ }
+ break;
+ }
+ break;
+ case DC21140: case DC21142: case MX98713: case COMPEX9881: default: {
+ struct medialeaf *mleaf;
+ unsigned char *p;
+ if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */
+ /* Not much that can be done.
+ Assume this is a generic MII or SYM transceiver. */
+ next_tick = 60*HZ;
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
+ "CSR12 0x%2.2x.\n",
+ dev->name, (int)inl(ioaddr + CSR6), csr12 & 0xff);
+ break;
+ }
+ mleaf = &tp->mtable->mleaf[tp->cur_index];
+ p = mleaf->leafdata;
+ switch (mleaf->type) {
+ case 0: case 4: {
+ /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */
+ int offset = mleaf->type == 4 ? 5 : 2;
+ s8 bitnum = p[offset];
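+ /* bitnum selects which CSR12 bit to test ((bitnum >> 1) & 7); its sign
+ gives the value that bit is expected to have when link beat is present. */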
+ if (p[offset+1] & 0x80) {
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG"%s: Transceiver monitor tick "
+ "CSR12=%#2.2x, no media sense.\n",
+ dev->name, csr12);
+ if (mleaf->type == 4) {
+ if (mleaf->media == 3 && (csr12 & 0x02))
+ goto select_next_media;
+ }
+ break;
+ }
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
+ " bit %d is %d, expecting %d.\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
+ /* Check that the specified bit has the proper value. */
+ if ((bitnum < 0) !=
+ ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Link beat detected for %s.\n",
+ dev->name, medianame[mleaf->media & MEDIA_MASK]);
+ if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
+ goto actually_mii;
+ break;
+ }
+ if (tp->medialock)
+ break;
+ select_next_media:
+ if (--tp->cur_index < 0) {
+ /* We start again, but should instead look for the default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
+ if (media_cap[dev->if_port] & MediaIsFD)
+ goto select_next_media; /* Skip FD entries. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: No link beat on media %s,"
+ " trying transceiver type %s.\n",
+ dev->name, medianame[mleaf->media & MEDIA_MASK],
+ medianame[tp->mtable->mleaf[tp->cur_index].media]);
+ select_media(dev, 0);
+ /* Restart the transmit process. */
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ next_tick = (24*HZ)/10;
+ break;
+ }
+ case 1: case 3: /* 21140, 21142 MII */
+ actually_mii:
+ check_duplex(dev);
+ next_tick = 60*HZ;
+ break;
+ case 2: /* 21142 serial block has no link beat. */
+ default:
+ break;
+ }
+ }
+ break;
+ }
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+/* Handle internal NWay transceivers uniquely.
+ These exist on the 21041, 21143 (in SYM mode) and the PNIC2.
+ */
+static void nway_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr12 = inl(ioaddr + CSR12);
+ int next_tick = 60*HZ;
+ int new_csr6 = 0;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO"%s: N-Way autonegotiation status %8.8x, %s.\n",
+ dev->name, csr12, medianame[dev->if_port]);
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ check_duplex(dev);
+ } else if (tp->nwayset) {
+ /* Do not screw up a negotiated session! */
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ } else if (tp->medialock) {
+ ;
+ } else if (dev->if_port == 3) {
+ if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
+ "trying NWay.\n", dev->name, csr12);
+ nway_start(dev);
+ next_tick = 3*HZ;
+ }
+ } else if ((csr12 & 0x7000) != 0x5000) {
+ /* Negotiation failed. Search media types. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
+ dev->name, csr12);
+ if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ new_csr6 = 0x82420000;
+ dev->if_port = 0;
+ outl(0, ioaddr + CSR13);
+ outl(0x0003FFFF, ioaddr + CSR14);
+ outw(t21142_csr15[dev->if_port], ioaddr + CSR15);
+ outl(t21142_csr13[dev->if_port], ioaddr + CSR13);
+ } else {
+ /* Select 100mbps port to check for link beat. */
+ new_csr6 = 0x83860000;
+ dev->if_port = 3;
+ outl(0, ioaddr + CSR13);
+ outl(0x0003FF7F, ioaddr + CSR14);
+ outw(8, ioaddr + CSR15);
+ outl(1, ioaddr + CSR13);
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
+ dev->name, medianame[dev->if_port]);
+ if (new_csr6 != (tp->csr6 & ~0x20D7)) {
+ tp->csr6 &= 0x20D7;
+ tp->csr6 |= new_csr6;
+ outl(0x0301, ioaddr + CSR12);
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ }
+ next_tick = 3*HZ;
+ }
+ if (tp->cur_tx - tp->dirty_tx > 0 &&
+ jiffies - dev->trans_start > TX_TIMEOUT) {
+ printk(KERN_WARNING "%s: Tx hung, %d vs. %d.\n",
+ dev->name, tp->cur_tx, tp->dirty_tx);
+ tulip_tx_timeout(dev);
+ }
+
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+static void nway_start(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
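+ /* Build the CSR14 SIA value that advertises the media types recorded in
+ tp->sym_advertise when (re)starting NWay autonegotiation. */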
+ int csr14 = ((tp->sym_advertise & 0x0780) << 9) |
+ ((tp->sym_advertise&0x0020)<<1) | 0xffbf;
+
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+ if (tp->chip_id == PNIC2) {
+ tp->csr6 = 0x01000000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+ return;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Restarting internal NWay autonegotiation, "
+ "%8.8x.\n", dev->name, csr14);
+ outl(0x0001, ioaddr + CSR13);
+ outl(csr14, ioaddr + CSR14);
+ tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0)
+ | (tp->csr6 & 0x20ff);
+ outl(tp->csr6, ioaddr + CSR6);
+ if (tp->mtable && tp->mtable->csr15dir) {
+ outl(tp->mtable->csr15dir, ioaddr + CSR15);
+ outl(tp->mtable->csr15val, ioaddr + CSR15);
+ } else if (tp->chip_id != PNIC2)
+ outw(0x0008, ioaddr + CSR15);
+ if (tp->chip_id == DC21041) /* Trigger NWAY. */
+ outl(0xEF01, ioaddr + CSR12);
+ else
+ outl(0x1301, ioaddr + CSR12);
+}
+
+static void nway_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr12 = inl(ioaddr + CSR12);
+
+ if (tp->chip_id == PNIC2) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: PNIC-2 link status changed, CSR5/12/14 %8.8x"
+ " %8.8x, %8.8x.\n",
+ dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
+ dev->if_port = 5;
+ tp->lpar = csr12 >> 16;
+ tp->nwayset = 1;
+ tp->csr6 = 0x01000000 | (tp->csr6 & 0xffff);
+ outl(tp->csr6, ioaddr + CSR6);
+ return;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
+ "%8.8x.\n", dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
+
+ /* If NWay finished and we have a negotiated partner capability. */
+ if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
+ int setup_done = 0;
+ int negotiated = tp->sym_advertise & (csr12 >> 16);
+ tp->lpar = csr12 >> 16;
+ tp->nwayset = 1;
+ if (negotiated & 0x0100) dev->if_port = 5;
+ else if (negotiated & 0x0080) dev->if_port = 3;
+ else if (negotiated & 0x0040) dev->if_port = 4;
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ tp->nwayset = 0;
+ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+ tp->full_duplex = (media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
+
+ if (tp->msg_level & NETIF_MSG_LINK) {
+ if (tp->nwayset)
+ printk(KERN_INFO "%s: Switching to %s based on link "
+ "negotiation %4.4x & %4.4x = %4.4x.\n",
+ dev->name, medianame[dev->if_port], tp->sym_advertise,
+ tp->lpar, negotiated);
+ else
+ printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
+ " link beat status %4.4x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ }
+
+ if (tp->mtable) {
+ int i;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == dev->if_port) {
+ tp->cur_index = i;
+ select_media(dev, 0);
+ setup_done = 1;
+ break;
+ }
+ }
+ if ( ! setup_done) {
+ tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000)
+ | (tp->csr6 & 0x20ff);
+ if (tp->full_duplex)
+ tp->csr6 |= FullDuplex;
+ outl(1, ioaddr + CSR13);
+ }
+#if 0 /* Restart should not be needed. */
+ outl(tp->csr6 | 0x0000, ioaddr + CSR6);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
+ dev->name, inl(ioaddr + CSR5));
+#endif
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
+ dev->name, tp->csr6, (int)inl(ioaddr + CSR6),
+ (int)inl(ioaddr + CSR12));
+ } else if ((tp->nwayset && (csr5 & 0x08000000)
+ && (dev->if_port == 3 || dev->if_port == 5)
+ && (csr12 & 2) == 2) ||
+ (tp->nway && (csr5 & (TPLnkFail)))) {
+ /* Link blew? Maybe restart NWay. */
+ del_timer(&tp->timer);
+ nway_start(dev);
+ tp->timer.expires = jiffies + 3*HZ;
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+ if (tp->msg_level & NETIF_MSG_LINK) /* TIMER? */
+ printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+ if ((csr12 & 2) && ! tp->medialock) {
+ del_timer(&tp->timer);
+ nway_start(dev);
+ tp->timer.expires = jiffies + 3*HZ;
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 5)
+ outl(inl(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+ } else if (dev->if_port == 0 || dev->if_port == 4) {
+ if ((csr12 & 4) == 0)
+ printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
+ dev->name);
+ } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
+ dev->name);
+ dev->if_port = 0;
+ } else if (tp->nwayset) {
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
+ dev->name, medianame[dev->if_port], tp->csr6);
+ } else { /* 100mbps link beat good. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
+ dev->name);
+ dev->if_port = 3;
+ tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
+ outl(0x0003FF7F, ioaddr + CSR14);
+ outl(0x0301, ioaddr + CSR12);
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+}
+
+static void mxic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+ if (tp->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
+ (int)inl(ioaddr + CSR12));
+ }
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+static void pnic_do_nway(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 phy_reg = inl(ioaddr + 0xB8);
+ u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+ if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ if (phy_reg & 0x20000000) dev->if_port = 5;
+ else if (phy_reg & 0x40000000) dev->if_port = 3;
+ else if (phy_reg & 0x10000000) dev->if_port = 4;
+ else if (phy_reg & 0x08000000) dev->if_port = 0;
+ tp->nwayset = 1;
+ new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000;
+ outl(0x32 | (dev->if_port & 1), ioaddr + CSR12);
+ if (dev->if_port & 1)
+ outl(0x1F868, ioaddr + 0xB8);
+ if (phy_reg & 0x30000000) {
+ tp->full_duplex = 1;
+ new_csr6 |= FullDuplex;
+ }
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ outl(tp->csr6 | RxOn, ioaddr + CSR6); /* Restart Tx */
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ dev->trans_start = jiffies;
+ }
+ }
+}
+
+static void pnic_lnk_change(struct net_device *dev, int csr5)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int phy_reg = inl(ioaddr + 0xB8);
+
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
+ dev->name, phy_reg, csr5);
+ if (inl(ioaddr + CSR5) & TPLnkFail) {
+ outl((inl(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
+ if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) {
+ tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
+ outl(tp->csr6, ioaddr + CSR6);
+ outl(0x30, ioaddr + CSR12);
+ outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ dev->trans_start = jiffies;
+ }
+ } else if (inl(ioaddr + CSR5) & TPLnkPass) {
+ pnic_do_nway(dev);
+ outl((inl(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7);
+ }
+}
+static void pnic_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ if (check_duplex(dev) > 0)
+ next_tick = 3*HZ;
+ } else {
+ int csr12 = inl(ioaddr + CSR12);
+ int new_csr6 = tp->csr6 & ~0x40C40200;
+ int phy_reg = inl(ioaddr + 0xB8);
+ int csr5 = inl(ioaddr + CSR5);
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
+ "CSR5 %8.8x.\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
+ if (phy_reg & 0x04000000) { /* Remote link fault */
+ outl(0x0201F078, ioaddr + 0xB8);
+ next_tick = 1*HZ;
+ tp->nwayset = 0;
+ } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+ pnic_do_nway(dev);
+ next_tick = 60*HZ;
+ } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
+ "CSR5 %8.8x, PHY %3.3x.\n",
+ dev->name, medianame[dev->if_port], csr12,
+ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + 0xB8));
+ next_tick = 3*HZ;
+ if (tp->medialock) {
+ } else if (tp->nwayset && (dev->if_port & 1)) {
+ next_tick = 1*HZ;
+ } else if (dev->if_port == 0) {
+ dev->if_port = 3;
+ outl(0x33, ioaddr + CSR12);
+ new_csr6 = 0x01860000;
+ outl(0x1F868, ioaddr + 0xB8);
+ } else {
+ dev->if_port = 0;
+ outl(0x32, ioaddr + CSR12);
+ new_csr6 = 0x00420000;
+ outl(0x1F078, ioaddr + 0xB8);
+ }
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+ outl(tp->csr6 | RxOn, ioaddr + CSR6); /* Restart Tx */
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ dev->trans_start = jiffies;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Changing PNIC configuration to %s "
+ "%s-duplex, CSR6 %8.8x.\n",
+ dev->name, medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half", new_csr6);
+ }
+ }
+ }
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+static void comet_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int next_tick = 60*HZ;
+
+ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
+ "%4.4x.\n",
+ dev->name, mdio_read(dev, tp->phys[0], 1),
+ mdio_read(dev, tp->phys[0], 5));
+ check_duplex(dev);
+ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+}
+
+static void tulip_tx_timeout(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ /* Do nothing -- the media monitor should handle this. */
+ int mii_bmsr = mdio_read(dev, tp->phys[0], 1);
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_WARNING "%s: Transmit timeout using MII device,"
+ " status %4.4x.\n",
+ dev->name, mii_bmsr);
+ if ( ! (mii_bmsr & 0x0004)) { /* No link beat present */
+ dev->trans_start = jiffies;
+ netif_link_down(dev);
+ return;
+ }
+ } else switch (tp->chip_id) {
+ case DC21040:
+ if ( !tp->medialock && inl(ioaddr + CSR12) & 0x0002) {
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ printk(KERN_INFO "%s: transmit timed out, switching to "
+ "%s.\n",
+ dev->name, medianame[dev->if_port]);
+ select_media(dev, 0);
+ }
+ dev->trans_start = jiffies;
+ return; /* Note: not break! */
+ case DC21041: {
+ int csr12 = inl(ioaddr + CSR12);
+
+ printk(KERN_WARNING "%s: 21041 transmit timed out, status %8.8x, "
+ "CSR12 %8.8x, CSR13 %8.8x, CSR14 %8.8x, resetting...\n",
+ dev->name, (int)inl(ioaddr + CSR5), csr12,
+ (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14));
+ tp->mediasense = 1;
+ if ( ! tp->medialock) {
+ if (dev->if_port == 1 || dev->if_port == 2)
+ dev->if_port = (csr12 & 0x0004) ? 2 - dev->if_port : 0;
+ else
+ dev->if_port = 1;
+ select_media(dev, 0);
+ }
+ break;
+ }
+ case DC21142:
+ if (tp->nwayset) {
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, "
+ "SIA %8.8x %8.8x %8.8x %8.8x, restarting NWay .\n",
+ dev->name, (int)inl(ioaddr + CSR5),
+ (int)inl(ioaddr + CSR12), (int)inl(ioaddr + CSR13),
+ (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
+ nway_start(dev);
+ break;
+ }
+ /* Fall through. */
+ case DC21140: case MX98713: case COMPEX9881:
+ printk(KERN_WARNING "%s: %s transmit timed out, status %8.8x, "
+ "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
+ dev->name, tulip_tbl[tp->chip_id].chip_name,
+ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12),
+ (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14),
+ (int)inl(ioaddr + CSR15));
+ if ( ! tp->medialock && tp->mtable) {
+ do
+ --tp->cur_index;
+ while (tp->cur_index >= 0
+ && (media_cap[tp->mtable->mleaf[tp->cur_index].media]
+ & MediaIsFD));
+ if (tp->cur_index < 0) {
+ /* We start again, but should instead look for the default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+ select_media(dev, 0);
+ printk(KERN_WARNING "%s: transmit timed out, switching to %s "
+ "media.\n", dev->name, medianame[dev->if_port]);
+ }
+ break;
+ case PNIC2:
+ printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
+ "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
+ dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6),
+ (int)inl(ioaddr + CSR7), (int)inl(ioaddr + CSR12));
+ break;
+ default:
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
+ "%8.8x, resetting...\n",
+ dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12));
+ }
+
+#if defined(way_too_many_messages) && defined(__i386__)
+ if (tp->msg_level & NETIF_MSG_TXERR) {
+ int i;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+ int j;
+ printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
+ "%2.2x %2.2x %2.2x.\n",
+ i, (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
+ for (j = 0; buf[j] != 0xee && j < 1600; j++)
+ if (j < 100) printk(" %2.2x", buf[j]);
+ printk(" j=%d.\n", j);
+ }
+ printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
+ printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Stop and restart the Tx process.
+ The pwr_event approach of empty/init_rings() may be better... */
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+
+ dev->trans_start = jiffies;
+ tp->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void tulip_init_ring(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+ tp->rx_dead = tp->tx_full = 0;
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
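+ /* The Rx buffers must hold the MTU plus the 14-byte Ethernet header and the
+ 4-byte CRC. */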
+ tp->rx_buf_sz = dev->mtu + 18;
+ if (tp->rx_buf_sz < PKT_BUF_SZ)
+ tp->rx_buf_sz = PKT_BUF_SZ;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0x00000000;
+ tp->rx_ring[i].length = cpu_to_le32(tp->rx_buf_sz);
+ tp->rx_ring[i].buffer2 = virt_to_le32desc(&tp->rx_ring[i+1]);
+ tp->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ tp->rx_ring[i-1].length |= cpu_to_le32(DESC_RING_WRAP);
+ tp->rx_ring[i-1].buffer2 = virt_to_le32desc(&tp->rx_ring[0]);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* Note the receive buffer must be longword aligned.
+ dev_alloc_skb() provides 16 byte alignment. But do *not*
+ use skb_reserve() to align the IP header! */
+ struct sk_buff *skb = dev_alloc_skb(tp->rx_buf_sz);
+ tp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[i].status = cpu_to_le32(DescOwned);
+ tp->rx_ring[i].buffer1 = virt_to_le32desc(skb->tail);
+ }
+ tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ tp->tx_skbuff[i] = 0;
+ tp->tx_ring[i].status = 0x00000000;
+ tp->tx_ring[i].buffer2 = virt_to_le32desc(&tp->tx_ring[i+1]);
+ }
+ tp->tx_ring[i-1].buffer2 = virt_to_le32desc(&tp->tx_ring[0]);
+}
+
+static int
+tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry, q_used_cnt;
+ u32 flag;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tulip_tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the field
+ with the ownership bits last. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
+ q_used_cnt = tp->cur_tx - tp->dirty_tx;
+
+ tp->tx_skbuff[entry] = skb;
+ tp->tx_ring[entry].buffer1 = virt_to_le32desc(skb->data);
+
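+ /* In the 21x4x Tx descriptor, bits 29/30 mark the first/last segment of a
+ frame and bit 31 requests a Tx-complete interrupt, so 0x60000000 posts a
+ single-buffer frame silently and 0xe0000000 also asks for an interrupt. */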
+ if (q_used_cnt < TX_QUEUE_LEN/2) {/* Typical path */
+ flag = 0x60000000; /* No interrupt */
+ } else if (q_used_cnt == TX_QUEUE_LEN/2) {
+ flag = 0xe0000000; /* Tx-done intr. */
+ } else if (q_used_cnt < TX_QUEUE_LEN) {
+ flag = 0x60000000; /* No Tx-done intr. */
+ } else { /* Leave room for set_rx_mode() to fill entries. */
+ tp->tx_full = 1;
+ flag = 0xe0000000; /* Tx-done intr. */
+ }
+ if (entry == TX_RING_SIZE-1)
+ flag = 0xe0000000 | DESC_RING_WRAP;
+
+ tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ tp->cur_tx++;
+ if ( ! tp->tx_full)
+ netif_unpause_tx_queue(dev);
+ else {
+ netif_stop_tx_queue(dev);
+ /* Check for a just-cleared queue race.
+ Note that this code path differs from other drivers because we
+ set the tx_full flag early. */
+ if ( ! tp->tx_full)
+ netif_resume_tx_queue(dev);
+ }
+
+ dev->trans_start = jiffies;
+ /* Trigger an immediate transmit demand. */
+ outl(0, dev->base_addr + CSR1);
+
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr5, work_budget = tp->max_interrupt_work;
+
+ do {
+ csr5 = inl(ioaddr + CSR5);
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+ break;
+
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ dev->name, csr5, (int)inl(dev->base_addr + CSR5));
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+ if (csr5 & (RxIntr | RxNoBuf))
+ work_budget -= tulip_rx(dev);
+
+ if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
+ unsigned int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still has not been Txed */
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_skbuff[entry] == NULL)
+ continue;
+
+ if (status & 0x8000) {
+ /* There was a major error, log it. */
+ if (tp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.tx_errors++;
+ if (status & 0x4104) tp->stats.tx_aborted_errors++;
+ if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+ if (status & 0x0200) tp->stats.tx_window_errors++;
+ if (status & 0x0002) tp->stats.tx_fifo_errors++;
+ if ((status & 0x0080) && tp->full_duplex == 0)
+ tp->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+ if (status & 0x0100) tp->stats.collisions16++;
+#endif
+ } else {
+ if (tp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit complete, status "
+ "%8.8x.\n", dev->name, status);
+#ifdef ETHER_STATS
+ if (status & 0x0001) tp->stats.tx_deferred++;
+#endif
+#if LINUX_VERSION_CODE > 0x20127
+ tp->stats.tx_bytes += tp->tx_skbuff[entry]->len;
+#endif
+ tp->stats.collisions += (status >> 3) & 15;
+ tp->stats.tx_packets++;
+ }
+
+ /* Free the original skb. */
+ dev_free_skb_irq(tp->tx_skbuff[entry]);
+ tp->tx_skbuff[entry] = 0;
+ }
+
+#ifndef final_version
+ if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (tp->tx_full && tp->cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ tp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ tp->dirty_tx = dirty_tx;
+ }
+
+ if (tp->rx_dead) {
+ tulip_rx(dev);
+ if (tp->cur_rx - tp->dirty_rx < RX_RING_SIZE - 3) {
+ printk(KERN_ERR "%s: Restarted Rx at %d / %d.\n",
+ dev->name, tp->cur_rx, tp->dirty_rx);
+ outl(0, ioaddr + CSR2); /* Rx poll demand */
+ tp->rx_dead = 0;
+ }
+ }
+
+ /* Log errors. */
+ if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
+ if (csr5 == 0xffffffff)
+ break;
+ if (csr5 & TxJabber) tp->stats.tx_errors++;
+ if (csr5 & PCIBusError) {
+ printk(KERN_ERR "%s: PCI Fatal Bus Error, %8.8x.\n",
+ dev->name, csr5);
+ }
+ if (csr5 & TxFIFOUnderflow) {
+ if ((tp->csr6 & 0xC000) != 0xC000)
+ tp->csr6 += 0x4000; /* Bump up the Tx threshold */
+ else
+ tp->csr6 |= 0x00200000; /* Store-n-forward. */
+ if (tp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_WARNING "%s: Tx threshold increased, "
+ "new CSR6 %x.\n", dev->name, tp->csr6);
+ }
+ if (csr5 & TxDied) {
+ /* This is normal when changing Tx modes. */
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_WARNING "%s: The transmitter stopped."
+ " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+ dev->name, csr5, (int)inl(ioaddr + CSR6), tp->csr6);
+ }
+ if (csr5 & (TxDied | TxFIFOUnderflow | PCIBusError)) {
+ /* Restart the transmit process. */
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+ if (csr5 & (RxStopped | RxNoBuf)) {
+ /* Missed a Rx frame or mode change. */
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+ if (tp->flags & COMET_MAC_ADDR) {
+ outl(tp->mc_filter[0], ioaddr + 0xAC);
+ outl(tp->mc_filter[1], ioaddr + 0xB0);
+ }
+ tulip_rx(dev);
+ if (csr5 & RxNoBuf)
+ tp->rx_dead = 1;
+ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+ if (csr5 & TimerInt) {
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+ dev->name, csr5);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ }
+ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
+ if (tp->link_change)
+ (tp->link_change)(dev, csr5);
+ }
+ /* Clear all error sources, including undocumented ones! */
+ outl(0x0800f7ba, ioaddr + CSR5);
+ }
+ if (--work_budget < 0) {
+ if (tp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_WARNING "%s: Too much work during an interrupt, "
+ "csr5=0x%8.8x.\n", dev->name, csr5);
+ /* Acknowledge all interrupt sources. */
+ outl(0x8001ffff, ioaddr + CSR5);
+ if (tp->flags & HAS_INTR_MITIGATION) {
+ /* Josip Loncaric at ICASE did extensive experimentation
+ to develop a good interrupt mitigation setting.*/
+ outl(0x8b240000, ioaddr + CSR11);
+ } else {
+ /* Mask all interrupting sources, set timer to re-enable. */
+ outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
+ ioaddr + CSR7);
+ outl(0x0012, ioaddr + CSR11);
+ }
+ break;
+ }
+ } while (1);
+
+ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+ dev->name, (int)inl(ioaddr + CSR5));
+
+ return;
+}
+
+static int tulip_rx(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+ int work_done = 0;
+
+ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ dev->name, entry, status);
+ if (--rx_work_limit < 0)
+ break;
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+ tp->stats.rx_length_errors++;
+ }
+ } else if (status & RxDescFatalErr) {
+ /* There was a fatal error. */
+ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) tp->stats.rx_length_errors++;
+ if (status & 0x0004) tp->stats.rx_frame_errors++;
+ if (status & 0x0002) tp->stats.rx_crc_errors++;
+ if (status & 0x0001) tp->stats.rx_fifo_errors++;
+ }
+ } else {
+ /* Omit the four octet CRC from the length. */
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ struct sk_buff *skb;
+
+#ifndef final_version
+ if (pkt_len > 1518) {
+ printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+ dev->name, pkt_len, pkt_len);
+ pkt_len = 1518;
+ tp->stats.rx_length_errors++;
+ }
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < tp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if (LINUX_VERSION_CODE >= 0x20100)
+ eth_copy_and_sum(skb, tp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), tp->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ work_done++;
+ } else { /* Pass up the skb already on the Rx ring. */
+ skb_put(skb = tp->rx_skbuff[entry], pkt_len);
+ tp->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ tp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ tp->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+ entry = tp->dirty_rx % RX_RING_SIZE;
+ if (tp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+ skb = tp->rx_skbuff[entry] = dev_alloc_skb(tp->rx_buf_sz);
+ if (skb == NULL) {
+ if (tp->cur_rx - tp->dirty_rx == RX_RING_SIZE)
+ printk(KERN_ERR "%s: No kernel memory to allocate "
+ "receive buffers.\n", dev->name);
+ break;
+ }
+ skb->dev = dev; /* Mark as being used by this device. */
+ tp->rx_ring[entry].buffer1 = virt_to_le32desc(skb->tail);
+ work_done++;
+ }
+ tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+ }
+
+ return work_done;
+}
+
+static void empty_rings(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->rx_skbuff[i];
+ tp->rx_skbuff[i] = 0;
+ tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
+ tp->rx_ring[i].length = 0;
+ tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
+ if (skb) {
+#if LINUX_VERSION_CODE < 0x20100
+ skb->free = 1;
+#endif
+ dev_free_skb(skb);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (tp->tx_skbuff[i])
+ dev_free_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = 0;
+ }
+}
+
+static int tulip_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+
+ netif_stop_tx_queue(dev);
+
+ if (tp->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, (int)inl(ioaddr + CSR5));
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x00000000, ioaddr + CSR7);
+ /* Stop the Tx and Rx processes. */
+ outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
+ /* 21040 -- Leave the card in 10baseT state. */
+ if (tp->chip_id == DC21040)
+ outl(0x00000004, ioaddr + CSR13);
+
+ if (inl(ioaddr + CSR6) != 0xffffffff)
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+
+ del_timer(&tp->timer);
+
+ free_irq(dev->irq, dev);
+
+ dev->if_port = tp->saved_if_port;
+
+ empty_rings(dev);
+ /* Leave the driver in snooze, not sleep, mode. */
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct net_device_stats *tulip_get_stats(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr8 = inl(ioaddr + CSR8);
+
+ if (netif_running(dev) && csr8 != 0xffffffff)
+ tp->stats.rx_missed_errors += (u16)csr8;
+
+ return &tp->stats;
+}
+
+#ifdef HAVE_PRIVATE_IOCTL
+/* Provide ioctl() calls to examine the MII xcvr state.
+ We emulate MII management registers for chips without MII.
+ The two numeric constants are listed because the value assigned to the
+ symbolic name was changed at one point.
+ */
+static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+ unsigned int phy = tp->phys[0];
+ unsigned int regnum = data[1];
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ if (tp->mii_cnt)
+ data[0] = phy;
+ else if (tp->flags & HAS_NWAY)
+ data[0] = 32;
+ else if (tp->chip_id == COMET)
+ data[0] = 1;
+ else
+ return -ENODEV;
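+ /* Fall through and read the selected register. */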
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ if (data[0] == 32 && (tp->flags & HAS_NWAY)) {
+ int csr12 = inl(ioaddr + CSR12);
+ int csr14 = inl(ioaddr + CSR14);
+ switch (regnum) {
+ case 0:
+ if (((csr14<<5) & 0x1000) ||
+ (dev->if_port == 5 && tp->nwayset))
+ data[3] = 0x1000;
+ else
+ data[3] = (media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
+ | (media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
+ break;
+ case 1:
+ data[3] = 0x1848 + ((csr12&0x7000) == 0x5000 ? 0x20 : 0)
+ + ((csr12&0x06) == 6 ? 0 : 4);
+ if (tp->chip_id != DC21041)
+ data[3] |= 0x6048;
+ break;
+ case 4: {
+ /* Advertised value, bogus 10baseTx-FD value from CSR6. */
+ data[3] = ((inl(ioaddr + CSR6)>>3)&0x0040)+((csr14>>1)&0x20)+1;
+ if (tp->chip_id != DC21041)
+ data[3] |= ((csr14>>9)&0x03C0);
+ break;
+ }
+ case 5: data[3] = tp->lpar; break;
+ default: data[3] = 0; break;
+ }
+ } else {
+ data[3] = mdio_read(dev, data[0] & 0x1f, regnum);
+ }
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (regnum & ~0x1f)
+ return -EINVAL;
+ if (data[0] == phy) {
+ u16 value = data[2];
+ switch (regnum) {
+ case 0: /* Check for autonegotiation on or reset. */
+ tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
+ if (tp->full_duplex_lock)
+ tp->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: tp->mii_advertise = data[2]; break;
+ }
+ }
+ if (data[0] == 32 && (tp->flags & HAS_NWAY)) {
+ u16 value = data[2];
+ if (regnum == 0) {
+ if ((value & 0x1200) == 0x1200)
+ nway_start(dev);
+ } else if (regnum == 4)
+ tp->sym_advertise = value;
+ } else {
+ mdio_write(dev, data[0] & 0x1f, regnum, data[2]);
+ }
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = tp->msg_level;
+ data32[1] = tp->multicast_filter_limit;
+ data32[2] = tp->max_interrupt_work;
+ data32[3] = tp->rx_copybreak;
+ data32[4] = inl(ioaddr + CSR11);
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ tp->msg_level = data32[0];
+ tp->multicast_filter_limit = data32[1];
+ tp->max_interrupt_work = data32[2];
+ tp->rx_copybreak = data32[3];
+ if (tp->flags & HAS_INTR_MITIGATION) {
+ outl(data32[4], ioaddr + CSR11);
+ printk(KERN_NOTICE "%s: Set interrupt mitigation parameters %8.8x.\n",
+ dev->name, data32[4]);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+#endif /* HAVE_PRIVATE_IOCTL */
+
+/* Set or clear the multicast filter for this adaptor.
+ Note that we only use exclusion around actually queueing the
+ new frame, not around filling tp->setup_frame. This is non-deterministic
+ when re-entered but still correct. */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+static inline u32 ether_crc_le(int length, unsigned char *data)
+{
+ u32 crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+}
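+
+/* A table-driven variant of the little-endian CRC above, as the note
+ suggests for bulk data. This is only an illustrative sketch (hence not
+ compiled): the names crc32_le_table and ether_crc_le_bulk are invented
+ here, and the table would be generated once at init time from the same
+ ethernet_polynomial_le. */
+#if 0
+static u32 crc32_le_table[256];
+
+static void crc32_le_init_table(void)
+{
+ u32 c;
+ int i, bit;
+ for (i = 0; i < 256; i++) {
+ for (c = i, bit = 0; bit < 8; bit++)
+ c = (c & 1) ? (c >> 1) ^ ethernet_polynomial_le : (c >> 1);
+ crc32_le_table[i] = c;
+ }
+}
+
+static inline u32 ether_crc_le_bulk(int length, unsigned char *data)
+{
+ u32 crc = 0xffffffff; /* Same initial value as ether_crc_le(). */
+ while (--length >= 0)
+ crc = crc32_le_table[(crc ^ *data++) & 0xff] ^ (crc >> 8);
+ return crc;
+}
+#endif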
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr6 = inl(ioaddr + CSR6) & ~0x00D5;
+
+ tp->csr6 &= ~0x00D5;
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ } else if ((dev->mc_count > tp->multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else if (tp->flags & MC_HASH_ONLY) {
+ /* Some work-alikes have only a 64-entry hash filter table. */
+ /* Should verify correctness on big-endian/__powerpc__ */
+ struct dev_mc_list *mclist;
+ int i;
+ if (dev->mc_count > tp->multicast_filter_limit) {
+ tp->csr6 |= AcceptAllMulticast;
+ csr6 |= AcceptAllMulticast;
+ } else {
+ u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
+ int filterbit;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ if (tp->flags & COMET_MAC_ADDR)
+ filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ else
+ filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit &= 0x3f;
+ set_bit(filterbit, mc_filter);
+ if (tp->msg_level & NETIF_MSG_RXFILTER)
+ printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
+ mclist->dmi_addr[0], mclist->dmi_addr[1],
+ mclist->dmi_addr[2], mclist->dmi_addr[3],
+ mclist->dmi_addr[4], mclist->dmi_addr[5],
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ }
+ if (mc_filter[0] == tp->mc_filter[0] &&
+ mc_filter[1] == tp->mc_filter[1])
+ ; /* No change. */
+ else if (tp->flags & IS_ASIX) {
+ outl(2, ioaddr + CSR13);
+ outl(mc_filter[0], ioaddr + CSR14);
+ outl(3, ioaddr + CSR13);
+ outl(mc_filter[1], ioaddr + CSR14);
+ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(mc_filter[0], ioaddr + 0xAC);
+ outl(mc_filter[1], ioaddr + 0xB0);
+ }
+ tp->mc_filter[0] = mc_filter[0];
+ tp->mc_filter[1] = mc_filter[1];
+ }
+ } else {
+ u16 *eaddrs, *setup_frm = tp->setup_frame;
+ struct dev_mc_list *mclist;
+ u32 tx_flags = 0x08000000 | 192;
+ int i;
+
+ /* Note that only the low-address shortword of setup_frame is valid!
+ The values are doubled for big-endian architectures. */
+ if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ u16 hash_table[32];
+ tx_flags = 0x08400000 | 192; /* Use hash filter. */
+ memset(hash_table, 0, sizeof(hash_table));
+ set_bit(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
+ hash_table);
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
+ }
+ setup_frm = &tp->setup_frame[13*6];
+ } else {
+ /* We have <= 14 addresses so we can use the wonderful
+ 16 address perfect filtering of the Tulip. */
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15-i)*12);
+ setup_frm = &tp->setup_frame[15*6];
+ }
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+ /* Now add this frame to the Tx list. */
+ if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+ /* Same setup recently queued, we need not add it. */
+ } else {
+ unsigned long flags;
+ unsigned int entry;
+
+ spin_lock_irqsave(&tp->mii_lock, flags);
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+
+ if (entry != 0) {
+ /* Avoid a chip erratum by prefixing a dummy entry. */
+ tp->tx_skbuff[entry] = 0;
+ tp->tx_ring[entry].length =
+ (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP):0;
+ tp->tx_ring[entry].buffer1 = 0;
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+ }
+
+ tp->tx_skbuff[entry] = 0;
+ /* Put the setup frame on the Tx list. */
+ if (entry == TX_RING_SIZE-1)
+ tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
+ tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
+ tp->tx_ring[entry].buffer1 = virt_to_le32desc(tp->setup_frame);
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
+ netif_stop_tx_queue(dev);
+ tp->tx_full = 1;
+ }
+ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+ }
+ }
+ outl(csr6, ioaddr + CSR6);
+}
+
+
+static int tulip_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ if (tp->msg_level & NETIF_MSG_LINK)
+ printk("%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND: {
+ int csr6 = inl(ioaddr + CSR6);
+ /* Disable interrupts, stop the chip, gather stats. */
+ if (csr6 != 0xffffffff) {
+ int csr8 = inl(ioaddr + CSR8);
+ outl(0x00000000, ioaddr + CSR7);
+ outl(csr6 & ~TxOn & ~RxOn, ioaddr + CSR6);
+ tp->stats.rx_missed_errors += (unsigned short)csr8;
+ }
+ empty_rings(dev);
+ /* Put the 21143 into sleep mode. */
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40,0x80000000);
+ break;
+ }
+ case DRV_RESUME:
+ if (tp->flags & HAS_PWRDWN)
+ pci_write_config_dword(tp->pci_dev, 0x40, 0x0000);
+ outl(tp->csr0, ioaddr + CSR0);
+ tulip_init_ring(dev);
+ outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+ outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+ if (tp->mtable && tp->mtable->has_mii)
+ select_media(dev, 1);
+ tp->csr6 = 0x820E0000;
+ dev->if_port = 11;
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ } else if (! tp->medialock)
+ nway_start(dev);
+ else
+ select_media(dev, 1);
+ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ outl(0, ioaddr + CSR2); /* Rx poll demand */
+ set_rx_mode(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ printk(KERN_ERR "%s: Tulip CardBus interface was detached while "
+ "still active.\n", dev->name);
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ if (tp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_DEBUG "%s: Unregistering device.\n", dev->name);
+ unregister_netdev(dev);
+#ifdef USE_IO_OPS
+ release_region(dev->base_addr, pci_id_tbl[tp->chip_id].io_size);
+#else
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (tp->priv_addr)
+ kfree(tp->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CARDBUS
+
+#include <pcmcia/driver_ops.h>
+
+static dev_node_t *tulip_attach(dev_locator_t *loc)
+{
+ struct net_device *dev;
+ long ioaddr;
+ struct pci_dev *pdev;
+ u8 bus, devfn, irq;
+ u32 dev_id;
+ u32 pciaddr;
+ int i, chip_id = 4; /* DC21143 */
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ printk(KERN_INFO "tulip_attach(bus %d, function %d)\n", bus, devfn);
+ pdev = pci_find_slot(bus, devfn);
+#ifdef USE_IO_OPS
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &pciaddr);
+ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
+#else
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &pciaddr);
+ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ pci_id_tbl[DC21142].io_size);
+#endif
+ pci_read_config_dword(pdev, 0, &dev_id);
+ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &irq);
+ if (ioaddr == 0 || irq == 0) {
+ printk(KERN_ERR "The Tulip CardBus Ethernet interface at %d/%d was "
+ "not assigned an %s.\n"
+ KERN_ERR " It will not be activated.\n",
+ bus, devfn, ioaddr == 0 ? "address" : "IRQ");
+ return NULL;
+ }
+ for (i = 0; pci_id_tbl[i].id.pci; i++) {
+ if (pci_id_tbl[i].id.pci == (dev_id & pci_id_tbl[i].id.pci_mask)) {
+ chip_id = i; break;
+ }
+ }
+ dev = tulip_probe1(pdev, NULL, ioaddr, irq, chip_id, 0);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+ node->major = node->minor = 0;
+ node->next = NULL;
+ MOD_INC_USE_COUNT;
+ return node;
+ }
+ return NULL;
+}
+
+static void tulip_suspend(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_suspend(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) {
+ tulip_pwr_event(*devp, DRV_SUSPEND);
+ break;
+ }
+ }
+}
+
+static void tulip_resume(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_resume(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) {
+ tulip_pwr_event(*devp, DRV_RESUME);
+ break;
+ }
+ }
+}
+
+static void tulip_detach(dev_node_t *node)
+{
+ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_detach(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+ struct tulip_private *tp = (struct tulip_private *)(*devp)->priv;
+ unregister_netdev(*devp);
+#ifdef USE_IO_OPS
+ release_region((*devp)->base_addr, pci_id_tbl[DC21142].io_size);
+#else
+ iounmap((char *)(*devp)->base_addr);
+#endif
+ kfree(*devp);
+ if (tp->priv_addr)
+ kfree(tp->priv_addr);
+ *devp = *next;
+ kfree(node);
+ MOD_DEC_USE_COUNT;
+ }
+}
+
+struct driver_operations tulip_ops = {
+ "tulip_cb", tulip_attach, tulip_suspend, tulip_resume, tulip_detach
+};
+
+#endif /* Cardbus support */
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+#ifdef CARDBUS
+ register_driver(&tulip_ops);
+ return 0;
+#else
+ return pci_drv_register(&tulip_drv_id, NULL);
+#endif
+ reverse_probe = 0; /* Not used. */
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+#ifdef CARDBUS
+ unregister_driver(&tulip_ops);
+#else
+ pci_drv_unregister(&tulip_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_tulip_dev) {
+ struct tulip_private *tp = (struct tulip_private*)root_tulip_dev->priv;
+ unregister_netdev(root_tulip_dev);
+#ifdef USE_IO_OPS
+ release_region(root_tulip_dev->base_addr,
+ pci_id_tbl[tp->chip_id].io_size);
+#else
+ iounmap((char *)root_tulip_dev->base_addr);
+#endif
+ next_dev = tp->next_module;
+ if (tp->priv_addr)
+ kfree(tp->priv_addr);
+ kfree(root_tulip_dev);
+ root_tulip_dev = next_dev;
+ }
+}
+#else
+int tulip_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&tulip_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+ reverse_probe = 0; /* Not used. */
+}
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` tulip.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c"
+ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c -o tulip_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/via-rhine.c b/linux/src/drivers/net/via-rhine.c
new file mode 100644
index 0000000..4d7fceb
--- /dev/null
+++ b/linux/src/drivers/net/via-rhine.c
@@ -0,0 +1,1427 @@
+/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is designed for the VIA VT86c100A Rhine-II PCI Fast Ethernet
+ controller. It also works with the older 3043 Rhine-I chip.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/via-rhine.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"via-rhine.c:v1.16 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/via-rhine.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: via_rhine_probe
+config-in: tristate 'VIA "Rhine" vt86c100, vt3043, and vt3065 series PCI Ethernet support' CONFIG_VIA_RHINE
+
+c-help-name: VIA Rhine series PCI Ethernet support
+c-help-symbol: CONFIG_VIA_RHINE
+c-help: This driver is for the VIA Rhine (v3043) and Rhine-II
+c-help: (vt3065 AKA vt86c100) network adapter chip series.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/via-rhine.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
+ The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
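+
+/* Illustrative only: with the module parameters above, a hypothetical
+ two-card setup could be loaded as
+ insmod via-rhine.o debug=3 options=0x200,0x10
+ forcing 100Mbps full duplex on the first card and 10Mbps half duplex
+ on the second. */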
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ The Rhine has a 64 element 8390-like hash table. */
+static const int multicast_filter_limit = 32;
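+/* As with the 8390, the filter bit for a given address is typically derived
+ from the upper six bits of the big-endian CRC-32 of that address; see
+ set_rx_mode() below for the exact mapping this driver uses. */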
+
+/* Operational parameters that are set at compile time. */
+
+/* Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 32
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed bus+endian portability operations. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+/* This driver was written to use PCI memory space, however most versions
+ of the Rhine only work correctly with I/O space accesses. */
+#if defined(VIA_USE_MEMORY)
+#warning Many adapters using the VIA Rhine chip are not configured to work
+#warning with PCI memory space accesses.
+#else
+#define USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex "
+ "(deprecated, use options[] instead).");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
+controller.
+
+II. Board-specific settings
+
+Boards with this chip are functional only in a bus-master PCI slot.
+
+Many operational settings are loaded from the EEPROM to the Config word at
+offset 0x78. This driver assumes that they are correct.
+If this driver is compiled to use PCI memory space operations the EEPROM
+must be configured to enable memory ops.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver attempts to use a zero-copy receive and transmit scheme.
+
+Alas, all data buffers are required to start on a 32 bit boundary, so
+the driver must often copy transmit packets into bounce buffers.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in the last phase of netdev_rx().
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+Since the VIA chips are only able to transfer data to buffers on 32 bit
+boundaries, the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing. Copying these unaligned buffers
+has the beneficial effect of 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+This driver was originally written using a preliminary VT86C100A manual
+from
+ http://www.via.com.tw/
+The usual background material was used:
+ http://www.scyld.com/expert/100mbps.html
+ http://scyld.com/expert/NWay.html
+
+Additional information is now available, especially for the newer chips.
+ http://www.via.com.tw/en/Networking/DS6105LOM100.pdf
+
+IVc. Errata
+
+The VT86C100A manual is not a reliable source of information.
+The 3043 chip does not handle unaligned transmit or receive buffers,
+resulting in significant performance degradation for bounce buffer
+copies on transmit and unaligned IP headers on receive.
+The chip does not pad to minimum transmit length.
+
+There is a bug with the transmit descriptor pointer handling when the
+chip encounters a transmit error.
+
+*/
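+
+/* A minimal sketch (not compiled) of the copy-only-tiny-frames decision
+ described in section IIIb/c above; the real code is in netdev_rx() below.
+ Frames shorter than rx_copybreak are copied into a freshly allocated
+ skbuff, larger ones are passed up in place. */
+#if 0
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail, pkt_len);
+ } else { /* Pass up the skbuff already on the Rx ring. */
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL; /* Replenished later in netdev_rx(). */
+ }
+#endif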
+
+
+
+static void *via_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int via_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags {
+ CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4, HasV1TxStat=8,
+ ReqTxAlign=0x10, HasWOL=0x20, HasIPChecksum=0x40, HasVLAN=0x80,
+
+};
+
+#if defined(VIA_USE_MEMORY)
+#define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
+#define RHINE_I_IOSIZE 128
+#define RHINEII_IOSIZE 4096
+#else
+#define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
+#define RHINE_I_IOSIZE 128
+#define RHINEII_IOSIZE 256
+#endif
+
+static struct pci_id_info pci_tbl[] = {
+ { "VIA VT3043 Rhine", { 0x30431106, 0xffffffff,},
+ RHINE_IOTYPE, RHINE_I_IOSIZE, CanHaveMII | ReqTxAlign | HasV1TxStat },
+ { "VIA VT86C100A Rhine", { 0x61001106, 0xffffffff,},
+ RHINE_IOTYPE, RHINE_I_IOSIZE, CanHaveMII | ReqTxAlign | HasV1TxStat },
+ { "VIA VT6102 Rhine-II", { 0x30651106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
+ { "VIA VT6105LOM Rhine-III (3106)", { 0x31061106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
+ /* Duplicate entry, with 'M' features enabled. */
+ { "VIA VT6105M Rhine-III (3106)", { 0x31061106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII|HasWOL|HasIPChecksum|HasVLAN},
+ { "VIA VT6105M Rhine-III (3053 prototype)", { 0x30531106, 0xffffffff,},
+ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info via_rhine_drv_id = {
+ "via-rhine", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
+ via_probe1, via_pwr_event
+};
+
+/* Offsets to the device registers.
+*/
+enum register_offsets {
+ StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+ IntrStatus=0x0C, IntrEnable=0x0E,
+ MulticastFilter0=0x10, MulticastFilter1=0x14,
+ RxRingPtr=0x18, TxRingPtr=0x1C,
+ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+ MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
+ Config=0x78, ConfigA=0x7A, RxMissed=0x7C, RxCRCErrs=0x7E,
+ StickyHW=0x83, WOLcrClr=0xA4, WOLcgClr=0xA7, PwrcsrClr=0xAC,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
+ IntrTxDone=0x0002, IntrTxAbort=0x0008, IntrTxUnderrun=0x0010,
+ IntrPCIErr=0x0040,
+ IntrStatsMax=0x0080, IntrRxEarly=0x0100, IntrMIIChange=0x0200,
+ IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+ IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+ IntrRxWakeUp=0x8000,
+ IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+ s32 rx_status;
+ u32 desc_length;
+ u32 addr;
+ u32 next_desc;
+};
+struct tx_desc {
+ s32 tx_status;
+ u32 desc_length;
+ u32 addr;
+ u32 next_desc;
+};
+
+/* Bits in *_desc.status */
+enum rx_status_bits {
+ RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F};
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndPacket=0x4000, DescIntr=0x1000,
+};
+
+/* Bits in rx.desc_length for extended status. */
+enum rx_info_bits {
+ RxTypeTag=0x00010000,
+ RxTypeUDP=0x00020000, RxTypeTCP=0x00040000, RxTypeIP=0x00080000,
+ RxTypeUTChksumOK=0x00100000, RxTypeIPChksumOK=0x00200000,
+ /* Summarized. */
+ RxTypeCsumMask=0x003E0000,
+ RxTypeUDPSumOK=0x003A0000, RxTypeTCPSumOK=0x003C0000,
+};
+
+/* Bits in ChipCmd. */
+enum chip_cmd_bits {
+ CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
+ CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
+ CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
+ CmdNoTxPoll=0x0800, CmdReset=0x8000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct rx_desc rx_ring[RX_RING_SIZE];
+ struct tx_desc tx_ring[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned char *tx_buf[TX_RING_SIZE]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ int msg_level;
+ int max_interrupt_work;
+ int intr_enable;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+
+ /* Frequently used values: keep some adjacent for cache effect. */
+
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int cur_tx, dirty_tx;
+ u16 chip_cmd; /* Current setting for ChipCmd */
+ int multicast_filter_limit;
+ u32 mc_filter[2];
+ int rx_mode;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ u8 tx_thresh, rx_thresh;
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
+int via_rhine_probe(struct net_device *dev)
+{
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&via_rhine_drv_id, dev);
+}
+#endif
+
+static void *via_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+ dev->name, pci_tbl[chip_idx].name, ioaddr);
+
+ /* We would prefer to directly read the EEPROM but access may be locked. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+ /* Reload the station address from the EEPROM. */
+ writeb(0x20, ioaddr + MACRegEEcsr);
+ /* Typically 2 cycles to reload. */
+ for (i = 0; i < 150; i++)
+ if (! (readb(ioaddr + MACRegEEcsr) & 0x20))
+ break;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
+ printk(" (MISSING EEPROM ADDRESS)");
+ /* Fill a temp addr with the "locally administered" bit set. */
+ memcpy(dev->dev_addr, ">Linux", 6);
+ }
+ }
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Make certain the descriptor lists are cache-aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+#ifdef USE_IO_OPS
+ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ /* Reset the chip to erase previous misconfiguration. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
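+ /* Round the unaligned block up to the next 16 byte (PRIV_ALIGN+1) boundary. */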
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex) {
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ np->phys[0] = 1; /* Standard for this chip. */
+ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x Link %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising,
+ mdio_read(dev, phy, 5));
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ /* Allow forcing the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ return dev;
+}
+
+
+/* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+static int mdio_read(struct net_device *dev, int phy_id, int regnum)
+{
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+
+ /* Wait for a previous command to complete. */
+ while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+ ;
+ writeb(0x00, ioaddr + MIICmd);
+ writeb(phy_id, ioaddr + MIIPhyAddr);
+ writeb(regnum, ioaddr + MIIRegAddr);
+ writeb(0x40, ioaddr + MIICmd); /* Trigger read */
+ boguscnt = 1024;
+ while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
+ ;
+ return readw(ioaddr + MIIData);
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+
+ if (phy_id == np->phys[0]) {
+ switch (regnum) {
+ case 0: /* Is user forcing speed/duplex? */
+ if (value & 0x9000) /* Autonegotiation. */
+ np->duplex_lock = 0;
+ else
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ }
+ /* Wait for a previous command to complete. */
+ while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+ ;
+ writeb(0x00, ioaddr + MIICmd);
+ writeb(phy_id, ioaddr + MIIPhyAddr);
+ writeb(regnum, ioaddr + MIIRegAddr);
+ writew(value, ioaddr + MIIData);
+ writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Reset the chip. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+ writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
+ /* Configure the FIFO thresholds. */
+ writeb(0x20, ioaddr + TxConfig); /* Initial threshold 32 bytes */
+ np->tx_thresh = 0x20;
+ np->rx_thresh = 0x60; /* Written in set_rx_mode(). */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty |
+ IntrRxOverflow| IntrRxDropped| IntrTxDone | IntrTxAbort |
+ IntrTxUnderrun | IntrPCIErr | IntrStatsMax | IntrLinkChange |
+ IntrMIIChange;
+ /* Enable interrupts by setting the interrupt mask. */
+ writew(np->intr_enable, ioaddr + IntrEnable);
+
+ np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
+ if (np->duplex_lock)
+ np->chip_cmd |= CmdFDuplex;
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+
+ check_duplex(dev);
+ /* The LED outputs of various MII xcvrs should be configured. */
+ /* For NS or Mison phys, turn on bit 1 in register 0x17 */
+ /* For ESI phys, turn on bit 7 in register 0x17. */
+ mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
+ ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status %4.4x "
+ "MII status: %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd),
+ mdio_read(dev, np->phys[0], 1));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 2;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ int duplex;
+
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
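+ /* Full duplex if 100baseTx-FD was negotiated, or if 10baseT-FD is the
+ only mode both ends advertise. */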
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], mii_reg5);
+ if (duplex)
+ np->chip_cmd |= CmdFDuplex;
+ else
+ np->chip_cmd &= ~CmdFDuplex;
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
+ dev->name, readw(ioaddr + IntrStatus));
+ }
+ if (netif_queue_paused(dev)
+ && np->cur_tx - np->dirty_tx > 1
+ && jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+
+ check_duplex(dev);
+
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, readw(ioaddr + IntrStatus),
+ mdio_read(dev, np->phys[0], 1));
+
+ /* Perhaps we should reinitialize the hardware here. */
+ dev->if_port = 0;
+ /* Restart the chip's Tx processes. */
+ writel(virt_to_bus(np->tx_ring + (np->dirty_tx % TX_RING_SIZE)),
+ ioaddr + TxRingPtr);
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+
+ /* Trigger an immediate transmit demand. */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+ /* Use 1518/+18 if the CRC is transferred. */
+ np->rx_buf_sz = dev->mtu + 14;
+ if (np->rx_buf_sz < PKT_BUF_SZ)
+ np->rx_buf_sz = PKT_BUF_SZ;
+ np->rx_head_desc = &np->rx_ring[0];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rx_status = 0;
+ np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
+ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].tx_status = 0;
+ np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
+ np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
+ np->tx_buf[i] = 0; /* Allocated as/if needed. */
+ }
+ np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. This happens when
+ packets are presumed lost, and we use this to check the Tx status. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Caution: the write order is important here, set the descriptor word
+ with the "ownership" bit last. No SMP locking is needed if the
+ cur_tx is incremented after the descriptor is consistent. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+ if ((np->drv_flags & ReqTxAlign) && ((long)skb->data & 3)) {
+ /* Must use alignment buffer. */
+ if (np->tx_buf[entry] == NULL &&
+ (np->tx_buf[entry] = kmalloc(PKT_BUF_SZ, GFP_KERNEL)) == NULL)
+ return 1;
+ memcpy(np->tx_buf[entry], skb->data, skb->len);
+ np->tx_ring[entry].addr = virt_to_le32desc(np->tx_buf[entry]);
+ } else
+ np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ /* Explicitly flush packet data cache lines here. */
+
+ np->tx_ring[entry].desc_length =
+ cpu_to_le32(0x00E08000 | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+
+ np->cur_tx++;
+
+ /* Explicitly flush descriptor cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
+ /* Check for a just-cleared queue. */
+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
+ < TX_QUEUE_LEN - 2) {
+ np->tx_full = 0;
+ netif_unpause_tx_queue(dev);
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int boguscnt = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readw(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ writew(intr_status & 0xffff, ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
+ IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
+ netdev_rx(dev);
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ int txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
+ if (txstatus & DescOwn)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG " Tx scavenge %d status %4.4x.\n",
+ entry, txstatus);
+ if (txstatus & 0x8000) {
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, txstatus);
+ np->stats.tx_errors++;
+ if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
+ if (txstatus & 0x0200) np->stats.tx_window_errors++;
+ if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
+ if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
+ if (txstatus & 0x0002) np->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (txstatus & 0x0100) np->stats.collisions16++;
+#endif
+ /* Transmitter restarted in 'abnormal' handler. */
+ } else {
+#ifdef ETHER_STATS
+ if (txstatus & 0x0001) np->stats.tx_deferred++;
+#endif
+ if (np->drv_flags & HasV1TxStat)
+ np->stats.collisions += (txstatus >> 3) & 15;
+ else
+ np->stats.collisions += txstatus & 15;
+#if defined(NETSTATS_VER2)
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ /* Note the 4 slot hysteresis in marking the queue non-full. */
+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
+ IntrStatsMax | IntrTxAbort | IntrTxUnderrun))
+ netdev_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readw(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %8.8x.\n",
+ entry, np->rx_head_desc->rx_status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+ struct rx_desc *desc = np->rx_head_desc;
+ u32 desc_status = le32_to_cpu(desc->rx_status);
+ int data_size = desc_status >> 16;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status is %4.4x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+ break;
+ if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
+ if ((desc_status & RxWholePkt) != RxWholePkt) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x length %d status %4.4x!\n",
+ dev->name, np->cur_rx, data_size, desc_status);
+ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
+ dev->name, np->rx_head_desc,
+ &np->rx_ring[np->cur_rx % RX_RING_SIZE]);
+ np->stats.rx_length_errors++;
+ } else if (desc_status & RxErr) {
+ /* There was an error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+ if (desc_status & 0x0030) np->stats.rx_length_errors++;
+ if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
+ if (desc_status & 0x0004) np->stats.rx_frame_errors++;
+ if (desc_status & 0x0002) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Length should omit the CRC */
+ int pkt_len = data_size - 4;
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ { /* Use hardware checksum info. */
+ int rxtype = le32_to_cpu(desc->desc_length);
+ int csum_bits = rxtype & RxTypeCsumMask;
+ if (csum_bits == RxTypeUDPSumOK ||
+ csum_bits == RxTypeTCPSumOK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+#if defined(NETSTATS_VER2)
+ np->stats.rx_bytes += pkt_len;
+#endif
+ np->stats.rx_packets++;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ }
+ np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+ }
+
+ /* Pre-emptively restart Rx engine. */
+ writew(CmdRxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & (IntrMIIChange | IntrLinkChange)) {
+ if (readb(ioaddr + MIIStatus) & 0x02) {
+ /* Link failed, restart autonegotiation. */
+ if (np->drv_flags & HasDavicomPhy)
+ mdio_write(dev, np->phys[0], 0, 0x3300);
+ netif_link_down(dev);
+ } else {
+ netif_link_up(dev);
+ check_duplex(dev);
+ }
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_ERR "%s: MII status changed: Autonegotiation "
+ "advertising %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], 4),
+ mdio_read(dev, np->phys[0], 5));
+ }
+ if (intr_status & IntrStatsMax) {
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ writel(0, ioaddr + RxMissed);
+ }
+ if (intr_status & IntrTxAbort) {
+ /* Stats counted in Tx-done handler, just restart Tx. */
+ writel(virt_to_bus(&np->tx_ring[np->dirty_tx % TX_RING_SIZE]),
+ ioaddr + TxRingPtr);
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ if (np->tx_thresh < 0xE0)
+ writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_INFO "%s: Transmitter underrun, increasing Tx "
+ "threshold setting to %2.2x.\n", dev->name, np->tx_thresh);
+ }
+ if ((intr_status & ~(IntrLinkChange | IntrMIIChange | IntrStatsMax |
+ IntrTxAbort|IntrTxAborted | IntrNormalSummary))
+ && (np->msg_level & NETIF_MSG_DRV)) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Recovery for other fault sources not known. */
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ /* Nominally we should lock this segment of code for SMP, although
+ the vulnerability window is very small and statistics are
+ non-critical. */
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ writel(0, ioaddr + RxMissed);
+
+ return &np->stats;
+}
+
+/* The big-endian AUTODIN II ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ }
+ return crc;
+}
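+
+/* As the note above says, bulk data would normally use a table-based CRC
+ routine rather than this bit-at-a-time loop.  What follows is only an
+ illustrative, self-contained sketch of that approach, using the common
+ reflected (little-endian) form of the same 0x04c11db7 polynomial; the names
+ ether_crc_table and ether_crc_bulk are hypothetical, are not used by this
+ driver, and are therefore kept out of the build. */
+#if 0
+static u32 ether_crc_table[256];
+
+static void ether_crc_build_table(void)
+{
+ int i, bit;
+
+ for (i = 0; i < 256; i++) {
+ u32 c = i;
+ for (bit = 0; bit < 8; bit++) /* one polynomial step per bit */
+ c = (c & 1) ? (c >> 1) ^ 0xedb88320U : c >> 1;
+ ether_crc_table[i] = c; /* CRC remainder for this byte value */
+ }
+}
+
+static u32 ether_crc_bulk(int length, unsigned char *data)
+{
+ u32 crc = 0xffffffffU;
+
+ while (--length >= 0) /* one table lookup per byte */
+ crc = (crc >> 8) ^ ether_crc_table[(crc ^ *data++) & 0xff];
+ return crc;
+}
+#endif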
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = 0x1C;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ writel(0xffffffff, ioaddr + MulticastFilter0);
+ writel(0xffffffff, ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
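+ /* Hash each address: the top 6 bits of its CRC index one of the 64 filter bits. */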
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26,
+ mc_filter);
+ }
+ writel(mc_filter[0], ioaddr + MulticastFilter0);
+ writel(mc_filter[1], ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ }
+ writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* Note: forced media tracking is done in mdio_write(). */
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd));
+
+ /* Switch to loopback mode to avoid hardware races. */
+ writeb(np->tx_thresh | 0x01, ioaddr + TxConfig);
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writew(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ np->chip_cmd = CmdStop;
+ writew(CmdStop, ioaddr + ChipCmd);
+
+ del_timer(&np->timer);
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rx_status = 0;
+ np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ if (np->tx_buf[i]) {
+ kfree(np->tx_buf[i]);
+ np->tx_buf[i] = 0;
+ }
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int via_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND:
+ /* Disable interrupts, stop Tx and Rx. */
+ writew(0x0000, ioaddr + IntrEnable);
+ /* Stop the chip's Tx and Rx processes. */
+ writew(CmdStop, ioaddr + ChipCmd);
+ break;
+ case DRV_RESUME:
+ /* This is incomplete: the actions are very chip specific. */
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+ writew(np->intr_enable, ioaddr + IntrEnable);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ /* Some, but not all, kernel versions close automatically. */
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
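+ /* Unlink this device from the driver's singly linked list of instances. */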
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&via_rhine_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&via_rhine_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr, pci_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)(root_net_dev->base_addr));
+#endif
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` via-rhine.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c via-rhine.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c via-rhine.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/wavelan.c b/linux/src/drivers/net/wavelan.c
new file mode 100644
index 0000000..dbe8815
--- /dev/null
+++ b/linux/src/drivers/net/wavelan.c
@@ -0,0 +1,4373 @@
+/*
+ * WaveLAN ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ * Original copyright follows (also see the end of this file).
+ * See wavelan.p.h for details.
+ */
+
+/*
+ * AT&T GIS (nee NCR) WaveLAN card:
+ * An Ethernet-like radio transceiver
+ * controlled by an Intel 82586 coprocessor.
+ */
+
+#include "wavelan.p.h" /* Private header */
+
+/************************* MISC SUBROUTINES **************************/
+/*
+ * Subroutines which won't fit in one of the following categories
+ * (WaveLAN modem or i82586)
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper for disabling interrupts.
+ */
+static inline unsigned long
+wv_splhi(void)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ return(flags);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wrapper for re-enabling interrupts.
+ */
+static inline void
+wv_splx(unsigned long flags)
+{
+ restore_flags(flags);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate irq number to PSA irq parameter
+ */
+static u_char
+wv_irq_to_psa(int irq)
+{
+ if(irq < 0 || irq >= NELS(irqvals))
+ return 0;
+
+ return irqvals[irq];
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate PSA irq parameter to irq number
+ */
+static int
+wv_psa_to_irq(u_char irqval)
+{
+ int irq;
+
+ for(irq = 0; irq < NELS(irqvals); irq++)
+ if(irqvals[irq] == irqval)
+ return irq;
+
+ return -1;
+}
+
+#ifdef STRUCT_CHECK
+/*------------------------------------------------------------------*/
+/*
+ * Sanity routine to verify the sizes of the various WaveLAN interface
+ * structures.
+ */
+static char *
+wv_struct_check(void)
+{
+#define SC(t,s,n) if (sizeof(t) != s) return(n);
+
+ SC(psa_t, PSA_SIZE, "psa_t");
+ SC(mmw_t, MMW_SIZE, "mmw_t");
+ SC(mmr_t, MMR_SIZE, "mmr_t");
+ SC(ha_t, HA_SIZE, "ha_t");
+
+#undef SC
+
+ return((char *) NULL);
+} /* wv_struct_check */
+#endif /* STRUCT_CHECK */
+
+/********************* HOST ADAPTER SUBROUTINES *********************/
+/*
+ * Useful subroutines to manage the WaveLAN ISA interface
+ *
+ * One major difference from the PCMCIA hardware (apart from the port mapping)
+ * is that we have to keep the state of the Host Control Register
+ * because of the interrupt enable & bus size flags.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read from card's Host Adaptor Status Register.
+ */
+static inline u_short
+hasr_read(u_long ioaddr)
+{
+ return(inw(HASR(ioaddr)));
+} /* hasr_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register.
+ */
+static inline void
+hacr_write(u_long ioaddr,
+ u_short hacr)
+{
+ outw(hacr, HACR(ioaddr));
+} /* hacr_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write to card's Host Adapter Command Register. Include a delay for
+ * those times when it is needed.
+ */
+static inline void
+hacr_write_slow(u_long ioaddr,
+ u_short hacr)
+{
+ hacr_write(ioaddr, hacr);
+ /* delay might only be needed sometimes */
+ udelay(1000L);
+} /* hacr_write_slow */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the channel attention bit.
+ */
+static inline void
+set_chan_attn(u_long ioaddr,
+ u_short hacr)
+{
+ hacr_write(ioaddr, hacr | HACR_CA);
+} /* set_chan_attn */
+
+/*------------------------------------------------------------------*/
+/*
+ * Reset, and then set host adaptor into default mode.
+ */
+static inline void
+wv_hacr_reset(u_long ioaddr)
+{
+ hacr_write_slow(ioaddr, HACR_RESET);
+ hacr_write(ioaddr, HACR_DEFAULT);
+} /* wv_hacr_reset */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the i/o transfer over the ISA bus to 8-bit mode
+ */
+static inline void
+wv_16_off(u_long ioaddr,
+ u_short hacr)
+{
+ hacr &= ~HACR_16BITS;
+ hacr_write(ioaddr, hacr);
+} /* wv_16_off */
+
+/*------------------------------------------------------------------*/
+/*
+ * Set the i/o transfer over the ISA bus to 16-bit mode
+ */
+static inline void
+wv_16_on(u_long ioaddr,
+ u_short hacr)
+{
+ hacr |= HACR_16BITS;
+ hacr_write(ioaddr, hacr);
+} /* wv_16_on */
+
+/*------------------------------------------------------------------*/
+/*
+ * Disable interrupts on the WaveLAN hardware
+ */
+static inline void
+wv_ints_off(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_long x;
+
+ x = wv_splhi();
+
+ lp->hacr &= ~HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+
+ wv_splx(x);
+} /* wv_ints_off */
+
+/*------------------------------------------------------------------*/
+/*
+ * Enable interrupts on the WaveLAN hardware
+ */
+static inline void
+wv_ints_on(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_long x;
+
+ x = wv_splhi();
+
+ lp->hacr |= HACR_INTRON;
+ hacr_write(ioaddr, lp->hacr);
+
+ wv_splx(x);
+} /* wv_ints_on */
+
+/******************* MODEM MANAGEMENT SUBROUTINES *******************/
+/*
+ * Useful subroutines to manage the modem of the WaveLAN
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read the Parameter Storage Area from the WaveLAN card's memory
+ */
+/*
+ * Read bytes from the PSA.
+ */
+static void
+psa_read(u_long ioaddr,
+ u_short hacr,
+ int o, /* offset in PSA */
+ u_char * b, /* buffer to fill */
+ int n) /* size to read */
+{
+ wv_16_off(ioaddr, hacr);
+
+ while(n-- > 0)
+ {
+ outw(o, PIOR2(ioaddr));
+ o++;
+ *b++ = inb(PIOP2(ioaddr));
+ }
+
+ wv_16_on(ioaddr, hacr);
+} /* psa_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write the Parameter Storage Area to the WaveLAN card's memory
+ */
+static void
+psa_write(u_long ioaddr,
+ u_short hacr,
+ int o, /* Offset in psa */
+ u_char * b, /* Buffer in memory */
+ int n) /* Length of buffer */
+{
+ int count = 0;
+
+ wv_16_off(ioaddr, hacr);
+
+ while(n-- > 0)
+ {
+ outw(o, PIOR2(ioaddr));
+ o++;
+
+ outb(*b, PIOP2(ioaddr));
+ b++;
+
+ /* Wait for the memory to finish its write cycle */
+ count = 0;
+ while((count++ < 100) &&
+ (hasr_read(ioaddr) & HASR_PSA_BUSY))
+ udelay(1000);
+ }
+
+ wv_16_on(ioaddr, hacr);
+} /* psa_write */
+
+#ifdef PSA_CRC
+/*------------------------------------------------------------------*/
+/*
+ * Calculate the PSA CRC (not tested yet)
+ * As the WaveLAN drivers don't use the CRC, I won't use it either.
+ * Thanks to Valster, Nico <NVALSTER@wcnd.nl.lucent.com> for the code
+ * NOTE: By specifying a length including the CRC position the
+ * returned value should be zero. (i.e. a correct checksum in the PSA)
+ */
+static u_short
+psa_crc(u_short * psa, /* The PSA */
+ int size) /* Number of short for CRC */
+{
+ int byte_cnt; /* Loop on the PSA */
+ u_short crc_bytes = 0; /* Data in the PSA */
+ int bit_cnt; /* Loop on the bits of the short */
+
+ for(byte_cnt = 0; byte_cnt <= size; byte_cnt++ )
+ {
+ crc_bytes ^= psa[byte_cnt]; /* It's an xor */
+
+ for(bit_cnt = 1; bit_cnt < 9; bit_cnt++ )
+ {
+ if(crc_bytes & 0x0001)
+ crc_bytes = (crc_bytes >> 1) ^ 0xA001;
+ else
+ crc_bytes >>= 1 ;
+ }
+ }
+
+ return crc_bytes;
+} /* psa_crc */
+#endif /* PSA_CRC */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write 1 byte to the MMC.
+ */
+static inline void
+mmc_out(u_long ioaddr,
+ u_short o,
+ u_char d)
+{
+ /* Wait for MMC to go idle */
+ while(inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+
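+ /* Command word: data in bits 15..8, register offset shifted left one,
+ * bit 0 set to request a write (mmc_in() leaves bit 0 clear for a read). */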
+ outw((u_short) (((u_short) d << 8) | (o << 1) | 1),
+ MMCR(ioaddr));
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to write bytes to the Modem Management Controller.
+ * We start from the end because that is the way it should be!
+ */
+static inline void
+mmc_write(u_long ioaddr,
+ u_char o,
+ u_char * b,
+ int n)
+{
+ o += n;
+ b += n;
+
+ while(n-- > 0 )
+ mmc_out(ioaddr, --o, *(--b));
+} /* mmc_write */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read 1 byte from the MMC.
+ * Optimised version for 1 byte, avoid using memory...
+ */
+static inline u_char
+mmc_in(u_long ioaddr,
+ u_short o)
+{
+ while(inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+ outw(o << 1, MMCR(ioaddr));
+
+ while(inw(HASR(ioaddr)) & HASR_MMC_BUSY)
+ ;
+ return (u_char) (inw(MMCR(ioaddr)) >> 8);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Routine to read bytes from the Modem Management Controller.
+ * The implementation is complicated by a lack of address lines,
+ * which prevents decoding of the low-order bit.
+ * (that code has simply been moved into the function above).
+ * We start from the end because that is the way it should be!
+ */
+static inline void
+mmc_read(u_long ioaddr,
+ u_char o,
+ u_char * b,
+ int n)
+{
+ o += n;
+ b += n;
+
+ while(n-- > 0)
+ *(--b) = mmc_in(ioaddr, --o);
+} /* mmc_read */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the type of encryption available...
+ */
+static inline int
+mmc_encr(u_long ioaddr) /* i/o port of the card */
+{
+ int temp;
+
+ temp = mmc_in(ioaddr, mmroff(0, mmr_des_avail));
+ if((temp != MMR_DES_AVAIL_DES) && (temp != MMR_DES_AVAIL_AES))
+ return 0;
+ else
+ return temp;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Wait for the frequency EEPROM to complete a command...
+ * I hope this one will be optimally inlined...
+ */
+static inline void
+fee_wait(u_long ioaddr, /* i/o port of the card */
+ int delay, /* Base delay to wait for */
+ int number) /* Number of time to wait */
+{
+ int count = 0; /* Wait only a limited time */
+
+ while((count++ < number) &&
+ (mmc_in(ioaddr, mmroff(0, mmr_fee_status)) & MMR_FEE_STATUS_BUSY))
+ udelay(delay);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the frequency EEPROM (frequency select cards).
+ */
+static void
+fee_read(u_long ioaddr, /* i/o port of the card */
+ u_short o, /* destination offset */
+ u_short * b, /* data buffer */
+ int n) /* number of registers */
+{
+ b += n; /* Position at the end of the area */
+
+ /* Write the address */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+ /* Loop on all buffer */
+ while(n-- > 0)
+ {
+ /* Write the read command */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_READ);
+
+ /* Wait until EEPROM is ready (should be quick!) */
+ fee_wait(ioaddr, 10, 100);
+
+ /* Read the value */
+ *--b = ((mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)) << 8) |
+ mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
+ }
+}
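+
+/*
+ * For reference only: a sketch of how the frequency word obtained with
+ * fee_read() from the first EEPROM area is decoded later in this file (see
+ * wv_init_info()): the bits above bit 5 give the offset in MHz from 2400 MHz
+ * (freq >> 6), and bit 5 (0x20) adds half a megahertz.  The helper name
+ * fee_freq_to_khz is purely illustrative and is kept out of the build.
+ */
+#if 0
+static long
+fee_freq_to_khz(u_short freq)
+{
+ long khz = ((freq >> 6) + 2400L) * 1000L; /* whole-MHz part */
+
+ if(freq & 0x20) /* half-MHz flag (printed as ".5" by wv_init_info()) */
+ khz += 500L;
+
+ return(khz);
+}
+#endif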
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Write bytes to the frequency EEPROM (frequency select cards).
+ * This is a bit complicated, because the frequency EEPROM has to
+ * be unprotected and the write enabled.
+ * Jean II
+ */
+static void
+fee_write(u_long ioaddr, /* i/o port of the card */
+ u_short o, /* destination offset */
+ u_short * b, /* data buffer */
+ int n) /* number of registers */
+{
+ b += n; /* Position at the end of the area */
+
+#ifdef EEPROM_IS_PROTECTED /* disabled */
+#ifdef DOESNT_SEEM_TO_WORK /* disabled */
+ /* Ask to read the protected register */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRREAD);
+
+ fee_wait(ioaddr, 10, 100);
+
+ /* Read the protected register */
+ printk("Protected 2 : %02X-%02X\n",
+ mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)),
+ mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
+#endif /* DOESNT_SEEM_TO_WORK */
+
+ /* Enable protected register */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PREN);
+
+ fee_wait(ioaddr, 10, 100);
+
+ /* Unprotect area */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+#ifdef DOESNT_SEEM_TO_WORK /* disabled */
+ /* Or use : */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRCLEAR);
+#endif /* DOESNT_SEEM_TO_WORK */
+
+ fee_wait(ioaddr, 10, 100);
+#endif /* EEPROM_IS_PROTECTED */
+
+ /* Write enable */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WREN);
+
+ fee_wait(ioaddr, 10, 100);
+
+ /* Write the EEPROM address */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
+
+ /* Loop on all buffer */
+ while(n-- > 0)
+ {
+ /* Write the value */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_data_h), (*--b) >> 8);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_data_l), *b & 0xFF);
+
+ /* Write the write command */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WRITE);
+
+ /* Wavelan doc says : wait at least 10 ms for EEBUSY = 0 */
+ udelay(10000);
+ fee_wait(ioaddr, 10, 100);
+ }
+
+ /* Write disable */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_DS);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WDS);
+
+ fee_wait(ioaddr, 10, 100);
+
+#ifdef EEPROM_IS_PROTECTED /* disabled */
+ /* Reprotect EEPROM */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x00);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
+
+ fee_wait(ioaddr, 10, 100);
+#endif /* EEPROM_IS_PROTECTED */
+}
+#endif /* WIRELESS_EXT */
+
+/************************ I82586 SUBROUTINES *************************/
+/*
+ * Useful subroutines to manage the Ethernet controller
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Read bytes from the on-board RAM.
+ * Why does inlining this function make it fail???
+ */
+static /*inline*/ void
+obram_read(u_long ioaddr,
+ u_short o,
+ u_char * b,
+ int n)
+{
+ outw(o, PIOR1(ioaddr));
+ insw(PIOP1(ioaddr), (unsigned short *) b, (n + 1) >> 1);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Write bytes to the on-board RAM.
+ */
+static inline void
+obram_write(u_long ioaddr,
+ u_short o,
+ u_char * b,
+ int n)
+{
+ outw(o, PIOR1(ioaddr));
+ outsw(PIOP1(ioaddr), (unsigned short *) b, (n + 1) >> 1);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Acknowledge the reading of the status issued by the i82586
+ */
+static void
+wv_ack(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_short scb_cs;
+ int i;
+
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ scb_cs &= SCB_ST_INT;
+
+ if(scb_cs == 0)
+ return;
+
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
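+ /* Wait for the i82586 to take the ack: it clears the SCB command word when done. */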
+ for(i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ if(scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+ udelay(100);
+
+#ifdef DEBUG_CONFIG_ERROR
+ if(i <= 0)
+ printk(KERN_INFO "%s: wv_ack(): board not accepting command.\n",
+ dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Set channel attention bit and busy wait until command has
+ * completed, then acknowledge the command completion.
+ */
+static inline int
+wv_synchronous_cmd(device * dev,
+ const char * str)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_short scb_cmd;
+ ach_t cb;
+ int i;
+
+ scb_cmd = SCB_CMD_CUC & SCB_CMD_CUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cmd, sizeof(scb_cmd));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
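+ /* Busy wait until the command completes (AC_SFLD_C set in the action status). */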
+ for (i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+ if (cb.ac_status & AC_SFLD_C)
+ break;
+
+ udelay(10);
+ }
+ udelay(100);
+
+ if(i <= 0 || !(cb.ac_status & AC_SFLD_OK))
+ {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO "%s: %s failed; status = 0x%x\n",
+ dev->name, str, cb.ac_status);
+#endif
+#ifdef DEBUG_I82586_SHOW
+ wv_scb_show(ioaddr);
+#endif
+ return -1;
+ }
+
+ /* Ack the status */
+ wv_ack(dev);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Configuration commands completion interrupt.
+ * Check if done, and if ok...
+ */
+static inline int
+wv_config_complete(device * dev,
+ u_long ioaddr,
+ net_local * lp)
+{
+ unsigned short mcs_addr;
+ unsigned short status;
+ int ret;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wv_config_complete()\n", dev->name);
+#endif
+
+ mcs_addr = lp->tx_first_in_use + sizeof(ac_tx_t) + sizeof(ac_nop_t)
+ + sizeof(tbd_t) + sizeof(ac_cfg_t) + sizeof(ac_ias_t);
+
+ /* Read the status of the last command (set mc list) */
+ obram_read(ioaddr, acoff(mcs_addr, ac_status), (unsigned char *)&status, sizeof(status));
+
+ /* If not completed -> exit */
+ if((status & AC_SFLD_C) == 0)
+ ret = 0; /* Not ready to be scrapped */
+ else
+ {
+#ifdef DEBUG_CONFIG_ERROR
+ unsigned short cfg_addr;
+ unsigned short ias_addr;
+
+ /* Check mc_config command */
+ if((status & AC_SFLD_OK) == 0)
+ printk(KERN_INFO "%s: wv_config_complete(): set_multicast_address failed; status = 0x%x\n",
+ dev->name, status);
+
+ /* check ia-config command */
+ ias_addr = mcs_addr - sizeof(ac_ias_t);
+ obram_read(ioaddr, acoff(ias_addr, ac_status), (unsigned char *)&status, sizeof(status));
+ if((status & AC_SFLD_OK) == 0)
+ printk(KERN_INFO "%s: wv_config_complete(): set_MAC_address failed; status = 0x%x\n",
+ dev->name, status);
+
+ /* Check config command */
+ cfg_addr = ias_addr - sizeof(ac_cfg_t);
+ obram_read(ioaddr, acoff(cfg_addr, ac_status), (unsigned char *)&status, sizeof(status));
+ if((status & AC_SFLD_OK) == 0)
+ printk(KERN_INFO "%s: wv_config_complete(): configure failed; status = 0x%x\n",
+ dev->name, status);
+#endif /* DEBUG_CONFIG_ERROR */
+
+ ret = 1; /* Ready to be scrapped */
+ }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wv_config_complete() - %d\n", dev->name, ret);
+#endif
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Command completion interrupt.
+ * Reclaim as many freed tx buffers as we can.
+ */
+static int
+wv_complete(device * dev,
+ u_long ioaddr,
+ net_local * lp)
+{
+ int nreaped = 0;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wv_complete()\n", dev->name);
+#endif
+
+ /* Loop on all the transmit buffers */
+ while(lp->tx_first_in_use != I82586NULL)
+ {
+ unsigned short tx_status;
+
+ /* Read the first transmit buffer */
+ obram_read(ioaddr, acoff(lp->tx_first_in_use, ac_status), (unsigned char *)&tx_status, sizeof(tx_status));
+
+ /* Hack for reconfiguration... */
+ if(tx_status == 0xFFFF)
+ if(!wv_config_complete(dev, ioaddr, lp))
+ break; /* Not completed */
+
+ /* If not completed -> exit */
+ if((tx_status & AC_SFLD_C) == 0)
+ break;
+
+ /* We now remove this buffer */
+ nreaped++;
+ --lp->tx_n_in_use;
+
+/*
+if (lp->tx_n_in_use > 0)
+ printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
+*/
+
+ /* Was it the last one ? */
+ if(lp->tx_n_in_use <= 0)
+ lp->tx_first_in_use = I82586NULL;
+ else
+ {
+ /* Next one in the chain */
+ lp->tx_first_in_use += TXBLOCKZ;
+ if(lp->tx_first_in_use >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_in_use -= NTXBLOCKS * TXBLOCKZ;
+ }
+
+ /* Hack for reconfiguration... */
+ if(tx_status == 0xFFFF)
+ continue;
+
+ /* Now, check status of the finished command */
+ if(tx_status & AC_SFLD_OK)
+ {
+ int ncollisions;
+
+ lp->stats.tx_packets++;
+ ncollisions = tx_status & AC_SFLD_MAXCOL;
+ lp->stats.collisions += ncollisions;
+#ifdef DEBUG_INTERRUPT_INFO
+ if(ncollisions > 0)
+ printk(KERN_DEBUG "%s: wv_complete(): tx completed after %d collisions.\n",
+ dev->name, ncollisions);
+#endif
+ }
+ else
+ {
+ lp->stats.tx_errors++;
+#ifndef IGNORE_NORMAL_XMIT_ERRS
+ if(tx_status & AC_SFLD_S10)
+ {
+ lp->stats.tx_carrier_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wv_complete(): tx error: no CS.\n",
+ dev->name);
+#endif
+ }
+#endif /* IGNORE_NORMAL_XMIT_ERRS */
+ if(tx_status & AC_SFLD_S9)
+ {
+ lp->stats.tx_carrier_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wv_complete(): tx error: lost CTS.\n",
+ dev->name);
+#endif
+ }
+ if(tx_status & AC_SFLD_S8)
+ {
+ lp->stats.tx_fifo_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wv_complete(): tx error: slow DMA.\n",
+ dev->name);
+#endif
+ }
+#ifndef IGNORE_NORMAL_XMIT_ERRS
+ if(tx_status & AC_SFLD_S6)
+ {
+ lp->stats.tx_heartbeat_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wv_complete(): tx error: heart beat.\n",
+ dev->name);
+#endif
+ }
+ if(tx_status & AC_SFLD_S5)
+ {
+ lp->stats.tx_aborted_errors++;
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wv_complete(): tx error: too many collisions.\n",
+ dev->name);
+#endif
+ }
+#endif /* IGNORE_NORMAL_XMIT_ERRS */
+ }
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wv_complete(): tx completed, tx_status 0x%04x\n",
+ dev->name, tx_status);
+#endif
+ }
+
+#ifdef DEBUG_INTERRUPT_INFO
+ if(nreaped > 1)
+ printk(KERN_DEBUG "%s: wv_complete(): reaped %d\n", dev->name, nreaped);
+#endif
+
+ /*
+ * Inform upper layers.
+ */
+ if(lp->tx_n_in_use < NTXBLOCKS - 1)
+ {
+ dev->tbusy = 0;
+ mark_bh(NET_BH);
+ }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wv_complete()\n", dev->name);
+#endif
+ return nreaped;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Reconfigure the i82586, or at least ask for it...
+ * Because wv_82586_config() uses a transmission buffer, we must do it
+ * when we are sure that one is left, so we do it now
+ * or in wavelan_packet_xmit() (I can't find any better place;
+ * wavelan_interrupt is not an option...), so you may experience
+ * some delay sometimes...
+ */
+static inline void
+wv_82586_reconfig(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+
+ /* Check if we can do it now ! */
+ if(!(dev->start) || (set_bit(0, (void *)&dev->tbusy) != 0))
+ {
+ lp->reconfig_82586 = 1;
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wv_82586_reconfig(): delayed (busy = %ld, start = %d)\n",
+ dev->name, dev->tbusy, dev->start);
+#endif
+ }
+ else
+ wv_82586_config(dev);
+}
+
+/********************* DEBUG & INFO SUBROUTINES *********************/
+/*
+ * These routines are used in the code to show debug information.
+ * Most of the time, they dump the contents of hardware structures...
+ */
+
+#ifdef DEBUG_PSA_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted contents of the Parameter Storage Area.
+ */
+static void
+wv_psa_show(psa_t * p)
+{
+ printk(KERN_DEBUG "##### WaveLAN psa contents: #####\n");
+ printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
+ p->psa_io_base_addr_1,
+ p->psa_io_base_addr_2,
+ p->psa_io_base_addr_3,
+ p->psa_io_base_addr_4);
+ printk(KERN_DEBUG "psa_rem_boot_addr_1: 0x%02X %02X %02X\n",
+ p->psa_rem_boot_addr_1,
+ p->psa_rem_boot_addr_2,
+ p->psa_rem_boot_addr_3);
+ printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
+ printk("psa_int_req_no: %d\n", p->psa_int_req_no);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ p->psa_unused0[0],
+ p->psa_unused0[1],
+ p->psa_unused0[2],
+ p->psa_unused0[3],
+ p->psa_unused0[4],
+ p->psa_unused0[5],
+ p->psa_unused0[6]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_univ_mac_addr[0],
+ p->psa_univ_mac_addr[1],
+ p->psa_univ_mac_addr[2],
+ p->psa_univ_mac_addr[3],
+ p->psa_univ_mac_addr[4],
+ p->psa_univ_mac_addr[5]);
+ printk(KERN_DEBUG "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_local_mac_addr[0],
+ p->psa_local_mac_addr[1],
+ p->psa_local_mac_addr[2],
+ p->psa_local_mac_addr[3],
+ p->psa_local_mac_addr[4],
+ p->psa_local_mac_addr[5]);
+ printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel);
+ printk("psa_comp_number: %d, ", p->psa_comp_number);
+ printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set);
+ printk(KERN_DEBUG "psa_feature_select/decay_prm: 0x%02x, ",
+ p->psa_feature_select);
+ printk("psa_subband/decay_update_prm: %d\n", p->psa_subband);
+ printk(KERN_DEBUG "psa_quality_thr: 0x%02x, ", p->psa_quality_thr);
+ printk("psa_mod_delay: 0x%02x\n", p->psa_mod_delay);
+ printk(KERN_DEBUG "psa_nwid: 0x%02x%02x, ", p->psa_nwid[0], p->psa_nwid[1]);
+ printk("psa_nwid_select: %d\n", p->psa_nwid_select);
+ printk(KERN_DEBUG "psa_encryption_select: %d, ", p->psa_encryption_select);
+ printk("psa_encryption_key[]: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ p->psa_encryption_key[0],
+ p->psa_encryption_key[1],
+ p->psa_encryption_key[2],
+ p->psa_encryption_key[3],
+ p->psa_encryption_key[4],
+ p->psa_encryption_key[5],
+ p->psa_encryption_key[6],
+ p->psa_encryption_key[7]);
+ printk(KERN_DEBUG "psa_databus_width: %d\n", p->psa_databus_width);
+ printk(KERN_DEBUG "psa_call_code/auto_squelch: 0x%02x, ",
+ p->psa_call_code[0]);
+ printk("psa_call_code[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ p->psa_call_code[0],
+ p->psa_call_code[1],
+ p->psa_call_code[2],
+ p->psa_call_code[3],
+ p->psa_call_code[4],
+ p->psa_call_code[5],
+ p->psa_call_code[6],
+ p->psa_call_code[7]);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ p->psa_reserved[0],
+ p->psa_reserved[1],
+ p->psa_reserved[2],
+ p->psa_reserved[3]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
+ printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+ printk("psa_crc_status: 0x%02x\n", p->psa_crc_status);
+} /* wv_psa_show */
+#endif /* DEBUG_PSA_SHOW */
+
+#ifdef DEBUG_MMC_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the Modem Management Controller.
+ * This function needs to be completed...
+ */
+static void
+wv_mmc_show(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv;
+ mmr_t m;
+
+ /* Basic check */
+ if(hasr_read(ioaddr) & HASR_NO_CLK)
+ {
+ printk(KERN_WARNING "%s: wv_mmc_show: modem not connected\n",
+ dev->name);
+ return;
+ }
+
+ /* Read the mmc */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+ mmc_read(ioaddr, 0, (u_char *)&m, sizeof(m));
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+ /* Don't forget to update statistics */
+ lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+#endif /* WIRELESS_EXT */
+
+ printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n");
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ m.mmr_unused0[0],
+ m.mmr_unused0[1],
+ m.mmr_unused0[2],
+ m.mmr_unused0[3],
+ m.mmr_unused0[4],
+ m.mmr_unused0[5],
+ m.mmr_unused0[6],
+ m.mmr_unused0[7]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "Encryption algorithm: %02X - Status: %02X\n",
+ m.mmr_des_avail, m.mmr_des_status);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused1[]: %02X:%02X:%02X:%02X:%02X\n",
+ m.mmr_unused1[0],
+ m.mmr_unused1[1],
+ m.mmr_unused1[2],
+ m.mmr_unused1[3],
+ m.mmr_unused1[4]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "dce_status: 0x%x [%s%s%s%s]\n",
+ m.mmr_dce_status,
+ (m.mmr_dce_status & MMR_DCE_STATUS_RX_BUSY) ? "energy detected,":"",
+ (m.mmr_dce_status & MMR_DCE_STATUS_LOOPT_IND) ?
+ "loop test indicated," : "",
+ (m.mmr_dce_status & MMR_DCE_STATUS_TX_BUSY) ? "transmitter on," : "",
+ (m.mmr_dce_status & MMR_DCE_STATUS_JBR_EXPIRED) ?
+ "jabber timer expired," : "");
+ printk(KERN_DEBUG "Dsp ID: %02X\n",
+ m.mmr_dsp_id);
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "mmc_unused2[]: %02X:%02X\n",
+ m.mmr_unused2[0],
+ m.mmr_unused2[1]);
+#endif /* DEBUG_SHOW_UNUSED */
+ printk(KERN_DEBUG "# correct_nwid: %d, # wrong_nwid: %d\n",
+ (m.mmr_correct_nwid_h << 8) | m.mmr_correct_nwid_l,
+ (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l);
+ printk(KERN_DEBUG "thr_pre_set: 0x%x [current signal %s]\n",
+ m.mmr_thr_pre_set & MMR_THR_PRE_SET,
+ (m.mmr_thr_pre_set & MMR_THR_PRE_SET_CUR) ? "above" : "below");
+ printk(KERN_DEBUG "signal_lvl: %d [%s], ",
+ m.mmr_signal_lvl & MMR_SIGNAL_LVL,
+ (m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) ? "new msg" : "no new msg");
+ printk("silence_lvl: %d [%s], ", m.mmr_silence_lvl & MMR_SILENCE_LVL,
+ (m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) ? "update done" : "no new update");
+ printk("sgnl_qual: 0x%x [%s]\n",
+ m.mmr_sgnl_qual & MMR_SGNL_QUAL,
+ (m.mmr_sgnl_qual & MMR_SGNL_QUAL_ANT) ? "Antenna 1" : "Antenna 0");
+#ifdef DEBUG_SHOW_UNUSED
+ printk(KERN_DEBUG "netw_id_l: %x\n", m.mmr_netw_id_l);
+#endif /* DEBUG_SHOW_UNUSED */
+} /* wv_mmc_show */
+#endif /* DEBUG_MMC_SHOW */
+
+#ifdef DEBUG_I82586_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the last block of the i82586 memory
+ */
+static void
+wv_scb_show(u_long ioaddr)
+{
+ scb_t scb;
+
+ obram_read(ioaddr, OFFSET_SCB, (unsigned char *)&scb, sizeof(scb));
+
+ printk(KERN_DEBUG "##### WaveLAN system control block: #####\n");
+
+ printk(KERN_DEBUG "status: ");
+ printk("stat 0x%x[%s%s%s%s] ",
+ (scb.scb_status & (SCB_ST_CX | SCB_ST_FR | SCB_ST_CNA | SCB_ST_RNR)) >> 12,
+ (scb.scb_status & SCB_ST_CX) ? "cmd completion interrupt," : "",
+ (scb.scb_status & SCB_ST_FR) ? "frame received," : "",
+ (scb.scb_status & SCB_ST_CNA) ? "cmd unit not active," : "",
+ (scb.scb_status & SCB_ST_RNR) ? "rcv unit not ready," : "");
+ printk("cus 0x%x[%s%s%s] ",
+ (scb.scb_status & SCB_ST_CUS) >> 8,
+ ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_IDLE) ? "idle" : "",
+ ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_SUSP) ? "suspended" : "",
+ ((scb.scb_status & SCB_ST_CUS) == SCB_ST_CUS_ACTV) ? "active" : "");
+ printk("rus 0x%x[%s%s%s%s]\n",
+ (scb.scb_status & SCB_ST_RUS) >> 4,
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_IDLE) ? "idle" : "",
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_SUSP) ? "suspended" : "",
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_NRES) ? "no resources" : "",
+ ((scb.scb_status & SCB_ST_RUS) == SCB_ST_RUS_RDY) ? "ready" : "");
+
+ printk(KERN_DEBUG "command: ");
+ printk("ack 0x%x[%s%s%s%s] ",
+ (scb.scb_command & (SCB_CMD_ACK_CX | SCB_CMD_ACK_FR | SCB_CMD_ACK_CNA | SCB_CMD_ACK_RNR)) >> 12,
+ (scb.scb_command & SCB_CMD_ACK_CX) ? "ack cmd completion," : "",
+ (scb.scb_command & SCB_CMD_ACK_FR) ? "ack frame received," : "",
+ (scb.scb_command & SCB_CMD_ACK_CNA) ? "ack CU not active," : "",
+ (scb.scb_command & SCB_CMD_ACK_RNR) ? "ack RU not ready," : "");
+ printk("cuc 0x%x[%s%s%s%s%s] ",
+ (scb.scb_command & SCB_CMD_CUC) >> 8,
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_NOP) ? "nop" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_GO) ? "start cbl_offset" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_RES) ? "resume execution" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_SUS) ? "suspend execution" : "",
+ ((scb.scb_command & SCB_CMD_CUC) == SCB_CMD_CUC_ABT) ? "abort execution" : "");
+ printk("ruc 0x%x[%s%s%s%s%s]\n",
+ (scb.scb_command & SCB_CMD_RUC) >> 4,
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_NOP) ? "nop" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_GO) ? "start rfa_offset" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_RES) ? "resume reception" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_SUS) ? "suspend reception" : "",
+ ((scb.scb_command & SCB_CMD_RUC) == SCB_CMD_RUC_ABT) ? "abort reception" : "");
+
+ printk(KERN_DEBUG "cbl_offset 0x%x ", scb.scb_cbl_offset);
+ printk("rfa_offset 0x%x\n", scb.scb_rfa_offset);
+
+ printk(KERN_DEBUG "crcerrs %d ", scb.scb_crcerrs);
+ printk("alnerrs %d ", scb.scb_alnerrs);
+ printk("rscerrs %d ", scb.scb_rscerrs);
+ printk("ovrnerrs %d\n", scb.scb_ovrnerrs);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the i82586's receive unit.
+ */
+static void
+wv_ru_show(device * dev)
+{
+ /* net_local *lp = (net_local *) dev->priv; */
+
+ printk(KERN_DEBUG "##### WaveLAN i82586 receiver unit status: #####\n");
+ printk(KERN_DEBUG "ru:");
+ /*
+ * Not implemented yet...
+ */
+ printk("\n");
+} /* wv_ru_show */
+
+/*------------------------------------------------------------------*/
+/*
+ * Display info about one control block of the i82586 memory
+ */
+static void
+wv_cu_show_one(device * dev,
+ net_local * lp,
+ int i,
+ u_short p)
+{
+ u_long ioaddr;
+ ac_tx_t actx;
+
+ ioaddr = dev->base_addr;
+
+ printk("%d: 0x%x:", i, p);
+
+ obram_read(ioaddr, p, (unsigned char *)&actx, sizeof(actx));
+ printk(" status=0x%x,", actx.tx_h.ac_status);
+ printk(" command=0x%x,", actx.tx_h.ac_command);
+
+ /*
+ {
+ tbd_t tbd;
+
+ obram_read(ioaddr, actx.tx_tbd_offset, (unsigned char *)&tbd, sizeof(tbd));
+ printk(" tbd_status=0x%x,", tbd.tbd_status);
+ }
+ */
+
+ printk("|");
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Print status of the command unit of the i82586
+ */
+static void
+wv_cu_show(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ unsigned int i;
+ u_short p;
+
+ printk(KERN_DEBUG "##### WaveLAN i82586 command unit status: #####\n");
+
+ printk(KERN_DEBUG);
+ for(i = 0, p = lp->tx_first_in_use; i < NTXBLOCKS; i++)
+ {
+ wv_cu_show_one(dev, lp, i, p);
+
+ p += TXBLOCKZ;
+ if(p >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ p -= NTXBLOCKS * TXBLOCKZ;
+ }
+ printk("\n");
+}
+#endif /* DEBUG_I82586_SHOW */
+
+#ifdef DEBUG_DEVICE_SHOW
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN ISA device driver.
+ */
+static void
+wv_dev_show(device * dev)
+{
+ printk(KERN_DEBUG "dev:");
+ printk(" start=%d,", dev->start);
+ printk(" tbusy=%ld,", dev->tbusy);
+ printk(" interrupt=%d,", dev->interrupt);
+ printk(" trans_start=%ld,", dev->trans_start);
+ printk(" flags=0x%x,", dev->flags);
+ printk("\n");
+} /* wv_dev_show */
+
+/*------------------------------------------------------------------*/
+/*
+ * Print the formatted status of the WaveLAN ISA device driver's
+ * private information.
+ */
+static void
+wv_local_show(device * dev)
+{
+ net_local *lp;
+
+ lp = (net_local *)dev->priv;
+
+ printk(KERN_DEBUG "local:");
+ printk(" tx_n_in_use=%d,", lp->tx_n_in_use);
+ printk(" hacr=0x%x,", lp->hacr);
+ printk(" rx_head=0x%x,", lp->rx_head);
+ printk(" rx_last=0x%x,", lp->rx_last);
+ printk(" tx_first_free=0x%x,", lp->tx_first_free);
+ printk(" tx_first_in_use=0x%x,", lp->tx_first_in_use);
+ printk("\n");
+} /* wv_local_show */
+#endif /* DEBUG_DEVICE_SHOW */
+
+#if defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO)
+/*------------------------------------------------------------------*/
+/*
+ * Dump packet header (and content if necessary) on the screen
+ */
+static inline void
+wv_packet_info(u_char * p, /* Packet to dump */
+ int length, /* Length of the packet */
+ char * msg1, /* Name of the device */
+ char * msg2) /* Name of the function */
+{
+#ifndef DEBUG_PACKET_DUMP
+ printk(KERN_DEBUG "%s: %s(): dest %02X:%02X:%02X:%02X:%02X:%02X, length %d\n",
+ msg1, msg2, p[0], p[1], p[2], p[3], p[4], p[5], length);
+ printk(KERN_DEBUG "%s: %s(): src %02X:%02X:%02X:%02X:%02X:%02X, type 0x%02X%02X\n",
+ msg1, msg2, p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]);
+
+#else /* DEBUG_PACKET_DUMP */
+ int i;
+ int maxi;
+
+ printk(KERN_DEBUG "%s: %s(): len=%d, data=\"", msg1, msg2, length);
+
+ if((maxi = length) > DEBUG_PACKET_DUMP)
+ maxi = DEBUG_PACKET_DUMP;
+ for(i = 0; i < maxi; i++)
+ if(p[i] >= ' ' && p[i] <= '~')
+ printk(" %c", p[i]);
+ else
+ printk("%02X", p[i]);
+ if(maxi < length)
+ printk("..");
+ printk("\"\n");
+ printk(KERN_DEBUG "\n");
+#endif /* DEBUG_PACKET_DUMP */
+}
+#endif /* defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO) */
+
+/*------------------------------------------------------------------*/
+/*
+ * This is the information which is displayed by the driver at startup.
+ * There are a lot of flags to configure it as you wish...
+ */
+static inline void
+wv_init_info(device * dev)
+{
+ short ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv;
+ psa_t psa;
+ int i;
+
+ /* Read the parameter storage area */
+ psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
+
+#ifdef DEBUG_PSA_SHOW
+ wv_psa_show(&psa);
+#endif
+#ifdef DEBUG_MMC_SHOW
+ wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82586_SHOW
+ wv_cu_show(dev);
+#endif
+
+#ifdef DEBUG_BASIC_SHOW
+ /* Now, let's go for the basic stuff */
+ printk(KERN_NOTICE "%s: WaveLAN at %#x,", dev->name, ioaddr);
+ for(i = 0; i < WAVELAN_ADDR_SIZE; i++)
+ printk("%s%02X", (i == 0) ? " " : ":", dev->dev_addr[i]);
+ printk(", IRQ %d", dev->irq);
+
+ /* Print current network id */
+ if(psa.psa_nwid_select)
+ printk(", nwid 0x%02X-%02X", psa.psa_nwid[0], psa.psa_nwid[1]);
+ else
+ printk(", nwid off");
+
+ /* If 2.00 card */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ unsigned short freq;
+
+ /* Ask the EEPROM to read the frequency from the first area */
+ fee_read(ioaddr, 0x00 /* 1st area - frequency... */,
+ &freq, 1);
+
+ /* Print frequency */
+ printk(", 2.00, %ld", (freq >> 6) + 2400L);
+
+ /* Hack !!! */
+ if(freq & 0x20)
+ printk(".5");
+ }
+ else
+ {
+ printk(", PC");
+ switch(psa.psa_comp_number)
+ {
+ case PSA_COMP_PC_AT_915:
+ case PSA_COMP_PC_AT_2400:
+ printk("-AT");
+ break;
+ case PSA_COMP_PC_MC_915:
+ case PSA_COMP_PC_MC_2400:
+ printk("-MC");
+ break;
+ case PSA_COMP_PCMCIA_915:
+ printk("MCIA");
+ break;
+ default:
+ printk("???");
+ }
+ printk(", ");
+ switch (psa.psa_subband)
+ {
+ case PSA_SUBBAND_915:
+ printk("915");
+ break;
+ case PSA_SUBBAND_2425:
+ printk("2425");
+ break;
+ case PSA_SUBBAND_2460:
+ printk("2460");
+ break;
+ case PSA_SUBBAND_2484:
+ printk("2484");
+ break;
+ case PSA_SUBBAND_2430_5:
+ printk("2430.5");
+ break;
+ default:
+ printk("???");
+ }
+ }
+
+ printk(" MHz\n");
+#endif /* DEBUG_BASIC_SHOW */
+
+#ifdef DEBUG_VERSION_SHOW
+ /* Print version information */
+ printk(KERN_NOTICE "%s", version);
+#endif
+} /* wv_init_info */
+
+/********************* IOCTL, STATS & RECONFIG *********************/
+/*
+ * Here we find routines that are called by Linux on different
+ * occasions after the configuration and not for transmitting data.
+ * These may be called when the user uses ifconfig, /proc/net/dev
+ * or the wireless extensions.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Get the current ethernet statistics. This may be called with the
+ * card open or closed.
+ * Used when the user reads /proc/net/dev
+ */
+static en_stats *
+wavelan_get_stats(device * dev)
+{
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
+#endif
+
+ return(&((net_local *) dev->priv)->stats);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void
+wavelan_set_multicast_list(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n", dev->name);
+#endif
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
+ dev->name, dev->flags, dev->mc_count);
+#endif
+
+ /* If we ask for promiscuous mode,
+ * or all multicast addresses (we don't have that!)
+ * or too many multicast addresses for the hardware filter */
+ if((dev->flags & IFF_PROMISC) ||
+ (dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > I82586_MAX_MULTICAST_ADDRESSES))
+ {
+ /*
+ * Enable promiscuous mode: receive all packets.
+ */
+ if(!lp->promiscuous)
+ {
+ lp->promiscuous = 1;
+ lp->mc_count = 0;
+
+ wv_82586_reconfig(dev);
+
+ /* Tell the kernel that we are doing a really bad job... */
+ dev->flags |= IFF_PROMISC;
+ }
+ }
+ else
+ /* If there are some multicast addresses to set */
+ if(dev->mc_list != (struct dev_mc_list *) NULL)
+ {
+ /*
+ * Disable promiscuous mode, but receive all packets
+ * in multicast list
+ */
+#ifdef MULTICAST_AVOID
+ if(lp->promiscuous ||
+ (dev->mc_count != lp->mc_count))
+#endif
+ {
+ lp->promiscuous = 0;
+ lp->mc_count = dev->mc_count;
+
+ wv_82586_reconfig(dev);
+ }
+ }
+ else
+ {
+ /*
+ * Switch to normal mode: disable promiscuous mode and
+ * clear the multicast list.
+ */
+ if(lp->promiscuous || lp->mc_count == 0)
+ {
+ lp->promiscuous = 0;
+ lp->mc_count = 0;
+
+ wv_82586_reconfig(dev);
+ }
+ }
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_set_multicast_list()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This function doesn't exist...
+ */
+static int
+wavelan_set_mac_address(device * dev,
+ void * addr)
+{
+ struct sockaddr * mac = addr;
+
+ /* Copy the address */
+ memcpy(dev->dev_addr, mac->sa_data, WAVELAN_ADDR_SIZE);
+
+ /* Reconfig the beast */
+ wv_82586_reconfig(dev);
+
+ return 0;
+}
+
+#ifdef WIRELESS_EXT /* If the wireless extensions exist in the kernel */
+
+/*------------------------------------------------------------------*/
+/*
+ * Frequency setting (for hardware capable of it)
+ * It's a bit complicated and you don't really want to look into it...
+ * (called in wavelan_ioctl)
+ */
+static inline int
+wv_set_frequency(u_long ioaddr, /* i/o port of the card */
+ iw_freq * frequency)
+{
+ const int BAND_NUM = 10; /* Number of bands */
+ long freq = 0L; /* offset to 2.4 GHz in .5 MHz */
+#ifdef DEBUG_IOCTL_INFO
+ int i;
+#endif
+
+ /* Setting by frequency */
+ /* Theoretically, you may set any frequency between
+ * the two limits with a 0.5 MHz precision. In practice,
+ * I don't want you to have trouble with local
+ * regulations... */
+ if((frequency->e == 1) &&
+ (frequency->m >= (int) 2.412e8) && (frequency->m <= (int) 2.487e8))
+ {
+ freq = ((frequency->m / 10000) - 24000L) / 5;
+ }
+
+ /* Setting by channel (same as wfreqsel) */
+ /* Warning : each channel is 22MHz wide, so some of the channels
+ * will interfere... */
+ if((frequency->e == 0) &&
+ (frequency->m >= 0) && (frequency->m < BAND_NUM))
+ {
+ /* frequency in 1/4 of MHz (as read in the offset register) */
+ short bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8, 0xD0, 0xF0, 0xF8, 0x150 };
+
+ /* Get frequency offset */
+ freq = bands[frequency->m] >> 1;
+ }
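+
+ /* Worked example (illustrative only, not part of the original driver):
+  * with e = 1 and m = 241200000 (2.412 GHz), the computation above gives
+  *   freq = (241200000 / 10000 - 24000) / 5 = (24120 - 24000) / 5 = 24,
+  * i.e. 24 half-MHz steps (12 MHz) above 2.4 GHz.
+  * With e = 0 and m = 0 (channel 0), bands[0] = 0x30 = 48, so
+  *   freq = 48 >> 1 = 24, the same 2.412 GHz.
+  * The check below then tests bit (freq - 24) % 16 = 0 of
+  * table[9 - (freq - 24) / 16] = table[9]. */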
+
+ /* Verify if the frequency is allowed */
+ if(freq != 0L)
+ {
+ u_short table[10]; /* Authorized frequency table */
+
+ /* Read the frequency table */
+ fee_read(ioaddr, 0x71 /* frequency table */,
+ table, 10);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "Frequency table :");
+ for(i = 0; i < 10; i++)
+ {
+ printk(" %04X",
+ table[i]);
+ }
+ printk("\n");
+#endif
+
+ /* Check in the table whether the frequency is allowed */
+ if(!(table[9 - ((freq - 24) / 16)] &
+ (1 << ((freq - 24) % 16))))
+ return -EINVAL; /* not allowed */
+ }
+ else
+ return -EINVAL;
+
+ /* If we get a usable frequency */
+ if(freq != 0L)
+ {
+ unsigned short area[16];
+ unsigned short dac[2];
+ unsigned short area_verify[16];
+ unsigned short dac_verify[2];
+ /* Corresponding gain (in the power adjust value table)
+ * see AT&T WaveLAN Data Manual, REF 407-024689/E, page 3-8
+ * & WCIN062D.DOC, page 6.2.9 */
+ unsigned short power_limit[] = { 40, 80, 120, 160, 0 };
+ int power_band = 0; /* Selected band */
+ unsigned short power_adjust; /* Correct value */
+
+ /* Search for the gain */
+ power_band = 0;
+ while((freq > power_limit[power_band]) &&
+ (power_limit[++power_band] != 0))
+ ;
+
+ /* Read the first area */
+ fee_read(ioaddr, 0x00,
+ area, 16);
+
+ /* Read the DAC */
+ fee_read(ioaddr, 0x60,
+ dac, 2);
+
+ /* Read the new power adjust value */
+ fee_read(ioaddr, 0x6B - (power_band >> 1),
+ &power_adjust, 1);
+ if(power_band & 0x1)
+ power_adjust >>= 8;
+ else
+ power_adjust &= 0xFF;
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "WaveLAN EEPROM Area 1:");
+ for(i = 0; i < 16; i++)
+ {
+ printk(" %04X",
+ area[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
+ dac[0], dac[1]);
+#endif
+
+ /* Frequency offset (for info only) */
+ area[0] = ((freq << 5) & 0xFFE0) | (area[0] & 0x1F);
+
+ /* Receiver Principle main divider coefficient */
+ area[3] = (freq >> 1) + 2400L - 352L;
+ area[2] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
+
+ /* Transmitter Main divider coefficient */
+ area[13] = (freq >> 1) + 2400L;
+ area[12] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
+
+ /* Other parts of the area are flags, bit streams or unused... */
+
+ /* Set the value in the DAC. */
+ dac[1] = ((power_adjust >> 1) & 0x7F) | (dac[1] & 0xFF80);
+ dac[0] = ((power_adjust & 0x1) << 4) | (dac[0] & 0xFFEF);
+
+ /* Write the first area. */
+ fee_write(ioaddr, 0x00,
+ area, 16);
+
+ /* Write the DAC. */
+ fee_write(ioaddr, 0x60,
+ dac, 2);
+
+ /* We should now verify that the EEPROM write went OK. */
+
+ /* Reread the first area. */
+ fee_read(ioaddr, 0x00,
+ area_verify, 16);
+
+ /* ReRead the DAC */
+ fee_read(ioaddr, 0x60,
+ dac_verify, 2);
+
+ /* Compare */
+ if(memcmp(area, area_verify, 16 * 2) ||
+ memcmp(dac, dac_verify, 2 * 2))
+ {
+#ifdef DEBUG_IOCTL_ERROR
+ printk(KERN_INFO "WaveLAN: wv_set_frequency: unable to write new frequency to EEPROM(?).\n");
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ /* We must download the frequency parameters to the
+ * synthesizers (from the EEPROM - area 1)
+ * Note: as the EEPROM is automatically decremented, we set the end
+ * of the area... */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x0F);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait until the download is finished */
+ fee_wait(ioaddr, 100, 100);
+
+ /* We must now download the power adjust value (gain) to
+ * the synthesizers (from the EEPROM - area 7 - DAC) */
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x61);
+ mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
+ MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
+
+ /* Wait until the download is finished */
+ fee_wait(ioaddr, 100, 100);
+
+#ifdef DEBUG_IOCTL_INFO
+ /* Verification of what we have done... */
+
+ printk(KERN_DEBUG "WaveLAN EEPROM Area 1:");
+ for(i = 0; i < 16; i++)
+ {
+ printk(" %04X",
+ area_verify[i]);
+ }
+ printk("\n");
+
+ printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
+ dac_verify[0], dac_verify[1]);
+#endif
+
+ return 0;
+ }
+ else
+ return -EINVAL; /* Bah, never get there... */
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Give the list of available frequencies
+ */
+static inline int
+wv_frequency_list(u_long ioaddr, /* i/o port of the card */
+ iw_freq * list, /* List of frequencies to fill */
+ int max) /* Maximum number of frequencies */
+{
+ u_short table[10]; /* Authorized frequency table */
+ long freq = 0L; /* offset to 2.4 GHz in .5 MHz + 12 MHz */
+ int i; /* index in the table */
+
+ /* Read the frequency table */
+ fee_read(ioaddr, 0x71 /* frequency table */,
+ table, 10);
+
+ /* Check all frequencies */
+ i = 0;
+ for(freq = 0; freq < 150; freq++)
+ /* Check in the table whether the frequency is allowed */
+ if(table[9 - (freq / 16)] & (1 << (freq % 16)))
+ {
+ /* put in the list */
+ list[i].m = (((freq + 24) * 5) + 24000L) * 10000;
+ list[i++].e = 1;
+
+ /* Check number */
+ if(i >= max)
+ return(i);
+ }
+
+ return(i);
+}
+
+#ifdef WIRELESS_SPY
+/*------------------------------------------------------------------*/
+/*
+ * Gather wireless spy statistics: for each packet, compare the source
+ * address with our list, and if it matches, gather the stats...
+ * Sorry, but this function really needs the wireless extensions...
+ */
+static inline void
+wl_spy_gather(device * dev,
+ u_char * mac, /* MAC address */
+ u_char * stats) /* Statistics to gather */
+{
+ net_local * lp = (net_local *) dev->priv;
+ int i;
+
+ /* Check all addresses */
+ for(i = 0; i < lp->spy_number; i++)
+ /* If match */
+ if(!memcmp(mac, lp->spy_address[i], WAVELAN_ADDR_SIZE))
+ {
+ /* Update statistics */
+ lp->spy_stat[i].qual = stats[2] & MMR_SGNL_QUAL;
+ lp->spy_stat[i].level = stats[0] & MMR_SIGNAL_LVL;
+ lp->spy_stat[i].noise = stats[1] & MMR_SILENCE_LVL;
+ lp->spy_stat[i].updated = 0x7;
+ }
+}
+#endif /* WIRELESS_SPY */
+
+#ifdef HISTOGRAM
+/*------------------------------------------------------------------*/
+/*
+ * This function calculates a histogram of the signal level.
+ * As the noise is quite constant, it's like doing it on the SNR.
+ * We have defined a set of intervals (lp->his_range), and each time
+ * the level falls in one of them, we increment its count (lp->his_sum).
+ * With this histogram you may detect if one WaveLAN is really weak,
+ * or you may also calculate the mean and standard deviation of the level.
+ */
+static inline void
+wl_his_gather(device * dev,
+ u_char * stats) /* Statistics to gather */
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_char level = stats[0] & MMR_SIGNAL_LVL;
+ int i;
+
+ /* Find the correct interval */
+ i = 0;
+ while((i < (lp->his_number - 1)) && (level >= lp->his_range[i++]))
+ ;
+
+ /* Increment interval counter */
+ (lp->his_sum[i])++;
+}
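+
+/*------------------------------------------------------------------*/
+/*
+ * Illustrative sketch (added for clarity, not part of the original
+ * driver): one possible use of the histogram gathered above is to
+ * estimate the mean signal level from the interval counters. The
+ * helper name (wl_his_mean) is hypothetical; it only relies on the
+ * lp->his_number, lp->his_range and lp->his_sum fields used above.
+ */
+static inline int
+wl_his_mean(net_local * lp)
+{
+  long sum = 0; /* Weighted sum of interval lower bounds */
+  long count = 0; /* Total number of samples gathered */
+  int i;
+
+  for(i = 0; i < lp->his_number; i++)
+    {
+      count += lp->his_sum[i];
+      /* Interval 0 starts at level 0, interval i at lp->his_range[i - 1] */
+      sum += lp->his_sum[i] * (long) ((i > 0) ? lp->his_range[i - 1] : 0);
+    }
+
+  return (count > 0) ? (int) (sum / count) : 0;
+}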
+#endif /* HISTOGRAM */
+
+/*------------------------------------------------------------------*/
+/*
+ * Perform ioctl: config & info stuff
+ * This is where the wireless extensions (iwconfig) are handled
+ */
+static int
+wavelan_ioctl(struct device * dev, /* device on which the ioctl is applied */
+ struct ifreq * rq, /* data passed */
+ int cmd) /* ioctl number */
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv; /* lp is not unused */
+ struct iwreq * wrq = (struct iwreq *) rq;
+ psa_t psa;
+ mm_t m;
+ unsigned long x;
+ int ret = 0;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_ioctl(cmd=0x%X)\n", dev->name, cmd);
+#endif
+
+ /* Disable interrupts & save flags */
+ x = wv_splhi();
+
+ /* Look what is the request */
+ switch(cmd)
+ {
+ /* --------------- WIRELESS EXTENSIONS --------------- */
+
+ case SIOCGIWNAME:
+ strcpy(wrq->u.name, "Wavelan");
+ break;
+
+ case SIOCSIWNWID:
+ /* Set NWID in WaveLAN */
+ if(wrq->u.nwid.on)
+ {
+ /* Set NWID in psa */
+ psa.psa_nwid[0] = (wrq->u.nwid.nwid & 0xFF00) >> 8;
+ psa.psa_nwid[1] = wrq->u.nwid.nwid & 0xFF;
+ psa.psa_nwid_select = 0x01;
+ psa_write(ioaddr, lp->hacr, (char *)psa.psa_nwid - (char *)&psa,
+ (unsigned char *)psa.psa_nwid, 3);
+
+ /* Set NWID in mmc */
+ m.w.mmw_netw_id_l = wrq->u.nwid.nwid & 0xFF;
+ m.w.mmw_netw_id_h = (wrq->u.nwid.nwid & 0xFF00) >> 8;
+ mmc_write(ioaddr, (char *)&m.w.mmw_netw_id_l - (char *)&m,
+ (unsigned char *)&m.w.mmw_netw_id_l, 2);
+ mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel), 0x00);
+ }
+ else
+ {
+ /* Disable nwid in the psa */
+ psa.psa_nwid_select = 0x00;
+ psa_write(ioaddr, lp->hacr,
+ (char *)&psa.psa_nwid_select - (char *)&psa,
+ (unsigned char *)&psa.psa_nwid_select, 1);
+
+ /* Disable nwid in the mmc (no filtering) */
+ mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel), MMW_LOOPT_SEL_DIS_NWID);
+ }
+ break;
+
+ case SIOCGIWNWID:
+ /* Read the NWID */
+ psa_read(ioaddr, lp->hacr, (char *)psa.psa_nwid - (char *)&psa,
+ (unsigned char *)psa.psa_nwid, 3);
+ wrq->u.nwid.nwid = (psa.psa_nwid[0] << 8) + psa.psa_nwid[1];
+ wrq->u.nwid.on = psa.psa_nwid_select;
+ break;
+
+ case SIOCSIWFREQ:
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable) */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ ret = wv_set_frequency(ioaddr, &(wrq->u.freq));
+ else
+ ret = -EOPNOTSUPP;
+ break;
+
+ case SIOCGIWFREQ:
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
+ * (does it work for everybody ??? - especially old cards...) */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ unsigned short freq;
+
+ /* Ask the EEPROM to read the frequency from the first area */
+ fee_read(ioaddr, 0x00 /* 1st area - frequency... */,
+ &freq, 1);
+ wrq->u.freq.m = ((freq >> 5) * 5 + 24000L) * 10000;
+ wrq->u.freq.e = 1;
+ }
+ else
+ {
+ int bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
+
+ psa_read(ioaddr, lp->hacr, (char *)&psa.psa_subband - (char *)&psa,
+ (unsigned char *)&psa.psa_subband, 1);
+
+ if(psa.psa_subband <= 4)
+ {
+ wrq->u.freq.m = bands[psa.psa_subband];
+ wrq->u.freq.e = (psa.psa_subband != 0);
+ }
+ else
+ ret = -EOPNOTSUPP;
+ }
+ break;
+
+ case SIOCSIWSENS:
+ /* Set the level threshold */
+ if(!suser())
+ return -EPERM;
+ psa.psa_thr_pre_set = wrq->u.sensitivity & 0x3F;
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_thr_pre_set - (char *)&psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ mmc_out(ioaddr, mmwoff(0, mmw_thr_pre_set), psa.psa_thr_pre_set);
+ break;
+
+ case SIOCGIWSENS:
+ /* Read the level threshold */
+ psa_read(ioaddr, lp->hacr, (char *)&psa.psa_thr_pre_set - (char *)&psa,
+ (unsigned char *) &psa.psa_thr_pre_set, 1);
+ wrq->u.sensitivity = psa.psa_thr_pre_set & 0x3F;
+ break;
+
+ case SIOCSIWENCODE:
+ /* Set encryption key */
+ if(!mmc_encr(ioaddr))
+ {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if(wrq->u.encoding.method)
+ { /* enable encryption */
+ int i;
+ long long key = wrq->u.encoding.code;
+
+ for(i = 7; i >= 0; i--)
+ {
+ psa.psa_encryption_key[i] = key & 0xFF;
+ key >>= 8;
+ }
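+ /* Illustrative note (not from the original driver): for a key of
+  * 0x0123456789ABCDEF the loop above stores the most significant
+  * byte first, i.e. psa_encryption_key[0..7] =
+  * 01 23 45 67 89 AB CD EF. */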
+ psa.psa_encryption_select = 1;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select - (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 8+1);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_encr_enable),
+ MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE);
+ mmc_write(ioaddr, mmwoff(0, mmw_encr_key),
+ (unsigned char *) &psa.psa_encryption_key, 8);
+ }
+ else
+ { /* disable encryption */
+ psa.psa_encryption_select = 0;
+ psa_write(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select - (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 1);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_encr_enable), 0);
+ }
+ break;
+
+ case SIOCGIWENCODE:
+ /* Read the encryption key */
+ if(!mmc_encr(ioaddr))
+ {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ /* only super-user can see encryption key */
+ if(!suser())
+ {
+ ret = -EPERM;
+ break;
+ }
+ else
+ {
+ int i;
+ long long key = 0;
+
+ psa_read(ioaddr, lp->hacr,
+ (char *) &psa.psa_encryption_select - (char *) &psa,
+ (unsigned char *) &psa.psa_encryption_select, 1+8);
+ for(i = 0; i < 8; i++)
+ {
+ key <<= 8;
+ key += psa.psa_encryption_key[i];
+ }
+ wrq->u.encoding.code = key;
+
+ /* encryption is enabled */
+ if(psa.psa_encryption_select)
+ wrq->u.encoding.method = mmc_encr(ioaddr);
+ else
+ wrq->u.encoding.method = 0;
+ }
+ break;
+
+ case SIOCGIWRANGE:
+ /* basic checking */
+ if(wrq->u.data.pointer != (caddr_t) 0)
+ {
+ struct iw_range range;
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(struct iw_range));
+ if(ret)
+ break;
+
+ /* Set the length (useless: it's constant...) */
+ wrq->u.data.length = sizeof(struct iw_range);
+
+ /* Set information in the range struct */
+ range.throughput = 1.6 * 1024 * 1024; /* don't argue on this ! */
+ range.min_nwid = 0x0000;
+ range.max_nwid = 0xFFFF;
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ range.num_channels = 10;
+ range.num_frequency = wv_frequency_list(ioaddr, range.freq,
+ IW_MAX_FREQUENCIES);
+ }
+ else
+ range.num_channels = range.num_frequency = 0;
+
+ range.sensitivity = 0x3F;
+ range.max_qual.qual = MMR_SGNL_QUAL;
+ range.max_qual.level = MMR_SIGNAL_LVL;
+ range.max_qual.noise = MMR_SILENCE_LVL;
+
+ /* Copy structure to the user buffer */
+ copy_to_user(wrq->u.data.pointer, &range,
+ sizeof(struct iw_range));
+ }
+ break;
+
+ case SIOCGIWPRIV:
+ /* Basic checking... */
+ if(wrq->u.data.pointer != (caddr_t) 0)
+ {
+ struct iw_priv_args priv[] =
+ { /* cmd, set_args, get_args, name */
+ { SIOCSIPQTHR, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setqualthr" },
+ { SIOCGIPQTHR, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getqualthr" },
+
+ { SIOCSIPHISTO, IW_PRIV_TYPE_BYTE | 16, 0, "sethisto" },
+ { SIOCGIPHISTO, 0, IW_PRIV_TYPE_INT | 16, "gethisto" },
+ };
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(priv));
+ if(ret)
+ break;
+
+ /* Set the number of available ioctls */
+ wrq->u.data.length = 4;
+
+ /* Copy structure to the user buffer */
+ copy_to_user(wrq->u.data.pointer, (u_char *) priv,
+ sizeof(priv));
+ }
+ break;
+
+#ifdef WIRELESS_SPY
+ case SIOCSIWSPY:
+ /* Set the spy list */
+
+ /* Check the number of addresses */
+ if(wrq->u.data.length > IW_MAX_SPY)
+ {
+ ret = -E2BIG;
+ break;
+ }
+ lp->spy_number = wrq->u.data.length;
+
+ /* If there are some addresses to copy */
+ if(lp->spy_number > 0)
+ {
+ struct sockaddr address[IW_MAX_SPY];
+ int i;
+
+ /* Verify where the user has set his addresses */
+ ret = verify_area(VERIFY_READ, wrq->u.data.pointer,
+ sizeof(struct sockaddr) * lp->spy_number);
+ if(ret)
+ break;
+ /* Copy addresses to the driver */
+ copy_from_user(address, wrq->u.data.pointer,
+ sizeof(struct sockaddr) * lp->spy_number);
+
+ /* Copy addresses to the lp structure */
+ for(i = 0; i < lp->spy_number; i++)
+ {
+ memcpy(lp->spy_address[i], address[i].sa_data,
+ WAVELAN_ADDR_SIZE);
+ }
+
+ /* Reset structure... */
+ memset(lp->spy_stat, 0x00, sizeof(iw_qual) * IW_MAX_SPY);
+
+#ifdef DEBUG_IOCTL_INFO
+ printk(KERN_DEBUG "SetSpy - Set of new addresses is :\n");
+ for(i = 0; i < wrq->u.data.length; i++)
+ printk(KERN_DEBUG "%02X:%02X:%02X:%02X:%02X:%02X \n",
+ lp->spy_address[i][0],
+ lp->spy_address[i][1],
+ lp->spy_address[i][2],
+ lp->spy_address[i][3],
+ lp->spy_address[i][4],
+ lp->spy_address[i][5]);
+#endif /* DEBUG_IOCTL_INFO */
+ }
+
+ break;
+
+ case SIOCGIWSPY:
+ /* Get the spy list and spy stats */
+
+ /* Set the number of addresses */
+ wrq->u.data.length = lp->spy_number;
+
+ /* If the user wants to have the addresses back... */
+ if((lp->spy_number > 0) && (wrq->u.data.pointer != (caddr_t) 0))
+ {
+ struct sockaddr address[IW_MAX_SPY];
+ int i;
+
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ (sizeof(iw_qual) + sizeof(struct sockaddr))
+ * IW_MAX_SPY);
+ if(ret)
+ break;
+
+ /* Copy addresses from the lp structure */
+ for(i = 0; i < lp->spy_number; i++)
+ {
+ memcpy(address[i].sa_data, lp->spy_address[i],
+ WAVELAN_ADDR_SIZE);
+ address[i].sa_family = AF_UNIX;
+ }
+
+ /* Copy addresses to the user buffer */
+ copy_to_user(wrq->u.data.pointer, address,
+ sizeof(struct sockaddr) * lp->spy_number);
+
+ /* Copy stats to the user buffer (just after) */
+ copy_to_user(wrq->u.data.pointer +
+ (sizeof(struct sockaddr) * lp->spy_number),
+ lp->spy_stat, sizeof(iw_qual) * lp->spy_number);
+
+ /* Reset updated flags */
+ for(i = 0; i < lp->spy_number; i++)
+ lp->spy_stat[i].updated = 0x0;
+ } /* if(pointer != NULL) */
+
+ break;
+#endif /* WIRELESS_SPY */
+
+ /* ------------------ PRIVATE IOCTL ------------------ */
+
+ case SIOCSIPQTHR:
+ if(!suser())
+ return -EPERM;
+ psa.psa_quality_thr = *(wrq->u.name) & 0x0F;
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_quality_thr - (char *)&psa,
+ (unsigned char *)&psa.psa_quality_thr, 1);
+ mmc_out(ioaddr, mmwoff(0, mmw_quality_thr), psa.psa_quality_thr);
+ break;
+
+ case SIOCGIPQTHR:
+ psa_read(ioaddr, lp->hacr, (char *)&psa.psa_quality_thr - (char *)&psa,
+ (unsigned char *)&psa.psa_quality_thr, 1);
+ *(wrq->u.name) = psa.psa_quality_thr & 0x0F;
+ break;
+
+#ifdef HISTOGRAM
+ case SIOCSIPHISTO:
+ /* Verify that the user is root */
+ if(!suser())
+ return -EPERM;
+
+ /* Check the number of intervals */
+ if(wrq->u.data.length > 16)
+ {
+ ret = -E2BIG;
+ break;
+ }
+ lp->his_number = wrq->u.data.length;
+
+ /* If there are some intervals to copy */
+ if(lp->his_number > 0)
+ {
+ /* Verify where the user has put the intervals */
+ ret = verify_area(VERIFY_READ, wrq->u.data.pointer,
+ sizeof(char) * lp->his_number);
+ if(ret)
+ break;
+ /* Copy interval ranges to the driver */
+ copy_from_user(lp->his_range, wrq->u.data.pointer,
+ sizeof(char) * lp->his_number);
+
+ /* Reset structure... */
+ memset(lp->his_sum, 0x00, sizeof(long) * 16);
+ }
+ break;
+
+ case SIOCGIPHISTO:
+ /* Set the number of intervals */
+ wrq->u.data.length = lp->his_number;
+
+ /* Give back the distribution statistics */
+ if((lp->his_number > 0) && (wrq->u.data.pointer != (caddr_t) 0))
+ {
+ /* Verify the user buffer */
+ ret = verify_area(VERIFY_WRITE, wrq->u.data.pointer,
+ sizeof(long) * 16);
+ if(ret)
+ break;
+
+ /* Copy data to the user buffer */
+ copy_to_user(wrq->u.data.pointer, lp->his_sum,
+ sizeof(long) * lp->his_number);
+ } /* if(pointer != NULL) */
+ break;
+#endif /* HISTOGRAM */
+
+ /* ------------------- OTHER IOCTL ------------------- */
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ /* Enable interrupts, restore flags */
+ wv_splx(x);
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_ioctl()\n", dev->name);
+#endif
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Get wireless statistics
+ * Called by /proc/net/wireless
+ */
+static iw_stats *
+wavelan_get_wireless_stats(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *) dev->priv;
+ mmr_t m;
+ iw_stats * wstats;
+ unsigned long x;
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_get_wireless_stats()\n", dev->name);
+#endif
+
+ /* Disable interrupts & save flags */
+ x = wv_splhi();
+
+ if(lp == (net_local *) NULL)
+ return (iw_stats *) NULL;
+ wstats = &lp->wstats;
+
+ /* Get data from the mmc */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+
+ mmc_read(ioaddr, mmroff(0, mmr_dce_status), &m.mmr_dce_status, 1);
+ mmc_read(ioaddr, mmroff(0, mmr_wrong_nwid_l), &m.mmr_wrong_nwid_l, 2);
+ mmc_read(ioaddr, mmroff(0, mmr_thr_pre_set), &m.mmr_thr_pre_set, 4);
+
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+ /* Copy data to wireless stuff */
+ wstats->status = m.mmr_dce_status;
+ wstats->qual.qual = m.mmr_sgnl_qual & MMR_SGNL_QUAL;
+ wstats->qual.level = m.mmr_signal_lvl & MMR_SIGNAL_LVL;
+ wstats->qual.noise = m.mmr_silence_lvl & MMR_SILENCE_LVL;
+ wstats->qual.updated = (((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 7) |
+ ((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 6) |
+ ((m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) >> 5));
+ wstats->discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
+ wstats->discard.code = 0L;
+ wstats->discard.misc = 0L;
+
+ /* Enable interrupts & restore flags */
+ wv_splx(x);
+
+#ifdef DEBUG_IOCTL_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_get_wireless_stats()\n", dev->name);
+#endif
+ return &lp->wstats;
+}
+#endif /* WIRELESS_EXT */
+
+/************************* PACKET RECEPTION *************************/
+/*
+ * This part deals with receiving the packets.
+ * The interrupt handler gets an interrupt when a packet has been
+ * successfully received and calls this part.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does the actual copying of data (including the Ethernet
+ * header structure) from the WaveLAN card to an sk_buff chain that
+ * will be passed up to the network interface layer. NOTE: we
+ * currently don't handle trailer protocols (neither does the rest of
+ * the network interface), so if that is needed, it will (at least in
+ * part) be added here. The contents of the receive ring buffer are
+ * copied to a message chain that is then passed to the kernel.
+ *
+ * Note: if any errors occur, the packet is "dropped on the floor"
+ * (called by wv_packet_rcv())
+ */
+static inline void
+wv_packet_read(device * dev,
+ u_short buf_off,
+ int sksize)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ struct sk_buff * skb;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_read(0x%X, %d)\n",
+ dev->name, buf_off, sksize);
+#endif
+
+ /* Allocate buffer for the data */
+ if((skb = dev_alloc_skb(sksize)) == (struct sk_buff *) NULL)
+ {
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n",
+ dev->name, sksize);
+#endif
+ lp->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = dev;
+
+ /* Copy the packet to the buffer */
+ obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize);
+ skb->protocol=eth_type_trans(skb, dev);
+
+#ifdef DEBUG_RX_INFO
+ wv_packet_info(skb->mac.raw, sksize, dev->name, "wv_packet_read");
+#endif /* DEBUG_RX_INFO */
+
+ /* Statistics gathering & associated stuff.
+ * It seems a bit messy with all the defines, but it's really simple... */
+#if defined(WIRELESS_SPY) || defined(HISTOGRAM)
+ if(
+#ifdef WIRELESS_SPY
+ (lp->spy_number > 0) ||
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ (lp->his_number > 0) ||
+#endif /* HISTOGRAM */
+ 0)
+ {
+ u_char stats[3]; /* signal level, noise level, signal quality */
+
+ /* read signal level, silence level and signal quality bytes */
+ /* Note: in the PCMCIA hardware, these are part of the frame. It seems
+ * that for the ISA hardware, it's nowhere to be found in the frame,
+ * so I'm obliged to do this (it has a side effect on /proc/net/wireless).
+ * Any ideas? */
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
+ mmc_read(ioaddr, mmroff(0, mmr_signal_lvl), stats, 3);
+ mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
+
+#ifdef DEBUG_RX_INFO
+ printk(KERN_DEBUG "%s: wv_packet_read(): Signal level %d/63, Silence level %d/63, signal quality %d/16\n",
+ dev->name, stats[0] & 0x3F, stats[1] & 0x3F, stats[2] & 0x0F);
+#endif
+
+ /* Spying stuff */
+#ifdef WIRELESS_SPY
+ wl_spy_gather(dev, skb->mac.raw + WAVELAN_ADDR_SIZE, stats);
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ wl_his_gather(dev, stats);
+#endif /* HISTOGRAM */
+ }
+#endif /* defined(WIRELESS_SPY) || defined(HISTOGRAM) */
+
+ /*
+ * Hand the packet to the Network Module
+ */
+ netif_rx(skb);
+
+ lp->stats.rx_packets++;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Transfer as many packets as we can
+ * from the device RAM.
+ * Called by the interrupt handler.
+ */
+static inline void
+wv_receive(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv;
+ int nreaped = 0;
+
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_receive()\n", dev->name);
+#endif
+
+ /* Loop on each received packet */
+ for(;;)
+ {
+ fd_t fd;
+ rbd_t rbd;
+ ushort pkt_len;
+
+ obram_read(ioaddr, lp->rx_head, (unsigned char *) &fd, sizeof(fd));
+
+ /* If the current frame is not complete, we have reached the end... */
+ if((fd.fd_status & FD_STATUS_C) != FD_STATUS_C)
+ break; /* This is how we exit the loop */
+
+ nreaped++;
+
+ /* Check if frame correctly received */
+ if((fd.fd_status & (FD_STATUS_B | FD_STATUS_OK)) !=
+ (FD_STATUS_B | FD_STATUS_OK))
+ {
+ /*
+ * Not sure about this one -- it does not seem
+ * to be an error so we will keep quiet about it.
+ */
+#ifndef IGNORE_NORMAL_XMIT_ERRS
+#ifdef DEBUG_RX_ERROR
+ if((fd.fd_status & FD_STATUS_B) != FD_STATUS_B)
+ printk(KERN_INFO "%s: wv_receive(): frame not consumed by RU.\n",
+ dev->name);
+#endif
+#endif /* IGNORE_NORMAL_XMIT_ERRS */
+
+#ifdef DEBUG_RX_ERROR
+ if((fd.fd_status & FD_STATUS_OK) != FD_STATUS_OK)
+ printk(KERN_INFO "%s: wv_receive(): frame not received successfully.\n",
+ dev->name);
+#endif
+ }
+
+ /* Were there problems in processing the frame? Let's check. */
+ if((fd.fd_status & (FD_STATUS_S6 | FD_STATUS_S7 | FD_STATUS_S8 |
+ FD_STATUS_S9 | FD_STATUS_S10 | FD_STATUS_S11))
+ != 0)
+ {
+ lp->stats.rx_errors++;
+
+#ifdef DEBUG_RX_ERROR
+ if((fd.fd_status & FD_STATUS_S6) != 0)
+ printk(KERN_INFO "%s: wv_receive(): no EOF flag.\n", dev->name);
+#endif
+
+ if((fd.fd_status & FD_STATUS_S7) != 0)
+ {
+ lp->stats.rx_length_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): frame too short.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S8) != 0)
+ {
+ lp->stats.rx_over_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): rx DMA overrun.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S9) != 0)
+ {
+ lp->stats.rx_fifo_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): ran out of resources.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S10) != 0)
+ {
+ lp->stats.rx_frame_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): alignment error.\n",
+ dev->name);
+#endif
+ }
+
+ if((fd.fd_status & FD_STATUS_S11) != 0)
+ {
+ lp->stats.rx_crc_errors++;
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): CRC error.\n", dev->name);
+#endif
+ }
+ }
+
+ /* Does the frame contain a pointer to the data? Let's check. */
+ if(fd.fd_rbd_offset == I82586NULL)
+#ifdef DEBUG_RX_ERROR
+ printk(KERN_INFO "%s: wv_receive(): frame has no data.\n", dev->name);
+#endif
+ else
+ {
+ obram_read(ioaddr, fd.fd_rbd_offset,
+ (unsigned char *) &rbd, sizeof(rbd));
+
+#ifdef DEBUG_RX_ERROR
+ if((rbd.rbd_status & RBD_STATUS_EOF) != RBD_STATUS_EOF)
+ printk(KERN_INFO "%s: wv_receive(): missing EOF flag.\n",
+ dev->name);
+
+ if((rbd.rbd_status & RBD_STATUS_F) != RBD_STATUS_F)
+ printk(KERN_INFO "%s: wv_receive(): missing F flag.\n",
+ dev->name);
+#endif
+
+ pkt_len = rbd.rbd_status & RBD_STATUS_ACNT;
+
+ /* Read the packet and transmit to Linux */
+ wv_packet_read(dev, rbd.rbd_bufl, pkt_len);
+ } /* if frame has data */
+
+ fd.fd_status = 0;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_status),
+ (unsigned char *) &fd.fd_status, sizeof(fd.fd_status));
+
+ fd.fd_command = FD_COMMAND_EL;
+ obram_write(ioaddr, fdoff(lp->rx_head, fd_command),
+ (unsigned char *) &fd.fd_command, sizeof(fd.fd_command));
+
+ fd.fd_command = 0;
+ obram_write(ioaddr, fdoff(lp->rx_last, fd_command),
+ (unsigned char *) &fd.fd_command, sizeof(fd.fd_command));
+
+ lp->rx_last = lp->rx_head;
+ lp->rx_head = fd.fd_link_offset;
+ } /* for(;;) -> loop on all frames */
+
+#ifdef DEBUG_RX_INFO
+ if(nreaped > 1)
+ printk(KERN_DEBUG "%s: wv_receive(): reaped %d\n", dev->name, nreaped);
+#endif
+#ifdef DEBUG_RX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_receive()\n", dev->name);
+#endif
+}
+
+/*********************** PACKET TRANSMISSION ***********************/
+/*
+ * This part deals with sending packets through the WaveLAN
+ *
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine fills in the appropriate registers and memory
+ * locations on the WaveLAN card and starts the card off on
+ * the transmit.
+ *
+ * The principle:
+ * Each block contains a transmit command, a nop command,
+ * a transmit block descriptor and a buffer.
+ * The CU reads the transmit block, which points to the tbd,
+ * then reads the tbd and the contents of the buffer.
+ * When it has finished with them, it goes to the next command,
+ * which in our case is the nop. The nop points to itself,
+ * so the CU stops there.
+ * When we add the next block, we modify the previous nop
+ * to make it point to the new tx command.
+ * Simple, isn't it?
+ *
+ * (called in wavelan_packet_xmit())
+ */
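+/*
+ * Sketch of one transmit block (an illustration added for clarity, not
+ * part of the original driver; offsets follow the address computations
+ * in the function below):
+ *
+ *   txblock  -> +------------------+
+ *               | ac_tx_t   (tx)   |  transmit command, linked to the nop
+ *   nop_addr -> +------------------+
+ *               | ac_nop_t  (nop)  |  nop looping on itself until patched
+ *   tbd_addr -> +------------------+
+ *               | tbd_t     (tbd)  |  buffer descriptor pointing to buf
+ *   buf_addr -> +------------------+
+ *               | data buffer      |  the packet, padded to >= ETH_ZLEN
+ *               +------------------+
+ */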
+static inline void
+wv_packet_write(device * dev,
+ void * buf,
+ short length)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ unsigned short txblock;
+ unsigned short txpred;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short buf_addr;
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ int clen = length;
+ unsigned long x;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wv_packet_write(%d)\n", dev->name, length);
+#endif
+
+ /* Check if we need some padding */
+ if(clen < ETH_ZLEN)
+ clen = ETH_ZLEN;
+
+ x = wv_splhi();
+
+ /* Calculate addresses of next block and previous block */
+ txblock = lp->tx_first_free;
+ txpred = txblock - TXBLOCKZ;
+ if(txpred < OFFSET_CU)
+ txpred += NTXBLOCKS * TXBLOCKZ;
+ lp->tx_first_free += TXBLOCKZ;
+ if(lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
+
+/*
+if (lp->tx_n_in_use > 0)
+ printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
+*/
+
+ lp->tx_n_in_use++;
+
+ /* Calculate addresses of the different parts of the block */
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ /*
+ * Transmit command.
+ */
+ tx.tx_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
+ (unsigned char *) &tx.tx_h.ac_status,
+ sizeof(tx.tx_h.ac_status));
+
+ /*
+ * NOP command.
+ */
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /*
+ * Transmit buffer descriptor
+ */
+ tbd.tbd_status = TBD_STATUS_EOF | (TBD_STATUS_ACNT & clen);
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *)&tbd, sizeof(tbd));
+
+ /*
+ * Data
+ */
+ obram_write(ioaddr, buf_addr, buf, clen);
+
+ /*
+ * Overwrite the predecessor NOP link
+ * so that it points to this txblock.
+ */
+ nop_addr = txpred + sizeof(tx);
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *)&nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = txblock;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* If watchdog not already active, activate it... */
+ if(lp->watchdog.prev == (timer_list *) NULL)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+
+ if(lp->tx_first_in_use == I82586NULL)
+ lp->tx_first_in_use = txblock;
+
+ if(lp->tx_n_in_use < NTXBLOCKS - 1)
+ dev->tbusy = 0;
+
+ wv_splx(x);
+
+#ifdef DEBUG_TX_INFO
+ wv_packet_info((u_char *) buf, length, dev->name, "wv_packet_write");
+#endif /* DEBUG_TX_INFO */
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wv_packet_write()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine is called when we want to send a packet (NET3 callback)
+ * In this routine, we check if the hardware is ready to accept
+ * the packet. We also prevent reentrance. Then, we call the function
+ * to send the packet...
+ */
+static int
+wavelan_packet_xmit(struct sk_buff * skb,
+ device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_packet_xmit(0x%X)\n", dev->name,
+ (unsigned) skb);
+#endif
+
+ /* This flag indicates that the hardware can't perform a transmission.
+ * Theoretically, NET3 checks it before sending a packet to the driver,
+ * but in fact it never does that and polls continuously.
+ * As the watchdog will abort overly long transmissions, we are quite safe.
+ */
+ if(dev->tbusy)
+ return 1;
+
+ /*
+ * If some higher layer thinks we've missed
+ * a tx-done interrupt we are passed NULL.
+ * Caution: dev_tint() handles the cli()/sti() itself.
+ */
+ if(skb == (struct sk_buff *)0)
+ {
+#ifdef DEBUG_TX_ERROR
+ printk(KERN_INFO "%s: wavelan_packet_xmit(): skb == NULL\n", dev->name);
+#endif
+ dev_tint(dev);
+ return 0;
+ }
+
+ /*
+ * Block a timer-based transmit from overlapping.
+ * In other words, prevent reentering this routine.
+ */
+ if(set_bit(0, (void *)&dev->tbusy) != 0)
+#ifdef DEBUG_TX_ERROR
+ printk(KERN_INFO "%s: Transmitter access conflict.\n", dev->name);
+#endif
+ else
+ {
+ /* If somebody has asked to reconfigure the controller,
+ * we can do it now.
+ */
+ if(lp->reconfig_82586)
+ {
+ wv_82586_config(dev);
+ if(dev->tbusy)
+ return 1;
+ }
+
+#ifdef DEBUG_TX_ERROR
+ if(skb->next)
+ printk(KERN_INFO "skb has next\n");
+#endif
+
+ wv_packet_write(dev, skb->data, skb->len);
+ }
+
+ dev_kfree_skb(skb, FREE_WRITE);
+
+#ifdef DEBUG_TX_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*********************** HARDWARE CONFIGURATION ***********************/
+/*
+ * This part does the real job of starting and configuring the hardware.
+ */
+
+/*--------------------------------------------------------------------*/
+/*
+ * Routine to initialize the Modem Management Controller.
+ * (called by wv_hw_reset())
+ */
+static inline int
+wv_mmc_init(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ net_local * lp = (net_local *)dev->priv;
+ psa_t psa;
+ mmw_t m;
+ int configured;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_mmc_init()\n", dev->name);
+#endif
+
+ /* Read the parameter storage area */
+ psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
+
+#ifdef USE_PSA_CONFIG
+ configured = psa.psa_conf_status & 1;
+#else
+ configured = 0;
+#endif
+
+ /* If the PSA is not configured */
+ if(!configured)
+ {
+ /* The user will be able to configure the NWID later (with iwconfig) */
+ psa.psa_nwid[0] = 0;
+ psa.psa_nwid[1] = 0;
+
+ /* no NWID checking since NWID is not set */
+ psa.psa_nwid_select = 0;
+
+ /* Disable encryption */
+ psa.psa_encryption_select = 0;
+
+ /* Set to standard values
+ * 0x04 for AT,
+ * 0x01 for MCA,
+ * 0x04 for PCMCIA and 2.00 card (AT&T 407-024689/E document)
+ */
+ if (psa.psa_comp_number & 1)
+ psa.psa_thr_pre_set = 0x01;
+ else
+ psa.psa_thr_pre_set = 0x04;
+ psa.psa_quality_thr = 0x03;
+
+ /* It is configured */
+ psa.psa_conf_status |= 1;
+
+#ifdef USE_PSA_CONFIG
+ /* Write the psa */
+ psa_write(ioaddr, lp->hacr, (char *)psa.psa_nwid - (char *)&psa,
+ (unsigned char *)psa.psa_nwid, 4);
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_thr_pre_set - (char *)&psa,
+ (unsigned char *)&psa.psa_thr_pre_set, 1);
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_quality_thr - (char *)&psa,
+ (unsigned char *)&psa.psa_quality_thr, 1);
+ psa_write(ioaddr, lp->hacr, (char *)&psa.psa_conf_status - (char *)&psa,
+ (unsigned char *)&psa.psa_conf_status, 1);
+#endif
+ }
+
+ /* Zero the mmc structure */
+ memset(&m, 0x00, sizeof(m));
+
+ /* Copy PSA info to the mmc */
+ m.mmw_netw_id_l = psa.psa_nwid[1];
+ m.mmw_netw_id_h = psa.psa_nwid[0];
+
+ if(psa.psa_nwid_select & 1)
+ m.mmw_loopt_sel = 0x00;
+ else
+ m.mmw_loopt_sel = MMW_LOOPT_SEL_DIS_NWID;
+
+ memcpy(&m.mmw_encr_key, &psa.psa_encryption_key,
+ sizeof(m.mmw_encr_key));
+
+ if(psa.psa_encryption_select)
+ m.mmw_encr_enable = MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE;
+ else
+ m.mmw_encr_enable = 0;
+
+ m.mmw_thr_pre_set = psa.psa_thr_pre_set & 0x3F;
+ m.mmw_quality_thr = psa.psa_quality_thr & 0x0F;
+
+ /* Missing: encryption stuff... */
+
+ /*
+ * Set default modem control parameters.
+ * See NCR document 407-0024326 Rev. A.
+ */
+ m.mmw_jabber_enable = 0x01;
+ m.mmw_anten_sel = MMW_ANTEN_SEL_ALG_EN;
+ m.mmw_ifs = 0x20;
+ m.mmw_mod_delay = 0x04;
+ m.mmw_jam_time = 0x38;
+
+ m.mmw_encr_enable = 0;
+ m.mmw_des_io_invert = 0;
+ m.mmw_freeze = 0;
+ m.mmw_decay_prm = 0;
+ m.mmw_decay_updat_prm = 0;
+
+ /* Write all info to MMC */
+ mmc_write(ioaddr, 0, (u_char *)&m, sizeof(m));
+
+ /* The following code starts the modem of the 2.00 frequency
+ * selectable cards at power on. It's not strictly needed for the
+ * following boots.
+ * The original patch was by Joe Finney for the PCMCIA driver, but
+ * I've cleaned it up a bit and added documentation.
+ * Thanks to Loeke Brederveld from Lucent for the info.
+ */
+
+ /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
+ * (does it work for everybody? -- especially old cards?) */
+ /* Note: WFREQSEL verifies that it is able to read a sensible
+ * frequency from EEPROM (address 0x00) and that MMR_FEE_STATUS_ID
+ * is 0xA (Xilinx version) or 0xB (Ariadne version).
+ * My test is more crude but does work. */
+ if(!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
+ (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
+ {
+ /* We must download the frequency parameters to the
+ * synthesizers (from the EEPROM - area 1)
+ * Note: as the EEPROM is auto-decremented, we set the end
+ * of the area... */
+ m.mmw_fee_addr = 0x0F;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(ioaddr, (char *)&m.mmw_fee_ctrl - (char *)&m,
+ (unsigned char *)&m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished */
+ fee_wait(ioaddr, 100, 100);
+
+#ifdef DEBUG_CONFIG_INFO
+ /* The frequency was in the last word downloaded. */
+ mmc_read(ioaddr, (char *)&m.mmw_fee_data_l - (char *)&m,
+ (unsigned char *)&m.mmw_fee_data_l, 2);
+
+ /* Print some info for the user. */
+ printk(KERN_DEBUG "%s: WaveLAN 2.00 recognised (frequency select) : Current frequency = %ld\n",
+ dev->name,
+ ((m.mmw_fee_data_h << 4) |
+ (m.mmw_fee_data_l >> 4)) * 5 / 2 + 24000L);
+#endif
+
+ /* We must now download the power adjust value (gain) to
+ * the synthesizers (from the EEPROM - area 7 - DAC) */
+ m.mmw_fee_addr = 0x61;
+ m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
+ mmc_write(ioaddr, (char *)&m.mmw_fee_ctrl - (char *)&m,
+ (unsigned char *)&m.mmw_fee_ctrl, 2);
+
+ /* Wait until the download is finished */
+ } /* if 2.00 card */
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_mmc_init()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Construct the fd and rbd structures.
+ * Start the receive unit.
+ * (called by wv_hw_reset())
+ */
+static inline int
+wv_ru_start(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_short scb_cs;
+ fd_t fd;
+ rbd_t rbd;
+ u_short rx;
+ u_short rx_next;
+ int i;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_ru_start()\n", dev->name);
+#endif
+
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status), (unsigned char *)&scb_cs, sizeof(scb_cs));
+ if((scb_cs & SCB_ST_RUS) == SCB_ST_RUS_RDY)
+ return 0;
+
+ lp->rx_head = OFFSET_RU;
+
+ for(i = 0, rx = lp->rx_head; i < NRXBLOCKS; i++, rx = rx_next)
+ {
+ rx_next = (i == NRXBLOCKS - 1) ? lp->rx_head : rx + RXBLOCKZ;
+
+ fd.fd_status = 0;
+ fd.fd_command = (i == NRXBLOCKS - 1) ? FD_COMMAND_EL : 0;
+ fd.fd_link_offset = rx_next;
+ fd.fd_rbd_offset = rx + sizeof(fd);
+ obram_write(ioaddr, rx, (unsigned char *)&fd, sizeof(fd));
+
+ rbd.rbd_status = 0;
+ rbd.rbd_next_rbd_offset = I82586NULL;
+ rbd.rbd_bufl = rx + sizeof(fd) + sizeof(rbd);
+ rbd.rbd_bufh = 0;
+ rbd.rbd_el_size = RBD_EL | (RBD_SIZE & MAXDATAZ);
+ obram_write(ioaddr, rx + sizeof(fd),
+ (unsigned char *) &rbd, sizeof(rbd));
+
+ lp->rx_last = rx;
+ }
+
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_rfa_offset),
+ (unsigned char *) &lp->rx_head, sizeof(lp->rx_head));
+
+ scb_cs = SCB_CMD_RUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for(i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+
+ if(i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wavelan_ru_start(): board not accepting command.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_ru_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Initialise the transmit blocks.
+ * Start the command unit executing the NOP
+ * self-loop of the first transmit block.
+ *
+ * Here, we create the list of send buffers used to transmit packets
+ * between the PC and the command unit. For each buffer, we create a
+ * buffer descriptor (pointing on the buffer), a transmit command
+ * (pointing to the buffer descriptor) and a NOP command.
+ * The transmit command is linked to the NOP, and the NOP to itself.
+ * When we have finished executing the transmit command, we
+ * then loop on the NOP. By relinking the NOP to a new command,
+ * we may send another buffer.
+ *
+ * (called by wv_hw_reset())
+ */
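+/*
+ * Illustration (added for clarity, not part of the original driver):
+ * after this routine runs, every block holds a tx command linked to its
+ * own nop, every nop loops on itself, and scb_cbl_offset points at the
+ * nop of the last block, so the CU simply spins on that nop until
+ * wv_packet_write() relinks it to a fresh tx command.
+ */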
+static inline int
+wv_cu_start(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ int i;
+ u_short txblock;
+ u_short first_nop;
+ u_short scb_cs;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_cu_start()\n", dev->name);
+#endif
+
+ lp->tx_first_free = OFFSET_CU;
+ lp->tx_first_in_use = I82586NULL;
+
+ for(i = 0, txblock = OFFSET_CU;
+ i < NTXBLOCKS;
+ i++, txblock += TXBLOCKZ)
+ {
+ ac_tx_t tx;
+ ac_nop_t nop;
+ tbd_t tbd;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short buf_addr;
+
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ buf_addr = tbd_addr + sizeof(tbd);
+
+ tx.tx_h.ac_status = 0;
+ tx.tx_h.ac_command = acmd_transmit | AC_CFLD_I;
+ tx.tx_h.ac_link = nop_addr;
+ tx.tx_tbd_offset = tbd_addr;
+ obram_write(ioaddr, tx_addr, (unsigned char *) &tx, sizeof(tx));
+
+ nop.nop_h.ac_status = 0;
+ nop.nop_h.ac_command = acmd_nop;
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, nop_addr, (unsigned char *) &nop, sizeof(nop));
+
+ tbd.tbd_status = TBD_STATUS_EOF;
+ tbd.tbd_next_bd_offset = I82586NULL;
+ tbd.tbd_bufl = buf_addr;
+ tbd.tbd_bufh = 0;
+ obram_write(ioaddr, tbd_addr, (unsigned char *) &tbd, sizeof(tbd));
+ }
+
+ first_nop = OFFSET_CU + (NTXBLOCKS - 1) * TXBLOCKZ + sizeof(ac_tx_t);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_cbl_offset),
+ (unsigned char *) &first_nop, sizeof(first_nop));
+
+ scb_cs = SCB_CMD_CUC_GO;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ for(i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &scb_cs, sizeof(scb_cs));
+ if (scb_cs == 0)
+ break;
+
+ udelay(10);
+ }
+
+ if(i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wavelan_cu_start(): board not accepting command.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+ lp->tx_n_in_use = 0;
+ dev->tbusy = 0;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_cu_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a standard config of the WaveLAN controller (i82586).
+ *
+ * It initialises the scp, iscp and scb structures.
+ * The first two are just pointers to the next.
+ * The last one is used for basic configuration and for basic
+ * communication (interrupt status).
+ *
+ * (called by wv_hw_reset())
+ */
+static inline int
+wv_82586_start(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ scp_t scp; /* system configuration pointer */
+ iscp_t iscp; /* intermediate scp */
+ scb_t scb; /* system control block */
+ ach_t cb; /* Action command header */
+ u_char zeroes[512];
+ int i;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_start()\n", dev->name);
+#endif
+
+ /*
+ * Clear the onboard RAM.
+ */
+ memset(&zeroes[0], 0x00, sizeof(zeroes));
+ for(i = 0; i < I82586_MEMZ; i += sizeof(zeroes))
+ obram_write(ioaddr, i, &zeroes[0], sizeof(zeroes));
+
+ /*
+ * Construct the command unit structures:
+ * scp, iscp, scb, cb.
+ */
+ memset(&scp, 0x00, sizeof(scp));
+ scp.scp_sysbus = SCP_SY_16BBUS;
+ scp.scp_iscpl = OFFSET_ISCP;
+ obram_write(ioaddr, OFFSET_SCP, (unsigned char *)&scp, sizeof(scp));
+
+ memset(&iscp, 0x00, sizeof(iscp));
+ iscp.iscp_busy = 1;
+ iscp.iscp_offset = OFFSET_SCB;
+ obram_write(ioaddr, OFFSET_ISCP, (unsigned char *)&iscp, sizeof(iscp));
+
+ /* Our first command is to reset the i82586. */
+ memset(&scb, 0x00, sizeof(scb));
+ scb.scb_command = SCB_CMD_RESET;
+ scb.scb_cbl_offset = OFFSET_CU;
+ scb.scb_rfa_offset = OFFSET_RU;
+ obram_write(ioaddr, OFFSET_SCB, (unsigned char *)&scb, sizeof(scb));
+
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* Wait for command to finish. */
+ for(i = 1000; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_ISCP, (unsigned char *) &iscp, sizeof(iscp));
+
+ if(iscp.iscp_busy == (unsigned short) 0)
+ break;
+
+ udelay(10);
+ }
+
+ if(i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_82586_start(): iscp_busy timeout.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+ /* Check command completion */
+ for(i = 15; i > 0; i--)
+ {
+ obram_read(ioaddr, OFFSET_SCB, (unsigned char *) &scb, sizeof(scb));
+
+ if (scb.scb_status == (SCB_ST_CX | SCB_ST_CNA))
+ break;
+
+ udelay(10);
+ }
+
+ if (i <= 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_82586_start(): status: expected 0x%02x, got 0x%02x.\n",
+ dev->name, SCB_ST_CX | SCB_ST_CNA, scb.scb_status);
+#endif
+ return -1;
+ }
+
+ wv_ack(dev);
+
+ /* Set the action command header. */
+ memset(&cb, 0x00, sizeof(cb));
+ cb.ac_command = AC_CFLD_EL | (AC_CFLD_CMD & acmd_diagnose);
+ cb.ac_link = OFFSET_CU;
+ obram_write(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+
+ if(wv_synchronous_cmd(dev, "diag()") == -1)
+ return -1;
+
+ obram_read(ioaddr, OFFSET_CU, (unsigned char *)&cb, sizeof(cb));
+ if(cb.ac_status & AC_SFLD_FAIL)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wv_82586_start(): i82586 Self Test failed.\n",
+ dev->name);
+#endif
+ return -1;
+ }
+
+#ifdef DEBUG_I82586_SHOW
+ wv_scb_show(ioaddr);
+#endif
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_start()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine does a standard configuration of the WaveLAN controller
+ * (i82586).
+ *
+ * This routine is a violent hack. We use the first free transmit block
+ * to make our configuration. In the buffer area, we create the three
+ * configuration commands (linked). We make the previous NOP point to
+ * the beginning of the buffer instead of the tx command. After, we go
+ * as usual to the NOP command.
+ * Note that only the last command (mc_set) will generate an interrupt.
+ *
+ * (called by wv_hw_reset(), wv_82586_reconfig())
+ */
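+/*
+ * Sketch of the command chain built below (an illustration added for
+ * clarity, not part of the original driver):
+ *
+ *   previous nop -> cfg (configure) -> ias (IA setup) -> mcs (mc setup,
+ *   raises the interrupt) -> this block's nop -> itself
+ *
+ * The three configuration actions take the place of the usual tx
+ * command in the block; only mcs carries AC_CFLD_I.
+ */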
+static void
+wv_82586_config(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ unsigned short txblock;
+ unsigned short txpred;
+ unsigned short tx_addr;
+ unsigned short nop_addr;
+ unsigned short tbd_addr;
+ unsigned short cfg_addr;
+ unsigned short ias_addr;
+ unsigned short mcs_addr;
+ ac_tx_t tx;
+ ac_nop_t nop;
+ ac_cfg_t cfg; /* Configure action */
+ ac_ias_t ias; /* IA-setup action */
+ ac_mcs_t mcs; /* Multicast setup */
+ struct dev_mc_list * dmi;
+ unsigned long x;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name);
+#endif
+
+ x = wv_splhi();
+
+ /* Calculate addresses of next block and previous block */
+ txblock = lp->tx_first_free;
+ txpred = txblock - TXBLOCKZ;
+ if(txpred < OFFSET_CU)
+ txpred += NTXBLOCKS * TXBLOCKZ;
+ lp->tx_first_free += TXBLOCKZ;
+ if(lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
+ lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
+
+ lp->tx_n_in_use++;
+
+ /* Calculate addresses of the different parts of the block. */
+ tx_addr = txblock;
+ nop_addr = tx_addr + sizeof(tx);
+ tbd_addr = nop_addr + sizeof(nop);
+ cfg_addr = tbd_addr + sizeof(tbd_t); /* beginning of the buffer */
+ ias_addr = cfg_addr + sizeof(cfg);
+ mcs_addr = ias_addr + sizeof(ias);
+
+ /*
+ * Transmit command
+ */
+ tx.tx_h.ac_status = 0xFFFF; /* Fake completion value */
+ obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
+ (unsigned char *) &tx.tx_h.ac_status,
+ sizeof(tx.tx_h.ac_status));
+
+ /*
+ * NOP command
+ */
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *) &nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = nop_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* Create a configure action */
+ memset(&cfg, 0x00, sizeof(cfg));
+
+#if 0
+ /*
+ * The default board configuration
+ */
+ cfg.fifolim_bytecnt = 0x080c;
+ cfg.addrlen_mode = 0x2600;
+ cfg.linprio_interframe = 0x7820; /* IFS=120, ACS=2 */
+ cfg.slot_time = 0xf00c; /* slottime=12 */
+ cfg.hardware = 0x0008; /* tx even without CD */
+ cfg.min_frame_len = 0x0040;
+#endif /* 0 */
+
+ /*
+ * For Linux we invert AC_CFG_ALOC(..) so as to conform
+ * to the way that net packets reach us from above.
+ * (See also ac_tx_t.)
+ */
+ cfg.cfg_byte_cnt = AC_CFG_BYTE_CNT(sizeof(ac_cfg_t) - sizeof(ach_t));
+ cfg.cfg_fifolim = AC_CFG_FIFOLIM(8);
+ cfg.cfg_byte8 = AC_CFG_SAV_BF(0) |
+ AC_CFG_SRDY(0);
+ cfg.cfg_byte9 = AC_CFG_ELPBCK(0) |
+ AC_CFG_ILPBCK(0) |
+ AC_CFG_PRELEN(AC_CFG_PLEN_2) |
+ AC_CFG_ALOC(1) |
+ AC_CFG_ADDRLEN(WAVELAN_ADDR_SIZE);
+ cfg.cfg_byte10 = AC_CFG_BOFMET(0) |
+ AC_CFG_ACR(0) |
+ AC_CFG_LINPRIO(0);
+ cfg.cfg_ifs = 32;
+ cfg.cfg_slotl = 0;
+ cfg.cfg_byte13 = AC_CFG_RETRYNUM(15) |
+ AC_CFG_SLTTMHI(2);
+ cfg.cfg_byte14 = AC_CFG_FLGPAD(0) |
+ AC_CFG_BTSTF(0) |
+ AC_CFG_CRC16(0) |
+ AC_CFG_NCRC(0) |
+ AC_CFG_TNCRS(1) |
+ AC_CFG_MANCH(0) |
+ AC_CFG_BCDIS(0) |
+ AC_CFG_PRM(lp->promiscuous);
+ cfg.cfg_byte15 = AC_CFG_ICDS(0) |
+ AC_CFG_CDTF(0) |
+ AC_CFG_ICSS(0) |
+ AC_CFG_CSTF(0);
+/*
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(64);
+*/
+ cfg.cfg_min_frm_len = AC_CFG_MNFRM(8);
+
+ cfg.cfg_h.ac_command = (AC_CFLD_CMD & acmd_configure);
+ cfg.cfg_h.ac_link = ias_addr;
+ obram_write(ioaddr, cfg_addr, (unsigned char *)&cfg, sizeof(cfg));
+
+ /* Setup the MAC address */
+ memset(&ias, 0x00, sizeof(ias));
+ ias.ias_h.ac_command = (AC_CFLD_CMD & acmd_ia_setup);
+ ias.ias_h.ac_link = mcs_addr;
+ memcpy(&ias.ias_addr[0], (unsigned char *)&dev->dev_addr[0], sizeof(ias.ias_addr));
+ obram_write(ioaddr, ias_addr, (unsigned char *)&ias, sizeof(ias));
+
+ /* Initialize adapter's ethernet multicast addresses */
+ memset(&mcs, 0x00, sizeof(mcs));
+ mcs.mcs_h.ac_command = AC_CFLD_I | (AC_CFLD_CMD & acmd_mc_setup);
+ mcs.mcs_h.ac_link = nop_addr;
+ mcs.mcs_cnt = WAVELAN_ADDR_SIZE * lp->mc_count;
+ obram_write(ioaddr, mcs_addr, (unsigned char *)&mcs, sizeof(mcs));
+
+ /* If there are any addresses to set */
+ if(lp->mc_count)
+ {
+ for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ outsw(PIOP1(ioaddr), (u_short *) dmi->dmi_addr,
+ WAVELAN_ADDR_SIZE >> 1);
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wv_82586_config(): set %d multicast addresses:\n",
+ dev->name, lp->mc_count);
+ for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ printk(KERN_DEBUG " %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
+ dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5] );
+#endif
+ }
+
+ /*
+ * Overwrite the predecessor NOP link
+ * so that it points to the configure action.
+ */
+ nop_addr = txpred + sizeof(tx);
+ nop.nop_h.ac_status = 0;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
+ (unsigned char *)&nop.nop_h.ac_status,
+ sizeof(nop.nop_h.ac_status));
+ nop.nop_h.ac_link = cfg_addr;
+ obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
+ (unsigned char *) &nop.nop_h.ac_link,
+ sizeof(nop.nop_h.ac_link));
+
+ /* If watchdog not already active, activate it... */
+ if(lp->watchdog.prev == (timer_list *) NULL)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+
+ lp->reconfig_82586 = 0;
+
+ if(lp->tx_first_in_use == I82586NULL)
+ lp->tx_first_in_use = txblock;
+
+ if(lp->tx_n_in_use < NTXBLOCKS - 1)
+ dev->tbusy = 0;
+
+ wv_splx(x);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_config()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * This routine, called by wavelan_close(), gracefully stops the
+ * WaveLAN controller (i82586).
+ */
+static inline void
+wv_82586_stop(device * dev)
+{
+ net_local * lp = (net_local *) dev->priv;
+ u_long ioaddr = dev->base_addr;
+ u_short scb_cmd;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_82586_stop()\n", dev->name);
+#endif
+
+ /* Suspend both command unit and receive unit. */
+ scb_cmd = (SCB_CMD_CUC & SCB_CMD_CUC_SUS) | (SCB_CMD_RUC & SCB_CMD_RUC_SUS);
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *)&scb_cmd, sizeof(scb_cmd));
+ set_chan_attn(ioaddr, lp->hacr);
+
+ /* No more interrupts */
+ wv_ints_off(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_82586_stop()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Totally reset the WaveLAN and restart it.
+ * Performs the following actions:
+ * 1. A power reset (reset DMA)
+ * 2. Initialize the radio modem (using wv_mmc_init)
+ * 3. Reset & Configure LAN controller (using wv_82586_start)
+ * 4. Start the LAN controller's command unit
+ * 5. Start the LAN controller's receive unit
+ */
+static int
+wv_hw_reset(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+ u_long ioaddr = dev->base_addr;
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: ->wv_hw_reset(dev=0x%x)\n", dev->name,
+ (unsigned int)dev);
+#endif
+
+ /* If watchdog was activated, kill it! */
+ if(lp->watchdog.prev != (timer_list *) NULL)
+ del_timer(&lp->watchdog);
+
+ /* Increase the number of resets done */
+ lp->nresets++;
+
+ wv_hacr_reset(ioaddr);
+ lp->hacr = HACR_DEFAULT;
+
+ if((wv_mmc_init(dev) < 0) ||
+ (wv_82586_start(dev) < 0))
+ return -1;
+
+ /* Enable the card to send interrupts */
+ wv_ints_on(dev);
+
+ /* Start card functions */
+ if((wv_ru_start(dev) < 0) ||
+ (wv_cu_start(dev) < 0))
+ return -1;
+
+ /* Finish configuration */
+ wv_82586_config(dev);
+
+#ifdef DEBUG_CONFIG_TRACE
+ printk(KERN_DEBUG "%s: <-wv_hw_reset()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Check if there is a WaveLAN at the specified base address.
+ * As a side effect, this reads the MAC address.
+ * (called in wavelan_probe() and init_module())
+ */
+static int
+wv_check_ioaddr(u_long ioaddr,
+ u_char * mac)
+{
+ int i; /* Loop counter */
+
+  /* Check if the base address is available */
+ if(check_region(ioaddr, sizeof(ha_t)))
+ return EADDRINUSE; /* ioaddr already used... */
+
+ /* Reset host interface */
+ wv_hacr_reset(ioaddr);
+
+ /* Read the MAC address from the parameter storage area */
+ psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_univ_mac_addr),
+ mac, 6);
+
+ /*
+ * Check the first three octets of the address for the manufacturer's code.
+ * Note: If this can't find your WaveLAN card, you've got a
+ * non-NCR/AT&T/Lucent ISA card. See wavelan.p.h for details on
+ * how to configure your card.
+ */
+ for(i = 0; i < (sizeof(MAC_ADDRESSES) / sizeof(char) / 3); i++)
+ if((mac[0] == MAC_ADDRESSES[i][0]) &&
+ (mac[1] == MAC_ADDRESSES[i][1]) &&
+ (mac[2] == MAC_ADDRESSES[i][2]))
+ return 0;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_WARNING "WaveLAN (0x%3X): your MAC address might be: %02X:%02X:%02X.\n",
+ ioaddr, mac[0], mac[1], mac[2]);
+#endif
+ return ENODEV;
+}
+
+/************************ INTERRUPT HANDLING ************************/
+
+/*
+ * This function is the interrupt handler for the WaveLAN card. It is
+ * called whenever the card raises an interrupt: from the modem
+ * management controller (MMC), or from the i82586 (command completed,
+ * frame received, or command/receive unit no longer active).
+ */
+static void
+wavelan_interrupt(int irq,
+ void * dev_id,
+ struct pt_regs * regs)
+{
+ device * dev;
+ u_long ioaddr;
+ net_local * lp;
+ u_short hasr;
+ u_short status;
+ u_short ack_cmd;
+
+ if((dev = (device *) (irq2dev_map[irq])) == (device *) NULL)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_WARNING "wavelan_interrupt(): irq %d for unknown device.\n",
+ irq);
+#endif
+ return;
+ }
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name);
+#endif
+
+ lp = (net_local *) dev->priv;
+ ioaddr = dev->base_addr;
+
+ /* Prevent reentrance. What should we do here? */
+#ifdef DEBUG_INTERRUPT_ERROR
+ if(dev->interrupt)
+ printk(KERN_INFO "%s: wavelan_interrupt(): Re-entering the interrupt handler.\n",
+ dev->name);
+#endif
+ dev->interrupt = 1;
+
+ if((hasr = hasr_read(ioaddr)) & HASR_MMC_INTR)
+ {
+ u_char dce_status;
+
+ /*
+ * Interrupt from the modem management controller.
+ * This will clear it -- ignored for now.
+ */
+ mmc_read(ioaddr, mmroff(0, mmr_dce_status), &dce_status, sizeof(dce_status));
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): unexpected mmc interrupt: status 0x%04x.\n",
+ dev->name, dce_status);
+#endif
+ }
+
+ if((hasr & HASR_82586_INTR) == 0)
+ {
+ dev->interrupt = 0;
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): interrupt not coming from i82586\n",
+ dev->name);
+#endif
+ return;
+ }
+
+ /* Read interrupt data. */
+ obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
+ (unsigned char *) &status, sizeof(status));
+
+ /*
+ * Acknowledge the interrupt(s).
+ */
+ ack_cmd = status & SCB_ST_INT;
+ obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
+ (unsigned char *) &ack_cmd, sizeof(ack_cmd));
+ set_chan_attn(ioaddr, lp->hacr);
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_interrupt(): status 0x%04x.\n",
+ dev->name, status);
+#endif
+
+ /* Command completed. */
+ if((status & SCB_ST_CX) == SCB_ST_CX)
+ {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_interrupt(): command completed.\n",
+ dev->name);
+#endif
+ wv_complete(dev, ioaddr, lp);
+
+ /* If watchdog was activated, kill it ! */
+ if(lp->watchdog.prev != (timer_list *) NULL)
+ del_timer(&lp->watchdog);
+ if(lp->tx_n_in_use > 0)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+ }
+
+ /* Frame received. */
+ if((status & SCB_ST_FR) == SCB_ST_FR)
+ {
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_interrupt(): received packet.\n",
+ dev->name);
+#endif
+ wv_receive(dev);
+ }
+
+ /* Check the state of the command unit. */
+ if(((status & SCB_ST_CNA) == SCB_ST_CNA) ||
+ (((status & SCB_ST_CUS) != SCB_ST_CUS_ACTV) && dev->start))
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): CU inactive -- restarting\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+  /* Check the state of the receive unit. */
+ if(((status & SCB_ST_RNR) == SCB_ST_RNR) ||
+ (((status & SCB_ST_RUS) != SCB_ST_RUS_RDY) && dev->start))
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_interrupt(): RU not ready -- restarting\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+
+ dev->interrupt = 0;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_interrupt()\n", dev->name);
+#endif
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Watchdog: when we start a transmission, we set a timer in the
+ * kernel. If the transmission completes, this timer is disabled. If
+ * the timer expires, we try to unlock the hardware.
+ *
+ * Note: this watchdog doesn't work on the same principle as the
+ * watchdog in the previous version of the ISA driver. I made it this
+ * way because add_timer() and del_timer() are cheap, and because this
+ * scheme avoids running the watchdog when no transmission is pending,
+ * saving some CPU time.
+ */
+static void
+wavelan_watchdog(u_long a)
+{
+ device * dev;
+ net_local * lp;
+ u_long ioaddr;
+ unsigned long x;
+ unsigned int nreaped;
+
+ dev = (device *) a;
+ ioaddr = dev->base_addr;
+ lp = (net_local *) dev->priv;
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_watchdog()\n", dev->name);
+#endif
+
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_watchdog: watchdog timer expired\n",
+ dev->name);
+#endif
+
+ x = wv_splhi();
+
+ if(lp->tx_n_in_use <= 0)
+ {
+ wv_splx(x);
+ return;
+ }
+
+ nreaped = wv_complete(dev, ioaddr, lp);
+
+#ifdef DEBUG_INTERRUPT_INFO
+ printk(KERN_DEBUG "%s: wavelan_watchdog(): %d reaped, %d remain.\n",
+ dev->name, nreaped, lp->tx_n_in_use);
+#endif
+
+#ifdef DEBUG_PSA_SHOW
+ {
+ psa_t psa;
+      psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
+ wv_psa_show(&psa);
+ }
+#endif
+#ifdef DEBUG_MMC_SHOW
+ wv_mmc_show(dev);
+#endif
+#ifdef DEBUG_I82586_SHOW
+ wv_cu_show(dev);
+#endif
+
+ /* If no buffer has been freed */
+ if(nreaped == 0)
+ {
+#ifdef DEBUG_INTERRUPT_ERROR
+ printk(KERN_INFO "%s: wavelan_watchdog(): cleanup failed, trying reset\n",
+ dev->name);
+#endif
+ wv_hw_reset(dev);
+ }
+ else
+ /* Reset watchdog for next transmission. */
+ if(lp->tx_n_in_use > 0)
+ {
+ /* set timer to expire in WATCHDOG_JIFFIES */
+ lp->watchdog.expires = jiffies + WATCHDOG_JIFFIES;
+ add_timer(&lp->watchdog);
+ }
+
+ wv_splx(x);
+
+#ifdef DEBUG_INTERRUPT_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_watchdog()\n", dev->name);
+#endif
+}
+
+/********************* CONFIGURATION CALLBACKS *********************/
+/*
+ * Here are the functions called by the Linux networking code (NET3)
+ * for initialization, configuration and deinstallations of the
+ * WaveLAN ISA hardware.
+ */
+
+/*------------------------------------------------------------------*/
+/*
+ * Configure and start up the WaveLAN ISA adaptor.
+ * Called by NET3 when it "opens" the device.
+ */
+static int
+wavelan_open(device * dev)
+{
+ u_long x;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+ /* Check irq */
+ if(dev->irq == 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wavelan_open(): no IRQ\n", dev->name);
+#endif
+ return -ENXIO;
+ }
+
+ if((irq2dev_map[dev->irq] != (device *) NULL) ||
+     /* This assignment always succeeds; it just records dev in irq2dev_map before request_irq(). */
+ ((irq2dev_map[dev->irq] = dev) == (device *) NULL) ||
+ (request_irq(dev->irq, &wavelan_interrupt, 0, "WaveLAN", NULL) != 0))
+ {
+ irq2dev_map[dev->irq] = (device *) NULL;
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wavelan_open(): invalid IRQ\n", dev->name);
+#endif
+ return -EAGAIN;
+ }
+
+ x = wv_splhi();
+ if(wv_hw_reset(dev) != -1)
+ {
+ dev->interrupt = 0;
+ dev->start = 1;
+ }
+ else
+ {
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = (device *) NULL;
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_INFO "%s: wavelan_open(): impossible to start the card\n",
+ dev->name);
+#endif
+ return -EAGAIN;
+ }
+ wv_splx(x);
+
+ MOD_INC_USE_COUNT;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_open()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Shut down the WaveLAN ISA card.
+ * Called by NET3 when it "closes" the device.
+ */
+static int
+wavelan_close(device * dev)
+{
+ net_local * lp = (net_local *)dev->priv;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
+ (unsigned int) dev);
+#endif
+
+  /* Don't do the job twice. */
+ if(dev->start == 0)
+ return 0;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ /* If watchdog was activated, kill it! */
+ if(lp->watchdog.prev != (timer_list *) NULL)
+ del_timer(&lp->watchdog);
+
+ /*
+ * Flush the Tx and disable Rx.
+ */
+ wv_82586_stop(dev);
+
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = (device *) NULL;
+
+ MOD_DEC_USE_COUNT;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_close()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Probe an I/O address, and if the WaveLAN is there configure the
+ * device structure
+ * (called by wavelan_probe() & via init_module())
+ */
+static int
+wavelan_config(device * dev)
+{
+ u_long ioaddr = dev->base_addr;
+ u_char irq_mask;
+ int irq;
+ net_local * lp;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_config(dev=0x%x, ioaddr=0x%x)\n", dev->name,
+ (unsigned int)dev, ioaddr);
+#endif
+
+ /* Check irq arg on command line */
+ if(dev->irq != 0)
+ {
+ irq_mask = wv_irq_to_psa(dev->irq);
+
+ if(irq_mask == 0)
+ {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING "%s: wavelan_config(): invalid irq %d -- ignored.\n",
+ dev->name, dev->irq);
+#endif
+ dev->irq = 0;
+ }
+ else
+ {
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wavelan_config(): changing irq to %d\n",
+ dev->name, dev->irq);
+#endif
+ psa_write(ioaddr, HACR_DEFAULT,
+ psaoff(0, psa_int_req_no), &irq_mask, 1);
+ wv_hacr_reset(ioaddr);
+ }
+ }
+
+ psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_int_req_no), &irq_mask, 1);
+ if((irq = wv_psa_to_irq(irq_mask)) == -1)
+ {
+#ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_INFO "%s: wavelan_config(): could not wavelan_map_irq(%d).\n",
+ dev->name, irq_mask);
+#endif
+ return EAGAIN;
+ }
+
+ dev->irq = irq;
+
+ request_region(ioaddr, sizeof(ha_t), "wavelan");
+
+ dev->mem_start = 0x0000;
+ dev->mem_end = 0x0000;
+ dev->if_port = 0;
+
+ /* Initialize device structures */
+ dev->priv = kmalloc(sizeof(net_local), GFP_KERNEL);
+ if(dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0x00, sizeof(net_local));
+ lp = (net_local *)dev->priv;
+
+ /* Back link to the device structure. */
+ lp->dev = dev;
+ /* Add the device at the beginning of the linked list. */
+ lp->next = wavelan_list;
+ wavelan_list = lp;
+
+ lp->hacr = HACR_DEFAULT;
+
+ lp->watchdog.function = wavelan_watchdog;
+ lp->watchdog.data = (unsigned long) dev;
+ lp->promiscuous = 0;
+ lp->mc_count = 0;
+
+ /*
+ * Fill in the fields of the device structure
+ * with Ethernet-generic values.
+ */
+ ether_setup(dev);
+
+ dev->open = wavelan_open;
+ dev->stop = wavelan_close;
+ dev->hard_start_xmit = wavelan_packet_xmit;
+ dev->get_stats = wavelan_get_stats;
+ dev->set_multicast_list = &wavelan_set_multicast_list;
+ dev->set_mac_address = &wavelan_set_mac_address;
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+ dev->do_ioctl = wavelan_ioctl;
+ dev->get_wireless_stats = wavelan_get_wireless_stats;
+#endif
+
+ dev->mtu = WAVELAN_MTU;
+
+ /* Display nice info */
+ wv_init_info(dev);
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_config()\n", dev->name);
+#endif
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Check for a network adaptor of this type. Return '0' iff one
+ * exists. (There seem to be different interpretations of
+ * the initial value of dev->base_addr.
+ * We follow the example in drivers/net/ne.c.)
+ * (called in "Space.c")
+ * As this function is called outside the wavelan module, it should be
+ * declared extern, but that seems to cause trouble...
+ */
+/* extern */ int
+wavelan_probe(device * dev)
+{
+ short base_addr;
+ mac_addr mac; /* MAC address (check WaveLAN existence) */
+ int i;
+ int r;
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: ->wavelan_probe(dev=0x%x (base_addr=0x%x))\n",
+ dev->name, (unsigned int)dev, (unsigned int)dev->base_addr);
+#endif
+
+#ifdef STRUCT_CHECK
+ if (wv_struct_check() != (char *) NULL)
+ {
+ printk(KERN_WARNING "%s: wavelan_probe(): structure/compiler botch: \"%s\"\n",
+ dev->name, wv_struct_check());
+ return ENODEV;
+ }
+#endif /* STRUCT_CHECK */
+
+ /* Check the value of the command line parameter for base address */
+ base_addr = dev->base_addr;
+
+ /* Don't probe at all. */
+ if(base_addr < 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "%s: wavelan_probe(): invalid base address\n",
+ dev->name);
+#endif
+ return ENXIO;
+ }
+
+ /* Check a single specified location. */
+ if(base_addr > 0x100)
+ {
+      /* Check if there is something at this base address */
+ if((r = wv_check_ioaddr(base_addr, mac)) == 0)
+ {
+ memcpy(dev->dev_addr, mac, 6); /* Copy MAC address */
+ r = wavelan_config(dev);
+ }
+
+#ifdef DEBUG_CONFIG_INFO
+ if(r != 0)
+ printk(KERN_DEBUG "%s: wavelan_probe(): no device at specified base address (0x%X) or address already in use\n",
+ dev->name, base_addr);
+#endif
+
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_probe()\n", dev->name);
+#endif
+ return r;
+ }
+
+ /* Scan all possible addresses of the WaveLAN hardware */
+ for(i = 0; i < NELS(iobase); i++)
+ {
+ /* Check whether there is something at this base address */
+ if(wv_check_ioaddr(iobase[i], mac) == 0)
+ {
+ dev->base_addr = iobase[i]; /* Copy base address. */
+ memcpy(dev->dev_addr, mac, 6); /* Copy MAC address. */
+ if(wavelan_config(dev) == 0)
+ {
+#ifdef DEBUG_CALLBACK_TRACE
+ printk(KERN_DEBUG "%s: <-wavelan_probe()\n", dev->name);
+#endif
+ return 0;
+ }
+ }
+ }
+
+  /* We may have touched base_addr: restore it, as another driver may not like it. */
+ dev->base_addr = base_addr;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: wavelan_probe(): no device found\n",
+ dev->name);
+#endif
+
+ return ENODEV;
+}
+
+/****************************** MODULE ******************************/
+/*
+ * Module entry point: insertion & removal
+ */
+
+#ifdef MODULE
+/*------------------------------------------------------------------*/
+/*
+ * Insertion of the module.
+ * I'm now quite proud of the multi-device support.
+ */
+int
+init_module(void)
+{
+ mac_addr mac; /* MAC address (check WaveLAN existence) */
+ int ret = 0;
+ int i;
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "-> init_module()\n");
+#endif
+
+ /* If probing is asked */
+ if(io[0] == 0)
+ {
+#ifdef DEBUG_CONFIG_ERRORS
+ printk(KERN_WARNING "WaveLAN init_module(): doing device probing (bad !)\n");
+ printk(KERN_WARNING "Specify base addresses while loading module to correct the problem\n");
+#endif
+
+      /* Copy the basic set of addresses to be probed. */
+ for(i = 0; i < NELS(iobase); i++)
+ io[i] = iobase[i];
+ }
+
+
+ /* Loop on all possible base addresses */
+ i = -1;
+ while((io[++i] != 0) && (i < NELS(io)))
+ {
+ /* Check if there is something at this base address. */
+ if(wv_check_ioaddr(io[i], mac) == 0)
+ {
+ device * dev;
+
+      /* Create the device and set basic args */
+ dev = kmalloc(sizeof(struct device), GFP_KERNEL);
+ memset(dev, 0x00, sizeof(struct device));
+ dev->name = name[i];
+ dev->base_addr = io[i];
+ dev->irq = irq[i];
+ dev->init = &wavelan_config;
+ memcpy(dev->dev_addr, mac, 6); /* Copy MAC address */
+
+ /* Try to create the device */
+ if(register_netdev(dev) != 0)
+ {
+ /* DeAllocate everything */
+	  /* Note: if dev->priv is allocated, there is no way to fail */
+ kfree_s(dev, sizeof(struct device));
+ ret = -EIO;
+ }
+ } /* if there is something at the address */
+ } /* Loop on all addresses. */
+
+#ifdef DEBUG_CONFIG_ERRORS
+ if(wavelan_list == (net_local *) NULL)
+ printk(KERN_WARNING "WaveLAN init_module(): no device found\n");
+#endif
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "<- init_module()\n");
+#endif
+ return ret;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Removal of the module
+ */
+void
+cleanup_module(void)
+{
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "-> cleanup_module()\n");
+#endif
+
+ /* Loop on all devices and release them. */
+ while(wavelan_list != (net_local *) NULL)
+ {
+ device * dev = wavelan_list->dev;
+
+#ifdef DEBUG_CONFIG_INFO
+ printk(KERN_DEBUG "%s: cleanup_module(): removing device at 0x%x\n",
+ dev->name, (unsigned int) dev);
+#endif
+
+ /* Release the ioport-region. */
+ release_region(dev->base_addr, sizeof(ha_t));
+
+ /* Definitely remove the device. */
+ unregister_netdev(dev);
+
+ /* Unlink the device. */
+ wavelan_list = wavelan_list->next;
+
+ /* Free pieces. */
+ kfree_s(dev->priv, sizeof(struct net_local));
+ kfree_s(dev, sizeof(struct device));
+ }
+
+#ifdef DEBUG_MODULE_TRACE
+ printk(KERN_DEBUG "<- cleanup_module()\n");
+#endif
+}
+#endif /* MODULE */
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@cesdis.gsfc.nasa.gov),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Jean Tourrilhes (jt@hplb.hpl.hp.com),
+ * Girish Welling (welling@paul.rutgers.edu),
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (iialan@iiit.swan.ac.uk),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Please send bug reports, updates, comments to:
+ *
+ * Bruce Janson Email: bruce@cs.usyd.edu.au
+ * Basser Department of Computer Science Phone: +61-2-9351-3423
+ * University of Sydney, N.S.W., 2006, AUSTRALIA Fax: +61-2-9351-3838
+ */
diff --git a/linux/src/drivers/net/wavelan.h b/linux/src/drivers/net/wavelan.h
new file mode 100644
index 0000000..2e92c79
--- /dev/null
+++ b/linux/src/drivers/net/wavelan.h
@@ -0,0 +1,346 @@
+/*
+ * Wavelan ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ * Original copyright follows. See wavelan.p.h for details.
+ *
+ * This file contains the declarations for the Wavelan hardware. Note that
+ * the Wavelan ISA includes an i82586 controller (see definitions in
+ * file i82586.h).
+ *
+ * The main difference between the ISA hardware and the PCMCIA one is
+ * the Ethernet controller (i82586 instead of i82593).
+ * The i82586 allows multiple transmit buffers. The PSA needs to be accessed
+ * through the host interface.
+ */
+
+#ifndef _WAVELAN_H
+#define _WAVELAN_H
+
+/* The detection of the wavelan card is done by reading the MAC
+ * address from the card and checking it. If you have a non-AT&T
+ * product (OEM, like DEC RoamAbout, or Digital Ocean, Epson, ...),
+ * you might need to modify this part to accommodate your hardware...
+ */
+const char MAC_ADDRESSES[][3] =
+{
+ { 0x08, 0x00, 0x0E }, /* AT&T Wavelan (standard) & DEC RoamAbout */
+ { 0x08, 0x00, 0x6A }, /* AT&T Wavelan (alternate) */
+ /* Add your card here and send me the patch ! */
+};
+
+#define WAVELAN_ADDR_SIZE 6 /* Size of a MAC address */
+
+#define WAVELAN_MTU 1500 /* Maximum size of WaveLAN packet */
+
+#define MAXDATAZ (WAVELAN_ADDR_SIZE + WAVELAN_ADDR_SIZE + 2 + WAVELAN_MTU)
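+/*
+ * Worked out with the defaults above (sketch only): MAXDATAZ =
+ * 6 + 6 + 2 + 1500 = 1514 octets, i.e. a maximal Ethernet frame
+ * without the trailing CRC.
+ */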
+
+/*************************** PC INTERFACE ****************************/
+
+/*
+ * Host Adaptor structure.
+ * (base is board port address).
+ */
+typedef union hacs_u hacs_u;
+union hacs_u
+{
+ unsigned short hu_command; /* Command register */
+#define HACR_RESET 0x0001 /* Reset board */
+#define HACR_CA 0x0002 /* Set Channel Attention for 82586 */
+#define HACR_16BITS 0x0004 /* 16 bits operation (0 => 8bits) */
+#define HACR_OUT0 0x0008 /* General purpose output pin 0 */
+ /* not used - must be 1 */
+#define HACR_OUT1 0x0010 /* General purpose output pin 1 */
+ /* not used - must be 1 */
+#define HACR_82586_INT_ENABLE 0x0020 /* Enable 82586 interrupts */
+#define HACR_MMC_INT_ENABLE 0x0040 /* Enable MMC interrupts */
+#define HACR_INTR_CLR_ENABLE 0x0080 /* Enable interrupt status read/clear */
+ unsigned short hu_status; /* Status Register */
+#define HASR_82586_INTR 0x0001 /* Interrupt request from 82586 */
+#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */
+#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */
+#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */
+};
+
+typedef struct ha_t ha_t;
+struct ha_t
+{
+ hacs_u ha_cs; /* Command and status registers */
+#define ha_command ha_cs.hu_command
+#define ha_status ha_cs.hu_status
+ unsigned short ha_mmcr; /* Modem Management Ctrl Register */
+ unsigned short ha_pior0; /* Program I/O Address Register Port 0 */
+ unsigned short ha_piop0; /* Program I/O Port 0 */
+ unsigned short ha_pior1; /* Program I/O Address Register Port 1 */
+ unsigned short ha_piop1; /* Program I/O Port 1 */
+ unsigned short ha_pior2; /* Program I/O Address Register Port 2 */
+ unsigned short ha_piop2; /* Program I/O Port 2 */
+};
+
+#define HA_SIZE 16
+
+#define hoff(p,f) (unsigned short)((void *)(&((ha_t *)((void *)0 + (p)))->f) - (void *)0)
+#define HACR(p) hoff(p, ha_command)
+#define HASR(p) hoff(p, ha_status)
+#define MMCR(p) hoff(p, ha_mmcr)
+#define PIOR0(p) hoff(p, ha_pior0)
+#define PIOP0(p) hoff(p, ha_piop0)
+#define PIOR1(p) hoff(p, ha_pior1)
+#define PIOP1(p) hoff(p, ha_piop1)
+#define PIOR2(p) hoff(p, ha_pior2)
+#define PIOP2(p) hoff(p, ha_piop2)
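+/*
+ * Illustrative sketch (not compiled, and assuming a hosted <stddef.h>):
+ * hoff(p,f) simply adds the offset of field f within ha_t to the base
+ * port p, so it behaves like the standard offsetof() macro plus the base.
+ * psaoff(), mmwoff() and mmroff() below follow the same pattern for
+ * their respective structures.
+ */
+#if 0
+#include <stddef.h>
+#define hoff_equiv(p, f)	((unsigned short)((p) + offsetof(ha_t, f)))
+/* e.g. PIOP1(base) == hoff_equiv(base, ha_piop1) */
+#endif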
+
+/*
+ * Program I/O Mode Register values.
+ */
+#define STATIC_PIO 0 /* Mode 1: static mode */
+ /* RAM access ??? */
+#define AUTOINCR_PIO 1 /* Mode 2: auto increment mode */
+ /* RAM access ??? */
+#define AUTODECR_PIO 2 /* Mode 3: auto decrement mode */
+ /* RAM access ??? */
+#define PARAM_ACCESS_PIO 3 /* Mode 4: LAN parameter access mode */
+ /* Parameter access. */
+#define PIO_MASK 3 /* register mask */
+#define PIOM(cmd,piono) ((u_short)cmd << 10 << (piono * 2))
+
+#define HACR_DEFAULT (HACR_OUT0 | HACR_OUT1 | HACR_16BITS | PIOM(STATIC_PIO, 0) | PIOM(AUTOINCR_PIO, 1) | PIOM(PARAM_ACCESS_PIO, 2))
+#define HACR_INTRON (HACR_82586_INT_ENABLE | HACR_MMC_INT_ENABLE | HACR_INTR_CLR_ENABLE)
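+/*
+ * Worked example (derived from the bit values above, sketch only):
+ * PIOM(STATIC_PIO, 0) = 0, PIOM(AUTOINCR_PIO, 1) = 0x1000 and
+ * PIOM(PARAM_ACCESS_PIO, 2) = 0xC000, so HACR_DEFAULT evaluates to
+ * 0x0008 | 0x0010 | 0x0004 | 0x1000 | 0xC000 = 0xD01C.
+ */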
+
+/************************** MEMORY LAYOUT **************************/
+
+/*
+ * Onboard 64k RAM layout.
+ * (Offsets from 0x0000.)
+ */
+#define OFFSET_RU 0x0000 /* 75 % memory */
+#define OFFSET_CU 0xC000 /* 25 % memory */
+#define OFFSET_SCB (OFFSET_ISCP - sizeof(scb_t))
+#define OFFSET_ISCP (OFFSET_SCP - sizeof(iscp_t))
+#define OFFSET_SCP I82586_SCP_ADDR
+
+#define RXBLOCKZ (sizeof(fd_t) + sizeof(rbd_t) + MAXDATAZ)
+#define TXBLOCKZ (sizeof(ac_tx_t) + sizeof(ac_nop_t) + sizeof(tbd_t) + MAXDATAZ)
+
+#define NRXBLOCKS ((OFFSET_CU - OFFSET_RU) / RXBLOCKZ)
+#define NTXBLOCKS ((OFFSET_SCB - OFFSET_CU) / TXBLOCKZ)
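+/*
+ * Rough order of magnitude (sketch; the exact figures depend on the
+ * sizes of the i82586 descriptors defined in i82586.h): with
+ * MAXDATAZ = 1514, each receive block is a little over 1.5 KB, so the
+ * 48 KB receive area (OFFSET_CU - OFFSET_RU) holds roughly 30 receive
+ * blocks, and the remaining command area (roughly 16 KB) on the order
+ * of ten transmit blocks.
+ */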
+
+/********************** PARAMETER STORAGE AREA **********************/
+
+/*
+ * Parameter Storage Area (PSA).
+ */
+typedef struct psa_t psa_t;
+struct psa_t
+{
+ unsigned char psa_io_base_addr_1; /* [0x00] Base address 1 ??? */
+ unsigned char psa_io_base_addr_2; /* [0x01] Base address 2 */
+ unsigned char psa_io_base_addr_3; /* [0x02] Base address 3 */
+ unsigned char psa_io_base_addr_4; /* [0x03] Base address 4 */
+ unsigned char psa_rem_boot_addr_1; /* [0x04] Remote Boot Address 1 */
+ unsigned char psa_rem_boot_addr_2; /* [0x05] Remote Boot Address 2 */
+ unsigned char psa_rem_boot_addr_3; /* [0x06] Remote Boot Address 3 */
+ unsigned char psa_holi_params; /* [0x07] HOst Lan Interface (HOLI) Parameters */
+ unsigned char psa_int_req_no; /* [0x08] Interrupt Request Line */
+ unsigned char psa_unused0[7]; /* [0x09-0x0F] unused */
+
+ unsigned char psa_univ_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x10-0x15] Universal (factory) MAC Address */
+ unsigned char psa_local_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x16-1B] Local MAC Address */
+ unsigned char psa_univ_local_sel; /* [0x1C] Universal Local Selection */
+#define PSA_UNIVERSAL 0 /* Universal (factory) */
+#define PSA_LOCAL 1 /* Local */
+ unsigned char psa_comp_number; /* [0x1D] Compatibility Number: */
+#define PSA_COMP_PC_AT_915 0 /* PC-AT 915 MHz */
+#define PSA_COMP_PC_MC_915 1 /* PC-MC 915 MHz */
+#define PSA_COMP_PC_AT_2400 2 /* PC-AT 2.4 GHz */
+#define PSA_COMP_PC_MC_2400 3 /* PC-MC 2.4 GHz */
+#define PSA_COMP_PCMCIA_915 4 /* PCMCIA 915 MHz or 2.0 */
+ unsigned char psa_thr_pre_set; /* [0x1E] Modem Threshold Preset */
+ unsigned char psa_feature_select; /* [0x1F] Call code required (1=on) */
+#define PSA_FEATURE_CALL_CODE 0x01 /* Call code required (Japan) */
+ unsigned char psa_subband; /* [0x20] Subband */
+#define PSA_SUBBAND_915 0 /* 915 MHz or 2.0 */
+#define PSA_SUBBAND_2425 1 /* 2425 MHz */
+#define PSA_SUBBAND_2460 2 /* 2460 MHz */
+#define PSA_SUBBAND_2484 3 /* 2484 MHz */
+#define PSA_SUBBAND_2430_5 4 /* 2430.5 MHz */
+ unsigned char psa_quality_thr; /* [0x21] Modem Quality Threshold */
+ unsigned char psa_mod_delay; /* [0x22] Modem Delay ??? (reserved) */
+ unsigned char psa_nwid[2]; /* [0x23-0x24] Network ID */
+ unsigned char psa_nwid_select; /* [0x25] Network ID Select On Off */
+ unsigned char psa_encryption_select; /* [0x26] Encryption On Off */
+ unsigned char psa_encryption_key[8]; /* [0x27-0x2E] Encryption Key */
+ unsigned char psa_databus_width; /* [0x2F] AT bus width select 8/16 */
+ unsigned char psa_call_code[8]; /* [0x30-0x37] (Japan) Call Code */
+ unsigned char psa_nwid_prefix[2]; /* [0x38-0x39] Roaming domain */
+ unsigned char psa_reserved[2]; /* [0x3A-0x3B] Reserved - fixed 00 */
+ unsigned char psa_conf_status; /* [0x3C] Conf Status, bit 0=1:config*/
+ unsigned char psa_crc[2]; /* [0x3D] CRC-16 over PSA */
+ unsigned char psa_crc_status; /* [0x3F] CRC Valid Flag */
+};
+
+#define PSA_SIZE 64
+
+/* Calculate offset of a field in the above structure
+ * Warning : only even addresses are used */
+#define psaoff(p,f) ((unsigned short) ((void *)(&((psa_t *) ((void *) NULL + (p)))->f) - (void *) NULL))
+
+/******************** MODEM MANAGEMENT INTERFACE ********************/
+
+/*
+ * Modem Management Controller (MMC) write structure.
+ */
+typedef struct mmw_t mmw_t;
+struct mmw_t
+{
+ unsigned char mmw_encr_key[8]; /* encryption key */
+ unsigned char mmw_encr_enable; /* enable/disable encryption */
+#define MMW_ENCR_ENABLE_MODE 0x02 /* Mode of security option */
+#define MMW_ENCR_ENABLE_EN 0x01 /* Enable security option */
+ unsigned char mmw_unused0[1]; /* unused */
+ unsigned char mmw_des_io_invert; /* Encryption option */
+#define MMW_DES_IO_INVERT_RES 0x0F /* Reserved */
+#define MMW_DES_IO_INVERT_CTRL 0xF0 /* Control ??? (set to 0) */
+ unsigned char mmw_unused1[5]; /* unused */
+ unsigned char mmw_loopt_sel; /* looptest selection */
+#define MMW_LOOPT_SEL_DIS_NWID 0x40 /* disable NWID filtering */
+#define MMW_LOOPT_SEL_INT 0x20 /* activate Attention Request */
+#define MMW_LOOPT_SEL_LS 0x10 /* looptest w/o collision avoidance */
+#define MMW_LOOPT_SEL_LT3A 0x08 /* looptest 3a */
+#define MMW_LOOPT_SEL_LT3B 0x04 /* looptest 3b */
+#define MMW_LOOPT_SEL_LT3C 0x02 /* looptest 3c */
+#define MMW_LOOPT_SEL_LT3D 0x01 /* looptest 3d */
+ unsigned char mmw_jabber_enable; /* jabber timer enable */
+ /* Abort transmissions > 200 ms */
+ unsigned char mmw_freeze; /* freeze / unfreeze signal level */
+ /* 0 : signal level & qual updated for every new message, 1 : frozen */
+ unsigned char mmw_anten_sel; /* antenna selection */
+#define MMW_ANTEN_SEL_SEL 0x01 /* direct antenna selection */
+#define MMW_ANTEN_SEL_ALG_EN 0x02 /* antenna selection algo. enable */
+ unsigned char mmw_ifs; /* inter frame spacing */
+ /* min time between transmission in bit periods (.5 us) - bit 0 ignored */
+ unsigned char mmw_mod_delay; /* modem delay (synchro) */
+ unsigned char mmw_jam_time; /* jamming time (after collision) */
+ unsigned char mmw_unused2[1]; /* unused */
+ unsigned char mmw_thr_pre_set; /* level threshold preset */
+ /* Discard all packet with signal < this value (4) */
+ unsigned char mmw_decay_prm; /* decay parameters */
+ unsigned char mmw_decay_updat_prm; /* decay update parameters */
+ unsigned char mmw_quality_thr; /* quality (z-quotient) threshold */
+ /* Discard all packet with quality < this value (3) */
+ unsigned char mmw_netw_id_l; /* NWID low order byte */
+ unsigned char mmw_netw_id_h; /* NWID high order byte */
+ /* Network ID or Domain : create virtual net on the air */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmw_mode_select; /* for analog tests (set to 0) */
+ unsigned char mmw_unused3[1]; /* unused */
+ unsigned char mmw_fee_ctrl; /* frequency eeprom control */
+#define MMW_FEE_CTRL_PRE 0x10 /* Enable protected instructions */
+#define MMW_FEE_CTRL_DWLD 0x08 /* Download eeprom to mmc */
+#define MMW_FEE_CTRL_CMD 0x07 /* EEprom commands : */
+#define MMW_FEE_CTRL_READ 0x06 /* Read */
+#define MMW_FEE_CTRL_WREN 0x04 /* Write enable */
+#define MMW_FEE_CTRL_WRITE 0x05 /* Write data to address */
+#define MMW_FEE_CTRL_WRALL 0x04 /* Write data to all addresses */
+#define MMW_FEE_CTRL_WDS 0x04 /* Write disable */
+#define MMW_FEE_CTRL_PRREAD 0x16 /* Read addr from protect register */
+#define MMW_FEE_CTRL_PREN 0x14 /* Protect register enable */
+#define MMW_FEE_CTRL_PRCLEAR 0x17 /* Unprotect all registers */
+#define MMW_FEE_CTRL_PRWRITE 0x15 /* Write addr in protect register */
+#define MMW_FEE_CTRL_PRDS 0x14 /* Protect register disable */
+ /* Never issue this command (PRDS) : it's irreversible !!! */
+
+ unsigned char mmw_fee_addr; /* EEprom address */
+#define MMW_FEE_ADDR_CHANNEL 0xF0 /* Select the channel */
+#define MMW_FEE_ADDR_OFFSET 0x0F /* Offset in channel data */
+#define MMW_FEE_ADDR_EN 0xC0 /* FEE_CTRL enable operations */
+#define MMW_FEE_ADDR_DS 0x00 /* FEE_CTRL disable operations */
+#define MMW_FEE_ADDR_ALL 0x40 /* FEE_CTRL all operations */
+#define MMW_FEE_ADDR_CLEAR 0xFF /* FEE_CTRL clear operations */
+
+ unsigned char mmw_fee_data_l; /* Write data to EEprom */
+ unsigned char mmw_fee_data_h; /* high octet */
+ unsigned char mmw_ext_ant; /* Setting for external antenna */
+#define MMW_EXT_ANT_EXTANT 0x01 /* Select external antenna */
+#define MMW_EXT_ANT_POL 0x02 /* Polarity of the antenna */
+#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
+#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
+#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
+};
+
+#define MMW_SIZE 37
+
+#define mmwoff(p,f) (unsigned short)((void *)(&((mmw_t *)((void *)0 + (p)))->f) - (void *)0)
+
+/*
+ * Modem Management Controller (MMC) read structure.
+ */
+typedef struct mmr_t mmr_t;
+struct mmr_t
+{
+ unsigned char mmr_unused0[8]; /* unused */
+ unsigned char mmr_des_status; /* encryption status */
+ unsigned char mmr_des_avail; /* encryption available (0x55 read) */
+#define MMR_DES_AVAIL_DES 0x55 /* DES available */
+#define MMR_DES_AVAIL_AES 0x33 /* AES (AT&T) available */
+ unsigned char mmr_des_io_invert; /* des I/O invert register */
+ unsigned char mmr_unused1[5]; /* unused */
+ unsigned char mmr_dce_status; /* DCE status */
+#define MMR_DCE_STATUS_RX_BUSY 0x01 /* receiver busy */
+#define MMR_DCE_STATUS_LOOPT_IND 0x02 /* loop test indicated */
+#define MMR_DCE_STATUS_TX_BUSY 0x04 /* transmitter on */
+#define MMR_DCE_STATUS_JBR_EXPIRED 0x08 /* jabber timer expired */
+ unsigned char mmr_dsp_id; /* DSP id (AA = Daedalus rev A) */
+ unsigned char mmr_unused2[2]; /* unused */
+ unsigned char mmr_correct_nwid_l; /* # of correct NWID's rxd (low) */
+ unsigned char mmr_correct_nwid_h; /* # of correct NWID's rxd (high) */
+ /* Warning : Read high order octet first !!! */
+ unsigned char mmr_wrong_nwid_l; /* # of wrong NWID's rxd (low) */
+ unsigned char mmr_wrong_nwid_h; /* # of wrong NWID's rxd (high) */
+ unsigned char mmr_thr_pre_set; /* level threshold preset */
+#define MMR_THR_PRE_SET 0x3F /* level threshold preset */
+#define MMR_THR_PRE_SET_CUR 0x80 /* Current signal above it */
+ unsigned char mmr_signal_lvl; /* signal level */
+#define MMR_SIGNAL_LVL 0x3F /* signal level */
+#define MMR_SIGNAL_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_silence_lvl; /* silence level (noise) */
+#define MMR_SILENCE_LVL 0x3F /* silence level */
+#define MMR_SILENCE_LVL_VALID 0x80 /* Updated since last read */
+ unsigned char mmr_sgnl_qual; /* signal quality */
+#define MMR_SGNL_QUAL 0x0F /* signal quality */
+#define MMR_SGNL_QUAL_ANT 0x80 /* current antenna used */
+ unsigned char mmr_netw_id_l; /* NWID low order byte ??? */
+ unsigned char mmr_unused3[3]; /* unused */
+
+ /* 2.0 Hardware extension - frequency selection support */
+ unsigned char mmr_fee_status; /* Status of frequency eeprom */
+#define MMR_FEE_STATUS_ID 0xF0 /* Modem revision id */
+#define MMR_FEE_STATUS_DWLD 0x08 /* Download in progress */
+#define MMR_FEE_STATUS_BUSY 0x04 /* EEprom busy */
+ unsigned char mmr_unused4[1]; /* unused */
+ unsigned char mmr_fee_data_l; /* Read data from eeprom (low) */
+ unsigned char mmr_fee_data_h; /* Read data from eeprom (high) */
+};
+
+#define MMR_SIZE 36
+
+#define mmroff(p,f) (unsigned short)((void *)(&((mmr_t *)((void *)0 + (p)))->f) - (void *)0)
+
+/* Make the two above structures one */
+typedef union mm_t
+{
+ struct mmw_t w; /* Write to the mmc */
+ struct mmr_t r; /* Read from the mmc */
+} mm_t;
+
+#endif /* _WAVELAN_H */
+
+/*
+ * This software may only be used and distributed
+ * according to the terms of the GNU Public License.
+ *
+ * For more details, see wavelan.c.
+ */
diff --git a/linux/src/drivers/net/wavelan.p.h b/linux/src/drivers/net/wavelan.p.h
new file mode 100644
index 0000000..3a6124e
--- /dev/null
+++ b/linux/src/drivers/net/wavelan.p.h
@@ -0,0 +1,635 @@
+/*
+ * Wavelan ISA driver
+ *
+ * Jean II - HPLB '96
+ *
+ * Reorganisation and extension of the driver.
+ *
+ * This file contains all definitions and declarations necessary for the
+ * wavelan ISA driver. This file is a private header, so it should
+ * be included only in wavelan.c !!!
+ */
+
+#ifndef WAVELAN_P_H
+#define WAVELAN_P_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * This driver provides a Linux interface to the Wavelan ISA hardware.
+ * The Wavelan is a product of Lucent ("http://wavelan.netland.nl/").
+ * This division was formerly part of NCR and then AT&T.
+ * Wavelans are also distributed by DEC (RoamAbout), Digital Ocean and
+ * Aironet (Arlan). If you have one of those products, you will need to
+ * make some changes below...
+ *
+ * This driver is still beta software. A lot of bugs have been corrected,
+ * a lot of functionality is implemented, and the whole appears pretty stable,
+ * but there are still some areas for improvement (encryption, performance...).
+ *
+ * To learn how to use this driver, read the NET3 HOWTO.
+ * If you want to exploit the many other functionalities, look at the
+ * comments in the code...
+ *
+ * This driver is the result of the effort of many people (see below).
+ */
+
+/* ------------------------ SPECIFIC NOTES ------------------------ */
+/*
+ * wavelan.o is darn too big
+ * -------------------------
+ * That's true ! There is a very simple way to reduce the driver
+ * object by 33% (yes !). Comment out the following line :
+ * #include <linux/wireless.h>
+ *
+ * MAC address and hardware detection :
+ * ----------------------------------
+ * The detection code of the wavelan checks that the first 3
+ * octets of the MAC address match a known company code. This type of
+ * detection works well for AT&T cards (because the AT&T code is
+ * hardcoded in wavelan.h), but of course will fail for other
+ * manufacturers.
+ *
+ * If you are sure that your card is derived from the wavelan,
+ * here is the way to configure it :
+ * 1) Get your MAC address
+ * a) With your card utilities (wfreqsel, instconf, ...)
+ * b) With the driver :
+ * o compile the kernel with DEBUG_CONFIG_INFO enabled
+ * o Boot and look the card messages
+ * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan.h)
+ * 3) Compile & verify
+ * 4) Send me the MAC code - I will include it in the next version...
+ *
+ * "CU Inactive" message at boot up :
+ * -----------------------------------
+ * It seems that there are some weird timing problems with the
+ * Intel microcontroller. In fact, this message is triggered by a
+ * bad read of the on-board RAM the first time we read the
+ * control block. If you ignore this message, all is ok (but in
+ * fact, currently, it resets the wavelan hardware).
+ *
+ * To get rid of that problem, there are two solutions. The first
+ * is to add a dummy read of the scb at the end of
+ * wv_82586_config. The second is to add the timers in
+ * wv_synchronous_cmd and wv_ack (the udelay just after the
+ * waiting loops - it seems the controller is not totally ready
+ * when it says it is !).
+ *
+ * In the current code, I use the second solution (to be
+ * consistent with the original solution of Bruce Janson).
+ */
+
+/* --------------------- WIRELESS EXTENSIONS --------------------- */
+/*
+ * This driver is the first one to support "wireless extensions".
+ * This set of extensions provides a standard way to control the wireless
+ * characteristics of the hardware, and supports applications that take
+ * advantage of it (like Mobile IP).
+ *
+ * You will need to enable the CONFIG_NET_RADIO define in the kernel
+ * configuration to enable the wireless extensions (this is the one
+ * giving access to the radio network device choice).
+ *
+ * It might also be a good idea to fetch the wireless tools to
+ * configure the device and play a bit.
+ */
+
+/* ---------------------------- FILES ---------------------------- */
+/*
+ * wavelan.c : The actual code for the driver - C functions
+ *
+ * wavelan.p.h : Private header : local types / vars for the driver
+ *
+ * wavelan.h : Description of the hardware interface & structs
+ *
+ * i82586.h : Description of the Ethernet controller
+ */
+
+/* --------------------------- HISTORY --------------------------- */
+/*
+ * (Made with information from the drivers' headers. It may not be accurate,
+ * and I guarantee nothing except my best effort...)
+ *
+ * The history of the Wavelan drivers is as complicated as the history of
+ * the Wavelan itself (NCR -> AT&T -> Lucent).
+ *
+ * It all started with Anders Klemets <klemets@paul.rutgers.edu>,
+ * writing a Wavelan ISA driver for the MACH microkernel. Girish
+ * Welling <welling@paul.rutgers.edu> had also worked on it.
+ * Keith Moore modified this for the Pcmcia hardware.
+ *
+ * Robert Morris <rtm@das.harvard.edu> ported these two drivers to BSDI
+ * and added specific Pcmcia support (there is currently no equivalent
+ * of the PCMCIA package under BSD...).
+ *
+ * Jim Binkley <jrb@cs.pdx.edu> ported both BSDI drivers to FreeBSD.
+ *
+ * Bruce Janson <bruce@cs.usyd.edu.au> ported the BSDI ISA driver to Linux.
+ *
+ * Anthony D. Joseph <adj@lcs.mit.edu> started modifying Bruce's driver
+ * (with help from the BSDI PCMCIA driver) for PCMCIA.
+ * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished this work.
+ * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to correctly
+ * start 2.00 cards (2.4 GHz with frequency selection).
+ * David Hinds <dhinds@hyper.stanford.edu> integrated the whole into his
+ * Pcmcia package (+ bug corrections).
+ *
+ * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
+ * patches to the Pcmcia driver. Afterwards, I added code to the ISA driver
+ * for Wireless Extensions and full support of frequency selection
+ * cards. Then, I did the same to the Pcmcia driver, plus some
+ * reorganisation. Finally, I came back to the ISA driver to
+ * bring it up to the same level as the Pcmcia one and reorganise
+ * the code.
+ * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
+ * much-needed information on the Wavelan hardware.
+ */
+
+/* The original copyrights and literature mention other names and
+ * credits. I don't know what their part in this development was...
+ */
+
+/* By the way : for the copyright & legal stuff :
+ * Almost everybody wrote code under the GNU or BSD license (or alike),
+ * and wants their original copyright to remain somewhere in the
+ * code (for myself, I go with the GPL).
+ * Nobody wants to take responsibility for anything, except the fame...
+ */
+
+/* --------------------------- CREDITS --------------------------- */
+/*
+ * This software was developed as a component of the
+ * Linux operating system.
+ * It is based on other device drivers and information
+ * either written or supplied by:
+ * Ajay Bakre (bakre@paul.rutgers.edu),
+ * Donald Becker (becker@cesdis.gsfc.nasa.gov),
+ * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
+ * Brent Elphick <belphick@uwaterloo.ca>,
+ * Anders Klemets (klemets@it.kth.se),
+ * Vladimir V. Kolpakov (w@stier.koenig.ru),
+ * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
+ * Pauline Middelink (middelin@polyware.iaf.nl),
+ * Robert Morris (rtm@das.harvard.edu),
+ * Jean Tourrilhes (jt@hplb.hpl.hp.com),
+ * Girish Welling (welling@paul.rutgers.edu),
+ * Clark Woodworth <clark@hiway1.exit109.com>
+ * Yongguang Zhang <ygz@isl.hrl.hac.com>...
+ *
+ * Thanks go also to:
+ * James Ashton (jaa101@syseng.anu.edu.au),
+ * Alan Cox (iialan@iiit.swan.ac.uk),
+ * Allan Creighton (allanc@cs.usyd.edu.au),
+ * Matthew Geier (matthew@cs.usyd.edu.au),
+ * Remo di Giovanni (remo@cs.usyd.edu.au),
+ * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
+ * Vipul Gupta (vgupta@cs.binghamton.edu),
+ * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
+ * Tim Nicholson (tim@cs.usyd.edu.au),
+ * Ian Parkin (ian@cs.usyd.edu.au),
+ * John Rosenberg (johnr@cs.usyd.edu.au),
+ * George Rossi (george@phm.gov.au),
+ * Arthur Scott (arthur@cs.usyd.edu.au),
+ * Stanislav Sinyagin <stas@isf.ru>
+ * Peter Storey,
+ * for their assistance and advice.
+ *
+ * Additional Credits:
+ *
+ * My development has been done under Linux 2.0.x (Debian 1.1) with
+ * an HP Vectra XP/60.
+ *
+ */
+
+/* ------------------------- IMPROVEMENTS ------------------------- */
+/*
+ * I proudly present :
+ *
+ * Changes made in first pre-release :
+ * ----------------------------------
+ * - Reorganisation of the code, function name change
+ * - Creation of private header (wavelan.p.h)
+ * - Reorganised debug messages
+ * - More comments, history, ...
+ * - mmc_init : configure the PSA if not done
+ * - mmc_init : correct default value of level threshold for pcmcia
+ * - mmc_init : 2.00 detection better code for 2.00 init
+ * - better info at startup
+ * - irq setting (note : this setting is permanent...)
+ * - Watchdog : change strategy (+ solve module removal problems)
+ * - add wireless extensions (ioctl & get_wireless_stats)
+ * get/set nwid/frequency on fly, info for /proc/net/wireless
+ * - More wireless extension : SETSPY and GETSPY
+ * - Make wireless extensions optional
+ * - Private ioctl to set/get quality & level threshold, histogram
+ * - Remove /proc/net/wavelan
+ * - Suppress useless stuff from lp (net_local)
+ * - kernel 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
+ * - Add message level (debug stuff in /var/adm/debug & errors not
+ * displayed at console and still in /var/adm/messages)
+ * - multi device support
+ * - Start fixing the probe (init code)
+ * - More inlines
+ * - man page
+ * - Lots of other minor details & cleanups
+ *
+ * Changes made in second pre-release :
+ * ----------------------------------
+ * - Cleanup init code (probe & module init)
+ * - Better multi device support (module)
+ * - name assignment (module)
+ *
+ * Changes made in third pre-release :
+ * ---------------------------------
+ * - Be more conservative on timers
+ * - Preliminary support for multicast (I still lack some details...)
+ *
+ * Changes made in fourth pre-release :
+ * ----------------------------------
+ * - multicast (revisited and finished)
+ * - Avoid reset in set_multicast_list (a really big hack)
+ * if somebody could apply this code to other i82586-based drivers...
+ * - Share on board memory 75% RU / 25% CU (instead of 50/50)
+ *
+ * Changes made for release in 2.1.15 :
+ * ----------------------------------
+ * - Change the detection code for multi manufacturer code support
+ *
+ * Changes made for release in 2.1.17 :
+ * ----------------------------------
+ * - Update to wireless extensions changes
+ * - Silly bug in card initial configuration (psa_conf_status)
+ *
+ * Changes made for release in 2.1.27 & 2.0.30 :
+ * -------------------------------------------
+ * - Small bug in debug code (probably not the last one...)
+ * - Remove extern keyword from wavelan_probe()
+ * - Level threshold is now a standard wireless extension (version 4 !)
+ *
+ * Changes made for release in 2.1.36 :
+ * ----------------------------------
+ * - Encryption setting from Brent Elphick (thanks a lot !)
+ * - 'ioaddr' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
+ *
+ * Wishes & dreams :
+ * ---------------
+ * - Roaming
+ */
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/malloc.h>
+#include <linux/timer.h>
+
+#include <linux/wireless.h> /* Wireless extensions */
+
+/* Wavelan declarations */
+#include "i82586.h"
+#include "wavelan.h"
+
+/****************************** DEBUG ******************************/
+
+#undef DEBUG_MODULE_TRACE /* Module insertion/removal */
+#undef DEBUG_CALLBACK_TRACE /* Calls made by Linux */
+#undef DEBUG_INTERRUPT_TRACE /* Calls to handler */
+#undef DEBUG_INTERRUPT_INFO /* type of interrupt & so on */
+#define DEBUG_INTERRUPT_ERROR /* problems */
+#undef DEBUG_CONFIG_TRACE /* Trace the config functions */
+#undef DEBUG_CONFIG_INFO /* What's going on... */
+#define DEBUG_CONFIG_ERRORS /* Errors on configuration */
+#undef DEBUG_TX_TRACE /* Transmission calls */
+#undef DEBUG_TX_INFO /* Header of the transmitted packet */
+#define DEBUG_TX_ERROR /* unexpected conditions */
+#undef DEBUG_RX_TRACE /* Reception calls */
+#undef DEBUG_RX_INFO /* Header of the received packet */
+#define DEBUG_RX_ERROR /* unexpected conditions */
+#undef DEBUG_PACKET_DUMP 16 /* Dump packet on the screen */
+#undef DEBUG_IOCTL_TRACE /* Misc call by Linux */
+#undef DEBUG_IOCTL_INFO /* Various debug info */
+#define DEBUG_IOCTL_ERROR /* What's going wrong */
+#define DEBUG_BASIC_SHOW /* Show basic startup info */
+#undef DEBUG_VERSION_SHOW /* Print version info */
+#undef DEBUG_PSA_SHOW /* Dump psa to screen */
+#undef DEBUG_MMC_SHOW /* Dump mmc to screen */
+#undef DEBUG_SHOW_UNUSED /* Show also unused fields */
+#undef DEBUG_I82586_SHOW /* Show i82586 status */
+#undef DEBUG_DEVICE_SHOW /* Show device parameters */
+
+/* Options : */
+#define USE_PSA_CONFIG /* Use info from the PSA */
+#define IGNORE_NORMAL_XMIT_ERRS /* Don't bother with normal conditions */
+#undef STRUCT_CHECK /* Verify padding of structures */
+#undef PSA_CRC /* Check CRC in PSA */
+#undef OLDIES /* Old code (to redo) */
+#undef RECORD_SNR /* To redo */
+#undef EEPROM_IS_PROTECTED /* Doesn't seem to be necessary */
+#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
+
+#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
+/* Warning : this stuff will slow down the driver... */
+#define WIRELESS_SPY /* Enable spying addresses */
+#undef HISTOGRAM /* Enable histogram of sig level... */
+#endif
+
+/************************ CONSTANTS & MACROS ************************/
+
+#ifdef DEBUG_VERSION_SHOW
+static const char *version = "wavelan.c : v16 (wireless extensions) 17/4/97\n";
+#endif
+
+/* Watchdog timeout (in jiffies) */
+#define WATCHDOG_JIFFIES 32 /* TODO: express in HZ. */
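+/*
+ * A possible HZ-based form (sketch only, not the driver's current
+ * choice): on a HZ=100 kernel, 32 jiffies is about 320 ms, so an
+ * equivalent definition would be
+ *   #define WATCHDOG_JIFFIES (32 * HZ / 100)
+ */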
+
+/* Macro to get the number of elements in an array */
+#define NELS(a) (sizeof(a) / sizeof(a[0]))
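+/* For instance, with the default iobase[] table defined below,
+ * NELS(iobase) evaluates to 2. */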
+
+/* ------------------------ PRIVATE IOCTL ------------------------ */
+
+#define SIOCSIPQTHR SIOCDEVPRIVATE /* Set quality threshold */
+#define SIOCGIPQTHR SIOCDEVPRIVATE + 1 /* Get quality threshold */
+#define SIOCSIPLTHR SIOCDEVPRIVATE + 2 /* Set level threshold */
+#define SIOCGIPLTHR SIOCDEVPRIVATE + 3 /* Get level threshold */
+
+#define SIOCSIPHISTO SIOCDEVPRIVATE + 6 /* Set histogram ranges */
+#define SIOCGIPHISTO SIOCDEVPRIVATE + 7 /* Get histogram values */
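+/*
+ * Hypothetical user-space sketch (the struct iwreq layout used to
+ * carry the value is an assumption, not taken from this driver;
+ * skfd is an already-open socket descriptor):
+ *
+ *   struct iwreq wrq;
+ *   strncpy(wrq.ifr_name, "eth1", IFNAMSIZ);
+ *   if(ioctl(skfd, SIOCGIPQTHR, &wrq) >= 0)
+ *     ... read the quality threshold back from wrq ...
+ */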
+
+/* ----------------------- VERSION SUPPORT ----------------------- */
+
+/* This ugly patch is needed to cope with old versions of the kernel */
+#ifndef copy_from_user
+#define copy_from_user memcpy_fromfs
+#define copy_to_user memcpy_tofs
+#endif
+
+/****************************** TYPES ******************************/
+
+/* Shortcuts */
+typedef struct device device;
+typedef struct enet_statistics en_stats;
+typedef struct iw_statistics iw_stats;
+typedef struct iw_quality iw_qual;
+typedef struct iw_freq iw_freq;
+typedef struct net_local net_local;
+typedef struct timer_list timer_list;
+
+/* Basic types */
+typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
+
+/*
+ * Static specific data for the interface.
+ *
+ * For each network interface, Linux keeps data in two structures: "device"
+ * keeps the generic data (same format for everybody) and "net_local" keeps
+ * the additional specific data.
+ * Note that some of this specific data is in fact generic (en_stats, for
+ * example).
+ */
+struct net_local
+{
+ net_local * next; /* Linked list of the devices */
+ device * dev; /* Reverse link... */
+ en_stats stats; /* Ethernet interface statistics */
+ int nresets; /* Number of hw resets */
+ u_char reconfig_82586; /* Need to reconfigure the controller */
+ u_char promiscuous; /* Promiscuous mode */
+ int mc_count; /* Number of multicast addresses */
+ timer_list watchdog; /* To avoid blocking state */
+ u_short hacr; /* Current host interface state */
+
+ int tx_n_in_use;
+ u_short rx_head;
+ u_short rx_last;
+ u_short tx_first_free;
+ u_short tx_first_in_use;
+
+#ifdef WIRELESS_EXT
+ iw_stats wstats; /* Wireless specific stats */
+#endif
+
+#ifdef WIRELESS_SPY
+ int spy_number; /* Number of addresses to spy */
+ mac_addr spy_address[IW_MAX_SPY]; /* The addresses to spy */
+ iw_qual spy_stat[IW_MAX_SPY]; /* Statistics gathered */
+#endif /* WIRELESS_SPY */
+#ifdef HISTOGRAM
+ int his_number; /* Number of intervals */
+ u_char his_range[16]; /* Boundaries of interval ]n-1; n] */
+ u_long his_sum[16]; /* Sum in interval */
+#endif /* HISTOGRAM */
+};
+
+/**************************** PROTOTYPES ****************************/
+
+/* ----------------------- MISC SUBROUTINES ------------------------ */
+static inline unsigned long /* flags */
+ wv_splhi(void); /* Disable interrupts */
+static inline void
+ wv_splx(unsigned long); /* ReEnable interrupts : flags */
+static u_char
+ wv_irq_to_psa(int);
+static int
+ wv_psa_to_irq(u_char);
+/* ------------------- HOST ADAPTER SUBROUTINES ------------------- */
+static inline u_short /* data */
+ hasr_read(u_long); /* Read the host interface : base address */
+static inline void
+ hacr_write(u_long, /* Write to host interface : base address */
+ u_short), /* data */
+ hacr_write_slow(u_long,
+ u_short),
+ set_chan_attn(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_hacr_reset(u_long), /* ioaddr */
+ wv_16_off(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_16_on(u_long, /* ioaddr */
+ u_short), /* hacr */
+ wv_ints_off(device *),
+ wv_ints_on(device *);
+/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
+static void
+ psa_read(u_long, /* Read the Parameter Storage Area */
+ u_short, /* hacr */
+ int, /* offset in PSA */
+ u_char *, /* buffer to fill */
+ int), /* size to read */
+ psa_write(u_long, /* Write to the PSA */
+ u_short, /* hacr */
+ int, /* Offset in psa */
+ u_char *, /* Buffer in memory */
+ int); /* Length of buffer */
+static inline void
+ mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */
+ u_short,
+ u_char),
+ mmc_write(u_long, /* Write n bytes to the MMC */
+ u_char,
+ u_char *,
+ int);
+static inline u_char /* Read 1 byte from the MMC */
+ mmc_in(u_long,
+ u_short);
+static inline void
+ mmc_read(u_long, /* Read n bytes from the MMC */
+ u_char,
+ u_char *,
+ int),
+ fee_wait(u_long, /* Wait for frequency EEprom : base address */
+ int, /* Base delay to wait for */
+ int); /* Number of time to wait */
+static void
+ fee_read(u_long, /* Read the frequency EEprom : base address */
+ u_short, /* destination offset */
+ u_short *, /* data buffer */
+ int); /* number of registers */
+/* ---------------------- I82586 SUBROUTINES ----------------------- */
+static /*inline*/ void
+ obram_read(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static inline void
+ obram_write(u_long, /* ioaddr */
+ u_short, /* o */
+ u_char *, /* b */
+ int); /* n */
+static void
+ wv_ack(device *);
+static inline int
+ wv_synchronous_cmd(device *,
+ const char *),
+ wv_config_complete(device *,
+ u_long,
+ net_local *);
+static int
+ wv_complete(device *,
+ u_long,
+ net_local *);
+static inline void
+ wv_82586_reconfig(device *);
+/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
+#ifdef DEBUG_I82586_SHOW
+static void
+ wv_scb_show(unsigned short);
+#endif
+static inline void
+ wv_init_info(device *); /* display startup info */
+/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
+static en_stats *
+ wavelan_get_stats(device *); /* Give stats /proc/net/dev */
+static void
+ wavelan_set_multicast_list(device *);
+/* ----------------------- PACKET RECEPTION ----------------------- */
+static inline void
+ wv_packet_read(device *, /* Read a packet from a frame */
+ u_short,
+ int),
+ wv_receive(device *); /* Read all packets waiting */
+/* --------------------- PACKET TRANSMISSION --------------------- */
+static inline void
+ wv_packet_write(device *, /* Write a packet to the Tx buffer */
+ void *,
+ short);
+static int
+ wavelan_packet_xmit(struct sk_buff *, /* Send a packet */
+ device *);
+/* -------------------- HARDWARE CONFIGURATION -------------------- */
+static inline int
+ wv_mmc_init(device *), /* Initialize the modem */
+ wv_ru_start(device *), /* Start the i82586 receiver unit */
+ wv_cu_start(device *), /* Start the i82586 command unit */
+ wv_82586_start(device *); /* Start the i82586 */
+static void
+ wv_82586_config(device *); /* Configure the i82586 */
+static inline void
+ wv_82586_stop(device *);
+static int
+ wv_hw_reset(device *), /* Reset the wavelan hardware */
+ wv_check_ioaddr(u_long, /* ioaddr */
+ u_char *); /* mac address (read) */
+/* ---------------------- INTERRUPT HANDLING ---------------------- */
+static void
+ wavelan_interrupt(int, /* Interrupt handler */
+ void *,
+ struct pt_regs *);
+static void
+ wavelan_watchdog(u_long); /* Transmission watchdog */
+/* ------------------- CONFIGURATION CALLBACKS ------------------- */
+static int
+ wavelan_open(device *), /* Open the device */
+ wavelan_close(device *), /* Close the device */
+ wavelan_config(device *); /* Configure one device */
+extern int
+ wavelan_probe(device *); /* See Space.c */
+
+/**************************** VARIABLES ****************************/
+
+/*
+ * This is the root of the linked list of wavelan drivers
+ * It is used to verify that we don't reuse the same base address
+ * for two different drivers and to do the cleanup when
+ * removing the module.
+ */
+static net_local * wavelan_list = (net_local *) NULL;
+
+/*
+ * This table is used to translate the psa value to irq number
+ * and vice versa...
+ */
+static u_char irqvals[] =
+{
+ 0, 0, 0, 0x01,
+ 0x02, 0x04, 0, 0x08,
+ 0, 0, 0x10, 0x20,
+ 0x40, 0, 0, 0x80,
+};
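+
+/*
+ * A minimal sketch, illustrative only (the real wv_irq_to_psa() and
+ * wv_psa_to_irq() declared above are defined in the driver proper), of how
+ * a table like irqvals[] is typically used: the IRQ number indexes the
+ * table to get the PSA code, and the reverse lookup scans for a match.
+ */
+#if 0	/* example only, not compiled */
+static u_char example_irq_to_psa(int irq)
+{
+  return (irq >= 0 && irq < (int) sizeof(irqvals)) ? irqvals[irq] : 0;
+}
+
+static int example_psa_to_irq(u_char psa)
+{
+  int irq;
+
+  for (irq = 0; irq < (int) sizeof(irqvals); irq++)
+    if (irqvals[irq] != 0 && irqvals[irq] == psa)
+      return irq;
+  return -1;	/* no matching PSA code */
+}
+#endif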
+
+/*
+ * Table of the available i/o addresses (base addresses) for wavelan
+ */
+static unsigned short iobase[] =
+{
+#if 0
+ /* Leave out 0x3C0 for now -- seems to clash with some video
+ * controllers.
+ * Leave out the others too -- we will always use 0x390 and leave
+ * 0x300 for the Ethernet device.
+ * Jean II : 0x3E0 is really fine as well...
+ */
+ 0x300, 0x390, 0x3E0, 0x3C0
+#endif /* 0 */
+ 0x390, 0x3E0
+};
+
+#ifdef MODULE
+/* Name of the devices (memory allocation) */
+static char devname[4][IFNAMSIZ] = { "", "", "", "" };
+
+/* Parameters set by insmod */
+static int io[4] = { 0, 0, 0, 0 };
+static int irq[4] = { 0, 0, 0, 0 };
+static char * name[4] = { devname[0], devname[1], devname[2], devname[3] };
+#endif /* MODULE */
+
+#endif /* WAVELAN_P_H */
diff --git a/linux/src/drivers/net/wd.c b/linux/src/drivers/net/wd.c
new file mode 100644
index 0000000..dd87902
--- /dev/null
+++ b/linux/src/drivers/net/wd.c
@@ -0,0 +1,513 @@
+/* wd.c: A WD80x3 ethernet driver for linux. */
+/*
+ Written 1993-94 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+
+ This software may be used and distributed according to the terms
+ of the GNU Public License, incorporated herein by reference.
+
+ The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+ Center of Excellence in Space Data and Information Sciences
+ Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+ This is a driver for WD8003 and WD8013 "compatible" ethercards.
+
+ Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013.
+
+ Changelog:
+
+ Paul Gortmaker : multiple card support for module users, support
+ for non-standard memory sizes.
+
+
+*/
+
+static const char *version =
+ "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "8390.h"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int wd_portlist[] =
+{0x300, 0x280, 0x380, 0x240, 0};
+
+int wd_probe(struct device *dev);
+int wd_probe1(struct device *dev, int ioaddr);
+
+static int wd_open(struct device *dev);
+static void wd_reset_8390(struct device *dev);
+static void wd_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+static void wd_block_input(struct device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void wd_block_output(struct device *dev, int count,
+ const unsigned char *buf, const int start_page);
+static int wd_close_card(struct device *dev);
+
+
+#define WD_START_PG 0x00 /* First page of TX buffer */
+#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */
+#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */
+
+#define WD_CMDREG 0 /* Offset to ASIC command register. */
+#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */
+#define WD_MEMENB 0x40 /* Enable the shared memory. */
+#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */
+#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */
+#define NIC16 0x40 /* Enable 16 bit access from the 8390. */
+#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */
+#define WD_IO_EXTENT 32
+
+
+/* Probe for the WD8003 and WD8013. These cards have the station
+ address PROM at I/O ports <base>+8 to <base>+13, with a checksum
+   following. A Soundblaster can have the same checksum as a WD ethercard,
+ so we have an extra exclusionary check for it.
+
+ The wd_probe1() routine initializes the card and fills the
+ station address field. */
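+
+/* The checksum rule wd_probe1() enforces, restated as a small illustrative
+   sketch (assuming the same 8-byte PROM window at <base>+8 that the probe
+   below reads): the six address bytes plus the two bytes that follow must
+   sum to 0xFF modulo 256. */
+#if 0	/* example only, not compiled */
+static int example_wd_prom_checksum_ok(int ioaddr)
+{
+	int i, sum = 0;
+	for (i = 0; i < 8; i++)
+		sum += inb(ioaddr + 8 + i);
+	return (sum & 0xff) == 0xff;
+}
+#endif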
+
+#ifdef HAVE_DEVLIST
+struct netdev_entry wd_drv =
+{"wd", wd_probe1, WD_IO_EXTENT, wd_portlist};
+#else
+
+int wd_probe(struct device *dev)
+{
+ int i;
+ int base_addr = dev ? dev->base_addr : 0;
+
+ if (base_addr > 0x1ff) /* Check a single specified location. */
+ return wd_probe1(dev, base_addr);
+ else if (base_addr != 0) /* Don't probe at all. */
+ return ENXIO;
+
+ for (i = 0; wd_portlist[i]; i++) {
+ int ioaddr = wd_portlist[i];
+ if (check_region(ioaddr, WD_IO_EXTENT))
+ continue;
+ if (wd_probe1(dev, ioaddr) == 0)
+ return 0;
+ }
+
+ return ENODEV;
+}
+#endif
+
+int wd_probe1(struct device *dev, int ioaddr)
+{
+ int i;
+ int checksum = 0;
+ int ancient = 0; /* An old card without config registers. */
+ int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ const char *model_name;
+ static unsigned version_printed = 0;
+
+ for (i = 0; i < 8; i++)
+ checksum += inb(ioaddr + 8 + i);
+ if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */
+ || inb(ioaddr + 9) == 0xff
+ || (checksum & 0xff) != 0xFF)
+ return ENODEV;
+
+ /* We should have a "dev" from Space.c or the static module table. */
+ if (dev == NULL) {
+ printk("wd.c: Passed a NULL device.\n");
+ dev = init_etherdev(0, 0);
+ }
+
+ /* Check for semi-valid mem_start/end values if supplied. */
+ if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) {
+ printk(KERN_WARNING "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n");
+ dev->mem_start = 0;
+ dev->mem_end = 0;
+ }
+
+ if (ei_debug && version_printed++ == 0)
+ printk("%s", version);
+
+ printk("%s: WD80x3 at %#3x, ", dev->name, ioaddr);
+ for (i = 0; i < 6; i++)
+ printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i));
+
+ /* The following PureData probe code was contributed by
+ Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
+ configuration differently from others so we have to check for them.
+ This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card.
+ */
+ if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') {
+ unsigned char reg5 = inb(ioaddr+5);
+
+ switch (inb(ioaddr+2)) {
+ case 0x03: word16 = 0; model_name = "PDI8023-8"; break;
+ case 0x05: word16 = 0; model_name = "PDUC8023"; break;
+ case 0x0a: word16 = 1; model_name = "PDI8023-16"; break;
+ /* Either 0x01 (dumb) or they've released a new version. */
+ default: word16 = 0; model_name = "PDI8023"; break;
+ }
+ dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12;
+ dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1;
+ } else { /* End of PureData probe */
+ /* This method of checking for a 16-bit board is borrowed from the
+ we.c driver. A simpler method is just to look in ASIC reg. 0x03.
+	   I'm comparing the two methods in alpha test to make certain they
+ return the same result. */
+ /* Check for the old 8 bit board - it has register 0/8 aliasing.
+ Do NOT check i>=6 here -- it hangs the old 8003 boards! */
+ for (i = 0; i < 6; i++)
+ if (inb(ioaddr+i) != inb(ioaddr+8+i))
+ break;
+ if (i >= 6) {
+ ancient = 1;
+ model_name = "WD8003-old";
+ word16 = 0;
+ } else {
+ int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */
+ outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */
+ if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */
+ && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */
+ int asic_reg5 = inb(ioaddr+WD_CMDREG5);
+ /* Magic to set ASIC to word-wide mode. */
+ outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5);
+ outb(tmp, ioaddr+1);
+ model_name = "WD8013";
+ word16 = 1; /* We have a 16bit board here! */
+ } else {
+ model_name = "WD8003";
+ word16 = 0;
+ }
+ outb(tmp, ioaddr+1); /* Restore original reg1 value. */
+ }
+#ifndef final_version
+ if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01))
+ printk("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).",
+ word16 ? 16 : 8, (inb(ioaddr+1) & 0x01) ? 16 : 8);
+#endif
+ }
+
+#if defined(WD_SHMEM) && WD_SHMEM > 0x80000
+ /* Allow a compile-time override. */
+ dev->mem_start = WD_SHMEM;
+#else
+ if (dev->mem_start == 0) {
+ /* Sanity and old 8003 check */
+ int reg0 = inb(ioaddr);
+ if (reg0 == 0xff || reg0 == 0) {
+ /* Future plan: this could check a few likely locations first. */
+ dev->mem_start = 0xd0000;
+ printk(" assigning address %#lx", dev->mem_start);
+ } else {
+ int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f;
+ /* Some boards don't have the register 5 -- it returns 0xff. */
+ if (high_addr_bits == 0x1f || word16 == 0)
+ high_addr_bits = 0x01;
+ dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19);
+ }
+ }
+#endif
+
+ /* The 8390 isn't at the base address -- the ASIC regs are there! */
+ dev->base_addr = ioaddr+WD_NIC_OFFSET;
+
+ if (dev->irq < 2) {
+ int irqmap[] = {9,3,5,7,10,11,15,4};
+ int reg1 = inb(ioaddr+1);
+ int reg4 = inb(ioaddr+4);
+ if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
+ short nic_addr = ioaddr+WD_NIC_OFFSET;
+
+ /* We have an old-style ethercard that doesn't report its IRQ
+ line. Do autoirq to find the IRQ line. Note that this IS NOT
+ a reliable way to trigger an interrupt. */
+ outb_p(E8390_NODMA + E8390_STOP, nic_addr);
+ outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */
+ autoirq_setup(0);
+ outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */
+ outb_p(0x00, nic_addr + EN0_RCNTLO);
+ outb_p(0x00, nic_addr + EN0_RCNTHI);
+ outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */
+ dev->irq = autoirq_report(2);
+ outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
+
+ if (ei_debug > 2)
+ printk(" autoirq is %d", dev->irq);
+ if (dev->irq < 2)
+ dev->irq = word16 ? 10 : 5;
+ } else
+ dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)];
+ } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */
+ dev->irq = 9;
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+ if (request_irq(dev->irq, ei_interrupt, 0, model_name, NULL)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return EAGAIN;
+ }
+
+ /* Allocate dev->priv and fill in 8390 specific dev fields. */
+ if (ethdev_init(dev)) {
+ printk (" unable to get memory for dev->priv.\n");
+ free_irq(dev->irq, NULL);
+ return -ENOMEM;
+ }
+
+  /* OK, we are certain this is going to work. Set up the device. */
+ request_region(ioaddr, WD_IO_EXTENT, model_name);
+
+ ei_status.name = model_name;
+ ei_status.word16 = word16;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+
+ /* Don't map in the shared memory until the board is actually opened. */
+ dev->rmem_start = dev->mem_start + TX_PAGES*256;
+
+  /* Some cards (e.g. WD8003EBT) can be jumpered for more (32k!) memory. */
+ if (dev->mem_end != 0) {
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ } else {
+ ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG;
+ dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256;
+ }
+ dev->rmem_end = dev->mem_end;
+
+ printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
+ model_name, dev->irq, dev->mem_start, dev->mem_end-1);
+
+ ei_status.reset_8390 = &wd_reset_8390;
+ ei_status.block_input = &wd_block_input;
+ ei_status.block_output = &wd_block_output;
+ ei_status.get_8390_hdr = &wd_get_8390_hdr;
+ dev->open = &wd_open;
+ dev->stop = &wd_close_card;
+ NS8390_init(dev, 0);
+
+#if 1
+ /* Enable interrupt generation on softconfig cards -- M.U */
+ /* .. but possibly potentially unsafe - Donald */
+ if (inb(ioaddr+14) & 0x20)
+ outb(inb(ioaddr+4)|0x80, ioaddr+4);
+#endif
+
+ return 0;
+}
+
+static int
+wd_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ /* Map in the shared memory. Always set register 0 last to remain
+ compatible with very old boards. */
+ ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB;
+ ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16;
+
+ if (ei_status.word16)
+ outb(ei_status.reg5, ioaddr+WD_CMDREG5);
+ outb(ei_status.reg0, ioaddr); /* WD_CMDREG */
+
+ ei_open(dev);
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static void
+wd_reset_8390(struct device *dev)
+{
+ int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ outb(WD_RESET, wd_cmd_port);
+ if (ei_debug > 1) printk("resetting the WD80x3 t=%lu...", jiffies);
+ ei_status.txing = 0;
+
+ /* Set up the ASIC registers, just in case something changed them. */
+ outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port);
+ if (ei_status.word16)
+ outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
+
+ if (ei_debug > 1) printk("reset done\n");
+ return;
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+static void
+wd_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ unsigned long hdr_start = dev->mem_start + ((ring_page - WD_START_PG)<<8);
+
+ /* We'll always get a 4 byte header read followed by a packet read, so
+ we enable 16 bit mode before the header, and disable after the body. */
+ if (ei_status.word16)
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+
+#ifdef notdef
+ /* Officially this is what we are doing, but the readl() is faster */
+ memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
+#else
+ ((unsigned int*)hdr)[0] = readl(hdr_start);
+#endif
+}
+
+/* Block input and output are easy on shared memory ethercards, and trivial
+   on the Western Digital card where there is no choice of how to do it.
+   The only complications are that the ring buffer wraps, and that we need
+   to switch between 8- and 16-bit modes. */
+
+static void
+wd_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ unsigned long xfer_start = dev->mem_start + ring_offset - (WD_START_PG<<8);
+
+ if (xfer_start + count > dev->rmem_end) {
+ /* We must wrap the input move. */
+ int semi_count = dev->rmem_end - xfer_start;
+ memcpy_fromio(skb->data, xfer_start, semi_count);
+ count -= semi_count;
+ memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
+ } else {
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_io_copy_and_sum(skb, xfer_start, count, 0);
+ }
+
+ /* Turn off 16 bit access so that reboot works. ISA brain-damage */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+}
+
+static void
+wd_block_output(struct device *dev, int count, const unsigned char *buf,
+ int start_page)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+ long shmem = dev->mem_start + ((start_page - WD_START_PG)<<8);
+
+
+ if (ei_status.word16) {
+ /* Turn on and off 16 bit access so that reboot works. */
+ outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ memcpy_toio(shmem, buf, count);
+ outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5);
+ } else
+ memcpy_toio(shmem, buf, count);
+}
+
+
+static int
+wd_close_card(struct device *dev)
+{
+ int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */
+
+ if (ei_debug > 1)
+ printk("%s: Shutting down ethercard.\n", dev->name);
+ ei_close(dev);
+
+ /* Change from 16-bit to 8-bit shared memory so reboot works. */
+ if (ei_status.word16)
+ outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 );
+
+ /* And disable the shared memory. */
+ outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+#define MAX_WD_CARDS 4 /* Max number of wd cards per module */
+#define NAMELEN 8 /* # of chars for storing dev->name */
+static char namelist[NAMELEN * MAX_WD_CARDS] = { 0, };
+static struct device dev_wd[MAX_WD_CARDS] = {
+ {
+ NULL, /* assign a chunk of namelist[] below */
+ 0, 0, 0, 0,
+ 0, 0,
+ 0, 0, 0, NULL, NULL
+ },
+};
+
+static int io[MAX_WD_CARDS] = { 0, };
+static int irq[MAX_WD_CARDS] = { 0, };
+static int mem[MAX_WD_CARDS] = { 0, };
+static int mem_end[MAX_WD_CARDS] = { 0, }; /* for non std. mem size */
+
+/* This is set up so that only a single autoprobe takes place per call.
+ISA device autoprobes on a running machine are not recommended. */
+int
+init_module(void)
+{
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ struct device *dev = &dev_wd[this_dev];
+ dev->name = namelist+(NAMELEN*this_dev);
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->mem_start = mem[this_dev];
+ dev->mem_end = mem_end[this_dev];
+ dev->init = wd_probe;
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) break; /* only autoprobe 1st one */
+ printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n");
+ }
+ if (register_netdev(dev) != 0) {
+ printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]);
+ if (found != 0) return 0; /* Got at least one. */
+ return -ENXIO;
+ }
+ found++;
+ }
+
+ return 0;
+}
+
+void
+cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) {
+ struct device *dev = &dev_wd[this_dev];
+ if (dev->priv != NULL) {
+ int ioaddr = dev->base_addr - WD_NIC_OFFSET;
+ kfree(dev->priv);
+ dev->priv = NULL;
+ free_irq(dev->irq, NULL);
+ irq2dev_map[dev->irq] = NULL;
+ release_region(ioaddr, WD_IO_EXTENT);
+ unregister_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c wd.c"
+ * version-control: t
+ * tab-width: 4
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/linux/src/drivers/net/winbond-840.c b/linux/src/drivers/net/winbond-840.c
new file mode 100644
index 0000000..556d8ad
--- /dev/null
+++ b/linux/src/drivers/net/winbond-840.c
@@ -0,0 +1,1558 @@
+/* winbond-840.c: A Linux network device driver for the Winbond W89c840. */
+/*
+ Written 1998-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/drivers.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+
+   Do not remove the copyright information.
+ Do not change the version information unless an improvement has been made.
+ Merely removing my name, as Compex has done in the past, does not count
+ as an improvement.
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"winbond-840.c:v1.10 7/22/2003 Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/drivers.html\n";
+
+/* Automatically extracted configuration info:
+probe-func: winbond840_probe
+config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
+
+c-help-name: Winbond W89c840 PCI Ethernet support
+c-help-symbol: CONFIG_WINBOND_840
+c-help: The winbond-840.c driver is for the Winbond W89c840 chip.
+c-help: This chip is named TX9882 on the Compex RL100-ATX board.
+c-help: More specific information and updates are available from
+c-help: http://www.scyld.com/network/drivers.html
+*/
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The '840 uses a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 32;
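+
+/* Illustrative sketch only: a 64-element hash filter maps a 6-bit slice of
+   the Ethernet CRC of each multicast address to one bit in two 32-bit
+   filter words.  Which six CRC bits the '840 actually uses is chip-specific
+   and is handled by set_rx_mode() further down; the bit choice here is a
+   placeholder. */
+#if 0	/* example only, not compiled */
+static void example_hash_multicast_addr(unsigned char *addr, u32 filter[2])
+{
+	int bit = ether_crc(6, addr) >> 26;	/* hypothetical: top 6 CRC bits */
+	filter[bit >> 5] |= 1 << (bit & 31);
+}
+#endif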
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability; however, setting full_duplex[] is deprecated.
+ The media type is usually passed in 'options[]'.
+   The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, confuses the system network buffer limits,
+ and wastes memory.
+ Larger receive rings merely waste memory.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE 32
+
+/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
+ To avoid overflowing we don't queue again until we have room for a
+ full-size packet.
+ */
+#define TX_FIFO_SIZE (2048)
+#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
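+
+/* A minimal sketch of the room test this limit implies (start_tx() below
+   applies the same check): queued bytes minus already-unqueued bytes is
+   what may still be in the FIFO, and once that exceeds TX_BUG_FIFO_LIMIT
+   there is no longer room for another full-size (1514-byte) frame. */
+#if 0	/* example only, not compiled */
+static int example_tx_fifo_has_room(unsigned int tx_q_bytes,
+				    unsigned int tx_unq_bytes)
+{
+	return (tx_q_bytes - tx_unq_bytes) <= TX_BUG_FIFO_LIMIT;
+}
+#endif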
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung.
+ Re-autonegotiation may take up to 3 seconds.
+ */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Configure the PCI bus bursts and FIFO thresholds.
+ 486: Set 8 longword cache alignment, 8 longword burst.
+ 586: Set 16 longword cache alignment, no burst limit.
+ Cache alignment bits 15:14 Burst length 13:8
+ 0000 <not allowed> 0000 align to cache 0800 8 longwords
+ 4000 8 longwords 0100 1 longword 1000 16 longwords
+ 8000 16 longwords 0200 2 longwords 2000 32 longwords
+ C000 32 longwords 0400 4 longwords
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+#define TX_DESC_SIZE 16
+#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
+static int csr0 = 0x00100000 | 0xE000 | TX_DESC_SIZE;
+#elif defined(__alpha__) || defined(__x86_64) || defined(__ia64)
+static int csr0 = 0xE000 | TX_DESC_SIZE;
+#elif defined(__i386__)
+static int csr0 = 0xE000 | TX_DESC_SIZE;
+#else
+static int csr0 = 0xE000 | TX_DESC_SIZE;
+#warning Processor architecture unknown!
+#endif
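+
+/* A small illustrative decode of csr0 against the table in the comment
+   above: bits 15:14 select the cache alignment and bits 13:8 the burst
+   length, so the default 0xE000 requests 32-longword alignment with a
+   32-longword burst, plus the Tx descriptor size in the low bits. */
+#if 0	/* example only, not compiled */
+static void example_decode_csr0(int value)
+{
+	static const int align_lw[4] = { 0, 8, 16, 32 };	/* bits 15:14 */
+	int burst_lw = (value >> 8) & 0x3f;			/* bits 13:8 */
+
+	printk(KERN_DEBUG "csr0 %#x: %d-longword cache alignment, "
+		   "%d-longword burst.\n",
+		   value, align_lw[(value >> 14) & 3], burst_lw);
+}
+#endif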
+
+
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(debug, "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is for the Winbond w89c840 chip.
+
+II. Board-specific settings
+
+None.
+
+III. Driver operation
+
+This chip is very similar to the Digital 21*4* "Tulip" family. The first
+twelve registers and the descriptor format are nearly identical. Read a
+Tulip manual for operational details.
+
+A significant difference is that the multicast filter and station address are
+stored in registers rather than loaded through a pseudo-transmit packet.
+
+Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
+full-sized packet we must use both data buffers in a descriptor. Thus the
+driver uses ring mode where descriptors are implicitly sequential in memory,
+rather than using the second descriptor address as a chain pointer to
+subsequent descriptors.
+
+IV. Notes
+
+If you are going to almost clone a Tulip, why not go all the way and avoid
+the need for a new driver?
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+http://www.winbond.com.tw/
+
+IVc. Errata
+
+A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
+correctly detect a full FIFO, and queuing more than 2048 bytes may result in
+silent data corruption.
+
+*/
+
+
+
+/*
+ PCI probe table.
+*/
+static void *w840_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt);
+static int winbond_pwr_event(void *dev_instance, int event);
+enum chip_capability_flags {
+ CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
+#ifdef USE_IO_OPS
+#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
+#else
+#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+	{"Winbond W89c840",			/* Sometimes a Level-One switch card. */
+ { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ {"Winbond W89c840", { 0x08401050, 0xffffffff, },
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
+ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
+ {0,}, /* 0 terminated list. */
+};
+
+struct drv_id_info winbond840_drv_id = {
+ "winbond-840", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ w840_probe1, winbond_pwr_event };
+
+/* This driver was written to use PCI memory space; however, some x86 systems
+ work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
+ accesses instead of memory space. */
+
+#ifdef USE_IO_OPS
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb inb
+#define readw inw
+#define readl inl
+#define writeb outb
+#define writew outw
+#define writel outl
+#endif
+
+/* Offsets to the Command and Status Registers, "CSRs".
+ While similar to the Tulip, these registers are longword aligned.
+ Note: It's not useful to define symbolic names for every register bit in
+ the device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+*/
+enum w840_offsets {
+ PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
+ RxRingPtr=0x0C, TxRingPtr=0x10,
+ IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
+ RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
+ CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
+ MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
+ CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
+};
+
+/* Bits in the interrupt status/enable registers. */
+/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
+enum intr_status_bits {
+ NormalIntr=0x10000, AbnormalIntr=0x8000,
+ IntrPCIErr=0x2000, TimerInt=0x800,
+ IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
+ TxFIFOUnderflow=0x20, RxErrIntr=0x10,
+ TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
+};
+
+/* Bits in the NetworkConfig register. */
+enum rx_mode_bits {
+ TxOn=0x2000, RxOn=0x0002, FullDuplex=0x0200,
+ AcceptErr=0x80, AcceptRunt=0x40, /* Not used */
+ AcceptBroadcast=0x20, AcceptMulticast=0x10, AcceptAllPhys=0x08,
+};
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
+ MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
+};
+
+/* The Tulip-like Rx and Tx buffer descriptors. */
+struct w840_rx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1;
+ u32 next_desc;
+};
+
+struct w840_tx_desc {
+ s32 status;
+ s32 length;
+ u32 buffer1, buffer2; /* We use only buffer 1. */
+ char pad[TX_DESC_SIZE - 16];
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+ DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
+ DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
+ DescIntr=0x80000000,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct w840_rx_desc rx_ring[RX_RING_SIZE];
+ struct w840_tx_desc tx_ring[TX_RING_SIZE];
+ struct net_device *next_module; /* Link for devices of this type. */
+ void *priv_addr; /* Unaligned address for kfree */
+ const char *product_name;
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+ /* Frequently used values: keep some adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ int csr0, csr6;
+ unsigned int polling; /* Switched to polling mode. */
+ int max_interrupt_work;
+
+ struct w840_rx_desc *rx_head_desc;
+ unsigned int rx_ring_size;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ unsigned int tx_ring_size;
+ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_q_bytes, tx_unq_bytes;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+
+ /* These values track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* Rx filter. */
+ u32 cur_rx_mode;
+ u32 rx_filter[2];
+ int multicast_filter_limit;
+
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(unsigned long data);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static inline unsigned ether_crc(int length, unsigned char *data);
+static void set_rx_mode(struct net_device *dev);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+
+
+
+/* A list of our installed devices, for removing the driver module. */
+static struct net_device *root_net_dev = NULL;
+
+static void *w840_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int card_idx)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+#if LINUX_VERSION_CODE < 0x20155
+ printk(KERN_INFO "%s: %s at 0x%lx, %2.2x:%2.2x",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+ pci_bus_number(pdev), pci_devfn(pdev)>>3);
+#else
+ printk(KERN_INFO "%s: %s at 0x%lx, %2.2x:%2.2x",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+ pdev->bus->number, pdev->devfn>>3);
+#endif
+
+ /* Warning: validate for big-endian machines. */
+ for (i = 0; i < 3; i++)
+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Out of memory is very unlikely. */
+ if (priv_mem == NULL)
+ return NULL;
+
+#ifdef USE_IO_OPS
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+#endif
+
+ /* Reset the chip to erase previous misconfiguration.
+ No hold time required! */
+ writel(0x00000001, ioaddr + PCIBusCfg);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ /* The descriptor lists must be aligned. */
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+ np->tx_ring_size = TX_RING_SIZE;
+ np->rx_ring_size = RX_RING_SIZE;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ if ((card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ || (np->drv_flags & AlwaysFDX))
+ np->full_duplex = 1;
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+ dev->hard_start_xmit = &start_tx;
+ dev->stop = &netdev_close;
+ dev->get_stats = &get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(dev, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(dev, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ if (phy_idx == 0) {
+ printk(KERN_WARNING "%s: MII PHY not found -- this device may "
+ "not operate correctly.\n"
+ KERN_WARNING "%s: If this is a switch card, explicitly "
+ "force full duplex on this interface.\n",
+ dev->name, dev->name);
+ if (np->drv_flags & FDXOnNoMII) {
+ printk(KERN_INFO "%s: Assuming a switch card, forcing full "
+ "duplex.\n", dev->name);
+ np->full_duplex = np->duplex_lock = 1;
+ }
+ }
+ }
+ /* Allow forcing the media type. */
+ if (np->full_duplex) {
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
+ }
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ np->medialock = 1;
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (np->full_duplex ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
+ return dev;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+ The Winbond NIC uses serial bit streams generated by the host processor. */
+
+/* Delay between EEPROM clock transitions.
+ This "delay" is to force out buffered PCI writes. */
+#define eeprom_delay(ee_addr) readl(ee_addr)
+
+enum EEPROM_Ctrl_Bits {
+ EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
+ EE_ChipSelect=0x801, EE_DataIn=0x08,
+};
+
+/* The EEPROM commands always start with 01.. preamble bits.
+ Commands are prepended to the variable-length address. */
+enum EEPROM_Cmds {
+ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+static int eeprom_read(long addr, int location)
+{
+ int i;
+ int retval = 0;
+ long ee_addr = addr + EECtrl;
+ int read_cmd = location | EE_ReadCmd;
+
+ writel(EE_ChipSelect, ee_addr);
+ /* Shift the read command bits out. */
+ for (i = 10; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+ writel(dataval, ee_addr);
+ eeprom_delay(ee_addr);
+ writel(dataval | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ eeprom_delay(ee_addr);
+ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
+ writel(EE_ChipSelect, ee_addr);
+ eeprom_delay(ee_addr);
+ }
+
+ /* Terminate the EEPROM access. */
+ writel(0, ee_addr);
+ return retval;
+}
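+
+/* Usage sketch, illustrative only (w840_probe1() above does the real work,
+   and the byte order assumes a little-endian host): the first three 16-bit
+   EEPROM words hold the station address, so reading the MAC address is
+   three eeprom_read() calls. */
+#if 0	/* example only, not compiled */
+static void example_read_station_address(long ioaddr, unsigned char mac[6])
+{
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		int w = eeprom_read(ioaddr, i);
+		mac[2*i]     = w & 0xff;
+		mac[2*i + 1] = (w >> 8) & 0xff;
+	}
+}
+#endif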
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+   The maximum data clock rate is 2.5 MHz.
+ The timing is decoupled from the processor clock by flushing the write
+ from the CPU write buffer with a following read, and using PCI
+ transaction time. */
+#define mdio_in(mdio_addr) readl(mdio_addr)
+#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
+#define mdio_delay(mdio_addr) readl(mdio_addr)
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with older transceivers, so the extra
+ code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required = 1;
+
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(long mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ mdio_out(MDIO_WRITE1, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ long mdio_addr = dev->base_addr + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 20; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_DataIn) ? 1 : 0);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+}
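+
+/* Usage sketch, illustrative only: MII register 1 is the basic status
+   register and bit 0x0004 reports link status, so a link check through the
+   routine above is a single mdio_read() on the first detected PHY. */
+#if 0	/* example only, not compiled */
+static int example_link_is_up(struct net_device *dev)
+{
+	struct netdev_private *np = (struct netdev_private *)dev->priv;
+	int bmsr = mdio_read(dev, np->phys[0], 1);	/* MII BMSR */
+
+	return bmsr != 0xffff && (bmsr & 0x0004) != 0;
+}
+#endif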
+
+static void mdio_write(struct net_device *dev, int phy_id, int reg, int value)
+{
+ long mdio_addr = dev->base_addr + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (reg<<18) | value;
+ int i;
+
+ if (mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ mdio_out(dataval, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ mdio_out(MDIO_EnbIn, mdio_addr);
+ mdio_delay(mdio_addr);
+ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay(mdio_addr);
+ }
+ return;
+}
+
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ init_ring(dev);
+
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+
+ for (i = 0; i < 6; i++)
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+ np->csr0 = csr0;
+ writel(np->csr0, ioaddr + PCIBusCfg);
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ writel(0, ioaddr + RxStartDemand);
+ np->csr6 = np->full_duplex ? 0x20022202 : 0x20022002;
+ check_duplex(dev);
+ set_rx_mode(dev);
+
+ netif_start_tx_queue(dev);
+
+ /* Clear and Enable interrupts by setting the interrupt mask.
+ See enum intr_status_bits above for bit guide.
+ We omit: TimerInt, IntrRxDied, IntrTxStopped
+ */
+ writel(0x1A0F5, ioaddr + IntrStatus);
+ writel(0x1A0F5, ioaddr + IntrEnable);
+
+ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
+ int negotiated = mii_reg5 & np->advertising;
+ int duplex;
+
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+ "negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ np->csr6 &= ~0x200;
+ np->csr6 |= duplex ? 0x200 : 0;
+ }
+}
+
+static void netdev_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+ int old_csr6 = np->csr6;
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
+ "config %8.8x.\n",
+ dev->name, intr_status, (int)readl(ioaddr + NetworkConfig));
+ /* Check for blocked interrupts. */
+ if (np->polling) {
+ if (intr_status & 0x1ffff) {
+ intr_handler(dev->irq, dev, 0);
+ next_tick = 1;
+ np->polling = 1;
+ } else if (++np->polling > 10*HZ)
+ np->polling = 0;
+ else
+ next_tick = 2;
+ } else if ((intr_status & 0x1ffff)) {
+ np->polling = 1;
+ }
+
+ if (netif_queue_paused(dev) &&
+ np->cur_tx - np->dirty_tx > 1 &&
+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
+ tx_timeout(dev);
+ }
+ check_duplex(dev);
+ if (np->csr6 != old_csr6) {
+ writel(np->csr6 & ~0x0002, ioaddr + NetworkConfig);
+ writel(np->csr6 | 0x2002, ioaddr + NetworkConfig);
+ }
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
+ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+
+#ifndef __alpha__
+ if (np->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
+ for (i = 0; i < np->rx_ring_size; i++)
+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ for (i = 0; i < np->tx_ring_size; i++)
+ printk(" %8.8x", np->tx_ring[i].status);
+ printk("\n");
+ }
+#endif
+
+ /* Perhaps we should reinitialize the hardware here. Just trigger a
+ Tx demand for now. */
+ writel(0, ioaddr + TxStartDemand);
+ dev->if_port = 0;
+	/* Stop and restart the chip's Tx processes. */
+
+ dev->trans_start = jiffies;
+ np->stats.tx_errors++;
+ return;
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+ np->tx_full = 0;
+ np->cur_tx = np->dirty_tx = 0;
+ np->tx_q_bytes = np->tx_unq_bytes = 0;
+
+ np->cur_rx = np->dirty_rx = 0;
+ np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
+ np->rx_head_desc = &np->rx_ring[0];
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < np->rx_ring_size; i++) {
+ np->rx_ring[i].length = np->rx_buf_sz;
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ np->rx_ring[i-1].length |= DescEndRing;
+ np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < np->rx_ring_size; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
+ np->rx_ring[i].status = DescOwn | DescIntr;
+ }
+ np->dirty_rx = (unsigned int)(i - np->rx_ring_size);
+
+ for (i = 0; i < np->tx_ring_size; i++) {
+ np->tx_skbuff[i] = 0;
+ np->tx_ring[i].status = 0;
+ }
+ return;
+}
+
+static int start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+ /* Block a timer-based transmit from overlapping. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ tx_timeout(dev);
+ return 1;
+ }
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % np->tx_ring_size;
+
+ np->tx_skbuff[entry] = skb;
+ np->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
+
+#define one_buffer
+#define BPT 1022
+#if defined(one_buffer)
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ if (entry >= np->tx_ring_size-1) /* Wrap ring */
+ np->tx_ring[entry].length |= DescIntr | DescEndRing;
+ np->tx_ring[entry].status = DescOwn;
+ np->cur_tx++;
+#elif defined(two_buffer)
+ if (skb->len > BPT) {
+ unsigned int entry1 = ++np->cur_tx % np->tx_ring_size;
+ np->tx_ring[entry].length = DescStartPkt | BPT;
+ np->tx_ring[entry1].length = DescEndPkt | (skb->len - BPT);
+ np->tx_ring[entry1].buffer1 = virt_to_bus((skb->data) + BPT);
+ np->tx_ring[entry1].status = DescOwn;
+ np->tx_ring[entry].status = DescOwn;
+ if (entry >= np->tx_ring_size-1)
+ np->tx_ring[entry].length |= DescIntr|DescEndRing;
+ else if (entry1 >= np->tx_ring_size-1)
+ np->tx_ring[entry1].length |= DescIntr|DescEndRing;
+ np->cur_tx++;
+ } else {
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ if (entry >= np->tx_ring_size-1) /* Wrap ring */
+ np->tx_ring[entry].length |= DescIntr | DescEndRing;
+ np->tx_ring[entry].status = DescOwn;
+ np->cur_tx++;
+ }
+#elif defined(split_buffer)
+ {
+ /* Work around the Tx-FIFO-full bug by splitting our transmit packet
+ into two pieces, the first which may be loaded without overflowing
+ the FIFO, and the second which contains the remainder of the
+ packet. When we get a Tx-done interrupt that frees enough room
+ in the FIFO we mark the remainder of the packet as loadable.
+
+ This has the problem that the Tx descriptors are written both
+ here and in the interrupt handler.
+ */
+
+ int buf1size = TX_FIFO_SIZE - (np->tx_q_bytes - np->tx_unq_bytes);
+ int buf2size = skb->len - buf1size;
+
+ if (buf2size <= 0) { /* We fit into one descriptor. */
+ np->tx_ring[entry].length = DescWholePkt | skb->len;
+ } else { /* We must use two descriptors. */
+ unsigned int entry2;
+ np->tx_ring[entry].length = DescIntr | DescStartPkt | buf1size;
+ if (entry >= np->tx_ring_size-1) { /* Wrap ring */
+ np->tx_ring[entry].length |= DescEndRing;
+ entry2 = 0;
+ } else
+ entry2 = entry + 1;
+ np->cur_tx++;
+ np->tx_ring[entry2].buffer1 =
+ virt_to_bus(skb->data + buf1size);
+ np->tx_ring[entry2].length = DescEndPkt | buf2size;
+ if (entry2 >= np->tx_ring_size-1) /* Wrap ring */
+ np->tx_ring[entry2].length |= DescEndRing;
+ }
+ np->tx_ring[entry].status = DescOwn;
+ np->cur_tx++;
+ }
+#endif
+ np->tx_q_bytes += skb->len;
+ writel(0, dev->base_addr + TxStartDemand);
+
+ /* Work around horrible bug in the chip by marking the queue as full
+ when we do not have FIFO room for a maximum sized packet. */
+ if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN) {
+ np->tx_full = 1;
+ netif_stop_tx_queue(dev);
+ } else if ((np->drv_flags & HasBrokenTx)
+ && np->tx_q_bytes - np->tx_unq_bytes > TX_BUG_FIFO_LIMIT) {
+ np->tx_full = 1;
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+
+ dev->trans_start = jiffies;
+
+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int work_limit = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readl(ioaddr + IntrStatus);
+
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ writel(intr_status & 0x0001ffff, ioaddr + IntrStatus);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if ((intr_status & (NormalIntr|AbnormalIntr)) == 0
+ || intr_status == 0xffffffff)
+ break;
+
+ if (intr_status & (IntrRxDone | RxNoBuf))
+ netdev_rx(dev);
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % np->tx_ring_size;
+ int tx_status = np->tx_ring[entry].status;
+
+ if (tx_status < 0)
+ break;
+ if (np->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x8000) { /* There was an error, log it. */
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, tx_status);
+ np->stats.tx_errors++;
+ if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
+ if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
+ if (tx_status & 0x0200) np->stats.tx_window_errors++;
+ if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
+ if ((tx_status & 0x0080) && np->full_duplex == 0)
+ np->stats.tx_heartbeat_errors++;
+#ifdef ETHER_STATS
+ if (tx_status & 0x0100) np->stats.collisions16++;
+#endif
+ } else {
+#ifdef ETHER_STATS
+ if (tx_status & 0x0001) np->stats.tx_deferred++;
+#endif
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+#endif
+ np->stats.collisions += (tx_status >> 3) & 15;
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ np->tx_unq_bytes += np->tx_skbuff[entry]->len;
+ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+ if (np->tx_full &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4
+ && np->tx_q_bytes - np->tx_unq_bytes < TX_BUG_FIFO_LIMIT) {
+ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
+ TimerInt | IntrTxStopped))
+ netdev_error(dev, intr_status);
+
+ if (--work_limit < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n", dev->name, intr_status);
+ /* Set the timer to re-enable the other interrupts after
+ 10*82usec ticks. */
+ writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
+ writel(10, ioaddr + GPTimer);
+ break;
+ }
+ } while (1);
+
+ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, (int)readl(ioaddr + IntrStatus));
+
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int netdev_rx(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % np->rx_ring_size;
+ int work_limit = np->dirty_rx + np->rx_ring_size - np->cur_rx;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+ entry, np->rx_ring[entry].status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (--work_limit >= 0) {
+ struct w840_rx_desc *desc = np->rx_head_desc;
+ s32 status = desc->status;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ status);
+ if (status < 0)
+ break;
+ if ((status & 0x38008300) != 0x0300) {
+ if ((status & 0x38000300) != 0x0300) {
+				/* Ignore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, entry %#x status %4.4x!\n",
+ dev->name, np->cur_rx, status);
+ np->stats.rx_length_errors++;
+ }
+ } else if (status & 0x8000) {
+ /* There was a fatal error. */
+ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ np->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x0890) np->stats.rx_length_errors++;
+ if (status & 0x004C) np->stats.rx_frame_errors++;
+ if (status & 0x0002) np->stats.rx_crc_errors++;
+ }
+ } else {
+ struct sk_buff *skb;
+ /* Omit the four octet CRC from the length. */
+ int pkt_len = ((status >> 16) & 0x7ff) - 4;
+
+ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ " status %x.\n", pkt_len, status);
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ /* Call copy + cksum if available. */
+#if HAS_IP_COPYSUM
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ } else {
+ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (bus_to_virt(desc->buffer1) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in netdev_rx: %p vs. %p / %p.\n",
+ dev->name, bus_to_virt(desc->buffer1),
+ skb->head, temp);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ np->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++np->cur_rx) % np->rx_ring_size;
+ np->rx_head_desc = &np->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = np->dirty_rx % np->rx_ring_size;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ np->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
+ }
+ np->rx_ring[entry].status = DescOwn;
+ }
+
+ return 0;
+}
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ if (np->msg_level & NETIF_MSG_MISC)
+ printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
+ dev->name, intr_status);
+ if (intr_status == 0xffffffff)
+ return;
+ if (intr_status & TxFIFOUnderflow) {
+ np->csr6 += 0x4000; /* Bump up the Tx threshold */
+ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Tx underflow, increasing threshold to "
+ "%8.8x.\n", dev->name, np->csr6);
+ writel(np->csr6, ioaddr + NetworkConfig);
+ }
+ if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
+ np->stats.rx_errors++;
+ }
+ if (intr_status & TimerInt) {
+ /* Re-enable other interrupts. */
+ writel(0x1A0F5, ioaddr + IntrEnable);
+ }
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ writel(0, ioaddr + RxStartDemand);
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ /* The chip only needs to report frames it silently dropped. */
+ if (netif_running(dev))
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+
+ return &np->stats;
+}
+
+static unsigned const ethernet_polynomial = 0x04c11db7U;
+static inline u32 ether_crc(int length, unsigned char *data)
+{
+ int crc = -1;
+
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ }
+ return crc;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ u32 rx_mode;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, ~0, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys;
+ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast;
+ } else {
+ struct dev_mc_list *mclist;
+ int i;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F,
+ mc_filter);
+ }
+ rx_mode = AcceptBroadcast | AcceptMulticast;
+ }
+ writel(mc_filter[0], ioaddr + MulticastFilter0);
+ writel(mc_filter[1], ioaddr + MulticastFilter1);
+ np->csr6 &= ~0x00F8;
+ np->csr6 |= rx_mode;
+ writel(np->csr6, ioaddr + NetworkConfig);
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+static void empty_rings(struct net_device *dev)
+{
+ struct netdev_private *np = (void *)dev->priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < np->rx_ring_size; i++) {
+ np->rx_ring[i].status = 0;
+ if (np->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ np->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < np->tx_ring_size; i++) {
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
+ }
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+
+ netif_stop_tx_queue(dev);
+
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
+ "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
+ (int)readl(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ writel(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
+ writel(np->csr6 &= ~0x20FA, ioaddr + NetworkConfig);
+
+ del_timer(&np->timer);
+ if (readl(ioaddr + NetworkConfig) != 0xffffffff)
+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+
+#ifdef __i386__
+ if (np->msg_level & NETIF_MSG_IFDOWN) {
+ int i;
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(np->tx_ring));
+ for (i = 0; i < np->tx_ring_size; i++)
+ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk(KERN_DEBUG "\n" KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(np->rx_ring));
+ for (i = 0; i < np->rx_ring_size; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+ empty_rings(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static int winbond_pwr_event(void *dev_instance, int event)
+{
+ struct net_device *dev = dev_instance;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
+ switch(event) {
+ case DRV_ATTACH:
+ MOD_INC_USE_COUNT;
+ break;
+ case DRV_SUSPEND: {
+ int csr6 = readl(ioaddr + NetworkConfig);
+ /* Disable interrupts, stop the chip, gather stats. */
+ if (csr6 != 0xffffffff) {
+ int csr8 = readl(ioaddr + RxMissed);
+ writel(0x00000000, ioaddr + IntrEnable);
+ writel(csr6 & ~TxOn & ~RxOn, ioaddr + NetworkConfig);
+ np->stats.rx_missed_errors += (unsigned short)csr8;
+ }
+ empty_rings(dev);
+ break;
+ }
+ case DRV_RESUME:
+ writel(np->csr0, ioaddr + PCIBusCfg);
+ init_ring(dev);
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+ writel(0x1A0F5, ioaddr + IntrStatus);
+ writel(0x1A0F5, ioaddr + IntrEnable);
+ writel(np->csr6 | TxOn | RxOn, ioaddr + NetworkConfig);
+ writel(0, ioaddr + RxStartDemand); /* Rx poll demand */
+ set_rx_mode(dev);
+ break;
+ case DRV_DETACH: {
+ struct net_device **devp, **next;
+ if (dev->flags & IFF_UP) {
+ printk(KERN_ERR "%s: Winbond-840 NIC removed while still "
+ "active.\n", dev->name);
+ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ }
+ unregister_netdev(dev);
+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
+#ifndef USE_IO_OPS
+ iounmap((char *)dev->base_addr);
+#endif
+ for (devp = &root_net_dev; *devp; devp = next) {
+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
+ if (*devp == dev) {
+ *devp = *next;
+ break;
+ }
+ }
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(dev);
+ MOD_DEC_USE_COUNT;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&winbond840_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&winbond840_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)(root_net_dev->base_addr));
+#endif
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_net_dev);
+ root_net_dev = next_dev;
+ }
+}
+#else
+int winbond840_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&winbond840_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` winbond-840.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c winbond-840.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/yellowfin.c b/linux/src/drivers/net/yellowfin.c
new file mode 100644
index 0000000..9d7ace8
--- /dev/null
+++ b/linux/src/drivers/net/yellowfin.c
@@ -0,0 +1,1482 @@
+/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
+/*
+ Written 1997-2003 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
+ It also supports the Symbios Logic version of the same chip core.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+ Support information and updates available at
+ http://www.scyld.com/network/yellowfin.html
+ The information and support mailing lists are based at
+ http://www.scyld.com/mailman/listinfo/
+*/
+
+/* These identify the driver base version and may not be removed. */
+static const char version1[] =
+"yellowfin.c:v1.10 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
+static const char version2[] =
+" http://www.scyld.com/network/yellowfin.html\n";
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+
+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
+static int debug = 2;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 64;
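+
+/* Illustrative sketch, compiled out: the "64 element hash table" mentioned
+   above means every multicast address is reduced to one of 64 filter bits.
+   set_rx_mode() below derives those bits with ether_crc_le() and set_bit();
+   the helper here only shows the shape of that mapping, and its name is
+   made up. */
+#if 0
+static void hash_multicast_address(unsigned char *addr, u16 hash_table[4])
+{
+	/* Six bits of the little-endian CRC select one of 64 positions. */
+	int bit_nr = (ether_crc_le(6, addr) >> 3) & 0x3f;
+	hash_table[bit_nr >> 4] |= 1 << (bit_nr & 15);
+}
+#endif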
+
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+/* System-wide count of bogus-rx frames. */
+static int bogus_rx = 0;
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#elif YF_NEW /* A future perfect board :->. */
+static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
+static int fifo_cfg = 0x0028;
+#else
+static int dma_ctrl = 0x004A0263; /* Constrained by errata */
+static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
+#endif
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+static int rx_copybreak = 0;
+
+/* Used to pass the media type, etc.
+ No media types are currently defined. These options exist only for
+ compatibility with other drivers.
+*/
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/* Do ugly workaround for GX server chipset errata. */
+static int gx_fix = 0;
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, confuses the system network buffer limits,
+ and wastes memory.
+ Too-large receive rings waste memory and confound network buffer limits.
+*/
+#define TX_RING_SIZE 16
+#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
+#define RX_RING_SIZE 64
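+
+/* Illustrative sketch, compiled out: the ring sizes above are kept at powers
+   of two because cur_tx, dirty_tx, cur_rx and dirty_rx are free-running
+   counters that are reduced modulo the ring size on every access, e.g.
+   "entry = yp->cur_tx % TX_RING_SIZE".  With a power-of-two size that modulo
+   is just a mask, as the (made-up) helper below shows. */
+#if 0
+static unsigned int ring_entry(unsigned int free_running_index)
+{
+	/* Same value as free_running_index % TX_RING_SIZE when
+	   TX_RING_SIZE is a power of two. */
+	return free_running_index & (TX_RING_SIZE - 1);
+}
+#endif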
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (6*HZ)
+
+/* Allocation size of Rx buffers with normal sized Ethernet frames.
+ Do not change this value without good reason. This is not a limit,
+ but a way to keep a consistent allocation size among drivers.
+ */
+#define PKT_BUF_SZ 1536
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+#if !defined(__OPTIMIZE__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/config.h>
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#if LINUX_VERSION_CODE >= 0x20400
+#include <linux/slab.h>
+#else
+#include <linux/malloc.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/unaligned.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#ifdef INLINE_PCISCAN
+#include "k_compat.h"
+#else
+#include "pci-scan.h"
+#include "kern_compat.h"
+#endif
+
+/* Condensed operations for readability. */
+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+char kernel_version[] = UTS_RELEASE;
+#endif
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_PARM(debug, "i");
+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM(gx_fix, "i");
+MODULE_PARM_DESC(debug, "Driver message level enable (0-31)");
+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
+MODULE_PARM_DESC(rx_copybreak,
+ "Breakpoint in bytes for copy-only-tiny-frames");
+MODULE_PARM_DESC(full_duplex,
+ "Non-zero to force full duplex, non-negotiated link "
+ "(deprecated).");
+MODULE_PARM_DESC(max_interrupt_work,
+ "Driver maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit,
+ "Multicast addresses before switching to Rx-all-multicast");
+MODULE_PARM_DESC(gx_fix, "Set to work around old GX chipset errata");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Packet Engines "Yellowfin" Gigabit
+Ethernet adapter. The only PCA currently supported is the G-NIC 64-bit
+PCI card.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS preferably should assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+Note: Kernel versions earlier than 1.3.73 do not support shared PCI
+interrupt lines.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
+This is a descriptor list scheme similar to that used by the EEPro100 and
+Tulip. This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the Yellowfin as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack and replaced by a newly allocated skbuff.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. For small frames the copying cost is negligible (esp. considering
+that we are pre-loading the cache with immediately useful header
+information). For large frames the copying cost is non-trivial, and the
+larger copy might flush the cache of useful data.
+
+IIIc. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
+the 'yp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
+Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
+and an AlphaStation to verify the Alpha port!
+
+IVb. References
+
+Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
+Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
+ Data Manual v3.0
+http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+
+IVc. Errata
+
+See Packet Engines confidential appendix (prototype chips only).
+*/
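+
+/* Illustrative sketch, compiled out: the RX_COPYBREAK scheme described above,
+   reduced to its central decision.  The real logic lives in yellowfin_rx()
+   and also refills the descriptor ring and handles errors; the function and
+   parameter names here are made up. */
+#if 0
+static struct sk_buff *copybreak_decision(struct net_device *dev,
+					  unsigned char *buf, int pkt_len,
+					  int copybreak)
+{
+	struct sk_buff *skb;
+
+	if (pkt_len > copybreak)
+		return NULL;	/* Large frame: pass the ring skbuff up
+				   unchanged and give the ring a fresh one. */
+	skb = dev_alloc_skb(pkt_len + 2);
+	if (skb == NULL)
+		return NULL;	/* No memory: fall back to the same path. */
+	skb->dev = dev;
+	skb_reserve(skb, 2);	/* 16 byte align the IP header. */
+	memcpy(skb_put(skb, pkt_len), buf, pkt_len);
+	return skb;		/* Small frame: copied into a tiny skbuff. */
+}
+#endif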
+
+
+
+static void *yellowfin_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int fnd_cnt);
+enum capability_flags {
+ HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
+ HasMACAddrBug=32, /* Only on early revs. */
+};
+/* The PCI I/O space extent. */
+#define YELLOWFIN_SIZE 0x100
+#ifdef USE_IO_OPS
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
+#else
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+#endif
+
+static struct pci_id_info pci_id_tbl[] = {
+ {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
+ PCI_IOTYPE, YELLOWFIN_SIZE,
+ FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug},
+ {"Symbios SYM53C885", { 0x07011000, 0xffffffff},
+ PCI_IOTYPE, YELLOWFIN_SIZE, HasMII },
+ {0,},
+};
+
+struct drv_id_info yellowfin_drv_id = {
+ "yellowfin", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ yellowfin_probe1, };
+
+/* Offsets to the Yellowfin registers. Various sizes and alignments. */
+enum yellowfin_offsets {
+ TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
+ TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
+ RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
+ RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
+ EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
+ ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
+ Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
+ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
+ MII_Status=0xAE,
+ RxDepth=0xB8, FlowCtrl=0xBC,
+ AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
+ EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
+ EEFeature=0xF5,
+};
+
+/* The Yellowfin Rx and Tx buffer descriptors.
+ Elements are written as 32 bit for endian portability. */
+struct yellowfin_desc {
+ u32 dbdma_cmd;
+ u32 addr;
+ u32 branch_addr;
+ u32 result_status;
+};
+
+struct tx_status_words {
+#if defined(__powerpc__)
+ u16 tx_errs;
+ u16 tx_cnt;
+ u16 paused;
+ u16 total_tx_cnt;
+#else /* Little endian chips. */
+ u16 tx_cnt;
+ u16 tx_errs;
+ u16 total_tx_cnt;
+ u16 paused;
+#endif
+};
+
+/* Bits in yellowfin_desc.cmd */
+enum desc_cmd_bits {
+ CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
+ CMD_NOP=0x60000000, CMD_STOP=0x70000000,
+ BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
+ BRANCH_IFTRUE=0x040000,
+};
+
+/* Bits in yellowfin_desc.status */
+enum desc_status_bits { RX_EOP=0x0040, };
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
+ IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
+ IntrEarlyRx=0x100, IntrWakeup=0x200, };
+
+#define PRIV_ALIGN 31 /* Required alignment mask */
+struct yellowfin_private {
+ /* Descriptor rings first for alignment.
+ Tx requires a second descriptor for status. */
+ struct yellowfin_desc rx_ring[RX_RING_SIZE];
+ struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct tx_status_words tx_status[TX_RING_SIZE];
+ struct timer_list timer; /* Media selection timer. */
+ struct net_device_stats stats;
+ /* Frequently used and paired value: keep adjacent for cache effect. */
+ int msg_level;
+ int chip_id, drv_flags;
+ struct pci_dev *pci_dev;
+ long in_interrupt;
+ int max_interrupt_work;
+
+ struct yellowfin_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ int rx_copybreak;
+
+ struct tx_status_words *tx_tail_desc;
+ unsigned int cur_tx, dirty_tx;
+ int tx_threshold;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+ unsigned int default_port; /* Last dev->if_port value. */
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+ /* Rx multicast filter. */
+ u16 mc_filter[4];
+ int rx_mode;
+ int multicast_filter_limit;
+};
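+
+/* Illustrative sketch, compiled out: PRIV_ALIGN above is an alignment mask.
+   yellowfin_probe1() allocates sizeof(struct yellowfin_private) plus
+   PRIV_ALIGN bytes, rounds the pointer up to a 32-byte boundary so the
+   descriptor rings at the start of the structure are aligned, and keeps the
+   original pointer in priv_addr for kfree().  The helper name is made up. */
+#if 0
+static struct yellowfin_private *alloc_aligned_priv(void **raw_out)
+{
+	void *raw = kmalloc(sizeof(struct yellowfin_private) + PRIV_ALIGN,
+			    GFP_KERNEL);
+	if (raw == NULL)
+		return NULL;
+	*raw_out = raw;		/* Saved so it can be passed to kfree(). */
+	return (struct yellowfin_private *)
+		(((long)raw + PRIV_ALIGN) & ~PRIV_ALIGN);
+}
+#endif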
+
+static int read_eeprom(long ioaddr, int location);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static void mdio_write(long ioaddr, int phy_id, int location, int value);
+#ifdef HAVE_PRIVATE_IOCTL
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#endif
+static int yellowfin_open(struct net_device *dev);
+static void yellowfin_timer(unsigned long data);
+static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int yellowfin_rx(struct net_device *dev);
+static void yellowfin_error(struct net_device *dev, int intr_status);
+static int yellowfin_close(struct net_device *dev);
+static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
+
+
+
+/* A list of installed Yellowfin devices, for removing the driver module. */
+static struct net_device *root_yellowfin_dev = NULL;
+
+#ifndef MODULE
+int yellowfin_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&yellowfin_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
+}
+#endif
+
+static void *yellowfin_probe1(struct pci_dev *pdev, void *init_dev,
+ long ioaddr, int irq, int chip_idx, int find_cnt)
+{
+ struct net_device *dev;
+ struct yellowfin_private *np;
+ void *priv_mem;
+ int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ int drv_flags = pci_id_tbl[chip_idx].drv_flags;
+
+ dev = init_etherdev(init_dev, 0);
+ if (!dev)
+ return NULL;
+
+ printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
+ dev->name, pci_id_tbl[chip_idx].name, (int)inl(ioaddr + ChipRev),
+ ioaddr);
+
+ if (drv_flags & IsGigabit)
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
+ else {
+ int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ }
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
+ /* Make certain that elements, e.g. the descriptor lists, are aligned. */
+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
+ /* Check for the very unlikely case of no memory. */
+ if (priv_mem == NULL)
+ return NULL;
+
+ /* We do a request_region() only to register /proc/ioports info. */
+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+ np->priv_addr = priv_mem;
+
+ np->next_module = root_yellowfin_dev;
+ root_yellowfin_dev = dev;
+
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->drv_flags = drv_flags;
+ np->msg_level = (1 << debug) - 1;
+ np->rx_copybreak = rx_copybreak;
+ np->max_interrupt_work = max_interrupt_work;
+ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port)
+ np->medialock = 1;
+ }
+ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ np->full_duplex = 1;
+
+ if (np->full_duplex)
+ np->duplex_lock = 1;
+
+ /* The Yellowfin-specific entries in the device structure. */
+ dev->open = &yellowfin_open;
+ dev->hard_start_xmit = &yellowfin_start_xmit;
+ dev->stop = &yellowfin_close;
+ dev->get_stats = &yellowfin_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+ if (np->drv_flags & HasMII) {
+ int phy, phy_idx = 0;
+ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
+ int mii_status = mdio_read(ioaddr, phy, 1);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phy;
+ np->advertising = mdio_read(ioaddr, phy, 4);
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ np->mii_cnt = phy_idx;
+ }
+
+ return dev;
+}
+
+static int read_eeprom(long ioaddr, int location)
+{
+ int bogus_cnt = 10000; /* Typical 33 MHz: 1050 ticks */
+
+ outb(location, ioaddr + EEAddr);
+ outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
+ while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
+ ;
+ return inb(ioaddr + EERead);
+}
+
+/* MII Management Data I/O accesses.
+ These routines assume the MDIO controller is idle, and do not exit until
+ the command is finished. */
+
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+ int i;
+
+ outw((phy_id<<8) + location, ioaddr + MII_Addr);
+ outw(1, ioaddr + MII_Cmd);
+ for (i = 10000; i >= 0; i--)
+ if ((inw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return inw(ioaddr + MII_Rd_Data);
+}
+
+static void mdio_write(long ioaddr, int phy_id, int location, int value)
+{
+ int i;
+
+ outw((phy_id<<8) + location, ioaddr + MII_Addr);
+ outw(value, ioaddr + MII_Wr_Data);
+
+ /* Wait for the command to finish. */
+ for (i = 10000; i >= 0; i--)
+ if ((inw(ioaddr + MII_Status) & 1) == 0)
+ break;
+ return;
+}
+
+
+static int yellowfin_open(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
+ MOD_INC_USE_COUNT;
+
+ if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name,
+ dev)) {
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+ if (yp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
+ dev->name, dev->irq);
+
+ yellowfin_init_ring(dev);
+
+ outl(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
+ outl(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);
+
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + StnAddr + i);
+
+ /* Set up various condition 'select' registers.
+ There are no options here. */
+ outl(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
+ outl(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
+ outl(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
+ outl(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
+ outl(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
+ outl(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
+
+ /* Initialize other registers: with so many, this will eventually be
+ converted to an offset/value list. */
+ outl(dma_ctrl, ioaddr + DMACtrl);
+ outw(fifo_cfg, ioaddr + FIFOcfg);
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ outl(0x0030FFFF, ioaddr + FlowCtrl);
+
+ yp->tx_threshold = 32;
+ outl(yp->tx_threshold, ioaddr + TxThreshold);
+
+ if (dev->if_port == 0)
+ dev->if_port = yp->default_port;
+
+ yp->in_interrupt = 0;
+
+ /* Setting the Rx mode will start the Rx process. */
+ if (yp->drv_flags & IsGigabit) {
+ /* We are always in full-duplex mode with gigabit! */
+ yp->full_duplex = 1;
+ outw(0x01CF, ioaddr + Cnfg);
+ } else {
+ outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
+ outw(0x1018, ioaddr + FrameGap1);
+ outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+ }
+ yp->rx_mode = 0;
+ set_rx_mode(dev);
+ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outw(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
+ outw(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
+ outl(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
+ outl(0x80008000, ioaddr + TxCtrl);
+
+ if (yp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
+ dev->name);
+
+ /* Set the timer to check for link beat. */
+ init_timer(&yp->timer);
+ yp->timer.expires = jiffies + 3*HZ;
+ yp->timer.data = (unsigned long)dev;
+ yp->timer.function = &yellowfin_timer; /* timer handler */
+ add_timer(&yp->timer);
+
+ return 0;
+}
+
+static void yellowfin_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+ if (yp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+ if (jiffies - dev->trans_start > TX_TIMEOUT
+ && yp->cur_tx - yp->dirty_tx > 1
+ && netif_queue_paused(dev))
+ yellowfin_tx_timeout(dev);
+
+ if (yp->mii_cnt) {
+ int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
+ int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
+ int negotiated = mii_reg5 & yp->advertising;
+ if (yp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
+ "link partner capability %4.4x.\n",
+ dev->name, yp->phys[0], mii_reg1, mii_reg5);
+
+ if ( ! yp->duplex_lock &&
+ ((negotiated & 0x0300) == 0x0100
+ || (negotiated & 0x00C0) == 0x0040)) {
+ yp->full_duplex = 1;
+ }
+ outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+
+ if (mii_reg1 & 0x0004)
+ next_tick = 60*HZ;
+ else
+ next_tick = 3*HZ;
+ }
+
+ yp->timer.expires = jiffies + next_tick;
+ add_timer(&yp->timer);
+}
+
+static void yellowfin_tx_timeout(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
+ "status %4.4x, Rx status %4.4x, resetting...\n",
+ dev->name, yp->cur_tx, yp->dirty_tx,
+ (int)inl(ioaddr + TxStatus), (int)inl(ioaddr + RxStatus));
+
+ /* Note: these should be KERN_DEBUG. */
+ if (yp->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+ printk(KERN_DEBUG " Rx ring %p: ", yp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk(" %8.8x", yp->rx_ring[i].result_status);
+ printk("\n"KERN_DEBUG" Tx ring %p: ", yp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
+ yp->tx_ring[i].result_status);
+ printk("\n");
+ }
+
+ /* If the hardware is found to hang regularly, we will update the code
+ to reinitialize the chip here. */
+ dev->if_port = 0;
+
+ /* Wake the potentially-idle transmit channel. */
+ outl(0x10001000, dev->base_addr + TxCtrl);
+ if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
+ netif_unpause_tx_queue(dev);
+
+ dev->trans_start = jiffies;
+ yp->stats.tx_errors++;
+ return;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void yellowfin_init_ring(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int i;
+
+ yp->tx_full = 0;
+ yp->cur_rx = yp->cur_tx = 0;
+ yp->dirty_tx = 0;
+
+ yp->rx_buf_sz = dev->mtu + 18 + 15;
+ /* Match other driver's allocation size when possible. */
+ if (yp->rx_buf_sz < PKT_BUF_SZ)
+ yp->rx_buf_sz = PKT_BUF_SZ;
+ yp->rx_head_desc = &yp->rx_ring[0];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
+ yp->rx_ring[i].branch_addr = virt_to_le32desc(&yp->rx_ring[i+1]);
+ }
+ /* Mark the last entry as wrapping the ring. */
+ yp->rx_ring[i-1].branch_addr = virt_to_le32desc(&yp->rx_ring[0]);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ yp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ yp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ }
+ yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+#define NO_TXSTATS
+#ifdef NO_TXSTATS
+ /* In this mode the Tx ring needs only a single descriptor. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ yp->tx_skbuff[i] = 0;
+ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ }
+ /* Wrap ring */
+ yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
+#else
+ /* Tx ring needs a pair of descriptors, the second for the status. */
+ for (i = 0; i < TX_RING_SIZE*2; i++) {
+ yp->tx_skbuff[i/2] = 0;
+ /* Branch on Tx error. */
+ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ i++;
+ if (yp->drv_flags & FullTxStatus) {
+ yp->tx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | sizeof(yp->tx_status[i]));
+ yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
+ yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2]);
+ } else { /* Symbios chips write only tx_errs word. */
+ yp->tx_ring[i].dbdma_cmd =
+ cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
+ yp->tx_ring[i].request_cnt = 2;
+ yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2].tx_errs);
+ }
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ }
+ /* Wrap ring */
+ yp->tx_ring[--i].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
+ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
+#endif
+ yp->tx_tail_desc = &yp->tx_status[0];
+ return;
+}
+
+static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ unsigned entry;
+
+#if LINUX_VERSION_CODE < 0x20323
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (netif_pause_tx_queue(dev) != 0) {
+ /* This watchdog code is redundant with the media monitor timer. */
+ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ yellowfin_tx_timeout(dev);
+ return 1;
+ }
+#endif
+
+ /* Note: Ordering is important here, set the field with the
+ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = yp->cur_tx % TX_RING_SIZE;
+
+ yp->tx_skbuff[entry] = skb;
+
+ if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
+ int cacheline_end = (virt_to_bus(skb->data) + skb->len) % 32;
+ /* Fix GX chipset errata. */
+ if (cacheline_end > 24 || cacheline_end == 0)
+ skb->len += 32 - cacheline_end + 1;
+ }
+#ifdef NO_TXSTATS
+ yp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
+ yp->tx_ring[entry].result_status = 0;
+ if (entry >= TX_RING_SIZE-1) {
+ /* New stop command. */
+ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len);
+ } else {
+ yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->tx_ring[entry].dbdma_cmd =
+ cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
+ }
+ yp->cur_tx++;
+#else
+ yp->tx_ring[entry<<1].request_cnt = skb->len;
+ yp->tx_ring[entry<<1].addr = virt_to_le32desc(skb->data);
+ /* The input_last (status-write) command is constant, but we must rewrite
+ the subsequent 'stop' command. */
+
+ yp->cur_tx++;
+ {
+ unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
+ yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ }
+ /* Final step -- overwrite the old 'stop' command. */
+
+ yp->tx_ring[entry<<1].dbdma_cmd =
+ cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
+ CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
+#endif
+
+ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ outl(0x10001000, dev->base_addr + TxCtrl);
+
+ if (yp->cur_tx - yp->dirty_tx >= TX_QUEUE_SIZE) {
+ netif_stop_tx_queue(dev);
+ yp->tx_full = 1;
+ if (yp->cur_tx - (volatile int)yp->dirty_tx < TX_QUEUE_SIZE) {
+ netif_unpause_tx_queue(dev);
+ yp->tx_full = 0;
+ } else
+ netif_stop_tx_queue(dev);
+ } else
+ netif_unpause_tx_queue(dev); /* Typical path */
+ dev->trans_start = jiffies;
+
+ if (yp->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
+ dev->name, yp->cur_tx, entry);
+ }
+ return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct yellowfin_private *yp;
+ long ioaddr;
+ int boguscnt = max_interrupt_work;
+
+#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+ printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
+#endif
+
+ ioaddr = dev->base_addr;
+ yp = (struct yellowfin_private *)dev->priv;
+ if (test_and_set_bit(0, (void*)&yp->in_interrupt)) {
+ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+ return;
+ }
+
+ do {
+ u16 intr_status = inw(ioaddr + IntrClear);
+
+ if (yp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+ if (intr_status & (IntrRxDone | IntrEarlyRx)) {
+ yellowfin_rx(dev);
+ outl(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
+ }
+
+#ifdef NO_TXSTATS
+ for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
+ int entry = yp->dirty_tx % TX_RING_SIZE;
+ if (yp->tx_ring[entry].result_status == 0)
+ break;
+ yp->stats.tx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
+#endif
+ /* Free the original skb. */
+ dev_free_skb_irq(yp->tx_skbuff[entry]);
+ yp->tx_skbuff[entry] = 0;
+ }
+ if (yp->tx_full
+ && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+#else
+ if (intr_status & IntrTxDone
+ || yp->tx_tail_desc->tx_errs) {
+ unsigned dirty_tx = yp->dirty_tx;
+
+ for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ /* Todo: optimize this. */
+ int entry = dirty_tx % TX_RING_SIZE;
+ u16 tx_errs = yp->tx_status[entry].tx_errs;
+
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
+ "%4.4x %4.4x %4.4x %4.4x.\n",
+ dev->name, entry,
+ yp->tx_status[entry].tx_cnt,
+ yp->tx_status[entry].tx_errs,
+ yp->tx_status[entry].total_tx_cnt,
+ yp->tx_status[entry].paused);
+#endif
+ if (tx_errs == 0)
+ break; /* It still hasn't been Txed */
+ if (tx_errs & 0xF810) {
+ /* There was a major error, log it. */
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+#endif
+ yp->stats.tx_errors++;
+ if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
+ if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
+ if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
+ if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
+#ifdef ETHER_STATS
+ if (tx_errs & 0x1000) yp->stats.collisions16++;
+#endif
+ } else {
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+#endif
+#ifdef ETHER_STATS
+ if (tx_errs & 0x0400) yp->stats.tx_deferred++;
+#endif
+#if LINUX_VERSION_CODE > 0x20127
+ yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
+#endif
+ yp->stats.collisions += tx_errs & 15;
+ yp->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+ dev_free_skb_irq(yp->tx_skbuff[entry]);
+ yp->tx_skbuff[entry] = 0;
+ /* Mark status as empty. */
+ yp->tx_status[entry].tx_errs = 0;
+ }
+
+#ifndef final_version
+ if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (yp->tx_full
+ && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ yp->tx_full = 0;
+ netif_resume_tx_queue(dev);
+ }
+
+ yp->dirty_tx = dirty_tx;
+ yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
+ }
+#endif
+
+ /* Log errors and other uncommon events. */
+ if (intr_status & 0x2ee) /* Abnormal error summary. */
+ yellowfin_error(dev, intr_status);
+
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+ if (yp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+ clear_bit(0, (void*)&yp->in_interrupt);
+ return;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+static int yellowfin_rx(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int entry = yp->cur_rx % RX_RING_SIZE;
+ int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
+
+ if (yp->msg_level & NETIF_MSG_RX_STATUS) {
+ printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
+ entry, yp->rx_ring[entry].result_status);
+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
+ entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
+ yp->rx_ring[entry].result_status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (yp->rx_head_desc->result_status) {
+ struct yellowfin_desc *desc = yp->rx_head_desc;
+ u16 desc_status = le32_to_cpu(desc->result_status) >> 16;
+ int data_size =
+ (le32_to_cpu(desc->dbdma_cmd) - le32_to_cpu(desc->result_status))
+ & 0xffff;
+ u8 *buf_addr = le32desc_to_virt(desc->addr);
+ s16 frame_status = get_unaligned((s16*)&(buf_addr[data_size - 2]));
+
+ if (yp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
+ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RX_EOP)) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
+ " status %4.4x!\n", dev->name, desc_status);
+ yp->stats.rx_length_errors++;
+ } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
+ /* There was an error. */
+ if (yp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
+ frame_status);
+ yp->stats.rx_errors++;
+ if (frame_status & 0x0060) yp->stats.rx_length_errors++;
+ if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
+ if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
+ if (frame_status < 0) yp->stats.rx_dropped++;
+ } else if ( !(yp->drv_flags & IsGigabit) &&
+ ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
+ u8 status1 = buf_addr[data_size-2];
+ u8 status2 = buf_addr[data_size-1];
+ yp->stats.rx_errors++;
+ if (status1 & 0xC0) yp->stats.rx_length_errors++;
+ if (status2 & 0x03) yp->stats.rx_frame_errors++;
+ if (status2 & 0x04) yp->stats.rx_crc_errors++;
+ if (status2 & 0x80) yp->stats.rx_dropped++;
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ } else if ((yp->drv_flags & HasMACAddrBug) &&
+ memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
+ dev->dev_addr, 6) != 0
+ && memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
+ "\377\377\377\377\377\377", 6) != 0) {
+ if (bogus_rx++ == 0)
+ printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x:%2.2x.\n",
+ dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
+ buf_addr[3], buf_addr[4], buf_addr[5]);
+#endif
+ } else {
+ struct sk_buff *skb;
+ int pkt_len = data_size -
+ (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
+ /* To verify: Yellowfin Length should omit the CRC! */
+
+#ifndef final_version
+ if (yp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
+ " of %d, bogus_cnt %d.\n",
+ pkt_len, data_size, boguscnt);
+#endif
+ /* Check if the packet is long enough to just pass up the skbuff
+ without copying to a properly sized skbuff. */
+ if (pkt_len > yp->rx_copybreak) {
+ char *temp = skb_put(skb = yp->rx_skbuff[entry], pkt_len);
+ yp->rx_skbuff[entry] = NULL;
+#ifndef final_version /* Remove after testing. */
+ if (le32desc_to_virt(yp->rx_ring[entry].addr) != temp)
+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+ "do not match in yellowfin_rx: %p vs. %p / %p.\n",
+ dev->name,
+ le32desc_to_virt(yp->rx_ring[entry].addr),
+ skb->head, temp);
+#endif
+ } else {
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL)
+ break;
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+#if HAS_IP_COPYSUM
+ eth_copy_and_sum(skb, yp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+#else
+ memcpy(skb_put(skb, pkt_len), yp->rx_skbuff[entry]->tail,
+ pkt_len);
+#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ yp->stats.rx_packets++;
+#if LINUX_VERSION_CODE > 0x20127
+ yp->stats.rx_bytes += pkt_len;
+#endif
+ }
+ entry = (++yp->cur_rx) % RX_RING_SIZE;
+ yp->rx_head_desc = &yp->rx_ring[entry];
+ }
+
+ /* Refill the Rx ring buffers. */
+ for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
+ entry = yp->dirty_rx % RX_RING_SIZE;
+ if (yp->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ yp->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ yp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ }
+ yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
+ if (entry != 0)
+ yp->rx_ring[entry - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
+ else
+ yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
+ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
+ | yp->rx_buf_sz);
+ }
+
+ return 0;
+}
+
+static void yellowfin_error(struct net_device *dev, int intr_status)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear what to do here. */
+ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+ yp->stats.tx_errors++;
+ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+ yp->stats.rx_errors++;
+}
+
+static int yellowfin_close(struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int i;
+
+ netif_stop_tx_queue(dev);
+
+ if (yp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, inw(ioaddr + TxStatus),
+ inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outw(0x0000, ioaddr + IntrEnb);
+
+ /* Stop the chip's Tx and Rx processes. */
+ outl(0x80000000, ioaddr + RxCtrl);
+ outl(0x80000000, ioaddr + TxCtrl);
+
+ del_timer(&yp->timer);
+
+#if defined(__i386__)
+ if (yp->msg_level & NETIF_MSG_IFDOWN) {
+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
+ (int)virt_to_bus(yp->tx_ring));
+ for (i = 0; i < TX_RING_SIZE*2; i++)
+ printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
+ inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
+ i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
+ yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
+ printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n",
+ i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
+ yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
+
+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)virt_to_bus(yp->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
+ inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
+ i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
+ yp->rx_ring[i].result_status);
+ if (yp->msg_level & NETIF_MSG_PKTDATA) {
+ if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
+ int j;
+ for (j = 0; j < 0x50; j++)
+ printk(" %4.4x",
+ get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
+ printk("\n");
+ }
+ }
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (yp->rx_skbuff[i]) {
+#if LINUX_VERSION_CODE < 0x20100
+ yp->rx_skbuff[i]->free = 1;
+#endif
+ dev_free_skb(yp->rx_skbuff[i]);
+ }
+ yp->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (yp->tx_skbuff[i])
+ dev_free_skb(yp->tx_skbuff[i]);
+ yp->tx_skbuff[i] = 0;
+ }
+
+#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ if (yp->msg_level & NETIF_MSG_IFDOWN) {
+ printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
+ dev->name, bogus_rx);
+ }
+#endif
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ return &yp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor. */
+
+/* The little-endian AUTODIN32 ethernet CRC calculation.
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+static unsigned const ethernet_polynomial_le = 0xedb88320U;
+
+static inline unsigned ether_crc_le(int length, unsigned char *data)
+{
+ unsigned int crc = 0xffffffff; /* Initial value. */
+ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
+ if ((crc ^ current_octet) & 1) {
+ crc >>= 1;
+ crc ^= ethernet_polynomial_le;
+ } else
+ crc >>= 1;
+ }
+ }
+ return crc;
+}
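+
+/* Illustrative sketch, compiled out: the note above suggests a table-based
+   routine for bulk data.  A minimal table-driven version of the same
+   little-endian AUTODIN32 CRC could look like this; it is not used by the
+   driver and the names are made up. */
+#if 0
+static u32 crc32_le_table[256];
+
+static void crc32_le_build_table(void)
+{
+	unsigned int i, bit;
+	for (i = 0; i < 256; i++) {
+		u32 crc = i;
+		for (bit = 0; bit < 8; bit++)
+			crc = (crc >> 1) ^ ((crc & 1) ? ethernet_polynomial_le : 0);
+		crc32_le_table[i] = crc;
+	}
+}
+
+static u32 crc32_le(int length, const unsigned char *data)
+{
+	u32 crc = 0xffffffff;	/* Same initial value as ether_crc_le(). */
+	while (--length >= 0)
+		crc = (crc >> 8) ^ crc32_le_table[(crc ^ *data++) & 0xff];
+	return crc;
+}
+#endif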
+
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ u16 hash_table[4] = {0, 0, 0, 0};
+ int mc_change = 0;
+ int new_rx_mode, i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ new_rx_mode = 0x000F;
+ } else if (dev->mc_count > yp->multicast_filter_limit
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well, or accept all multicasts. */
+ new_rx_mode = 0x000B;
+ } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ struct dev_mc_list *mclist;
+
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ /* Due to a bug in the early chip versions, multiple filter
+ slots must be set for each address. */
+ if (yp->drv_flags & HasMulticastBug) {
+ set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ }
+ set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ }
+ if (memcmp(hash_table, yp->mc_filter, sizeof hash_table) != 0)
+ mc_change = 1;
+ new_rx_mode = 0x0003;
+ } else { /* Normal, unicast/broadcast-only mode. */
+ new_rx_mode = 0x0001;
+ }
+
+ /* Stop the Rx process to change any value. */
+ if (yp->rx_mode != new_rx_mode || mc_change) {
+ long ioaddr = dev->base_addr;
+ u16 cfg_value = inw(ioaddr + Cnfg);
+
+ outw(cfg_value & ~0x1000, ioaddr + Cnfg);
+
+ yp->rx_mode = new_rx_mode;
+ outw(new_rx_mode, ioaddr + AddrMode);
+ memcpy(yp->mc_filter, hash_table, sizeof hash_table);
+ /* Copy the hash table to the chip. */
+ for (i = 0; i < 4; i++)
+ outw(hash_table[i], ioaddr + HashTbl + i*2);
+
+ /* Restart the Rx process. */
+ outw(cfg_value | 0x1000, ioaddr + Cnfg);
+ }
+}
+
+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct yellowfin_private *np = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+ case 0x8947: case 0x89F0:
+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+ case 0x8948: case 0x89F1:
+ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+ case 0x8949: case 0x89F2:
+ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (data[0] == np->phys[0]) {
+ u16 value = data[2];
+ switch (data[1]) {
+ case 0:
+ /* Check for autonegotiation on or reset. */
+ np->medialock = (value & 0x9000) ? 0 : 1;
+ if (np->medialock)
+ np->full_duplex = (value & 0x0100) ? 1 : 0;
+ break;
+ case 4: np->advertising = value; break;
+ }
+ /* Perhaps check_duplex(dev), depending on chip semantics. */
+ }
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
+ case SIOCGPARAMS:
+ data32[0] = np->msg_level;
+ data32[1] = np->multicast_filter_limit;
+ data32[2] = np->max_interrupt_work;
+ data32[3] = np->rx_copybreak;
+ return 0;
+ case SIOCSPARAMS:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ np->msg_level = data32[0];
+ np->multicast_filter_limit = data32[1];
+ np->max_interrupt_work = data32[2];
+ np->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+#ifdef MODULE
+int init_module(void)
+{
+ /* Emit version even if no cards detected. */
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return pci_drv_register(&yellowfin_drv_id, NULL);
+}
+
+void cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ pci_drv_unregister(&yellowfin_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_yellowfin_dev) {
+ struct yellowfin_private *np = (void *)(root_yellowfin_dev->priv);
+ unregister_netdev(root_yellowfin_dev);
+#ifdef USE_IO_OPS
+ release_region(root_yellowfin_dev->base_addr,
+ pci_id_tbl[np->chip_id].io_size);
+#else
+ iounmap((char *)root_yellowfin_dev->base_addr);
+#endif
+ next_dev = np->next_module;
+ if (np->priv_addr)
+ kfree(np->priv_addr);
+ kfree(root_yellowfin_dev);
+ root_yellowfin_dev = next_dev;
+ }
+}
+
+#endif /* MODULE */
+
+/*
+ * Local variables:
+ * compile-command: "make KERNVER=`uname -r` yellowfin.o"
+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
+ * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/net/znet.c b/linux/src/drivers/net/znet.c
new file mode 100644
index 0000000..a9996fd
--- /dev/null
+++ b/linux/src/drivers/net/znet.c
@@ -0,0 +1,746 @@
+/* znet.c: A Zenith Z-Note ethernet driver for Linux. */
+
+static const char *version = "znet.c:v1.02 9/23/94 becker@cesdis.gsfc.nasa.gov\n";
+
+/*
+ Written by Donald Becker.
+
+ The author may be reached as becker@cesdis.gsfc.nasa.gov.
+ This driver is based on the Linux skeleton driver. The copyright of the
+ skeleton driver is held by the United States Government, as represented
+ by DIRNSA, and it is released under the GPL.
+
+ Thanks to Mike Hollick for alpha testing and suggestions.
+
+ References:
+ The Crynwr packet driver.
+
+ "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
+ Intel Microcommunications Databook, Vol. 1, 1990.
+ As usual with Intel, the documentation is incomplete and inaccurate.
+ I had to read the Crynwr packet driver to figure out how to actually
+ use the i82593, and guess at what register bits matched the loosely
+ related i82586.
+
+ Theory of Operation
+
+ The i82593 used in the Zenith Z-Note series operates using two(!) slave
+ DMA channels, one interrupt, and one 8-bit I/O port.
+
+ While there are several ways to configure the '593 DMA system, I chose the one
+ that seemed commensurate with the highest system performance in the face
+ of moderate interrupt latency: Both DMA channels are configured as
+ recirculating ring buffers, with one channel (#0) dedicated to Rx and
+ the other channel (#1) to Tx and configuration. (Note that this is
+ different than the Crynwr driver, where the Tx DMA channel is initialized
+ before each operation. That approach simplifies operation and Tx error
+ recovery, but requires additional I/O in normal operation and precludes
+ transmit buffer chaining.)
+
+ Both rings are set to 8192 bytes using {TX,RX}_BUF_SIZE. This provides
+ a reasonable ring size for Rx, while simplifying DMA buffer allocation --
+ DMA buffers must not cross a 128K boundary. (In truth the size selection
+ was influenced by my lack of '593 documentation. I thus was constrained
+ to use the Crynwr '593 initialization table, which sets the Rx ring size
+ to 8K.)
+
+ Despite my usual low opinion about Intel-designed parts, I must admit
+ that the bulk data handling of the i82593 is a good design for
+ an integrated system, like a laptop, where using two slave DMA channels
+ doesn't pose a problem. I still take issue with using only a single I/O
+ port. In the same controlled environment there are essentially no
+ limitations on I/O space, and using multiple locations would eliminate
+ the need for multiple operations when looking at status registers,
+ setting the Rx ring boundary, or switching to promiscuous mode.
+
+ I also question Zenith's selection of the '593: one of the advertised
+ advantages of earlier Intel parts was that if you figured out the magic
+ initialization incantation you could use the same part on many different
+ network types. Zenith's use of the "FriendlyNet" (sic) connector rather
+ than an on-board transceiver leads me to believe that they were planning
+ to take advantage of this. But, uhmmm, the '593 omits all but ethernet
+ functionality from the serial subsystem.
+ */
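+
+/* A minimal sketch of the ring-buffer wrap handling described above, as the
+   Tx path in znet_send_packet() performs it (ring_start, ring_end, cur and
+   words_needed are illustrative stand-ins for zn.tx_start, zn.tx_end,
+   zn.tx_cur and the rounded word count; the real code also reserves one
+   extra word for a terminating zero):
+
+       if (cur + words_needed > ring_end) {
+           int first = (ring_end - cur) << 1;
+           memcpy(cur, buf, first);
+           memcpy(ring_start, buf + first, length - first);
+           cur = ring_start + words_needed - (first >> 1);
+       } else {
+           memcpy(cur, buf, length);
+           cur += words_needed;
+       }
+
+   A frame that would run past the end of the 8K buffer is thus split and
+   continued at the start, keeping both DMA channels in auto-init
+   (recirculating) mode. */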
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#ifndef ZNET_DEBUG
+#define ZNET_DEBUG 1
+#endif
+static unsigned int znet_debug = ZNET_DEBUG;
+
+/* The DMA modes we need aren't in <dma.h>. */
+#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
+#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
+#define DMA_BUF_SIZE 8192
+#define RX_BUF_SIZE 8192
+#define TX_BUF_SIZE 8192
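+
+/* dma_page_eq() above tests whether two addresses fall within the same
+   128K page (the shift by 17 bits compares everything above 2^17 = 128K),
+   which is how the "must not cross a 128K boundary" rule mentioned in the
+   header comment is enforced.  znet_probe() uses it to pick a statically
+   allocated buffer that fits entirely in one page:
+
+       if (dma_page_eq(dma_buffer1, &dma_buffer1[RX_BUF_SIZE/2-1]))
+           zn.rx_start = dma_buffer1;
+       else
+           zn.rx_start = dma_buffer2;
+ */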
+
+/* Commands to the i82593 channel 0. */
+#define CMD0_CHNL_0 0x00
+#define CMD0_CHNL_1 0x10 /* Switch to channel 1. */
+#define CMD0_NOP (CMD0_CHNL_0)
+#define CMD0_PORT_1 CMD0_CHNL_1
+#define CMD1_PORT_0 1
+#define CMD0_IA_SETUP 1
+#define CMD0_CONFIGURE 2
+#define CMD0_MULTICAST_LIST 3
+#define CMD0_TRANSMIT 4
+#define CMD0_DUMP 6
+#define CMD0_DIAGNOSE 7
+#define CMD0_Rx_ENABLE 8
+#define CMD0_Rx_DISABLE 10
+#define CMD0_Rx_STOP 11
+#define CMD0_RETRANSMIT 12
+#define CMD0_ABORT 13
+#define CMD0_RESET 14
+
+#define CMD0_ACK 0x80
+
+#define CMD0_STAT0 (0 << 5)
+#define CMD0_STAT1 (1 << 5)
+#define CMD0_STAT2 (2 << 5)
+#define CMD0_STAT3 (3 << 5)
+
+#define net_local znet_private
+struct znet_private {
+ int rx_dma, tx_dma;
+ struct enet_statistics stats;
+ /* The starting, current, and end pointers for the packet buffers. */
+ ushort *rx_start, *rx_cur, *rx_end;
+ ushort *tx_start, *tx_cur, *tx_end;
+ ushort tx_buf_len; /* Tx buffer length, in words. */
+};
+
+/* Only one can be built-in;-> */
+static struct znet_private zn;
+static ushort dma_buffer1[DMA_BUF_SIZE/2];
+static ushort dma_buffer2[DMA_BUF_SIZE/2];
+static ushort dma_buffer3[DMA_BUF_SIZE/2 + 8];
+
+/* The configuration block. What an undocumented nightmare. The first
+ set of values are those suggested (without explanation) for ethernet
+ in the Intel 82586 databook. The rest appear to be completely undocumented,
+ except for cryptic notes in the Crynwr packet driver. This driver uses
+ the Crynwr values verbatim. */
+
+static unsigned char i593_init[] = {
+ 0xAA, /* 0: 16-byte input & 80-byte output FIFO. */
+ /* threshold, 96-byte FIFO, 82593 mode. */
+ 0x88, /* 1: Continuous w/interrupts, 128-clock DMA.*/
+ 0x2E, /* 2: 8-byte preamble, NO address insertion, */
+ /* 6-byte Ethernet address, loopback off.*/
+ 0x00, /* 3: Default priorities & backoff methods. */
+ 0x60, /* 4: 96-bit interframe spacing. */
+ 0x00, /* 5: 512-bit slot time (low-order). */
+ 0xF2, /* 6: Slot time (high-order), 15 COLL retries. */
+ 0x00, /* 7: Promisc-off, broadcast-on, default CRC. */
+ 0x00, /* 8: Default carrier-sense, collision-detect. */
+ 0x40, /* 9: 64-byte minimum frame length. */
+ 0x5F, /* A: Type/length checks OFF, no CRC input,
+ "jabber" termination, etc. */
+ 0x00, /* B: Full-duplex disabled. */
+ 0x3F, /* C: Default multicast addresses & backoff. */
+ 0x07, /* D: Default IFS retriggering. */
+ 0x31, /* E: Internal retransmit, drop "runt" packets,
+ synchr. DRQ deassertion, 6 status bytes. */
+ 0x22, /* F: Receive ring-buffer size (8K),
+ receive-stop register enable. */
+};
+
+struct netidblk {
+ char magic[8]; /* The magic number (string) "NETIDBLK" */
+ unsigned char netid[8]; /* The physical station address */
+ char nettype, globalopt;
+ char vendor[8]; /* The machine vendor and product name. */
+ char product[8];
+ char irq1, irq2; /* Interrupts, only one is currently used. */
+ char dma1, dma2;
+ short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
+ short iobase1, iosize1;
+ short iobase2, iosize2; /* Second iobase unused. */
+ char driver_options; /* Misc. bits */
+ char pad;
+};
+
+int znet_probe(struct device *dev);
+static int znet_open(struct device *dev);
+static int znet_send_packet(struct sk_buff *skb, struct device *dev);
+static void znet_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void znet_rx(struct device *dev);
+static int znet_close(struct device *dev);
+static struct enet_statistics *net_get_stats(struct device *dev);
+static void set_multicast_list(struct device *dev);
+static void hardware_init(struct device *dev);
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
+
+#ifdef notdef
+static struct sigaction znet_sigaction = { &znet_interrupt, 0, 0, NULL, };
+#endif
+
+
+/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
+ BIOS area. We just scan for the signature, and pull the vital parameters
+ out of the structure. */
+
+int znet_probe(struct device *dev)
+{
+ int i;
+ struct netidblk *netinfo;
+ char *p;
+
+ /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
+ for(p = (char *)0xf0000; p < (char *)0x100000; p++)
+ if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0)
+ break;
+
+ if (p >= (char *)0x100000) {
+ if (znet_debug > 1)
+ printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
+ return ENODEV;
+ }
+ netinfo = (struct netidblk *)p;
+ dev->base_addr = netinfo->iobase1;
+ dev->irq = netinfo->irq1;
+
+ printk(KERN_INFO "%s: ZNET at %#3lx,", dev->name, dev->base_addr);
+
+ /* The station address is in the "netidblk" at 0x0f0000. */
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i] = netinfo->netid[i]);
+
+ printk(", using IRQ %d DMA %d and %d.\n", dev->irq, netinfo->dma1,
+ netinfo->dma2);
+
+ if (znet_debug > 1) {
+ printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
+ dev->name, netinfo->vendor,
+ netinfo->irq1, netinfo->irq2,
+ netinfo->dma1, netinfo->dma2);
+ printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
+ dev->name, netinfo->iobase1, netinfo->iosize1,
+ netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
+ }
+
+ if (znet_debug > 0)
+ printk("%s%s", KERN_INFO, version);
+
+ dev->priv = (void *) &zn;
+ zn.rx_dma = netinfo->dma1;
+ zn.tx_dma = netinfo->dma2;
+
+ /* These should never fail. You can't add devices to a sealed box! */
+ if (request_irq(dev->irq, &znet_interrupt, 0, "ZNet", NULL)
+ || request_dma(zn.rx_dma,"ZNet rx")
+ || request_dma(zn.tx_dma,"ZNet tx")) {
+ printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
+ return EBUSY;
+ }
+ irq2dev_map[dev->irq] = dev;
+
+ /* Allocate buffer memory. A buffer may cross a 128K DMA boundary, so we
+ must be careful about the allocation. It's easiest to waste 8K. */
+ if (dma_page_eq(dma_buffer1, &dma_buffer1[RX_BUF_SIZE/2-1]))
+ zn.rx_start = dma_buffer1;
+ else
+ zn.rx_start = dma_buffer2;
+
+ if (dma_page_eq(dma_buffer3, &dma_buffer3[RX_BUF_SIZE/2-1]))
+ zn.tx_start = dma_buffer3;
+ else
+ zn.tx_start = dma_buffer2;
+ zn.rx_end = zn.rx_start + RX_BUF_SIZE/2;
+ zn.tx_buf_len = TX_BUF_SIZE/2;
+ zn.tx_end = zn.tx_start + zn.tx_buf_len;
+
+ /* The ZNET-specific entries in the device structure. */
+ dev->open = &znet_open;
+ dev->hard_start_xmit = &znet_send_packet;
+ dev->stop = &znet_close;
+ dev->get_stats = net_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+
+ /* Fill in the 'dev' with ethernet-generic values. */
+ ether_setup(dev);
+
+ return 0;
+}
+
+
+static int znet_open(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 2)
+ printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
+
+ /* Turn on the 82501 SIA, using zenith-specific magic. */
+ outb(0x10, 0xe6); /* Select LAN control register */
+ outb(inb(0xe7) | 0x84, 0xe7); /* Turn on LAN power (bit 2). */
+ /* According to the Crynwr driver we should wait 50 msec. for the
+ LAN clock to stabilize. My experiments indicate that the '593 can
+ be initialized immediately. The delay is probably needed for the
+ DC-to-DC converter to come up to full voltage, and for the oscillator
+ to be spot-on at 20 MHz before transmitting.
+ Until this proves to be a problem we rely on the higher layers for the
+ delay and save allocating a timer entry. */
+
+ /* This follows the packet driver's lead, and checks for success. */
+ if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
+ printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
+ dev->name);
+
+ dev->tbusy = 0;
+ dev->interrupt = 0;
+ hardware_init(dev);
+ dev->start = 1;
+
+ return 0;
+}
+
+static int znet_send_packet(struct sk_buff *skb, struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: ZNet_send_packet(%ld).\n", dev->name, dev->tbusy);
+
+ /* Transmitter timeout, likely just recovery after suspending the machine. */
+ if (dev->tbusy) {
+ ushort event, tx_status, rx_offset, state;
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < 10)
+ return 1;
+ outb(CMD0_STAT0, ioaddr); event = inb(ioaddr);
+ outb(CMD0_STAT1, ioaddr); tx_status = inw(ioaddr);
+ outb(CMD0_STAT2, ioaddr); rx_offset = inw(ioaddr);
+ outb(CMD0_STAT3, ioaddr); state = inb(ioaddr);
+ printk(KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
+ " resetting.\n", dev->name, event, tx_status, rx_offset, state);
+ if (tx_status == 0x0400)
+ printk(KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
+ dev->name);
+ outb(CMD0_RESET, ioaddr);
+ hardware_init(dev);
+ }
+
+ if (skb == NULL) {
+ dev_tint(dev);
+ return 0;
+ }
+
+ /* Check that the part hasn't reset itself, probably from suspend. */
+ outb(CMD0_STAT0, ioaddr);
+ if (inw(ioaddr) == 0x0010
+ && inw(ioaddr) == 0x0000
+ && inw(ioaddr) == 0x0010)
+ hardware_init(dev);
+
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+ if (set_bit(0, (void*)&dev->tbusy) != 0)
+ printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
+ else {
+ short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ unsigned char *buf = (void *)skb->data;
+ ushort *tx_link = zn.tx_cur - 1;
+ ushort rnd_len = (length + 1)>>1;
+
+ {
+ short dma_port = ((zn.tx_dma&3)<<2) + IO_DMA2_BASE;
+ unsigned addr = inb(dma_port);
+ addr |= inb(dma_port) << 8;
+ addr <<= 1;
+ if (((int)zn.tx_cur & 0x1ffff) != addr)
+ printk(KERN_WARNING "Address mismatch at Tx: %#x vs %#x.\n",
+ (int)zn.tx_cur & 0xffff, addr);
+ zn.tx_cur = (ushort *)(((int)zn.tx_cur & 0xfe0000) | addr);
+ }
+
+ if (zn.tx_cur >= zn.tx_end)
+ zn.tx_cur = zn.tx_start;
+ *zn.tx_cur++ = length;
+ if (zn.tx_cur + rnd_len + 1 > zn.tx_end) {
+ int semi_cnt = (zn.tx_end - zn.tx_cur)<<1; /* Cvrt to byte cnt. */
+ memcpy(zn.tx_cur, buf, semi_cnt);
+ rnd_len -= semi_cnt>>1;
+ memcpy(zn.tx_start, buf + semi_cnt, length - semi_cnt);
+ zn.tx_cur = zn.tx_start + rnd_len;
+ } else {
+ memcpy(zn.tx_cur, buf, skb->len);
+ zn.tx_cur += rnd_len;
+ }
+ *zn.tx_cur++ = 0;
+ cli(); {
+ *tx_link = CMD0_TRANSMIT + CMD0_CHNL_1;
+ /* Is this always safe to do? */
+ outb(CMD0_TRANSMIT + CMD0_CHNL_1,ioaddr);
+ } sti();
+
+ dev->trans_start = jiffies;
+ if (znet_debug > 4)
+ printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
+ }
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+}
+
+/* The ZNET interrupt handler. */
+static void znet_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct device *dev = irq2dev_map[irq];
+ int ioaddr;
+ int boguscnt = 20;
+
+ if (dev == NULL) {
+ printk(KERN_WARNING "znet_interrupt(): IRQ %d for unknown device.\n", irq);
+ return;
+ }
+
+ dev->interrupt = 1;
+ ioaddr = dev->base_addr;
+
+ outb(CMD0_STAT0, ioaddr);
+ do {
+ ushort status = inb(ioaddr);
+ if (znet_debug > 5) {
+ ushort result, rx_ptr, running;
+ outb(CMD0_STAT1, ioaddr);
+ result = inw(ioaddr);
+ outb(CMD0_STAT2, ioaddr);
+ rx_ptr = inw(ioaddr);
+ outb(CMD0_STAT3, ioaddr);
+ running = inb(ioaddr);
+ printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
+ dev->name, status, result, rx_ptr, running, boguscnt);
+ }
+ if ((status & 0x80) == 0)
+ break;
+
+ if ((status & 0x0F) == 4) { /* Transmit done. */
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int tx_status;
+ outb(CMD0_STAT1, ioaddr);
+ tx_status = inw(ioaddr);
+ /* It's undocumented, but tx_status seems to match the i82586. */
+ if (tx_status & 0x2000) {
+ lp->stats.tx_packets++;
+ lp->stats.collisions += tx_status & 0xf;
+ } else {
+ if (tx_status & 0x0600) lp->stats.tx_carrier_errors++;
+ if (tx_status & 0x0100) lp->stats.tx_fifo_errors++;
+ if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++;
+ if (tx_status & 0x0020) lp->stats.tx_aborted_errors++;
+ /* ...and the catch-all. */
+ if ((tx_status | 0x0760) != 0x0760)
+ lp->stats.tx_errors++;
+ }
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* Inform upper layers. */
+ }
+
+ if ((status & 0x40)
+ || (status & 0x0f) == 11) {
+ znet_rx(dev);
+ }
+ /* Clear the interrupts we've handled. */
+ outb(CMD0_ACK,ioaddr);
+ } while (boguscnt--);
+
+ dev->interrupt = 0;
+ return;
+}
+
+static void znet_rx(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+ int ioaddr = dev->base_addr;
+ int boguscount = 1;
+ short next_frame_end_offset = 0; /* Offset of next frame start. */
+ short *cur_frame_end;
+ short cur_frame_end_offset;
+
+ outb(CMD0_STAT2, ioaddr);
+ cur_frame_end_offset = inw(ioaddr);
+
+ if (cur_frame_end_offset == zn.rx_cur - zn.rx_start) {
+ printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
+ dev->name, cur_frame_end_offset);
+ return;
+ }
+
+ /* Use the same method as the Crynwr driver: construct a forward list in
+ the same area as the backwards links we now have. This allows us to
+ pass packets to the upper layers in the order they were received --
+ important for fast-path sequential operations. */
+ while (zn.rx_start + cur_frame_end_offset != zn.rx_cur
+ && ++boguscount < 5) {
+ unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
+ int count, status;
+
+ if (cur_frame_end_offset < 4) {
+ /* Oh no, we have a special case: the frame trailer wraps around
+ the end of the ring buffer. We've saved space at the end of
+ the ring buffer for just this problem. */
+ memcpy(zn.rx_end, zn.rx_start, 8);
+ cur_frame_end_offset += (RX_BUF_SIZE/2);
+ }
+ cur_frame_end = zn.rx_start + cur_frame_end_offset - 4;
+
+ lo_status = *cur_frame_end++;
+ hi_status = *cur_frame_end++;
+ status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
+ lo_cnt = *cur_frame_end++;
+ hi_cnt = *cur_frame_end++;
+ count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
+ " count %#x status %04x.\n",
+ cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
+ count, status);
+ cur_frame_end[-4] = status;
+ cur_frame_end[-3] = next_frame_end_offset;
+ cur_frame_end[-2] = count;
+ next_frame_end_offset = cur_frame_end_offset;
+ cur_frame_end_offset -= ((count + 1)>>1) + 3;
+ if (cur_frame_end_offset < 0)
+ cur_frame_end_offset += RX_BUF_SIZE/2;
+ };
+
+ /* Now step forward through the list. */
+ do {
+ ushort *this_rfp_ptr = zn.rx_start + next_frame_end_offset;
+ int status = this_rfp_ptr[-4];
+ int pkt_len = this_rfp_ptr[-2];
+
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
+ " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
+ this_rfp_ptr[-3]<<1);
+ /* Once again we must assume that the i82586 docs apply. */
+ if ( ! (status & 0x2000)) { /* There was an error. */
+ lp->stats.rx_errors++;
+ if (status & 0x0800) lp->stats.rx_crc_errors++;
+ if (status & 0x0400) lp->stats.rx_frame_errors++;
+ if (status & 0x0200) lp->stats.rx_over_errors++; /* Wrong. */
+ if (status & 0x0100) lp->stats.rx_fifo_errors++;
+ if (status & 0x0080) lp->stats.rx_length_errors++;
+ } else if (pkt_len > 1536) {
+ lp->stats.rx_length_errors++;
+ } else {
+ /* Malloc up new buffer. */
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len);
+ if (skb == NULL) {
+ if (znet_debug)
+ printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+
+ if (&zn.rx_cur[(pkt_len+1)>>1] > zn.rx_end) {
+ int semi_cnt = (zn.rx_end - zn.rx_cur)<<1;
+ memcpy(skb_put(skb,semi_cnt), zn.rx_cur, semi_cnt);
+ memcpy(skb_put(skb,pkt_len-semi_cnt), zn.rx_start,
+ pkt_len - semi_cnt);
+ } else {
+ memcpy(skb_put(skb,pkt_len), zn.rx_cur, pkt_len);
+ if (znet_debug > 6) {
+ unsigned int *packet = (unsigned int *) skb->data;
+ printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
+ packet[1], packet[2], packet[3]);
+ }
+ }
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ }
+ zn.rx_cur = this_rfp_ptr;
+ if (zn.rx_cur >= zn.rx_end)
+ zn.rx_cur -= RX_BUF_SIZE/2;
+ update_stop_hit(ioaddr, (zn.rx_cur - zn.rx_start)<<1);
+ next_frame_end_offset = this_rfp_ptr[-3];
+ if (next_frame_end_offset == 0) /* Read all the frames? */
+ break; /* Done for now */
+ this_rfp_ptr = zn.rx_start + next_frame_end_offset;
+ } while (--boguscount);
+
+ /* If any worth-while packets have been received, dev_rint()
+ has done a mark_bh(INET_BH) for us and will work on them
+ when we get to the bottom-half routine. */
+ return;
+}
+
+/* The inverse routine to znet_open(). */
+static int znet_close(struct device *dev)
+{
+ int ioaddr = dev->base_addr;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ outb(CMD0_RESET, ioaddr); /* CMD0_RESET */
+
+ disable_dma(zn.rx_dma);
+ disable_dma(zn.tx_dma);
+
+ free_irq(dev->irq, NULL);
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+ /* Turn off transceiver power. */
+ outb(0x10, 0xe6); /* Select LAN control register */
+ outb(inb(0xe7) & ~0x84, 0xe7); /* Turn off LAN power. */
+
+ return 0;
+}
+
+/* Get the current statistics. This may be called with the card open or
+ closed. */
+static struct enet_statistics *net_get_stats(struct device *dev)
+{
+ struct net_local *lp = (struct net_local *)dev->priv;
+
+ return &lp->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ As a side effect this routine must also initialize the device parameters.
+ This is taken advantage of in open().
+
+ N.B. that we change i593_init[] in place. This (properly) makes the
+ mode change persistent, but must be changed if this code is moved to
+ a multiple adaptor environment.
+ */
+static void set_multicast_list(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ if (dev->flags&IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ i593_init[7] &= ~3; i593_init[7] |= 1;
+ i593_init[13] &= ~8; i593_init[13] |= 8;
+ } else if (dev->mc_list || (dev->flags&IFF_ALLMULTI)) {
+ /* Enable accept-all-multicast mode */
+ i593_init[7] &= ~3; i593_init[7] |= 0;
+ i593_init[13] &= ~8; i593_init[13] |= 8;
+ } else { /* Enable normal mode. */
+ i593_init[7] &= ~3; i593_init[7] |= 0;
+ i593_init[13] &= ~8; i593_init[13] |= 0;
+ }
+ *zn.tx_cur++ = sizeof(i593_init);
+ memcpy(zn.tx_cur, i593_init, sizeof(i593_init));
+ zn.tx_cur += sizeof(i593_init)/2;
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+#ifdef not_tested
+ if (num_addrs > 0) {
+ int addrs_len = 6*num_addrs;
+ *zn.tx_cur++ = addrs_len;
+ memcpy(zn.tx_cur, addrs, addrs_len);
+ outb(CMD0_MULTICAST_LIST+CMD0_CHNL_1, ioaddr);
+ zn.tx_cur += addrs_len>>1;
+ }
+#endif
+}
+
+void show_dma(void)
+{
+ short dma_port = ((zn.tx_dma&3)<<2) + IO_DMA2_BASE;
+ unsigned addr = inb(dma_port);
+ addr |= inb(dma_port) << 8;
+ printk("Addr: %04x cnt:%3x...", addr<<1, get_dma_residue(zn.tx_dma));
+}
+
+/* Initialize the hardware. We have to do this when the board is open()ed
+ or when we come out of suspend mode. */
+static void hardware_init(struct device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ zn.rx_cur = zn.rx_start;
+ zn.tx_cur = zn.tx_start;
+
+ /* Reset the chip, and start it up. */
+ outb(CMD0_RESET, ioaddr);
+
+ cli(); { /* Protect against a DMA flip-flop */
+ disable_dma(zn.rx_dma); /* reset by an interrupting task. */
+ clear_dma_ff(zn.rx_dma);
+ set_dma_mode(zn.rx_dma, DMA_RX_MODE);
+ set_dma_addr(zn.rx_dma, (unsigned int) zn.rx_start);
+ set_dma_count(zn.rx_dma, RX_BUF_SIZE);
+ enable_dma(zn.rx_dma);
+ /* Now set up the Tx channel. */
+ disable_dma(zn.tx_dma);
+ clear_dma_ff(zn.tx_dma);
+ set_dma_mode(zn.tx_dma, DMA_TX_MODE);
+ set_dma_addr(zn.tx_dma, (unsigned int) zn.tx_start);
+ set_dma_count(zn.tx_dma, zn.tx_buf_len<<1);
+ enable_dma(zn.tx_dma);
+ } sti();
+
+ if (znet_debug > 1)
+ printk(KERN_DEBUG "%s: Initializing the i82593, tx buf %p... ", dev->name,
+ zn.tx_start);
+ /* Do an empty configure command, just like the Crynwr driver. This
+ resets the chip to its default values. */
+ *zn.tx_cur++ = 0;
+ *zn.tx_cur++ = 0;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+ *zn.tx_cur++ = sizeof(i593_init);
+ memcpy(zn.tx_cur, i593_init, sizeof(i593_init));
+ zn.tx_cur += sizeof(i593_init)/2;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_CONFIGURE+CMD0_CHNL_1, ioaddr);
+ *zn.tx_cur++ = 6;
+ memcpy(zn.tx_cur, dev->dev_addr, 6);
+ zn.tx_cur += 3;
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+ outb(CMD0_IA_SETUP + CMD0_CHNL_1, ioaddr);
+ printk("stat:%02x ", inb(ioaddr)); show_dma();
+
+ update_stop_hit(ioaddr, 8192);
+ if (znet_debug > 1) printk("enabling Rx.\n");
+ outb(CMD0_Rx_ENABLE+CMD0_CHNL_0, ioaddr);
+ dev->tbusy = 0;
+}
+
+static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
+{
+ outb(CMD0_PORT_1, ioaddr);
+ if (znet_debug > 5)
+ printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
+ (rx_stop_offset >> 6) | 0x80);
+ outb((rx_stop_offset >> 6) | 0x80, ioaddr);
+ outb(CMD1_PORT_0, ioaddr);
+}
+
+/*
+ * Local variables:
+ * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c znet.c"
+ * version-control: t
+ * kept-new-versions: 5
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/linux/src/drivers/pci/pci.c b/linux/src/drivers/pci/pci.c
new file mode 100644
index 0000000..cf7dd80
--- /dev/null
+++ b/linux/src/drivers/pci/pci.c
@@ -0,0 +1,1322 @@
+/*
+ * drivers/pci/pci.c
+ *
+ * PCI services that are built on top of the BIOS32 service.
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt, Frederic Potter,
+ * David Mosberger-Tang
+ *
+ * Apr 12, 1998 : Fixed handling of alien header types. [mj]
+ */
+
+#include <linux/config.h>
+#include <linux/ptrace.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+
+struct pci_bus pci_root;
+struct pci_dev *pci_devices = 0;
+
+
+/*
+ * The bridge_id field is an offset of an item into the array
+ * BRIDGE_MAPPING_TYPE. 0xff indicates that the device is not a PCI
+ * bridge, or that we don't know for the moment how to configure it.
+ * I'm trying to do my best so that the kernel stays small. Different
+ * chipsets can have the same optimization structure. i486 and Pentium
+ * chipsets from the same manufacturer usually have the same
+ * structure.
+ */
+#define DEVICE(vid,did,name) \
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name), 0xff}
+
+#define BRIDGE(vid,did,name,bridge) \
+ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name), (bridge)}
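+
+/* For example, the table entry
+
+       DEVICE( NCR, NCR_53C810, "53c810")
+
+   expands to the initializer
+
+       {PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, "53c810", 0xff}
+
+   where the trailing 0xff marks the device as a non-bridge (or a bridge we
+   don't yet know how to configure), as explained above. */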
+
+/*
+ * Sorted in ascending order by vendor and device.
+ * Use binary search for lookup. If you add a device make sure
+ * it is sequential by both vendor and device id.
+ */
+struct pci_dev_info dev_info[] = {
+ DEVICE( COMPAQ, COMPAQ_1280, "QVision 1280/p"),
+ DEVICE( COMPAQ, COMPAQ_SMART2P, "Smart-2/P RAID Controller"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100,"Netelligent 10/100"),
+ DEVICE( COMPAQ, COMPAQ_NETEL10, "Netelligent 10"),
+ DEVICE( COMPAQ, COMPAQ_NETFLEX3I,"NetFlex 3"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100D,"Netelligent 10/100 Dual"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100PI,"Netelligent 10/100 ProLiant"),
+ DEVICE( COMPAQ, COMPAQ_NETEL100I,"Netelligent 10/100 Integrated"),
+ DEVICE( COMPAQ, COMPAQ_THUNDER, "ThunderLAN"),
+ DEVICE( COMPAQ, COMPAQ_NETFLEX3B,"NetFlex 3 BNC"),
+ DEVICE( NCR, NCR_53C810, "53c810"),
+ DEVICE( NCR, NCR_53C820, "53c820"),
+ DEVICE( NCR, NCR_53C825, "53c825"),
+ DEVICE( NCR, NCR_53C815, "53c815"),
+ DEVICE( NCR, NCR_53C860, "53c860"),
+ DEVICE( NCR, NCR_53C896, "53c896"),
+ DEVICE( NCR, NCR_53C895, "53c895"),
+ DEVICE( NCR, NCR_53C885, "53c885"),
+ DEVICE( NCR, NCR_53C875, "53c875"),
+ DEVICE( NCR, NCR_53C875J, "53c875J"),
+ DEVICE( ATI, ATI_68800, "68800AX"),
+ DEVICE( ATI, ATI_215CT222, "215CT222"),
+ DEVICE( ATI, ATI_210888CX, "210888CX"),
+ DEVICE( ATI, ATI_215GB, "Mach64 GB"),
+ DEVICE( ATI, ATI_215GD, "Mach64 GD (Rage Pro)"),
+ DEVICE( ATI, ATI_215GI, "Mach64 GI (Rage Pro)"),
+ DEVICE( ATI, ATI_215GP, "Mach64 GP (Rage Pro)"),
+ DEVICE( ATI, ATI_215GQ, "Mach64 GQ (Rage Pro)"),
+ DEVICE( ATI, ATI_215GT, "Mach64 GT (Rage II)"),
+ DEVICE( ATI, ATI_215GTB, "Mach64 GT (Rage II)"),
+ DEVICE( ATI, ATI_210888GX, "210888GX"),
+ DEVICE( ATI, ATI_215LG, "Mach64 LG (3D Rage LT)"),
+ DEVICE( ATI, ATI_264LT, "Mach64 LT"),
+ DEVICE( ATI, ATI_264VT, "Mach64 VT"),
+ DEVICE( VLSI, VLSI_82C592, "82C592-FC1"),
+ DEVICE( VLSI, VLSI_82C593, "82C593-FC1"),
+ DEVICE( VLSI, VLSI_82C594, "82C594-AFC2"),
+ DEVICE( VLSI, VLSI_82C597, "82C597-AFC2"),
+ DEVICE( VLSI, VLSI_82C541, "82C541 Lynx"),
+ DEVICE( VLSI, VLSI_82C543, "82C543 Lynx ISA"),
+ DEVICE( VLSI, VLSI_82C532, "82C532"),
+ DEVICE( VLSI, VLSI_82C534, "82C534"),
+ DEVICE( VLSI, VLSI_82C535, "82C535"),
+ DEVICE( VLSI, VLSI_82C147, "82C147"),
+ DEVICE( VLSI, VLSI_VAS96011, "VAS96011 (Golden Gate II)"),
+ DEVICE( ADL, ADL_2301, "2301"),
+ DEVICE( NS, NS_87415, "87415"),
+ DEVICE( NS, NS_87410, "87410"),
+ DEVICE( TSENG, TSENG_W32P_2, "ET4000W32P"),
+ DEVICE( TSENG, TSENG_W32P_b, "ET4000W32P rev B"),
+ DEVICE( TSENG, TSENG_W32P_c, "ET4000W32P rev C"),
+ DEVICE( TSENG, TSENG_W32P_d, "ET4000W32P rev D"),
+ DEVICE( TSENG, TSENG_ET6000, "ET6000"),
+ DEVICE( WEITEK, WEITEK_P9000, "P9000"),
+ DEVICE( WEITEK, WEITEK_P9100, "P9100"),
+ BRIDGE( DEC, DEC_BRD, "DC21050", 0x00),
+ DEVICE( DEC, DEC_TULIP, "DC21040"),
+ DEVICE( DEC, DEC_TGA, "DC21030 (TGA)"),
+ DEVICE( DEC, DEC_TULIP_FAST, "DC21140"),
+ DEVICE( DEC, DEC_TGA2, "TGA2"),
+ DEVICE( DEC, DEC_FDDI, "DEFPA"),
+ DEVICE( DEC, DEC_TULIP_PLUS, "DC21041"),
+ DEVICE( DEC, DEC_21142, "DC21142"),
+ DEVICE( DEC, DEC_21052, "DC21052"),
+ DEVICE( DEC, DEC_21150, "DC21150"),
+ DEVICE( DEC, DEC_21152, "DC21152"),
+ DEVICE( CIRRUS, CIRRUS_7548, "GD 7548"),
+ DEVICE( CIRRUS, CIRRUS_5430, "GD 5430"),
+ DEVICE( CIRRUS, CIRRUS_5434_4, "GD 5434"),
+ DEVICE( CIRRUS, CIRRUS_5434_8, "GD 5434"),
+ DEVICE( CIRRUS, CIRRUS_5436, "GD 5436"),
+ DEVICE( CIRRUS, CIRRUS_5446, "GD 5446"),
+ DEVICE( CIRRUS, CIRRUS_5480, "GD 5480"),
+ DEVICE( CIRRUS, CIRRUS_5464, "GD 5464"),
+ DEVICE( CIRRUS, CIRRUS_5465, "GD 5465"),
+ DEVICE( CIRRUS, CIRRUS_6729, "CL 6729"),
+ DEVICE( CIRRUS, CIRRUS_6832, "PD 6832"),
+ DEVICE( CIRRUS, CIRRUS_7542, "CL 7542"),
+ DEVICE( CIRRUS, CIRRUS_7543, "CL 7543"),
+ DEVICE( CIRRUS, CIRRUS_7541, "CL 7541"),
+ DEVICE( IBM, IBM_FIRE_CORAL, "Fire Coral"),
+ DEVICE( IBM, IBM_TR, "Token Ring"),
+ DEVICE( IBM, IBM_82G2675, "82G2675"),
+ DEVICE( IBM, IBM_MCA, "MicroChannel"),
+ DEVICE( IBM, IBM_82351, "82351"),
+ DEVICE( IBM, IBM_SERVERAID, "ServeRAID"),
+ DEVICE( IBM, IBM_TR_WAKE, "Wake On LAN Token Ring"),
+ DEVICE( IBM, IBM_3780IDSP, "MWave DSP"),
+ DEVICE( WD, WD_7197, "WD 7197"),
+ DEVICE( AMD, AMD_LANCE, "79C970"),
+ DEVICE( AMD, AMD_SCSI, "53C974"),
+ DEVICE( TRIDENT, TRIDENT_9397, "Cyber9397"),
+ DEVICE( TRIDENT, TRIDENT_9420, "TG 9420"),
+ DEVICE( TRIDENT, TRIDENT_9440, "TG 9440"),
+ DEVICE( TRIDENT, TRIDENT_9660, "TG 9660 / Cyber9385"),
+ DEVICE( TRIDENT, TRIDENT_9750, "Image 975"),
+ DEVICE( AI, AI_M1435, "M1435"),
+ DEVICE( MATROX, MATROX_MGA_2, "Atlas PX2085"),
+ DEVICE( MATROX, MATROX_MIL, "Millennium"),
+ DEVICE( MATROX, MATROX_MYS, "Mystique"),
+ DEVICE( MATROX, MATROX_MIL_2, "Millennium II"),
+ DEVICE( MATROX, MATROX_MIL_2_AGP,"Millennium II AGP"),
+ DEVICE( MATROX, MATROX_MGA_IMP, "MGA Impression"),
+ DEVICE( CT, CT_65545, "65545"),
+ DEVICE( CT, CT_65548, "65548"),
+ DEVICE( CT, CT_65550, "65550"),
+ DEVICE( CT, CT_65554, "65554"),
+ DEVICE( CT, CT_65555, "65555"),
+ DEVICE( MIRO, MIRO_36050, "ZR36050"),
+ DEVICE( NEC, NEC_PCX2, "PowerVR PCX2"),
+ DEVICE( FD, FD_36C70, "TMC-18C30"),
+ DEVICE( SI, SI_5591_AGP, "5591/5592 AGP"),
+ DEVICE( SI, SI_6202, "6202"),
+ DEVICE( SI, SI_503, "85C503"),
+ DEVICE( SI, SI_ACPI, "ACPI"),
+ DEVICE( SI, SI_5597_VGA, "5597/5598 VGA"),
+ DEVICE( SI, SI_6205, "6205"),
+ DEVICE( SI, SI_501, "85C501"),
+ DEVICE( SI, SI_496, "85C496"),
+ DEVICE( SI, SI_601, "85C601"),
+ DEVICE( SI, SI_5107, "5107"),
+ DEVICE( SI, SI_5511, "85C5511"),
+ DEVICE( SI, SI_5513, "85C5513"),
+ DEVICE( SI, SI_5571, "5571"),
+ DEVICE( SI, SI_5591, "5591/5592 Host"),
+ DEVICE( SI, SI_5597, "5597/5598 Host"),
+ DEVICE( SI, SI_7001, "7001 USB"),
+ DEVICE( HP, HP_J2585A, "J2585A"),
+ DEVICE( HP, HP_J2585B, "J2585B (Lassen)"),
+ DEVICE( PCTECH, PCTECH_RZ1000, "RZ1000 (buggy)"),
+ DEVICE( PCTECH, PCTECH_RZ1001, "RZ1001 (buggy?)"),
+ DEVICE( PCTECH, PCTECH_SAMURAI_0,"Samurai 0"),
+ DEVICE( PCTECH, PCTECH_SAMURAI_1,"Samurai 1"),
+ DEVICE( PCTECH, PCTECH_SAMURAI_IDE,"Samurai IDE"),
+ DEVICE( DPT, DPT, "SmartCache/Raid"),
+ DEVICE( OPTI, OPTI_92C178, "92C178"),
+ DEVICE( OPTI, OPTI_82C557, "82C557 Viper-M"),
+ DEVICE( OPTI, OPTI_82C558, "82C558 Viper-M ISA+IDE"),
+ DEVICE( OPTI, OPTI_82C621, "82C621"),
+ DEVICE( OPTI, OPTI_82C700, "82C700"),
+ DEVICE( OPTI, OPTI_82C701, "82C701 FireStar Plus"),
+ DEVICE( OPTI, OPTI_82C814, "82C814 Firebridge 1"),
+ DEVICE( OPTI, OPTI_82C822, "82C822"),
+ DEVICE( OPTI, OPTI_82C825, "82C825 Firebridge 2"),
+ DEVICE( SGS, SGS_2000, "STG 2000X"),
+ DEVICE( SGS, SGS_1764, "STG 1764X"),
+ DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER_NC, "MultiMaster NC"),
+ DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER, "MultiMaster"),
+ DEVICE( BUSLOGIC, BUSLOGIC_FLASHPOINT, "FlashPoint"),
+ DEVICE( TI, TI_TVP4010, "TVP4010 Permedia"),
+ DEVICE( TI, TI_TVP4020, "TVP4020 Permedia 2"),
+ DEVICE( TI, TI_PCI1130, "PCI1130"),
+ DEVICE( TI, TI_PCI1131, "PCI1131"),
+ DEVICE( TI, TI_PCI1250, "PCI1250"),
+ DEVICE( OAK, OAK_OTI107, "OTI107"),
+ DEVICE( WINBOND2, WINBOND2_89C940,"NE2000-PCI"),
+ DEVICE( MOTOROLA, MOTOROLA_MPC105,"MPC105 Eagle"),
+ DEVICE( MOTOROLA, MOTOROLA_MPC106,"MPC106 Grackle"),
+ DEVICE( MOTOROLA, MOTOROLA_RAVEN, "Raven"),
+ DEVICE( PROMISE, PROMISE_20246, "IDE UltraDMA/33"),
+ DEVICE( PROMISE, PROMISE_5300, "DC5030"),
+ DEVICE( N9, N9_I128, "Imagine 128"),
+ DEVICE( N9, N9_I128_2, "Imagine 128v2"),
+ DEVICE( UMC, UMC_UM8673F, "UM8673F"),
+ BRIDGE( UMC, UMC_UM8891A, "UM8891A", 0x01),
+ DEVICE( UMC, UMC_UM8886BF, "UM8886BF"),
+ DEVICE( UMC, UMC_UM8886A, "UM8886A"),
+ BRIDGE( UMC, UMC_UM8881F, "UM8881F", 0x02),
+ DEVICE( UMC, UMC_UM8886F, "UM8886F"),
+ DEVICE( UMC, UMC_UM9017F, "UM9017F"),
+ DEVICE( UMC, UMC_UM8886N, "UM8886N"),
+ DEVICE( UMC, UMC_UM8891N, "UM8891N"),
+ DEVICE( X, X_AGX016, "ITT AGX016"),
+ DEVICE( PICOP, PICOP_PT86C52X, "PT86C52x Vesuvius"),
+ DEVICE( PICOP, PICOP_PT80C524, "PT80C524 Nile"),
+ DEVICE( APPLE, APPLE_BANDIT, "Bandit"),
+ DEVICE( APPLE, APPLE_GC, "Grand Central"),
+ DEVICE( APPLE, APPLE_HYDRA, "Hydra"),
+ DEVICE( NEXGEN, NEXGEN_82C501, "82C501"),
+ DEVICE( QLOGIC, QLOGIC_ISP1020, "ISP1020"),
+ DEVICE( QLOGIC, QLOGIC_ISP1022, "ISP1022"),
+ DEVICE( CYRIX, CYRIX_5510, "5510"),
+ DEVICE( CYRIX, CYRIX_PCI_MASTER,"PCI Master"),
+ DEVICE( CYRIX, CYRIX_5520, "5520"),
+ DEVICE( CYRIX, CYRIX_5530_LEGACY,"5530 Kahlua Legacy"),
+ DEVICE( CYRIX, CYRIX_5530_SMI, "5530 Kahlua SMI"),
+ DEVICE( CYRIX, CYRIX_5530_IDE, "5530 Kahlua IDE"),
+ DEVICE( CYRIX, CYRIX_5530_AUDIO,"5530 Kahlua Audio"),
+ DEVICE( CYRIX, CYRIX_5530_VIDEO,"5530 Kahlua Video"),
+ DEVICE( LEADTEK, LEADTEK_805, "S3 805"),
+ DEVICE( CONTAQ, CONTAQ_82C599, "82C599"),
+ DEVICE( CONTAQ, CONTAQ_82C693, "82C693"),
+ DEVICE( OLICOM, OLICOM_OC3136, "OC-3136/3137"),
+ DEVICE( OLICOM, OLICOM_OC2315, "OC-2315"),
+ DEVICE( OLICOM, OLICOM_OC2325, "OC-2325"),
+ DEVICE( OLICOM, OLICOM_OC2183, "OC-2183/2185"),
+ DEVICE( OLICOM, OLICOM_OC2326, "OC-2326"),
+ DEVICE( OLICOM, OLICOM_OC6151, "OC-6151/6152"),
+ DEVICE( SUN, SUN_EBUS, "EBUS"),
+ DEVICE( SUN, SUN_HAPPYMEAL, "Happy Meal Ethernet"),
+ DEVICE( SUN, SUN_SIMBA, "Advanced PCI Bridge"),
+ DEVICE( SUN, SUN_PBM, "PCI Bus Module"),
+ DEVICE( SUN, SUN_SABRE, "Ultra IIi PCI"),
+ DEVICE( CMD, CMD_640, "640 (buggy)"),
+ DEVICE( CMD, CMD_643, "643"),
+ DEVICE( CMD, CMD_646, "646"),
+ DEVICE( CMD, CMD_670, "670"),
+ DEVICE( VISION, VISION_QD8500, "QD-8500"),
+ DEVICE( VISION, VISION_QD8580, "QD-8580"),
+ DEVICE( BROOKTREE, BROOKTREE_848, "Bt848"),
+ DEVICE( BROOKTREE, BROOKTREE_849A, "Bt849"),
+ DEVICE( BROOKTREE, BROOKTREE_8474, "Bt8474"),
+ DEVICE( SIERRA, SIERRA_STB, "STB Horizon 64"),
+ DEVICE( ACC, ACC_2056, "2056"),
+ DEVICE( WINBOND, WINBOND_83769, "W83769F"),
+ DEVICE( WINBOND, WINBOND_82C105, "SL82C105"),
+ DEVICE( WINBOND, WINBOND_83C553, "W83C553"),
+ DEVICE( DATABOOK, DATABOOK_87144, "DB87144"),
+ DEVICE( PLX, PLX_SPCOM200, "SPCom 200 PCI serial I/O"),
+ DEVICE( PLX, PLX_9050, "PLX9050 PCI <-> IOBus Bridge"),
+ DEVICE( PLX, PLX_9080, "PCI9080 I2O"),
+ DEVICE( MADGE, MADGE_MK2, "Smart 16/4 BM Mk2 Ringnode"),
+ DEVICE( 3COM, 3COM_3C339, "3C339 TokenRing"),
+ DEVICE( 3COM, 3COM_3C590, "3C590 10bT"),
+ DEVICE( 3COM, 3COM_3C595TX, "3C595 100bTX"),
+ DEVICE( 3COM, 3COM_3C595T4, "3C595 100bT4"),
+ DEVICE( 3COM, 3COM_3C595MII, "3C595 100b-MII"),
+ DEVICE( 3COM, 3COM_3C900TPO, "3C900 10bTPO"),
+ DEVICE( 3COM, 3COM_3C900COMBO,"3C900 10b Combo"),
+ DEVICE( 3COM, 3COM_3C905TX, "3C905 100bTX"),
+ DEVICE( 3COM, 3COM_3C905T4, "3C905 100bT4"),
+ DEVICE( 3COM, 3COM_3C905B_TX, "3C905B 100bTX"),
+ DEVICE( SMC, SMC_EPIC100, "9432 TX"),
+ DEVICE( AL, AL_M1445, "M1445"),
+ DEVICE( AL, AL_M1449, "M1449"),
+ DEVICE( AL, AL_M1451, "M1451"),
+ DEVICE( AL, AL_M1461, "M1461"),
+ DEVICE( AL, AL_M1489, "M1489"),
+ DEVICE( AL, AL_M1511, "M1511"),
+ DEVICE( AL, AL_M1513, "M1513"),
+ DEVICE( AL, AL_M1521, "M1521"),
+ DEVICE( AL, AL_M1523, "M1523"),
+ DEVICE( AL, AL_M1531, "M1531 Aladdin IV"),
+ DEVICE( AL, AL_M1533, "M1533 Aladdin IV"),
+ DEVICE( AL, AL_M3307, "M3307 MPEG-1 decoder"),
+ DEVICE( AL, AL_M4803, "M4803"),
+ DEVICE( AL, AL_M5219, "M5219"),
+ DEVICE( AL, AL_M5229, "M5229 TXpro"),
+ DEVICE( AL, AL_M5237, "M5237 USB"),
+ DEVICE( SURECOM, SURECOM_NE34, "NE-34PCI LAN"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2070, "Magicgraph NM2070"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128V, "MagicGraph 128V"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128ZV, "MagicGraph 128ZV"),
+ DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2160, "MagicGraph NM2160"),
+ DEVICE( ASP, ASP_ABP940, "ABP940"),
+ DEVICE( ASP, ASP_ABP940U, "ABP940U"),
+ DEVICE( ASP, ASP_ABP940UW, "ABP940UW"),
+ DEVICE( MACRONIX, MACRONIX_MX98713,"MX98713"),
+ DEVICE( MACRONIX, MACRONIX_MX987x5,"MX98715 / MX98725"),
+ DEVICE( CERN, CERN_SPSB_PMC, "STAR/RD24 SCI-PCI (PMC)"),
+ DEVICE( CERN, CERN_SPSB_PCI, "STAR/RD24 SCI-PCI (PMC)"),
+ DEVICE( CERN, CERN_HIPPI_DST, "HIPPI destination"),
+ DEVICE( CERN, CERN_HIPPI_SRC, "HIPPI source"),
+ DEVICE( IMS, IMS_8849, "8849"),
+ DEVICE( TEKRAM2, TEKRAM2_690c, "DC690c"),
+ DEVICE( TUNDRA, TUNDRA_CA91C042,"CA91C042 Universe"),
+ DEVICE( AMCC, AMCC_MYRINET, "Myrinet PCI (M2-PCI-32)"),
+ DEVICE( AMCC, AMCC_S5933, "S5933"),
+ DEVICE( AMCC, AMCC_S5933_HEPC3,"S5933 Traquair HEPC3"),
+ DEVICE( INTERG, INTERG_1680, "IGA-1680"),
+ DEVICE( INTERG, INTERG_1682, "IGA-1682"),
+ DEVICE( REALTEK, REALTEK_8029, "8029"),
+ DEVICE( REALTEK, REALTEK_8129, "8129"),
+ DEVICE( REALTEK, REALTEK_8139, "8139"),
+ DEVICE( TRUEVISION, TRUEVISION_T1000,"TARGA 1000"),
+ DEVICE( INIT, INIT_320P, "320 P"),
+ DEVICE( INIT, INIT_360P, "360 P"),
+ DEVICE( VIA, VIA_82C505, "VT 82C505"),
+ DEVICE( VIA, VIA_82C561, "VT 82C561"),
+ DEVICE( VIA, VIA_82C586_1, "VT 82C586 Apollo IDE"),
+ DEVICE( VIA, VIA_82C576, "VT 82C576 3V"),
+ DEVICE( VIA, VIA_82C585, "VT 82C585 Apollo VP1/VPX"),
+ DEVICE( VIA, VIA_82C586_0, "VT 82C586 Apollo ISA"),
+ DEVICE( VIA, VIA_82C595, "VT 82C595 Apollo VP2"),
+ DEVICE( VIA, VIA_82C597_0, "VT 82C597 Apollo VP3"),
+ DEVICE( VIA, VIA_82C926, "VT 82C926 Amazon"),
+ DEVICE( VIA, VIA_82C416, "VT 82C416MV"),
+ DEVICE( VIA, VIA_82C595_97, "VT 82C595 Apollo VP2/97"),
+ DEVICE( VIA, VIA_82C586_2, "VT 82C586 Apollo USB"),
+ DEVICE( VIA, VIA_82C586_3, "VT 82C586B Apollo ACPI"),
+ DEVICE( VIA, VIA_86C100A, "VT 86C100A"),
+ DEVICE( VIA, VIA_82C597_1, "VT 82C597 Apollo VP3 AGP"),
+ DEVICE( VORTEX, VORTEX_GDT60x0, "GDT 60x0"),
+ DEVICE( VORTEX, VORTEX_GDT6000B,"GDT 6000b"),
+ DEVICE( VORTEX, VORTEX_GDT6x10, "GDT 6110/6510"),
+ DEVICE( VORTEX, VORTEX_GDT6x20, "GDT 6120/6520"),
+ DEVICE( VORTEX, VORTEX_GDT6530, "GDT 6530"),
+ DEVICE( VORTEX, VORTEX_GDT6550, "GDT 6550"),
+ DEVICE( VORTEX, VORTEX_GDT6x17, "GDT 6117/6517"),
+ DEVICE( VORTEX, VORTEX_GDT6x27, "GDT 6127/6527"),
+ DEVICE( VORTEX, VORTEX_GDT6537, "GDT 6537"),
+ DEVICE( VORTEX, VORTEX_GDT6557, "GDT 6557"),
+ DEVICE( VORTEX, VORTEX_GDT6x15, "GDT 6115/6515"),
+ DEVICE( VORTEX, VORTEX_GDT6x25, "GDT 6125/6525"),
+ DEVICE( VORTEX, VORTEX_GDT6535, "GDT 6535"),
+ DEVICE( VORTEX, VORTEX_GDT6555, "GDT 6555"),
+ DEVICE( VORTEX, VORTEX_GDT6x17RP,"GDT 6117RP/6517RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x27RP,"GDT 6127RP/6527RP"),
+ DEVICE( VORTEX, VORTEX_GDT6537RP,"GDT 6537RP"),
+ DEVICE( VORTEX, VORTEX_GDT6557RP,"GDT 6557RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x11RP,"GDT 6111RP/6511RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x21RP,"GDT 6121RP/6521RP"),
+ DEVICE( VORTEX, VORTEX_GDT6x17RP1,"GDT 6117RP1/6517RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x27RP1,"GDT 6127RP1/6527RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6537RP1,"GDT 6537RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6557RP1,"GDT 6557RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x11RP1,"GDT 6111RP1/6511RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x21RP1,"GDT 6121RP1/6521RP1"),
+ DEVICE( VORTEX, VORTEX_GDT6x17RP2,"GDT 6117RP2/6517RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6x27RP2,"GDT 6127RP2/6527RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6537RP2,"GDT 6537RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6557RP2,"GDT 6557RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6x11RP2,"GDT 6111RP2/6511RP2"),
+ DEVICE( VORTEX, VORTEX_GDT6x21RP2,"GDT 6121RP2/6521RP2"),
+ DEVICE( EF, EF_ATM_FPGA, "155P-MF1 (FPGA)"),
+ DEVICE( EF, EF_ATM_ASIC, "155P-MF1 (ASIC)"),
+ DEVICE( FORE, FORE_PCA200PC, "PCA-200PC"),
+ DEVICE( FORE, FORE_PCA200E, "PCA-200E"),
+ DEVICE( IMAGINGTECH, IMAGINGTECH_ICPCI, "MVC IC-PCI"),
+ DEVICE( PHILIPS, PHILIPS_SAA7146,"SAA7146"),
+ DEVICE( CYCLONE, CYCLONE_SDK, "SDK"),
+ DEVICE( ALLIANCE, ALLIANCE_PROMOTIO, "Promotion-6410"),
+ DEVICE( ALLIANCE, ALLIANCE_PROVIDEO, "Provideo"),
+ DEVICE( ALLIANCE, ALLIANCE_AT24, "AT24"),
+ DEVICE( ALLIANCE, ALLIANCE_AT3D, "AT3D"),
+ DEVICE( VMIC, VMIC_VME, "VMIVME-7587"),
+ DEVICE( DIGI, DIGI_EPC, "AccelPort EPC"),
+ DEVICE( DIGI, DIGI_RIGHTSWITCH, "RightSwitch SE-6"),
+ DEVICE( DIGI, DIGI_XEM, "AccelPort Xem"),
+ DEVICE( DIGI, DIGI_XR, "AccelPort Xr"),
+ DEVICE( DIGI, DIGI_CX, "AccelPort C/X"),
+ DEVICE( DIGI, DIGI_XRJ, "AccelPort Xr/J"),
+ DEVICE( DIGI, DIGI_EPCJ, "AccelPort EPC/J"),
+ DEVICE( DIGI, DIGI_XR_920, "AccelPort Xr 920"),
+ DEVICE( MUTECH, MUTECH_MV1000, "MV-1000"),
+ DEVICE( RENDITION, RENDITION_VERITE,"Verite 1000"),
+ DEVICE( RENDITION, RENDITION_VERITE2100,"Verite 2100"),
+ DEVICE( TOSHIBA, TOSHIBA_601, "Laptop"),
+ DEVICE( TOSHIBA, TOSHIBA_TOPIC95,"ToPIC95"),
+ DEVICE( TOSHIBA, TOSHIBA_TOPIC97,"ToPIC97"),
+ DEVICE( RICOH, RICOH_RL5C466, "RL5C466"),
+ DEVICE( ARTOP, ARTOP_ATP850UF, "ATP850UF"),
+ DEVICE( ZEITNET, ZEITNET_1221, "1221"),
+ DEVICE( ZEITNET, ZEITNET_1225, "1225"),
+ DEVICE( OMEGA, OMEGA_82C092G, "82C092G"),
+ DEVICE( LITEON, LITEON_LNE100TX,"LNE100TX"),
+ DEVICE( NP, NP_PCI_FDDI, "NP-PCI"),
+ DEVICE( ATT, ATT_L56XMF, "L56xMF"),
+ DEVICE( SPECIALIX, SPECIALIX_IO8, "IO8+/PCI"),
+ DEVICE( SPECIALIX, SPECIALIX_XIO, "XIO/SIO host"),
+ DEVICE( SPECIALIX, SPECIALIX_RIO, "RIO host"),
+ DEVICE( AURAVISION, AURAVISION_VXP524,"VXP524"),
+ DEVICE( IKON, IKON_10115, "10115 Greensheet"),
+ DEVICE( IKON, IKON_10117, "10117 Greensheet"),
+ DEVICE( ZORAN, ZORAN_36057, "ZR36057"),
+ DEVICE( ZORAN, ZORAN_36120, "ZR36120"),
+ DEVICE( KINETIC, KINETIC_2915, "2915 CAMAC"),
+ DEVICE( COMPEX, COMPEX_ENET100VG4, "Readylink ENET100-VG4"),
+ DEVICE( COMPEX, COMPEX_RL2000, "ReadyLink 2000"),
+ DEVICE( RP, RP8OCTA, "RocketPort 8 Oct"),
+ DEVICE( RP, RP8INTF, "RocketPort 8 Intf"),
+ DEVICE( RP, RP16INTF, "RocketPort 16 Intf"),
+ DEVICE( RP, RP32INTF, "RocketPort 32 Intf"),
+ DEVICE( CYCLADES, CYCLOM_Y_Lo, "Cyclom-Y below 1Mbyte"),
+ DEVICE( CYCLADES, CYCLOM_Y_Hi, "Cyclom-Y above 1Mbyte"),
+ DEVICE( CYCLADES, CYCLOM_Z_Lo, "Cyclom-Z below 1Mbyte"),
+ DEVICE( CYCLADES, CYCLOM_Z_Hi, "Cyclom-Z above 1Mbyte"),
+ DEVICE( ESSENTIAL, ESSENTIAL_ROADRUNNER,"Roadrunner serial HIPPI"),
+ DEVICE( O2, O2_6832, "6832"),
+ DEVICE( 3DFX, 3DFX_VOODOO, "Voodoo"),
+ DEVICE( 3DFX, 3DFX_VOODOO2, "Voodoo2"),
+ DEVICE( SIGMADES, SIGMADES_6425, "REALmagic64/GX"),
+ DEVICE( STALLION, STALLION_ECHPCI832,"EasyConnection 8/32"),
+ DEVICE( STALLION, STALLION_ECHPCI864,"EasyConnection 8/64"),
+ DEVICE( STALLION, STALLION_EIOPCI,"EasyIO"),
+ DEVICE( OPTIBASE, OPTIBASE_FORGE, "MPEG Forge"),
+ DEVICE( OPTIBASE, OPTIBASE_FUSION,"MPEG Fusion"),
+ DEVICE( OPTIBASE, OPTIBASE_VPLEX, "VideoPlex"),
+ DEVICE( OPTIBASE, OPTIBASE_VPLEXCC,"VideoPlex CC"),
+ DEVICE( OPTIBASE, OPTIBASE_VQUEST,"VideoQuest"),
+ DEVICE( ASIX, ASIX_88140, "88140"),
+ DEVICE( SATSAGEM, SATSAGEM_PCR2101,"PCR2101 DVB receiver"),
+ DEVICE( SATSAGEM, SATSAGEM_TELSATTURBO,"Telsat Turbo DVB"),
+ DEVICE( ENSONIQ, ENSONIQ_AUDIOPCI,"AudioPCI"),
+ DEVICE( PICTUREL, PICTUREL_PCIVST,"PCIVST"),
+ DEVICE( NVIDIA_SGS, NVIDIA_SGS_RIVA128, "Riva 128"),
+ DEVICE( CBOARDS, CBOARDS_DAS1602_16,"DAS1602/16"),
+ DEVICE( SYMPHONY, SYMPHONY_101, "82C101"),
+ DEVICE( TEKRAM, TEKRAM_DC290, "DC-290"),
+ DEVICE( 3DLABS, 3DLABS_300SX, "GLINT 300SX"),
+ DEVICE( 3DLABS, 3DLABS_500TX, "GLINT 500TX"),
+ DEVICE( 3DLABS, 3DLABS_DELTA, "GLINT Delta"),
+ DEVICE( 3DLABS, 3DLABS_PERMEDIA,"PERMEDIA"),
+ DEVICE( 3DLABS, 3DLABS_MX, "GLINT MX"),
+ DEVICE( AVANCE, AVANCE_ALG2064, "ALG2064i"),
+ DEVICE( AVANCE, AVANCE_2302, "ALG-2302"),
+ DEVICE( NETVIN, NETVIN_NV5000SC,"NV5000"),
+ DEVICE( S3, S3_PLATO_PXS, "PLATO/PX (system)"),
+ DEVICE( S3, S3_ViRGE, "ViRGE"),
+ DEVICE( S3, S3_TRIO, "Trio32/Trio64"),
+ DEVICE( S3, S3_AURORA64VP, "Aurora64V+"),
+ DEVICE( S3, S3_TRIO64UVP, "Trio64UV+"),
+ DEVICE( S3, S3_ViRGE_VX, "ViRGE/VX"),
+ DEVICE( S3, S3_868, "Vision 868"),
+ DEVICE( S3, S3_928, "Vision 928-P"),
+ DEVICE( S3, S3_864_1, "Vision 864-P"),
+ DEVICE( S3, S3_864_2, "Vision 864-P"),
+ DEVICE( S3, S3_964_1, "Vision 964-P"),
+ DEVICE( S3, S3_964_2, "Vision 964-P"),
+ DEVICE( S3, S3_968, "Vision 968"),
+ DEVICE( S3, S3_TRIO64V2, "Trio64V2/DX or /GX"),
+ DEVICE( S3, S3_PLATO_PXG, "PLATO/PX (graphics)"),
+ DEVICE( S3, S3_ViRGE_DXGX, "ViRGE/DX or /GX"),
+ DEVICE( S3, S3_ViRGE_GX2, "ViRGE/GX2"),
+ DEVICE( S3, S3_ViRGE_MX, "ViRGE/MX"),
+ DEVICE( S3, S3_ViRGE_MXP, "ViRGE/MX+"),
+ DEVICE( S3, S3_ViRGE_MXPMV, "ViRGE/MX+MV"),
+ DEVICE( S3, S3_SONICVIBES, "SonicVibes"),
+ DEVICE( INTEL, INTEL_82375, "82375EB"),
+ BRIDGE( INTEL, INTEL_82424, "82424ZX Saturn", 0x00),
+ DEVICE( INTEL, INTEL_82378, "82378IB"),
+ DEVICE( INTEL, INTEL_82430, "82430ZX Aries"),
+ BRIDGE( INTEL, INTEL_82434, "82434LX Mercury/Neptune", 0x00),
+ DEVICE( INTEL, INTEL_82092AA_0,"82092AA PCMCIA bridge"),
+ DEVICE( INTEL, INTEL_82092AA_1,"82092AA EIDE"),
+ DEVICE( INTEL, INTEL_7116, "SAA7116"),
+ DEVICE( INTEL, INTEL_82596, "82596"),
+ DEVICE( INTEL, INTEL_82865, "82865"),
+ DEVICE( INTEL, INTEL_82557, "82557"),
+ DEVICE( INTEL, INTEL_82437, "82437"),
+ DEVICE( INTEL, INTEL_82371_0, "82371 Triton PIIX"),
+ DEVICE( INTEL, INTEL_82371_1, "82371 Triton PIIX"),
+ DEVICE( INTEL, INTEL_82371MX, "430MX - 82371MX MPIIX"),
+ DEVICE( INTEL, INTEL_82437MX, "430MX - 82437MX MTSC"),
+ DEVICE( INTEL, INTEL_82441, "82441FX Natoma"),
+ DEVICE( INTEL, INTEL_82380FB, "82380FB Mobile"),
+ DEVICE( INTEL, INTEL_82439, "82439HX Triton II"),
+ DEVICE( INTEL, INTEL_82371SB_0,"82371SB PIIX3 ISA"),
+ DEVICE( INTEL, INTEL_82371SB_1,"82371SB PIIX3 IDE"),
+ DEVICE( INTEL, INTEL_82371SB_2,"82371SB PIIX3 USB"),
+ DEVICE( INTEL, INTEL_82437VX, "82437VX Triton II"),
+ DEVICE( INTEL, INTEL_82439TX, "82439TX"),
+ DEVICE( INTEL, INTEL_82371AB_0,"82371AB PIIX4 ISA"),
+ DEVICE( INTEL, INTEL_82371AB, "82371AB PIIX4 IDE"),
+ DEVICE( INTEL, INTEL_82371AB_2,"82371AB PIIX4 USB"),
+ DEVICE( INTEL, INTEL_82371AB_3,"82371AB PIIX4 ACPI"),
+ DEVICE( INTEL, INTEL_82443LX_0,"440LX - 82443LX PAC Host"),
+ DEVICE( INTEL, INTEL_82443LX_1,"440LX - 82443LX PAC AGP"),
+ DEVICE( INTEL, INTEL_82443BX_0,"440BX - 82443BX Host"),
+ DEVICE( INTEL, INTEL_82443BX_1,"440BX - 82443BX AGP"),
+ DEVICE( INTEL, INTEL_82443BX_2,"440BX - 82443BX Host (no AGP)"),
+ DEVICE( INTEL, INTEL_82443GX_0,"440GX - 82443GX Host"),
+ DEVICE( INTEL, INTEL_82443GX_1,"440GX - 82443GX AGP"),
+ DEVICE( INTEL, INTEL_82443GX_2,"440GX - 82443GX Host (no AGP)"),
+ DEVICE( INTEL, INTEL_P6, "Orion P6"),
+ DEVICE( INTEL, INTEL_82450GX, "82450GX Orion P6"),
+ DEVICE( KTI, KTI_ET32P2, "ET32P2"),
+ DEVICE( ADAPTEC, ADAPTEC_7810, "AIC-7810 RAID"),
+ DEVICE( ADAPTEC, ADAPTEC_7850, "AIC-7850"),
+ DEVICE( ADAPTEC, ADAPTEC_7855, "AIC-7855"),
+ DEVICE( ADAPTEC, ADAPTEC_5800, "AIC-5800"),
+ DEVICE( ADAPTEC, ADAPTEC_7860, "AIC-7860"),
+ DEVICE( ADAPTEC, ADAPTEC_7861, "AIC-7861"),
+ DEVICE( ADAPTEC, ADAPTEC_7870, "AIC-7870"),
+ DEVICE( ADAPTEC, ADAPTEC_7871, "AIC-7871"),
+ DEVICE( ADAPTEC, ADAPTEC_7872, "AIC-7872"),
+ DEVICE( ADAPTEC, ADAPTEC_7873, "AIC-7873"),
+ DEVICE( ADAPTEC, ADAPTEC_7874, "AIC-7874"),
+ DEVICE( ADAPTEC, ADAPTEC_7895, "AIC-7895U"),
+ DEVICE( ADAPTEC, ADAPTEC_7880, "AIC-7880U"),
+ DEVICE( ADAPTEC, ADAPTEC_7881, "AIC-7881U"),
+ DEVICE( ADAPTEC, ADAPTEC_7882, "AIC-7882U"),
+ DEVICE( ADAPTEC, ADAPTEC_7883, "AIC-7883U"),
+ DEVICE( ADAPTEC, ADAPTEC_7884, "AIC-7884U"),
+ DEVICE( ADAPTEC, ADAPTEC_1030, "ABA-1030 DVB receiver"),
+ DEVICE( ADAPTEC2, ADAPTEC2_2940U2, "AHA-2940U2"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7890, "AIC-7890/1"),
+ DEVICE( ADAPTEC2, ADAPTEC2_3940U2, "AHA-3940U2"),
+ DEVICE( ADAPTEC2, ADAPTEC2_7896, "AIC-7896/7"),
+ DEVICE( ATRONICS, ATRONICS_2015, "IDE-2015PL"),
+ DEVICE( TIGERJET, TIGERJET_300, "Tiger300 ISDN"),
+ DEVICE( ARK, ARK_STING, "Stingray"),
+ DEVICE( ARK, ARK_STINGARK, "Stingray ARK 2000PV"),
+ DEVICE( ARK, ARK_2000MT, "2000MT")
+};
+
+
+#ifdef CONFIG_PCI_OPTIMIZE
+
+/*
+ * An item of this structure has the following meaning:
+ * for each optimization, the register address, the mask
+ * and value to write to turn it on.
+ * There are 5 optimizations for the moment:
+ * Cache L2 write back (better than write through)
+ * Posted Write for CPU to PCI enable
+ * Posted Write for CPU to MEMORY enable
+ * Posted Write for PCI to MEMORY enable
+ * PCI Burst enable
+ *
+ * Half of the BIOSes I've met don't allow you to turn these on, and you
+ * can gain more than 15% on graphics accesses using these
+ * optimizations...
+ */
+struct optimization_type {
+ const char *type;
+ const char *off;
+ const char *on;
+} bridge_optimization[] = {
+ {"Cache L2", "write through", "write back"},
+ {"CPU-PCI posted write", "off", "on"},
+ {"CPU-Memory posted write", "off", "on"},
+ {"PCI-Memory posted write", "off", "on"},
+ {"PCI burst", "off", "on"}
+};
+
+#define NUM_OPTIMIZATIONS \
+ (sizeof(bridge_optimization) / sizeof(bridge_optimization[0]))
+
+struct bridge_mapping_type {
+ unsigned char addr; /* config space address */
+ unsigned char mask;
+ unsigned char value;
+} bridge_mapping[] = {
+ /*
+ * Intel Neptune/Mercury/Saturn:
+ * If the internal cache is write back,
+ * the L2 cache must be write through!
+ * I have to check out how to control that;
+ * for the moment, we won't touch the cache.
+ */
+ {0x0 ,0x02 ,0x02 },
+ {0x53 ,0x02 ,0x02 },
+ {0x53 ,0x01 ,0x01 },
+ {0x54 ,0x01 ,0x01 },
+ {0x54 ,0x02 ,0x02 },
+
+ /*
+ * UMC 8891A Pentium chipset:
+ * Why did you think UMC was cheaper ??
+ */
+ {0x50 ,0x10 ,0x00 },
+ {0x51 ,0x40 ,0x40 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+
+ /*
+ * UMC UM8881F
+ * This is a dummy entry for my tests.
+ * I have this chipset and no docs....
+ */
+ {0x0 ,0x1 ,0x1 },
+ {0x0 ,0x2 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 },
+ {0x0 ,0x0 ,0x0 }
+};
+
+#endif /* CONFIG_PCI_OPTIMIZE */
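+
+/* The three groups of NUM_OPTIMIZATIONS entries above are selected by the
+   bridge id recorded in dev_info[]: presumably (an assumption about the
+   optimization code, which is not shown here) optimization i for a bridge
+   with id b lives at
+
+       bridge_mapping[b * NUM_OPTIMIZATIONS + i]
+
+   so id 0x00 selects the Intel Saturn/Mercury/Neptune group, 0x01 the
+   UMC 8891A group and 0x02 the UM8881F group, matching the BRIDGE()
+   entries in the device table. */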
+
+
+/*
+ * dev_info[] is sorted so we can use binary search
+ */
+struct pci_dev_info *pci_lookup_dev(unsigned int vendor, unsigned int dev)
+{
+ int min = 0,
+ max = sizeof(dev_info)/sizeof(dev_info[0]) - 1;
+
+ for ( ; ; )
+ {
+ int i = (min + max) >> 1;
+ long order;
+
+ order = dev_info[i].vendor - (long) vendor;
+ if (!order)
+ order = dev_info[i].device - (long) dev;
+
+ if (order < 0)
+ {
+ min = i + 1;
+ if ( min > max )
+ return 0;
+ continue;
+ }
+
+ if (order > 0)
+ {
+ max = i - 1;
+ if ( min > max )
+ return 0;
+ continue;
+ }
+
+ return & dev_info[ i ];
+ }
+}
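+
+/* A minimal usage sketch (hypothetical caller, not taken from this file;
+   it assumes the third member of struct pci_dev_info is the name string,
+   as the DEVICE() initializers above suggest):
+
+       struct pci_dev_info *info = pci_lookup_dev(vendor, device);
+       if (info)
+           printk("  %s %s\n", pci_strvendor(vendor), info->name);
+       else
+           printk("  Unknown device %04x:%04x\n", vendor, device);
+ */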
+
+const char *pci_strclass (unsigned int class)
+{
+ switch (class >> 8) {
+ case PCI_CLASS_NOT_DEFINED: return "Non-VGA device";
+ case PCI_CLASS_NOT_DEFINED_VGA: return "VGA compatible device";
+
+ case PCI_CLASS_STORAGE_SCSI: return "SCSI storage controller";
+ case PCI_CLASS_STORAGE_IDE: return "IDE interface";
+ case PCI_CLASS_STORAGE_FLOPPY: return "Floppy disk controller";
+ case PCI_CLASS_STORAGE_IPI: return "IPI bus controller";
+ case PCI_CLASS_STORAGE_RAID: return "RAID bus controller";
+ case PCI_CLASS_STORAGE_OTHER: return "Unknown mass storage controller";
+
+ case PCI_CLASS_NETWORK_ETHERNET: return "Ethernet controller";
+ case PCI_CLASS_NETWORK_TOKEN_RING: return "Token ring network controller";
+ case PCI_CLASS_NETWORK_FDDI: return "FDDI network controller";
+ case PCI_CLASS_NETWORK_ATM: return "ATM network controller";
+ case PCI_CLASS_NETWORK_OTHER: return "Network controller";
+
+ case PCI_CLASS_DISPLAY_VGA: return "VGA compatible controller";
+ case PCI_CLASS_DISPLAY_XGA: return "XGA compatible controller";
+ case PCI_CLASS_DISPLAY_OTHER: return "Display controller";
+
+ case PCI_CLASS_MULTIMEDIA_VIDEO: return "Multimedia video controller";
+ case PCI_CLASS_MULTIMEDIA_AUDIO: return "Multimedia audio controller";
+ case PCI_CLASS_MULTIMEDIA_OTHER: return "Multimedia controller";
+
+ case PCI_CLASS_MEMORY_RAM: return "RAM memory";
+ case PCI_CLASS_MEMORY_FLASH: return "FLASH memory";
+ case PCI_CLASS_MEMORY_OTHER: return "Memory";
+
+ case PCI_CLASS_BRIDGE_HOST: return "Host bridge";
+ case PCI_CLASS_BRIDGE_ISA: return "ISA bridge";
+ case PCI_CLASS_BRIDGE_EISA: return "EISA bridge";
+ case PCI_CLASS_BRIDGE_MC: return "MicroChannel bridge";
+ case PCI_CLASS_BRIDGE_PCI: return "PCI bridge";
+ case PCI_CLASS_BRIDGE_PCMCIA: return "PCMCIA bridge";
+ case PCI_CLASS_BRIDGE_NUBUS: return "NuBus bridge";
+ case PCI_CLASS_BRIDGE_CARDBUS: return "CardBus bridge";
+ case PCI_CLASS_BRIDGE_OTHER: return "Bridge";
+
+ case PCI_CLASS_COMMUNICATION_SERIAL: return "Serial controller";
+ case PCI_CLASS_COMMUNICATION_PARALLEL: return "Parallel controller";
+ case PCI_CLASS_COMMUNICATION_OTHER: return "Communication controller";
+
+ case PCI_CLASS_SYSTEM_PIC: return "PIC";
+ case PCI_CLASS_SYSTEM_DMA: return "DMA controller";
+ case PCI_CLASS_SYSTEM_TIMER: return "Timer";
+ case PCI_CLASS_SYSTEM_RTC: return "RTC";
+ case PCI_CLASS_SYSTEM_OTHER: return "System peripheral";
+
+ case PCI_CLASS_INPUT_KEYBOARD: return "Keyboard controller";
+ case PCI_CLASS_INPUT_PEN: return "Digitizer Pen";
+ case PCI_CLASS_INPUT_MOUSE: return "Mouse controller";
+ case PCI_CLASS_INPUT_OTHER: return "Input device controller";
+
+ case PCI_CLASS_DOCKING_GENERIC: return "Generic Docking Station";
+ case PCI_CLASS_DOCKING_OTHER: return "Docking Station";
+
+ case PCI_CLASS_PROCESSOR_386: return "386";
+ case PCI_CLASS_PROCESSOR_486: return "486";
+ case PCI_CLASS_PROCESSOR_PENTIUM: return "Pentium";
+ case PCI_CLASS_PROCESSOR_ALPHA: return "Alpha";
+ case PCI_CLASS_PROCESSOR_POWERPC: return "Power PC";
+ case PCI_CLASS_PROCESSOR_CO: return "Co-processor";
+
+ case PCI_CLASS_SERIAL_FIREWIRE: return "FireWire (IEEE 1394)";
+ case PCI_CLASS_SERIAL_ACCESS: return "ACCESS Bus";
+ case PCI_CLASS_SERIAL_SSA: return "SSA";
+ case PCI_CLASS_SERIAL_USB: return "USB Controller";
+ case PCI_CLASS_SERIAL_FIBER: return "Fiber Channel";
+
+ default: return "Unknown class";
+ }
+}
+
+
+const char *pci_strvendor(unsigned int vendor)
+{
+ switch (vendor) {
+ case PCI_VENDOR_ID_COMPAQ: return "Compaq";
+ case PCI_VENDOR_ID_NCR: return "NCR";
+ case PCI_VENDOR_ID_ATI: return "ATI";
+ case PCI_VENDOR_ID_VLSI: return "VLSI";
+ case PCI_VENDOR_ID_ADL: return "Advance Logic";
+ case PCI_VENDOR_ID_NS: return "NS";
+ case PCI_VENDOR_ID_TSENG: return "Tseng'Lab";
+ case PCI_VENDOR_ID_WEITEK: return "Weitek";
+ case PCI_VENDOR_ID_DEC: return "DEC";
+ case PCI_VENDOR_ID_CIRRUS: return "Cirrus Logic";
+ case PCI_VENDOR_ID_IBM: return "IBM";
+ case PCI_VENDOR_ID_WD: return "Western Digital";
+ case PCI_VENDOR_ID_AMD: return "AMD";
+ case PCI_VENDOR_ID_TRIDENT: return "Trident";
+ case PCI_VENDOR_ID_AI: return "Acer Incorporated";
+ case PCI_VENDOR_ID_MATROX: return "Matrox";
+ case PCI_VENDOR_ID_CT: return "Chips & Technologies";
+ case PCI_VENDOR_ID_MIRO: return "Miro";
+ case PCI_VENDOR_ID_NEC: return "NEC";
+ case PCI_VENDOR_ID_FD: return "Future Domain";
+ case PCI_VENDOR_ID_SI: return "Silicon Integrated Systems";
+ case PCI_VENDOR_ID_HP: return "Hewlett Packard";
+ case PCI_VENDOR_ID_PCTECH: return "PCTECH";
+ case PCI_VENDOR_ID_DPT: return "DPT";
+ case PCI_VENDOR_ID_OPTI: return "OPTi";
+ case PCI_VENDOR_ID_SGS: return "SGS Thomson";
+ case PCI_VENDOR_ID_BUSLOGIC: return "BusLogic";
+ case PCI_VENDOR_ID_TI: return "Texas Instruments";
+ case PCI_VENDOR_ID_OAK: return "OAK";
+ case PCI_VENDOR_ID_WINBOND2: return "Winbond";
+ case PCI_VENDOR_ID_MOTOROLA: return "Motorola";
+ case PCI_VENDOR_ID_PROMISE: return "Promise Technology";
+ case PCI_VENDOR_ID_APPLE: return "Apple";
+ case PCI_VENDOR_ID_N9: return "Number Nine";
+ case PCI_VENDOR_ID_UMC: return "UMC";
+ case PCI_VENDOR_ID_X: return "X TECHNOLOGY";
+ case PCI_VENDOR_ID_NEXGEN: return "Nexgen";
+ case PCI_VENDOR_ID_QLOGIC: return "Q Logic";
+ case PCI_VENDOR_ID_LEADTEK: return "Leadtek Research";
+ case PCI_VENDOR_ID_CONTAQ: return "Contaq";
+ case PCI_VENDOR_ID_FOREX: return "Forex";
+ case PCI_VENDOR_ID_OLICOM: return "Olicom";
+ case PCI_VENDOR_ID_CMD: return "CMD";
+ case PCI_VENDOR_ID_VISION: return "Vision";
+ case PCI_VENDOR_ID_BROOKTREE: return "Brooktree";
+ case PCI_VENDOR_ID_SIERRA: return "Sierra";
+ case PCI_VENDOR_ID_ACC: return "ACC MICROELECTRONICS";
+ case PCI_VENDOR_ID_WINBOND: return "Winbond";
+ case PCI_VENDOR_ID_DATABOOK: return "Databook";
+ case PCI_VENDOR_ID_3COM: return "3Com";
+ case PCI_VENDOR_ID_SMC: return "SMC";
+ case PCI_VENDOR_ID_AL: return "Acer Labs";
+ case PCI_VENDOR_ID_MITSUBISHI: return "Mitsubishi";
+ case PCI_VENDOR_ID_NEOMAGIC: return "Neomagic";
+ case PCI_VENDOR_ID_ASP: return "Advanced System Products";
+ case PCI_VENDOR_ID_CERN: return "CERN";
+ case PCI_VENDOR_ID_IMS: return "IMS";
+ case PCI_VENDOR_ID_TEKRAM2: return "Tekram";
+ case PCI_VENDOR_ID_TUNDRA: return "Tundra";
+ case PCI_VENDOR_ID_AMCC: return "AMCC";
+ case PCI_VENDOR_ID_INTERG: return "Intergraphics";
+ case PCI_VENDOR_ID_REALTEK: return "Realtek";
+ case PCI_VENDOR_ID_TRUEVISION: return "Truevision";
+ case PCI_VENDOR_ID_INIT: return "Initio Corp";
+ case PCI_VENDOR_ID_VIA: return "VIA Technologies";
+ case PCI_VENDOR_ID_VORTEX: return "VORTEX";
+ case PCI_VENDOR_ID_EF: return "Efficient Networks";
+ case PCI_VENDOR_ID_FORE: return "Fore Systems";
+ case PCI_VENDOR_ID_IMAGINGTECH: return "Imaging Technology";
+ case PCI_VENDOR_ID_PHILIPS: return "Philips";
+ case PCI_VENDOR_ID_PLX: return "PLX";
+ case PCI_VENDOR_ID_ALLIANCE: return "Alliance";
+ case PCI_VENDOR_ID_VMIC: return "VMIC";
+ case PCI_VENDOR_ID_DIGI: return "Digi Intl.";
+ case PCI_VENDOR_ID_MUTECH: return "Mutech";
+ case PCI_VENDOR_ID_RENDITION: return "Rendition";
+ case PCI_VENDOR_ID_TOSHIBA: return "Toshiba";
+ case PCI_VENDOR_ID_RICOH: return "Ricoh";
+ case PCI_VENDOR_ID_ZEITNET: return "ZeitNet";
+ case PCI_VENDOR_ID_OMEGA: return "Omega Micro";
+ case PCI_VENDOR_ID_NP: return "Network Peripherals";
+ case PCI_VENDOR_ID_SPECIALIX: return "Specialix";
+ case PCI_VENDOR_ID_IKON: return "Ikon";
+ case PCI_VENDOR_ID_ZORAN: return "Zoran";
+ case PCI_VENDOR_ID_COMPEX: return "Compex";
+ case PCI_VENDOR_ID_RP: return "Comtrol";
+ case PCI_VENDOR_ID_CYCLADES: return "Cyclades";
+ case PCI_VENDOR_ID_3DFX: return "3Dfx";
+ case PCI_VENDOR_ID_SIGMADES: return "Sigma Designs";
+ case PCI_VENDOR_ID_OPTIBASE: return "Optibase";
+ case PCI_VENDOR_ID_NVIDIA_SGS: return "NVidia/SGS Thomson";
+ case PCI_VENDOR_ID_ENSONIQ: return "Ensoniq";
+ case PCI_VENDOR_ID_SYMPHONY: return "Symphony";
+ case PCI_VENDOR_ID_TEKRAM: return "Tekram";
+ case PCI_VENDOR_ID_3DLABS: return "3Dlabs";
+ case PCI_VENDOR_ID_AVANCE: return "Avance";
+ case PCI_VENDOR_ID_NETVIN: return "NetVin";
+ case PCI_VENDOR_ID_S3: return "S3 Inc.";
+ case PCI_VENDOR_ID_INTEL: return "Intel";
+ case PCI_VENDOR_ID_KTI: return "KTI";
+ case PCI_VENDOR_ID_ADAPTEC: return "Adaptec";
+ case PCI_VENDOR_ID_ADAPTEC2: return "Adaptec";
+ case PCI_VENDOR_ID_ATRONICS: return "Atronics";
+ case PCI_VENDOR_ID_ARK: return "ARK Logic";
+ case PCI_VENDOR_ID_ASIX: return "ASIX";
+ case PCI_VENDOR_ID_LITEON: return "Lite-on";
+ default: return "Unknown vendor";
+ }
+}
+
+
+const char *pci_strdev(unsigned int vendor, unsigned int device)
+{
+ struct pci_dev_info *info;
+
+ info = pci_lookup_dev(vendor, device);
+ return info ? info->name : "Unknown device";
+}
+
+
+
+/*
+ * Turn on/off PCI bridge optimization. This should allow benchmarking.
+ */
+static void burst_bridge(unsigned char bus, unsigned char devfn,
+ unsigned char pos, int turn_on)
+{
+#ifdef CONFIG_PCI_OPTIMIZE
+ struct bridge_mapping_type *bmap;
+ unsigned char val;
+ int i;
+
+ pos *= NUM_OPTIMIZATIONS;
+ printk("PCI bridge optimization.\n");
+ for (i = 0; i < NUM_OPTIMIZATIONS; i++) {
+ printk(" %s: ", bridge_optimization[i].type);
+ bmap = &bridge_mapping[pos + i];
+ if (!bmap->addr) {
+ printk("Not supported.");
+ } else {
+ pcibios_read_config_byte(bus, devfn, bmap->addr, &val);
+ if ((val & bmap->mask) == bmap->value) {
+ printk("%s.", bridge_optimization[i].on);
+ if (!turn_on) {
+ pcibios_write_config_byte(bus, devfn,
+ bmap->addr,
+ (val | bmap->mask)
+ - bmap->value);
+ printk("Changed! Now %s.", bridge_optimization[i].off);
+ }
+ } else {
+ printk("%s.", bridge_optimization[i].off);
+ if (turn_on) {
+ pcibios_write_config_byte(bus, devfn,
+ bmap->addr,
+ (val & (0xff - bmap->mask))
+ + bmap->value);
+ printk("Changed! Now %s.", bridge_optimization[i].on);
+ }
+ }
+ }
+ printk("\n");
+ }
+#endif /* CONFIG_PCI_OPTIMIZE */
+}
+
+
+/*
+ * Convert some of the configuration space registers of the device at
+ * address (bus,devfn) into a string (possibly several lines each).
+ * The configuration string is stored starting at buf.  If the
+ * string would exceed the size of the buffer (SIZE), -1 is returned.
+ */
+static int sprint_dev_config(struct pci_dev *dev, char *buf, int size)
+{
+ unsigned long base;
+ unsigned int l, class_rev, bus, devfn, last_reg;
+ unsigned short vendor, device, status;
+ unsigned char bist, latency, min_gnt, max_lat, hdr_type;
+ int reg, len = 0;
+ const char *str;
+
+ bus = dev->bus->number;
+ devfn = dev->devfn;
+
+ pcibios_read_config_byte (bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+ pcibios_read_config_dword(bus, devfn, PCI_CLASS_REVISION, &class_rev);
+ pcibios_read_config_word (bus, devfn, PCI_VENDOR_ID, &vendor);
+ pcibios_read_config_word (bus, devfn, PCI_DEVICE_ID, &device);
+ pcibios_read_config_word (bus, devfn, PCI_STATUS, &status);
+ pcibios_read_config_byte (bus, devfn, PCI_BIST, &bist);
+ pcibios_read_config_byte (bus, devfn, PCI_LATENCY_TIMER, &latency);
+ pcibios_read_config_byte (bus, devfn, PCI_MIN_GNT, &min_gnt);
+ pcibios_read_config_byte (bus, devfn, PCI_MAX_LAT, &max_lat);
+ if (len + 80 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, " Bus %2d, device %3d, function %2d:\n",
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ if (len + 80 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, " %s: %s %s (rev %d).\n ",
+ pci_strclass(class_rev >> 8), pci_strvendor(vendor),
+ pci_strdev(vendor, device), class_rev & 0xff);
+
+ if (!pci_lookup_dev(vendor, device)) {
+ len += sprintf(buf + len,
+ "Vendor id=%x. Device id=%x.\n ",
+ vendor, device);
+ }
+
+	str = "Unknown devsel. ";	/* in case of a reserved DEVSEL encoding */
+ switch (status & PCI_STATUS_DEVSEL_MASK) {
+ case PCI_STATUS_DEVSEL_FAST: str = "Fast devsel. "; break;
+ case PCI_STATUS_DEVSEL_MEDIUM: str = "Medium devsel. "; break;
+ case PCI_STATUS_DEVSEL_SLOW: str = "Slow devsel. "; break;
+ }
+ if (len + strlen(str) > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, str);
+
+ if (status & PCI_STATUS_FAST_BACK) {
+# define fast_b2b_capable "Fast back-to-back capable. "
+ if (len + strlen(fast_b2b_capable) > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, fast_b2b_capable);
+# undef fast_b2b_capable
+ }
+
+ if (bist & PCI_BIST_CAPABLE) {
+# define BIST_capable "BIST capable. "
+ if (len + strlen(BIST_capable) > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, BIST_capable);
+# undef BIST_capable
+ }
+
+ if (dev->irq) {
+ if (len + 40 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, "IRQ %d. ", dev->irq);
+ }
+
+ if (dev->master) {
+ if (len + 80 > size) {
+ return -1;
+ }
+ len += sprintf(buf + len, "Master Capable. ");
+ if (latency)
+ len += sprintf(buf + len, "Latency=%d. ", latency);
+ else
+ len += sprintf(buf + len, "No bursts. ");
+ if (min_gnt)
+ len += sprintf(buf + len, "Min Gnt=%d.", min_gnt);
+ if (max_lat)
+ len += sprintf(buf + len, "Max Lat=%d.", max_lat);
+ }
+
+ switch (hdr_type & 0x7f) {
+ case 0:
+ last_reg = PCI_BASE_ADDRESS_5;
+ break;
+ case 1:
+ last_reg = PCI_BASE_ADDRESS_1;
+ break;
+ default:
+ last_reg = 0;
+ }
+ for (reg = PCI_BASE_ADDRESS_0; reg <= last_reg; reg += 4) {
+ if (len + 40 > size) {
+ return -1;
+ }
+ pcibios_read_config_dword(bus, devfn, reg, &l);
+ base = l;
+ if (!base) {
+ continue;
+ }
+
+ if (base & PCI_BASE_ADDRESS_SPACE_IO) {
+ len += sprintf(buf + len,
+ "\n I/O at 0x%lx.",
+ base & PCI_BASE_ADDRESS_IO_MASK);
+ } else {
+ const char *pref, *type = "unknown";
+
+ if (base & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+ pref = "P";
+ } else {
+ pref = "Non-p";
+ }
+ switch (base & PCI_BASE_ADDRESS_MEM_TYPE_MASK) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ type = "32 bit"; break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ type = "20 bit"; break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ type = "64 bit";
+ /* read top 32 bit address of base addr: */
+ reg += 4;
+ pcibios_read_config_dword(bus, devfn, reg, &l);
+ base |= ((u64) l) << 32;
+ break;
+ }
+ len += sprintf(buf + len,
+ "\n %srefetchable %s memory at "
+ "0x%lx.", pref, type,
+ base & PCI_BASE_ADDRESS_MEM_MASK);
+ }
+ }
+
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+
+/*
+ * Return list of PCI devices as a character string for /proc/pci.
+ * BUF is a buffer that is PAGE_SIZE bytes long.
+ */
+int get_pci_list(char *buf)
+{
+ int nprinted, len, size;
+ struct pci_dev *dev;
+# define MSG "\nwarning: page-size limit reached!\n"
+
+	/* reserve space for the truncation warning message: */
+ size = PAGE_SIZE - (strlen(MSG) + 1);
+ len = sprintf(buf, "PCI devices found:\n");
+
+ for (dev = pci_devices; dev; dev = dev->next) {
+ nprinted = sprint_dev_config(dev, buf + len, size - len);
+ if (nprinted < 0) {
+ return len + sprintf(buf + len, MSG);
+ }
+ len += nprinted;
+ }
+ return len;
+}
+
+
+/*
+ * pci_malloc() returns initialized memory of size SIZE. Can be
+ * used only while pci_init() is active.
+ */
+static void *pci_malloc(long size, unsigned long *mem_startp)
+{
+ void *mem;
+
+#ifdef DEBUG
+ printk("...pci_malloc(size=%ld,mem=%p)", size, *mem_startp);
+#endif
+ mem = (void*) *mem_startp;
+ *mem_startp += (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
+ memset(mem, 0, size);
+ return mem;
+}
+
+
+static unsigned int scan_bus(struct pci_bus *bus, unsigned long *mem_startp)
+{
+ unsigned int devfn, l, max;
+ unsigned char cmd, tmp, hdr_type, ht, is_multi = 0;
+ struct pci_dev_info *info;
+ struct pci_dev *dev;
+ struct pci_bus *child;
+
+#ifdef DEBUG
+ printk("...scan_bus(busno=%d,mem=%p)\n", bus->number, *mem_startp);
+#endif
+
+ max = bus->secondary;
+ for (devfn = 0; devfn < 0xff; ++devfn) {
+ if (PCI_FUNC(devfn) && !is_multi) {
+ /* Not a multi-function device */
+ continue;
+ }
+ pcibios_read_config_byte(bus->number, devfn, PCI_HEADER_TYPE, &hdr_type);
+ if (!PCI_FUNC(devfn))
+ is_multi = hdr_type & 0x80;
+
+ pcibios_read_config_dword(bus->number, devfn, PCI_VENDOR_ID, &l);
+ /* some broken boards return 0 if a slot is empty: */
+ if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
+ continue;
+
+ dev = pci_malloc(sizeof(*dev), mem_startp);
+ dev->bus = bus;
+ dev->devfn = devfn;
+ dev->vendor = l & 0xffff;
+ dev->device = (l >> 16) & 0xffff;
+
+ /*
+ * Check to see if we know about this device and report
+ * a message at boot time. This is the only way to
+ * learn about new hardware...
+ */
+ info = pci_lookup_dev(dev->vendor, dev->device);
+ if (!info) {
+#if 0
+ printk("Warning : Unknown PCI device (%x:%x). Please read include/linux/pci.h\n",
+ dev->vendor, dev->device);
+#endif
+ } else {
+			/* Some BIOSes are lazy. Let's do their job: */
+ if (info->bridge_type != 0xff) {
+ burst_bridge(bus->number, devfn,
+ info->bridge_type, 1);
+ }
+ }
+
+ /* non-destructively determine if device can be a master: */
+ pcibios_read_config_byte(bus->number, devfn, PCI_COMMAND,
+ &cmd);
+ pcibios_write_config_byte(bus->number, devfn, PCI_COMMAND,
+ cmd | PCI_COMMAND_MASTER);
+ pcibios_read_config_byte(bus->number, devfn, PCI_COMMAND,
+ &tmp);
+ dev->master = ((tmp & PCI_COMMAND_MASTER) != 0);
+ pcibios_write_config_byte(bus->number, devfn, PCI_COMMAND,
+ cmd);
+
+ /* read irq level (may be changed during pcibios_fixup()): */
+ pcibios_read_config_byte(bus->number, devfn,
+ PCI_INTERRUPT_LINE, &dev->irq);
+
+ /* check to see if this device is a PCI-PCI bridge: */
+ pcibios_read_config_dword(bus->number, devfn,
+ PCI_CLASS_REVISION, &l);
+ l = l >> 8; /* upper 3 bytes */
+ dev->class = l;
+
+ /*
+ * Check if the header type is known and consistent with
+ * device type. PCI-to-PCI Bridges should have hdr_type 1,
+ * CardBus Bridges 2, all other devices 0.
+ */
+ switch (dev->class >> 8) {
+ case PCI_CLASS_BRIDGE_PCI:
+ ht = 1;
+ break;
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ ht = 2;
+ break;
+ default:
+ ht = 0;
+ }
+ if (ht != (hdr_type & 0x7f)) {
+ printk(KERN_WARNING "PCI: %02x:%02x [%04x/%04x/%06x] has unknown header type %02x, ignoring.\n",
+ bus->number, dev->devfn, dev->vendor, dev->device, dev->class, hdr_type);
+ continue;
+ }
+
+ /*
+ * Put it into the simple chain of all PCI devices.
+ * It is used to find devices once everything is set up.
+ */
+ dev->next = pci_devices;
+ pci_devices = dev;
+
+ /*
+ * Now insert it into the list of devices held
+ * by the parent bus.
+ */
+ dev->sibling = bus->devices;
+ bus->devices = dev;
+
+ if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI) {
+ unsigned int buses;
+ unsigned short cr;
+
+ /*
+ * Insert it into the tree of buses.
+ */
+ child = pci_malloc(sizeof(*child), mem_startp);
+ child->next = bus->children;
+ bus->children = child;
+ child->self = dev;
+ child->parent = bus;
+
+ /*
+ * Set up the primary, secondary and subordinate
+ * bus numbers.
+ */
+ child->number = child->secondary = ++max;
+ child->primary = bus->secondary;
+ child->subordinate = 0xff;
+ /*
+ * Clear all status bits and turn off memory,
+ * I/O and master enables.
+ */
+ pcibios_read_config_word(bus->number, devfn,
+ PCI_COMMAND, &cr);
+ pcibios_write_config_word(bus->number, devfn,
+ PCI_COMMAND, 0x0000);
+ pcibios_write_config_word(bus->number, devfn,
+ PCI_STATUS, 0xffff);
+ /*
+ * Read the existing primary/secondary/subordinate bus
+ * number configuration to determine if the PCI bridge
+ * has already been configured by the system. If so,
+ * do not modify the configuration, merely note it.
+ */
+ pcibios_read_config_dword(bus->number, devfn, 0x18,
+ &buses);
+ if ((buses & 0xFFFFFF) != 0)
+ {
+ child->primary = buses & 0xFF;
+ child->secondary = (buses >> 8) & 0xFF;
+ child->subordinate = (buses >> 16) & 0xFF;
+ child->number = child->secondary;
+ max = scan_bus(child, mem_startp);
+ }
+ else
+ {
+ /*
+ * Configure the bus numbers for this bridge:
+ */
+ buses &= 0xff000000;
+ buses |=
+ (((unsigned int)(child->primary) << 0) |
+ ((unsigned int)(child->secondary) << 8) |
+ ((unsigned int)(child->subordinate) << 16));
+ pcibios_write_config_dword(bus->number, devfn, 0x18,
+ buses);
+ /*
+ * Now we can scan all subordinate buses:
+ */
+ max = scan_bus(child, mem_startp);
+ /*
+ * Set the subordinate bus number to its real
+ * value:
+ */
+ child->subordinate = max;
+ buses = (buses & 0xff00ffff)
+ | ((unsigned int)(child->subordinate) << 16);
+ pcibios_write_config_dword(bus->number, devfn, 0x18,
+ buses);
+ }
+ pcibios_write_config_word(bus->number, devfn,
+ PCI_COMMAND, cr);
+ }
+ }
+ /*
+ * We've scanned the bus and so we know all about what's on
+ * the other side of any bridges that may be on this bus plus
+ * any devices.
+ *
+ * Return how far we've got finding sub-buses.
+ */
+ return max;
+}
+
+
+unsigned long pci_init (unsigned long mem_start, unsigned long mem_end)
+{
+ mem_start = pcibios_init(mem_start, mem_end);
+
+ if (!pcibios_present()) {
+ printk("pci_init: no BIOS32 detected\n");
+ return mem_start;
+ }
+
+ printk("Probing PCI hardware.\n");
+
+ memset(&pci_root, 0, sizeof(pci_root));
+ pci_root.subordinate = scan_bus(&pci_root, &mem_start);
+
+ /* give BIOS a chance to apply platform specific fixes: */
+ mem_start = pcibios_fixup(mem_start, mem_end);
+
+#ifdef DEBUG
+ {
+ int len = get_pci_list((char*)mem_start);
+ if (len) {
+ ((char *) mem_start)[len] = '\0';
+ printk("%s\n", (char *) mem_start);
+ }
+ }
+#endif
+ return mem_start;
+}
diff --git a/linux/src/drivers/scsi/53c7,8xx.h b/linux/src/drivers/scsi/53c7,8xx.h
new file mode 100644
index 0000000..1a6680f
--- /dev/null
+++ b/linux/src/drivers/scsi/53c7,8xx.h
@@ -0,0 +1,1584 @@
+/*
+ * NCR 53c{7,8}0x0 driver, header file
+ *
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@PoohSticks.ORG
+ * +1 (303) 786-7975
+ *
+ * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+ *
+ * PRE-ALPHA
+ *
+ * For more information, please consult
+ *
+ * NCR 53C700/53C700-66
+ * SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR 53C810
+ * PCI-SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * +1 (719) 578-3400
+ *
+ * Toll free literature number
+ * +1 (800) 334-5454
+ *
+ */
+
+#ifndef NCR53c7x0_H
+#define NCR53c7x0_H
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else
+#define LINUX_1_2
+#endif
+#endif
+
+/*
+ * Prevent name space pollution in hosts.c, and only provide the
+ * define we need to get the NCR53c7x0 driver into the host template
+ * array.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+#include <scsi/scsicam.h>
+
+extern int NCR53c7xx_abort(Scsi_Cmnd *);
+extern int NCR53c7xx_detect(Scsi_Host_Template *tpnt);
+extern int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+extern int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
+#ifdef MODULE
+extern int NCR53c7xx_release(struct Scsi_Host *);
+#else
+#define NCR53c7xx_release NULL
+#endif
+
+#ifdef LINUX_1_2
+#define NCR53c7xx {NULL, NULL, "NCR53c{7,8}xx (rel 17)", NCR53c7xx_detect,\
+ NULL, /* info */ NULL, /* command, deprecated */ NULL, \
+ NCR53c7xx_queue_command, NCR53c7xx_abort, NCR53c7xx_reset, \
+ NULL /* slave attach */, scsicam_bios_param, /* can queue */ 24, \
+ /* id */ 7, 127 /* old SG_ALL */, /* cmd per lun */ 3, \
+ /* present */ 0, /* unchecked isa dma */ 0, DISABLE_CLUSTERING}
+#else
+#define NCR53c7xx {NULL, NULL, NULL, NULL, \
+ "NCR53c{7,8}xx (rel 17)", NCR53c7xx_detect,\
+ NULL, /* info */ NULL, /* command, deprecated */ NULL, \
+ NCR53c7xx_queue_command, NCR53c7xx_abort, NCR53c7xx_reset, \
+ NULL /* slave attach */, scsicam_bios_param, /* can queue */ 24, \
+ /* id */ 7, 127 /* old SG_ALL */, /* cmd per lun */ 3, \
+ /* present */ 0, /* unchecked isa dma */ 0, DISABLE_CLUSTERING}
+#endif
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#ifndef HOSTS_C
+#ifdef LINUX_1_2
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are trivial on the 1:1 Linux/i386 mapping (but if we ever
+ * make the kernel segment mapped at 0, we need to do translation
+ * on the i386 as well)
+ */
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ return (unsigned long) address;
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return (void *) address;
+}
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#define readl(addr) (*(volatile unsigned int *) (addr))
+
+#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
+
+#define mb()
+
+#endif /* def LINUX_1_2 */
+
+/* Register addresses, ordered numerically */
+
+/* SCSI control 0 rw, default = 0xc0 */
+#define SCNTL0_REG 0x00
+#define SCNTL0_ARB1 0x80 /* 0 0 = simple arbitration */
+#define SCNTL0_ARB2 0x40 /* 1 1 = full arbitration */
+#define SCNTL0_STRT 0x20 /* Start Sequence */
+#define SCNTL0_WATN 0x10 /* Select with ATN */
+#define SCNTL0_EPC 0x08 /* Enable parity checking */
+/* Bit 2 is reserved on 800 series chips */
+#define SCNTL0_EPG_700 0x04 /* Enable parity generation */
+#define SCNTL0_AAP 0x02 /* ATN/ on parity error */
+#define SCNTL0_TRG 0x01 /* Target mode */
+
+/* SCSI control 1 rw, default = 0x00 */
+
+#define SCNTL1_REG 0x01
+#define SCNTL1_EXC 0x80 /* Extra Clock Cycle of Data setup */
+#define SCNTL1_ADB 0x40 /* contents of SODL on bus */
+#define SCNTL1_ESR_700 0x20 /* Enable SIOP response to selection
+ and reselection */
+#define SCNTL1_DHP_800 0x20 /* Disable halt on parity error or ATN
+ target mode only */
+#define SCNTL1_CON 0x10 /* Connected */
+#define SCNTL1_RST 0x08 /* SCSI RST/ */
+#define SCNTL1_AESP 0x04 /* Force bad parity */
+#define SCNTL1_SND_700 0x02 /* Start SCSI send */
+#define SCNTL1_IARB_800 0x02 /* Immediate Arbitration, start
+ arbitration immediately after
+ busfree is detected */
+#define SCNTL1_RCV_700 0x01 /* Start SCSI receive */
+#define SCNTL1_SST_800 0x01 /* Start SCSI transfer */
+
+/* SCSI control 2 rw, */
+
+#define SCNTL2_REG_800 0x02
+#define SCNTL2_800_SDU 0x80 /* SCSI disconnect unexpected */
+
+/* SCSI control 3 rw */
+
+#define SCNTL3_REG_800 0x03
+#define SCNTL3_800_SCF_SHIFT 4
+#define SCNTL3_800_SCF_MASK 0x70
+#define SCNTL3_800_SCF2 0x40 /* Synchronous divisor */
+#define SCNTL3_800_SCF1 0x20 /* 0x00 = SCLK/3 */
+#define SCNTL3_800_SCF0 0x10 /* 0x10 = SCLK/1 */
+ /* 0x20 = SCLK/1.5
+ 0x30 = SCLK/2
+ 0x40 = SCLK/3 */
+
+#define SCNTL3_800_CCF_SHIFT 0
+#define SCNTL3_800_CCF_MASK 0x07
+#define SCNTL3_800_CCF2 0x04 /* 0x00 50.01 to 66 */
+#define SCNTL3_800_CCF1 0x02 /* 0x01 16.67 to 25 */
+#define SCNTL3_800_CCF0 0x01 /* 0x02 25.01 - 37.5
+ 0x03 37.51 - 50
+ 0x04 50.01 - 66 */
+
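+/*
+ * Editorial illustration, not part of the original header: a minimal
+ * sketch picking the SCNTL3 CCF code from the SCLK frequency in MHz,
+ * following the ranges listed in the comments above (0x00 and 0x04 both
+ * cover 50.01 - 66 MHz; this sketch returns the explicit code).  The
+ * function name is hypothetical.
+ */
+static inline unsigned char scntl3_ccf_from_mhz_example (unsigned int mhz)
+{
+    if (mhz <= 25)
+	return 0x01;		/* 16.67 - 25 MHz */
+    if (mhz <= 37)
+	return 0x02;		/* 25.01 - 37.5 MHz */
+    if (mhz <= 50)
+	return 0x03;		/* 37.51 - 50 MHz */
+    return 0x04;		/* 50.01 - 66 MHz */
+}
+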
+/*
+ * SCSI destination ID rw - the appropriate bit is set for the selected
+ * target ID. This is written by the SCSI SCRIPTS processor.
+ * default = 0x00
+ */
+#define SDID_REG_700 0x02
+#define SDID_REG_800 0x06
+
+#define GP_REG_800 0x07 /* General purpose IO */
+#define GP_800_IO1 0x02
+#define GP_800_IO2 0x01
+
+
+/* SCSI interrupt enable rw, default = 0x00 */
+#define SIEN_REG_700 0x03
+#define SIEN0_REG_800 0x40
+#define SIEN_MA 0x80 /* Phase mismatch (ini) or ATN (tgt) */
+#define SIEN_FC 0x40 /* Function complete */
+#define SIEN_700_STO 0x20 /* Selection or reselection timeout */
+#define SIEN_800_SEL 0x20 /* Selected */
+#define SIEN_700_SEL 0x10 /* Selected or reselected */
+#define SIEN_800_RESEL 0x10 /* Reselected */
+#define SIEN_SGE 0x08 /* SCSI gross error */
+#define SIEN_UDC 0x04 /* Unexpected disconnect */
+#define SIEN_RST 0x02 /* SCSI RST/ received */
+#define SIEN_PAR 0x01 /* Parity error */
+
+/*
+ * SCSI chip ID rw
+ * NCR53c700 :
+ * When arbitrating, the highest set bit is used; when reselection or selection
+ * occurs, the chip responds to all IDs for which a bit is set.
+ * default = 0x00
+ * NCR53c810 :
+ * Uses bit mapping
+ */
+#define SCID_REG 0x04
+/* Bit 7 is reserved on 800 series chips */
+#define SCID_800_RRE 0x40 /* Enable response to reselection */
+#define SCID_800_SRE 0x20 /* Enable response to selection */
+/* Bits four and three are reserved on 800 series chips */
+#define SCID_800_ENC_MASK 0x07 /* Encoded SCSI ID */
+
+/* SCSI transfer rw, default = 0x00 */
+#define SXFER_REG 0x05
+#define SXFER_DHP 0x80 /* Disable halt on parity */
+
+#define SXFER_TP2 0x40 /* Transfer period msb */
+#define SXFER_TP1 0x20
+#define SXFER_TP0 0x10 /* lsb */
+#define SXFER_TP_MASK 0x70
+/* FIXME : SXFER_TP_SHIFT == 5 is right for '8xx chips */
+#define SXFER_TP_SHIFT 5
+#define SXFER_TP_4 0x00 /* Divisors */
+#define SXFER_TP_5 0x10<<1
+#define SXFER_TP_6 0x20<<1
+#define SXFER_TP_7 0x30<<1
+#define SXFER_TP_8 0x40<<1
+#define SXFER_TP_9 0x50<<1
+#define SXFER_TP_10 0x60<<1
+#define SXFER_TP_11 0x70<<1
+
+#define SXFER_MO3 0x08 /* Max offset msb */
+#define SXFER_MO2 0x04
+#define SXFER_MO1 0x02
+#define SXFER_MO0 0x01 /* lsb */
+#define SXFER_MO_MASK 0x0f
+#define SXFER_MO_SHIFT 0
+
+/*
+ * SCSI output data latch rw
+ * The contents of this register are driven onto the SCSI bus when
+ * the Assert Data Bus bit of the SCNTL1 register is set and
+ * the CD, IO, and MSG bits of the SOCL register match the SCSI phase
+ */
+#define SODL_REG_700 0x06
+#define SODL_REG_800 0x54
+
+
+/*
+ * SCSI output control latch rw, default = 0
+ * Note that when the chip is being manually programmed as an initiator,
+ * the MSG, CD, and IO bits must be set correctly for the phase the target
+ * is driving the bus in. Otherwise no data transfer will occur due to
+ * phase mismatch.
+ */
+
+#define SBCL_REG 0x0b
+#define SBCL_REQ 0x80 /* REQ */
+#define SBCL_ACK 0x40 /* ACK */
+#define SBCL_BSY 0x20 /* BSY */
+#define SBCL_SEL 0x10 /* SEL */
+#define SBCL_ATN 0x08 /* ATN */
+#define SBCL_MSG 0x04 /* MSG */
+#define SBCL_CD 0x02 /* C/D */
+#define SBCL_IO 0x01 /* I/O */
+#define SBCL_PHASE_CMDOUT SBCL_CD
+#define SBCL_PHASE_DATAIN SBCL_IO
+#define SBCL_PHASE_DATAOUT 0
+#define SBCL_PHASE_MSGIN (SBCL_CD|SBCL_IO|SBCL_MSG)
+#define SBCL_PHASE_MSGOUT (SBCL_CD|SBCL_MSG)
+#define SBCL_PHASE_STATIN (SBCL_CD|SBCL_IO)
+#define SBCL_PHASE_MASK (SBCL_CD|SBCL_IO|SBCL_MSG)
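+
+/*
+ * Editorial illustration, not part of the original header: a minimal
+ * sketch testing whether the bus is currently in a given information
+ * transfer phase by comparing the MSG, CD and IO bits against one of the
+ * SBCL_PHASE_* values above.  It assumes memory-mapped chip registers at
+ * `base' and the readb() accessor (defined above for LINUX_1_2 builds,
+ * otherwise provided by <asm/io.h>); the function name is hypothetical.
+ */
+static inline int ncr_bus_in_phase_example (unsigned long base,
+    unsigned char phase)
+{
+    return (readb(base + SBCL_REG) & SBCL_PHASE_MASK) == phase;
+}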
+
+/*
+ * SCSI first byte received latch ro
+ * This register contains the first byte received during a block MOVE
+ * SCSI SCRIPTS instruction, including
+ *
+ * Initiator mode Target mode
+ * Message in Command
+ * Status Message out
+ * Data in Data out
+ *
+ * It also contains the selecting or reselecting device's ID and our
+ * ID.
+ *
+ * Note that this is the register the various IF conditionals can
+ * operate on.
+ */
+#define SFBR_REG 0x08
+
+/*
+ * SCSI input data latch ro
+ * In initiator mode, data is latched into this register on the rising
+ * edge of REQ/. In target mode, data is latched on the rising edge of
+ * ACK/
+ */
+#define SIDL_REG_700 0x09
+#define SIDL_REG_800 0x50
+
+/*
+ * SCSI bus data lines ro
+ * This register reflects the instantaneous status of the SCSI data
+ * lines. Note that SCNTL0 must be set to disable parity checking,
+ * otherwise reading this register will latch new parity.
+ */
+#define SBDL_REG_700 0x0a
+#define SBDL_REG_800 0x58
+
+#define SSID_REG_800 0x0a
+#define SSID_800_VAL 0x80 /* Exactly two bits asserted at sel */
+#define SSID_800_ENCID_MASK 0x07 /* Device which performed operation */
+
+
+/*
+ * SCSI bus control lines rw,
+ * instantaneous readout of control lines
+ */
+#define SOCL_REG 0x0b
+#define SOCL_REQ 0x80 /* REQ ro */
+#define SOCL_ACK 0x40 /* ACK ro */
+#define SOCL_BSY 0x20 /* BSY ro */
+#define SOCL_SEL 0x10 /* SEL ro */
+#define SOCL_ATN 0x08 /* ATN ro */
+#define SOCL_MSG 0x04 /* MSG ro */
+#define SOCL_CD 0x02 /* C/D ro */
+#define SOCL_IO 0x01 /* I/O ro */
+/*
+ * Synchronous SCSI Clock Control bits
+ * 0 - set by DCNTL
+ * 1 - SCLK / 1.0
+ * 2 - SCLK / 1.5
+ * 3 - SCLK / 2.0
+ */
+#define SBCL_SSCF1 0x02 /* wo, -66 only */
+#define SBCL_SSCF0 0x01 /* wo, -66 only */
+#define SBCL_SSCF_MASK 0x03
+
+/*
+ * XXX note : when reading the DSTAT and SSTAT registers to clear interrupts,
+ * ensure that 10 clocks elapse between the two
+ */
+/* DMA status ro */
+#define DSTAT_REG 0x0c
+#define DSTAT_DFE 0x80 /* DMA FIFO empty */
+#define DSTAT_800_MDPE 0x40 /* Master Data Parity Error */
+#define DSTAT_800_BF 0x20 /* Bus Fault */
+#define DSTAT_ABRT 0x10 /* Aborted - set on error */
+#define DSTAT_SSI 0x08 /* SCRIPTS single step interrupt */
+#define DSTAT_SIR 0x04 /* SCRIPTS interrupt received -
+ set when INT instruction is
+ executed */
+#define DSTAT_WTD 0x02 /* Watchdog timeout detected */
+#define DSTAT_OPC 0x01 /* Illegal instruction */
+#define DSTAT_800_IID 0x01 /* Same thing, different name */
+
+
+/* NCR53c800 moves this stuff into SIST0 */
+#define SSTAT0_REG 0x0d /* SCSI status 0 ro */
+#define SIST0_REG_800 0x42
+#define SSTAT0_MA 0x80 /* ini : phase mismatch,
+ * tgt : ATN/ asserted
+ */
+#define SSTAT0_CMP 0x40 /* function complete */
+#define SSTAT0_700_STO 0x20 /* Selection or reselection timeout */
+#define SIST0_800_SEL 0x20 /* Selected */
+#define SSTAT0_700_SEL 0x10 /* Selected or reselected */
+#define SIST0_800_RSL 0x10 /* Reselected */
+#define SSTAT0_SGE 0x08 /* SCSI gross error */
+#define SSTAT0_UDC 0x04 /* Unexpected disconnect */
+#define SSTAT0_RST 0x02 /* SCSI RST/ received */
+#define SSTAT0_PAR 0x01 /* Parity error */
+
+/* And uses SSTAT0 for what was SSTAT1 */
+
+#define SSTAT1_REG 0x0e /* SCSI status 1 ro */
+#define SSTAT1_ILF 0x80 /* SIDL full */
+#define SSTAT1_ORF 0x40 /* SODR full */
+#define SSTAT1_OLF 0x20 /* SODL full */
+#define SSTAT1_AIP 0x10 /* Arbitration in progress */
+#define SSTAT1_LOA 0x08 /* Lost arbitration */
+#define SSTAT1_WOA 0x04 /* Won arbitration */
+#define SSTAT1_RST 0x02 /* Instant readout of RST/ */
+#define SSTAT1_SDP 0x01 /* Instant readout of SDP/ */
+
+#define SSTAT2_REG 0x0f /* SCSI status 2 ro */
+#define SSTAT2_FF3 0x80 /* number of bytes in synchronous */
+#define SSTAT2_FF2 0x40 /* data FIFO */
+#define SSTAT2_FF1 0x20
+#define SSTAT2_FF0 0x10
+#define SSTAT2_FF_MASK 0xf0
+#define SSTAT2_FF_SHIFT 4
+
+/*
+ * Latched signals, latched on the leading edge of REQ/ for initiators,
+ * ACK/ for targets.
+ */
+#define SSTAT2_SDP 0x08 /* SDP */
+#define SSTAT2_MSG 0x04 /* MSG */
+#define SSTAT2_CD 0x02 /* C/D */
+#define SSTAT2_IO 0x01 /* I/O */
+#define SSTAT2_PHASE_CMDOUT SSTAT2_CD
+#define SSTAT2_PHASE_DATAIN SSTAT2_IO
+#define SSTAT2_PHASE_DATAOUT 0
+#define SSTAT2_PHASE_MSGIN (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
+#define SSTAT2_PHASE_MSGOUT (SSTAT2_CD|SSTAT2_MSG)
+#define SSTAT2_PHASE_STATIN (SSTAT2_CD|SSTAT2_IO)
+#define SSTAT2_PHASE_MASK (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
+
+
+/* NCR53c700-66 only */
+#define SCRATCHA_REG_00 0x10 /* through 0x13 Scratch A rw */
+/* NCR53c710 and higher */
+#define DSA_REG 0x10 /* DATA structure address */
+
+#define CTEST0_REG_700 0x14 /* Chip test 0 ro */
+#define CTEST0_REG_800 0x18 /* Chip test 0 rw, general purpose */
+/* 0x80 - 0x04 are reserved */
+#define CTEST0_700_RTRG 0x02 /* Real target mode */
+#define CTEST0_700_DDIR 0x01 /* Data direction, 1 =
+ * SCSI bus to host, 0 =
+ * host to SCSI.
+ */
+
+#define CTEST1_REG_700 0x15 /* Chip test 1 ro */
+#define CTEST1_REG_800 0x19 /* Chip test 1 ro */
+#define CTEST1_FMT3 0x80 /* Identify which byte lanes are empty */
+#define CTEST1_FMT2 0x40 /* in the DMA FIFO */
+#define CTEST1_FMT1 0x20
+#define CTEST1_FMT0 0x10
+
+#define CTEST1_FFL3 0x08 /* Identify which bytes lanes are full */
+#define CTEST1_FFL2 0x04 /* in the DMA FIFO */
+#define CTEST1_FFL1 0x02
+#define CTEST1_FFL0 0x01
+
+#define CTEST2_REG_700 0x16 /* Chip test 2 ro */
+#define CTEST2_REG_800 0x1a /* Chip test 2 ro */
+
+#define CTEST2_800_DDIR 0x80 /* 1 = SCSI->host */
+#define CTEST2_800_SIGP 0x40 /* A copy of SIGP in ISTAT.
+ Reading this register clears */
+#define CTEST2_800_CIO 0x20 /* Configured as IO */
+#define CTEST2_800_CM 0x10 /* Configured as memory */
+
+/* 0x80 - 0x40 are reserved on 700 series chips */
+#define CTEST2_700_SOFF 0x20 /* SCSI Offset Compare,
+ * As an initiator, this bit is
+ * one when the synchronous offset
+ * is zero, as a target this bit
+ * is one when the synchronous
+ * offset is at the maximum
+ * defined in SXFER
+ */
+#define CTEST2_700_SFP 0x10 /* SCSI FIFO parity bit,
+ * reading CTEST3 unloads a byte
+ * from the FIFO and sets this
+ */
+#define CTEST2_700_DFP 0x08 /* DMA FIFO parity bit,
+ * reading CTEST6 unloads a byte
+ * from the FIFO and sets this
+ */
+#define CTEST2_TEOP 0x04 /* SCSI true end of process,
+ * indicates a totally finished
+ * transfer
+ */
+#define CTEST2_DREQ 0x02 /* Data request signal */
+/* 0x01 is reserved on 700 series chips */
+#define CTEST2_800_DACK 0x01
+
+/*
+ * Chip test 3 ro
+ * Unloads the bottom byte of the eight deep SCSI synchronous FIFO,
+ * check SSTAT2 FIFO full bits to determine size. Note that a GROSS
+ * error results if a read is attempted on this register. Also note
+ * that 16 and 32 bit reads of this register will cause corruption.
+ */
+#define CTEST3_REG_700 0x17
+/* Chip test 3 rw */
+#define CTEST3_REG_800 0x1b
+#define CTEST3_800_V3 0x80 /* Chip revision */
+#define CTEST3_800_V2 0x40
+#define CTEST3_800_V1 0x20
+#define CTEST3_800_V0 0x10
+#define CTEST3_800_FLF 0x08 /* Flush DMA FIFO */
+#define CTEST3_800_CLF 0x04 /* Clear DMA FIFO */
+#define CTEST3_800_FM 0x02 /* Fetch mode pin */
+/* bit 0 is reserved on 800 series chips */
+
+#define CTEST4_REG_700 0x18 /* Chip test 4 rw */
+#define CTEST4_REG_800 0x21 /* Chip test 4 rw */
+/* 0x80 is reserved on 700 series chips */
+#define CTEST4_800_BDIS 0x80 /* Burst mode disable */
+#define CTEST4_ZMOD 0x40 /* High impedance mode */
+#define CTEST4_SZM 0x20 /* SCSI bus high impedance */
+#define CTEST4_700_SLBE 0x10 /* SCSI loopback enabled */
+#define CTEST4_800_SRTM 0x10 /* Shadow Register Test Mode */
+#define CTEST4_700_SFWR 0x08 /* SCSI FIFO write enable,
+ * redirects writes from SODL
+ * to the SCSI FIFO.
+ */
+#define CTEST4_800_MPEE 0x08 /* Enable parity checking
+ during master cycles on PCI
+ bus */
+
+/*
+ * These bits send the contents of the CTEST6 register to the appropriate
+ * byte lane of the 32 bit DMA FIFO. Normal operation is zero, otherwise
+ * the high bit means the low two bits select the byte lane.
+ */
+#define CTEST4_FBL2 0x04
+#define CTEST4_FBL1 0x02
+#define CTEST4_FBL0 0x01
+#define CTEST4_FBL_MASK 0x07
+#define CTEST4_FBL_0 0x04 /* Select DMA FIFO byte lane 0 */
+#define CTEST4_FBL_1 0x05 /* Select DMA FIFO byte lane 1 */
+#define CTEST4_FBL_2 0x06 /* Select DMA FIFO byte lane 2 */
+#define CTEST4_FBL_3 0x07 /* Select DMA FIFO byte lane 3 */
+#define CTEST4_800_SAVE (CTEST4_800_BDIS)
+
+
+#define CTEST5_REG_700 0x19 /* Chip test 5 rw */
+#define CTEST5_REG_800 0x22 /* Chip test 5 rw */
+/*
+ * Clock Address Incrementor. When set, it increments the
+ * DNAD register to the next bus size boundary. It automatically
+ * resets itself when the operation is complete.
+ */
+#define CTEST5_ADCK 0x80
+/*
+ * Clock Byte Counter. When set, it decrements the DBC register to
+ * the next bus size boundary.
+ */
+#define CTEST5_BBCK 0x40
+/*
+ * Reset SCSI Offset. Setting this bit to 1 clears the current offset
+ * pointer in the SCSI synchronous offset counter (SSTAT). This bit
+ * is set to 1 if a SCSI Gross Error Condition occurs. The offset should
+ * be cleared when a synchronous transfer fails. When written, it is
+ * automatically cleared after the SCSI synchronous offset counter is
+ * reset.
+ */
+/* Bit 5 is reserved on 800 series chips */
+#define CTEST5_700_ROFF 0x20
+/*
+ * Master Control for Set or Reset pulses.  When 1, writing a 1 to one of
+ * the low four bits of this register sets that bit; when 0, writing a 1
+ * clears it.
+ */
+#define CTEST5_MASR 0x10
+#define CTEST5_DDIR 0x08 /* DMA direction */
+/*
+ * Bits 2-0 are reserved on 800 series chips
+ */
+#define CTEST5_700_EOP 0x04 /* End of process */
+#define CTEST5_700_DREQ 0x02 /* Data request */
+#define CTEST5_700_DACK 0x01 /* Data acknowledge */
+
+/*
+ * Chip test 6 rw - writing to this register writes to the byte
+ * lane in the DMA FIFO as determined by the FBL bits in the CTEST4
+ * register.
+ */
+#define CTEST6_REG_700 0x1a
+#define CTEST6_REG_800 0x23
+
+#define CTEST7_REG 0x1b /* Chip test 7 rw */
+/* 0x80 - 0x40 are reserved on NCR53c700 and NCR53c700-66 chips */
+#define CTEST7_10_CDIS 0x80 /* Cache burst disable */
+#define CTEST7_10_SC1 0x40 /* Snoop control bits */
+#define CTEST7_10_SC0 0x20
+#define CTEST7_10_SC_MASK 0x60
+/* 0x20 is reserved on the NCR53c700 */
+#define CTEST7_0060_FM 0x20 /* Fetch mode */
+#define CTEST7_STD 0x10 /* Selection timeout disable */
+#define CTEST7_DFP 0x08 /* DMA FIFO parity bit for CTEST6 */
+#define CTEST7_EVP 0x04 /* 1 = host bus even parity, 0 = odd */
+#define CTEST7_10_TT1 0x02 /* Transfer type */
+#define CTEST7_00_DC 0x02 /* Set to drive DC low during instruction
+ fetch */
+#define CTEST7_DIFF 0x01 /* Differential mode */
+
+#define CTEST7_SAVE ( CTEST7_EVP | CTEST7_DIFF )
+
+
+#define TEMP_REG 0x1c /* through 0x1f Temporary stack rw */
+
+#define DFIFO_REG 0x20 /* DMA FIFO rw */
+/*
+ * 0x80 is reserved on the NCR53c710, the CLF and FLF bits have been
+ * moved into the CTEST8 register.
+ */
+#define DFIFO_00_FLF 0x80 /* Flush DMA FIFO to memory */
+#define DFIFO_00_CLF 0x40 /* Clear DMA and SCSI FIFOs */
+#define DFIFO_BO6 0x40
+#define DFIFO_BO5 0x20
+#define DFIFO_BO4 0x10
+#define DFIFO_BO3 0x08
+#define DFIFO_BO2 0x04
+#define DFIFO_BO1 0x02
+#define DFIFO_BO0 0x01
+#define DFIFO_10_BO_MASK 0x7f /* 7 bit counter */
+#define DFIFO_00_BO_MASK 0x3f /* 6 bit counter */
+
+/*
+ * Interrupt status rw
+ * Note that this is the only register which can be read while SCSI
+ * SCRIPTS are being executed.
+ */
+#define ISTAT_REG_700 0x21
+#define ISTAT_REG_800 0x14
+#define ISTAT_ABRT 0x80 /* Software abort, write
+ * 1 to abort, wait for interrupt. */
+/* 0x40 and 0x20 are reserved on NCR53c700 and NCR53c700-66 chips */
+#define ISTAT_10_SRST 0x40 /* software reset */
+#define ISTAT_10_SIGP 0x20 /* signal script */
+/* 0x10 is reserved on NCR53c700 series chips */
+#define ISTAT_800_SEM 0x10 /* semaphore */
+#define ISTAT_CON 0x08 /* 1 when connected */
+#define ISTAT_800_INTF 0x04 /* Interrupt on the fly */
+#define ISTAT_700_PRE 0x04 /* Pointer register empty.
+ * Set to 1 when DSPS and DSP
+ * registers are empty in pipeline
+ * mode, always set otherwise.
+ */
+#define ISTAT_SIP 0x02 /* SCSI interrupt pending from
+ * SCSI portion of SIOP see
+ * SSTAT0
+ */
+#define ISTAT_DIP 0x01 /* DMA interrupt pending
+ * see DSTAT
+ */
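+
+/*
+ * Editorial illustration, not part of the original header: a minimal
+ * sketch of the dispatch implied by ISTAT_DIP/ISTAT_SIP, honouring the
+ * note above about allowing 10 clocks between the DSTAT and SSTAT reads.
+ * It assumes a 700-series chip (ISTAT at ISTAT_REG_700), memory-mapped
+ * registers at `base' and the readb() accessor; the function name is
+ * hypothetical.
+ */
+static inline void ncr_poll_istat_example (unsigned long base,
+    unsigned char *dstat, unsigned char *sstat0)
+{
+    unsigned char istat = readb(base + ISTAT_REG_700);
+
+    *dstat = *sstat0 = 0;
+    if (istat & ISTAT_DIP)		/* DMA interrupt pending */
+	*dstat = readb(base + DSTAT_REG);
+    /* let at least 10 CLK periods elapse here */
+    if (istat & ISTAT_SIP)		/* SCSI interrupt pending */
+	*sstat0 = readb(base + SSTAT0_REG);
+}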
+
+/* NCR53c700-66 and NCR53c710 only */
+#define CTEST8_REG 0x22 /* Chip test 8 rw */
+#define CTEST8_0066_EAS 0x80 /* Enable alternate SCSI clock,
+ * ie read from SCLK/ rather than CLK/
+ */
+#define CTEST8_0066_EFM 0x40 /* Enable fetch and master outputs */
+#define CTEST8_0066_GRP 0x20 /* Generate Receive Parity for
+ * pass through. This insures that
+ * pass through. This ensures that
+ * bus.
+ */
+#define CTEST8_0066_TE 0x10 /* TolerANT enable. Enable
+ * active negation, should only
+ * be used for slow SCSI
+ * non-differential.
+ */
+#define CTEST8_0066_HSC 0x08 /* Halt SCSI clock */
+#define CTEST8_0066_SRA 0x04 /* Shorten REQ/ACK filtering,
+ * must be set for fast SCSI-II
+ * speeds.
+ */
+#define CTEST8_0066_DAS 0x02 /* Disable automatic target/initiator
+ * switching.
+ */
+#define CTEST8_0066_LDE 0x01 /* Last disconnect enable.
+ * The status of pending
+ * disconnect is maintained by
+ * the core, eliminating
+ * the possibility of missing a
+ * selection or reselection
+ * while waiting to fetch a
+ * WAIT DISCONNECT opcode.
+ */
+
+#define CTEST8_10_V3 0x80 /* Chip revision */
+#define CTEST8_10_V2 0x40
+#define CTEST8_10_V1 0x20
+#define CTEST8_10_V0 0x10
+#define CTEST8_10_V_MASK 0xf0
+#define CTEST8_10_FLF 0x08 /* Flush FIFOs */
+#define CTEST8_10_CLF 0x04 /* Clear FIFOs */
+#define CTEST8_10_FM 0x02 /* Fetch pin mode */
+#define CTEST8_10_SM 0x01 /* Snoop pin mode */
+
+
+/*
+ * The CTEST9 register may be used to differentiate between a
+ * NCR53c700 and a NCR53c710.
+ *
+ * Write 0xff to this register.
+ * Read it.
+ * If the contents are 0xff, it is a NCR53c700
+ * If the contents are 0x00, it is a NCR53c700-66 first revision
+ * If the contents are some other value, it is some other NCR53c700-66
+ */
+#define CTEST9_REG_00 0x23 /* Chip test 9 ro */
+#define LCRC_REG_10 0x23
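+
+/*
+ * Editorial illustration, not part of the original header: the CTEST9
+ * differentiation procedure described above, written out.  It assumes
+ * memory-mapped registers at `base' and the readb()/writeb() accessors;
+ * the function name and its return convention are hypothetical.
+ */
+static inline int ncr_is_53c700_example (unsigned long base)
+{
+    unsigned char v;
+
+    writeb(0xff, base + CTEST9_REG_00);
+    v = readb(base + CTEST9_REG_00);
+    return v == 0xff;			/* 0xff -> NCR53c700, else a 700-66 */
+}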
+
+/*
+ * 0x24 through 0x27 are the DMA byte counter register. Instructions
+ * write their high 8 bits into the DCMD register, the low 24 bits into
+ * the DBC register.
+ *
+ * Function is dependent on the command type being executed.
+ */
+
+
+#define DBC_REG 0x24
+/*
+ * For Block Move Instructions, DBC is a 24 bit quantity representing
+ * the number of bytes to transfer.
+ * For Transfer Control Instructions, DBC is bit fielded as follows :
+ */
+/* Bits 20 - 23 should be clear */
+#define DBC_TCI_TRUE (1 << 19) /* Jump when true */
+#define DBC_TCI_COMPARE_DATA (1 << 18) /* Compare data */
+#define DBC_TCI_COMPARE_PHASE (1 << 17) /* Compare phase with DCMD field */
+#define DBC_TCI_WAIT_FOR_VALID (1 << 16) /* Wait for REQ */
+/* Bits 8 - 15 are reserved on some implementations ? */
+#define DBC_TCI_MASK_MASK 0xff00 /* Mask for data compare */
+#define DBC_TCI_MASK_SHIFT 8
+#define DBC_TCI_DATA_MASK 0xff /* Data to be compared */
+#define DBC_TCI_DATA_SHIFT 0
+
+#define DBC_RWRI_IMMEDIATE_MASK 0xff00 /* Immediate data */
+#define DBC_RWRI_IMMEDIATE_SHIFT 8 /* Amount to shift */
+#define DBC_RWRI_ADDRESS_MASK 0x3f0000 /* Register address */
+#define DBC_RWRI_ADDRESS_SHIFT 16
+
+
+/*
+ * DMA command r/w
+ */
+#define DCMD_REG 0x27
+#define DCMD_TYPE_MASK 0xc0 /* Masks off type */
+#define DCMD_TYPE_BMI 0x00 /* Indicates a Block Move instruction */
+#define DCMD_BMI_IO 0x01 /* I/O, CD, and MSG bits selecting */
+#define DCMD_BMI_CD 0x02 /* the phase for the block MOVE */
+#define DCMD_BMI_MSG 0x04 /* instruction */
+
+#define DCMD_BMI_OP_MASK 0x18 /* mask for opcode */
+#define DCMD_BMI_OP_MOVE_T 0x00 /* MOVE */
+#define DCMD_BMI_OP_MOVE_I 0x08 /* MOVE Initiator */
+
+#define DCMD_BMI_INDIRECT 0x20 /* Indirect addressing */
+
+#define DCMD_TYPE_TCI 0x80 /* Indicates a Transfer Control
+ instruction */
+#define DCMD_TCI_IO 0x01 /* I/O, CD, and MSG bits selecting */
+#define DCMD_TCI_CD 0x02 /* the phase for the block MOVE */
+#define DCMD_TCI_MSG 0x04 /* instruction */
+#define DCMD_TCI_OP_MASK 0x38 /* mask for opcode */
+#define DCMD_TCI_OP_JUMP 0x00 /* JUMP */
+#define DCMD_TCI_OP_CALL 0x08 /* CALL */
+#define DCMD_TCI_OP_RETURN 0x10 /* RETURN */
+#define DCMD_TCI_OP_INT 0x18 /* INT */
+
+#define DCMD_TYPE_RWRI 0x40 /* Indicates I/O or register Read/Write
+ instruction */
+#define DCMD_RWRI_OPC_MASK 0x38 /* Opcode mask */
+#define DCMD_RWRI_OPC_WRITE 0x28 /* Write SFBR to register */
+#define DCMD_RWRI_OPC_READ 0x30 /* Read register to SFBR */
+#define DCMD_RWRI_OPC_MODIFY 0x38 /* Modify in place */
+
+#define DCMD_RWRI_OP_MASK 0x07
+#define DCMD_RWRI_OP_MOVE 0x00
+#define DCMD_RWRI_OP_SHL 0x01
+#define DCMD_RWRI_OP_OR 0x02
+#define DCMD_RWRI_OP_XOR 0x03
+#define DCMD_RWRI_OP_AND 0x04
+#define DCMD_RWRI_OP_SHR 0x05
+#define DCMD_RWRI_OP_ADD 0x06
+#define DCMD_RWRI_OP_ADDC 0x07
+
+#define DCMD_TYPE_MMI 0xc0 /* Indicates a Memory Move instruction
+ (three words) */
+
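+/*
+ * Editorial illustration, not part of the original header: a minimal
+ * sketch splitting the first 32-bit word of a SCRIPTS instruction into
+ * the DCMD field (high 8 bits) and the DBC field (low 24 bits) and
+ * testing, with the masks above, whether it is a "jump when true"
+ * Transfer Control Instruction.  The function name is hypothetical and
+ * the kernel's u32 type (used elsewhere in this header) is assumed.
+ */
+static inline int ncr_insn_is_jump_when_true_example (u32 word0)
+{
+    unsigned char dcmd = (word0 >> 24) & 0xff;	/* -> DCMD register */
+    u32 dbc = word0 & 0x00ffffff;		/* -> DBC register */
+
+    return (dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_TCI
+	&& (dcmd & DCMD_TCI_OP_MASK) == DCMD_TCI_OP_JUMP
+	&& (dbc & DBC_TCI_TRUE) != 0;
+}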
+
+#define DNAD_REG 0x28 /* through 0x2b DMA next address for
+ data */
+#define DSP_REG 0x2c /* through 0x2f DMA SCRIPTS pointer rw */
+#define DSPS_REG 0x30 /* through 0x33 DMA SCRIPTS pointer
+ save rw */
+#define DMODE_REG_00 0x34 /* DMA mode rw */
+#define DMODE_00_BL1 0x80 /* Burst length bits */
+#define DMODE_00_BL0 0x40
+#define DMODE_BL_MASK 0xc0
+/* Burst lengths (800) */
+#define DMODE_BL_2 0x00 /* 2 transfer */
+#define DMODE_BL_4 0x40 /* 4 transfers */
+#define DMODE_BL_8 0x80 /* 8 transfers */
+#define DMODE_BL_16 0xc0 /* 16 transfers */
+
+#define DMODE_700_BW16 0x20 /* Host buswidth = 16 */
+#define DMODE_700_286 0x10 /* 286 mode */
+#define DMODE_700_IOM 0x08 /* Transfer to IO port */
+#define DMODE_700_FAM 0x04 /* Fixed address mode */
+#define DMODE_700_PIPE 0x02 /* Pipeline mode disables
+ * automatic fetch / exec
+ */
+#define DMODE_MAN 0x01 /* Manual start mode,
+ * requires a 1 to be written
+ * to the start DMA bit in the DCNTL
+ * register to run scripts
+ */
+
+#define DMODE_700_SAVE ( DMODE_BL_MASK | DMODE_700_BW16 | DMODE_700_286 )
+
+/* NCR53c800 series only */
+#define SCRATCHA_REG_800 0x34 /* through 0x37 Scratch A rw */
+/* NCR53c710 only */
+#define SCRATCB_REG_10 0x34 /* through 0x37 scratch B rw */
+
+#define DMODE_REG_10 0x38 /* DMA mode rw, NCR53c710 and newer */
+#define DMODE_800_SIOM 0x20 /* Source IO = 1 */
+#define DMODE_800_DIOM 0x10 /* Destination IO = 1 */
+#define DMODE_800_ERL 0x08 /* Enable Read Line */
+
+/* 35-38 are reserved on 700 and 700-66 series chips */
+#define DIEN_REG 0x39 /* DMA interrupt enable rw */
+/* 0x80, 0x40, and 0x20 are reserved on 700-series chips */
+#define DIEN_800_MDPE 0x40 /* Master data parity error */
+#define DIEN_800_BF 0x20 /* BUS fault */
+#define DIEN_ABRT 0x10 /* Enable aborted interrupt */
+#define DIEN_SSI 0x08 /* Enable single step interrupt */
+#define DIEN_SIR 0x04 /* Enable SCRIPTS INT command
+ * interrupt
+ */
+/* 0x02 is reserved on 800 series chips */
+#define DIEN_700_WTD 0x02 /* Enable watchdog timeout interrupt */
+#define DIEN_700_OPC 0x01 /* Enable illegal instruction
+ * interrupt
+ */
+#define DIEN_800_IID 0x01 /* Same meaning, different name */
+
+/*
+ * DMA watchdog timer rw
+ * set in 16 CLK input periods.
+ */
+#define DWT_REG 0x3a
+
+/* DMA control rw */
+#define DCNTL_REG 0x3b
+#define DCNTL_700_CF1 0x80 /* Clock divisor bits */
+#define DCNTL_700_CF0 0x40
+#define DCNTL_700_CF_MASK 0xc0
+/* Clock divisors Divisor SCLK range (MHZ) */
+#define DCNTL_700_CF_2 0x00 /* 2.0 37.51-50.00 */
+#define DCNTL_700_CF_1_5 0x40 /* 1.5 25.01-37.50 */
+#define DCNTL_700_CF_1 0x80 /* 1.0 16.67-25.00 */
+#define DCNTL_700_CF_3 0xc0 /* 3.0 50.01-66.67 (53c700-66) */
+
+#define DCNTL_700_S16 0x20 /* Load scripts 16 bits at a time */
+#define DCNTL_SSM 0x10 /* Single step mode */
+#define DCNTL_700_LLM 0x08 /* Low level mode, can only be set
+ * after selection */
+#define DCNTL_800_IRQM 0x08 /* Totem pole IRQ pin */
+#define DCNTL_STD 0x04 /* Start DMA / SCRIPTS */
+/* 0x02 is reserved */
+#define DCNTL_00_RST 0x01 /* Software reset, resets everything
+ * but 286 mode bit in DMODE. On the
+ * NCR53c710, this bit moved to CTEST8
+ */
+#define DCNTL_10_COM 0x01 /* 700 software compatibility mode */
+
+#define DCNTL_700_SAVE ( DCNTL_700_CF_MASK | DCNTL_700_S16 )
+
+
+/* NCR53c700-66 only */
+#define SCRATCHB_REG_00 0x3c /* through 0x3f scratch b rw */
+#define SCRATCHB_REG_800 0x5c /* through 0x5f scratch b rw */
+/* NCR53c710 only */
+#define ADDER_REG_10 0x3c /* Adder, NCR53c710 only */
+
+#define SIEN1_REG_800 0x41
+#define SIEN1_800_STO 0x04 /* selection/reselection timeout */
+#define SIEN1_800_GEN 0x02 /* general purpose timer */
+#define SIEN1_800_HTH 0x01 /* handshake to handshake */
+
+#define SIST1_REG_800 0x43
+#define SIST1_800_STO 0x04 /* selection/reselection timeout */
+#define SIST1_800_GEN 0x02 /* general purpose timer */
+#define SIST1_800_HTH 0x01 /* handshake to handshake */
+
+#define SLPAR_REG_800 0x44 /* Parity */
+
+#define MACNTL_REG_800 0x46 /* Memory access control */
+#define MACNTL_800_TYP3 0x80
+#define MACNTL_800_TYP2 0x40
+#define MACNTL_800_TYP1 0x20
+#define MACNTL_800_TYP0 0x10
+#define MACNTL_800_DWR 0x08
+#define MACNTL_800_DRD 0x04
+#define MACNTL_800_PSCPT 0x02
+#define MACNTL_800_SCPTS 0x01
+
+#define GPCNTL_REG_800 0x47 /* General Purpose Pin Control */
+
+/* Timeouts are expressed such that 0=off, 1=100us, doubling after that */
+#define STIME0_REG_800 0x48 /* SCSI Timer Register 0 */
+#define STIME0_800_HTH_MASK 0xf0 /* Handshake to Handshake timeout */
+#define STIME0_800_HTH_SHIFT 4
+#define STIME0_800_SEL_MASK 0x0f /* Selection timeout */
+#define STIME0_800_SEL_SHIFT 0
+
+#define STIME1_REG_800 0x49
+#define STIME1_800_GEN_MASK 0x0f /* General purpose timer */
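+
+/*
+ * Editorial illustration, not part of the original header: the timeout
+ * encoding described above (0 = off, 1 = 100us, doubling after that)
+ * converted to microseconds for a 4-bit field value.  The function name
+ * is hypothetical.
+ */
+static inline unsigned long ncr_timeout_field_to_us_example (unsigned int field)
+{
+    if (field == 0)
+	return 0;			/* timer disabled */
+    return 100UL << (field - 1);	/* 1 -> 100us, 2 -> 200us, ... */
+}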
+
+#define RESPID_REG_800 0x4a /* Response ID, bit fielded. 8
+ bits on narrow chips, 16 on WIDE */
+
+#define STEST0_REG_800 0x4c
+#define STEST0_800_SLT 0x08 /* Selection response logic test */
+#define STEST0_800_ART 0x04 /* Arbitration priority encoder test */
+#define STEST0_800_SOZ 0x02 /* Synchronous offset zero */
+#define STEST0_800_SOM 0x01 /* Synchronous offset maximum */
+
+#define STEST1_REG_800 0x4d
+#define STEST1_800_SCLK 0x80 /* Disable SCSI clock */
+
+#define STEST2_REG_800 0x4e
+#define STEST2_800_SCE 0x80 /* Enable SOCL/SODL */
+#define STEST2_800_ROF 0x40 /* Reset SCSI sync offset */
+#define STEST2_800_SLB 0x10 /* Enable SCSI loopback mode */
+#define STEST2_800_SZM 0x08 /* SCSI high impedance mode */
+#define STEST2_800_EXT 0x02 /* Extend REQ/ACK filter 30 to 60ns */
+#define STEST2_800_LOW 0x01 /* SCSI low level mode */
+
+#define STEST3_REG_800 0x4f
+#define STEST3_800_TE 0x80 /* Enable active negation */
+#define STEST3_800_STR 0x40 /* SCSI FIFO test read */
+#define STEST3_800_HSC 0x20 /* Halt SCSI clock */
+#define STEST3_800_DSI 0x10 /* Disable single initiator response */
+#define STEST3_800_TTM 0x04 /* Time test mode */
+#define STEST3_800_CSF 0x02 /* Clear SCSI FIFO */
+#define STEST3_800_STW 0x01 /* SCSI FIFO test write */
+
+#define OPTION_PARITY 0x1 /* Enable parity checking */
+#define OPTION_TAGGED_QUEUE 0x2 /* Enable SCSI-II tagged queuing */
+#define OPTION_700 0x8 /* Always run NCR53c700 scripts */
+#define OPTION_INTFLY 0x10 /* Use INTFLY interrupts */
+#define OPTION_DEBUG_INTR 0x20 /* Debug interrupts */
+#define OPTION_DEBUG_INIT_ONLY 0x40 /* Run initialization code and
+ simple test code, return
+ DID_NO_CONNECT if any SCSI
+ commands are attempted. */
+#define OPTION_DEBUG_READ_ONLY 0x80 /* Return DID_ERROR if any
+ SCSI write is attempted */
+#define OPTION_DEBUG_TRACE 0x100 /* Animated trace mode, print
+ each address and instruction
+ executed to debug buffer. */
+#define OPTION_DEBUG_SINGLE 0x200 /* stop after executing one
+ instruction */
+#define OPTION_SYNCHRONOUS 0x400 /* Enable sync SCSI. */
+#define OPTION_MEMORY_MAPPED 0x800 /* NCR registers have valid
+ memory mapping */
+#define OPTION_IO_MAPPED 0x1000 /* NCR registers have valid
+ I/O mapping */
+#define OPTION_DEBUG_PROBE_ONLY 0x2000 /* Probe only, don't even init */
+#define OPTION_DEBUG_TESTS_ONLY 0x4000 /* Probe, init, run selected tests */
+#define OPTION_DEBUG_TEST0 0x08000 /* Run test 0 */
+#define OPTION_DEBUG_TEST1 0x10000 /* Run test 1 */
+#define OPTION_DEBUG_TEST2 0x20000 /* Run test 2 */
+#define OPTION_DEBUG_DUMP 0x40000 /* Dump commands */
+#define OPTION_DEBUG_TARGET_LIMIT 0x80000 /* Only talk to target+luns specified */
+#define OPTION_DEBUG_NCOMMANDS_LIMIT 0x100000 /* Limit the number of commands */
+#define OPTION_DEBUG_SCRIPT 0x200000 /* Print when checkpoints are passed */
+#define OPTION_DEBUG_FIXUP 0x400000 /* print fixup values */
+#define OPTION_DEBUG_DSA 0x800000
+#define OPTION_DEBUG_CORRUPTION 0x1000000 /* Detect script corruption */
+#define OPTION_DEBUG_SDTR 0x2000000 /* Debug SDTR problem */
+#define OPTION_DEBUG_MISMATCH 0x4000000 /* Debug phase mismatches */
+#define OPTION_DISCONNECT 0x8000000 /* Allow disconnect */
+#define OPTION_DEBUG_DISCONNECT 0x10000000
+#define OPTION_ALWAYS_SYNCHRONOUS 0x20000000 /* Negotiate sync. transfers
+ on power up */
+#define OPTION_DEBUG_QUEUES 0x80000000
+#define OPTION_DEBUG_ALLOCATION 0x100000000LL
+#define OPTION_DEBUG_SYNCHRONOUS 0x200000000LL /* Sanity check SXFER and
+ SCNTL3 registers */
+#define OPTION_NO_ASYNC 0x400000000LL /* Don't automagically send
+ SDTR for async transfers when
+ we haven't been told to do
+ a synchronous transfer. */
+#define OPTION_NO_PRINT_RACE 0x800000000LL /* Don't print message when
+ the reselect/WAIT DISCONNECT
+ race condition hits */
+#if !defined(PERM_OPTIONS)
+#define PERM_OPTIONS 0
+#endif
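+
+/*
+ * Editorial note, not part of the original header: PERM_OPTIONS is meant
+ * to be supplied at compile time.  A hypothetical configuration enabling
+ * disconnects and synchronous negotiation would define, before this
+ * point,
+ *
+ *	#define PERM_OPTIONS (OPTION_DISCONNECT | OPTION_SYNCHRONOUS)
+ *
+ * using the OPTION_* flags listed above.
+ */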
+
+struct NCR53c7x0_synchronous {
+ u32 select_indirect; /* Value used for indirect selection */
+ u32 script[8]; /* Size ?? Script used when target is
+ reselected */
+ unsigned char synchronous_want[5]; /* Per target desired SDTR */
+/*
+ * Set_synchronous programs these, select_indirect and current settings after
+ * int_debug_should show a match.
+ */
+ unsigned char sxfer_sanity, scntl3_sanity;
+};
+
+#define CMD_FLAG_SDTR 1 /* Initiating synchronous
+ transfer negotiation */
+#define CMD_FLAG_WDTR 2 /* Initiating wide transfer
+ negotiation */
+#define CMD_FLAG_DID_SDTR 4 /* did SDTR */
+#define CMD_FLAG_DID_WDTR 8 /* did WDTR */
+
+struct NCR53c7x0_table_indirect {
+ u32 count;
+ void *address;
+};
+
+enum ncr_event {
+ EVENT_NONE = 0,
+/*
+ * Order is IMPORTANT, since these must correspond to the event interrupts
+ * in 53c7,8xx.scr
+ */
+
+ EVENT_ISSUE_QUEUE = 0x5000000, /* Command was added to issue queue */
+ EVENT_START_QUEUE, /* Command moved to start queue */
+ EVENT_SELECT, /* Command completed selection */
+ EVENT_DISCONNECT, /* Command disconnected */
+ EVENT_RESELECT, /* Command reselected */
+ EVENT_COMPLETE, /* Command completed */
+ EVENT_IDLE,
+ EVENT_SELECT_FAILED,
+ EVENT_BEFORE_SELECT,
+ EVENT_RESELECT_FAILED
+};
+
+struct NCR53c7x0_event {
+ enum ncr_event event; /* What type of event */
+ unsigned char target;
+ unsigned char lun;
+ struct timeval time;
+ u32 *dsa; /* What's in the DSA register now (virt) */
+/*
+ * A few things from that SCSI pid so we know what happened after
+ * the Scsi_Cmnd structure in question may have disappeared.
+ */
+ unsigned long pid; /* The SCSI PID which caused this
+ event */
+ unsigned char cmnd[12];
+};
+
+/*
+ * Things in the NCR53c7x0_cmd structure are split into two parts :
+ *
+ * 1. A fixed portion, for things which are not accessed directly by static NCR
+ * code (ie, are referenced only by the Linux side of the driver,
+ * or only by dynamically generated code).
+ *
+ * 2. The DSA portion, for things which are accessed directly by static NCR
+ * code.
+ *
+ * This is a little ugly, but it
+ * 1. Avoids conflicts between the NCR code's picture of the structure, and
+ * Linux code's idea of what it looks like.
+ *
+ * 2. Minimizes the pain in the Linux side of the code needed
+ * to calculate real dsa locations for things, etc.
+ *
+ */
+
+struct NCR53c7x0_cmd {
+ void *real; /* Real, unaligned address for
+ free function */
+ void (* free)(void *, int); /* Command to deallocate; NULL
+ for structures allocated with
+ scsi_register, etc. */
+ Scsi_Cmnd *cmd; /* Associated Scsi_Cmnd
+ structure, Scsi_Cmnd points
+ at NCR53c7x0_cmd using
+ host_scribble structure */
+
+ int size; /* scsi_malloc'd size of this
+ structure */
+
+ int flags; /* CMD_* flags */
+
+/*
+ * SDTR and WIDE messages are an either/or affair
+ * in this message, since we will go into message out and send
+ * _the whole mess_ without dropping out of message out to
+ * let the target go into message in after sending the first
+ * message.
+ */
+
+ unsigned char select[11]; /* Select message, includes
+ IDENTIFY
+ (optional) QUEUE TAG
+ (optional) SDTR or WDTR
+ */
+
+
+ volatile struct NCR53c7x0_cmd *next; /* Linux maintained lists (free,
+					   running, eventually finished) */
+
+
+ u32 *data_transfer_start; /* Start of data transfer routines */
+    u32 *data_transfer_end;	/* Address after end of data transfer
+ routines */
+/*
+ * The following three fields were moved from the DSA proper to here
+ * since only dynamically generated NCR code refers to them, meaning
+ * we don't need dsa_* absolutes, and it is simpler to let the
+ * host code refer to them directly.
+ */
+
+/*
+ * HARD CODED : residual and saved_residual need to agree with the sizes
+ * used in NCR53c7,8xx.scr.
+ *
+ * FIXME: we want to consider the case where we have odd-length
+ * scatter/gather buffers and a WIDE transfer, in which case
+ * we'll need to use the CHAIN MOVE instruction. Ick.
+ */
+ u32 residual[6]; /* Residual data transfer which
+ allows pointer code to work
+ right.
+
+ [0-1] : Conditional call to
+ appropriate other transfer
+ routine.
+ [2-3] : Residual block transfer
+ instruction.
+ [4-5] : Jump to instruction
+ after splice.
+ */
+ u32 saved_residual[6]; /* Copy of old residual, so we
+ can get another partial
+ transfer and still recover
+ */
+
+ u32 saved_data_pointer; /* Saved data pointer */
+
+ u32 dsa_next_addr; /* _Address_ of dsa_next field
+ in this dsa for RISCy
+ style constant. */
+
+ u32 dsa_addr; /* Address of dsa; RISCy style
+ constant */
+
+ u32 dsa[0]; /* Variable length (depending
+ on host type, number of scatter /
+ gather buffers, etc). */
+};
+
+struct NCR53c7x0_break {
+ u32 *address, old_instruction[2];
+ struct NCR53c7x0_break *next;
+ unsigned char old_size; /* Size of old instruction */
+};
+
+/* Indicates that the NCR is not executing code */
+#define STATE_HALTED 0
+/*
+ * Indicates that the NCR is executing the wait for select / reselect
+ * script. Only used when running NCR53c700 compatible scripts, only
+ * state during which an ABORT is _not_ considered an error condition.
+ */
+#define STATE_WAITING 1
+/* Indicates that the NCR is executing other code. */
+#define STATE_RUNNING 2
+/*
+ * Indicates that the NCR was being aborted.
+ */
+#define STATE_ABORTING 3
+/* Indicates that the NCR was successfully aborted. */
+#define STATE_ABORTED 4
+/* Indicates that the NCR has been disabled due to a fatal error */
+#define STATE_DISABLED 5
+
+/*
+ * Where knowledge of SCSI SCRIPT(tm) specified values is needed
+ * in an interrupt handler, an interrupt handler exists for each
+ * different SCSI script so we don't have name space problems.
+ *
+ * Return values of these handlers are as follows :
+ */
+#define SPECIFIC_INT_NOTHING 0 /* don't even restart */
+#define SPECIFIC_INT_RESTART 1 /* restart at the next instruction */
+#define SPECIFIC_INT_ABORT 2 /* recoverable error, abort cmd */
+#define SPECIFIC_INT_PANIC 3 /* unrecoverable error, panic */
+#define SPECIFIC_INT_DONE 4 /* normal command completion */
+#define SPECIFIC_INT_BREAK 5 /* break point encountered */
+
+struct NCR53c7x0_hostdata {
+ int size; /* Size of entire Scsi_Host
+ structure */
+ int board; /* set to board type, useful if
+ we have host specific things,
+ ie, a general purpose I/O
+ bit is being used to enable
+ termination, etc. */
+
+ int chip; /* set to chip type; 700-66 is
+ 700-66, rest are last three
+ digits of part number */
+ /*
+ * PCI bus, device, function, only for NCR53c8x0 chips.
+ * pci_valid indicates that the PCI configuration information
+ * is valid, and we can twiddle MAX_LAT, etc. as recommended
+ * for maximum performance in the NCR documentation.
+ */
+ unsigned char pci_bus, pci_device_fn;
+ unsigned pci_valid:1;
+
+ u32 *dsp; /* dsp to restart with after
+ all stacked interrupts are
+ handled. */
+
+ unsigned dsp_changed:1; /* Has dsp changed within this
+ set of stacked interrupts ? */
+
+ unsigned char dstat; /* Most recent value of dstat */
+ unsigned dstat_valid:1;
+
+ unsigned expecting_iid:1; /* Expect IID interrupt */
+ unsigned expecting_sto:1; /* Expect STO interrupt */
+
+ /*
+ * The code stays cleaner if we use variables with function
+ * pointers and offsets that are unique for the different
+ * scripts rather than having a slew of switch(hostdata->chip)
+ * statements.
+ *
+ * It also means that the #defines from the SCSI SCRIPTS(tm)
+ * don't have to be visible outside of the script-specific
+ * instructions, preventing name space pollution.
+ */
+
+ void (* init_fixup)(struct Scsi_Host *host);
+ void (* init_save_regs)(struct Scsi_Host *host);
+ void (* dsa_fixup)(struct NCR53c7x0_cmd *cmd);
+ void (* soft_reset)(struct Scsi_Host *host);
+ int (* run_tests)(struct Scsi_Host *host);
+
+ /*
+ * Called when DSTAT_SIR is set, indicating an interrupt generated
+ * by the INT instruction, where values are unique for each SCSI
+     * script.  Should return one of the SPECIFIC_INT_* values.
+ */
+
+ int (* dstat_sir_intr)(struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
+
+ int dsa_len; /* Size of DSA structure */
+
+ /*
+ * Location of DSA fields for the SCSI SCRIPT corresponding to this
+ * chip.
+ */
+
+ s32 dsa_start;
+ s32 dsa_end;
+ s32 dsa_next;
+ s32 dsa_prev;
+ s32 dsa_cmnd;
+ s32 dsa_select;
+ s32 dsa_msgout;
+ s32 dsa_cmdout;
+ s32 dsa_dataout;
+ s32 dsa_datain;
+ s32 dsa_msgin;
+ s32 dsa_msgout_other;
+ s32 dsa_write_sync;
+ s32 dsa_write_resume;
+ s32 dsa_check_reselect;
+ s32 dsa_status;
+ s32 dsa_saved_pointer;
+ s32 dsa_jump_dest;
+
+ /*
+ * Important entry points that generic fixup code needs
+ * to know about, fixed up.
+ */
+
+ s32 E_accept_message;
+ s32 E_command_complete;
+ s32 E_data_transfer;
+ s32 E_dsa_code_template;
+ s32 E_dsa_code_template_end;
+ s32 E_end_data_transfer;
+ s32 E_msg_in;
+ s32 E_initiator_abort;
+ s32 E_other_transfer;
+ s32 E_other_in;
+ s32 E_other_out;
+ s32 E_target_abort;
+ s32 E_debug_break;
+ s32 E_reject_message;
+ s32 E_respond_message;
+ s32 E_select;
+ s32 E_select_msgout;
+ s32 E_test_0;
+ s32 E_test_1;
+ s32 E_test_2;
+ s32 E_test_3;
+ s32 E_dsa_zero;
+ s32 E_cmdout_cmdout;
+ s32 E_wait_reselect;
+ s32 E_dsa_code_begin;
+
+ long long options; /* Bitfielded set of options enabled */
+ volatile u32 test_completed; /* Test completed */
+ int test_running; /* Test currently running */
+ s32 test_source;
+ volatile s32 test_dest;
+
+ volatile int state; /* state of driver, only used for
+ OPTION_700 */
+
+ unsigned char dmode; /*
+ * set to the address of the DMODE
+ * register for this chip.
+ */
+ unsigned char istat; /*
+ * set to the address of the ISTAT
+ * register for this chip.
+ */
+
+ int scsi_clock; /*
+ * SCSI clock in HZ. 0 may be used
+ * for unknown, although this will
+ * disable synchronous negotiation.
+ */
+
+ volatile int intrs; /* Number of interrupts */
+ volatile int resets; /* Number of SCSI resets */
+ unsigned char saved_dmode;
+ unsigned char saved_ctest4;
+ unsigned char saved_ctest7;
+ unsigned char saved_dcntl;
+ unsigned char saved_scntl3;
+
+ unsigned char this_id_mask;
+
+ /* Debugger information */
+ struct NCR53c7x0_break *breakpoints, /* Linked list of all break points */
+ *breakpoint_current; /* Current breakpoint being stepped
+ through, NULL if we are running
+ normally. */
+#ifdef NCR_DEBUG
+ int debug_size; /* Size of debug buffer */
+ volatile int debug_count; /* Current data count */
+ volatile char *debug_buf; /* Output ring buffer */
+ volatile char *debug_write; /* Current write pointer */
+ volatile char *debug_read; /* Current read pointer */
+#endif /* def NCR_DEBUG */
+
+ /* XXX - primitive debugging junk, remove when working ? */
+ int debug_print_limit; /* Number of commands to print
+ out exhaustive debugging
+ information for if
+ OPTION_DEBUG_DUMP is set */
+
+ unsigned char debug_lun_limit[16]; /* If OPTION_DEBUG_TARGET_LIMIT
+ set, puke if commands are sent
+ to other target/lun combinations */
+
+ int debug_count_limit; /* Number of commands to execute
+ before puking to limit debugging
+ output */
+
+
+ volatile unsigned idle:1; /* set to 1 if idle */
+
+ /*
+ * Table of synchronous+wide transfer parameters set on a per-target
+ * basis.
+ */
+
+ volatile struct NCR53c7x0_synchronous sync[16];
+
+ volatile Scsi_Cmnd *issue_queue;
+ /* waiting to be issued by
+ Linux driver */
+ volatile struct NCR53c7x0_cmd *running_list;
+ /* commands running, maintained
+ by Linux driver */
+
+ volatile struct NCR53c7x0_cmd *current; /* currently connected
+ nexus, ONLY valid for
+ NCR53c700/NCR53c700-66
+ */
+
+ volatile struct NCR53c7x0_cmd *spare; /* pointer to spare,
+ allocated at probe time,
+ which we can use for
+ initialization */
+ volatile struct NCR53c7x0_cmd *free;
+ int max_cmd_size; /* Maximum size of NCR53c7x0_cmd
+ based on number of
+ scatter/gather segments, etc.
+ */
+ volatile int num_cmds; /* Number of commands
+ allocated */
+ volatile int extra_allocate;
+ volatile unsigned char cmd_allocated[16]; /* Have we allocated commands
+ for this target yet? If not,
+ do so ASAP */
+ volatile unsigned char busy[16][8]; /* number of commands
+ executing on each target
+ */
+ /*
+ * Eventually, I'll switch to a coroutine for calling
+ * cmd->done(cmd), etc. so that we can overlap interrupt
+ * processing with this code for maximum performance.
+ */
+
+ volatile struct NCR53c7x0_cmd *finished_queue;
+
+
+ /* Shared variables between SCRIPT and host driver */
+ volatile u32 *schedule; /* Array of JUMPs to dsa_begin
+ routines of various DSAs.
+ When not in use, replace
+ with jump to next slot */
+
+
+ volatile unsigned char msg_buf[16]; /* buffer for messages
+ other than the command
+ complete message */
+
+ /* Per-target default synchronous and WIDE messages */
+ volatile unsigned char synchronous_want[16][5];
+ volatile unsigned char wide_want[16][4];
+
+ /* Bit fielded set of targets we want to speak synchronously with */
+ volatile u16 initiate_sdtr;
+ /* Bit fielded set of targets we want to speak wide with */
+ volatile u16 initiate_wdtr;
+ /* Bit fielded list of targets we've talked to. */
+ volatile u16 talked_to;
+
+ /* Array of bit-fielded lun lists that we need to request_sense */
+ volatile unsigned char request_sense[16];
+
+ u32 addr_reconnect_dsa_head; /* RISCy style constant,
+ address of following */
+ volatile u32 reconnect_dsa_head;
+ /* Data identifying nexus we are trying to match during reselection */
+ volatile unsigned char reselected_identify; /* IDENTIFY message */
+ volatile unsigned char reselected_tag; /* second byte of queue tag
+ message or 0 */
+ /* These were static variables before we moved them */
+
+ s32 NCR53c7xx_zero;
+ s32 NCR53c7xx_sink;
+ u32 NOP_insn;
+ char NCR53c7xx_msg_reject;
+ char NCR53c7xx_msg_abort;
+ char NCR53c7xx_msg_nop;
+
+ volatile int event_size, event_index;
+ volatile struct NCR53c7x0_event *events;
+
+ /* If we need to generate code to kill off the currently connected
+ command, this is where we do it. Should have a BMI instruction
+ to source or sink the current data, followed by a JUMP
+ to abort_connected */
+
+ u32 *abort_script;
+
+ int script_count; /* Size of script in words */
+ u32 script[0]; /* Relocated SCSI script */
+
+};
+
+#define IRQ_NONE 255
+#define DMA_NONE 255
+#define IRQ_AUTO 254
+#define DMA_AUTO 254
+
+#define BOARD_GENERIC 0
+
+#define NCR53c7x0_insn_size(insn) \
+ (((insn) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI ? 3 : 2)
+
+
+#define NCR53c7x0_local_declare() \
+ volatile unsigned char *NCR53c7x0_address_memory; \
+ unsigned int NCR53c7x0_address_io; \
+ int NCR53c7x0_memory_mapped
+
+#define NCR53c7x0_local_setup(host) \
+ NCR53c7x0_address_memory = (void *) (host)->base; \
+ NCR53c7x0_address_io = (unsigned int) (host)->io_port; \
+ NCR53c7x0_memory_mapped = ((struct NCR53c7x0_hostdata *) \
+ host->hostdata)-> options & OPTION_MEMORY_MAPPED
+
+#define NCR53c7x0_read8(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int)readb(NCR53c7x0_address_memory + (address)) : \
+ inb(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_read16(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int)readw(NCR53c7x0_address_memory + (address)) : \
+ inw(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_read32(address) \
+ (NCR53c7x0_memory_mapped ? \
+ (unsigned int) readl(NCR53c7x0_address_memory + (address)) : \
+ inl(NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write8(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writeb((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outb((value), NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write16(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writew((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outw((value), NCR53c7x0_address_io + (address)))
+
+#define NCR53c7x0_write32(address,value) \
+ (NCR53c7x0_memory_mapped ? \
+ ({writel((value), NCR53c7x0_address_memory + (address)); mb();}) : \
+ outl((value), NCR53c7x0_address_io + (address)))
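+
+/*
+ * Illustrative sketch (editor's note, not part of the original driver) :
+ * the access macros above are meant to be used together, as below.  The
+ * register offset argument here is hypothetical; real callers pass the
+ * chip specific *_REG offsets.
+ */
+#if 0
+static unsigned char
+example_read_register (struct Scsi_Host *host, int reg_offset)
+{
+    NCR53c7x0_local_declare();		/* declares the address/mapping locals */
+    NCR53c7x0_local_setup(host);	/* fills them in from host->base,
+					   host->io_port and the
+					   OPTION_MEMORY_MAPPED bit */
+    /* Dispatches to readb() or inb() depending on the mapping in use. */
+    return NCR53c7x0_read8 (reg_offset);
+}
+#endif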
+
+/* Patch arbitrary 32 bit words in the script */
+#define patch_abs_32(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) { \
+ (script)[A_##symbol##_used[i] - (offset)] += (value); \
+ if (hostdata->options & OPTION_DEBUG_FIXUP) \
+ printk("scsi%d : %s reference %d at 0x%x in %s is now 0x%x\n",\
+ host->host_no, #symbol, i, A_##symbol##_used[i] - \
+ (int)(offset), #script, (script)[A_##symbol##_used[i] - \
+ (offset)]); \
+ }
+
+/* Patch read/write instruction immediate field */
+#define patch_abs_rwri_data(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) \
+ (script)[A_##symbol##_used[i] - (offset)] = \
+ ((script)[A_##symbol##_used[i] - (offset)] & \
+ ~DBC_RWRI_IMMEDIATE_MASK) | \
+ (((value) << DBC_RWRI_IMMEDIATE_SHIFT) & \
+ DBC_RWRI_IMMEDIATE_MASK)
+
+/* Patch transfer control instruction data field */
+#define patch_abs_tci_data(script, offset, symbol, value) \
+ for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
+ (u32)); ++i) \
+ (script)[A_##symbol##_used[i] - (offset)] = \
+ ((script)[A_##symbol##_used[i] - (offset)] & \
+ ~DBC_TCI_DATA_MASK) | \
+ (((value) << DBC_TCI_DATA_SHIFT) & \
+ DBC_TCI_DATA_MASK)
+
+/* Patch field in dsa structure (assignment should be +=?) */
+#define patch_dsa_32(dsa, symbol, word, value) \
+ { \
+ (dsa)[(hostdata->symbol - hostdata->dsa_start) / sizeof(u32) \
+ + (word)] = (value); \
+ if (hostdata->options & OPTION_DEBUG_DSA) \
+ printk("scsi : dsa %s symbol %s(%d) word %d now 0x%x\n", \
+ #dsa, #symbol, hostdata->symbol, \
+ (word), (u32) (value)); \
+ }
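+
+/*
+ * Illustrative sketch (editor's note, not part of the original driver) :
+ * the patch_* macros above expect "i", "host" and "hostdata" to be in
+ * scope at the call site, and rely on the A_<symbol>_used[] arrays that
+ * the SCRIPTS assembler emits for each absolute symbol.  The symbol name
+ * used below is hypothetical.
+ */
+#if 0
+    patch_abs_32 (hostdata->script, 0, some_script_symbol,
+	virt_to_bus ((void *) &hostdata->reconnect_dsa_head));
+#endif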
+
+/* Paranoid people could use panic() here. */
+#define FATAL(host) shutdown((host));
+
+#endif /* NCR53c7x0_C */
+#endif /* NCR53c7x0_H */
diff --git a/linux/src/drivers/scsi/53c78xx.c b/linux/src/drivers/scsi/53c78xx.c
new file mode 100644
index 0000000..e6a66ff
--- /dev/null
+++ b/linux/src/drivers/scsi/53c78xx.c
@@ -0,0 +1,6401 @@
+/*
+ * PERM_OPTIONS are driver options which will be enabled for all NCR boards
+ * in the system at driver initialization time.
+ *
+ * Don't THINK about touching these in PERM_OPTIONS :
+ * OPTION_IO_MAPPED
+ * Memory mapped IO does not work under i86 Linux.
+ *
+ * OPTION_DEBUG_TEST1
+ * Test 1 does bus mastering and interrupt tests, which will help weed
+ * out brain damaged main boards.
+ *
+ * These are development kernel changes. Code for them included in this
+ * driver release may or may not work. If you turn them on, you should be
+ * running the latest copy of the development sources from
+ *
+ * ftp://tsx-11.mit.edu/pub/linux/ALPHA/scsi/53c7,8xx
+ *
+ * and be subscribed to the ncr53c810@colorado.edu mailing list. To
+ * subscribe, send mail to majordomo@colorado.edu with
+ *
+ * subscribe ncr53c810
+ *
+ * in the text.
+ *
+ *
+ * OPTION_NO_ASYNC
+ * Don't negotiate for asynchronous transfers on the first command
+ * when OPTION_ALWAYS_SYNCHRONOUS is set. Useful for dain bramaged
+ * devices which do something bad rather than sending a MESSAGE
+ * REJECT back to us like they should if they can't cope.
+ *
+ * OPTION_SYNCHRONOUS
+ * Enable support for synchronous transfers. Target negotiated
+ * synchronous transfers will be responded to. To initiate
+ * a synchronous transfer request, call
+ *
+ * request_synchronous (hostno, target)
+ *
+ * from within KGDB.
+ *
+ * OPTION_ALWAYS_SYNCHRONOUS
+ * Negotiate for synchronous transfers with every target after
+ * driver initialization or a SCSI bus reset. This is a bit dangerous,
+ * since there are some dain bramaged SCSI devices which will accept
+ * SDTR messages but keep talking asynchronously.
+ *
+ * OPTION_DISCONNECT
+ * Enable support for disconnect/reconnect. To change the
+ * default setting on a given host adapter, call
+ *
+ * request_disconnect (hostno, allow)
+ *
+ * where allow is non-zero to allow, 0 to disallow.
+ *
+ * If you really want to run 10MHz FAST SCSI-II transfers, you should
+ * know that the NCR driver currently ignores parity information. Most
+ * systems do 5MHz SCSI fine. I've seen a lot that have problems faster
+ * than 8MHz. To play it safe, we only request 5MHz transfers.
+ *
+ * If you'd rather get 10MHz transfers, edit sdtr_message and change
+ * the fourth byte from 50 to 25.
+ */
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SCSI_NCR53C7xx_sync
+#ifdef CONFIG_SCSI_NCR53C7xx_DISCONNECT
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|OPTION_DISCONNECT|\
+ OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS)
+#else
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|\
+ OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS)
+#endif
+#else
+#ifdef CONFIG_SCSI_NCR53C7xx_DISCONNECT
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|OPTION_DISCONNECT|\
+ OPTION_SYNCHRONOUS)
+#else
+#define PERM_OPTIONS (OPTION_IO_MAPPED|OPTION_DEBUG_TEST1|OPTION_SYNCHRONOUS)
+#endif
+#endif
+
+/*
+ * Sponsored by
+ * iX Multiuser Multitasking Magazine
+ * Hannover, Germany
+ * hm@ix.de
+ *
+ * Copyright 1993, 1994, 1995 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@PoohSticks.ORG
+ * +1 (303) 786-7975
+ *
+ * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+ *
+ * For more information, please consult
+ *
+ * NCR53C810
+ * SCSI I/O Processor
+ * Programmer's Guide
+ *
+ * NCR 53C810
+ * PCI-SCSI I/O Processor
+ * Data Manual
+ *
+ * NCR 53C810/53C820
+ * PCI-SCSI I/O Processor Design In Guide
+ *
+ * For literature on Symbios Logic Inc. formerly NCR, SCSI,
+ * and Communication products please call (800) 334-5454 or
+ * (719) 536-3300.
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ */
+
+/*
+ * Design issues :
+ * The cumulative latency needed to propagate a read/write request
+ * through the file system, buffer cache, driver stacks, SCSI host, and
+ * SCSI device is ultimately the limiting factor in throughput once we
+ * have a sufficiently fast host adapter.
+ *
+ * So, to maximize performance we want to keep the ratio of latency to data
+ * transfer time to a minimum by
+ * 1. Minimizing the total number of commands sent (typical command latency
+ * including drive and bus mastering host overhead is as high as 4.5ms)
+ * to transfer a given amount of data.
+ *
+ * This is accomplished by placing no arbitrary limit on the number
+ * of scatter/gather buffers supported, since we can transfer 1K
+ * per scatter/gather buffer without Eric's cluster patches,
+ * 4K with.
+ *
+ * 2. Minimizing the number of fatal interrupts serviced, since
+ * fatal interrupts halt the SCSI I/O processor. Basically,
+ * this means offloading the practical maximum amount of processing
+ * to the SCSI chip.
+ *
+ * On the NCR53c810/820/720, this is accomplished by using
+ * interrupt-on-the-fly signals when commands complete,
+ * and only handling fatal errors and SDTR / WDTR messages
+ * in the host code.
+ *
+ * On the NCR53c710, interrupts are generated as on the NCR53c8x0,
+ *    only the lack of an interrupt-on-the-fly facility complicates
+ * things. Also, SCSI ID registers and commands are
+ * bit fielded rather than binary encoded.
+ *
+ * On the NCR53c700 and NCR53c700-66, operations that are done via
+ * indirect, table mode on the more advanced chips must be
+ * replaced by calls through a jump table which
+ * acts as a surrogate for the DSA. Unfortunately, this
+ * will mean that we must service an interrupt for each
+ * disconnect/reconnect.
+ *
+ * 3. Eliminating latency by pipelining operations at the different levels.
+ *
+ * This driver allows a configurable number of commands to be enqueued
+ * for each target/lun combination (experimentally, I have discovered
+ * that two seems to work best) and will ultimately allow for
+ * SCSI-II tagged queuing.
+ *
+ *
+ * Architecture :
+ * This driver is built around a Linux queue of commands waiting to
+ * be executed, and a shared Linux/NCR array of commands to start. Commands
+ * are transferred to the array by the run_process_issue_queue() function
+ * which is called whenever a command completes.
+ *
+ * As commands are completed, the interrupt routine is triggered,
+ * looks for commands in the linked list of completed commands with
+ * valid status, removes these commands from a list of running commands,
+ * calls the done routine, and flags their target/luns as not busy.
+ *
+ * Due to limitations in the intelligence of the NCR chips, certain
+ * concessions are made. In many cases, it is easier to dynamically
+ * generate/fix-up code rather than calculate on the NCR at run time.
+ * So, code is generated or fixed up for
+ *
+ * - Handling data transfers, using a variable number of MOVE instructions
+ * interspersed with CALL MSG_IN, WHEN MSGIN instructions.
+ *
+ * The DATAIN and DATAOUT routines are separate, so that an incorrect
+ * direction can be trapped, and space isn't wasted.
+ *
+ * It may turn out that we're better off using some sort
+ * of table indirect instruction in a loop with a variable
+ * sized table on the NCR53c710 and newer chips.
+ *
+ * - Checking for reselection (NCR53c710 and better)
+ *
+ * - Handling the details of SCSI context switches (NCR53c710 and better),
+ * such as reprogramming appropriate synchronous parameters,
+ * removing the dsa structure from the NCR's queue of outstanding
+ * commands, etc.
+ *
+ */
+
+/*
+ * Accommodate differences between stock 1.2.x and 1.3.x asm-i386/types.h
+ * so lusers can drop in 53c7,8xx.* and get something which compiles
+ * without warnings.
+ */
+
+#if !defined(LINUX_1_2) && !defined(LINUX_1_3)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE > 65536 + 3 * 256
+#define LINUX_1_3
+#else
+#define LINUX_1_2
+#endif
+#endif
+
+#ifdef LINUX_1_2
+#define u32 bogus_u32
+#define s32 bogus_s32
+#include <asm/types.h>
+#undef u32
+#undef s32
+typedef __signed__ int s32;
+typedef unsigned int u32;
+#endif /* def LINUX_1_2 */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#ifdef LINUX_1_2
+#include "../block/blk.h"
+#else
+#include <linux/blk.h>
+#endif
+#undef current
+
+#include "scsi.h"
+#include "hosts.h"
+#include "53c7,8xx.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+#include <linux/stddef.h>
+
+#ifndef LINUX_1_2
+struct proc_dir_entry proc_scsi_ncr53c7xx = {
+ PROC_SCSI_NCR53C7xx, 9, "ncr53c7xx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#endif
+
+static int check_address (unsigned long addr, int size);
+static void dump_events (struct Scsi_Host *host, int count);
+static Scsi_Cmnd * return_outstanding_commands (struct Scsi_Host *host,
+ int free, int issue);
+static void hard_reset (struct Scsi_Host *host);
+static void ncr_scsi_reset (struct Scsi_Host *host);
+static void print_lots (struct Scsi_Host *host);
+static void set_synchronous (struct Scsi_Host *host, int target, int sxfer,
+ int scntl3, int now_connected);
+static int datapath_residual (struct Scsi_Host *host);
+static const char * sbcl_to_phase (int sbcl);
+static void print_progress (Scsi_Cmnd *cmd);
+static void print_queues (struct Scsi_Host *host);
+static void process_issue_queue (unsigned long flags);
+static int shutdown (struct Scsi_Host *host);
+static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int result);
+static int disable (struct Scsi_Host *host);
+static int NCR53c8xx_run_tests (struct Scsi_Host *host);
+static int NCR53c8xx_script_len;
+static int NCR53c8xx_dsa_len;
+static void NCR53c7x0_intr(int irq, void *dev_id, struct pt_regs * regs);
+static int ncr_halt (struct Scsi_Host *host);
+static void intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd
+ *cmd);
+static void intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
+static void print_dsa (struct Scsi_Host *host, u32 *dsa,
+ const char *prefix);
+static int print_insn (struct Scsi_Host *host, const u32 *insn,
+ const char *prefix, int kernel);
+
+static void NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd);
+static void NCR53c8x0_init_fixup (struct Scsi_Host *host);
+static int NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd);
+static void NCR53c8x0_soft_reset (struct Scsi_Host *host);
+
+/* INSMOD variables */
+static long long perm_options = PERM_OPTIONS;
+/* 14 = .5s; 15 is max; decreasing divides by two. */
+static int selection_timeout = 14;
+/* Size of event list (per host adapter) */
+static int track_events = 0;
+
+static struct Scsi_Host *first_host = NULL; /* Head of list of NCR boards */
+static Scsi_Host_Template *the_template = NULL;
+
+/*
+ * KNOWN BUGS :
+ * - There is some sort of conflict when the PPP driver is compiled with
+ * support for 16 channels?
+ *
+ * - On systems which predate the 1.3.x initialization order change,
+ * the NCR driver will cause Cannot get free page messages to appear.
+ * These are harmless, but I don't know of an easy way to avoid them.
+ *
+ * - With OPTION_DISCONNECT, on two systems under unknown circumstances,
+ *   we get a PHASE MISMATCH with DSA set to zero (which suggests that
+ *   this is occurring somewhere in the reselection code) where
+ * DSP=some value DCMD|DBC=same value.
+ *
+ * Closer inspection suggests that we may be trying to execute
+ * some portion of the DSA?
+ * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
+ * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
+ * scsi0 : no current command : unexpected phase MSGIN.
+ * DSP=0x1c46cc, DCMD|DBC=0x1c46ac, DSA=0x0
+ * DSPS=0x0, TEMP=0x1c3e70, DMODE=0x80
+ * scsi0 : DSP->
+ * 001c46cc : 0x001c46cc 0x00000000
+ * 001c46d4 : 0x001c5ea0 0x000011f8
+ *
+ * Changed the print code in the phase_mismatch handler so
+ * that we call print_lots to try to diagnose this.
+ *
+ */
+
+/*
+ * Possible future direction of architecture for max performance :
+ *
+ * We're using a single start array for the NCR chip. This is
+ * sub-optimal, because we cannot add a command which would conflict with
+ * an executing command to this start queue, and therefore must insert the
+ * next command for a given I/T/L combination after the first has completed;
+ * incurring our interrupt latency between SCSI commands.
+ *
+ * To allow further pipelining of the NCR and host CPU operation, we want
+ * to set things up so that immediately on termination of a command destined
+ * for a given LUN, we get that LUN busy again.
+ *
+ * To do this, we need to add a 32 bit pointer which is jumped to
+ * on completion of a command. If no new command is available, this
+ * would point to the usual DSA issue queue select routine.
+ *
+ * If one were, it would point to a per-NCR53c7x0_cmd select routine
+ * which starts execution immediately, inserting the command at the head
+ * of the start queue if the NCR chip is selected or reselected.
+ *
+ * We would change so that we keep a list of outstanding commands
+ * for each unit, rather than a single running_list. We'd insert
+ * a new command into the right running list; if the NCR didn't
+ * have something running for that yet, we'd put it in the
+ * start queue as well. Some magic needs to happen to handle the
+ * race condition between the first command terminating before the
+ * new one is written.
+ *
+ * Potential for profiling :
+ * Call do_gettimeofday(struct timeval *tv) to get 800ns resolution.
+ */
+
+
+/*
+ * TODO :
+ * 1. To support WIDE transfers, not much needs to happen. We
+ * should do CHMOVE instructions instead of MOVEs when
+ * we have scatter/gather segments of uneven length. When
+ * we do this, we need to handle the case where we disconnect
+ * between segments.
+ *
+ * 2. Currently, when Icky things happen we do a FATAL(). Instead,
+ * we want to do an integrity check on the parts of the NCR hostdata
+ * structure which were initialized at boot time; FATAL() if that
+ * fails, and otherwise try to recover. Keep track of how many
+ * times this has happened within a single SCSI command; if it
+ * gets excessive, then FATAL().
+ *
+ * 3. Parity checking is currently disabled, and a few things should
+ * happen here now that we support synchronous SCSI transfers :
+ * 1. On soft-reset, we should set the EPC (Enable Parity Checking)
+ * and AAP (Assert SATN/ on parity error) bits in SCNTL0.
+ *
+ * 2. We should enable the parity interrupt in the SIEN0 register.
+ *
+ * 3. intr_phase_mismatch() needs to believe that message out is
+ * always an "acceptable" phase to have a mismatch in. If
+ * the old phase was MSG_IN, we should send a MESSAGE PARITY
+ * error. If the old phase was something else, we should send
+ *	an INITIATOR_DETECTED_ERROR message. Note that this could
+ * cause a RESTORE POINTERS message; so we should handle that
+ * correctly first. Instead, we should probably do an
+ * initiator_abort.
+ *
+ * 4. MPEE bit of CTEST4 should be set so we get interrupted if
+ * we detect an error.
+ *
+ *
+ * 5. The initial code has been tested on the NCR53c810. I don't
+ * have access to NCR53c700, 700-66 (Forex boards), NCR53c710
+ * (NCR Pentium systems), NCR53c720, NCR53c820, or NCR53c825 boards to
+ * finish development on those platforms.
+ *
+ * NCR53c820/825/720 - need to add wide transfer support, including WDTR
+ * negotiation, programming of wide transfer capabilities
+ * on reselection and table indirect selection.
+ *
+ * NCR53c710 - need to add fatal interrupt or GEN code for
+ * command completion signaling. Need to modify all
+ * SDID, SCID, etc. registers, and table indirect select code
+ * since these use bit fielded (ie 1<<target) instead of
+ * binary encoded target ids. Need to accommodate
+ * different register mappings, probably scan through
+ * the SCRIPT code and change the non SFBR register operand
+ * of all MOVE instructions.
+ *
+ * NCR53c700/700-66 - need to add code to refix addresses on
+ * every nexus change, eliminate all table indirect code,
+ * very messy.
+ *
+ * 6. The NCR53c7x0 series is very popular on other platforms that
+ * could be running Linux - ie, some high performance AMIGA SCSI
+ * boards use it.
+ *
+ * So, I should include #ifdef'd code so that it is
+ * compatible with these systems.
+ *
+ * Specifically, the little Endian assumptions I made in my
+ * bit fields need to change, and if the NCR doesn't see memory
+ * the right way, we need to provide options to reverse words
+ * when the scripts are relocated.
+ *
+ * 7. Use vremap() to access memory mapped boards.
+ */
+
+/*
+ * Allow for simultaneous existence of multiple SCSI scripts so we
+ * can have a single driver binary for all of the family.
+ *
+ * - one for NCR53c700 and NCR53c700-66 chips (not yet supported)
+ * - one for rest (only the NCR53c810, 815, 820, and 825 are currently
+ * supported)
+ *
+ * So that we only need two SCSI scripts, we need to modify things so
+ * that we fixup register accesses in READ/WRITE instructions, and
+ * we'll also have to accommodate the bit vs. binary encoding of IDs
+ * with the 7xx chips.
+ */
+
+/*
+ * Use pci_chips_ids to translate in both directions between PCI device ID
+ * and chip numbers.
+ */
+
+static struct {
+ unsigned short pci_device_id;
+ int chip;
+/*
+ * The revision field of the PCI_CLASS_REVISION register is compared
+ * against each of these fields if the field is not -1. If it
+ * is less than min_revision or larger than max_revision, a warning
+ * message is printed.
+ */
+ int max_revision;
+ int min_revision;
+} pci_chip_ids[] = {
+ {PCI_DEVICE_ID_NCR_53C810, 810, 2, 1},
+ {PCI_DEVICE_ID_NCR_53C815, 815, 3, 2},
+ {PCI_DEVICE_ID_NCR_53C820, 820, -1, -1},
+ {PCI_DEVICE_ID_NCR_53C825, 825, -1, -1}
+};
+
+#define NPCI_CHIP_IDS (sizeof (pci_chip_ids) / sizeof(pci_chip_ids[0]))
+
+#define ROUNDUP(adr,type) \
+ ((void *) (((long) (adr) + sizeof(type) - 1) & ~(sizeof(type) - 1)))
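+
+/*
+ * Editor's note (illustrative only, not part of the original driver) :
+ * ROUNDUP() rounds an address up to the next multiple of sizeof(type).
+ * For example, with a four byte u32, ROUNDUP(0x1006, u32) yields 0x1008,
+ * since (0x1006 + 4 - 1) & ~3 == 0x1008.
+ */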
+
+/*
+ * Forced detection and autoprobe code for various hardware. Currently,
+ * entry points for these are not included in init/main.c because if the
+ * PCI BIOS code isn't working right, you're not going to be able to use
+ * the hardware anyways; this way we force users to solve their
+ * problems rather than forcing detection and blaming us when it
+ * does not work.
+ */
+
+static struct override {
+ int chip; /* 700, 70066, 710, 720, 810, 820 */
+ int board; /* Any special board level gunk */
+ unsigned pci:1;
+ union {
+ struct {
+ int base; /* Memory address - indicates memory mapped regs */
+ int io_port;/* I/O port address - indicates I/O mapped regs */
+ int irq; /* IRQ line */
+ int dma; /* DMA channel - often none */
+ } normal;
+ struct {
+ int bus;
+ int device;
+ int function;
+ } pci;
+ } data;
+ long long options;
+} overrides [4] = {{0,},};
+static int commandline_current = 0;
+static int no_overrides = 0;
+
+#if 0
+#define OVERRIDE_LIMIT (sizeof(overrides) / sizeof(struct override))
+#else
+#define OVERRIDE_LIMIT commandline_current
+#endif
+
+/*
+ * Function: issue_to_cmd
+ *
+ * Purpose: convert jump instruction in issue array to NCR53c7x0_cmd
+ * structure pointer.
+ *
+ * Inputs : issue - pointer to start of NOP or JUMP instruction
+ * in issue array.
+ *
+ * Returns: pointer to command on success; 0 if opcode is NOP.
+ */
+
+static inline struct NCR53c7x0_cmd *
+issue_to_cmd (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+ u32 *issue)
+{
+ return (issue[0] != hostdata->NOP_insn) ?
+ /*
+ * If the IF TRUE bit is set, it's a JUMP instruction. The
+ * operand is a bus pointer to the dsa_begin routine for this DSA. The
+ * dsa field of the NCR53c7x0_cmd structure starts with the
+ * DSA code template. By converting to a virtual address,
+ * subtracting the code template size, and offset of the
+ * dsa field, we end up with a pointer to the start of the
+ * structure (alternatively, we could use the
+ * dsa_cmnd field, an anachronism from when we weren't
+ * sure what the relationship between the NCR structures
+ *     and host structures was going to be).
+ */
+ (struct NCR53c7x0_cmd *) ((char *) bus_to_virt (issue[1]) -
+ (hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template) -
+ offsetof(struct NCR53c7x0_cmd, dsa))
+ /* If the IF TRUE bit is not set, it's a NOP */
+ : NULL;
+}
+
+
+/*
+ * Function : static internal_setup(int board, int chip, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array.
+ *
+ * Inputs : board - currently, unsupported. chip - 700, 70066, 710, 720
+ * 810, 815, 820, 825, although currently only the NCR53c810 is
+ * supported.
+ *
+ */
+
+static void
+internal_setup(int board, int chip, char *str, int *ints) {
+ unsigned char pci; /* Specifies a PCI override, with bus, device,
+ function */
+
+ pci = (str && !strcmp (str, "pci")) ? 1 : 0;
+
+/*
+ * Override syntaxes are as follows :
+ * ncr53c700,ncr53c700-66,ncr53c710,ncr53c720=mem,io,irq,dma
+ * ncr53c810,ncr53c820,ncr53c825=mem,io,irq or pci,bus,device,function
+ */
+
+ if (commandline_current < OVERRIDE_LIMIT) {
+ overrides[commandline_current].pci = pci ? 1 : 0;
+ if (!pci) {
+ overrides[commandline_current].data.normal.base = ints[1];
+ overrides[commandline_current].data.normal.io_port = ints[2];
+ overrides[commandline_current].data.normal.irq = ints[3];
+ overrides[commandline_current].data.normal.dma = (ints[0] >= 4) ?
+ ints[4] : DMA_NONE;
+ /* FIXME: options is now a long long */
+ overrides[commandline_current].options = (ints[0] >= 5) ?
+ ints[5] : 0;
+ } else {
+ overrides[commandline_current].data.pci.bus = ints[1];
+ overrides[commandline_current].data.pci.device = ints[2];
+ overrides[commandline_current].data.pci.function = ints[3];
+ /* FIXME: options is now a long long */
+ overrides[commandline_current].options = (ints[0] >= 4) ?
+ ints[4] : 0;
+ }
+ overrides[commandline_current].board = board;
+ overrides[commandline_current].chip = chip;
+ ++commandline_current;
+ ++no_overrides;
+ } else {
+ printk ("53c7,7x0.c:internal_setup() : too many overrides\n");
+ }
+}
+
+/*
+ * XXX - we might want to implement a single override function
+ * with a chip type field, revamp the command line configuration,
+ * etc.
+ */
+
+#define setup_wrapper(x) \
+void ncr53c##x##_setup (char *str, int *ints) { \
+ internal_setup (BOARD_GENERIC, x, str, ints); \
+}
+
+setup_wrapper(700)
+setup_wrapper(70066)
+setup_wrapper(710)
+setup_wrapper(720)
+setup_wrapper(810)
+setup_wrapper(815)
+setup_wrapper(820)
+setup_wrapper(825)
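+
+/*
+ * Editor's note (illustrative only, not part of the original driver) :
+ * each setup_wrapper(x) invocation above expands to a LILO setup entry
+ * point; e.g. setup_wrapper(810) becomes
+ *
+ *	void ncr53c810_setup (char *str, int *ints) {
+ *	    internal_setup (BOARD_GENERIC, 810, str, ints);
+ *	}
+ */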
+
+/*
+ * FIXME: we should junk these, in favor of synchronous_want and
+ * wide_want in the NCR53c7x0_hostdata structure.
+ */
+
+/* Template for "preferred" synchronous transfer parameters. */
+
+static const unsigned char sdtr_message[] = {
+#ifdef CONFIG_SCSI_NCR53C7xx_FAST
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 25 /* *4ns */, 8 /* off */
+#else
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 50 /* *4ns */, 8 /* off */
+#endif
+};
+
+/* Template to request asynchronous transfers */
+
+static const unsigned char async_message[] = {
+ EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 0, 0 /* asynchronous */
+};
+
+/* Template for "preferred" WIDE transfer parameters */
+
+static const unsigned char wdtr_message[] = {
+ EXTENDED_MESSAGE, 2 /* length */, EXTENDED_WDTR, 1 /* 2^1 bytes */
+};
+
+/*
+ * Function : struct Scsi_Host *find_host (int host)
+ *
+ * Purpose : KGDB support function which translates a host number
+ * to a host structure.
+ *
+ * Inputs : host - number of SCSI host
+ *
+ * Returns : NULL on failure, pointer to host structure on success.
+ */
+
+static struct Scsi_Host *
+find_host (int host) {
+ struct Scsi_Host *h;
+ for (h = first_host; h && h->host_no != host; h = h->next);
+ if (!h) {
+ printk (KERN_ALERT "scsi%d not found\n", host);
+ return NULL;
+ } else if (h->hostt != the_template) {
+ printk (KERN_ALERT "scsi%d is not a NCR board\n", host);
+ return NULL;
+ }
+ return h;
+}
+
+/*
+ * Function : request_synchronous (int host, int target)
+ *
+ * Purpose : KGDB interface which will allow us to negotiate for
+ *	synchronous transfers. This will be replaced with a more
+ * integrated function; perhaps a new entry in the scsi_host
+ * structure, accessible via an ioctl() or perhaps /proc/scsi.
+ *
+ * Inputs : host - number of SCSI host; target - number of target.
+ *
+ * Returns : 0 when negotiation has been setup for next SCSI command,
+ * -1 on failure.
+ */
+
+static int
+request_synchronous (int host, int target) {
+ struct Scsi_Host *h;
+ struct NCR53c7x0_hostdata *hostdata;
+ unsigned long flags;
+ if (target < 0) {
+ printk (KERN_ALERT "target %d is bogus\n", target);
+ return -1;
+ }
+ if (!(h = find_host (host)))
+ return -1;
+ else if (h->this_id == target) {
+ printk (KERN_ALERT "target %d is host ID\n", target);
+ return -1;
+ }
+#ifndef LINUX_1_2
+ else if (target > h->max_id) {
+ printk (KERN_ALERT "target %d exceeds maximum of %d\n", target,
+ h->max_id);
+ return -1;
+ }
+#endif
+ hostdata = (struct NCR53c7x0_hostdata *)h->hostdata;
+
+ save_flags(flags);
+ cli();
+ if (hostdata->initiate_sdtr & (1 << target)) {
+ restore_flags(flags);
+ printk (KERN_ALERT "target %d already doing SDTR\n", target);
+ return -1;
+ }
+ hostdata->initiate_sdtr |= (1 << target);
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Function : request_disconnect (int host, int on_or_off)
+ *
+ * Purpose : KGDB support function, tells us to allow or disallow
+ * disconnections.
+ *
+ * Inputs : host - number of SCSI host; on_or_off - non-zero to allow,
+ * zero to disallow.
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int
+request_disconnect (int host, int on_or_off) {
+ struct Scsi_Host *h;
+ struct NCR53c7x0_hostdata *hostdata;
+ if (!(h = find_host (host)))
+ return -1;
+ hostdata = (struct NCR53c7x0_hostdata *) h->hostdata;
+ if (on_or_off)
+ hostdata->options |= OPTION_DISCONNECT;
+ else
+ hostdata->options &= ~OPTION_DISCONNECT;
+ return 0;
+}
+
+/*
+ * Function : static void NCR53c7x0_driver_init (struct Scsi_Host *host)
+ *
+ * Purpose : Initialize internal structures, as required on startup, or
+ * after a SCSI bus reset.
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ */
+
+static void
+NCR53c7x0_driver_init (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i, j;
+ u32 *current;
+ for (i = 0; i < 16; ++i) {
+ hostdata->request_sense[i] = 0;
+ for (j = 0; j < 8; ++j)
+ hostdata->busy[i][j] = 0;
+ set_synchronous (host, i, /* sxfer */ 0, hostdata->saved_scntl3, 0);
+ }
+ hostdata->issue_queue = NULL;
+ hostdata->running_list = hostdata->finished_queue =
+ hostdata->current = NULL;
+ for (i = 0, current = (u32 *) hostdata->schedule;
+ i < host->can_queue; ++i, current += 2) {
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ }
+ current[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
+ current[1] = (u32) virt_to_bus (hostdata->script) +
+ hostdata->E_wait_reselect;
+ hostdata->reconnect_dsa_head = 0;
+ hostdata->addr_reconnect_dsa_head = (u32)
+ virt_to_bus((void *) &(hostdata->reconnect_dsa_head));
+ hostdata->expecting_iid = 0;
+ hostdata->expecting_sto = 0;
+ if (hostdata->options & OPTION_ALWAYS_SYNCHRONOUS)
+ hostdata->initiate_sdtr = 0xffff;
+ else
+ hostdata->initiate_sdtr = 0;
+ hostdata->talked_to = 0;
+ hostdata->idle = 1;
+}
+
+/*
+ * Function : static int ccf_to_clock (int ccf)
+ *
+ * Purpose : Return the largest SCSI clock allowable for a given
+ * clock conversion factor, allowing us to do synchronous periods
+ * when we don't know what the SCSI clock is by taking at least
+ * as long as the device says we can.
+ *
+ * Inputs : ccf
+ *
+ * Returns : clock on success, -1 on failure.
+ */
+
+static int
+ccf_to_clock (int ccf) {
+ switch (ccf) {
+ case 1: return 25000000; /* Divide by 1.0 */
+ case 2: return 37500000; /* Divide by 1.5 */
+ case 3: return 50000000; /* Divide by 2.0 */
+ case 0: /* Divide by 3.0 */
+ case 4: return 66000000;
+ default: return -1;
+ }
+}
+
+/*
+ * Function : static int clock_to_ccf (int clock)
+ *
+ * Purpose : Return the clock conversion factor for a given SCSI clock.
+ *
+ * Inputs : clock - SCSI clock expressed in Hz.
+ *
+ * Returns : ccf on success, -1 on failure.
+ */
+
+static int
+clock_to_ccf (int clock) {
+ if (clock < 16666666)
+ return -1;
+ if (clock < 25000000)
+ return 1; /* Divide by 1.0 */
+ else if (clock < 37500000)
+ return 2; /* Divide by 1.5 */
+ else if (clock < 50000000)
+ return 3; /* Divide by 2.0 */
+ else if (clock < 66000000)
+ return 4; /* Divide by 3.0 */
+ else
+ return -1;
+}
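+
+/*
+ * Editor's note (illustrative only, not part of the original driver) :
+ * worked example of the two helpers above.  The 40MHz SCSI clock assumed
+ * for the 8xx chips later in this file falls in the 37.5-50MHz range, so
+ * clock_to_ccf(40000000) returns 3 (divide by 2.0); going the other way,
+ * ccf_to_clock(3) returns the 50MHz upper bound of that range, erring on
+ * the slow (safe) side.
+ */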
+
+/*
+ * Function : static int NCR53c7x0_init (struct Scsi_Host *host)
+ *
+ * Purpose : initialize the internal structures for a given SCSI host
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ * Preconditions : when this function is called, the chip_type
+ * field of the hostdata structure MUST have been set.
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int
+NCR53c7x0_init (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ int i, ccf, expected_ccf;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct Scsi_Host *search;
+ /*
+ * There are some things which we need to know about in order to provide
+ * a semblance of support. Print 'em if they aren't what we expect,
+ * otherwise don't add to the noise.
+ *
+ * -1 means we don't know what to expect.
+ */
+ int expected_id = -1;
+ int expected_clock = -1;
+ int uninitialized = 0;
+ /*
+ * FIXME : this is only on Intel boxes. On other platforms, this
+ * will differ.
+ */
+ int expected_mapping = OPTION_IO_MAPPED;
+ NCR53c7x0_local_setup(host);
+
+ switch (hostdata->chip) {
+ case 820:
+ case 825:
+#ifdef notyet
+ host->max_id = 15;
+#endif
+ /* Fall through */
+ case 810:
+ case 815:
+ hostdata->dstat_sir_intr = NCR53c8x0_dstat_sir_intr;
+ hostdata->init_save_regs = NULL;
+ hostdata->dsa_fixup = NCR53c8xx_dsa_fixup;
+ hostdata->init_fixup = NCR53c8x0_init_fixup;
+ hostdata->soft_reset = NCR53c8x0_soft_reset;
+ hostdata->run_tests = NCR53c8xx_run_tests;
+/* Is the SCSI clock ever anything else on these chips? */
+ expected_clock = hostdata->scsi_clock = 40000000;
+ expected_id = 7;
+ break;
+ default:
+ printk ("scsi%d : chip type of %d is not supported yet, detaching.\n",
+ host->host_no, hostdata->chip);
+ scsi_unregister (host);
+ return -1;
+ }
+
+ /* Assign constants accessed by NCR */
+ hostdata->NCR53c7xx_zero = 0;
+ hostdata->NCR53c7xx_msg_reject = MESSAGE_REJECT;
+ hostdata->NCR53c7xx_msg_abort = ABORT;
+ hostdata->NCR53c7xx_msg_nop = NOP;
+ hostdata->NOP_insn = (DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24;
+
+ if (expected_mapping == -1 ||
+ (hostdata->options & (OPTION_MEMORY_MAPPED)) !=
+ (expected_mapping & OPTION_MEMORY_MAPPED))
+ printk ("scsi%d : using %s mapped access\n", host->host_no,
+ (hostdata->options & OPTION_MEMORY_MAPPED) ? "memory" :
+ "io");
+
+ hostdata->dmode = (hostdata->chip == 700 || hostdata->chip == 70066) ?
+ DMODE_REG_00 : DMODE_REG_10;
+ hostdata->istat = ((hostdata->chip / 100) == 8) ?
+ ISTAT_REG_800 : ISTAT_REG_700;
+
+/* Only the ISTAT register is readable when the NCR is running, so make
+ sure it's halted. */
+ ncr_halt(host);
+
+/*
+ * XXX - the NCR53c700 uses bitfielded registers for SCID, SDID, etc,
+ * as does the 710 with one bit per SCSI ID. Conversely, the NCR
+ * uses a normal, 3 bit binary representation of these values.
+ *
+ * Get the rest of the NCR documentation, and FIND OUT where the change
+ * was.
+ */
+#if 0
+ tmp = hostdata->this_id_mask = NCR53c7x0_read8(SCID_REG);
+ for (host->this_id = 0; tmp != 1; tmp >>=1, ++host->this_id);
+#else
+ host->this_id = NCR53c7x0_read8(SCID_REG) & 15;
+ if (host->this_id == 0)
+ host->this_id = 7; /* sanitize hostid---0 doesn't make sense */
+ hostdata->this_id_mask = 1 << host->this_id;
+#endif
+
+/*
+ * Note : we should never encounter a board setup for ID0. So,
+ * if we see ID0, assume that it was uninitialized and set it
+ * to the industry standard 7.
+ */
+ if (!host->this_id) {
+ printk("scsi%d : initiator ID was %d, changing to 7\n",
+ host->host_no, host->this_id);
+ host->this_id = 7;
+ hostdata->this_id_mask = 1 << 7;
+ uninitialized = 1;
+ };
+
+ if (expected_id == -1 || host->this_id != expected_id)
+ printk("scsi%d : using initiator ID %d\n", host->host_no,
+ host->this_id);
+
+ /*
+ * Save important registers to allow a soft reset.
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ /*
+ * CTEST4 controls burst mode disable.
+ */
+ hostdata->saved_ctest4 = NCR53c7x0_read8(CTEST4_REG_800) &
+ CTEST4_800_SAVE;
+ } else {
+ /*
+ * CTEST7 controls cache snooping, burst mode, and support for
+ * external differential drivers.
+ */
+ hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
+ }
+
+ /*
+ * On NCR53c700 series chips, DCNTL controls the SCSI clock divisor,
+ * on 800 series chips, it allows for a totem-pole IRQ driver.
+ */
+
+ hostdata->saved_dcntl = NCR53c7x0_read8(DCNTL_REG);
+
+ /*
+     * DCNTL_800_IRQM controls whether we are using an open drain
+ * driver (reset) or totem pole driver (set). In all cases,
+ * it's level active. I suppose this is an issue when we're trying to
+ * wire-or the same PCI INTx line?
+ */
+ if ((hostdata->chip / 100) == 8)
+ hostdata->saved_dcntl &= ~DCNTL_800_IRQM;
+
+ /*
+ * DMODE controls DMA burst length, and on 700 series chips,
+ * 286 mode and bus width
+ */
+ hostdata->saved_dmode = NCR53c7x0_read8(hostdata->dmode);
+
+ /*
+ * Now that burst length and enabled/disabled status is known,
+ * clue the user in on it.
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ if (hostdata->saved_ctest4 & CTEST4_800_BDIS) {
+ printk ("scsi%d : burst mode disabled\n", host->host_no);
+ } else {
+ switch (hostdata->saved_dmode & DMODE_BL_MASK) {
+ case DMODE_BL_2: i = 2; break;
+ case DMODE_BL_4: i = 4; break;
+ case DMODE_BL_8: i = 8; break;
+ case DMODE_BL_16: i = 16; break;
+ default: i = 0;
+ }
+ printk ("scsi%d : burst length %d\n", host->host_no, i);
+ }
+ }
+
+ /*
+     * On NCR53c810 and NCR53c820 chips, SCNTL3 contains the synchronous
+ * and normal clock conversion factors.
+ */
+ if (hostdata->chip / 100 == 8) {
+ expected_ccf = clock_to_ccf (expected_clock);
+ hostdata->saved_scntl3 = NCR53c7x0_read8(SCNTL3_REG_800);
+ ccf = hostdata->saved_scntl3 & SCNTL3_800_CCF_MASK;
+ if (expected_ccf != -1 && ccf != expected_ccf && !ccf) {
+ hostdata->saved_scntl3 = (hostdata->saved_scntl3 &
+ ~SCNTL3_800_CCF_MASK) | expected_ccf;
+ if (!uninitialized) {
+ printk ("scsi%d : reset ccf to %d from %d\n",
+ host->host_no, expected_ccf, ccf);
+ uninitialized = 1;
+ }
+ }
+ } else
+ ccf = 0;
+
+ /*
+ * If we don't have a SCSI clock programmed, pick one on the upper
+ * bound of that allowed by NCR so that our transfers err on the
+ * slow side, since transfer period must be >= the agreed
+ * upon period.
+ */
+
+ if ((!hostdata->scsi_clock) && (hostdata->scsi_clock = ccf_to_clock (ccf))
+ == -1) {
+ printk ("scsi%d : clock conversion factor %d unknown.\n"
+ " synchronous transfers disabled\n",
+ host->host_no, ccf);
+ hostdata->options &= ~OPTION_SYNCHRONOUS;
+ hostdata->scsi_clock = 0;
+ }
+
+ if (expected_clock == -1 || hostdata->scsi_clock != expected_clock)
+ printk ("scsi%d : using %dMHz SCSI clock\n", host->host_no,
+ hostdata->scsi_clock / 1000000);
+
+ for (i = 0; i < 16; ++i)
+ hostdata->cmd_allocated[i] = 0;
+
+ if (hostdata->init_save_regs)
+ hostdata->init_save_regs (host);
+ if (hostdata->init_fixup)
+ hostdata->init_fixup (host);
+
+ if (!the_template) {
+ the_template = host->hostt;
+ first_host = host;
+ }
+
+ /*
+ * Linux SCSI drivers have always been plagued with initialization
+ * problems - some didn't work with the BIOS disabled since they expected
+ * initialization from it, some didn't work when the networking code
+ * was enabled and registers got scrambled, etc.
+ *
+ * To avoid problems like this, in the future, we will do a soft
+ * reset on the SCSI chip, taking it back to a sane state.
+ */
+
+ hostdata->soft_reset (host);
+
+#if 1
+ hostdata->debug_count_limit = -1;
+#else
+ hostdata->debug_count_limit = 1;
+#endif
+ hostdata->intrs = -1;
+ hostdata->resets = -1;
+ memcpy ((void *) hostdata->synchronous_want, (void *) sdtr_message,
+ sizeof (hostdata->synchronous_want));
+
+ NCR53c7x0_driver_init (host);
+
+ /*
+ * Set up an interrupt handler if we aren't already sharing an IRQ
+ * with another board.
+ */
+
+ for (search = first_host; search && !(search->hostt == the_template &&
+ search->irq == host->irq && search != host); search=search->next);
+
+ if (!search) {
+ if (request_irq(host->irq, NCR53c7x0_intr, SA_INTERRUPT, "53c7,8xx", NULL)) {
+ printk("scsi%d : IRQ%d not free, detaching\n"
+ " You have either a configuration problem, or a\n"
+ " broken BIOS. You may wish to manually assign\n"
+ " an interrupt to the NCR board rather than using\n"
+ " an automatic setting.\n",
+ host->host_no, host->irq);
+ scsi_unregister (host);
+ return -1;
+ }
+ } else {
+ printk("scsi%d : using interrupt handler previously installed for scsi%d\n",
+ host->host_no, search->host_no);
+ }
+
+
+ if ((hostdata->run_tests && hostdata->run_tests(host) == -1) ||
+ (hostdata->options & OPTION_DEBUG_TESTS_ONLY)) {
+ /* XXX Should disable interrupts, etc. here */
+ scsi_unregister (host);
+ return -1;
+ } else {
+ if (host->io_port) {
+ host->n_io_port = 128;
+ request_region (host->io_port, host->n_io_port, "ncr53c7,8xx");
+ }
+ }
+
+ if (NCR53c7x0_read8 (SBCL_REG) & SBCL_BSY) {
+ printk ("scsi%d : bus wedge, doing SCSI reset\n", host->host_no);
+ hard_reset (host);
+ }
+ return 0;
+}
+
+/*
+ * Function : static int normal_init(Scsi_Host_Template *tpnt, int board,
+ * int chip, u32 base, int io_port, int irq, int dma, int pcivalid,
+ * unsigned char pci_bus, unsigned char pci_device_fn,
+ * long long options);
+ *
+ * Purpose : initializes a NCR53c7,8x0 based on base addresses,
+ * IRQ, and DMA channel.
+ *
+ * Useful where a new NCR chip is backwards compatible with
+ * a supported chip, but the DEVICE ID has changed so it
+ * doesn't show up when the autoprobe does a pcibios_find_device.
+ *
+ * Inputs : tpnt - Template for this SCSI adapter, board - board level
+ * product, chip - 810, 820, or 825, bus - PCI bus, device_fn -
+ * device and function encoding as used by PCI BIOS calls.
+ *
+ * Returns : 0 on success, -1 on failure.
+ *
+ */
+
+static int
+normal_init (Scsi_Host_Template *tpnt, int board, int chip,
+ u32 base, int io_port, int irq, int dma, int pci_valid,
+ unsigned char pci_bus, unsigned char pci_device_fn, long long options) {
+ struct Scsi_Host *instance;
+ struct NCR53c7x0_hostdata *hostdata;
+ char chip_str[80];
+ int script_len = 0, dsa_len = 0, size = 0, max_cmd_size = 0,
+ schedule_size = 0, ok = 0;
+ void *tmp;
+
+ options |= perm_options;
+
+ switch (chip) {
+ case 825:
+ case 820:
+ case 815:
+ case 810:
+ schedule_size = (tpnt->can_queue + 1) * 8 /* JUMP instruction size */;
+ script_len = NCR53c8xx_script_len;
+ dsa_len = NCR53c8xx_dsa_len;
+ options |= OPTION_INTFLY;
+ sprintf (chip_str, "NCR53c%d", chip);
+ break;
+ default:
+ printk("scsi-ncr53c7,8xx : unsupported SCSI chip %d\n", chip);
+ return -1;
+ }
+
+ printk("scsi-ncr53c7,8xx : %s at memory 0x%x, io 0x%x, irq %d",
+ chip_str, (unsigned) base, io_port, irq);
+ if (dma == DMA_NONE)
+ printk("\n");
+ else
+ printk(", dma %d\n", dma);
+
+ if ((chip / 100 == 8) && !pci_valid)
+ printk ("scsi-ncr53c7,8xx : for better reliability and performance, please use the\n"
+ " PCI override instead.\n"
+ " Syntax : ncr53c8{10,15,20,25}=pci,<bus>,<device>,<function>\n"
+ " <bus> and <device> are usually 0.\n");
+
+ if (options & OPTION_DEBUG_PROBE_ONLY) {
+ printk ("scsi-ncr53c7,8xx : probe only enabled, aborting initialization\n");
+ return -1;
+ }
+
+ max_cmd_size = sizeof(struct NCR53c7x0_cmd) + dsa_len +
+ /* Size of dynamic part of command structure : */
+ 2 * /* Worst case : we don't know if we need DATA IN or DATA out */
+ ( 2 * /* Current instructions per scatter/gather segment */
+ tpnt->sg_tablesize +
+ 3 /* Current startup / termination required per phase */
+ ) *
+ 8 /* Each instruction is eight bytes */;
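+
+ /*
+ * Illustrative only (not part of the original driver) : with a purely
+ * hypothetical sg_tablesize of 16, the dynamic part above works out to
+ *
+ * 2 * (2 * 16 + 3) * 8 = 560 bytes
+ *
+ * on top of sizeof(struct NCR53c7x0_cmd) + dsa_len, i.e. worst-case
+ * DATA IN and DATA OUT scripts for every scatter/gather segment plus
+ * the per-phase startup/termination instructions.
+ */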
+
+ /* Allocate fixed part of hostdata, dynamic part to hold appropriate
+ SCSI SCRIPT(tm) plus a single, maximum-sized NCR53c7x0_cmd structure.
+
+ We need a NCR53c7x0_cmd structure for scan_scsis() when we are
+ not loaded as a module, and when we're loaded as a module, we
+ can't use a non-dynamically allocated structure because modules
+ are vmalloc()'d, which can allow structures to cross page
+ boundaries and breaks our physical/virtual address assumptions
+ for DMA.
+
+ So, we stick it past the end of our hostdata structure.
+
+ ASSUMPTION :
+ Regardless of how many simultaneous SCSI commands we allow,
+ the probe code only executes a _single_ instruction at a time,
+ so we only need one here, and don't need to allocate NCR53c7x0_cmd
+ structures for each target until we are no longer in scan_scsis
+ and kmalloc() has become functional (memory_init() happens
+ after all device driver initialization).
+ */
+
+ size = sizeof(struct NCR53c7x0_hostdata) + script_len +
+ /* Note that alignment will be guaranteed, since we put the command
+ allocated at probe time after the fixed-up SCSI script, which
+ consists of 32 bit words, aligned on a 32 bit boundary. But
+ on a 64bit machine we need 8 byte alignment for hostdata->free, so
+ we add in another 4 bytes to take care of potential misalignment
+ */
+ (sizeof(void *) - sizeof(u32)) + max_cmd_size + schedule_size;
+
+ instance = scsi_register (tpnt, size);
+ if (!instance)
+ return -1;
+
+ /* FIXME : if we ever support an ISA NCR53c7xx based board, we
+ need to check if the chip is running in a 16 bit mode, and if so
+ unregister it if it is past the 16M (0x1000000) mark */
+
+ hostdata = (struct NCR53c7x0_hostdata *)
+ instance->hostdata;
+ hostdata->size = size;
+ hostdata->script_count = script_len / sizeof(u32);
+ hostdata->board = board;
+ hostdata->chip = chip;
+ if ((hostdata->pci_valid = pci_valid)) {
+ hostdata->pci_bus = pci_bus;
+ hostdata->pci_device_fn = pci_device_fn;
+ }
+
+ /*
+ * Being memory mapped is more desirable, since
+ *
+ * - Memory accesses may be faster.
+ *
+ * - The destination and source address spaces are the same for
+ * all instructions, meaning we don't have to twiddle dmode or
+ * any other registers.
+ *
+ * So, we try for memory mapped, and if we don't get it,
+ * we go for port mapped, and that failing we tell the user
+ * it can't work.
+ */
+
+ if (base) {
+ instance->base = (unsigned char *) (unsigned long) base;
+ /* Check for forced I/O mapping */
+ if (!(options & OPTION_IO_MAPPED)) {
+ options |= OPTION_MEMORY_MAPPED;
+ ok = 1;
+ }
+ } else {
+ options &= ~OPTION_MEMORY_MAPPED;
+ }
+
+ if (io_port) {
+ instance->io_port = io_port;
+ options |= OPTION_IO_MAPPED;
+ ok = 1;
+ } else {
+ options &= ~OPTION_IO_MAPPED;
+ }
+
+ if (!ok) {
+ printk ("scsi%d : not initializing, no I/O or memory mapping known \n",
+ instance->host_no);
+ scsi_unregister (instance);
+ return -1;
+ }
+ instance->irq = irq;
+ instance->dma_channel = dma;
+
+ hostdata->options = options;
+ hostdata->dsa_len = dsa_len;
+ hostdata->max_cmd_size = max_cmd_size;
+ hostdata->num_cmds = 1;
+ /* Initialize single command */
+ tmp = (hostdata->script + hostdata->script_count);
+ hostdata->free = ROUNDUP(tmp, void *);
+ hostdata->free->real = tmp;
+ hostdata->free->size = max_cmd_size;
+ hostdata->free->free = NULL;
+ hostdata->free->next = NULL;
+ hostdata->extra_allocate = 0;
+
+ /* Allocate command start code space */
+ hostdata->schedule = (chip == 700 || chip == 70066) ?
+ NULL : (u32 *) ((char *)hostdata->free + max_cmd_size);
+
+/*
+ * For diagnostic purposes, we don't really care how fast things blaze.
+ * For profiling, we want to access the 800ns resolution system clock,
+ * using a 'C' call on the host processor.
+ *
+ * Therefore, there's no need for the NCR chip to directly manipulate
+ * this data, and we should put it wherever is most convenient for
+ * Linux.
+ */
+ if (track_events)
+ hostdata->events = (struct NCR53c7x0_event *)
+ vmalloc (sizeof (struct NCR53c7x0_event) * track_events);
+ else
+ hostdata->events = NULL;
+
+ if (hostdata->events) {
+ memset ((void *) hostdata->events, 0, sizeof(struct NCR53c7x0_event) *
+ track_events);
+ hostdata->event_size = track_events;
+ hostdata->event_index = 0;
+ } else
+ hostdata->event_size = 0;
+
+ return NCR53c7x0_init(instance);
+}
+
+
+/*
+ * Function : static int ncr_pci_init(Scsi_Host_Template *tpnt, int board,
+ * int chip, int bus, int device_fn, long long options)
+ *
+ * Purpose : initializes a NCR53c800 family based on the PCI
+ * bus, device, and function location of it. Allows
+ * reprogramming of latency timer and determining addresses
+ * and whether bus mastering, etc. are OK.
+ *
+ * Useful where a new NCR chip is backwards compatible with
+ * a supported chip, but the DEVICE ID has changed so it
+ * doesn't show up when the autoprobe does a pcibios_find_device.
+ *
+ * Inputs : tpnt - Template for this SCSI adapter, board - board level
+ * product, chip - 810, 820, or 825, bus - PCI bus, device_fn -
+ * device and function encoding as used by PCI BIOS calls.
+ *
+ * Returns : 0 on success, -1 on failure.
+ *
+ */
+
+static int
+ncr_pci_init (Scsi_Host_Template *tpnt, int board, int chip,
+ unsigned char bus, unsigned char device_fn, long long options) {
+ unsigned short vendor_id, device_id, command;
+#ifdef LINUX_1_2
+ unsigned long
+#else
+ unsigned int
+#endif
+ base, io_port;
+ unsigned char irq, revision;
+ int error, expected_chip;
+ int expected_id = -1, max_revision = -1, min_revision = -1;
+ int i;
+
+ printk("scsi-ncr53c7,8xx : at PCI bus %d, device %d, function %d\n",
+ bus, (int) (device_fn & 0xf8) >> 3,
+ (int) device_fn & 7);
+
+ if (!pcibios_present()) {
+ printk("scsi-ncr53c7,8xx : not initializing due to lack of PCI BIOS,\n"
+ " try using memory, port, irq override instead.\n");
+ return -1;
+ }
+
+ if ((error = pcibios_read_config_word (bus, device_fn, PCI_VENDOR_ID,
+ &vendor_id)) ||
+ (error = pcibios_read_config_word (bus, device_fn, PCI_DEVICE_ID,
+ &device_id)) ||
+ (error = pcibios_read_config_word (bus, device_fn, PCI_COMMAND,
+ &command)) ||
+ (error = pcibios_read_config_dword (bus, device_fn,
+ PCI_BASE_ADDRESS_0, &io_port)) ||
+ (error = pcibios_read_config_dword (bus, device_fn,
+ PCI_BASE_ADDRESS_1, &base)) ||
+ (error = pcibios_read_config_byte (bus, device_fn, PCI_CLASS_REVISION,
+ &revision)) ||
+ (error = pcibios_read_config_byte (bus, device_fn, PCI_INTERRUPT_LINE,
+ &irq))) {
+ printk ("scsi-ncr53c7,8xx : error %s not initializing due to error reading configuration space\n"
+ " perhaps you specified an incorrect PCI bus, device, or function.\n"
+ , pcibios_strerror(error));
+ return -1;
+ }
+
+ /* If any one ever clones the NCR chips, this will have to change */
+
+ if (vendor_id != PCI_VENDOR_ID_NCR) {
+ printk ("scsi-ncr53c7,8xx : not initializing, 0x%04x is not NCR vendor ID\n",
+ (int) vendor_id);
+ return -1;
+ }
+
+
+ /*
+ * Bit 0 is the address space indicator and must be one for I/O
+ * space mappings, bit 1 is reserved, discard them after checking
+ * that they have the correct value of 1.
+ */
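+
+ /*
+ * Worked example (hypothetical BAR value, not from any real probe) :
+ * a base address 0 reading of 0xe801 has bits 1..0 == 01, so it is a
+ * valid I/O mapping; masking with PCI_BASE_ADDRESS_IO_MASK leaves the
+ * usable port base 0xe800. A reading of 0xe802 or 0xe803 would fail
+ * the (io_port & 3) != 1 test below and disable I/O mapping.
+ */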
+
+ if (command & PCI_COMMAND_IO) {
+ if ((io_port & 3) != 1) {
+ printk ("scsi-ncr53c7,8xx : disabling I/O mapping since base address 0 (0x%x)\n"
+ " bits 0..1 indicate a non-IO mapping\n",
+ (unsigned) io_port);
+ io_port = 0;
+ } else
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ } else {
+ io_port = 0;
+ }
+
+ if (command & PCI_COMMAND_MEMORY) {
+ if ((base & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ printk("scsi-ncr53c7,8xx : disabling memory mapping since base address 1\n"
+ " contains a non-memory mapping\n");
+ base = 0;
+ } else
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ } else {
+ base = 0;
+ }
+
+ if (!io_port && !base) {
+ printk ("scsi-ncr53c7,8xx : not initializing, both I/O and memory mappings disabled\n");
+ return -1;
+ }
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ printk ("scsi-ncr53c7,8xx : not initializing, BUS MASTERING was disabled\n");
+ return -1;
+ }
+
+ for (i = 0; i < NPCI_CHIP_IDS; ++i) {
+ if (device_id == pci_chip_ids[i].pci_device_id) {
+ max_revision = pci_chip_ids[i].max_revision;
+ min_revision = pci_chip_ids[i].min_revision;
+ expected_chip = pci_chip_ids[i].chip;
+ }
+ if (chip == pci_chip_ids[i].chip)
+ expected_id = pci_chip_ids[i].pci_device_id;
+ }
+
+ if (chip && device_id != expected_id)
+ printk ("scsi-ncr53c7,8xx : warning : device id of 0x%04x doesn't\n"
+ " match expected 0x%04x\n",
+ (unsigned int) device_id, (unsigned int) expected_id );
+
+ if (max_revision != -1 && revision > max_revision)
+ printk ("scsi-ncr53c7,8xx : warning : revision of %d is greater than %d.\n",
+ (int) revision, max_revision);
+ else if (min_revision != -1 && revision < min_revision)
+ printk ("scsi-ncr53c7,8xx : warning : revision of %d is less than %d.\n",
+ (int) revision, min_revision);
+
+ if (io_port && check_region (io_port, 128)) {
+ printk ("scsi-ncr53c7,8xx : IO region 0x%x to 0x%x is in use\n",
+ (unsigned) io_port, (unsigned) io_port + 127);
+ return -1;
+ }
+
+ return normal_init (tpnt, board, chip, (int) base, io_port,
+ (int) irq, DMA_NONE, 1, bus, device_fn, options);
+}
+
+
+/*
+ * Function : int NCR53c7xx_detect(Scsi_Host_Template *tpnt)
+ *
+ * Purpose : detects and initializes NCR53c7,8x0 SCSI chips
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter
+ *
+ * Returns : number of host adapters detected
+ *
+ */
+
+int
+NCR53c7xx_detect(Scsi_Host_Template *tpnt) {
+ int i;
+ int current_override;
+ int count; /* Number of boards detected */
+ unsigned char pci_bus, pci_device_fn;
+ static short pci_index=0; /* Device index to PCI BIOS calls */
+
+#ifndef LINUX_1_2
+ tpnt->proc_dir = &proc_scsi_ncr53c7xx;
+#endif
+
+ for (current_override = count = 0; current_override < OVERRIDE_LIMIT;
+ ++current_override) {
+ if (overrides[current_override].pci ?
+ !ncr_pci_init (tpnt, overrides[current_override].board,
+ overrides[current_override].chip,
+ (unsigned char) overrides[current_override].data.pci.bus,
+ (((overrides[current_override].data.pci.device
+ << 3) & 0xf8)|(overrides[current_override].data.pci.function &
+ 7)), overrides[current_override].options):
+ !normal_init (tpnt, overrides[current_override].board,
+ overrides[current_override].chip,
+ overrides[current_override].data.normal.base,
+ overrides[current_override].data.normal.io_port,
+ overrides[current_override].data.normal.irq,
+ overrides[current_override].data.normal.dma,
+ 0 /* PCI data invalid */, 0 /* PCI bus place holder */,
+ 0 /* PCI device_function place holder */,
+ overrides[current_override].options)) {
+ ++count;
+ }
+ }
+
+ if (pcibios_present()) {
+ for (i = 0; i < NPCI_CHIP_IDS; ++i)
+ for (pci_index = 0;
+ !pcibios_find_device (PCI_VENDOR_ID_NCR,
+ pci_chip_ids[i].pci_device_id, pci_index, &pci_bus,
+ &pci_device_fn);
+ ++pci_index)
+ if (!ncr_pci_init (tpnt, BOARD_GENERIC, pci_chip_ids[i].chip,
+ pci_bus, pci_device_fn, /* no options */ 0))
+ ++count;
+ }
+ return count;
+}
+
+/* NCR53c810 and NCR53c820 script handling code */
+
+#include "53c8xx_d.h"
+#ifdef A_int_debug_sync
+#define DEBUG_SYNC_INTR A_int_debug_sync
+#endif
+static int NCR53c8xx_script_len = sizeof (SCRIPT);
+static int NCR53c8xx_dsa_len = A_dsa_end + Ent_dsa_zero - Ent_dsa_code_template;
+
+/*
+ * Function : static void NCR53c8x0_init_fixup (struct Scsi_Host *host)
+ *
+ * Purpose : copy and fixup the SCSI SCRIPTS(tm) code for this device.
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ */
+
+static void
+NCR53c8x0_init_fixup (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned char tmp;
+ int i, ncr_to_memory, memory_to_ncr;
+ u32 base;
+ NCR53c7x0_local_setup(host);
+
+
+ /* XXX - NOTE : this code MUST be made endian aware */
+ /* Copy code into buffer that was allocated at detection time. */
+ memcpy ((void *) hostdata->script, (void *) SCRIPT,
+ sizeof(SCRIPT));
+ /* Fixup labels */
+ for (i = 0; i < PATCHES; ++i)
+ hostdata->script[LABELPATCHES[i]] +=
+ virt_to_bus(hostdata->script);
+ /* Fixup addresses of constants that used to be EXTERNAL */
+
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_abort,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_abort)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_reject,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_reject)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_zero,
+ virt_to_bus(&(hostdata->NCR53c7xx_zero)));
+ patch_abs_32 (hostdata->script, 0, NCR53c7xx_sink,
+ virt_to_bus(&(hostdata->NCR53c7xx_sink)));
+ patch_abs_32 (hostdata->script, 0, NOP_insn,
+ virt_to_bus(&(hostdata->NOP_insn)));
+ patch_abs_32 (hostdata->script, 0, schedule,
+ virt_to_bus((void *) hostdata->schedule));
+
+ /* Fixup references to external variables: */
+ for (i = 0; i < EXTERNAL_PATCHES_LEN; ++i)
+ hostdata->script[EXTERNAL_PATCHES[i].offset] +=
+ virt_to_bus(EXTERNAL_PATCHES[i].address);
+
+ /*
+ * Fixup absolutes set at boot-time.
+ *
+ * All non-code absolute variables suffixed with "dsa_" and "int_"
+ * are constants, and need no fixup provided the assembler has done
+ * it for us (I don't know what the "real" NCR assembler does in
+ * this case, my assembler does the right magic).
+ */
+
+ patch_abs_rwri_data (hostdata->script, 0, dsa_save_data_pointer,
+ Ent_dsa_code_save_data_pointer - Ent_dsa_zero);
+ patch_abs_rwri_data (hostdata->script, 0, dsa_restore_pointers,
+ Ent_dsa_code_restore_pointers - Ent_dsa_zero);
+ patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
+ Ent_dsa_code_check_reselect - Ent_dsa_zero);
+
+ /*
+ * Just for the hell of it, preserve the settings of
+ * Burst Length and Enable Read Line bits from the DMODE
+ * register. Make sure SCRIPTS start automagically.
+ */
+
+ tmp = NCR53c7x0_read8(DMODE_REG_10);
+ tmp &= (DMODE_800_ERL | DMODE_BL_MASK);
+
+ if (!(hostdata->options & OPTION_MEMORY_MAPPED)) {
+ base = (u32) host->io_port;
+ memory_to_ncr = tmp|DMODE_800_DIOM;
+ ncr_to_memory = tmp|DMODE_800_SIOM;
+ } else {
+ base = virt_to_bus(host->base);
+ memory_to_ncr = ncr_to_memory = tmp;
+ }
+
+ patch_abs_32 (hostdata->script, 0, addr_scratch, base + SCRATCHA_REG_800);
+ patch_abs_32 (hostdata->script, 0, addr_temp, base + TEMP_REG);
+
+ /*
+ * I needed some variables in the script to be accessible to
+ * both the NCR chip and the host processor. For these variables,
+ * I made the arbitrary decision to store them directly in the
+ * hostdata structure rather than in the RELATIVE area of the
+ * SCRIPTS.
+ */
+
+
+ patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_memory, tmp);
+ patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_ncr, memory_to_ncr);
+ patch_abs_rwri_data (hostdata->script, 0, dmode_ncr_to_memory, ncr_to_memory);
+
+ patch_abs_32 (hostdata->script, 0, msg_buf,
+ virt_to_bus((void *)&(hostdata->msg_buf)));
+ patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
+ virt_to_bus((void *)&(hostdata->reconnect_dsa_head)));
+ patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
+ virt_to_bus((void *)&(hostdata->addr_reconnect_dsa_head)));
+ patch_abs_32 (hostdata->script, 0, reselected_identify,
+ virt_to_bus((void *)&(hostdata->reselected_identify)));
+/* reselected_tag is currently unused */
+#if 0
+ patch_abs_32 (hostdata->script, 0, reselected_tag,
+ virt_to_bus((void *)&(hostdata->reselected_tag)));
+#endif
+
+ patch_abs_32 (hostdata->script, 0, test_dest,
+ virt_to_bus((void*)&hostdata->test_dest));
+ patch_abs_32 (hostdata->script, 0, test_src,
+ virt_to_bus(&hostdata->test_source));
+
+ patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
+ (unsigned char)(Ent_dsa_code_check_reselect - Ent_dsa_zero));
+
+/* These are for event logging; the ncr_event enum contains the
+ actual interrupt numbers. */
+#ifdef A_int_EVENT_SELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT, (u32) EVENT_SELECT);
+#endif
+#ifdef A_int_EVENT_DISCONNECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_DISCONNECT, (u32) EVENT_DISCONNECT);
+#endif
+#ifdef A_int_EVENT_RESELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT, (u32) EVENT_RESELECT);
+#endif
+#ifdef A_int_EVENT_COMPLETE
+ patch_abs_32 (hostdata->script, 0, int_EVENT_COMPLETE, (u32) EVENT_COMPLETE);
+#endif
+#ifdef A_int_EVENT_IDLE
+ patch_abs_32 (hostdata->script, 0, int_EVENT_IDLE, (u32) EVENT_IDLE);
+#endif
+#ifdef A_int_EVENT_SELECT_FAILED
+ patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT_FAILED,
+ (u32) EVENT_SELECT_FAILED);
+#endif
+#ifdef A_int_EVENT_BEFORE_SELECT
+ patch_abs_32 (hostdata->script, 0, int_EVENT_BEFORE_SELECT,
+ (u32) EVENT_BEFORE_SELECT);
+#endif
+#ifdef A_int_EVENT_RESELECT_FAILED
+ patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT_FAILED,
+ (u32) EVENT_RESELECT_FAILED);
+#endif
+
+ /*
+ * Make sure the NCR and Linux code agree on the location of
+ * certain fields.
+ */
+
+ hostdata->E_accept_message = Ent_accept_message;
+ hostdata->E_command_complete = Ent_command_complete;
+ hostdata->E_cmdout_cmdout = Ent_cmdout_cmdout;
+ hostdata->E_data_transfer = Ent_data_transfer;
+ hostdata->E_debug_break = Ent_debug_break;
+ hostdata->E_dsa_code_template = Ent_dsa_code_template;
+ hostdata->E_dsa_code_template_end = Ent_dsa_code_template_end;
+ hostdata->E_end_data_transfer = Ent_end_data_transfer;
+ hostdata->E_initiator_abort = Ent_initiator_abort;
+ hostdata->E_msg_in = Ent_msg_in;
+ hostdata->E_other_transfer = Ent_other_transfer;
+ hostdata->E_other_in = Ent_other_in;
+ hostdata->E_other_out = Ent_other_out;
+ hostdata->E_reject_message = Ent_reject_message;
+ hostdata->E_respond_message = Ent_respond_message;
+ hostdata->E_select = Ent_select;
+ hostdata->E_select_msgout = Ent_select_msgout;
+ hostdata->E_target_abort = Ent_target_abort;
+#ifdef Ent_test_0
+ hostdata->E_test_0 = Ent_test_0;
+#endif
+ hostdata->E_test_1 = Ent_test_1;
+ hostdata->E_test_2 = Ent_test_2;
+#ifdef Ent_test_3
+ hostdata->E_test_3 = Ent_test_3;
+#endif
+ hostdata->E_wait_reselect = Ent_wait_reselect;
+ hostdata->E_dsa_code_begin = Ent_dsa_code_begin;
+
+ hostdata->dsa_cmdout = A_dsa_cmdout;
+ hostdata->dsa_cmnd = A_dsa_cmnd;
+ hostdata->dsa_datain = A_dsa_datain;
+ hostdata->dsa_dataout = A_dsa_dataout;
+ hostdata->dsa_end = A_dsa_end;
+ hostdata->dsa_msgin = A_dsa_msgin;
+ hostdata->dsa_msgout = A_dsa_msgout;
+ hostdata->dsa_msgout_other = A_dsa_msgout_other;
+ hostdata->dsa_next = A_dsa_next;
+ hostdata->dsa_select = A_dsa_select;
+ hostdata->dsa_start = Ent_dsa_code_template - Ent_dsa_zero;
+ hostdata->dsa_status = A_dsa_status;
+ hostdata->dsa_jump_dest = Ent_dsa_code_fix_jump - Ent_dsa_zero +
+ 8 /* destination operand */;
+
+ /* sanity check */
+ if (A_dsa_fields_start != Ent_dsa_code_template_end -
+ Ent_dsa_zero)
+ printk("scsi%d : NCR dsa_fields start is %d not %d\n",
+ host->host_no, A_dsa_fields_start, Ent_dsa_code_template_end -
+ Ent_dsa_zero);
+
+ printk("scsi%d : NCR code relocated to 0x%lx (virt 0x%p)\n", host->host_no,
+ virt_to_bus(hostdata->script), hostdata->script);
+}
+
+/*
+ * Function : static int NCR53c8xx_run_tests (struct Scsi_Host *host)
+ *
+ * Purpose : run various verification tests on the NCR chip,
+ * including interrupt generation, and proper bus mastering
+ * operation.
+ *
+ * Inputs : host - a properly initialized Scsi_Host structure
+ *
+ * Preconditions : the NCR chip must be in a halted state.
+ *
+ * Returns : 0 if all tests were successful, -1 on error.
+ *
+ */
+
+static int
+NCR53c8xx_run_tests (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long timeout;
+ u32 start;
+ int failed, i;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ /* The NCR chip _must_ be idle to run the test scripts */
+
+ save_flags(flags);
+ cli();
+ if (!hostdata->idle) {
+ printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+
+ /*
+ * Check for functional interrupts, this could work as an
+ * autoprobe routine.
+ */
+
+ if ((hostdata->options & OPTION_DEBUG_TEST1) &&
+ hostdata->state != STATE_DISABLED) {
+ hostdata->idle = 0;
+ hostdata->test_running = 1;
+ hostdata->test_completed = -1;
+ hostdata->test_dest = 0;
+ hostdata->test_source = 0xdeadbeef;
+ start = virt_to_bus (hostdata->script) + hostdata->E_test_1;
+ hostdata->state = STATE_RUNNING;
+ printk ("scsi%d : test 1", host->host_no);
+ NCR53c7x0_write32 (DSP_REG, start);
+ printk (" started\n");
+ sti();
+
+ /*
+ * This is currently a .5 second timeout, since (in theory) no slow
+ * board will take that long. In practice, we've seen one
+ * pentium which occasionally fails with this, but works with
+ * a timeout ten times as long.
+ */
+
+ timeout = jiffies + 5 * HZ / 10;
+ while ((hostdata->test_completed == -1) && jiffies < timeout)
+ barrier();
+
+ failed = 1;
+ if (hostdata->test_completed == -1)
+ printk ("scsi%d : driver test 1 timed out%s\n",host->host_no ,
+ (hostdata->test_dest == 0xdeadbeef) ?
+ " due to lost interrupt.\n"
+ " Please verify that the correct IRQ is being used for your board,\n"
+ " and that the motherboard IRQ jumpering matches the PCI setup on\n"
+ " PCI systems.\n"
+ " If you are using a NCR53c810 board in a PCI system, you should\n"
+ " also verify that the board is jumpered to use PCI INTA, since\n"
+ " most PCI motherboards lack support for INTB, INTC, and INTD.\n"
+ : "");
+ else if (hostdata->test_completed != 1)
+ printk ("scsi%d : test 1 bad interrupt value (%d)\n",
+ host->host_no, hostdata->test_completed);
+ else
+ failed = (hostdata->test_dest != 0xdeadbeef);
+
+ if (hostdata->test_dest != 0xdeadbeef) {
+ printk ("scsi%d : driver test 1 read 0x%x instead of 0xdeadbeef indicating a\n"
+ " probable cache invalidation problem. Please configure caching\n"
+ " as write-through or disabled\n",
+ host->host_no, hostdata->test_dest);
+ }
+
+ if (failed) {
+ printk ("scsi%d : DSP = 0x%p (script at 0x%p, start at 0x%x)\n",
+ host->host_no, bus_to_virt(NCR53c7x0_read32(DSP_REG)),
+ hostdata->script, start);
+ printk ("scsi%d : DSPS = 0x%x\n", host->host_no,
+ NCR53c7x0_read32(DSPS_REG));
+ restore_flags(flags);
+ return -1;
+ }
+ hostdata->test_running = 0;
+ }
+
+ if ((hostdata->options & OPTION_DEBUG_TEST2) &&
+ hostdata->state != STATE_DISABLED) {
+ u32 dsa[48];
+ unsigned char identify = IDENTIFY(0, 0);
+ unsigned char cmd[6];
+ unsigned char data[36];
+ unsigned char status = 0xff;
+ unsigned char msg = 0xff;
+
+ cmd[0] = INQUIRY;
+ cmd[1] = cmd[2] = cmd[3] = cmd[5] = 0;
+ cmd[4] = sizeof(data);
+
+ dsa[2] = 1;
+ dsa[3] = virt_to_bus(&identify);
+ dsa[4] = 6;
+ dsa[5] = virt_to_bus(&cmd);
+ dsa[6] = sizeof(data);
+ dsa[7] = virt_to_bus(&data);
+ dsa[8] = 1;
+ dsa[9] = virt_to_bus(&status);
+ dsa[10] = 1;
+ dsa[11] = virt_to_bus(&msg);
+
+ for (i = 0; i < 3; ++i) {
+ cli();
+ if (!hostdata->idle) {
+ printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+
+ /* SCNTL3 SDID */
+ dsa[0] = (0x33 << 24) | (i << 16) ;
+ hostdata->idle = 0;
+ hostdata->test_running = 2;
+ hostdata->test_completed = -1;
+ start = virt_to_bus(hostdata->script) + hostdata->E_test_2;
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSA_REG, virt_to_bus(dsa));
+ NCR53c7x0_write32 (DSP_REG, start);
+ sti();
+
+ timeout = jiffies + 5 * HZ; /* arbitrary */
+ while ((hostdata->test_completed == -1) && jiffies < timeout)
+ barrier();
+ NCR53c7x0_write32 (DSA_REG, 0);
+
+ if (hostdata->test_completed == 2) {
+ data[35] = 0;
+ printk ("scsi%d : test 2 INQUIRY to target %d, lun 0 : %s\n",
+ host->host_no, i, data + 8);
+ printk ("scsi%d : status ", host->host_no);
+ print_status (status);
+ printk ("\nscsi%d : message ", host->host_no);
+ print_msg (&msg);
+ printk ("\n");
+ } else if (hostdata->test_completed == 3) {
+ printk("scsi%d : test 2 no connection with target %d\n",
+ host->host_no, i);
+ if (!hostdata->idle) {
+ printk("scsi%d : not idle\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+ } else if (hostdata->test_completed == -1) {
+ printk ("scsi%d : test 2 timed out\n", host->host_no);
+ restore_flags(flags);
+ return -1;
+ }
+ hostdata->test_running = 0;
+ }
+ }
+
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Function : static void NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : copy the NCR53c8xx dsa structure into cmd's dsa buffer,
+ * performing all necessary relocation.
+ *
+ * Inputs : cmd, a NCR53c7x0_cmd structure with a dsa area large
+ * enough to hold the NCR53c8xx dsa.
+ */
+
+static void
+NCR53c8xx_dsa_fixup (struct NCR53c7x0_cmd *cmd) {
+ Scsi_Cmnd *c = cmd->cmd;
+ struct Scsi_Host *host = c->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i;
+
+ memcpy (cmd->dsa, hostdata->script + (hostdata->E_dsa_code_template / 4),
+ hostdata->E_dsa_code_template_end - hostdata->E_dsa_code_template);
+
+ /*
+ * Note : within the NCR 'C' code, dsa points to the _start_
+ * of the DSA structure, and _not_ the offset of dsa_zero within
+ * that structure used to facilitate shorter signed offsets
+ * for the 8 bit ALU.
+ *
+ * The implications of this are that
+ *
+ * - 32 bit A_dsa_* absolute values require an additional
+ * dsa_zero added to their value to be correct, since they are
+ * relative to dsa_zero which is in essentially a separate
+ * space from the code symbols.
+ *
+ * - All other symbols require no special treatment.
+ */
+
+ patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_lun, c->lun);
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_next, virt_to_bus(&cmd->dsa_next_addr));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_next, virt_to_bus(cmd->dsa) + Ent_dsa_zero -
+ Ent_dsa_code_template + A_dsa_next);
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_sync, virt_to_bus((void *)hostdata->sync[c->target].script));
+ patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_target, c->target);
+ /* XXX - new pointer stuff */
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_saved_pointer, virt_to_bus(&cmd->saved_data_pointer));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_saved_residual, virt_to_bus(&cmd->saved_residual));
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_residual, virt_to_bus(&cmd->residual));
+
+ /* XXX - new start stuff */
+ patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
+ dsa_temp_addr_dsa_value, virt_to_bus(&cmd->dsa_addr));
+
+}
+
+/*
+ * Function : run_process_issue_queue (void)
+ *
+ * Purpose : ensure that the coroutine is running and will process our
+ * request. process_issue_queue_running is checked/set here (in an
+ * inline function) rather than in process_issue_queue itself to reduce
+ * the chances of stack overflow.
+ *
+ */
+
+static volatile int process_issue_queue_running = 0;
+
+static __inline__ void
+run_process_issue_queue(void) {
+ unsigned long flags;
+ save_flags (flags);
+ cli();
+ if (!process_issue_queue_running) {
+ process_issue_queue_running = 1;
+ process_issue_queue(flags);
+ /*
+ * process_issue_queue_running is cleared in process_issue_queue
+ * once it can't do more work, and process_issue_queue exits with
+ * interrupts disabled.
+ */
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Function : static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int
+ * result)
+ *
+ * Purpose : mark SCSI command as finished, OR'ing the host portion
+ * of the result word into the result field of the corresponding
+ * Scsi_Cmnd structure, and removing it from the internal queues.
+ *
+ * Inputs : cmd - command, result - entire result field
+ *
+ * Preconditions : the NCR chip should be in a halted state when
+ * abnormal_finished is run, since it modifies structures which
+ * the NCR expects to have exclusive access to.
+ */
+
+static void
+abnormal_finished (struct NCR53c7x0_cmd *cmd, int result) {
+ Scsi_Cmnd *c = cmd->cmd;
+ struct Scsi_Host *host = c->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ int left, found;
+ volatile struct NCR53c7x0_cmd * linux_search;
+ volatile struct NCR53c7x0_cmd * volatile *linux_prev;
+ volatile u32 *ncr_prev, *current, ncr_search;
+
+#if 0
+ printk ("scsi%d: abnormal finished\n", host->host_no);
+#endif
+
+ save_flags(flags);
+ cli();
+ found = 0;
+ /*
+ * Traverse the NCR issue array until we find a match or run out
+ * of instructions. Instructions in the NCR issue array are
+ * either JUMP or NOP instructions, which are 2 words in length.
+ */
+
+
+ for (found = 0, left = host->can_queue, current = hostdata->schedule;
+ left > 0; --left, current += 2)
+ {
+ if (issue_to_cmd (host, hostdata, (u32 *) current) == cmd)
+ {
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ ++found;
+ break;
+ }
+ }
+
+ /*
+ * Traverse the NCR reconnect list of DSA structures until we find
+ * a pointer to this dsa or have found too many command structures.
+ * We let prev point at the next field of the previous element or
+ * head of the list, so we don't do anything different for removing
+ * the head element.
+ */
+
+ for (left = host->can_queue,
+ ncr_search = hostdata->reconnect_dsa_head,
+ ncr_prev = &hostdata->reconnect_dsa_head;
+ left >= 0 && ncr_search &&
+ ((char*)bus_to_virt(ncr_search) + hostdata->dsa_start)
+ != (char *) cmd->dsa;
+ ncr_prev = (u32*) ((char*)bus_to_virt(ncr_search) +
+ hostdata->dsa_next), ncr_search = *ncr_prev, --left);
+
+ if (left < 0)
+ printk("scsi%d: loop detected in ncr reconnect list\n",
+ host->host_no);
+ else if (ncr_search)
+ if (found)
+ printk("scsi%d: scsi %ld in ncr issue array and reconnect lists\n",
+ host->host_no, c->pid);
+ else {
+ volatile u32 * next = (u32 *)
+ ((char *)bus_to_virt(ncr_search) + hostdata->dsa_next);
+ *ncr_prev = *next;
+/* If we're at the tail end of the issue queue, update that pointer too. */
+ found = 1;
+ }
+
+ /*
+ * Traverse the host running list until we find this command or discover
+ * we have too many elements, pointing linux_prev at the next field of the
+ * previous element (or at the head of the list) and linux_search at the
+ * element itself.
+ */
+
+ for (left = host->can_queue, linux_search = hostdata->running_list,
+ linux_prev = &hostdata->running_list;
+ left >= 0 && linux_search && linux_search != cmd;
+ linux_prev = &(linux_search->next),
+ linux_search = linux_search->next, --left);
+
+ if (left < 0)
+ printk ("scsi%d: loop detected in host running list for scsi pid %ld\n",
+ host->host_no, c->pid);
+ else if (linux_search) {
+ *linux_prev = linux_search->next;
+ --hostdata->busy[c->target][c->lun];
+ }
+
+ /* Return the NCR command structure to the free list */
+ cmd->next = hostdata->free;
+ hostdata->free = cmd;
+ c->host_scribble = NULL;
+
+ /* And return */
+ c->result = result;
+ c->scsi_done(c);
+
+ restore_flags(flags);
+ run_process_issue_queue();
+}
+
+/*
+ * Function : static void intr_break (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handler for breakpoint interrupts from a SCSI script
+ *
+ * Inputs : host - pointer to this host adapter's structure,
+ * cmd - pointer to the command (if any) dsa was pointing
+ * to.
+ *
+ */
+
+static void
+intr_break (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_break *bp;
+#if 0
+ Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
+#endif
+ u32 *dsp;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ /*
+ * Find the break point corresponding to this address, and
+ * dump the appropriate debugging information to standard
+ * output.
+ */
+ save_flags(flags);
+ cli();
+ dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ for (bp = hostdata->breakpoints; bp && bp->address != dsp;
+ bp = bp->next);
+ if (!bp)
+ panic("scsi%d : break point interrupt from %p with no breakpoint!",
+ host->host_no, dsp);
+
+ /*
+ * Configure the NCR chip for manual start mode, so that we can
+ * point the DSP register at the instruction that follows the
+ * INT int_debug_break instruction.
+ */
+
+ NCR53c7x0_write8 (hostdata->dmode,
+ NCR53c7x0_read8(hostdata->dmode)|DMODE_MAN);
+
+ /*
+ * And update the DSP register, using the size of the old
+ * instruction in bytes.
+ */
+
+ restore_flags(flags);
+}
+/*
+ * Function : static void print_synchronous (const char *prefix,
+ * const unsigned char *msg)
+ *
+ * Purpose : print a pretty, user and machine parsable representation
+ * of a SDTR message, including the "real" parameters and data
+ * clock, so we can tell the transfer rate at a glance.
+ *
+ * Inputs ; prefix - text to prepend, msg - SDTR message (5 bytes)
+ */
+
+static void
+print_synchronous (const char *prefix, const unsigned char *msg) {
+ if (msg[4]) {
+ int Hz = 1000000000 / (msg[3] * 4);
+ int integer = Hz / 1000000;
+ int fraction = (Hz - (integer * 1000000)) / 10000;
+ printk ("%speriod %dns offset %d %d.%02dMHz %s SCSI%s\n",
+ prefix, (int) msg[3] * 4, (int) msg[4], integer, fraction,
+ (((msg[3] * 4) < 200) ? "FAST" : "synchronous"),
+ (((msg[3] * 4) < 200) ? "-II" : ""));
+ } else
+ printk ("%sasynchronous SCSI\n", prefix);
+}
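+
+ /*
+ * Example (assumed message contents, for illustration only) : an SDTR
+ * message with msg[3] = 25 and msg[4] = 8 describes a 100ns period with
+ * an offset of 8, so Hz = 1000000000 / 100 = 10000000 and the routine
+ * above prints (after the caller's prefix)
+ * "period 100ns offset 8 10.00MHz FAST SCSI-II".
+ */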
+
+/*
+ * Function : static void set_synchronous (struct Scsi_Host *host,
+ * int target, int sxfer, int scntl3, int now_connected)
+ *
+ * Purpose : reprogram transfers between the selected SCSI initiator and
+ * target with the given register values; in the indirect
+ * select operand, reselection script, and chip registers.
+ *
+ * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
+ * sxfer and scntl3 - NCR registers. now_connected - if non-zero,
+ * we should reprogram the registers now too.
+ */
+
+static void
+set_synchronous (struct Scsi_Host *host, int target, int sxfer, int scntl3,
+ int now_connected) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *script;
+ NCR53c7x0_local_setup(host);
+
+ /* These are eight bit registers */
+ sxfer &= 0xff;
+ scntl3 &= 0xff;
+
+ hostdata->sync[target].sxfer_sanity = sxfer;
+ hostdata->sync[target].scntl3_sanity = scntl3;
+
+/*
+ * HARD CODED : synchronous script is EIGHT words long. This
+ * must agree with 53c7.8xx.h
+ */
+
+ if ((hostdata->chip != 700) && (hostdata->chip != 70066)) {
+ hostdata->sync[target].select_indirect = (scntl3 << 24) |
+ (target << 16) | (sxfer << 8);
+
+ script = (u32 *) hostdata->sync[target].script;
+
+ /* XXX - add NCR53c7x0 code to reprogram SCF bits if we want to */
+ if ((hostdata->chip / 100) == 8) {
+ script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
+ DCMD_RWRI_OP_MOVE) << 24) |
+ (SCNTL3_REG_800 << 16) | (scntl3 << 8);
+ script[1] = 0;
+ script += 2;
+ }
+
+ script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
+ DCMD_RWRI_OP_MOVE) << 24) |
+ (SXFER_REG << 16) | (sxfer << 8);
+ script[1] = 0;
+ script += 2;
+
+#ifdef DEBUG_SYNC_INTR
+ if (hostdata->options & OPTION_DEBUG_DISCONNECT) {
+ script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_INT) << 24) | DBC_TCI_TRUE;
+ script[1] = DEBUG_SYNC_INTR;
+ script += 2;
+ }
+#endif
+
+ script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_RETURN) << 24) | DBC_TCI_TRUE;
+ script[1] = 0;
+ script += 2;
+ }
+
+ if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
+ printk ("scsi%d : target %d sync parameters are sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, target, sxfer, scntl3);
+
+ if (now_connected) {
+ if ((hostdata->chip / 100) == 8)
+ NCR53c7x0_write8(SCNTL3_REG_800, scntl3);
+ NCR53c7x0_write8(SXFER_REG, sxfer);
+ }
+}
+
+
+/*
+ * Function : static int asynchronous (struct Scsi_Host *host, int target)
+ *
+ * Purpose : reprogram transfers between the selected SCSI Host adapter and target
+ * (assumed to be currently connected) for asynchronous transfers.
+ *
+ * Inputs : host - SCSI host structure, target - numeric target ID.
+ *
+ * Preconditions : the NCR chip should be in one of the halted states
+ */
+
+static void
+asynchronous (struct Scsi_Host *host, int target) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ NCR53c7x0_local_setup(host);
+ set_synchronous (host, target, /* no offset */ 0, hostdata->saved_scntl3,
+ 1);
+ printk ("scsi%d : setting target %d to asynchronous SCSI\n",
+ host->host_no, target);
+}
+
+/*
+ * XXX - do we want to go out of our way (ie, add extra code to selection
+ * in the NCR53c710/NCR53c720 script) to reprogram the synchronous
+ * conversion bits, or can we be content in just setting the
+ * sxfer bits?
+ */
+
+/* Table for NCR53c8xx synchronous values */
+static const struct {
+ int div; /* Total clock divisor * 10 */
+ unsigned char scf; /* Synchronous clock conversion factor (SCNTL3 SCF field) */
+ unsigned char tp; /* 4 + tp = xferp divisor */
+} syncs[] = {
+/* div scf tp div scf tp div scf tp */
+ { 40, 1, 0}, { 50, 1, 1}, { 60, 1, 2},
+ { 70, 1, 3}, { 75, 2, 1}, { 80, 1, 4},
+ { 90, 1, 5}, { 100, 1, 6}, { 105, 2, 3},
+ { 110, 1, 7}, { 120, 2, 4}, { 135, 2, 5},
+ { 140, 3, 3}, { 150, 2, 6}, { 160, 3, 4},
+ { 165, 2, 7}, { 180, 3, 5}, { 200, 3, 6},
+ { 210, 4, 3}, { 220, 3, 7}, { 240, 4, 4},
+ { 270, 4, 5}, { 300, 4, 6}, { 330, 4, 7}
+};
+
+/*
+ * Function : static void synchronous (struct Scsi_Host *host, int target,
+ * char *msg)
+ *
+ * Purpose : reprogram transfers between the selected SCSI initiator and
+ * target for synchronous SCSI transfers such that the synchronous
+ * offset is less than that requested and period at least as long
+ * as that requested. Also modify *msg such that it contains
+ * an appropriate response.
+ *
+ * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
+ * msg - synchronous transfer request.
+ */
+
+
+static void
+synchronous (struct Scsi_Host *host, int target, char *msg) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int desire, divisor, i, limit;
+ unsigned char scntl3, sxfer;
+/* The diagnostic message fits on one line, even with max. width integers */
+ char buf[80];
+
+/* Desired transfer clock in Hz */
+ desire = 1000000000L / (msg[3] * 4);
+/* Scale the available SCSI clock by 10 so we get tenths */
+ divisor = (hostdata->scsi_clock * 10) / desire;
+
+/* NCR chips can handle at most an offset of 8 */
+ if (msg[4] > 8)
+ msg[4] = 8;
+
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk("scsi%d : optimal synchronous divisor of %d.%01d\n",
+ host->host_no, divisor / 10, divisor % 10);
+
+ limit = (sizeof(syncs) / sizeof(syncs[0]) -1);
+ for (i = 0; (i < limit) && (divisor > syncs[i].div); ++i);
+
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk("scsi%d : selected synchronous divisor of %d.%01d\n",
+ host->host_no, syncs[i].div / 10, syncs[i].div % 10);
+
+ msg[3] = ((1000000000L / hostdata->scsi_clock) * syncs[i].div / 10 / 4);
+
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk("scsi%d : selected synchronous period of %dns\n", host->host_no,
+ msg[3] * 4);
+
+ scntl3 = (hostdata->chip / 100 == 8) ? ((hostdata->saved_scntl3 &
+ ~SCNTL3_800_SCF_MASK) | (syncs[i].scf << SCNTL3_800_SCF_SHIFT)) : 0;
+ sxfer = (msg[4] << SXFER_MO_SHIFT) | ((syncs[i].tp) << SXFER_TP_SHIFT);
+ if (hostdata->options & OPTION_DEBUG_SDTR)
+ printk ("scsi%d : sxfer=0x%x scntl3=0x%x\n",
+ host->host_no, (int) sxfer, (int) scntl3);
+ set_synchronous (host, target, sxfer, scntl3, 1);
+ sprintf (buf, "scsi%d : setting target %d to ", host->host_no, target);
+ print_synchronous (buf, msg);
+}
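+
+ /*
+ * Worked example (assuming a 40MHz SCSI clock, which is hypothetical
+ * here) : a request of msg[3] = 25 (100ns period) gives desire =
+ * 10000000 Hz and divisor = 400000000 / 10000000 = 40, which selects
+ * the first syncs[] entry { 40, 1, 0 }. The reply period is then
+ * (1000000000 / 40000000) * 40 / 10 / 4 = 25, i.e. the requested
+ * 100ns period is granted with scf = 1 and tp = 0.
+ */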
+
+/*
+ * Function : static int NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handler for INT generated instructions for the
+ * NCR53c810/820 SCSI SCRIPT
+ *
+ * Inputs : host - pointer to this host adapter's structure,
+ * cmd - pointer to the command (if any) dsa was pointing
+ * to.
+ *
+ */
+
+static int
+NCR53c8x0_dstat_sir_intr (struct Scsi_Host *host, struct
+ NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ int print;
+ Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 dsps,*dsp; /* Argument of the INT instruction */
+ NCR53c7x0_local_setup(host);
+ dsps = NCR53c7x0_read32(DSPS_REG);
+ dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : DSPS = 0x%x\n", host->host_no, dsps);
+
+ switch (dsps) {
+ case A_int_msg_1:
+ print = 1;
+ switch (hostdata->msg_buf[0]) {
+ /*
+ * Unless we've initiated synchronous negotiation, I don't
+ * think that this should happen.
+ */
+ case MESSAGE_REJECT:
+ hostdata->dsp = hostdata->script + hostdata->E_accept_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ if (cmd && (cmd->flags & CMD_FLAG_SDTR)) {
+ printk ("scsi%d : target %d rejected SDTR\n", host->host_no,
+ c->target);
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ asynchronous (host, c->target);
+ print = 0;
+ }
+ break;
+ case INITIATE_RECOVERY:
+ printk ("scsi%d : extended contingent allegiance not supported yet, rejecting\n",
+ host->host_no);
+ /* Same action as the default case below : reject the message */
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ break;
+ default:
+ printk ("scsi%d : unsupported message, rejecting\n",
+ host->host_no);
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ }
+ if (print) {
+ printk ("scsi%d : received message", host->host_no);
+ if (c)
+ printk (" from target %d lun %d ", c->target, c->lun);
+ print_msg ((unsigned char *) hostdata->msg_buf);
+ printk("\n");
+ }
+
+ return SPECIFIC_INT_NOTHING;
+
+
+ case A_int_msg_sdtr:
+/*
+ * At this point, hostdata->msg_buf contains
+ * 0 EXTENDED MESSAGE
+ * 1 length
+ * 2 SDTR
+ * 3 period * 4ns
+ * 4 offset
+ */
+
+ if (cmd) {
+ char buf[80];
+ sprintf (buf, "scsi%d : target %d %s ", host->host_no, c->target,
+ (cmd->flags & CMD_FLAG_SDTR) ? "accepting" : "requesting");
+ print_synchronous (buf, (unsigned char *) hostdata->msg_buf);
+
+ /*
+ * Initiator initiated, won't happen unless synchronous
+ * transfers are enabled. If we get a SDTR message in
+ * response to our SDTR, we should program our parameters
+ * such that
+ * offset <= requested offset
+ * period >= requested period
+ */
+ if (cmd->flags & CMD_FLAG_SDTR) {
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ if (hostdata->msg_buf[4])
+ synchronous (host, c->target, (unsigned char *)
+ hostdata->msg_buf);
+ else
+ asynchronous (host, c->target);
+ hostdata->dsp = hostdata->script + hostdata->E_accept_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ } else {
+ if (hostdata->options & OPTION_SYNCHRONOUS) {
+ cmd->flags |= CMD_FLAG_DID_SDTR;
+ synchronous (host, c->target, (unsigned char *)
+ hostdata->msg_buf);
+ } else {
+ hostdata->msg_buf[4] = 0; /* 0 offset = async */
+ asynchronous (host, c->target);
+ }
+ patch_dsa_32 (cmd->dsa, dsa_msgout_other, 0, 5);
+ patch_dsa_32 (cmd->dsa, dsa_msgout_other, 1, (u32)
+ virt_to_bus ((void *)&hostdata->msg_buf));
+ hostdata->dsp = hostdata->script +
+ hostdata->E_respond_message / sizeof(u32);
+ hostdata->dsp_changed = 1;
+ }
+ return SPECIFIC_INT_NOTHING;
+ }
+ /* Fall through to abort if we couldn't find a cmd, and
+ therefore a dsa structure to twiddle */
+ case A_int_msg_wdtr:
+ hostdata->dsp = hostdata->script + hostdata->E_reject_message /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_err_unexpected_phase:
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : unexpected phase\n", host->host_no);
+ return SPECIFIC_INT_ABORT;
+ case A_int_err_selected:
+ printk ("scsi%d : selected by target %d\n", host->host_no,
+ (int) NCR53c7x0_read8(SDID_REG_800) &7);
+ hostdata->dsp = hostdata->script + hostdata->E_target_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_err_unexpected_reselect:
+ printk ("scsi%d : unexpected reselect by target %d lun %d\n",
+ host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & 7,
+ hostdata->reselected_identify & 7);
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+/*
+ * Since contingent allegiance conditions are cleared by the next
+ * command issued to a target, we must issue a REQUEST SENSE
+ * command after receiving a CHECK CONDITION status, before
+ * another command is issued.
+ *
+ * Since this NCR53c7x0_cmd will be freed after use, we don't
+ * care if we step on the various fields, so modify a few things.
+ */
+ case A_int_err_check_condition:
+#if 0
+ if (hostdata->options & OPTION_DEBUG_INTR)
+#endif
+ printk ("scsi%d : CHECK CONDITION\n", host->host_no);
+ if (!c) {
+ printk("scsi%d : CHECK CONDITION with no SCSI command\n",
+ host->host_no);
+ return SPECIFIC_INT_PANIC;
+ }
+
+ /*
+ * FIXME : this uses the normal one-byte selection message.
+ * We may want to renegotiate for synchronous & WIDE transfers
+ * since these could be the crux of our problem.
+ *
+ * FIXME : once SCSI-II tagged queuing is implemented, we'll
+ * have to set this up so that the rest of the DSA
+ * agrees with this being an untagged queue'd command.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_msgout, 0, 1);
+
+ /*
+ * Modify the table indirect for COMMAND OUT phase, since
+ * Request Sense is a six byte command.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_cmdout, 0, 6);
+
+ c->cmnd[0] = REQUEST_SENSE;
+ c->cmnd[1] &= 0xe0; /* Zero all but LUN */
+ c->cmnd[2] = 0;
+ c->cmnd[3] = 0;
+ c->cmnd[4] = sizeof(c->sense_buffer);
+ c->cmnd[5] = 0;
+
+ /*
+ * Disable dataout phase, and program datain to transfer to the
+ * sense buffer, and add a jump to other_transfer after the
+ * command so overflow/underrun conditions are detected.
+ */
+
+ patch_dsa_32 (cmd->dsa, dsa_dataout, 0,
+ virt_to_bus(hostdata->script) + hostdata->E_other_transfer);
+ patch_dsa_32 (cmd->dsa, dsa_datain, 0,
+ virt_to_bus(cmd->data_transfer_start));
+ cmd->data_transfer_start[0] = (((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
+ DCMD_BMI_IO)) << 24) | sizeof(c->sense_buffer);
+ cmd->data_transfer_start[1] = (u32) virt_to_bus(c->sense_buffer);
+
+ cmd->data_transfer_start[2] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP)
+ << 24) | DBC_TCI_TRUE;
+ cmd->data_transfer_start[3] = (u32) virt_to_bus(hostdata->script) +
+ hostdata->E_other_transfer;
+
+ /*
+ * Currently, this command is flagged as completed, ie
+ * it has valid status and message data. Reflag it as
+ * incomplete. Q - need to do something so that original
+ * status, etc are used.
+ */
+
+ cmd->cmd->result = 0xffff;
+
+ /*
+ * Restart command as a REQUEST SENSE.
+ */
+ hostdata->dsp = (u32 *) hostdata->script + hostdata->E_select /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ return SPECIFIC_INT_NOTHING;
+ case A_int_debug_break:
+ return SPECIFIC_INT_BREAK;
+ case A_int_norm_aborted:
+ hostdata->dsp = (u32 *) hostdata->schedule;
+ hostdata->dsp_changed = 1;
+ if (cmd)
+ abnormal_finished (cmd, DID_ERROR << 16);
+ return SPECIFIC_INT_NOTHING;
+ case A_int_test_1:
+ case A_int_test_2:
+ hostdata->idle = 1;
+ hostdata->test_completed = (dsps - A_int_test_1) / 0x00010000 + 1;
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk("scsi%d : test%d complete\n", host->host_no,
+ hostdata->test_completed);
+ return SPECIFIC_INT_NOTHING;
+#ifdef A_int_debug_reselected_ok
+ case A_int_debug_reselected_ok:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ u32 *dsa;
+ dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
+
+ printk("scsi%d : reselected_ok (DSA = 0x%x (virt 0x%p)\n",
+ host->host_no, NCR53c7x0_read32(DSA_REG), dsa);
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt(cmd->saved_data_pointer));
+ print_insn (host, hostdata->script + Ent_reselected_ok /
+ sizeof(u32), "", 1);
+ printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, NCR53c7x0_read8(SXFER_REG),
+ NCR53c7x0_read8(SCNTL3_REG_800));
+ if (c) {
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script, "", 1);
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script + 2, "", 1);
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_reselect_check
+ case A_int_debug_reselect_check:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ u32 *dsa;
+#if 0
+ u32 *code;
+#endif
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
+ printk("scsi%d : reselected_check_next (DSA = 0x%lx (virt 0x%p))\n",
+ host->host_no, virt_to_bus(dsa), dsa);
+ if (dsa) {
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer));
+#if 0
+ printk("scsi%d : template code :\n", host->host_no);
+ for (code = dsa + (Ent_dsa_code_check_reselect - Ent_dsa_zero)
+ / sizeof(u32); code < (dsa + Ent_dsa_zero / sizeof(u32));
+ code += print_insn (host, code, "", 1));
+#endif
+ }
+ print_insn (host, hostdata->script + Ent_reselected_ok /
+ sizeof(u32), "", 1);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_dsa_schedule
+ case A_int_debug_dsa_schedule:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ u32 *dsa;
+ /*
+ * Note - this dsa is not based on location relative to
+ * the command structure, but to location relative to the
+ * DSA register
+ */
+ dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
+ printk("scsi%d : dsa_schedule (old DSA = 0x%lx (virt 0x%p))\n",
+ host->host_no, virt_to_bus(dsa), dsa);
+ if (dsa)
+ printk("scsi%d : resume address is 0x%x (virt 0x%p)\n"
+ " (temp was 0x%x (virt 0x%p))\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer),
+ NCR53c7x0_read32 (TEMP_REG),
+ bus_to_virt (NCR53c7x0_read32(TEMP_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_scheduled
+ case A_int_debug_scheduled:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : new I/O 0x%x (virt 0x%p) scheduled\n",
+ host->host_no, NCR53c7x0_read32(DSA_REG),
+ bus_to_virt(NCR53c7x0_read32(DSA_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_idle
+ case A_int_debug_idle:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : idle\n", host->host_no);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_cmd
+ case A_int_debug_cmd:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : command sent\n");
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_dsa_loaded
+ case A_int_debug_dsa_loaded:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ printk("scsi%d : DSA loaded with 0x%x (virt 0x%p)\n", host->host_no,
+ NCR53c7x0_read32(DSA_REG),
+ bus_to_virt(NCR53c7x0_read32(DSA_REG)));
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_reselected
+ case A_int_debug_reselected:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk("scsi%d : reselected by target %d lun %d\n",
+ host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & ~0x80,
+ (int) hostdata->reselected_identify & 7);
+ print_queues(host);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_disconnect_msg
+ case A_int_debug_disconnect_msg:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
+ if (c)
+ printk("scsi%d : target %d lun %d disconnecting\n",
+ host->host_no, c->target, c->lun);
+ else
+ printk("scsi%d : unknown target disconnecting\n",
+ host->host_no);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_disconnected
+ case A_int_debug_disconnected:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk ("scsi%d : disconnected, new queues are\n",
+ host->host_no);
+ print_queues(host);
+#if 0
+ printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, NCR53c7x0_read8(SXFER_REG),
+ NCR53c7x0_read8(SCNTL3_REG_800));
+#endif
+ if (c) {
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script, "", 1);
+ print_insn (host, (u32 *)
+ hostdata->sync[c->target].script + 2, "", 1);
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_panic
+ case A_int_debug_panic:
+ printk("scsi%d : int_debug_panic received\n", host->host_no);
+ print_lots (host);
+ return SPECIFIC_INT_PANIC;
+#endif
+#ifdef A_int_debug_saved
+ case A_int_debug_saved:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ printk ("scsi%d : saved data pointer 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer,
+ bus_to_virt (cmd->saved_data_pointer));
+ print_progress (c);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_restored
+ case A_int_debug_restored:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT)) {
+ if (cmd) {
+ int size;
+ printk ("scsi%d : restored data pointer 0x%x (virt 0x%p)\n",
+ host->host_no, cmd->saved_data_pointer, bus_to_virt (
+ cmd->saved_data_pointer));
+ size = print_insn (host, (u32 *)
+ bus_to_virt(cmd->saved_data_pointer), "", 1);
+ size = print_insn (host, (u32 *)
+ bus_to_virt(cmd->saved_data_pointer) + size, "", 1);
+ print_progress (c);
+ }
+#if 0
+ printk ("scsi%d : datapath residual %d\n",
+ host->host_no, datapath_residual (host)) ;
+#endif
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_sync
+ case A_int_debug_sync:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
+ unsigned char sxfer = NCR53c7x0_read8 (SXFER_REG),
+ scntl3 = NCR53c7x0_read8 (SCNTL3_REG_800);
+ if (c) {
+ if (sxfer != hostdata->sync[c->target].sxfer_sanity ||
+ scntl3 != hostdata->sync[c->target].scntl3_sanity) {
+ printk ("scsi%d : sync sanity check failed sxfer=0x%x, scntl3=0x%x",
+ host->host_no, sxfer, scntl3);
+ NCR53c7x0_write8 (SXFER_REG, sxfer);
+ NCR53c7x0_write8 (SCNTL3_REG_800, scntl3);
+ }
+ } else
+ printk ("scsi%d : unknown command sxfer=0x%x, scntl3=0x%x\n",
+ host->host_no, (int) sxfer, (int) scntl3);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+#ifdef A_int_debug_datain
+ case A_int_debug_datain:
+ if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
+ OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
+ int size;
+ printk ("scsi%d : In do_datain (%s) sxfer=0x%x, scntl3=0x%x\n"
+ " datapath residual=%d\n",
+ host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
+ (int) NCR53c7x0_read8(SXFER_REG),
+ (int) NCR53c7x0_read8(SCNTL3_REG_800),
+ datapath_residual (host)) ;
+ print_insn (host, dsp, "", 1);
+ size = print_insn (host, (u32 *) bus_to_virt(dsp[1]), "", 1);
+ print_insn (host, (u32 *) bus_to_virt(dsp[1]) + size, "", 1);
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+/*
+ * FIXME : for 7xx support, we need to read SDID_REG_700 and handle
+ * the comparison as bitfielded, not binary.
+ */
+#ifdef A_int_debug_check_dsa
+ case A_int_debug_check_dsa:
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
+ int sdid = NCR53c7x0_read8 (SDID_REG_800) & 15;
+ char *where = dsp - NCR53c7x0_insn_size(NCR53c7x0_read8
+ (DCMD_REG)) == hostdata->script +
+ Ent_select_check_dsa / sizeof(u32) ?
+ "selection" : "reselection";
+ if (c && sdid != c->target) {
+ printk ("scsi%d : SDID target %d != DSA target %d at %s\n",
+ host->host_no, sdid, c->target, where);
+ print_lots(host);
+ dump_events (host, 20);
+ return SPECIFIC_INT_PANIC;
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+#endif
+ default:
+ if ((dsps & 0xff000000) == 0x03000000) {
+ printk ("scsi%d : misc debug interrupt 0x%x\n",
+ host->host_no, dsps);
+ return SPECIFIC_INT_RESTART;
+ } else if ((dsps & 0xff000000) == 0x05000000) {
+ if (hostdata->events) {
+ struct NCR53c7x0_event *event;
+ ++hostdata->event_index;
+ if (hostdata->event_index >= hostdata->event_size)
+ hostdata->event_index = 0;
+ event = (struct NCR53c7x0_event *) hostdata->events +
+ hostdata->event_index;
+ event->event = (enum ncr_event) dsps;
+ event->dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ /* FIXME : this needs to change for the '7xx family */
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON)
+ event->target = NCR53c7x0_read8(SSID_REG_800);
+ else
+ event->target = 255;
+
+ if (event->event == EVENT_RESELECT)
+ event->lun = hostdata->reselected_identify & 0xf;
+ else if (c)
+ event->lun = c->lun;
+ else
+ event->lun = 255;
+ do_gettimeofday(&(event->time));
+ if (c) {
+ event->pid = c->pid;
+ memcpy ((void *) event->cmnd, (void *) c->cmnd,
+ sizeof (event->cmnd));
+ } else {
+ event->pid = -1;
+ }
+ }
+ return SPECIFIC_INT_RESTART;
+ }
+
+ printk ("scsi%d : unknown user interrupt 0x%x\n",
+ host->host_no, (unsigned) dsps);
+ return SPECIFIC_INT_PANIC;
+ }
+}
+
+/*
+ * XXX - the stock NCR assembler won't output the scriptu.h file,
+ * which undefines all of the CPP symbols #define'd by the script.h
+ * file; without it you will have problems if you use multiple
+ * scripts with the same symbol names.
+ *
+ * If you insist on using NCR's assembler, you could generate
+ * scriptu.h from script.h using something like
+ *
+ * grep '#define' script.h | \
+ * sed 's/#define[ ][ ]*\([_a-zA-Z][_a-zA-Z0-9]*\).*$/#undef \1/' \
+ * > scriptu.h
+ */
+
+#include "53c8xx_u.h"
+
+/* XXX - add alternate script handling code here */
+
+
+#ifdef NCR_DEBUG
+/*
+ * Debugging without a debugger is no fun. So, I've provided
+ * a debugging interface in the NCR53c7x0 driver. To avoid
+ * kernel cruft, there's just enough here to act as an interface
+ * to a user level debugger (aka, GDB).
+ *
+ *
+ * The following restrictions apply to debugger commands :
+ * 1. The command must be terminated by a newline.
+ * 2. Command length must be less than 80 bytes including the
+ * newline.
+ * 3. The entire command must be written with one system call.
+ */
+
+static const char debugger_help[] =
+"bc <addr> - clear breakpoint\n"
+"bl - list breakpoints\n"
+"bs <addr> - set breakpoint\n"
+"g - start\n"
+"h - halt\n"
+"? - this message\n"
+"i - info\n"
+"mp <addr> <size> - print memory\n"
+"ms <addr> <size> <value> - store memory\n"
+"rp <num> <size> - print register\n"
+"rs <num> <size> <value> - store register\n"
+"s - single step\n"
+"tb - begin trace \n"
+"te - end trace\n";
+
+/*
+ * Whenever we change a break point, we should probably
+ * set the NCR up so that it is in a single step mode.
+ */
+
+static int debugger_fn_bc (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_break *bp, **prev;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ for (bp = (struct NCR53c7x0_break *) hostdata->breakpoints,
+ prev = (struct NCR53c7x0_break **) &hostdata->breakpoints;
+ bp && (bp->address != (u32 *) args[0]);
+ prev = (struct NCR53c7x0_break **) &(bp->next),
+ bp = (struct NCR53c7x0_break *) bp->next);
+
+ if (!bp) {
+ restore_flags(flags);
+ return -EIO;
+ }
+
+ /*
+ * XXX - we need to insure that the processor is halted
+ * here in order to prevent a race condition.
+ */
+
+ memcpy ((void *) bp->address, (void *) bp->old_instruction,
+ sizeof(bp->old_instruction));
+ if (prev)
+ *prev = bp->next;
+
+ restore_flags(flags);
+ return 0;
+}
+
+
+static int
+debugger_fn_bl (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_break *bp;
+ char buf[80];
+ size_t len;
+ unsigned long flags;
+ /*
+ * XXX - we need to insure that the processor is halted
+ * here in order to prevent a race condition. So, if the
+ * processor isn't halted, print an error message and continue.
+ */
+
+ sprintf (buf, "scsi%d : bp : warning : processor not halted\b",
+ host->host_no);
+ debugger_kernel_write (host, buf, strlen(buf));
+
+ save_flags(flags);
+ cli();
+ for (bp = (struct NCR53c7x0_break *) hostdata->breakpoints;
+ bp; bp = (struct NCR53c7x0_break *) bp->next) {
+ sprintf (buf, "scsi%d : bp : success : at %08x, replaces %08x %08x",
+ host->host_no, (u32) bp->address, bp->old_instruction[0],
+ bp->old_instruction[1]);
+ len = strlen(buf);
+ if ((bp->old_instruction[0] & (DCMD_TYPE_MASK << 24)) ==
+ (DCMD_TYPE_MMI << 24)) {
+ sprintf(buf + len, "%08x\n", * (u32 *) bp->address);
+ } else {
+ sprintf(buf + len, "\n");
+ }
+ len = strlen(buf);
+ debugger_kernel_write (host, buf, len);
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+static int
+debugger_fn_bs (struct Scsi_Host *host, struct debugger_token *token,
+ u32 args[]) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_break *bp;
+ char buf[80];
+ size_t len;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+
+ if (hostdata->state != STATE_HALTED) {
+ sprintf (buf, "scsi%d : bs : failure : NCR not halted\n", host->host_no);
+ debugger_kernel_write (host, buf, strlen(buf));
+ restore_flags(flags);
+ return -1;
+ }
+
+ if (!(bp = kmalloc (sizeof (struct NCR53c7x0_break), GFP_ATOMIC))) {
+ printk ("scsi%d : kmalloc(%d) of breakpoint structure failed, try again\n",
+ host->host_no, (int) sizeof(struct NCR53c7x0_break));
+ restore_flags(flags);
+ return -1;
+ }
+
+ bp->address = (u32 *) args[0];
+ memcpy ((void *) bp->old_instruction, (void *) bp->address, 8);
+ bp->old_size = (((bp->old_instruction[0] >> 24) & DCMD_TYPE_MASK) ==
+ DCMD_TYPE_MMI) ? 3 : 2;
+ bp->next = hostdata->breakpoints;
+ hostdata->breakpoints = bp;
+ memcpy ((void *) bp->address, (void *) hostdata->E_debug_break, 8);
+
+ restore_flags(flags);
+ return 0;
+}
+
+#define TOKEN(name,nargs) {#name, nargs, debugger_fn_##name}
+static const struct debugger_token {
+ char *name;
+ int numargs;
+ int (*fn)(struct Scsi_Host *host, struct debugger_token *token, u32 args[]);
+} debugger_tokens[] = {
+ TOKEN(bc,1), TOKEN(bl,0), TOKEN(bs,1), TOKEN(g,0), TOKEN(halt,0),
+ {DT_help, "?", 0} , TOKEN(h,0), TOKEN(i,0), TOKEN(mp,2),
+ TOKEN(ms,3), TOKEN(rp,2), TOKEN(rs,2), TOKEN(s,0), TOKEN(tb,0), TOKEN(te,0)
+};
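+/*
+ * Note : only the bc, bl, and bs handlers are defined in this file; the
+ * remaining TOKEN() entries reference debugger_fn_* routines which are
+ * assumed to be provided elsewhere (or are still unimplemented).
+ */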
+
+#define NDT (sizeof(debugger_tokens) / sizeof(struct debugger_token))
+
+static struct Scsi_Host * inode_to_host (struct inode *inode) {
+ int dev;
+ struct Scsi_Host *host;
+ for (dev = MINOR(inode->i_rdev), host = first_host;
+ host && (host->hostt == the_template); --dev, host = host->next)
+ if (!dev) return host;
+ return NULL;
+}
+
+
+static int
+debugger_user_write (struct inode *inode,struct file *filp,
+ char *buf,int count) {
+ struct Scsi_Host *host; /* This SCSI host */
+ struct NCR53c7x0_hostdata *hostdata;
+ char input_buf[80], /* Kernel space copy of buf */
+ *ptr; /* Pointer to argument list */
+ u32 args[3]; /* Arguments */
+ int i, j, error, len;
+
+ if (!(host = inode_to_host(inode)))
+ return -ENXIO;
+
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+
+ if ((error = verify_area(VERIFY_READ,buf,count)))
+ return error;
+
+ if (count > 80)
+ return -EIO;
+
+ memcpy_from_fs(input_buf, buf, count);
+
+ if (input_buf[count - 1] != '\n')
+ return -EIO;
+
+ input_buf[count - 1]=0;
+
+ for (i = 0; i < NDT; ++i) {
+ len = strlen (debugger_tokens[i].name);
+ if (!strncmp(input_buf, debugger_tokens[i].name, len))
+ break;
+ }
+
+ if (i == NDT)
+ return -EIO;
+
+ for (ptr = input_buf + len, j = 0; j < debugger_tokens[i].numargs && *ptr;) {
+ if (*ptr == ' ' || *ptr == '\t') {
+ ++ptr;
+ } else if (isdigit(*ptr)) {
+ args[j++] = simple_strtoul (ptr, &ptr, 0);
+ } else {
+ return -EIO;
+ }
+ }
+
+ if (j != debugger_tokens[i].numargs)
+ return -EIO;
+
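+ /*
+ * The original code never invokes the handler it just looked up. A
+ * minimal sketch of the presumed dispatch (assuming, as the handlers
+ * above do, that the host pointer is the first argument) :
+ */
+ if (debugger_tokens[i].fn)
+ debugger_tokens[i].fn (host, (struct debugger_token *)
+ &debugger_tokens[i], args);
+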
+ return count;
+}
+
+static int
+debugger_user_read (struct inode *inode,struct file *filp,
+ char *buf,int count) {
+ /* Reading the debug ring buffer back out has not been implemented. */
+ return -EIO;
+}
+
+static int
+debugger_kernel_write (struct Scsi_Host *host, char *buf, size_t
+ buflen) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int copy, left;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ while (buflen) {
+ left = (hostdata->debug_buf + hostdata->debug_size - 1) -
+ hostdata->debug_write;
+ copy = (buflen <= left) ? buflen : left;
+ memcpy (hostdata->debug_write, buf, copy);
+ buf += copy;
+ buflen -= copy;
+ hostdata->debug_count += copy;
+ if ((hostdata->debug_write += copy) ==
+ (hostdata->debug_buf + hostdata->debug_size))
+ hostdata->debug_write = hostdata->debug_buf;
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+#endif /* def NCR_DEBUG */
+
+/*
+ * Function : static void NCR53c8x0_soft_reset (struct Scsi_Host *host)
+ *
+ * Purpose : perform a soft reset of the NCR53c8xx chip
+ *
+ * Inputs : host - pointer to this host adapter's structure
+ *
+ * Preconditions : NCR53c7x0_init must have been called for this
+ * host.
+ *
+ */
+
+static void
+NCR53c8x0_soft_reset (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ NCR53c7x0_local_setup(host);
+
+
+ /*
+ * Do a soft reset of the chip so that everything is
+ * reinitialized to the power-on state.
+ *
+ * Basically follow the procedure outlined in the NCR53c700
+ * data manual under Chapter Six, How to Use, Steps Necessary to
+ * Start SCRIPTS, with the exception of actually starting the
+ * script and setting up the synchronous transfer gunk.
+ */
+
+ NCR53c7x0_write8(ISTAT_REG_800, ISTAT_10_SRST);
+ NCR53c7x0_write8(ISTAT_REG_800, 0);
+ NCR53c7x0_write8(hostdata->dmode, hostdata->saved_dmode & ~DMODE_MAN);
+
+
+ /*
+ * Respond to reselection by targets and use our _initiator_ SCSI ID
+ * for arbitration. If the "notyet" code below is enabled, also
+ * respond to SCSI selection.
+ *
+ * XXX - Note : we must reprogram this when reselecting as
+ * a target.
+ */
+
+#ifdef notyet
+ NCR53c7x0_write8(SCID_REG, (host->this_id & 7)|SCID_800_RRE|SCID_800_SRE);
+#else
+ NCR53c7x0_write8(SCID_REG, (host->this_id & 7)|SCID_800_RRE);
+#endif
+ NCR53c7x0_write8(RESPID_REG_800, hostdata->this_id_mask);
+
+ /*
+ * Use the maximum (1.6 second) handshake-to-handshake timeout,
+ * and SCSI recommended .5s selection timeout.
+ */
+
+ /*
+ * The new gcc won't recognize preprocessing directives
+ * within macro args.
+ */
+#if 0
+ NCR53c7x0_write8(STIME0_REG_800,
+ ((selection_timeout << STIME0_800_SEL_SHIFT) & STIME0_800_SEL_MASK)
+ | ((15 << STIME0_800_HTH_SHIFT) & STIME0_800_HTH_MASK));
+#else
+/* Disable HTH interrupt */
+ NCR53c7x0_write8(STIME0_REG_800,
+ ((selection_timeout << STIME0_800_SEL_SHIFT) & STIME0_800_SEL_MASK));
+#endif
+
+
+ /*
+ * Enable active negation for happy synchronous transfers.
+ */
+
+ NCR53c7x0_write8(STEST3_REG_800, STEST3_800_TE);
+
+ /*
+ * Enable all interrupts, except parity which we only want when
+ * the user requests it.
+ */
+
+ NCR53c7x0_write8(DIEN_REG, DIEN_800_MDPE | DIEN_800_BF |
+ DIEN_ABRT | DIEN_SSI | DIEN_SIR | DIEN_800_IID);
+
+
+ NCR53c7x0_write8(SIEN0_REG_800, ((hostdata->options & OPTION_PARITY) ?
+ SIEN_PAR : 0) | SIEN_RST | SIEN_UDC | SIEN_SGE | SIEN_MA);
+ NCR53c7x0_write8(SIEN1_REG_800, SIEN1_800_STO | SIEN1_800_HTH);
+
+ /*
+ * Use saved clock frequency divisor and scripts loaded in 16 bit
+ * mode flags from the saved dcntl.
+ */
+
+ NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl);
+ NCR53c7x0_write8(CTEST4_REG_800, hostdata->saved_ctest4);
+
+ /* Enable active negation */
+ NCR53c7x0_write8(STEST3_REG_800, STEST3_800_TE);
+}
+
+/*
+ * Function : static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Return the first free NCR53c7x0_cmd structure (which are
+ * reused in a LIFO manner to minimize cache thrashing).
+ *
+ * Side effects : If we haven't yet scheduled allocation of NCR53c7x0_cmd
+ * structures for this device, do so. Attempt to complete all scheduled
+ * allocations using kmalloc(), putting NCR53c7x0_cmd structures on
+ * the free list. Teach programmers not to drink and hack.
+ *
+ * Inputs : cmd - SCSI command
+ *
+ * Returns : NCR53c7x0_cmd structure allocated on behalf of cmd;
+ * NULL on failure.
+ */
+
+static struct NCR53c7x0_cmd *
+allocate_cmd (Scsi_Cmnd *cmd) {
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ void *real; /* Real address */
+ int size; /* Size of *tmp */
+ struct NCR53c7x0_cmd *tmp;
+ unsigned long flags;
+
+ if (hostdata->options & OPTION_DEBUG_ALLOCATION)
+ printk ("scsi%d : num_cmds = %d, can_queue = %d\n"
+ " target = %d, lun = %d, %s\n",
+ host->host_no, hostdata->num_cmds, host->can_queue,
+ cmd->target, cmd->lun, (hostdata->cmd_allocated[cmd->target] &
+ (1 << cmd->lun)) ? "already allocated" : "not allocated");
+
+/*
+ * If we have not yet reserved commands for this I_T_L nexus, and
+ * the device exists (as indicated by permanent Scsi_Cmnd structures
+ * being allocated under 1.3.x, or being outside of scan_scsis in
+ * 1.2.x), do so now.
+ */
+ if (!(hostdata->cmd_allocated[cmd->target] & (1 << cmd->lun)) &&
+#ifdef LINUX_1_2
+ !in_scan_scsis
+#else
+ cmd->device && cmd->device->has_cmdblocks
+#endif
+ ) {
+ if ((hostdata->extra_allocate + hostdata->num_cmds) < host->can_queue)
+ hostdata->extra_allocate += host->cmd_per_lun;
+ hostdata->cmd_allocated[cmd->target] |= (1 << cmd->lun);
+ }
+
+ for (; hostdata->extra_allocate > 0 ; --hostdata->extra_allocate,
+ ++hostdata->num_cmds) {
+ /* historically, kmalloc has returned unaligned addresses; pad so we
+ have enough room to ROUNDUP */
+ size = hostdata->max_cmd_size + sizeof (void *);
+/* FIXME: for ISA bus '7xx chips, we need to or GFP_DMA in here */
+ real = kmalloc (size, GFP_ATOMIC);
+ if (!real) {
+ if (hostdata->options & OPTION_DEBUG_ALLOCATION)
+ printk ("scsi%d : kmalloc(%d) failed\n",
+ host->host_no, size);
+ break;
+ }
+ tmp = ROUNDUP(real, void *);
+ tmp->real = real;
+ tmp->size = size;
+#ifdef LINUX_1_2
+ tmp->free = ((void (*)(void *, int)) kfree_s);
+#else
+ tmp->free = ((void (*)(void *, int)) kfree);
+#endif
+ save_flags (flags);
+ cli();
+ tmp->next = hostdata->free;
+ hostdata->free = tmp;
+ restore_flags (flags);
+ }
+ save_flags(flags);
+ cli();
+ tmp = (struct NCR53c7x0_cmd *) hostdata->free;
+ if (tmp) {
+ hostdata->free = tmp->next;
+ }
+ restore_flags(flags);
+ if (!tmp)
+ printk ("scsi%d : can't allocate command for target %d lun %d\n",
+ host->host_no, cmd->target, cmd->lun);
+ return tmp;
+}
+
+/*
+ * Function : static struct NCR53c7x0_cmd *create_cmd (Scsi_Cmnd *cmd)
+ *
+ *
+ * Purpose : allocate a NCR53c7x0_cmd structure, initialize it based on the
+ * Scsi_Cmnd structure passed in cmd, including dsa and Linux field
+ * initialization, and dsa code relocation.
+ *
+ * Inputs : cmd - SCSI command
+ *
+ * Returns : NCR53c7x0_cmd structure corresponding to cmd,
+ * NULL on failure.
+ */
+
+static struct NCR53c7x0_cmd *
+create_cmd (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_cmd *tmp; /* NCR53c7x0_cmd structure for this command */
+ int datain, /* Number of instructions per phase */
+ dataout;
+ int data_transfer_instructions, /* Count of dynamic instructions */
+ i; /* Counter */
+ u32 *cmd_datain, /* Address of datain/dataout code */
+ *cmd_dataout; /* Incremented as we assemble */
+#ifdef notyet
+ unsigned char *msgptr; /* Current byte in select message */
+ int msglen; /* Length of whole select message */
+#endif
+ unsigned long flags;
+ NCR53c7x0_local_setup(cmd->host);
+
+ if (!(tmp = allocate_cmd (cmd)))
+ return NULL;
+
+
+ /*
+ * Decide whether we need to generate commands for DATA IN,
+ * DATA OUT, neither, or both based on the SCSI command
+ */
+
+ switch (cmd->cmnd[0]) {
+ /* These commands do DATA IN */
+ case INQUIRY:
+ case MODE_SENSE:
+ case READ_6:
+ case READ_10:
+ case READ_CAPACITY:
+ case REQUEST_SENSE:
+ datain = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+ dataout = 0;
+ break;
+ /* These commands do DATA OUT */
+ case MODE_SELECT:
+ case WRITE_6:
+ case WRITE_10:
+ case START_STOP: /* also SCAN, which may do DATA OUT */
+#if 0
+ printk("scsi%d : command is ", host->host_no);
+ print_command(cmd->cmnd);
+#endif
+#if 0
+ printk ("scsi%d : %d scatter/gather segments\n", host->host_no,
+ cmd->use_sg);
+#endif
+ datain = 0;
+ dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+#if 0
+ hostdata->options |= OPTION_DEBUG_INTR;
+#endif
+ break;
+ /*
+ * These commands do no data transfer, we should force an
+ * interrupt if a data phase is attempted on them.
+ */
+ case TEST_UNIT_READY:
+ datain = dataout = 0;
+ break;
+ /*
+ * We don't know about these commands, so generate code to handle
+ * both DATA IN and DATA OUT phases.
+ */
+ default:
+ datain = dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
+ }
+
+ /*
+ * New code : so that active pointers work correctly regardless
+ * of where the saved data pointer is at, we want to immediately
+ * enter the dynamic code after selection, and on a non-data
+ * phase perform a CALL to the non-data phase handler, with
+ * returns back to this address.
+ *
+ * If a phase mismatch is encountered in the middle of a
+ * Block MOVE instruction, we want to _leave_ that instruction
+ * unchanged as the current case is, modify a temporary buffer,
+ * and point the active pointer (TEMP) at that.
+ *
+ * Furthermore, we want to implement a saved data pointer,
+ * set by the SAVE_DATA_POINTERs message.
+ *
+ * So, the data transfer segments will change to
+ * CALL data_transfer, WHEN NOT data phase
+ * MOVE x, x, WHEN data phase
+ * ( repeat )
+ * JUMP other_transfer
+ */
+
+ data_transfer_instructions = datain + dataout;
+
+ /*
+ * When we perform a request sense, we overwrite various things,
+ * including the data transfer code. Make sure we have enough
+ * space to do that.
+ */
+
+ if (data_transfer_instructions < 2)
+ data_transfer_instructions = 2;
+
+
+ /*
+ * The saved data pointer is set up so that a RESTORE POINTERS message
+ * will start the data transfer over at the beginning.
+ */
+
+ tmp->saved_data_pointer = virt_to_bus (hostdata->script) +
+ hostdata->E_data_transfer;
+
+ /*
+ * Initialize Linux specific fields.
+ */
+
+ tmp->cmd = cmd;
+ tmp->next = NULL;
+ tmp->flags = 0;
+ tmp->dsa_next_addr = virt_to_bus(tmp->dsa) + hostdata->dsa_next -
+ hostdata->dsa_start;
+ tmp->dsa_addr = virt_to_bus(tmp->dsa) - hostdata->dsa_start;
+
+ /*
+ * Calculate addresses of dynamic code to fill in DSA
+ */
+
+ tmp->data_transfer_start = tmp->dsa + (hostdata->dsa_end -
+ hostdata->dsa_start) / sizeof(u32);
+ tmp->data_transfer_end = tmp->data_transfer_start +
+ 2 * data_transfer_instructions;
+
+ cmd_datain = datain ? tmp->data_transfer_start : NULL;
+ cmd_dataout = dataout ? (datain ? cmd_datain + 2 * datain : tmp->
+ data_transfer_start) : NULL;
+
+ /*
+ * Fill in the NCR53c7x0_cmd structure as follows
+ * dsa, with fixed up DSA code
+ * datain code
+ * dataout code
+ */
+
+ /* Copy template code into dsa and perform all necessary fixups */
+ if (hostdata->dsa_fixup)
+ hostdata->dsa_fixup(tmp);
+
+ patch_dsa_32(tmp->dsa, dsa_next, 0, 0);
+ patch_dsa_32(tmp->dsa, dsa_cmnd, 0, virt_to_bus(cmd));
+
+ if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
+ if (hostdata->sync[cmd->target].select_indirect !=
+ ((hostdata->sync[cmd->target].scntl3_sanity << 24) |
+ (cmd->target << 16) |
+ (hostdata->sync[cmd->target].sxfer_sanity << 8))) {
+ printk ("scsi%d : sanity check failed select_indirect=0x%x\n",
+ host->host_no, hostdata->sync[cmd->target].select_indirect);
+ FATAL(host);
+
+ }
+
+ patch_dsa_32(tmp->dsa, dsa_select, 0, hostdata->sync[cmd->target].
+ select_indirect);
+ /*
+ * Right now, we'll do the WIDE and SYNCHRONOUS negotiations on
+ * different commands; although it should be trivial to do them
+ * both at the same time.
+ */
+ if (hostdata->initiate_wdtr & (1 << cmd->target)) {
+ memcpy ((void *) (tmp->select + 1), (void *) wdtr_message,
+ sizeof(wdtr_message));
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(wdtr_message));
+ save_flags(flags);
+ cli();
+ hostdata->initiate_wdtr &= ~(1 << cmd->target);
+ restore_flags(flags);
+ } else if (hostdata->initiate_sdtr & (1 << cmd->target)) {
+ memcpy ((void *) (tmp->select + 1), (void *) sdtr_message,
+ sizeof(sdtr_message));
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(sdtr_message));
+ tmp->flags |= CMD_FLAG_SDTR;
+ save_flags(flags);
+ cli();
+ hostdata->initiate_sdtr &= ~(1 << cmd->target);
+ restore_flags(flags);
+
+ }
+#if 1
+ else if (!(hostdata->talked_to & (1 << cmd->target)) &&
+ !(hostdata->options & OPTION_NO_ASYNC)) {
+ memcpy ((void *) (tmp->select + 1), (void *) async_message,
+ sizeof(async_message));
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(async_message));
+ tmp->flags |= CMD_FLAG_SDTR;
+ }
+#endif
+ else
+ patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1);
+ hostdata->talked_to |= (1 << cmd->target);
+ tmp->select[0] = (hostdata->options & OPTION_DISCONNECT) ?
+ IDENTIFY (1, cmd->lun) : IDENTIFY (0, cmd->lun);
+ patch_dsa_32(tmp->dsa, dsa_msgout, 1, virt_to_bus(tmp->select));
+ patch_dsa_32(tmp->dsa, dsa_cmdout, 0, cmd->cmd_len);
+ patch_dsa_32(tmp->dsa, dsa_cmdout, 1, virt_to_bus(cmd->cmnd));
+ patch_dsa_32(tmp->dsa, dsa_dataout, 0, cmd_dataout ?
+ virt_to_bus (cmd_dataout)
+ : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
+ patch_dsa_32(tmp->dsa, dsa_datain, 0, cmd_datain ?
+ virt_to_bus (cmd_datain)
+ : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
+ /*
+ * XXX - need to make endian aware, should use separate variables
+ * for both status and message bytes.
+ */
+ patch_dsa_32(tmp->dsa, dsa_msgin, 0, 1);
+/*
+ * FIXME : these only work for little endian. We probably want to
+ * provide message and status fields in the NCR53c7x0_cmd
+ * structure, and assign them to cmd->result when we're done.
+ */
+ patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&cmd->result) + 1);
+ patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
+ patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&cmd->result));
+ patch_dsa_32(tmp->dsa, dsa_msgout_other, 0, 1);
+ patch_dsa_32(tmp->dsa, dsa_msgout_other, 1,
+ virt_to_bus(&(hostdata->NCR53c7xx_msg_nop)));
+
+ /*
+ * Generate code for zero or more of the DATA IN, DATA OUT phases
+ * in the format
+ *
+ * CALL data_transfer, WHEN NOT phase
+ * MOVE first buffer length, first buffer address, WHEN phase
+ * ...
+ * MOVE last buffer length, last buffer address, WHEN phase
+ * JUMP other_transfer
+ */
+
+/*
+ * See if we're getting to data transfer by generating an unconditional
+ * interrupt.
+ */
+#if 0
+ if (datain) {
+ cmd_datain[0] = 0x98080000;
+ cmd_datain[1] = 0x03ffd00d;
+ cmd_datain += 2;
+ }
+#endif
+
+/*
+ * XXX - I'm undecided whether all of this nonsense is faster
+ * in the long run, or whether I should just go and implement a loop
+ * on the NCR chip using table indirect mode?
+ *
+ * In any case, this is how it _must_ be done for 53c700/700-66 chips,
+ * so this stays even when we come up with something better.
+ *
+ * When we're limited to 1 simultaneous command, no overlapping processing,
+ * we're seeing 630K/sec, with 7% CPU usage on a slow Syquest 45M
+ * drive.
+ *
+ * Not bad, not good. We'll see.
+ */
+
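+/*
+ * One CALL / MOVE pair is generated below for each scatter/gather
+ * segment (or for the single request buffer) in whichever of the
+ * DATA IN and DATA OUT tables were sized earlier in this function.
+ */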
+ for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
+ cmd_dataout += 4, ++i) {
+ u32 buf = cmd->use_sg ?
+ virt_to_bus(((struct scatterlist *)cmd->buffer)[i].address) :
+ virt_to_bus(cmd->request_buffer);
+ u32 count = cmd->use_sg ?
+ ((struct scatterlist *)cmd->buffer)[i].length :
+ cmd->request_bufflen;
+
+ if (datain) {
+ /* CALL other_in, WHEN NOT DATA_IN */
+ cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
+ DCMD_TCI_IO) << 24) |
+ DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+ cmd_datain[1] = virt_to_bus (hostdata->script) +
+ hostdata->E_other_in;
+ /* MOVE count, buf, WHEN DATA_IN */
+ cmd_datain[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO)
+ << 24) | count;
+ cmd_datain[3] = buf;
+#if 0
+ print_insn (host, cmd_datain, "dynamic ", 1);
+ print_insn (host, cmd_datain + 2, "dynamic ", 1);
+#endif
+ }
+ if (dataout) {
+ /* CALL other_out, WHEN NOT DATA_OUT */
+ cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL) << 24) |
+ DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+ cmd_dataout[1] = virt_to_bus(hostdata->script) +
+ hostdata->E_other_out;
+ /* MOVE count, buf, WHEN DATA_OUT */
+ cmd_dataout[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I) << 24)
+ | count;
+ cmd_dataout[3] = buf;
+#if 0
+ print_insn (host, cmd_dataout, "dynamic ", 1);
+ print_insn (host, cmd_dataout + 2, "dynamic ", 1);
+#endif
+ }
+ }
+
+ /*
+ * Install JUMP instructions after the data transfer routines to return
+ * control to the do_other_transfer routines.
+ */
+
+
+ if (datain) {
+ cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
+ DBC_TCI_TRUE;
+ cmd_datain[1] = virt_to_bus(hostdata->script) +
+ hostdata->E_other_transfer;
+#if 0
+ print_insn (host, cmd_datain, "dynamic jump ", 1);
+#endif
+ cmd_datain += 2;
+ }
+#if 0
+ if (datain) {
+ cmd_datain[0] = 0x98080000;
+ cmd_datain[1] = 0x03ffdeed;
+ cmd_datain += 2;
+ }
+#endif
+ if (dataout) {
+ cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
+ DBC_TCI_TRUE;
+ cmd_dataout[1] = virt_to_bus(hostdata->script) +
+ hostdata->E_other_transfer;
+#if 0
+ print_insn (host, cmd_dataout, "dynamic jump ", 1);
+#endif
+ cmd_dataout += 2;
+ }
+ return tmp;
+}
+
+/*
+ * Function : int NCR53c7xx_queue_command (Scsi_Cmnd *cmd,
+ * void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose : enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ * a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ * Side effects :
+ * cmd is added to the per instance driver issue_queue, with major
+ * twiddling done to the host specific fields of cmd. If the
+ * process_issue_queue coroutine isn't running, it is restarted.
+ *
+ * NOTE : we use the host_scribble field of the Scsi_Cmnd structure to
+ * hold our own data, and pervert the ptr field of the SCp field
+ * to create a linked list.
+ */
+
+int
+NCR53c7xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) {
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ unsigned long flags;
+ Scsi_Cmnd *tmp;
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.buffer = NULL;
+
+ save_flags(flags);
+ cli();
+ if ((hostdata->options & (OPTION_DEBUG_INIT_ONLY|OPTION_DEBUG_PROBE_ONLY))
+ || ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
+ !(hostdata->debug_lun_limit[cmd->target] & (1 << cmd->lun)))
+#ifdef LINUX_1_2
+ || cmd->target > 7
+#else
+ || cmd->target > host->max_id
+#endif
+ || cmd->target == host->this_id
+ || hostdata->state == STATE_DISABLED) {
+ printk("scsi%d : disabled or bad target %d lun %d\n", host->host_no,
+ cmd->target, cmd->lun);
+ cmd->result = (DID_BAD_TARGET << 16);
+ } else if ((hostdata->options & OPTION_DEBUG_NCOMMANDS_LIMIT) &&
+ (hostdata->debug_count_limit == 0)) {
+ printk("scsi%d : maximum commands exceeded\n", host->host_no);
+ cmd->result = (DID_BAD_TARGET << 16);
+ } else if (hostdata->options & OPTION_DEBUG_READ_ONLY) {
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
+ host->host_no);
+ cmd->result = (DID_BAD_TARGET << 16);
+ }
+ } else {
+ if ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
+ hostdata->debug_count_limit != -1)
+ --hostdata->debug_count_limit;
+ restore_flags (flags);
+ cmd->result = 0xffff; /* The NCR will overwrite message
+ and status with valid data */
+ cmd->host_scribble = (unsigned char *) (tmp = create_cmd (cmd));
+ }
+ cli();
+ /*
+ * REQUEST SENSE commands are inserted at the head of the queue
+ * so that we do not clear the contingent allegiance condition
+ * they may be looking at.
+ */
+
+ if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
+ hostdata->issue_queue = cmd;
+ } else {
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->SCp.ptr;
+ tmp = (Scsi_Cmnd *) tmp->SCp.ptr);
+ tmp->SCp.ptr = (unsigned char *) cmd;
+ }
+ restore_flags (flags);
+ run_process_issue_queue();
+ return 0;
+}
+
+/*
+ * Function : void to_schedule_list (struct Scsi_Host *host,
+ * struct NCR53c7x0_hostdata * hostdata, Scsi_Cmnd *cmd)
+ *
+ * Purpose : takes a SCSI command which was just removed from the
+ * issue queue, and deals with it by inserting it in the first
+ * free slot in the schedule list or by terminating it immediately.
+ *
+ * Inputs :
+ * host - SCSI host adapter; hostdata - hostdata structure for
+ * this adapter; cmd - a pointer to the command; should have
+ * the host_scribble field initialized to point to a valid
+ * NCR53c7x0_cmd structure.
+ *
+ * Side effects :
+ * cmd is added to the per instance schedule list, with minor
+ * twiddling done to the host specific fields of cmd.
+ *
+ */
+
+static __inline__ void
+to_schedule_list (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+ struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ Scsi_Cmnd *tmp = cmd->cmd;
+ unsigned long flags;
+ /* dsa start is negative, so subtraction is used */
+ volatile u32 *current;
+
+ int i;
+ NCR53c7x0_local_setup(host);
+#if 0
+ printk("scsi%d : new dsa is 0x%lx (virt 0x%p)\n", host->host_no,
+ virt_to_bus(dsa), dsa);
+#endif
+
+ save_flags(flags);
+ cli();
+
+ /*
+ * Work around race condition : if an interrupt fired and we
+ * got disabled, forget about this command.
+ */
+
+ if (hostdata->state == STATE_DISABLED) {
+ printk("scsi%d : driver disabled\n", host->host_no);
+ tmp->result = (DID_BAD_TARGET << 16);
+ cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
+ hostdata->free = cmd;
+ tmp->scsi_done(tmp);
+ restore_flags (flags);
+ return;
+ }
+
+ for (i = host->can_queue, current = hostdata->schedule;
+ i > 0 && current[0] != hostdata->NOP_insn;
+ --i, current += 2 /* JUMP instructions are two words */);
+
+ if (i > 0) {
+ ++hostdata->busy[tmp->target][tmp->lun];
+ cmd->next = hostdata->running_list;
+ hostdata->running_list = cmd;
+
+ /* Restore this instruction to a NOP once the command starts */
+ cmd->dsa [(hostdata->dsa_jump_dest - hostdata->dsa_start) /
+ sizeof(u32)] = (u32) virt_to_bus ((void *)current);
+ /* Replace the current jump operand. */
+ current[1] =
+ virt_to_bus ((void *) cmd->dsa) + hostdata->E_dsa_code_begin -
+ hostdata->E_dsa_code_template;
+ /* Replace the NOP instruction with a JUMP */
+ current[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) |
+ DBC_TCI_TRUE;
+ } else {
+ printk ("scsi%d: no free slot\n", host->host_no);
+ disable(host);
+ tmp->result = (DID_ERROR << 16);
+ cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
+ hostdata->free = cmd;
+ tmp->scsi_done(tmp);
+ restore_flags (flags);
+ return;
+ }
+
+ /*
+ * If the NCR chip is in an idle state, start it running the scheduler
+ * immediately. Otherwise, signal the chip to jump to schedule as
+ * soon as it is idle.
+ */
+ if (hostdata->idle) {
+ hostdata->idle = 0;
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSP_REG, virt_to_bus ((void *)hostdata->schedule));
+ } else {
+ NCR53c7x0_write8(hostdata->istat, ISTAT_10_SIGP);
+ }
+
+ restore_flags(flags);
+}
+
+/*
+ * Function : busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata
+ * *hostdata, Scsi_Cmnd *cmd)
+ *
+ * Purpose : decide if we can pass the given SCSI command on to the
+ * device in question or not.
+ *
+ * Returns : non-zero when we're busy, 0 when we aren't.
+ */
+
+static __inline__ int
+busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
+ Scsi_Cmnd *cmd) {
+ /* FIXME : in the future, this needs to accommodate SCSI-II tagged
+ queuing, and we may be able to play with fairness here a bit.
+ */
+ return hostdata->busy[cmd->target][cmd->lun];
+}
+
+/*
+ * Function : process_issue_queue (void)
+ *
+ * Purpose : transfer commands from the issue queue to NCR start queue
+ * of each NCR53c7/8xx in the system, avoiding kernel stack
+ * overflows when the scsi_done() function is invoked recursively.
+ *
+ * NOTE : process_issue_queue exits with interrupts *disabled*, so the
+ * caller must reenable them if it desires.
+ *
+ * NOTE : process_issue_queue should be called from both
+ * NCR53c7x0_queue_command() and from the interrupt handler
+ * after command completion in case NCR53c7x0_queue_command()
+ * isn't invoked again but we've freed up resources that are
+ * needed.
+ */
+
+static void
+process_issue_queue (unsigned long flags) {
+ Scsi_Cmnd *tmp, *prev;
+ struct Scsi_Host *host;
+ struct NCR53c7x0_hostdata *hostdata;
+ int done;
+
+ /*
+ * We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set process_issue_queue_running to 0 and exit.
+ *
+ * Interrupts are enabled before doing various other internal
+ * instructions, after we've decided that we need to run through
+ * the loop again.
+ *
+ */
+
+ do {
+ cli(); /* Freeze request queues */
+ done = 1;
+ for (host = first_host; host && host->hostt == the_template;
+ host = host->next) {
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+ cli();
+ if (hostdata->issue_queue) {
+ if (hostdata->state == STATE_DISABLED) {
+ tmp = (Scsi_Cmnd *) hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *) tmp->SCp.ptr;
+ tmp->result = (DID_BAD_TARGET << 16);
+ if (tmp->host_scribble) {
+ ((struct NCR53c7x0_cmd *)tmp->host_scribble)->next =
+ hostdata->free;
+ hostdata->free =
+ (struct NCR53c7x0_cmd *)tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ }
+ tmp->scsi_done (tmp);
+ done = 0;
+ } else
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
+ prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
+ tmp->SCp.ptr)
+ if (!tmp->host_scribble ||
+ !busyp (host, hostdata, tmp)) {
+ if (prev)
+ prev->SCp.ptr = tmp->SCp.ptr;
+ else
+ hostdata->issue_queue = (Scsi_Cmnd *)
+ tmp->SCp.ptr;
+ tmp->SCp.ptr = NULL;
+ if (tmp->host_scribble) {
+ if (hostdata->options & OPTION_DEBUG_QUEUES)
+ printk ("scsi%d : moving command for target %d lun %d to start list\n",
+ host->host_no, tmp->target, tmp->lun);
+
+
+ to_schedule_list (host, hostdata,
+ (struct NCR53c7x0_cmd *)
+ tmp->host_scribble);
+ } else {
+ if (((tmp->result & 0xff) == 0xff) ||
+ ((tmp->result & 0xff00) == 0xff00)) {
+ printk ("scsi%d : danger Will Robinson!\n",
+ host->host_no);
+ tmp->result = DID_ERROR << 16;
+ disable (host);
+ }
+ tmp->scsi_done(tmp);
+ }
+ done = 0;
+ } /* if target/lun is not busy */
+ } /* if hostdata->issue_queue */
+ if (!done)
+ restore_flags (flags);
+ } /* for host */
+ } while (!done);
+ process_issue_queue_running = 0;
+}
+
+/*
+ * Function : static void intr_scsi (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle all SCSI interrupts, indicated by the setting
+ * of the SIP bit in the ISTAT register.
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_scsi (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ unsigned char sstat0_sist0, sist1, /* Registers */
+ fatal; /* Did a fatal interrupt
+ occur ? */
+
+ int is_8xx_chip;
+ NCR53c7x0_local_setup(host);
+
+ fatal = 0;
+
+ is_8xx_chip = ((unsigned) (hostdata->chip - 800)) < 100;
+ if (is_8xx_chip) {
+ sstat0_sist0 = NCR53c7x0_read8(SIST0_REG_800);
+ udelay(1);
+ sist1 = NCR53c7x0_read8(SIST1_REG_800);
+ } else {
+ sstat0_sist0 = NCR53c7x0_read8(SSTAT0_REG);
+ sist1 = 0;
+ }
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : SIST0 0x%0x, SIST1 0x%0x\n", host->host_no,
+ sstat0_sist0, sist1);
+
+ /* 250ms selection timeout */
+ if ((is_8xx_chip && (sist1 & SIST1_800_STO)) ||
+ (!is_8xx_chip && (sstat0_sist0 & SSTAT0_700_STO))) {
+ fatal = 1;
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+ printk ("scsi%d : Selection Timeout\n", host->host_no);
+ if (cmd) {
+ printk("scsi%d : target %d, lun %d, command ",
+ host->host_no, cmd->cmd->target, cmd->cmd->lun);
+ print_command (cmd->cmd->cmnd);
+ printk("scsi%d : dsp = 0x%x (virt 0x%p)\n", host->host_no,
+ NCR53c7x0_read32(DSP_REG),
+ bus_to_virt(NCR53c7x0_read32(DSP_REG)));
+ } else {
+ printk("scsi%d : no command\n", host->host_no);
+ }
+ }
+/*
+ * XXX - question : how do we want to handle the Illegal Instruction
+ * interrupt, which may occur before or after the Selection Timeout
+ * interrupt?
+ */
+
+ if (1) {
+ hostdata->idle = 1;
+ hostdata->expecting_sto = 0;
+
+ if (hostdata->test_running) {
+ hostdata->test_running = 0;
+ hostdata->test_completed = 3;
+ } else if (cmd) {
+ abnormal_finished(cmd, DID_BAD_TARGET << 16);
+ }
+#if 0
+ hostdata->intrs = 0;
+#endif
+ }
+ }
+
+/*
+ * FIXME : in theory, we can also get a UDC when a STO occurs.
+ */
+ if (sstat0_sist0 & SSTAT0_UDC) {
+ fatal = 1;
+ if (cmd) {
+ printk("scsi%d : target %d lun %d unexpected disconnect\n",
+ host->host_no, cmd->cmd->target, cmd->cmd->lun);
+ print_lots (host);
+ abnormal_finished(cmd, DID_ERROR << 16);
+ } else
+ printk("scsi%d : unexpected disconnect (no command)\n",
+ host->host_no);
+
+ hostdata->dsp = (u32 *) hostdata->schedule;
+ hostdata->dsp_changed = 1;
+ }
+
+ /* SCSI PARITY error */
+ if (sstat0_sist0 & SSTAT0_PAR) {
+ fatal = 1;
+ if (cmd && cmd->cmd) {
+ printk("scsi%d : target %d lun %d parity error.\n",
+ host->host_no, cmd->cmd->target, cmd->cmd->lun);
+ abnormal_finished (cmd, DID_PARITY << 16);
+ } else
+ printk("scsi%d : parity error\n", host->host_no);
+ /* Should send message out, parity error */
+
+ /* XXX - Reduce synchronous transfer rate! */
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ /* SCSI GROSS error */
+ }
+
+ if (sstat0_sist0 & SSTAT0_SGE) {
+ fatal = 1;
+ printk("scsi%d : gross error\n", host->host_no);
+ /* Reset SCSI offset */
+ if ((hostdata->chip / 100) == 8) {
+ NCR53c7x0_write8 (STEST2_REG_800, STEST2_800_ROF);
+ }
+
+ /*
+ * A SCSI gross error may occur when we have
+ *
+ * - A synchronous offset which causes the SCSI FIFO to be overwritten.
+ *
+ * - A REQ which causes the maximum synchronous offset programmed in
+ * the SXFER register to be exceeded.
+ *
+ * - A phase change with an outstanding synchronous offset.
+ *
+ * - Residual data in the synchronous data FIFO when a transfer
+ * other than a synchronous receive is started.
+ */
+
+
+ /* XXX Should deduce synchronous transfer rate! */
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ /* Phase mismatch */
+ }
+
+ if (sstat0_sist0 & SSTAT0_MA) {
+ fatal = 1;
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : SSTAT0_MA\n", host->host_no);
+ intr_phase_mismatch (host, cmd);
+ }
+
+#if 0
+ if (sstat0_sist0 & SIST0_800_RSL)
+ printk ("scsi%d : Oh no Mr. Bill!\n", host->host_no);
+#endif
+
+/*
+ * If a fatal SCSI interrupt occurs, we must insure that the DMA and
+ * SCSI FIFOs were flushed.
+ */
+
+ if (fatal) {
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+/* XXX - code check for 700/800 chips */
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+ }
+}
+
+/*
+ * Function : static void NCR53c7x0_intr (int irq, void *dev_id, struct pt_regs * regs)
+ *
+ * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
+ * the same IRQ line.
+ *
+ * Inputs : Since we're using the SA_INTERRUPT interrupt handler
+ * semantics, irq indicates the interrupt which invoked
+ * this handler.
+ */
+
+static void
+NCR53c7x0_intr (int irq, void *dev_id, struct pt_regs * regs) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host; /* Host we are looking at */
+ unsigned char istat; /* Values of interrupt regs */
+ struct NCR53c7x0_hostdata *hostdata; /* host->hostdata */
+ struct NCR53c7x0_cmd *cmd, /* command which halted */
+ **cmd_prev_ptr;
+ u32 *dsa; /* DSA */
+ int done = 1; /* Indicates when handler
+ should terminate */
+ int interrupted = 0; /* This HA generated
+ an interrupt */
+ int have_intfly; /* Don't print warning
+ messages when we stack
+ INTFLYs */
+ unsigned long flags;
+
+#ifdef NCR_DEBUG
+ char buf[80]; /* Debugging sprintf buffer */
+ size_t buflen; /* Length of same */
+#endif
+
+ do {
+ done = 1;
+ for (host = first_host; host; host = host->next)
+ if (host->hostt == the_template && host->irq == irq) {
+ NCR53c7x0_local_setup(host);
+
+ hostdata = (struct NCR53c7x0_hostdata *) host->hostdata;
+ hostdata->dsp_changed = 0;
+ interrupted = 0;
+ have_intfly = 0;
+
+ do {
+ int is_8xx_chip;
+
+ hostdata->dstat_valid = 0;
+ interrupted = 0;
+ /*
+ * Only read istat once, since reading it again will unstack
+ * interrupts?
+ */
+ istat = NCR53c7x0_read8(hostdata->istat);
+
+ /*
+ * INTFLY interrupts are used by the NCR53c720, NCR53c810,
+ * and NCR53c820 to signify completion of a command. Since
+ * the SCSI processor continues running, we can't just look
+ * at the contents of the DSA register and continue running.
+ */
+/* XXX - this is too big, offends my sense of aesthetics, and should
+ move to intr_intfly() */
+ is_8xx_chip = ((unsigned) (hostdata->chip - 800)) < 100;
+ if ((hostdata->options & OPTION_INTFLY) &&
+ (is_8xx_chip && (istat & ISTAT_800_INTF))) {
+ char search_found = 0; /* Got at least one ? */
+ done = 0;
+ interrupted = 1;
+
+ /*
+ * Clear the INTF bit by writing a one.
+ * This reset operation is self-clearing.
+ */
+ NCR53c7x0_write8(hostdata->istat, istat|ISTAT_800_INTF);
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : INTFLY\n", host->host_no);
+
+ /*
+ * Traverse our list of running commands, and look
+ * for those with valid (non-0xff ff) status and message
+ * bytes encoded in the result which signify command
+ * completion.
+ */
+
+
+ save_flags(flags);
+ cli();
+restart:
+ for (cmd_prev_ptr = (struct NCR53c7x0_cmd **)
+ &(hostdata->running_list), cmd =
+ (struct NCR53c7x0_cmd *) hostdata->running_list; cmd ;
+ cmd_prev_ptr = (struct NCR53c7x0_cmd **) &(cmd->next),
+ cmd = (struct NCR53c7x0_cmd *) cmd->next) {
+ Scsi_Cmnd *tmp;
+
+ if (!cmd) {
+ printk("scsi%d : very weird.\n", host->host_no);
+ break;
+ }
+
+ if (!(tmp = cmd->cmd)) {
+ printk("scsi%d : weird. NCR53c7x0_cmd has no Scsi_Cmnd\n",
+ host->host_no);
+ continue;
+ }
+#if 0
+ printk ("scsi%d : looking at result of 0x%x\n",
+ host->host_no, cmd->cmd->result);
+#endif
+
+ if (((tmp->result & 0xff) == 0xff) ||
+ ((tmp->result & 0xff00) == 0xff00))
+ continue;
+
+ search_found = 1;
+
+ /* Important - remove from list _before_ done is called */
+ if (cmd_prev_ptr)
+ *cmd_prev_ptr = (struct NCR53c7x0_cmd *) cmd->next;
+
+ --hostdata->busy[tmp->target][tmp->lun];
+ cmd->next = hostdata->free;
+ hostdata->free = cmd;
+
+ tmp->host_scribble = NULL;
+
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+ printk ("scsi%d : command complete : pid %lu, id %d,lun %d result 0x%x ",
+ host->host_no, tmp->pid, tmp->target, tmp->lun, tmp->result);
+ print_command (tmp->cmnd);
+ }
+
+#if 0
+ hostdata->options &= ~OPTION_DEBUG_INTR;
+#endif
+ tmp->scsi_done(tmp);
+ goto restart;
+
+ }
+ restore_flags(flags);
+
+ /*
+ * I think that we're stacking INTFLY interrupts; taking care of
+ * all the finished commands on the first one, and then getting
+ * worried when we see the next one. The magic with have_intfly
+ * should tell us if this is the case.
+ */
+
+ if (!search_found && !have_intfly) {
+ printk ("scsi%d : WARNING : INTFLY with no completed commands.\n",
+ host->host_no);
+ } else if (!have_intfly) {
+ have_intfly = 1;
+ run_process_issue_queue();
+ }
+ }
+
+ if (istat & (ISTAT_SIP|ISTAT_DIP)) {
+ done = 0;
+ interrupted = 1;
+ hostdata->state = STATE_HALTED;
+
+ if (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK)
+ printk ("scsi%d : SCSI FIFO not empty\n",
+ host->host_no);
+
+ /*
+ * NCR53c700 and NCR53c700-66 change the current SCSI
+ * process, hostdata->current, in the Linux driver so
+ * cmd = hostdata->current.
+ *
+ * With other chips, we must look through the commands
+ * executing and find the command structure which
+ * corresponds to the DSA register.
+ */
+
+ if (hostdata->options & OPTION_700) {
+ cmd = (struct NCR53c7x0_cmd *) hostdata->current;
+ } else {
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ for (cmd = (struct NCR53c7x0_cmd *)
+ hostdata->running_list; cmd &&
+ (dsa + (hostdata->dsa_start / sizeof(u32))) !=
+ cmd->dsa;
+ cmd = (struct NCR53c7x0_cmd *)(cmd->next));
+ }
+ if (hostdata->options & OPTION_DEBUG_INTR) {
+ if (cmd) {
+ printk("scsi%d : interrupt for pid %lu, id %d, lun %d ",
+ host->host_no, cmd->cmd->pid, (int) cmd->cmd->target,
+ (int) cmd->cmd->lun);
+ print_command (cmd->cmd->cmnd);
+ } else {
+ printk("scsi%d : no active command\n", host->host_no);
+ }
+ }
+
+ if (istat & ISTAT_SIP) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ISTAT_SIP\n", host->host_no);
+ intr_scsi (host, cmd);
+ }
+
+ if (istat & ISTAT_DIP) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ISTAT_DIP\n", host->host_no);
+ intr_dma (host, cmd);
+ }
+
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+ /* XXX - code check for 700/800 chips */
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else
+ {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+ }
+ } while (interrupted);
+
+
+
+ if (hostdata->intrs != -1)
+ hostdata->intrs++;
+#if 0
+ if (hostdata->intrs > 40) {
+ printk("scsi%d : too many interrupts, halting", host->host_no);
+ disable(host);
+ }
+#endif
+
+ if (!hostdata->idle && hostdata->state == STATE_HALTED) {
+ if (!hostdata->dsp_changed) {
+ hostdata->dsp = (u32 *)
+ bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ }
+
+#if 0
+ printk("scsi%d : new dsp is 0x%lx (virt 0x%p)\n",
+ host->host_no, virt_to_bus(hostdata->dsp), hostdata->dsp);
+#endif
+
+ hostdata->state = STATE_RUNNING;
+ NCR53c7x0_write32 (DSP_REG, virt_to_bus(hostdata->dsp));
+ }
+ }
+ } while (!done);
+}
+
+
+/*
+ * Function : static int abort_connected (struct Scsi_Host *host)
+ *
+ * Purpose : Assuming that the NCR SCSI processor is currently
+ * halted, break the currently established nexus. Clean
+ * up of the NCR53c7x0_cmd and Scsi_Cmnd structures should
+ * be done on receipt of the abort interrupt.
+ *
+ * Inputs : host - SCSI host
+ *
+ */
+
+static int
+abort_connected (struct Scsi_Host *host) {
+#ifdef NEW_ABORT
+ NCR53c7x0_local_declare();
+#endif
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+/* FIXME : this probably should change for production kernels; at the
+ least, counter should move to a per-host structure. */
+ static int counter = 5;
+#ifdef NEW_ABORT
+ int sstat, phase, offset;
+ u32 *script;
+ NCR53c7x0_local_setup(host);
+#endif
+
+ if (--counter <= 0) {
+ disable(host);
+ return 0;
+ }
+
+ printk ("scsi%d : DANGER : abort_connected() called \n",
+ host->host_no);
+
+#ifdef NEW_ABORT
+
+/*
+ * New strategy : Rather than using a generic abort routine,
+ * we'll specifically try to source or sink the appropriate
+ * amount of data for the phase we're currently in (taking into
+ * account the current synchronous offset)
+ */
+
+ sstat = NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ? SSTAT1_REG : SSTAT2_REG);
+ offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
+ phase = sstat & SSTAT2_PHASE_MASK;
+
+/*
+ * SET ATN
+ * MOVE source_or_sink, WHEN CURRENT PHASE
+ * < repeat for each outstanding byte >
+ * JUMP send_abort_message
+ */
+
+ script = hostdata->abort_script = kmalloc (
+ 8 /* instruction size */ * (
+ 1 /* set ATN */ +
+ (!offset ? 1 : offset) /* One transfer per outstanding byte */ +
+ 1 /* send abort message */),
+ GFP_ATOMIC);
+
+
+#else /* def NEW_ABORT */
+ hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
+ sizeof(u32);
+#endif /* def NEW_ABORT */
+ hostdata->dsp_changed = 1;
+
+/* XXX - need to flag the command as aborted after the abort_connected
+ code runs
+ */
+ return 0;
+}
+
+/*
+ * Function : static int datapath_residual (Scsi_Host *host)
+ *
+ * Purpose : return residual data count of what's in the chip.
+ *
+ * Inputs : host - SCSI host
+ */
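+
+/*
+ * The residual is the difference between the DMA FIFO byte offset and
+ * the low bits of DBC, plus any byte still held in the SCSI core :
+ * the synchronous FIFO count (or input latch) on receive, and the
+ * output (and synchronous) latches on send.
+ */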
+
+static int
+datapath_residual (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int count, synchronous, sstat;
+ NCR53c7x0_local_setup(host);
+ /* COMPAT : the 700 and 700-66 need to use DFIFO_00_BO_MASK */
+ count = ((NCR53c7x0_read8 (DFIFO_REG) & DFIFO_10_BO_MASK) -
+ (NCR53c7x0_read32 (DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
+ synchronous = NCR53c7x0_read8 (SXFER_REG) & SXFER_MO_MASK;
+ /* COMPAT : DDIR is elsewhere on non-'8xx chips. */
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ /* Receive */
+ if (synchronous)
+ count += (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
+ else
+ if (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT0_REG : SSTAT1_REG) & SSTAT1_ILF)
+ ++count;
+ } else {
+ /* Send */
+ sstat = ((hostdata->chip / 100) == 8) ? NCR53c7x0_read8 (SSTAT0_REG) :
+ NCR53c7x0_read8 (SSTAT1_REG);
+ if (sstat & SSTAT1_OLF)
+ ++count;
+ if (synchronous && (sstat & SSTAT1_ORF))
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : static const char * sbcl_to_phase (int sbcl)
+ *
+ * Purpose : Convert SBCL register to user-parsable phase representation
+ *
+ * Inputs : sbcl - value of sbcl register
+ */
+
+
+static const char *
+sbcl_to_phase (int sbcl) {
+ switch (sbcl & SBCL_PHASE_MASK) {
+ case SBCL_PHASE_DATAIN:
+ return "DATAIN";
+ case SBCL_PHASE_DATAOUT:
+ return "DATAOUT";
+ case SBCL_PHASE_MSGIN:
+ return "MSGIN";
+ case SBCL_PHASE_MSGOUT:
+ return "MSGOUT";
+ case SBCL_PHASE_CMDOUT:
+ return "CMDOUT";
+ case SBCL_PHASE_STATIN:
+ return "STATUSIN";
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * Function : static const char * sstat2_to_phase (int sstat)
+ *
+ * Purpose : Convert SSTAT2 register to user-parsable phase representation
+ *
+ * Inputs : sstat - value of sstat register
+ */
+
+
+static const char *
+sstat2_to_phase (int sstat) {
+ switch (sstat & SSTAT2_PHASE_MASK) {
+ case SSTAT2_PHASE_DATAIN:
+ return "DATAIN";
+ case SSTAT2_PHASE_DATAOUT:
+ return "DATAOUT";
+ case SSTAT2_PHASE_MSGIN:
+ return "MSGIN";
+ case SSTAT2_PHASE_MSGOUT:
+ return "MSGOUT";
+ case SSTAT2_PHASE_CMDOUT:
+ return "CMDOUT";
+ case SSTAT2_PHASE_STATIN:
+ return "STATUSIN";
+ default:
+ return "unknown";
+ }
+}
+
+/*
+ * Function : static void intr_phase_mismatch (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : Handle phase mismatch interrupts
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ *
+ * Side effects : The abort_connected() routine is called or the NCR chip
+ * is restarted, jumping to the command_complete entry point, or
+ * patching the address and transfer count of the current instruction
+ * and calling the msg_in entry point as appropriate.
+ */
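+
+/*
+ * For a mismatch that hits inside the dynamic data transfer code, the
+ * fix-up below assembles a short sequence in cmd->residual (conditional
+ * CALL to the non-data-phase handler, block MOVE of the remaining
+ * count, JUMP back to the interrupted script) and restarts there.
+ */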
+
+static void
+intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ u32 dbc_dcmd, *dsp, *dsp_next;
+ unsigned char dcmd, sbcl;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int residual;
+ enum {ACTION_ABORT, ACTION_ABORT_PRINT, ACTION_CONTINUE} action =
+ ACTION_ABORT_PRINT;
+ const char *where = NULL;
+ NCR53c7x0_local_setup(host);
+
+ /*
+ * Corrective action is based on where in the SCSI SCRIPT(tm) the error
+ * occurred, as well as which SCSI phase we are currently in.
+ */
+ dsp_next = bus_to_virt(NCR53c7x0_read32(DSP_REG));
+
+ /*
+ * Fetch the current instruction, and remove the operands for easier
+ * interpretation.
+ */
+ dbc_dcmd = NCR53c7x0_read32(DBC_REG);
+ dcmd = (dbc_dcmd & 0xff000000) >> 24;
+ /*
+ * Like other processors, the NCR adjusts the instruction pointer before
+ * instruction decode. Set the DSP address back to what it should
+ * be for this instruction based on its size (2 or 3 32 bit words).
+ */
+ dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
+
+
+ /*
+ * Read new SCSI phase from the SBCL lines. Since all of our code uses
+ * a WHEN conditional instead of an IF conditional, we don't need to
+ * wait for a new REQ.
+ */
+ sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;
+
+ if (!cmd) {
+ action = ACTION_ABORT_PRINT;
+ where = "no current command";
+ /*
+ * The way my SCSI SCRIPTS(tm) are architected, recoverable phase
+ * mismatches should only occur where we're doing a multi-byte
+ * BMI instruction. Specifically, this means
+ *
+ * - select messages (a SCSI-I target may ignore additional messages
+ * after the IDENTIFY; any target may reject a SDTR or WDTR)
+ *
+ * - command out (targets may send a message to signal an error
+ * condition, or go into STATUSIN after they've decided
+ * they don't like the command.
+ *
+ * - reply_message (targets may reject a multi-byte message in the
+ * middle)
+ *
+ * - data transfer routines (command completion with buffer space
+ * left, disconnect message, or error message)
+ */
+ } else if (((dsp >= cmd->data_transfer_start &&
+ dsp < cmd->data_transfer_end)) || dsp == (cmd->residual + 2)) {
+ if ((dcmd & (DCMD_TYPE_MASK|DCMD_BMI_OP_MASK|DCMD_BMI_INDIRECT|
+ DCMD_BMI_MSG|DCMD_BMI_CD)) == (DCMD_TYPE_BMI|
+ DCMD_BMI_OP_MOVE_I)) {
+ residual = datapath_residual (host);
+ if (hostdata->options & OPTION_DEBUG_DISCONNECT)
+ printk ("scsi%d : handling residual transfer (+ %d bytes from DMA FIFO)\n",
+ host->host_no, residual);
+
+ /*
+ * The first instruction is a CALL to the alternate handler for
+ * this data transfer phase, so we can do calls to
+ * munge_msg_restart as we would if control were passed
+ * from normal dynamic code.
+ */
+ if (dsp != cmd->residual + 2) {
+ cmd->residual[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
+ ((dcmd & DCMD_BMI_IO) ? DCMD_TCI_IO : 0)) << 24) |
+ DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
+ cmd->residual[1] = virt_to_bus(hostdata->script)
+ + ((dcmd & DCMD_BMI_IO)
+ ? hostdata->E_other_in : hostdata->E_other_out);
+ }
+
+ /*
+ * The second instruction is the data transfer block
+ * move instruction, reflecting the pointer and count at the
+ * time of the phase mismatch.
+ */
+ cmd->residual[2] = dbc_dcmd + residual;
+ cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;
+
+ /*
+ * The third and final instruction is a jump to the instruction
+ * which follows the instruction which had to be 'split'
+ */
+ if (dsp != cmd->residual + 2) {
+ cmd->residual[4] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP)
+ << 24) | DBC_TCI_TRUE;
+ cmd->residual[5] = virt_to_bus(dsp_next);
+ }
+
+ /*
+ * For the sake of simplicity, transfer control to the
+ * conditional CALL at the start of the residual buffer.
+ */
+ hostdata->dsp = cmd->residual;
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ } else {
+ where = "non-BMI dynamic DSA code";
+ action = ACTION_ABORT_PRINT;
+ }
+ } else if (dsp == (hostdata->script + hostdata->E_select_msgout / 4)) {
+ /* Release ATN */
+ NCR53c7x0_write8 (SOCL_REG, 0);
+ switch (sbcl) {
+ /*
+ * Some devices (SQ555 come to mind) grab the IDENTIFY message
+ * sent on selection, and decide to go into COMMAND OUT phase
+ * rather than accepting the rest of the messages or rejecting
+ * them. Handle these devices gracefully.
+ */
+ case SBCL_PHASE_CMDOUT:
+ hostdata->dsp = dsp + 2 /* two _words_ */;
+ hostdata->dsp_changed = 1;
+ printk ("scsi%d : target %d ignored SDTR and went into COMMAND OUT\n",
+ host->host_no, cmd->cmd->target);
+ cmd->flags &= ~CMD_FLAG_SDTR;
+ action = ACTION_CONTINUE;
+ break;
+ case SBCL_PHASE_MSGIN:
+ hostdata->dsp = hostdata->script + hostdata->E_msg_in /
+ sizeof(u32);
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ break;
+ default:
+ where="select message out";
+ action = ACTION_ABORT_PRINT;
+ }
+ /*
+ * Some SCSI devices will interpret a command as they read the bytes
+ * off the SCSI bus, and may decide that the command is Bogus before
+ * they've read the entire command off the bus.
+ */
+ } else if (dsp == hostdata->script + hostdata->E_cmdout_cmdout / sizeof
+ (u32)) {
+ hostdata->dsp = hostdata->script + hostdata->E_data_transfer /
+ sizeof (u32);
+ hostdata->dsp_changed = 1;
+ action = ACTION_CONTINUE;
+ /* FIXME : we need to handle message reject, etc. within msg_respond. */
+#ifdef notyet
+ } else if (dsp == hostdata->script + hostdata->E_reply_message) {
+ switch (sbcl) {
+ /* Any other phase mismatches abort the currently executing command. */
+#endif
+ } else {
+ where = "unknown location";
+ action = ACTION_ABORT_PRINT;
+ }
+
+ /* Flush DMA FIFO */
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+ if (!(hostdata->dstat & DSTAT_DFE)) {
+ if (NCR53c7x0_read8 (CTEST2_REG_800) & CTEST2_800_DDIR) {
+ printk ("scsi%d: Flushing DMA FIFO\n",
+ host->host_no);
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_FLF);
+ /* FIXME : what about stacked DMA interrupts? */
+ while (!((hostdata->dstat = NCR53c7x0_read8(DSTAT_REG)) &
+ DSTAT_DFE));
+ } else {
+ NCR53c7x0_write8 (CTEST3_REG_800, CTEST3_800_CLF);
+ while (NCR53c7x0_read8 (CTEST3_REG_800) & CTEST3_800_CLF);
+ }
+ hostdata->dstat |= DSTAT_DFE;
+ }
+
+ switch (action) {
+ case ACTION_ABORT_PRINT:
+ printk("scsi%d : %s : unexpected phase %s.\n",
+ host->host_no, where ? where : "unknown location",
+ sbcl_to_phase(sbcl));
+ print_lots (host);
+ /* Fall through to ACTION_ABORT */
+ case ACTION_ABORT:
+ abort_connected (host);
+ break;
+ case ACTION_CONTINUE:
+ break;
+ }
+
+#if 0
+ if (hostdata->dsp_changed) {
+ printk("scsi%d: new dsp 0x%p\n", host->host_no, hostdata->dsp);
+ print_insn (host, hostdata->dsp, "", 1);
+ }
+#endif
+
+}
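+
+/*
+ * Illustrative note (not original driver text): assuming the six u32
+ * cmd->residual buffer built above, the generated fixup decodes roughly as
+ *
+ *   residual[0,1]  conditional CALL to E_other_in / E_other_out
+ *   residual[2,3]  block move with count dbc_dcmd + residual and
+ *                  address DNAD - residual
+ *   residual[4,5]  unconditional JUMP back to dsp_next
+ *
+ * and control is handed to residual[0] by pointing hostdata->dsp at it.
+ */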
+
+/*
+ * Function : static void intr_bf (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle BUS FAULT interrupts
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_bf (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *dsp,
+ *next_dsp, /* Current dsp */
+ *dsa,
+ dbc_dcmd; /* DCMD (high eight bits) + DBC */
+ unsigned short pci_status;
+ int tmp;
+ unsigned long flags;
+ char *reason = NULL;
+ /* Default behavior is for a silent error, with a retry until we've
+ exhausted retries. */
+ enum {MAYBE, ALWAYS, NEVER} retry = MAYBE;
+ int report = 0;
+ NCR53c7x0_local_setup(host);
+
+ dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
+ next_dsp = bus_to_virt (NCR53c7x0_read32(DSP_REG));
+ dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
+/* FIXME - check chip type */
+ dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
+
+ /*
+ * Bus faults can be caused by either a Bad Address or
+ * Target Abort. We should check the Received Target Abort
+ * bit of the PCI status register and Master Abort Bit.
+ *
+ * - Master Abort bit indicates that no device claimed
+ * the address with DEVSEL within five clocks
+ *
+ * - Target Abort bit indicates that a target claimed it,
+ * but changed its mind once it saw the byte enables.
+ *
+ */
+
+ if ((hostdata->chip / 100) == 8) {
+ save_flags (flags);
+ cli();
+ tmp = pcibios_read_config_word (hostdata->pci_bus,
+ hostdata->pci_device_fn, PCI_STATUS, &pci_status);
+ restore_flags (flags);
+ if (tmp == PCIBIOS_SUCCESSFUL) {
+ if (pci_status & PCI_STATUS_REC_TARGET_ABORT) {
+ reason = "PCI target abort";
+ pci_status &= ~PCI_STATUS_REC_TARGET_ABORT;
+ } else if (pci_status & PCI_STATUS_REC_MASTER_ABORT) {
+ reason = "No device asserted PCI DEVSEL within five bus clocks";
+ pci_status &= ~PCI_STATUS_REC_MASTER_ABORT;
+ } else if (pci_status & PCI_STATUS_PARITY) {
+ report = 1;
+ pci_status &= ~PCI_STATUS_PARITY;
+ }
+ } else {
+ printk ("scsi%d : couldn't read status register : %s\n",
+ host->host_no, pcibios_strerror (tmp));
+ retry = NEVER;
+ }
+ }
+
+#ifndef notyet
+ report = 1;
+#endif
+ if (report && reason) {
+ printk(KERN_ALERT "scsi%d : BUS FAULT reason = %s\n",
+ host->host_no, reason ? reason : "unknown");
+ print_lots (host);
+ }
+
+#ifndef notyet
+ retry = NEVER;
+#endif
+
+ /*
+ * TODO : we should attempt to recover from any spurious bus
+ * faults. After X retries, we should figure that things are
+ * sufficiently wedged, and call NCR53c7xx_reset.
+ *
+ * This code should only get executed once we've decided that we
+ * cannot retry.
+ */
+
+ if (retry == NEVER) {
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
+ FATAL (host);
+ }
+}
+
+/*
+ * Function : static void intr_dma (struct Scsi_Host *host,
+ * struct NCR53c7x0_cmd *cmd)
+ *
+ * Purpose : handle all DMA interrupts, indicated by the setting
+ * of the DIP bit in the ISTAT register.
+ *
+ * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
+ * may be NULL.
+ */
+
+static void
+intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned char dstat; /* DSTAT */
+ u32 *dsp,
+ *next_dsp, /* Current dsp */
+ *dsa,
+ dbc_dcmd; /* DCMD (high eight bits) + DBC */
+ int tmp;
+ unsigned long flags;
+ NCR53c7x0_local_setup(host);
+
+ if (!hostdata->dstat_valid) {
+ hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
+ hostdata->dstat_valid = 1;
+ }
+
+ dstat = hostdata->dstat;
+
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk("scsi%d : DSTAT=0x%x\n", host->host_no, (int) dstat);
+
+ dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
+ next_dsp = bus_to_virt(NCR53c7x0_read32(DSP_REG));
+ dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
+/* XXX - check chip type */
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+
+ /*
+ * DSTAT_ABRT is the aborted interrupt. This is set whenever the
+ * SCSI chip is aborted.
+ *
+ * With NCR53c700 and NCR53c700-66 style chips, we should only
+ * get this when the chip is currently running the accept
+ * reselect/select code and we have set the abort bit in the
+ * ISTAT register.
+ *
+ */
+
+ if (dstat & DSTAT_ABRT) {
+#if 0
+ /* XXX - add code here to deal with normal abort */
+ if ((hostdata->options & OPTION_700) && (hostdata->state ==
+ STATE_ABORTING)) {
+ } else
+#endif
+ {
+ printk(KERN_ALERT "scsi%d : unexpected abort interrupt at\n"
+ " ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "s ", 1);
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_SSI is the single step interrupt. Should be generated
+ * whenever we have single stepped or are tracing.
+ */
+
+ if (dstat & DSTAT_SSI) {
+ if (hostdata->options & OPTION_DEBUG_TRACE) {
+ } else if (hostdata->options & OPTION_DEBUG_SINGLE) {
+ print_insn (host, dsp, "s ", 0);
+ save_flags(flags);
+ cli();
+/* XXX - should we do this, or can we get away with writing dsp? */
+
+ NCR53c7x0_write8 (DCNTL_REG, (NCR53c7x0_read8(DCNTL_REG) &
+ ~DCNTL_SSM) | DCNTL_STD);
+ restore_flags(flags);
+ } else {
+ printk(KERN_ALERT "scsi%d : unexpected single step interrupt at\n"
+ " ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_IID / DSTAT_OPC (same bit, same meaning, only the name
+ * is different) is generated whenever an illegal instruction is
+ * encountered.
+ *
+ * XXX - we may want to emulate INTFLY here, so we can use
+ * the same SCSI SCRIPT (tm) for NCR53c710 through NCR53c810
+ * chips.
+ */
+
+ if (dstat & DSTAT_OPC) {
+ /*
+ * Ascertain whether this IID interrupt occurred before or after a STO
+ * interrupt. Since the interrupt handling code now leaves
+ * DSP unmodified until _after_ all stacked interrupts have been
+ * processed, reading the DSP returns the original DSP register.
+ * This means that if dsp lies between the select code and the
+ * message out following the selection code (where the IID interrupt
+ * would have to have occurred due to the implicit wait for REQ),
+ * we have an IID interrupt resulting from a STO condition and
+ * can ignore it.
+ */
+
+ if (((dsp >= (hostdata->script + hostdata->E_select / sizeof(u32))) &&
+ (dsp <= (hostdata->script + hostdata->E_select_msgout /
+ sizeof(u32) + 8))) || (hostdata->test_running == 2)) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : ignoring DSTAT_IID for SSTAT_STO\n",
+ host->host_no);
+ if (hostdata->expecting_iid) {
+ hostdata->expecting_iid = 0;
+ hostdata->idle = 1;
+ if (hostdata->test_running == 2) {
+ hostdata->test_running = 0;
+ hostdata->test_completed = 3;
+ } else if (cmd)
+ abnormal_finished (cmd, DID_BAD_TARGET << 16);
+ } else {
+ hostdata->expecting_sto = 1;
+ }
+ /*
+ * We can't guarantee we'll be able to execute the WAIT DISCONNECT
+ * instruction within the 3.4us of bus free and arbitration delay
+ * that a target can RESELECT in and assert REQ after we've dropped
+ * ACK. If this happens, we'll get an illegal instruction interrupt.
+ * Doing away with the WAIT DISCONNECT instructions broke everything,
+ * so instead I'll settle for moving one WAIT DISCONNECT a few
+ * instructions closer to the CLEAR ACK before it to minimize the
+ * chances of this happening, and handle it if it occurs anyway.
+ *
+ * Simply continue with what we were doing, and control should
+ * be transferred to the schedule routine which will ultimately
+ * pass control onto the reselection or selection (not yet)
+ * code.
+ */
+ } else if (dbc_dcmd == 0x48000000 && (NCR53c7x0_read8 (SBCL_REG) &
+ SBCL_REQ)) {
+ if (!(hostdata->options & OPTION_NO_PRINT_RACE))
+ {
+ printk("scsi%d: REQ before WAIT DISCONNECT IID\n",
+ host->host_no);
+ hostdata->options |= OPTION_NO_PRINT_RACE;
+ }
+ } else {
+ printk(KERN_ALERT "scsi%d : illegal instruction\n", host->host_no);
+ print_lots (host);
+ printk(KERN_ALERT " mail drew@PoohSticks.ORG with ALL\n"
+ " boot messages and diagnostic output\n");
+ FATAL (host);
+ }
+ }
+
+ /*
+ * DSTAT_BF indicates a bus fault error.
+ */
+
+ if (dstat & DSTAT_800_BF) {
+ intr_bf (host, cmd);
+ }
+
+
+ /*
+ * DSTAT_SIR interrupts are generated by the execution of
+ * the INT instruction. Since the exact values available
+ * are determined entirely by the SCSI script running,
+ * and are local to a particular script, a unique handler
+ * is called for each script.
+ */
+
+ if (dstat & DSTAT_SIR) {
+ if (hostdata->options & OPTION_DEBUG_INTR)
+ printk ("scsi%d : DSTAT_SIR\n", host->host_no);
+ switch ((tmp = hostdata->dstat_sir_intr (host, cmd))) {
+ case SPECIFIC_INT_NOTHING:
+ case SPECIFIC_INT_RESTART:
+ break;
+ case SPECIFIC_INT_ABORT:
+ abort_connected(host);
+ break;
+ case SPECIFIC_INT_PANIC:
+ printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT " dstat_sir_intr() returned SPECIFIC_INT_PANIC\n");
+ FATAL (host);
+ break;
+ case SPECIFIC_INT_BREAK:
+ intr_break (host, cmd);
+ break;
+ default:
+ printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
+ print_insn (host, dsp, KERN_ALERT "", 1);
+ printk(KERN_ALERT" dstat_sir_intr() returned unknown value %d\n",
+ tmp);
+ FATAL (host);
+ }
+ }
+
+ if ((hostdata->chip / 100) == 8 && (dstat & DSTAT_800_MDPE)) {
+ printk(KERN_ALERT "scsi%d : Master Data Parity Error\n",
+ host->host_no);
+ FATAL (host);
+ }
+}
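+
+/*
+ * Illustrative summary (not original driver text): intr_dma() above
+ * dispatches on the DSTAT bits roughly as follows -
+ *
+ *   DSTAT_ABRT       fatal unless we requested the abort ourselves
+ *   DSTAT_SSI        single step / trace handling
+ *   DSTAT_OPC (IID)  illegal instruction, which may really be a side
+ *                    effect of a selection timeout (STO)
+ *   DSTAT_800_BF     bus fault, handed to intr_bf()
+ *   DSTAT_SIR        script INT instruction, handed to dstat_sir_intr()
+ *   DSTAT_800_MDPE   master data parity error, fatal
+ */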
+
+/*
+ * Function : static int print_insn (struct Scsi_Host *host,
+ * const u32 *insn, const char *prefix, int kernel)
+ *
+ * Purpose : print numeric representation of the instruction pointed
+ * to by insn to the debugging or kernel message buffer
+ * as appropriate.
+ *
+ * If desired, a user level program can interpret this
+ * information.
+ *
+ * Inputs : host, insn - host, pointer to instruction, prefix -
+ * string to prepend, kernel - use printk instead of debugging buffer.
+ *
+ * Returns : size, in u32s, of instruction printed.
+ */
+
+/*
+ * FIXME: should change kernel parameter so that it takes an ENUM
+ * specifying severity - either KERN_ALERT or KERN_PANIC so
+ * all panic messages are output with the same severity.
+ */
+
+static int
+print_insn (struct Scsi_Host *host, const u32 *insn,
+ const char *prefix, int kernel) {
+  char buf[160],	/* Temporary buffer and pointer. ICKY
+			   arbitrary length. */
+      *tmp;
+ unsigned char dcmd; /* dcmd register for *insn */
+ int size;
+
+ /*
+ * Check to see if the instruction pointer is not bogus before
+ * indirecting through it; avoiding red-zone at start of
+ * memory.
+ *
+ * FIXME: icky magic needs to happen here on non-intel boxes which
+ * don't have kernel memory mapped in like this. Might be reasonable
+ * to use vverify()?
+ */
+
+ if (MAP_NR(insn) < 1 || MAP_NR(insn + 8) > MAP_NR(high_memory) ||
+ ((((dcmd = (insn[0] >> 24) & 0xff) & DCMD_TYPE_MMI) == DCMD_TYPE_MMI) &&
+ MAP_NR(insn + 12) > MAP_NR(high_memory))) {
+ size = 0;
+ sprintf (buf, "%s%p: address out of range\n",
+ prefix, insn);
+ } else {
+/*
+ * FIXME : (void *) cast in virt_to_bus should be unnecessary, because
+ * it should take const void * as argument.
+ */
+ sprintf(buf, "%s0x%lx (virt 0x%p) : 0x%08x 0x%08x (virt 0x%p)",
+ (prefix ? prefix : ""), virt_to_bus((void *) insn), insn,
+ insn[0], insn[1], bus_to_virt (insn[1]));
+ tmp = buf + strlen(buf);
+ if ((dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_MMI) {
+ sprintf (tmp, " 0x%08x (virt 0x%p)\n", insn[2],
+ bus_to_virt(insn[2]));
+ size = 3;
+ } else {
+ sprintf (tmp, "\n");
+ size = 2;
+ }
+ }
+
+ if (kernel)
+ printk ("%s", buf);
+#ifdef NCR_DEBUG
+ else {
+ size_t len = strlen(buf);
+ debugger_kernel_write(host, buf, len);
+ }
+#endif
+ return size;
+}
+
+/*
+ * Function : static const char *ncr_state (int state)
+ *
+ * Purpose : convert state (probably from hostdata->state) to a string
+ *
+ * Inputs : state
+ *
+ * Returns : char * representation of state, "unknown" on error.
+ */
+
+static const char *
+ncr_state (int state) {
+ switch (state) {
+ case STATE_HALTED: return "halted";
+ case STATE_WAITING: return "waiting";
+ case STATE_RUNNING: return "running";
+ case STATE_ABORTING: return "aborting";
+ case STATE_DISABLED: return "disabled";
+ default: return "unknown";
+ }
+}
+
+/*
+ * Function : int NCR53c7xx_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Abort an errant SCSI command, doing all necessary
+ * cleanup of the issue_queue, running_list, shared Linux/NCR
+ * dsa issue and reconnect queues.
+ *
+ * Inputs : cmd - command to abort
+ *
+ * Returns : SCSI_ABORT_SUCCESS, SCSI_ABORT_NOT_RUNNING, SCSI_ABORT_BUSY,
+ * or SCSI_ABORT_ERROR, as appropriate.
+ */
+
+int
+NCR53c7xx_abort (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata = host ? (struct NCR53c7x0_hostdata *)
+ host->hostdata : NULL;
+ unsigned long flags;
+ struct NCR53c7x0_cmd *curr, **prev;
+ Scsi_Cmnd *me, **last;
+#if 0
+ static long cache_pid = -1;
+#endif
+
+
+ if (!host) {
+ printk ("Bogus SCSI command pid %ld; no host structure\n",
+ cmd->pid);
+ return SCSI_ABORT_ERROR;
+ } else if (!hostdata) {
+ printk ("Bogus SCSI host %d; no hostdata\n", host->host_no);
+ return SCSI_ABORT_ERROR;
+ }
+ NCR53c7x0_local_setup(host);
+
+/*
+ * CHECK : I don't think that reading ISTAT will unstack any interrupts,
+ * since we need to write the INTF bit to clear it, and SCSI/DMA
+ * interrupts don't clear until we read SSTAT/SIST and DSTAT registers.
+ *
+ * See that this is the case.
+ *
+ * I suspect that several of our failures may be coming from a new fatal
+ * interrupt (possibly due to a phase mismatch) happening after we've left
+ * the interrupt handler, but before the PIC has had the interrupt condition
+ * cleared.
+ */
+
+ if (NCR53c7x0_read8(hostdata->istat) &
+ (ISTAT_DIP|ISTAT_SIP|
+ (hostdata->chip / 100 == 8 ? ISTAT_800_INTF : 0))) {
+ printk ("scsi%d : dropped interrupt for command %ld\n", host->host_no,
+ cmd->pid);
+ NCR53c7x0_intr (host->irq, NULL, NULL);
+ return SCSI_ABORT_BUSY;
+ }
+
+ save_flags(flags);
+ cli();
+#if 0
+ if (cache_pid == cmd->pid)
+ panic ("scsi%d : bloody fetus %d\n", host->host_no, cmd->pid);
+ else
+ cache_pid = cmd->pid;
+#endif
+
+
+/*
+ * The command could be hiding in the issue_queue. This would be very
+ * nice, as commands can't be moved from the high level driver's issue queue
+ * into the shared queue until an interrupt routine is serviced, and this
+ * moving is atomic.
+ *
+ * If this is the case, we don't have to worry about anything - we simply
+ * pull the command out of the old queue, and call it aborted.
+ */
+
+ for (me = (Scsi_Cmnd *) hostdata->issue_queue,
+ last = (Scsi_Cmnd **) &(hostdata->issue_queue);
+ me && me != cmd; last = (Scsi_Cmnd **)&(me->SCp.ptr),
+ me = (Scsi_Cmnd *)me->SCp.ptr);
+
+ if (me) {
+ *last = (Scsi_Cmnd *) me->SCp.ptr;
+ if (me->host_scribble) {
+ ((struct NCR53c7x0_cmd *)me->host_scribble)->next = hostdata->free;
+ hostdata->free = (struct NCR53c7x0_cmd *) me->host_scribble;
+ me->host_scribble = NULL;
+ }
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+ printk ("scsi%d : found command %ld in Linux issue queue\n",
+ host->host_no, me->pid);
+ restore_flags(flags);
+ run_process_issue_queue();
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * That failing, the command could be in our list of already executing
+ * commands. If this is the case, drastic measures are called for.
+ */
+
+ for (curr = (struct NCR53c7x0_cmd *) hostdata->running_list,
+ prev = (struct NCR53c7x0_cmd **) &(hostdata->running_list);
+ curr && curr->cmd != cmd; prev = (struct NCR53c7x0_cmd **)
+ &(curr->next), curr = (struct NCR53c7x0_cmd *) curr->next);
+
+ if (curr) {
+ if ((cmd->result & 0xff) != 0xff && (cmd->result & 0xff00) != 0xff00) {
+ if (prev)
+ *prev = (struct NCR53c7x0_cmd *) curr->next;
+ curr->next = (struct NCR53c7x0_cmd *) hostdata->free;
+ cmd->host_scribble = NULL;
+ hostdata->free = curr;
+ cmd->scsi_done(cmd);
+ printk ("scsi%d : found finished command %ld in running list\n",
+ host->host_no, cmd->pid);
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ } else {
+ printk ("scsi%d : DANGER : command running, can not abort.\n",
+ cmd->host->host_no);
+ restore_flags(flags);
+ return SCSI_ABORT_BUSY;
+ }
+ }
+
+/*
+ * And if we couldn't find it in any of our queues, it must have been
+ * a dropped interrupt.
+ */
+
+ curr = (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ if (curr) {
+ curr->next = hostdata->free;
+ hostdata->free = curr;
+ cmd->host_scribble = NULL;
+ }
+
+ if (((cmd->result & 0xff00) == 0xff00) ||
+ ((cmd->result & 0xff) == 0xff)) {
+ printk ("scsi%d : did this command ever run?\n", host->host_no);
+ cmd->result = DID_ABORT << 16;
+ } else {
+ printk ("scsi%d : probably lost INTFLY, normal completion\n",
+ host->host_no);
+/*
+ * FIXME : We need to add an additional flag which indicates if a
+ * command was ever counted as BUSY, so if we end up here we can
+ * decrement the busy count if and only if it is necessary.
+ */
+ --hostdata->busy[cmd->target][cmd->lun];
+ }
+ restore_flags(flags);
+ cmd->scsi_done(cmd);
+
+/*
+ * We need to run process_issue_queue since termination of this command
+ * may allow another queued command to execute first?
+ */
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+/*
+ * Function : int NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags)
+ *
+ * Purpose : perform a hard reset of the SCSI bus and NCR
+ * chip.
+ *
+ * Inputs : cmd - command which caused the SCSI RESET
+ *
+ * Returns : SCSI_RESET_SUCCESS on success.
+ */
+
+int
+NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ int found = 0;
+ struct NCR53c7x0_cmd * c;
+ Scsi_Cmnd *tmp;
+ /*
+ * When we call scsi_done(), it's going to wake up anything sleeping on the
+ * resources which were in use by the aborted commands, and we'll start to
+ * get new commands.
+ *
+ * We can't let this happen until after we've re-initialized the driver
+ * structures, and can't reinitialize those structures until after we've
+ * dealt with their contents.
+ *
+ * So, we need to find all of the commands which were running, stick
+ * them on a linked list of completed commands (we'll use the host_scribble
+ * pointer), do our reinitialization, and then call the done function for
+ * each command.
+ */
+ Scsi_Cmnd *nuke_list = NULL;
+ struct Scsi_Host *host = cmd->host;
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+
+ NCR53c7x0_local_setup(host);
+ save_flags(flags);
+ cli();
+ ncr_halt (host);
+ print_lots (host);
+ dump_events (host, 30);
+ ncr_scsi_reset (host);
+ for (tmp = nuke_list = return_outstanding_commands (host, 1 /* free */,
+ 0 /* issue */ ); tmp; tmp = (Scsi_Cmnd *) tmp->SCp.buffer)
+ if (tmp == cmd) {
+ found = 1;
+ break;
+ }
+
+ /*
+ * If we didn't find the command which caused this reset in our running
+ * list, then we've lost it. See that it terminates normally anyway.
+ */
+ if (!found) {
+ c = (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ if (c) {
+ cmd->host_scribble = NULL;
+ c->next = hostdata->free;
+ hostdata->free = c;
+ } else
+ printk ("scsi%d: lost command %ld\n", host->host_no, cmd->pid);
+ cmd->SCp.buffer = (struct scatterlist *) nuke_list;
+ nuke_list = cmd;
+ }
+
+ NCR53c7x0_driver_init (host);
+ hostdata->soft_reset (host);
+ if (hostdata->resets == 0)
+ disable(host);
+ else if (hostdata->resets != -1)
+ --hostdata->resets;
+ sti();
+ for (; nuke_list; nuke_list = tmp) {
+ tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
+ nuke_list->result = DID_RESET << 16;
+ nuke_list->scsi_done (nuke_list);
+ }
+ restore_flags(flags);
+ return SCSI_RESET_SUCCESS;
+}
+
+/*
+ * The NCR SDMS bios follows Annex A of the SCSI-CAM draft, and
+ * therefore shares the scsicam_bios_param function.
+ */
+
+/*
+ * Function : int insn_to_offset (Scsi_Cmnd *cmd, u32 *insn)
+ *
+ * Purpose : convert instructions stored at NCR pointer into data
+ * pointer offset.
+ *
+ * Inputs : cmd - SCSI command; insn - pointer to instruction. Either current
+ * DSP, or saved data pointer.
+ *
+ * Returns : offset on success, -1 on failure.
+ */
+
+
+static int
+insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) cmd->host->hostdata;
+ struct NCR53c7x0_cmd *ncmd =
+ (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ int offset = 0, buffers;
+ struct scatterlist *segment;
+ char *ptr;
+ int found = 0;
+
+/*
+ * With the current code implementation, if the insn is inside dynamically
+ * generated code, the data pointer will be the instruction preceding
+ * the next transfer segment.
+ */
+
+ if (!check_address ((unsigned long) ncmd, sizeof (struct NCR53c7x0_cmd)) &&
+ ((insn >= ncmd->data_transfer_start &&
+ insn < ncmd->data_transfer_end) ||
+ (insn >= ncmd->residual &&
+ insn < (ncmd->residual +
+ sizeof(ncmd->residual))))) {
+ ptr = bus_to_virt(insn[3]);
+
+ if ((buffers = cmd->use_sg)) {
+ for (offset = 0,
+ segment = (struct scatterlist *) cmd->buffer;
+ buffers && !((found = ((ptr >= segment->address) &&
+ (ptr < (segment->address + segment->length)))));
+ --buffers, offset += segment->length, ++segment)
+#if 0
+ printk("scsi%d: comparing 0x%p to 0x%p\n",
+ cmd->host->host_no, saved, segment->address);
+#else
+ ;
+#endif
+ offset += ptr - segment->address;
+ } else {
+ found = 1;
+ offset = ptr - (char *) (cmd->request_buffer);
+ }
+ } else if ((insn >= hostdata->script +
+ hostdata->E_data_transfer / sizeof(u32)) &&
+ (insn <= hostdata->script +
+ hostdata->E_end_data_transfer / sizeof(u32))) {
+ found = 1;
+ offset = 0;
+ }
+ return found ? offset : -1;
+}
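+
+/*
+ * Illustrative note (not original driver text): for a scatter/gather
+ * command the loop above computes, in effect,
+ *
+ *     offset = (sum of the lengths of the segments preceding the one
+ *               which contains ptr) + (ptr - that segment's address)
+ *
+ * while the single-buffer case is simply ptr - cmd->request_buffer.
+ */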
+
+
+
+/*
+ * Function : void print_progress (Scsi_Cmnd *cmd)
+ *
+ * Purpose : print the current location of the saved data pointer
+ *
+ * Inputs : cmd - command we are interested in
+ *
+ */
+
+static void
+print_progress (Scsi_Cmnd *cmd) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_cmd *ncmd =
+ (struct NCR53c7x0_cmd *) cmd->host_scribble;
+ int offset, i;
+ char *where;
+ u32 *ptr;
+ NCR53c7x0_local_setup (cmd->host);
+ for (i = 0; i < 2; ++i) {
+ if (check_address ((unsigned long) ncmd,
+ sizeof (struct NCR53c7x0_cmd)) == -1)
+ continue;
+ if (!i) {
+ where = "saved";
+ ptr = bus_to_virt(ncmd->saved_data_pointer);
+ } else {
+ where = "active";
+ ptr = bus_to_virt (NCR53c7x0_read32 (DSP_REG) -
+ NCR53c7x0_insn_size (NCR53c7x0_read8 (DCMD_REG)) *
+ sizeof(u32));
+ }
+ offset = insn_to_offset (cmd, ptr);
+
+ if (offset != -1)
+ printk ("scsi%d : %s data pointer at offset %d\n",
+ cmd->host->host_no, where, offset);
+ else {
+ int size;
+ printk ("scsi%d : can't determine %s data pointer offset\n",
+ cmd->host->host_no, where);
+ if (ncmd) {
+ size = print_insn (cmd->host,
+ bus_to_virt(ncmd->saved_data_pointer), "", 1);
+ print_insn (cmd->host,
+ bus_to_virt(ncmd->saved_data_pointer) + size * sizeof(u32),
+ "", 1);
+ }
+ }
+ }
+}
+
+
+static void
+print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int i, len;
+ char *ptr;
+ Scsi_Cmnd *cmd;
+
+ if (check_address ((unsigned long) dsa, hostdata->dsa_end -
+ hostdata->dsa_start) == -1) {
+ printk("scsi%d : bad dsa virt 0x%p\n", host->host_no, dsa);
+ return;
+ }
+ printk("%sscsi%d : dsa at phys 0x%lx (virt 0x%p)\n"
+ " + %d : dsa_msgout length = %u, data = 0x%x (virt 0x%p)\n" ,
+ prefix ? prefix : "",
+ host->host_no, virt_to_bus (dsa), dsa, hostdata->dsa_msgout,
+ dsa[hostdata->dsa_msgout / sizeof(u32)],
+ dsa[hostdata->dsa_msgout / sizeof(u32) + 1],
+ bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]));
+
+ /*
+ * Only print messages if they're sane in length so we don't
+ * blow the kernel printk buffer on something which won't buy us
+ * anything.
+ */
+
+ if (dsa[hostdata->dsa_msgout / sizeof(u32)] <
+ sizeof (hostdata->free->select))
+ for (i = dsa[hostdata->dsa_msgout / sizeof(u32)],
+ ptr = bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]);
+ i > 0 && !check_address ((unsigned long) ptr, 1);
+ ptr += len, i -= len) {
+ printk(" ");
+ len = print_msg (ptr);
+ printk("\n");
+ if (!len)
+ break;
+ }
+
+ printk(" + %d : select_indirect = 0x%x\n",
+ hostdata->dsa_select, dsa[hostdata->dsa_select / sizeof(u32)]);
+ cmd = (Scsi_Cmnd *) bus_to_virt(dsa[hostdata->dsa_cmnd / sizeof(u32)]);
+ printk(" + %d : dsa_cmnd = 0x%x ", hostdata->dsa_cmnd,
+ (u32) virt_to_bus(cmd));
+ if (cmd) {
+ printk(" result = 0x%x, target = %d, lun = %d, cmd = ",
+ cmd->result, cmd->target, cmd->lun);
+ print_command(cmd->cmnd);
+ } else
+ printk("\n");
+ printk(" + %d : dsa_next = 0x%x\n", hostdata->dsa_next,
+ dsa[hostdata->dsa_next / sizeof(u32)]);
+ if (cmd) {
+ printk("scsi%d target %d : sxfer_sanity = 0x%x, scntl3_sanity = 0x%x\n"
+ " script : ",
+ host->host_no, cmd->target,
+ hostdata->sync[cmd->target].sxfer_sanity,
+ hostdata->sync[cmd->target].scntl3_sanity);
+ for (i = 0; i < (sizeof(hostdata->sync[cmd->target].script) / 4); ++i)
+ printk ("0x%x ", hostdata->sync[cmd->target].script[i]);
+ printk ("\n");
+ print_progress (cmd);
+ }
+}
+/*
+ * Function : void print_queues (Scsi_Host *host)
+ *
+ * Purpose : print the contents of the NCR issue and reconnect queues
+ *
+ * Inputs : host - SCSI host we are interested in
+ *
+ */
+
+static void
+print_queues (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ u32 *dsa, *next_dsa;
+ volatile u32 *current;
+ int left;
+ Scsi_Cmnd *cmd, *next_cmd;
+ unsigned long flags;
+
+ printk ("scsi%d : issue queue\n", host->host_no);
+
+ for (left = host->can_queue, cmd = (Scsi_Cmnd *) hostdata->issue_queue;
+ left >= 0 && cmd;
+ cmd = next_cmd) {
+ next_cmd = (Scsi_Cmnd *) cmd->SCp.ptr;
+ save_flags(flags);
+ cli();
+ if (cmd->host_scribble) {
+ if (check_address ((unsigned long) (cmd->host_scribble),
+ sizeof (cmd->host_scribble)) == -1)
+ printk ("scsi%d: scsi pid %ld bad pointer to NCR53c7x0_cmd\n",
+ host->host_no, cmd->pid);
+ /* print_dsa does sanity check on address, no need to check */
+ else
+ print_dsa (host, ((struct NCR53c7x0_cmd *) cmd->host_scribble)
+ -> dsa, "");
+ } else
+ printk ("scsi%d : scsi pid %ld for target %d lun %d has no NCR53c7x0_cmd\n",
+ host->host_no, cmd->pid, cmd->target, cmd->lun);
+ restore_flags(flags);
+ }
+
+ if (left <= 0) {
+ printk ("scsi%d : loop detected in issue queue\n",
+ host->host_no);
+ }
+
+ /*
+ * Traverse the NCR reconnect and start DSA structures, printing out
+ * each element until we hit the end or detect a loop. Currently,
+ * the reconnect structure is a linked list and the start structure
+ * is an array. Eventually, the start structure will become a
+ * list as well, since this simplifies the code.
+ */
+
+ printk ("scsi%d : schedule dsa array :\n", host->host_no);
+ for (left = host->can_queue, current = hostdata->schedule;
+ left > 0; current += 2, --left)
+ if (current[0] != hostdata->NOP_insn)
+/* FIXME : convert pointer to dsa_begin to pointer to dsa. */
+ print_dsa (host, bus_to_virt (current[1] -
+ (hostdata->E_dsa_code_begin -
+ hostdata->E_dsa_code_template)), "");
+ printk ("scsi%d : end schedule dsa array\n", host->host_no);
+
+ printk ("scsi%d : reconnect_dsa_head :\n", host->host_no);
+
+ for (left = host->can_queue,
+ dsa = bus_to_virt (hostdata->reconnect_dsa_head);
+ left >= 0 && dsa;
+ dsa = next_dsa) {
+ save_flags (flags);
+ cli();
+ if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1) {
+ printk ("scsi%d: bad DSA pointer 0x%p", host->host_no,
+ dsa);
+ next_dsa = NULL;
+ }
+ else
+ {
+ next_dsa = bus_to_virt(dsa[hostdata->dsa_next / sizeof(u32)]);
+ print_dsa (host, dsa, "");
+ }
+ restore_flags(flags);
+ }
+ printk ("scsi%d : end reconnect_dsa_head\n", host->host_no);
+ if (left < 0)
+ printk("scsi%d: possible loop in ncr reconnect list\n",
+ host->host_no);
+}
+
+static void
+print_lots (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ u32 *dsp_next, *dsp, *dsa, dbc_dcmd;
+ unsigned char dcmd, sbcl;
+ int i, size;
+ NCR53c7x0_local_setup(host);
+
+ if ((dsp_next = bus_to_virt(NCR53c7x0_read32 (DSP_REG)))) {
+ dbc_dcmd = NCR53c7x0_read32(DBC_REG);
+ dcmd = (dbc_dcmd & 0xff000000) >> 24;
+ dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
+ dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
+ sbcl = NCR53c7x0_read8 (SBCL_REG);
+
+
+ printk ("scsi%d : DCMD|DBC=0x%x, DNAD=0x%x (virt 0x%p)\n"
+ " DSA=0x%lx (virt 0x%p)\n"
+ " DSPS=0x%x, TEMP=0x%x (virt 0x%p), DMODE=0x%x\n"
+ " SXFER=0x%x, SCNTL3=0x%x\n"
+ " %s%s%sphase=%s, %d bytes in SCSI FIFO\n"
+ " STEST0=0x%x\n",
+ host->host_no, dbc_dcmd, NCR53c7x0_read32(DNAD_REG),
+ bus_to_virt(NCR53c7x0_read32(DNAD_REG)),
+ virt_to_bus(dsa), dsa,
+ NCR53c7x0_read32(DSPS_REG), NCR53c7x0_read32(TEMP_REG),
+ bus_to_virt (NCR53c7x0_read32(TEMP_REG)),
+ (int) NCR53c7x0_read8(hostdata->dmode),
+ (int) NCR53c7x0_read8(SXFER_REG),
+ (int) NCR53c7x0_read8(SCNTL3_REG_800),
+ (sbcl & SBCL_BSY) ? "BSY " : "",
+ (sbcl & SBCL_SEL) ? "SEL " : "",
+ (sbcl & SBCL_REQ) ? "REQ " : "",
+ sstat2_to_phase(NCR53c7x0_read8 (((hostdata->chip / 100) == 8) ?
+ SSTAT1_REG : SSTAT2_REG)),
+ (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
+ SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT,
+ NCR53c7x0_read8 (STEST0_REG_800));
+ printk ("scsi%d : DSP 0x%lx (virt 0x%p) ->\n", host->host_no,
+ virt_to_bus(dsp), dsp);
+ for (i = 6; i > 0; --i, dsp += size)
+ size = print_insn (host, dsp, "", 1);
+ if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
+ printk ("scsi%d : connected (SDID=0x%x, SSID=0x%x)\n",
+ host->host_no, NCR53c7x0_read8 (SDID_REG_800),
+ NCR53c7x0_read8 (SSID_REG_800));
+ print_dsa (host, dsa, "");
+ }
+
+#if 1
+ print_queues (host);
+#endif
+ }
+}
+
+/*
+ * Function : static int shutdown (struct Scsi_Host *host)
+ *
+ * Purpose : does a clean (we hope) shutdown of the NCR SCSI
+ * chip. Use prior to dumping core, unloading the NCR driver,
+ *
+ * Returns : 0 on success
+ */
+static int
+shutdown (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ NCR53c7x0_local_setup(host);
+ save_flags (flags);
+ cli();
+/* Get in a state where we can reset the SCSI bus */
+ ncr_halt (host);
+ ncr_scsi_reset (host);
+ hostdata->soft_reset(host);
+
+ disable (host);
+ restore_flags (flags);
+ return 0;
+}
+
+/*
+ * Function : void ncr_scsi_reset (struct Scsi_Host *host)
+ *
+ * Purpose : reset the SCSI bus.
+ */
+
+static void
+ncr_scsi_reset (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ int sien = 0;
+ NCR53c7x0_local_setup(host);
+ save_flags (flags);
+ cli();
+ if ((hostdata->chip / 100) == 8) {
+ sien = NCR53c7x0_read8(SIEN0_REG_800);
+ NCR53c7x0_write8(SIEN0_REG_800, sien & ~SIEN_RST);
+ }
+ NCR53c7x0_write8(SCNTL1_REG, SCNTL1_RST);
+ udelay(25); /* Minimum amount of time to assert RST */
+ NCR53c7x0_write8(SCNTL1_REG, 0);
+ if ((hostdata->chip / 100) == 8) {
+ NCR53c7x0_write8(SIEN0_REG_800, sien);
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Function : void hard_reset (struct Scsi_Host *host)
+ *
+ */
+
+static void
+hard_reset (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ save_flags (flags);
+ cli();
+ ncr_scsi_reset(host);
+ NCR53c7x0_driver_init (host);
+ if (hostdata->soft_reset)
+ hostdata->soft_reset (host);
+ restore_flags(flags);
+}
+
+
+/*
+ * Function : Scsi_Cmnd *return_outstanding_commands (struct Scsi_Host *host,
+ * int free, int issue)
+ *
+ * Purpose : return a linked list of the Scsi_Cmnd structures that had
+ * propagated below the Linux issue queue level (threaded through the
+ * SCp.buffer field as next, so we don't perturb hostdata; we don't use
+ * a field of the NCR53c7x0_cmd structure since we may not have
+ * allocated one for the command causing the reset). If free is set,
+ * free the NCR53c7x0_cmd structures which are associated with
+ * the Scsi_Cmnd structures, and clean up any internal
+ * NCR lists that the commands were on. If issue is set,
+ * also return commands in the issue queue.
+ *
+ * Returns : linked list of commands
+ *
+ * NOTE : the caller should ensure that the NCR chip is halted
+ * if the free flag is set.
+ */
+
+static Scsi_Cmnd *
+return_outstanding_commands (struct Scsi_Host *host, int free, int issue) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_cmd *c;
+ int i;
+ u32 *current;
+ Scsi_Cmnd *list = NULL, *tmp;
+ for (c = (struct NCR53c7x0_cmd *) hostdata->running_list; c;
+ c = (struct NCR53c7x0_cmd *) c->next) {
+ if (c->cmd->SCp.buffer) {
+ printk ("scsi%d : loop detected in running list!\n", host->host_no);
+ break;
+ } else {
+ printk ("The sti() implicit in a printk() prevents hangs\n");
+ break;
+ }
+
+ c->cmd->SCp.buffer = (struct scatterlist *) list;
+ list = c->cmd;
+ if (free) {
+ c->next = hostdata->free;
+ hostdata->free = c;
+ }
+ }
+
+ if (free) {
+ for (i = 0, current = (u32 *) hostdata->schedule;
+ i < host->can_queue; ++i, current += 2) {
+ current[0] = hostdata->NOP_insn;
+ current[1] = 0xdeadbeef;
+ }
+ hostdata->current = NULL;
+ }
+
+ if (issue) {
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; tmp = tmp->next) {
+ if (tmp->SCp.buffer) {
+ printk ("scsi%d : loop detected in issue queue!\n",
+ host->host_no);
+ break;
+ }
+ tmp->SCp.buffer = (struct scatterlist *) list;
+ list = tmp;
+ }
+ if (free)
+ hostdata->issue_queue = NULL;
+
+ }
+ return list;
+}
+
+/*
+ * Function : static int disable (struct Scsi_Host *host)
+ *
+ * Purpose : disables the given NCR host, causing all commands
+ * to return a driver error. Call this so we can unload the
+ * module during development and try again. Eventually,
+ * we should be able to find clean workarounds for these
+ * problems.
+ *
+ * Inputs : host - hostadapter to twiddle
+ *
+ * Returns : 0 on success.
+ */
+
+static int
+disable (struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ unsigned long flags;
+ Scsi_Cmnd *nuke_list, *tmp;
+ save_flags(flags);
+ cli();
+ if (hostdata->state != STATE_HALTED)
+ ncr_halt (host);
+ nuke_list = return_outstanding_commands (host, 1 /* free */, 1 /* issue */);
+ hard_reset (host);
+ hostdata->state = STATE_DISABLED;
+ restore_flags(flags);
+ printk ("scsi%d : nuking commands\n", host->host_no);
+ for (; nuke_list; nuke_list = tmp) {
+ tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
+ nuke_list->result = DID_ERROR << 16;
+ nuke_list->scsi_done(nuke_list);
+ }
+ printk ("scsi%d : done. \n", host->host_no);
+ printk (KERN_ALERT "scsi%d : disabled. Unload and reload\n",
+ host->host_no);
+ return 0;
+}
+
+/*
+ * Function : static int ncr_halt (struct Scsi_Host *host)
+ *
+ * Purpose : halts the SCSI SCRIPTS(tm) processor on the NCR chip
+ *
+ * Inputs : host - SCSI chip to halt
+ *
+ * Returns : 0 on success
+ */
+
+static int
+ncr_halt (struct Scsi_Host *host) {
+ NCR53c7x0_local_declare();
+ unsigned long flags;
+ unsigned char istat, tmp;
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ int stage;
+ NCR53c7x0_local_setup(host);
+
+ save_flags(flags);
+ cli();
+ /* Stage 0 : eat all interrupts
+ Stage 1 : set ABORT
+ Stage 2 : eat all but abort interrupts
+ Stage 3 : eat all interrupts
+ */
+ for (stage = 0;;) {
+ if (stage == 1) {
+ NCR53c7x0_write8(hostdata->istat, ISTAT_ABRT);
+ ++stage;
+ }
+ istat = NCR53c7x0_read8 (hostdata->istat);
+ if (istat & ISTAT_SIP) {
+ if ((hostdata->chip / 100) == 8) {
+ tmp = NCR53c7x0_read8(SIST0_REG_800);
+ udelay(1);
+ tmp = NCR53c7x0_read8(SIST1_REG_800);
+ } else {
+ tmp = NCR53c7x0_read8(SSTAT0_REG);
+ }
+ } else if (istat & ISTAT_DIP) {
+ tmp = NCR53c7x0_read8(DSTAT_REG);
+ if (stage == 2) {
+ if (tmp & DSTAT_ABRT) {
+ NCR53c7x0_write8(hostdata->istat, 0);
+ ++stage;
+ } else {
+ printk(KERN_ALERT "scsi%d : could not halt NCR chip\n",
+ host->host_no);
+ disable (host);
+ }
+ }
+ }
+ if (!(istat & (ISTAT_SIP|ISTAT_DIP)))
+ if (stage == 0)
+ ++stage;
+ else if (stage == 3)
+ break;
+ }
+ hostdata->state = STATE_HALTED;
+ restore_flags(flags);
+#if 0
+ print_lots (host);
+#endif
+ return 0;
+}
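+
+/*
+ * Illustrative summary (not original driver text): the loop above is a
+ * small state machine -
+ *
+ *   stage 0 : drain any pending SCSI/DMA interrupts
+ *   stage 1 : write ISTAT_ABRT to ask the SCRIPTS processor to abort
+ *   stage 2 : drain interrupts until DSTAT_ABRT appears, then clear ISTAT
+ *   stage 3 : drain any remaining interrupts; once none are pending, the
+ *             chip is halted and hostdata->state is set to STATE_HALTED
+ */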
+
+/*
+ * Function: event_name (int event)
+ *
+ * Purpose: map event enum into user-readable strings.
+ */
+
+static const char *
+event_name (int event) {
+ switch (event) {
+ case EVENT_NONE: return "none";
+ case EVENT_ISSUE_QUEUE: return "to issue queue";
+ case EVENT_START_QUEUE: return "to start queue";
+ case EVENT_SELECT: return "selected";
+ case EVENT_DISCONNECT: return "disconnected";
+ case EVENT_RESELECT: return "reselected";
+ case EVENT_COMPLETE: return "completed";
+ case EVENT_IDLE: return "idle";
+ case EVENT_SELECT_FAILED: return "select failed";
+ case EVENT_BEFORE_SELECT: return "before select";
+ case EVENT_RESELECT_FAILED: return "reselect failed";
+ default: return "unknown";
+ }
+}
+
+/*
+ * Function : void dump_events (struct Scsi_Host *host, int count)
+ *
+ * Purpose : print last count events which have occurred.
+ */
+static void
+dump_events (struct Scsi_Host *host, int count) {
+ struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
+ host->hostdata;
+ struct NCR53c7x0_event event;
+ int i;
+ unsigned long flags;
+ if (hostdata->events) {
+ if (count > hostdata->event_size)
+ count = hostdata->event_size;
+ for (i = hostdata->event_index; count > 0;
+ i = (i ? i - 1 : hostdata->event_size -1), --count) {
+ save_flags(flags);
+/*
+ * By copying the event we're currently examining with interrupts
+ * disabled, we can do multiple printk(), etc. operations and
+ * still be guaranteed that they're happening on the same
+ * event structure.
+ */
+ cli();
+#if 0
+ event = hostdata->events[i];
+#else
+ memcpy ((void *) &event, (void *) &(hostdata->events[i]),
+ sizeof(event));
+#endif
+
+ restore_flags(flags);
+ printk ("scsi%d : %s event %d at %ld secs %ld usecs target %d lun %d\n",
+ host->host_no, event_name (event.event), count,
+ (long) event.time.tv_sec, (long) event.time.tv_usec,
+ event.target, event.lun);
+ if (event.dsa)
+ printk (" event for dsa 0x%lx (virt 0x%p)\n",
+ virt_to_bus(event.dsa), event.dsa);
+ if (event.pid != -1) {
+ printk (" event for pid %ld ", event.pid);
+ print_command (event.cmnd);
+ }
+ }
+ }
+}
+
+/*
+ * Function: check_address
+ *
+ * Purpose: Check to see if a possibly corrupt pointer will fault the
+ * kernel.
+ *
+ * Inputs: addr - address; size - size of area
+ *
+ * Returns: 0 if area is OK, -1 on error.
+ *
+ * NOTES: should be implemented in terms of vverify on kernels
+ * that have it.
+ */
+
+static int
+check_address (unsigned long addr, int size) {
+ return (MAP_NR(addr) < 1 || MAP_NR(addr + size) > MAP_NR(high_memory) ?
+ -1 : 0);
+}
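+
+/*
+ * Illustrative usage (not original driver code, compiled out): guard a
+ * pointer recovered from the chip before dereferencing it.
+ */
+#if 0
+static void
+check_address_example (u32 bus_addr) {
+    u32 *insn = bus_to_virt (bus_addr);
+    if (check_address ((unsigned long) insn, 3 * sizeof(u32)) == -1)
+	printk ("bogus script pointer 0x%p, not dereferencing it\n", insn);
+    else
+	printk ("script word 0 = 0x%08x\n", insn[0]);
+}
+#endif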
+
+#ifdef MODULE
+int
+NCR53c7x0_release(struct Scsi_Host *host) {
+ struct NCR53c7x0_hostdata *hostdata =
+ (struct NCR53c7x0_hostdata *) host->hostdata;
+ struct NCR53c7x0_cmd *cmd, *tmp;
+ shutdown (host);
+ if (host->irq != IRQ_NONE)
+ {
+ int irq_count;
+ struct Scsi_Host *tmp;
+ for (irq_count = 0, tmp = first_host; tmp; tmp = tmp->next)
+ if (tmp->hostt == the_template && tmp->irq == host->irq)
+ ++irq_count;
+ if (irq_count == 1)
+ free_irq(host->irq, NULL);
+ }
+ if (host->dma_channel != DMA_NONE)
+ free_dma(host->dma_channel);
+ if (host->io_port)
+ release_region(host->io_port, host->n_io_port);
+
+ for (cmd = (struct NCR53c7x0_cmd *) hostdata->free; cmd; cmd = tmp,
+ --hostdata->num_cmds) {
+ tmp = (struct NCR53c7x0_cmd *) cmd->next;
+ /*
+ * If we're going to loop, try to stop it to get a more accurate
+ * count of the leaked commands.
+ */
+ cmd->next = NULL;
+ if (cmd->free)
+ cmd->free ((void *) cmd->real, cmd->size);
+ }
+ if (hostdata->num_cmds)
+ printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
+ host->host_no, hostdata->num_cmds);
+ if (hostdata->events)
+ vfree ((void *)hostdata->events);
+ return 1;
+}
+Scsi_Host_Template driver_template = NCR53c7xx;
+#include "scsi_module.c"
+#endif /* def MODULE */
diff --git a/linux/src/drivers/scsi/53c8xx_d.h b/linux/src/drivers/scsi/53c8xx_d.h
new file mode 100644
index 0000000..b586340
--- /dev/null
+++ b/linux/src/drivers/scsi/53c8xx_d.h
@@ -0,0 +1,2677 @@
+u32 SCRIPT[] = {
+/*
+
+
+; NCR 53c810 driver, main script
+; Sponsored by
+; iX Multiuser Multitasking Magazine
+; hm@ix.de
+;
+; Copyright 1993, 1994, 1995 Drew Eckhardt
+; Visionary Computing
+; (Unix and Linux consulting and custom programming)
+; drew@PoohSticks.ORG
+; +1 (303) 786-7975
+;
+; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
+;
+; PRE-ALPHA
+;
+; For more information, please consult
+;
+; NCR 53C810
+; PCI-SCSI I/O Processor
+; Data Manual
+;
+; NCR 53C710
+; SCSI I/O Processor
+; Programmers Guide
+;
+; NCR Microelectronics
+; 1635 Aeroplaza Drive
+; Colorado Springs, CO 80916
+; 1+ (719) 578-3400
+;
+; Toll free literature number
+; +1 (800) 334-5454
+;
+; IMPORTANT : This code is self modifying due to the limitations of
+; the NCR53c7,8xx series chips. Persons debugging this code with
+; the remote debugger should take this into account, and NOT set
+; breakpoints in modified instructions.
+;
+; Design:
+; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
+; microcontroller using a simple instruction set.
+;
+; So, to minimize the effects of interrupt latency, and to maximize
+; throughput, this driver offloads the practical maximum amount
+; of processing to the SCSI chip while still maintaining a common
+; structure.
+;
+; Where tradeoffs were needed between efficiency on the older
+; chips and the newer NCR53c800 series, the NCR53c800 series
+; was chosen.
+;
+; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
+; automate SCSI transfers without host processor intervention, this
+; isn't the case with the NCR53c710 and newer chips which allow
+;
+; - reads and writes to the internal registers from within the SCSI
+; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
+; state so that multiple threads of execution are possible, and also
+; provide an ALU for loop control, etc.
+;
+; - table indirect addressing for some instructions. This allows
+; pointers to be located relative to the DSA (Data Structure
+; Address) register.
+;
+; These features make it possible to implement a mailbox style interface,
+; where the same piece of code is run to handle I/O for multiple threads
+; at once minimizing our need to relocate code. Since the NCR53c700/
+; NCR53c800 series have a unique combination of features, making a
+; standard ingoing/outgoing mailbox system costly, I've modified it.
+;
+; - Mailboxes are a mixture of code and data. This lets us greatly
+; simplify the NCR53c810 code and do things that would otherwise
+; not be possible.
+;
+; The saved data pointer is now implemented as follows :
+;
+; Control flow has been architected such that if control reaches
+; munge_save_data_pointer, on a restore pointers message or
+; reconnection, a jump to the address formerly in the TEMP register
+; will allow the SCSI command to resume execution.
+;
+
+;
+; Note : the DSA structures must be aligned on 32 bit boundaries,
+; since the source and destination of MOVE MEMORY instructions
+; must share the same alignment and this is the alignment of the
+; NCR registers.
+;
+
+ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
+ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
+ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
+ ; for current dsa
+ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
+ ; sync routine
+ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
+ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
+ ; saved data pointer
+ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
+ ; current residual code
+ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
+ ; saved residual code
+ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
+ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
+ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
+
+;
+; Once a device has initiated reselection, we need to compare it
+; against the singly linked list of commands which have disconnected
+; and are pending reselection. These commands are maintained in
+; an unordered singly linked list of DSA structures, through the
+; DSA pointers at their 'centers' headed by the reconnect_dsa_head
+; pointer.
+;
+; To avoid complications in removing commands from the list,
+; I minimize the amount of expensive (at eight operations per
+; addition @ 500-600ns each) pointer operations which must
+; be done in the NCR driver by precomputing them on the
+; host processor during dsa structure generation.
+;
+; The fixed-up per DSA code knows how to recognize the nexus
+; associated with the corresponding SCSI command, and modifies
+; the source and destination pointers for the MOVE MEMORY
+; instruction which is executed when reselected_ok is called
+; to remove the command from the list. Similarly, DSA is
+; loaded with the address of the next DSA structure and
+; reselected_check_next is called if a failure occurs.
+;
+; Perhaps more concisely, the net effect of the mess is
+;
+; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
+; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
+; src = &dsa->next;
+; if (target_id == dsa->id && target_lun == dsa->lun) {
+; *dest = *src;
+; break;
+; }
+; }
+;
+; if (!dsa)
+; error (int_err_unexpected_reselect);
+; else
+; longjmp (dsa->jump_resume, 0);
+;
+;
+
+
+; Define DSA structure used for mailboxes
+ENTRY dsa_code_template
+dsa_code_template:
+ENTRY dsa_code_begin
+dsa_code_begin:
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000000 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
+
+at 0x00000002 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000005 : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x00000007 : */ 0x88080000,0x00000980,
+/*
+ CALL select
+
+at 0x00000009 : */ 0x88080000,0x000001fc,
+/*
+; Handle the phase mismatch which may have resulted from the
+; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
+; may or may not be necessary, and we should update script_asm.pl
+; to handle multiple pieces.
+ CLEAR ATN
+
+at 0x0000000b : */ 0x60000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000000d : */ 0x60000040,0x00000000,
+/*
+
+; Replace second operand with address of JUMP instruction dest operand
+; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
+ENTRY dsa_code_fix_jump
+dsa_code_fix_jump:
+ MOVE MEMORY 4, NOP_insn, 0
+
+at 0x0000000f : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ JUMP select_done
+
+at 0x00000012 : */ 0x80080000,0x00000224,
+/*
+
+; wrong_dsa loads the DSA register with the value of the dsa_next
+; field.
+;
+wrong_dsa:
+; Patch the MOVE MEMORY INSTRUCTION such that
+; the destination address is the address of the OLD
+; next pointer.
+;
+ MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok + 8
+
+at 0x00000014 : */ 0xc0000004,0x00000000,0x00000758,
+/*
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000017 : */ 0x78380000,0x00000000,
+/*
+;
+; Move the _contents_ of the next pointer into the DSA register as
+; the next I_T_L or I_T_L_Q tuple to check against the established
+; nexus.
+;
+ MOVE MEMORY 4, dsa_temp_next, addr_scratch
+
+at 0x00000019 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x0000001c : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x0000001e : */ 0x88080000,0x00000980,
+/*
+ JUMP reselected_check_next
+
+at 0x00000020 : */ 0x80080000,0x000006a4,
+/*
+
+ABSOLUTE dsa_save_data_pointer = 0
+ENTRY dsa_code_save_data_pointer
+dsa_code_save_data_pointer:
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000022 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_temp, dsa_temp_addr_saved_pointer
+
+at 0x00000024 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000027 : */ 0x78380000,0x00000000,
+/*
+; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
+ MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
+
+at 0x00000029 : */ 0xc0000018,0x00000000,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000002c : */ 0x60000040,0x00000000,
+/*
+
+
+
+ RETURN
+
+at 0x0000002e : */ 0x90080000,0x00000000,
+/*
+ABSOLUTE dsa_restore_pointers = 0
+ENTRY dsa_code_restore_pointers
+dsa_code_restore_pointers:
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000030 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, dsa_temp_addr_saved_pointer, addr_temp
+
+at 0x00000032 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000035 : */ 0x78380000,0x00000000,
+/*
+; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
+ MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
+
+at 0x00000037 : */ 0xc0000018,0x00000000,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000003a : */ 0x60000040,0x00000000,
+/*
+
+
+
+ RETURN
+
+at 0x0000003c : */ 0x90080000,0x00000000,
+/*
+
+ABSOLUTE dsa_check_reselect = 0
+; dsa_check_reselect determines whether or not the current target and
+; lun match the current DSA
+ENTRY dsa_code_check_reselect
+dsa_code_check_reselect:
+ MOVE SSID TO SFBR ; SSID contains 3 bit target ID
+
+at 0x0000003e : */ 0x720a0000,0x00000000,
+/*
+; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
+ JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0xf8
+
+at 0x00000040 : */ 0x8084f800,0x00ffff48,
+/*
+;
+; Hack - move to scratch first, since SFBR is not writeable
+; via the CPU and hence a MOVE MEMORY instruction.
+;
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x00000042 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 1, reselected_identify, addr_scratch
+
+at 0x00000044 : */ 0xc0000001,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000047 : */ 0x78380000,0x00000000,
+/*
+ MOVE SCRATCH0 TO SFBR
+
+at 0x00000049 : */ 0x72340000,0x00000000,
+/*
+; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
+ JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
+
+at 0x0000004b : */ 0x8084f800,0x00ffff1c,
+/*
+; Patch the MOVE MEMORY INSTRUCTION such that
+; the source address is the address of this dsa's
+; next pointer.
+ MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok + 4
+
+at 0x0000004d : */ 0xc0000004,0x00000000,0x00000754,
+/*
+ CALL reselected_ok
+
+at 0x00000050 : */ 0x88080000,0x00000750,
+/*
+ CALL dsa_temp_sync
+
+at 0x00000052 : */ 0x88080000,0x00000000,
+/*
+; Release ACK on the IDENTIFY message _after_ we've set the synchronous
+; transfer parameters!
+ CLEAR ACK
+
+at 0x00000054 : */ 0x60000040,0x00000000,
+/*
+; Implicitly restore pointers on reselection, so a RETURN
+; will transfer control back to the right spot.
+ CALL REL (dsa_code_restore_pointers)
+
+at 0x00000056 : */ 0x88880000,0x00ffff60,
+/*
+ RETURN
+
+at 0x00000058 : */ 0x90080000,0x00000000,
+/*
+ENTRY dsa_zero
+dsa_zero:
+ENTRY dsa_code_template_end
+dsa_code_template_end:
+
+; Perform sanity check : if dsa_fields_start != dsa_code_template_end -
+; dsa_zero, puke.
+
+ABSOLUTE dsa_fields_start = 0 ; Sanity marker
+ ; pad 48 bytes (fix this RSN)
+ABSOLUTE dsa_next = 48 ; len 4 Next DSA
+ ; del 4 Previous DSA address
+ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
+ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
+ ; table indirect select
+ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
+ ; select message
+ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
+ ; command
+ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
+ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
+ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
+ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
+ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
+ ; (Synchronous transfer negotiation, etc).
+ABSOLUTE dsa_end = 112
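+
+; (Illustrative note, not part of the original script source: a host-side
+;  C view of the field offsets above would look roughly like
+;
+;      u32 pad[12];           ; +0   padding up to dsa_next
+;      u32 next;              ; +48  next DSA
+;      u32 prev;              ; +52  previous DSA address (deleted field)
+;      u32 cmnd;              ; +56  Scsi_Cmnd * for this thread
+;      u32 select;            ; +60  table indirect select
+;      u32 msgout[2];         ; +64  select message
+;      u32 cmdout[2];         ; +72  command
+;      u32 dataout;           ; +80  code pointer for dataout
+;      u32 datain;            ; +84  code pointer for datain
+;      u32 msgin[2];          ; +88  msgin
+;      u32 status[2];         ; +96  status byte
+;      u32 msgout_other[2];   ; +104 other message out, ends at dsa_end = 112
+;
+;  the authoritative layout is the one in 53c7,8xx.h.)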
+
+ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
+ ; terminated by a call to JUMP wait_reselect
+
+; Linked lists of DSA structures
+ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
+ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
+ ; address of reconnect_dsa_head
+
+; These select the source and destination of a MOVE MEMORY instruction
+ABSOLUTE dmode_memory_to_memory = 0x0
+ABSOLUTE dmode_memory_to_ncr = 0x0
+ABSOLUTE dmode_ncr_to_memory = 0x0
+
+ABSOLUTE addr_scratch = 0x0
+ABSOLUTE addr_temp = 0x0
+
+
+; Interrupts -
+; MSB indicates type
+; 0 handle error condition
+; 1 handle message
+; 2 handle normal condition
+; 3 debugging interrupt
+; 4 testing interrupt
+; Next byte indicates specific error
+
+; XXX not yet implemented, I'm not sure if I want to -
+; Next byte indicates the routine the error occurred in
+; The LSB indicates the specific place the error occurred
+
+ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
+ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
+ABSOLUTE int_err_unexpected_reselect = 0x00020000
+ABSOLUTE int_err_check_condition = 0x00030000
+ABSOLUTE int_err_no_phase = 0x00040000
+ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
+ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
+ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
+ ; received
+
+ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
+ ; registers.
+ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
+ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
+ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
+ABSOLUTE int_norm_aborted =0x02040000 ; Aborted *dsa
+ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
+ABSOLUTE int_debug_break = 0x03000000 ; Break point
+
+ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
+
+
+ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
+ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
+ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
+
+
+; These should start with 0x05000000, with low bits incrementing for
+; each one.
+
+
+
+ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
+ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
+ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
+ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
+ABSOLUTE NOP_insn = 0 ; NOP instruction
+
+; Pointer to message, potentially multi-byte
+ABSOLUTE msg_buf = 0
+
+; Pointer to holding area for reselection information
+ABSOLUTE reselected_identify = 0
+ABSOLUTE reselected_tag = 0
+
+; Request sense command pointer.  It's a 6-byte command and should
+; be constant for all commands, since we always want 16 bytes of
+; sense data and no longer need to change any fields as we did under
+; SCSI-I, when we actually cared about the LUN field.
+;EXTERNAL NCR53c7xx_sense ; Request sense command
+
+
+; dsa_schedule
+; PURPOSE : after a DISCONNECT message has been received, and pointers
+; saved, insert the current DSA structure at the head of the
+; disconnected queue and fall through to the scheduler.
+;
+; CALLS : OK
+;
+; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
+; of disconnected commands
+;
+; MODIFIES : SCRATCH, reconnect_dsa_head
+;
+; EXITS : always passes control to schedule
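+;
+; In C terms this routine is an ordinary head insertion on a singly linked
+; list that happens to live in host memory, which is why the code below has
+; to patch its own MOVE MEMORY operands.  A hedged host-side equivalent
+; (hypothetical names, ignoring the self-modifying mechanics):
+;
+;	dsa->next          = reconnect_dsa_head;	// old head becomes our successor
+;	reconnect_dsa_head = dsa;			// this DSA becomes the new head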
+
+ENTRY dsa_schedule
+dsa_schedule:
+
+
+
+
+;
+; Calculate the address of the next pointer within the DSA
+; structure of the command that is currently disconnecting
+;
+ CALL dsa_to_scratch
+
+at 0x0000005a : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_next TO SCRATCH0
+
+at 0x0000005c : */ 0x7e343000,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x0000005e : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x00000060 : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x00000062 : */ 0x7f370000,0x00000000,
+/*
+
+; Point the next field of this DSA structure at the current disconnected
+; list
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000064 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
+
+at 0x00000066 : */ 0xc0000004,0x00000000,0x000001b4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000069 : */ 0x78380000,0x00000000,
+/*
+dsa_schedule_insert:
+ MOVE MEMORY 4, reconnect_dsa_head, 0
+
+at 0x0000006b : */ 0xc0000004,0x00000000,0x00000000,
+/*
+
+; And update the head pointer.
+ CALL dsa_to_scratch
+
+at 0x0000006e : */ 0x88080000,0x00000938,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000070 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
+
+at 0x00000072 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000075 : */ 0x78380000,0x00000000,
+/*
+
+
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000077 : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000079 : */ 0x60000040,0x00000000,
+/*
+
+ WAIT DISCONNECT
+
+at 0x0000007b : */ 0x48000000,0x00000000,
+/*
+
+
+
+
+
+
+ JUMP schedule
+
+at 0x0000007d : */ 0x80080000,0x00000000,
+/*
+
+
+;
+; select
+;
+; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
+; On success, the current DSA structure is removed from the issue
+; queue. Usually, this is entered as a fall-through from schedule,
+; although the contingent allegiance handling code will write
+; the select entry address to the DSP to restart a command as a
+; REQUEST SENSE. A message is sent (usually IDENTIFY, although
+; additional SDTR or WDTR messages may be sent). COMMAND OUT
+; is handled.
+;
+; INPUTS : DSA - SCSI command, issue_dsa_head
+;
+; CALLS : NOT OK
+;
+; MODIFIES : SCRATCH, issue_dsa_head
+;
+; EXITS : on reselection or selection, go to select_failed
+; otherwise, RETURN so control is passed back to
+; dsa_begin.
+;
+
+ENTRY select
+select:
+
+
+
+
+
+
+
+
+
+
+
+
+ CLEAR TARGET
+
+at 0x0000007f : */ 0x60000200,0x00000000,
+/*
+
+; XXX
+;
+; In effect, SELECTION operations are backgrounded, with execution
+; continuing until code which waits for REQ or a fatal interrupt is
+; encountered.
+;
+; So, for more performance, we could overlap the code which removes
+; the command from the NCR's issue queue with the selection, but
+; at this point I don't want to deal with the error recovery.
+;
+
+
+ SELECT ATN FROM dsa_select, select_failed
+
+at 0x00000081 : */ 0x4300003c,0x000007a4,
+/*
+ JUMP select_msgout, WHEN MSG_OUT
+
+at 0x00000083 : */ 0x860b0000,0x00000214,
+/*
+ENTRY select_msgout
+select_msgout:
+ MOVE FROM dsa_msgout, WHEN MSG_OUT
+
+at 0x00000085 : */ 0x1e000000,0x00000040,
+/*
+
+
+
+
+
+
+
+
+
+
+ RETURN
+
+at 0x00000087 : */ 0x90080000,0x00000000,
+/*
+
+;
+; select_done
+;
+; PURPOSE: continue on to normal data transfer; called as the exit
+; point from dsa_begin.
+;
+; INPUTS: dsa
+;
+; CALLS: OK
+;
+;
+
+select_done:
+
+
+
+
+
+
+
+; After a successful selection, we should get either a CMD phase or
+; some transfer request negotiation message.
+
+ JUMP cmdout, WHEN CMD
+
+at 0x00000089 : */ 0x820b0000,0x00000244,
+/*
+ INT int_err_unexpected_phase, WHEN NOT MSG_IN
+
+at 0x0000008b : */ 0x9f030000,0x00000000,
+/*
+
+select_msg_in:
+ CALL msg_in, WHEN MSG_IN
+
+at 0x0000008d : */ 0x8f0b0000,0x00000404,
+/*
+ JUMP select_msg_in, WHEN MSG_IN
+
+at 0x0000008f : */ 0x870b0000,0x00000234,
+/*
+
+cmdout:
+ INT int_err_unexpected_phase, WHEN NOT CMD
+
+at 0x00000091 : */ 0x9a030000,0x00000000,
+/*
+
+
+
+ENTRY cmdout_cmdout
+cmdout_cmdout:
+
+ MOVE FROM dsa_cmdout, WHEN CMD
+
+at 0x00000093 : */ 0x1a000000,0x00000048,
+/*
+
+
+
+
+;
+; data_transfer
+; other_out
+; other_in
+; other_transfer
+;
+; PURPOSE : handle the main data transfer for a SCSI command in
+; several parts. In the first part, data_transfer, DATA_IN
+; and DATA_OUT phases are allowed, with the user provided
+; code (usually dynamically generated based on the scatter/gather
+; list associated with a SCSI command) called to handle these
+; phases.
+;
+; After control has passed to one of the user provided
+; DATA_IN or DATA_OUT routines, back calls are made to
+;	other_in or other_out to handle non-DATA IN
+;	and non-DATA OUT phases respectively, with the state of the active
+; data pointer being preserved in TEMP.
+;
+; On completion, the user code passes control to other_transfer
+; which causes DATA_IN and DATA_OUT to result in unexpected_phase
+; interrupts so that data overruns may be trapped.
+;
+; INPUTS : DSA - SCSI command
+;
+; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
+; other_transfer
+;
+; MODIFIES : SCRATCH
+;
+; EXITS : if STATUS IN is detected, signifying command completion,
+; the NCR jumps to command_complete. If MSG IN occurs, a
+; CALL is made to msg_in. Otherwise, other_transfer runs in
+; an infinite loop.
+;
+
+ENTRY data_transfer
+data_transfer:
+ JUMP cmdout_cmdout, WHEN CMD
+
+at 0x00000095 : */ 0x820b0000,0x0000024c,
+/*
+ CALL msg_in, WHEN MSG_IN
+
+at 0x00000097 : */ 0x8f0b0000,0x00000404,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x00000099 : */ 0x9e0b0000,0x00000000,
+/*
+ JUMP do_dataout, WHEN DATA_OUT
+
+at 0x0000009b : */ 0x800b0000,0x0000028c,
+/*
+ JUMP do_datain, WHEN DATA_IN
+
+at 0x0000009d : */ 0x810b0000,0x000002e4,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x0000009f : */ 0x830b0000,0x0000060c,
+/*
+ JUMP data_transfer
+
+at 0x000000a1 : */ 0x80080000,0x00000254,
+/*
+ENTRY end_data_transfer
+end_data_transfer:
+
+;
+; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
+; should be fixed up whenever the nexus changes so it can point to the
+; correct routine for that command.
+;
+
+
+; Nasty jump to dsa->dataout
+do_dataout:
+ CALL dsa_to_scratch
+
+at 0x000000a3 : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
+
+at 0x000000a5 : */ 0x7e345000,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x000000a7 : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x000000a9 : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x000000ab : */ 0x7f370000,0x00000000,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000000ad : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
+
+at 0x000000af : */ 0xc0000004,0x00000000,0x000002d4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000000b2 : */ 0x78380000,0x00000000,
+/*
+dataout_to_jump:
+ MOVE MEMORY 4, 0, dataout_jump + 4
+
+at 0x000000b4 : */ 0xc0000004,0x00000000,0x000002e0,
+/*
+dataout_jump:
+ JUMP 0
+
+at 0x000000b7 : */ 0x80080000,0x00000000,
+/*
+
+; Nasty jump to dsa->datain
+do_datain:
+ CALL dsa_to_scratch
+
+at 0x000000b9 : */ 0x88080000,0x00000938,
+/*
+ MOVE SCRATCH0 + dsa_datain TO SCRATCH0
+
+at 0x000000bb : */ 0x7e345400,0x00000000,
+/*
+ MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
+
+at 0x000000bd : */ 0x7f350000,0x00000000,
+/*
+ MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
+
+at 0x000000bf : */ 0x7f360000,0x00000000,
+/*
+ MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
+
+at 0x000000c1 : */ 0x7f370000,0x00000000,
+/*
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000000c3 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
+
+at 0x000000c5 : */ 0xc0000004,0x00000000,0x0000032c,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000000c8 : */ 0x78380000,0x00000000,
+/*
+ENTRY datain_to_jump
+datain_to_jump:
+ MOVE MEMORY 4, 0, datain_jump + 4
+
+at 0x000000ca : */ 0xc0000004,0x00000000,0x00000338,
+/*
+
+
+
+datain_jump:
+ JUMP 0
+
+at 0x000000cd : */ 0x80080000,0x00000000,
+/*
+
+
+
+; Note that other_out and other_in loop until a non-data phase
+; is discovered, so we only execute return statements when we
+; can go on to the next data phase block move statement.
+
+ENTRY other_out
+other_out:
+
+
+
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000cf : */ 0x9a0b0000,0x00000000,
+/*
+ JUMP msg_in_restart, WHEN MSG_IN
+
+at 0x000000d1 : */ 0x870b0000,0x000003e4,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000d3 : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_IN
+
+at 0x000000d5 : */ 0x990b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000d7 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_out, WHEN NOT DATA_OUT
+
+at 0x000000d9 : */ 0x80030000,0x0000033c,
+/*
+ RETURN
+
+at 0x000000db : */ 0x90080000,0x00000000,
+/*
+
+ENTRY other_in
+other_in:
+
+
+
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000dd : */ 0x9a0b0000,0x00000000,
+/*
+ JUMP msg_in_restart, WHEN MSG_IN
+
+at 0x000000df : */ 0x870b0000,0x000003e4,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000e1 : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_OUT
+
+at 0x000000e3 : */ 0x980b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000e5 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_in, WHEN NOT DATA_IN
+
+at 0x000000e7 : */ 0x81030000,0x00000374,
+/*
+ RETURN
+
+at 0x000000e9 : */ 0x90080000,0x00000000,
+/*
+
+
+ENTRY other_transfer
+other_transfer:
+ INT int_err_unexpected_phase, WHEN CMD
+
+at 0x000000eb : */ 0x9a0b0000,0x00000000,
+/*
+ CALL msg_in, WHEN MSG_IN
+
+at 0x000000ed : */ 0x8f0b0000,0x00000404,
+/*
+ INT int_err_unexpected_phase, WHEN MSG_OUT
+
+at 0x000000ef : */ 0x9e0b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_OUT
+
+at 0x000000f1 : */ 0x980b0000,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN DATA_IN
+
+at 0x000000f3 : */ 0x990b0000,0x00000000,
+/*
+ JUMP command_complete, WHEN STATUS
+
+at 0x000000f5 : */ 0x830b0000,0x0000060c,
+/*
+ JUMP other_transfer
+
+at 0x000000f7 : */ 0x80080000,0x000003ac,
+/*
+
+;
+; msg_in_restart
+; msg_in
+; munge_msg
+;
+; PURPOSE : process messages from a target. msg_in is called when the
+;	caller hasn't read the first byte of the message.  munge_msg
+; is called when the caller has read the first byte of the message,
+; and left it in SFBR. msg_in_restart is called when the caller
+; hasn't read the first byte of the message, and wishes RETURN
+; to transfer control back to the address of the conditional
+; CALL instruction rather than to the instruction after it.
+;
+; Various int_* interrupts are generated when the host system
+; needs to intervene, as is the case with SDTR, WDTR, and
+; INITIATE RECOVERY messages.
+;
+; When the host system handles one of these interrupts,
+; it can respond by reentering at reject_message,
+; which rejects the message and returns control to
+; the caller of msg_in or munge_msg, accept_message
+;	which clears ACK and returns control, or respond_message
+; which sends the message pointed to by the DSA
+; msgout_other table indirect field.
+;
+; DISCONNECT messages are handled by moving the command
+; to the reconnect_dsa_queue.
+;
+; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
+; only)
+;
+; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
+;
+; MODIFIES : SCRATCH, DSA on DISCONNECT
+;
+; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
+; and normal return from message handlers running under
+; Linux, control is returned to the caller. Receipt
+;	of DISCONNECT messages passes control to dsa_schedule.
+;
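+; As a rough C analogue, munge_msg below is a dispatch on the first message
+; byte (a sketch only; the real dispatch is the chain of conditional JUMPs
+; that follows, and the helper names here are illustrative):
+;
+;	switch (first_byte) {
+;	case 0x01: handle_extended_message();  break;	// EXTENDED MESSAGE (SDTR/WDTR)
+;	case 0x02: jump_dsa_save();            break;	// SAVE DATA POINTER
+;	case 0x03: jump_dsa_restore();         break;	// RESTORE POINTERS
+;	case 0x04: dsa_schedule();             break;	// DISCONNECT
+;	case 0x07:					// MESSAGE REJECT
+;	case 0x0f: interrupt_host(int_msg_1);  break;	// INITIATE RECOVERY
+;	default:   reject_message();           break;	// includes the 0x20-0x2f two byte messages
+;	}
+;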
+ENTRY msg_in_restart
+msg_in_restart:
+; XXX - hackish
+;
+; Since it's easier to debug changes to the statically
+; compiled code, rather than the dynamically generated
+; stuff, such as
+;
+; MOVE x, y, WHEN data_phase
+; CALL other_z, WHEN NOT data_phase
+; MOVE x, y, WHEN data_phase
+;
+; I'd like to have certain routines (notably the message handler)
+; restart on the conditional call rather than the next instruction.
+;
+; So, subtract 8 from the return address
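+;
+; In C the four byte-wide adds below amount to the following (a hedged
+; sketch, with a hypothetical variable standing in for the TEMP register):
+;
+;	u32 temp = return_address;
+;	temp += 0xfffffff8;	// i.e. temp -= 8, done as four byte adds
+;				// with carry propagation on the 8-bit ALU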
+
+ MOVE TEMP0 + 0xf8 TO TEMP0
+
+at 0x000000f9 : */ 0x7e1cf800,0x00000000,
+/*
+ MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
+
+at 0x000000fb : */ 0x7f1dff00,0x00000000,
+/*
+ MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
+
+at 0x000000fd : */ 0x7f1eff00,0x00000000,
+/*
+ MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
+
+at 0x000000ff : */ 0x7f1fff00,0x00000000,
+/*
+
+ENTRY msg_in
+msg_in:
+ MOVE 1, msg_buf, WHEN MSG_IN
+
+at 0x00000101 : */ 0x0f000001,0x00000000,
+/*
+
+munge_msg:
+ JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
+
+at 0x00000103 : */ 0x800c0001,0x00000524,
+/*
+ JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
+
+at 0x00000105 : */ 0x800cdf20,0x0000044c,
+/*
+;
+; XXX - I've seen a handful of broken SCSI devices which fail to issue
+; a SAVE POINTERS message before disconnecting in the middle of
+; a transfer, assuming that the DATA POINTER will be implicitly
+; restored.
+;
+; Historically, I've often done an implicit save when the DISCONNECT
+; message is processed. We may want to consider having the option of
+; doing that here.
+;
+ JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
+
+at 0x00000107 : */ 0x800c0002,0x00000454,
+/*
+ JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
+
+at 0x00000109 : */ 0x800c0003,0x000004b8,
+/*
+ JUMP munge_disconnect, IF 0x04 ; DISCONNECT
+
+at 0x0000010b : */ 0x800c0004,0x0000051c,
+/*
+ INT int_msg_1, IF 0x07 ; MESSAGE REJECT
+
+at 0x0000010d : */ 0x980c0007,0x01020000,
+/*
+ INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
+
+at 0x0000010f : */ 0x980c000f,0x01020000,
+/*
+
+
+
+ JUMP reject_message
+
+at 0x00000111 : */ 0x80080000,0x000005b4,
+/*
+
+munge_2:
+ JUMP reject_message
+
+at 0x00000113 : */ 0x80080000,0x000005b4,
+/*
+;
+; The SCSI standard allows targets to recover from transient
+; error conditions by backing up the data pointer with a
+; RESTORE POINTERS message.
+;
+; So, we must save and restore the _residual_ code as well as
+; the current instruction pointer. Because of this messiness,
+; it is simpler to put dynamic code in the dsa for this and to
+; just do a simple jump down there.
+;
+
+munge_save_data_pointer:
+ MOVE DSA0 + dsa_save_data_pointer TO SFBR
+
+at 0x00000115 : */ 0x76100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000117 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 + 0xff TO SFBR WITH CARRY
+
+at 0x00000119 : */ 0x7711ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x0000011b : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 + 0xff TO SFBR WITH CARRY
+
+at 0x0000011d : */ 0x7712ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x0000011f : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 + 0xff TO SFBR WITH CARRY
+
+at 0x00000121 : */ 0x7713ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x00000123 : */ 0x6a370000,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x00000125 : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
+
+at 0x00000127 : */ 0xc0000004,0x00000000,0x000004b4,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x0000012a : */ 0x78380000,0x00000000,
+/*
+jump_dsa_save:
+ JUMP 0
+
+at 0x0000012c : */ 0x80080000,0x00000000,
+/*
+
+munge_restore_pointers:
+ MOVE DSA0 + dsa_restore_pointers TO SFBR
+
+at 0x0000012e : */ 0x76100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000130 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 + 0xff TO SFBR WITH CARRY
+
+at 0x00000132 : */ 0x7711ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x00000134 : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 + 0xff TO SFBR WITH CARRY
+
+at 0x00000136 : */ 0x7712ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x00000138 : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 + 0xff TO SFBR WITH CARRY
+
+at 0x0000013a : */ 0x7713ff00,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x0000013c : */ 0x6a370000,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x0000013e : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
+
+at 0x00000140 : */ 0xc0000004,0x00000000,0x00000518,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000143 : */ 0x78380000,0x00000000,
+/*
+jump_dsa_restore:
+ JUMP 0
+
+at 0x00000145 : */ 0x80080000,0x00000000,
+/*
+
+
+munge_disconnect:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ JUMP dsa_schedule
+
+at 0x00000147 : */ 0x80080000,0x00000168,
+/*
+
+
+
+
+
+munge_extended:
+ CLEAR ACK
+
+at 0x00000149 : */ 0x60000040,0x00000000,
+/*
+ INT int_err_unexpected_phase, WHEN NOT MSG_IN
+
+at 0x0000014b : */ 0x9f030000,0x00000000,
+/*
+ MOVE 1, msg_buf + 1, WHEN MSG_IN
+
+at 0x0000014d : */ 0x0f000001,0x00000001,
+/*
+ JUMP munge_extended_2, IF 0x02
+
+at 0x0000014f : */ 0x800c0002,0x00000554,
+/*
+ JUMP munge_extended_3, IF 0x03
+
+at 0x00000151 : */ 0x800c0003,0x00000584,
+/*
+ JUMP reject_message
+
+at 0x00000153 : */ 0x80080000,0x000005b4,
+/*
+
+munge_extended_2:
+ CLEAR ACK
+
+at 0x00000155 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 2, WHEN MSG_IN
+
+at 0x00000157 : */ 0x0f000001,0x00000002,
+/*
+ JUMP reject_message, IF NOT 0x02 ; Must be WDTR
+
+at 0x00000159 : */ 0x80040002,0x000005b4,
+/*
+ CLEAR ACK
+
+at 0x0000015b : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 3, WHEN MSG_IN
+
+at 0x0000015d : */ 0x0f000001,0x00000003,
+/*
+ INT int_msg_wdtr
+
+at 0x0000015f : */ 0x98080000,0x01000000,
+/*
+
+munge_extended_3:
+ CLEAR ACK
+
+at 0x00000161 : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, msg_buf + 2, WHEN MSG_IN
+
+at 0x00000163 : */ 0x0f000001,0x00000002,
+/*
+ JUMP reject_message, IF NOT 0x01 ; Must be SDTR
+
+at 0x00000165 : */ 0x80040001,0x000005b4,
+/*
+ CLEAR ACK
+
+at 0x00000167 : */ 0x60000040,0x00000000,
+/*
+ MOVE 2, msg_buf + 3, WHEN MSG_IN
+
+at 0x00000169 : */ 0x0f000002,0x00000003,
+/*
+ INT int_msg_sdtr
+
+at 0x0000016b : */ 0x98080000,0x01010000,
+/*
+
+ENTRY reject_message
+reject_message:
+ SET ATN
+
+at 0x0000016d : */ 0x58000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000016f : */ 0x60000040,0x00000000,
+/*
+ MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
+
+at 0x00000171 : */ 0x0e000001,0x00000000,
+/*
+ RETURN
+
+at 0x00000173 : */ 0x90080000,0x00000000,
+/*
+
+ENTRY accept_message
+accept_message:
+ CLEAR ATN
+
+at 0x00000175 : */ 0x60000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000177 : */ 0x60000040,0x00000000,
+/*
+ RETURN
+
+at 0x00000179 : */ 0x90080000,0x00000000,
+/*
+
+ENTRY respond_message
+respond_message:
+ SET ATN
+
+at 0x0000017b : */ 0x58000008,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000017d : */ 0x60000040,0x00000000,
+/*
+ MOVE FROM dsa_msgout_other, WHEN MSG_OUT
+
+at 0x0000017f : */ 0x1e000000,0x00000068,
+/*
+ RETURN
+
+at 0x00000181 : */ 0x90080000,0x00000000,
+/*
+
+;
+; command_complete
+;
+; PURPOSE : handle command termination when STATUS IN is detected by reading
+; a status byte followed by a command termination message.
+;
+; Normal termination results in an INTFLY instruction, and
+; the host system can pick out which command terminated by
+; examining the MESSAGE and STATUS buffers of all currently
+; executing commands;
+;
+; Abnormal (CHECK_CONDITION) termination results in an
+; int_err_check_condition interrupt so that a REQUEST SENSE
+; command can be issued out-of-order so that no other command
+; clears the contingent allegiance condition.
+;
+;
+; INPUTS : DSA - command
+;
+; CALLS : OK
+;
+; EXITS : On successful termination, control is passed to schedule.
+; On abnormal termination, the user will usually modify the
+; DSA fields and corresponding buffers and return control
+; to select.
+;
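+; A hypothetical host-side rendering of the policy just described (a sketch
+; of the exit behaviour, not of the exact instruction sequence; names are
+; illustrative):
+;
+;	if (status_byte == 0x02)			// CHECK CONDITION
+;		interrupt_host(int_err_check_condition);// issue REQUEST SENSE out of order
+;	else
+;		intfly();				// normal completion; host scans its buffers
+;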
+
+ENTRY command_complete
+command_complete:
+ MOVE FROM dsa_status, WHEN STATUS
+
+at 0x00000183 : */ 0x1b000000,0x00000060,
+/*
+
+ MOVE SFBR TO SCRATCH0 ; Save status
+
+at 0x00000185 : */ 0x6a340000,0x00000000,
+/*
+
+ENTRY command_complete_msgin
+command_complete_msgin:
+ MOVE FROM dsa_msgin, WHEN MSG_IN
+
+at 0x00000187 : */ 0x1f000000,0x00000058,
+/*
+; Indicate that we should be expecting a disconnect
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000189 : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x0000018b : */ 0x60000040,0x00000000,
+/*
+
+ WAIT DISCONNECT
+
+at 0x0000018d : */ 0x48000000,0x00000000,
+/*
+
+;
+; The SCSI specification states that when a UNIT ATTENTION condition
+; is pending, as indicated by a CHECK CONDITION status message,
+; the target shall revert to asynchronous transfers. Since
+; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
+; basis, and returning control to our scheduler could start a command
+; running on another LUN of that target with the old parameters, we must
+; interrupt the host processor to get them changed, or change them ourselves.
+;
+; Once SCSI-II tagged queueing is implemented, things will be even more
+; hairy, since contingent allegiance conditions exist on a per-target/lun
+; basis, and issuing a new command with a different tag would clear it.
+; In these cases, we must interrupt the host processor to get a request
+; added to the HEAD of the queue with the request sense command, or we
+; must automatically issue the request sense command.
+
+
+
+
+
+ INTFLY
+
+at 0x0000018f : */ 0x98180000,0x00000000,
+/*
+
+
+
+
+
+ JUMP schedule
+
+at 0x00000191 : */ 0x80080000,0x00000000,
+/*
+command_failed:
+ INT int_err_check_condition
+
+at 0x00000193 : */ 0x98080000,0x00030000,
+/*
+
+
+
+
+;
+; wait_reselect
+;
+; PURPOSE : This is essentially the idle routine, where control lands
+; when there are no new processes to schedule. wait_reselect
+; waits for reselection, selection, and new commands.
+;
+; When a successful reselection occurs, with the aid
+; of fixed up code in each DSA, wait_reselect walks the
+;	reconnect_dsa_queue, asking each DSA whether the target ID
+;	and LUN match its own.
+;
+;	If a match is found, a call is made back to reselected_ok,
+;	which, through the miracles of self-modifying code, extracts
+;	the found DSA from the reconnect_dsa_queue and then
+;	returns control to the DSA's thread of execution.
+;
+; INPUTS : NONE
+;
+; CALLS : OK
+;
+; MODIFIES : DSA,
+;
+; EXITS : On successful reselection, control is returned to the
+; DSA which called reselected_ok. If the WAIT RESELECT
+;	was interrupted by a new command's arrival signaled by SIG_P,
+; SIG_P, control is passed to schedule. If the NCR is
+; selected, the host system is interrupted with an
+; int_err_selected which is usually responded to by
+; setting DSP to the target_abort address.
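+;
+;	The reselection walk described above is, in effect, a linear search
+;	of a singly linked list keyed on target and LUN.  A hedged C sketch
+;	(the real search jumps through dsa_check_reselect code embedded in
+;	each DSA; names here are illustrative):
+;
+;	for (dsa = reconnect_dsa_head; dsa != NULL; dsa = dsa->next)
+;		if (dsa->target == reselected_id && dsa->lun == reselected_lun)
+;			return dsa;	// reselected_ok unlinks it and resumes its thread
+;	interrupt_host(int_err_unexpected_reselect);	// ran off the end, nobody claimed it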
+
+ENTRY wait_reselect
+wait_reselect:
+
+
+
+
+
+
+ WAIT RESELECT wait_reselect_failed
+
+at 0x00000195 : */ 0x50000000,0x0000076c,
+/*
+
+reselected:
+
+
+
+ CLEAR TARGET
+
+at 0x00000197 : */ 0x60000200,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x00000199 : */ 0x78380000,0x00000000,
+/*
+ ; Read all data needed to reestablish the nexus -
+ MOVE 1, reselected_identify, WHEN MSG_IN
+
+at 0x0000019b : */ 0x0f000001,0x00000000,
+/*
+ ; We used to CLEAR ACK here.
+
+
+
+
+
+ ; Point DSA at the current head of the disconnected queue.
+ MOVE dmode_memory_to_ncr TO DMODE
+
+at 0x0000019d : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
+
+at 0x0000019f : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000001a2 : */ 0x78380000,0x00000000,
+/*
+ CALL scratch_to_dsa
+
+at 0x000001a4 : */ 0x88080000,0x00000980,
+/*
+
+ ; Fix the update-next pointer so that the reconnect_dsa_head
+ ; pointer is the one that will be updated if this DSA is a hit
+ ; and we remove it from the queue.
+
+ MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok + 8
+
+at 0x000001a6 : */ 0xc0000004,0x00000000,0x00000758,
+/*
+
+ENTRY reselected_check_next
+reselected_check_next:
+
+
+
+ ; Check for a NULL pointer.
+ MOVE DSA0 TO SFBR
+
+at 0x000001a9 : */ 0x72100000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001ab : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x000001ad : */ 0x72110000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001af : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x000001b1 : */ 0x72120000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001b3 : */ 0x80040000,0x000006ec,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x000001b5 : */ 0x72130000,0x00000000,
+/*
+ JUMP reselected_not_end, IF NOT 0
+
+at 0x000001b7 : */ 0x80040000,0x000006ec,
+/*
+ INT int_err_unexpected_reselect
+
+at 0x000001b9 : */ 0x98080000,0x00020000,
+/*
+
+reselected_not_end:
+ ;
+ ; XXX the ALU is only eight bits wide, and the assembler
+	; won't do the dirty work for us.  As long as dsa_check_reselect
+ ; is negative, we need to sign extend with 1 bits to the full
+ ; 32 bit width of the address.
+ ;
+	; A potential workaround would be to have a known alignment
+ ; of the DSA structure such that the base address plus
+ ; dsa_check_reselect doesn't require carrying from bytes
+ ; higher than the LSB.
+ ;
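+	; In C, the sign extension done by hand below is simply (hypothetical
+	; names, assuming dsa_check_reselect is negative):
+	;
+	;	u32 target_addr = dsa + (u32)(s32)(s8)dsa_check_reselect;
+	;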
+
+ MOVE DSA0 TO SFBR
+
+at 0x000001bb : */ 0x72100000,0x00000000,
+/*
+ MOVE SFBR + dsa_check_reselect TO SCRATCH0
+
+at 0x000001bd : */ 0x6e340000,0x00000000,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x000001bf : */ 0x72110000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
+
+at 0x000001c1 : */ 0x6f35ff00,0x00000000,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x000001c3 : */ 0x72120000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
+
+at 0x000001c5 : */ 0x6f36ff00,0x00000000,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x000001c7 : */ 0x72130000,0x00000000,
+/*
+ MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
+
+at 0x000001c9 : */ 0x6f37ff00,0x00000000,
+/*
+
+ MOVE dmode_ncr_to_memory TO DMODE
+
+at 0x000001cb : */ 0x78380000,0x00000000,
+/*
+ MOVE MEMORY 4, addr_scratch, reselected_check + 4
+
+at 0x000001cd : */ 0xc0000004,0x00000000,0x0000074c,
+/*
+ MOVE dmode_memory_to_memory TO DMODE
+
+at 0x000001d0 : */ 0x78380000,0x00000000,
+/*
+reselected_check:
+ JUMP 0
+
+at 0x000001d2 : */ 0x80080000,0x00000000,
+/*
+
+
+;
+;
+ENTRY reselected_ok
+reselected_ok:
+ MOVE MEMORY 4, 0, 0 ; Patched : first word
+
+at 0x000001d4 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ ; is address of
+ ; successful dsa_next
+ ; Second word is last
+ ; unsuccessful dsa_next,
+ ; starting with
+ ; dsa_reconnect_head
+ ; We used to CLEAR ACK here.
+
+
+
+
+
+
+ RETURN ; Return control to where
+
+at 0x000001d7 : */ 0x90080000,0x00000000,
+/*
+
+
+
+
+selected:
+ INT int_err_selected;
+
+at 0x000001d9 : */ 0x98080000,0x00010000,
+/*
+
+;
+; A select or reselect failure can be caused by one of three conditions :
+; 1. SIG_P was set. This will be the case if the user has written
+; a new value to a previously NULL head of the issue queue.
+;
+; 2. The NCR53c810 was selected or reselected by another device.
+;
+; 3. The bus was already busy since we were selected or reselected
+; before starting the command.
+
+wait_reselect_failed:
+
+
+
+; Check selected bit.
+ MOVE SIST0 & 0x20 TO SFBR
+
+at 0x000001db : */ 0x74422000,0x00000000,
+/*
+ JUMP selected, IF 0x20
+
+at 0x000001dd : */ 0x800c0020,0x00000764,
+/*
+; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
+ MOVE CTEST2 & 0x40 TO SFBR
+
+at 0x000001df : */ 0x741a4000,0x00000000,
+/*
+ JUMP schedule, IF 0x40
+
+at 0x000001e1 : */ 0x800c0040,0x00000000,
+/*
+; Check connected bit.
+; FIXME: this needs to change if we support target mode
+ MOVE ISTAT & 0x08 TO SFBR
+
+at 0x000001e3 : */ 0x74140800,0x00000000,
+/*
+ JUMP reselected, IF 0x08
+
+at 0x000001e5 : */ 0x800c0008,0x0000065c,
+/*
+; FIXME : Something bogus happened, and we shouldn't fail silently.
+
+
+
+ INT int_debug_panic
+
+at 0x000001e7 : */ 0x98080000,0x030b0000,
+/*
+
+
+
+select_failed:
+
+
+
+; Otherwise, mask the selected and reselected bits off SIST0
+ MOVE SIST0 & 0x30 TO SFBR
+
+at 0x000001e9 : */ 0x74423000,0x00000000,
+/*
+ JUMP selected, IF 0x20
+
+at 0x000001eb : */ 0x800c0020,0x00000764,
+/*
+ JUMP reselected, IF 0x10
+
+at 0x000001ed : */ 0x800c0010,0x0000065c,
+/*
+; If SIGP is set, the user just gave us another command, and
+; we should restart or return to the scheduler.
+; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
+ MOVE CTEST2 & 0x40 TO SFBR
+
+at 0x000001ef : */ 0x741a4000,0x00000000,
+/*
+ JUMP select, IF 0x40
+
+at 0x000001f1 : */ 0x800c0040,0x000001fc,
+/*
+; Check connected bit.
+; FIXME: this needs to change if we support target mode
+; FIXME: is this really necessary?
+ MOVE ISTAT & 0x08 TO SFBR
+
+at 0x000001f3 : */ 0x74140800,0x00000000,
+/*
+ JUMP reselected, IF 0x08
+
+at 0x000001f5 : */ 0x800c0008,0x0000065c,
+/*
+; FIXME : Something bogus happened, and we shouldn't fail silently.
+
+
+
+ INT int_debug_panic
+
+at 0x000001f7 : */ 0x98080000,0x030b0000,
+/*
+
+
+;
+; test_1
+; test_2
+;
+; PURPOSE : run some verification tests on the NCR. test_1
+; copies test_src to test_dest and interrupts the host
+; processor, testing for cache coherency and interrupt
+;	problems in the process.
+;
+; test_2 runs a command with offsets relative to the
+; DSA on entry, and is useful for miscellaneous experimentation.
+;
+
+; Verify that interrupts are working correctly and that we don't
+; have a cache invalidation problem.
+
+ABSOLUTE test_src = 0, test_dest = 0
+ENTRY test_1
+test_1:
+ MOVE MEMORY 4, test_src, test_dest
+
+at 0x000001f9 : */ 0xc0000004,0x00000000,0x00000000,
+/*
+ INT int_test_1
+
+at 0x000001fc : */ 0x98080000,0x04000000,
+/*
+
+;
+; Run arbitrary commands, with test code establishing a DSA
+;
+
+ENTRY test_2
+test_2:
+ CLEAR TARGET
+
+at 0x000001fe : */ 0x60000200,0x00000000,
+/*
+ SELECT ATN FROM 0, test_2_fail
+
+at 0x00000200 : */ 0x43000000,0x00000850,
+/*
+ JUMP test_2_msgout, WHEN MSG_OUT
+
+at 0x00000202 : */ 0x860b0000,0x00000810,
+/*
+ENTRY test_2_msgout
+test_2_msgout:
+ MOVE FROM 8, WHEN MSG_OUT
+
+at 0x00000204 : */ 0x1e000000,0x00000008,
+/*
+ MOVE FROM 16, WHEN CMD
+
+at 0x00000206 : */ 0x1a000000,0x00000010,
+/*
+ MOVE FROM 24, WHEN DATA_IN
+
+at 0x00000208 : */ 0x19000000,0x00000018,
+/*
+ MOVE FROM 32, WHEN STATUS
+
+at 0x0000020a : */ 0x1b000000,0x00000020,
+/*
+ MOVE FROM 40, WHEN MSG_IN
+
+at 0x0000020c : */ 0x1f000000,0x00000028,
+/*
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x0000020e : */ 0x7c027f00,0x00000000,
+/*
+ CLEAR ACK
+
+at 0x00000210 : */ 0x60000040,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x00000212 : */ 0x48000000,0x00000000,
+/*
+test_2_fail:
+ INT int_test_2
+
+at 0x00000214 : */ 0x98080000,0x04010000,
+/*
+
+ENTRY debug_break
+debug_break:
+ INT int_debug_break
+
+at 0x00000216 : */ 0x98080000,0x03000000,
+/*
+
+;
+; initiator_abort
+; target_abort
+;
+; PURPOSE : Abort the currently established nexus from within initiator
+; or target mode.
+;
+;
+
+ENTRY target_abort
+target_abort:
+ SET TARGET
+
+at 0x00000218 : */ 0x58000200,0x00000000,
+/*
+ DISCONNECT
+
+at 0x0000021a : */ 0x48000000,0x00000000,
+/*
+ CLEAR TARGET
+
+at 0x0000021c : */ 0x60000200,0x00000000,
+/*
+ JUMP schedule
+
+at 0x0000021e : */ 0x80080000,0x00000000,
+/*
+
+ENTRY initiator_abort
+initiator_abort:
+ SET ATN
+
+at 0x00000220 : */ 0x58000008,0x00000000,
+/*
+;
+; The SCSI-I specification says that targets may go into MSG out at
+; their leisure upon receipt of the ATN signal.  On all versions of the
+; specification, we can't change phases until REQ transitions true->false,
+; so we need to sink/source one byte of data to allow the transition.
+;
+; For the sake of safety, we'll only source one byte of data in all
+; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
+; arbitrary number of bytes.
+ JUMP spew_cmd, WHEN CMD
+
+at 0x00000222 : */ 0x820b0000,0x000008b8,
+/*
+ JUMP eat_msgin, WHEN MSG_IN
+
+at 0x00000224 : */ 0x870b0000,0x000008c8,
+/*
+ JUMP eat_datain, WHEN DATA_IN
+
+at 0x00000226 : */ 0x810b0000,0x000008f8,
+/*
+ JUMP eat_status, WHEN STATUS
+
+at 0x00000228 : */ 0x830b0000,0x000008e0,
+/*
+ JUMP spew_dataout, WHEN DATA_OUT
+
+at 0x0000022a : */ 0x800b0000,0x00000910,
+/*
+ JUMP sated
+
+at 0x0000022c : */ 0x80080000,0x00000918,
+/*
+spew_cmd:
+ MOVE 1, NCR53c7xx_zero, WHEN CMD
+
+at 0x0000022e : */ 0x0a000001,0x00000000,
+/*
+ JUMP sated
+
+at 0x00000230 : */ 0x80080000,0x00000918,
+/*
+eat_msgin:
+ MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
+
+at 0x00000232 : */ 0x0f000001,0x00000000,
+/*
+ JUMP eat_msgin, WHEN MSG_IN
+
+at 0x00000234 : */ 0x870b0000,0x000008c8,
+/*
+ JUMP sated
+
+at 0x00000236 : */ 0x80080000,0x00000918,
+/*
+eat_status:
+ MOVE 1, NCR53c7xx_sink, WHEN STATUS
+
+at 0x00000238 : */ 0x0b000001,0x00000000,
+/*
+ JUMP eat_status, WHEN STATUS
+
+at 0x0000023a : */ 0x830b0000,0x000008e0,
+/*
+ JUMP sated
+
+at 0x0000023c : */ 0x80080000,0x00000918,
+/*
+eat_datain:
+ MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
+
+at 0x0000023e : */ 0x09000001,0x00000000,
+/*
+ JUMP eat_datain, WHEN DATA_IN
+
+at 0x00000240 : */ 0x810b0000,0x000008f8,
+/*
+ JUMP sated
+
+at 0x00000242 : */ 0x80080000,0x00000918,
+/*
+spew_dataout:
+ MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
+
+at 0x00000244 : */ 0x08000001,0x00000000,
+/*
+sated:
+ MOVE SCNTL2 & 0x7f TO SCNTL2
+
+at 0x00000246 : */ 0x7c027f00,0x00000000,
+/*
+ MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
+
+at 0x00000248 : */ 0x0e000001,0x00000000,
+/*
+ WAIT DISCONNECT
+
+at 0x0000024a : */ 0x48000000,0x00000000,
+/*
+ INT int_norm_aborted
+
+at 0x0000024c : */ 0x98080000,0x02040000,
+/*
+
+;
+; dsa_to_scratch
+; scratch_to_dsa
+;
+; PURPOSE :
+; The NCR chips cannot do a move memory instruction with the DSA register
+; as the source or destination. So, we provide a couple of subroutines
+; that let us switch between the DSA register and scratch register.
+;
+; Memory moves to/from the DSPS register also don't work, but we
+; don't use them.
+;
+;
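+;
+; In C terms both helpers below perform a plain 32-bit copy, one byte at a
+; time through SFBR, because that is the only path the chip provides (a
+; hedged sketch with hypothetical names):
+;
+;	scratch = dsa;		// dsa_to_scratch:  DSA0..DSA3 -> SFBR -> SCRATCH0..SCRATCH3
+;	dsa     = scratch;	// scratch_to_dsa:  SCRATCH0..SCRATCH3 -> SFBR -> DSA0..DSA3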
+
+
+dsa_to_scratch:
+ MOVE DSA0 TO SFBR
+
+at 0x0000024e : */ 0x72100000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH0
+
+at 0x00000250 : */ 0x6a340000,0x00000000,
+/*
+ MOVE DSA1 TO SFBR
+
+at 0x00000252 : */ 0x72110000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH1
+
+at 0x00000254 : */ 0x6a350000,0x00000000,
+/*
+ MOVE DSA2 TO SFBR
+
+at 0x00000256 : */ 0x72120000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH2
+
+at 0x00000258 : */ 0x6a360000,0x00000000,
+/*
+ MOVE DSA3 TO SFBR
+
+at 0x0000025a : */ 0x72130000,0x00000000,
+/*
+ MOVE SFBR TO SCRATCH3
+
+at 0x0000025c : */ 0x6a370000,0x00000000,
+/*
+ RETURN
+
+at 0x0000025e : */ 0x90080000,0x00000000,
+/*
+
+scratch_to_dsa:
+ MOVE SCRATCH0 TO SFBR
+
+at 0x00000260 : */ 0x72340000,0x00000000,
+/*
+ MOVE SFBR TO DSA0
+
+at 0x00000262 : */ 0x6a100000,0x00000000,
+/*
+ MOVE SCRATCH1 TO SFBR
+
+at 0x00000264 : */ 0x72350000,0x00000000,
+/*
+ MOVE SFBR TO DSA1
+
+at 0x00000266 : */ 0x6a110000,0x00000000,
+/*
+ MOVE SCRATCH2 TO SFBR
+
+at 0x00000268 : */ 0x72360000,0x00000000,
+/*
+ MOVE SFBR TO DSA2
+
+at 0x0000026a : */ 0x6a120000,0x00000000,
+/*
+ MOVE SCRATCH3 TO SFBR
+
+at 0x0000026c : */ 0x72370000,0x00000000,
+/*
+ MOVE SFBR TO DSA3
+
+at 0x0000026e : */ 0x6a130000,0x00000000,
+/*
+ RETURN
+
+at 0x00000270 : */ 0x90080000,0x00000000,
+};
+
+#define A_NCR53c7xx_msg_abort 0x00000000
+u32 A_NCR53c7xx_msg_abort_used[] = {
+ 0x00000249,
+};
+
+#define A_NCR53c7xx_msg_reject 0x00000000
+u32 A_NCR53c7xx_msg_reject_used[] = {
+ 0x00000172,
+};
+
+#define A_NCR53c7xx_sink 0x00000000
+u32 A_NCR53c7xx_sink_used[] = {
+ 0x00000233,
+ 0x00000239,
+ 0x0000023f,
+};
+
+#define A_NCR53c7xx_zero 0x00000000
+u32 A_NCR53c7xx_zero_used[] = {
+ 0x0000022f,
+ 0x00000245,
+};
+
+#define A_NOP_insn 0x00000000
+u32 A_NOP_insn_used[] = {
+ 0x00000010,
+};
+
+#define A_addr_reconnect_dsa_head 0x00000000
+u32 A_addr_reconnect_dsa_head_used[] = {
+ 0x000001a7,
+};
+
+#define A_addr_scratch 0x00000000
+u32 A_addr_scratch_used[] = {
+ 0x00000004,
+ 0x0000001b,
+ 0x00000046,
+ 0x00000067,
+ 0x00000073,
+ 0x000000b0,
+ 0x000000c6,
+ 0x00000128,
+ 0x00000141,
+ 0x000001a1,
+ 0x000001ce,
+};
+
+#define A_addr_temp 0x00000000
+u32 A_addr_temp_used[] = {
+ 0x00000025,
+ 0x00000034,
+};
+
+#define A_dmode_memory_to_memory 0x00000000
+u32 A_dmode_memory_to_memory_used[] = {
+ 0x00000005,
+ 0x0000001c,
+ 0x00000027,
+ 0x00000035,
+ 0x00000047,
+ 0x00000069,
+ 0x00000075,
+ 0x000000b2,
+ 0x000000c8,
+ 0x0000012a,
+ 0x00000143,
+ 0x00000199,
+ 0x000001a2,
+ 0x000001d0,
+};
+
+#define A_dmode_memory_to_ncr 0x00000000
+u32 A_dmode_memory_to_ncr_used[] = {
+ 0x00000000,
+ 0x00000017,
+ 0x00000030,
+ 0x00000042,
+ 0x0000019d,
+};
+
+#define A_dmode_ncr_to_memory 0x00000000
+u32 A_dmode_ncr_to_memory_used[] = {
+ 0x00000022,
+ 0x00000064,
+ 0x00000070,
+ 0x000000ad,
+ 0x000000c3,
+ 0x00000125,
+ 0x0000013e,
+ 0x000001cb,
+};
+
+#define A_dsa_check_reselect 0x00000000
+u32 A_dsa_check_reselect_used[] = {
+ 0x000001bd,
+};
+
+#define A_dsa_cmdout 0x00000048
+u32 A_dsa_cmdout_used[] = {
+ 0x00000094,
+};
+
+#define A_dsa_cmnd 0x00000038
+u32 A_dsa_cmnd_used[] = {
+};
+
+#define A_dsa_datain 0x00000054
+u32 A_dsa_datain_used[] = {
+ 0x000000bb,
+};
+
+#define A_dsa_dataout 0x00000050
+u32 A_dsa_dataout_used[] = {
+ 0x000000a5,
+};
+
+#define A_dsa_end 0x00000070
+u32 A_dsa_end_used[] = {
+};
+
+#define A_dsa_fields_start 0x00000000
+u32 A_dsa_fields_start_used[] = {
+};
+
+#define A_dsa_msgin 0x00000058
+u32 A_dsa_msgin_used[] = {
+ 0x00000188,
+};
+
+#define A_dsa_msgout 0x00000040
+u32 A_dsa_msgout_used[] = {
+ 0x00000086,
+};
+
+#define A_dsa_msgout_other 0x00000068
+u32 A_dsa_msgout_other_used[] = {
+ 0x00000180,
+};
+
+#define A_dsa_next 0x00000030
+u32 A_dsa_next_used[] = {
+ 0x0000005c,
+};
+
+#define A_dsa_restore_pointers 0x00000000
+u32 A_dsa_restore_pointers_used[] = {
+ 0x0000012e,
+};
+
+#define A_dsa_save_data_pointer 0x00000000
+u32 A_dsa_save_data_pointer_used[] = {
+ 0x00000115,
+};
+
+#define A_dsa_select 0x0000003c
+u32 A_dsa_select_used[] = {
+ 0x00000081,
+};
+
+#define A_dsa_status 0x00000060
+u32 A_dsa_status_used[] = {
+ 0x00000184,
+};
+
+#define A_dsa_temp_addr_array_value 0x00000000
+u32 A_dsa_temp_addr_array_value_used[] = {
+};
+
+#define A_dsa_temp_addr_dsa_value 0x00000000
+u32 A_dsa_temp_addr_dsa_value_used[] = {
+ 0x00000003,
+};
+
+#define A_dsa_temp_addr_new_value 0x00000000
+u32 A_dsa_temp_addr_new_value_used[] = {
+};
+
+#define A_dsa_temp_addr_next 0x00000000
+u32 A_dsa_temp_addr_next_used[] = {
+ 0x00000015,
+ 0x0000004e,
+};
+
+#define A_dsa_temp_addr_residual 0x00000000
+u32 A_dsa_temp_addr_residual_used[] = {
+ 0x0000002a,
+ 0x00000039,
+};
+
+#define A_dsa_temp_addr_saved_pointer 0x00000000
+u32 A_dsa_temp_addr_saved_pointer_used[] = {
+ 0x00000026,
+ 0x00000033,
+};
+
+#define A_dsa_temp_addr_saved_residual 0x00000000
+u32 A_dsa_temp_addr_saved_residual_used[] = {
+ 0x0000002b,
+ 0x00000038,
+};
+
+#define A_dsa_temp_lun 0x00000000
+u32 A_dsa_temp_lun_used[] = {
+ 0x0000004b,
+};
+
+#define A_dsa_temp_next 0x00000000
+u32 A_dsa_temp_next_used[] = {
+ 0x0000001a,
+};
+
+#define A_dsa_temp_sync 0x00000000
+u32 A_dsa_temp_sync_used[] = {
+ 0x00000053,
+};
+
+#define A_dsa_temp_target 0x00000000
+u32 A_dsa_temp_target_used[] = {
+ 0x00000040,
+};
+
+#define A_int_debug_break 0x03000000
+u32 A_int_debug_break_used[] = {
+ 0x00000217,
+};
+
+#define A_int_debug_panic 0x030b0000
+u32 A_int_debug_panic_used[] = {
+ 0x000001e8,
+ 0x000001f8,
+};
+
+#define A_int_err_check_condition 0x00030000
+u32 A_int_err_check_condition_used[] = {
+ 0x00000194,
+};
+
+#define A_int_err_no_phase 0x00040000
+u32 A_int_err_no_phase_used[] = {
+};
+
+#define A_int_err_selected 0x00010000
+u32 A_int_err_selected_used[] = {
+ 0x000001da,
+};
+
+#define A_int_err_unexpected_phase 0x00000000
+u32 A_int_err_unexpected_phase_used[] = {
+ 0x0000008c,
+ 0x00000092,
+ 0x0000009a,
+ 0x000000d0,
+ 0x000000d4,
+ 0x000000d6,
+ 0x000000de,
+ 0x000000e2,
+ 0x000000e4,
+ 0x000000ec,
+ 0x000000f0,
+ 0x000000f2,
+ 0x000000f4,
+ 0x0000014c,
+};
+
+#define A_int_err_unexpected_reselect 0x00020000
+u32 A_int_err_unexpected_reselect_used[] = {
+ 0x000001ba,
+};
+
+#define A_int_msg_1 0x01020000
+u32 A_int_msg_1_used[] = {
+ 0x0000010e,
+ 0x00000110,
+};
+
+#define A_int_msg_sdtr 0x01010000
+u32 A_int_msg_sdtr_used[] = {
+ 0x0000016c,
+};
+
+#define A_int_msg_wdtr 0x01000000
+u32 A_int_msg_wdtr_used[] = {
+ 0x00000160,
+};
+
+#define A_int_norm_aborted 0x02040000
+u32 A_int_norm_aborted_used[] = {
+ 0x0000024d,
+};
+
+#define A_int_norm_command_complete 0x02020000
+u32 A_int_norm_command_complete_used[] = {
+};
+
+#define A_int_norm_disconnected 0x02030000
+u32 A_int_norm_disconnected_used[] = {
+};
+
+#define A_int_norm_reselect_complete 0x02010000
+u32 A_int_norm_reselect_complete_used[] = {
+};
+
+#define A_int_norm_reset 0x02050000
+u32 A_int_norm_reset_used[] = {
+};
+
+#define A_int_norm_select_complete 0x02000000
+u32 A_int_norm_select_complete_used[] = {
+};
+
+#define A_int_test_1 0x04000000
+u32 A_int_test_1_used[] = {
+ 0x000001fd,
+};
+
+#define A_int_test_2 0x04010000
+u32 A_int_test_2_used[] = {
+ 0x00000215,
+};
+
+#define A_int_test_3 0x04020000
+u32 A_int_test_3_used[] = {
+};
+
+#define A_msg_buf 0x00000000
+u32 A_msg_buf_used[] = {
+ 0x00000102,
+ 0x0000014e,
+ 0x00000158,
+ 0x0000015e,
+ 0x00000164,
+ 0x0000016a,
+};
+
+#define A_reconnect_dsa_head 0x00000000
+u32 A_reconnect_dsa_head_used[] = {
+ 0x0000006c,
+ 0x00000074,
+ 0x000001a0,
+};
+
+#define A_reselected_identify 0x00000000
+u32 A_reselected_identify_used[] = {
+ 0x00000045,
+ 0x0000019c,
+};
+
+#define A_reselected_tag 0x00000000
+u32 A_reselected_tag_used[] = {
+};
+
+#define A_schedule 0x00000000
+u32 A_schedule_used[] = {
+ 0x0000007e,
+ 0x00000192,
+ 0x000001e2,
+ 0x0000021f,
+};
+
+#define A_test_dest 0x00000000
+u32 A_test_dest_used[] = {
+ 0x000001fb,
+};
+
+#define A_test_src 0x00000000
+u32 A_test_src_used[] = {
+ 0x000001fa,
+};
+
+#define Ent_accept_message 0x000005d4
+#define Ent_cmdout_cmdout 0x0000024c
+#define Ent_command_complete 0x0000060c
+#define Ent_command_complete_msgin 0x0000061c
+#define Ent_data_transfer 0x00000254
+#define Ent_datain_to_jump 0x00000328
+#define Ent_debug_break 0x00000858
+#define Ent_dsa_code_begin 0x00000000
+#define Ent_dsa_code_check_reselect 0x000000f8
+#define Ent_dsa_code_fix_jump 0x0000003c
+#define Ent_dsa_code_restore_pointers 0x000000c0
+#define Ent_dsa_code_save_data_pointer 0x00000088
+#define Ent_dsa_code_template 0x00000000
+#define Ent_dsa_code_template_end 0x00000168
+#define Ent_dsa_schedule 0x00000168
+#define Ent_dsa_zero 0x00000168
+#define Ent_end_data_transfer 0x0000028c
+#define Ent_initiator_abort 0x00000880
+#define Ent_msg_in 0x00000404
+#define Ent_msg_in_restart 0x000003e4
+#define Ent_other_in 0x00000374
+#define Ent_other_out 0x0000033c
+#define Ent_other_transfer 0x000003ac
+#define Ent_reject_message 0x000005b4
+#define Ent_reselected_check_next 0x000006a4
+#define Ent_reselected_ok 0x00000750
+#define Ent_respond_message 0x000005ec
+#define Ent_select 0x000001fc
+#define Ent_select_msgout 0x00000214
+#define Ent_target_abort 0x00000860
+#define Ent_test_1 0x000007e4
+#define Ent_test_2 0x000007f8
+#define Ent_test_2_msgout 0x00000810
+#define Ent_wait_reselect 0x00000654
+u32 LABELPATCHES[] = {
+ 0x00000008,
+ 0x0000000a,
+ 0x00000013,
+ 0x00000016,
+ 0x0000001f,
+ 0x00000021,
+ 0x0000004f,
+ 0x00000051,
+ 0x0000005b,
+ 0x00000068,
+ 0x0000006f,
+ 0x00000082,
+ 0x00000084,
+ 0x0000008a,
+ 0x0000008e,
+ 0x00000090,
+ 0x00000096,
+ 0x00000098,
+ 0x0000009c,
+ 0x0000009e,
+ 0x000000a0,
+ 0x000000a2,
+ 0x000000a4,
+ 0x000000b1,
+ 0x000000b6,
+ 0x000000ba,
+ 0x000000c7,
+ 0x000000cc,
+ 0x000000d2,
+ 0x000000d8,
+ 0x000000da,
+ 0x000000e0,
+ 0x000000e6,
+ 0x000000e8,
+ 0x000000ee,
+ 0x000000f6,
+ 0x000000f8,
+ 0x00000104,
+ 0x00000106,
+ 0x00000108,
+ 0x0000010a,
+ 0x0000010c,
+ 0x00000112,
+ 0x00000114,
+ 0x00000129,
+ 0x00000142,
+ 0x00000148,
+ 0x00000150,
+ 0x00000152,
+ 0x00000154,
+ 0x0000015a,
+ 0x00000166,
+ 0x00000196,
+ 0x000001a5,
+ 0x000001a8,
+ 0x000001ac,
+ 0x000001b0,
+ 0x000001b4,
+ 0x000001b8,
+ 0x000001cf,
+ 0x000001de,
+ 0x000001e6,
+ 0x000001ec,
+ 0x000001ee,
+ 0x000001f2,
+ 0x000001f6,
+ 0x00000201,
+ 0x00000203,
+ 0x00000223,
+ 0x00000225,
+ 0x00000227,
+ 0x00000229,
+ 0x0000022b,
+ 0x0000022d,
+ 0x00000231,
+ 0x00000235,
+ 0x00000237,
+ 0x0000023b,
+ 0x0000023d,
+ 0x00000241,
+ 0x00000243,
+};
+
+struct {
+ u32 offset;
+ void *address;
+} EXTERNAL_PATCHES[] = {
+};
+
+u32 INSTRUCTIONS = 301;
+u32 PATCHES = 81;
+u32 EXTERNAL_PATCHES_LEN = 0;
diff --git a/linux/src/drivers/scsi/53c8xx_u.h b/linux/src/drivers/scsi/53c8xx_u.h
new file mode 100644
index 0000000..c3d486f
--- /dev/null
+++ b/linux/src/drivers/scsi/53c8xx_u.h
@@ -0,0 +1,97 @@
+#undef A_NCR53c7xx_msg_abort
+#undef A_NCR53c7xx_msg_reject
+#undef A_NCR53c7xx_sink
+#undef A_NCR53c7xx_zero
+#undef A_NOP_insn
+#undef A_addr_reconnect_dsa_head
+#undef A_addr_scratch
+#undef A_addr_temp
+#undef A_dmode_memory_to_memory
+#undef A_dmode_memory_to_ncr
+#undef A_dmode_ncr_to_memory
+#undef A_dsa_check_reselect
+#undef A_dsa_cmdout
+#undef A_dsa_cmnd
+#undef A_dsa_datain
+#undef A_dsa_dataout
+#undef A_dsa_end
+#undef A_dsa_fields_start
+#undef A_dsa_msgin
+#undef A_dsa_msgout
+#undef A_dsa_msgout_other
+#undef A_dsa_next
+#undef A_dsa_restore_pointers
+#undef A_dsa_save_data_pointer
+#undef A_dsa_select
+#undef A_dsa_status
+#undef A_dsa_temp_addr_array_value
+#undef A_dsa_temp_addr_dsa_value
+#undef A_dsa_temp_addr_new_value
+#undef A_dsa_temp_addr_next
+#undef A_dsa_temp_addr_residual
+#undef A_dsa_temp_addr_saved_pointer
+#undef A_dsa_temp_addr_saved_residual
+#undef A_dsa_temp_lun
+#undef A_dsa_temp_next
+#undef A_dsa_temp_sync
+#undef A_dsa_temp_target
+#undef A_int_debug_break
+#undef A_int_debug_panic
+#undef A_int_err_check_condition
+#undef A_int_err_no_phase
+#undef A_int_err_selected
+#undef A_int_err_unexpected_phase
+#undef A_int_err_unexpected_reselect
+#undef A_int_msg_1
+#undef A_int_msg_sdtr
+#undef A_int_msg_wdtr
+#undef A_int_norm_aborted
+#undef A_int_norm_command_complete
+#undef A_int_norm_disconnected
+#undef A_int_norm_reselect_complete
+#undef A_int_norm_reset
+#undef A_int_norm_select_complete
+#undef A_int_test_1
+#undef A_int_test_2
+#undef A_int_test_3
+#undef A_msg_buf
+#undef A_reconnect_dsa_head
+#undef A_reselected_identify
+#undef A_reselected_tag
+#undef A_schedule
+#undef A_test_dest
+#undef A_test_src
+#undef Ent_accept_message
+#undef Ent_cmdout_cmdout
+#undef Ent_command_complete
+#undef Ent_command_complete_msgin
+#undef Ent_data_transfer
+#undef Ent_datain_to_jump
+#undef Ent_debug_break
+#undef Ent_dsa_code_begin
+#undef Ent_dsa_code_check_reselect
+#undef Ent_dsa_code_fix_jump
+#undef Ent_dsa_code_restore_pointers
+#undef Ent_dsa_code_save_data_pointer
+#undef Ent_dsa_code_template
+#undef Ent_dsa_code_template_end
+#undef Ent_dsa_schedule
+#undef Ent_dsa_zero
+#undef Ent_end_data_transfer
+#undef Ent_initiator_abort
+#undef Ent_msg_in
+#undef Ent_msg_in_restart
+#undef Ent_other_in
+#undef Ent_other_out
+#undef Ent_other_transfer
+#undef Ent_reject_message
+#undef Ent_reselected_check_next
+#undef Ent_reselected_ok
+#undef Ent_respond_message
+#undef Ent_select
+#undef Ent_select_msgout
+#undef Ent_target_abort
+#undef Ent_test_1
+#undef Ent_test_2
+#undef Ent_test_2_msgout
+#undef Ent_wait_reselect
diff --git a/linux/src/drivers/scsi/AM53C974.c b/linux/src/drivers/scsi/AM53C974.c
new file mode 100644
index 0000000..da139ce
--- /dev/null
+++ b/linux/src/drivers/scsi/AM53C974.c
@@ -0,0 +1,2270 @@
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/blk.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "AM53C974.h"
+#include "constants.h"
+#include "sd.h"
+
+/* AM53/79C974 (PCscsi) driver release 0.5
+ *
+ * The architecture and much of the code of this device
+ * driver were originally developed by Drew Eckhardt for
+ * the NCR5380. The following copyrights apply:
+ * For the architecture and all pieces of code which can also be found
+ * in the NCR5380 device driver:
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * The AM53C974_nobios_detect code was originally developed by
+ * Robin Cutshaw (robin@xfree86.org) and is used here in a
+ * slightly modified form.
+ *
+ * For the remaining code:
+ * Copyright 1994, D. Frieauff
+ * EMail: fri@rsx42sun0.dofn.de
+ * Phone: x49-7545-8-2256 , x49-7541-42305
+ */
+
+#ifdef AM53C974_DEBUG
+ #define DEB(x) x
+ #ifdef AM53C974_DEBUG_KEYWAIT
+ #define KEYWAIT() AM53C974_keywait()
+ #else
+ #define KEYWAIT()
+ #endif
+ #ifdef AM53C974_DEBUG_INIT
+ #define DEB_INIT(x) x
+ #else
+ #define DEB_INIT(x)
+ #endif
+ #ifdef AM53C974_DEBUG_MSG
+ #define DEB_MSG(x) x
+ #else
+ #define DEB_MSG(x)
+ #endif
+ #ifdef AM53C974_DEB_RESEL
+ #define DEB_RESEL(x) x
+ #else
+ #define DEB_RESEL(x)
+ #endif
+ #ifdef AM53C974_DEBUG_QUEUE
+ #define DEB_QUEUE(x) x
+ #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
+ #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
+ #else
+ #define DEB_QUEUE(x)
+ #define LIST(x,y)
+ #define REMOVE(w,x,y,z)
+ #endif
+ #ifdef AM53C974_DEBUG_INFO
+ #define DEB_INFO(x) x
+ #else
+ #define DEB_INFO(x)
+ #endif
+ #ifdef AM53C974_DEBUG_LINKED
+ #define DEB_LINKED(x) x
+ #else
+ #define DEB_LINKED(x)
+ #endif
+ #ifdef AM53C974_DEBUG_INTR
+ #define DEB_INTR(x) x
+ #else
+ #define DEB_INTR(x)
+ #endif
+#else
+ #define DEB_INIT(x)
+ #define DEB(x)
+ #define DEB_QUEUE(x)
+ #define LIST(x,y)
+ #define REMOVE(w,x,y,z)
+ #define DEB_INFO(x)
+ #define DEB_LINKED(x)
+ #define DEB_INTR(x)
+ #define DEB_MSG(x)
+ #define DEB_RESEL(x)
+ #define KEYWAIT()
+#endif
+ #ifdef AM53C974_DEBUG_ABORT
+ #define DEB_ABORT(x) x
+ #else
+ #define DEB_ABORT(x)
+ #endif
+
+#ifdef VERBOSE_AM53C974_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define INSIDE(x,l,h) ( ((x) >= (l)) && ((x) <= (h)) )
+
+#ifdef AM53C974_DEBUG
+static void AM53C974_print_pci(struct Scsi_Host *instance);
+static void AM53C974_print_phase(struct Scsi_Host *instance);
+static void AM53C974_print_queues(struct Scsi_Host *instance);
+#endif /* AM53C974_DEBUG */
+static void AM53C974_print(struct Scsi_Host *instance);
+static void AM53C974_keywait(void);
+static int AM53C974_bios_detect(Scsi_Host_Template *tpnt);
+static int AM53C974_nobios_detect(Scsi_Host_Template *tpnt);
+static int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config);
+static void AM53C974_config_after_reset(struct Scsi_Host *instance);
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd);
+static __inline__ void run_main(void);
+static void AM53C974_main (void);
+static void AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs);
+static void AM53C974_intr_disconnect(struct Scsi_Host *instance);
+static int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg);
+static __inline__ void AM53C974_set_async(struct Scsi_Host *instance, int target);
+static __inline__ void AM53C974_set_sync(struct Scsi_Host *instance, int target);
+static void AM53C974_information_transfer(struct Scsi_Host *instance,
+ unsigned char statreg, unsigned char isreg,
+ unsigned char instreg, unsigned char cfifo,
+ unsigned char dmastatus);
+static int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd, unsigned char msg);
+static void AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag);
+static void AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg);
+static __inline__ void AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+ unsigned long length, char *data);
+static void AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+ unsigned char statreg);
+static void AM53C974_intr_bus_reset(struct Scsi_Host *instance);
+
+static struct Scsi_Host *first_instance = NULL;
+static Scsi_Host_Template *the_template = NULL;
+static struct Scsi_Host *first_host = NULL; /* Head of list of AMD boards */
+static volatile int main_running = 0;
+static int commandline_current = 0;
+override_t overrides[7] = { {-1, 0, 0, 0}, }; /* LILO overrides */
+
+struct proc_dir_entry proc_scsi_am53c974 = {
+ PROC_SCSI_AM53C974, 8, "am53c974",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#ifdef AM53C974_DEBUG
+static int deb_stop = 1;
+
+/**************************************************************************
+ * Function : void AM53C974_print_pci(struct Scsi_Host *instance)
+ *
+ * Purpose : dump the PCI registers for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_pci(struct Scsi_Host *instance)
+{
+int i;
+unsigned short vendor_id, device_id, command, status, scratch[8];
+unsigned long class_revision, base;
+unsigned char irq, cache_line_size, latency_timer, header_type;
+
+AM53C974_PCIREG_OPEN();
+
+for (i = 0; i < 8; i++) *(scratch + i) = AM53C974_PCIREG_READ_WORD(instance, PCI_SCRATCH_REG_0 + 2*i);
+vendor_id = AM53C974_PCIREG_READ_WORD(instance, PCI_VENDOR_ID);
+device_id = AM53C974_PCIREG_READ_WORD(instance, PCI_DEVICE_ID);
+command = AM53C974_PCIREG_READ_WORD(instance, PCI_COMMAND);
+status = AM53C974_PCIREG_READ_WORD(instance, PCI_STATUS);
+class_revision = AM53C974_PCIREG_READ_DWORD(instance, PCI_CLASS_REVISION);
+cache_line_size = AM53C974_PCIREG_READ_BYTE(instance, PCI_CACHE_LINE_SIZE);
+latency_timer = AM53C974_PCIREG_READ_BYTE(instance, PCI_LATENCY_TIMER);
+header_type = AM53C974_PCIREG_READ_BYTE(instance, PCI_HEADER_TYPE);
+base = AM53C974_PCIREG_READ_DWORD(instance, PCI_BASE_ADDRESS_0);
+irq = AM53C974_PCIREG_READ_BYTE(instance, PCI_INTERRUPT_LINE);
+
+AM53C974_PCIREG_CLOSE();
+
+
+printk("------------- start of PCI register dump -------------\n");
+printk("PCI_VENDOR_ID: 0x%x\n", vendor_id);
+printk("PCI_DEVICE_ID: 0x%x\n", device_id);
+printk("PCI_COMMAND: 0x%x\n", command);
+printk("PCI_STATUS: 0x%x\n", status);
+printk("PCI_CLASS_REVISION: 0x%lx\n", class_revision);
+printk("PCI_CACHE_LINE_SIZE: 0x%x\n", cache_line_size);
+printk("PCI_LATENCY_TIMER: 0x%x\n", latency_timer);
+printk("PCI_HEADER_TYPE: 0x%x\n", header_type);
+printk("PCI_BASE_ADDRESS_0: 0x%lx\n", base);
+printk("PCI_INTERRUPT_LINE: %d\n", irq);
+for (i = 0; i < 8; i++) printk("PCI_SCRATCH_%d: 0x%x\n", i, scratch[i]);
+printk("------------- end of PCI register dump -------------\n\n");
+}
+
+static struct {
+ unsigned char value;
+ char *name;
+} phases[] = {
+{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
+{PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
+{PHASE_RES_0, "RESERVED 0"}, {PHASE_RES_1, "RESERVED 1"}};
+
+/**************************************************************************
+ * Function : void AM53C974_print_phase(struct Scsi_Host *instance)
+ *
+ * Purpose : print the current SCSI phase for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_phase(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned char statreg, latched;
+int i;
+AM53C974_setio(instance);
+
+latched = (AM53C974_read_8(CNTLREG2)) & CNTLREG2_ENF;
+statreg = AM53C974_read_8(STATREG);
+for (i = 0; (phases[i].value != PHASE_RES_1) &&
+ (phases[i].value != (statreg & STATREG_PHASE)); ++i);
+if (latched)
+ printk("scsi%d : phase %s, latched at end of last command\n", instance->host_no, phases[i].name);
+ else
+ printk("scsi%d : phase %s, real time\n", instance->host_no, phases[i].name);
+}
+
+/**************************************************************************
+ * Function : void AM53C974_print_queues(struct Scsi_Host *instance)
+ *
+ * Purpose : print commands in the various queues
+ *
+ * Inputs : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print_queues(struct Scsi_Host *instance)
+{
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *ptr;
+
+printk("AM53C974: coroutine is%s running.\n", main_running ? "" : "n't");
+
+cli();
+
+if (!hostdata->connected) {
+ printk ("scsi%d: no currently connected command\n", instance->host_no); }
+ else {
+ print_Scsi_Cmnd ((Scsi_Cmnd *)hostdata->connected); }
+if (!hostdata->sel_cmd) {
+ printk ("scsi%d: no currently arbitrating command\n", instance->host_no); }
+ else {
+ print_Scsi_Cmnd ((Scsi_Cmnd *)hostdata->sel_cmd); }
+
+printk ("scsi%d: issue_queue ", instance->host_no);
+if (!hostdata->issue_queue)
+ printk("empty\n");
+ else {
+ printk(":\n");
+ for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *)ptr->host_scribble)
+ print_Scsi_Cmnd (ptr); }
+
+printk ("scsi%d: disconnected_queue ", instance->host_no);
+if (!hostdata->disconnected_queue)
+ printk("empty\n");
+ else {
+ printk(":\n");
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *)ptr->host_scribble)
+ print_Scsi_Cmnd (ptr); }
+
+sti();
+}
+
+#endif /* AM53C974_DEBUG */
+
+/**************************************************************************
+ * Function : void AM53C974_print(struct Scsi_Host *instance)
+ *
+ * Purpose : dump the chip registers for debugging purposes
+ *
+ * Input : instance - which AM53C974
+ **************************************************************************/
+static void AM53C974_print(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned long ctcreg, dmastc, dmaspa, dmawbc, dmawac;
+unsigned char cmdreg, statreg, isreg, cfireg, cntlreg[4], dmacmd, dmastatus;
+AM53C974_setio(instance);
+
+cli();
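+/* the 24-bit transfer counter is spread over three 8-bit registers (high, mid, low) */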
+ctcreg = AM53C974_read_8(CTCHREG) << 16;
+ctcreg |= AM53C974_read_8(CTCMREG) << 8;
+ctcreg |= AM53C974_read_8(CTCLREG);
+cmdreg = AM53C974_read_8(CMDREG);
+statreg = AM53C974_read_8(STATREG);
+isreg = AM53C974_read_8(ISREG);
+cfireg = AM53C974_read_8(CFIREG);
+cntlreg[0] = AM53C974_read_8(CNTLREG1);
+cntlreg[1] = AM53C974_read_8(CNTLREG2);
+cntlreg[2] = AM53C974_read_8(CNTLREG3);
+cntlreg[3] = AM53C974_read_8(CNTLREG4);
+dmacmd = AM53C974_read_8(DMACMD);
+dmastc = AM53C974_read_32(DMASTC);
+dmaspa = AM53C974_read_32(DMASPA);
+dmawbc = AM53C974_read_32(DMAWBC);
+dmawac = AM53C974_read_32(DMAWAC);
+dmastatus = AM53C974_read_8(DMASTATUS);
+sti();
+
+printk("AM53C974 register dump:\n");
+printk("IO base: 0x%04lx; CTCREG: 0x%04lx; CMDREG: 0x%02x; STATREG: 0x%02x; ISREG: 0x%02x\n",
+ io_port, ctcreg, cmdreg, statreg, isreg);
+printk("CFIREG: 0x%02x; CNTLREG1-4: 0x%02x; 0x%02x; 0x%02x; 0x%02x\n",
+ cfireg, cntlreg[0], cntlreg[1], cntlreg[2], cntlreg[3]);
+printk("DMACMD: 0x%02x; DMASTC: 0x%04lx; DMASPA: 0x%04lx\n", dmacmd, dmastc, dmaspa);
+printk("DMAWBC: 0x%04lx; DMAWAC: 0x%04lx; DMASTATUS: 0x%02x\n", dmawbc, dmawac, dmastatus);
+printk("---------------------------------------------------------\n");
+}
+
+/**************************************************************************
+* Function : void AM53C974_keywait(void)
+*
+* Purpose : wait until a key is pressed; if it was the 'r' key, leave single-step mode;
+* this function is used for debugging only
+*
+* Input : none
+**************************************************************************/
+static void AM53C974_keywait(void)
+{
+#ifdef AM53C974_DEBUG
+int key;
+
+if (!deb_stop) return;
+#endif
+
+cli();
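+/* poll the keyboard controller status port (0x64) until the output-buffer-full bit indicates a scancode is ready */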
+while ((inb_p(0x64) & 0x01) != 0x01) ;
+#ifdef AM53C974_DEBUG
+key = inb(0x60);
+if (key == 0x93) deb_stop = 0; /* don't stop if 'r' was pressed */
+#endif
+sti();
+}
+
+/**************************************************************************
+* Function : AM53C974_setup(char *str, int *ints)
+*
+* Purpose : LILO command line initialization of the overrides array,
+*
+* Inputs : str - unused, ints - array of integer parameters with ints[0]
+* equal to the number of ints.
+*
+* NOTE : this function needs to be declared as an external function
+* in init/main.c and included there in the bootsetups list
+***************************************************************************/
+void AM53C974_setup(char *str, int *ints)
+{
+if (ints[0] < 4)
+ printk("AM53C974_setup: wrong number of parameters;\n correct syntax is: AM53C974=host-scsi-id, target-scsi-id, max-rate, max-offset\n");
+ else {
+ if (commandline_current < (sizeof(overrides) / sizeof(override_t))) {
+ if ((ints[1] < 0) || (ints[1] > 7) ||
+ (ints[2] < 0) || (ints[2] > 7) ||
+ (ints[1] == ints[2]) ||
+ (ints[3] < (DEF_CLK / MAX_PERIOD)) || (ints[3] > (DEF_CLK / MIN_PERIOD)) ||
+ (ints[4] < 0) || (ints[4] > MAX_OFFSET))
+ printk("AM53C974_setup: illegal parameter\n");
+ else {
+ overrides[commandline_current].host_scsi_id = ints[1];
+ overrides[commandline_current].target_scsi_id = ints[2];
+ overrides[commandline_current].max_rate = ints[3];
+ overrides[commandline_current].max_offset = ints[4];
+ commandline_current++; }
+ }
+ else
+ printk("AM53C974_setup: too many overrides\n");
+ }
+}
+
+#if defined (CONFIG_PCI)
+/**************************************************************************
+* Function : int AM53C974_bios_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips with PCI Bios
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+**************************************************************************/
+int AM53C974_bios_detect(Scsi_Host_Template *tpnt)
+{
+int count = 0; /* number of boards detected */
+int pci_index;
+pci_config_t pci_config;
+
+for (pci_index = 0; pci_index <= 16; ++pci_index) {
+ unsigned char pci_bus, pci_device_fn;
+ if (pcibios_find_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI, pci_index, &pci_bus, &pci_device_fn) != 0)
+ break;
+
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_VENDOR_ID, &pci_config._vendor);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_DEVICE_ID, &pci_config._device);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_COMMAND, &pci_config._command);
+ pcibios_read_config_word(pci_bus, pci_device_fn, PCI_STATUS, &pci_config._status);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_CLASS_REVISION, &pci_config._class_revision);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_CACHE_LINE_SIZE, &pci_config._cache_line_size);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, &pci_config._latency_timer);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_HEADER_TYPE, &pci_config._header_type);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_BIST, &pci_config._bist);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_0, &pci_config._base0);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_1, &pci_config._base1);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_2, &pci_config._base2);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_3, &pci_config._base3);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_4, &pci_config._base4);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_BASE_ADDRESS_5, &pci_config._base5);
+ pcibios_read_config_dword(pci_bus, pci_device_fn, PCI_ROM_ADDRESS, &pci_config._baserom);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_INTERRUPT_LINE, &pci_config._int_line);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_INTERRUPT_PIN, &pci_config._int_pin);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_MIN_GNT, &pci_config._min_gnt);
+ pcibios_read_config_byte(pci_bus, pci_device_fn, PCI_MAX_LAT, &pci_config._max_lat);
+ pci_config._pcibus = 0xFFFFFFFF;
+ pci_config._cardnum = 0xFFFFFFFF;
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ (from Mark Stockton <marks@schooner.sys.hou.compaq.com>) */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("PCI Master Bit has not been set. Setting...\n");
+ pcibios_write_config_word(pci_bus, pci_device_fn, PCI_COMMAND, pci_config._command); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+return (count);
+}
+#endif
+
+/**************************************************************************
+* Function : int AM53C974_nobios_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips using PCI config 2
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+*
+* NOTE : This code assumes the controller is on PCI bus 0.
+*
+* Origin: Robin Cutshaw (robin@xfree86.org)
+**************************************************************************/
+int AM53C974_nobios_detect(Scsi_Host_Template *tpnt)
+{
+int count = 0; /* number of boards detected */
+pci_config_t pci_config;
+
+/* first try PCI config method 1 */
+for (pci_config._pcibus = 0; pci_config._pcibus < 0x10; pci_config._pcibus++) {
+ for (pci_config._cardnum = 0; pci_config._cardnum < 0x20; pci_config._cardnum++) {
+ unsigned long config_cmd;
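+ /* PCI configuration mechanism #1: bit 31 enables the cycle, bits 23-16 select the bus,
+ bits 15-11 the device (function 0); the register offset is OR'd in below */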
+ config_cmd = 0x80000000 | (pci_config._pcibus<<16) | (pci_config._cardnum<<11);
+
+ outl(config_cmd, 0xCF8); /* ioreg 0 */
+ pci_config._device_vendor = inl(0xCFC);
+
+ if ((pci_config._vendor == PCI_VENDOR_ID_AMD) && (pci_config._device == PCI_DEVICE_ID_AMD_SCSI)) {
+ outl(config_cmd | PCI_COMMAND, 0xCF8); pci_config._status_command = inl(0xCFC);
+ outl(config_cmd | PCI_CLASS_REVISION, 0xCF8); pci_config._class_revision = inl(0xCFC);
+ outl(config_cmd | PCI_CACHE_LINE_SIZE, 0xCF8); pci_config._bist_header_latency_cache = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_0, 0xCF8); pci_config._base0 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_1, 0xCF8); pci_config._base1 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_2, 0xCF8); pci_config._base2 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_3, 0xCF8); pci_config._base3 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_4, 0xCF8); pci_config._base4 = inl(0xCFC);
+ outl(config_cmd | PCI_BASE_ADDRESS_5, 0xCF8); pci_config._base5 = inl(0xCFC);
+ outl(config_cmd | PCI_ROM_ADDRESS, 0xCF8); pci_config._baserom = inl(0xCFC);
+ outl(config_cmd | PCI_INTERRUPT_LINE, 0xCF8); pci_config._max_min_ipin_iline = inl(0xCFC);
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ From Mark Stockton <marks@schooner.sys.hou.compaq.com> */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("Config 1; PCI Master Bit has not been set. Setting...\n");
+ outl(config_cmd | PCI_COMMAND, 0xCF8); outw(pci_config._command, 0xCFC); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+ }
+ }
+outb(0, 0xCF8); /* is this really necessary? */
+
+/* try PCI config method 2, if no device was detected by method 1 */
+if (!count) {
+ AM53C974_PCIREG_OPEN();
+
+ pci_config._pcibus = 0xFFFFFFFF;
+ pci_config._cardnum = 0xFFFFFFFF;
+
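+ /* PCI configuration mechanism #2: each device's config space appears as a 256-byte
+ window in I/O space; the probe walks the windows at 0xC000-0xCF00 */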
+ for (pci_config._ioaddr = 0xC000; pci_config._ioaddr < 0xD000; pci_config._ioaddr += 0x0100) {
+ pci_config._device_vendor = inl(pci_config._ioaddr);
+
+ if ((pci_config._vendor == PCI_VENDOR_ID_AMD) && (pci_config._device == PCI_DEVICE_ID_AMD_SCSI)) {
+ pci_config._status_command = inl(pci_config._ioaddr + PCI_COMMAND);
+ pci_config._class_revision = inl(pci_config._ioaddr + PCI_CLASS_REVISION);
+ pci_config._bist_header_latency_cache = inl(pci_config._ioaddr + PCI_CACHE_LINE_SIZE);
+ pci_config._base0 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_0);
+ pci_config._base1 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_1);
+ pci_config._base2 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_2);
+ pci_config._base3 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_3);
+ pci_config._base4 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_4);
+ pci_config._base5 = inl(pci_config._ioaddr + PCI_BASE_ADDRESS_5);
+ pci_config._baserom = inl(pci_config._ioaddr + PCI_ROM_ADDRESS);
+ pci_config._max_min_ipin_iline = inl(pci_config._ioaddr + PCI_INTERRUPT_LINE);
+
+ /* check whether device is I/O mapped -- should be */
+ if (!(pci_config._command & PCI_COMMAND_IO)) continue;
+
+ /* PCI Spec 2.1 states that it is either the driver's or the PCI card's responsibility
+ to set the PCI Master Enable Bit if needed.
+ From Mark Stockton <marks@schooner.sys.hou.compaq.com> */
+ if (!(pci_config._command & PCI_COMMAND_MASTER)) {
+ pci_config._command |= PCI_COMMAND_MASTER;
+ printk("Config 2; PCI Master Bit has not been set. Setting...\n");
+ outw(pci_config._command, pci_config._ioaddr + PCI_COMMAND); }
+
+ /* everything seems OK now, so initialize */
+ if (AM53C974_init(tpnt, pci_config)) count++ ;
+ }
+ }
+ AM53C974_PCIREG_CLOSE();
+ }
+
+return(count);
+}
+
+/**************************************************************************
+* Function : int AM53C974_detect(Scsi_Host_Template *tpnt)
+*
+* Purpose : detects and initializes AM53C974 SCSI chips
+*
+* Inputs : tpnt - host template
+*
+* Returns : number of host adapters detected
+**************************************************************************/
+int AM53C974_detect(Scsi_Host_Template *tpnt)
+{
+int count; /* number of boards detected */
+
+tpnt->proc_dir = &proc_scsi_am53c974;
+
+#if defined (CONFIG_PCI)
+if (pcibios_present())
+ count = AM53C974_bios_detect(tpnt);
+ else
+#endif
+count = AM53C974_nobios_detect(tpnt);
+return (count);
+}
+
+/**************************************************************************
+* Function : int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config)
+*
+* Purpose : initializes instance and corresponding AM53/79C974 chip,
+*
+* Inputs : tpnt - template, pci_config - PCI configuration,
+*
+* Returns : 1 on success, 0 on failure.
+*
+* NOTE: If no override for the controller's SCSI id is given and AM53C974_SCSI_ID
+* is not defined we assume that the SCSI address of this controller is correctly
+* set up by the BIOS (as reflected by contents of register CNTLREG1).
+* This is the only BIOS assistance we need.
+**************************************************************************/
+static int AM53C974_init(Scsi_Host_Template *tpnt, pci_config_t pci_config)
+{
+AM53C974_local_declare();
+int i, j;
+struct Scsi_Host *instance, *search;
+struct AM53C974_hostdata *hostdata;
+
+#ifdef AM53C974_OPTION_DEBUG_PROBE_ONLY
+ printk ("AM53C974: probe only enabled, aborting initialization\n");
+ return 0;
+#endif
+
+instance = scsi_register(tpnt, sizeof(struct AM53C974_hostdata));
+hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+instance->base = NULL;
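+/* decode BAR0: bit 0 set marks an I/O BAR (mask the low two bits), clear marks a memory BAR (mask the low four) */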
+instance->io_port = pci_config._base0 & (pci_config._base0 & 0x1 ?
+ 0xFFFFFFFC : 0xFFFFFFF0);
+instance->irq = pci_config._int_line;
+instance->dma_channel = -1;
+AM53C974_setio(instance);
+
+#ifdef AM53C974_SCSI_ID
+instance->this_id = AM53C974_SCSI_ID;
+AM53C974_write_8(CNTLREG1, instance->this_id & CNTLREG1_SID);
+#else
+instance->this_id = AM53C974_read_8(CNTLREG1) & CNTLREG1_SID;
+if (instance->this_id != 7)
+ printk("scsi%d: WARNING: unusual hostadapter SCSI id %d; please verify!\n",
+ instance->host_no, instance->this_id);
+#endif
+
+for (i = 0; i < sizeof(hostdata->msgout); i++) {
+ hostdata->msgout[i] = NOP;
+ hostdata->last_message[i] = NOP; }
+for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_per[i] = DEF_STP;
+ hostdata->sync_off[i] = 0;
+ hostdata->sync_neg[i] = 0;
+ hostdata->sync_en[i] = DEFAULT_SYNC_NEGOTIATION_ENABLED;
+ hostdata->max_rate[i] = DEFAULT_RATE;
+ hostdata->max_offset[i] = DEFAULT_SYNC_OFFSET; }
+
+/* overwrite defaults by LILO overrides */
+for (i = 0; i < commandline_current; i++) {
+ if (overrides[i].host_scsi_id == instance->this_id) {
+ j = overrides[i].target_scsi_id;
+ hostdata->sync_en[j] = 1;
+ hostdata->max_rate[j] = overrides[i].max_rate;
+ hostdata->max_offset[j] = overrides[i].max_offset;
+ }
+ }
+
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->issue_queue = NULL;
+hostdata->disconnected_queue = NULL;
+hostdata->in_reset = 0;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+
+/* Set up an interrupt handler if we aren't already sharing an IRQ with another board */
+for (search = first_host;
+ search && ( ((the_template != NULL) && (search->hostt != the_template)) ||
+ (search->irq != instance->irq) || (search == instance) );
+ search = search->next);
+if (!search) {
+ if (request_irq(instance->irq, AM53C974_intr, SA_INTERRUPT, "AM53C974", NULL)) {
+ printk("scsi%d: IRQ%d not free, detaching\n", instance->host_no, instance->irq);
+ scsi_unregister(instance);
+ return 0; }
+ }
+ else {
+ printk("scsi%d: using interrupt handler previously installed for scsi%d\n",
+ instance->host_no, search->host_no); }
+
+if (!the_template) {
+ the_template = instance->hostt;
+ first_instance = instance; }
+
+/* do hard reset */
+AM53C974_write_8(CMDREG, CMDREG_RDEV); /* reset device */
+udelay(5);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+AM53C974_write_8(CNTLREG1, CNTLREG1_DISR | instance->this_id);
+AM53C974_write_8(CMDREG, CMDREG_RBUS); /* reset SCSI bus */
+udelay(10);
+AM53C974_config_after_reset(instance);
+udelay(500000);
+return(1);
+}
+
+/*********************************************************************
+* Function : AM53C974_config_after_reset(struct Scsi_Host *instance) *
+* *
+* Purpose : initializes chip registers after reset *
+* *
+* Inputs : instance - which AM53C974 *
+* *
+* Returns : nothing *
+**********************************************************************/
+static void AM53C974_config_after_reset(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+AM53C974_setio(instance);
+
+/* clear SCSI FIFO */
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+
+/* configure device */
+AM53C974_write_8(STIMREG, DEF_SCSI_TIMEOUT);
+AM53C974_write_8(STPREG, DEF_STP & STPREG_STP);
+AM53C974_write_8(SOFREG, (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+AM53C974_write_8(CLKFREG, DEF_CLKF & CLKFREG_MASK);
+AM53C974_write_8(CNTLREG1, (DEF_ETM<<7) | CNTLREG1_DISR | (DEF_PERE<<4) | instance->this_id);
+AM53C974_write_8(CNTLREG2, (DEF_ENF<<6));
+AM53C974_write_8(CNTLREG3, (DEF_ADIDCHK<<7) | (DEF_FASTSCSI<<4) | (DEF_FASTCLK<<3));
+AM53C974_write_8(CNTLREG4, (DEF_GLITCH<<6) | (DEF_PWD<<5) | (DEF_RAE<<3) | (DEF_RADE<<2) | CNTLREG4_RES);
+}
+
+/***********************************************************************
+* Function : const char *AM53C974_info(struct Scsi_Host *instance) *
+* *
+* Purpose : return device driver information *
+* *
+* Inputs : instance - which AM53C974 *
+* *
+* Returns : info string *
+************************************************************************/
+const char *AM53C974_info(struct Scsi_Host *instance)
+{
+static char info[100];
+
+sprintf(info, "AM53/79C974 PCscsi driver rev. %d.%d; host I/O address: 0x%x; irq: %d\n",
+ AM53C974_DRIVER_REVISION_MAJOR, AM53C974_DRIVER_REVISION_MINOR,
+ instance->io_port, instance->irq);
+return (info);
+}
+
+/**************************************************************************
+* Function : int AM53C974_command (Scsi_Cmnd *SCpnt) *
+* *
+* Purpose : the unqueued SCSI command function, replaced by the *
+* AM53C974_queue_command function *
+* *
+* Inputs : SCpnt - pointer to command structure *
+* *
+* Returns : status, see hosts.h for details *
+***************************************************************************/
+int AM53C974_command(Scsi_Cmnd *SCpnt)
+{
+DEB(printk("AM53C974_command called\n"));
+return 0;
+}
+
+/**************************************************************************
+* Function : void initialize_SCp(Scsi_Cmnd *cmd) *
+* *
+* Purpose : initialize the saved data pointers for cmd to point to the *
+* start of the buffer. *
+* *
+* Inputs : cmd - Scsi_Cmnd structure to have pointers reset. *
+* *
+* Returns : nothing *
+**************************************************************************/
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
+{
+if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *)cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length; }
+ else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen; }
+}
+
+/**************************************************************************
+* Function : run_main(void) *
+* *
+* Purpose : ensure that the coroutine is running and will process our *
+* request. main_running is checked/set here (in an inline *
+* function rather than in AM53C974_main itself) to reduce the *
+* chances of stack overflow. *
+* *
+* *
+* Inputs : none *
+* *
+* Returns : nothing *
+**************************************************************************/
+static __inline__ void run_main(void)
+{
+cli();
+if (!main_running) {
+ /* main_running is cleared in AM53C974_main once it can't do
+ more work, and AM53C974_main exits with interrupts disabled. */
+ main_running = 1;
+ AM53C974_main();
+ sti(); }
+ else
+ sti();
+}
+
+/**************************************************************************
+* Function : int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+*
+* Purpose : writes SCSI command into AM53C974 FIFO
+*
+* Inputs : cmd - SCSI command, done - function called on completion, with
+* a pointer to the command descriptor.
+*
+* Returns : status, see hosts.h for details
+*
+* Side effects :
+* cmd is added to the per instance issue_queue, with minor
+* twiddling done to the host specific fields of cmd. If the
+* main coroutine is not running, it is restarted.
+**************************************************************************/
+int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *tmp;
+
+cli();
+DEB_QUEUE(printk(SEPARATOR_LINE));
+DEB_QUEUE(printk("scsi%d: AM53C974_queue_command called\n", instance->host_no));
+DEB_QUEUE(printk("cmd=%02x target=%02x lun=%02x bufflen=%d use_sg = %02x\n",
+ cmd->cmnd[0], cmd->target, cmd->lun, cmd->request_bufflen, cmd->use_sg));
+
+/* We use the host_scribble field as a pointer to the next command in a queue */
+cmd->host_scribble = NULL;
+cmd->scsi_done = done;
+cmd->result = 0;
+cmd->device->disconnect = 0;
+
+/* Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists. */
+if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ LIST(cmd, hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = cmd; }
+ else {
+ for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; tmp->host_scribble;
+ tmp = (Scsi_Cmnd *)tmp->host_scribble);
+ LIST(cmd, tmp);
+ tmp->host_scribble = (unsigned char *)cmd; }
+
+DEB_QUEUE(printk("scsi%d : command added to %s of queue\n", instance->host_no,
+ (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
+
+/* Run the coroutine if it isn't already running. */
+run_main();
+return 0;
+}
+
+/**************************************************************************
+ * Function : AM53C974_main (void)
+ *
+ * Purpose : AM53C974_main is a coroutine that runs as long as more work can
+ * be done on the AM53C974 host adapters in a system. Both
+ * AM53C974_queue_command() and AM53C974_intr() will try to start it
+ * in case it is not running.
+ *
+ * NOTE : AM53C974_main exits with interrupts *disabled*, the caller should
+ * reenable them. This prevents reentrancy and kernel stack overflow.
+ **************************************************************************/
+static void AM53C974_main(void)
+{
+AM53C974_local_declare();
+Scsi_Cmnd *tmp, *prev;
+struct Scsi_Host *instance;
+struct AM53C974_hostdata *hostdata;
+int done;
+
+/* We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set main_running to 0 and exit. */
+
+do {
+ cli(); /* Freeze request queues */
+ done = 1;
+ for (instance = first_instance; instance && instance->hostt == the_template;
+ instance = instance->next) {
+ hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+ AM53C974_setio(instance);
+ /* start to select target if we are not connected and not in the
+ selection process */
+ if (!hostdata->connected && !hostdata->sel_cmd) {
+ /* Search through the issue_queue for a command destined for a target
+ that is not busy. */
+ for (tmp = (Scsi_Cmnd *)hostdata->issue_queue, prev = NULL; tmp;
+ prev = tmp, tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ /* When we find one, remove it from the issue queue. */
+ if (!(hostdata->busy[tmp->target] & (1 << tmp->lun))) {
+ if (prev) {
+ REMOVE(prev, (Scsi_Cmnd *)(prev->host_scribble), tmp,
+ (Scsi_Cmnd *)(tmp->host_scribble));
+ prev->host_scribble = tmp->host_scribble; }
+ else {
+ REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble);
+ hostdata->issue_queue = (Scsi_Cmnd *)tmp->host_scribble; }
+ tmp->host_scribble = NULL;
+
+ /* go into selection mode, disable reselection and wait for
+ SO interrupt which will continue with the selection process */
+ hostdata->selecting = 1;
+ hostdata->sel_cmd = tmp;
+ AM53C974_write_8(CMDREG, CMDREG_DSR);
+ break;
+ } /* if target/lun is not busy */
+
+ } /* for */
+ } /* if (!hostdata->connected) */
+ else {
+ DEB(printk("main: connected; cmd = 0x%lx, sel_cmd = 0x%lx\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd));
+ }
+ } /* for instance */
+ } while (!done);
+main_running = 0;
+}
+
+/************************************************************************
+* Function : AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs) *
+* *
+* Purpose : interrupt handler *
+* *
+* Inputs : irq - interrupt line, regs - ? *
+* *
+* Returns : nothing *
+************************************************************************/
+static void AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+AM53C974_local_declare();
+struct Scsi_Host *instance;
+struct AM53C974_hostdata *hostdata;
+unsigned char cmdreg, dmastatus, statreg, isreg, instreg, cfifo;
+
+/* find AM53C974 hostadapter responsible for this interrupt */
+for (instance = first_instance; instance; instance = instance->next)
+ if ((instance->irq == irq) && (instance->hostt == the_template)) goto FOUND;
+sti();
+return;
+
+/* found; now decode and process */
+FOUND:
+hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+dmastatus = AM53C974_read_8(DMASTATUS);
+
+DEB_INTR(printk(SEPARATOR_LINE));
+DEB_INTR(printk("AM53C974 interrupt; dmastatus=0x%02x\n", dmastatus));
+KEYWAIT();
+
+/*** DMA related interrupts ***/
+if (hostdata->connected && (dmastatus & (DMASTATUS_ERROR | DMASTATUS_PWDN |
+ DMASTATUS_ABORT))) {
+ /* DMA error or POWERDOWN */
+ printk("scsi%d: DMA error or powerdown; dmastatus: 0x%02x\n",
+ instance->host_no, dmastatus);
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ panic("scsi%d: cannot recover\n", instance->host_no); }
+
+if (hostdata->connected && (dmastatus & DMASTATUS_DONE)) {
+ /* DMA transfer done */
+ unsigned long residual;
+ cli();
+ if (!(AM53C974_read_8(DMACMD) & DMACMD_DIR)) {
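+ /* the residual is the 24-bit transfer counter plus whatever is still in the FIFO;
+ poll until the chip signals SCSIINT or the count drains */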
+ do {
+ dmastatus = AM53C974_read_8(DMASTATUS);
+ residual = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+ residual += AM53C974_read_8(CFIREG) & CFIREG_CF;
+ } while (!(dmastatus & DMASTATUS_SCSIINT) && residual);
+ residual = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+ residual += AM53C974_read_8(CFIREG) & CFIREG_CF;
+ }
+ else
+ residual = 0;
+ hostdata->connected->SCp.ptr += hostdata->connected->SCp.this_residual - residual;
+ hostdata->connected->SCp.this_residual = residual;
+
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+
+ /* if service request missed before, process it now (ugly) */
+ if (hostdata->dma_busy) {
+ hostdata->dma_busy = 0;
+ cmdreg = AM53C974_read_8(CMDREG);
+ statreg = AM53C974_read_8(STATREG);
+ isreg = AM53C974_read_8(ISREG);
+ instreg = AM53C974_read_8(INSTREG);
+ cfifo = AM53C974_cfifo();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo,
+ dmastatus); }
+ sti();
+ }
+
+if (!(dmastatus & DMASTATUS_SCSIINT)) {
+ sti();
+ return; }
+
+/*** SCSI related interrupts ***/
+cmdreg = AM53C974_read_8(CMDREG);
+statreg = AM53C974_read_8(STATREG);
+isreg = AM53C974_read_8(ISREG);
+instreg = AM53C974_read_8(INSTREG);
+cfifo = AM53C974_cfifo();
+
+DEB_INTR(printk("scsi%d: statreg: 0x%02x; isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ instance->host_no, statreg, isreg, instreg, cfifo));
+
+if (statreg & STATREG_PE) {
+ /* parity error */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d : PARITY error\n", instance->host_no);
+ if (hostdata->connected) hostdata->sync_off[hostdata->connected->target] = 0; /* setup asynchronous transfer */
+ hostdata->aborted = 1; }
+
+if (statreg & STATREG_IOE) {
+ /* illegal operation error */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d : ILLEGAL OPERATION error\n", instance->host_no);
+ printk("cmdreg: 0x%02x; dmacmd: 0x%02x; statreg: 0x%02x; \n"
+ "isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ cmdreg, AM53C974_read_8(DMACMD), statreg, isreg, instreg, cfifo); }
+if (hostdata->in_reset && (instreg & INSTREG_SRST)) {
+ /* RESET INTERRUPT */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ DEB(printk("Bus reset interrupt received\n"));
+ AM53C974_intr_bus_reset(instance);
+ cli();
+ if (hostdata->connected) {
+ hostdata->connected->result = DID_RESET << 16;
+ hostdata->connected->scsi_done((Scsi_Cmnd *)hostdata->connected);
+ hostdata->connected = NULL; }
+ else {
+ if (hostdata->sel_cmd) {
+ hostdata->sel_cmd->result = DID_RESET << 16;
+ hostdata->sel_cmd->scsi_done((Scsi_Cmnd *)hostdata->sel_cmd);
+ hostdata->sel_cmd = NULL; }
+ }
+ sti();
+ if (hostdata->in_reset == 1) goto EXIT;
+ else return;
+ }
+
+if (instreg & INSTREG_ICMD) {
+ /* INVALID COMMAND INTERRUPT */
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ printk("scsi%d: Invalid command interrupt\n", instance->host_no);
+ printk("cmdreg: 0x%02x; dmacmd: 0x%02x; statreg: 0x%02x; dmastatus: 0x%02x; \n"
+ "isreg: 0x%02x; instreg: 0x%02x; cfifo: 0x%02x\n",
+ cmdreg, AM53C974_read_8(DMACMD), statreg, dmastatus, isreg, instreg, cfifo);
+ panic("scsi%d: cannot recover\n", instance->host_no); }
+
+if (instreg & INSTREG_DIS) {
+ /* DISCONNECT INTERRUPT */
+ DEB_INTR(printk("Disconnect interrupt received; "));
+ cli();
+ AM53C974_intr_disconnect(instance);
+ sti();
+ goto EXIT; }
+
+if (instreg & INSTREG_RESEL) {
+ /* RESELECTION INTERRUPT */
+ DEB_INTR(printk("Reselection interrupt received\n"));
+ cli();
+ AM53C974_intr_reselect(instance, statreg);
+ sti();
+ goto EXIT; }
+
+if (instreg & INSTREG_SO) {
+ DEB_INTR(printk("Successful operation interrupt received\n"));
+ if (hostdata->selecting) {
+ DEB_INTR(printk("DSR completed, starting select\n"));
+ cli();
+ AM53C974_select(instance, (Scsi_Cmnd *)hostdata->sel_cmd,
+ (hostdata->sel_cmd->cmnd[0] == REQUEST_SENSE) ?
+ TAG_NONE : TAG_NEXT);
+ hostdata->selecting = 0;
+ AM53C974_set_sync(instance, hostdata->sel_cmd->target);
+ sti();
+ return; }
+
+ if (hostdata->sel_cmd != NULL) {
+ if ( ((isreg & ISREG_IS) != ISREG_OK_NO_STOP) &&
+ ((isreg & ISREG_IS) != ISREG_OK_STOP) ) {
+ /* UNSUCCESSFUL SELECTION */
+ DEB_INTR(printk("unsuccessful selection\n"));
+ cli();
+ hostdata->dma_busy = 0;
+ LIST(hostdata->sel_cmd, hostdata->issue_queue);
+ hostdata->sel_cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0;
+ sti();
+ goto EXIT; }
+ else {
+ /* SUCCESSFUL SELECTION */
+ DEB(printk("successful selection; cmd=0x%02lx\n", (long)hostdata->sel_cmd));
+ cli();
+ hostdata->dma_busy = 0;
+ hostdata->disconnecting = 0;
+ hostdata->connected = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0;
+#ifdef SCSI2
+ if (!hostdata->connected->device->tagged_queue)
+#endif
+ hostdata->busy[hostdata->connected->target] |= (1 << hostdata->connected->lun);
+ /* very strange -- use_sg is sometimes nonzero for request sense commands !! */
+ if ((hostdata->connected->cmnd[0] == REQUEST_SENSE) && hostdata->connected->use_sg) {
+ DEB(printk("scsi%d: REQUEST_SENSE command with nonzero use_sg\n", instance->host_no));
+ KEYWAIT();
+ hostdata->connected->use_sg = 0; }
+ initialize_SCp((Scsi_Cmnd *)hostdata->connected);
+ hostdata->connected->SCp.phase = PHASE_CMDOUT;
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti();
+ return; }
+ }
+ else {
+ cli();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti();
+ return; }
+ }
+
+if (instreg & INSTREG_SR) {
+ DEB_INTR(printk("Service request interrupt received, "));
+ if (hostdata->connected) {
+ DEB_INTR(printk("calling information_transfer\n"));
+ cli();
+ AM53C974_information_transfer(instance, statreg, isreg, instreg, cfifo, dmastatus);
+ sti(); }
+ else {
+ printk("scsi%d: weird: service request when no command connected\n", instance->host_no);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO); } /* clear FIFO */
+ return;
+ }
+
+EXIT:
+ DEB_INTR(printk("intr: starting main\n"));
+ run_main();
+ DEB_INTR(printk("end of intr\n"));
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_disconnect(struct Scsi_Host *instance)
+*
+* Purpose : manage target disconnection
+*
+* Inputs : instance -- which AM53C974
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_intr_disconnect(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *cmd;
+AM53C974_setio(instance);
+
+if (hostdata->sel_cmd != NULL) {
+ /* normal selection timeout, typical for nonexistent targets */
+ cmd = (Scsi_Cmnd *)hostdata->sel_cmd;
+ DEB_INTR(printk("bad target\n"));
+ cmd->result = DID_BAD_TARGET << 16;
+ goto EXIT_FINISHED; }
+
+if (!hostdata->connected) {
+ /* can happen if controller was reset, a device tried to reconnect,
+ failed and disconnects now */
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+ return; }
+
+if (hostdata->disconnecting) {
+ /* target sent disconnect message, so we are prepared */
+ cmd = (Scsi_Cmnd *)hostdata->connected;
+ AM53C974_set_async(instance, cmd->target);
+ DEB_INTR(printk("scsi%d : disc. from cmnd %d for ta %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ if (cmd->device->disconnect) {
+ /* target wants to reselect later */
+ DEB_INTR(printk("ok, re-enabling selection\n"));
+ LIST(cmd,hostdata->disconnected_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->disconnected_queue;
+ hostdata->disconnected_queue = cmd;
+ DEB_QUEUE(printk("scsi%d : command for target %d lun %d this %d was moved from connected to"
+ " the disconnected_queue\n", instance->host_no, cmd->target,
+ cmd->lun, hostdata->disconnected_queue->SCp.this_residual));
+ DEB_QUEUE(AM53C974_print_queues(instance));
+ goto EXIT_UNFINISHED; }
+ else {
+ /* target does not want to reselect later, we are really finished */
+#ifdef AM53C974_DEBUG
+ if (cmd->cmnd[0] == REQUEST_SENSE) {
+ int i;
+ printk("Request sense data dump:\n");
+ for (i = 0; i < cmd->request_bufflen; i++) {
+ printk("%02x ", *((char *)(cmd->request_buffer) + i));
+ if (i && !(i % 16)) printk("\n"); }
+ printk("\n"); }
+#endif
+ goto EXIT_FINISHED; } /* !cmd->device->disconnect */
+ } /* if (hostdata->disconnecting) */
+
+/* no disconnect message received; unexpected disconnection */
+cmd = (Scsi_Cmnd *)hostdata->connected;
+if (cmd) {
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ AM53C974_set_async(instance, cmd->target);
+ printk("scsi%d: Unexpected disconnect; phase: %d; target: %d; this_residual: %d; buffers_residual: %d; message: %d\n",
+ instance->host_no, cmd->SCp.phase, cmd->target, cmd->SCp.this_residual, cmd->SCp.buffers_residual,
+ cmd->SCp.Message);
+ printk("cmdreg: 0x%02x; statreg: 0x%02x; isreg: 0x%02x; cfifo: 0x%02x\n",
+ AM53C974_read_8(CMDREG), AM53C974_read_8(STATREG), AM53C974_read_8(ISREG),
+ AM53C974_read_8(CFIREG) & CFIREG_CF);
+
+ if ((hostdata->last_message[0] == EXTENDED_MESSAGE) &&
+ (hostdata->last_message[2] == EXTENDED_SDTR)) {
+ /* sync. negotiation was aborted, setup asynchronous transfer with target */
+ hostdata->sync_off[cmd->target] = 0; }
+ if (hostdata->aborted || hostdata->msgout[0] == ABORT)
+ cmd->result = DID_ABORT << 16;
+ else
+ cmd->result = DID_ERROR << 16;
+ goto EXIT_FINISHED; }
+
+EXIT_FINISHED:
+hostdata->aborted = 0;
+hostdata->msgout[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+DEB(printk("disconnect; issue_queue: 0x%lx, disconnected_queue: 0x%lx\n",
+ (long)hostdata->issue_queue, (long)hostdata->disconnected_queue));
+cmd->scsi_done(cmd);
+
+if (!hostdata->selecting) {
+ AM53C974_set_async(instance, cmd->target);
+ AM53C974_write_8(CMDREG, CMDREG_ESR); } /* allow reselect */
+return;
+
+EXIT_UNFINISHED:
+hostdata->msgout[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+DEB(printk("disconnect; issue_queue: 0x%lx, disconnected_queue: 0x%lx\n",
+ (long)hostdata->issue_queue, (long)hostdata->disconnected_queue));
+if (!hostdata->selecting) {
+ AM53C974_set_async(instance, cmd->target);
+ AM53C974_write_8(CMDREG, CMDREG_ESR); } /* allow reselect */
+return;
+}
+
+/**************************************************************************
+* Function : int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg)
+*
+* Purpose : setup message string for sync. negotiation
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+* msg -- input message string
+*
+* Returns : 0 if parameters accepted or 1 if not accepted
+*
+* Side effects: hostdata is changed
+*
+* Note: we assume here that fastclk is enabled
+**************************************************************************/
+static int AM53C974_sync_neg(struct Scsi_Host *instance, int target, unsigned char *msg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+int period, offset, i, rate, rate_rem;
+AM53C974_setio(instance);
+
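+/* msg[3] is the SDTR transfer period in 4 ns units; convert it to chip clock cycles
+ (assuming DEF_CLK is given in MHz), rounding to the nearest cycle */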
+period = (DEF_CLK * msg[3] * 8 + 1000) / 2000;
+if (period < MIN_PERIOD) {
+ period = MIN_PERIOD;
+ hostdata->msgout[3] = period / 4; }
+ else
+ if (period > MAX_PERIOD) {
+ period = MAX_PERIOD;
+ hostdata->msgout[3] = period / 4; }
+ else
+ hostdata->msgout[3] = msg[3];
+offset = msg[4];
+if (offset > MAX_OFFSET) offset = MAX_OFFSET;
+hostdata->msgout[4] = offset;
+hostdata->sync_per[target] = period;
+hostdata->sync_off[target] = offset;
+for (i = 0; i < 3; i++) hostdata->msgout[i] = msg[i];
+if ((hostdata->msgout[3] != msg[3]) || (msg[4] != offset)) return(1);
+
+rate = DEF_CLK / period;
+rate_rem = 10 * (DEF_CLK - period * rate) / period;
+
+if (offset)
+ printk("\ntarget %d: rate=%d.%d Mhz, synchronous, sync offset=%d bytes\n",
+ target, rate, rate_rem, offset);
+ else
+ printk("\ntarget %d: rate=%d.%d Mhz, asynchronous\n", target, rate, rate_rem);
+
+return(0);
+}
+
+/**************************************************************************
+* Function : AM53C974_set_async(struct Scsi_Host *instance, int target)
+*
+* Purpose : put controller into async. mode
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_set_async(struct Scsi_Host *instance, int target)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+AM53C974_write_8(STPREG, hostdata->sync_per[target]);
+AM53C974_write_8(SOFREG, (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+}
+
+/**************************************************************************
+* Function : AM53C974_set_sync(struct Scsi_Host *instance, int target)
+*
+* Purpose : put controller into sync. mode
+*
+* Inputs : instance -- which AM53C974
+* target -- which SCSI target to deal with
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_set_sync(struct Scsi_Host *instance, int target)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+AM53C974_write_8(STPREG, hostdata->sync_per[target]);
+AM53C974_write_8(SOFREG, (SOFREG_SO & hostdata->sync_off[target]) |
+ (DEF_SOF_RAD<<6) | (DEF_SOF_RAA<<4));
+}
+
+/***********************************************************************
+* Function : AM53C974_information_transfer(struct Scsi_Host *instance, *
+* unsigned char statreg, unsigned char isreg, *
+* unsigned char instreg, unsigned char cfifo, *
+* unsigned char dmastatus) *
+* *
+* Purpose : handle phase changes *
+* *
+* Inputs : instance - which AM53C974 *
+* statreg - status register *
+* isreg - internal state register *
+* instreg - interrupt status register *
+* cfifo - number of bytes in FIFO *
+* dmastatus - dma status register *
+* *
+* Returns : nothing *
+************************************************************************/
+static void AM53C974_information_transfer(struct Scsi_Host *instance,
+ unsigned char statreg, unsigned char isreg,
+ unsigned char instreg, unsigned char cfifo,
+ unsigned char dmastatus)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *cmd = (Scsi_Cmnd *)hostdata->connected;
+int ret, i, len, residual=-1;
+AM53C974_setio(instance);
+
+DEB_INFO(printk(SEPARATOR_LINE));
+switch (statreg & STATREG_PHASE) { /* scsi phase */
+ case PHASE_DATAOUT:
+ DEB_INFO(printk("Dataout phase; cmd=0x%lx, sel_cmd=0x%lx, this_residual=%d, buffers_residual=%d\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+ cmd->SCp.phase = PHASE_DATAOUT;
+ goto PHASE_DATA_IO;
+
+ case PHASE_DATAIN:
+ DEB_INFO(printk("Datain phase; cmd=0x%lx, sel_cmd=0x%lx, this_residual=%d, buffers_residual=%d\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+ cmd->SCp.phase = PHASE_DATAIN;
+ PHASE_DATA_IO:
+ if (hostdata->aborted) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ return; }
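+ /* current scatter-gather segment is exhausted: advance to the next one */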
+ if ((!cmd->SCp.this_residual) && cmd->SCp.buffers_residual) {
+ cmd->SCp.buffer++;
+ cmd->SCp.buffers_residual--;
+ cmd->SCp.ptr = (unsigned char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length; }
+ if (cmd->SCp.this_residual) {
+ if (!(AM53C974_read_8(DMACMD) & DMACMD_START)) {
+ hostdata->dma_busy = 0;
+ AM53C974_transfer_dma(instance, statreg & STATREG_IO,
+ (unsigned long)cmd->SCp.this_residual,
+ cmd->SCp.ptr); }
+ else
+ hostdata->dma_busy = 1;
+ }
+ return;
+
+ case PHASE_MSGIN:
+ DEB_INFO(printk("Message-In phase; cmd=0x%lx, sel_cmd=0x%lx\n",
+ (long)hostdata->connected, (long)hostdata->sel_cmd));
+ AM53C974_set_async(instance, cmd->target);
+ if (cmd->SCp.phase == PHASE_DATAIN)
+ AM53C974_dma_blast(instance, dmastatus, statreg);
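+ /* if a data-out transfer is still running, stop the DMA engine and fold the
+ transfer counter plus FIFO contents back into the residual */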
+ if ((cmd->SCp.phase == PHASE_DATAOUT) && (AM53C974_read_8(DMACMD) & DMACMD_START)) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ residual = cfifo + (AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16));
+ cmd->SCp.ptr += cmd->SCp.this_residual - residual;
+ cmd->SCp.this_residual = residual;
+ if (cfifo) { AM53C974_write_8(CMDREG, CMDREG_CFIFO); cfifo = 0; }
+ }
+ if (cmd->SCp.phase == PHASE_STATIN) {
+ while ((AM53C974_read_8(CFIREG) & CFIREG_CF) < 2) ;
+ cmd->SCp.Status = AM53C974_read_8(FFREG);
+ cmd->SCp.Message = AM53C974_read_8(FFREG);
+ DEB_INFO(printk("Message-In phase; status=0x%02x, message=0x%02x\n",
+ cmd->SCp.Status, cmd->SCp.Message));
+ ret = AM53C974_message(instance, cmd, cmd->SCp.Message); }
+ else {
+ if (!cfifo) {
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ cmd->SCp.Message = AM53C974_read_8(FFREG);
+ }
+ ret = AM53C974_message(instance, cmd, cmd->SCp.Message);
+ }
+ cmd->SCp.phase = PHASE_MSGIN;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+ case PHASE_MSGOUT:
+ DEB_INFO(printk("Message-Out phase; cfifo=%d; msgout[0]=0x%02x\n",
+ AM53C974_read_8(CFIREG) & CFIREG_CF, hostdata->msgout[0]));
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ AM53C974_set_async(instance, cmd->target);
+ for (i = 0; i < sizeof(hostdata->last_message); i++)
+ hostdata->last_message[i] = hostdata->msgout[i];
+ if ((hostdata->msgout[0] == 0) || INSIDE(hostdata->msgout[0], 0x02, 0x1F) ||
+ INSIDE(hostdata->msgout[0], 0x80, 0xFF))
+ len = 1;
+ else {
+ if (hostdata->msgout[0] == EXTENDED_MESSAGE) {
+#ifdef AM53C974_DEBUG_INFO
+ printk("Extended message dump:\n");
+ for (i = 0; i < hostdata->msgout[1] + 2; i++) {
+ printk("%02x ", hostdata->msgout[i]);
+ if (i && !(i % 16)) printk("\n"); }
+ printk("\n");
+#endif
+ len = hostdata->msgout[1] + 2; }
+ else
+ len = 2;
+ }
+ for (i = 0; i < len; i++) AM53C974_write_8(FFREG, hostdata->msgout[i]);
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ cmd->SCp.phase = PHASE_MSGOUT;
+ hostdata->msgout[0] = NOP;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+
+ case PHASE_CMDOUT:
+ DEB_INFO(printk("Command-Out phase\n"));
+ AM53C974_set_async(instance, cmd->target);
+ for (i = 0; i < cmd->cmd_len; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ cmd->SCp.phase = PHASE_CMDOUT;
+ AM53C974_set_sync(instance, cmd->target);
+ break;
+
+ case PHASE_STATIN:
+ DEB_INFO(printk("Status phase\n"));
+ if (cmd->SCp.phase == PHASE_DATAIN)
+ AM53C974_dma_blast(instance, dmastatus, statreg);
+ AM53C974_set_async(instance, cmd->target);
+ if (cmd->SCp.phase == PHASE_DATAOUT) {
+ unsigned long residual;
+
+ if (AM53C974_read_8(DMACMD) & DMACMD_START) {
+ AM53C974_write_8(DMACMD, DMACMD_IDLE);
+ residual = cfifo + (AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16));
+ cmd->SCp.ptr += cmd->SCp.this_residual - residual;
+ cmd->SCp.this_residual = residual; }
+ if (cfifo) { AM53C974_write_8(CMDREG, CMDREG_CFIFO); cfifo = 0; }
+ }
+ cmd->SCp.phase = PHASE_STATIN;
+ AM53C974_write_8(CMDREG, CMDREG_ICCS); /* command complete */
+ break;
+
+ case PHASE_RES_0:
+ case PHASE_RES_1:
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ DEB_INFO(printk("Reserved phase\n"));
+ break;
+ }
+KEYWAIT();
+}
+
+/******************************************************************************
+* Function : int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+* unsigned char msg)
+*
+* Purpose : handle SCSI messages
+*
+* Inputs : instance -- which AM53C974
+* cmd -- SCSI command the message belongs to
+* msg -- message id byte
+*
+* Returns : 1 on success, 0 on failure.
+**************************************************************************/
+static int AM53C974_message(struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ unsigned char msg)
+{
+AM53C974_local_declare();
+static unsigned char extended_msg[10];
+unsigned char statreg;
+int len, ret = 0;
+unsigned char *p;
+#ifdef AM53C974_DEBUG_MSG
+int j;
+#endif
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+DEB_MSG(printk(SEPARATOR_LINE));
+
+/* Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands. */
+switch (msg) {
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by releasing ACK */
+ DEB_LINKED(printk("scsi%d : target %d lun %d linked command complete.\n",
+ instance->host_no, cmd->target, cmd->lun));
+ /* Sanity check : A linked command should only terminate with
+ * one of these messages if there are more linked commands available. */
+ if (!cmd->next_link) {
+ printk("scsi%d : target %d lun %d linked command complete, no next_link\n"
+ instance->host_no, cmd->target, cmd->lun);
+ hostdata->aborted = 1;
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break; }
+ if (hostdata->aborted) {
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of LINKED_CMD_COMPLETE or"
+ "LINKED_FLG_CMD_COMPLETE message\n", cmd->cmnd[0]));
+ AM53C974_write_8(CMDREG, CMDREG_SATN); }
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ DEB_LINKED(printk("scsi%d : target %d lun %d linked request done, calling scsi_done().\n",
+ instance->host_no, cmd->target, cmd->lun));
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+
+#endif /* def LINKED */
+
+ case ABORT:
+ case COMMAND_COMPLETE:
+ DEB_MSG(printk("scsi%d: command complete message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ hostdata->disconnecting = 1;
+ cmd->device->disconnect = 0;
+
+ /* I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ * If it was a REQUEST SENSE command, we need some way
+ * to differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the result
+ * code unchanged.
+ *
+ * The non-obvious place is where the REQUEST SENSE failed */
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ if (hostdata->aborted) {
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of ABORT or"
+ "COMMAND_COMPLETE message\n", cmd->cmnd[0]));
+ break; }
+ if ((cmd->cmnd[0] != REQUEST_SENSE) && (cmd->SCp.Status == CHECK_CONDITION)) {
+ DEB_MSG(printk("scsi%d : performing request sense\n", instance->host_no));
+ cmd->cmnd[0] = REQUEST_SENSE;
+ cmd->cmnd[1] &= 0xe0;
+ cmd->cmnd[2] = 0;
+ cmd->cmnd[3] = 0;
+ cmd->cmnd[4] = sizeof(cmd->sense_buffer);
+ cmd->cmnd[5] = 0;
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->sense_buffer;
+ cmd->SCp.this_residual = sizeof(cmd->sense_buffer);
+ LIST(cmd,hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *)cmd;
+ DEB_MSG(printk("scsi%d : REQUEST SENSE added to head of issue queue\n",instance->host_no));
+ }
+
+ /* Accept message by clearing ACK */
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case MESSAGE_REJECT:
+ DEB_MSG(printk("scsi%d: reject message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ switch (hostdata->last_message[0]) {
+ case EXTENDED_MESSAGE:
+ if (hostdata->last_message[2] == EXTENDED_SDTR) {
+ /* sync. negotiation was rejected, setup asynchronous transfer with target */
+ printk("\ntarget %d: rate=%d Mhz, asynchronous (sync. negotiation rejected)\n",
+ cmd->target, DEF_CLK / DEF_STP);
+ hostdata->sync_off[cmd->target] = 0;
+ hostdata->sync_per[cmd->target] = DEF_STP; }
+ break;
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ cmd->device->tagged_queue = 0;
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+ break;
+ default:
+ break;
+ }
+ if (hostdata->aborted) AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case DISCONNECT:
+ DEB_MSG(printk("scsi%d: disconnect message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ cmd->device->disconnect = 1;
+ hostdata->disconnecting = 1;
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* Accept message by clearing ACK */
+ break;
+
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ DEB_MSG(printk("scsi%d: save/restore pointers message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ /* The SCSI data pointer is *IMPLICITLY* saved on a disconnect
+ * operation, in violation of the SCSI spec so we can safely
+ * ignore SAVE/RESTORE pointers calls.
+ *
+ * Unfortunately, some disks violate the SCSI spec and
+ * don't issue the required SAVE_POINTERS message before
+ * disconnecting, and we have to break spec to remain
+ * compatible. */
+ if (hostdata->aborted) {
+ DEB_ABORT(printk("ATN set for cmnd %d upon reception of SAVE/REST. POINTERS message\n",
+ cmd->cmnd[0]));
+ AM53C974_write_8(CMDREG, CMDREG_SATN); }
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ case EXTENDED_MESSAGE:
+ DEB_MSG(printk("scsi%d: extended message received; cmd %d for target %d, lun %d\n",
+ instance->host_no, cmd->cmnd[0], cmd->target, cmd->lun));
+ /* Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ */
+ /* BEWARE!! THIS CODE IS EXTREMELY UGLY */
+ extended_msg[0] = EXTENDED_MESSAGE;
+ AM53C974_read_8(INSTREG) ; /* clear int */
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int();
+ /* get length */
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int();
+ extended_msg[1] = len = AM53C974_read_8(FFREG); /* get length */
+ p = extended_msg+2;
+ /* read the remaining (len) bytes */
+ while (len) {
+ AM53C974_write_8(CMDREG, CMDREG_IT);
+ AM53C974_poll_int();
+ if (len > 1) {
+ AM53C974_write_8(CMDREG, CMDREG_MA); /* ack. msg byte, then wait for SO */
+ AM53C974_poll_int(); }
+ *p = AM53C974_read_8(FFREG);
+ p++; len--; }
+
+#ifdef AM53C974_DEBUG_MSG
+ printk("scsi%d: received extended message: ", instance->host_no);
+ for (j = 0; j < extended_msg[1] + 2; j++) {
+ printk("0x%02x ", extended_msg[j]);
+ if (j && !(j % 16)) printk("\n"); }
+ printk("\n");
+#endif
+
+ /* check message */
+ if (extended_msg[2] == EXTENDED_SDTR)
+ ret = AM53C974_sync_neg(instance, cmd->target, extended_msg);
+ if (ret || hostdata->aborted) AM53C974_write_8(CMDREG, CMDREG_SATN);
+
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ break;
+
+ default:
+ printk("scsi%d: unknown message 0x%02x received\n",instance->host_no, msg);
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+ /* reject message */
+ hostdata->msgout[0] = MESSAGE_REJECT;
+ AM53C974_write_8(CMDREG, CMDREG_SATN);
+ AM53C974_write_8(CMDREG, CMDREG_MA);
+ return(0);
+ break;
+
+ } /* switch (msg) */
+KEYWAIT();
+return(1);
+}
+
+/**************************************************************************
+* Function : AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
+*
+* Purpose : try to establish nexus for the command;
+* start sync. negotiation via select-with-ATN-and-stop steps and transfer
+* the command in the cmdout phase if this is an inquiry or request sense
+* command and no sync. negotiation has been performed yet
+*
+* Inputs : instance -- which AM53C974
+* cmd -- command which requires the selection
+* tag -- tagged queueing
+*
+* Returns : nothing
+*
+* Note: this function initializes the selection process, which is continued
+* in the interrupt handler
+**************************************************************************/
+static void AM53C974_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned char cfifo, tmp[3];
+unsigned int i, len, cmd_size = COMMAND_SIZE(cmd->cmnd[0]);
+AM53C974_setio(instance);
+
+cfifo = AM53C974_cfifo();
+if (cfifo) {
+ printk("scsi%d: select error; %d residual bytes in FIFO\n", instance->host_no, cfifo);
+ AM53C974_write_8(CMDREG, CMDREG_CFIFO); /* clear FIFO */
+ }
+
+tmp[0] = IDENTIFY(1, cmd->lun);
+
+#ifdef SCSI2
+if (cmd->device->tagged_queue && (tag != TAG_NONE)) {
+ tmp[1] = SIMPLE_QUEUE_TAG;
+ if (tag == TAG_NEXT) {
+ /* 0 is TAG_NONE, used to imply no tag for this command */
+ if (cmd->device->current_tag == 0) cmd->device->current_tag = 1;
+ cmd->tag = cmd->device->current_tag;
+ cmd->device->current_tag++; }
+ else
+ cmd->tag = (unsigned char)tag;
+ tmp[2] = cmd->tag;
+ hostdata->last_message[0] = SIMPLE_QUEUE_TAG;
+ len = 3;
+ AM53C974_write_8(FFREG, tmp[0]);
+ AM53C974_write_8(FFREG, tmp[1]);
+ AM53C974_write_8(FFREG, tmp[2]);
+ }
+ else
+#endif /* def SCSI2 */
+ {
+ len = 1;
+ AM53C974_write_8(FFREG, tmp[0]);
+ cmd->tag = 0; }
+
+/* if this is an inquiry or request sense command and no sync. negotiation has been
+   performed yet, we start sync. negotiation via select-with-ATN-and-stop steps and
+   transfer the command in the cmdout phase */
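+/* (note: in an SDTR message the period byte is expressed in units of 4 ns, so e.g.
+   max_rate = 5 MHz gives 250/5 = 50, i.e. a 200 ns synchronous transfer period) */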
+if (((cmd->cmnd[0] == INQUIRY) || (cmd->cmnd[0] == REQUEST_SENSE)) &&
+ !(hostdata->sync_neg[cmd->target]) && hostdata->sync_en[cmd->target]) {
+ hostdata->sync_neg[cmd->target] = 1;
+ hostdata->msgout[0] = EXTENDED_MESSAGE;
+ hostdata->msgout[1] = 3;
+ hostdata->msgout[2] = EXTENDED_SDTR;
+ hostdata->msgout[3] = 250 / (int)hostdata->max_rate[cmd->target];
+ hostdata->msgout[4] = hostdata->max_offset[cmd->target];
+ len += 5; }
+
+AM53C974_write_8(SDIDREG, SDIREG_MASK & cmd->target); /* setup dest. id */
+AM53C974_write_8(STIMREG, DEF_SCSI_TIMEOUT); /* setup timeout reg */
+switch (len) {
+ case 1:
+ for (i = 0; i < cmd_size; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_SAS); /* select with ATN, 1 msg byte */
+ hostdata->msgout[0] = NOP;
+ break;
+ case 3:
+ for (i = 0; i < cmd_size; i++) AM53C974_write_8(FFREG, cmd->cmnd[i]);
+ AM53C974_write_8(CMDREG, CMDREG_SA3S); /* select with ATN, 3 msg bytes */
+ hostdata->msgout[0] = NOP;
+ break;
+ default:
+ AM53C974_write_8(CMDREG, CMDREG_SASS); /* select with ATN, stop steps; continue in message out phase */
+ break;
+ }
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg)
+*
+* Purpose : handle reselection
+*
+* Inputs : instance -- which AM53C974
+* statreg -- status register
+*
+* Returns : nothing
+*
+* side effects: manipulates hostdata
+**************************************************************************/
+static void AM53C974_intr_reselect(struct Scsi_Host *instance, unsigned char statreg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned char cfifo, msg[3], lun, t, target = 0;
+#ifdef SCSI2
+ unsigned char tag;
+#endif
+Scsi_Cmnd *tmp = NULL, *prev;
+AM53C974_setio(instance);
+
+cfifo = AM53C974_cfifo();
+
+if (hostdata->selecting) {
+ /* caught reselect interrupt in selection process;
+ put selecting command back into the issue queue and continue with the
+ reselecting command */
+ DEB_RESEL(printk("AM53C974_intr_reselect: in selection process\n"));
+ LIST(hostdata->sel_cmd, hostdata->issue_queue);
+ hostdata->sel_cmd->host_scribble = (unsigned char *)hostdata->issue_queue;
+ hostdata->issue_queue = hostdata->sel_cmd;
+ hostdata->sel_cmd = NULL;
+ hostdata->selecting = 0; }
+
+/* 2 bytes must be in the FIFO now */
+if (cfifo != 2) {
+ printk("scsi %d: error: %d bytes in fifo, 2 expected\n", instance->host_no, cfifo);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+/* determine target which reselected */
+t = AM53C974_read_8(FFREG);
+if (!(t & (1 << instance->this_id))) {
+ printk("scsi %d: error: invalid host id\n", instance->host_no);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+t ^= (1 << instance->this_id);
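+/* after masking off our own id, the single remaining set bit identifies the reselecting target */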
+target = 0; while (t != 1) { t >>= 1; target++; }
+DEB_RESEL(printk("scsi %d: reselect; target: %d\n", instance->host_no, target));
+
+if (hostdata->aborted) goto EXIT_ABORT;
+
+if ((statreg & STATREG_PHASE) != PHASE_MSGIN) {
+ printk("scsi %d: error: upon reselection interrupt not in MSGIN\n", instance->host_no);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+msg[0] = AM53C974_read_8(FFREG);
+if (!(msg[0] & 0x80)) {
+ printk("scsi%d: error: expecting IDENTIFY message, got ", instance->host_no);
+ print_msg(msg);
+ hostdata->aborted = 1;
+ goto EXIT_ABORT; }
+
+lun = (msg[0] & 0x07);
+
+/* We need to add code for SCSI-II to track which devices have
+ * I_T_L_Q nexuses established, and which have simple I_T_L
+ * nexuses so we can choose to do additional data transfer. */
+#ifdef SCSI2
+#error "SCSI-II tagged queueing is not supported yet"
+#endif
+
+/* Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue. */
+for (tmp = (Scsi_Cmnd *)hostdata->disconnected_queue, prev = NULL;
+ tmp; prev = tmp, tmp = (Scsi_Cmnd *)tmp->host_scribble)
+ if ((target == tmp->target) && (lun == tmp->lun)
+#ifdef SCSI2
+ && (tag == tmp->tag)
+#endif
+ ) {
+ if (prev) {
+ REMOVE(prev, (Scsi_Cmnd *)(prev->host_scribble), tmp,
+ (Scsi_Cmnd *)(tmp->host_scribble));
+ prev->host_scribble = tmp->host_scribble; }
+ else {
+ REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble);
+ hostdata->disconnected_queue = (Scsi_Cmnd *)tmp->host_scribble; }
+ tmp->host_scribble = NULL;
+ hostdata->connected = tmp;
+ break; }
+
+if (!tmp) {
+#ifdef SCSI2
+ printk("scsi%d: warning : target %d lun %d tag %d not in disconnect_queue.\n",
+ instance->host_no, target, lun, tag);
+#else
+ printk("scsi%d: warning : target %d lun %d not in disconnect_queue.\n",
+ instance->host_no, target, lun);
+#endif
+ /* Since we have an established nexus that we can't do anything with, we must abort it. */
+ hostdata->aborted = 1;
+ DEB(AM53C974_keywait());
+ goto EXIT_ABORT; }
+ else
+ goto EXIT_OK;
+
+EXIT_ABORT:
+AM53C974_write_8(CMDREG, CMDREG_SATN);
+AM53C974_write_8(CMDREG, CMDREG_MA);
+return;
+
+EXIT_OK:
+DEB_RESEL(printk("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+ instance->host_no, target, tmp->lun, tmp->tag));
+AM53C974_set_sync(instance, target);
+AM53C974_write_8(SDIDREG, SDIREG_MASK & target); /* setup dest. id */
+AM53C974_write_8(CMDREG, CMDREG_MA);
+hostdata->dma_busy = 0;
+hostdata->connected->SCp.phase = PHASE_CMDOUT;
+}
+
+/**************************************************************************
+* Function : AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+* unsigned long length, char *data)
+*
+* Purpose : setup DMA transfer
+*
+* Inputs : instance -- which AM53C974
+* dir -- direction flag, 0: write to device, read from memory;
+* 1: read from device, write to memory
+* length -- number of bytes to transfer to/from the buffer
+* data -- pointer to data buffer
+*
+* Returns : nothing
+**************************************************************************/
+static __inline__ void AM53C974_transfer_dma(struct Scsi_Host *instance, short dir,
+ unsigned long length, char *data)
+{
+AM53C974_local_declare();
+AM53C974_setio(instance);
+
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+AM53C974_write_8(DMACMD, (dir << 7) | DMACMD_INTE_D); /* idle command */
+AM53C974_write_8(STCLREG, (unsigned char)(length & 0xff));
+AM53C974_write_8(STCMREG, (unsigned char)((length & 0xff00) >> 8));
+AM53C974_write_8(STCHREG, (unsigned char)((length & 0xff0000) >> 16));
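+/* the transfer count is apparently needed by both the SCSI core (STC regs above) and the DMA engine (DMASTC below) */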
+AM53C974_write_32(DMASTC, length & 0xffffff);
+AM53C974_write_32(DMASPA, virt_to_bus(data));
+AM53C974_write_8(CMDREG, CMDREG_IT | CMDREG_DMA);
+AM53C974_write_8(DMACMD, (dir << 7) | DMACMD_INTE_D | DMACMD_START);
+}
+
+/**************************************************************************
+* Function : AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+* unsigned char statreg)
+*
+* Purpose : cleanup DMA transfer
+*
+* Inputs : instance -- which AM53C974
+* dmastatus -- dma status register
+* statreg -- status register
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_dma_blast(struct Scsi_Host *instance, unsigned char dmastatus,
+ unsigned char statreg)
+{
+AM53C974_local_declare();
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+unsigned long ctcreg;
+int dir = statreg & STATREG_IO;
+int cfifo, pio, i = 0;
+AM53C974_setio(instance);
+
+do {
+ cfifo = AM53C974_cfifo();
+ i++;
+ } while (cfifo && (i < 50000));
+pio = (i == 50000) ? 1: 0;
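+/* if the FIFO did not drain within the poll limit, the residue is moved by PIO further below */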
+
+if (statreg & STATREG_CTZ) { AM53C974_write_8(DMACMD, DMACMD_IDLE); return; }
+
+if (dmastatus & DMASTATUS_DONE) { AM53C974_write_8(DMACMD, DMACMD_IDLE); return; }
+
+AM53C974_write_8(DMACMD, ((dir << 7) & DMACMD_DIR) | DMACMD_BLAST);
+while(!(AM53C974_read_8(DMASTATUS) & DMASTATUS_BCMPLT)) ;
+AM53C974_write_8(DMACMD, DMACMD_IDLE);
+
+if (pio) {
+ /* transfer residual bytes via PIO */
+ unsigned char *wac = (unsigned char *)AM53C974_read_32(DMAWAC);
+ printk("pio mode, residual=%d\n", AM53C974_read_8(CFIREG) & CFIREG_CF);
+ while (AM53C974_read_8(CFIREG) & CFIREG_CF) *(wac++) = AM53C974_read_8(FFREG);
+ }
+
+ctcreg = AM53C974_read_8(CTCLREG) | (AM53C974_read_8(CTCMREG) << 8) |
+ (AM53C974_read_8(CTCHREG) << 16);
+
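+/* CTC now holds the untransferred byte count; advance the data pointer by the
+   bytes actually moved and record the new residual */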
+hostdata->connected->SCp.ptr += hostdata->connected->SCp.this_residual - ctcreg;
+hostdata->connected->SCp.this_residual = ctcreg;
+}
+
+/**************************************************************************
+* Function : AM53C974_intr_bus_reset(struct Scsi_Host *instance)
+*
+* Purpose : handle bus reset interrupt
+*
+* Inputs : instance -- which AM53C974
+*
+* Returns : nothing
+**************************************************************************/
+static void AM53C974_intr_bus_reset(struct Scsi_Host *instance)
+{
+AM53C974_local_declare();
+unsigned char cntlreg1;
+AM53C974_setio(instance);
+
+AM53C974_write_8(CMDREG, CMDREG_CFIFO);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+
+cntlreg1 = AM53C974_read_8(CNTLREG1);
+AM53C974_write_8(CNTLREG1, cntlreg1 | CNTLREG1_DISR);
+}
+
+/**************************************************************************
+* Function : int AM53C974_abort(Scsi_Cmnd *cmd)
+*
+* Purpose : abort a command
+*
+* Inputs : cmd -- the Scsi_Cmnd to abort
+*
+* Returns : SCSI_ABORT_SUCCESS, SCSI_ABORT_PENDING or SCSI_ABORT_NOT_RUNNING
+ **************************************************************************/
+int AM53C974_abort(Scsi_Cmnd *cmd)
+{
+AM53C974_local_declare();
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+Scsi_Cmnd *tmp, **prev;
+
+#ifdef AM53C974_DEBUG
+ deb_stop = 1;
+#endif
+cli();
+AM53C974_setio(instance);
+
+DEB_ABORT(printk(SEPARATOR_LINE));
+DEB_ABORT(printk("scsi%d : AM53C974_abort called -- trouble starts!!\n", instance->host_no));
+DEB_ABORT(AM53C974_print(instance));
+DEB_ABORT(AM53C974_keywait());
+
+/* Case 1 : If the command is the currently executing command,
+ we'll set the aborted flag and return control so that the
+ information transfer routine can exit cleanly. */
+if ((hostdata->connected == cmd) || (hostdata->sel_cmd == cmd)) {
+ DEB_ABORT(printk("scsi%d: aborting connected command\n", instance->host_no));
+ hostdata->aborted = 1;
+ hostdata->msgout[0] = ABORT;
+ sti();
+ return(SCSI_ABORT_PENDING); }
+
+/* Case 2 : If the command hasn't been issued yet,
+ we simply remove it from the issue queue. */
+for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue),
+ tmp = (Scsi_Cmnd *)hostdata->issue_queue; tmp;
+ prev = (Scsi_Cmnd **)&(tmp->host_scribble),
+ tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ if (cmd == tmp) {
+ DEB_ABORT(printk("scsi%d : abort removed command from issue queue.\n", instance->host_no));
+ REMOVE(5, *prev, tmp, tmp->host_scribble);
+ (*prev) = (Scsi_Cmnd *)tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ sti();
+ tmp->done(tmp);
+ return(SCSI_ABORT_SUCCESS); }
+#ifdef AM53C974_DEBUG_ABORT
+ else {
+ if (prev == (Scsi_Cmnd **)tmp)
+ printk("scsi%d : LOOP\n", instance->host_no);
+ }
+#endif
+ }
+
+/* Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail. */
+if (hostdata->connected || hostdata->sel_cmd) {
+ DEB_ABORT(printk("scsi%d : abort failed, other command connected.\n", instance->host_no));
+ sti();
+ return(SCSI_ABORT_NOT_RUNNING); }
+
+/* Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message. */
+for (tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; tmp;
+ tmp = (Scsi_Cmnd *)tmp->host_scribble) {
+ if (cmd == tmp) {
+ DEB_ABORT(printk("scsi%d: aborting disconnected command\n", instance->host_no));
+ hostdata->aborted = 1;
+ hostdata->msgout[0] = ABORT;
+ hostdata->selecting = 1;
+ hostdata->sel_cmd = tmp;
+ AM53C974_write_8(CMDREG, CMDREG_DSR);
+ sti();
+ return(SCSI_ABORT_PENDING); }
+ }
+
+/* Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke. */
+DEB_ABORT(printk("scsi%d : abort failed, command not found.\n", instance->host_no));
+sti();
+return(SCSI_ABORT_NOT_RUNNING);
+}
+
+/**************************************************************************
+* Function : int AM53C974_reset(Scsi_Cmnd *cmd, unsigned int flags)
+*
+* Purpose : reset the SCSI controller and bus
+*
+* Inputs : cmd -- command which was responsible for the reset; flags -- reset flags (not used here)
+*
+* Returns : status (SCSI_ABORT_SUCCESS)
+**************************************************************************/
+int AM53C974_reset(Scsi_Cmnd *cmd, unsigned int flags)
+{
+AM53C974_local_declare();
+int i;
+struct Scsi_Host *instance = cmd->host;
+struct AM53C974_hostdata *hostdata = (struct AM53C974_hostdata *)instance->hostdata;
+AM53C974_setio(instance);
+
+cli();
+DEB(printk("AM53C974_reset called; "));
+
+printk("AM53C974_reset called\n");
+AM53C974_print(instance);
+AM53C974_keywait();
+
+/* do hard reset */
+AM53C974_write_8(CMDREG, CMDREG_RDEV);
+AM53C974_write_8(CMDREG, CMDREG_NOP);
+hostdata->msgout[0] = NOP;
+for (i = 0; i < 8; i++) {
+ hostdata->busy[i] = 0;
+ hostdata->sync_per[i] = DEF_STP;
+ hostdata->sync_off[i] = 0;
+ hostdata->sync_neg[i] = 0; }
+hostdata->last_message[0] = NOP;
+hostdata->sel_cmd = NULL;
+hostdata->connected = NULL;
+hostdata->issue_queue = NULL;
+hostdata->disconnected_queue = NULL;
+hostdata->in_reset = 0;
+hostdata->aborted = 0;
+hostdata->selecting = 0;
+hostdata->disconnecting = 0;
+hostdata->dma_busy = 0;
+
+/* reset bus */
+AM53C974_write_8(CNTLREG1, CNTLREG1_DISR | instance->this_id); /* disable interrupt upon SCSI RESET */
+AM53C974_write_8(CMDREG, CMDREG_RBUS); /* reset SCSI bus */
+udelay(40);
+AM53C974_config_after_reset(instance);
+
+sti();
+cmd->result = DID_RESET << 16;
+cmd->scsi_done(cmd);
+return SCSI_ABORT_SUCCESS;
+}
+
+
+/*
+ * AM53C974_release()
+ *
+ * Release resources allocated for a single AM53C974 adapter.
+ */
+int
+AM53C974_release(struct Scsi_Host *shp)
+{
+ free_irq(shp->irq, NULL);
+ scsi_unregister(shp);
+ return 0;
+}
+
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AM53C974;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/AM53C974.h b/linux/src/drivers/scsi/AM53C974.h
new file mode 100644
index 0000000..d94db92
--- /dev/null
+++ b/linux/src/drivers/scsi/AM53C974.h
@@ -0,0 +1,409 @@
+/* AM53/79C974 (PCscsi) driver release 0.5
+ *
+ * The architecture and much of the code of this device
+ * driver was originally developed by Drew Eckhardt for
+ * the NCR5380. The following copyrights apply:
+ * For the architecture and all parts similar to the NCR5380:
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * The AM53C974_nobios_detect code was originally developed by
+ * Robin Cutshaw (robin@xfree86.org) and is used here in a
+ * modified form.
+ *
+ * For the other parts:
+ * Copyright 1994, D. Frieauff
+ * EMail: fri@rsx42sun0.dofn.de
+ * Phone: x49-7545-8-2256 , x49-7541-42305
+ */
+
+#ifndef AM53C974_H
+#define AM53C974_H
+
+#include <scsi/scsicam.h>
+
+/***************************************************************************************
+* Default setting of the controller's SCSI id. Edit and uncomment this only if your *
+* BIOS does not correctly initialize the controller's SCSI id. *
+* If you don't get a warning during boot, it is correctly initialized. *
+****************************************************************************************/
+/* #define AM53C974_SCSI_ID 7 */
+
+/***************************************************************************************
+* Default settings for sync. negotiation enable, transfer rate and sync. offset. *
+* These settings can be replaced by LILO overrides (append) with the following syntax: *
+* AM53C974=host-scsi-id, target-scsi-id, max-rate, max-offset *
+* Sync. negotiation is disabled by default and will be enabled for those targets which *
+* are specified in the LILO override *
+****************************************************************************************/
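+/* Illustrative example: append="AM53C974=7,2,10,15" enables sync. negotiation for
+ * target 2 on a host adapter with SCSI id 7, at up to 10 MHz with a max. offset of 15. */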
+#define DEFAULT_SYNC_NEGOTIATION_ENABLED 0 /* 0 or 1 */
+#define DEFAULT_RATE 5 /* MHz, min: 3; max: 10 */
+#define DEFAULT_SYNC_OFFSET 0 /* bytes, min: 0; max: 15; use 0 for async. mode */
+
+
+/* --------------------- don't edit below here --------------------- */
+
+#define AM53C974_DRIVER_REVISION_MAJOR 0
+#define AM53C974_DRIVER_REVISION_MINOR 5
+#define SEPARATOR_LINE \
+"--------------------------------------------------------------------------\n"
+
+/* debug control */
+/* #define AM53C974_DEBUG */
+/* #define AM53C974_DEBUG_MSG */
+/* #define AM53C974_DEBUG_KEYWAIT */
+/* #define AM53C974_DEBUG_INIT */
+/* #define AM53C974_DEBUG_QUEUE */
+/* #define AM53C974_DEBUG_INFO */
+/* #define AM53C974_DEBUG_LINKED */
+/* #define VERBOSE_AM53C974_DEBUG */
+/* #define AM53C974_DEBUG_INTR */
+/* #define AM53C974_DEB_RESEL */
+#define AM53C974_DEBUG_ABORT
+/* #define AM53C974_OPTION_DEBUG_PROBE_ONLY */
+
+/* special options/constants */
+#define DEF_CLK 40 /* chip clock freq. in MHz */
+#define MIN_PERIOD 4 /* for negotiation: min. number of clocks per cycle */
+#define MAX_PERIOD 13 /* for negotiation: max. number of clocks per cycle */
+#define MAX_OFFSET 15 /* for negotiation: max. offset (0=async) */
+
+#define DEF_SCSI_TIMEOUT 245 /* STIMREG value, 40 Mhz */
+#define DEF_STP 8 /* STPREG value assuming 5.0 MB/sec, FASTCLK, FASTSCSI */
+#define DEF_SOF_RAD 0 /* REQ/ACK deassertion delay */
+#define DEF_SOF_RAA 0 /* REQ/ACK assertion delay */
+#define DEF_ETM 0 /* CNTLREG1, ext. timing mode */
+#define DEF_PERE 1 /* CNTLREG1, parity error reporting */
+#define DEF_CLKF 0 /* CLKFREG, 0=40 Mhz */
+#define DEF_ENF 1 /* CNTLREG2, enable features */
+#define DEF_ADIDCHK 0 /* CNTLREG3, additional ID check */
+#define DEF_FASTSCSI 1 /* CNTLREG3, fast SCSI */
+#define DEF_FASTCLK 1 /* CNTLREG3, fast clocking, 5 MB/sec at 40MHz chip clk */
+#define DEF_GLITCH 1 /* CNTLREG4, glitch eater, 0=12ns, 1=35ns, 2=25ns, 3=off */
+#define DEF_PWD 0 /* CNTLREG4, reduced power feature */
+#define DEF_RAE 0 /* CNTLREG4, RAE active negation on REQ, ACK only */
+#define DEF_RADE 1 /* CNTLREG4, active negation on REQ, ACK and data */
+
+/*** PCI block ***/
+/* standard registers are defined in <linux/pci.h> */
+#ifndef PCI_VENDOR_ID_AMD
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#endif
+#define PCI_BASE_MASK 0xFFFFFFE0
+#define PCI_COMMAND_PERREN 0x40
+#define PCI_SCRATCH_REG_0 0x40 /* 16 bits */
+#define PCI_SCRATCH_REG_1 0x42 /* 16 bits */
+#define PCI_SCRATCH_REG_2 0x44 /* 16 bits */
+#define PCI_SCRATCH_REG_3 0x46 /* 16 bits */
+#define PCI_SCRATCH_REG_4 0x48 /* 16 bits */
+#define PCI_SCRATCH_REG_5 0x4A /* 16 bits */
+#define PCI_SCRATCH_REG_6 0x4C /* 16 bits */
+#define PCI_SCRATCH_REG_7 0x4E /* 16 bits */
+
+/*** SCSI block ***/
+#define CTCLREG 0x00 /* r current transf. count, low byte */
+#define CTCMREG 0x04 /* r current transf. count, middle byte */
+#define CTCHREG 0x38 /* r current transf. count, high byte */
+#define STCLREG 0x00 /* w start transf. count, low byte */
+#define STCMREG 0x04 /* w start transf. count, middle byte */
+#define STCHREG 0x38 /* w start transf. count, high byte */
+#define FFREG 0x08 /* rw SCSI FIFO reg. */
+#define STIMREG 0x14 /* w SCSI timeout reg. */
+
+#define SDIDREG 0x10 /* w SCSI destination ID reg. */
+#define SDIREG_MASK 0x07 /* mask */
+
+#define STPREG 0x18 /* w synchronous transf. period reg. */
+#define STPREG_STP 0x1F /* synchr. transfer period */
+
+#define CLKFREG 0x24 /* w clock factor reg. */
+#define CLKFREG_MASK 0x07 /* mask */
+
+#define CMDREG 0x0C /* rw SCSI command reg. */
+#define CMDREG_DMA 0x80 /* set DMA mode (set together with opcodes below) */
+#define CMDREG_IT 0x10 /* information transfer */
+#define CMDREG_ICCS 0x11 /* initiator command complete steps */
+#define CMDREG_MA 0x12 /* message accepted */
+#define CMDREG_TPB 0x98 /* transfer pad bytes, DMA mode only */
+#define CMDREG_SATN 0x1A /* set ATN */
+#define CMDREG_RATN 0x1B /* reset ATN */
+#define CMDREG_SOAS 0x41 /* select without ATN steps */
+#define CMDREG_SAS 0x42 /* select with ATN steps (1 msg byte) */
+#define CMDREG_SASS 0x43 /* select with ATN and stop steps */
+#define CMDREG_ESR 0x44 /* enable selection/reselection */
+#define CMDREG_DSR 0x45 /* disable selection/reselection */
+#define CMDREG_SA3S 0x46 /* select with ATN 3 steps (3 msg bytes) */
+#define CMDREG_NOP 0x00 /* no operation */
+#define CMDREG_CFIFO 0x01 /* clear FIFO */
+#define CMDREG_RDEV 0x02 /* reset device */
+#define CMDREG_RBUS 0x03 /* reset SCSI bus */
+
+#define STATREG 0x10 /* r SCSI status reg. */
+#define STATREG_INT 0x80 /* SCSI interrupt condition detected */
+#define STATREG_IOE 0x40 /* SCSI illegal operation error detected */
+#define STATREG_PE 0x20 /* SCSI parity error detected */
+#define STATREG_CTZ 0x10 /* CTC reg decremented to zero */
+#define STATREG_MSG 0x04 /* SCSI MSG phase (latched?) */
+#define STATREG_CD 0x02 /* SCSI C/D phase (latched?) */
+#define STATREG_IO 0x01 /* SCSI I/O phase (latched?) */
+#define STATREG_PHASE 0x07 /* SCSI phase mask */
+
+#define INSTREG 0x14 /* r interrupt status reg. */
+#define INSTREG_SRST 0x80 /* SCSI reset detected */
+#define INSTREG_ICMD 0x40 /* SCSI invalid command detected */
+#define INSTREG_DIS 0x20 /* target disconnected or sel/resel timeout*/
+#define INSTREG_SR 0x10 /* device on bus has service request */
+#define INSTREG_SO 0x08 /* successful operation */
+#define INSTREG_RESEL 0x04 /* device reselected as initiator */
+
+#define ISREG 0x18 /* r internal state reg. */
+#define ISREG_SOF 0x08 /* synchronous offset flag (act. low) */
+#define ISREG_IS 0x07 /* status of intermediate op. */
+#define ISREG_OK_NO_STOP 0x04 /* selection successful */
+#define ISREG_OK_STOP 0x01 /* selection successful */
+
+#define CFIREG 0x1C /* r current FIFO/internal state reg. */
+#define CFIREG_IS 0xE0 /* status of intermediate op. */
+#define CFIREG_CF 0x1F /* number of bytes in SCSI FIFO */
+
+#define SOFREG 0x1C /* w synchr. offset reg. */
+#define SOFREG_RAD 0xC0 /* REQ/ACK deassertion delay (sync.) */
+#define SOFREG_RAA 0x30 /* REQ/ACK assertion delay (sync.) */
+#define SOFREG_SO 0x0F /* synch. offset (sync.) */
+
+#define CNTLREG1 0x20 /* rw control register one */
+#define CNTLREG1_ETM 0x80 /* set extended timing mode */
+#define CNTLREG1_DISR 0x40 /* disable interrupt on SCSI reset */
+#define CNTLREG1_PERE 0x10 /* enable parity error reporting */
+#define CNTLREG1_SID 0x07 /* host adapter SCSI ID */
+
+#define CNTLREG2 0x2C /* rw control register two */
+#define CNTLREG2_ENF 0x40 /* enable features */
+
+#define CNTLREG3 0x30 /* rw control register three */
+#define CNTLREG3_ADIDCHK 0x80 /* additional ID check */
+#define CNTLREG3_FASTSCSI 0x10 /* fast SCSI */
+#define CNTLREG3_FASTCLK 0x08 /* fast SCSI clocking */
+
+#define CNTLREG4 0x34 /* rw control register four */
+#define CNTLREG4_GLITCH 0xC0 /* glitch eater */
+#define CNTLREG4_PWD 0x20 /* reduced power feature */
+#define CNTLREG4_RAE 0x08 /* write only, active negot. ctrl. */
+#define CNTLREG4_RADE 0x04 /* active negot. ctrl. */
+#define CNTLREG4_RES 0x10 /* reserved bit, must be 1 */
+
+/*** DMA block ***/
+#define DMACMD 0x40 /* rw command */
+#define DMACMD_DIR 0x80 /* transfer direction (1=read from device) */
+#define DMACMD_INTE_D 0x40 /* DMA transfer interrupt enable */
+#define DMACMD_INTE_P 0x20 /* page transfer interrupt enable */
+#define DMACMD_MDL 0x10 /* map to memory descriptor list */
+#define DMACMD_DIAG 0x04 /* diagnostics, set to 0 */
+#define DMACMD_IDLE 0x00 /* idle cmd */
+#define DMACMD_BLAST 0x01 /* flush FIFO to memory */
+#define DMACMD_ABORT 0x02 /* terminate DMA */
+#define DMACMD_START 0x03 /* start DMA */
+
+#define DMASTATUS 0x54 /* r status register */
+#define DMASTATUS_BCMPLT 0x20 /* BLAST complete */
+#define DMASTATUS_SCSIINT 0x10 /* SCSI interrupt pending */
+#define DMASTATUS_DONE 0x08 /* DMA transfer terminated */
+#define DMASTATUS_ABORT 0x04 /* DMA transfer aborted */
+#define DMASTATUS_ERROR 0x02 /* DMA transfer error */
+#define DMASTATUS_PWDN 0x02 /* power down indicator */
+
+#define DMASTC 0x44 /* rw starting transfer count */
+#define DMASPA 0x48 /* rw starting physical address */
+#define DMAWBC 0x4C /* r working byte counter */
+#define DMAWAC 0x50 /* r working address counter */
+#define DMASMDLA 0x58 /* rw starting MDL address */
+#define DMAWMAC 0x5C /* r working MDL counter */
+
+/*** SCSI phases ***/
+#define PHASE_MSGIN 0x07
+#define PHASE_MSGOUT 0x06
+#define PHASE_RES_1 0x05
+#define PHASE_RES_0 0x04
+#define PHASE_STATIN 0x03
+#define PHASE_CMDOUT 0x02
+#define PHASE_DATAIN 0x01
+#define PHASE_DATAOUT 0x00
+
+struct AM53C974_hostdata {
+ volatile unsigned in_reset:1; /* flag, says bus reset pending */
+ volatile unsigned aborted:1; /* flag, says aborted */
+ volatile unsigned selecting:1; /* selection started, but not yet finished */
+ volatile unsigned disconnecting: 1; /* disconnection started, but not yet finished */
+ volatile unsigned dma_busy:1; /* dma busy when service request for info transfer received */
+ volatile unsigned char msgout[10]; /* message to output in MSGOUT_PHASE */
+ volatile unsigned char last_message[10]; /* last message OUT */
+ volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */
+ volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */
+ volatile Scsi_Cmnd *sel_cmd; /* command for selection */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile unsigned char busy[8]; /* index = target, bit = lun */
+ unsigned char sync_per[8]; /* synchronous transfer period (in effect) */
+ unsigned char sync_off[8]; /* synchronous offset (in effect) */
+ unsigned char sync_neg[8]; /* sync. negotiation performed (in effect) */
+ unsigned char sync_en[8];		/* sync. negotiation enabled (setup) */
+ unsigned char max_rate[8]; /* max. transfer rate (setup) */
+ unsigned char max_offset[8]; /* max. sync. offset (setup), only valid if corresponding sync_en is nonzero */
+ };
+
+#define AM53C974 { \
+ NULL, /* pointer to next in list */ \
+ NULL, /* long * usage_count */ \
+ NULL, /* struct proc_dir_entry *proc_dir */ \
+ NULL, /* int (*proc_info)(char *, char **, off_t, int, int, int); */ \
+ "AM53C974", /* name */ \
+ AM53C974_detect, /* int (* detect)(struct SHT *) */ \
+ NULL, /* int (*release)(struct Scsi_Host *) */ \
+ AM53C974_info, /* const char *(* info)(struct Scsi_Host *) */ \
+ AM53C974_command, /* int (* command)(Scsi_Cmnd *) */ \
+ AM53C974_queue_command, /* int (* queuecommand)(Scsi_Cmnd *, \
+ void (*done)(Scsi_Cmnd *)) */ \
+ AM53C974_abort, /* int (* abort)(Scsi_Cmnd *) */ \
+ AM53C974_reset, /* int (* reset)(Scsi_Cmnd *) */ \
+ NULL, /* int (* slave_attach)(int, int) */ \
+ scsicam_bios_param, /* int (* bios_param)(Disk *, int, int[]) */ \
+ 12, /* can_queue */ \
+ -1, /* this_id */ \
+ SG_ALL, /* sg_tablesize */ \
+ 1, /* cmd_per_lun */ \
+ 0, /* present, i.e. how many adapters of this kind */ \
+ 0, /* unchecked_isa_dma */ \
+ DISABLE_CLUSTERING /* use_clustering */ \
+ }
+
+void AM53C974_setup(char *str, int *ints);
+int AM53C974_detect(Scsi_Host_Template *tpnt);
+int AM53C974_biosparm(Disk *disk, int dev, int *info_array);
+const char *AM53C974_info(struct Scsi_Host *);
+int AM53C974_command(Scsi_Cmnd *SCpnt);
+int AM53C974_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+int AM53C974_abort(Scsi_Cmnd *cmd);
+int AM53C974_reset (Scsi_Cmnd *cmd, unsigned int flags);
+
+#define AM53C974_local_declare() unsigned long io_port
+#define AM53C974_setio(instance) io_port = instance->io_port
+#define AM53C974_read_8(addr) inb(io_port + (addr))
+#define AM53C974_write_8(addr,x) outb((x), io_port + (addr))
+#define AM53C974_read_16(addr) inw(io_port + (addr))
+#define AM53C974_write_16(addr,x) outw((x), io_port + (addr))
+#define AM53C974_read_32(addr) inl(io_port + (addr))
+#define AM53C974_write_32(addr,x) outl((x), io_port + (addr))
+
+#define AM53C974_poll_int() { do { statreg = AM53C974_read_8(STATREG); } \
+ while (!(statreg & STATREG_INT)) ; \
+ AM53C974_read_8(INSTREG) ; } /* clear int */
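+/* note: AM53C974_poll_int() expects a variable named 'statreg' to exist in the calling scope */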
+#define AM53C974_cfifo() (AM53C974_read_8(CFIREG) & CFIREG_CF)
+
+/* These are "special" values for the tag parameter passed to AM53C974_select. */
+#define TAG_NEXT -1 /* Use next free tag */
+#define TAG_NONE -2 /* Establish I_T_L nexus instead of I_T_L_Q
+ * even on SCSI-II devices */
+
+/************ LILO overrides *************/
+typedef struct _override_t {
+ int host_scsi_id; /* SCSI id of the bus controller */
+ int target_scsi_id; /* SCSI id of target */
+ int max_rate; /* max. transfer rate */
+ int max_offset; /* max. sync. offset, 0 = asynchronous */
+ } override_t;
+
+/************ PCI stuff *************/
+#define AM53C974_PCIREG_OPEN() outb(0xF1, 0xCF8); outb(0, 0xCFA)
+#define AM53C974_PCIREG_CLOSE() outb(0, 0xCF8)
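+/* (the open/close pair apparently uses PCI configuration mechanism #2: the key byte goes to 0xCF8, the forward register to 0xCFA) */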
+#define AM53C974_PCIREG_READ_BYTE(instance,a) ( inb((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_READ_WORD(instance,a) ( inw((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_READ_DWORD(instance,a) ( inl((a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_BYTE(instance,x,a) ( outb((x), (a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_WORD(instance,x,a) ( outw((x), (a) + (instance)->io_port) )
+#define AM53C974_PCIREG_WRITE_DWORD(instance,x,a) ( outl((x), (a) + (instance)->io_port) )
+
+typedef struct _pci_config_t {
+ /* start of official PCI config space header */
+ union {
+ unsigned int device_vendor;
+ struct {
+ unsigned short vendor;
+ unsigned short device;
+ } dv;
+ } dv_id;
+#define _device_vendor dv_id.device_vendor
+#define _vendor dv_id.dv.vendor
+#define _device dv_id.dv.device
+ union {
+ unsigned int status_command;
+ struct {
+ unsigned short command;
+ unsigned short status;
+ } sc;
+ } stat_cmd;
+#define _status_command stat_cmd.status_command
+#define _command stat_cmd.sc.command
+#define _status stat_cmd.sc.status
+ union {
+ unsigned int class_revision;
+ struct {
+ unsigned char rev_id;
+ unsigned char prog_if;
+ unsigned char sub_class;
+ unsigned char base_class;
+ } cr;
+ } class_rev;
+#define _class_revision class_rev.class_revision
+#define _rev_id class_rev.cr.rev_id
+#define _prog_if class_rev.cr.prog_if
+#define _sub_class class_rev.cr.sub_class
+#define _base_class class_rev.cr.base_class
+ union {
+ unsigned int bist_header_latency_cache;
+ struct {
+ unsigned char cache_line_size;
+ unsigned char latency_timer;
+ unsigned char header_type;
+ unsigned char bist;
+ } bhlc;
+ } bhlc;
+#define _bist_header_latency_cache bhlc.bist_header_latency_cache
+#define _cache_line_size bhlc.bhlc.cache_line_size
+#define _latency_timer bhlc.bhlc.latency_timer
+#define _header_type bhlc.bhlc.header_type
+#define _bist bhlc.bhlc.bist
+ unsigned int _base0;
+ unsigned int _base1;
+ unsigned int _base2;
+ unsigned int _base3;
+ unsigned int _base4;
+ unsigned int _base5;
+ unsigned int rsvd1;
+ unsigned int rsvd2;
+ unsigned int _baserom;
+ unsigned int rsvd3;
+ unsigned int rsvd4;
+ union {
+ unsigned int max_min_ipin_iline;
+ struct {
+ unsigned char int_line;
+ unsigned char int_pin;
+ unsigned char min_gnt;
+ unsigned char max_lat;
+ } mmii;
+ } mmii;
+#define _max_min_ipin_iline mmii.max_min_ipin_iline
+#define _int_line mmii.mmii.int_line
+#define _int_pin mmii.mmii.int_pin
+#define _min_gnt mmii.mmii.min_gnt
+#define _max_lat mmii.mmii.max_lat
+ /* end of official PCI config space header */
+ unsigned short _ioaddr; /* config type 1 - private I/O addr */
+ unsigned int _pcibus; /* config type 2 - private bus id */
+ unsigned int _cardnum; /* config type 2 - private card number */
+} pci_config_t;
+
+#endif /* AM53C974_H */
diff --git a/linux/src/drivers/scsi/BusLogic.c b/linux/src/drivers/scsi/BusLogic.c
new file mode 100644
index 0000000..3c52e15
--- /dev/null
+++ b/linux/src/drivers/scsi/BusLogic.c
@@ -0,0 +1,5003 @@
+/*
+
+ Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters
+
+ Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ This program is free software; you may redistribute and/or modify it under
+ the terms of the GNU General Public License Version 2 as published by the
+ Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for complete details.
+
+ The author respectfully requests that any modifications to this software be
+ sent directly to him for evaluation and testing.
+
+ Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose
+ advice has been invaluable, to David Gentzel, for writing the original Linux
+ BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site.
+
+ Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB
+ Manager available as freely redistributable source code.
+
+*/
+
+
+#define BusLogic_DriverVersion "2.0.15"
+#define BusLogic_DriverDate "17 August 1998"
+
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+#include <linux/bios32.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include "BusLogic.h"
+#include "FlashPoint.c"
+
+
+/*
+ BusLogic_DriverOptionsCount is a count of the number of BusLogic Driver
+ Options specifications provided via the Linux Kernel Command Line or via
+ the Loadable Kernel Module Installation Facility.
+*/
+
+static int
+ BusLogic_DriverOptionsCount = 0;
+
+
+/*
+ BusLogic_DriverOptions is an array of Driver Options structures representing
+ BusLogic Driver Options specifications provided via the Linux Kernel Command
+ Line or via the Loadable Kernel Module Installation Facility.
+*/
+
+static BusLogic_DriverOptions_T
+ BusLogic_DriverOptions[BusLogic_MaxHostAdapters];
+
+
+/*
+ BusLogic_Options can be assigned a string by the Loadable Kernel Module
+ Installation Facility to be parsed for BusLogic Driver Options
+ specifications.
+*/
+
+static char
+ *BusLogic_Options = NULL;
+
+
+/*
+ BusLogic_ProbeOptions is a set of Probe Options to be applied across
+ all BusLogic Host Adapters.
+*/
+
+static BusLogic_ProbeOptions_T
+ BusLogic_ProbeOptions = { 0 };
+
+
+/*
+ BusLogic_GlobalOptions is a set of Global Options to be applied across
+ all BusLogic Host Adapters.
+*/
+
+static BusLogic_GlobalOptions_T
+ BusLogic_GlobalOptions = { 0 };
+
+
+/*
+ BusLogic_FirstRegisteredHostAdapter and BusLogic_LastRegisteredHostAdapter
+ are pointers to the first and last registered BusLogic Host Adapters.
+*/
+
+static BusLogic_HostAdapter_T
+ *BusLogic_FirstRegisteredHostAdapter = NULL,
+ *BusLogic_LastRegisteredHostAdapter = NULL;
+
+
+/*
+ BusLogic_ProbeInfoCount is the number of entries in BusLogic_ProbeInfoList.
+*/
+
+static int
+ BusLogic_ProbeInfoCount = 0;
+
+
+/*
+ BusLogic_ProbeInfoList is the list of I/O Addresses and Bus Probe Information
+ to be checked for potential BusLogic Host Adapters. It is initialized by
+ interrogating the PCI Configuration Space on PCI machines as well as from the
+ list of standard BusLogic I/O Addresses.
+*/
+
+static BusLogic_ProbeInfo_T
+ *BusLogic_ProbeInfoList = NULL;
+
+
+/*
+ BusLogic_CommandFailureReason holds a string identifying the reason why a
+ call to BusLogic_Command failed. It is only non-NULL when BusLogic_Command
+ returns a failure code.
+*/
+
+static char
+ *BusLogic_CommandFailureReason;
+
+
+/*
+ BusLogic_ProcDirectoryEntry is the BusLogic /proc/scsi directory entry.
+*/
+
+PROC_DirectoryEntry_T
+ BusLogic_ProcDirectoryEntry =
+ { PROC_SCSI_BUSLOGIC, 8, "BusLogic", S_IFDIR | S_IRUGO | S_IXUGO, 2 };
+
+
+/*
+ BusLogic_AnnounceDriver announces the Driver Version and Date, Author's
+ Name, Copyright Notice, and Electronic Mail Address.
+*/
+
+static void BusLogic_AnnounceDriver(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_Announce("***** BusLogic SCSI Driver Version "
+ BusLogic_DriverVersion " of "
+ BusLogic_DriverDate " *****\n", HostAdapter);
+ BusLogic_Announce("Copyright 1995-1998 by Leonard N. Zubkoff "
+ "<lnz@dandelion.com>\n", HostAdapter);
+}
+
+
+/*
+ BusLogic_DriverInfo returns the Host Adapter Name to identify this SCSI
+ Driver and Host Adapter.
+*/
+
+const char *BusLogic_DriverInfo(SCSI_Host_T *Host)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Host->hostdata;
+ return HostAdapter->FullModelName;
+}
+
+
+/*
+ BusLogic_RegisterHostAdapter adds Host Adapter to the list of registered
+ BusLogic Host Adapters.
+*/
+
+static void BusLogic_RegisterHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ HostAdapter->Next = NULL;
+ if (BusLogic_FirstRegisteredHostAdapter == NULL)
+ {
+ BusLogic_FirstRegisteredHostAdapter = HostAdapter;
+ BusLogic_LastRegisteredHostAdapter = HostAdapter;
+ }
+ else
+ {
+ BusLogic_LastRegisteredHostAdapter->Next = HostAdapter;
+ BusLogic_LastRegisteredHostAdapter = HostAdapter;
+ }
+}
+
+
+/*
+ BusLogic_UnregisterHostAdapter removes Host Adapter from the list of
+ registered BusLogic Host Adapters.
+*/
+
+static void BusLogic_UnregisterHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ if (HostAdapter == BusLogic_FirstRegisteredHostAdapter)
+ {
+ BusLogic_FirstRegisteredHostAdapter =
+ BusLogic_FirstRegisteredHostAdapter->Next;
+ if (HostAdapter == BusLogic_LastRegisteredHostAdapter)
+ BusLogic_LastRegisteredHostAdapter = NULL;
+ }
+ else
+ {
+ BusLogic_HostAdapter_T *PreviousHostAdapter =
+ BusLogic_FirstRegisteredHostAdapter;
+ while (PreviousHostAdapter != NULL &&
+ PreviousHostAdapter->Next != HostAdapter)
+ PreviousHostAdapter = PreviousHostAdapter->Next;
+ if (PreviousHostAdapter != NULL)
+ PreviousHostAdapter->Next = HostAdapter->Next;
+ }
+ HostAdapter->Next = NULL;
+}
+
+
+/*
+ BusLogic_InitializeCCBs initializes a group of Command Control Blocks (CCBs)
+ for Host Adapter from the BlockSize bytes located at BlockPointer. The newly
+ created CCBs are added to Host Adapter's free list.
+*/
+
+static void BusLogic_InitializeCCBs(BusLogic_HostAdapter_T *HostAdapter,
+ void *BlockPointer, int BlockSize)
+{
+ BusLogic_CCB_T *CCB = (BusLogic_CCB_T *) BlockPointer;
+ memset(BlockPointer, 0, BlockSize);
+ CCB->AllocationGroupHead = true;
+ while ((BlockSize -= sizeof(BusLogic_CCB_T)) >= 0)
+ {
+ CCB->Status = BusLogic_CCB_Free;
+ CCB->HostAdapter = HostAdapter;
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ CCB->CallbackFunction = BusLogic_QueueCompletedCCB;
+ CCB->BaseAddress = HostAdapter->FlashPointInfo.BaseAddress;
+ }
+ CCB->Next = HostAdapter->Free_CCBs;
+ CCB->NextAll = HostAdapter->All_CCBs;
+ HostAdapter->Free_CCBs = CCB;
+ HostAdapter->All_CCBs = CCB;
+ HostAdapter->AllocatedCCBs++;
+ CCB++;
+ }
+}
+
+
+/*
+ BusLogic_CreateInitialCCBs allocates the initial CCBs for Host Adapter.
+*/
+
+static boolean BusLogic_CreateInitialCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
+ int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(BusLogic_CCB_T);
+ while (HostAdapter->AllocatedCCBs < HostAdapter->InitialCCBs)
+ {
+ void *BlockPointer = kmalloc(BlockSize,
+ (HostAdapter->BounceBuffersRequired
+ ? GFP_ATOMIC | GFP_DMA
+ : GFP_ATOMIC));
+ if (BlockPointer == NULL)
+ {
+ BusLogic_Error("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
+ HostAdapter);
+ return false;
+ }
+ BusLogic_InitializeCCBs(HostAdapter, BlockPointer, BlockSize);
+ }
+ return true;
+}
+
+
+/*
+ BusLogic_DestroyCCBs deallocates the CCBs for Host Adapter.
+*/
+
+static void BusLogic_DestroyCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_CCB_T *NextCCB = HostAdapter->All_CCBs, *CCB;
+ HostAdapter->All_CCBs = NULL;
+ HostAdapter->Free_CCBs = NULL;
+ while ((CCB = NextCCB) != NULL)
+ {
+ NextCCB = CCB->NextAll;
+ if (CCB->AllocationGroupHead)
+ kfree(CCB);
+ }
+}
+
+
+/*
+ BusLogic_CreateAdditionalCCBs allocates Additional CCBs for Host Adapter. If
+ allocation fails and there are no remaining CCBs available, the Driver Queue
+ Depth is decreased to a known safe value to avoid potential deadlocks when
+ multiple host adapters share the same IRQ Channel.
+*/
+
+static void BusLogic_CreateAdditionalCCBs(BusLogic_HostAdapter_T *HostAdapter,
+ int AdditionalCCBs,
+ boolean SuccessMessageP)
+{
+ int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(BusLogic_CCB_T);
+ int PreviouslyAllocated = HostAdapter->AllocatedCCBs;
+ if (AdditionalCCBs <= 0) return;
+ while (HostAdapter->AllocatedCCBs - PreviouslyAllocated < AdditionalCCBs)
+ {
+ void *BlockPointer = kmalloc(BlockSize,
+ (HostAdapter->BounceBuffersRequired
+ ? GFP_ATOMIC | GFP_DMA
+ : GFP_ATOMIC));
+ if (BlockPointer == NULL) break;
+ BusLogic_InitializeCCBs(HostAdapter, BlockPointer, BlockSize);
+ }
+ if (HostAdapter->AllocatedCCBs > PreviouslyAllocated)
+ {
+ if (SuccessMessageP)
+ BusLogic_Notice("Allocated %d additional CCBs (total now %d)\n",
+ HostAdapter,
+ HostAdapter->AllocatedCCBs - PreviouslyAllocated,
+ HostAdapter->AllocatedCCBs);
+ return;
+ }
+ BusLogic_Notice("Failed to allocate additional CCBs\n", HostAdapter);
+ if (HostAdapter->DriverQueueDepth >
+ HostAdapter->AllocatedCCBs - HostAdapter->TargetDeviceCount)
+ {
+ HostAdapter->DriverQueueDepth =
+ HostAdapter->AllocatedCCBs - HostAdapter->TargetDeviceCount;
+ HostAdapter->SCSI_Host->can_queue = HostAdapter->DriverQueueDepth;
+ }
+}
+
+
+/*
+ BusLogic_AllocateCCB allocates a CCB from Host Adapter's free list,
+ allocating more memory from the Kernel if necessary. The Host Adapter's
+ Lock should already have been acquired by the caller.
+*/
+
+static BusLogic_CCB_T *BusLogic_AllocateCCB(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ static unsigned long SerialNumber = 0;
+ BusLogic_CCB_T *CCB;
+ CCB = HostAdapter->Free_CCBs;
+ if (CCB != NULL)
+ {
+ CCB->SerialNumber = ++SerialNumber;
+ HostAdapter->Free_CCBs = CCB->Next;
+ CCB->Next = NULL;
+ if (HostAdapter->Free_CCBs == NULL)
+ BusLogic_CreateAdditionalCCBs(HostAdapter,
+ HostAdapter->IncrementalCCBs,
+ true);
+ return CCB;
+ }
+ BusLogic_CreateAdditionalCCBs(HostAdapter,
+ HostAdapter->IncrementalCCBs,
+ true);
+ CCB = HostAdapter->Free_CCBs;
+ if (CCB == NULL) return NULL;
+ CCB->SerialNumber = ++SerialNumber;
+ HostAdapter->Free_CCBs = CCB->Next;
+ CCB->Next = NULL;
+ return CCB;
+}
+
+
+/*
+ BusLogic_DeallocateCCB deallocates a CCB, returning it to the Host Adapter's
+ free list. The Host Adapter's Lock should already have been acquired by the
+ caller.
+*/
+
+static void BusLogic_DeallocateCCB(BusLogic_CCB_T *CCB)
+{
+ BusLogic_HostAdapter_T *HostAdapter = CCB->HostAdapter;
+ CCB->Command = NULL;
+ CCB->Status = BusLogic_CCB_Free;
+ CCB->Next = HostAdapter->Free_CCBs;
+ HostAdapter->Free_CCBs = CCB;
+}
+
+
+/*
+ BusLogic_Command sends the command OperationCode to HostAdapter, optionally
+ providing ParameterLength bytes of ParameterData and receiving at most
+ ReplyLength bytes of ReplyData; any excess reply data is received but
+ discarded.
+
+ On success, this function returns the number of reply bytes read from
+ the Host Adapter (including any discarded data); on failure, it returns
+ -1 if the command was invalid, or -2 if a timeout occurred.
+
+ BusLogic_Command is called exclusively during host adapter detection and
+ initialization, so performance and latency are not critical, and exclusive
+ access to the Host Adapter hardware is assumed. Once the host adapter and
+ driver are initialized, the only Host Adapter command that is issued is the
+ single byte Execute Mailbox Command operation code, which does not require
+ waiting for the Host Adapter Ready bit to be set in the Status Register.
+*/
+
+static int BusLogic_Command(BusLogic_HostAdapter_T *HostAdapter,
+ BusLogic_OperationCode_T OperationCode,
+ void *ParameterData,
+ int ParameterLength,
+ void *ReplyData,
+ int ReplyLength)
+{
+ unsigned char *ParameterPointer = (unsigned char *) ParameterData;
+ unsigned char *ReplyPointer = (unsigned char *) ReplyData;
+ BusLogic_StatusRegister_T StatusRegister;
+ BusLogic_InterruptRegister_T InterruptRegister;
+ ProcessorFlags_T ProcessorFlags = 0;
+ int ReplyBytes = 0, Result;
+ long TimeoutCounter;
+ /*
+ Clear out the Reply Data if provided.
+ */
+ if (ReplyLength > 0)
+ memset(ReplyData, 0, ReplyLength);
+ /*
+ If the IRQ Channel has not yet been acquired, then interrupts must be
+ disabled while issuing host adapter commands since a Command Complete
+ interrupt could occur if the IRQ Channel was previously enabled by another
+ BusLogic Host Adapter or another driver sharing the same IRQ Channel.
+ */
+ if (!HostAdapter->IRQ_ChannelAcquired)
+ {
+ save_flags(ProcessorFlags);
+ cli();
+ }
+ /*
+ Wait for the Host Adapter Ready bit to be set and the Command/Parameter
+ Register Busy bit to be reset in the Status Register.
+ */
+ TimeoutCounter = 10000;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.HostAdapterReady &&
+ !StatusRegister.Bits.CommandParameterRegisterBusy)
+ break;
+ udelay(100);
+ }
+ if (TimeoutCounter < 0)
+ {
+ BusLogic_CommandFailureReason = "Timeout waiting for Host Adapter Ready";
+ Result = -2;
+ goto Done;
+ }
+ /*
+ Write the OperationCode to the Command/Parameter Register.
+ */
+ HostAdapter->HostAdapterCommandCompleted = false;
+ BusLogic_WriteCommandParameterRegister(HostAdapter, OperationCode);
+ /*
+ Write any additional Parameter Bytes.
+ */
+ TimeoutCounter = 10000;
+ while (ParameterLength > 0 && --TimeoutCounter >= 0)
+ {
+ /*
+ Wait 100 microseconds to give the Host Adapter enough time to determine
+ whether the last value written to the Command/Parameter Register was
+ valid or not. If the Command Complete bit is set in the Interrupt
+ Register, then the Command Invalid bit in the Status Register will be
+ reset if the Operation Code or Parameter was valid and the command
+ has completed, or set if the Operation Code or Parameter was invalid.
+ If the Data In Register Ready bit is set in the Status Register, then
+ the Operation Code was valid, and data is waiting to be read back
+ from the Host Adapter. Otherwise, wait for the Command/Parameter
+ Register Busy bit in the Status Register to be reset.
+ */
+ udelay(100);
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (InterruptRegister.Bits.CommandComplete) break;
+ if (HostAdapter->HostAdapterCommandCompleted) break;
+ if (StatusRegister.Bits.DataInRegisterReady) break;
+ if (StatusRegister.Bits.CommandParameterRegisterBusy) continue;
+ BusLogic_WriteCommandParameterRegister(HostAdapter, *ParameterPointer++);
+ ParameterLength--;
+ }
+ if (TimeoutCounter < 0)
+ {
+ BusLogic_CommandFailureReason =
+ "Timeout waiting for Parameter Acceptance";
+ Result = -2;
+ goto Done;
+ }
+ /*
+ The Modify I/O Address command does not cause a Command Complete Interrupt.
+ */
+ if (OperationCode == BusLogic_ModifyIOAddress)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.CommandInvalid)
+ {
+ BusLogic_CommandFailureReason = "Modify I/O Address Invalid";
+ Result = -1;
+ goto Done;
+ }
+ if (BusLogic_GlobalOptions.TraceConfiguration)
+ BusLogic_Notice("BusLogic_Command(%02X) Status = %02X: "
+ "(Modify I/O Address)\n", HostAdapter,
+ OperationCode, StatusRegister.All);
+ Result = 0;
+ goto Done;
+ }
+ /*
+ Select an appropriate timeout value for awaiting command completion.
+ */
+ switch (OperationCode)
+ {
+ case BusLogic_InquireInstalledDevicesID0to7:
+ case BusLogic_InquireInstalledDevicesID8to15:
+ case BusLogic_InquireTargetDevices:
+ /* Approximately 60 seconds. */
+ TimeoutCounter = 60*10000;
+ break;
+ default:
+ /* Approximately 1 second. */
+ TimeoutCounter = 10000;
+ break;
+ }
+ /*
+ Receive any Reply Bytes, waiting for either the Command Complete bit to
+ be set in the Interrupt Register, or for the Interrupt Handler to set the
+ Host Adapter Command Completed bit in the Host Adapter structure.
+ */
+ while (--TimeoutCounter >= 0)
+ {
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (InterruptRegister.Bits.CommandComplete) break;
+ if (HostAdapter->HostAdapterCommandCompleted) break;
+ if (StatusRegister.Bits.DataInRegisterReady)
+ {
+ if (++ReplyBytes <= ReplyLength)
+ *ReplyPointer++ = BusLogic_ReadDataInRegister(HostAdapter);
+ else BusLogic_ReadDataInRegister(HostAdapter);
+ }
+ if (OperationCode == BusLogic_FetchHostAdapterLocalRAM &&
+ StatusRegister.Bits.HostAdapterReady) break;
+ udelay(100);
+ }
+ if (TimeoutCounter < 0)
+ {
+ BusLogic_CommandFailureReason = "Timeout waiting for Command Complete";
+ Result = -2;
+ goto Done;
+ }
+ /*
+ Clear any pending Command Complete Interrupt.
+ */
+ BusLogic_InterruptReset(HostAdapter);
+ /*
+ Provide tracing information if requested.
+ */
+ if (BusLogic_GlobalOptions.TraceConfiguration)
+ {
+ int i;
+ BusLogic_Notice("BusLogic_Command(%02X) Status = %02X: %2d ==> %2d:",
+ HostAdapter, OperationCode,
+ StatusRegister.All, ReplyLength, ReplyBytes);
+ if (ReplyLength > ReplyBytes) ReplyLength = ReplyBytes;
+ for (i = 0; i < ReplyLength; i++)
+ BusLogic_Notice(" %02X", HostAdapter,
+ ((unsigned char *) ReplyData)[i]);
+ BusLogic_Notice("\n", HostAdapter);
+ }
+ /*
+ Process Command Invalid conditions.
+ */
+ if (StatusRegister.Bits.CommandInvalid)
+ {
+ /*
+ Some early BusLogic Host Adapters may not recover properly from
+ a Command Invalid condition, so if this appears to be the case,
+ a Soft Reset is issued to the Host Adapter. Potentially invalid
+ commands are never attempted after Mailbox Initialization is
+ performed, so there should be no Host Adapter state lost by a
+ Soft Reset in response to a Command Invalid condition.
+ */
+ udelay(1000);
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.CommandInvalid ||
+ StatusRegister.Bits.Reserved ||
+ StatusRegister.Bits.DataInRegisterReady ||
+ StatusRegister.Bits.CommandParameterRegisterBusy ||
+ !StatusRegister.Bits.HostAdapterReady ||
+ !StatusRegister.Bits.InitializationRequired ||
+ StatusRegister.Bits.DiagnosticActive ||
+ StatusRegister.Bits.DiagnosticFailure)
+ {
+ BusLogic_SoftReset(HostAdapter);
+ udelay(1000);
+ }
+ BusLogic_CommandFailureReason = "Command Invalid";
+ Result = -1;
+ goto Done;
+ }
+ /*
+ Handle Excess Parameters Supplied conditions.
+ */
+ if (ParameterLength > 0)
+ {
+ BusLogic_CommandFailureReason = "Excess Parameters Supplied";
+ Result = -1;
+ goto Done;
+ }
+ /*
+ Indicate the command completed successfully.
+ */
+ BusLogic_CommandFailureReason = NULL;
+ Result = ReplyBytes;
+ /*
+ Restore the interrupt status if necessary and return.
+ */
+Done:
+ if (!HostAdapter->IRQ_ChannelAcquired)
+ restore_flags(ProcessorFlags);
+ return Result;
+}
+
+
+/*
+ BusLogic_AppendProbeAddressISA appends a single ISA I/O Address to the list
+ of I/O Address and Bus Probe Information to be checked for potential BusLogic
+ Host Adapters.
+*/
+
+static void BusLogic_AppendProbeAddressISA(BusLogic_IO_Address_T IO_Address)
+{
+ BusLogic_ProbeInfo_T *ProbeInfo;
+ if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters) return;
+ ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
+ ProbeInfo->HostAdapterType = BusLogic_MultiMaster;
+ ProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
+ ProbeInfo->IO_Address = IO_Address;
+}
+
+
+/*
+ BusLogic_InitializeProbeInfoListISA initializes the list of I/O Address and
+ Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
+ only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
+*/
+
+static void BusLogic_InitializeProbeInfoListISA(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ /*
+ If BusLogic Driver Options specifications requested that ISA Bus Probes
+ be inhibited, do not proceed further.
+ */
+ if (BusLogic_ProbeOptions.NoProbeISA) return;
+ /*
+ Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
+ */
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe330
+ : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x330);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe334
+ : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x334);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe230
+ : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x230);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe234
+ : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x234);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe130
+ : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x130);
+ if (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe134
+ : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0)
+ BusLogic_AppendProbeAddressISA(0x134);
+}
+
+
+#ifdef CONFIG_PCI
+
+
+/*
+ BusLogic_SortProbeInfo sorts a section of BusLogic_ProbeInfoList in order
+ of increasing PCI Bus and Device Number.
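+  (implemented below as a simple exchange sort bounded by the last interchange).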
+*/
+
+static void BusLogic_SortProbeInfo(BusLogic_ProbeInfo_T *ProbeInfoList,
+ int ProbeInfoCount)
+{
+ int LastInterchange = ProbeInfoCount-1, Bound, j;
+ while (LastInterchange > 0)
+ {
+ Bound = LastInterchange;
+ LastInterchange = 0;
+ for (j = 0; j < Bound; j++)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo1 = &ProbeInfoList[j];
+ BusLogic_ProbeInfo_T *ProbeInfo2 = &ProbeInfoList[j+1];
+ if (ProbeInfo1->Bus > ProbeInfo2->Bus ||
+ (ProbeInfo1->Bus == ProbeInfo2->Bus &&
+ (ProbeInfo1->Device > ProbeInfo2->Device)))
+ {
+ BusLogic_ProbeInfo_T TempProbeInfo;
+ memcpy(&TempProbeInfo, ProbeInfo1, sizeof(BusLogic_ProbeInfo_T));
+ memcpy(ProbeInfo1, ProbeInfo2, sizeof(BusLogic_ProbeInfo_T));
+ memcpy(ProbeInfo2, &TempProbeInfo, sizeof(BusLogic_ProbeInfo_T));
+ LastInterchange = j;
+ }
+ }
+ }
+}
+
+
+/*
+ BusLogic_InitializeMultiMasterProbeInfo initializes the list of I/O Address
+ and Bus Probe Information to be checked for potential BusLogic MultiMaster
+ SCSI Host Adapters by interrogating the PCI Configuration Space on PCI
+ machines as well as from the list of standard BusLogic MultiMaster ISA
+ I/O Addresses. It returns the number of PCI MultiMaster Host Adapters found.
+*/
+
+static int BusLogic_InitializeMultiMasterProbeInfo(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ BusLogic_ProbeInfo_T *PrimaryProbeInfo =
+ &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount];
+ int NonPrimaryPCIMultiMasterIndex = BusLogic_ProbeInfoCount + 1;
+ int NonPrimaryPCIMultiMasterCount = 0, PCIMultiMasterCount = 0;
+ boolean ForceBusDeviceScanningOrder = false;
+ boolean ForceBusDeviceScanningOrderChecked = false;
+ boolean StandardAddressSeen[6];
+ unsigned char Bus, DeviceFunction;
+ unsigned int BaseAddress0, BaseAddress1;
+ unsigned char IRQ_Channel;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned short Index = 0;
+ int i;
+ if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters) return 0;
+ BusLogic_ProbeInfoCount++;
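+  /*
+    The slot just reserved serves as the Primary Probe Info entry: it is
+    filled in below if a PCI MultiMaster Host Adapter with its ISA Compatible
+    I/O Port enabled at 0x330 is found, or reused for the explicit ISA probe
+    of the Primary I/O Address.
+  */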
+ for (i = 0; i < 6; i++)
+ StandardAddressSeen[i] = false;
+ /*
+ Iterate over the MultiMaster PCI Host Adapters. For each enumerated host
+ adapter, determine whether its ISA Compatible I/O Port is enabled and if
+ so, whether it is assigned the Primary I/O Address. A host adapter that is
+ assigned the Primary I/O Address will always be the preferred boot device.
+ The MultiMaster BIOS will first recognize a host adapter at the Primary I/O
+ Address, then any other PCI host adapters, and finally any host adapters
+ located at the remaining standard ISA I/O Addresses. When a PCI host
+ adapter is found with its ISA Compatible I/O Port enabled, a command is
+ issued to disable the ISA Compatible I/O Port, and it is noted that the
+ particular standard ISA I/O Address need not be probed.
+ */
+ PrimaryProbeInfo->IO_Address = 0;
+ while (pcibios_find_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
+ Index++, &Bus, &DeviceFunction) == 0)
+ if (pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_0, &BaseAddress0) == 0 &&
+ pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_1, &BaseAddress1) == 0 &&
+ pcibios_read_config_byte(Bus, DeviceFunction,
+ PCI_INTERRUPT_LINE, &IRQ_Channel) == 0)
+ {
+ BusLogic_HostAdapter_T *HostAdapter = PrototypeHostAdapter;
+ BusLogic_PCIHostAdapterInformation_T PCIHostAdapterInformation;
+ BusLogic_ModifyIOAddressRequest_T ModifyIOAddressRequest;
+ unsigned char Device = DeviceFunction >> 3;
+ IO_Address = BaseAddress0 & PCI_BASE_ADDRESS_IO_MASK;
+ PCI_Address = BaseAddress1 & PCI_BASE_ADDRESS_MEM_MASK;
+ if ((BaseAddress0 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_IO)
+ {
+ BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for "
+ "MultiMaster Host Adapter\n", NULL, BaseAddress0);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if ((BaseAddress1 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_MEMORY)
+ {
+ BusLogic_Error("BusLogic: Base Address1 0x%X not Memory for "
+ "MultiMaster Host Adapter\n", NULL, BaseAddress1);
+ BusLogic_Error("at PCI Bus %d Device %d PCI Address 0x%X\n",
+ NULL, Bus, Device, PCI_Address);
+ continue;
+ }
+ if (IRQ_Channel == 0 || IRQ_Channel >= NR_IRQS)
+ {
+ BusLogic_Error("BusLogic: IRQ Channel %d illegal for "
+ "MultiMaster Host Adapter\n", NULL, IRQ_Channel);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if (BusLogic_GlobalOptions.TraceProbe)
+ {
+ BusLogic_Notice("BusLogic: PCI MultiMaster Host Adapter "
+ "detected at\n", NULL);
+ BusLogic_Notice("BusLogic: PCI Bus %d Device %d I/O Address "
+ "0x%X PCI Address 0x%X\n", NULL,
+ Bus, Device, IO_Address, PCI_Address);
+ }
+ /*
+ Issue the Inquire PCI Host Adapter Information command to determine
+ the ISA Compatible I/O Port. If the ISA Compatible I/O Port is
+ known and enabled, note that the particular Standard ISA I/O
+ Address should not be probed.
+ */
+ HostAdapter->IO_Address = IO_Address;
+ BusLogic_InterruptReset(HostAdapter);
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_InquirePCIHostAdapterInformation,
+ NULL, 0, &PCIHostAdapterInformation,
+ sizeof(PCIHostAdapterInformation))
+ == sizeof(PCIHostAdapterInformation))
+ {
+ if (PCIHostAdapterInformation.ISACompatibleIOPort < 6)
+ StandardAddressSeen[PCIHostAdapterInformation
+ .ISACompatibleIOPort] = true;
+ }
+ else PCIHostAdapterInformation.ISACompatibleIOPort =
+ BusLogic_IO_Disable;
+ /*
+ Issue the Modify I/O Address command to disable the ISA Compatible
+ I/O Port.
+ */
+ ModifyIOAddressRequest = BusLogic_IO_Disable;
+ BusLogic_Command(HostAdapter, BusLogic_ModifyIOAddress,
+ &ModifyIOAddressRequest,
+ sizeof(ModifyIOAddressRequest), NULL, 0);
+ /*
+ For the first MultiMaster Host Adapter enumerated, issue the Fetch
+ Host Adapter Local RAM command to read byte 45 of the AutoSCSI area,
+ for the setting of the "Use Bus And Device # For PCI Scanning Seq."
+ option. Issue the Inquire Board ID command since this option is
+ only valid for the BT-948/958/958D.
+ */
+ if (!ForceBusDeviceScanningOrderChecked)
+ {
+ BusLogic_FetchHostAdapterLocalRAMRequest_T
+ FetchHostAdapterLocalRAMRequest;
+ BusLogic_AutoSCSIByte45_T AutoSCSIByte45;
+ BusLogic_BoardID_T BoardID;
+ FetchHostAdapterLocalRAMRequest.ByteOffset =
+ BusLogic_AutoSCSI_BaseOffset + 45;
+ FetchHostAdapterLocalRAMRequest.ByteCount =
+ sizeof(AutoSCSIByte45);
+ BusLogic_Command(HostAdapter,
+ BusLogic_FetchHostAdapterLocalRAM,
+ &FetchHostAdapterLocalRAMRequest,
+ sizeof(FetchHostAdapterLocalRAMRequest),
+ &AutoSCSIByte45, sizeof(AutoSCSIByte45));
+ BusLogic_Command(HostAdapter, BusLogic_InquireBoardID,
+ NULL, 0, &BoardID, sizeof(BoardID));
+ if (BoardID.FirmwareVersion1stDigit == '5')
+ ForceBusDeviceScanningOrder =
+ AutoSCSIByte45.ForceBusDeviceScanningOrder;
+ ForceBusDeviceScanningOrderChecked = true;
+ }
+ /*
+ Determine whether this MultiMaster Host Adapter has its ISA
+ Compatible I/O Port enabled and is assigned the Primary I/O Address.
+ If it does, then it is the Primary MultiMaster Host Adapter and must
+ be recognized first. If it does not, then it is added to the list
+ for probing after any Primary MultiMaster Host Adapter is probed.
+ */
+ if (PCIHostAdapterInformation.ISACompatibleIOPort == BusLogic_IO_330)
+ {
+ PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
+ PrimaryProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ PrimaryProbeInfo->IO_Address = IO_Address;
+ PrimaryProbeInfo->PCI_Address = PCI_Address;
+ PrimaryProbeInfo->Bus = Bus;
+ PrimaryProbeInfo->Device = Device;
+ PrimaryProbeInfo->IRQ_Channel = IRQ_Channel;
+ PCIMultiMasterCount++;
+ }
+ else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo =
+ &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
+ ProbeInfo->HostAdapterType = BusLogic_MultiMaster;
+ ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ ProbeInfo->IO_Address = IO_Address;
+ ProbeInfo->PCI_Address = PCI_Address;
+ ProbeInfo->Bus = Bus;
+ ProbeInfo->Device = Device;
+ ProbeInfo->IRQ_Channel = IRQ_Channel;
+ NonPrimaryPCIMultiMasterCount++;
+ PCIMultiMasterCount++;
+ }
+ else BusLogic_Warning("BusLogic: Too many Host Adapters "
+ "detected\n", NULL);
+ }
+ /*
+ If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." option is ON
+ for the first enumerated MultiMaster Host Adapter, and if that host adapter
+ is a BT-948/958/958D, then the MultiMaster BIOS will recognize MultiMaster
+ Host Adapters in the order of increasing PCI Bus and Device Number. In
+ that case, sort the probe information into the same order the BIOS uses.
+ If this option is OFF, then the MultiMaster BIOS will recognize MultiMaster
+ Host Adapters in the order they are enumerated by the PCI BIOS, and hence
+ no sorting is necessary.
+ */
+ if (ForceBusDeviceScanningOrder)
+ BusLogic_SortProbeInfo(&BusLogic_ProbeInfoList[
+ NonPrimaryPCIMultiMasterIndex],
+ NonPrimaryPCIMultiMasterCount);
+ /*
+ If no PCI MultiMaster Host Adapter is assigned the Primary I/O Address,
+ then the Primary I/O Address must be probed explicitly before any PCI
+ host adapters are probed.
+ */
+ if (!BusLogic_ProbeOptions.NoProbeISA)
+ if (PrimaryProbeInfo->IO_Address == 0 &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe330
+ : check_region(0x330, BusLogic_MultiMasterAddressCount) == 0))
+ {
+ PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
+ PrimaryProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
+ PrimaryProbeInfo->IO_Address = 0x330;
+ }
+ /*
+ Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
+ omitting the Primary I/O Address which has already been handled.
+ */
+ if (!BusLogic_ProbeOptions.NoProbeISA)
+ {
+ if (!StandardAddressSeen[1] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe334
+ : check_region(0x334, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x334);
+ if (!StandardAddressSeen[2] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe230
+ : check_region(0x230, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x230);
+ if (!StandardAddressSeen[3] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe234
+ : check_region(0x234, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x234);
+ if (!StandardAddressSeen[4] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe130
+ : check_region(0x130, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x130);
+ if (!StandardAddressSeen[5] &&
+ (BusLogic_ProbeOptions.LimitedProbeISA
+ ? BusLogic_ProbeOptions.Probe134
+ : check_region(0x134, BusLogic_MultiMasterAddressCount) == 0))
+ BusLogic_AppendProbeAddressISA(0x134);
+ }
+ /*
+ Iterate over the older non-compliant MultiMaster PCI Host Adapters,
+ noting the PCI bus location and assigned IRQ Channel.
+ */
+ Index = 0;
+ while (pcibios_find_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
+ Index++, &Bus, &DeviceFunction) == 0)
+ if (pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_0, &BaseAddress0) == 0 &&
+ pcibios_read_config_byte(Bus, DeviceFunction,
+ PCI_INTERRUPT_LINE, &IRQ_Channel) == 0)
+ {
+ unsigned char Device = DeviceFunction >> 3;
+ IO_Address = BaseAddress0 & PCI_BASE_ADDRESS_IO_MASK;
+ if (IO_Address == 0 || IRQ_Channel == 0 || IRQ_Channel >= NR_IRQS)
+ continue;
+ for (i = 0; i < BusLogic_ProbeInfoCount; i++)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo = &BusLogic_ProbeInfoList[i];
+ if (ProbeInfo->IO_Address == IO_Address &&
+ ProbeInfo->HostAdapterType == BusLogic_MultiMaster)
+ {
+ ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ ProbeInfo->PCI_Address = 0;
+ ProbeInfo->Bus = Bus;
+ ProbeInfo->Device = Device;
+ ProbeInfo->IRQ_Channel = IRQ_Channel;
+ break;
+ }
+ }
+ }
+ return PCIMultiMasterCount;
+}
+
+
+/*
+ BusLogic_InitializeFlashPointProbeInfo initializes the list of I/O Address
+ and Bus Probe Information to be checked for potential BusLogic FlashPoint
+ Host Adapters by interrogating the PCI Configuration Space. It returns the
+ number of FlashPoint Host Adapters found.
+*/
+
+static int BusLogic_InitializeFlashPointProbeInfo(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ int FlashPointIndex = BusLogic_ProbeInfoCount, FlashPointCount = 0;
+ unsigned char Bus, DeviceFunction;
+ unsigned int BaseAddress0, BaseAddress1;
+ unsigned char IRQ_Channel;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned short Index = 0;
+ /*
+ Interrogate PCI Configuration Space for any FlashPoint Host Adapters.
+ */
+ while (pcibios_find_device(PCI_VENDOR_ID_BUSLOGIC,
+ PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT,
+ Index++, &Bus, &DeviceFunction) == 0)
+ if (pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_0, &BaseAddress0) == 0 &&
+ pcibios_read_config_dword(Bus, DeviceFunction,
+ PCI_BASE_ADDRESS_1, &BaseAddress1) == 0 &&
+ pcibios_read_config_byte(Bus, DeviceFunction,
+ PCI_INTERRUPT_LINE, &IRQ_Channel) == 0)
+ {
+ unsigned char Device = DeviceFunction >> 3;
+ IO_Address = BaseAddress0 & PCI_BASE_ADDRESS_IO_MASK;
+ PCI_Address = BaseAddress1 & PCI_BASE_ADDRESS_MEM_MASK;
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+ if ((BaseAddress0 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_IO)
+ {
+ BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for "
+ "FlashPoint Host Adapter\n", NULL, BaseAddress0);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if ((BaseAddress1 & PCI_BASE_ADDRESS_SPACE)
+ != PCI_BASE_ADDRESS_SPACE_MEMORY)
+ {
+ BusLogic_Error("BusLogic: Base Address1 0x%X not Memory for "
+ "FlashPoint Host Adapter\n", NULL, BaseAddress1);
+ BusLogic_Error("at PCI Bus %d Device %d PCI Address 0x%X\n",
+ NULL, Bus, Device, PCI_Address);
+ continue;
+ }
+ if (IRQ_Channel == 0 || IRQ_Channel >= NR_IRQS)
+ {
+ BusLogic_Error("BusLogic: IRQ Channel %d illegal for "
+ "FlashPoint Host Adapter\n", NULL, IRQ_Channel);
+ BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n",
+ NULL, Bus, Device, IO_Address);
+ continue;
+ }
+ if (BusLogic_GlobalOptions.TraceProbe)
+ {
+ BusLogic_Notice("BusLogic: FlashPoint Host Adapter "
+ "detected at\n", NULL);
+ BusLogic_Notice("BusLogic: PCI Bus %d Device %d I/O Address "
+ "0x%X PCI Address 0x%X\n", NULL,
+ Bus, Device, IO_Address, PCI_Address);
+ }
+ if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo =
+ &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
+ ProbeInfo->HostAdapterType = BusLogic_FlashPoint;
+ ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
+ ProbeInfo->IO_Address = IO_Address;
+ ProbeInfo->PCI_Address = PCI_Address;
+ ProbeInfo->Bus = Bus;
+ ProbeInfo->Device = Device;
+ ProbeInfo->IRQ_Channel = IRQ_Channel;
+ FlashPointCount++;
+ }
+ else BusLogic_Warning("BusLogic: Too many Host Adapters "
+ "detected\n", NULL);
+#else
+ BusLogic_Error("BusLogic: FlashPoint Host Adapter detected at "
+ "PCI Bus %d Device %d\n", NULL, Bus, Device);
+ BusLogic_Error("BusLogic: I/O Address 0x%X PCI Address 0x%X, "
+ "but FlashPoint\n", NULL, IO_Address, PCI_Address);
+ BusLogic_Error("BusLogic: support was omitted in this kernel "
+ "configuration.\n", NULL);
+#endif
+ }
+ /*
+ The FlashPoint BIOS will scan for FlashPoint Host Adapters in the order of
+ increasing PCI Bus and Device Number, so sort the probe information into
+ the same order the BIOS uses.
+ */
+ BusLogic_SortProbeInfo(&BusLogic_ProbeInfoList[FlashPointIndex],
+ FlashPointCount);
+ return FlashPointCount;
+}
+
+
+/*
+ BusLogic_InitializeProbeInfoList initializes the list of I/O Address and Bus
+ Probe Information to be checked for potential BusLogic SCSI Host Adapters by
+ interrogating the PCI Configuration Space on PCI machines as well as from the
+ list of standard BusLogic MultiMaster ISA I/O Addresses. By default, if both
+ FlashPoint and PCI MultiMaster Host Adapters are present, this driver will
+ probe for FlashPoint Host Adapters first unless the BIOS primary disk is
+ controlled by the first PCI MultiMaster Host Adapter, in which case
+ MultiMaster Host Adapters will be probed first. The BusLogic Driver Options
+ specifications "MultiMasterFirst" and "FlashPointFirst" can be used to force
+ a particular probe order.
+*/
+
+static void BusLogic_InitializeProbeInfoList(BusLogic_HostAdapter_T
+ *PrototypeHostAdapter)
+{
+ /*
+ If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
+ Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
+ */
+ if (!BusLogic_ProbeOptions.NoProbePCI && pcibios_present())
+ {
+ if (BusLogic_ProbeOptions.MultiMasterFirst)
+ {
+ BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter);
+ BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter);
+ }
+ else if (BusLogic_ProbeOptions.FlashPointFirst)
+ {
+ BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter);
+ BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter);
+ }
+ else
+ {
+ int FlashPointCount =
+ BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter);
+ int PCIMultiMasterCount =
+ BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter);
+ if (FlashPointCount > 0 && PCIMultiMasterCount > 0)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo =
+ &BusLogic_ProbeInfoList[FlashPointCount];
+ BusLogic_HostAdapter_T *HostAdapter = PrototypeHostAdapter;
+ BusLogic_FetchHostAdapterLocalRAMRequest_T
+ FetchHostAdapterLocalRAMRequest;
+ BusLogic_BIOSDriveMapByte_T Drive0MapByte;
+ while (ProbeInfo->HostAdapterBusType != BusLogic_PCI_Bus)
+ ProbeInfo++;
+ HostAdapter->IO_Address = ProbeInfo->IO_Address;
+ FetchHostAdapterLocalRAMRequest.ByteOffset =
+ BusLogic_BIOS_BaseOffset + BusLogic_BIOS_DriveMapOffset + 0;
+ FetchHostAdapterLocalRAMRequest.ByteCount =
+ sizeof(Drive0MapByte);
+ BusLogic_Command(HostAdapter,
+ BusLogic_FetchHostAdapterLocalRAM,
+ &FetchHostAdapterLocalRAMRequest,
+ sizeof(FetchHostAdapterLocalRAMRequest),
+ &Drive0MapByte, sizeof(Drive0MapByte));
+ /*
+ If the Map Byte for BIOS Drive 0 indicates that BIOS Drive 0
+ is controlled by this PCI MultiMaster Host Adapter, then
+ reverse the probe order so that MultiMaster Host Adapters are
+ probed before FlashPoint Host Adapters.
+ */
+ if (Drive0MapByte.DiskGeometry !=
+ BusLogic_BIOS_Disk_Not_Installed)
+ {
+ BusLogic_ProbeInfo_T
+ SavedProbeInfo[BusLogic_MaxHostAdapters];
+ int MultiMasterCount =
+ BusLogic_ProbeInfoCount - FlashPointCount;
+ memcpy(SavedProbeInfo,
+ BusLogic_ProbeInfoList,
+ BusLogic_ProbeInfoCount
+ * sizeof(BusLogic_ProbeInfo_T));
+ memcpy(&BusLogic_ProbeInfoList[0],
+ &SavedProbeInfo[FlashPointCount],
+ MultiMasterCount * sizeof(BusLogic_ProbeInfo_T));
+ memcpy(&BusLogic_ProbeInfoList[MultiMasterCount],
+ &SavedProbeInfo[0],
+ FlashPointCount * sizeof(BusLogic_ProbeInfo_T));
+ }
+ }
+ }
+ }
+ else BusLogic_InitializeProbeInfoListISA(PrototypeHostAdapter);
+}
+
+
+#endif /* CONFIG_PCI */
+
+
+/*
+ BusLogic_Failure prints a standardized error message, and then returns false.
+*/
+
+static boolean BusLogic_Failure(BusLogic_HostAdapter_T *HostAdapter,
+ char *ErrorMessage)
+{
+ BusLogic_AnnounceDriver(HostAdapter);
+ if (HostAdapter->HostAdapterBusType == BusLogic_PCI_Bus)
+ {
+ BusLogic_Error("While configuring BusLogic PCI Host Adapter at\n",
+ HostAdapter);
+ BusLogic_Error("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n",
+ HostAdapter, HostAdapter->Bus, HostAdapter->Device,
+ HostAdapter->IO_Address, HostAdapter->PCI_Address);
+ }
+ else BusLogic_Error("While configuring BusLogic Host Adapter at "
+ "I/O Address 0x%X:\n", HostAdapter,
+ HostAdapter->IO_Address);
+ BusLogic_Error("%s FAILED - DETACHING\n", HostAdapter, ErrorMessage);
+ if (BusLogic_CommandFailureReason != NULL)
+ BusLogic_Error("ADDITIONAL FAILURE INFO - %s\n", HostAdapter,
+ BusLogic_CommandFailureReason);
+ return false;
+}
+
+
+/*
+ BusLogic_ProbeHostAdapter probes for a BusLogic Host Adapter.
+*/
+
+static boolean BusLogic_ProbeHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_StatusRegister_T StatusRegister;
+ BusLogic_InterruptRegister_T InterruptRegister;
+ BusLogic_GeometryRegister_T GeometryRegister;
+ /*
+ FlashPoint Host Adapters are Probed by the FlashPoint SCCB Manager.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ FlashPoint_Info_T *FlashPointInfo = &HostAdapter->FlashPointInfo;
+ FlashPointInfo->BaseAddress =
+ (BusLogic_Base_Address_T) HostAdapter->IO_Address;
+ FlashPointInfo->IRQ_Channel = HostAdapter->IRQ_Channel;
+ FlashPointInfo->Present = false;
+ if (!(FlashPoint_ProbeHostAdapter(FlashPointInfo) == 0 &&
+ FlashPointInfo->Present))
+ {
+ BusLogic_Error("BusLogic: FlashPoint Host Adapter detected at "
+ "PCI Bus %d Device %d\n", HostAdapter,
+ HostAdapter->Bus, HostAdapter->Device);
+ BusLogic_Error("BusLogic: I/O Address 0x%X PCI Address 0x%X, "
+ "but FlashPoint\n", HostAdapter,
+ HostAdapter->IO_Address, HostAdapter->PCI_Address);
+ BusLogic_Error("BusLogic: Probe Function failed to validate it.\n",
+ HostAdapter);
+ return false;
+ }
+ if (BusLogic_GlobalOptions.TraceProbe)
+ BusLogic_Notice("BusLogic_Probe(0x%X): FlashPoint Found\n",
+ HostAdapter, HostAdapter->IO_Address);
+ /*
+ Indicate the Host Adapter Probe completed successfully.
+ */
+ return true;
+ }
+ /*
+ Read the Status, Interrupt, and Geometry Registers to test if there are I/O
+ ports that respond, and to check the values to determine if they are from a
+ BusLogic Host Adapter. A nonexistent I/O port will return 0xFF, in which
+ case there is definitely no BusLogic Host Adapter at this base I/O Address.
+ The test here is a subset of that used by the BusLogic Host Adapter BIOS.
+ */
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ GeometryRegister.All = BusLogic_ReadGeometryRegister(HostAdapter);
+ if (BusLogic_GlobalOptions.TraceProbe)
+ BusLogic_Notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, "
+ "Geometry 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All,
+ InterruptRegister.All, GeometryRegister.All);
+ if (StatusRegister.All == 0 ||
+ StatusRegister.Bits.DiagnosticActive ||
+ StatusRegister.Bits.CommandParameterRegisterBusy ||
+ StatusRegister.Bits.Reserved ||
+ StatusRegister.Bits.CommandInvalid ||
+ InterruptRegister.Bits.Reserved != 0)
+ return false;
+ /*
+ Check the undocumented Geometry Register to test if there is an I/O port
+ that responded. Adaptec Host Adapters do not implement the Geometry
+ Register, so this test helps avoid incorrectly recognizing an Adaptec
+ 1542A or 1542B as a BusLogic Host Adapter. Unfortunately, the Adaptec 1542C
+ series does respond to the Geometry Register I/O port, but it will be
+ rejected later when the Inquire Extended Setup Information command is
+ issued in BusLogic_CheckHostAdapter. The AMI FastDisk Host Adapter is a
+ BusLogic clone that implements the same interface as earlier BusLogic
+ Host Adapters, including the undocumented commands, and is therefore
+ supported by this driver. However, the AMI FastDisk always returns 0x00
+ upon reading the Geometry Register, so the extended translation option
+ should always be left disabled on the AMI FastDisk.
+ */
+ if (GeometryRegister.All == 0xFF) return false;
+ /*
+ Indicate the Host Adapter Probe completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_HardwareResetHostAdapter issues a Hardware Reset to the Host Adapter
+ and waits for Host Adapter Diagnostics to complete. If HardReset is true, a
+ Hard Reset is performed which also initiates a SCSI Bus Reset. Otherwise, a
+ Soft Reset is performed which only resets the Host Adapter without forcing a
+ SCSI Bus Reset.
+*/
+
+static boolean BusLogic_HardwareResetHostAdapter(BusLogic_HostAdapter_T
+ *HostAdapter,
+ boolean HardReset)
+{
+ BusLogic_StatusRegister_T StatusRegister;
+ int TimeoutCounter;
+ /*
+ FlashPoint Host Adapters are Hard Reset by the FlashPoint SCCB Manager.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ FlashPoint_Info_T *FlashPointInfo = &HostAdapter->FlashPointInfo;
+ FlashPointInfo->HostSoftReset = !HardReset;
+ FlashPointInfo->ReportDataUnderrun = true;
+ HostAdapter->CardHandle =
+ FlashPoint_HardwareResetHostAdapter(FlashPointInfo);
+ if (HostAdapter->CardHandle == FlashPoint_BadCardHandle) return false;
+ /*
+ Indicate the Host Adapter Hard Reset completed successfully.
+ */
+ return true;
+ }
+ /*
+ Issue a Hard Reset or Soft Reset Command to the Host Adapter. The Host
+ Adapter should respond by setting Diagnostic Active in the Status Register.
+ */
+ if (HardReset)
+ BusLogic_HardReset(HostAdapter);
+ else BusLogic_SoftReset(HostAdapter);
+ /*
+ Wait until Diagnostic Active is set in the Status Register.
+ */
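+  /*
+    With udelay(100) in the polling loop, 5*10000 iterations bound this wait
+    at roughly 5 seconds; the two waits further below are bounded at roughly
+    10 seconds and 1 second respectively.
+  */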
+ TimeoutCounter = 5*10000;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.DiagnosticActive) break;
+ udelay(100);
+ }
+ if (BusLogic_GlobalOptions.TraceHardwareReset)
+ BusLogic_Notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, "
+ "Status 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All);
+ if (TimeoutCounter < 0) return false;
+ /*
+ Wait 100 microseconds to allow completion of any initial diagnostic
+ activity which might leave the contents of the Status Register
+ unpredictable.
+ */
+ udelay(100);
+ /*
+ Wait until Diagnostic Active is reset in the Status Register.
+ */
+ TimeoutCounter = 10*10000;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (!StatusRegister.Bits.DiagnosticActive) break;
+ udelay(100);
+ }
+ if (BusLogic_GlobalOptions.TraceHardwareReset)
+ BusLogic_Notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, "
+ "Status 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All);
+ if (TimeoutCounter < 0) return false;
+ /*
+ Wait until at least one of the Diagnostic Failure, Host Adapter Ready,
+ or Data In Register Ready bits is set in the Status Register.
+ */
+ TimeoutCounter = 10000;
+ while (--TimeoutCounter >= 0)
+ {
+ StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
+ if (StatusRegister.Bits.DiagnosticFailure ||
+ StatusRegister.Bits.HostAdapterReady ||
+ StatusRegister.Bits.DataInRegisterReady)
+ break;
+ udelay(100);
+ }
+ if (BusLogic_GlobalOptions.TraceHardwareReset)
+ BusLogic_Notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, "
+ "Status 0x%02X\n", HostAdapter,
+ HostAdapter->IO_Address, StatusRegister.All);
+ if (TimeoutCounter < 0) return false;
+ /*
+ If Diagnostic Failure is set or Host Adapter Ready is reset, then an
+ error occurred during the Host Adapter diagnostics. If Data In Register
+ Ready is set, then there is an Error Code available.
+ */
+ if (StatusRegister.Bits.DiagnosticFailure ||
+ !StatusRegister.Bits.HostAdapterReady)
+ {
+ BusLogic_CommandFailureReason = NULL;
+ BusLogic_Failure(HostAdapter, "HARD RESET DIAGNOSTICS");
+ BusLogic_Error("HOST ADAPTER STATUS REGISTER = %02X\n",
+ HostAdapter, StatusRegister.All);
+ if (StatusRegister.Bits.DataInRegisterReady)
+ {
+ unsigned char ErrorCode = BusLogic_ReadDataInRegister(HostAdapter);
+ BusLogic_Error("HOST ADAPTER ERROR CODE = %d\n",
+ HostAdapter, ErrorCode);
+ }
+ return false;
+ }
+ /*
+ Indicate the Host Adapter Hard Reset completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_CheckHostAdapter checks to be sure this really is a BusLogic
+ Host Adapter.
+*/
+
+static boolean BusLogic_CheckHostAdapter(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_ExtendedSetupInformation_T ExtendedSetupInformation;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ boolean Result = true;
+ /*
+ FlashPoint Host Adapters do not require this protection.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter)) return true;
+ /*
+ Issue the Inquire Extended Setup Information command. Only genuine
+ BusLogic Host Adapters and true clones support this command. Adaptec 1542C
+ series Host Adapters that respond to the Geometry Register I/O port will
+ fail this command.
+ */
+ RequestedReplyLength = sizeof(ExtendedSetupInformation);
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_InquireExtendedSetupInformation,
+ &RequestedReplyLength,
+ sizeof(RequestedReplyLength),
+ &ExtendedSetupInformation,
+ sizeof(ExtendedSetupInformation))
+ != sizeof(ExtendedSetupInformation))
+ Result = false;
+ /*
+ Provide tracing information if requested and return.
+ */
+ if (BusLogic_GlobalOptions.TraceProbe)
+ BusLogic_Notice("BusLogic_Check(0x%X): MultiMaster %s\n", HostAdapter,
+ HostAdapter->IO_Address, (Result ? "Found" : "Not Found"));
+ return Result;
+}
+
+
+/*
+ BusLogic_ReadHostAdapterConfiguration reads the Configuration Information
+ from the Host Adapter and initializes the Host Adapter structure.
+*/
+
+static boolean BusLogic_ReadHostAdapterConfiguration(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ BusLogic_BoardID_T BoardID;
+ BusLogic_Configuration_T Configuration;
+ BusLogic_SetupInformation_T SetupInformation;
+ BusLogic_ExtendedSetupInformation_T ExtendedSetupInformation;
+ BusLogic_HostAdapterModelNumber_T HostAdapterModelNumber;
+ BusLogic_FirmwareVersion3rdDigit_T FirmwareVersion3rdDigit;
+ BusLogic_FirmwareVersionLetter_T FirmwareVersionLetter;
+ BusLogic_PCIHostAdapterInformation_T PCIHostAdapterInformation;
+ BusLogic_FetchHostAdapterLocalRAMRequest_T FetchHostAdapterLocalRAMRequest;
+ BusLogic_AutoSCSIData_T AutoSCSIData;
+ BusLogic_GeometryRegister_T GeometryRegister;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ unsigned char *TargetPointer, Character;
+ int TargetID, i;
+ /*
+ Configuration Information for FlashPoint Host Adapters is provided in the
+ FlashPoint_Info structure by the FlashPoint SCCB Manager's Probe Function.
+ Initialize fields in the Host Adapter structure from the FlashPoint_Info
+ structure.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ FlashPoint_Info_T *FlashPointInfo = &HostAdapter->FlashPointInfo;
+ TargetPointer = HostAdapter->ModelName;
+ *TargetPointer++ = 'B';
+ *TargetPointer++ = 'T';
+ *TargetPointer++ = '-';
+ for (i = 0; i < sizeof(FlashPointInfo->ModelNumber); i++)
+ *TargetPointer++ = FlashPointInfo->ModelNumber[i];
+ *TargetPointer++ = '\0';
+ strcpy(HostAdapter->FirmwareVersion, FlashPoint_FirmwareVersion);
+ HostAdapter->SCSI_ID = FlashPointInfo->SCSI_ID;
+ HostAdapter->ExtendedTranslationEnabled =
+ FlashPointInfo->ExtendedTranslationEnabled;
+ HostAdapter->ParityCheckingEnabled =
+ FlashPointInfo->ParityCheckingEnabled;
+ HostAdapter->BusResetEnabled = !FlashPointInfo->HostSoftReset;
+ HostAdapter->LevelSensitiveInterrupt = true;
+ HostAdapter->HostWideSCSI = FlashPointInfo->HostWideSCSI;
+ HostAdapter->HostDifferentialSCSI = false;
+ HostAdapter->HostSupportsSCAM = true;
+ HostAdapter->HostUltraSCSI = true;
+ HostAdapter->ExtendedLUNSupport = true;
+ HostAdapter->TerminationInfoValid = true;
+ HostAdapter->LowByteTerminated = FlashPointInfo->LowByteTerminated;
+ HostAdapter->HighByteTerminated = FlashPointInfo->HighByteTerminated;
+ HostAdapter->SCAM_Enabled = FlashPointInfo->SCAM_Enabled;
+ HostAdapter->SCAM_Level2 = FlashPointInfo->SCAM_Level2;
+ HostAdapter->DriverScatterGatherLimit = BusLogic_ScatterGatherLimit;
+ HostAdapter->MaxTargetDevices = (HostAdapter->HostWideSCSI ? 16 : 8);
+ HostAdapter->MaxLogicalUnits = 32;
+ HostAdapter->InitialCCBs = 4 * BusLogic_CCB_AllocationGroupSize;
+ HostAdapter->IncrementalCCBs = BusLogic_CCB_AllocationGroupSize;
+ HostAdapter->DriverQueueDepth = 255;
+ HostAdapter->HostAdapterQueueDepth = HostAdapter->DriverQueueDepth;
+ HostAdapter->SynchronousPermitted = FlashPointInfo->SynchronousPermitted;
+ HostAdapter->FastPermitted = FlashPointInfo->FastPermitted;
+ HostAdapter->UltraPermitted = FlashPointInfo->UltraPermitted;
+ HostAdapter->WidePermitted = FlashPointInfo->WidePermitted;
+ HostAdapter->DisconnectPermitted = FlashPointInfo->DisconnectPermitted;
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ goto Common;
+ }
+ /*
+ Issue the Inquire Board ID command.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireBoardID, NULL, 0,
+ &BoardID, sizeof(BoardID)) != sizeof(BoardID))
+ return BusLogic_Failure(HostAdapter, "INQUIRE BOARD ID");
+ /*
+ Issue the Inquire Configuration command.
+ */
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireConfiguration, NULL, 0,
+ &Configuration, sizeof(Configuration))
+ != sizeof(Configuration))
+ return BusLogic_Failure(HostAdapter, "INQUIRE CONFIGURATION");
+ /*
+ Issue the Inquire Setup Information command.
+ */
+ RequestedReplyLength = sizeof(SetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &SetupInformation, sizeof(SetupInformation))
+ != sizeof(SetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION");
+ /*
+ Issue the Inquire Extended Setup Information command.
+ */
+ RequestedReplyLength = sizeof(ExtendedSetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireExtendedSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &ExtendedSetupInformation,
+ sizeof(ExtendedSetupInformation))
+ != sizeof(ExtendedSetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE EXTENDED SETUP INFORMATION");
+ /*
+ Issue the Inquire Firmware Version 3rd Digit command.
+ */
+ FirmwareVersion3rdDigit = '\0';
+ if (BoardID.FirmwareVersion1stDigit > '0')
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersion3rdDigit,
+ NULL, 0, &FirmwareVersion3rdDigit,
+ sizeof(FirmwareVersion3rdDigit))
+ != sizeof(FirmwareVersion3rdDigit))
+ return BusLogic_Failure(HostAdapter, "INQUIRE FIRMWARE 3RD DIGIT");
+ /*
+ Issue the Inquire Host Adapter Model Number command.
+ */
+ if (ExtendedSetupInformation.BusType == 'A' &&
+ BoardID.FirmwareVersion1stDigit == '2')
+ /* BusLogic BT-542B ISA 2.xx */
+ strcpy(HostAdapterModelNumber, "542B");
+ else if (ExtendedSetupInformation.BusType == 'E' &&
+ BoardID.FirmwareVersion1stDigit == '2' &&
+ (BoardID.FirmwareVersion2ndDigit <= '1' ||
+ (BoardID.FirmwareVersion2ndDigit == '2' &&
+ FirmwareVersion3rdDigit == '0')))
+ /* BusLogic BT-742A EISA 2.1x or 2.20 */
+ strcpy(HostAdapterModelNumber, "742A");
+ else if (ExtendedSetupInformation.BusType == 'E' &&
+ BoardID.FirmwareVersion1stDigit == '0')
+ /* AMI FastDisk EISA Series 441 0.x */
+ strcpy(HostAdapterModelNumber, "747A");
+ else
+ {
+ RequestedReplyLength = sizeof(HostAdapterModelNumber);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireHostAdapterModelNumber,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &HostAdapterModelNumber,
+ sizeof(HostAdapterModelNumber))
+ != sizeof(HostAdapterModelNumber))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE HOST ADAPTER MODEL NUMBER");
+ }
+ /*
+ BusLogic MultiMaster Host Adapters can be identified by their model number
+ and the major version number of their firmware as follows:
+
+ 5.xx BusLogic "W" Series Host Adapters:
+ BT-948/958/958D
+ 4.xx BusLogic "C" Series Host Adapters:
+ BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
+ 3.xx BusLogic "S" Series Host Adapters:
+ BT-747S/747D/757S/757D/445S/545S/542D
+ BT-542B/742A (revision H)
+ 2.xx BusLogic "A" Series Host Adapters:
+ BT-542B/742A (revision G and below)
+ 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
+ */
+ /*
+ Save the Model Name and Host Adapter Name in the Host Adapter structure.
+ */
+ TargetPointer = HostAdapter->ModelName;
+ *TargetPointer++ = 'B';
+ *TargetPointer++ = 'T';
+ *TargetPointer++ = '-';
+ for (i = 0; i < sizeof(HostAdapterModelNumber); i++)
+ {
+ Character = HostAdapterModelNumber[i];
+ if (Character == ' ' || Character == '\0') break;
+ *TargetPointer++ = Character;
+ }
+ *TargetPointer++ = '\0';
+ /*
+ Save the Firmware Version in the Host Adapter structure.
+ */
+ TargetPointer = HostAdapter->FirmwareVersion;
+ *TargetPointer++ = BoardID.FirmwareVersion1stDigit;
+ *TargetPointer++ = '.';
+ *TargetPointer++ = BoardID.FirmwareVersion2ndDigit;
+ if (FirmwareVersion3rdDigit != ' ' && FirmwareVersion3rdDigit != '\0')
+ *TargetPointer++ = FirmwareVersion3rdDigit;
+ *TargetPointer = '\0';
+ /*
+ Issue the Inquire Firmware Version Letter command.
+ */
+ if (strcmp(HostAdapter->FirmwareVersion, "3.3") >= 0)
+ {
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersionLetter,
+ NULL, 0, &FirmwareVersionLetter,
+ sizeof(FirmwareVersionLetter))
+ != sizeof(FirmwareVersionLetter))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE FIRMWARE VERSION LETTER");
+ if (FirmwareVersionLetter != ' ' && FirmwareVersionLetter != '\0')
+ *TargetPointer++ = FirmwareVersionLetter;
+ *TargetPointer = '\0';
+ }
+ /*
+ Save the Host Adapter SCSI ID in the Host Adapter structure.
+ */
+ HostAdapter->SCSI_ID = Configuration.HostAdapterID;
+ /*
+ Determine the Bus Type and save it in the Host Adapter structure, determine
+ and save the IRQ Channel if necessary, and determine and save the DMA
+ Channel for ISA Host Adapters.
+ */
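+  /*
+    ModelName[3] is the leading digit of the model number (for example the
+    '4' in "BT-445S"), which identifies the board family and hence its
+    bus type.
+  */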
+ HostAdapter->HostAdapterBusType =
+ BusLogic_HostAdapterBusTypes[HostAdapter->ModelName[3] - '4'];
+ if (HostAdapter->IRQ_Channel == 0)
+ {
+ if (Configuration.IRQ_Channel9)
+ HostAdapter->IRQ_Channel = 9;
+ else if (Configuration.IRQ_Channel10)
+ HostAdapter->IRQ_Channel = 10;
+ else if (Configuration.IRQ_Channel11)
+ HostAdapter->IRQ_Channel = 11;
+ else if (Configuration.IRQ_Channel12)
+ HostAdapter->IRQ_Channel = 12;
+ else if (Configuration.IRQ_Channel14)
+ HostAdapter->IRQ_Channel = 14;
+ else if (Configuration.IRQ_Channel15)
+ HostAdapter->IRQ_Channel = 15;
+ }
+ if (HostAdapter->HostAdapterBusType == BusLogic_ISA_Bus)
+ {
+ if (Configuration.DMA_Channel5)
+ HostAdapter->DMA_Channel = 5;
+ else if (Configuration.DMA_Channel6)
+ HostAdapter->DMA_Channel = 6;
+ else if (Configuration.DMA_Channel7)
+ HostAdapter->DMA_Channel = 7;
+ }
+ /*
+ Determine whether Extended Translation is enabled and save it in
+ the Host Adapter structure.
+ */
+ GeometryRegister.All = BusLogic_ReadGeometryRegister(HostAdapter);
+ HostAdapter->ExtendedTranslationEnabled =
+ GeometryRegister.Bits.ExtendedTranslationEnabled;
+ /*
+ Save the Scatter Gather Limits, Level Sensitive Interrupt flag, Wide
+ SCSI flag, Differential SCSI flag, SCAM Supported flag, and
+ Ultra SCSI flag in the Host Adapter structure.
+ */
+ HostAdapter->HostAdapterScatterGatherLimit =
+ ExtendedSetupInformation.ScatterGatherLimit;
+ HostAdapter->DriverScatterGatherLimit =
+ HostAdapter->HostAdapterScatterGatherLimit;
+ if (HostAdapter->HostAdapterScatterGatherLimit > BusLogic_ScatterGatherLimit)
+ HostAdapter->DriverScatterGatherLimit = BusLogic_ScatterGatherLimit;
+ if (ExtendedSetupInformation.Misc.LevelSensitiveInterrupt)
+ HostAdapter->LevelSensitiveInterrupt = true;
+ HostAdapter->HostWideSCSI = ExtendedSetupInformation.HostWideSCSI;
+ HostAdapter->HostDifferentialSCSI =
+ ExtendedSetupInformation.HostDifferentialSCSI;
+ HostAdapter->HostSupportsSCAM = ExtendedSetupInformation.HostSupportsSCAM;
+ HostAdapter->HostUltraSCSI = ExtendedSetupInformation.HostUltraSCSI;
+ /*
+ Determine whether Extended LUN Format CCBs are supported and save the
+ information in the Host Adapter structure.
+ */
+ if (HostAdapter->FirmwareVersion[0] == '5' ||
+ (HostAdapter->FirmwareVersion[0] == '4' && HostAdapter->HostWideSCSI))
+ HostAdapter->ExtendedLUNSupport = true;
+ /*
+ Issue the Inquire PCI Host Adapter Information command to read the
+ Termination Information from "W" series MultiMaster Host Adapters.
+ */
+ if (HostAdapter->FirmwareVersion[0] == '5')
+ {
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_InquirePCIHostAdapterInformation,
+ NULL, 0, &PCIHostAdapterInformation,
+ sizeof(PCIHostAdapterInformation))
+ != sizeof(PCIHostAdapterInformation))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE PCI HOST ADAPTER INFORMATION");
+ /*
+ Save the Termination Information in the Host Adapter structure.
+ */
+ if (PCIHostAdapterInformation.GenericInfoValid)
+ {
+ HostAdapter->TerminationInfoValid = true;
+ HostAdapter->LowByteTerminated =
+ PCIHostAdapterInformation.LowByteTerminated;
+ HostAdapter->HighByteTerminated =
+ PCIHostAdapterInformation.HighByteTerminated;
+ }
+ }
+ /*
+ Issue the Fetch Host Adapter Local RAM command to read the AutoSCSI data
+ from "W" and "C" series MultiMaster Host Adapters.
+ */
+ if (HostAdapter->FirmwareVersion[0] >= '4')
+ {
+ FetchHostAdapterLocalRAMRequest.ByteOffset =
+ BusLogic_AutoSCSI_BaseOffset;
+ FetchHostAdapterLocalRAMRequest.ByteCount = sizeof(AutoSCSIData);
+ if (BusLogic_Command(HostAdapter,
+ BusLogic_FetchHostAdapterLocalRAM,
+ &FetchHostAdapterLocalRAMRequest,
+ sizeof(FetchHostAdapterLocalRAMRequest),
+ &AutoSCSIData, sizeof(AutoSCSIData))
+ != sizeof(AutoSCSIData))
+ return BusLogic_Failure(HostAdapter, "FETCH HOST ADAPTER LOCAL RAM");
+ /*
+ Save the Parity Checking Enabled, Bus Reset Enabled, and Termination
+ Information in the Host Adapter structure.
+ */
+ HostAdapter->ParityCheckingEnabled = AutoSCSIData.ParityCheckingEnabled;
+ HostAdapter->BusResetEnabled = AutoSCSIData.BusResetEnabled;
+ if (HostAdapter->FirmwareVersion[0] == '4')
+ {
+ HostAdapter->TerminationInfoValid = true;
+ HostAdapter->LowByteTerminated = AutoSCSIData.LowByteTerminated;
+ HostAdapter->HighByteTerminated = AutoSCSIData.HighByteTerminated;
+ }
+ /*
+ Save the Wide Permitted, Fast Permitted, Synchronous Permitted,
+ Disconnect Permitted, Ultra Permitted, and SCAM Information in the
+ Host Adapter structure.
+ */
+ HostAdapter->WidePermitted = AutoSCSIData.WidePermitted;
+ HostAdapter->FastPermitted = AutoSCSIData.FastPermitted;
+ HostAdapter->SynchronousPermitted =
+ AutoSCSIData.SynchronousPermitted;
+ HostAdapter->DisconnectPermitted =
+ AutoSCSIData.DisconnectPermitted;
+ if (HostAdapter->HostUltraSCSI)
+ HostAdapter->UltraPermitted = AutoSCSIData.UltraPermitted;
+ if (HostAdapter->HostSupportsSCAM)
+ {
+ HostAdapter->SCAM_Enabled = AutoSCSIData.SCAM_Enabled;
+ HostAdapter->SCAM_Level2 = AutoSCSIData.SCAM_Level2;
+ }
+ }
+ /*
+ Initialize fields in the Host Adapter structure for "S" and "A" series
+ MultiMaster Host Adapters.
+ */
+ if (HostAdapter->FirmwareVersion[0] < '4')
+ {
+ if (SetupInformation.SynchronousInitiationEnabled)
+ {
+ HostAdapter->SynchronousPermitted = 0xFF;
+ if (HostAdapter->HostAdapterBusType == BusLogic_EISA_Bus)
+ {
+ if (ExtendedSetupInformation.Misc.FastOnEISA)
+ HostAdapter->FastPermitted = 0xFF;
+ if (strcmp(HostAdapter->ModelName, "BT-757") == 0)
+ HostAdapter->WidePermitted = 0xFF;
+ }
+ }
+ HostAdapter->DisconnectPermitted = 0xFF;
+ HostAdapter->ParityCheckingEnabled =
+ SetupInformation.ParityCheckingEnabled;
+ HostAdapter->BusResetEnabled = true;
+ }
+ /*
+ Determine the maximum number of Target IDs and Logical Units supported by
+ this driver for Wide and Narrow Host Adapters.
+ */
+ HostAdapter->MaxTargetDevices = (HostAdapter->HostWideSCSI ? 16 : 8);
+ HostAdapter->MaxLogicalUnits = (HostAdapter->ExtendedLUNSupport ? 32 : 8);
+ /*
+ Select appropriate values for the Mailbox Count, Driver Queue Depth,
+ Initial CCBs, and Incremental CCBs variables based on whether or not Strict
+ Round Robin Mode is supported. If Strict Round Robin Mode is supported,
+ then there is no performance degradation in using the maximum possible
+ number of Outgoing and Incoming Mailboxes and allowing the Tagged and
+ Untagged Queue Depths to determine the actual utilization. If Strict Round
+ Robin Mode is not supported, then the Host Adapter must scan all the
+ Outgoing Mailboxes whenever an Outgoing Mailbox entry is made, which can
+ cause a substantial performance penalty. The host adapters actually have
+ room to store the following number of CCBs internally; that is, they can
+ internally queue and manage this many active commands on the SCSI bus
+ simultaneously. Performance measurements demonstrate that the Driver Queue
+ Depth should be set to the Mailbox Count, rather than the Host Adapter
+ Queue Depth (internal CCB capacity), as it is more efficient to have the
+ queued commands waiting in Outgoing Mailboxes if necessary than to block
+ the process in the higher levels of the SCSI Subsystem.
+
+ 192 BT-948/958/958D
+ 100 BT-946C/956C/956CD/747C/757C/757CD/445C
+ 50 BT-545C/540CF
+ 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
+ */
+ if (HostAdapter->FirmwareVersion[0] == '5')
+ HostAdapter->HostAdapterQueueDepth = 192;
+ else if (HostAdapter->FirmwareVersion[0] == '4')
+ HostAdapter->HostAdapterQueueDepth =
+ (HostAdapter->HostAdapterBusType != BusLogic_ISA_Bus ? 100 : 50);
+ else HostAdapter->HostAdapterQueueDepth = 30;
+ if (strcmp(HostAdapter->FirmwareVersion, "3.31") >= 0)
+ {
+ HostAdapter->StrictRoundRobinModeSupport = true;
+ HostAdapter->MailboxCount = BusLogic_MaxMailboxes;
+ }
+ else
+ {
+ HostAdapter->StrictRoundRobinModeSupport = false;
+ HostAdapter->MailboxCount = 32;
+ }
+ HostAdapter->DriverQueueDepth = HostAdapter->MailboxCount;
+ HostAdapter->InitialCCBs = 4 * BusLogic_CCB_AllocationGroupSize;
+ HostAdapter->IncrementalCCBs = BusLogic_CCB_AllocationGroupSize;
+ /*
+ Tagged Queuing support is available and operates properly on all "W" series
+ MultiMaster Host Adapters, on "C" series MultiMaster Host Adapters with
+ firmware version 4.22 and above, and on "S" series MultiMaster Host
+ Adapters with firmware version 3.35 and above.
+ */
+ HostAdapter->TaggedQueuingPermitted = 0;
+ switch (HostAdapter->FirmwareVersion[0])
+ {
+ case '5':
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ break;
+ case '4':
+ if (strcmp(HostAdapter->FirmwareVersion, "4.22") >= 0)
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ break;
+ case '3':
+ if (strcmp(HostAdapter->FirmwareVersion, "3.35") >= 0)
+ HostAdapter->TaggedQueuingPermitted = 0xFFFF;
+ break;
+ }
+ /*
+ Determine the Host Adapter BIOS Address if the BIOS is enabled and
+ save it in the Host Adapter structure. The BIOS is disabled if the
+ BIOS_Address is 0.
+ */
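+  /* The reported value appears to be in units of 4 KB, hence the shift. */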
+ HostAdapter->BIOS_Address = ExtendedSetupInformation.BIOS_Address << 12;
+ /*
+ ISA Host Adapters require Bounce Buffers if there is more than 16MB memory.
+ */
+ if (HostAdapter->HostAdapterBusType == BusLogic_ISA_Bus &&
+ (void *) high_memory > (void *) MAX_DMA_ADDRESS)
+ HostAdapter->BounceBuffersRequired = true;
+ /*
+ BusLogic BT-445S Host Adapters prior to board revision E have a hardware
+ bug whereby when the BIOS is enabled, transfers to/from the same address
+ range the BIOS occupies modulo 16MB are handled incorrectly. Only properly
+ functioning BT-445S Host Adapters have firmware version 3.37, so require
+ that ISA Bounce Buffers be used for the buggy BT-445S models if there is
+ more than 16MB memory.
+ */
+ if (HostAdapter->BIOS_Address > 0 &&
+ strcmp(HostAdapter->ModelName, "BT-445S") == 0 &&
+ strcmp(HostAdapter->FirmwareVersion, "3.37") < 0 &&
+ (void *) high_memory > (void *) MAX_DMA_ADDRESS)
+ HostAdapter->BounceBuffersRequired = true;
+ /*
+ Initialize parameters common to MultiMaster and FlashPoint Host Adapters.
+ */
+Common:
+ /*
+ Initialize the Host Adapter Full Model Name from the Model Name.
+ */
+ strcpy(HostAdapter->FullModelName, "BusLogic ");
+ strcat(HostAdapter->FullModelName, HostAdapter->ModelName);
+ /*
+ Select an appropriate value for the Tagged Queue Depth either from a
+ BusLogic Driver Options specification, or based on whether this Host
+ Adapter requires that ISA Bounce Buffers be used. The Tagged Queue Depth
+ is left at 0 for automatic determination in BusLogic_SelectQueueDepths.
+ Initialize the Untagged Queue Depth.
+ */
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ {
+ unsigned char QueueDepth = 0;
+ if (HostAdapter->DriverOptions != NULL &&
+ HostAdapter->DriverOptions->QueueDepth[TargetID] > 0)
+ QueueDepth = HostAdapter->DriverOptions->QueueDepth[TargetID];
+ else if (HostAdapter->BounceBuffersRequired)
+ QueueDepth = BusLogic_TaggedQueueDepthBB;
+ HostAdapter->QueueDepth[TargetID] = QueueDepth;
+ }
+ if (HostAdapter->BounceBuffersRequired)
+ HostAdapter->UntaggedQueueDepth = BusLogic_UntaggedQueueDepthBB;
+ else HostAdapter->UntaggedQueueDepth = BusLogic_UntaggedQueueDepth;
+ if (HostAdapter->DriverOptions != NULL)
+ HostAdapter->CommonQueueDepth =
+ HostAdapter->DriverOptions->CommonQueueDepth;
+ if (HostAdapter->CommonQueueDepth > 0 &&
+ HostAdapter->CommonQueueDepth < HostAdapter->UntaggedQueueDepth)
+ HostAdapter->UntaggedQueueDepth = HostAdapter->CommonQueueDepth;
+ /*
+ Tagged Queuing is only allowed if Disconnect/Reconnect is permitted.
+ Therefore, mask the Tagged Queuing Permitted Default bits with the
+ Disconnect/Reconnect Permitted bits.
+ */
+ HostAdapter->TaggedQueuingPermitted &= HostAdapter->DisconnectPermitted;
+ /*
+ Combine the default Tagged Queuing Permitted bits with any BusLogic Driver
+ Options Tagged Queuing specification.
+ */
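+  /*
+    Target bits covered by TaggedQueuingPermittedMask are taken from the
+    Driver Options specification; all other targets keep their default
+    setting.
+  */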
+ if (HostAdapter->DriverOptions != NULL)
+ HostAdapter->TaggedQueuingPermitted =
+ (HostAdapter->DriverOptions->TaggedQueuingPermitted &
+ HostAdapter->DriverOptions->TaggedQueuingPermittedMask) |
+ (HostAdapter->TaggedQueuingPermitted &
+ ~HostAdapter->DriverOptions->TaggedQueuingPermittedMask);
+ /*
+ Select appropriate values for the Error Recovery Strategy array
+ either from a BusLogic Driver Options specification, or using
+ BusLogic_ErrorRecovery_Default.
+ */
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ if (HostAdapter->DriverOptions != NULL)
+ HostAdapter->ErrorRecoveryStrategy[TargetID] =
+ HostAdapter->DriverOptions->ErrorRecoveryStrategy[TargetID];
+ else HostAdapter->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ /*
+ Select an appropriate value for Bus Settle Time either from a BusLogic
+ Driver Options specification, or from BusLogic_DefaultBusSettleTime.
+ */
+ if (HostAdapter->DriverOptions != NULL &&
+ HostAdapter->DriverOptions->BusSettleTime > 0)
+ HostAdapter->BusSettleTime = HostAdapter->DriverOptions->BusSettleTime;
+ else HostAdapter->BusSettleTime = BusLogic_DefaultBusSettleTime;
+ /*
+ Indicate reading the Host Adapter Configuration completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_ReportHostAdapterConfiguration reports the configuration of
+ the Host Adapter.
+*/
+
+static boolean BusLogic_ReportHostAdapterConfiguration(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ unsigned short AllTargetsMask = (1 << HostAdapter->MaxTargetDevices) - 1;
+ unsigned short SynchronousPermitted, FastPermitted;
+ unsigned short UltraPermitted, WidePermitted;
+ unsigned short DisconnectPermitted, TaggedQueuingPermitted;
+ boolean CommonSynchronousNegotiation, CommonTaggedQueueDepth;
+ boolean CommonErrorRecovery;
+ char SynchronousString[BusLogic_MaxTargetDevices+1];
+ char WideString[BusLogic_MaxTargetDevices+1];
+ char DisconnectString[BusLogic_MaxTargetDevices+1];
+ char TaggedQueuingString[BusLogic_MaxTargetDevices+1];
+ char ErrorRecoveryString[BusLogic_MaxTargetDevices+1];
+ char *SynchronousMessage = SynchronousString;
+ char *WideMessage = WideString;
+ char *DisconnectMessage = DisconnectString;
+ char *TaggedQueuingMessage = TaggedQueuingString;
+ char *ErrorRecoveryMessage = ErrorRecoveryString;
+ int TargetID;
+ BusLogic_Info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n",
+ HostAdapter, HostAdapter->ModelName,
+ BusLogic_HostAdapterBusNames[HostAdapter->HostAdapterBusType],
+ (HostAdapter->HostWideSCSI ? " Wide" : ""),
+ (HostAdapter->HostDifferentialSCSI ? " Differential" : ""),
+ (HostAdapter->HostUltraSCSI ? " Ultra" : ""));
+ BusLogic_Info(" Firmware Version: %s, I/O Address: 0x%X, "
+ "IRQ Channel: %d/%s\n", HostAdapter,
+ HostAdapter->FirmwareVersion,
+ HostAdapter->IO_Address, HostAdapter->IRQ_Channel,
+ (HostAdapter->LevelSensitiveInterrupt ? "Level" : "Edge"));
+ if (HostAdapter->HostAdapterBusType != BusLogic_PCI_Bus)
+ {
+ BusLogic_Info(" DMA Channel: ", HostAdapter);
+ if (HostAdapter->DMA_Channel > 0)
+ BusLogic_Info("%d, ", HostAdapter, HostAdapter->DMA_Channel);
+ else BusLogic_Info("None, ", HostAdapter);
+ if (HostAdapter->BIOS_Address > 0)
+ BusLogic_Info("BIOS Address: 0x%X, ", HostAdapter,
+ HostAdapter->BIOS_Address);
+ else BusLogic_Info("BIOS Address: None, ", HostAdapter);
+ }
+ else
+ {
+ BusLogic_Info(" PCI Bus: %d, Device: %d, Address: ",
+ HostAdapter, HostAdapter->Bus, HostAdapter->Device);
+ if (HostAdapter->PCI_Address > 0)
+ BusLogic_Info("0x%X, ", HostAdapter, HostAdapter->PCI_Address);
+ else BusLogic_Info("Unassigned, ", HostAdapter);
+ }
+ BusLogic_Info("Host Adapter SCSI ID: %d\n", HostAdapter,
+ HostAdapter->SCSI_ID);
+ BusLogic_Info(" Parity Checking: %s, Extended Translation: %s\n",
+ HostAdapter,
+ (HostAdapter->ParityCheckingEnabled
+ ? "Enabled" : "Disabled"),
+ (HostAdapter->ExtendedTranslationEnabled
+ ? "Enabled" : "Disabled"));
+ AllTargetsMask &= ~(1 << HostAdapter->SCSI_ID);
+ SynchronousPermitted = HostAdapter->SynchronousPermitted & AllTargetsMask;
+ FastPermitted = HostAdapter->FastPermitted & AllTargetsMask;
+ UltraPermitted = HostAdapter->UltraPermitted & AllTargetsMask;
+ if ((BusLogic_MultiMasterHostAdapterP(HostAdapter) &&
+ (HostAdapter->FirmwareVersion[0] >= '4' ||
+ HostAdapter->HostAdapterBusType == BusLogic_EISA_Bus)) ||
+ BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ CommonSynchronousNegotiation = false;
+ if (SynchronousPermitted == 0)
+ {
+ SynchronousMessage = "Disabled";
+ CommonSynchronousNegotiation = true;
+ }
+ else if (SynchronousPermitted == AllTargetsMask)
+ {
+ if (FastPermitted == 0)
+ {
+ SynchronousMessage = "Slow";
+ CommonSynchronousNegotiation = true;
+ }
+ else if (FastPermitted == AllTargetsMask)
+ {
+ if (UltraPermitted == 0)
+ {
+ SynchronousMessage = "Fast";
+ CommonSynchronousNegotiation = true;
+ }
+ else if (UltraPermitted == AllTargetsMask)
+ {
+ SynchronousMessage = "Ultra";
+ CommonSynchronousNegotiation = true;
+ }
+ }
+ }
+ if (!CommonSynchronousNegotiation)
+ {
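+      /*
+        Build a per-target legend: 'N' means no synchronous negotiation,
+        'S' slow synchronous, 'F' fast, and 'U' ultra; '#' marks the Host
+        Adapter's own SCSI ID.
+      */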
+ for (TargetID = 0;
+ TargetID < HostAdapter->MaxTargetDevices;
+ TargetID++)
+ SynchronousString[TargetID] =
+ ((!(SynchronousPermitted & (1 << TargetID))) ? 'N' :
+ (!(FastPermitted & (1 << TargetID)) ? 'S' :
+ (!(UltraPermitted & (1 << TargetID)) ? 'F' : 'U')));
+ SynchronousString[HostAdapter->SCSI_ID] = '#';
+ SynchronousString[HostAdapter->MaxTargetDevices] = '\0';
+ }
+ }
+ else SynchronousMessage =
+ (SynchronousPermitted == 0 ? "Disabled" : "Enabled");
+ WidePermitted = HostAdapter->WidePermitted & AllTargetsMask;
+ if (WidePermitted == 0)
+ WideMessage = "Disabled";
+ else if (WidePermitted == AllTargetsMask)
+ WideMessage = "Enabled";
+ else
+ {
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ WideString[TargetID] =
+ ((WidePermitted & (1 << TargetID)) ? 'Y' : 'N');
+ WideString[HostAdapter->SCSI_ID] = '#';
+ WideString[HostAdapter->MaxTargetDevices] = '\0';
+ }
+ DisconnectPermitted = HostAdapter->DisconnectPermitted & AllTargetsMask;
+ if (DisconnectPermitted == 0)
+ DisconnectMessage = "Disabled";
+ else if (DisconnectPermitted == AllTargetsMask)
+ DisconnectMessage = "Enabled";
+ else
+ {
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ DisconnectString[TargetID] =
+ ((DisconnectPermitted & (1 << TargetID)) ? 'Y' : 'N');
+ DisconnectString[HostAdapter->SCSI_ID] = '#';
+ DisconnectString[HostAdapter->MaxTargetDevices] = '\0';
+ }
+ TaggedQueuingPermitted =
+ HostAdapter->TaggedQueuingPermitted & AllTargetsMask;
+ if (TaggedQueuingPermitted == 0)
+ TaggedQueuingMessage = "Disabled";
+ else if (TaggedQueuingPermitted == AllTargetsMask)
+ TaggedQueuingMessage = "Enabled";
+ else
+ {
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ TaggedQueuingString[TargetID] =
+ ((TaggedQueuingPermitted & (1 << TargetID)) ? 'Y' : 'N');
+ TaggedQueuingString[HostAdapter->SCSI_ID] = '#';
+ TaggedQueuingString[HostAdapter->MaxTargetDevices] = '\0';
+ }
+ BusLogic_Info(" Synchronous Negotiation: %s, Wide Negotiation: %s\n",
+ HostAdapter, SynchronousMessage, WideMessage);
+ BusLogic_Info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n",
+ HostAdapter, DisconnectMessage, TaggedQueuingMessage);
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ BusLogic_Info(" Scatter/Gather Limit: %d of %d segments, "
+ "Mailboxes: %d\n", HostAdapter,
+ HostAdapter->DriverScatterGatherLimit,
+ HostAdapter->HostAdapterScatterGatherLimit,
+ HostAdapter->MailboxCount);
+ BusLogic_Info(" Driver Queue Depth: %d, "
+ "Host Adapter Queue Depth: %d\n",
+ HostAdapter, HostAdapter->DriverQueueDepth,
+ HostAdapter->HostAdapterQueueDepth);
+ }
+ else BusLogic_Info(" Driver Queue Depth: %d, "
+ "Scatter/Gather Limit: %d segments\n",
+ HostAdapter, HostAdapter->DriverQueueDepth,
+ HostAdapter->DriverScatterGatherLimit);
+ BusLogic_Info(" Tagged Queue Depth: ", HostAdapter);
+ CommonTaggedQueueDepth = true;
+ for (TargetID = 1; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ if (HostAdapter->QueueDepth[TargetID] != HostAdapter->QueueDepth[0])
+ {
+ CommonTaggedQueueDepth = false;
+ break;
+ }
+ if (CommonTaggedQueueDepth)
+ {
+ if (HostAdapter->QueueDepth[0] > 0)
+ BusLogic_Info("%d", HostAdapter, HostAdapter->QueueDepth[0]);
+ else BusLogic_Info("Automatic", HostAdapter);
+ }
+ else BusLogic_Info("Individual", HostAdapter);
+ BusLogic_Info(", Untagged Queue Depth: %d\n", HostAdapter,
+ HostAdapter->UntaggedQueueDepth);
+ CommonErrorRecovery = true;
+ for (TargetID = 1; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ if (HostAdapter->ErrorRecoveryStrategy[TargetID] !=
+ HostAdapter->ErrorRecoveryStrategy[0])
+ {
+ CommonErrorRecovery = false;
+ break;
+ }
+ if (CommonErrorRecovery)
+ ErrorRecoveryMessage =
+ BusLogic_ErrorRecoveryStrategyNames[
+ HostAdapter->ErrorRecoveryStrategy[0]];
+ else
+ {
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ ErrorRecoveryString[TargetID] =
+ BusLogic_ErrorRecoveryStrategyLetters[
+ HostAdapter->ErrorRecoveryStrategy[TargetID]];
+ ErrorRecoveryString[HostAdapter->SCSI_ID] = '#';
+ ErrorRecoveryString[HostAdapter->MaxTargetDevices] = '\0';
+ }
+ BusLogic_Info(" Error Recovery Strategy: %s, SCSI Bus Reset: %s\n",
+ HostAdapter, ErrorRecoveryMessage,
+ (HostAdapter->BusResetEnabled ? "Enabled" : "Disabled"));
+ if (HostAdapter->TerminationInfoValid)
+ {
+ if (HostAdapter->HostWideSCSI)
+ BusLogic_Info(" SCSI Bus Termination: %s", HostAdapter,
+ (HostAdapter->LowByteTerminated
+ ? (HostAdapter->HighByteTerminated
+ ? "Both Enabled" : "Low Enabled")
+ : (HostAdapter->HighByteTerminated
+ ? "High Enabled" : "Both Disabled")));
+ else BusLogic_Info(" SCSI Bus Termination: %s", HostAdapter,
+ (HostAdapter->LowByteTerminated ?
+ "Enabled" : "Disabled"));
+ if (HostAdapter->HostSupportsSCAM)
+ BusLogic_Info(", SCAM: %s", HostAdapter,
+ (HostAdapter->SCAM_Enabled
+ ? (HostAdapter->SCAM_Level2
+ ? "Enabled, Level 2" : "Enabled, Level 1")
+ : "Disabled"));
+ BusLogic_Info("\n", HostAdapter);
+ }
+ /*
+ Indicate reporting the Host Adapter configuration completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_AcquireResources acquires the system resources necessary to use
+ Host Adapter.
+*/
+
+static boolean BusLogic_AcquireResources(BusLogic_HostAdapter_T *HostAdapter)
+{
+ if (HostAdapter->IRQ_Channel == 0)
+ {
+ BusLogic_Error("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n",
+ HostAdapter);
+ return false;
+ }
+ /*
+ Acquire shared access to the IRQ Channel.
+ */
+ if (request_irq(HostAdapter->IRQ_Channel, BusLogic_InterruptHandler,
+ SA_INTERRUPT | SA_SHIRQ,
+ HostAdapter->FullModelName, HostAdapter) < 0)
+ {
+ BusLogic_Error("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n",
+ HostAdapter, HostAdapter->IRQ_Channel);
+ return false;
+ }
+ HostAdapter->IRQ_ChannelAcquired = true;
+ /*
+ Acquire exclusive access to the DMA Channel.
+ */
+ if (HostAdapter->DMA_Channel > 0)
+ {
+ if (request_dma(HostAdapter->DMA_Channel,
+ HostAdapter->FullModelName) < 0)
+ {
+ BusLogic_Error("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n",
+ HostAdapter, HostAdapter->DMA_Channel);
+ return false;
+ }
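+      /* DMA_MODE_CASCADE hands the ISA DMA Channel over to the bus mastering
+         Host Adapter, which then performs its own transfers on that channel. */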
+ set_dma_mode(HostAdapter->DMA_Channel, DMA_MODE_CASCADE);
+ enable_dma(HostAdapter->DMA_Channel);
+ HostAdapter->DMA_ChannelAcquired = true;
+ }
+ /*
+    Indicate the System Resource Acquisition completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_ReleaseResources releases any system resources previously acquired
+ by BusLogic_AcquireResources.
+*/
+
+static void BusLogic_ReleaseResources(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /*
+ Release shared access to the IRQ Channel.
+ */
+ if (HostAdapter->IRQ_ChannelAcquired)
+ free_irq(HostAdapter->IRQ_Channel, HostAdapter);
+ /*
+ Release exclusive access to the DMA Channel.
+ */
+ if (HostAdapter->DMA_ChannelAcquired)
+ free_dma(HostAdapter->DMA_Channel);
+}
+
+
+/*
+ BusLogic_InitializeHostAdapter initializes Host Adapter. This is the only
+ function called during SCSI Host Adapter detection which modifies the state
+ of the Host Adapter from its initial power on or hard reset state.
+*/
+
+static boolean BusLogic_InitializeHostAdapter(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ BusLogic_ExtendedMailboxRequest_T ExtendedMailboxRequest;
+ BusLogic_RoundRobinModeRequest_T RoundRobinModeRequest;
+ BusLogic_SetCCBFormatRequest_T SetCCBFormatRequest;
+ int TargetID;
+ /*
+ Initialize the pointers to the first and last CCBs that are queued for
+ completion processing.
+ */
+ HostAdapter->FirstCompletedCCB = NULL;
+ HostAdapter->LastCompletedCCB = NULL;
+ /*
+ Initialize the Bus Device Reset Pending CCB, Tagged Queuing Active,
+ Command Successful Flag, Active Commands, and Commands Since Reset
+ for each Target Device.
+ */
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ HostAdapter->BusDeviceResetPendingCCB[TargetID] = NULL;
+ HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false;
+ HostAdapter->TargetFlags[TargetID].CommandSuccessfulFlag = false;
+ HostAdapter->ActiveCommands[TargetID] = 0;
+ HostAdapter->CommandsSinceReset[TargetID] = 0;
+ }
+ /*
+ FlashPoint Host Adapters do not use Outgoing and Incoming Mailboxes.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter)) goto Done;
+ /*
+ Initialize the Outgoing and Incoming Mailbox pointers.
+ */
+ HostAdapter->FirstOutgoingMailbox =
+ (BusLogic_OutgoingMailbox_T *) HostAdapter->MailboxSpace;
+ HostAdapter->LastOutgoingMailbox =
+ HostAdapter->FirstOutgoingMailbox + HostAdapter->MailboxCount - 1;
+ HostAdapter->NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox;
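+  /* The Incoming Mailboxes immediately follow the Outgoing Mailboxes in MailboxSpace. */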
+ HostAdapter->FirstIncomingMailbox =
+ (BusLogic_IncomingMailbox_T *) (HostAdapter->LastOutgoingMailbox + 1);
+ HostAdapter->LastIncomingMailbox =
+ HostAdapter->FirstIncomingMailbox + HostAdapter->MailboxCount - 1;
+ HostAdapter->NextIncomingMailbox = HostAdapter->FirstIncomingMailbox;
+ /*
+ Initialize the Outgoing and Incoming Mailbox structures.
+ */
+ memset(HostAdapter->FirstOutgoingMailbox, 0,
+ HostAdapter->MailboxCount * sizeof(BusLogic_OutgoingMailbox_T));
+ memset(HostAdapter->FirstIncomingMailbox, 0,
+ HostAdapter->MailboxCount * sizeof(BusLogic_IncomingMailbox_T));
+ /*
+ Initialize the Host Adapter's Pointer to the Outgoing/Incoming Mailboxes.
+ */
+ ExtendedMailboxRequest.MailboxCount = HostAdapter->MailboxCount;
+ ExtendedMailboxRequest.BaseMailboxAddress =
+ Virtual_to_Bus(HostAdapter->FirstOutgoingMailbox);
+ if (BusLogic_Command(HostAdapter, BusLogic_InitializeExtendedMailbox,
+ &ExtendedMailboxRequest,
+ sizeof(ExtendedMailboxRequest), NULL, 0) < 0)
+ return BusLogic_Failure(HostAdapter, "MAILBOX INITIALIZATION");
+ /*
+ Enable Strict Round Robin Mode if supported by the Host Adapter. In
+ Strict Round Robin Mode, the Host Adapter only looks at the next Outgoing
+ Mailbox for each new command, rather than scanning through all the
+ Outgoing Mailboxes to find any that have new commands in them. Strict
+ Round Robin Mode is significantly more efficient.
+ */
+ if (HostAdapter->StrictRoundRobinModeSupport)
+ {
+ RoundRobinModeRequest = BusLogic_StrictRoundRobinMode;
+ if (BusLogic_Command(HostAdapter, BusLogic_EnableStrictRoundRobinMode,
+ &RoundRobinModeRequest,
+ sizeof(RoundRobinModeRequest), NULL, 0) < 0)
+ return BusLogic_Failure(HostAdapter, "ENABLE STRICT ROUND ROBIN MODE");
+ }
+ /*
+ For Host Adapters that support Extended LUN Format CCBs, issue the Set CCB
+ Format command to allow 32 Logical Units per Target Device.
+ */
+ if (HostAdapter->ExtendedLUNSupport)
+ {
+ SetCCBFormatRequest = BusLogic_ExtendedLUNFormatCCB;
+ if (BusLogic_Command(HostAdapter, BusLogic_SetCCBFormat,
+ &SetCCBFormatRequest, sizeof(SetCCBFormatRequest),
+ NULL, 0) < 0)
+ return BusLogic_Failure(HostAdapter, "SET CCB FORMAT");
+ }
+ /*
+ Announce Successful Initialization.
+ */
+Done:
+ if (!HostAdapter->HostAdapterInitialized)
+ {
+ BusLogic_Info("*** %s Initialized Successfully ***\n",
+ HostAdapter, HostAdapter->FullModelName);
+ BusLogic_Info("\n", HostAdapter);
+ }
+ else BusLogic_Warning("*** %s Initialized Successfully ***\n",
+ HostAdapter, HostAdapter->FullModelName);
+ HostAdapter->HostAdapterInitialized = true;
+ /*
+ Indicate the Host Adapter Initialization completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_TargetDeviceInquiry inquires about the Target Devices accessible
+ through Host Adapter.
+*/
+
+static boolean BusLogic_TargetDeviceInquiry(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ BusLogic_InstalledDevices_T InstalledDevices;
+ BusLogic_InstalledDevices8_T InstalledDevicesID0to7;
+ BusLogic_SetupInformation_T SetupInformation;
+ BusLogic_SynchronousPeriod_T SynchronousPeriod;
+ BusLogic_RequestedReplyLength_T RequestedReplyLength;
+ int TargetID;
+ /*
+ Wait a few seconds between the Host Adapter Hard Reset which initiates
+ a SCSI Bus Reset and issuing any SCSI Commands. Some SCSI devices get
+ confused if they receive SCSI Commands too soon after a SCSI Bus Reset.
+ */
+ BusLogic_Delay(HostAdapter->BusSettleTime);
+ /*
+ FlashPoint Host Adapters do not provide for Target Device Inquiry.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter)) return true;
+ /*
+ Inhibit the Target Device Inquiry if requested.
+ */
+ if (HostAdapter->DriverOptions != NULL &&
+ HostAdapter->DriverOptions->LocalOptions.InhibitTargetInquiry)
+ return true;
+ /*
+ Issue the Inquire Target Devices command for host adapters with firmware
+ version 4.25 or later, or the Inquire Installed Devices ID 0 to 7 command
+ for older host adapters. This is necessary to force Synchronous Transfer
+ Negotiation so that the Inquire Setup Information and Inquire Synchronous
+ Period commands will return valid data. The Inquire Target Devices command
+ is preferable to Inquire Installed Devices ID 0 to 7 since it only probes
+ Logical Unit 0 of each Target Device.
+ */
+ if (strcmp(HostAdapter->FirmwareVersion, "4.25") >= 0)
+ {
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireTargetDevices, NULL, 0,
+ &InstalledDevices, sizeof(InstalledDevices))
+ != sizeof(InstalledDevices))
+ return BusLogic_Failure(HostAdapter, "INQUIRE TARGET DEVICES");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ HostAdapter->TargetFlags[TargetID].TargetExists =
+ (InstalledDevices & (1 << TargetID) ? true : false);
+ }
+ else
+ {
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireInstalledDevicesID0to7,
+ NULL, 0, &InstalledDevicesID0to7,
+ sizeof(InstalledDevicesID0to7))
+ != sizeof(InstalledDevicesID0to7))
+ return BusLogic_Failure(HostAdapter,
+ "INQUIRE INSTALLED DEVICES ID 0 TO 7");
+ for (TargetID = 0; TargetID < 8; TargetID++)
+ HostAdapter->TargetFlags[TargetID].TargetExists =
+ (InstalledDevicesID0to7[TargetID] != 0 ? true : false);
+ }
+ /*
+ Issue the Inquire Setup Information command.
+ */
+ RequestedReplyLength = sizeof(SetupInformation);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &SetupInformation, sizeof(SetupInformation))
+ != sizeof(SetupInformation))
+ return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ HostAdapter->SynchronousOffset[TargetID] =
+ (TargetID < 8
+ ? SetupInformation.SynchronousValuesID0to7[TargetID].Offset
+ : SetupInformation.SynchronousValuesID8to15[TargetID-8].Offset);
+ if (strcmp(HostAdapter->FirmwareVersion, "5.06L") >= 0)
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ HostAdapter->TargetFlags[TargetID].WideTransfersActive =
+ (TargetID < 8
+ ? (SetupInformation.WideTransfersActiveID0to7 & (1 << TargetID)
+ ? true : false)
+ : (SetupInformation.WideTransfersActiveID8to15 & (1 << (TargetID-8))
+ ? true : false));
+ /*
+ Issue the Inquire Synchronous Period command.
+ */
+ if (HostAdapter->FirmwareVersion[0] >= '3')
+ {
+ RequestedReplyLength = sizeof(SynchronousPeriod);
+ if (BusLogic_Command(HostAdapter, BusLogic_InquireSynchronousPeriod,
+ &RequestedReplyLength, sizeof(RequestedReplyLength),
+ &SynchronousPeriod, sizeof(SynchronousPeriod))
+ != sizeof(SynchronousPeriod))
+ return BusLogic_Failure(HostAdapter, "INQUIRE SYNCHRONOUS PERIOD");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ HostAdapter->SynchronousPeriod[TargetID] = SynchronousPeriod[TargetID];
+ }
+ else
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ if (SetupInformation.SynchronousValuesID0to7[TargetID].Offset > 0)
+ HostAdapter->SynchronousPeriod[TargetID] =
+ 20 + 5 * SetupInformation.SynchronousValuesID0to7[TargetID]
+ .TransferPeriod;
+ /*
+ Indicate the Target Device Inquiry completed successfully.
+ */
+ return true;
+}
+
+
+/*
+ BusLogic_ReportTargetDeviceInfo reports about the Target Devices accessible
+ through Host Adapter.
+*/
+
+static void BusLogic_ReportTargetDeviceInfo(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ int TargetID;
+ /*
+ Inhibit the Target Device Inquiry and Reporting if requested.
+ */
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter) &&
+ HostAdapter->DriverOptions != NULL &&
+ HostAdapter->DriverOptions->LocalOptions.InhibitTargetInquiry)
+ return;
+ /*
+ Report on the Target Devices found.
+ */
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (TargetFlags->TargetExists && !TargetFlags->TargetInfoReported)
+ {
+ int SynchronousTransferRate = 0;
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ {
+ boolean WideTransfersActive;
+ FlashPoint_InquireTargetInfo(
+ HostAdapter->CardHandle, TargetID,
+ &HostAdapter->SynchronousPeriod[TargetID],
+ &HostAdapter->SynchronousOffset[TargetID],
+ &WideTransfersActive);
+ TargetFlags->WideTransfersActive = WideTransfersActive;
+ }
+ else if (TargetFlags->WideTransfersSupported &&
+ (HostAdapter->WidePermitted & (1 << TargetID)) &&
+ strcmp(HostAdapter->FirmwareVersion, "5.06L") < 0)
+ TargetFlags->WideTransfersActive = true;
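+	  /* SynchronousPeriod is kept in units of 10 ns, so this yields the
+	     transfer rate in KB per second. */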
+ if (HostAdapter->SynchronousPeriod[TargetID] > 0)
+ SynchronousTransferRate =
+ 100000 / HostAdapter->SynchronousPeriod[TargetID];
+ if (TargetFlags->WideTransfersActive)
+ SynchronousTransferRate <<= 1;
+ if (SynchronousTransferRate >= 9950)
+ {
+ SynchronousTransferRate = (SynchronousTransferRate + 50) / 100;
+ BusLogic_Info("Target %d: Queue Depth %d, %sSynchronous at "
+ "%d.%01d MB/sec, offset %d\n",
+ HostAdapter, TargetID,
+ HostAdapter->QueueDepth[TargetID],
+ (TargetFlags->WideTransfersActive ? "Wide " : ""),
+ SynchronousTransferRate / 10,
+ SynchronousTransferRate % 10,
+ HostAdapter->SynchronousOffset[TargetID]);
+ }
+ else if (SynchronousTransferRate > 0)
+ {
+ SynchronousTransferRate = (SynchronousTransferRate + 5) / 10;
+ BusLogic_Info("Target %d: Queue Depth %d, %sSynchronous at "
+ "%d.%02d MB/sec, offset %d\n",
+ HostAdapter, TargetID,
+ HostAdapter->QueueDepth[TargetID],
+ (TargetFlags->WideTransfersActive ? "Wide " : ""),
+ SynchronousTransferRate / 100,
+ SynchronousTransferRate % 100,
+ HostAdapter->SynchronousOffset[TargetID]);
+ }
+ else BusLogic_Info("Target %d: Queue Depth %d, Asynchronous\n",
+ HostAdapter, TargetID,
+ HostAdapter->QueueDepth[TargetID]);
+ TargetFlags->TargetInfoReported = true;
+ }
+ }
+}
+
+
+/*
+ BusLogic_InitializeHostStructure initializes the fields in the SCSI Host
+ structure. The base, io_port, n_io_ports, irq, and dma_channel fields in the
+ SCSI Host structure are intentionally left uninitialized, as this driver
+ handles acquisition and release of these resources explicitly, as well as
+ ensuring exclusive access to the Host Adapter hardware and data structures
+ through explicit acquisition and release of the Host Adapter's Lock.
+*/
+
+static void BusLogic_InitializeHostStructure(BusLogic_HostAdapter_T
+ *HostAdapter,
+ SCSI_Host_T *Host)
+{
+ Host->max_id = HostAdapter->MaxTargetDevices;
+ Host->max_lun = HostAdapter->MaxLogicalUnits;
+ Host->max_channel = 0;
+ Host->unique_id = HostAdapter->IO_Address;
+ Host->this_id = HostAdapter->SCSI_ID;
+ Host->can_queue = HostAdapter->DriverQueueDepth;
+ Host->sg_tablesize = HostAdapter->DriverScatterGatherLimit;
+ Host->unchecked_isa_dma = HostAdapter->BounceBuffersRequired;
+ Host->cmd_per_lun = HostAdapter->UntaggedQueueDepth;
+}
+
+
+/*
+ BusLogic_SelectQueueDepths selects Queue Depths for each Target Device based
+ on the Host Adapter's Total Queue Depth and the number, type, speed, and
+ capabilities of the Target Devices. When called for the last Host Adapter,
+ it reports on the Target Device Information for all BusLogic Host Adapters
+ since all the Target Devices have now been probed.
+*/
+
+static void BusLogic_SelectQueueDepths(SCSI_Host_T *Host,
+ SCSI_Device_T *DeviceList)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Host->hostdata;
+ int TaggedDeviceCount = 0, AutomaticTaggedDeviceCount = 0;
+ int UntaggedDeviceCount = 0, AutomaticTaggedQueueDepth = 0;
+ int AllocatedQueueDepth = 0;
+ SCSI_Device_T *Device;
+ int TargetID;
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ if (HostAdapter->TargetFlags[TargetID].TargetExists)
+ {
+ int QueueDepth = HostAdapter->QueueDepth[TargetID];
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingSupported &&
+ (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)))
+ {
+ TaggedDeviceCount++;
+ if (QueueDepth == 0) AutomaticTaggedDeviceCount++;
+ }
+ else
+ {
+ UntaggedDeviceCount++;
+ if (QueueDepth == 0 ||
+ QueueDepth > HostAdapter->UntaggedQueueDepth)
+ {
+ QueueDepth = HostAdapter->UntaggedQueueDepth;
+ HostAdapter->QueueDepth[TargetID] = QueueDepth;
+ }
+ }
+ AllocatedQueueDepth += QueueDepth;
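+	/* With a Queue Depth of one, Tagged Queuing provides no benefit,
+	   so disable it for this Target Device. */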
+ if (QueueDepth == 1)
+ HostAdapter->TaggedQueuingPermitted &= ~(1 << TargetID);
+ }
+ HostAdapter->TargetDeviceCount = TaggedDeviceCount + UntaggedDeviceCount;
+ if (AutomaticTaggedDeviceCount > 0)
+ {
+ AutomaticTaggedQueueDepth =
+ (HostAdapter->HostAdapterQueueDepth - AllocatedQueueDepth)
+ / AutomaticTaggedDeviceCount;
+ if (AutomaticTaggedQueueDepth > BusLogic_MaxAutomaticTaggedQueueDepth)
+ AutomaticTaggedQueueDepth = BusLogic_MaxAutomaticTaggedQueueDepth;
+ if (AutomaticTaggedQueueDepth < BusLogic_MinAutomaticTaggedQueueDepth)
+ AutomaticTaggedQueueDepth = BusLogic_MinAutomaticTaggedQueueDepth;
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ if (HostAdapter->TargetFlags[TargetID].TargetExists &&
+ HostAdapter->QueueDepth[TargetID] == 0)
+ {
+ AllocatedQueueDepth += AutomaticTaggedQueueDepth;
+ HostAdapter->QueueDepth[TargetID] = AutomaticTaggedQueueDepth;
+ }
+ }
+ for (Device = DeviceList; Device != NULL; Device = Device->next)
+ if (Device->host == Host)
+ Device->queue_depth = HostAdapter->QueueDepth[Device->id];
+ /* Allocate an extra CCB for each Target Device for a Bus Device Reset. */
+ AllocatedQueueDepth += HostAdapter->TargetDeviceCount;
+ if (AllocatedQueueDepth > HostAdapter->DriverQueueDepth)
+ AllocatedQueueDepth = HostAdapter->DriverQueueDepth;
+ BusLogic_CreateAdditionalCCBs(HostAdapter,
+ AllocatedQueueDepth
+ - HostAdapter->AllocatedCCBs,
+ false);
+ if (HostAdapter == BusLogic_LastRegisteredHostAdapter)
+ for (HostAdapter = BusLogic_FirstRegisteredHostAdapter;
+ HostAdapter != NULL;
+ HostAdapter = HostAdapter->Next)
+ BusLogic_ReportTargetDeviceInfo(HostAdapter);
+}
+
+
+/*
+ BusLogic_DetectHostAdapter probes for BusLogic Host Adapters at the standard
+ I/O Addresses where they may be located, initializing, registering, and
+ reporting the configuration of each BusLogic Host Adapter it finds. It
+ returns the number of BusLogic Host Adapters successfully initialized and
+ registered.
+*/
+
+int BusLogic_DetectHostAdapter(SCSI_Host_Template_T *HostTemplate)
+{
+ int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex;
+ BusLogic_HostAdapter_T *PrototypeHostAdapter;
+ if (BusLogic_ProbeOptions.NoProbe) return 0;
+ BusLogic_ProbeInfoList = (BusLogic_ProbeInfo_T *)
+ kmalloc(BusLogic_MaxHostAdapters * sizeof(BusLogic_ProbeInfo_T),
+ GFP_ATOMIC);
+ if (BusLogic_ProbeInfoList == NULL)
+ {
+ BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
+ return 0;
+ }
+ memset(BusLogic_ProbeInfoList, 0,
+ BusLogic_MaxHostAdapters * sizeof(BusLogic_ProbeInfo_T));
+ PrototypeHostAdapter = (BusLogic_HostAdapter_T *)
+ kmalloc(sizeof(BusLogic_HostAdapter_T), GFP_ATOMIC);
+ if (PrototypeHostAdapter == NULL)
+ {
+ kfree(BusLogic_ProbeInfoList);
+ BusLogic_Error("BusLogic: Unable to allocate Prototype "
+ "Host Adapter\n", NULL);
+ return 0;
+ }
+ memset(PrototypeHostAdapter, 0, sizeof(BusLogic_HostAdapter_T));
+ if (BusLogic_Options != NULL)
+ BusLogic_ParseDriverOptions(BusLogic_Options);
+ BusLogic_InitializeProbeInfoList(PrototypeHostAdapter);
+ for (ProbeIndex = 0; ProbeIndex < BusLogic_ProbeInfoCount; ProbeIndex++)
+ {
+ BusLogic_ProbeInfo_T *ProbeInfo = &BusLogic_ProbeInfoList[ProbeIndex];
+ BusLogic_HostAdapter_T *HostAdapter = PrototypeHostAdapter;
+ SCSI_Host_T *Host;
+ if (ProbeInfo->IO_Address == 0) continue;
+ memset(HostAdapter, 0, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter->HostAdapterType = ProbeInfo->HostAdapterType;
+ HostAdapter->HostAdapterBusType = ProbeInfo->HostAdapterBusType;
+ HostAdapter->IO_Address = ProbeInfo->IO_Address;
+ HostAdapter->PCI_Address = ProbeInfo->PCI_Address;
+ HostAdapter->Bus = ProbeInfo->Bus;
+ HostAdapter->Device = ProbeInfo->Device;
+ HostAdapter->IRQ_Channel = ProbeInfo->IRQ_Channel;
+ HostAdapter->AddressCount =
+ BusLogic_HostAdapterAddressCount[HostAdapter->HostAdapterType];
+ /*
+ Probe the Host Adapter. If unsuccessful, abort further initialization.
+ */
+ if (!BusLogic_ProbeHostAdapter(HostAdapter)) continue;
+ /*
+ Hard Reset the Host Adapter. If unsuccessful, abort further
+ initialization.
+ */
+ if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true)) continue;
+ /*
+ Check the Host Adapter. If unsuccessful, abort further initialization.
+ */
+ if (!BusLogic_CheckHostAdapter(HostAdapter)) continue;
+ /*
+ Initialize the Driver Options field if provided.
+ */
+ if (DriverOptionsIndex < BusLogic_DriverOptionsCount)
+ HostAdapter->DriverOptions =
+ &BusLogic_DriverOptions[DriverOptionsIndex++];
+ /*
+ Announce the Driver Version and Date, Author's Name, Copyright Notice,
+ and Electronic Mail Address.
+ */
+ BusLogic_AnnounceDriver(HostAdapter);
+ /*
+ Register usage of the I/O Address range. From this point onward, any
+ failure will be assumed to be due to a problem with the Host Adapter,
+ rather than due to having mistakenly identified this port as belonging
+ to a BusLogic Host Adapter. The I/O Address range will not be
+ released, thereby preventing it from being incorrectly identified as
+ any other type of Host Adapter.
+ */
+ request_region(HostAdapter->IO_Address, HostAdapter->AddressCount,
+ "BusLogic");
+ /*
+ Register the SCSI Host structure.
+ */
+ Host = scsi_register(HostTemplate, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter = (BusLogic_HostAdapter_T *) Host->hostdata;
+ memcpy(HostAdapter, PrototypeHostAdapter, sizeof(BusLogic_HostAdapter_T));
+ HostAdapter->SCSI_Host = Host;
+ HostAdapter->HostNumber = Host->host_no;
+ Host->select_queue_depths = BusLogic_SelectQueueDepths;
+ /*
+ Add Host Adapter to the end of the list of registered BusLogic
+ Host Adapters.
+ */
+ BusLogic_RegisterHostAdapter(HostAdapter);
+ /*
+ Read the Host Adapter Configuration, Configure the Host Adapter,
+ Acquire the System Resources necessary to use the Host Adapter, then
+ Create the Initial CCBs, Initialize the Host Adapter, and finally
+ perform Target Device Inquiry.
+ */
+ if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
+ BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
+ BusLogic_AcquireResources(HostAdapter) &&
+ BusLogic_CreateInitialCCBs(HostAdapter) &&
+ BusLogic_InitializeHostAdapter(HostAdapter) &&
+ BusLogic_TargetDeviceInquiry(HostAdapter))
+ {
+ /*
+ Initialization has been completed successfully. Release and
+ re-register usage of the I/O Address range so that the Model
+ Name of the Host Adapter will appear, and initialize the SCSI
+ Host structure.
+ */
+ release_region(HostAdapter->IO_Address,
+ HostAdapter->AddressCount);
+ request_region(HostAdapter->IO_Address,
+ HostAdapter->AddressCount,
+ HostAdapter->FullModelName);
+ BusLogic_InitializeHostStructure(HostAdapter, Host);
+ BusLogicHostAdapterCount++;
+ }
+ else
+ {
+ /*
+ An error occurred during Host Adapter Configuration Querying, Host
+ Adapter Configuration, Resource Acquisition, CCB Creation, Host
+ Adapter Initialization, or Target Device Inquiry, so remove Host
+ Adapter from the list of registered BusLogic Host Adapters, destroy
+ the CCBs, Release the System Resources, and Unregister the SCSI
+ Host.
+ */
+ BusLogic_DestroyCCBs(HostAdapter);
+ BusLogic_ReleaseResources(HostAdapter);
+ BusLogic_UnregisterHostAdapter(HostAdapter);
+ scsi_unregister(Host);
+ }
+ }
+ kfree(PrototypeHostAdapter);
+ kfree(BusLogic_ProbeInfoList);
+ BusLogic_ProbeInfoList = NULL;
+ return BusLogicHostAdapterCount;
+}
+
+
+/*
+ BusLogic_ReleaseHostAdapter releases all resources previously acquired to
+ support a specific Host Adapter, including the I/O Address range, and
+ unregisters the BusLogic Host Adapter.
+*/
+
+int BusLogic_ReleaseHostAdapter(SCSI_Host_T *Host)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Host->hostdata;
+ /*
+ FlashPoint Host Adapters must first be released by the FlashPoint
+ SCCB Manager.
+ */
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ FlashPoint_ReleaseHostAdapter(HostAdapter->CardHandle);
+ /*
+ Destroy the CCBs and release any system resources acquired to
+ support Host Adapter.
+ */
+ BusLogic_DestroyCCBs(HostAdapter);
+ BusLogic_ReleaseResources(HostAdapter);
+ /*
+ Release usage of the I/O Address range.
+ */
+ release_region(HostAdapter->IO_Address, HostAdapter->AddressCount);
+ /*
+ Remove Host Adapter from the list of registered BusLogic Host Adapters.
+ */
+ BusLogic_UnregisterHostAdapter(HostAdapter);
+ return 0;
+}
+
+
+/*
+ BusLogic_QueueCompletedCCB queues CCB for completion processing.
+*/
+
+static void BusLogic_QueueCompletedCCB(BusLogic_CCB_T *CCB)
+{
+ BusLogic_HostAdapter_T *HostAdapter = CCB->HostAdapter;
+ CCB->Status = BusLogic_CCB_Completed;
+ CCB->Next = NULL;
+ if (HostAdapter->FirstCompletedCCB == NULL)
+ {
+ HostAdapter->FirstCompletedCCB = CCB;
+ HostAdapter->LastCompletedCCB = CCB;
+ }
+ else
+ {
+ HostAdapter->LastCompletedCCB->Next = CCB;
+ HostAdapter->LastCompletedCCB = CCB;
+ }
+ HostAdapter->ActiveCommands[CCB->TargetID]--;
+}
+
+
+/*
+ BusLogic_ComputeResultCode computes a SCSI Subsystem Result Code from
+ the Host Adapter Status and Target Device Status.
+*/
+
+static int BusLogic_ComputeResultCode(BusLogic_HostAdapter_T *HostAdapter,
+ BusLogic_HostAdapterStatus_T
+ HostAdapterStatus,
+ BusLogic_TargetDeviceStatus_T
+ TargetDeviceStatus)
+{
+ int HostStatus;
+ switch (HostAdapterStatus)
+ {
+ case BusLogic_CommandCompletedNormally:
+ case BusLogic_LinkedCommandCompleted:
+ case BusLogic_LinkedCommandCompletedWithFlag:
+ HostStatus = DID_OK;
+ break;
+ case BusLogic_SCSISelectionTimeout:
+ HostStatus = DID_TIME_OUT;
+ break;
+ case BusLogic_InvalidOutgoingMailboxActionCode:
+ case BusLogic_InvalidCommandOperationCode:
+ case BusLogic_InvalidCommandParameter:
+ BusLogic_Warning("BusLogic Driver Protocol Error 0x%02X\n",
+ HostAdapter, HostAdapterStatus);
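+      /* No break: these protocol errors fall through and are also mapped to DID_ERROR. */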
+ case BusLogic_DataUnderRun:
+ case BusLogic_DataOverRun:
+ case BusLogic_UnexpectedBusFree:
+ case BusLogic_LinkedCCBhasInvalidLUN:
+ case BusLogic_AutoRequestSenseFailed:
+ case BusLogic_TaggedQueuingMessageRejected:
+ case BusLogic_UnsupportedMessageReceived:
+ case BusLogic_HostAdapterHardwareFailed:
+ case BusLogic_TargetDeviceReconnectedImproperly:
+ case BusLogic_AbortQueueGenerated:
+ case BusLogic_HostAdapterSoftwareError:
+ case BusLogic_HostAdapterHardwareTimeoutError:
+ case BusLogic_SCSIParityErrorDetected:
+ HostStatus = DID_ERROR;
+ break;
+ case BusLogic_InvalidBusPhaseRequested:
+ case BusLogic_TargetFailedResponseToATN:
+ case BusLogic_HostAdapterAssertedRST:
+ case BusLogic_OtherDeviceAssertedRST:
+ case BusLogic_HostAdapterAssertedBusDeviceReset:
+ HostStatus = DID_RESET;
+ break;
+ default:
+ BusLogic_Warning("Unknown Host Adapter Status 0x%02X\n",
+ HostAdapter, HostAdapterStatus);
+ HostStatus = DID_ERROR;
+ break;
+ }
+ return (HostStatus << 16) | TargetDeviceStatus;
+}
+
+
+/*
+ BusLogic_ScanIncomingMailboxes scans the Incoming Mailboxes saving any
+ Incoming Mailbox entries for completion processing.
+*/
+
+static void BusLogic_ScanIncomingMailboxes(BusLogic_HostAdapter_T *HostAdapter)
+{
+ /*
+ Scan through the Incoming Mailboxes in Strict Round Robin fashion, saving
+ any completed CCBs for further processing. It is essential that for each
+ CCB and SCSI Command issued, command completion processing is performed
+ exactly once. Therefore, only Incoming Mailboxes with completion code
+ Command Completed Without Error, Command Completed With Error, or Command
+ Aborted At Host Request are saved for completion processing. When an
+ Incoming Mailbox has a completion code of Aborted Command Not Found, the
+ CCB had already completed or been aborted before the current Abort request
+ was processed, and so completion processing has already occurred and no
+ further action should be taken.
+ */
+ BusLogic_IncomingMailbox_T *NextIncomingMailbox =
+ HostAdapter->NextIncomingMailbox;
+ BusLogic_CompletionCode_T CompletionCode;
+ while ((CompletionCode = NextIncomingMailbox->CompletionCode) !=
+ BusLogic_IncomingMailboxFree)
+ {
+ BusLogic_CCB_T *CCB = (BusLogic_CCB_T *)
+ Bus_to_Virtual(NextIncomingMailbox->CCB);
+ if (CompletionCode != BusLogic_AbortedCommandNotFound)
+ {
+ if (CCB->Status == BusLogic_CCB_Active ||
+ CCB->Status == BusLogic_CCB_Reset)
+ {
+ /*
+ Save the Completion Code for this CCB and queue the CCB
+ for completion processing.
+ */
+ CCB->CompletionCode = CompletionCode;
+ BusLogic_QueueCompletedCCB(CCB);
+ }
+ else
+ {
+ /*
+ If a CCB ever appears in an Incoming Mailbox and is not marked
+ as status Active or Reset, then there is most likely a bug in
+ the Host Adapter firmware.
+ */
+ BusLogic_Warning("Illegal CCB #%ld status %d in "
+ "Incoming Mailbox\n", HostAdapter,
+ CCB->SerialNumber, CCB->Status);
+ }
+ }
+ NextIncomingMailbox->CompletionCode = BusLogic_IncomingMailboxFree;
+ if (++NextIncomingMailbox > HostAdapter->LastIncomingMailbox)
+ NextIncomingMailbox = HostAdapter->FirstIncomingMailbox;
+ }
+ HostAdapter->NextIncomingMailbox = NextIncomingMailbox;
+}
+
+
+/*
+ BusLogic_ProcessCompletedCCBs iterates over the completed CCBs for Host
+ Adapter setting the SCSI Command Result Codes, deallocating the CCBs, and
+ calling the SCSI Subsystem Completion Routines. The Host Adapter's Lock
+ should already have been acquired by the caller.
+*/
+
+static void BusLogic_ProcessCompletedCCBs(BusLogic_HostAdapter_T *HostAdapter)
+{
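+  /* Guard against reentrant completion processing while the completion
+     routines below are still being called. */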
+ if (HostAdapter->ProcessCompletedCCBsActive) return;
+ HostAdapter->ProcessCompletedCCBsActive = true;
+ while (HostAdapter->FirstCompletedCCB != NULL)
+ {
+ BusLogic_CCB_T *CCB = HostAdapter->FirstCompletedCCB;
+ SCSI_Command_T *Command = CCB->Command;
+ HostAdapter->FirstCompletedCCB = CCB->Next;
+ if (HostAdapter->FirstCompletedCCB == NULL)
+ HostAdapter->LastCompletedCCB = NULL;
+ /*
+ Process the Completed CCB.
+ */
+ if (CCB->Opcode == BusLogic_BusDeviceReset)
+ {
+ int TargetID = CCB->TargetID;
+ BusLogic_Warning("Bus Device Reset CCB #%ld to Target "
+ "%d Completed\n", HostAdapter,
+ CCB->SerialNumber, TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].BusDeviceResetsCompleted);
+ HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false;
+ HostAdapter->CommandsSinceReset[TargetID] = 0;
+ HostAdapter->LastResetCompleted[TargetID] = jiffies;
+ /*
+ Place CCB back on the Host Adapter's free list.
+ */
+ BusLogic_DeallocateCCB(CCB);
+ /*
+ Bus Device Reset CCBs have the Command field non-NULL only when a
+ Bus Device Reset was requested for a Command that did not have a
+ currently active CCB in the Host Adapter (i.e., a Synchronous
+ Bus Device Reset), and hence would not have its Completion Routine
+ called otherwise.
+ */
+ while (Command != NULL)
+ {
+ SCSI_Command_T *NextCommand = Command->reset_chain;
+ Command->reset_chain = NULL;
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ Command = NextCommand;
+ }
+ /*
+ Iterate over the CCBs for this Host Adapter performing completion
+ processing for any CCBs marked as Reset for this Target.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Reset && CCB->TargetID == TargetID)
+ {
+ Command = CCB->Command;
+ BusLogic_DeallocateCCB(CCB);
+ HostAdapter->ActiveCommands[TargetID]--;
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ }
+ HostAdapter->BusDeviceResetPendingCCB[TargetID] = NULL;
+ }
+ else
+ {
+ /*
+ Translate the Completion Code, Host Adapter Status, and Target
+ Device Status into a SCSI Subsystem Result Code.
+ */
+ switch (CCB->CompletionCode)
+ {
+ case BusLogic_IncomingMailboxFree:
+ case BusLogic_AbortedCommandNotFound:
+ case BusLogic_InvalidCCB:
+ BusLogic_Warning("CCB #%ld to Target %d Impossible State\n",
+ HostAdapter, CCB->SerialNumber, CCB->TargetID);
+ break;
+ case BusLogic_CommandCompletedWithoutError:
+ HostAdapter->TargetStatistics[CCB->TargetID]
+ .CommandsCompleted++;
+ HostAdapter->TargetFlags[CCB->TargetID]
+ .CommandSuccessfulFlag = true;
+ Command->result = DID_OK << 16;
+ break;
+ case BusLogic_CommandAbortedAtHostRequest:
+ BusLogic_Warning("CCB #%ld to Target %d Aborted\n",
+ HostAdapter, CCB->SerialNumber, CCB->TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[CCB->TargetID]
+ .CommandAbortsCompleted);
+ Command->result = DID_ABORT << 16;
+ break;
+ case BusLogic_CommandCompletedWithError:
+ Command->result =
+ BusLogic_ComputeResultCode(HostAdapter,
+ CCB->HostAdapterStatus,
+ CCB->TargetDeviceStatus);
+ if (CCB->HostAdapterStatus != BusLogic_SCSISelectionTimeout)
+ {
+ HostAdapter->TargetStatistics[CCB->TargetID]
+ .CommandsCompleted++;
+ if (BusLogic_GlobalOptions.TraceErrors)
+ {
+ int i;
+ BusLogic_Notice("CCB #%ld Target %d: Result %X Host "
+ "Adapter Status %02X "
+ "Target Status %02X\n",
+ HostAdapter, CCB->SerialNumber,
+ CCB->TargetID, Command->result,
+ CCB->HostAdapterStatus,
+ CCB->TargetDeviceStatus);
+ BusLogic_Notice("CDB ", HostAdapter);
+ for (i = 0; i < CCB->CDB_Length; i++)
+ BusLogic_Notice(" %02X", HostAdapter, CCB->CDB[i]);
+ BusLogic_Notice("\n", HostAdapter);
+ BusLogic_Notice("Sense ", HostAdapter);
+ for (i = 0; i < CCB->SenseDataLength; i++)
+ BusLogic_Notice(" %02X", HostAdapter,
+ Command->sense_buffer[i]);
+ BusLogic_Notice("\n", HostAdapter);
+ }
+ }
+ break;
+ }
+ /*
+ When an INQUIRY command completes normally, save the
+ CmdQue (Tagged Queuing Supported) and WBus16 (16 Bit
+ Wide Data Transfers Supported) bits.
+ */
+ if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 &&
+ CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally)
+ {
+ BusLogic_TargetFlags_T *TargetFlags =
+ &HostAdapter->TargetFlags[CCB->TargetID];
+ SCSI_Inquiry_T *InquiryResult =
+ (SCSI_Inquiry_T *) Command->request_buffer;
+ TargetFlags->TargetExists = true;
+ TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue;
+ TargetFlags->WideTransfersSupported = InquiryResult->WBus16;
+ }
+ /*
+ Place CCB back on the Host Adapter's free list.
+ */
+ BusLogic_DeallocateCCB(CCB);
+ /*
+ Call the SCSI Command Completion Routine.
+ */
+ Command->scsi_done(Command);
+ }
+ }
+ HostAdapter->ProcessCompletedCCBsActive = false;
+}
+
+
+/*
+ BusLogic_InterruptHandler handles hardware interrupts from BusLogic Host
+ Adapters.
+*/
+
+static void BusLogic_InterruptHandler(int IRQ_Channel,
+ void *DeviceIdentifier,
+ Registers_T *InterruptRegisters)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) DeviceIdentifier;
+ ProcessorFlags_T ProcessorFlags;
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLockIH(HostAdapter, &ProcessorFlags);
+ /*
+ Handle Interrupts appropriately for each Host Adapter type.
+ */
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ BusLogic_InterruptRegister_T InterruptRegister;
+ /*
+ Read the Host Adapter Interrupt Register.
+ */
+ InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
+ if (InterruptRegister.Bits.InterruptValid)
+ {
+ /*
+ Acknowledge the interrupt and reset the Host Adapter
+ Interrupt Register.
+ */
+ BusLogic_InterruptReset(HostAdapter);
+ /*
+ Process valid External SCSI Bus Reset and Incoming Mailbox
+ Loaded Interrupts. Command Complete Interrupts are noted,
+ and Outgoing Mailbox Available Interrupts are ignored, as
+ they are never enabled.
+ */
+ if (InterruptRegister.Bits.ExternalBusReset)
+ HostAdapter->HostAdapterExternalReset = true;
+ else if (InterruptRegister.Bits.IncomingMailboxLoaded)
+ BusLogic_ScanIncomingMailboxes(HostAdapter);
+ else if (InterruptRegister.Bits.CommandComplete)
+ HostAdapter->HostAdapterCommandCompleted = true;
+ }
+ }
+ else
+ {
+ /*
+ Check if there is a pending interrupt for this Host Adapter.
+ */
+ if (FlashPoint_InterruptPending(HostAdapter->CardHandle))
+ switch (FlashPoint_HandleInterrupt(HostAdapter->CardHandle))
+ {
+ case FlashPoint_NormalInterrupt:
+ break;
+ case FlashPoint_ExternalBusReset:
+ HostAdapter->HostAdapterExternalReset = true;
+ break;
+ case FlashPoint_InternalError:
+ BusLogic_Warning("Internal FlashPoint Error detected"
+ " - Resetting Host Adapter\n", HostAdapter);
+ HostAdapter->HostAdapterInternalError = true;
+ break;
+ }
+ }
+ /*
+ Process any completed CCBs.
+ */
+ if (HostAdapter->FirstCompletedCCB != NULL)
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ /*
+ Reset the Host Adapter if requested.
+ */
+ if (HostAdapter->HostAdapterExternalReset ||
+ HostAdapter->HostAdapterInternalError)
+ {
+ BusLogic_ResetHostAdapter(HostAdapter, NULL, 0);
+ HostAdapter->HostAdapterExternalReset = false;
+ HostAdapter->HostAdapterInternalError = false;
+ scsi_mark_host_reset(HostAdapter->SCSI_Host);
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+ BusLogic_ReleaseHostAdapterLockIH(HostAdapter, &ProcessorFlags);
+}
+
+
+/*
+ BusLogic_WriteOutgoingMailbox places CCB and Action Code into an Outgoing
+ Mailbox for execution by Host Adapter. The Host Adapter's Lock should
+ already have been acquired by the caller.
+*/
+
+static boolean BusLogic_WriteOutgoingMailbox(BusLogic_HostAdapter_T
+ *HostAdapter,
+ BusLogic_ActionCode_T ActionCode,
+ BusLogic_CCB_T *CCB)
+{
+ BusLogic_OutgoingMailbox_T *NextOutgoingMailbox;
+ NextOutgoingMailbox = HostAdapter->NextOutgoingMailbox;
+ if (NextOutgoingMailbox->ActionCode == BusLogic_OutgoingMailboxFree)
+ {
+ CCB->Status = BusLogic_CCB_Active;
+ /*
+ The CCB field must be written before the Action Code field since
+ the Host Adapter is operating asynchronously and the locking code
+ does not protect against simultaneous access by the Host Adapter.
+ */
+ NextOutgoingMailbox->CCB = Virtual_to_Bus(CCB);
+ NextOutgoingMailbox->ActionCode = ActionCode;
+ BusLogic_StartMailboxCommand(HostAdapter);
+ if (++NextOutgoingMailbox > HostAdapter->LastOutgoingMailbox)
+ NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox;
+ HostAdapter->NextOutgoingMailbox = NextOutgoingMailbox;
+ if (ActionCode == BusLogic_MailboxStartCommand)
+ {
+ HostAdapter->ActiveCommands[CCB->TargetID]++;
+ if (CCB->Opcode != BusLogic_BusDeviceReset)
+ HostAdapter->TargetStatistics[CCB->TargetID].CommandsAttempted++;
+ }
+ return true;
+ }
+ return false;
+}
+
+
+/*
+ BusLogic_QueueCommand creates a CCB for Command and places it into an
+ Outgoing Mailbox for execution by the associated Host Adapter.
+*/
+
+int BusLogic_QueueCommand(SCSI_Command_T *Command,
+ void (*CompletionRoutine)(SCSI_Command_T *))
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ BusLogic_TargetFlags_T *TargetFlags =
+ &HostAdapter->TargetFlags[Command->target];
+ BusLogic_TargetStatistics_T *TargetStatistics =
+ HostAdapter->TargetStatistics;
+ unsigned char *CDB = Command->cmnd;
+ int CDB_Length = Command->cmd_len;
+ int TargetID = Command->target;
+ int LogicalUnit = Command->lun;
+ void *BufferPointer = Command->request_buffer;
+ int BufferLength = Command->request_bufflen;
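+  /* use_sg is zero for a single contiguous buffer; otherwise it is the number
+     of Scatter/Gather segments and request_buffer points to the scatterlist. */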
+ int SegmentCount = Command->use_sg;
+ ProcessorFlags_T ProcessorFlags;
+ BusLogic_CCB_T *CCB;
+ /*
+ SCSI REQUEST_SENSE commands will be executed automatically by the Host
+ Adapter for any errors, so they should not be executed explicitly unless
+ the Sense Data is zero indicating that no error occurred.
+ */
+ if (CDB[0] == REQUEST_SENSE && Command->sense_buffer[0] != 0)
+ {
+ Command->result = DID_OK << 16;
+ CompletionRoutine(Command);
+ return 0;
+ }
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ Allocate a CCB from the Host Adapter's free list. In the unlikely event
+ that there are none available and memory allocation fails, wait 1 second
+ and try again. If that fails, the Host Adapter is probably hung so signal
+ an error as a Host Adapter Hard Reset should be initiated soon.
+ */
+ CCB = BusLogic_AllocateCCB(HostAdapter);
+ if (CCB == NULL)
+ {
+ BusLogic_Delay(1);
+ CCB = BusLogic_AllocateCCB(HostAdapter);
+ if (CCB == NULL)
+ {
+ Command->result = DID_ERROR << 16;
+ CompletionRoutine(Command);
+ goto Done;
+ }
+ }
+ /*
+ Initialize the fields in the BusLogic Command Control Block (CCB).
+ */
+ if (SegmentCount == 0)
+ {
+ CCB->Opcode = BusLogic_InitiatorCCB;
+ CCB->DataLength = BufferLength;
+ CCB->DataPointer = Virtual_to_Bus(BufferPointer);
+ }
+ else
+ {
+ SCSI_ScatterList_T *ScatterList = (SCSI_ScatterList_T *) BufferPointer;
+ int Segment;
+ CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather;
+ CCB->DataLength = SegmentCount * sizeof(BusLogic_ScatterGatherSegment_T);
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ CCB->DataPointer = Virtual_to_Bus(CCB->ScatterGatherList);
+ else CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList);
+ for (Segment = 0; Segment < SegmentCount; Segment++)
+ {
+ CCB->ScatterGatherList[Segment].SegmentByteCount =
+ ScatterList[Segment].length;
+ CCB->ScatterGatherList[Segment].SegmentDataPointer =
+ Virtual_to_Bus(ScatterList[Segment].address);
+ }
+ }
+ switch (CDB[0])
+ {
+ case READ_6:
+ case READ_10:
+ CCB->DataDirection = BusLogic_DataInLengthChecked;
+ TargetStatistics[TargetID].ReadCommands++;
+ BusLogic_IncrementByteCounter(
+ &TargetStatistics[TargetID].TotalBytesRead, BufferLength);
+ BusLogic_IncrementSizeBucket(
+ TargetStatistics[TargetID].ReadCommandSizeBuckets, BufferLength);
+ break;
+ case WRITE_6:
+ case WRITE_10:
+ CCB->DataDirection = BusLogic_DataOutLengthChecked;
+ TargetStatistics[TargetID].WriteCommands++;
+ BusLogic_IncrementByteCounter(
+ &TargetStatistics[TargetID].TotalBytesWritten, BufferLength);
+ BusLogic_IncrementSizeBucket(
+ TargetStatistics[TargetID].WriteCommandSizeBuckets, BufferLength);
+ break;
+ default:
+ CCB->DataDirection = BusLogic_UncheckedDataTransfer;
+ break;
+ }
+ CCB->CDB_Length = CDB_Length;
+ CCB->SenseDataLength = sizeof(Command->sense_buffer);
+ CCB->HostAdapterStatus = 0;
+ CCB->TargetDeviceStatus = 0;
+ CCB->TargetID = TargetID;
+ CCB->LogicalUnit = LogicalUnit;
+ CCB->TagEnable = false;
+ CCB->LegacyTagEnable = false;
+ /*
+ BusLogic recommends that after a Reset the first couple of commands that
+ are sent to a Target Device be sent in a non Tagged Queue fashion so that
+ the Host Adapter and Target Device can establish Synchronous and Wide
+ Transfer before Queue Tag messages can interfere with the Synchronous and
+ Wide Negotiation messages. By waiting to enable Tagged Queuing until after
+ the first BusLogic_MaxTaggedQueueDepth commands have been queued, it is
+ assured that after a Reset any pending commands are requeued before Tagged
+ Queuing is enabled and that the Tagged Queuing message will not occur while
+ the partition table is being printed. In addition, some devices do not
+ properly handle the transition from non-tagged to tagged commands, so it is
+ necessary to wait until there are no pending commands for a target device
+ before queuing tagged commands.
+ */
+ if (HostAdapter->CommandsSinceReset[TargetID]++ >=
+ BusLogic_MaxTaggedQueueDepth &&
+ !TargetFlags->TaggedQueuingActive &&
+ HostAdapter->ActiveCommands[TargetID] == 0 &&
+ TargetFlags->TaggedQueuingSupported &&
+ (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)))
+ {
+ TargetFlags->TaggedQueuingActive = true;
+ BusLogic_Notice("Tagged Queuing now active for Target %d\n",
+ HostAdapter, TargetID);
+ }
+ if (TargetFlags->TaggedQueuingActive)
+ {
+ BusLogic_QueueTag_T QueueTag = BusLogic_SimpleQueueTag;
+ /*
+ When using Tagged Queuing with Simple Queue Tags, it appears that disk
+ drive controllers do not guarantee that a queued command will not
+ remain in a disconnected state indefinitely if commands that read or
+ write nearer the head position continue to arrive without interruption.
+ Therefore, for each Target Device this driver keeps track of the last
+ time either the queue was empty or an Ordered Queue Tag was issued. If
+ more than 4 seconds (one fifth of the 20 second disk timeout) have
+ elapsed since this last sequence point, this command will be issued
+ with an Ordered Queue Tag rather than a Simple Queue Tag, which forces
+ the Target Device to complete all previously queued commands before
+ this command may be executed.
+ */
+ if (HostAdapter->ActiveCommands[TargetID] == 0)
+ HostAdapter->LastSequencePoint[TargetID] = jiffies;
+ else if (jiffies - HostAdapter->LastSequencePoint[TargetID] > 4*HZ)
+ {
+ HostAdapter->LastSequencePoint[TargetID] = jiffies;
+ QueueTag = BusLogic_OrderedQueueTag;
+ }
+ if (HostAdapter->ExtendedLUNSupport)
+ {
+ CCB->TagEnable = true;
+ CCB->QueueTag = QueueTag;
+ }
+ else
+ {
+ CCB->LegacyTagEnable = true;
+ CCB->LegacyQueueTag = QueueTag;
+ }
+ }
+ memcpy(CCB->CDB, CDB, CDB_Length);
+ CCB->SenseDataPointer = Virtual_to_Bus(&Command->sense_buffer);
+ CCB->Command = Command;
+ Command->scsi_done = CompletionRoutine;
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ Place the CCB in an Outgoing Mailbox. The higher levels of the SCSI
+ Subsystem should not attempt to queue more commands than can be placed
+ in Outgoing Mailboxes, so there should always be one free. In the
+ unlikely event that there are none available, wait 1 second and try
+ again. If that fails, the Host Adapter is probably hung so signal an
+ error as a Host Adapter Hard Reset should be initiated soon.
+ */
+ if (!BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxStartCommand, CCB))
+ {
+ BusLogic_Warning("Unable to write Outgoing Mailbox - "
+ "Pausing for 1 second\n", HostAdapter);
+ BusLogic_Delay(1);
+ if (!BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxStartCommand, CCB))
+ {
+ BusLogic_Warning("Still unable to write Outgoing Mailbox - "
+ "Host Adapter Dead?\n", HostAdapter);
+ BusLogic_DeallocateCCB(CCB);
+ Command->result = DID_ERROR << 16;
+ Command->scsi_done(Command);
+ }
+ }
+ }
+ else
+ {
+ /*
+ Call the FlashPoint SCCB Manager to start execution of the CCB.
+ */
+ CCB->Status = BusLogic_CCB_Active;
+ HostAdapter->ActiveCommands[TargetID]++;
+ TargetStatistics[TargetID].CommandsAttempted++;
+ FlashPoint_StartCCB(HostAdapter->CardHandle, CCB);
+ /*
+ The Command may have already completed and BusLogic_QueueCompletedCCB
+ been called, or it may still be pending.
+ */
+ if (CCB->Status == BusLogic_CCB_Completed)
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+Done:
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return 0;
+}
+
+
+/*
+ BusLogic_AbortCommand aborts Command if possible.
+*/
+
+int BusLogic_AbortCommand(SCSI_Command_T *Command)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ int TargetID = Command->target;
+ ProcessorFlags_T ProcessorFlags;
+ BusLogic_CCB_T *CCB;
+ int Result;
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].CommandAbortsRequested);
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ If this Command has already completed, then no Abort is necessary.
+ */
+ if (Command->serial_number != Command->serial_number_at_timeout)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "Already Completed\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_NOT_RUNNING;
+ goto Done;
+ }
+ /*
+ Attempt to find an Active CCB for this Command. If no Active CCB for this
+ Command is found, then no Abort is necessary.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Command == Command) break;
+ if (CCB == NULL)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "No CCB Found\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "CCB Completed\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Reset)
+ {
+ BusLogic_Warning("Unable to Abort Command to Target %d - "
+ "CCB Reset\n", HostAdapter, TargetID);
+ Result = SCSI_ABORT_PENDING;
+ goto Done;
+ }
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ Attempt to Abort this CCB. MultiMaster Firmware versions prior to 5.xx
+ do not generate Abort Tag messages, but only generate the non-tagged
+ Abort message. Since non-tagged commands are not sent by the Host
+ Adapter until the queue of outstanding tagged commands has completed,
+ and the Abort message is treated as a non-tagged command, it is
+ effectively impossible to abort commands when Tagged Queuing is active.
+ Firmware version 5.xx does generate Abort Tag messages, so it is
+ possible to abort commands when Tagged Queuing is active.
+ */
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingActive &&
+ HostAdapter->FirmwareVersion[0] < '5')
+ {
+ BusLogic_Warning("Unable to Abort CCB #%ld to Target %d - "
+ "Abort Tag Not Supported\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ Result = SCSI_ABORT_SNOOZE;
+ }
+ else if (BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxAbortCommand, CCB))
+ {
+ BusLogic_Warning("Aborting CCB #%ld to Target %d\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].CommandAbortsAttempted);
+ Result = SCSI_ABORT_PENDING;
+ }
+ else
+ {
+ BusLogic_Warning("Unable to Abort CCB #%ld to Target %d - "
+ "No Outgoing Mailboxes\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ Result = SCSI_ABORT_BUSY;
+ }
+ }
+ else
+ {
+ /*
+ Call the FlashPoint SCCB Manager to abort execution of the CCB.
+ */
+ BusLogic_Warning("Aborting CCB #%ld to Target %d\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].CommandAbortsAttempted);
+ FlashPoint_AbortCCB(HostAdapter->CardHandle, CCB);
+ /*
+ The Abort may have already been completed and
+ BusLogic_QueueCompletedCCB been called, or it
+ may still be pending.
+ */
+ Result = SCSI_ABORT_PENDING;
+ if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ Result = SCSI_ABORT_SUCCESS;
+ }
+ }
+ /*
+ Release exclusive access to Host Adapter.
+ */
+Done:
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return Result;
+}
+
+
+/*
+ BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all
+ currently executing SCSI Commands as having been Reset.
+*/
+
+static int BusLogic_ResetHostAdapter(BusLogic_HostAdapter_T *HostAdapter,
+ SCSI_Command_T *Command,
+ unsigned int ResetFlags)
+{
+ ProcessorFlags_T ProcessorFlags;
+ BusLogic_CCB_T *CCB;
+ int TargetID, Result;
+ boolean HardReset;
+ if (HostAdapter->HostAdapterExternalReset)
+ {
+ BusLogic_IncrementErrorCounter(&HostAdapter->ExternalHostAdapterResets);
+ HardReset = false;
+ }
+ else if (HostAdapter->HostAdapterInternalError)
+ {
+ BusLogic_IncrementErrorCounter(&HostAdapter->HostAdapterInternalErrors);
+ HardReset = true;
+ }
+ else
+ {
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[Command->target]
+ .HostAdapterResetsRequested);
+ HardReset = true;
+ }
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ If this is an Asynchronous Reset and this Command has already completed,
+ then no Reset is necessary.
+ */
+ if (ResetFlags & SCSI_RESET_ASYNCHRONOUS)
+ {
+ TargetID = Command->target;
+ if (Command->serial_number != Command->serial_number_at_timeout)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Already Completed or Reset\n",
+ HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Command == Command) break;
+ if (CCB == NULL)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "No CCB Found\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "CCB Completed\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Reset &&
+ HostAdapter->BusDeviceResetPendingCCB[TargetID] == NULL)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Reset Pending\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_PENDING;
+ goto Done;
+ }
+ }
+ if (Command == NULL)
+ {
+ if (HostAdapter->HostAdapterInternalError)
+ BusLogic_Warning("Resetting %s due to Host Adapter Internal Error\n",
+ HostAdapter, HostAdapter->FullModelName);
+ else BusLogic_Warning("Resetting %s due to External SCSI Bus Reset\n",
+ HostAdapter, HostAdapter->FullModelName);
+ }
+ else
+ {
+ BusLogic_Warning("Resetting %s due to Target %d\n", HostAdapter,
+ HostAdapter->FullModelName, Command->target);
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[Command->target]
+ .HostAdapterResetsAttempted);
+ }
+ /*
+ Attempt to Reset and Reinitialize the Host Adapter.
+ */
+ if (!(BusLogic_HardwareResetHostAdapter(HostAdapter, HardReset) &&
+ BusLogic_InitializeHostAdapter(HostAdapter)))
+ {
+ BusLogic_Error("Resetting %s Failed\n", HostAdapter,
+ HostAdapter->FullModelName);
+ Result = SCSI_RESET_ERROR;
+ goto Done;
+ }
+ if (Command != NULL)
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[Command->target]
+ .HostAdapterResetsCompleted);
+ /*
+ Mark all currently executing CCBs as having been Reset.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Active)
+ CCB->Status = BusLogic_CCB_Reset;
+ /*
+ Wait a few seconds between the Host Adapter Hard Reset which initiates
+ a SCSI Bus Reset and issuing any SCSI Commands. Some SCSI devices get
+ confused if they receive SCSI Commands too soon after a SCSI Bus Reset.
+ Note that a timer interrupt may occur here, but all active CCBs have
+ already been marked Reset and so a reentrant call will return Pending.
+ */
+ if (HardReset)
+ BusLogic_Delay(HostAdapter->BusSettleTime);
+ /*
+ If this is a Synchronous Reset, perform completion processing for
+ the Command being Reset.
+ */
+ if (ResetFlags & SCSI_RESET_SYNCHRONOUS)
+ {
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ }
+ /*
+ Perform completion processing for all CCBs marked as Reset.
+ */
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Status == BusLogic_CCB_Reset)
+ {
+ Command = CCB->Command;
+ BusLogic_DeallocateCCB(CCB);
+ while (Command != NULL)
+ {
+ SCSI_Command_T *NextCommand = Command->reset_chain;
+ Command->reset_chain = NULL;
+ Command->result = DID_RESET << 16;
+ Command->scsi_done(Command);
+ Command = NextCommand;
+ }
+ }
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ HostAdapter->LastResetAttempted[TargetID] = jiffies;
+ HostAdapter->LastResetCompleted[TargetID] = jiffies;
+ }
+ Result = SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET;
+ /*
+ Release exclusive access to Host Adapter.
+ */
+Done:
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return Result;
+}
+
+
+/*
+ BusLogic_SendBusDeviceReset sends a Bus Device Reset to the Target
+ Device associated with Command.
+*/
+
+static int BusLogic_SendBusDeviceReset(BusLogic_HostAdapter_T *HostAdapter,
+ SCSI_Command_T *Command,
+ unsigned int ResetFlags)
+{
+ int TargetID = Command->target;
+ BusLogic_CCB_T *CCB, *XCCB;
+ ProcessorFlags_T ProcessorFlags;
+ int Result = -1;
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].BusDeviceResetsRequested);
+ /*
+ Acquire exclusive access to Host Adapter.
+ */
+ BusLogic_AcquireHostAdapterLock(HostAdapter, &ProcessorFlags);
+ /*
+ If this is an Asynchronous Reset and this Command has already completed,
+ then no Reset is necessary.
+ */
+ if (ResetFlags & SCSI_RESET_ASYNCHRONOUS)
+ {
+ if (Command->serial_number != Command->serial_number_at_timeout)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Already Completed\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll)
+ if (CCB->Command == Command) break;
+ if (CCB == NULL)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "No CCB Found\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "CCB Completed\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_NOT_RUNNING;
+ goto Done;
+ }
+ else if (CCB->Status == BusLogic_CCB_Reset)
+ {
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Reset Pending\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_PENDING;
+ goto Done;
+ }
+ else if (HostAdapter->BusDeviceResetPendingCCB[TargetID] != NULL)
+ {
+ BusLogic_Warning("Bus Device Reset already pending to Target %d\n",
+ HostAdapter, TargetID);
+ goto Done;
+ }
+ }
+ /*
+ If this is a Synchronous Reset and a Bus Device Reset is already pending
+ for this Target Device, do not send a second one. Add this Command to
+ the list of Commands for which completion processing must be performed
+ when the Bus Device Reset CCB completes.
+ */
+ if (ResetFlags & SCSI_RESET_SYNCHRONOUS)
+ if ((CCB = HostAdapter->BusDeviceResetPendingCCB[TargetID]) != NULL)
+ {
+ Command->reset_chain = CCB->Command;
+ CCB->Command = Command;
+ BusLogic_Warning("Unable to Reset Command to Target %d - "
+ "Reset Pending\n", HostAdapter, TargetID);
+ Result = SCSI_RESET_PENDING;
+ goto Done;
+ }
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ MultiMaster Firmware versions prior to 5.xx treat a Bus Device Reset as
+ a non-tagged command. Since non-tagged commands are not sent by the
+ Host Adapter until the queue of outstanding tagged commands has
+ completed, it is effectively impossible to send a Bus Device Reset
+ while there are tagged commands outstanding. Therefore, in that case a
+ full Host Adapter Hard Reset and SCSI Bus Reset must be done.
+ */
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingActive &&
+ HostAdapter->ActiveCommands[TargetID] > 0 &&
+ HostAdapter->FirmwareVersion[0] < '5')
+ goto Done;
+ }
+ /*
+ Allocate a CCB from the Host Adapter's free list. In the unlikely event
+ that there are none available and memory allocation fails, attempt a full
+ Host Adapter Hard Reset and SCSI Bus Reset.
+ */
+ CCB = BusLogic_AllocateCCB(HostAdapter);
+ if (CCB == NULL) goto Done;
+ BusLogic_Warning("Sending Bus Device Reset CCB #%ld to Target %d\n",
+ HostAdapter, CCB->SerialNumber, TargetID);
+ CCB->Opcode = BusLogic_BusDeviceReset;
+ CCB->TargetID = TargetID;
+ /*
+ For Synchronous Resets, arrange for the interrupt handler to perform
+ completion processing for the Command being Reset.
+ */
+ if (ResetFlags & SCSI_RESET_SYNCHRONOUS)
+ {
+ Command->reset_chain = NULL;
+ CCB->Command = Command;
+ }
+ if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
+ {
+ /*
+ Attempt to write an Outgoing Mailbox with the Bus Device Reset CCB.
+ If sending a Bus Device Reset is impossible, attempt a full Host
+ Adapter Hard Reset and SCSI Bus Reset.
+ */
+ if (!(BusLogic_WriteOutgoingMailbox(
+ HostAdapter, BusLogic_MailboxStartCommand, CCB)))
+ {
+ BusLogic_Warning("Unable to write Outgoing Mailbox for "
+ "Bus Device Reset\n", HostAdapter);
+ BusLogic_DeallocateCCB(CCB);
+ goto Done;
+ }
+ }
+ else
+ {
+ /*
+ Call the FlashPoint SCCB Manager to start execution of the CCB.
+ */
+ CCB->Status = BusLogic_CCB_Active;
+ HostAdapter->ActiveCommands[TargetID]++;
+ FlashPoint_StartCCB(HostAdapter->CardHandle, CCB);
+ }
+ /*
+ If there is a currently executing CCB in the Host Adapter for this Command
+ (i.e. this is an Asynchronous Reset), then an Incoming Mailbox entry may be
+ made with a completion code of BusLogic_HostAdapterAssertedBusDeviceReset.
+ If there is no active CCB for this Command (i.e. this is a Synchronous
+ Reset), then the Bus Device Reset CCB's Command field will have been set
+ to the Command so that the interrupt for the completion of the Bus Device
+ Reset can call the Completion Routine for the Command. On successful
+ execution of a Bus Device Reset, older firmware versions did return the
+ pending CCBs with the appropriate completion code, but more recent firmware
+ versions only return the Bus Device Reset CCB itself. This driver handles
+ both cases by marking all the currently executing CCBs to this Target
+ Device as Reset. When the Bus Device Reset CCB is processed by the
+ interrupt handler, any remaining CCBs marked as Reset will have completion
+ processing performed.
+ */
+ BusLogic_IncrementErrorCounter(
+ &HostAdapter->TargetStatistics[TargetID].BusDeviceResetsAttempted);
+ HostAdapter->BusDeviceResetPendingCCB[TargetID] = CCB;
+ HostAdapter->LastResetAttempted[TargetID] = jiffies;
+ for (XCCB = HostAdapter->All_CCBs; XCCB != NULL; XCCB = XCCB->NextAll)
+ if (XCCB->Status == BusLogic_CCB_Active && XCCB->TargetID == TargetID)
+ XCCB->Status = BusLogic_CCB_Reset;
+ /*
+    FlashPoint Host Adapters may have already completed the Bus Device
+    Reset, in which case BusLogic_QueueCompletedCCB will already have been
+    called, or the Reset may still be pending.
+ */
+ Result = SCSI_RESET_PENDING;
+ if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+ if (CCB->Status == BusLogic_CCB_Completed)
+ {
+ BusLogic_ProcessCompletedCCBs(HostAdapter);
+ Result = SCSI_RESET_SUCCESS;
+ }
+ /*
+ If a Bus Device Reset was not possible for some reason, force a full
+ Host Adapter Hard Reset and SCSI Bus Reset.
+ */
+Done:
+ if (Result < 0)
+ Result = BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ /*
+ Release exclusive access to Host Adapter.
+ */
+ BusLogic_ReleaseHostAdapterLock(HostAdapter, &ProcessorFlags);
+ return Result;
+}
+
+
+/*
+ BusLogic_ResetCommand takes appropriate action to reset Command.
+*/
+
+int BusLogic_ResetCommand(SCSI_Command_T *Command, unsigned int ResetFlags)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Command->host->hostdata;
+ int TargetID = Command->target;
+ BusLogic_ErrorRecoveryStrategy_T
+ ErrorRecoveryStrategy = HostAdapter->ErrorRecoveryStrategy[TargetID];
+ /*
+ Disable Tagged Queuing if it is active for this Target Device and if
+ it has been less than 10 minutes since the last reset occurred, or since
+ the system was initialized if no prior resets have occurred.
+ */
+ if (HostAdapter->TargetFlags[TargetID].TaggedQueuingActive &&
+ jiffies - HostAdapter->LastResetCompleted[TargetID] < 10*60*HZ)
+ {
+ HostAdapter->TaggedQueuingPermitted &= ~(1 << TargetID);
+ HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false;
+ BusLogic_Warning("Tagged Queuing now disabled for Target %d\n",
+ HostAdapter, TargetID);
+ }
+ switch (ErrorRecoveryStrategy)
+ {
+ case BusLogic_ErrorRecovery_Default:
+ if (ResetFlags & SCSI_RESET_SUGGEST_HOST_RESET)
+ return BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ else if (ResetFlags & SCSI_RESET_SUGGEST_BUS_RESET)
+ return BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ /* Fall through to Bus Device Reset case. */
+ case BusLogic_ErrorRecovery_BusDeviceReset:
+      /*
+      The Bus Device Reset Error Recovery Strategy only graduates to a Hard
+      Reset when no commands have completed successfully since the last Bus
+      Device Reset and at least 100 milliseconds have elapsed since that Bus
+      Device Reset was attempted.  This prevents a sequence of commands that
+      all time out together from immediately forcing a Hard Reset before the
+      Bus Device Reset has had a chance to clear the error condition.
+ */
+ if (HostAdapter->TargetFlags[TargetID].CommandSuccessfulFlag ||
+ jiffies - HostAdapter->LastResetAttempted[TargetID] < HZ/10)
+ {
+ HostAdapter->TargetFlags[TargetID].CommandSuccessfulFlag = false;
+ return BusLogic_SendBusDeviceReset(HostAdapter, Command, ResetFlags);
+ }
+ /* Fall through to Hard Reset case. */
+ case BusLogic_ErrorRecovery_HardReset:
+ return BusLogic_ResetHostAdapter(HostAdapter, Command, ResetFlags);
+ case BusLogic_ErrorRecovery_None:
+ BusLogic_Warning("Error Recovery for Target %d Suppressed\n",
+ HostAdapter, TargetID);
+ break;
+ }
+ return SCSI_RESET_PUNT;
+}
+
+
+/*
+ BusLogic_BIOSDiskParameters returns the Heads/Sectors/Cylinders BIOS Disk
+ Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and
+ the appropriate number of cylinders so as not to exceed drive capacity. In
+ order for disks equal to or larger than 1 GB to be addressable by the BIOS
+ without exceeding the BIOS limitation of 1024 cylinders, Extended Translation
+ may be enabled in AutoSCSI on FlashPoint Host Adapters and on "W" and "C"
+ series MultiMaster Host Adapters, or by a dip switch setting on "S" and "A"
+ series MultiMaster Host Adapters. With Extended Translation enabled, drives
+ between 1 GB inclusive and 2 GB exclusive are given a disk geometry of 128
+ heads and 32 sectors, and drives above 2 GB inclusive are given a disk
+ geometry of 255 heads and 63 sectors. However, if the BIOS detects that the
+ Extended Translation setting does not match the geometry in the partition
+ table, then the translation inferred from the partition table will be used by
+ the BIOS, and a warning may be displayed.
+*/
+
+int BusLogic_BIOSDiskParameters(SCSI_Disk_T *Disk, KernelDevice_T Device,
+ int *Parameters)
+{
+ BusLogic_HostAdapter_T *HostAdapter =
+ (BusLogic_HostAdapter_T *) Disk->device->host->hostdata;
+ BIOS_DiskParameters_T *DiskParameters = (BIOS_DiskParameters_T *) Parameters;
+ struct buffer_head *BufferHead;
+ if (HostAdapter->ExtendedTranslationEnabled &&
+ Disk->capacity >= 2*1024*1024 /* 1 GB in 512 byte sectors */)
+ {
+ if (Disk->capacity >= 4*1024*1024 /* 2 GB in 512 byte sectors */)
+ {
+ DiskParameters->Heads = 255;
+ DiskParameters->Sectors = 63;
+ }
+ else
+ {
+ DiskParameters->Heads = 128;
+ DiskParameters->Sectors = 32;
+ }
+ }
+ else
+ {
+ DiskParameters->Heads = 64;
+ DiskParameters->Sectors = 32;
+ }
+ DiskParameters->Cylinders =
+ Disk->capacity / (DiskParameters->Heads * DiskParameters->Sectors);
+ /*
+ Attempt to read the first 1024 bytes from the disk device.
+ */
+ BufferHead = bread(MKDEV(MAJOR(Device), MINOR(Device) & ~0x0F), 0, 1024);
+ if (BufferHead == NULL) return 0;
+ /*
+ If the boot sector partition table flag is valid, search for a partition
+ table entry whose end_head matches one of the standard BusLogic geometry
+ translations (64/32, 128/32, or 255/63).
+ */
+ if (*(unsigned short *) (BufferHead->b_data + 0x1FE) == 0xAA55)
+ {
+ PartitionTable_T *FirstPartitionEntry =
+ (PartitionTable_T *) (BufferHead->b_data + 0x1BE);
+ PartitionTable_T *PartitionEntry = FirstPartitionEntry;
+ int SavedCylinders = DiskParameters->Cylinders, PartitionNumber;
+ unsigned char PartitionEntryEndHead, PartitionEntryEndSector;
+ for (PartitionNumber = 0; PartitionNumber < 4; PartitionNumber++)
+ {
+ PartitionEntryEndHead = PartitionEntry->end_head;
+ PartitionEntryEndSector = PartitionEntry->end_sector & 0x3F;
+ if (PartitionEntryEndHead == 64-1)
+ {
+ DiskParameters->Heads = 64;
+ DiskParameters->Sectors = 32;
+ break;
+ }
+ else if (PartitionEntryEndHead == 128-1)
+ {
+ DiskParameters->Heads = 128;
+ DiskParameters->Sectors = 32;
+ break;
+ }
+ else if (PartitionEntryEndHead == 255-1)
+ {
+ DiskParameters->Heads = 255;
+ DiskParameters->Sectors = 63;
+ break;
+ }
+ PartitionEntry++;
+ }
+ if (PartitionNumber == 4)
+ {
+ PartitionEntryEndHead = FirstPartitionEntry->end_head;
+ PartitionEntryEndSector = FirstPartitionEntry->end_sector & 0x3F;
+ }
+ DiskParameters->Cylinders =
+ Disk->capacity / (DiskParameters->Heads * DiskParameters->Sectors);
+ if (PartitionNumber < 4 &&
+ PartitionEntryEndSector == DiskParameters->Sectors)
+ {
+ if (DiskParameters->Cylinders != SavedCylinders)
+ BusLogic_Warning("Adopting Geometry %d/%d from Partition Table\n",
+ HostAdapter,
+ DiskParameters->Heads, DiskParameters->Sectors);
+ }
+ else if (PartitionEntryEndHead > 0 || PartitionEntryEndSector > 0)
+ {
+ BusLogic_Warning("Warning: Partition Table appears to "
+ "have Geometry %d/%d which is\n", HostAdapter,
+ PartitionEntryEndHead + 1,
+ PartitionEntryEndSector);
+ BusLogic_Warning("not compatible with current BusLogic "
+ "Host Adapter Geometry %d/%d\n", HostAdapter,
+ DiskParameters->Heads, DiskParameters->Sectors);
+ }
+ }
+ brelse(BufferHead);
+ return 0;
+}
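+
+
+/*
+  Worked example (illustrative capacity only, not taken from the driver):
+  with Extended Translation enabled, a disk of 4,400,000 512-byte sectors
+  is at least 2 GB (4*1024*1024 sectors), so the code above selects 255
+  heads and 63 sectors, giving 4400000 / (255 * 63) = 273 cylinders,
+  comfortably below the BIOS limit of 1024 cylinders.
+*/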
+
+
+/*
+  BusLogic_ProcDirectoryInfo implements /proc/scsi/BusLogic/<N>.
+*/
+
+int BusLogic_ProcDirectoryInfo(char *ProcBuffer, char **StartPointer,
+ off_t Offset, int BytesAvailable,
+ int HostNumber, int WriteFlag)
+{
+ BusLogic_HostAdapter_T *HostAdapter;
+ BusLogic_TargetStatistics_T *TargetStatistics;
+ int TargetID, Length;
+ char *Buffer;
+ for (HostAdapter = BusLogic_FirstRegisteredHostAdapter;
+ HostAdapter != NULL;
+ HostAdapter = HostAdapter->Next)
+ if (HostAdapter->HostNumber == HostNumber) break;
+ if (HostAdapter == NULL)
+ {
+ BusLogic_Error("Cannot find Host Adapter for SCSI Host %d\n",
+ NULL, HostNumber);
+ return 0;
+ }
+ TargetStatistics = HostAdapter->TargetStatistics;
+ if (WriteFlag)
+ {
+ HostAdapter->ExternalHostAdapterResets = 0;
+ HostAdapter->HostAdapterInternalErrors = 0;
+ memset(TargetStatistics, 0,
+ BusLogic_MaxTargetDevices * sizeof(BusLogic_TargetStatistics_T));
+ return 0;
+ }
+ Buffer = HostAdapter->MessageBuffer;
+ Length = HostAdapter->MessageBufferLength;
+ Length += sprintf(&Buffer[Length], "\n\
+Current Driver Queue Depth: %d\n\
+Currently Allocated CCBs: %d\n",
+ HostAdapter->DriverQueueDepth,
+ HostAdapter->AllocatedCCBs);
+ Length += sprintf(&Buffer[Length], "\n\n\
+ DATA TRANSFER STATISTICS\n\
+\n\
+Target Tagged Queuing Queue Depth Active Attempted Completed\n\
+====== ============== =========== ====== ========= =========\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length], " %2d %s", TargetID,
+ (TargetFlags->TaggedQueuingSupported
+ ? (TargetFlags->TaggedQueuingActive
+ ? " Active"
+ : (HostAdapter->TaggedQueuingPermitted & (1 << TargetID)
+ ? " Permitted" : " Disabled"))
+ : "Not Supported"));
+ Length += sprintf(&Buffer[Length],
+ " %3d %3u %9u %9u\n",
+ HostAdapter->QueueDepth[TargetID],
+ HostAdapter->ActiveCommands[TargetID],
+ TargetStatistics[TargetID].CommandsAttempted,
+ TargetStatistics[TargetID].CommandsCompleted);
+ }
+ Length += sprintf(&Buffer[Length], "\n\
+Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\
+====== ============= ============== =================== ===================\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length], " %2d %9u %9u", TargetID,
+ TargetStatistics[TargetID].ReadCommands,
+ TargetStatistics[TargetID].WriteCommands);
+ if (TargetStatistics[TargetID].TotalBytesRead.Billions > 0)
+ Length +=
+ sprintf(&Buffer[Length], " %9u%09u",
+ TargetStatistics[TargetID].TotalBytesRead.Billions,
+ TargetStatistics[TargetID].TotalBytesRead.Units);
+ else
+ Length +=
+ sprintf(&Buffer[Length], " %9u",
+ TargetStatistics[TargetID].TotalBytesRead.Units);
+ if (TargetStatistics[TargetID].TotalBytesWritten.Billions > 0)
+ Length +=
+ sprintf(&Buffer[Length], " %9u%09u\n",
+ TargetStatistics[TargetID].TotalBytesWritten.Billions,
+ TargetStatistics[TargetID].TotalBytesWritten.Units);
+ else
+ Length +=
+ sprintf(&Buffer[Length], " %9u\n",
+ TargetStatistics[TargetID].TotalBytesWritten.Units);
+ }
+ Length += sprintf(&Buffer[Length], "\n\
+Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\
+====== ======= ========= ========= ========= ========= =========\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Read %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[0],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[1],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[2],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[3],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[4]);
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Write %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[0],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[1],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[2],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[3],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[4]);
+ }
+ Length += sprintf(&Buffer[Length], "\n\
+Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\
+====== ======= ========= ========= ========= ========= =========\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Read %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[5],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[6],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[7],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[8],
+ TargetStatistics[TargetID].ReadCommandSizeBuckets[9]);
+ Length +=
+ sprintf(&Buffer[Length],
+ " %2d Write %9u %9u %9u %9u %9u\n", TargetID,
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[5],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[6],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[7],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[8],
+ TargetStatistics[TargetID].WriteCommandSizeBuckets[9]);
+ }
+ Length += sprintf(&Buffer[Length], "\n\n\
+ ERROR RECOVERY STATISTICS\n\
+\n\
+ Command Aborts Bus Device Resets Host Adapter Resets\n\
+Target Requested Completed Requested Completed Requested Completed\n\
+ ID \\\\\\\\ Attempted //// \\\\\\\\ Attempted //// \\\\\\\\ Attempted ////\n\
+====== ===== ===== ===== ===== ===== ===== ===== ===== =====\n");
+ for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
+ {
+ BusLogic_TargetFlags_T *TargetFlags = &HostAdapter->TargetFlags[TargetID];
+ if (!TargetFlags->TargetExists) continue;
+ Length +=
+ sprintf(&Buffer[Length], "\
+ %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", TargetID,
+ TargetStatistics[TargetID].CommandAbortsRequested,
+ TargetStatistics[TargetID].CommandAbortsAttempted,
+ TargetStatistics[TargetID].CommandAbortsCompleted,
+ TargetStatistics[TargetID].BusDeviceResetsRequested,
+ TargetStatistics[TargetID].BusDeviceResetsAttempted,
+ TargetStatistics[TargetID].BusDeviceResetsCompleted,
+ TargetStatistics[TargetID].HostAdapterResetsRequested,
+ TargetStatistics[TargetID].HostAdapterResetsAttempted,
+ TargetStatistics[TargetID].HostAdapterResetsCompleted);
+ }
+ Length += sprintf(&Buffer[Length], "\nExternal Host Adapter Resets: %d\n",
+ HostAdapter->ExternalHostAdapterResets);
+ Length += sprintf(&Buffer[Length], "Host Adapter Internal Errors: %d\n",
+ HostAdapter->HostAdapterInternalErrors);
+ if (Length >= BusLogic_MessageBufferSize)
+ BusLogic_Error("Message Buffer length %d exceeds size %d\n",
+ HostAdapter, Length, BusLogic_MessageBufferSize);
+ if ((Length -= Offset) <= 0) return 0;
+ if (Length >= BytesAvailable) Length = BytesAvailable;
+ *StartPointer = &HostAdapter->MessageBuffer[Offset];
+ return Length;
+}
+
+
+/*
+ BusLogic_Message prints Driver Messages.
+*/
+
+static void BusLogic_Message(BusLogic_MessageLevel_T MessageLevel,
+ char *Format,
+ BusLogic_HostAdapter_T *HostAdapter,
+ ...)
+{
+ static char Buffer[BusLogic_LineBufferSize];
+ static boolean BeginningOfLine = true;
+ va_list Arguments;
+ int Length = 0;
+ va_start(Arguments, HostAdapter);
+ Length = vsprintf(Buffer, Format, Arguments);
+ va_end(Arguments);
+ if (MessageLevel == BusLogic_AnnounceLevel)
+ {
+ static int AnnouncementLines = 0;
+ strcpy(&HostAdapter->MessageBuffer[HostAdapter->MessageBufferLength],
+ Buffer);
+ HostAdapter->MessageBufferLength += Length;
+ if (++AnnouncementLines <= 2)
+ printk("%sscsi: %s", BusLogic_MessageLevelMap[MessageLevel], Buffer);
+ }
+ else if (MessageLevel == BusLogic_InfoLevel)
+ {
+ strcpy(&HostAdapter->MessageBuffer[HostAdapter->MessageBufferLength],
+ Buffer);
+ HostAdapter->MessageBufferLength += Length;
+ if (BeginningOfLine)
+ {
+ if (Buffer[0] != '\n' || Length > 1)
+ printk("%sscsi%d: %s", BusLogic_MessageLevelMap[MessageLevel],
+ HostAdapter->HostNumber, Buffer);
+ }
+ else printk("%s", Buffer);
+ }
+ else
+ {
+ if (BeginningOfLine)
+ {
+ if (HostAdapter != NULL && HostAdapter->HostAdapterInitialized)
+ printk("%sscsi%d: %s", BusLogic_MessageLevelMap[MessageLevel],
+ HostAdapter->HostNumber, Buffer);
+ else printk("%s%s", BusLogic_MessageLevelMap[MessageLevel], Buffer);
+ }
+ else printk("%s", Buffer);
+ }
+ BeginningOfLine = (Buffer[Length-1] == '\n');
+}
+
+
+/*
+ BusLogic_ParseKeyword parses an individual option keyword. It returns true
+ and updates the pointer if the keyword is recognized and false otherwise.
+*/
+
+static boolean BusLogic_ParseKeyword(char **StringPointer, char *Keyword)
+{
+ char *Pointer = *StringPointer;
+ while (*Keyword != '\0')
+ {
+ char StringChar = *Pointer++;
+ char KeywordChar = *Keyword++;
+      if (StringChar >= 'A' && StringChar <= 'Z')
+        StringChar += 'a' - 'A';
+      if (KeywordChar >= 'A' && KeywordChar <= 'Z')
+        KeywordChar += 'a' - 'A';
+ if (StringChar != KeywordChar) return false;
+ }
+ *StringPointer = Pointer;
+ return true;
+}
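+
+
+/*
+  Usage sketch (illustrative option text): with OptionsString pointing at
+  "QD:7,TQ:Enable", BusLogic_ParseKeyword(&OptionsString, "QD:") returns
+  true and advances OptionsString past the keyword to "7,TQ:Enable", so the
+  caller can then parse the integer argument with simple_strtoul.  Both
+  characters are folded to lower case, so "qd:7" matches as well.
+*/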
+
+
+/*
+ BusLogic_ParseDriverOptions handles processing of BusLogic Driver Options
+ specifications.
+
+ BusLogic Driver Options may be specified either via the Linux Kernel Command
+ Line or via the Loadable Kernel Module Installation Facility. Driver Options
+ for multiple host adapters may be specified either by separating the option
+ strings by a semicolon, or by specifying multiple "BusLogic=" strings on the
+ command line. Individual option specifications for a single host adapter are
+ separated by commas. The Probing and Debugging Options apply to all host
+ adapters whereas the remaining options apply individually only to the
+ selected host adapter.
+
+ The BusLogic Driver Probing Options comprise the following:
+
+ IO:<integer>
+
+ The "IO:" option specifies an ISA I/O Address to be probed for a non-PCI
+ MultiMaster Host Adapter. If neither "IO:" nor "NoProbeISA" options are
+ specified, then the standard list of BusLogic MultiMaster ISA I/O Addresses
+ will be probed (0x330, 0x334, 0x230, 0x234, 0x130, and 0x134). Multiple
+ "IO:" options may be specified to precisely determine the I/O Addresses to
+ be probed, but the probe order will always follow the standard list.
+
+ NoProbe
+
+ The "NoProbe" option disables all probing and therefore no BusLogic Host
+ Adapters will be detected.
+
+ NoProbeISA
+
+ The "NoProbeISA" option disables probing of the standard BusLogic ISA I/O
+ Addresses and therefore only PCI MultiMaster and FlashPoint Host Adapters
+ will be detected.
+
+ NoProbePCI
+
+ The "NoProbePCI" options disables the interrogation of PCI Configuration
+ Space and therefore only ISA Multimaster Host Adapters will be detected, as
+ well as PCI Multimaster Host Adapters that have their ISA Compatible I/O
+ Port set to "Primary" or "Alternate".
+
+ NoSortPCI
+
+ The "NoSortPCI" option forces PCI MultiMaster Host Adapters to be
+ enumerated in the order provided by the PCI BIOS, ignoring any setting of
+ the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." option.
+
+ MultiMasterFirst
+
+ The "MultiMasterFirst" option forces MultiMaster Host Adapters to be probed
+ before FlashPoint Host Adapters. By default, if both FlashPoint and PCI
+ MultiMaster Host Adapters are present, this driver will probe for
+ FlashPoint Host Adapters first unless the BIOS primary disk is controlled
+ by the first PCI MultiMaster Host Adapter, in which case MultiMaster Host
+ Adapters will be probed first.
+
+ FlashPointFirst
+
+ The "FlashPointFirst" option forces FlashPoint Host Adapters to be probed
+ before MultiMaster Host Adapters.
+
+ The BusLogic Driver Tagged Queuing Options allow for explicitly specifying
+ the Queue Depth and whether Tagged Queuing is permitted for each Target
+ Device (assuming that the Target Device supports Tagged Queuing). The Queue
+ Depth is the number of SCSI Commands that are allowed to be concurrently
+ presented for execution (either to the Host Adapter or Target Device). Note
+ that explicitly enabling Tagged Queuing may lead to problems; the option to
+ enable or disable Tagged Queuing is provided primarily to allow disabling
+ Tagged Queuing on Target Devices that do not implement it correctly. The
+ following options are available:
+
+ QueueDepth:<integer>
+
+ The "QueueDepth:" or QD:" option specifies the Queue Depth to use for all
+ Target Devices that support Tagged Queuing, as well as the maximum Queue
+ Depth for devices that do not support Tagged Queuing. If no Queue Depth
+ option is provided, the Queue Depth will be determined automatically based
+ on the Host Adapter's Total Queue Depth and the number, type, speed, and
+ capabilities of the detected Target Devices. For Host Adapters that
+ require ISA Bounce Buffers, the Queue Depth is automatically set by default
+ to BusLogic_TaggedQueueDepthBB or BusLogic_UntaggedQueueDepthBB to avoid
+ excessive preallocation of DMA Bounce Buffer memory. Target Devices that
+ do not support Tagged Queuing always have their Queue Depth set to
+ BusLogic_UntaggedQueueDepth or BusLogic_UntaggedQueueDepthBB, unless a
+ lower Queue Depth option is provided. A Queue Depth of 1 automatically
+ disables Tagged Queuing.
+
+ QueueDepth:[<integer>,<integer>...]
+
+ The "QueueDepth:[...]" or "QD:[...]" option specifies the Queue Depth
+ individually for each Target Device. If an <integer> is omitted, the
+ associated Target Device will have its Queue Depth selected automatically.
+
+ TaggedQueuing:Default
+
+ The "TaggedQueuing:Default" or "TQ:Default" option permits Tagged Queuing
+ based on the firmware version of the BusLogic Host Adapter and based on
+ whether the Queue Depth allows queuing multiple commands.
+
+ TaggedQueuing:Enable
+
+ The "TaggedQueuing:Enable" or "TQ:Enable" option enables Tagged Queuing for
+ all Target Devices on this Host Adapter, overriding any limitation that
+ would otherwise be imposed based on the Host Adapter firmware version.
+
+ TaggedQueuing:Disable
+
+ The "TaggedQueuing:Disable" or "TQ:Disable" option disables Tagged Queuing
+ for all Target Devices on this Host Adapter.
+
+ TaggedQueuing:<Target-Spec>
+
+ The "TaggedQueuing:<Target-Spec>" or "TQ:<Target-Spec>" option controls
+ Tagged Queuing individually for each Target Device. <Target-Spec> is a
+ sequence of "Y", "N", and "X" characters. "Y" enables Tagged Queuing, "N"
+ disables Tagged Queuing, and "X" accepts the default based on the firmware
+ version. The first character refers to Target Device 0, the second to
+ Target Device 1, and so on; if the sequence of "Y", "N", and "X" characters
+ does not cover all the Target Devices, unspecified characters are assumed
+ to be "X".
+
+ The BusLogic Driver Error Recovery Option allows for explicitly specifying
+ the Error Recovery action to be performed when BusLogic_ResetCommand is
+ called due to a SCSI Command failing to complete successfully. The following
+ options are available:
+
+ ErrorRecovery:Default
+
+ The "ErrorRecovery:Default" or "ER:Default" option selects between the Hard
+ Reset and Bus Device Reset options based on the recommendation of the SCSI
+ Subsystem.
+
+ ErrorRecovery:HardReset
+
+ The "ErrorRecovery:HardReset" or "ER:HardReset" option will initiate a Host
+ Adapter Hard Reset which also causes a SCSI Bus Reset.
+
+ ErrorRecovery:BusDeviceReset
+
+ The "ErrorRecovery:BusDeviceReset" or "ER:BusDeviceReset" option will send
+ a Bus Device Reset message to the individual Target Device causing the
+ error. If Error Recovery is again initiated for this Target Device and no
+ SCSI Command to this Target Device has completed successfully since the Bus
+ Device Reset message was sent, then a Hard Reset will be attempted.
+
+ ErrorRecovery:None
+
+ The "ErrorRecovery:None" or "ER:None" option suppresses Error Recovery.
+ This option should only be selected if a SCSI Bus Reset or Bus Device Reset
+ will cause the Target Device or a critical operation to suffer a complete
+ and unrecoverable failure.
+
+ ErrorRecovery:<Target-Spec>
+
+ The "ErrorRecovery:<Target-Spec>" or "ER:<Target-Spec>" option controls
+ Error Recovery individually for each Target Device. <Target-Spec> is a
+ sequence of "D", "H", "B", and "N" characters. "D" selects Default, "H"
+ selects Hard Reset, "B" selects Bus Device Reset, and "N" selects None.
+ The first character refers to Target Device 0, the second to Target Device
+ 1, and so on; if the sequence of "D", "H", "B", and "N" characters does not
+ cover all the possible Target Devices, unspecified characters are assumed
+ to be "D".
+
+ The BusLogic Driver Miscellaneous Options comprise the following:
+
+ BusSettleTime:<seconds>
+
+ The "BusSettleTime:" or "BST:" option specifies the Bus Settle Time in
+ seconds. The Bus Settle Time is the amount of time to wait between a Host
+ Adapter Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI
+ Commands. If unspecified, it defaults to BusLogic_DefaultBusSettleTime.
+
+ InhibitTargetInquiry
+
+ The "InhibitTargetInquiry" option inhibits the execution of an Inquire
+ Target Devices or Inquire Installed Devices command on MultiMaster Host
+ Adapters. This may be necessary with some older Target Devices that do not
+ respond correctly when Logical Units above 0 are addressed.
+
+ The BusLogic Driver Debugging Options comprise the following:
+
+ TraceProbe
+
+ The "TraceProbe" option enables tracing of Host Adapter Probing.
+
+ TraceHardwareReset
+
+ The "TraceHardwareReset" option enables tracing of Host Adapter Hardware
+ Reset.
+
+ TraceConfiguration
+
+ The "TraceConfiguration" option enables tracing of Host Adapter
+ Configuration.
+
+ TraceErrors
+
+ The "TraceErrors" option enables tracing of SCSI Commands that return an
+ error from the Target Device. The CDB and Sense Data will be printed for
+ each SCSI Command that fails.
+
+ Debug
+
+ The "Debug" option enables all debugging options.
+
+ The following examples demonstrate setting the Queue Depth for Target Devices
+ 1 and 2 on the first host adapter to 7 and 15, the Queue Depth for all Target
+ Devices on the second host adapter to 31, and the Bus Settle Time on the
+ second host adapter to 30 seconds.
+
+ Linux Kernel Command Line:
+
+ linux BusLogic=QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30
+
+ LILO Linux Boot Loader (in /etc/lilo.conf):
+
+ append = "BusLogic=QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30"
+
+ INSMOD Loadable Kernel Module Installation Facility:
+
+ insmod BusLogic.o \
+ 'BusLogic_Options="QueueDepth:[,7,15];QueueDepth:31,BusSettleTime:30"'
+
+ NOTE: Module Utilities 2.1.71 or later is required for correct parsing
+ of driver options containing commas.
+
+*/
+
+static void BusLogic_ParseDriverOptions(char *OptionsString)
+{
+ while (true)
+ {
+ BusLogic_DriverOptions_T *DriverOptions =
+ &BusLogic_DriverOptions[BusLogic_DriverOptionsCount++];
+ int TargetID;
+ memset(DriverOptions, 0, sizeof(BusLogic_DriverOptions_T));
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ while (*OptionsString != '\0' && *OptionsString != ';')
+ {
+ /* Probing Options. */
+ if (BusLogic_ParseKeyword(&OptionsString, "IO:"))
+ {
+ BusLogic_IO_Address_T IO_Address =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ BusLogic_ProbeOptions.LimitedProbeISA = true;
+ switch (IO_Address)
+ {
+ case 0x330:
+ BusLogic_ProbeOptions.Probe330 = true;
+ break;
+ case 0x334:
+ BusLogic_ProbeOptions.Probe334 = true;
+ break;
+ case 0x230:
+ BusLogic_ProbeOptions.Probe230 = true;
+ break;
+ case 0x234:
+ BusLogic_ProbeOptions.Probe234 = true;
+ break;
+ case 0x130:
+ BusLogic_ProbeOptions.Probe130 = true;
+ break;
+ case 0x134:
+ BusLogic_ProbeOptions.Probe134 = true;
+ break;
+ default:
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal I/O Address 0x%X)\n",
+ NULL, IO_Address);
+ return;
+ }
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoProbeISA"))
+ BusLogic_ProbeOptions.NoProbeISA = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoProbePCI"))
+ BusLogic_ProbeOptions.NoProbePCI = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoProbe"))
+ BusLogic_ProbeOptions.NoProbe = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "NoSortPCI"))
+ BusLogic_ProbeOptions.NoSortPCI = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "MultiMasterFirst"))
+ BusLogic_ProbeOptions.MultiMasterFirst = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "FlashPointFirst"))
+ BusLogic_ProbeOptions.FlashPointFirst = true;
+ /* Tagged Queuing Options. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "QueueDepth:[") ||
+ BusLogic_ParseKeyword(&OptionsString, "QD:["))
+ {
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ {
+ unsigned short QueueDepth =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ if (QueueDepth > BusLogic_MaxTaggedQueueDepth)
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal Queue Depth %d)\n",
+ NULL, QueueDepth);
+ return;
+ }
+ DriverOptions->QueueDepth[TargetID] = QueueDepth;
+ if (*OptionsString == ',')
+ OptionsString++;
+ else if (*OptionsString == ']')
+ break;
+ else
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(',' or ']' expected at '%s')\n",
+ NULL, OptionsString);
+ return;
+ }
+ }
+ if (*OptionsString != ']')
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(']' expected at '%s')\n",
+ NULL, OptionsString);
+ return;
+ }
+ else OptionsString++;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "QueueDepth:") ||
+ BusLogic_ParseKeyword(&OptionsString, "QD:"))
+ {
+ unsigned short QueueDepth =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ if (QueueDepth == 0 || QueueDepth > BusLogic_MaxTaggedQueueDepth)
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal Queue Depth %d)\n",
+ NULL, QueueDepth);
+ return;
+ }
+ DriverOptions->CommonQueueDepth = QueueDepth;
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->QueueDepth[TargetID] = QueueDepth;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "TaggedQueuing:") ||
+ BusLogic_ParseKeyword(&OptionsString, "TQ:"))
+ {
+ if (BusLogic_ParseKeyword(&OptionsString, "Default"))
+ {
+ DriverOptions->TaggedQueuingPermitted = 0x0000;
+ DriverOptions->TaggedQueuingPermittedMask = 0x0000;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "Enable"))
+ {
+ DriverOptions->TaggedQueuingPermitted = 0xFFFF;
+ DriverOptions->TaggedQueuingPermittedMask = 0xFFFF;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString, "Disable"))
+ {
+ DriverOptions->TaggedQueuingPermitted = 0x0000;
+ DriverOptions->TaggedQueuingPermittedMask = 0xFFFF;
+ }
+ else
+ {
+ unsigned short TargetBit;
+ for (TargetID = 0, TargetBit = 1;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++, TargetBit <<= 1)
+ switch (*OptionsString++)
+ {
+ case 'Y':
+ DriverOptions->TaggedQueuingPermitted |= TargetBit;
+ DriverOptions->TaggedQueuingPermittedMask |= TargetBit;
+ break;
+ case 'N':
+ DriverOptions->TaggedQueuingPermitted &= ~TargetBit;
+ DriverOptions->TaggedQueuingPermittedMask |= TargetBit;
+ break;
+ case 'X':
+ break;
+ default:
+ OptionsString--;
+ TargetID = BusLogic_MaxTargetDevices;
+ break;
+ }
+ }
+ }
+ /* Error Recovery Option. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "ErrorRecovery:") ||
+ BusLogic_ParseKeyword(&OptionsString, "ER:"))
+ {
+ if (BusLogic_ParseKeyword(&OptionsString, "Default"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ else if (BusLogic_ParseKeyword(&OptionsString, "HardReset"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_HardReset;
+ else if (BusLogic_ParseKeyword(&OptionsString, "BusDeviceReset"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_BusDeviceReset;
+ else if (BusLogic_ParseKeyword(&OptionsString, "None"))
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_None;
+ else
+ for (TargetID = 0;
+ TargetID < BusLogic_MaxTargetDevices;
+ TargetID++)
+ switch (*OptionsString++)
+ {
+ case 'D':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_Default;
+ break;
+ case 'H':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_HardReset;
+ break;
+ case 'B':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_BusDeviceReset;
+ break;
+ case 'N':
+ DriverOptions->ErrorRecoveryStrategy[TargetID] =
+ BusLogic_ErrorRecovery_None;
+ break;
+ default:
+ OptionsString--;
+ TargetID = BusLogic_MaxTargetDevices;
+ break;
+ }
+ }
+ /* Miscellaneous Options. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "BusSettleTime:") ||
+ BusLogic_ParseKeyword(&OptionsString, "BST:"))
+ {
+ unsigned short BusSettleTime =
+ simple_strtoul(OptionsString, &OptionsString, 0);
+ if (BusSettleTime > 5 * 60)
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(illegal Bus Settle Time %d)\n",
+ NULL, BusSettleTime);
+ return;
+ }
+ DriverOptions->BusSettleTime = BusSettleTime;
+ }
+ else if (BusLogic_ParseKeyword(&OptionsString,
+ "InhibitTargetInquiry"))
+ DriverOptions->LocalOptions.InhibitTargetInquiry = true;
+ /* Debugging Options. */
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceProbe"))
+ BusLogic_GlobalOptions.TraceProbe = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceHardwareReset"))
+ BusLogic_GlobalOptions.TraceHardwareReset = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceConfiguration"))
+ BusLogic_GlobalOptions.TraceConfiguration = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "TraceErrors"))
+ BusLogic_GlobalOptions.TraceErrors = true;
+ else if (BusLogic_ParseKeyword(&OptionsString, "Debug"))
+ {
+ BusLogic_GlobalOptions.TraceProbe = true;
+ BusLogic_GlobalOptions.TraceHardwareReset = true;
+ BusLogic_GlobalOptions.TraceConfiguration = true;
+ BusLogic_GlobalOptions.TraceErrors = true;
+ }
+ if (*OptionsString == ',')
+ OptionsString++;
+ else if (*OptionsString != ';' && *OptionsString != '\0')
+ {
+ BusLogic_Error("BusLogic: Unexpected Driver Option '%s' "
+ "ignored\n", NULL, OptionsString);
+ *OptionsString = '\0';
+ }
+ }
+ if (!(BusLogic_DriverOptionsCount == 0 ||
+ BusLogic_ProbeInfoCount == 0 ||
+ BusLogic_DriverOptionsCount == BusLogic_ProbeInfoCount))
+ {
+ BusLogic_Error("BusLogic: Invalid Driver Options "
+ "(all or no I/O Addresses must be specified)\n", NULL);
+ return;
+ }
+ /*
+ Tagged Queuing is disabled when the Queue Depth is 1 since queuing
+ multiple commands is not possible.
+ */
+ for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++)
+ if (DriverOptions->QueueDepth[TargetID] == 1)
+ {
+ unsigned short TargetBit = 1 << TargetID;
+ DriverOptions->TaggedQueuingPermitted &= ~TargetBit;
+ DriverOptions->TaggedQueuingPermittedMask |= TargetBit;
+ }
+ if (*OptionsString == ';') OptionsString++;
+ if (*OptionsString == '\0') return;
+ }
+}
+
+
+/*
+ BusLogic_Setup handles processing of Kernel Command Line Arguments.
+*/
+
+void BusLogic_Setup(char *CommandLineString, int *CommandLineIntegers)
+{
+ if (CommandLineIntegers[0] != 0)
+ {
+ BusLogic_Error("BusLogic: Obsolete Command Line Entry "
+ "Format Ignored\n", NULL);
+ return;
+ }
+ if (CommandLineString == NULL || *CommandLineString == '\0') return;
+ BusLogic_ParseDriverOptions(CommandLineString);
+}
+
+
+/*
+ Include Module support if requested.
+*/
+
+#ifdef MODULE
+
+SCSI_Host_Template_T driver_template = BUSLOGIC;
+
+#include "scsi_module.c"
+
+#endif
diff --git a/linux/src/drivers/scsi/BusLogic.h b/linux/src/drivers/scsi/BusLogic.h
new file mode 100644
index 0000000..f60ee07
--- /dev/null
+++ b/linux/src/drivers/scsi/BusLogic.h
@@ -0,0 +1,1775 @@
+/*
+
+ Linux Driver for BusLogic MultiMaster and FlashPoint SCSI Host Adapters
+
+ Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+ This program is free software; you may redistribute and/or modify it under
+ the terms of the GNU General Public License Version 2 as published by the
+ Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for complete details.
+
+ The author respectfully requests that any modifications to this software be
+ sent directly to him for evaluation and testing.
+
+ Special thanks to Wayne Yen, Jin-Lon Hon, and Alex Win of BusLogic, whose
+ advice has been invaluable, to David Gentzel, for writing the original Linux
+ BusLogic driver, and to Paul Gortmaker, for being such a dedicated test site.
+
+ Finally, special thanks to Mylex/BusLogic for making the FlashPoint SCCB
+ Manager available as freely redistributable source code.
+
+*/
+
+
+#include <linux/config.h>
+
+
+/*
+ Define types for some of the structures that interface with the rest
+ of the Linux Kernel and SCSI Subsystem.
+*/
+
+typedef kdev_t KernelDevice_T;
+typedef struct proc_dir_entry PROC_DirectoryEntry_T;
+typedef unsigned long ProcessorFlags_T;
+typedef struct pt_regs Registers_T;
+typedef struct partition PartitionTable_T;
+typedef Scsi_Host_Template SCSI_Host_Template_T;
+typedef struct Scsi_Host SCSI_Host_T;
+typedef struct scsi_device SCSI_Device_T;
+typedef struct scsi_disk SCSI_Disk_T;
+typedef struct scsi_cmnd SCSI_Command_T;
+typedef struct scatterlist SCSI_ScatterList_T;
+
+
+/*
+ Define prototypes for the BusLogic Driver Interface Functions.
+*/
+
+extern PROC_DirectoryEntry_T BusLogic_ProcDirectoryEntry;
+extern const char *BusLogic_DriverInfo(SCSI_Host_T *);
+extern int BusLogic_DetectHostAdapter(SCSI_Host_Template_T *);
+extern int BusLogic_ReleaseHostAdapter(SCSI_Host_T *);
+extern int BusLogic_QueueCommand(SCSI_Command_T *,
+ void (*CompletionRoutine)(SCSI_Command_T *));
+extern int BusLogic_AbortCommand(SCSI_Command_T *);
+extern int BusLogic_ResetCommand(SCSI_Command_T *, unsigned int);
+extern int BusLogic_BIOSDiskParameters(SCSI_Disk_T *, KernelDevice_T, int *);
+extern int BusLogic_ProcDirectoryInfo(char *, char **, off_t, int, int, int);
+
+
+/*
+ Define the BusLogic SCSI Host Template structure.
+*/
+
+#define BUSLOGIC \
+ { proc_dir: &BusLogic_ProcDirectoryEntry, /* ProcFS Directory Entry */ \
+ proc_info: BusLogic_ProcDirectoryInfo, /* ProcFS Info Function */ \
+ name: "BusLogic", /* Driver Name */ \
+ detect: BusLogic_DetectHostAdapter, /* Detect Host Adapter */ \
+ release: BusLogic_ReleaseHostAdapter, /* Release Host Adapter */ \
+ info: BusLogic_DriverInfo, /* Driver Info Function */ \
+ queuecommand: BusLogic_QueueCommand, /* Queue Command Function */ \
+ abort: BusLogic_AbortCommand, /* Abort Command Function */ \
+ reset: BusLogic_ResetCommand, /* Reset Command Function */ \
+ bios_param: BusLogic_BIOSDiskParameters, /* BIOS Disk Parameters */ \
+ unchecked_isa_dma: 1, /* Default Initial Value */ \
+ use_clustering: ENABLE_CLUSTERING } /* Enable Clustering */
+
+
+/*
+ BusLogic_DriverVersion protects the private portion of this file.
+*/
+
+#ifdef BusLogic_DriverVersion
+
+
+/*
+ FlashPoint support is only available for the Intel x86 Architecture with
+ CONFIG_PCI set.
+*/
+
+#ifndef __i386__
+#undef CONFIG_SCSI_OMIT_FLASHPOINT
+#define CONFIG_SCSI_OMIT_FLASHPOINT
+#endif
+
+#ifndef CONFIG_PCI
+#undef CONFIG_SCSI_OMIT_FLASHPOINT
+#define CONFIG_SCSI_OMIT_FLASHPOINT
+#define BusLogic_InitializeProbeInfoListISA \
+ BusLogic_InitializeProbeInfoList
+#endif
+
+
+/*
+ Define the maximum number of BusLogic Host Adapters supported by this driver.
+*/
+
+#define BusLogic_MaxHostAdapters 16
+
+
+/*
+ Define the maximum number of Target Devices supported by this driver.
+*/
+
+#define BusLogic_MaxTargetDevices 16
+
+
+/*
+ Define the maximum number of Scatter/Gather Segments used by this driver.
+ For optimal performance, it is important that this limit be at least as
+ large as the largest single request generated by the I/O Subsystem.
+*/
+
+#define BusLogic_ScatterGatherLimit 128
+
+
+/*
+ Define the maximum, maximum automatic, minimum automatic, and default Queue
+ Depth to allow for Target Devices depending on whether or not they support
+ Tagged Queuing and whether or not ISA Bounce Buffers are required.
+*/
+
+#define BusLogic_MaxTaggedQueueDepth 64
+#define BusLogic_MaxAutomaticTaggedQueueDepth 28
+#define BusLogic_MinAutomaticTaggedQueueDepth 7
+#define BusLogic_TaggedQueueDepthBB 3
+#define BusLogic_UntaggedQueueDepth 3
+#define BusLogic_UntaggedQueueDepthBB 2
+
+
+/*
+ Define the default amount of time in seconds to wait between a Host Adapter
+ Hard Reset which initiates a SCSI Bus Reset and issuing any SCSI commands.
+ Some SCSI devices get confused if they receive SCSI commands too soon after
+ a SCSI Bus Reset.
+*/
+
+#define BusLogic_DefaultBusSettleTime 2
+
+
+/*
+ Define the maximum number of Mailboxes that should be used for MultiMaster
+ Host Adapters. This number is chosen to be larger than the maximum Host
+ Adapter Queue Depth and small enough so that the Host Adapter structure
+ does not cross an allocation block size boundary.
+*/
+
+#define BusLogic_MaxMailboxes 211
+
+
+/*
+ Define the number of CCBs that should be allocated as a group to optimize
+ Kernel memory allocation.
+*/
+
+#define BusLogic_CCB_AllocationGroupSize 7
+
+
+/*
+ Define the Host Adapter Line and Message Buffer Sizes.
+*/
+
+#define BusLogic_LineBufferSize 100
+#define BusLogic_MessageBufferSize 9700
+
+
+/*
+ Define the Driver Message Levels.
+*/
+
+typedef enum BusLogic_MessageLevel
+{
+ BusLogic_AnnounceLevel = 0,
+ BusLogic_InfoLevel = 1,
+ BusLogic_NoticeLevel = 2,
+ BusLogic_WarningLevel = 3,
+ BusLogic_ErrorLevel = 4
+}
+BusLogic_MessageLevel_T;
+
+static char
+ *BusLogic_MessageLevelMap[] =
+ { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING, KERN_ERR };
+
+
+/*
+ Define Driver Message macros.
+*/
+
+#define BusLogic_Announce(Format, Arguments...) \
+ BusLogic_Message(BusLogic_AnnounceLevel, Format, ##Arguments)
+
+#define BusLogic_Info(Format, Arguments...) \
+ BusLogic_Message(BusLogic_InfoLevel, Format, ##Arguments)
+
+#define BusLogic_Notice(Format, Arguments...) \
+ BusLogic_Message(BusLogic_NoticeLevel, Format, ##Arguments)
+
+#define BusLogic_Warning(Format, Arguments...) \
+ BusLogic_Message(BusLogic_WarningLevel, Format, ##Arguments)
+
+#define BusLogic_Error(Format, Arguments...) \
+ BusLogic_Message(BusLogic_ErrorLevel, Format, ##Arguments)
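+
+
+/*
+  Usage sketch (illustrative message text): the HostAdapter argument is
+  always passed immediately after the Format string, even when the Format
+  string does not print it, e.g.
+
+    BusLogic_Warning("Tagged Queuing now disabled for Target %d\n",
+                     HostAdapter, TargetID);
+*/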
+
+
+/*
+ Define the types of BusLogic Host Adapters that are supported and the number
+ of I/O Addresses required by each type.
+*/
+
+typedef enum
+{
+ BusLogic_MultiMaster = 1,
+ BusLogic_FlashPoint = 2
+}
+__attribute__ ((packed))
+BusLogic_HostAdapterType_T;
+
+#define BusLogic_MultiMasterAddressCount 4
+#define BusLogic_FlashPointAddressCount 256
+
+static int
+ BusLogic_HostAdapterAddressCount[3] =
+ { 0, BusLogic_MultiMasterAddressCount, BusLogic_FlashPointAddressCount };
+
+
+/*
+ Define macros for testing the Host Adapter Type.
+*/
+
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+
+#define BusLogic_MultiMasterHostAdapterP(HostAdapter) \
+ (HostAdapter->HostAdapterType == BusLogic_MultiMaster)
+
+#define BusLogic_FlashPointHostAdapterP(HostAdapter) \
+ (HostAdapter->HostAdapterType == BusLogic_FlashPoint)
+
+#else
+
+#define BusLogic_MultiMasterHostAdapterP(HostAdapter) \
+ (true)
+
+#define BusLogic_FlashPointHostAdapterP(HostAdapter) \
+ (false)
+
+#endif
+
+
+/*
+ Define the possible Host Adapter Bus Types.
+*/
+
+typedef enum
+{
+ BusLogic_Unknown_Bus = 0,
+ BusLogic_ISA_Bus = 1,
+ BusLogic_EISA_Bus = 2,
+ BusLogic_PCI_Bus = 3,
+ BusLogic_VESA_Bus = 4,
+ BusLogic_MCA_Bus = 5
+}
+__attribute__ ((packed))
+BusLogic_HostAdapterBusType_T;
+
+static char
+ *BusLogic_HostAdapterBusNames[] =
+ { "Unknown", "ISA", "EISA", "PCI", "VESA", "MCA" };
+
+static BusLogic_HostAdapterBusType_T
+ BusLogic_HostAdapterBusTypes[] =
+ { BusLogic_VESA_Bus, /* BT-4xx */
+ BusLogic_ISA_Bus, /* BT-5xx */
+ BusLogic_MCA_Bus, /* BT-6xx */
+ BusLogic_EISA_Bus, /* BT-7xx */
+ BusLogic_Unknown_Bus, /* BT-8xx */
+ BusLogic_PCI_Bus }; /* BT-9xx */
+
+
+/*
+ Define the possible Host Adapter BIOS Disk Geometry Translations.
+*/
+
+typedef enum BusLogic_BIOS_DiskGeometryTranslation
+{
+ BusLogic_BIOS_Disk_Not_Installed = 0,
+ BusLogic_BIOS_Disk_Installed_64x32 = 1,
+ BusLogic_BIOS_Disk_Installed_128x32 = 2,
+ BusLogic_BIOS_Disk_Installed_255x63 = 3
+}
+__attribute__ ((packed))
+BusLogic_BIOS_DiskGeometryTranslation_T;
+
+
+/*
+ Define a Boolean data type.
+*/
+
+typedef enum { false, true } __attribute__ ((packed)) boolean;
+
+
+/*
+ Define a 32 bit I/O Address data type.
+*/
+
+typedef unsigned int BusLogic_IO_Address_T;
+
+
+/*
+ Define a 32 bit PCI Bus Address data type.
+*/
+
+typedef unsigned int BusLogic_PCI_Address_T;
+
+
+/*
+ Define a 32 bit Base Address data type.
+*/
+
+typedef unsigned int BusLogic_Base_Address_T;
+
+
+/*
+ Define a 32 bit Bus Address data type.
+*/
+
+typedef unsigned int BusLogic_BusAddress_T;
+
+
+/*
+ Define a 32 bit Byte Count data type.
+*/
+
+typedef unsigned int BusLogic_ByteCount_T;
+
+
+/*
+ Define a 10^18 Statistics Byte Counter data type.
+*/
+
+typedef struct BusLogic_ByteCounter
+{
+ unsigned int Units;
+ unsigned int Billions;
+}
+BusLogic_ByteCounter_T;
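+
+
+/*
+  A minimal sketch (illustrative only, assuming a caller that accumulates
+  transfer sizes) of how the split counter avoids overflowing the 32 bit
+  Units field:
+
+    ByteCounter->Units += Amount;
+    if (ByteCounter->Units > 999999999)
+      {
+        ByteCounter->Units -= 1000000000;
+        ByteCounter->Billions++;
+      }
+*/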
+
+
+/*
+ Define the structure for I/O Address and Bus Probing Information.
+*/
+
+typedef struct BusLogic_ProbeInfo
+{
+ BusLogic_HostAdapterType_T HostAdapterType;
+ BusLogic_HostAdapterBusType_T HostAdapterBusType;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned char Bus;
+ unsigned char Device;
+ unsigned char IRQ_Channel;
+}
+BusLogic_ProbeInfo_T;
+
+
+/*
+ Define the Probe Options.
+*/
+
+typedef struct BusLogic_ProbeOptions
+{
+ boolean NoProbe:1; /* Bit 0 */
+ boolean NoProbeISA:1; /* Bit 1 */
+ boolean NoProbePCI:1; /* Bit 2 */
+ boolean NoSortPCI:1; /* Bit 3 */
+ boolean MultiMasterFirst:1; /* Bit 4 */
+ boolean FlashPointFirst:1; /* Bit 5 */
+ boolean LimitedProbeISA:1; /* Bit 6 */
+ boolean Probe330:1; /* Bit 7 */
+ boolean Probe334:1; /* Bit 8 */
+ boolean Probe230:1; /* Bit 9 */
+ boolean Probe234:1; /* Bit 10 */
+ boolean Probe130:1; /* Bit 11 */
+ boolean Probe134:1; /* Bit 12 */
+}
+BusLogic_ProbeOptions_T;
+
+
+/*
+ Define the Global Options.
+*/
+
+typedef struct BusLogic_GlobalOptions
+{
+ boolean TraceProbe:1; /* Bit 0 */
+ boolean TraceHardwareReset:1; /* Bit 1 */
+ boolean TraceConfiguration:1; /* Bit 2 */
+ boolean TraceErrors:1; /* Bit 3 */
+}
+BusLogic_GlobalOptions_T;
+
+
+/*
+ Define the Local Options.
+*/
+
+typedef struct BusLogic_LocalOptions
+{
+ boolean InhibitTargetInquiry:1; /* Bit 0 */
+}
+BusLogic_LocalOptions_T;
+
+
+/*
+ Define the Error Recovery Strategy Options.
+*/
+
+typedef enum
+{
+ BusLogic_ErrorRecovery_Default = 0,
+ BusLogic_ErrorRecovery_BusDeviceReset = 1,
+ BusLogic_ErrorRecovery_HardReset = 2,
+ BusLogic_ErrorRecovery_None = 3
+}
+__attribute__ ((packed))
+BusLogic_ErrorRecoveryStrategy_T;
+
+static char
+ *BusLogic_ErrorRecoveryStrategyNames[] =
+ { "Default", "Bus Device Reset", "Hard Reset", "None" },
+ BusLogic_ErrorRecoveryStrategyLetters[] =
+ { 'D', 'B', 'H', 'N' };
+
+
+/*
+ Define the BusLogic SCSI Host Adapter I/O Register Offsets.
+*/
+
+#define BusLogic_ControlRegisterOffset 0 /* WO register */
+#define BusLogic_StatusRegisterOffset 0 /* RO register */
+#define BusLogic_CommandParameterRegisterOffset 1 /* WO register */
+#define BusLogic_DataInRegisterOffset 1 /* RO register */
+#define BusLogic_InterruptRegisterOffset 2 /* RO register */
+#define BusLogic_GeometryRegisterOffset 3 /* RO register */
+
+
+/*
+ Define the structure of the write-only Control Register.
+*/
+
+typedef union BusLogic_ControlRegister
+{
+ unsigned char All;
+ struct {
+ unsigned char :4; /* Bits 0-3 */
+ boolean SCSIBusReset:1; /* Bit 4 */
+ boolean InterruptReset:1; /* Bit 5 */
+ boolean SoftReset:1; /* Bit 6 */
+ boolean HardReset:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_ControlRegister_T;
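+
+
+/*
+  A minimal usage sketch (assuming the outb style port I/O used elsewhere in
+  the driver and a HostAdapter whose IO_Address is already known): asserting
+  a Hard Reset through this register would look roughly like
+
+    BusLogic_ControlRegister_T ControlRegister;
+    ControlRegister.All = 0;
+    ControlRegister.Bits.HardReset = true;
+    outb(ControlRegister.All,
+         HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+*/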
+
+
+/*
+ Define the structure of the read-only Status Register.
+*/
+
+typedef union BusLogic_StatusRegister
+{
+ unsigned char All;
+ struct {
+ boolean CommandInvalid:1; /* Bit 0 */
+ boolean Reserved:1; /* Bit 1 */
+ boolean DataInRegisterReady:1; /* Bit 2 */
+ boolean CommandParameterRegisterBusy:1; /* Bit 3 */
+ boolean HostAdapterReady:1; /* Bit 4 */
+ boolean InitializationRequired:1; /* Bit 5 */
+ boolean DiagnosticFailure:1; /* Bit 6 */
+ boolean DiagnosticActive:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_StatusRegister_T;
+
+
+/*
+ Define the structure of the read-only Interrupt Register.
+*/
+
+typedef union BusLogic_InterruptRegister
+{
+ unsigned char All;
+ struct {
+ boolean IncomingMailboxLoaded:1; /* Bit 0 */
+ boolean OutgoingMailboxAvailable:1; /* Bit 1 */
+ boolean CommandComplete:1; /* Bit 2 */
+ boolean ExternalBusReset:1; /* Bit 3 */
+ unsigned char Reserved:3; /* Bits 4-6 */
+ boolean InterruptValid:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_InterruptRegister_T;
+
+
+/*
+ Define the structure of the read-only Geometry Register.
+*/
+
+typedef union BusLogic_GeometryRegister
+{
+ unsigned char All;
+ struct {
+ BusLogic_BIOS_DiskGeometryTranslation_T Drive0Geometry:2; /* Bits 0-1 */
+ BusLogic_BIOS_DiskGeometryTranslation_T Drive1Geometry:2; /* Bits 2-3 */
+ unsigned char :3; /* Bits 4-6 */
+ boolean ExtendedTranslationEnabled:1; /* Bit 7 */
+ } Bits;
+}
+BusLogic_GeometryRegister_T;
+
+
+/*
+ Define the BusLogic SCSI Host Adapter Command Register Operation Codes.
+*/
+
+typedef enum
+{
+ BusLogic_TestCommandCompleteInterrupt = 0x00,
+ BusLogic_InitializeMailbox = 0x01,
+ BusLogic_ExecuteMailboxCommand = 0x02,
+ BusLogic_ExecuteBIOSCommand = 0x03,
+ BusLogic_InquireBoardID = 0x04,
+ BusLogic_EnableOutgoingMailboxAvailableInt = 0x05,
+ BusLogic_SetSCSISelectionTimeout = 0x06,
+ BusLogic_SetPreemptTimeOnBus = 0x07,
+ BusLogic_SetTimeOffBus = 0x08,
+ BusLogic_SetBusTransferRate = 0x09,
+ BusLogic_InquireInstalledDevicesID0to7 = 0x0A,
+ BusLogic_InquireConfiguration = 0x0B,
+ BusLogic_EnableTargetMode = 0x0C,
+ BusLogic_InquireSetupInformation = 0x0D,
+ BusLogic_WriteAdapterLocalRAM = 0x1A,
+ BusLogic_ReadAdapterLocalRAM = 0x1B,
+ BusLogic_WriteBusMasterChipFIFO = 0x1C,
+ BusLogic_ReadBusMasterChipFIFO = 0x1D,
+ BusLogic_EchoCommandData = 0x1F,
+ BusLogic_HostAdapterDiagnostic = 0x20,
+ BusLogic_SetAdapterOptions = 0x21,
+ BusLogic_InquireInstalledDevicesID8to15 = 0x23,
+ BusLogic_InquireTargetDevices = 0x24,
+ BusLogic_DisableHostAdapterInterrupt = 0x25,
+ BusLogic_InitializeExtendedMailbox = 0x81,
+ BusLogic_ExecuteSCSICommand = 0x83,
+ BusLogic_InquireFirmwareVersion3rdDigit = 0x84,
+ BusLogic_InquireFirmwareVersionLetter = 0x85,
+ BusLogic_InquirePCIHostAdapterInformation = 0x86,
+ BusLogic_InquireHostAdapterModelNumber = 0x8B,
+ BusLogic_InquireSynchronousPeriod = 0x8C,
+ BusLogic_InquireExtendedSetupInformation = 0x8D,
+ BusLogic_EnableStrictRoundRobinMode = 0x8F,
+ BusLogic_StoreHostAdapterLocalRAM = 0x90,
+ BusLogic_FetchHostAdapterLocalRAM = 0x91,
+ BusLogic_StoreLocalDataInEEPROM = 0x92,
+ BusLogic_UploadAutoSCSICode = 0x94,
+ BusLogic_ModifyIOAddress = 0x95,
+ BusLogic_SetCCBFormat = 0x96,
+ BusLogic_WriteInquiryBuffer = 0x9A,
+ BusLogic_ReadInquiryBuffer = 0x9B,
+ BusLogic_FlashROMUploadDownload = 0xA7,
+ BusLogic_ReadSCAMData = 0xA8,
+ BusLogic_WriteSCAMData = 0xA9
+}
+BusLogic_OperationCode_T;
+
+
+/*
+ Define the Inquire Board ID reply structure.
+*/
+
+typedef struct BusLogic_BoardID
+{
+ unsigned char BoardType; /* Byte 0 */
+ unsigned char CustomFeatures; /* Byte 1 */
+ unsigned char FirmwareVersion1stDigit; /* Byte 2 */
+ unsigned char FirmwareVersion2ndDigit; /* Byte 3 */
+}
+BusLogic_BoardID_T;
+
+
+/*
+ Define the Inquire Installed Devices ID 0 to 7 and Inquire Installed
+ Devices ID 8 to 15 reply type. For each Target Device, a byte is returned
+ where bit 0 set indicates that Logical Unit 0 exists, bit 1 set indicates
+ that Logical Unit 1 exists, and so on.
+*/
+
+typedef unsigned char BusLogic_InstalledDevices8_T[8];
+
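+
+/*
+  Illustrative sketch, not part of the original driver: decoding one byte of
+  the Inquire Installed Devices reply described above.  The helper name is
+  hypothetical; TargetID and LogicalUnit are assumed to be in the range 0-7.
+*/
+
+static inline boolean BusLogic_ExampleLogicalUnitInstalled(
+  BusLogic_InstalledDevices8_T InstalledDevices,
+  int TargetID, int LogicalUnit)
+{
+  /* Bit N of a Target Device's byte is set when Logical Unit N exists. */
+  return (InstalledDevices[TargetID] & (1 << LogicalUnit)) != 0;
+}
+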
+
+/*
+ Define the Inquire Target Devices reply type. Inquire Target Devices only
+  tests Logical Unit 0 of each Target Device, unlike the Inquire Installed
+  Devices commands, which test Logical Units 0 - 7.  Two bytes are returned,
+ where byte 0 bit 0 set indicates that Target Device 0 exists, and so on.
+*/
+
+typedef unsigned short BusLogic_InstalledDevices_T;
+
+
+/*
+ Define the Inquire Configuration reply structure.
+*/
+
+typedef struct BusLogic_Configuration
+{
+ unsigned char :5; /* Byte 0 Bits 0-4 */
+ boolean DMA_Channel5:1; /* Byte 0 Bit 5 */
+ boolean DMA_Channel6:1; /* Byte 0 Bit 6 */
+ boolean DMA_Channel7:1; /* Byte 0 Bit 7 */
+ boolean IRQ_Channel9:1; /* Byte 1 Bit 0 */
+ boolean IRQ_Channel10:1; /* Byte 1 Bit 1 */
+ boolean IRQ_Channel11:1; /* Byte 1 Bit 2 */
+ boolean IRQ_Channel12:1; /* Byte 1 Bit 3 */
+ unsigned char :1; /* Byte 1 Bit 4 */
+ boolean IRQ_Channel14:1; /* Byte 1 Bit 5 */
+ boolean IRQ_Channel15:1; /* Byte 1 Bit 6 */
+ unsigned char :1; /* Byte 1 Bit 7 */
+ unsigned char HostAdapterID:4; /* Byte 2 Bits 0-3 */
+ unsigned char :4; /* Byte 2 Bits 4-7 */
+}
+BusLogic_Configuration_T;
+
+
+/*
+ Define the Inquire Setup Information reply structure.
+*/
+
+typedef struct BusLogic_SynchronousValue
+{
+ unsigned char Offset:4; /* Bits 0-3 */
+ unsigned char TransferPeriod:3; /* Bits 4-6 */
+ boolean Synchronous:1; /* Bit 7 */
+}
+BusLogic_SynchronousValue_T;
+
+typedef BusLogic_SynchronousValue_T
+ BusLogic_SynchronousValues8_T[8];
+
+typedef BusLogic_SynchronousValue_T
+ BusLogic_SynchronousValues_T[BusLogic_MaxTargetDevices];
+
+typedef struct BusLogic_SetupInformation
+{
+ boolean SynchronousInitiationEnabled:1; /* Byte 0 Bit 0 */
+ boolean ParityCheckingEnabled:1; /* Byte 0 Bit 1 */
+ unsigned char :6; /* Byte 0 Bits 2-7 */
+ unsigned char BusTransferRate; /* Byte 1 */
+ unsigned char PreemptTimeOnBus; /* Byte 2 */
+ unsigned char TimeOffBus; /* Byte 3 */
+ unsigned char MailboxCount; /* Byte 4 */
+ unsigned char MailboxAddress[3]; /* Bytes 5-7 */
+ BusLogic_SynchronousValues8_T SynchronousValuesID0to7; /* Bytes 8-15 */
+ unsigned char DisconnectPermittedID0to7; /* Byte 16 */
+ unsigned char Signature; /* Byte 17 */
+ unsigned char CharacterD; /* Byte 18 */
+ unsigned char HostBusType; /* Byte 19 */
+ unsigned char WideTransfersPermittedID0to7; /* Byte 20 */
+ unsigned char WideTransfersActiveID0to7; /* Byte 21 */
+ BusLogic_SynchronousValues8_T SynchronousValuesID8to15; /* Bytes 22-29 */
+ unsigned char DisconnectPermittedID8to15; /* Byte 30 */
+ unsigned char :8; /* Byte 31 */
+ unsigned char WideTransfersPermittedID8to15; /* Byte 32 */
+ unsigned char WideTransfersActiveID8to15; /* Byte 33 */
+}
+BusLogic_SetupInformation_T;
+
+
+/*
+ Define the Initialize Extended Mailbox request structure.
+*/
+
+typedef struct BusLogic_ExtendedMailboxRequest
+{
+ unsigned char MailboxCount; /* Byte 0 */
+ BusLogic_BusAddress_T BaseMailboxAddress; /* Bytes 1-4 */
+}
+__attribute__ ((packed))
+BusLogic_ExtendedMailboxRequest_T;
+
+
+/*
+ Define the Inquire Firmware Version 3rd Digit reply type.
+*/
+
+typedef unsigned char BusLogic_FirmwareVersion3rdDigit_T;
+
+
+/*
+ Define the Inquire Firmware Version Letter reply type.
+*/
+
+typedef unsigned char BusLogic_FirmwareVersionLetter_T;
+
+
+/*
+ Define the Inquire PCI Host Adapter Information reply type. The ISA
+ Compatible I/O Port values are defined here and are also used with
+ the Modify I/O Address command.
+*/
+
+typedef enum BusLogic_ISACompatibleIOPort
+{
+ BusLogic_IO_330 = 0,
+ BusLogic_IO_334 = 1,
+ BusLogic_IO_230 = 2,
+ BusLogic_IO_234 = 3,
+ BusLogic_IO_130 = 4,
+ BusLogic_IO_134 = 5,
+ BusLogic_IO_Disable = 6,
+ BusLogic_IO_Disable2 = 7
+}
+__attribute__ ((packed))
+BusLogic_ISACompatibleIOPort_T;
+
+typedef struct BusLogic_PCIHostAdapterInformation
+{
+ BusLogic_ISACompatibleIOPort_T ISACompatibleIOPort; /* Byte 0 */
+ unsigned char PCIAssignedIRQChannel; /* Byte 1 */
+ boolean LowByteTerminated:1; /* Byte 2 Bit 0 */
+ boolean HighByteTerminated:1; /* Byte 2 Bit 1 */
+ unsigned char :2; /* Byte 2 Bits 2-3 */
+ boolean JP1:1; /* Byte 2 Bit 4 */
+ boolean JP2:1; /* Byte 2 Bit 5 */
+ boolean JP3:1; /* Byte 2 Bit 6 */
+ boolean GenericInfoValid:1; /* Byte 2 Bit 7 */
+ unsigned char :8; /* Byte 3 */
+}
+BusLogic_PCIHostAdapterInformation_T;
+
+
+/*
+ Define the Inquire Host Adapter Model Number reply type.
+*/
+
+typedef unsigned char BusLogic_HostAdapterModelNumber_T[5];
+
+
+/*
+ Define the Inquire Synchronous Period reply type. For each Target Device,
+ a byte is returned which represents the Synchronous Transfer Period in units
+ of 10 nanoseconds.
+*/
+
+typedef unsigned char BusLogic_SynchronousPeriod_T[BusLogic_MaxTargetDevices];
+
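+
+/*
+  Illustrative sketch, not part of the original driver: converting one entry
+  of the Inquire Synchronous Period reply into nanoseconds.  The helper name
+  is hypothetical.
+*/
+
+static inline unsigned int BusLogic_ExampleSynchronousPeriodNS(
+  BusLogic_SynchronousPeriod_T SynchronousPeriod, int TargetID)
+{
+  /* Each reply byte is expressed in units of 10 nanoseconds. */
+  return 10 * (unsigned int) SynchronousPeriod[TargetID];
+}
+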
+
+/*
+ Define the Inquire Extended Setup Information reply structure.
+*/
+
+typedef struct BusLogic_ExtendedSetupInformation
+{
+ unsigned char BusType; /* Byte 0 */
+ unsigned char BIOS_Address; /* Byte 1 */
+ unsigned short ScatterGatherLimit; /* Bytes 2-3 */
+ unsigned char MailboxCount; /* Byte 4 */
+ BusLogic_BusAddress_T BaseMailboxAddress; /* Bytes 5-8 */
+ struct { unsigned char :2; /* Byte 9 Bits 0-1 */
+ boolean FastOnEISA:1; /* Byte 9 Bit 2 */
+ unsigned char :3; /* Byte 9 Bits 3-5 */
+ boolean LevelSensitiveInterrupt:1; /* Byte 9 Bit 6 */
+ unsigned char :1; } Misc; /* Byte 9 Bit 7 */
+ unsigned char FirmwareRevision[3]; /* Bytes 10-12 */
+ boolean HostWideSCSI:1; /* Byte 13 Bit 0 */
+ boolean HostDifferentialSCSI:1; /* Byte 13 Bit 1 */
+ boolean HostSupportsSCAM:1; /* Byte 13 Bit 2 */
+ boolean HostUltraSCSI:1; /* Byte 13 Bit 3 */
+ boolean HostSmartTermination:1; /* Byte 13 Bit 4 */
+ unsigned char :3; /* Byte 13 Bits 5-7 */
+}
+__attribute__ ((packed))
+BusLogic_ExtendedSetupInformation_T;
+
+
+/*
+ Define the Enable Strict Round Robin Mode request type.
+*/
+
+typedef enum BusLogic_RoundRobinModeRequest
+{
+ BusLogic_AggressiveRoundRobinMode = 0,
+ BusLogic_StrictRoundRobinMode = 1
+}
+__attribute__ ((packed))
+BusLogic_RoundRobinModeRequest_T;
+
+
+/*
+ Define the Fetch Host Adapter Local RAM request type.
+*/
+
+#define BusLogic_BIOS_BaseOffset 0
+#define BusLogic_AutoSCSI_BaseOffset 64
+
+typedef struct BusLogic_FetchHostAdapterLocalRAMRequest
+{
+ unsigned char ByteOffset; /* Byte 0 */
+ unsigned char ByteCount; /* Byte 1 */
+}
+BusLogic_FetchHostAdapterLocalRAMRequest_T;
+
+
+/*
+ Define the Host Adapter Local RAM AutoSCSI structure.
+*/
+
+typedef struct BusLogic_AutoSCSIData
+{
+ unsigned char InternalFactorySignature[2]; /* Bytes 0-1 */
+ unsigned char InformationByteCount; /* Byte 2 */
+ unsigned char HostAdapterType[6]; /* Bytes 3-8 */
+ unsigned char :8; /* Byte 9 */
+ boolean FloppyEnabled:1; /* Byte 10 Bit 0 */
+ boolean FloppySecondary:1; /* Byte 10 Bit 1 */
+ boolean LevelSensitiveInterrupt:1; /* Byte 10 Bit 2 */
+ unsigned char :2; /* Byte 10 Bits 3-4 */
+ unsigned char SystemRAMAreaForBIOS:3; /* Byte 10 Bits 5-7 */
+ unsigned char DMA_Channel:7; /* Byte 11 Bits 0-6 */
+ boolean DMA_AutoConfiguration:1; /* Byte 11 Bit 7 */
+ unsigned char IRQ_Channel:7; /* Byte 12 Bits 0-6 */
+ boolean IRQ_AutoConfiguration:1; /* Byte 12 Bit 7 */
+ unsigned char DMA_TransferRate; /* Byte 13 */
+ unsigned char SCSI_ID; /* Byte 14 */
+ boolean LowByteTerminated:1; /* Byte 15 Bit 0 */
+ boolean ParityCheckingEnabled:1; /* Byte 15 Bit 1 */
+ boolean HighByteTerminated:1; /* Byte 15 Bit 2 */
+ boolean NoisyCablingEnvironment:1; /* Byte 15 Bit 3 */
+ boolean FastSynchronousNegotiation:1; /* Byte 15 Bit 4 */
+ boolean BusResetEnabled:1; /* Byte 15 Bit 5 */
+ boolean :1; /* Byte 15 Bit 6 */
+ boolean ActiveNegationEnabled:1; /* Byte 15 Bit 7 */
+ unsigned char BusOnDelay; /* Byte 16 */
+ unsigned char BusOffDelay; /* Byte 17 */
+ boolean HostAdapterBIOSEnabled:1; /* Byte 18 Bit 0 */
+ boolean BIOSRedirectionOfINT19Enabled:1; /* Byte 18 Bit 1 */
+ boolean ExtendedTranslationEnabled:1; /* Byte 18 Bit 2 */
+ boolean MapRemovableAsFixedEnabled:1; /* Byte 18 Bit 3 */
+ boolean :1; /* Byte 18 Bit 4 */
+ boolean BIOSSupportsMoreThan2DrivesEnabled:1; /* Byte 18 Bit 5 */
+ boolean BIOSInterruptModeEnabled:1; /* Byte 18 Bit 6 */
+  boolean FlopticalSupportEnabled:1;			/* Byte 18 Bit 7 */
+ unsigned short DeviceEnabled; /* Bytes 19-20 */
+ unsigned short WidePermitted; /* Bytes 21-22 */
+ unsigned short FastPermitted; /* Bytes 23-24 */
+ unsigned short SynchronousPermitted; /* Bytes 25-26 */
+ unsigned short DisconnectPermitted; /* Bytes 27-28 */
+ unsigned short SendStartUnitCommand; /* Bytes 29-30 */
+ unsigned short IgnoreInBIOSScan; /* Bytes 31-32 */
+ unsigned char PCIInterruptPin:2; /* Byte 33 Bits 0-1 */
+ unsigned char HostAdapterIOPortAddress:2; /* Byte 33 Bits 2-3 */
+ boolean StrictRoundRobinModeEnabled:1; /* Byte 33 Bit 4 */
+ boolean VESABusSpeedGreaterThan33MHz:1; /* Byte 33 Bit 5 */
+ boolean VESABurstWriteEnabled:1; /* Byte 33 Bit 6 */
+ boolean VESABurstReadEnabled:1; /* Byte 33 Bit 7 */
+ unsigned short UltraPermitted; /* Bytes 34-35 */
+ unsigned int :32; /* Bytes 36-39 */
+ unsigned char :8; /* Byte 40 */
+ unsigned char AutoSCSIMaximumLUN; /* Byte 41 */
+ boolean :1; /* Byte 42 Bit 0 */
+ boolean SCAM_Dominant:1; /* Byte 42 Bit 1 */
+ boolean SCAM_Enabled:1; /* Byte 42 Bit 2 */
+ boolean SCAM_Level2:1; /* Byte 42 Bit 3 */
+ unsigned char :4; /* Byte 42 Bits 4-7 */
+ boolean INT13ExtensionEnabled:1; /* Byte 43 Bit 0 */
+ boolean :1; /* Byte 43 Bit 1 */
+ boolean CDROMBootEnabled:1; /* Byte 43 Bit 2 */
+ unsigned char :5; /* Byte 43 Bits 3-7 */
+ unsigned char BootTargetID:4; /* Byte 44 Bits 0-3 */
+ unsigned char BootChannel:4; /* Byte 44 Bits 4-7 */
+ unsigned char ForceBusDeviceScanningOrder:1; /* Byte 45 Bit 0 */
+ unsigned char :7; /* Byte 45 Bits 1-7 */
+ unsigned short NonTaggedToAlternateLUNPermitted; /* Bytes 46-47 */
+ unsigned short RenegotiateSyncAfterCheckCondition; /* Bytes 48-49 */
+ unsigned char Reserved[10]; /* Bytes 50-59 */
+ unsigned char ManufacturingDiagnostic[2]; /* Bytes 60-61 */
+ unsigned short Checksum; /* Bytes 62-63 */
+}
+__attribute__ ((packed))
+BusLogic_AutoSCSIData_T;
+
+
+/*
+ Define the Host Adapter Local RAM Auto SCSI Byte 45 structure.
+*/
+
+typedef struct BusLogic_AutoSCSIByte45
+{
+ unsigned char ForceBusDeviceScanningOrder:1; /* Bit 0 */
+ unsigned char :7; /* Bits 1-7 */
+}
+BusLogic_AutoSCSIByte45_T;
+
+
+/*
+ Define the Host Adapter Local RAM BIOS Drive Map Byte structure.
+*/
+
+#define BusLogic_BIOS_DriveMapOffset 17
+
+typedef struct BusLogic_BIOSDriveMapByte
+{
+ unsigned char TargetIDBit3:1; /* Bit 0 */
+ unsigned char :2; /* Bits 1-2 */
+ BusLogic_BIOS_DiskGeometryTranslation_T DiskGeometry:2; /* Bits 3-4 */
+ unsigned char TargetID:3; /* Bits 5-7 */
+}
+BusLogic_BIOSDriveMapByte_T;
+
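+
+/*
+  Illustrative sketch, not part of the original driver: forming a Fetch Host
+  Adapter Local RAM request that reads the BIOS Drive Map Byte for one drive.
+  The helper name and the DriveMapIndex parameter are hypothetical.
+*/
+
+static inline void BusLogic_ExamplePrepareDriveMapFetch(
+  BusLogic_FetchHostAdapterLocalRAMRequest_T *Request,
+  unsigned char DriveMapIndex)
+{
+  /* The Drive Map Bytes reside at BusLogic_BIOS_DriveMapOffset within the
+     BIOS area of Host Adapter Local RAM. */
+  Request->ByteOffset = BusLogic_BIOS_BaseOffset
+                        + BusLogic_BIOS_DriveMapOffset + DriveMapIndex;
+  Request->ByteCount = sizeof(BusLogic_BIOSDriveMapByte_T);
+}
+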
+
+/*
+ Define the Modify I/O Address request type. On PCI Host Adapters, the
+ Modify I/O Address command allows modification of the ISA compatible I/O
+ Address that the Host Adapter responds to; it does not affect the PCI
+ compliant I/O Address assigned at system initialization.
+*/
+
+typedef BusLogic_ISACompatibleIOPort_T BusLogic_ModifyIOAddressRequest_T;
+
+
+/*
+ Define the Set CCB Format request type. Extended LUN Format CCBs are
+ necessary to support more than 8 Logical Units per Target Device.
+*/
+
+typedef enum BusLogic_SetCCBFormatRequest
+{
+ BusLogic_LegacyLUNFormatCCB = 0,
+ BusLogic_ExtendedLUNFormatCCB = 1
+}
+__attribute__ ((packed))
+BusLogic_SetCCBFormatRequest_T;
+
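+
+/*
+  Illustrative sketch, not part of the original driver: choosing between the
+  two CCB formats from the number of Logical Units per Target Device that
+  must be supported.  The helper name is hypothetical.
+*/
+
+static inline BusLogic_SetCCBFormatRequest_T
+BusLogic_ExampleChooseCCBFormat(unsigned short MaxLogicalUnits)
+{
+  /* Legacy LUN Format CCBs can only address Logical Units 0 through 7. */
+  return (MaxLogicalUnits > 8
+          ? BusLogic_ExtendedLUNFormatCCB
+          : BusLogic_LegacyLUNFormatCCB);
+}
+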
+
+/*
+ Define the Requested Reply Length type used by the Inquire Setup Information,
+ Inquire Host Adapter Model Number, Inquire Synchronous Period, and Inquire
+ Extended Setup Information commands.
+*/
+
+typedef unsigned char BusLogic_RequestedReplyLength_T;
+
+
+/*
+ Define the Outgoing Mailbox Action Codes.
+*/
+
+typedef enum
+{
+ BusLogic_OutgoingMailboxFree = 0x00,
+ BusLogic_MailboxStartCommand = 0x01,
+ BusLogic_MailboxAbortCommand = 0x02
+}
+__attribute__ ((packed))
+BusLogic_ActionCode_T;
+
+
+/*
+ Define the Incoming Mailbox Completion Codes. The MultiMaster Firmware
+ only uses codes 0 - 4. The FlashPoint SCCB Manager has no mailboxes, so
+ completion codes are stored in the CCB; it only uses codes 1, 2, 4, and 5.
+*/
+
+typedef enum
+{
+ BusLogic_IncomingMailboxFree = 0x00,
+ BusLogic_CommandCompletedWithoutError = 0x01,
+ BusLogic_CommandAbortedAtHostRequest = 0x02,
+ BusLogic_AbortedCommandNotFound = 0x03,
+ BusLogic_CommandCompletedWithError = 0x04,
+ BusLogic_InvalidCCB = 0x05
+}
+__attribute__ ((packed))
+BusLogic_CompletionCode_T;
+
+
+/*
+ Define the Command Control Block (CCB) Opcodes.
+*/
+
+typedef enum
+{
+ BusLogic_InitiatorCCB = 0x00,
+ BusLogic_TargetCCB = 0x01,
+ BusLogic_InitiatorCCB_ScatterGather = 0x02,
+ BusLogic_InitiatorCCB_ResidualDataLength = 0x03,
+ BusLogic_InitiatorCCB_ScatterGatherResidual = 0x04,
+ BusLogic_BusDeviceReset = 0x81
+}
+__attribute__ ((packed))
+BusLogic_CCB_Opcode_T;
+
+
+/*
+ Define the CCB Data Direction Codes.
+*/
+
+typedef enum
+{
+ BusLogic_UncheckedDataTransfer = 0,
+ BusLogic_DataInLengthChecked = 1,
+ BusLogic_DataOutLengthChecked = 2,
+ BusLogic_NoDataTransfer = 3
+}
+BusLogic_DataDirection_T;
+
+
+/*
+ Define the Host Adapter Status Codes. The MultiMaster Firmware does not
+ return status code 0x0C; it uses 0x12 for both overruns and underruns.
+*/
+
+typedef enum
+{
+ BusLogic_CommandCompletedNormally = 0x00,
+ BusLogic_LinkedCommandCompleted = 0x0A,
+ BusLogic_LinkedCommandCompletedWithFlag = 0x0B,
+ BusLogic_DataUnderRun = 0x0C,
+ BusLogic_SCSISelectionTimeout = 0x11,
+ BusLogic_DataOverRun = 0x12,
+ BusLogic_UnexpectedBusFree = 0x13,
+ BusLogic_InvalidBusPhaseRequested = 0x14,
+ BusLogic_InvalidOutgoingMailboxActionCode = 0x15,
+ BusLogic_InvalidCommandOperationCode = 0x16,
+ BusLogic_LinkedCCBhasInvalidLUN = 0x17,
+ BusLogic_InvalidCommandParameter = 0x1A,
+ BusLogic_AutoRequestSenseFailed = 0x1B,
+ BusLogic_TaggedQueuingMessageRejected = 0x1C,
+ BusLogic_UnsupportedMessageReceived = 0x1D,
+ BusLogic_HostAdapterHardwareFailed = 0x20,
+ BusLogic_TargetFailedResponseToATN = 0x21,
+ BusLogic_HostAdapterAssertedRST = 0x22,
+ BusLogic_OtherDeviceAssertedRST = 0x23,
+ BusLogic_TargetDeviceReconnectedImproperly = 0x24,
+ BusLogic_HostAdapterAssertedBusDeviceReset = 0x25,
+ BusLogic_AbortQueueGenerated = 0x26,
+ BusLogic_HostAdapterSoftwareError = 0x27,
+ BusLogic_HostAdapterHardwareTimeoutError = 0x30,
+ BusLogic_SCSIParityErrorDetected = 0x34
+}
+__attribute__ ((packed))
+BusLogic_HostAdapterStatus_T;
+
+
+/*
+ Define the SCSI Target Device Status Codes.
+*/
+
+typedef enum
+{
+ BusLogic_OperationGood = 0x00,
+ BusLogic_CheckCondition = 0x02,
+ BusLogic_DeviceBusy = 0x08
+}
+__attribute__ ((packed))
+BusLogic_TargetDeviceStatus_T;
+
+
+/*
+ Define the Queue Tag Codes.
+*/
+
+typedef enum
+{
+ BusLogic_SimpleQueueTag = 0,
+ BusLogic_HeadOfQueueTag = 1,
+ BusLogic_OrderedQueueTag = 2,
+ BusLogic_ReservedQT = 3
+}
+BusLogic_QueueTag_T;
+
+
+/*
+ Define the SCSI Command Descriptor Block (CDB).
+*/
+
+#define BusLogic_CDB_MaxLength 12
+
+typedef unsigned char SCSI_CDB_T[BusLogic_CDB_MaxLength];
+
+
+/*
+ Define the Scatter/Gather Segment structure required by the MultiMaster
+ Firmware Interface and the FlashPoint SCCB Manager.
+*/
+
+typedef struct BusLogic_ScatterGatherSegment
+{
+ BusLogic_ByteCount_T SegmentByteCount; /* Bytes 0-3 */
+ BusLogic_BusAddress_T SegmentDataPointer; /* Bytes 4-7 */
+}
+BusLogic_ScatterGatherSegment_T;
+
+
+/*
+ Define the Driver CCB Status Codes.
+*/
+
+typedef enum
+{
+ BusLogic_CCB_Free = 0,
+ BusLogic_CCB_Active = 1,
+ BusLogic_CCB_Completed = 2,
+ BusLogic_CCB_Reset = 3
+}
+__attribute__ ((packed))
+BusLogic_CCB_Status_T;
+
+
+/*
+ Define the 32 Bit Mode Command Control Block (CCB) structure. The first 40
+ bytes are defined by and common to both the MultiMaster Firmware and the
+ FlashPoint SCCB Manager. The next 60 bytes are defined by the FlashPoint
+ SCCB Manager. The remaining components are defined by the Linux BusLogic
+ Driver. Extended LUN Format CCBs differ from Legacy LUN Format 32 Bit Mode
+ CCBs only in having the TagEnable and QueueTag fields moved from byte 17 to
+ byte 1, and the Logical Unit field in byte 17 expanded to 6 bits. In theory,
+ Extended LUN Format CCBs can support up to 64 Logical Units, but in practice
+ many devices will respond improperly to Logical Units between 32 and 63, and
+ the SCSI-2 specification defines Bit 5 as LUNTAR. Extended LUN Format CCBs
+ are used by recent versions of the MultiMaster Firmware, as well as by the
+ FlashPoint SCCB Manager; the FlashPoint SCCB Manager only supports 32 Logical
+ Units. Since 64 Logical Units are unlikely to be needed in practice, and
+ since they are problematic for the above reasons, and since limiting them to
+ 5 bits simplifies the CCB structure definition, this driver only supports
+ 32 Logical Units per Target Device.
+*/
+
+typedef struct BusLogic_CCB
+{
+ /*
+ MultiMaster Firmware and FlashPoint SCCB Manager Common Portion.
+ */
+ BusLogic_CCB_Opcode_T Opcode; /* Byte 0 */
+ unsigned char :3; /* Byte 1 Bits 0-2 */
+ BusLogic_DataDirection_T DataDirection:2; /* Byte 1 Bits 3-4 */
+ boolean TagEnable:1; /* Byte 1 Bit 5 */
+ BusLogic_QueueTag_T QueueTag:2; /* Byte 1 Bits 6-7 */
+ unsigned char CDB_Length; /* Byte 2 */
+ unsigned char SenseDataLength; /* Byte 3 */
+ BusLogic_ByteCount_T DataLength; /* Bytes 4-7 */
+ BusLogic_BusAddress_T DataPointer; /* Bytes 8-11 */
+ unsigned char :8; /* Byte 12 */
+ unsigned char :8; /* Byte 13 */
+ BusLogic_HostAdapterStatus_T HostAdapterStatus; /* Byte 14 */
+ BusLogic_TargetDeviceStatus_T TargetDeviceStatus; /* Byte 15 */
+ unsigned char TargetID; /* Byte 16 */
+ unsigned char LogicalUnit:5; /* Byte 17 Bits 0-4 */
+ boolean LegacyTagEnable:1; /* Byte 17 Bit 5 */
+ BusLogic_QueueTag_T LegacyQueueTag:2; /* Byte 17 Bits 6-7 */
+ SCSI_CDB_T CDB; /* Bytes 18-29 */
+ unsigned char :8; /* Byte 30 */
+ unsigned char :8; /* Byte 31 */
+ unsigned int :32; /* Bytes 32-35 */
+ BusLogic_BusAddress_T SenseDataPointer; /* Bytes 36-39 */
+ /*
+ FlashPoint SCCB Manager Defined Portion.
+ */
+ void (*CallbackFunction)(struct BusLogic_CCB *); /* Bytes 40-43 */
+ BusLogic_Base_Address_T BaseAddress; /* Bytes 44-47 */
+ BusLogic_CompletionCode_T CompletionCode; /* Byte 48 */
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+ unsigned char :8; /* Byte 49 */
+ unsigned short OS_Flags; /* Bytes 50-51 */
+ unsigned char Private[48]; /* Bytes 52-99 */
+#endif
+ /*
+ BusLogic Linux Driver Defined Portion.
+ */
+ boolean AllocationGroupHead;
+ BusLogic_CCB_Status_T Status;
+ unsigned long SerialNumber;
+ SCSI_Command_T *Command;
+ struct BusLogic_HostAdapter *HostAdapter;
+ struct BusLogic_CCB *Next;
+ struct BusLogic_CCB *NextAll;
+ BusLogic_ScatterGatherSegment_T
+ ScatterGatherList[BusLogic_ScatterGatherLimit];
+}
+BusLogic_CCB_T;
+
+
+/*
+ Define the 32 Bit Mode Outgoing Mailbox structure.
+*/
+
+typedef struct BusLogic_OutgoingMailbox
+{
+ BusLogic_BusAddress_T CCB; /* Bytes 0-3 */
+ unsigned int :24; /* Bytes 4-6 */
+ BusLogic_ActionCode_T ActionCode; /* Byte 7 */
+}
+BusLogic_OutgoingMailbox_T;
+
+
+/*
+ Define the 32 Bit Mode Incoming Mailbox structure.
+*/
+
+typedef struct BusLogic_IncomingMailbox
+{
+ BusLogic_BusAddress_T CCB; /* Bytes 0-3 */
+ BusLogic_HostAdapterStatus_T HostAdapterStatus; /* Byte 4 */
+ BusLogic_TargetDeviceStatus_T TargetDeviceStatus; /* Byte 5 */
+ unsigned char :8; /* Byte 6 */
+ BusLogic_CompletionCode_T CompletionCode; /* Byte 7 */
+}
+BusLogic_IncomingMailbox_T;
+
+
+/*
+ Define the BusLogic Driver Options structure.
+*/
+
+typedef struct BusLogic_DriverOptions
+{
+ unsigned short TaggedQueuingPermitted;
+ unsigned short TaggedQueuingPermittedMask;
+ unsigned short BusSettleTime;
+ BusLogic_LocalOptions_T LocalOptions;
+ unsigned char CommonQueueDepth;
+ unsigned char QueueDepth[BusLogic_MaxTargetDevices];
+ BusLogic_ErrorRecoveryStrategy_T
+ ErrorRecoveryStrategy[BusLogic_MaxTargetDevices];
+}
+BusLogic_DriverOptions_T;
+
+
+/*
+ Define the Host Adapter Target Flags structure.
+*/
+
+typedef struct BusLogic_TargetFlags
+{
+ boolean TargetExists:1;
+ boolean TaggedQueuingSupported:1;
+ boolean WideTransfersSupported:1;
+ boolean TaggedQueuingActive:1;
+ boolean WideTransfersActive:1;
+ boolean CommandSuccessfulFlag:1;
+ boolean TargetInfoReported:1;
+}
+BusLogic_TargetFlags_T;
+
+
+/*
+ Define the Host Adapter Target Statistics structure.
+*/
+
+#define BusLogic_SizeBuckets 10
+
+typedef unsigned int BusLogic_CommandSizeBuckets_T[BusLogic_SizeBuckets];
+
+typedef struct BusLogic_TargetStatistics
+{
+ unsigned int CommandsAttempted;
+ unsigned int CommandsCompleted;
+ unsigned int ReadCommands;
+ unsigned int WriteCommands;
+ BusLogic_ByteCounter_T TotalBytesRead;
+ BusLogic_ByteCounter_T TotalBytesWritten;
+ BusLogic_CommandSizeBuckets_T ReadCommandSizeBuckets;
+ BusLogic_CommandSizeBuckets_T WriteCommandSizeBuckets;
+ unsigned short CommandAbortsRequested;
+ unsigned short CommandAbortsAttempted;
+ unsigned short CommandAbortsCompleted;
+ unsigned short BusDeviceResetsRequested;
+ unsigned short BusDeviceResetsAttempted;
+ unsigned short BusDeviceResetsCompleted;
+ unsigned short HostAdapterResetsRequested;
+ unsigned short HostAdapterResetsAttempted;
+ unsigned short HostAdapterResetsCompleted;
+}
+BusLogic_TargetStatistics_T;
+
+
+/*
+ Define the FlashPoint Card Handle data type.
+*/
+
+#define FlashPoint_BadCardHandle 0xFFFFFFFF
+
+typedef unsigned int FlashPoint_CardHandle_T;
+
+
+/*
+ Define the FlashPoint Information structure. This structure is defined
+ by the FlashPoint SCCB Manager.
+*/
+
+typedef struct FlashPoint_Info
+{
+ BusLogic_Base_Address_T BaseAddress; /* Bytes 0-3 */
+ boolean Present; /* Byte 4 */
+ unsigned char IRQ_Channel; /* Byte 5 */
+ unsigned char SCSI_ID; /* Byte 6 */
+ unsigned char SCSI_LUN; /* Byte 7 */
+ unsigned short FirmwareRevision; /* Bytes 8-9 */
+ unsigned short SynchronousPermitted; /* Bytes 10-11 */
+ unsigned short FastPermitted; /* Bytes 12-13 */
+ unsigned short UltraPermitted; /* Bytes 14-15 */
+ unsigned short DisconnectPermitted; /* Bytes 16-17 */
+ unsigned short WidePermitted; /* Bytes 18-19 */
+ boolean ParityCheckingEnabled:1; /* Byte 20 Bit 0 */
+ boolean HostWideSCSI:1; /* Byte 20 Bit 1 */
+ boolean HostSoftReset:1; /* Byte 20 Bit 2 */
+ boolean ExtendedTranslationEnabled:1; /* Byte 20 Bit 3 */
+ boolean LowByteTerminated:1; /* Byte 20 Bit 4 */
+ boolean HighByteTerminated:1; /* Byte 20 Bit 5 */
+ boolean ReportDataUnderrun:1; /* Byte 20 Bit 6 */
+ boolean SCAM_Enabled:1; /* Byte 20 Bit 7 */
+ boolean SCAM_Level2:1; /* Byte 21 Bit 0 */
+ unsigned char :7; /* Byte 21 Bits 1-7 */
+ unsigned char Family; /* Byte 22 */
+ unsigned char BusType; /* Byte 23 */
+ unsigned char ModelNumber[3]; /* Bytes 24-26 */
+ unsigned char RelativeCardNumber; /* Byte 27 */
+ unsigned char Reserved[4]; /* Bytes 28-31 */
+ unsigned int OS_Reserved; /* Bytes 32-35 */
+ unsigned char TranslationInfo[4]; /* Bytes 36-39 */
+ unsigned int Reserved2[5]; /* Bytes 40-59 */
+ unsigned int SecondaryRange; /* Bytes 60-63 */
+}
+FlashPoint_Info_T;
+
+
+/*
+ Define the BusLogic Driver Host Adapter structure.
+*/
+
+typedef struct BusLogic_HostAdapter
+{
+ SCSI_Host_T *SCSI_Host;
+ BusLogic_HostAdapterType_T HostAdapterType;
+ BusLogic_HostAdapterBusType_T HostAdapterBusType;
+ BusLogic_IO_Address_T IO_Address;
+ BusLogic_PCI_Address_T PCI_Address;
+ unsigned short AddressCount;
+ unsigned char HostNumber;
+ unsigned char ModelName[9];
+ unsigned char FirmwareVersion[6];
+ unsigned char FullModelName[18];
+ unsigned char Bus;
+ unsigned char Device;
+ unsigned char IRQ_Channel;
+ unsigned char DMA_Channel;
+ unsigned char SCSI_ID;
+ boolean IRQ_ChannelAcquired:1;
+ boolean DMA_ChannelAcquired:1;
+ boolean ExtendedTranslationEnabled:1;
+ boolean ParityCheckingEnabled:1;
+ boolean BusResetEnabled:1;
+ boolean LevelSensitiveInterrupt:1;
+ boolean HostWideSCSI:1;
+ boolean HostDifferentialSCSI:1;
+ boolean HostSupportsSCAM:1;
+ boolean HostUltraSCSI:1;
+ boolean ExtendedLUNSupport:1;
+ boolean TerminationInfoValid:1;
+ boolean LowByteTerminated:1;
+ boolean HighByteTerminated:1;
+ boolean BounceBuffersRequired:1;
+ boolean StrictRoundRobinModeSupport:1;
+ boolean SCAM_Enabled:1;
+ boolean SCAM_Level2:1;
+ boolean HostAdapterInitialized:1;
+ boolean HostAdapterExternalReset:1;
+ boolean HostAdapterInternalError:1;
+ boolean ProcessCompletedCCBsActive;
+ volatile boolean HostAdapterCommandCompleted;
+ unsigned short HostAdapterScatterGatherLimit;
+ unsigned short DriverScatterGatherLimit;
+ unsigned short MaxTargetDevices;
+ unsigned short MaxLogicalUnits;
+ unsigned short MailboxCount;
+ unsigned short InitialCCBs;
+ unsigned short IncrementalCCBs;
+ unsigned short AllocatedCCBs;
+ unsigned short DriverQueueDepth;
+ unsigned short HostAdapterQueueDepth;
+ unsigned short UntaggedQueueDepth;
+ unsigned short CommonQueueDepth;
+ unsigned short BusSettleTime;
+ unsigned short SynchronousPermitted;
+ unsigned short FastPermitted;
+ unsigned short UltraPermitted;
+ unsigned short WidePermitted;
+ unsigned short DisconnectPermitted;
+ unsigned short TaggedQueuingPermitted;
+ unsigned short ExternalHostAdapterResets;
+ unsigned short HostAdapterInternalErrors;
+ unsigned short TargetDeviceCount;
+ unsigned short MessageBufferLength;
+ BusLogic_BusAddress_T BIOS_Address;
+ BusLogic_DriverOptions_T *DriverOptions;
+ FlashPoint_Info_T FlashPointInfo;
+ FlashPoint_CardHandle_T CardHandle;
+ struct BusLogic_HostAdapter *Next;
+ BusLogic_CCB_T *All_CCBs;
+ BusLogic_CCB_T *Free_CCBs;
+ BusLogic_CCB_T *FirstCompletedCCB;
+ BusLogic_CCB_T *LastCompletedCCB;
+ BusLogic_CCB_T *BusDeviceResetPendingCCB[BusLogic_MaxTargetDevices];
+ BusLogic_ErrorRecoveryStrategy_T
+ ErrorRecoveryStrategy[BusLogic_MaxTargetDevices];
+ BusLogic_TargetFlags_T TargetFlags[BusLogic_MaxTargetDevices];
+ unsigned char QueueDepth[BusLogic_MaxTargetDevices];
+ unsigned char SynchronousPeriod[BusLogic_MaxTargetDevices];
+ unsigned char SynchronousOffset[BusLogic_MaxTargetDevices];
+ unsigned char ActiveCommands[BusLogic_MaxTargetDevices];
+ unsigned int CommandsSinceReset[BusLogic_MaxTargetDevices];
+ unsigned long LastSequencePoint[BusLogic_MaxTargetDevices];
+ unsigned long LastResetAttempted[BusLogic_MaxTargetDevices];
+ unsigned long LastResetCompleted[BusLogic_MaxTargetDevices];
+ BusLogic_OutgoingMailbox_T *FirstOutgoingMailbox;
+ BusLogic_OutgoingMailbox_T *LastOutgoingMailbox;
+ BusLogic_OutgoingMailbox_T *NextOutgoingMailbox;
+ BusLogic_IncomingMailbox_T *FirstIncomingMailbox;
+ BusLogic_IncomingMailbox_T *LastIncomingMailbox;
+ BusLogic_IncomingMailbox_T *NextIncomingMailbox;
+ BusLogic_TargetStatistics_T TargetStatistics[BusLogic_MaxTargetDevices];
+ unsigned char MailboxSpace[BusLogic_MaxMailboxes
+ * (sizeof(BusLogic_OutgoingMailbox_T)
+ + sizeof(BusLogic_IncomingMailbox_T))];
+ char MessageBuffer[BusLogic_MessageBufferSize];
+}
+BusLogic_HostAdapter_T;
+
+
+/*
+ Define a structure for the BIOS Disk Parameters.
+*/
+
+typedef struct BIOS_DiskParameters
+{
+ int Heads;
+ int Sectors;
+ int Cylinders;
+}
+BIOS_DiskParameters_T;
+
+
+/*
+ Define a structure for the SCSI Inquiry command results.
+*/
+
+typedef struct SCSI_Inquiry
+{
+ unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
+ unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
+ unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */
+ boolean RMB:1; /* Byte 1 Bit 7 */
+ unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */
+ unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */
+ unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */
+ unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */
+ unsigned char :2; /* Byte 3 Bits 4-5 */
+ boolean TrmIOP:1; /* Byte 3 Bit 6 */
+ boolean AENC:1; /* Byte 3 Bit 7 */
+ unsigned char AdditionalLength; /* Byte 4 */
+ unsigned char :8; /* Byte 5 */
+ unsigned char :8; /* Byte 6 */
+ boolean SftRe:1; /* Byte 7 Bit 0 */
+ boolean CmdQue:1; /* Byte 7 Bit 1 */
+ boolean :1; /* Byte 7 Bit 2 */
+ boolean Linked:1; /* Byte 7 Bit 3 */
+ boolean Sync:1; /* Byte 7 Bit 4 */
+ boolean WBus16:1; /* Byte 7 Bit 5 */
+ boolean WBus32:1; /* Byte 7 Bit 6 */
+ boolean RelAdr:1; /* Byte 7 Bit 7 */
+ unsigned char VendorIdentification[8]; /* Bytes 8-15 */
+ unsigned char ProductIdentification[16]; /* Bytes 16-31 */
+ unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */
+}
+SCSI_Inquiry_T;
+
+
+/*
+ BusLogic_AcquireHostAdapterLock acquires exclusive access to Host Adapter.
+*/
+
+static inline
+void BusLogic_AcquireHostAdapterLock(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+ save_flags(*ProcessorFlags);
+ cli();
+}
+
+
+/*
+ BusLogic_ReleaseHostAdapterLock releases exclusive access to Host Adapter.
+*/
+
+static inline
+void BusLogic_ReleaseHostAdapterLock(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+ restore_flags(*ProcessorFlags);
+}
+
+
+/*
+ BusLogic_AcquireHostAdapterLockIH acquires exclusive access to Host Adapter,
+ but is only called from the interrupt handler when interrupts are disabled.
+*/
+
+static inline
+void BusLogic_AcquireHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+}
+
+
+/*
+ BusLogic_ReleaseHostAdapterLockIH releases exclusive access to Host Adapter,
+ but is only called from the interrupt handler when interrupts are disabled.
+*/
+
+static inline
+void BusLogic_ReleaseHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
+ ProcessorFlags_T *ProcessorFlags)
+{
+}
+
+
+/*
+ Define functions to provide an abstraction for reading and writing the
+ Host Adapter I/O Registers.
+*/
+
+static inline
+void BusLogic_SCSIBusReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.SCSIBusReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+void BusLogic_InterruptReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.InterruptReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+void BusLogic_SoftReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.SoftReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+void BusLogic_HardReset(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_ControlRegister_T ControlRegister;
+ ControlRegister.All = 0;
+ ControlRegister.Bits.HardReset = true;
+ outb(ControlRegister.All,
+ HostAdapter->IO_Address + BusLogic_ControlRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadStatusRegister(BusLogic_HostAdapter_T *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_StatusRegisterOffset);
+}
+
+static inline
+void BusLogic_WriteCommandParameterRegister(BusLogic_HostAdapter_T
+ *HostAdapter,
+ unsigned char Value)
+{
+ outb(Value,
+ HostAdapter->IO_Address + BusLogic_CommandParameterRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadDataInRegister(BusLogic_HostAdapter_T *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_DataInRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadInterruptRegister(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_InterruptRegisterOffset);
+}
+
+static inline
+unsigned char BusLogic_ReadGeometryRegister(BusLogic_HostAdapter_T
+ *HostAdapter)
+{
+ return inb(HostAdapter->IO_Address + BusLogic_GeometryRegisterOffset);
+}
+
+
+/*
+ BusLogic_StartMailboxCommand issues an Execute Mailbox Command, which
+ notifies the Host Adapter that an entry has been made in an Outgoing
+ Mailbox.
+*/
+
+static inline
+void BusLogic_StartMailboxCommand(BusLogic_HostAdapter_T *HostAdapter)
+{
+ BusLogic_WriteCommandParameterRegister(HostAdapter,
+ BusLogic_ExecuteMailboxCommand);
+}
+
+
+/*
+ BusLogic_Delay waits for Seconds to elapse.
+*/
+
+static inline void BusLogic_Delay(int Seconds)
+{
+ int Milliseconds = 1000 * Seconds;
+ unsigned long ProcessorFlags;
+ save_flags(ProcessorFlags);
+ sti();
+ while (--Milliseconds >= 0) udelay(1000);
+ restore_flags(ProcessorFlags);
+}
+
+
+/*
+ Virtual_to_Bus and Bus_to_Virtual map between Kernel Virtual Addresses
+ and PCI/VLB/EISA/ISA Bus Addresses.
+*/
+
+static inline BusLogic_BusAddress_T Virtual_to_Bus(void *VirtualAddress)
+{
+ return (BusLogic_BusAddress_T) virt_to_bus(VirtualAddress);
+}
+
+static inline void *Bus_to_Virtual(BusLogic_BusAddress_T BusAddress)
+{
+ return (void *) bus_to_virt(BusAddress);
+}
+
+
+/*
+ Virtual_to_32Bit_Virtual maps between Kernel Virtual Addresses and
+ 32 bit Kernel Virtual Addresses. This avoids compilation warnings
+ on 64 bit architectures.
+*/
+
+static inline
+BusLogic_BusAddress_T Virtual_to_32Bit_Virtual(void *VirtualAddress)
+{
+ return (BusLogic_BusAddress_T) (unsigned long) VirtualAddress;
+}
+
+
+/*
+ BusLogic_IncrementErrorCounter increments Error Counter by 1, stopping at
+ 65535 rather than wrapping around to 0.
+*/
+
+static inline void BusLogic_IncrementErrorCounter(unsigned short *ErrorCounter)
+{
+ if (*ErrorCounter < 65535) (*ErrorCounter)++;
+}
+
+
+/*
+ BusLogic_IncrementByteCounter increments Byte Counter by Amount.
+*/
+
+static inline void BusLogic_IncrementByteCounter(BusLogic_ByteCounter_T
+ *ByteCounter,
+ unsigned int Amount)
+{
+ ByteCounter->Units += Amount;
+ if (ByteCounter->Units > 999999999)
+ {
+ ByteCounter->Units -= 1000000000;
+ ByteCounter->Billions++;
+ }
+}
+
+
+/*
+ BusLogic_IncrementSizeBucket increments the Bucket for Amount.
+*/
+
+static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T
+ CommandSizeBuckets,
+ unsigned int Amount)
+{
+ int Index = 0;
+ if (Amount < 8*1024)
+ {
+ if (Amount < 2*1024)
+ Index = (Amount < 1*1024 ? 0 : 1);
+ else Index = (Amount < 4*1024 ? 2 : 3);
+ }
+ else if (Amount < 128*1024)
+ {
+ if (Amount < 32*1024)
+ Index = (Amount < 16*1024 ? 4 : 5);
+ else Index = (Amount < 64*1024 ? 6 : 7);
+ }
+ else Index = (Amount < 256*1024 ? 8 : 9);
+ CommandSizeBuckets[Index]++;
+}
+
+
+/*
+ Define the version number of the FlashPoint Firmware (SCCB Manager).
+*/
+
+#define FlashPoint_FirmwareVersion "5.02"
+
+
+/*
+ Define the possible return values from FlashPoint_HandleInterrupt.
+*/
+
+#define FlashPoint_NormalInterrupt 0x00
+#define FlashPoint_InternalError 0xFE
+#define FlashPoint_ExternalBusReset 0xFF
+
+
+/*
+ Define prototypes for the forward referenced BusLogic Driver
+ Internal Functions.
+*/
+
+static void BusLogic_QueueCompletedCCB(BusLogic_CCB_T *);
+static void BusLogic_InterruptHandler(int, void *, Registers_T *);
+static int BusLogic_ResetHostAdapter(BusLogic_HostAdapter_T *,
+ SCSI_Command_T *, unsigned int);
+static void BusLogic_Message(BusLogic_MessageLevel_T, char *,
+ BusLogic_HostAdapter_T *, ...);
+static void BusLogic_ParseDriverOptions(char *);
+
+
+#endif /* BusLogic_DriverVersion */
diff --git a/linux/src/drivers/scsi/FlashPoint.c b/linux/src/drivers/scsi/FlashPoint.c
new file mode 100644
index 0000000..8d2f102
--- /dev/null
+++ b/linux/src/drivers/scsi/FlashPoint.c
@@ -0,0 +1,12156 @@
+/*
+
+ FlashPoint.c -- FlashPoint SCCB Manager for Linux
+
+ This file contains the FlashPoint SCCB Manager from BusLogic's FlashPoint
+ Driver Developer's Kit, with minor modifications by Leonard N. Zubkoff for
+ Linux compatibility. It was provided by BusLogic in the form of 16 separate
+ source files, which would have unnecessarily cluttered the scsi directory, so
+ the individual files have been combined into this single file.
+
+ Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+
+ This file is available under both the GNU General Public License
+ and a BSD-style copyright; see LICENSE.FlashPoint for details.
+
+*/
+
+
+#include <linux/config.h>
+
+
+#ifndef CONFIG_SCSI_OMIT_FLASHPOINT
+
+
+#define UNIX
+#define FW_TYPE _SCCB_MGR_
+#define MAX_CARDS 8
+#undef BUSTYPE_PCI
+
+
+#define OS_InPortByte(port) inb(port)
+#define OS_InPortWord(port) inw(port)
+#define OS_InPortLong(port) inl(port)
+#define OS_OutPortByte(port, value) outb(value, port)
+#define OS_OutPortWord(port, value) outw(value, port)
+#define OS_OutPortLong(port, value) outl(value, port)
+#define OS_Lock(x)
+#define OS_UnLock(x)
+
+
+/*
+ Define name replacements for compatibility with the Linux BusLogic Driver.
+*/
+
+#define SccbMgr_sense_adapter FlashPoint_ProbeHostAdapter
+#define SccbMgr_config_adapter FlashPoint_HardwareResetHostAdapter
+#define SccbMgr_unload_card FlashPoint_ReleaseHostAdapter
+#define SccbMgr_start_sccb FlashPoint_StartCCB
+#define SccbMgr_abort_sccb FlashPoint_AbortCCB
+#define SccbMgr_my_int FlashPoint_InterruptPending
+#define SccbMgr_isr FlashPoint_HandleInterrupt
+
+
+/*
+ Define name replacements to avoid kernel namespace pollution.
+*/
+
+#define BL_Card FPT_BL_Card
+#define BusMasterInit FPT_BusMasterInit
+#define CalcCrc16 FPT_CalcCrc16
+#define CalcLrc FPT_CalcLrc
+#define ChkIfChipInitialized FPT_ChkIfChipInitialized
+#define DiagBusMaster FPT_DiagBusMaster
+#define DiagEEPROM FPT_DiagEEPROM
+#define DiagXbow FPT_DiagXbow
+#define GetTarLun FPT_GetTarLun
+#define RNVRamData FPT_RNVRamData
+#define RdStack FPT_RdStack
+#define SccbMgrTableInitAll FPT_SccbMgrTableInitAll
+#define SccbMgrTableInitCard FPT_SccbMgrTableInitCard
+#define SccbMgrTableInitTarget FPT_SccbMgrTableInitTarget
+#define SccbMgr_bad_isr FPT_SccbMgr_bad_isr
+#define SccbMgr_scsi_reset FPT_SccbMgr_scsi_reset
+#define SccbMgr_timer_expired FPT_SccbMgr_timer_expired
+#define SendMsg FPT_SendMsg
+#define Wait FPT_Wait
+#define Wait1Second FPT_Wait1Second
+#define WrStack FPT_WrStack
+#define XbowInit FPT_XbowInit
+#define autoCmdCmplt FPT_autoCmdCmplt
+#define autoLoadDefaultMap FPT_autoLoadDefaultMap
+#define busMstrDataXferStart FPT_busMstrDataXferStart
+#define busMstrSGDataXferStart FPT_busMstrSGDataXferStart
+#define busMstrTimeOut FPT_busMstrTimeOut
+#define dataXferProcessor FPT_dataXferProcessor
+#define default_intena FPT_default_intena
+#define hostDataXferAbort FPT_hostDataXferAbort
+#define hostDataXferRestart FPT_hostDataXferRestart
+#define inisci FPT_inisci
+#define mbCards FPT_mbCards
+#define nvRamInfo FPT_nvRamInfo
+#define phaseBusFree FPT_phaseBusFree
+#define phaseChkFifo FPT_phaseChkFifo
+#define phaseCommand FPT_phaseCommand
+#define phaseDataIn FPT_phaseDataIn
+#define phaseDataOut FPT_phaseDataOut
+#define phaseDecode FPT_phaseDecode
+#define phaseIllegal FPT_phaseIllegal
+#define phaseMsgIn FPT_phaseMsgIn
+#define phaseMsgOut FPT_phaseMsgOut
+#define phaseStatus FPT_phaseStatus
+#define queueAddSccb FPT_queueAddSccb
+#define queueCmdComplete FPT_queueCmdComplete
+#define queueDisconnect FPT_queueDisconnect
+#define queueFindSccb FPT_queueFindSccb
+#define queueFlushSccb FPT_queueFlushSccb
+#define queueFlushTargSccb FPT_queueFlushTargSccb
+#define queueSearchSelect FPT_queueSearchSelect
+#define queueSelectFail FPT_queueSelectFail
+#define s_PhaseTbl FPT_s_PhaseTbl
+#define scamHAString FPT_scamHAString
+#define scamInfo FPT_scamInfo
+#define scarb FPT_scarb
+#define scasid FPT_scasid
+#define scbusf FPT_scbusf
+#define sccbMgrTbl FPT_sccbMgrTbl
+#define schkdd FPT_schkdd
+#define scini FPT_scini
+#define sciso FPT_sciso
+#define scmachid FPT_scmachid
+#define scsavdi FPT_scsavdi
+#define scsel FPT_scsel
+#define scsell FPT_scsell
+#define scsendi FPT_scsendi
+#define scvalq FPT_scvalq
+#define scwirod FPT_scwirod
+#define scwiros FPT_scwiros
+#define scwtsel FPT_scwtsel
+#define scxferc FPT_scxferc
+#define sdecm FPT_sdecm
+#define sfm FPT_sfm
+#define shandem FPT_shandem
+#define sinits FPT_sinits
+#define sisyncn FPT_sisyncn
+#define sisyncr FPT_sisyncr
+#define siwidn FPT_siwidn
+#define siwidr FPT_siwidr
+#define sres FPT_sres
+#define sresb FPT_sresb
+#define ssel FPT_ssel
+#define ssenss FPT_ssenss
+#define sssyncv FPT_sssyncv
+#define stsyncn FPT_stsyncn
+#define stwidn FPT_stwidn
+#define sxfrp FPT_sxfrp
+#define utilEERead FPT_utilEERead
+#define utilEEReadOrg FPT_utilEEReadOrg
+#define utilEESendCmdAddr FPT_utilEESendCmdAddr
+#define utilEEWrite FPT_utilEEWrite
+#define utilEEWriteOnOff FPT_utilEEWriteOnOff
+#define utilUpdateResidual FPT_utilUpdateResidual
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: globals.h $
+ *
+ * Description: Common shared global defines.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+#ifndef __GLOBALS_H__
+#define __GLOBALS_H__
+
+#define _UCB_MGR_ 1
+#define _SCCB_MGR_ 2
+
+/*#include <osflags.h>*/
+
+#define MAX_CDBLEN 12
+
+#define SCAM_LEV_2 1
+
+#define CRCMASK 0xA001
+
+/* In your osflags.h file, please ENSURE that only ONE OS FLAG
+   is on at a time !!!  Also, please make sure you set the
+   variable FW_TYPE to either _UCB_MGR_ or _SCCB_MGR_ !!! */
+
+#if defined(DOS) || defined(WIN95_16) || defined(OS2) || defined(OTHER_16)
+ #define COMPILER_16_BIT 1
+#elif defined(NETWARE) || defined(NT) || defined(WIN95_32) || defined(UNIX) || defined(OTHER_32) || defined(SOLARIS_REAL_MODE)
+ #define COMPILER_32_BIT 1
+#endif
+
+
+#define BL_VENDOR_ID 0x104B
+#define FP_DEVICE_ID 0x8130
+#define MM_DEVICE_ID 0x1040
+
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef TRUE
+#define TRUE (!(FALSE))
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define FAILURE 0xFFFFFFFFL
+
+
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned int UINT;
+typedef unsigned long ULONG;
+typedef unsigned char * PUCHAR;
+typedef unsigned short* PUSHORT;
+typedef unsigned long * PULONG;
+typedef void * PVOID;
+
+
+#if defined(COMPILER_16_BIT)
+typedef unsigned char far * uchar_ptr;
+typedef unsigned short far * ushort_ptr;
+typedef unsigned long far * ulong_ptr;
+#endif /* 16_BIT_COMPILER */
+
+#if defined(COMPILER_32_BIT)
+typedef unsigned char * uchar_ptr;
+typedef unsigned short * ushort_ptr;
+typedef unsigned long * ulong_ptr;
+#endif /* 32_BIT_COMPILER */
+
+
+/* NEW TYPE DEFINITIONS (shared with Mylex North)
+
+** Use the following type defines to avoid confusion in 16 and 32-bit
+** environments.  Avoid using 'int', as it denotes 16 bits in 16-bit
+** environments and 32 bits in 32-bit environments.
+
+*/
+
+#define s08bits char
+#define s16bits short
+#define s32bits long
+
+#define u08bits unsigned s08bits
+#define u16bits unsigned s16bits
+#define u32bits unsigned s32bits
+
+#if defined(COMPILER_16_BIT)
+
+typedef u08bits far * pu08bits;
+typedef u16bits far * pu16bits;
+typedef u32bits far * pu32bits;
+
+#endif /* COMPILER_16_BIT */
+
+#if defined(COMPILER_32_BIT)
+
+typedef u08bits * pu08bits;
+typedef u16bits * pu16bits;
+typedef u32bits * pu32bits;
+
+#endif /* COMPILER_32_BIT */
+
+
+#define BIT(x) ((UCHAR)(1<<(x))) /* single-bit mask in bit position x */
+#define BITW(x) ((USHORT)(1<<(x))) /* single-bit mask in bit position x */
+
+
+
+#if defined(DOS)
+/*#include <dos.h>*/
+ #undef inportb /* undefine for Borland Lib */
+ #undef inport /* they may have define I/O function in LIB */
+ #undef outportb
+ #undef outport
+
+ #define OS_InPortByte(ioport) inportb(ioport)
+ #define OS_InPortWord(ioport) inport(ioport)
+ #define OS_InPortLong(ioport) inportq(ioport, val)
+ #define OS_OutPortByte(ioport, val) outportb(ioport, val)
+ #define OS_OutPortWord(ioport, val) outport(ioport, val)
+ #define OS_OutPortLong(ioport) outportq(ioport, val)
+#endif /* DOS */
+
+#if defined(NETWARE) || defined(OTHER_32) || defined(OTHER_16)
+ extern u08bits OS_InPortByte(u32bits ioport);
+ extern u16bits OS_InPortWord(u32bits ioport);
+ extern u32bits OS_InPortLong(u32bits ioport);
+
+ extern OS_InPortByteBuffer(u32bits ioport, pu08bits buffer, u32bits count);
+ extern OS_InPortWordBuffer(u32bits ioport, pu16bits buffer, u32bits count);
+ extern OS_OutPortByte(u32bits ioport, u08bits val);
+ extern OS_OutPortWord(u32bits ioport, u16bits val);
+ extern OS_OutPortLong(u32bits ioport, u32bits val);
+ extern OS_OutPortByteBuffer(u32bits ioport, pu08bits buffer, u32bits count);
+ extern OS_OutPortWordBuffer(u32bits ioport, pu16bits buffer, u32bits count);
+#endif /* NETWARE || OTHER_32 || OTHER_16 */
+
+#if defined (NT) || defined(WIN95_32) || defined(WIN95_16)
+ #if defined(NT)
+
+ extern __declspec(dllimport) u08bits ScsiPortReadPortUchar(pu08bits ioport);
+ extern __declspec(dllimport) u16bits ScsiPortReadPortUshort(pu16bits ioport);
+ extern __declspec(dllimport) u32bits ScsiPortReadPortUlong(pu32bits ioport);
+ extern __declspec(dllimport) void ScsiPortWritePortUchar(pu08bits ioport, u08bits val);
+ extern __declspec(dllimport) void ScsiPortWritePortUshort(pu16bits port, u16bits val);
+ extern __declspec(dllimport) void ScsiPortWritePortUlong(pu32bits port, u32bits val);
+
+ #else
+
+ extern u08bits ScsiPortReadPortUchar(pu08bits ioport);
+ extern u16bits ScsiPortReadPortUshort(pu16bits ioport);
+ extern u32bits ScsiPortReadPortUlong(pu32bits ioport);
+ extern void ScsiPortWritePortUchar(pu08bits ioport, u08bits val);
+ extern void ScsiPortWritePortUshort(pu16bits port, u16bits val);
+ extern void ScsiPortWritePortUlong(pu32bits port, u32bits val);
+ #endif
+
+
+ #define OS_InPortByte(ioport) ScsiPortReadPortUchar((pu08bits) ioport)
+ #define OS_InPortWord(ioport) ScsiPortReadPortUshort((pu16bits) ioport)
+ #define OS_InPortLong(ioport) ScsiPortReadPortUlong((pu32bits) ioport)
+
+ #define OS_OutPortByte(ioport, val) ScsiPortWritePortUchar((pu08bits) ioport, (u08bits) val)
+ #define OS_OutPortWord(ioport, val) ScsiPortWritePortUshort((pu16bits) ioport, (u16bits) val)
+ #define OS_OutPortLong(ioport, val) ScsiPortWritePortUlong((pu32bits) ioport, (u32bits) val)
+ #define OS_OutPortByteBuffer(ioport, buffer, count) \
+ ScsiPortWritePortBufferUchar((pu08bits)&port, (pu08bits) buffer, (u32bits) count)
+ #define OS_OutPortWordBuffer(ioport, buffer, count) \
+ ScsiPortWritePortBufferUshort((pu16bits)&port, (pu16bits) buffer, (u32bits) count)
+
+ #define OS_Lock(x)
+ #define OS_UnLock(x)
+#endif /* NT || WIN95_32 || WIN95_16 */
+
+#if defined (UNIX) && !defined(OS_InPortByte)
+ #define OS_InPortByte(ioport) inb((u16bits)ioport)
+ #define OS_InPortWord(ioport) inw((u16bits)ioport)
+ #define OS_InPortLong(ioport) inl((u16bits)ioport)
+ #define OS_OutPortByte(ioport,val) outb((u16bits)ioport, (u08bits)val)
+ #define OS_OutPortWord(ioport,val) outw((u16bits)ioport, (u16bits)val)
+ #define OS_OutPortLong(ioport,val) outl((u16bits)ioport, (u32bits)val)
+
+ #define OS_Lock(x)
+ #define OS_UnLock(x)
+#endif /* UNIX */
+
+
+#if defined(OS2)
+ extern u08bits inb(u32bits ioport);
+ extern u16bits inw(u32bits ioport);
+ extern void outb(u32bits ioport, u08bits val);
+ extern void outw(u32bits ioport, u16bits val);
+
+ #define OS_InPortByte(ioport) inb(ioport)
+ #define OS_InPortWord(ioport) inw(ioport)
+ #define OS_OutPortByte(ioport, val) outb(ioport, val)
+ #define OS_OutPortWord(ioport, val) outw(ioport, val)
+ extern u32bits OS_InPortLong(u32bits ioport);
+ extern void OS_OutPortLong(u32bits ioport, u32bits val);
+
+ #define OS_Lock(x)
+ #define OS_UnLock(x)
+#endif /* OS2 */
+
+#if defined(SOLARIS_REAL_MODE)
+
+extern unsigned char inb(unsigned long ioport);
+extern unsigned short inw(unsigned long ioport);
+
+#define OS_InPortByte(ioport) inb(ioport)
+#define OS_InPortWord(ioport) inw(ioport)
+
+extern void OS_OutPortByte(unsigned long ioport, unsigned char val);
+extern void OS_OutPortWord(unsigned long ioport, unsigned short val);
+extern unsigned long OS_InPortLong(unsigned long ioport);
+extern void OS_OutPortLong(unsigned long ioport, unsigned long val);
+
+#define OS_Lock(x)
+#define OS_UnLock(x)
+
+#endif /* SOLARIS_REAL_MODE */
+
+#endif /* __GLOBALS_H__ */
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: sccbmgr.h $
+ *
+ * Description: Common shared SCCB Interface defines and SCCB
+ * Manager specifics defines.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __SCCB_H__
+#define __SCCB_H__
+
+/*#include <osflags.h>*/
+/*#include <globals.h>*/
+
+#if defined(BUGBUG)
+#define debug_size 32
+#endif
+
+#if defined(DOS)
+
+ typedef struct _SCCB near *PSCCB;
+ #if (FW_TYPE == _SCCB_MGR_)
+ typedef void (*CALL_BK_FN)(PSCCB);
+ #endif
+
+#elif defined(OS2)
+
+ typedef struct _SCCB far *PSCCB;
+ #if (FW_TYPE == _SCCB_MGR_)
+ typedef void (far *CALL_BK_FN)(PSCCB);
+ #endif
+
+#else
+
+ typedef struct _SCCB *PSCCB;
+ #if (FW_TYPE == _SCCB_MGR_)
+ typedef void (*CALL_BK_FN)(PSCCB);
+ #endif
+
+#endif
+
+
+typedef struct SCCBMgr_info {
+ ULONG si_baseaddr;
+ UCHAR si_present;
+ UCHAR si_intvect;
+ UCHAR si_id;
+ UCHAR si_lun;
+ USHORT si_fw_revision;
+ USHORT si_per_targ_init_sync;
+ USHORT si_per_targ_fast_nego;
+ USHORT si_per_targ_ultra_nego;
+ USHORT si_per_targ_no_disc;
+ USHORT si_per_targ_wide_nego;
+ USHORT si_flags;
+ UCHAR si_card_family;
+ UCHAR si_bustype;
+ UCHAR si_card_model[3];
+ UCHAR si_relative_cardnum;
+ UCHAR si_reserved[4];
+ ULONG si_OS_reserved;
+ UCHAR si_XlatInfo[4];
+ ULONG si_reserved2[5];
+ ULONG si_secondary_range;
+} SCCBMGR_INFO;
+
+#if defined(DOS)
+ typedef SCCBMGR_INFO * PSCCBMGR_INFO;
+#else
+ #if defined (COMPILER_16_BIT)
+ typedef SCCBMGR_INFO far * PSCCBMGR_INFO;
+ #else
+ typedef SCCBMGR_INFO * PSCCBMGR_INFO;
+ #endif
+#endif // defined(DOS)
+
+
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+ #define SCSI_PARITY_ENA 0x0001
+ #define LOW_BYTE_TERM 0x0010
+ #define HIGH_BYTE_TERM 0x0020
+ #define BUSTYPE_PCI 0x3
+#endif
+
+#define SUPPORT_16TAR_32LUN 0x0002
+#define SOFT_RESET 0x0004
+#define EXTENDED_TRANSLATION 0x0008
+#define POST_ALL_UNDERRRUNS 0x0040
+#define FLAG_SCAM_ENABLED 0x0080
+#define FLAG_SCAM_LEVEL2 0x0100
+
+
+
+
+#define HARPOON_FAMILY 0x02
+
+
+#define ISA_BUS_CARD 0x01
+#define EISA_BUS_CARD 0x02
+#define PCI_BUS_CARD 0x03
+#define VESA_BUS_CARD 0x04
+
+/* SCCB struct used for both SCCB and UCB manager compiles!
+ * The UCB Manager treats the SCCB as its 'native hardware structure'
+ */
+
+
+#pragma pack(1)
+typedef struct _SCCB {
+ UCHAR OperationCode;
+ UCHAR ControlByte;
+ UCHAR CdbLength;
+ UCHAR RequestSenseLength;
+ ULONG DataLength;
+ ULONG DataPointer;
+ UCHAR CcbRes[2];
+ UCHAR HostStatus;
+ UCHAR TargetStatus;
+ UCHAR TargID;
+ UCHAR Lun;
+ UCHAR Cdb[12];
+ UCHAR CcbRes1;
+ UCHAR Reserved1;
+ ULONG Reserved2;
+ ULONG SensePointer;
+
+
+ CALL_BK_FN SccbCallback; /* VOID (*SccbCallback)(); */
+ ULONG SccbIOPort; /* Identifies board base port */
+ UCHAR SccbStatus;
+ UCHAR SCCBRes2;
+ USHORT SccbOSFlags;
+
+
+ ULONG Sccb_XferCnt; /* actual transfer count */
+ ULONG Sccb_ATC;
+ ULONG SccbVirtDataPtr; /* virtual addr for OS/2 */
+ ULONG Sccb_res1;
+ USHORT Sccb_MGRFlags;
+ USHORT Sccb_sgseg;
+ UCHAR Sccb_scsimsg; /* identify msg for selection */
+ UCHAR Sccb_tag;
+ UCHAR Sccb_scsistat;
+ UCHAR Sccb_idmsg; /* image of last msg in */
+ PSCCB Sccb_forwardlink;
+ PSCCB Sccb_backlink;
+ ULONG Sccb_savedATC;
+ UCHAR Save_Cdb[6];
+ UCHAR Save_CdbLen;
+ UCHAR Sccb_XferState;
+ ULONG Sccb_SGoffset;
+#if (FW_TYPE == _UCB_MGR_)
+ PUCB Sccb_ucb_ptr;
+#endif
+ } SCCB;
+
+#define SCCB_SIZE sizeof(SCCB)
+
+#pragma pack()
+
+
+
+#define SCSI_INITIATOR_COMMAND 0x00
+#define TARGET_MODE_COMMAND 0x01
+#define SCATTER_GATHER_COMMAND 0x02
+#define RESIDUAL_COMMAND 0x03
+#define RESIDUAL_SG_COMMAND 0x04
+#define RESET_COMMAND 0x81
+
+
+#define F_USE_CMD_Q  0x20	/*Indicates TAGGED command. */
+#define TAG_TYPE_MASK 0xC0 /*Type of tag msg to send. */
+#define TAG_Q_MASK 0xE0
+#define SCCB_DATA_XFER_OUT 0x10 /* Write */
+#define SCCB_DATA_XFER_IN 0x08 /* Read */
+
+
+#define FOURTEEN_BYTES 0x00 /* Request Sense Buffer size */
+#define NO_AUTO_REQUEST_SENSE 0x01 /* No Request Sense Buffer */
+
+
+#define BUS_FREE_ST 0
+#define SELECT_ST 1
+#define SELECT_BDR_ST 2 /* Select w\ Bus Device Reset */
+#define SELECT_SN_ST 3 /* Select w\ Sync Nego */
+#define SELECT_WN_ST 4 /* Select w\ Wide Data Nego */
+#define SELECT_Q_ST 5 /* Select w\ Tagged Q'ing */
+#define COMMAND_ST 6
+#define DATA_OUT_ST 7
+#define DATA_IN_ST 8
+#define DISCONNECT_ST 9
+#define STATUS_ST 10
+#define ABORT_ST 11
+#define MESSAGE_ST 12
+
+
+#define F_HOST_XFER_DIR 0x01
+#define F_ALL_XFERRED 0x02
+#define F_SG_XFER 0x04
+#define F_AUTO_SENSE 0x08
+#define F_ODD_BALL_CNT 0x10
+#define F_NO_DATA_YET 0x80
+
+
+#define F_STATUSLOADED 0x01
+#define F_MSGLOADED 0x02
+#define F_DEV_SELECTED 0x04
+
+
+#define SCCB_COMPLETE 0x00 /* SCCB completed without error */
+#define SCCB_DATA_UNDER_RUN 0x0C
+#define SCCB_SELECTION_TIMEOUT 0x11 /* Set SCSI selection timed out */
+#define SCCB_DATA_OVER_RUN 0x12
+#define SCCB_UNEXPECTED_BUS_FREE 0x13 /* Target dropped SCSI BSY */
+#define SCCB_PHASE_SEQUENCE_FAIL 0x14 /* Target bus phase sequence failure */
+
+#define SCCB_INVALID_OP_CODE 0x16 /* SCCB invalid operation code */
+#define SCCB_INVALID_SCCB 0x1A /* Invalid SCCB - bad parameter */
+#define SCCB_GROSS_FW_ERR 0x27 /* Major problem! */
+#define SCCB_BM_ERR 0x30 /* BusMaster error. */
+#define SCCB_PARITY_ERR 0x34 /* SCSI parity error */
+
+
+
+#if (FW_TYPE==_UCB_MGR_)
+ #define HBA_AUTO_SENSE_FAIL 0x1B
+ #define HBA_TQ_REJECTED 0x1C
+ #define HBA_UNSUPORTED_MSG 0x1D
+ #define HBA_HW_ERROR 0x20
+ #define HBA_ATN_NOT_RESPONDED 0x21
+ #define HBA_SCSI_RESET_BY_ADAPTER 0x22
+ #define HBA_SCSI_RESET_BY_TARGET 0x23
+ #define HBA_WRONG_CONNECTION 0x24
+ #define HBA_BUS_DEVICE_RESET 0x25
+ #define HBA_ABORT_QUEUE 0x26
+
+#else // these are not defined in BUDI/UCB
+
+ #define SCCB_INVALID_DIRECTION 0x18 /* Invalid target direction */
+ #define SCCB_DUPLICATE_SCCB 0x19 /* Duplicate SCCB */
+ #define SCCB_SCSI_RST 0x35 /* SCSI RESET detected. */
+
+#endif // (FW_TYPE==_UCB_MGR_)
+
+
+#define SCCB_IN_PROCESS 0x00
+#define SCCB_SUCCESS 0x01
+#define SCCB_ABORT 0x02
+#define SCCB_NOT_FOUND 0x03
+#define SCCB_ERROR 0x04
+#define SCCB_INVALID 0x05
+
+#define SCCB_SIZE sizeof(SCCB)
+
+
+
+
+#if (FW_TYPE == _UCB_MGR_)
+ void SccbMgr_start_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb);
+ s32bits SccbMgr_abort_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb);
+ u08bits SccbMgr_my_int(CARD_HANDLE pCurrCard);
+ s32bits SccbMgr_isr(CARD_HANDLE pCurrCard);
+ void SccbMgr_scsi_reset(CARD_HANDLE pCurrCard);
+ void SccbMgr_timer_expired(CARD_HANDLE pCurrCard);
+ void SccbMgr_unload_card(CARD_HANDLE pCurrCard);
+ void SccbMgr_restore_foreign_state(CARD_HANDLE pCurrCard);
+ void SccbMgr_restore_native_state(CARD_HANDLE pCurrCard);
+ void SccbMgr_save_foreign_state(PADAPTER_INFO pAdapterInfo);
+
+#endif
+
+
+#if (FW_TYPE == _SCCB_MGR_)
+
+ #if defined (DOS)
+ int SccbMgr_sense_adapter(PSCCBMGR_INFO pCardInfo);
+ USHORT SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo);
+ void SccbMgr_start_sccb(USHORT pCurrCard, PSCCB p_SCCB);
+ int SccbMgr_abort_sccb(USHORT pCurrCard, PSCCB p_SCCB);
+ UCHAR SccbMgr_my_int(USHORT pCurrCard);
+ int SccbMgr_isr(USHORT pCurrCard);
+ void SccbMgr_scsi_reset(USHORT pCurrCard);
+ void SccbMgr_timer_expired(USHORT pCurrCard);
+ USHORT SccbMgr_status(USHORT pCurrCard);
+ void SccbMgr_unload_card(USHORT pCurrCard);
+
+ #else //non-DOS
+
+ int SccbMgr_sense_adapter(PSCCBMGR_INFO pCardInfo);
+ ULONG SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo);
+ void SccbMgr_start_sccb(ULONG pCurrCard, PSCCB p_SCCB);
+ int SccbMgr_abort_sccb(ULONG pCurrCard, PSCCB p_SCCB);
+ UCHAR SccbMgr_my_int(ULONG pCurrCard);
+ int SccbMgr_isr(ULONG pCurrCard);
+ void SccbMgr_scsi_reset(ULONG pCurrCard);
+ void SccbMgr_enable_int(ULONG pCurrCard);
+ void SccbMgr_disable_int(ULONG pCurrCard);
+ void SccbMgr_timer_expired(ULONG pCurrCard);
+ void SccbMgr_unload_card(ULONG pCurrCard);
+
+ #endif
+#endif // (FW_TYPE == _SCCB_MGR_)
+
+#endif /* __SCCB_H__ */
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: blx30.h $
+ *
+ * Description: This module contains SCCB/UCB Manager implementation-
+ *              specific definitions.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+
+#ifndef __blx30_H__
+#define __blx30_H__
+
+/*#include <globals.h>*/
+
+#define ORION_FW_REV 3110
+
+
+
+
+#define HARP_REVD 1
+
+
+#if defined(DOS)
+#define QUEUE_DEPTH 8+1 /*1 for Normal disconnect 0 for Q'ing. */
+#else
+#define QUEUE_DEPTH 254+1 /*1 for Normal disconnect 32 for Q'ing. */
+#endif // defined(DOS)
+
+#define MAX_MB_CARDS    4       /* Max. no. of cards supported on the motherboard */
+
+#define WIDE_SCSI 1
+
+#if defined(WIDE_SCSI)
+ #if defined(DOS)
+ #define MAX_SCSI_TAR 16
+ #define MAX_LUN 8
+ #define LUN_MASK 0x07
+ #else
+ #define MAX_SCSI_TAR 16
+ #define MAX_LUN 32
+ #define LUN_MASK 0x1f
+
+ #endif
+#else
+ #define MAX_SCSI_TAR 8
+ #define MAX_LUN 8
+ #define LUN_MASK 0x07
+#endif
+
+#if defined(HARP_REVA)
+#define SG_BUF_CNT 15 /*Number of prefetched elements. */
+#else
+#define SG_BUF_CNT 16 /*Number of prefetched elements. */
+#endif
+
+#define SG_ELEMENT_SIZE 8 /*Eight byte per element. */
+#define SG_LOCAL_MASK 0x00000000L
+#define SG_ELEMENT_MASK 0xFFFFFFFFL
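+
+/*
+ * Illustrative sketch (not part of the original sources): each scatter/gather
+ * entry is SG_ELEMENT_SIZE (8) bytes and the bus master prefetches up to
+ * SG_BUF_CNT of them at a time.  The field order shown here -- a 32-bit
+ * segment byte count followed by a 32-bit physical address -- is an
+ * assumption for exposition, not a statement of the hardware format.
+ */
+typedef struct SGEntrySketch {
+   ULONG sg_count;      /* byte count of this data segment (assumed order) */
+   ULONG sg_physaddr;   /* 32-bit physical address of the segment          */
+} SG_ENTRY_SKETCH;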
+
+
+#if (FW_TYPE == _UCB_MGR_)
+ #define OPC_DECODE_NORMAL 0x0f7f
+#endif // _UCB_MGR_
+
+
+
+#if defined(DOS)
+
+/*#include <dos.h>*/
+ #define RD_HARPOON(ioport) (OS_InPortByte(ioport))
+ #define RDW_HARPOON(ioport) (OS_InPortWord(ioport))
+ #define WR_HARPOON(ioport,val) (OS_OutPortByte(ioport,val))
+ #define WRW_HARPOON(ioport,val) (OS_OutPortWord(ioport,val))
+
+ #define RD_HARP32(port,offset,data) asm{db 66h; \
+ push ax; \
+ mov dx,port; \
+ add dx, offset; \
+ db 66h; \
+ in ax,dx; \
+ db 66h; \
+ mov word ptr data,ax;\
+ db 66h; \
+ pop ax}
+
+ #define WR_HARP32(port,offset,data) asm{db 66h; \
+ push ax; \
+ mov dx,port; \
+ add dx, offset; \
+ db 66h; \
+ mov ax,word ptr data;\
+ db 66h; \
+ out dx,ax; \
+ db 66h; \
+ pop ax}
+#endif /* DOS */
+
+#if defined(NETWARE) || defined(OTHER_32) || defined(OTHER_16)
+ #define RD_HARPOON(ioport) OS_InPortByte((unsigned long)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((unsigned long)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong(ioport + offset))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((ioport + offset), data)
+#endif /* NETWARE || OTHER_32 || OTHER_16 */
+
+#if defined(NT) || defined(WIN95_32) || defined(WIN95_16)
+ #define RD_HARPOON(ioport) OS_InPortByte((ULONG)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((ULONG)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((ULONG)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((ULONG)(ioport + offset), data)
+#endif /* NT || WIN95_32 || WIN95_16 */
+
+#if defined (UNIX)
+ #define RD_HARPOON(ioport) OS_InPortByte((u32bits)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((u32bits)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((u32bits)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((u32bits)ioport,(u08bits) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((u32bits)ioport,(u16bits)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((u32bits)(ioport + offset), data)
+#endif /* UNIX */
+
+#if defined(OS2)
+ #define RD_HARPOON(ioport) OS_InPortByte((unsigned long)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((unsigned long)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((ULONG)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong(((ULONG)(ioport + offset)), data)
+#endif /* OS2 */
+
+#if defined(SOLARIS_REAL_MODE)
+
+ #define RD_HARPOON(ioport) OS_InPortByte((unsigned long)ioport)
+ #define RDW_HARPOON(ioport) OS_InPortWord((unsigned long)ioport)
+ #define RD_HARP32(ioport,offset,data) (data = OS_InPortLong((ULONG)(ioport + offset)))
+ #define WR_HARPOON(ioport,val) OS_OutPortByte((ULONG)ioport,(UCHAR) val)
+ #define WRW_HARPOON(ioport,val) OS_OutPortWord((ULONG)ioport,(USHORT)val)
+ #define WR_HARP32(ioport,offset,data) OS_OutPortLong((ULONG)(ioport + offset), (ULONG)data)
+
+#endif /* SOLARIS_REAL_MODE */
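+
+/*
+ * Illustrative note (not part of the original sources): on a Linux-style port
+ * the same accessor family would typically be thin wrappers around the port
+ * I/O primitives, for example:
+ *
+ *   #define RD_HARPOON(ioport)       inb((unsigned short)(ioport))
+ *   #define RDW_HARPOON(ioport)      inw((unsigned short)(ioport))
+ *   #define WR_HARPOON(ioport,val)   outb((UCHAR)(val), (unsigned short)(ioport))
+ *   #define WRW_HARPOON(ioport,val)  outw((USHORT)(val), (unsigned short)(ioport))
+ *
+ * The value-first argument order follows the Linux <asm/io.h> outb/outw
+ * convention; the glue actually used to build this driver may differ.
+ */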
+
+#endif /* __blx30_H__ */
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: target.h $
+ *
+ * Description: Definitions for Target related structures
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __TARGET__
+#define __TARGET__
+
+/*#include <globals.h>*/
+/*#include <blx30.h>*/
+
+
+#define TAR_SYNC_MASK (BIT(7)+BIT(6))
+#define SYNC_UNKNOWN 0x00
+#define SYNC_TRYING BIT(6)
+#define SYNC_SUPPORTED (BIT(7)+BIT(6))
+
+#define TAR_WIDE_MASK (BIT(5)+BIT(4))
+#define WIDE_DISABLED 0x00
+#define WIDE_ENABLED BIT(4)
+#define WIDE_NEGOCIATED BIT(5)
+
+#define TAR_TAG_Q_MASK (BIT(3)+BIT(2))
+#define TAG_Q_UNKNOWN 0x00
+#define TAG_Q_TRYING BIT(2)
+#define TAG_Q_REJECT BIT(3)
+#define TAG_Q_SUPPORTED (BIT(3)+BIT(2))
+
+#define TAR_ALLOW_DISC BIT(0)
+
+
+#define EE_SYNC_MASK (BIT(0)+BIT(1))
+#define EE_SYNC_ASYNC 0x00
+#define EE_SYNC_5MB BIT(0)
+#define EE_SYNC_10MB BIT(1)
+#define EE_SYNC_20MB (BIT(0)+BIT(1))
+
+#define EE_ALLOW_DISC BIT(6)
+#define EE_WIDE_SCSI BIT(7)
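+
+/*
+ * Illustrative sketch (not part of the original sources): TarEEValue below
+ * holds one of these per-target EEPROM bytes, so the EE_* masks above split
+ * it into the negotiation limits.  The helper and its names are hypothetical.
+ */
+static void ee_decode_target_byte(UCHAR ee_val, UCHAR *sync_rate,
+                                  UCHAR *allow_disc, UCHAR *wide)
+{
+   *sync_rate  = (UCHAR)(ee_val & EE_SYNC_MASK);  /* EE_SYNC_ASYNC..EE_SYNC_20MB */
+   *allow_disc = (UCHAR)((ee_val & EE_ALLOW_DISC) != 0);
+   *wide       = (UCHAR)((ee_val & EE_WIDE_SCSI) != 0);
+}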
+
+
+#if defined(DOS)
+ typedef struct SCCBMgr_tar_info near *PSCCBMgr_tar_info;
+
+#elif defined(OS2)
+ typedef struct SCCBMgr_tar_info far *PSCCBMgr_tar_info;
+
+#else
+ typedef struct SCCBMgr_tar_info *PSCCBMgr_tar_info;
+
+#endif
+
+
+typedef struct SCCBMgr_tar_info {
+
+ PSCCB TarSelQ_Head;
+ PSCCB TarSelQ_Tail;
+   UCHAR TarLUN_CA;        /*Contingent Allegiance */
+ UCHAR TarTagQ_Cnt;
+ UCHAR TarSelQ_Cnt;
+ UCHAR TarStatus;
+ UCHAR TarEEValue;
+ UCHAR TarSyncCtrl;
+ UCHAR TarReserved[2]; /* for alignment */
+ UCHAR LunDiscQ_Idx[MAX_LUN];
+ UCHAR TarLUNBusy[MAX_LUN];
+} SCCBMGR_TAR_INFO;
+
+typedef struct NVRAMInfo {
+ UCHAR niModel; /* Model No. of card */
+ UCHAR niCardNo; /* Card no. */
+#if defined(DOS)
+ USHORT niBaseAddr; /* Port Address of card */
+#else
+ ULONG niBaseAddr; /* Port Address of card */
+#endif
+ UCHAR niSysConf; /* Adapter Configuration byte - Byte 16 of eeprom map */
+ UCHAR niScsiConf; /* SCSI Configuration byte - Byte 17 of eeprom map */
+ UCHAR niScamConf; /* SCAM Configuration byte - Byte 20 of eeprom map */
+  UCHAR niAdapId;      /* Host Adapter ID - Byte 24 of eeprom map */
+ UCHAR niSyncTbl[MAX_SCSI_TAR / 2]; /* Sync/Wide byte of targets */
+ UCHAR niScamTbl[MAX_SCSI_TAR][4]; /* Compressed Scam name string of Targets */
+}NVRAMINFO;
+
+#if defined(DOS)
+typedef NVRAMINFO near *PNVRamInfo;
+#elif defined (OS2)
+typedef NVRAMINFO far *PNVRamInfo;
+#else
+typedef NVRAMINFO *PNVRamInfo;
+#endif
+
+#define MODEL_LT 1
+#define MODEL_DL 2
+#define MODEL_LW 3
+#define MODEL_DW 4
+
+
+typedef struct SCCBcard {
+ PSCCB currentSCCB;
+#if (FW_TYPE==_SCCB_MGR_)
+ PSCCBMGR_INFO cardInfo;
+#else
+ PADAPTER_INFO cardInfo;
+#endif
+
+#if defined(DOS)
+ USHORT ioPort;
+#else
+ ULONG ioPort;
+#endif
+
+ USHORT cmdCounter;
+ UCHAR discQCount;
+ UCHAR tagQ_Lst;
+ UCHAR cardIndex;
+ UCHAR scanIndex;
+ UCHAR globalFlags;
+ UCHAR ourId;
+ PNVRamInfo pNvRamInfo;
+ PSCCB discQ_Tbl[QUEUE_DEPTH];
+
+}SCCBCARD;
+
+#if defined(DOS)
+typedef struct SCCBcard near *PSCCBcard;
+#elif defined (OS2)
+typedef struct SCCBcard far *PSCCBcard;
+#else
+typedef struct SCCBcard *PSCCBcard;
+#endif
+
+
+#define F_TAG_STARTED 0x01
+#define F_CONLUN_IO 0x02
+#define F_DO_RENEGO 0x04
+#define F_NO_FILTER 0x08
+#define F_GREEN_PC 0x10
+#define F_HOST_XFER_ACT 0x20
+#define F_NEW_SCCB_CMD 0x40
+#define F_UPDATE_EEPROM 0x80
+
+
+#define ID_STRING_LENGTH 32
+#define TYPE_CODE0 0x63 /*Level2 Mstr (bits 7-6), */
+
+#define TYPE_CODE1 00 /*No ID yet */
+
+#define SLV_TYPE_CODE0 0xA3 /*Priority Bit set (bits 7-6), */
+
+#define ASSIGN_ID 0x00
+#define SET_P_FLAG 0x01
+#define CFG_CMPLT 0x03
+#define DOM_MSTR 0x0F
+#define SYNC_PTRN 0x1F
+
+#define ID_0_7 0x18
+#define ID_8_F 0x11
+#define ID_10_17 0x12
+#define ID_18_1F 0x0B
+#define MISC_CODE 0x14
+#define CLR_P_FLAG 0x18
+#define LOCATE_ON 0x12
+#define LOCATE_OFF 0x0B
+
+#define LVL_1_MST 0x00
+#define LVL_2_MST 0x40
+#define DOM_LVL_2 0xC0
+
+
+#define INIT_SELTD 0x01
+#define LEVEL2_TAR 0x02
+
+
+enum scam_id_st { ID0,ID1,ID2,ID3,ID4,ID5,ID6,ID7,ID8,ID9,ID10,ID11,ID12,
+ ID13,ID14,ID15,ID_UNUSED,ID_UNASSIGNED,ID_ASSIGNED,LEGACY,
+ CLR_PRIORITY,NO_ID_AVAIL };
+
+typedef struct SCCBscam_info {
+
+ UCHAR id_string[ID_STRING_LENGTH];
+ enum scam_id_st state;
+
+} SCCBSCAM_INFO, *PSCCBSCAM_INFO;
+
+#endif
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: scsi2.h $
+ *
+ *   Description: Global SCSI command, status, and message definitions.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __SCSI_H__
+#define __SCSI_H__
+
+
+
+#define SCSI_TEST_UNIT_READY 0x00
+#define SCSI_REZERO_UNIT 0x01
+#define SCSI_REQUEST_SENSE 0x03
+#define SCSI_FORMAT_UNIT 0x04
+#define SCSI_REASSIGN 0x07
+#define SCSI_READ 0x08
+#define SCSI_WRITE 0x0A
+#define SCSI_SEEK 0x0B
+#define SCSI_INQUIRY 0x12
+#define SCSI_MODE_SELECT 0x15
+#define SCSI_RESERVE_UNIT 0x16
+#define SCSI_RELEASE_UNIT 0x17
+#define SCSI_MODE_SENSE 0x1A
+#define SCSI_START_STOP_UNIT 0x1B
+#define SCSI_SEND_DIAGNOSTIC 0x1D
+#define SCSI_READ_CAPACITY 0x25
+#define SCSI_READ_EXTENDED 0x28
+#define SCSI_WRITE_EXTENDED 0x2A
+#define SCSI_SEEK_EXTENDED 0x2B
+#define SCSI_WRITE_AND_VERIFY 0x2E
+#define SCSI_VERIFY 0x2F
+#define SCSI_READ_DEFECT_DATA 0x37
+#define SCSI_WRITE_BUFFER 0x3B
+#define SCSI_READ_BUFFER 0x3C
+#define SCSI_RECV_DIAGNOSTIC 0x1C
+#define SCSI_READ_LONG 0x3E
+#define SCSI_WRITE_LONG 0x3F
+#define SCSI_LAST_SCSI_CMND SCSI_WRITE_LONG
+#define SCSI_INVALID_CMND 0xFF
+
+
+
+#define SSGOOD 0x00
+#define SSCHECK 0x02
+#define SSCOND_MET 0x04
+#define SSBUSY 0x08
+#define SSRESERVATION_CONFLICT 0x18
+#define SSCMD_TERM 0x22
+#define SSQ_FULL 0x28
+
+
+#define SKNO_SEN 0x00
+#define SKRECOV_ERR 0x01
+#define SKNOT_RDY 0x02
+#define SKMED_ERR 0x03
+#define SKHW_ERR 0x04
+#define SKILL_REQ 0x05
+#define SKUNIT_ATTN 0x06
+#define SKDATA_PROTECT 0x07
+#define SKBLNK_CHK 0x08
+#define SKCPY_ABORT 0x0A
+#define SKABORT_CMD 0x0B
+#define SKEQUAL 0x0C
+#define SKVOL_OVF 0x0D
+#define SKMIS_CMP 0x0E
+
+
+#define SMCMD_COMP 0x00
+#define SMEXT 0x01
+#define SMSAVE_DATA_PTR 0x02
+#define SMREST_DATA_PTR 0x03
+#define SMDISC 0x04
+#define SMINIT_DETEC_ERR 0x05
+#define SMABORT 0x06
+#define SMREJECT 0x07
+#define SMNO_OP 0x08
+#define SMPARITY 0x09
+#define SMDEV_RESET 0x0C
+#define SMABORT_TAG 0x0D
+#define SMINIT_RECOVERY 0x0F
+#define SMREL_RECOVERY 0x10
+
+#define SMIDENT 0x80
+#define DISC_PRIV 0x40
+
+
+#define SMSYNC 0x01
+#define SM10MBS 0x19 /* 100ns */
+#define SM5MBS 0x32 /* 200ns */
+#define SMOFFSET                0x0F           /* Max offset value */
+#define SMWDTR 0x03
+#define SM8BIT 0x00
+#define SM16BIT 0x01
+#define SM32BIT 0x02
+#define SMIGNORWR 0x23 /* Ignore Wide Residue */
+
+
+#define ARBITRATION_DELAY       0x01           /* 2.4us using a 40MHz clock */
+#define BUS_SETTLE_DELAY 0x01 /* 400ns */
+#define BUS_CLEAR_DELAY 0x01 /* 800ns */
+
+
+
+#define SPHASE_TO               0x0A           /* 10 second timeout waiting for a bus phase */
+#define SCMD_TO 0x0F /* Overall command timeout */
+
+
+
+#define SIX_BYTE_CMD 0x06
+#define TEN_BYTE_CMD 0x0A
+#define TWELVE_BYTE_CMD 0x0C
+
+#define ASYNC 0x00
+#define PERI25NS 0x06 /* 25/4ns to next clock for xbow. */
+#define SYNC10MBS 0x19
+#define SYNC5MBS 0x32
+#define MAX_OFFSET              0x0F           /* Max byte offset for Sync Xfers */
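+
+/*
+ * Illustrative sketch (not part of the original sources): a SIX_BYTE_CMD CDB
+ * for the SCSI_INQUIRY opcode defined above, using the standard SCSI-2
+ * layout (opcode, LUN/EVPD, page code, reserved, allocation length, control).
+ * The helper name is hypothetical.
+ */
+static void build_inquiry_cdb_sketch(UCHAR cdb[SIX_BYTE_CMD], UCHAR alloc_len)
+{
+   cdb[0] = SCSI_INQUIRY;
+   cdb[1] = 0x00;         /* EVPD = 0: standard INQUIRY data */
+   cdb[2] = 0x00;         /* page code */
+   cdb[3] = 0x00;         /* reserved */
+   cdb[4] = alloc_len;    /* allocation length */
+   cdb[5] = 0x00;         /* control byte */
+}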
+
+#endif
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: eeprom.h $
+ *
+ * Description: Definitions for EEPROM related structures
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+#ifndef __EEPROM__
+#define __EEPROM__
+
+/*#include <globals.h>*/
+
+#define EEPROM_WD_CNT 256
+
+#define EEPROM_CHECK_SUM 0
+#define FW_SIGNATURE 2
+#define MODEL_NUMB_0 4
+#define MODEL_NUMB_1 5
+#define MODEL_NUMB_2 6
+#define MODEL_NUMB_3 7
+#define MODEL_NUMB_4 8
+#define MODEL_NUMB_5 9
+#define IO_BASE_ADDR 10
+#define IRQ_NUMBER 12
+#define PCI_INT_PIN 13
+#define BUS_DELAY            14   /* On-time in byte 14, off delay in byte 15 */
+#define SYSTEM_CONFIG 16
+#define SCSI_CONFIG 17
+#define BIOS_CONFIG 18
+#define SPIN_UP_DELAY 19
+#define SCAM_CONFIG 20
+#define ADAPTER_SCSI_ID 24
+
+
+#define IGNORE_B_SCAN 32
+#define SEND_START_ENA 34
+#define DEVICE_ENABLE 36
+
+#define SYNC_RATE_TBL 38
+#define SYNC_RATE_TBL01 38
+#define SYNC_RATE_TBL23 40
+#define SYNC_RATE_TBL45 42
+#define SYNC_RATE_TBL67 44
+#define SYNC_RATE_TBL89 46
+#define SYNC_RATE_TBLab 48
+#define SYNC_RATE_TBLcd 50
+#define SYNC_RATE_TBLef 52
+
+
+
+#define EE_SCAMBASE 256
+
+
+
+ #define DOM_MASTER (BIT(0) + BIT(1))
+ #define SCAM_ENABLED BIT(2)
+ #define SCAM_LEVEL2 BIT(3)
+
+
+ #define RENEGO_ENA BITW(10)
+ #define CONNIO_ENA BITW(11)
+ #define GREEN_PC_ENA BITW(12)
+
+
+ #define AUTO_RATE_00 00
+ #define AUTO_RATE_05 01
+ #define AUTO_RATE_10 02
+ #define AUTO_RATE_20 03
+
+ #define WIDE_NEGO_BIT BIT(7)
+ #define DISC_ENABLE_BIT BIT(6)
+
+
+#endif
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: harpoon.h $
+ *
+ * Description: Register definitions for HARPOON ASIC.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+
+/*#include <globals.h>*/
+
+#ifndef __HARPOON__
+#define __HARPOON__
+
+
+ #define hp_vendor_id_0 0x00 /* LSB */
+ #define ORION_VEND_0 0x4B
+
+ #define hp_vendor_id_1 0x01 /* MSB */
+ #define ORION_VEND_1 0x10
+
+ #define hp_device_id_0 0x02 /* LSB */
+ #define ORION_DEV_0 0x30
+
+ #define hp_device_id_1 0x03 /* MSB */
+ #define ORION_DEV_1 0x81
+
+ /* Sub Vendor ID and Sub Device ID only available in
+ Harpoon Version 2 and higher */
+
+ #define hp_sub_vendor_id_0 0x04 /* LSB */
+ #define hp_sub_vendor_id_1 0x05 /* MSB */
+ #define hp_sub_device_id_0 0x06 /* LSB */
+ #define hp_sub_device_id_1 0x07 /* MSB */
+
+
+ #define hp_dual_addr_lo 0x08
+ #define hp_dual_addr_lmi 0x09
+ #define hp_dual_addr_hmi 0x0A
+ #define hp_dual_addr_hi 0x0B
+
+ #define hp_semaphore 0x0C
+ #define SCCB_MGR_ACTIVE BIT(0)
+ #define TICKLE_ME BIT(1)
+ #define SCCB_MGR_PRESENT BIT(3)
+ #define BIOS_IN_USE BIT(4)
+
+ #define hp_user_defined_D 0x0D
+
+ #define hp_reserved_E 0x0E
+
+ #define hp_sys_ctrl 0x0F
+
+ #define STOP_CLK BIT(0) /*Turn off BusMaster Clock */
+ #define DRVR_RST BIT(1) /*Firmware Reset to 80C15 chip */
+ #define HALT_MACH BIT(3) /*Halt State Machine */
+ #define HARD_ABORT BIT(4) /*Hard Abort */
+ #define DIAG_MODE BIT(5) /*Diagnostic Mode */
+
+ #define BM_ABORT_TMOUT 0x50 /*Halt State machine time out */
+
+ #define hp_sys_cfg 0x10
+
+ #define DONT_RST_FIFO BIT(7) /*Don't reset FIFO */
+
+
+ #define hp_host_ctrl0 0x11
+
+ #define DUAL_ADDR_MODE BIT(0) /*Enable 64-bit addresses */
+ #define IO_MEM_SPACE BIT(1) /*I/O Memory Space */
+ #define RESOURCE_LOCK BIT(2) /*Enable Resource Lock */
+ #define IGNOR_ACCESS_ERR BIT(3) /*Ignore Access Error */
+ #define HOST_INT_EDGE BIT(4) /*Host interrupt level/edge mode sel */
+ #define SIX_CLOCKS BIT(5) /*6 Clocks between Strobe */
+       #define  DMA_EVEN_PARITY BIT(6)   /*Enable DMA Even Parity */
+
+/*
+ #define BURST_MODE BIT(0)
+*/
+
+ #define hp_reserved_12 0x12
+
+ #define hp_host_blk_cnt 0x13
+
+ #define XFER_BLK1 0x00 /* 0 0 0 1 byte per block*/
+ #define XFER_BLK2 0x01 /* 0 0 1 2 byte per block*/
+ #define XFER_BLK4 0x02 /* 0 1 0 4 byte per block*/
+ #define XFER_BLK8 0x03 /* 0 1 1 8 byte per block*/
+ #define XFER_BLK16 0x04 /* 1 0 0 16 byte per block*/
+ #define XFER_BLK32 0x05 /* 1 0 1 32 byte per block*/
+ #define XFER_BLK64 0x06 /* 1 1 0 64 byte per block*/
+
+ #define BM_THRESHOLD 0x40 /* PCI mode can only xfer 16 bytes*/
+
+
+ #define hp_reserved_14 0x14
+ #define hp_reserved_15 0x15
+ #define hp_reserved_16 0x16
+
+ #define hp_int_mask 0x17
+
+ #define INT_CMD_COMPL BIT(0) /* DMA command complete */
+ #define INT_EXT_STATUS BIT(1) /* Extended Status Set */
+ #define INT_SCSI BIT(2) /* Scsi block interrupt */
+ #define INT_FIFO_RDY BIT(4) /* FIFO data ready */
+
+
+ #define hp_xfer_cnt_lo 0x18
+ #define hp_xfer_cnt_mi 0x19
+ #define hp_xfer_cnt_hi 0x1A
+ #define hp_xfer_cmd 0x1B
+
+ #define XFER_HOST_DMA 0x00 /* 0 0 0 Transfer Host -> DMA */
+ #define XFER_DMA_HOST 0x01 /* 0 0 1 Transfer DMA -> Host */
+ #define XFER_HOST_MPU 0x02 /* 0 1 0 Transfer Host -> MPU */
+ #define XFER_MPU_HOST 0x03 /* 0 1 1 Transfer MPU -> Host */
+ #define XFER_DMA_MPU 0x04 /* 1 0 0 Transfer DMA -> MPU */
+ #define XFER_MPU_DMA 0x05 /* 1 0 1 Transfer MPU -> DMA */
+ #define SET_SEMAPHORE 0x06 /* 1 1 0 Set Semaphore */
+ #define XFER_NOP 0x07 /* 1 1 1 Transfer NOP */
+ #define XFER_MB_MPU 0x06 /* 1 1 0 Transfer MB -> MPU */
+ #define XFER_MB_DMA 0x07 /* 1 1 1 Transfer MB -> DMA */
+
+
+ #define XFER_HOST_AUTO 0x00 /* 0 0 Auto Transfer Size */
+ #define XFER_HOST_8BIT 0x08 /* 0 1 8 BIT Transfer Size */
+ #define XFER_HOST_16BIT 0x10 /* 1 0 16 BIT Transfer Size */
+ #define XFER_HOST_32BIT 0x18 /* 1 1 32 BIT Transfer Size */
+
+ #define XFER_DMA_8BIT 0x20 /* 0 1 8 BIT Transfer Size */
+ #define XFER_DMA_16BIT 0x40 /* 1 0 16 BIT Transfer Size */
+
+ #define DISABLE_INT BIT(7) /*Do not interrupt at end of cmd. */
+
+ #define HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_8BIT))
+ #define HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_8BIT))
+ #define WIDE_HOST_WRT_CMD ((DISABLE_INT + XFER_HOST_DMA + XFER_HOST_AUTO + XFER_DMA_16BIT))
+ #define WIDE_HOST_RD_CMD ((DISABLE_INT + XFER_DMA_HOST + XFER_HOST_AUTO + XFER_DMA_16BIT))
+
+ #define hp_host_addr_lo 0x1C
+ #define hp_host_addr_lmi 0x1D
+ #define hp_host_addr_hmi 0x1E
+ #define hp_host_addr_hi 0x1F
+
+ #define hp_pio_data 0x20
+ #define hp_reserved_21 0x21
+ #define hp_ee_ctrl 0x22
+
+ #define EXT_ARB_ACK BIT(7)
+ #define SCSI_TERM_ENA_H BIT(6) /* SCSI high byte terminator */
+ #define SEE_MS BIT(5)
+ #define SEE_CS BIT(3)
+ #define SEE_CLK BIT(2)
+ #define SEE_DO BIT(1)
+ #define SEE_DI BIT(0)
+
+ #define EE_READ 0x06
+ #define EE_WRITE 0x05
+ #define EWEN 0x04
+ #define EWEN_ADDR 0x03C0
+ #define EWDS 0x04
+ #define EWDS_ADDR 0x0000
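+
+       /*
+        * Illustrative note (not part of the original sources): EE_READ,
+        * EE_WRITE, EWEN and EWDS look like Microwire-style opcodes for the
+        * serial EEPROM behind hp_ee_ctrl.  Presumably utilEESendCmdAddr()
+        * shifts the opcode and word address out on SEE_DO, one bit per
+        * SEE_CLK pulse with SEE_CS held high, and utilEERead() then clocks
+        * the 16-bit result back in on SEE_DI; treat that sequencing as an
+        * assumption rather than documented behaviour.
+        */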
+
+ #define hp_brdctl 0x23
+
+ #define DAT_7 BIT(7)
+ #define DAT_6 BIT(6)
+ #define DAT_5 BIT(5)
+ #define BRD_STB BIT(4)
+ #define BRD_CS BIT(3)
+ #define BRD_WR BIT(2)
+
+ #define hp_reserved_24 0x24
+ #define hp_reserved_25 0x25
+
+
+
+
+ #define hp_bm_ctrl 0x26
+
+ #define SCSI_TERM_ENA_L BIT(0) /*Enable/Disable external terminators */
+ #define FLUSH_XFER_CNTR BIT(1) /*Flush transfer counter */
+ #define BM_XFER_MIN_8 BIT(2) /*Enable bus master transfer of 9 */
+ #define BIOS_ENA BIT(3) /*Enable BIOS/FLASH Enable */
+ #define FORCE1_XFER BIT(5) /*Always xfer one byte in byte mode */
+ #define FAST_SINGLE BIT(6) /*?? */
+
+ #define BMCTRL_DEFAULT (FORCE1_XFER|FAST_SINGLE|SCSI_TERM_ENA_L)
+
+ #define hp_reserved_27 0x27
+
+ #define hp_sg_addr 0x28
+ #define hp_page_ctrl 0x29
+
+ #define SCATTER_EN BIT(0)
+ #define SGRAM_ARAM BIT(1)
+ #define BIOS_SHADOW BIT(2)
+ #define G_INT_DISABLE BIT(3) /* Enable/Disable all Interrupts */
+ #define NARROW_SCSI_CARD BIT(4) /* NARROW/WIDE SCSI config pin */
+
+ #define hp_reserved_2A 0x2A
+ #define hp_pci_cmd_cfg 0x2B
+
+ #define IO_SPACE_ENA BIT(0) /*enable I/O space */
+ #define MEM_SPACE_ENA BIT(1) /*enable memory space */
+ #define BUS_MSTR_ENA BIT(2) /*enable bus master operation */
+ #define MEM_WI_ENA BIT(4) /*enable Write and Invalidate */
+       #define  PAR_ERR_RESP  BIT(6)      /*enable parity error response. */
+
+ #define hp_reserved_2C 0x2C
+
+ #define hp_pci_stat_cfg 0x2D
+
+ #define DATA_PARITY_ERR BIT(0)
+ #define REC_TARGET_ABORT BIT(4) /*received Target abort */
+ #define REC_MASTER_ABORT BIT(5) /*received Master abort */
+ #define SIG_SYSTEM_ERR BIT(6)
+ #define DETECTED_PAR_ERR BIT(7)
+
+ #define hp_reserved_2E 0x2E
+
+ #define hp_sys_status 0x2F
+
+ #define SLV_DATA_RDY BIT(0) /*Slave data ready */
+ #define XFER_CNT_ZERO BIT(1) /*Transfer counter = 0 */
+ #define BM_FIFO_EMPTY BIT(2) /*FIFO empty */
+ #define BM_FIFO_FULL BIT(3) /*FIFO full */
+ #define HOST_OP_DONE BIT(4) /*host operation done */
+ #define DMA_OP_DONE BIT(5) /*DMA operation done */
+ #define SLV_OP_DONE BIT(6) /*Slave operation done */
+ #define PWR_ON_FLAG BIT(7) /*Power on flag */
+
+ #define hp_reserved_30 0x30
+
+ #define hp_host_status0 0x31
+
+ #define HOST_TERM BIT(5) /*Host Terminal Count */
+ #define HOST_TRSHLD BIT(6) /*Host Threshold */
+ #define CONNECTED_2_HOST BIT(7) /*Connected to Host */
+
+ #define hp_reserved_32 0x32
+
+ #define hp_rev_num 0x33
+
+ #define REV_A_CONST 0x0E
+ #define REV_B_CONST 0x0E
+
+ #define hp_stack_data 0x34
+ #define hp_stack_addr 0x35
+
+ #define hp_ext_status 0x36
+
+ #define BM_FORCE_OFF BIT(0) /*Bus Master is forced to get off */
+ #define PCI_TGT_ABORT BIT(0) /*PCI bus master transaction aborted */
+ #define PCI_DEV_TMOUT BIT(1) /*PCI Device Time out */
+ #define FIFO_TC_NOT_ZERO BIT(2) /*FIFO or transfer counter not zero */
+ #define CHIP_RST_OCCUR BIT(3) /*Chip reset occurs */
+ #define CMD_ABORTED BIT(4) /*Command aborted */
+ #define BM_PARITY_ERR BIT(5) /*parity error on data received */
+ #define PIO_OVERRUN BIT(6) /*Slave data overrun */
+ #define BM_CMD_BUSY BIT(7) /*Bus master transfer command busy */
+ #define BAD_EXT_STATUS (BM_FORCE_OFF | PCI_DEV_TMOUT | CMD_ABORTED | \
+ BM_PARITY_ERR | PIO_OVERRUN)
+
+ #define hp_int_status 0x37
+
+ #define BM_CMD_CMPL BIT(0) /*Bus Master command complete */
+ #define EXT_STATUS_ON BIT(1) /*Extended status is valid */
+ #define SCSI_INTERRUPT BIT(2) /*Global indication of a SCSI int. */
+ #define BM_FIFO_RDY BIT(4)
+ #define INT_ASSERTED BIT(5) /* */
+ #define SRAM_BUSY BIT(6) /*Scatter/Gather RAM busy */
+ #define CMD_REG_BUSY BIT(7)
+
+
+ #define hp_fifo_cnt 0x38
+ #define hp_curr_host_cnt 0x39
+ #define hp_reserved_3A 0x3A
+ #define hp_fifo_in_addr 0x3B
+
+ #define hp_fifo_out_addr 0x3C
+ #define hp_reserved_3D 0x3D
+ #define hp_reserved_3E 0x3E
+ #define hp_reserved_3F 0x3F
+
+
+
+ extern USHORT default_intena;
+
+ #define hp_intena 0x40
+
+ #define RESET BITW(7)
+ #define PROG_HLT BITW(6)
+ #define PARITY BITW(5)
+ #define FIFO BITW(4)
+ #define SEL BITW(3)
+ #define SCAM_SEL BITW(2)
+ #define RSEL BITW(1)
+ #define TIMEOUT BITW(0)
+ #define BUS_FREE BITW(15)
+ #define XFER_CNT_0 BITW(14)
+ #define PHASE BITW(13)
+ #define IUNKWN BITW(12)
+ #define ICMD_COMP BITW(11)
+ #define ITICKLE BITW(10)
+ #define IDO_STRT BITW(9)
+ #define ITAR_DISC BITW(8)
+ #define AUTO_INT (BITW(12)+BITW(11)+BITW(10)+BITW(9)+BITW(8))
+ #define CLR_ALL_INT 0xFFFF
+ #define CLR_ALL_INT_1 0xFF00
+
+ #define hp_intstat 0x42
+
+ #define hp_scsisig 0x44
+
+ #define SCSI_SEL BIT(7)
+ #define SCSI_BSY BIT(6)
+ #define SCSI_REQ BIT(5)
+ #define SCSI_ACK BIT(4)
+ #define SCSI_ATN BIT(3)
+ #define SCSI_CD BIT(2)
+ #define SCSI_MSG BIT(1)
+ #define SCSI_IOBIT BIT(0)
+
+ #define S_SCSI_PHZ (BIT(2)+BIT(1)+BIT(0))
+ #define S_CMD_PH (BIT(2) )
+ #define S_MSGO_PH (BIT(2)+BIT(1) )
+ #define S_STAT_PH (BIT(2) +BIT(0))
+ #define S_MSGI_PH (BIT(2)+BIT(1)+BIT(0))
+ #define S_DATAI_PH ( BIT(0))
+ #define S_DATAO_PH 0x00
+ #define S_ILL_PH ( BIT(1) )
+
+ #define hp_scsictrl_0 0x45
+
+ #define NO_ARB BIT(7)
+ #define SEL_TAR BIT(6)
+ #define ENA_ATN BIT(4)
+ #define ENA_RESEL BIT(2)
+ #define SCSI_RST BIT(1)
+ #define ENA_SCAM_SEL BIT(0)
+
+
+
+ #define hp_portctrl_0 0x46
+
+ #define SCSI_PORT BIT(7)
+ #define SCSI_INBIT BIT(6)
+ #define DMA_PORT BIT(5)
+ #define DMA_RD BIT(4)
+ #define HOST_PORT BIT(3)
+ #define HOST_WRT BIT(2)
+ #define SCSI_BUS_EN BIT(1)
+ #define START_TO BIT(0)
+
+ #define hp_scsireset 0x47
+
+ #define SCSI_TAR BIT(7)
+ #define SCSI_INI BIT(6)
+ #define SCAM_EN BIT(5)
+ #define ACK_HOLD BIT(4)
+ #define DMA_RESET BIT(3)
+ #define HPSCSI_RESET BIT(2)
+ #define PROG_RESET BIT(1)
+ #define FIFO_CLR BIT(0)
+
+ #define hp_xfercnt_0 0x48
+ #define hp_xfercnt_1 0x49
+ #define hp_xfercnt_2 0x4A
+ #define hp_xfercnt_3 0x4B
+
+ #define hp_fifodata_0 0x4C
+ #define hp_fifodata_1 0x4D
+ #define hp_addstat 0x4E
+
+ #define SCAM_TIMER BIT(7)
+ #define AUTO_RUNNING BIT(6)
+ #define FAST_SYNC BIT(5)
+ #define SCSI_MODE8 BIT(3)
+ #define SCSI_PAR_ERR BIT(0)
+
+ #define hp_prgmcnt_0 0x4F
+
+ #define AUTO_PC_MASK 0x3F
+
+ #define hp_selfid_0 0x50
+ #define hp_selfid_1 0x51
+ #define hp_arb_id 0x52
+
+ #define ARB_ID (BIT(3) + BIT(2) + BIT(1) + BIT(0))
+
+ #define hp_select_id 0x53
+
+ #define RESEL_ID (BIT(7) + BIT(6) + BIT(5) + BIT(4))
+ #define SELECT_ID (BIT(3) + BIT(2) + BIT(1) + BIT(0))
+
+ #define hp_synctarg_base 0x54
+ #define hp_synctarg_12 0x54
+ #define hp_synctarg_13 0x55
+ #define hp_synctarg_14 0x56
+ #define hp_synctarg_15 0x57
+
+ #define hp_synctarg_8 0x58
+ #define hp_synctarg_9 0x59
+ #define hp_synctarg_10 0x5A
+ #define hp_synctarg_11 0x5B
+
+ #define hp_synctarg_4 0x5C
+ #define hp_synctarg_5 0x5D
+ #define hp_synctarg_6 0x5E
+ #define hp_synctarg_7 0x5F
+
+ #define hp_synctarg_0 0x60
+ #define hp_synctarg_1 0x61
+ #define hp_synctarg_2 0x62
+ #define hp_synctarg_3 0x63
+
+ #define RATE_20MB 0x00
+ #define RATE_10MB ( BIT(5))
+ #define RATE_6_6MB ( BIT(6) )
+ #define RATE_5MB ( BIT(6)+BIT(5))
+ #define RATE_4MB (BIT(7) )
+ #define RATE_3_33MB (BIT(7) +BIT(5))
+ #define RATE_2_85MB (BIT(7)+BIT(6) )
+ #define RATE_2_5MB (BIT(7)+BIT(5)+BIT(6))
+ #define NEXT_CLK BIT(5)
+ #define SLOWEST_SYNC (BIT(7)+BIT(6)+BIT(5))
+ #define NARROW_SCSI BIT(4)
+ #define SYNC_OFFSET (BIT(3) + BIT(2) + BIT(1) + BIT(0))
+ #define DEFAULT_ASYNC 0x00
+ #define DEFAULT_OFFSET 0x0F
+
+ #define hp_autostart_0 0x64
+ #define hp_autostart_1 0x65
+ #define hp_autostart_2 0x66
+ #define hp_autostart_3 0x67
+
+
+
+ #define DISABLE 0x00
+ #define AUTO_IMMED BIT(5)
+ #define SELECT BIT(6)
+ #define RESELECT (BIT(6)+BIT(5))
+ #define BUSFREE BIT(7)
+ #define XFER_0 (BIT(7)+BIT(5))
+ #define END_DATA (BIT(7)+BIT(6))
+ #define MSG_PHZ (BIT(7)+BIT(6)+BIT(5))
+
+ #define hp_gp_reg_0 0x68
+ #define hp_gp_reg_1 0x69
+ #define hp_gp_reg_2 0x6A
+ #define hp_gp_reg_3 0x6B
+
+ #define hp_seltimeout 0x6C
+
+
+ #define TO_2ms 0x54 /* 2.0503ms */
+ #define TO_4ms 0x67 /* 3.9959ms */
+
+ #define TO_5ms 0x03 /* 4.9152ms */
+ #define TO_10ms 0x07 /* 11.xxxms */
+ #define TO_250ms 0x99 /* 250.68ms */
+ #define TO_290ms 0xB1 /* 289.99ms */
+ #define TO_350ms 0xD6 /* 350.62ms */
+ #define TO_417ms 0xFF /* 417.79ms */
+
+ #define hp_clkctrl_0 0x6D
+
+ #define PWR_DWN BIT(6)
+ #define ACTdeassert BIT(4)
+ #define ATNonErr BIT(3)
+ #define CLK_30MHZ BIT(1)
+ #define CLK_40MHZ (BIT(1) + BIT(0))
+ #define CLK_50MHZ BIT(2)
+
+ #define CLKCTRL_DEFAULT (ACTdeassert | CLK_40MHZ)
+
+ #define hp_fiforead 0x6E
+ #define hp_fifowrite 0x6F
+
+ #define hp_offsetctr 0x70
+ #define hp_xferstat 0x71
+
+ #define FIFO_FULL BIT(7)
+ #define FIFO_EMPTY BIT(6)
+ #define FIFO_MASK 0x3F /* Mask for the FIFO count value. */
+ #define FIFO_LEN 0x20
+
+ #define hp_portctrl_1 0x72
+
+ #define EVEN_HOST_P BIT(5)
+ #define INVT_SCSI BIT(4)
+ #define CHK_SCSI_P BIT(3)
+ #define HOST_MODE8 BIT(0)
+ #define HOST_MODE16 0x00
+
+ #define hp_xfer_pad 0x73
+
+ #define ID_UNLOCK BIT(3)
+ #define XFER_PAD BIT(2)
+
+ #define hp_scsidata_0 0x74
+ #define hp_scsidata_1 0x75
+ #define hp_timer_0 0x76
+ #define hp_timer_1 0x77
+
+ #define hp_reserved_78 0x78
+ #define hp_reserved_79 0x79
+ #define hp_reserved_7A 0x7A
+ #define hp_reserved_7B 0x7B
+
+ #define hp_reserved_7C 0x7C
+ #define hp_reserved_7D 0x7D
+ #define hp_reserved_7E 0x7E
+ #define hp_reserved_7F 0x7F
+
+ #define hp_aramBase 0x80
+ #define BIOS_DATA_OFFSET 0x60
+ #define BIOS_RELATIVE_CARD 0x64
+
+
+
+
+ #define AUTO_LEN 0x80
+ #define AR0 0x00
+ #define AR1 BITW(8)
+ #define AR2 BITW(9)
+ #define AR3 (BITW(9) + BITW(8))
+ #define SDATA BITW(10)
+
+ #define NOP_OP 0x00 /* Nop command */
+
+ #define CRD_OP BITW(11) /* Cmp Reg. w/ Data */
+
+ #define CRR_OP BITW(12) /* Cmp Reg. w. Reg. */
+
+ #define CBE_OP (BITW(14)+BITW(12)+BITW(11)) /* Cmp SCSI cmd class & Branch EQ */
+
+ #define CBN_OP (BITW(14)+BITW(13)) /* Cmp SCSI cmd class & Branch NOT EQ */
+
+ #define CPE_OP (BITW(14)+BITW(11)) /* Cmp SCSI phs & Branch EQ */
+
+ #define CPN_OP (BITW(14)+BITW(12)) /* Cmp SCSI phs & Branch NOT EQ */
+
+
+ #define ADATA_OUT 0x00
+ #define ADATA_IN BITW(8)
+ #define ACOMMAND BITW(10)
+ #define ASTATUS (BITW(10)+BITW(8))
+ #define AMSG_OUT (BITW(10)+BITW(9))
+ #define AMSG_IN (BITW(10)+BITW(9)+BITW(8))
+ #define AILLEGAL (BITW(9)+BITW(8))
+
+
+ #define BRH_OP BITW(13) /* Branch */
+
+
+ #define ALWAYS 0x00
+ #define EQUAL BITW(8)
+ #define NOT_EQ BITW(9)
+
+ #define TCB_OP (BITW(13)+BITW(11)) /* Test condition & branch */
+
+
+ #define ATN_SET BITW(8)
+ #define ATN_RESET BITW(9)
+ #define XFER_CNT (BITW(9)+BITW(8))
+ #define FIFO_0 BITW(10)
+ #define FIFO_NOT0 (BITW(10)+BITW(8))
+ #define T_USE_SYNC0 (BITW(10)+BITW(9))
+
+
+ #define MPM_OP BITW(15) /* Match phase and move data */
+
+ #define MDR_OP (BITW(12)+BITW(11)) /* Move data to Reg. */
+
+ #define MRR_OP BITW(14) /* Move DReg. to Reg. */
+
+
+ #define S_IDREG (BIT(2)+BIT(1)+BIT(0))
+
+
+ #define D_AR0 0x00
+ #define D_AR1 BIT(0)
+ #define D_AR2 BIT(1)
+ #define D_AR3 (BIT(1) + BIT(0))
+ #define D_SDATA BIT(2)
+ #define D_BUCKET (BIT(2) + BIT(1) + BIT(0))
+
+
+ #define ADR_OP (BITW(13)+BITW(12)) /* Logical AND Reg. w. Data */
+
+ #define ADS_OP (BITW(14)+BITW(13)+BITW(12))
+
+ #define ODR_OP (BITW(13)+BITW(12)+BITW(11))
+
+ #define ODS_OP (BITW(14)+BITW(13)+BITW(12)+BITW(11))
+
+ #define STR_OP (BITW(15)+BITW(14)) /* Store to A_Reg. */
+
+ #define AINT_ENA1 0x00
+ #define AINT_STAT1 BITW(8)
+ #define ASCSI_SIG BITW(9)
+ #define ASCSI_CNTL (BITW(9)+BITW(8))
+ #define APORT_CNTL BITW(10)
+ #define ARST_CNTL (BITW(10)+BITW(8))
+ #define AXFERCNT0 (BITW(10)+BITW(9))
+ #define AXFERCNT1 (BITW(10)+BITW(9)+BITW(8))
+ #define AXFERCNT2 BITW(11)
+ #define AFIFO_DATA (BITW(11)+BITW(8))
+ #define ASCSISELID (BITW(11)+BITW(9))
+ #define ASCSISYNC0 (BITW(11)+BITW(9)+BITW(8))
+
+
+ #define RAT_OP (BITW(14)+BITW(13)+BITW(11))
+
+ #define SSI_OP (BITW(15)+BITW(11))
+
+
+ #define SSI_ITAR_DISC (ITAR_DISC >> 8)
+ #define SSI_IDO_STRT (IDO_STRT >> 8)
+ #define SSI_IDI_STRT (IDO_STRT >> 8)
+
+ #define SSI_ICMD_COMP (ICMD_COMP >> 8)
+ #define SSI_ITICKLE (ITICKLE >> 8)
+
+ #define SSI_IUNKWN (IUNKWN >> 8)
+ #define SSI_INO_CC (IUNKWN >> 8)
+ #define SSI_IRFAIL (IUNKWN >> 8)
+
+
+ #define NP 0x10 /*Next Phase */
+ #define NTCMD 0x02 /*Non- Tagged Command start */
+ #define CMDPZ 0x04 /*Command phase */
+ #define DINT 0x12 /*Data Out/In interrupt */
+ #define DI 0x13 /*Data Out */
+ #define MI 0x14 /*Message In */
+ #define DC 0x19 /*Disconnect Message */
+ #define ST 0x1D /*Status Phase */
+ #define UNKNWN 0x24 /*Unknown bus action */
+ #define CC 0x25 /*Command Completion failure */
+ #define TICK 0x26 /*New target reselected us. */
+ #define RFAIL 0x27 /*Reselection failed */
+ #define SELCHK 0x28 /*Select & Check SCSI ID latch reg */
+
+
+ #define ID_MSG_STRT hp_aramBase + 0x00
+ #define NON_TAG_ID_MSG hp_aramBase + 0x06
+ #define CMD_STRT hp_aramBase + 0x08
+ #define SYNC_MSGS hp_aramBase + 0x08
+
+
+
+
+
+ #define TAG_STRT 0x00
+ #define SELECTION_START 0x00
+ #define DISCONNECT_START 0x10/2
+ #define END_DATA_START 0x14/2
+ #define NONTAG_STRT 0x02/2
+ #define CMD_ONLY_STRT CMDPZ/2
+ #define TICKLE_STRT TICK/2
+ #define SELCHK_STRT SELCHK/2
+
+
+
+
+#define mEEPROM_CLK_DELAY(port) (RD_HARPOON(port+hp_intstat_1))
+
+#define mWAIT_10MS(port) (RD_HARPOON(port+hp_intstat_1))
+
+
+#define CLR_XFER_CNT(port) (WR_HARPOON(port+hp_xfercnt_0, 0x00))
+
+#define SET_XFER_CNT(port, data) (WR_HARP32(port,hp_xfercnt_0,data))
+
+#define GET_XFER_CNT(port, xfercnt) {RD_HARP32(port,hp_xfercnt_0,xfercnt); xfercnt &= 0xFFFFFF;}
+/* #define GET_XFER_CNT(port, xfercnt) (xfercnt = RD_HARPOON(port+hp_xfercnt_2), \
+ xfercnt <<= 16,\
+ xfercnt |= RDW_HARPOON((USHORT)(port+hp_xfercnt_0)))
+ */
+#if defined(DOS)
+#define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((USHORT)(port+hp_host_addr_lo), (USHORT)(addr & 0x0000FFFFL)),\
+ addr >>= 16,\
+ WRW_HARPOON((USHORT)(port+hp_host_addr_hmi), (USHORT)(addr & 0x0000FFFFL)),\
+ WR_HARP32(port,hp_xfercnt_0,count),\
+ WRW_HARPOON((USHORT)(port+hp_xfer_cnt_lo), (USHORT)(count & 0x0000FFFFL)),\
+ count >>= 16,\
+ WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF)))
+#else
+#define HP_SETUP_ADDR_CNT(port,addr,count) (WRW_HARPOON((port+hp_host_addr_lo), (USHORT)(addr & 0x0000FFFFL)),\
+ addr >>= 16,\
+ WRW_HARPOON((port+hp_host_addr_hmi), (USHORT)(addr & 0x0000FFFFL)),\
+ WR_HARP32(port,hp_xfercnt_0,count),\
+ WRW_HARPOON((port+hp_xfer_cnt_lo), (USHORT)(count & 0x0000FFFFL)),\
+ count >>= 16,\
+ WR_HARPOON(port+hp_xfer_cnt_hi, (count & 0xFF)))
+#endif
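+
+/*
+ * Illustrative sketch (not part of the original sources): a bus-master block
+ * move is started by programming the host address and byte count and then
+ * writing a command byte to hp_xfer_cmd.  The helper below is hypothetical;
+ * the driver itself sequences this inside dataXferProcessor() and the
+ * busMstr*XferStart() routines.
+ */
+static void hp_start_host_read_sketch(ULONG port, ULONG phys_addr, ULONG byte_count)
+{
+   HP_SETUP_ADDR_CNT(port, phys_addr, byte_count);   /* load address + transfer count */
+   WR_HARPOON(port + hp_xfer_cmd, HOST_RD_CMD);      /* move data: DMA FIFO -> host memory */
+}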
+
+#define ACCEPT_MSG(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, S_ILL_PH);}
+
+
+#define ACCEPT_MSG_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));}
+
+#define ACCEPT_STAT(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, S_ILL_PH);}
+
+#define ACCEPT_STAT_ATN(port) {while(RD_HARPOON(port+hp_scsisig) & SCSI_REQ){}\
+ WR_HARPOON(port+hp_scsisig, (S_ILL_PH|SCSI_ATN));}
+
+#define DISABLE_AUTO(port) (WR_HARPOON(port+hp_scsireset, PROG_RESET),\
+ WR_HARPOON(port+hp_scsireset, 0x00))
+
+#define ARAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) | SGRAM_ARAM)))
+
+#define SGRAM_ACCESS(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~SGRAM_ARAM)))
+
+#define MDISABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE)))
+
+#define MENABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE)))
+
+
+
+#endif
+
+
+#if (FW_TYPE==_UCB_MGR_)
+void ReadNVRam(PSCCBcard pCurrCard,PUCB p_ucb);
+void WriteNVRam(PSCCBcard pCurrCard,PUCB p_ucb);
+void UpdateCheckSum(u32bits baseport);
+#endif // (FW_TYPE==_UCB_MGR_)
+
+#if defined(DOS)
+UCHAR sfm(USHORT port, PSCCB pcurrSCCB);
+void scsiStartAuto(USHORT port);
+UCHAR sisyncn(USHORT port, UCHAR p_card, UCHAR syncFlag);
+void ssel(USHORT port, UCHAR p_card);
+void sres(USHORT port, UCHAR p_card, PSCCBcard pCurrCard);
+void sdecm(UCHAR message, USHORT port, UCHAR p_card);
+void shandem(USHORT port, UCHAR p_card,PSCCB pCurrSCCB);
+void stsyncn(USHORT port, UCHAR p_card);
+void sisyncr(USHORT port,UCHAR sync_pulse, UCHAR offset);
+void sssyncv(USHORT p_port, UCHAR p_id, UCHAR p_sync_value, PSCCBMgr_tar_info currTar_Info);
+void sresb(USHORT port, UCHAR p_card);
+void sxfrp(USHORT p_port, UCHAR p_card);
+void schkdd(USHORT port, UCHAR p_card);
+UCHAR RdStack(USHORT port, UCHAR index);
+void WrStack(USHORT portBase, UCHAR index, UCHAR data);
+UCHAR ChkIfChipInitialized(USHORT ioPort);
+
+#if defined(V302)
+UCHAR GetTarLun(USHORT port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tag, PUCHAR lun);
+#endif
+
+void SendMsg(USHORT port, UCHAR message);
+void queueFlushTargSccb(UCHAR p_card, UCHAR thisTarg, UCHAR error_code);
+UCHAR scsellDOS(USHORT p_port, UCHAR targ_id);
+#else
+UCHAR sfm(ULONG port, PSCCB pcurrSCCB);
+void scsiStartAuto(ULONG port);
+UCHAR sisyncn(ULONG port, UCHAR p_card, UCHAR syncFlag);
+void ssel(ULONG port, UCHAR p_card);
+void sres(ULONG port, UCHAR p_card, PSCCBcard pCurrCard);
+void sdecm(UCHAR message, ULONG port, UCHAR p_card);
+void shandem(ULONG port, UCHAR p_card,PSCCB pCurrSCCB);
+void stsyncn(ULONG port, UCHAR p_card);
+void sisyncr(ULONG port,UCHAR sync_pulse, UCHAR offset);
+void sssyncv(ULONG p_port, UCHAR p_id, UCHAR p_sync_value, PSCCBMgr_tar_info currTar_Info);
+void sresb(ULONG port, UCHAR p_card);
+void sxfrp(ULONG p_port, UCHAR p_card);
+void schkdd(ULONG port, UCHAR p_card);
+UCHAR RdStack(ULONG port, UCHAR index);
+void WrStack(ULONG portBase, UCHAR index, UCHAR data);
+UCHAR ChkIfChipInitialized(ULONG ioPort);
+
+#if defined(V302)
+UCHAR GetTarLun(ULONG port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tar, PUCHAR lun);
+#endif
+
+void SendMsg(ULONG port, UCHAR message);
+void queueFlushTargSccb(UCHAR p_card, UCHAR thisTarg, UCHAR error_code);
+#endif
+
+void ssenss(PSCCBcard pCurrCard);
+void sinits(PSCCB p_sccb, UCHAR p_card);
+void RNVRamData(PNVRamInfo pNvRamInfo);
+
+#if defined(WIDE_SCSI)
+ #if defined(DOS)
+ UCHAR siwidn(USHORT port, UCHAR p_card);
+ void stwidn(USHORT port, UCHAR p_card);
+ void siwidr(USHORT port, UCHAR width);
+ #else
+ UCHAR siwidn(ULONG port, UCHAR p_card);
+ void stwidn(ULONG port, UCHAR p_card);
+ void siwidr(ULONG port, UCHAR width);
+ #endif
+#endif
+
+
+void queueSelectFail(PSCCBcard pCurrCard, UCHAR p_card);
+void queueDisconnect(PSCCB p_SCCB, UCHAR p_card);
+void queueCmdComplete(PSCCBcard pCurrCard, PSCCB p_SCCB, UCHAR p_card);
+void queueSearchSelect(PSCCBcard pCurrCard, UCHAR p_card);
+void queueFlushSccb(UCHAR p_card, UCHAR error_code);
+void queueAddSccb(PSCCB p_SCCB, UCHAR card);
+UCHAR queueFindSccb(PSCCB p_SCCB, UCHAR p_card);
+void utilUpdateResidual(PSCCB p_SCCB);
+USHORT CalcCrc16(UCHAR buffer[]);
+UCHAR CalcLrc(UCHAR buffer[]);
+
+
+#if defined(DOS)
+void Wait1Second(USHORT p_port);
+void Wait(USHORT p_port, UCHAR p_delay);
+void utilEEWriteOnOff(USHORT p_port,UCHAR p_mode);
+void utilEEWrite(USHORT p_port, USHORT ee_data, USHORT ee_addr);
+USHORT utilEERead(USHORT p_port, USHORT ee_addr);
+USHORT utilEEReadOrg(USHORT p_port, USHORT ee_addr);
+void utilEESendCmdAddr(USHORT p_port, UCHAR ee_cmd, USHORT ee_addr);
+#else
+void Wait1Second(ULONG p_port);
+void Wait(ULONG p_port, UCHAR p_delay);
+void utilEEWriteOnOff(ULONG p_port,UCHAR p_mode);
+void utilEEWrite(ULONG p_port, USHORT ee_data, USHORT ee_addr);
+USHORT utilEERead(ULONG p_port, USHORT ee_addr);
+USHORT utilEEReadOrg(ULONG p_port, USHORT ee_addr);
+void utilEESendCmdAddr(ULONG p_port, UCHAR ee_cmd, USHORT ee_addr);
+#endif
+
+
+
+#if defined(OS2)
+ void far phaseDataOut(ULONG port, UCHAR p_card);
+ void far phaseDataIn(ULONG port, UCHAR p_card);
+ void far phaseCommand(ULONG port, UCHAR p_card);
+ void far phaseStatus(ULONG port, UCHAR p_card);
+ void far phaseMsgOut(ULONG port, UCHAR p_card);
+ void far phaseMsgIn(ULONG port, UCHAR p_card);
+ void far phaseIllegal(ULONG port, UCHAR p_card);
+#else
+ #if defined(DOS)
+ void phaseDataOut(USHORT port, UCHAR p_card);
+ void phaseDataIn(USHORT port, UCHAR p_card);
+ void phaseCommand(USHORT port, UCHAR p_card);
+ void phaseStatus(USHORT port, UCHAR p_card);
+ void phaseMsgOut(USHORT port, UCHAR p_card);
+ void phaseMsgIn(USHORT port, UCHAR p_card);
+ void phaseIllegal(USHORT port, UCHAR p_card);
+ #else
+ void phaseDataOut(ULONG port, UCHAR p_card);
+ void phaseDataIn(ULONG port, UCHAR p_card);
+ void phaseCommand(ULONG port, UCHAR p_card);
+ void phaseStatus(ULONG port, UCHAR p_card);
+ void phaseMsgOut(ULONG port, UCHAR p_card);
+ void phaseMsgIn(ULONG port, UCHAR p_card);
+ void phaseIllegal(ULONG port, UCHAR p_card);
+ #endif
+#endif
+
+#if defined(DOS)
+void phaseDecode(USHORT port, UCHAR p_card);
+void phaseChkFifo(USHORT port, UCHAR p_card);
+void phaseBusFree(USHORT p_port, UCHAR p_card);
+#else
+void phaseDecode(ULONG port, UCHAR p_card);
+void phaseChkFifo(ULONG port, UCHAR p_card);
+void phaseBusFree(ULONG p_port, UCHAR p_card);
+#endif
+
+
+
+
+#if defined(DOS)
+void XbowInit(USHORT port, UCHAR scamFlg);
+void BusMasterInit(USHORT p_port);
+int DiagXbow(USHORT port);
+int DiagBusMaster(USHORT port);
+void DiagEEPROM(USHORT p_port);
+#else
+void XbowInit(ULONG port, UCHAR scamFlg);
+void BusMasterInit(ULONG p_port);
+int DiagXbow(ULONG port);
+int DiagBusMaster(ULONG port);
+void DiagEEPROM(ULONG p_port);
+#endif
+
+
+
+
+#if defined(DOS)
+void busMstrAbort(USHORT port);
+UCHAR busMstrTimeOut(USHORT port);
+void dataXferProcessor(USHORT port, PSCCBcard pCurrCard);
+void busMstrSGDataXferStart(USHORT port, PSCCB pCurrSCCB);
+void busMstrDataXferStart(USHORT port, PSCCB pCurrSCCB);
+void hostDataXferAbort(USHORT port, UCHAR p_card, PSCCB pCurrSCCB);
+#else
+void busMstrAbort(ULONG port);
+UCHAR busMstrTimeOut(ULONG port);
+void dataXferProcessor(ULONG port, PSCCBcard pCurrCard);
+void busMstrSGDataXferStart(ULONG port, PSCCB pCurrSCCB);
+void busMstrDataXferStart(ULONG port, PSCCB pCurrSCCB);
+void hostDataXferAbort(ULONG port, UCHAR p_card, PSCCB pCurrSCCB);
+#endif
+void hostDataXferRestart(PSCCB currSCCB);
+
+
+#if defined (DOS)
+UCHAR SccbMgr_bad_isr(USHORT p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int);
+#else
+UCHAR SccbMgr_bad_isr(ULONG p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int);
+
+#endif
+
+void SccbMgrTableInitAll(void);
+void SccbMgrTableInitCard(PSCCBcard pCurrCard, UCHAR p_card);
+void SccbMgrTableInitTarget(UCHAR p_card, UCHAR target);
+
+
+
+void scini(UCHAR p_card, UCHAR p_our_id, UCHAR p_power_up);
+
+#if defined(DOS)
+int scarb(USHORT p_port, UCHAR p_sel_type);
+void scbusf(USHORT p_port);
+void scsel(USHORT p_port);
+void scasid(UCHAR p_card, USHORT p_port);
+UCHAR scxferc(USHORT p_port, UCHAR p_data);
+UCHAR scsendi(USHORT p_port, UCHAR p_id_string[]);
+UCHAR sciso(USHORT p_port, UCHAR p_id_string[]);
+void scwirod(USHORT p_port, UCHAR p_data_bit);
+void scwiros(USHORT p_port, UCHAR p_data_bit);
+UCHAR scvalq(UCHAR p_quintet);
+UCHAR scsell(USHORT p_port, UCHAR targ_id);
+void scwtsel(USHORT p_port);
+void inisci(UCHAR p_card, USHORT p_port, UCHAR p_our_id);
+void scsavdi(UCHAR p_card, USHORT p_port);
+#else
+int scarb(ULONG p_port, UCHAR p_sel_type);
+void scbusf(ULONG p_port);
+void scsel(ULONG p_port);
+void scasid(UCHAR p_card, ULONG p_port);
+UCHAR scxferc(ULONG p_port, UCHAR p_data);
+UCHAR scsendi(ULONG p_port, UCHAR p_id_string[]);
+UCHAR sciso(ULONG p_port, UCHAR p_id_string[]);
+void scwirod(ULONG p_port, UCHAR p_data_bit);
+void scwiros(ULONG p_port, UCHAR p_data_bit);
+UCHAR scvalq(UCHAR p_quintet);
+UCHAR scsell(ULONG p_port, UCHAR targ_id);
+void scwtsel(ULONG p_port);
+void inisci(UCHAR p_card, ULONG p_port, UCHAR p_our_id);
+void scsavdi(UCHAR p_card, ULONG p_port);
+#endif
+UCHAR scmachid(UCHAR p_card, UCHAR p_id_string[]);
+
+
+#if defined(DOS)
+void autoCmdCmplt(USHORT p_port, UCHAR p_card);
+void autoLoadDefaultMap(USHORT p_port);
+#else
+void autoCmdCmplt(ULONG p_port, UCHAR p_card);
+void autoLoadDefaultMap(ULONG p_port);
+#endif
+
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+ void OS_start_timer(unsigned long ioport, unsigned long timeout);
+ void OS_stop_timer(unsigned long ioport, unsigned long timeout);
+ void OS_disable_int(unsigned char intvec);
+ void OS_enable_int(unsigned char intvec);
+ void OS_delay(unsigned long count);
+ int OS_VirtToPhys(u32bits CardHandle, u32bits *physaddr, u32bits *virtaddr);
+ #if !(defined(UNIX) || defined(OS2) || defined(SOLARIS_REAL_MODE))
+ void OS_Lock(PSCCBMGR_INFO pCardInfo);
+ void OS_UnLock(PSCCBMGR_INFO pCardInfo);
+#endif // !(UNIX || OS2 || SOLARIS_REAL_MODE)
+
+#endif
+
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+
+
+#if defined(OS2)
+ extern void (far *s_PhaseTbl[8]) (ULONG, UCHAR);
+#else
+ #if defined(DOS)
+ extern void (*s_PhaseTbl[8]) (USHORT, UCHAR);
+ #else
+ extern void (*s_PhaseTbl[8]) (ULONG, UCHAR);
+ #endif
+#endif
+
+extern SCCBSCAM_INFO scamInfo[MAX_SCSI_TAR];
+extern NVRAMINFO nvRamInfo[MAX_MB_CARDS];
+#if defined(DOS) || defined(OS2)
+extern UCHAR temp_id_string[ID_STRING_LENGTH];
+#endif
+extern UCHAR scamHAString[];
+
+
+extern UCHAR mbCards;
+#if defined(BUGBUG)
+extern UCHAR debug_int[MAX_CARDS][debug_size];
+extern UCHAR debug_index[MAX_CARDS];
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+
+#if (FW_TYPE==_SCCB_MGR_)
+#if defined(DOS)
+ extern UCHAR first_time;
+#endif
+#endif /* (FW_TYPE==_SCCB_MGR_) */
+
+#if (FW_TYPE==_UCB_MGR_)
+#if defined(DOS)
+ extern u08bits first_time;
+#endif
+#endif /* (FW_TYPE==_UCB_MGR_) */
+
+#if defined(BUGBUG)
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+
+extern unsigned int SccbGlobalFlags;
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: sccb.c $
+ *
+ * Description: Functions relating to handling of the SCCB interface
+ * between the device driver and the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+ /*#include <budioctl.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <eeprom.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+#define mOS_Lock(card) OS_Lock((PSCCBMGR_INFO)(((PSCCBcard)card)->cardInfo))
+#define mOS_UnLock(card) OS_UnLock((PSCCBMGR_INFO)(((PSCCBcard)card)->cardInfo))
+#else /* FW_TYPE==_UCB_MGR_ */
+#define mOS_Lock(card) OS_Lock((u32bits)(((PSCCBcard)card)->ioPort))
+#define mOS_UnLock(card) OS_UnLock((u32bits)(((PSCCBcard)card)->ioPort))
+#endif
+
+
+/*
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern SCCBCARD BL_Card[MAX_CARDS];
+
+extern NVRAMINFO nvRamInfo[MAX_MB_CARDS];
+extern UCHAR mbCards;
+
+#if defined (OS2)
+ extern void (far *s_PhaseTbl[8]) (ULONG, UCHAR);
+#else
+ #if defined(DOS)
+ extern void (*s_PhaseTbl[8]) (USHORT, UCHAR);
+ #else
+ extern void (*s_PhaseTbl[8]) (ULONG, UCHAR);
+ #endif
+#endif
+
+
+#if defined(BUGBUG)
+extern UCHAR debug_int[MAX_CARDS][debug_size];
+extern UCHAR debug_index[MAX_CARDS];
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+*/
+
+#if (FW_TYPE==_SCCB_MGR_)
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_sense_adapter
+ *
+ * Description: Setup and/or Search for cards and return info to caller.
+ *
+ *---------------------------------------------------------------------*/
+
+int SccbMgr_sense_adapter(PSCCBMGR_INFO pCardInfo)
+{
+#if defined(DOS)
+#else
+ static UCHAR first_time = 1;
+#endif
+
+ UCHAR i,j,id,ScamFlg;
+ USHORT temp,temp2,temp3,temp4,temp5,temp6;
+#if defined(DOS)
+ USHORT ioport;
+#else
+ ULONG ioport;
+#endif
+ PNVRamInfo pCurrNvRam;
+
+#if defined(DOS)
+ ioport = (USHORT)pCardInfo->si_baseaddr;
+#else
+ ioport = pCardInfo->si_baseaddr;
+#endif
+
+
+ if (RD_HARPOON(ioport+hp_vendor_id_0) != ORION_VEND_0)
+ return((int)FAILURE);
+
+ if ((RD_HARPOON(ioport+hp_vendor_id_1) != ORION_VEND_1))
+ return((int)FAILURE);
+
+ if ((RD_HARPOON(ioport+hp_device_id_0) != ORION_DEV_0))
+ return((int)FAILURE);
+
+ if ((RD_HARPOON(ioport+hp_device_id_1) != ORION_DEV_1))
+ return((int)FAILURE);
+
+
+ if (RD_HARPOON(ioport+hp_rev_num) != 0x0f){
+
+/* For a newer Harpoon, check the sub-device ID LSB:
+   bits 0-3 must all be ZERO for compatibility with the
+   current version of the SCCB Manager; otherwise skip
+   this Harpoon device. */
+
+ if (RD_HARPOON(ioport+hp_sub_device_id_0) & 0x0f)
+ return((int)FAILURE);
+ }
+
+ if (first_time)
+ {
+ SccbMgrTableInitAll();
+ first_time = 0;
+ mbCards = 0;
+ }
+
+ if(RdStack(ioport, 0) != 0x00) {
+ if(ChkIfChipInitialized(ioport) == FALSE)
+ {
+ pCurrNvRam = NULL;
+ WR_HARPOON(ioport+hp_semaphore, 0x00);
+ XbowInit(ioport, 0); /*Must Init the SCSI before attempting */
+ DiagEEPROM(ioport);
+ }
+ else
+ {
+ if(mbCards < MAX_MB_CARDS) {
+ pCurrNvRam = &nvRamInfo[mbCards];
+ mbCards++;
+ pCurrNvRam->niBaseAddr = ioport;
+ RNVRamData(pCurrNvRam);
+ }else
+ return((int) FAILURE);
+ }
+ }else
+ pCurrNvRam = NULL;
+#if defined (NO_BIOS_OPTION)
+ pCurrNvRam = NULL;
+ XbowInit(ioport, 0); /*Must Init the SCSI before attempting */
+ DiagEEPROM(ioport);
+#endif /* No BIOS Option */
+
+ WR_HARPOON(ioport+hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(ioport+hp_sys_ctrl, 0x00);
+
+ if(pCurrNvRam)
+ pCardInfo->si_id = pCurrNvRam->niAdapId;
+ else
+ pCardInfo->si_id = (UCHAR)(utilEERead(ioport, (ADAPTER_SCSI_ID/2)) &
+ (UCHAR)0x0FF);
+
+ pCardInfo->si_lun = 0x00;
+ pCardInfo->si_fw_revision = ORION_FW_REV;
+ temp2 = 0x0000;
+ temp3 = 0x0000;
+ temp4 = 0x0000;
+ temp5 = 0x0000;
+ temp6 = 0x0000;
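+
+   /* The EEPROM sync table packs two targets per word (one byte per target);
+      the loop below walks targets 0-15 and, shifting each mask right as it
+      goes, builds per-target bitmaps: temp2 = sync negotiation desired,
+      temp5 = rate of at least 10MB/s, temp6 = 20MB/s, temp3 = DISC_ENABLE_BIT
+      set, temp4 = WIDE_NEGO_BIT set.  Bit 0 of each mask ends up describing
+      target 0. */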
+
+ for (id = 0; id < (16/2); id++) {
+
+ if(pCurrNvRam){
+ temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+ temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+ (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+ }else
+ temp = utilEERead(ioport, (USHORT)((SYNC_RATE_TBL/2)+id));
+
+ for (i = 0; i < 2; temp >>=8,i++) {
+
+ temp2 >>= 1;
+ temp3 >>= 1;
+ temp4 >>= 1;
+ temp5 >>= 1;
+ temp6 >>= 1;
+ switch (temp & 0x3)
+ {
+ case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */
+ temp6 |= 0x8000; /* Fall through */
+ case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */
+ temp5 |= 0x8000; /* Fall through */
+ case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */
+ temp2 |= 0x8000; /* Fall through */
+ case AUTO_RATE_00: /* Asynchronous */
+ break;
+ }
+
+ if (temp & DISC_ENABLE_BIT)
+ temp3 |= 0x8000;
+
+ if (temp & WIDE_NEGO_BIT)
+ temp4 |= 0x8000;
+
+ }
+ }
+
+ pCardInfo->si_per_targ_init_sync = temp2;
+ pCardInfo->si_per_targ_no_disc = temp3;
+ pCardInfo->si_per_targ_wide_nego = temp4;
+ pCardInfo->si_per_targ_fast_nego = temp5;
+ pCardInfo->si_per_targ_ultra_nego = temp6;
+
+ if(pCurrNvRam)
+ i = pCurrNvRam->niSysConf;
+ else
+ i = (UCHAR)(utilEERead(ioport, (SYSTEM_CONFIG/2)));
+
+ if(pCurrNvRam)
+ ScamFlg = pCurrNvRam->niScamConf;
+ else
+ ScamFlg = (UCHAR) utilEERead(ioport, SCAM_CONFIG/2);
+
+ pCardInfo->si_flags = 0x0000;
+
+ if (i & 0x01)
+ pCardInfo->si_flags |= SCSI_PARITY_ENA;
+
+ if (!(i & 0x02))
+ pCardInfo->si_flags |= SOFT_RESET;
+
+ if (i & 0x10)
+ pCardInfo->si_flags |= EXTENDED_TRANSLATION;
+
+ if (ScamFlg & SCAM_ENABLED)
+ pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
+
+ if (ScamFlg & SCAM_LEVEL2)
+ pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
+
+ j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+ if (i & 0x04) {
+ j |= SCSI_TERM_ENA_L;
+ }
+ WR_HARPOON(ioport+hp_bm_ctrl, j );
+
+ j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+ if (i & 0x08) {
+ j |= SCSI_TERM_ENA_H;
+ }
+ WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+ if (!(RD_HARPOON(ioport+hp_page_ctrl) & NARROW_SCSI_CARD))
+
+ pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
+
+ pCardInfo->si_card_family = HARPOON_FAMILY;
+ pCardInfo->si_bustype = BUSTYPE_PCI;
+
+ if(pCurrNvRam){
+ pCardInfo->si_card_model[0] = '9';
+ switch(pCurrNvRam->niModel & 0x0f){
+ case MODEL_LT:
+ pCardInfo->si_card_model[1] = '3';
+ pCardInfo->si_card_model[2] = '0';
+ break;
+ case MODEL_LW:
+ pCardInfo->si_card_model[1] = '5';
+ pCardInfo->si_card_model[2] = '0';
+ break;
+ case MODEL_DL:
+ pCardInfo->si_card_model[1] = '3';
+ pCardInfo->si_card_model[2] = '2';
+ break;
+ case MODEL_DW:
+ pCardInfo->si_card_model[1] = '5';
+ pCardInfo->si_card_model[2] = '2';
+ break;
+ }
+ }else{
+ temp = utilEERead(ioport, (MODEL_NUMB_0/2));
+ pCardInfo->si_card_model[0] = (UCHAR)(temp >> 8);
+ temp = utilEERead(ioport, (MODEL_NUMB_2/2));
+
+ pCardInfo->si_card_model[1] = (UCHAR)(temp & 0x00FF);
+ pCardInfo->si_card_model[2] = (UCHAR)(temp >> 8);
+ }
+
+ if (pCardInfo->si_card_model[1] == '3')
+ {
+ if (RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7))
+ pCardInfo->si_flags |= LOW_BYTE_TERM;
+ }
+ else if (pCardInfo->si_card_model[2] == '0')
+ {
+ temp = RD_HARPOON(ioport+hp_xfer_pad);
+ WR_HARPOON(ioport+hp_xfer_pad, (temp & ~BIT(4)));
+ if (RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7))
+ pCardInfo->si_flags |= LOW_BYTE_TERM;
+ WR_HARPOON(ioport+hp_xfer_pad, (temp | BIT(4)));
+ if (RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7))
+ pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ WR_HARPOON(ioport+hp_xfer_pad, temp);
+ }
+ else
+ {
+ temp = RD_HARPOON(ioport+hp_ee_ctrl);
+ temp2 = RD_HARPOON(ioport+hp_xfer_pad);
+ WR_HARPOON(ioport+hp_ee_ctrl, (temp | SEE_CS));
+ WR_HARPOON(ioport+hp_xfer_pad, (temp2 | BIT(4)));
+ temp3 = 0;
+ for (i = 0; i < 8; i++)
+ {
+ temp3 <<= 1;
+ if (!(RD_HARPOON(ioport+hp_ee_ctrl) & BIT(7)))
+ temp3 |= 1;
+ WR_HARPOON(ioport+hp_xfer_pad, (temp2 & ~BIT(4)));
+ WR_HARPOON(ioport+hp_xfer_pad, (temp2 | BIT(4)));
+ }
+ WR_HARPOON(ioport+hp_ee_ctrl, temp);
+ WR_HARPOON(ioport+hp_xfer_pad, temp2);
+ if (!(temp3 & BIT(7)))
+ pCardInfo->si_flags |= LOW_BYTE_TERM;
+ if (!(temp3 & BIT(6)))
+ pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ }
+
+
+ ARAM_ACCESS(ioport);
+
+ for ( i = 0; i < 4; i++ ) {
+
+ pCardInfo->si_XlatInfo[i] =
+ RD_HARPOON(ioport+hp_aramBase+BIOS_DATA_OFFSET+i);
+ }
+
+   /* si_relative_cardnum is -1 if the BIOS did not sort the cards, else the
+      zero-based logical card number assigned by the BIOS. */
+
+ pCardInfo->si_relative_cardnum =
+ (UCHAR)(RD_HARPOON(ioport+hp_aramBase+BIOS_RELATIVE_CARD)-1);
+
+ SGRAM_ACCESS(ioport);
+
+ s_PhaseTbl[0] = phaseDataOut;
+ s_PhaseTbl[1] = phaseDataIn;
+ s_PhaseTbl[2] = phaseIllegal;
+ s_PhaseTbl[3] = phaseIllegal;
+ s_PhaseTbl[4] = phaseCommand;
+ s_PhaseTbl[5] = phaseStatus;
+ s_PhaseTbl[6] = phaseMsgOut;
+ s_PhaseTbl[7] = phaseMsgIn;
+
+ pCardInfo->si_present = 0x01;
+
+#if defined(BUGBUG)
+
+
+ for (i = 0; i < MAX_CARDS; i++) {
+
+ for (id=0; id<debug_size; id++)
+ debug_int[i][id] = (UCHAR)0x00;
+ debug_index[i] = 0;
+ }
+
+#endif
+
+ return(0);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_config_adapter
+ *
+ * Description: Setup adapter for normal operation (hard reset).
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+USHORT SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo)
+#else
+ULONG SccbMgr_config_adapter(PSCCBMGR_INFO pCardInfo)
+#endif
+{
+ PSCCBcard CurrCard = NULL;
+ PNVRamInfo pCurrNvRam;
+ UCHAR i,j,thisCard, ScamFlg;
+ USHORT temp,sync_bit_map,id;
+#if defined(DOS)
+ USHORT ioport;
+#else
+ ULONG ioport;
+#endif
+
+#if defined(DOS)
+ ioport = (USHORT)pCardInfo->si_baseaddr;
+#else
+ ioport = pCardInfo->si_baseaddr;
+#endif
+
+ for(thisCard =0; thisCard <= MAX_CARDS; thisCard++) {
+
+ if (thisCard == MAX_CARDS) {
+
+ return(FAILURE);
+ }
+
+ if (BL_Card[thisCard].ioPort == ioport) {
+
+ CurrCard = &BL_Card[thisCard];
+ SccbMgrTableInitCard(CurrCard,thisCard);
+ break;
+ }
+
+ else if (BL_Card[thisCard].ioPort == 0x00) {
+
+ BL_Card[thisCard].ioPort = ioport;
+ CurrCard = &BL_Card[thisCard];
+
+ if(mbCards)
+ for(i = 0; i < mbCards; i++){
+ if(CurrCard->ioPort == nvRamInfo[i].niBaseAddr)
+ CurrCard->pNvRamInfo = &nvRamInfo[i];
+ }
+ SccbMgrTableInitCard(CurrCard,thisCard);
+ CurrCard->cardIndex = thisCard;
+ CurrCard->cardInfo = pCardInfo;
+
+ break;
+ }
+ }
+
+ pCurrNvRam = CurrCard->pNvRamInfo;
+
+ if(pCurrNvRam){
+ ScamFlg = pCurrNvRam->niScamConf;
+ }
+ else{
+ ScamFlg = (UCHAR) utilEERead(ioport, SCAM_CONFIG/2);
+ }
+
+
+ BusMasterInit(ioport);
+ XbowInit(ioport, ScamFlg);
+
+#if defined (NO_BIOS_OPTION)
+
+
+ if (DiagXbow(ioport)) return(FAILURE);
+ if (DiagBusMaster(ioport)) return(FAILURE);
+
+#endif /* No BIOS Option */
+
+ autoLoadDefaultMap(ioport);
+
+
+ for (i = 0,id = 0x01; i != pCardInfo->si_id; i++,id <<= 1){}
+
+ WR_HARPOON(ioport+hp_selfid_0, id);
+ WR_HARPOON(ioport+hp_selfid_1, 0x00);
+ WR_HARPOON(ioport+hp_arb_id, pCardInfo->si_id);
+ CurrCard->ourId = pCardInfo->si_id;
+
+ i = (UCHAR) pCardInfo->si_flags;
+ if (i & SCSI_PARITY_ENA)
+ WR_HARPOON(ioport+hp_portctrl_1,(HOST_MODE8 | CHK_SCSI_P));
+
+ j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+ if (i & LOW_BYTE_TERM)
+ j |= SCSI_TERM_ENA_L;
+ WR_HARPOON(ioport+hp_bm_ctrl, j);
+
+ j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+ if (i & HIGH_BYTE_TERM)
+ j |= SCSI_TERM_ENA_H;
+ WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+
+ if (!(pCardInfo->si_flags & SOFT_RESET)) {
+
+ sresb(ioport,thisCard);
+
+ scini(thisCard, pCardInfo->si_id, 0);
+ }
+
+
+
+ if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
+ CurrCard->globalFlags |= F_NO_FILTER;
+
+ if(pCurrNvRam){
+ if(pCurrNvRam->niSysConf & 0x10)
+ CurrCard->globalFlags |= F_GREEN_PC;
+ }
+ else{
+ if (utilEERead(ioport, (SYSTEM_CONFIG/2)) & GREEN_PC_ENA)
+ CurrCard->globalFlags |= F_GREEN_PC;
+ }
+
+ /* Set global flag to indicate re-negotiation is to be done on all
+ check conditions */
+ if(pCurrNvRam){
+ if(pCurrNvRam->niScsiConf & 0x04)
+ CurrCard->globalFlags |= F_DO_RENEGO;
+ }
+ else{
+ if (utilEERead(ioport, (SCSI_CONFIG/2)) & RENEGO_ENA)
+ CurrCard->globalFlags |= F_DO_RENEGO;
+ }
+
+ if(pCurrNvRam){
+ if(pCurrNvRam->niScsiConf & 0x08)
+ CurrCard->globalFlags |= F_CONLUN_IO;
+ }
+ else{
+ if (utilEERead(ioport, (SCSI_CONFIG/2)) & CONNIO_ENA)
+ CurrCard->globalFlags |= F_CONLUN_IO;
+ }
+
+
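+ /* si_per_targ_no_disc is a bitmap with one bit per target ID; each
+ set bit marks the corresponding target with TAR_ALLOW_DISC below. */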
+ temp = pCardInfo->si_per_targ_no_disc;
+
+ for (i = 0,id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) {
+
+ if (temp & id)
+ sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC;
+ }
+
+ sync_bit_map = 0x0001;
+
+ for (id = 0; id < (MAX_SCSI_TAR/2); id++) {
+
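+ /* Either way, temp ends up in the 16-bit EEPROM word layout: the
+ NVRAM sync table stores two targets per byte, one nibble each, so
+ each nibble is spread into a full byte with bits 0-1 kept as the
+ sync-rate field and bits 2-3 moved up to bits 6-7 (apparently the
+ disconnect/wide flag bits used below). */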
+ if(pCurrNvRam){
+ temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+ temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+ (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+ }else
+ temp = utilEERead(ioport, (USHORT)((SYNC_RATE_TBL/2)+id));
+
+ for (i = 0; i < 2; temp >>=8,i++) {
+
+ if (pCardInfo->si_per_targ_init_sync & sync_bit_map) {
+
+ sccbMgrTbl[thisCard][id*2+i].TarEEValue = (UCHAR)temp;
+ }
+
+ else {
+ sccbMgrTbl[thisCard][id*2+i].TarStatus |= SYNC_SUPPORTED;
+ sccbMgrTbl[thisCard][id*2+i].TarEEValue =
+ (UCHAR)(temp & ~EE_SYNC_MASK);
+ }
+
+#if defined(WIDE_SCSI)
+/* if ((pCardInfo->si_per_targ_wide_nego & sync_bit_map) ||
+ (id*2+i >= 8)){
+*/
+ if (pCardInfo->si_per_targ_wide_nego & sync_bit_map){
+
+ sccbMgrTbl[thisCard][id*2+i].TarEEValue |= EE_WIDE_SCSI;
+
+ }
+
+ else { /* NARROW SCSI */
+ sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+ }
+
+#else
+ sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+#endif
+
+
+ sync_bit_map <<= 1;
+
+
+
+ }
+ }
+
+ WR_HARPOON((ioport+hp_semaphore),
+ (UCHAR)(RD_HARPOON((ioport+hp_semaphore)) | SCCB_MGR_PRESENT));
+
+#if defined(DOS)
+ return((USHORT)CurrCard);
+#else
+ return((ULONG)CurrCard);
+#endif
+}
+
+#else /* end (FW_TYPE==_SCCB_MGR_) */
+
+
+
+STATIC s16bits FP_PresenceCheck(PMGR_INFO pMgrInfo)
+{
+ PMGR_ENTRYPNTS pMgr_EntryPnts = &pMgrInfo->mi_Functions;
+
+ pMgr_EntryPnts->UCBMgr_probe_adapter = probe_adapter;
+ pMgr_EntryPnts->UCBMgr_init_adapter = init_adapter;
+ pMgr_EntryPnts->UCBMgr_start_UCB = SccbMgr_start_sccb;
+ pMgr_EntryPnts->UCBMgr_build_UCB = build_UCB;
+ pMgr_EntryPnts->UCBMgr_abort_UCB = SccbMgr_abort_sccb;
+ pMgr_EntryPnts->UCBMgr_my_int = SccbMgr_my_int;
+ pMgr_EntryPnts->UCBMgr_isr = SccbMgr_isr;
+ pMgr_EntryPnts->UCBMgr_scsi_reset = SccbMgr_scsi_reset;
+ pMgr_EntryPnts->UCBMgr_timer_expired = SccbMgr_timer_expired;
+#ifndef NO_IOCTLS
+ pMgr_EntryPnts->UCBMgr_unload_card = SccbMgr_unload_card;
+ pMgr_EntryPnts->UCBMgr_save_foreign_state =
+ SccbMgr_save_foreign_state;
+ pMgr_EntryPnts->UCBMgr_restore_foreign_state =
+ SccbMgr_restore_foreign_state;
+ pMgr_EntryPnts->UCBMgr_restore_native_state =
+ SccbMgr_restore_native_state;
+#endif /*NO_IOCTLS*/
+
+ pMgrInfo->mi_SGListFormat=0x01;
+ pMgrInfo->mi_DataPtrFormat=0x01;
+ pMgrInfo->mi_MaxSGElements= (u16bits) 0xffffffff;
+ pMgrInfo->mi_MgrPrivateLen=sizeof(SCCB);
+ pMgrInfo->mi_PCIVendorID=BL_VENDOR_ID;
+ pMgrInfo->mi_PCIDeviceID=FP_DEVICE_ID;
+ pMgrInfo->mi_MgrAttributes= ATTR_IO_MAPPED +
+ ATTR_PHYSICAL_ADDRESS +
+ ATTR_VIRTUAL_ADDRESS +
+ ATTR_OVERLAPPED_IO_IOCTLS_OK;
+ pMgrInfo->mi_IoRangeLen = 256;
+ return(0);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: probe_adapter
+ *
+ * Description: Setup and/or Search for cards and return info to caller.
+ *
+ *---------------------------------------------------------------------*/
+STATIC s32bits probe_adapter(PADAPTER_INFO pAdapterInfo)
+{
+ u16bits temp,temp2,temp3,temp4;
+ u08bits i,j,id;
+
+#if defined(DOS)
+#else
+ static u08bits first_time = 1;
+#endif
+ BASE_PORT ioport;
+ PNVRamInfo pCurrNvRam;
+
+ ioport = (BASE_PORT)pAdapterInfo->ai_baseaddr;
+
+
+
+ if (RD_HARPOON(ioport+hp_vendor_id_0) != ORION_VEND_0)
+ return(1);
+
+ if ((RD_HARPOON(ioport+hp_vendor_id_1) != ORION_VEND_1))
+ return(2);
+
+ if ((RD_HARPOON(ioport+hp_device_id_0) != ORION_DEV_0))
+ return(3);
+
+ if ((RD_HARPOON(ioport+hp_device_id_1) != ORION_DEV_1))
+ return(4);
+
+
+ if (RD_HARPOON(ioport+hp_rev_num) != 0x0f){
+
+
+/* For newer Harpoon chips, check the sub-device ID LSB:
+ bits (0-3) must all be ZERO for the device to be compatible
+ with the current version of the SCCBMgr; otherwise skip this
+ Harpoon device. */
+
+ if (RD_HARPOON(ioport+hp_sub_device_id_0) & 0x0f)
+ return(5);
+ }
+
+ if (first_time) {
+
+ SccbMgrTableInitAll();
+ first_time = 0;
+ mbCards = 0;
+ }
+
+ if(RdStack(ioport, 0) != 0x00) {
+ if(ChkIfChipInitialized(ioport) == FALSE)
+ {
+ pCurrNvRam = NULL;
+ WR_HARPOON(ioport+hp_semaphore, 0x00);
+ XbowInit(ioport, 0); /*Must Init the SCSI before attempting */
+ DiagEEPROM(ioport);
+ }
+ else
+ {
+ if(mbCards < MAX_MB_CARDS) {
+ pCurrNvRam = &nvRamInfo[mbCards];
+ mbCards++;
+ pCurrNvRam->niBaseAddr = ioport;
+ RNVRamData(pCurrNvRam);
+ }else
+ return((int) FAILURE);
+ }
+ }else
+ pCurrNvRam = NULL;
+
+#if defined (NO_BIOS_OPTION)
+ pCurrNvRam = NULL;
+ XbowInit(ioport, 0); /*Must Init the SCSI before attempting */
+ DiagEEPROM(ioport);
+#endif /* No BIOS Option */
+
+ WR_HARPOON(ioport+hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(ioport+hp_sys_ctrl, 0x00);
+
+ if(pCurrNvRam)
+ pAdapterInfo->ai_id = pCurrNvRam->niAdapId;
+ else
+ pAdapterInfo->ai_id = (u08bits)(utilEERead(ioport, (ADAPTER_SCSI_ID/2)) &
+ (u08bits)0x0FF);
+
+ pAdapterInfo->ai_lun = 0x00;
+ pAdapterInfo->ai_fw_revision[0] = '3';
+ pAdapterInfo->ai_fw_revision[1] = '1';
+ pAdapterInfo->ai_fw_revision[2] = '1';
+ pAdapterInfo->ai_fw_revision[3] = ' ';
+ pAdapterInfo->ai_NumChannels = 1;
+
+ temp2 = 0x0000;
+ temp3 = 0x0000;
+ temp4 = 0x0000;
+
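+ /* Build three 16-bit bitmaps (sync, disconnect, wide), one bit per
+ target: each pass shifts the map right and sets bit 15 when the
+ feature is enabled, so target 0 ends up in bit 0 after 16 targets. */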
+ for (id = 0; id < (16/2); id++) {
+
+ if(pCurrNvRam){
+ temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+ temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+ (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+ }else
+ temp = utilEERead(ioport, (u16bits)((SYNC_RATE_TBL/2)+id));
+
+ for (i = 0; i < 2; temp >>=8,i++) {
+
+ if ((temp & 0x03) != AUTO_RATE_00) {
+
+ temp2 >>= 0x01;
+ temp2 |= 0x8000;
+ }
+
+ else {
+ temp2 >>= 0x01;
+ }
+
+ if (temp & DISC_ENABLE_BIT) {
+
+ temp3 >>= 0x01;
+ temp3 |= 0x8000;
+ }
+
+ else {
+ temp3 >>= 0x01;
+ }
+
+ if (temp & WIDE_NEGO_BIT) {
+
+ temp4 >>= 0x01;
+ temp4 |= 0x8000;
+ }
+
+ else {
+ temp4 >>= 0x01;
+ }
+
+ }
+ }
+
+ pAdapterInfo->ai_per_targ_init_sync = temp2;
+ pAdapterInfo->ai_per_targ_no_disc = temp3;
+ pAdapterInfo->ai_per_targ_wide_nego = temp4;
+ if(pCurrNvRam)
+ i = pCurrNvRam->niSysConf;
+ else
+ i = (u08bits)(utilEERead(ioport, (SYSTEM_CONFIG/2)));
+
+ /*
+ ** interrupts always level-triggered for FlashPoint
+ */
+ pAdapterInfo->ai_stateinfo |= LEVEL_TRIG;
+
+ if (i & 0x01)
+ pAdapterInfo->ai_stateinfo |= SCSI_PARITY_ENA;
+
+ if (i & 0x02) /* SCSI Bus reset in AutoSCSI Set ? */
+ {
+ if(pCurrNvRam)
+ {
+ j = pCurrNvRam->niScamConf;
+ }
+ else
+ {
+ j = (u08bits) utilEERead(ioport, SCAM_CONFIG/2);
+ }
+ if(j & SCAM_ENABLED)
+ {
+ if(j & SCAM_LEVEL2)
+ {
+ pAdapterInfo->ai_stateinfo |= SCAM2_ENA;
+ }
+ else
+ {
+ pAdapterInfo->ai_stateinfo |= SCAM1_ENA;
+ }
+ }
+ }
+ j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+ if (i & 0x04) {
+ j |= SCSI_TERM_ENA_L;
+ pAdapterInfo->ai_stateinfo |= LOW_BYTE_TERM_ENA;
+ }
+ WR_HARPOON(ioport+hp_bm_ctrl, j );
+
+ j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+ if (i & 0x08) {
+ j |= SCSI_TERM_ENA_H;
+ pAdapterInfo->ai_stateinfo |= HIGH_BYTE_TERM_ENA;
+ }
+ WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+ if(RD_HARPOON(ioport + hp_page_ctrl) & BIOS_SHADOW)
+ {
+ pAdapterInfo->ai_FlashRomSize = 64 * 1024; /* 64k ROM */
+ }
+ else
+ {
+ pAdapterInfo->ai_FlashRomSize = 32 * 1024; /* 32k ROM */
+ }
+
+ pAdapterInfo->ai_stateinfo |= (FAST20_ENA | TAG_QUEUE_ENA);
+ if (!(RD_HARPOON(ioport+hp_page_ctrl) & NARROW_SCSI_CARD))
+ {
+ pAdapterInfo->ai_attributes |= (WIDE_CAPABLE | FAST20_CAPABLE
+ | SCAM2_CAPABLE
+ | TAG_QUEUE_CAPABLE
+ | SUPRESS_UNDERRRUNS_CAPABLE
+ | SCSI_PARITY_CAPABLE);
+ pAdapterInfo->ai_MaxTarg = 16;
+ pAdapterInfo->ai_MaxLun = 32;
+ }
+ else
+ {
+ pAdapterInfo->ai_attributes |= (FAST20_CAPABLE | SCAM2_CAPABLE
+ | TAG_QUEUE_CAPABLE
+ | SUPRESS_UNDERRRUNS_CAPABLE
+ | SCSI_PARITY_CAPABLE);
+ pAdapterInfo->ai_MaxTarg = 8;
+ pAdapterInfo->ai_MaxLun = 8;
+ }
+
+ pAdapterInfo->ai_product_family = HARPOON_FAMILY;
+ pAdapterInfo->ai_HBAbustype = BUSTYPE_PCI;
+
+ for (i=0;i<CARD_MODEL_NAMELEN;i++)
+ {
+ pAdapterInfo->ai_card_model[i]=' '; /* initialize the ai_card_model */
+ }
+
+ if(pCurrNvRam){
+ pAdapterInfo->ai_card_model[0] = '9';
+ switch(pCurrNvRam->niModel & 0x0f){
+ case MODEL_LT:
+ pAdapterInfo->ai_card_model[1] = '3';
+ pAdapterInfo->ai_card_model[2] = '0';
+ break;
+ case MODEL_LW:
+ pAdapterInfo->ai_card_model[1] = '5';
+ pAdapterInfo->ai_card_model[2] = '0';
+ break;
+ case MODEL_DL:
+ pAdapterInfo->ai_card_model[1] = '3';
+ pAdapterInfo->ai_card_model[2] = '2';
+ break;
+ case MODEL_DW:
+ pAdapterInfo->ai_card_model[1] = '5';
+ pAdapterInfo->ai_card_model[2] = '2';
+ break;
+ }
+ }else{
+ temp = utilEERead(ioport, (MODEL_NUMB_0/2));
+ pAdapterInfo->ai_card_model[0] = (u08bits)(temp >> 8);
+ temp = utilEERead(ioport, (MODEL_NUMB_2/2));
+
+ pAdapterInfo->ai_card_model[1] = (u08bits)(temp & 0x00FF);
+ pAdapterInfo->ai_card_model[2] = (u08bits)(temp >> 8);
+ }
+
+
+
+ pAdapterInfo->ai_FiberProductType = 0;
+
+ pAdapterInfo->ai_secondary_range = 0;
+
+ for (i=0;i<WORLD_WIDE_NAMELEN;i++)
+ {
+ pAdapterInfo->ai_worldwidename[i]='\0';
+ }
+
+ for (i=0;i<VENDOR_NAMELEN;i++)
+ {
+ pAdapterInfo->ai_vendorstring[i]='\0';
+ }
+ pAdapterInfo->ai_vendorstring[0]='B';
+ pAdapterInfo->ai_vendorstring[1]='U';
+ pAdapterInfo->ai_vendorstring[2]='S';
+ pAdapterInfo->ai_vendorstring[3]='L';
+ pAdapterInfo->ai_vendorstring[4]='O';
+ pAdapterInfo->ai_vendorstring[5]='G';
+ pAdapterInfo->ai_vendorstring[6]='I';
+ pAdapterInfo->ai_vendorstring[7]='C';
+
+ for (i=0;i<FAMILY_NAMELEN;i++)
+ {
+ pAdapterInfo->ai_AdapterFamilyString[i]='\0';
+ }
+ pAdapterInfo->ai_AdapterFamilyString[0]='F';
+ pAdapterInfo->ai_AdapterFamilyString[1]='L';
+ pAdapterInfo->ai_AdapterFamilyString[2]='A';
+ pAdapterInfo->ai_AdapterFamilyString[3]='S';
+ pAdapterInfo->ai_AdapterFamilyString[4]='H';
+ pAdapterInfo->ai_AdapterFamilyString[5]='P';
+ pAdapterInfo->ai_AdapterFamilyString[6]='O';
+ pAdapterInfo->ai_AdapterFamilyString[7]='I';
+ pAdapterInfo->ai_AdapterFamilyString[8]='N';
+ pAdapterInfo->ai_AdapterFamilyString[9]='T';
+
+ ARAM_ACCESS(ioport);
+
+ for ( i = 0; i < 4; i++ ) {
+
+ pAdapterInfo->ai_XlatInfo[i] =
+ RD_HARPOON(ioport+hp_aramBase+BIOS_DATA_OFFSET+i);
+ }
+
+ /* return with -1 if no sort, else return with
+ logical card number sorted by BIOS (zero-based) */
+
+
+ pAdapterInfo->ai_relative_cardnum =
+ (u08bits)(RD_HARPOON(ioport+hp_aramBase+BIOS_RELATIVE_CARD)-1);
+
+ SGRAM_ACCESS(ioport);
+
+ s_PhaseTbl[0] = phaseDataOut;
+ s_PhaseTbl[1] = phaseDataIn;
+ s_PhaseTbl[2] = phaseIllegal;
+ s_PhaseTbl[3] = phaseIllegal;
+ s_PhaseTbl[4] = phaseCommand;
+ s_PhaseTbl[5] = phaseStatus;
+ s_PhaseTbl[6] = phaseMsgOut;
+ s_PhaseTbl[7] = phaseMsgIn;
+
+ pAdapterInfo->ai_present = 0x01;
+
+#if defined(BUGBUG)
+
+
+ for (i = 0; i < MAX_CARDS; i++) {
+
+ for (id=0; id<debug_size; id++)
+ debug_int[i][id] = (u08bits)0x00;
+ debug_index[i] = 0;
+ }
+
+#endif
+
+ return(0);
+}
+
+
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: init_adapter, exported to BUDI via UCBMgr_init_adapter entry
+ *
+ *
+ * Description: Setup adapter for normal operation (hard reset).
+ *
+ *---------------------------------------------------------------------*/
+STATIC CARD_HANDLE init_adapter(PADAPTER_INFO pCardInfo)
+{
+ PSCCBcard CurrCard;
+ PNVRamInfo pCurrNvRam;
+ u08bits i,j,thisCard, ScamFlg;
+ u16bits temp,sync_bit_map,id;
+ BASE_PORT ioport;
+
+ ioport = (BASE_PORT)pCardInfo->ai_baseaddr;
+
+ for(thisCard =0; thisCard <= MAX_CARDS; thisCard++) {
+
+ if (thisCard == MAX_CARDS) {
+
+ return(FAILURE);
+ }
+
+ if (BL_Card[thisCard].ioPort == ioport) {
+
+ CurrCard = &BL_Card[thisCard];
+ SccbMgrTableInitCard(CurrCard,thisCard);
+ break;
+ }
+
+ else if (BL_Card[thisCard].ioPort == 0x00) {
+
+ BL_Card[thisCard].ioPort = ioport;
+ CurrCard = &BL_Card[thisCard];
+
+ if(mbCards)
+ for(i = 0; i < mbCards; i++){
+ if(CurrCard->ioPort == nvRamInfo[i].niBaseAddr)
+ CurrCard->pNvRamInfo = &nvRamInfo[i];
+ }
+ SccbMgrTableInitCard(CurrCard,thisCard);
+ CurrCard->cardIndex = thisCard;
+ CurrCard->cardInfo = pCardInfo;
+
+ break;
+ }
+ }
+
+ pCurrNvRam = CurrCard->pNvRamInfo;
+
+
+ if(pCurrNvRam){
+ ScamFlg = pCurrNvRam->niScamConf;
+ }
+ else{
+ ScamFlg = (UCHAR) utilEERead(ioport, SCAM_CONFIG/2);
+ }
+
+
+ BusMasterInit(ioport);
+ XbowInit(ioport, ScamFlg);
+
+#if defined (NO_BIOS_OPTION)
+
+
+ if (DiagXbow(ioport)) return(FAILURE);
+ if (DiagBusMaster(ioport)) return(FAILURE);
+
+#endif /* No BIOS Option */
+
+ autoLoadDefaultMap(ioport);
+
+
+ for (i = 0,id = 0x01; i != pCardInfo->ai_id; i++,id <<= 1){}
+
+ WR_HARPOON(ioport+hp_selfid_0, id);
+ WR_HARPOON(ioport+hp_selfid_1, 0x00);
+ WR_HARPOON(ioport+hp_arb_id, pCardInfo->ai_id);
+ CurrCard->ourId = (unsigned char) pCardInfo->ai_id;
+
+ i = (u08bits) pCardInfo->ai_stateinfo;
+ if (i & SCSI_PARITY_ENA)
+ WR_HARPOON(ioport+hp_portctrl_1,(HOST_MODE8 | CHK_SCSI_P));
+
+ j = (RD_HARPOON(ioport+hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
+ if (i & LOW_BYTE_TERM_ENA)
+ j |= SCSI_TERM_ENA_L;
+ WR_HARPOON(ioport+hp_bm_ctrl, j);
+
+ j = (RD_HARPOON(ioport+hp_ee_ctrl) & ~SCSI_TERM_ENA_H);
+ if (i & HIGH_BYTE_TERM_ENA)
+ j |= SCSI_TERM_ENA_H;
+ WR_HARPOON(ioport+hp_ee_ctrl, j );
+
+
+ if (!(pCardInfo->ai_stateinfo & NO_RESET_IN_INIT)) {
+
+ sresb(ioport,thisCard);
+
+ scini(thisCard, (u08bits) pCardInfo->ai_id, 0);
+ }
+
+
+
+ if (pCardInfo->ai_stateinfo & SUPRESS_UNDERRRUNS_ENA)
+ CurrCard->globalFlags |= F_NO_FILTER;
+
+ if(pCurrNvRam){
+ if(pCurrNvRam->niSysConf & 0x10)
+ CurrCard->globalFlags |= F_GREEN_PC;
+ }
+ else{
+ if (utilEERead(ioport, (SYSTEM_CONFIG/2)) & GREEN_PC_ENA)
+ CurrCard->globalFlags |= F_GREEN_PC;
+ }
+
+ /* Set global flag to indicate re-negotiation is to be done on all
+ check conditions */
+ if(pCurrNvRam){
+ if(pCurrNvRam->niScsiConf & 0x04)
+ CurrCard->globalFlags |= F_DO_RENEGO;
+ }
+ else{
+ if (utilEERead(ioport, (SCSI_CONFIG/2)) & RENEGO_ENA)
+ CurrCard->globalFlags |= F_DO_RENEGO;
+ }
+
+ if(pCurrNvRam){
+ if(pCurrNvRam->niScsiConf & 0x08)
+ CurrCard->globalFlags |= F_CONLUN_IO;
+ }
+ else{
+ if (utilEERead(ioport, (SCSI_CONFIG/2)) & CONNIO_ENA)
+ CurrCard->globalFlags |= F_CONLUN_IO;
+ }
+
+ temp = pCardInfo->ai_per_targ_no_disc;
+
+ for (i = 0,id = 1; i < MAX_SCSI_TAR; i++, id <<= 1) {
+
+ if (temp & id)
+ sccbMgrTbl[thisCard][i].TarStatus |= TAR_ALLOW_DISC;
+ }
+
+ sync_bit_map = 0x0001;
+
+ for (id = 0; id < (MAX_SCSI_TAR/2); id++){
+
+ if(pCurrNvRam){
+ temp = (USHORT) pCurrNvRam->niSyncTbl[id];
+ temp = ((temp & 0x03) + ((temp << 4) & 0xc0)) +
+ (((temp << 4) & 0x0300) + ((temp << 8) & 0xc000));
+ }else
+ temp = utilEERead(ioport, (u16bits)((SYNC_RATE_TBL/2)+id));
+
+ for (i = 0; i < 2; temp >>=8,i++){
+
+ if (pCardInfo->ai_per_targ_init_sync & sync_bit_map){
+
+ sccbMgrTbl[thisCard][id*2+i].TarEEValue = (u08bits)temp;
+ }
+
+ else {
+ sccbMgrTbl[thisCard][id*2+i].TarStatus |= SYNC_SUPPORTED;
+ sccbMgrTbl[thisCard][id*2+i].TarEEValue =
+ (u08bits)(temp & ~EE_SYNC_MASK);
+ }
+
+#if defined(WIDE_SCSI)
+/* if ((pCardInfo->ai_per_targ_wide_nego & sync_bit_map) ||
+ (id*2+i >= 8)){
+*/
+ if (pCardInfo->ai_per_targ_wide_nego & sync_bit_map){
+
+ sccbMgrTbl[thisCard][id*2+i].TarEEValue |= EE_WIDE_SCSI;
+
+ }
+
+ else { /* NARROW SCSI */
+ sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+ }
+
+#else
+ sccbMgrTbl[thisCard][id*2+i].TarStatus |= WIDE_NEGOCIATED;
+#endif
+
+
+ sync_bit_map <<= 1;
+ }
+ }
+
+
+ pCardInfo->ai_SGListFormat=0x01;
+ pCardInfo->ai_DataPtrFormat=0x01;
+ pCardInfo->ai_AEN_mask &= SCSI_RESET_COMPLETE;
+
+ WR_HARPOON((ioport+hp_semaphore),
+ (u08bits)(RD_HARPOON((ioport+hp_semaphore)) | SCCB_MGR_PRESENT));
+
+ return((u32bits)CurrCard);
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: build_UCB, exported to BUDI via the UCBMgr_build_UCB entry
+ *
+ * Description: Prepare the firmware portion of the UCB. Do not start it;
+ * resources are not guaranteed, so do not manipulate anything
+ * derived from state that may change.
+ *
+ *---------------------------------------------------------------------*/
+void build_UCB(CARD_HANDLE pCurrCard, PUCB p_ucb)
+{
+
+ u08bits thisCard;
+ u08bits i,j;
+
+ PSCCB p_sccb;
+
+
+ thisCard = ((PSCCBcard) pCurrCard)->cardIndex;
+
+
+ p_sccb=(PSCCB)p_ucb->UCB_MgrPrivatePtr;
+
+
+ p_sccb->Sccb_ucb_ptr=p_ucb;
+
+ switch (p_ucb->UCB_opcode & (OPC_DEVICE_RESET+OPC_XFER_SG+OPC_CHK_RESIDUAL))
+ {
+ case OPC_DEVICE_RESET:
+ p_sccb->OperationCode=RESET_COMMAND;
+ break;
+ case OPC_XFER_SG:
+ p_sccb->OperationCode=SCATTER_GATHER_COMMAND;
+ break;
+ case OPC_XFER_SG+OPC_CHK_RESIDUAL:
+ p_sccb->OperationCode=RESIDUAL_SG_COMMAND;
+ break;
+ case OPC_CHK_RESIDUAL:
+
+ p_sccb->OperationCode=RESIDUAL_COMMAND;
+ break;
+ default:
+ p_sccb->OperationCode=SCSI_INITIATOR_COMMAND;
+ break;
+ }
+
+ if (p_ucb->UCB_opcode & OPC_TQ_ENABLE)
+ {
+ p_sccb->ControlByte = (u08bits)((p_ucb->UCB_opcode & OPC_TQ_MASK)>>2) | F_USE_CMD_Q;
+ }
+ else
+ {
+ p_sccb->ControlByte = 0;
+ }
+
+
+ p_sccb->CdbLength = (u08bits)p_ucb->UCB_cdblen;
+
+ if (p_ucb->UCB_opcode & OPC_NO_AUTO_SENSE)
+ {
+ p_sccb->RequestSenseLength = 0;
+ }
+ else
+ {
+ p_sccb->RequestSenseLength = (unsigned char) p_ucb->UCB_senselen;
+ }
+
+
+ if (p_ucb->UCB_opcode & OPC_XFER_SG)
+ {
+ p_sccb->DataPointer=p_ucb->UCB_virt_dataptr;
+ p_sccb->DataLength = (((u32bits)p_ucb->UCB_NumSgElements)<<3);
+ }
+ else
+ {
+ p_sccb->DataPointer=p_ucb->UCB_phys_dataptr;
+ p_sccb->DataLength=p_ucb->UCB_datalen;
+ };
+
+ p_sccb->HostStatus=0;
+ p_sccb->TargetStatus=0;
+ p_sccb->TargID=(unsigned char)p_ucb->UCB_targid;
+ p_sccb->Lun=(unsigned char) p_ucb->UCB_lun;
+ p_sccb->SccbIOPort=((PSCCBcard)pCurrCard)->ioPort;
+
+ j=p_ucb->UCB_cdblen;
+ for (i=0;i<j;i++)
+ {
+ p_sccb->Cdb[i] = p_ucb->UCB_cdb[i];
+ }
+
+ p_sccb->SensePointer=p_ucb->UCB_phys_senseptr;
+
+ sinits(p_sccb,thisCard);
+
+}
+#ifndef NO_IOCTLS
+
+/*---------------------------------------------------------------------
+ *
+ * Function: GetDevSyncRate
+ *
+ *---------------------------------------------------------------------*/
+STATIC int GetDevSyncRate(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+ struct _SYNC_RATE_INFO * pSyncStr;
+ PSCCBMgr_tar_info currTar_Info;
+ BASE_PORT ioport;
+ u08bits scsiID, j;
+
+#if (FW_TYPE != _SCCB_MGR_)
+ if( p_ucb->UCB_targid >= pCurrCard->cardInfo->ai_MaxTarg )
+ {
+ return(1);
+ }
+#endif
+
+ ioport = pCurrCard->ioPort;
+ pSyncStr = (struct _SYNC_RATE_INFO *) p_ucb->UCB_virt_dataptr;
+ scsiID = (u08bits) p_ucb->UCB_targid;
+ currTar_Info = &sccbMgrTbl[pCurrCard->cardIndex][scsiID];
+ j = currTar_Info->TarSyncCtrl;
+
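+ /* TarSyncCtrl encoding (as decoded below): bits 5-7 select the
+ negotiated clock divisor, bits 0-3 hold the negotiated offset, and
+ the NARROW_SCSI bit halves the reported transfer rate. */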
+ switch (currTar_Info->TarEEValue & EE_SYNC_MASK)
+ {
+ case EE_SYNC_ASYNC:
+ pSyncStr->RequestMegaXferRate = 0x00;
+ break;
+ case EE_SYNC_5MB:
+ pSyncStr->RequestMegaXferRate = (j & NARROW_SCSI) ? 50 : 100;
+ break;
+ case EE_SYNC_10MB:
+ pSyncStr->RequestMegaXferRate = (j & NARROW_SCSI) ? 100 : 200;
+ break;
+ case EE_SYNC_20MB:
+ pSyncStr->RequestMegaXferRate = (j & NARROW_SCSI) ? 200 : 400;
+ break;
+ }
+
+ switch ((j >> 5) & 0x07)
+ {
+ case 0x00:
+ if((j & 0x07) == 0x00)
+ {
+ pSyncStr->ActualMegaXferRate = 0x00; /* Async Mode */
+ }
+ else
+ {
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 200 : 400;
+ }
+ break;
+ case 0x01:
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 100 : 200;
+ break;
+ case 0x02:
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 66 : 122;
+ break;
+ case 0x03:
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 50 : 100;
+ break;
+ case 0x04:
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 40 : 80;
+ break;
+ case 0x05:
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 33 : 66;
+ break;
+ case 0x06:
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 28 : 56;
+ break;
+ case 0x07:
+ pSyncStr->ActualMegaXferRate = (j & NARROW_SCSI) ? 25 : 50;
+ break;
+ }
+ pSyncStr->NegotiatedOffset = j & 0x0f;
+
+ return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SetDevSyncRate
+ *
+ *---------------------------------------------------------------------*/
+STATIC int SetDevSyncRate(PSCCBcard pCurrCard, PUCB p_ucb)
+{
+ struct _SYNC_RATE_INFO * pSyncStr;
+ PSCCBMgr_tar_info currTar_Info;
+ BASE_PORT ioPort;
+ u08bits scsiID, i, j, syncVal;
+ u16bits syncOffset, actualXferRate;
+ union {
+ u08bits tempb[2];
+ u16bits tempw;
+ }temp2;
+
+#if (FW_TYPE != _SCCB_MGR_)
+ if( p_ucb->UCB_targid >= pCurrCard->cardInfo->ai_MaxTarg )
+ {
+ return(1);
+ }
+#endif
+
+ ioPort = pCurrCard->ioPort;
+ pSyncStr = (struct _SYNC_RATE_INFO *) p_ucb->UCB_virt_dataptr;
+ scsiID = (u08bits) p_ucb->UCB_targid;
+ currTar_Info = &sccbMgrTbl[pCurrCard->cardIndex][scsiID];
+ i = RD_HARPOON(ioPort+hp_xfer_pad); /* Save current value */
+ WR_HARPOON(ioPort+hp_xfer_pad, (i | ID_UNLOCK));
+ WR_HARPOON(ioPort+hp_select_id, ((scsiID << 4) | scsiID));
+ j = RD_HARPOON(ioPort+hp_synctarg_0);
+ WR_HARPOON(ioPort+hp_xfer_pad, i); /* restore value */
+
+ actualXferRate = pSyncStr->ActualMegaXferRate;
+ if(!(j & NARROW_SCSI))
+ {
+ actualXferRate <<= 1;
+ }
+ if(actualXferRate == 0x00)
+ {
+ syncVal = EE_SYNC_ASYNC; /* Async Mode */
+ }
+ else if(actualXferRate == 0x0200)
+ {
+ syncVal = EE_SYNC_20MB; /* 20/40 MB Mode */
+ }
+ else if(actualXferRate > 0x0050 && actualXferRate < 0x0200 )
+ {
+ syncVal = EE_SYNC_10MB; /* 10/20 MB Mode */
+ }
+ else
+ {
+ syncVal = EE_SYNC_5MB; /* 5/10 MB Mode */
+ }
+ if((currTar_Info->TarEEValue & EE_SYNC_MASK) == syncVal)
+ return(0);
+ currTar_Info->TarEEValue = (currTar_Info->TarEEValue & ~EE_SYNC_MASK)
+ | syncVal;
+ syncOffset = (SYNC_RATE_TBL + scsiID) / 2;
+ temp2.tempw = utilEERead(ioPort, syncOffset);
+ if(scsiID & 0x01)
+ {
+ temp2.tempb[0] = (temp2.tempb[0] & ~EE_SYNC_MASK) | syncVal;
+ }
+ else
+ {
+ temp2.tempb[1] = (temp2.tempb[1] & ~EE_SYNC_MASK) | syncVal;
+ }
+ utilEEWriteOnOff(ioPort, 1);
+ utilEEWrite(ioPort, temp2.tempw, syncOffset);
+ utilEEWriteOnOff(ioPort, 0);
+ UpdateCheckSum(ioPort);
+
+ return(0);
+}
+/*---------------------------------------------------------------------
+ *
+ * Function: GetDevWideMode
+ *
+ *---------------------------------------------------------------------*/
+int GetDevWideMode(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+ u08bits *pData;
+
+ pData = (u08bits *)p_ucb->UCB_virt_dataptr;
+ if(sccbMgrTbl[pCurrCard->cardIndex][p_ucb->UCB_targid].TarEEValue
+ & EE_WIDE_SCSI)
+ {
+ *pData = 1;
+ }
+ else
+ {
+ *pData = 0;
+ }
+
+ return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SetDevWideMode
+ *
+ *---------------------------------------------------------------------*/
+int SetDevWideMode(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+ u08bits *pData;
+ PSCCBMgr_tar_info currTar_Info;
+ BASE_PORT ioPort;
+ u08bits scsiID, scsiWideMode;
+ u16bits syncOffset;
+ union {
+ u08bits tempb[2];
+ u16bits tempw;
+ }temp2;
+
+#if (FW_TYPE != _SCCB_MGR_)
+ if( !(pCurrCard->cardInfo->ai_attributes & WIDE_CAPABLE) )
+ {
+ return(1);
+ }
+
+ if( p_ucb->UCB_targid >= pCurrCard->cardInfo->ai_MaxTarg )
+ {
+ return(1);
+ }
+#endif
+
+ ioPort = pCurrCard->ioPort;
+ pData = (u08bits *)p_ucb->UCB_virt_dataptr;
+ scsiID = (u08bits) p_ucb->UCB_targid;
+ currTar_Info = &sccbMgrTbl[pCurrCard->cardIndex][scsiID];
+
+ if(*pData)
+ {
+ if(currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ {
+ return(0);
+ }
+ else
+ {
+ scsiWideMode = EE_WIDE_SCSI;
+ }
+ }
+ else
+ {
+ if(!(currTar_Info->TarEEValue & EE_WIDE_SCSI))
+ {
+ return(0);
+ }
+ else
+ {
+ scsiWideMode = 0;
+ }
+ }
+ currTar_Info->TarEEValue = (currTar_Info->TarEEValue & ~EE_WIDE_SCSI)
+ | scsiWideMode;
+
+ syncOffset = (SYNC_RATE_TBL + scsiID) / 2;
+ temp2.tempw = utilEERead(ioPort, syncOffset);
+ if(scsiID & 0x01)
+ {
+ temp2.tempb[0] = (temp2.tempb[0] & ~EE_WIDE_SCSI) | scsiWideMode;
+ }
+ else
+ {
+ temp2.tempb[1] = (temp2.tempb[1] & ~EE_WIDE_SCSI) | scsiWideMode;
+ }
+ utilEEWriteOnOff(ioPort, 1);
+ utilEEWrite(ioPort, temp2.tempw, syncOffset);
+ utilEEWriteOnOff(ioPort, 0);
+ UpdateCheckSum(ioPort);
+
+ return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: ReadNVRam
+ *
+ *---------------------------------------------------------------------*/
+void ReadNVRam(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+ u08bits *pdata;
+ u16bits i,numwrds,numbytes,offset,temp;
+ u08bits OneMore = FALSE;
+#if defined(DOS)
+ u16bits ioport;
+#else
+ u32bits ioport;
+#endif
+
+ numbytes = (u16bits) p_ucb->UCB_datalen;
+ ioport = pCurrCard->ioPort;
+ pdata = (u08bits *) p_ucb->UCB_virt_dataptr;
+ offset = (u16bits) (p_ucb->UCB_IOCTLParams[0]);
+
+
+
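+ /* The EEPROM is read a 16-bit word at a time, so an odd starting
+ offset means reading the containing word and keeping only the byte
+ at the requested offset; a trailing odd byte is handled the same
+ way below. */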
+ if (offset & 0x1)
+ {
+ *((u16bits*) pdata) = utilEERead(ioport,(u16bits)((offset - 1) / 2)); /* 16 bit read */
+ *pdata = *(pdata + 1);
+ ++offset;
+ ++pdata;
+ --numbytes;
+ }
+
+ numwrds = numbytes / 2;
+ if (numbytes & 1)
+ OneMore = TRUE;
+
+ for (i = 0; i < numwrds; i++)
+ {
+ *((u16bits*) pdata) = utilEERead(ioport,(u16bits)(offset / 2));
+ pdata += 2;
+ offset += 2;
+ }
+ if (OneMore)
+ {
+ --pdata;
+ -- offset;
+ temp = utilEERead(ioport,(u16bits)(offset / 2));
+ *pdata = (u08bits) (temp);
+ }
+
+} /* end proc ReadNVRam */
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: WriteNVRam
+ *
+ *---------------------------------------------------------------------*/
+void WriteNVRam(PSCCBcard pCurrCard,PUCB p_ucb)
+{
+ u08bits *pdata;
+ u16bits i,numwrds,numbytes,offset, eeprom_end;
+ u08bits OneMore = FALSE;
+ union {
+ u08bits tempb[2];
+ u16bits tempw;
+ } temp2;
+
+#if defined(DOS)
+ u16bits ioport;
+#else
+ u32bits ioport;
+#endif
+
+ numbytes = (u16bits) p_ucb->UCB_datalen;
+ ioport = pCurrCard->ioPort;
+ pdata = (u08bits *) p_ucb->UCB_virt_dataptr;
+ offset = (u16bits) (p_ucb->UCB_IOCTLParams[0]);
+
+ if (RD_HARPOON(ioport+hp_page_ctrl) & NARROW_SCSI_CARD)
+ eeprom_end = 512;
+ else
+ eeprom_end = 768;
+
+ if(offset > eeprom_end)
+ return;
+
+ if((offset + numbytes) > eeprom_end)
+ numbytes = eeprom_end - offset;
+
+ utilEEWriteOnOff(ioport,1); /* Enable write access to the EEPROM */
+
+
+
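+ /* EEPROM writes are also word-wide: unaligned leading and trailing
+ bytes are handled with a read-modify-write of the containing word. */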
+ if (offset & 0x1)
+ {
+ temp2.tempw = utilEERead(ioport,(u16bits)((offset - 1) / 2)); /* 16 bit read */
+ temp2.tempb[1] = *pdata;
+ utilEEWrite(ioport, temp2.tempw, (u16bits)((offset -1) / 2));
+ *pdata = *(pdata + 1);
+ ++offset;
+ ++pdata;
+ --numbytes;
+ }
+
+ numwrds = numbytes / 2;
+ if (numbytes & 1)
+ OneMore = TRUE;
+
+ for (i = 0; i < numwrds; i++)
+ {
+ utilEEWrite(ioport, *((pu16bits)pdata),(u16bits)(offset / 2));
+ pdata += 2;
+ offset += 2;
+ }
+ if (OneMore)
+ {
+
+ temp2.tempw = utilEERead(ioport,(u16bits)(offset / 2));
+ temp2.tempb[0] = *pdata;
+ utilEEWrite(ioport, temp2.tempw, (u16bits)(offset / 2));
+ }
+ utilEEWriteOnOff(ioport,0); /* Turn off write access */
+ UpdateCheckSum((u32bits)ioport);
+
+} /* end proc WriteNVRam */
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: UpdateCheckSum
+ *
+ * Description: Update Check Sum in EEPROM
+ *
+ *---------------------------------------------------------------------*/
+
+
+void UpdateCheckSum(u32bits baseport)
+{
+ USHORT i,sum_data, eeprom_end;
+
+ sum_data = 0x0000;
+
+
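+ /* The checksum is the 16-bit sum of every EEPROM word from word 1 up
+ to the end of the part (512 or 768 bytes depending on the card),
+ written back to the checksum word. */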
+ if (RD_HARPOON(baseport+hp_page_ctrl) & NARROW_SCSI_CARD)
+ eeprom_end = 512;
+ else
+ eeprom_end = 768;
+
+ for (i = 1; i < eeprom_end/2; i++)
+ {
+ sum_data += utilEERead(baseport, i);
+ }
+
+ utilEEWriteOnOff(baseport,1); /* Enable write access to the EEPROM */
+
+ utilEEWrite(baseport, sum_data, EEPROM_CHECK_SUM/2);
+ utilEEWriteOnOff(baseport,0); /* Turn off write access */
+}
+
+void SccbMgr_save_foreign_state(PADAPTER_INFO pAdapterInfo)
+{
+}
+
+
+void SccbMgr_restore_foreign_state(CARD_HANDLE pCurrCard)
+{
+}
+
+void SccbMgr_restore_native_state(CARD_HANDLE pCurrCard)
+{
+}
+
+#endif /* NO_IOCTLS */
+
+#endif /* (FW_TYPE==_UCB_MGR_) */
+
+#ifndef NO_IOCTLS
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_unload_card(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+void SccbMgr_unload_card(USHORT pCurrCard)
+#else
+void SccbMgr_unload_card(ULONG pCurrCard)
+#endif
+#endif
+{
+ UCHAR i;
+#if defined(DOS)
+ USHORT portBase;
+ USHORT regOffset;
+#else
+ ULONG portBase;
+ ULONG regOffset;
+#endif
+ ULONG scamData;
+#if defined(OS2)
+ ULONG far *pScamTbl;
+#else
+ ULONG *pScamTbl;
+#endif
+ PNVRamInfo pCurrNvRam;
+
+ pCurrNvRam = ((PSCCBcard)pCurrCard)->pNvRamInfo;
+
+ if(pCurrNvRam){
+ WrStack(pCurrNvRam->niBaseAddr, 0, pCurrNvRam->niModel);
+ WrStack(pCurrNvRam->niBaseAddr, 1, pCurrNvRam->niSysConf);
+ WrStack(pCurrNvRam->niBaseAddr, 2, pCurrNvRam->niScsiConf);
+ WrStack(pCurrNvRam->niBaseAddr, 3, pCurrNvRam->niScamConf);
+ WrStack(pCurrNvRam->niBaseAddr, 4, pCurrNvRam->niAdapId);
+
+ for(i = 0; i < MAX_SCSI_TAR / 2; i++)
+ WrStack(pCurrNvRam->niBaseAddr, (UCHAR)(i+5), pCurrNvRam->niSyncTbl[i]);
+
+ portBase = pCurrNvRam->niBaseAddr;
+
+ for(i = 0; i < MAX_SCSI_TAR; i++){
+ regOffset = hp_aramBase + 64 + i*4;
+#if defined(OS2)
+ pScamTbl = (ULONG far *) &pCurrNvRam->niScamTbl[i];
+#else
+ pScamTbl = (ULONG *) &pCurrNvRam->niScamTbl[i];
+#endif
+ scamData = *pScamTbl;
+ WR_HARP32(portBase, regOffset, scamData);
+ }
+
+ }else{
+ WrStack(((PSCCBcard)pCurrCard)->ioPort, 0, 0);
+ }
+}
+#endif /* NO_IOCTLS */
+
+
+void RNVRamData(PNVRamInfo pNvRamInfo)
+{
+ UCHAR i;
+#if defined(DOS)
+ USHORT portBase;
+ USHORT regOffset;
+#else
+ ULONG portBase;
+ ULONG regOffset;
+#endif
+ ULONG scamData;
+#if defined (OS2)
+ ULONG far *pScamTbl;
+#else
+ ULONG *pScamTbl;
+#endif
+
+ pNvRamInfo->niModel = RdStack(pNvRamInfo->niBaseAddr, 0);
+ pNvRamInfo->niSysConf = RdStack(pNvRamInfo->niBaseAddr, 1);
+ pNvRamInfo->niScsiConf = RdStack(pNvRamInfo->niBaseAddr, 2);
+ pNvRamInfo->niScamConf = RdStack(pNvRamInfo->niBaseAddr, 3);
+ pNvRamInfo->niAdapId = RdStack(pNvRamInfo->niBaseAddr, 4);
+
+ for(i = 0; i < MAX_SCSI_TAR / 2; i++)
+ pNvRamInfo->niSyncTbl[i] = RdStack(pNvRamInfo->niBaseAddr, (UCHAR)(i+5));
+
+ portBase = pNvRamInfo->niBaseAddr;
+
+ for(i = 0; i < MAX_SCSI_TAR; i++){
+ regOffset = hp_aramBase + 64 + i*4;
+ RD_HARP32(portBase, regOffset, scamData);
+#if defined(OS2)
+ pScamTbl = (ULONG far *) &pNvRamInfo->niScamTbl[i];
+#else
+ pScamTbl = (ULONG *) &pNvRamInfo->niScamTbl[i];
+#endif
+ *pScamTbl = scamData;
+ }
+
+}
+
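+/*---------------------------------------------------------------------
+ *
+ * Functions: RdStack / WrStack
+ *
+ * Description: Read/write one byte of the chip's indexed storage through
+ * the stack address/data register pair; the driver uses it to
+ * cache the NVRAM settings (see RNVRamData and
+ * SccbMgr_unload_card).
+ *
+ *---------------------------------------------------------------------*/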
+#if defined(DOS)
+UCHAR RdStack(USHORT portBase, UCHAR index)
+#else
+UCHAR RdStack(ULONG portBase, UCHAR index)
+#endif
+{
+ WR_HARPOON(portBase + hp_stack_addr, index);
+ return(RD_HARPOON(portBase + hp_stack_data));
+}
+
+#if defined(DOS)
+void WrStack(USHORT portBase, UCHAR index, UCHAR data)
+#else
+void WrStack(ULONG portBase, UCHAR index, UCHAR data)
+#endif
+{
+ WR_HARPOON(portBase + hp_stack_addr, index);
+ WR_HARPOON(portBase + hp_stack_data, data);
+}
+
+
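+/*---------------------------------------------------------------------
+ *
+ * Function: ChkIfChipInitialized
+ *
+ * Description: Heuristic check of whether the chip was already set up
+ * (presumably by the BIOS or a previous load): the arbitration
+ * ID must match the value cached in stack byte 4, the clock
+ * control must be at its default, and the selection timeout
+ * must be one of the two values this driver programs.
+ *
+ *---------------------------------------------------------------------*/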
+#if (FW_TYPE==_UCB_MGR_)
+u08bits ChkIfChipInitialized(BASE_PORT ioPort)
+#else
+#if defined(DOS)
+UCHAR ChkIfChipInitialized(USHORT ioPort)
+#else
+UCHAR ChkIfChipInitialized(ULONG ioPort)
+#endif
+#endif
+{
+ if((RD_HARPOON(ioPort + hp_arb_id) & 0x0f) != RdStack(ioPort, 4))
+ return(FALSE);
+ if((RD_HARPOON(ioPort + hp_clkctrl_0) & CLKCTRL_DEFAULT)
+ != CLKCTRL_DEFAULT)
+ return(FALSE);
+ if((RD_HARPOON(ioPort + hp_seltimeout) == TO_250ms) ||
+ (RD_HARPOON(ioPort + hp_seltimeout) == TO_290ms))
+ return(TRUE);
+ return(FALSE);
+
+}
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_start_sccb
+ *
+ * Description: Start a command pointed to by p_Sccb. When the
+ * command is completed it will be returned via the
+ * callback function.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_start_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb)
+#else
+#if defined(DOS)
+void SccbMgr_start_sccb(USHORT pCurrCard, PSCCB p_Sccb)
+#else
+void SccbMgr_start_sccb(ULONG pCurrCard, PSCCB p_Sccb)
+#endif
+#endif
+{
+#if defined(DOS)
+ USHORT ioport;
+#else
+ ULONG ioport;
+#endif
+ UCHAR thisCard, lun;
+ PSCCB pSaveSccb;
+ CALL_BK_FN callback;
+
+#if (FW_TYPE==_UCB_MGR_)
+ PSCCB p_Sccb;
+#endif
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+ thisCard = ((PSCCBcard) pCurrCard)->cardIndex;
+ ioport = ((PSCCBcard) pCurrCard)->ioPort;
+
+#if (FW_TYPE==_UCB_MGR_)
+ p_Sccb = (PSCCB)p_ucb->UCB_MgrPrivatePtr;
+#endif
+
+ if((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN))
+ {
+
+#if (FW_TYPE==_UCB_MGR_)
+ p_ucb->UCB_hbastat = SCCB_COMPLETE;
+ p_ucb->UCB_status=SCCB_ERROR;
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+#endif
+
+#if (FW_TYPE==_SCCB_MGR_)
+ p_Sccb->HostStatus = SCCB_COMPLETE;
+ p_Sccb->SccbStatus = SCCB_ERROR;
+ callback = (CALL_BK_FN)p_Sccb->SccbCallback;
+ if (callback)
+ callback(p_Sccb);
+#endif
+
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ }
+
+#if (FW_TYPE==_SCCB_MGR_)
+ sinits(p_Sccb,thisCard);
+#endif
+
+
+#if (FW_TYPE==_UCB_MGR_)
+#ifndef NO_IOCTLS
+
+ if (p_ucb->UCB_opcode & OPC_IOCTL)
+ {
+
+ switch (p_ucb->UCB_IOCTLCommand)
+ {
+ case READ_NVRAM:
+ ReadNVRam((PSCCBcard)pCurrCard,p_ucb);
+ p_ucb->UCB_status=UCB_SUCCESS;
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+
+ case WRITE_NVRAM:
+ WriteNVRam((PSCCBcard)pCurrCard,p_ucb);
+ p_ucb->UCB_status=UCB_SUCCESS;
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+
+ case SEND_SCSI_PASSTHRU:
+#if (FW_TYPE != _SCCB_MGR_)
+ if( p_ucb->UCB_targid >=
+ ((PSCCBcard)pCurrCard)->cardInfo->ai_MaxTarg )
+ {
+ p_ucb->UCB_status = UCB_ERROR;
+ p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ }
+#endif
+ break;
+
+ case HARD_RESET:
+ p_ucb->UCB_status = UCB_INVALID;
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ case GET_DEVICE_SYNCRATE:
+ if( !GetDevSyncRate((PSCCBcard)pCurrCard,p_ucb) )
+ {
+ p_ucb->UCB_status = UCB_SUCCESS;
+ }
+ else
+ {
+ p_ucb->UCB_status = UCB_ERROR;
+ p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+ }
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ case SET_DEVICE_SYNCRATE:
+ if( !SetDevSyncRate((PSCCBcard)pCurrCard,p_ucb) )
+ {
+ p_ucb->UCB_status = UCB_SUCCESS;
+ }
+ else
+ {
+ p_ucb->UCB_status = UCB_ERROR;
+ p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+ }
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ case GET_WIDE_MODE:
+ if( !GetDevWideMode((PSCCBcard)pCurrCard,p_ucb) )
+ {
+ p_ucb->UCB_status = UCB_SUCCESS;
+ }
+ else
+ {
+ p_ucb->UCB_status = UCB_ERROR;
+ p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+ }
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ case SET_WIDE_MODE:
+ if( !SetDevWideMode((PSCCBcard)pCurrCard,p_ucb) )
+ {
+ p_ucb->UCB_status = UCB_SUCCESS;
+ }
+ else
+ {
+ p_ucb->UCB_status = UCB_ERROR;
+ p_ucb->UCB_hbastat = HASTAT_HW_ERROR;
+ }
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ default:
+ p_ucb->UCB_status=UCB_INVALID;
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ if (callback)
+ callback(p_ucb);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return;
+ }
+ }
+#endif /* NO_IOCTLS */
+#endif /* (FW_TYPE==_UCB_MGR_) */
+
+
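+ /* First outstanding command: mark the SCCB manager active in the
+ semaphore register and, on green-PC (power-saving) cards, restore
+ the default clock, presumably to wake the chip from power saving. */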
+ if (!((PSCCBcard) pCurrCard)->cmdCounter)
+ {
+ WR_HARPOON(ioport+hp_semaphore, (RD_HARPOON(ioport+hp_semaphore)
+ | SCCB_MGR_ACTIVE));
+
+ if (((PSCCBcard) pCurrCard)->globalFlags & F_GREEN_PC)
+ {
+ WR_HARPOON(ioport+hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(ioport+hp_sys_ctrl, 0x00);
+ }
+ }
+
+ ((PSCCBcard)pCurrCard)->cmdCounter++;
+
+ if (RD_HARPOON(ioport+hp_semaphore) & BIOS_IN_USE) {
+
+ WR_HARPOON(ioport+hp_semaphore, (RD_HARPOON(ioport+hp_semaphore)
+ | TICKLE_ME));
+ if(p_Sccb->OperationCode == RESET_COMMAND)
+ {
+ pSaveSccb = ((PSCCBcard) pCurrCard)->currentSCCB;
+ ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+ queueSelectFail(&BL_Card[thisCard], thisCard);
+ ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSccb;
+ }
+ else
+ {
+ queueAddSccb(p_Sccb,thisCard);
+ }
+ }
+
+ else if ((RD_HARPOON(ioport+hp_page_ctrl) & G_INT_DISABLE)) {
+
+ if(p_Sccb->OperationCode == RESET_COMMAND)
+ {
+ pSaveSccb = ((PSCCBcard) pCurrCard)->currentSCCB;
+ ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+ queueSelectFail(&BL_Card[thisCard], thisCard);
+ ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSccb;
+ }
+ else
+ {
+ queueAddSccb(p_Sccb,thisCard);
+ }
+ }
+
+ else {
+
+ MDISABLE_INT(ioport);
+
+ if((((PSCCBcard) pCurrCard)->globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[thisCard][p_Sccb->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ lun = p_Sccb->Lun;
+ else
+ lun = 0;
+ if ((((PSCCBcard) pCurrCard)->currentSCCB == NULL) &&
+ (sccbMgrTbl[thisCard][p_Sccb->TargID].TarSelQ_Cnt == 0) &&
+ (sccbMgrTbl[thisCard][p_Sccb->TargID].TarLUNBusy[lun]
+ == FALSE)) {
+
+ ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+ mOS_UnLock((PSCCBcard)pCurrCard);
+#if defined(DOS)
+ ssel((USHORT)p_Sccb->SccbIOPort,thisCard);
+#else
+ ssel(p_Sccb->SccbIOPort,thisCard);
+#endif
+ mOS_Lock((PSCCBcard)pCurrCard);
+ }
+
+ else {
+
+ if(p_Sccb->OperationCode == RESET_COMMAND)
+ {
+ pSaveSccb = ((PSCCBcard) pCurrCard)->currentSCCB;
+ ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+ queueSelectFail(&BL_Card[thisCard], thisCard);
+ ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSccb;
+ }
+ else
+ {
+ queueAddSccb(p_Sccb,thisCard);
+ }
+ }
+
+
+ MENABLE_INT(ioport);
+ }
+
+ mOS_UnLock((PSCCBcard)pCurrCard);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_abort_sccb
+ *
+ * Description: Abort the command pointed to by p_Sccb. When the
+ * command is completed it will be returned via the
+ * callback function.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+s32bits SccbMgr_abort_sccb(CARD_HANDLE pCurrCard, PUCB p_ucb)
+#else
+#if defined(DOS)
+int SccbMgr_abort_sccb(USHORT pCurrCard, PSCCB p_Sccb)
+#else
+int SccbMgr_abort_sccb(ULONG pCurrCard, PSCCB p_Sccb)
+#endif
+#endif
+
+{
+#if defined(DOS)
+ USHORT ioport;
+#else
+ ULONG ioport;
+#endif
+
+ UCHAR thisCard;
+ CALL_BK_FN callback;
+ UCHAR TID;
+ PSCCB pSaveSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+
+#if (FW_TYPE==_UCB_MGR_)
+ PSCCB p_Sccb;
+ p_Sccb=(PSCCB)p_ucb->UCB_MgrPrivatePtr;
+#endif
+
+ ioport = ((PSCCBcard) pCurrCard)->ioPort;
+
+ thisCard = ((PSCCBcard)pCurrCard)->cardIndex;
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+
+ if (RD_HARPOON(ioport+hp_page_ctrl) & G_INT_DISABLE)
+ {
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ }
+
+ else
+ {
+
+ if (queueFindSccb(p_Sccb,thisCard))
+ {
+
+ mOS_UnLock((PSCCBcard)pCurrCard);
+
+ ((PSCCBcard)pCurrCard)->cmdCounter--;
+
+ if (!((PSCCBcard)pCurrCard)->cmdCounter)
+ WR_HARPOON(ioport+hp_semaphore,(RD_HARPOON(ioport+hp_semaphore)
+ & (UCHAR)(~(SCCB_MGR_ACTIVE | TICKLE_ME)) ));
+
+#if (FW_TYPE==_SCCB_MGR_)
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ callback = p_Sccb->SccbCallback;
+ callback(p_Sccb);
+#else
+ p_ucb->UCB_status=SCCB_ABORT;
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ callback(p_ucb);
+#endif
+
+ return(0);
+ }
+
+ else
+ {
+ mOS_UnLock((PSCCBcard)pCurrCard);
+
+ if (((PSCCBcard)pCurrCard)->currentSCCB == p_Sccb)
+ {
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ return(0);
+
+ }
+
+ else
+ {
+
+ TID = p_Sccb->TargID;
+
+
+ if(p_Sccb->Sccb_tag)
+ {
+ MDISABLE_INT(ioport);
+ if (((PSCCBcard) pCurrCard)->discQ_Tbl[p_Sccb->Sccb_tag]==p_Sccb)
+ {
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ p_Sccb->Sccb_scsistat = ABORT_ST;
+#if (FW_TYPE==_UCB_MGR_)
+ p_ucb->UCB_status=SCCB_ABORT;
+#endif
+ p_Sccb->Sccb_scsimsg = SMABORT_TAG;
+
+ if(((PSCCBcard) pCurrCard)->currentSCCB == NULL)
+ {
+ ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+ ssel(ioport, thisCard);
+ }
+ else
+ {
+ pSaveSCCB = ((PSCCBcard) pCurrCard)->currentSCCB;
+ ((PSCCBcard) pCurrCard)->currentSCCB = p_Sccb;
+ queueSelectFail((PSCCBcard) pCurrCard, thisCard);
+ ((PSCCBcard) pCurrCard)->currentSCCB = pSaveSCCB;
+ }
+ }
+ MENABLE_INT(ioport);
+ return(0);
+ }
+ else
+ {
+ currTar_Info = &sccbMgrTbl[thisCard][p_Sccb->TargID];
+
+ if(BL_Card[thisCard].discQ_Tbl[currTar_Info->LunDiscQ_Idx[p_Sccb->Lun]]
+ == p_Sccb)
+ {
+ p_Sccb->SccbStatus = SCCB_ABORT;
+ return(0);
+ }
+ }
+ }
+ }
+ }
+ return(-1);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_my_int
+ *
+ * Description: Do a quick check to determine if there is a pending
+ * interrupt for this card and disable the IRQ Pin if so.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+u08bits SccbMgr_my_int(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+UCHAR SccbMgr_my_int(USHORT pCurrCard)
+#else
+UCHAR SccbMgr_my_int(ULONG pCurrCard)
+#endif
+#endif
+{
+#if defined(DOS)
+ USHORT ioport;
+#else
+ ULONG ioport;
+#endif
+
+ ioport = ((PSCCBcard)pCurrCard)->ioPort;
+
+ if (RD_HARPOON(ioport+hp_int_status) & INT_ASSERTED)
+ {
+
+#if defined(DOS)
+ MDISABLE_INT(ioport);
+#endif
+
+ return(TRUE);
+ }
+
+ else
+
+ return(FALSE);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_isr
+ *
+ * Description: This is our entry point when an interrupt is generated
+ * by the card and the upper level driver passes it on to
+ * us.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+s32bits SccbMgr_isr(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+int SccbMgr_isr(USHORT pCurrCard)
+#else
+int SccbMgr_isr(ULONG pCurrCard)
+#endif
+#endif
+{
+ PSCCB currSCCB;
+ UCHAR thisCard,result,bm_status, bm_int_st;
+ USHORT hp_int;
+ UCHAR i, target;
+#if defined(DOS)
+ USHORT ioport;
+#else
+ ULONG ioport;
+#endif
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+
+ thisCard = ((PSCCBcard)pCurrCard)->cardIndex;
+ ioport = ((PSCCBcard)pCurrCard)->ioPort;
+
+ MDISABLE_INT(ioport);
+
+#if defined(BUGBUG)
+ WR_HARPOON(ioport+hp_user_defined_D, RD_HARPOON(ioport+hp_int_status));
+#endif
+
+ if ((bm_int_st=RD_HARPOON(ioport+hp_int_status)) & EXT_STATUS_ON)
+ bm_status = RD_HARPOON(ioport+hp_ext_status) & (UCHAR)BAD_EXT_STATUS;
+ else
+ bm_status = 0;
+
+ WR_HARPOON(ioport+hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
+
+ mOS_UnLock((PSCCBcard)pCurrCard);
+
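+ /* Service interrupt causes until none remain: error/bad status first,
+ then command complete, target disconnect, reselection, data-out
+ start, unknown/program-halt, transfer-count-zero, bus free and
+ tickle, finally starting a new SCCB when one is queued. */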
+ while ((hp_int = RDW_HARPOON((ioport+hp_intstat)) & default_intena) |
+ bm_status)
+ {
+
+ currSCCB = ((PSCCBcard)pCurrCard)->currentSCCB;
+
+#if defined(BUGBUG)
+ Debug_Load(thisCard,(UCHAR) 0XFF);
+ Debug_Load(thisCard,bm_int_st);
+
+ Debug_Load(thisCard,hp_int_0);
+ Debug_Load(thisCard,hp_int_1);
+#endif
+
+
+ if (hp_int & (FIFO | TIMEOUT | RESET | SCAM_SEL) || bm_status) {
+ result = SccbMgr_bad_isr(ioport,thisCard,((PSCCBcard)pCurrCard),hp_int);
+ WRW_HARPOON((ioport+hp_intstat), (FIFO | TIMEOUT | RESET | SCAM_SEL));
+ bm_status = 0;
+
+ if (result) {
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+ MENABLE_INT(ioport);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return(result);
+ }
+ }
+
+
+ else if (hp_int & ICMD_COMP) {
+
+ if ( !(hp_int & BUS_FREE) ) {
+ /* Wait for the BusFree before starting a new command. We
+ must also check for being reselected since the BusFree
+ may not show up if another device reselects us in 1.5us or
+ less. SRR Wednesday, 3/8/1995.
+ */
+ while (!(RDW_HARPOON((ioport+hp_intstat)) & (BUS_FREE | RSEL))) ;
+ }
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT)
+
+ phaseChkFifo(ioport, thisCard);
+
+/* WRW_HARPOON((ioport+hp_intstat),
+ (BUS_FREE | ICMD_COMP | ITAR_DISC | XFER_CNT_0));
+ */
+
+ WRW_HARPOON((ioport+hp_intstat), CLR_ALL_INT_1);
+
+ autoCmdCmplt(ioport,thisCard);
+
+ }
+
+
+ else if (hp_int & ITAR_DISC)
+ {
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT) {
+
+ phaseChkFifo(ioport, thisCard);
+
+ }
+
+ if (RD_HARPOON(ioport+hp_gp_reg_1) == SMSAVE_DATA_PTR) {
+
+ WR_HARPOON(ioport+hp_gp_reg_1, 0x00);
+ currSCCB->Sccb_XferState |= F_NO_DATA_YET;
+
+ currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC;
+ }
+
+ currSCCB->Sccb_scsistat = DISCONNECT_ST;
+ queueDisconnect(currSCCB,thisCard);
+
+ /* Wait for the BusFree before starting a new command. We
+ must also check for being reselected since the BusFree
+ may not show up if another device reselects us in 1.5us or
+ less. SRR Wednesday, 3/8/1995.
+ */
+ while (!(RDW_HARPOON((ioport+hp_intstat)) & (BUS_FREE | RSEL)) &&
+ !((RDW_HARPOON((ioport+hp_intstat)) & PHASE) &&
+ RD_HARPOON((ioport+hp_scsisig)) ==
+ (SCSI_BSY | SCSI_REQ | SCSI_CD | SCSI_MSG | SCSI_IOBIT))) ;
+
+ /*
+ The additional loop exit condition above detects a timing problem
+ with the revision D/E harpoon chips. The caller should reset the
+ host adapter to recover when 0xFE is returned.
+ */
+ if (!(RDW_HARPOON((ioport+hp_intstat)) & (BUS_FREE | RSEL)))
+ {
+ mOS_Lock((PSCCBcard)pCurrCard);
+ MENABLE_INT(ioport);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+ return 0xFE;
+ }
+
+ WRW_HARPOON((ioport+hp_intstat), (BUS_FREE | ITAR_DISC));
+
+
+ ((PSCCBcard)pCurrCard)->globalFlags |= F_NEW_SCCB_CMD;
+
+ }
+
+
+ else if (hp_int & RSEL) {
+
+ WRW_HARPOON((ioport+hp_intstat), (PROG_HLT | RSEL | PHASE | BUS_FREE));
+
+ if (RDW_HARPOON((ioport+hp_intstat)) & ITAR_DISC)
+ {
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT)
+ {
+ phaseChkFifo(ioport, thisCard);
+ }
+
+ if (RD_HARPOON(ioport+hp_gp_reg_1) == SMSAVE_DATA_PTR)
+ {
+ WR_HARPOON(ioport+hp_gp_reg_1, 0x00);
+ currSCCB->Sccb_XferState |= F_NO_DATA_YET;
+ currSCCB->Sccb_savedATC = currSCCB->Sccb_ATC;
+ }
+
+ WRW_HARPOON((ioport+hp_intstat), (BUS_FREE | ITAR_DISC));
+ currSCCB->Sccb_scsistat = DISCONNECT_ST;
+ queueDisconnect(currSCCB,thisCard);
+ }
+
+ sres(ioport,thisCard,((PSCCBcard)pCurrCard));
+ phaseDecode(ioport,thisCard);
+
+ }
+
+
+ else if ((hp_int & IDO_STRT) && (!(hp_int & BUS_FREE)))
+ {
+
+ WRW_HARPOON((ioport+hp_intstat), (IDO_STRT | XFER_CNT_0));
+ phaseDecode(ioport,thisCard);
+
+ }
+
+
+ else if ( (hp_int & IUNKWN) || (hp_int & PROG_HLT) )
+ {
+ WRW_HARPOON((ioport+hp_intstat), (PHASE | IUNKWN | PROG_HLT));
+ if ((RD_HARPOON(ioport+hp_prgmcnt_0) & (UCHAR)0x3f)< (UCHAR)SELCHK)
+ {
+ phaseDecode(ioport,thisCard);
+ }
+ else
+ {
+ /* Harpoon problem: some SCSI target devices respond to selection
+ with a short BUSY pulse (<400ns), which keeps the Harpoon from
+ latching the correct target ID into reg. 0x53.
+ The workaround is to correct that register.  However, writing to
+ reg. 0x53 also increments the FIFO write address reg. (0x6f), so we
+ read that register first and restore it after updating 0x53. */
+
+ i = (UCHAR)(RD_HARPOON(ioport+hp_fifowrite));
+ target = (UCHAR)(RD_HARPOON(ioport+hp_gp_reg_3));
+ WR_HARPOON(ioport+hp_xfer_pad, (UCHAR) ID_UNLOCK);
+ WR_HARPOON(ioport+hp_select_id, (UCHAR)(target | target<<4));
+ WR_HARPOON(ioport+hp_xfer_pad, (UCHAR) 0x00);
+ WR_HARPOON(ioport+hp_fifowrite, i);
+ WR_HARPOON(ioport+hp_autostart_3, (AUTO_IMMED+TAG_STRT));
+ }
+ }
+
+ else if (hp_int & XFER_CNT_0) {
+
+ WRW_HARPOON((ioport+hp_intstat), XFER_CNT_0);
+
+ schkdd(ioport,thisCard);
+
+ }
+
+
+ else if (hp_int & BUS_FREE) {
+
+ WRW_HARPOON((ioport+hp_intstat), BUS_FREE);
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_HOST_XFER_ACT) {
+
+ hostDataXferAbort(ioport,thisCard,currSCCB);
+ }
+
+ phaseBusFree(ioport,thisCard);
+ }
+
+
+ else if (hp_int & ITICKLE) {
+
+ WRW_HARPOON((ioport+hp_intstat), ITICKLE);
+ ((PSCCBcard)pCurrCard)->globalFlags |= F_NEW_SCCB_CMD;
+ }
+
+
+
+ if (((PSCCBcard)pCurrCard)->globalFlags & F_NEW_SCCB_CMD) {
+
+
+ ((PSCCBcard)pCurrCard)->globalFlags &= ~F_NEW_SCCB_CMD;
+
+
+ if (((PSCCBcard)pCurrCard)->currentSCCB == NULL) {
+
+ queueSearchSelect(((PSCCBcard)pCurrCard),thisCard);
+ }
+
+ if (((PSCCBcard)pCurrCard)->currentSCCB != NULL) {
+ ((PSCCBcard)pCurrCard)->globalFlags &= ~F_NEW_SCCB_CMD;
+ ssel(ioport,thisCard);
+ }
+
+ break;
+
+ }
+
+ } /*end while */
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+ MENABLE_INT(ioport);
+ mOS_UnLock((PSCCBcard)pCurrCard);
+
+ return(0);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_bad_isr
+ *
+ * Description: Some type of interrupt has occurred which is slightly
+ * out of the ordinary. We will now decode it fully, in
+ * this routine. This is broken up in an attempt to save
+ * processing time.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+UCHAR SccbMgr_bad_isr(USHORT p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int)
+#else
+UCHAR SccbMgr_bad_isr(ULONG p_port, UCHAR p_card, PSCCBcard pCurrCard, USHORT p_int)
+#endif
+{
+#if defined(HARP_REVX)
+ ULONG timer;
+#endif
+UCHAR temp, ScamFlg;
+PSCCBMgr_tar_info currTar_Info;
+PNVRamInfo pCurrNvRam;
+
+
+ if (RD_HARPOON(p_port+hp_ext_status) &
+ (BM_FORCE_OFF | PCI_DEV_TMOUT | BM_PARITY_ERR | PIO_OVERRUN) )
+ {
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+ {
+
+ hostDataXferAbort(p_port,p_card, pCurrCard->currentSCCB);
+ }
+
+ if (RD_HARPOON(p_port+hp_pci_stat_cfg) & REC_MASTER_ABORT)
+
+ {
+ WR_HARPOON(p_port+hp_pci_stat_cfg,
+ (RD_HARPOON(p_port+hp_pci_stat_cfg) & ~REC_MASTER_ABORT));
+
+ WR_HARPOON(p_port+hp_host_blk_cnt, 0x00);
+
+ }
+
+ if (pCurrCard->currentSCCB != NULL)
+ {
+
+ if (!pCurrCard->currentSCCB->HostStatus)
+ pCurrCard->currentSCCB->HostStatus = SCCB_BM_ERR;
+
+ sxfrp(p_port,p_card);
+
+ temp = (UCHAR)(RD_HARPOON(p_port+hp_ee_ctrl) &
+ (EXT_ARB_ACK | SCSI_TERM_ENA_H));
+ WR_HARPOON(p_port+hp_ee_ctrl, ((UCHAR)temp | SEE_MS | SEE_CS));
+ WR_HARPOON(p_port+hp_ee_ctrl, temp);
+
+ if (!(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)))
+ {
+ phaseDecode(p_port,p_card);
+ }
+ }
+ }
+
+
+ else if (p_int & RESET)
+ {
+
+ WR_HARPOON(p_port+hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(p_port+hp_sys_ctrl, 0x00);
+ if (pCurrCard->currentSCCB != NULL) {
+
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+
+ hostDataXferAbort(p_port,p_card, pCurrCard->currentSCCB);
+ }
+
+
+ DISABLE_AUTO(p_port);
+
+ sresb(p_port,p_card);
+
+ while(RD_HARPOON(p_port+hp_scsictrl_0) & SCSI_RST) {}
+
+ pCurrNvRam = pCurrCard->pNvRamInfo;
+ if(pCurrNvRam){
+ ScamFlg = pCurrNvRam->niScamConf;
+ }
+ else{
+ ScamFlg = (UCHAR) utilEERead(p_port, SCAM_CONFIG/2);
+ }
+
+ XbowInit(p_port, ScamFlg);
+
+ scini(p_card, pCurrCard->ourId, 0);
+
+ return(0xFF);
+ }
+
+
+ else if (p_int & FIFO) {
+
+ WRW_HARPOON((p_port+hp_intstat), FIFO);
+
+#if defined(HARP_REVX)
+
+ for (timer=0x00FFFFFFL; timer != 0x00000000L; timer--) {
+
+ if (RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY)
+ break;
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & BUS_FREE)
+ break;
+ }
+
+
+ if ( (RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY) &&
+ (RD_HARPOON(p_port+hp_fiforead) !=
+ RD_HARPOON(p_port+hp_fifowrite)) &&
+ (RD_HARPOON(p_port+hp_xfercnt_0))
+ )
+
+ WR_HARPOON((p_port+hp_xferstat), 0x01);
+
+/* else
+ */
+/* sxfrp(p_port,p_card);
+ */
+#else
+ if (pCurrCard->currentSCCB != NULL)
+ sxfrp(p_port,p_card);
+#endif
+ }
+
+ else if (p_int & TIMEOUT)
+ {
+
+ DISABLE_AUTO(p_port);
+
+ WRW_HARPOON((p_port+hp_intstat),
+ (PROG_HLT | TIMEOUT | SEL |BUS_FREE | PHASE | IUNKWN));
+
+ pCurrCard->currentSCCB->HostStatus = SCCB_SELECTION_TIMEOUT;
+
+
+ currTar_Info = &sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
+ if((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ currTar_Info->TarLUNBusy[pCurrCard->currentSCCB->Lun] = FALSE;
+ else
+ currTar_Info->TarLUNBusy[0] = FALSE;
+
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK)
+ {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+ sssyncv(p_port, pCurrCard->currentSCCB->TargID, NARROW_SCSI,currTar_Info);
+
+ queueCmdComplete(pCurrCard, pCurrCard->currentSCCB, p_card);
+
+ }
+
+#if defined(SCAM_LEV_2)
+
+ else if (p_int & SCAM_SEL)
+ {
+
+ scarb(p_port,LEVEL2_TAR);
+ scsel(p_port);
+ scasid(p_card, p_port);
+
+ scbusf(p_port);
+
+ WRW_HARPOON((p_port+hp_intstat), SCAM_SEL);
+ }
+#endif
+
+ return(0x00);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_scsi_reset
+ *
+ * Description: A SCSI bus reset will be generated and all outstanding
+ * Sccbs will be returned via the callback.
+ *
+ *---------------------------------------------------------------------*/
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_scsi_reset(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+void SccbMgr_scsi_reset(USHORT pCurrCard)
+#else
+void SccbMgr_scsi_reset(ULONG pCurrCard)
+#endif
+#endif
+{
+ UCHAR thisCard;
+
+ thisCard = ((PSCCBcard)pCurrCard)->cardIndex;
+
+ mOS_Lock((PSCCBcard)pCurrCard);
+
+ if (((PSCCBcard) pCurrCard)->globalFlags & F_GREEN_PC)
+ {
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_clkctrl_0, CLKCTRL_DEFAULT);
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_sys_ctrl, 0x00);
+ }
+
+ sresb(((PSCCBcard)pCurrCard)->ioPort,thisCard);
+
+ if (RD_HARPOON(((PSCCBcard)pCurrCard)->ioPort+hp_ext_status) & BM_CMD_BUSY)
+ {
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_page_ctrl,
+ (RD_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_page_ctrl)
+ & ~SCATTER_EN));
+
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_sg_addr,0x00);
+
+ ((PSCCBcard) pCurrCard)->globalFlags &= ~F_HOST_XFER_ACT;
+ busMstrTimeOut(((PSCCBcard) pCurrCard)->ioPort);
+
+ WR_HARPOON(((PSCCBcard) pCurrCard)->ioPort+hp_int_mask,
+ (INT_CMD_COMPL | SCSI_INTERRUPT));
+ }
+
+/*
+ if (utilEERead(((PSCCBcard)pCurrCard)->ioPort, (SCAM_CONFIG/2))
+ & SCAM_ENABLED)
+*/
+ scini(thisCard, ((PSCCBcard)pCurrCard)->ourId, 0);
+
+#if (FW_TYPE==_UCB_MGR_)
+ ((PSCCBcard)pCurrCard)->cardInfo->ai_AEN_routine(0x01,pCurrCard,0,0,0,0);
+#endif
+
+ mOS_UnLock((PSCCBcard)pCurrCard);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_timer_expired
+ *
+ * Description: This function allows the driver to kill one of its own
+ * jobs that has not yet completed and has caused a timeout
+ * to occur. That timeout has caused the upper level driver
+ * to call this function.
+ *
+ *---------------------------------------------------------------------*/
+
+#if (FW_TYPE==_UCB_MGR_)
+void SccbMgr_timer_expired(CARD_HANDLE pCurrCard)
+#else
+#if defined(DOS)
+void SccbMgr_timer_expired(USHORT pCurrCard)
+#else
+void SccbMgr_timer_expired(ULONG pCurrCard)
+#endif
+#endif
+{
+}
+
+#if defined(DOS)
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgr_status
+ *
+ * Description: This function returns the number of outstanding SCCBs.
+ * This is specific to the DOS environment, which needs it
+ * to keep protected-mode and real-mode commands straight.
+ *
+ *---------------------------------------------------------------------*/
+
+USHORT SccbMgr_status(USHORT pCurrCard)
+{
+ return(BL_Card[pCurrCard].cmdCounter);
+}
+#endif
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInitAll
+ *
+ * Description: Initialize the Sccb manager data structures for all cards.
+ *
+ *---------------------------------------------------------------------*/
+
+void SccbMgrTableInitAll()
+{
+ UCHAR thisCard;
+
+ for (thisCard = 0; thisCard < MAX_CARDS; thisCard++)
+ {
+ SccbMgrTableInitCard(&BL_Card[thisCard],thisCard);
+
+ BL_Card[thisCard].ioPort = 0x00;
+ BL_Card[thisCard].cardInfo = NULL;
+ BL_Card[thisCard].cardIndex = 0xFF;
+ BL_Card[thisCard].ourId = 0x00;
+ BL_Card[thisCard].pNvRamInfo = NULL;
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInitCard
+ *
+ * Description: Initialize the Sccb manager data structures for one card.
+ *
+ *---------------------------------------------------------------------*/
+
+void SccbMgrTableInitCard(PSCCBcard pCurrCard, UCHAR p_card)
+{
+ UCHAR scsiID, qtag;
+
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++)
+ {
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ }
+
+ for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++)
+ {
+ sccbMgrTbl[p_card][scsiID].TarStatus = 0;
+ sccbMgrTbl[p_card][scsiID].TarEEValue = 0;
+ SccbMgrTableInitTarget(p_card, scsiID);
+ }
+
+ pCurrCard->scanIndex = 0x00;
+ pCurrCard->currentSCCB = NULL;
+ pCurrCard->globalFlags = 0x00;
+ pCurrCard->cmdCounter = 0x00;
+ pCurrCard->tagQ_Lst = 0x01;
+ pCurrCard->discQCount = 0;
+
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: SccbMgrTableInitTarget
+ *
+ * Description: Initialize the Sccb manager data structures for one target.
+ *
+ *---------------------------------------------------------------------*/
+
+void SccbMgrTableInitTarget(UCHAR p_card, UCHAR target)
+{
+
+ UCHAR lun, qtag;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currTar_Info = &sccbMgrTbl[p_card][target];
+
+ currTar_Info->TarSelQ_Cnt = 0;
+ currTar_Info->TarSyncCtrl = 0;
+
+ currTar_Info->TarSelQ_Head = NULL;
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarTagQ_Cnt = 0;
+ currTar_Info->TarLUN_CA = FALSE;
+
+
+ for (lun = 0; lun < MAX_LUN; lun++)
+ {
+ currTar_Info->TarLUNBusy[lun] = FALSE;
+ currTar_Info->LunDiscQ_Idx[lun] = 0;
+ }
+
+ for (qtag = 0; qtag < QUEUE_DEPTH; qtag++)
+ {
+ if(BL_Card[p_card].discQ_Tbl[qtag] != NULL)
+ {
+ if(BL_Card[p_card].discQ_Tbl[qtag]->TargID == target)
+ {
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ BL_Card[p_card].discQCount--;
+ }
+ }
+ }
+}
+
+#if defined(BUGBUG)
+
+/*****************************************************************
+ * Save the current byte in the debug array
+ *****************************************************************/
+
+
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data)
+{
+ debug_int[p_card][debug_index[p_card]] = p_bug_data;
+ debug_index[p_card]++;
+
+ if (debug_index[p_card] == debug_size)
+
+ debug_index[p_card] = 0;
+}
+
+#endif
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: sccb_dat.c $
+ *
+ * Description: Functions relating to handling of the SCCB interface
+ * between the device driver and the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <harpoon.h>*/
+
+/*
+** IMPORTANT NOTE!!!
+**
+** You MUST preassign all data to a valid value or zero.  This is
+** required to work around an MS compiler bug in the OS/2 and Solaris
+** Real-Mode driver environments.
+*/
+
+
+SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR] = { { { 0 } } };
+SCCBCARD BL_Card[MAX_CARDS] = { { 0 } };
+SCCBSCAM_INFO scamInfo[MAX_SCSI_TAR] = { { { 0 } } };
+NVRAMINFO nvRamInfo[MAX_MB_CARDS] = { { 0 } };
+
+
+#if defined(OS2)
+void (far *s_PhaseTbl[8]) (ULONG, UCHAR) = { 0 };
+UCHAR temp_id_string[ID_STRING_LENGTH] = { 0 };
+#elif defined(SOLARIS_REAL_MODE) || defined(__STDC__)
+void (*s_PhaseTbl[8]) (ULONG, UCHAR) = { 0 };
+#else
+void (*s_PhaseTbl[8]) ();
+#endif
+
+#if defined(DOS)
+UCHAR first_time = 0;
+#endif
+
+UCHAR mbCards = 0;
+UCHAR scamHAString[] = {0x63, 0x07, 'B', 'U', 'S', 'L', 'O', 'G', 'I', 'C', \
+ ' ', 'B', 'T', '-', '9', '3', '0', \
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, \
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20};
+
+USHORT default_intena = 0;
+
+#if defined(BUGBUG)
+UCHAR debug_int[MAX_CARDS][debug_size] = { 0 };
+UCHAR debug_index[MAX_CARDS] = { 0 };
+UCHAR reserved_1[3] = { 0 };
+#endif
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: scsi.c $
+ *
+ * Description: Functions for handling SCSI bus functions such as
+ * selection/reselection, sync negotiation, message-in
+ * decoding.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <eeprom.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+#if defined(BUGBUG)
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sfm
+ *
+ * Description: Read in a message byte from the SCSI bus, and check
+ * for a parity error.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR sfm(USHORT port, PSCCB pCurrSCCB)
+#else
+UCHAR sfm(ULONG port, PSCCB pCurrSCCB)
+#endif
+{
+ UCHAR message;
+ USHORT TimeOutLoop;
+
+ TimeOutLoop = 0;
+ while( (!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+ (TimeOutLoop++ < 20000) ){}
+
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+ message = RD_HARPOON(port+hp_scsidata_0);
+
+ WR_HARPOON(port+hp_scsisig, SCSI_ACK + S_MSGI_PH);
+
+
+ if (TimeOutLoop > 20000)
+ message = 0x00; /* force message byte = 0 if Time Out on Req */
+
+ if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+ (RD_HARPOON(port+hp_addstat) & SCSI_PAR_ERR))
+ {
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ WR_HARPOON(port+hp_xferstat, 0);
+ WR_HARPOON(port+hp_fiforead, 0);
+ WR_HARPOON(port+hp_fifowrite, 0);
+ if (pCurrSCCB != NULL)
+ {
+ pCurrSCCB->Sccb_scsimsg = SMPARITY;
+ }
+ message = 0x00;
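+      /* Note: as this code reads, the loop below implements parity-error
+         recovery: SMPARITY has been queued so a MESSAGE PARITY ERROR goes
+         out on the next message-out phase, while incoming message-in bytes
+         are ACKed (with ATN asserted) and discarded until the target
+         changes phase or stops asserting REQ. */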
+ do
+ {
+ ACCEPT_MSG_ATN(port);
+ TimeOutLoop = 0;
+ while( (!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+ (TimeOutLoop++ < 20000) ){}
+ if (TimeOutLoop > 20000)
+ {
+ WRW_HARPOON((port+hp_intstat), PARITY);
+ return(message);
+ }
+ if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) != S_MSGI_PH)
+ {
+ WRW_HARPOON((port+hp_intstat), PARITY);
+ return(message);
+ }
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+ RD_HARPOON(port+hp_scsidata_0);
+
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+ }while(1);
+
+ }
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ WR_HARPOON(port+hp_xferstat, 0);
+ WR_HARPOON(port+hp_fiforead, 0);
+ WR_HARPOON(port+hp_fifowrite, 0);
+ return(message);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: ssel
+ *
+ * Description: Load up automation and select target device.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void ssel(USHORT port, UCHAR p_card)
+#else
+void ssel(ULONG port, UCHAR p_card)
+#endif
+{
+
+#if defined(DOS)
+ UCHAR auto_loaded, i, target, *theCCB;
+#elif defined(OS2)
+ UCHAR auto_loaded, i, target;
+ UCHAR far *theCCB;
+#else
+ UCHAR auto_loaded, i, target, *theCCB;
+#endif
+
+#if defined(DOS)
+ USHORT cdb_reg;
+#else
+ ULONG cdb_reg;
+#endif
+ PSCCBcard CurrCard;
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+ UCHAR lastTag, lun;
+
+ CurrCard = &BL_Card[p_card];
+ currSCCB = CurrCard->currentSCCB;
+ target = currSCCB->TargID;
+ currTar_Info = &sccbMgrTbl[p_card][target];
+ lastTag = CurrCard->tagQ_Lst;
+
+ ARAM_ACCESS(port);
+
+
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+
+ if(((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+
+ lun = currSCCB->Lun;
+ else
+ lun = 0;
+
+
+#if defined(DOS)
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+
+#else
+
+ if (CurrCard->globalFlags & F_TAG_STARTED)
+ {
+ if (!(currSCCB->ControlByte & F_USE_CMD_Q))
+ {
+ if ((currTar_Info->TarLUN_CA == FALSE)
+ && ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
+ == TAG_Q_TRYING))
+ {
+
+ if (currTar_Info->TarTagQ_Cnt !=0)
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ else {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+ } /*End non-tagged */
+
+ else {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+ } /*!Use cmd Q Tagged */
+
+ else {
+ if (currTar_Info->TarLUN_CA == TRUE)
+ {
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+
+ } /*else use cmd Q tagged */
+
+ } /*if glob tagged started */
+
+ else {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+#endif /* DOS */
+
+
+
+ if((((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ || (!(currSCCB->ControlByte & F_USE_CMD_Q))))
+ {
+ if(CurrCard->discQCount >= QUEUE_DEPTH)
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
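+		/* Note: queue slot 0 appears to be reserved for untagged commands,
+		   which is why the free-slot search below wraps from QUEUE_DEPTH-1
+		   back to 1 rather than 0. */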
+ for (i = 1; i < QUEUE_DEPTH; i++)
+ {
+ if (++lastTag >= QUEUE_DEPTH) lastTag = 1;
+ if (CurrCard->discQ_Tbl[lastTag] == NULL)
+ {
+ CurrCard->tagQ_Lst = lastTag;
+ currTar_Info->LunDiscQ_Idx[lun] = lastTag;
+ CurrCard->discQ_Tbl[lastTag] = currSCCB;
+ CurrCard->discQCount++;
+ break;
+ }
+ }
+ if(i == QUEUE_DEPTH)
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+ }
+
+
+
+ auto_loaded = FALSE;
+
+ WR_HARPOON(port+hp_select_id, target);
+ WR_HARPOON(port+hp_gp_reg_3, target); /* Use by new automation logic */
+
+ if (currSCCB->OperationCode == RESET_COMMAND) {
+ WRW_HARPOON((port+ID_MSG_STRT), (MPM_OP+AMSG_OUT+
+ (currSCCB->Sccb_idmsg & ~DISC_PRIV)));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+NP);
+
+ currSCCB->Sccb_scsimsg = SMDEV_RESET;
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+ auto_loaded = TRUE;
+ currSCCB->Sccb_scsistat = SELECT_BDR_ST;
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK)
+ {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+#if defined(WIDE_SCSI)
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+#endif
+
+ sssyncv(port, target, NARROW_SCSI,currTar_Info);
+ SccbMgrTableInitTarget(p_card, target);
+
+ }
+
+ else if(currSCCB->Sccb_scsistat == ABORT_ST)
+ {
+ WRW_HARPOON((port+ID_MSG_STRT), (MPM_OP+AMSG_OUT+
+ (currSCCB->Sccb_idmsg & ~DISC_PRIV)));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+CMDPZ);
+
+ WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+
+ (((UCHAR)(currSCCB->ControlByte & TAG_TYPE_MASK)
+ >> 6) | (UCHAR)0x20)));
+ WRW_HARPOON((port+SYNC_MSGS+2),
+ (MPM_OP+AMSG_OUT+currSCCB->Sccb_tag));
+ WRW_HARPOON((port+SYNC_MSGS+4), (BRH_OP+ALWAYS+NP ));
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+ auto_loaded = TRUE;
+
+ }
+
+#if defined(WIDE_SCSI)
+
+
+ else if (!(currTar_Info->TarStatus & WIDE_NEGOCIATED)) {
+ auto_loaded = siwidn(port,p_card);
+ currSCCB->Sccb_scsistat = SELECT_WN_ST;
+ }
+
+#endif
+
+
+ else if (!((currTar_Info->TarStatus & TAR_SYNC_MASK)
+ == SYNC_SUPPORTED)) {
+ auto_loaded = sisyncn(port,p_card, FALSE);
+ currSCCB->Sccb_scsistat = SELECT_SN_ST;
+ }
+
+
+ if (!auto_loaded)
+ {
+
+#if !defined(DOS)
+ if (currSCCB->ControlByte & F_USE_CMD_Q)
+ {
+
+ CurrCard->globalFlags |= F_TAG_STARTED;
+
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK)
+ == TAG_Q_REJECT)
+ {
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+
+ /* Fix up the start instruction with a jump to
+ Non-Tag-CMD handling */
+ WRW_HARPOON((port+ID_MSG_STRT),BRH_OP+ALWAYS+NTCMD);
+
+ WRW_HARPOON((port+NON_TAG_ID_MSG),
+ (MPM_OP+AMSG_OUT+currSCCB->Sccb_idmsg));
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+
+					/* Set up our STATE so we know what happened when
+					   the wheels fall off. */
+ currSCCB->Sccb_scsistat = SELECT_ST;
+
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ }
+
+ else
+ {
+ WRW_HARPOON((port+ID_MSG_STRT), (MPM_OP+AMSG_OUT+currSCCB->Sccb_idmsg));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2), (MPM_OP+AMSG_OUT+
+ (((UCHAR)(currSCCB->ControlByte & TAG_TYPE_MASK)
+ >> 6) | (UCHAR)0x20)));
+
+ for (i = 1; i < QUEUE_DEPTH; i++)
+ {
+ if (++lastTag >= QUEUE_DEPTH) lastTag = 1;
+ if (CurrCard->discQ_Tbl[lastTag] == NULL)
+ {
+ WRW_HARPOON((port+ID_MSG_STRT+6),
+ (MPM_OP+AMSG_OUT+lastTag));
+ CurrCard->tagQ_Lst = lastTag;
+ currSCCB->Sccb_tag = lastTag;
+ CurrCard->discQ_Tbl[lastTag] = currSCCB;
+ CurrCard->discQCount++;
+ break;
+ }
+ }
+
+
+ if ( i == QUEUE_DEPTH )
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ queueSelectFail(CurrCard,p_card);
+ SGRAM_ACCESS(port);
+ return;
+ }
+
+ currSCCB->Sccb_scsistat = SELECT_Q_ST;
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+ }
+ }
+
+ else
+ {
+#endif /* !DOS */
+
+ WRW_HARPOON((port+ID_MSG_STRT),BRH_OP+ALWAYS+NTCMD);
+
+ WRW_HARPOON((port+NON_TAG_ID_MSG),
+ (MPM_OP+AMSG_OUT+currSCCB->Sccb_idmsg));
+
+ currSCCB->Sccb_scsistat = SELECT_ST;
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+#if !defined(DOS)
+ }
+#endif
+
+
+#if defined(OS2)
+ theCCB = (UCHAR far *)&currSCCB->Cdb[0];
+#else
+ theCCB = (UCHAR *)&currSCCB->Cdb[0];
+#endif
+
+ cdb_reg = port + CMD_STRT;
+
+ for (i=0; i < currSCCB->CdbLength; i++)
+ {
+ WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + *theCCB));
+ cdb_reg +=2;
+ theCCB++;
+ }
+
+ if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
+ WRW_HARPOON(cdb_reg, (BRH_OP+ALWAYS+ NP));
+
+ } /* auto_loaded */
+
+#if defined(WIDE_SCSI)
+ WRW_HARPOON((port+hp_fiforead), (USHORT) 0x00);
+ WR_HARPOON(port+hp_xferstat, 0x00);
+#endif
+
+ WRW_HARPOON((port+hp_intstat), (PROG_HLT | TIMEOUT | SEL | BUS_FREE));
+
+ WR_HARPOON(port+hp_portctrl_0,(SCSI_PORT));
+
+
+ if (!(currSCCB->Sccb_MGRFlags & F_DEV_SELECTED))
+ {
+ WR_HARPOON(port+hp_scsictrl_0, (SEL_TAR | ENA_ATN | ENA_RESEL | ENA_SCAM_SEL));
+ }
+ else
+ {
+
+/* auto_loaded = (RD_HARPOON(port+hp_autostart_3) & (UCHAR)0x1F);
+ auto_loaded |= AUTO_IMMED; */
+ auto_loaded = AUTO_IMMED;
+
+ DISABLE_AUTO(port);
+
+ WR_HARPOON(port+hp_autostart_3, auto_loaded);
+ }
+
+ SGRAM_ACCESS(port);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sres
+ *
+ * Description: Hookup the correct CCB and handle the incoming messages.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void sres(USHORT port, UCHAR p_card, PSCCBcard pCurrCard)
+#else
+void sres(ULONG port, UCHAR p_card, PSCCBcard pCurrCard)
+#endif
+{
+
+#if defined(V302)
+#ifdef DOS
+ UCHAR our_target,message, msgRetryCount;
+ extern UCHAR lun, tag;
+#else
+ UCHAR our_target,message,lun,tag, msgRetryCount;
+#endif
+
+#else /* V302 */
+ UCHAR our_target, message, lun = 0, tag, msgRetryCount;
+#endif /* V302 */
+
+
+ PSCCBMgr_tar_info currTar_Info;
+ PSCCB currSCCB;
+
+
+
+
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ currTar_Info = &sccbMgrTbl[p_card][pCurrCard->currentSCCB->TargID];
+ DISABLE_AUTO(port);
+
+
+ WR_HARPOON((port+hp_scsictrl_0),(ENA_RESEL | ENA_SCAM_SEL));
+
+
+ currSCCB = pCurrCard->currentSCCB;
+ if(currSCCB->Sccb_scsistat == SELECT_WN_ST)
+ {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+ }
+ if(currSCCB->Sccb_scsistat == SELECT_SN_ST)
+ {
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+ }
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ currTar_Info->TarLUNBusy[currSCCB->Lun] = FALSE;
+ if(currSCCB->Sccb_scsistat != ABORT_ST)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[currSCCB->Lun]]
+ = NULL;
+ }
+ }
+ else
+ {
+ currTar_Info->TarLUNBusy[0] = FALSE;
+ if(currSCCB->Sccb_tag)
+ {
+ if(currSCCB->Sccb_scsistat != ABORT_ST)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ }
+ }else
+ {
+ if(currSCCB->Sccb_scsistat != ABORT_ST)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ }
+
+ queueSelectFail(&BL_Card[p_card],p_card);
+ }
+
+#if defined(WIDE_SCSI)
+ WRW_HARPOON((port+hp_fiforead), (USHORT) 0x00);
+#endif
+
+
+ our_target = (UCHAR)(RD_HARPOON(port+hp_select_id) >> 4);
+ currTar_Info = &sccbMgrTbl[p_card][our_target];
+
+
+ msgRetryCount = 0;
+ do
+ {
+
+#if defined(V302)
+
+ message = GetTarLun(port, p_card, our_target, pCurrCard, &tag, &lun);
+
+#else /* V302 */
+
+ currTar_Info = &sccbMgrTbl[p_card][our_target];
+ tag = 0;
+
+
+ while(!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ))
+ {
+ if (! (RD_HARPOON(port+hp_scsisig) & SCSI_BSY))
+ {
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ return;
+ }
+ }
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH)
+ {
+
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+
+ if (message <= (0x80 | LUN_MASK))
+ {
+ lun = message & (UCHAR)LUN_MASK;
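+					/* Note: on reselection the Identify message is 0x80 ORed
+					   with the LUN, hence the range check above and the
+					   LUN_MASK extraction here. */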
+
+#if !defined(DOS)
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING)
+ {
+ if (currTar_Info->TarTagQ_Cnt != 0)
+ {
+
+ if (!(currTar_Info->TarLUN_CA))
+ {
+ ACCEPT_MSG(port); /*Release the ACK for ID msg. */
+
+
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+ ACCEPT_MSG(port);
+ }
+
+ else
+ message = FALSE;
+
+ if(message != FALSE)
+ {
+ tag = sfm(port,pCurrCard->currentSCCB);
+
+ if (!(tag))
+ message = FALSE;
+ }
+
+ } /*C.A. exists! */
+
+ } /*End Q cnt != 0 */
+
+ } /*End Tag cmds supported! */
+#endif /* !DOS */
+
+ } /*End valid ID message. */
+
+ else
+ {
+
+ ACCEPT_MSG_ATN(port);
+ }
+
+ } /* End good id message. */
+
+ else
+ {
+
+ message = FALSE;
+ }
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+
+ while (!(RDW_HARPOON((port+hp_intstat)) & (PHASE | RESET)) &&
+ !(RD_HARPOON(port+hp_scsisig) & SCSI_REQ) &&
+ (RD_HARPOON(port+hp_scsisig) & SCSI_BSY)) ;
+
+ return;
+ }
+
+#endif /* V302 */
+
+ if(message == FALSE)
+ {
+ msgRetryCount++;
+ if(msgRetryCount == 1)
+ {
+ SendMsg(port, SMPARITY);
+ }
+ else
+ {
+ SendMsg(port, SMDEV_RESET);
+
+ sssyncv(port, our_target, NARROW_SCSI,currTar_Info);
+
+ if (sccbMgrTbl[p_card][our_target].TarEEValue & EE_SYNC_MASK)
+ {
+
+ sccbMgrTbl[p_card][our_target].TarStatus &= ~TAR_SYNC_MASK;
+
+ }
+
+ if (sccbMgrTbl[p_card][our_target].TarEEValue & EE_WIDE_SCSI)
+ {
+
+ sccbMgrTbl[p_card][our_target].TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+
+ queueFlushTargSccb(p_card, our_target, SCCB_COMPLETE);
+ SccbMgrTableInitTarget(p_card,our_target);
+ return;
+ }
+ }
+ }while(message == FALSE);
+
+
+
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ currTar_Info->TarLUNBusy[lun] = TRUE;
+ pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[lun]];
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ ACCEPT_MSG(port);
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+ }
+ }
+ else
+ {
+ currTar_Info->TarLUNBusy[0] = TRUE;
+
+
+ if (tag)
+ {
+ if (pCurrCard->discQ_Tbl[tag] != NULL)
+ {
+ pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[tag];
+ currTar_Info->TarTagQ_Cnt--;
+ ACCEPT_MSG(port);
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+ }
+ }else
+ {
+ pCurrCard->currentSCCB = pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]];
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ ACCEPT_MSG(port);
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+ }
+ }
+ }
+
+ if(pCurrCard->currentSCCB != NULL)
+ {
+ if(pCurrCard->currentSCCB->Sccb_scsistat == ABORT_ST)
+ {
+ /* During Abort Tag command, the target could have got re-selected
+ and completed the command. Check the select Q and remove the CCB
+ if it is in the Select Q */
+ queueFindSccb(pCurrCard->currentSCCB, p_card);
+ }
+ }
+
+
+ while (!(RDW_HARPOON((port+hp_intstat)) & (PHASE | RESET)) &&
+ !(RD_HARPOON(port+hp_scsisig) & SCSI_REQ) &&
+ (RD_HARPOON(port+hp_scsisig) & SCSI_BSY)) ;
+}
+
+#if defined(V302)
+
+#if defined(DOS)
+UCHAR GetTarLun(USHORT port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tag, PUCHAR lun)
+#else
+UCHAR GetTarLun(ULONG port, UCHAR p_card, UCHAR our_target, PSCCBcard pCurrCard, PUCHAR tag, PUCHAR lun)
+#endif
+{
+ UCHAR message;
+ PSCCBMgr_tar_info currTar_Info;
+
+
+ currTar_Info = &sccbMgrTbl[p_card][our_target];
+ *tag = 0;
+
+
+ while(!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ))
+ {
+ if (! (RD_HARPOON(port+hp_scsisig) & SCSI_BSY))
+ {
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ return(TRUE);
+ }
+ }
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) == S_MSGI_PH)
+ {
+
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+
+ if (message <= (0x80 | LUN_MASK))
+ {
+ *lun = message & (UCHAR)LUN_MASK;
+
+#if !defined(DOS)
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING)
+ {
+ if (currTar_Info->TarTagQ_Cnt != 0)
+ {
+
+ if (!(currTar_Info->TarLUN_CA))
+ {
+ ACCEPT_MSG(port); /*Release the ACK for ID msg. */
+
+
+ message = sfm(port,pCurrCard->currentSCCB);
+ if (message)
+ {
+ ACCEPT_MSG(port);
+ }
+
+ else
+ return(FALSE);
+
+ *tag = sfm(port,pCurrCard->currentSCCB);
+
+ if (!(*tag)) return(FALSE);
+
+ } /*C.A. exists! */
+
+ } /*End Q cnt != 0 */
+
+ } /*End Tag cmds supported! */
+#endif /* !DOS */
+
+ } /*End valid ID message. */
+
+ else
+ {
+
+ ACCEPT_MSG_ATN(port);
+ }
+
+ } /* End good id message. */
+
+ else
+ {
+
+ return(FALSE);
+ }
+ }
+ else
+ {
+ ACCEPT_MSG_ATN(port);
+ return(TRUE);
+ }
+ return(TRUE);
+}
+
+#endif /* V302 */
+
+#if defined(DOS)
+void SendMsg(USHORT port, UCHAR message)
+#else
+void SendMsg(ULONG port, UCHAR message)
+#endif
+{
+ while(!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ))
+ {
+ if (! (RD_HARPOON(port+hp_scsisig) & SCSI_BSY))
+ {
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ return;
+ }
+ }
+
+ WRW_HARPOON((port+hp_intstat), PHASE);
+ if ((RD_HARPOON(port+hp_scsisig) & S_SCSI_PHZ) == S_MSGO_PH)
+ {
+ WRW_HARPOON((port+hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
+
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_BUS_EN);
+
+ WR_HARPOON(port+hp_scsidata_0,message);
+
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+ ACCEPT_MSG(port);
+
+ WR_HARPOON(port+hp_portctrl_0, 0x00);
+
+ if ((message == SMABORT) || (message == SMDEV_RESET) ||
+ (message == SMABORT_TAG) )
+ {
+ while(!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | PHASE))) {}
+
+ if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE)
+ {
+ WRW_HARPOON((port+hp_intstat), BUS_FREE);
+ }
+ }
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sdecm
+ *
+ * Description: Determine the proper response to the message from the
+ * target device.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sdecm(UCHAR message, USHORT port, UCHAR p_card)
+#else
+void sdecm(UCHAR message, ULONG port, UCHAR p_card)
+#endif
+{
+ PSCCB currSCCB;
+ PSCCBcard CurrCard;
+ PSCCBMgr_tar_info currTar_Info;
+
+ CurrCard = &BL_Card[p_card];
+ currSCCB = CurrCard->currentSCCB;
+
+ currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+ if (message == SMREST_DATA_PTR)
+ {
+ if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET))
+ {
+ currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;
+
+ hostDataXferRestart(currSCCB);
+ }
+
+ ACCEPT_MSG(port);
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+
+ else if (message == SMCMD_COMP)
+ {
+
+
+ if (currSCCB->Sccb_scsistat == SELECT_Q_ST)
+ {
+ currTar_Info->TarStatus &= ~(UCHAR)TAR_TAG_Q_MASK;
+ currTar_Info->TarStatus |= (UCHAR)TAG_Q_REJECT;
+ }
+
+ ACCEPT_MSG(port);
+
+ }
+
+ else if ((message == SMNO_OP) || (message >= SMIDENT)
+ || (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY))
+ {
+
+ ACCEPT_MSG(port);
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+
+ else if (message == SMREJECT)
+ {
+
+ if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
+ (currSCCB->Sccb_scsistat == SELECT_WN_ST) ||
+ ((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING ) ||
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING ) )
+
+ {
+ WRW_HARPOON((port+hp_intstat), BUS_FREE);
+
+ ACCEPT_MSG(port);
+
+
+ while ((!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+ (!(RDW_HARPOON((port+hp_intstat)) & BUS_FREE))) {}
+
+ if(currSCCB->Lun == 0x00)
+ {
+ if ((currSCCB->Sccb_scsistat == SELECT_SN_ST))
+ {
+
+ currTar_Info->TarStatus |= (UCHAR)SYNC_SUPPORTED;
+
+ currTar_Info->TarEEValue &= ~EE_SYNC_MASK;
+ }
+
+#if defined(WIDE_SCSI)
+ else if ((currSCCB->Sccb_scsistat == SELECT_WN_ST))
+ {
+
+
+ currTar_Info->TarStatus = (currTar_Info->TarStatus &
+ ~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+ currTar_Info->TarEEValue &= ~EE_WIDE_SCSI;
+
+ }
+#endif
+
+ else if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_TRYING )
+ {
+ currTar_Info->TarStatus = (currTar_Info->TarStatus &
+ ~(UCHAR)TAR_TAG_Q_MASK) | TAG_Q_REJECT;
+
+
+ currSCCB->ControlByte &= ~F_USE_CMD_Q;
+ CurrCard->discQCount--;
+ CurrCard->discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ currSCCB->Sccb_tag = 0x00;
+
+ }
+ }
+
+ if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE)
+ {
+
+
+ if(currSCCB->Lun == 0x00)
+ {
+ WRW_HARPOON((port+hp_intstat), BUS_FREE);
+ CurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ }
+ }
+
+ else
+ {
+
+ if((CurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ currTar_Info->TarLUNBusy[currSCCB->Lun] = TRUE;
+ else
+ currTar_Info->TarLUNBusy[0] = TRUE;
+
+
+ currSCCB->ControlByte &= ~(UCHAR)F_USE_CMD_Q;
+
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+
+ }
+ }
+
+ else
+ {
+ ACCEPT_MSG(port);
+
+ while ((!(RD_HARPOON(port+hp_scsisig) & SCSI_REQ)) &&
+ (!(RDW_HARPOON((port+hp_intstat)) & BUS_FREE))) {}
+
+ if (!(RDW_HARPOON((port+hp_intstat)) & BUS_FREE))
+ {
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+ }
+ }
+
+ else if (message == SMEXT)
+ {
+
+ ACCEPT_MSG(port);
+ shandem(port,p_card,currSCCB);
+ }
+
+ else if (message == SMIGNORWR)
+ {
+
+ ACCEPT_MSG(port); /* ACK the RESIDUE MSG */
+
+ message = sfm(port,currSCCB);
+
+ if(currSCCB->Sccb_scsimsg != SMPARITY)
+ ACCEPT_MSG(port);
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+
+
+ else
+ {
+
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ currSCCB->Sccb_scsimsg = SMREJECT;
+
+ ACCEPT_MSG_ATN(port);
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: shandem
+ *
+ * Description: Decide what to do with the extended message.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void shandem(USHORT port, UCHAR p_card, PSCCB pCurrSCCB)
+#else
+void shandem(ULONG port, UCHAR p_card, PSCCB pCurrSCCB)
+#endif
+{
+ UCHAR length,message;
+
+ length = sfm(port,pCurrSCCB);
+ if (length)
+ {
+
+ ACCEPT_MSG(port);
+ message = sfm(port,pCurrSCCB);
+ if (message)
+ {
+
+ if (message == SMSYNC)
+ {
+
+ if (length == 0x03)
+ {
+
+ ACCEPT_MSG(port);
+ stsyncn(port,p_card);
+ }
+ else
+ {
+
+ pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ ACCEPT_MSG_ATN(port);
+ }
+ }
+#if defined(WIDE_SCSI)
+ else if (message == SMWDTR)
+ {
+
+ if (length == 0x02)
+ {
+
+ ACCEPT_MSG(port);
+ stwidn(port,p_card);
+ }
+ else
+ {
+
+ pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ ACCEPT_MSG_ATN(port);
+
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+ }
+#endif
+ else
+ {
+
+ pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ ACCEPT_MSG_ATN(port);
+
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+ }
+ else
+ {
+ if(pCurrSCCB->Sccb_scsimsg != SMPARITY)
+ ACCEPT_MSG(port);
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+ }else
+ {
+ if(pCurrSCCB->Sccb_scsimsg == SMPARITY)
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sisyncn
+ *
+ * Description: Load the automation with a synchronous data transfer
+ *              request message if sync has not yet been negotiated
+ *              with this target.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR sisyncn(USHORT port, UCHAR p_card, UCHAR syncFlag)
+#else
+UCHAR sisyncn(ULONG port, UCHAR p_card, UCHAR syncFlag)
+#endif
+{
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+ currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+ if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_TRYING)) {
+
+
+ WRW_HARPOON((port+ID_MSG_STRT),
+ (MPM_OP+AMSG_OUT+(currSCCB->Sccb_idmsg & ~(UCHAR)DISC_PRIV)));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+CMDPZ);
+
+ WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+ WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x03 ));
+ WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMSYNC));
+
+
+ if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
+
+ WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 12));
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB)
+
+ WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 25));
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB)
+
+ WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 50));
+
+ else
+ WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+ 00));
+
+
+ WRW_HARPOON((port+SYNC_MSGS+8), (RAT_OP ));
+ WRW_HARPOON((port+SYNC_MSGS+10),(MPM_OP+AMSG_OUT+DEFAULT_OFFSET));
+ WRW_HARPOON((port+SYNC_MSGS+12),(BRH_OP+ALWAYS+NP ));
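+      /* Note: the writes above appear to stage the extended SDTR message
+         (SMEXT, length 0x03, SMSYNC, period, offset) in automation RAM so
+         the chip can send it during or immediately after selection. */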
+
+
+ if(syncFlag == FALSE)
+ {
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(UCHAR)TAR_SYNC_MASK) | (UCHAR)SYNC_TRYING);
+ }
+ else
+ {
+ WR_HARPOON(port+hp_autostart_3, (AUTO_IMMED + CMD_ONLY_STRT));
+ }
+
+
+ return(TRUE);
+ }
+
+ else {
+
+ currTar_Info->TarStatus |= (UCHAR)SYNC_SUPPORTED;
+ currTar_Info->TarEEValue &= ~EE_SYNC_MASK;
+ return(FALSE);
+ }
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: stsyncn
+ *
+ * Description: The target has sent us a Sync Nego message so handle it as
+ * necessary.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void stsyncn(USHORT port, UCHAR p_card)
+#else
+void stsyncn(ULONG port, UCHAR p_card)
+#endif
+{
+ UCHAR sync_msg,offset,sync_reg,our_sync_msg;
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+ currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+ sync_msg = sfm(port,currSCCB);
+
+ if((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY))
+ {
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ return;
+ }
+
+ ACCEPT_MSG(port);
+
+
+ offset = sfm(port,currSCCB);
+
+ if((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY))
+ {
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ return;
+ }
+
+ if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
+
+ our_sync_msg = 12; /* Setup our Message to 20mb/s */
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_10MB)
+
+ our_sync_msg = 25; /* Setup our Message to 10mb/s */
+
+ else if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_5MB)
+
+ our_sync_msg = 50; /* Setup our Message to 5mb/s */
+ else
+
+ our_sync_msg = 0; /* Message = Async */
+
+ if (sync_msg < our_sync_msg) {
+ sync_msg = our_sync_msg; /*if faster, then set to max. */
+ }
+
+ if (offset == ASYNC)
+ sync_msg = ASYNC;
+
+ if (offset > MAX_OFFSET)
+ offset = MAX_OFFSET;
+
+ sync_reg = 0x00;
+
+ if (sync_msg > 12)
+
+ sync_reg = 0x20; /* Use 10MB/s */
+
+ if (sync_msg > 25)
+
+ sync_reg = 0x40; /* Use 6.6MB/s */
+
+ if (sync_msg > 38)
+
+ sync_reg = 0x60; /* Use 5MB/s */
+
+ if (sync_msg > 50)
+
+ sync_reg = 0x80; /* Use 4MB/s */
+
+ if (sync_msg > 62)
+
+ sync_reg = 0xA0; /* Use 3.33MB/s */
+
+ if (sync_msg > 75)
+
+ sync_reg = 0xC0; /* Use 2.85MB/s */
+
+ if (sync_msg > 87)
+
+ sync_reg = 0xE0; /* Use 2.5MB/s */
+
+ if (sync_msg > 100) {
+
+ sync_reg = 0x00; /* Use ASYNC */
+ offset = 0x00;
+ }
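+   /* Note: sync_msg is the SDTR transfer period factor in units of 4ns
+      (12 = 48ns, roughly 20MB/s narrow; 25 = 100ns = 10MB/s; 50 = 200ns =
+      5MB/s).  The upper bits of sync_reg appear to select the clock divisor,
+      while the low bits carry the REQ/ACK offset ORed in below. */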
+
+
+#if defined(WIDE_SCSI)
+ if (currTar_Info->TarStatus & WIDE_ENABLED)
+
+ sync_reg |= offset;
+
+ else
+
+ sync_reg |= (offset | NARROW_SCSI);
+
+#else
+ sync_reg |= (offset | NARROW_SCSI);
+#endif
+
+ sssyncv(port,currSCCB->TargID,sync_reg,currTar_Info);
+
+
+ if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
+
+
+ ACCEPT_MSG(port);
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(UCHAR)TAR_SYNC_MASK) | (UCHAR)SYNC_SUPPORTED);
+
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+
+ else {
+
+
+ ACCEPT_MSG_ATN(port);
+
+ sisyncr(port,sync_msg,offset);
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(UCHAR)TAR_SYNC_MASK) | (UCHAR)SYNC_SUPPORTED);
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sisyncr
+ *
+ * Description: Answer the target's sync message.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sisyncr(USHORT port,UCHAR sync_pulse, UCHAR offset)
+#else
+void sisyncr(ULONG port,UCHAR sync_pulse, UCHAR offset)
+#endif
+{
+ ARAM_ACCESS(port);
+ WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+ WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x03 ));
+ WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMSYNC));
+ WRW_HARPOON((port+SYNC_MSGS+6), (MPM_OP+AMSG_OUT+sync_pulse));
+ WRW_HARPOON((port+SYNC_MSGS+8), (RAT_OP ));
+ WRW_HARPOON((port+SYNC_MSGS+10),(MPM_OP+AMSG_OUT+offset));
+ WRW_HARPOON((port+SYNC_MSGS+12),(BRH_OP+ALWAYS+NP ));
+ SGRAM_ACCESS(port);
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT_1);
+
+ WR_HARPOON(port+hp_autostart_3, (AUTO_IMMED+CMD_ONLY_STRT));
+
+ while (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | AUTO_INT))) {}
+}
+
+
+
+#if defined(WIDE_SCSI)
+
+/*---------------------------------------------------------------------
+ *
+ * Function: siwidn
+ *
+ * Description: Load the automation with a wide data transfer request
+ *              message if wide has not yet been negotiated with this
+ *              target.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR siwidn(USHORT port, UCHAR p_card)
+#else
+UCHAR siwidn(ULONG port, UCHAR p_card)
+#endif
+{
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+ currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+ if (!((currTar_Info->TarStatus & TAR_WIDE_MASK) == WIDE_NEGOCIATED)) {
+
+
+ WRW_HARPOON((port+ID_MSG_STRT),
+ (MPM_OP+AMSG_OUT+(currSCCB->Sccb_idmsg & ~(UCHAR)DISC_PRIV)));
+
+ WRW_HARPOON((port+ID_MSG_STRT+2),BRH_OP+ALWAYS+CMDPZ);
+
+ WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+ WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x02 ));
+ WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMWDTR));
+ WRW_HARPOON((port+SYNC_MSGS+6), (RAT_OP ));
+ WRW_HARPOON((port+SYNC_MSGS+8), (MPM_OP+AMSG_OUT+ SM16BIT));
+ WRW_HARPOON((port+SYNC_MSGS+10),(BRH_OP+ALWAYS+NP ));
+
+ WR_HARPOON(port+hp_autostart_3, (SELECT+SELCHK_STRT));
+
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(UCHAR)TAR_WIDE_MASK) | (UCHAR)WIDE_ENABLED);
+
+ return(TRUE);
+ }
+
+ else {
+
+ currTar_Info->TarStatus = ((currTar_Info->TarStatus &
+ ~(UCHAR)TAR_WIDE_MASK) | WIDE_NEGOCIATED);
+
+ currTar_Info->TarEEValue &= ~EE_WIDE_SCSI;
+ return(FALSE);
+ }
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: stwidn
+ *
+ * Description: The target has sent us a Wide Nego message so handle it as
+ * necessary.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void stwidn(USHORT port, UCHAR p_card)
+#else
+void stwidn(ULONG port, UCHAR p_card)
+#endif
+{
+ UCHAR width;
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+ currTar_Info = &sccbMgrTbl[p_card][currSCCB->TargID];
+
+ width = sfm(port,currSCCB);
+
+ if((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY))
+ {
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ return;
+ }
+
+
+ if (!(currTar_Info->TarEEValue & EE_WIDE_SCSI))
+ width = 0;
+
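+   /* Note: the WDTR width exponent is 0 for 8-bit and 1 for 16-bit; from
+      here on 'width' is reused as the value written to the sync/wide
+      register, where 0 leaves wide enabled and NARROW_SCSI forces 8-bit
+      transfers. */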
+ if (width) {
+ currTar_Info->TarStatus |= WIDE_ENABLED;
+ width = 0;
+ }
+ else {
+ width = NARROW_SCSI;
+ currTar_Info->TarStatus &= ~WIDE_ENABLED;
+ }
+
+
+ sssyncv(port,currSCCB->TargID,width,currTar_Info);
+
+
+ if (currSCCB->Sccb_scsistat == SELECT_WN_ST)
+ {
+
+
+
+ currTar_Info->TarStatus |= WIDE_NEGOCIATED;
+
+ if (!((currTar_Info->TarStatus & TAR_SYNC_MASK) == SYNC_SUPPORTED))
+ {
+ ACCEPT_MSG_ATN(port);
+ ARAM_ACCESS(port);
+ sisyncn(port,p_card, TRUE);
+ currSCCB->Sccb_scsistat = SELECT_SN_ST;
+ SGRAM_ACCESS(port);
+ }
+ else
+ {
+ ACCEPT_MSG(port);
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+ }
+
+ else {
+
+
+ ACCEPT_MSG_ATN(port);
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ width = SM16BIT;
+ else
+ width = SM8BIT;
+
+ siwidr(port,width);
+
+ currTar_Info->TarStatus |= (WIDE_NEGOCIATED | WIDE_ENABLED);
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: siwidr
+ *
+ * Description: Answer the target's Wide nego message.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void siwidr(USHORT port, UCHAR width)
+#else
+void siwidr(ULONG port, UCHAR width)
+#endif
+{
+ ARAM_ACCESS(port);
+ WRW_HARPOON((port+SYNC_MSGS+0), (MPM_OP+AMSG_OUT+SMEXT ));
+ WRW_HARPOON((port+SYNC_MSGS+2), (MPM_OP+AMSG_OUT+0x02 ));
+ WRW_HARPOON((port+SYNC_MSGS+4), (MPM_OP+AMSG_OUT+SMWDTR));
+ WRW_HARPOON((port+SYNC_MSGS+6), (RAT_OP ));
+ WRW_HARPOON((port+SYNC_MSGS+8),(MPM_OP+AMSG_OUT+width));
+ WRW_HARPOON((port+SYNC_MSGS+10),(BRH_OP+ALWAYS+NP ));
+ SGRAM_ACCESS(port);
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT_1);
+
+ WR_HARPOON(port+hp_autostart_3, (AUTO_IMMED+CMD_ONLY_STRT));
+
+ while (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | AUTO_INT))) {}
+}
+
+#endif
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sssyncv
+ *
+ * Description: Write the desired value to the Sync Register for the
+ * ID specified.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sssyncv(USHORT p_port, UCHAR p_id, UCHAR p_sync_value,PSCCBMgr_tar_info currTar_Info)
+#else
+void sssyncv(ULONG p_port, UCHAR p_id, UCHAR p_sync_value,PSCCBMgr_tar_info currTar_Info)
+#endif
+{
+ UCHAR index;
+
+ index = p_id;
+
+ switch (index) {
+
+ case 0:
+ index = 12; /* hp_synctarg_0 */
+ break;
+ case 1:
+ index = 13; /* hp_synctarg_1 */
+ break;
+ case 2:
+ index = 14; /* hp_synctarg_2 */
+ break;
+ case 3:
+ index = 15; /* hp_synctarg_3 */
+ break;
+ case 4:
+ index = 8; /* hp_synctarg_4 */
+ break;
+ case 5:
+ index = 9; /* hp_synctarg_5 */
+ break;
+ case 6:
+ index = 10; /* hp_synctarg_6 */
+ break;
+ case 7:
+ index = 11; /* hp_synctarg_7 */
+ break;
+ case 8:
+ index = 4; /* hp_synctarg_8 */
+ break;
+ case 9:
+ index = 5; /* hp_synctarg_9 */
+ break;
+ case 10:
+ index = 6; /* hp_synctarg_10 */
+ break;
+ case 11:
+ index = 7; /* hp_synctarg_11 */
+ break;
+ case 12:
+ index = 0; /* hp_synctarg_12 */
+ break;
+ case 13:
+ index = 1; /* hp_synctarg_13 */
+ break;
+ case 14:
+ index = 2; /* hp_synctarg_14 */
+ break;
+ case 15:
+ index = 3; /* hp_synctarg_15 */
+
+ }
+
+ WR_HARPOON(p_port+hp_synctarg_base+index, p_sync_value);
+
+ currTar_Info->TarSyncCtrl = p_sync_value;
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sresb
+ *
+ * Description: Reset the desired card's SCSI bus.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void sresb(USHORT port, UCHAR p_card)
+#else
+void sresb(ULONG port, UCHAR p_card)
+#endif
+{
+ UCHAR scsiID, i;
+
+ PSCCBMgr_tar_info currTar_Info;
+
+ WR_HARPOON(port+hp_page_ctrl,
+ (RD_HARPOON(port+hp_page_ctrl) | G_INT_DISABLE));
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+ WR_HARPOON(port+hp_scsictrl_0, SCSI_RST);
+
+ scsiID = RD_HARPOON(port+hp_seltimeout);
+ WR_HARPOON(port+hp_seltimeout,TO_5ms);
+ WRW_HARPOON((port+hp_intstat), TIMEOUT);
+
+ WR_HARPOON(port+hp_portctrl_0,(SCSI_PORT | START_TO));
+
+ while (!(RDW_HARPOON((port+hp_intstat)) & TIMEOUT)) {}
+
+ WR_HARPOON(port+hp_seltimeout,scsiID);
+
+ WR_HARPOON(port+hp_scsictrl_0, ENA_SCAM_SEL);
+
+ Wait(port, TO_5ms);
+
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+ WR_HARPOON(port+hp_int_mask, (RD_HARPOON(port+hp_int_mask) | 0x00));
+
+ for (scsiID = 0; scsiID < MAX_SCSI_TAR; scsiID++)
+ {
+ currTar_Info = &sccbMgrTbl[p_card][scsiID];
+
+ if (currTar_Info->TarEEValue & EE_SYNC_MASK)
+ {
+ currTar_Info->TarSyncCtrl = 0;
+ currTar_Info->TarStatus &= ~TAR_SYNC_MASK;
+ }
+
+ if (currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ {
+ currTar_Info->TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+ sssyncv(port, scsiID, NARROW_SCSI,currTar_Info);
+
+ SccbMgrTableInitTarget(p_card, scsiID);
+ }
+
+ BL_Card[p_card].scanIndex = 0x00;
+ BL_Card[p_card].currentSCCB = NULL;
+ BL_Card[p_card].globalFlags &= ~(F_TAG_STARTED | F_HOST_XFER_ACT
+ | F_NEW_SCCB_CMD);
+ BL_Card[p_card].cmdCounter = 0x00;
+ BL_Card[p_card].discQCount = 0x00;
+ BL_Card[p_card].tagQ_Lst = 0x01;
+
+ for(i = 0; i < QUEUE_DEPTH; i++)
+ BL_Card[p_card].discQ_Tbl[i] = NULL;
+
+ WR_HARPOON(port+hp_page_ctrl,
+ (RD_HARPOON(port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: ssenss
+ *
+ * Description: Setup for the Auto Sense command.
+ *
+ *---------------------------------------------------------------------*/
+void ssenss(PSCCBcard pCurrCard)
+{
+ UCHAR i;
+ PSCCB currSCCB;
+
+ currSCCB = pCurrCard->currentSCCB;
+
+
+ currSCCB->Save_CdbLen = currSCCB->CdbLength;
+
+ for (i = 0; i < 6; i++) {
+
+ currSCCB->Save_Cdb[i] = currSCCB->Cdb[i];
+ }
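+   /* Note: the original CDB and its length are saved above, presumably so
+      they can be restored once the automatic REQUEST SENSE completes. */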
+
+ currSCCB->CdbLength = SIX_BYTE_CMD;
+ currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
+ currSCCB->Cdb[1] = currSCCB->Cdb[1] & (UCHAR)0xE0; /*Keep LUN. */
+ currSCCB->Cdb[2] = 0x00;
+ currSCCB->Cdb[3] = 0x00;
+ currSCCB->Cdb[4] = currSCCB->RequestSenseLength;
+ currSCCB->Cdb[5] = 0x00;
+
+ currSCCB->Sccb_XferCnt = (unsigned long)currSCCB->RequestSenseLength;
+
+ currSCCB->Sccb_ATC = 0x00;
+
+ currSCCB->Sccb_XferState |= F_AUTO_SENSE;
+
+ currSCCB->Sccb_XferState &= ~F_SG_XFER;
+
+ currSCCB->Sccb_idmsg = currSCCB->Sccb_idmsg & ~(UCHAR)DISC_PRIV;
+
+ currSCCB->ControlByte = 0x00;
+
+ currSCCB->Sccb_MGRFlags &= F_STATUSLOADED;
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sxfrp
+ *
+ * Description: Transfer data into the bit bucket until the device
+ * decides to switch phase.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void sxfrp(USHORT p_port, UCHAR p_card)
+#else
+void sxfrp(ULONG p_port, UCHAR p_card)
+#endif
+{
+ UCHAR curr_phz;
+
+
+ DISABLE_AUTO(p_port);
+
+ if (BL_Card[p_card].globalFlags & F_HOST_XFER_ACT) {
+
+ hostDataXferAbort(p_port,p_card,BL_Card[p_card].currentSCCB);
+
+ }
+
+ /* If the Automation handled the end of the transfer then do not
+ match the phase or we will get out of sync with the ISR. */
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | XFER_CNT_0 | AUTO_INT))
+ return;
+
+ WR_HARPOON(p_port+hp_xfercnt_0, 0x00);
+
+ curr_phz = RD_HARPOON(p_port+hp_scsisig) & (UCHAR)S_SCSI_PHZ;
+
+ WRW_HARPOON((p_port+hp_intstat), XFER_CNT_0);
+
+
+ WR_HARPOON(p_port+hp_scsisig, curr_phz);
+
+ while ( !(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)) &&
+ (curr_phz == (RD_HARPOON(p_port+hp_scsisig) & (UCHAR)S_SCSI_PHZ)) )
+ {
+ if (curr_phz & (UCHAR)SCSI_IOBIT)
+ {
+ WR_HARPOON(p_port+hp_portctrl_0, (SCSI_PORT | HOST_PORT | SCSI_INBIT));
+
+ if (!(RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY))
+ {
+ RD_HARPOON(p_port+hp_fifodata_0);
+ }
+ }
+ else
+ {
+ WR_HARPOON(p_port+hp_portctrl_0, (SCSI_PORT | HOST_PORT | HOST_WRT));
+ if (RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY)
+ {
+ WR_HARPOON(p_port+hp_fifodata_0,0xFA);
+ }
+ }
+ } /* End of While loop for padding data I/O phase */
+
+ while ( !(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)))
+ {
+ if (RD_HARPOON(p_port+hp_scsisig) & SCSI_REQ)
+ break;
+ }
+
+ WR_HARPOON(p_port+hp_portctrl_0, (SCSI_PORT | HOST_PORT | SCSI_INBIT));
+ while (!(RD_HARPOON(p_port+hp_xferstat) & FIFO_EMPTY))
+ {
+ RD_HARPOON(p_port+hp_fifodata_0);
+ }
+
+ if ( !(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RESET)))
+ {
+ WR_HARPOON(p_port+hp_autostart_0, (AUTO_IMMED+DISCONNECT_START));
+ while (!(RDW_HARPOON((p_port+hp_intstat)) & AUTO_INT)) {}
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & (ICMD_COMP | ITAR_DISC))
+ while (!(RDW_HARPOON((p_port+hp_intstat)) & (BUS_FREE | RSEL))) ;
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: schkdd
+ *
+ * Description: Make sure data has been flushed from both FIFOs and abort
+ * the operations if necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void schkdd(USHORT port, UCHAR p_card)
+#else
+void schkdd(ULONG port, UCHAR p_card)
+#endif
+{
+ USHORT TimeOutLoop;
+ UCHAR sPhase;
+
+ PSCCB currSCCB;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+
+ if ((currSCCB->Sccb_scsistat != DATA_OUT_ST) &&
+ (currSCCB->Sccb_scsistat != DATA_IN_ST)) {
+ return;
+ }
+
+
+
+ if (currSCCB->Sccb_XferState & F_ODD_BALL_CNT)
+ {
+
+ currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt-1);
+
+ currSCCB->Sccb_XferCnt = 1;
+
+ currSCCB->Sccb_XferState &= ~F_ODD_BALL_CNT;
+ WRW_HARPOON((port+hp_fiforead), (USHORT) 0x00);
+ WR_HARPOON(port+hp_xferstat, 0x00);
+ }
+
+ else
+ {
+
+ currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;
+
+ currSCCB->Sccb_XferCnt = 0;
+ }
+
+ if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE)) {
+
+ currSCCB->HostStatus = SCCB_PARITY_ERR;
+ WRW_HARPOON((port+hp_intstat), PARITY);
+ }
+
+
+ hostDataXferAbort(port,p_card,currSCCB);
+
+
+ while (RD_HARPOON(port+hp_scsisig) & SCSI_ACK) {}
+
+ TimeOutLoop = 0;
+
+ while(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)
+ {
+ if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE) {
+ return;
+ }
+ if (RD_HARPOON(port+hp_offsetctr) & (UCHAR)0x1F) {
+ break;
+ }
+ if (RDW_HARPOON((port+hp_intstat)) & RESET) {
+ return;
+ }
+ if ((RD_HARPOON(port+hp_scsisig) & SCSI_REQ) || (TimeOutLoop++>0x3000) )
+ break;
+ }
+
+ sPhase = RD_HARPOON(port+hp_scsisig) & (SCSI_BSY | S_SCSI_PHZ);
+ if ((!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)) ||
+ (RD_HARPOON(port+hp_offsetctr) & (UCHAR)0x1F) ||
+ (sPhase == (SCSI_BSY | S_DATAO_PH)) ||
+ (sPhase == (SCSI_BSY | S_DATAI_PH)))
+ {
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+ if (!(currSCCB->Sccb_XferState & F_ALL_XFERRED))
+ {
+ if (currSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+ phaseDataIn(port,p_card);
+ }
+
+ else {
+ phaseDataOut(port,p_card);
+ }
+ }
+ else
+ {
+ sxfrp(port,p_card);
+ if (!(RDW_HARPOON((port+hp_intstat)) &
+ (BUS_FREE | ICMD_COMP | ITAR_DISC | RESET)))
+ {
+ WRW_HARPOON((port+hp_intstat), AUTO_INT);
+ phaseDecode(port,p_card);
+ }
+ }
+
+ }
+
+ else {
+ WR_HARPOON(port+hp_portctrl_0, 0x00);
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sinits
+ *
+ * Description: Setup SCCB manager fields in this SCCB.
+ *
+ *---------------------------------------------------------------------*/
+
+void sinits(PSCCB p_sccb, UCHAR p_card)
+{
+ PSCCBMgr_tar_info currTar_Info;
+
+ if((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN))
+ {
+ return;
+ }
+ currTar_Info = &sccbMgrTbl[p_card][p_sccb->TargID];
+
+ p_sccb->Sccb_XferState = 0x00;
+ p_sccb->Sccb_XferCnt = p_sccb->DataLength;
+
+ if ((p_sccb->OperationCode == SCATTER_GATHER_COMMAND) ||
+ (p_sccb->OperationCode == RESIDUAL_SG_COMMAND)) {
+
+ p_sccb->Sccb_SGoffset = 0;
+ p_sccb->Sccb_XferState = F_SG_XFER;
+ p_sccb->Sccb_XferCnt = 0x00;
+ }
+
+ if (p_sccb->DataLength == 0x00)
+
+ p_sccb->Sccb_XferState |= F_ALL_XFERRED;
+
+ if (p_sccb->ControlByte & F_USE_CMD_Q)
+ {
+ if ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) == TAG_Q_REJECT)
+ p_sccb->ControlByte &= ~F_USE_CMD_Q;
+
+ else
+ currTar_Info->TarStatus |= TAG_Q_TRYING;
+ }
+
+/* For !single SCSI device in system & device allow Disconnect
+ or command is tag_q type then send Cmd with Disconnect Enable
+ else send Cmd with Disconnect Disable */
+
+/*
+ if (((!(BL_Card[p_card].globalFlags & F_SINGLE_DEVICE)) &&
+ (currTar_Info->TarStatus & TAR_ALLOW_DISC)) ||
+ (currTar_Info->TarStatus & TAG_Q_TRYING)) {
+*/
+ if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
+ (currTar_Info->TarStatus & TAG_Q_TRYING)) {
+ p_sccb->Sccb_idmsg = (UCHAR)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
+ }
+
+ else {
+
+ p_sccb->Sccb_idmsg = (UCHAR)SMIDENT | p_sccb->Lun;
+ }
+
+ p_sccb->HostStatus = 0x00;
+ p_sccb->TargetStatus = 0x00;
+ p_sccb->Sccb_tag = 0x00;
+ p_sccb->Sccb_MGRFlags = 0x00;
+ p_sccb->Sccb_sgseg = 0x00;
+ p_sccb->Sccb_ATC = 0x00;
+ p_sccb->Sccb_savedATC = 0x00;
+/*
+ p_sccb->SccbVirtDataPtr = 0x00;
+ p_sccb->Sccb_forwardlink = NULL;
+ p_sccb->Sccb_backlink = NULL;
+ */
+ p_sccb->Sccb_scsistat = BUS_FREE_ST;
+ p_sccb->SccbStatus = SCCB_IN_PROCESS;
+ p_sccb->Sccb_scsimsg = SMNO_OP;
+
+}
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: phase.c $
+ *
+ * Description: Functions to initially handle the SCSI bus phase when
+ * the target asserts request (and the automation is not
+ * enabled to handle the situation).
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+
+#if defined(OS2)
+ extern void (far *s_PhaseTbl[8]) (ULONG, UCHAR);
+#else
+ #if defined(DOS)
+ extern void (*s_PhaseTbl[8]) (USHORT, UCHAR);
+ #else
+ extern void (*s_PhaseTbl[8]) (ULONG, UCHAR);
+ #endif
+#endif
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Decode
+ *
+ * Description: Determine the phase and call the appropriate function.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void phaseDecode(USHORT p_port, UCHAR p_card)
+#else
+void phaseDecode(ULONG p_port, UCHAR p_card)
+#endif
+{
+ unsigned char phase_ref;
+#if defined(OS2)
+ void (far *phase) (ULONG, UCHAR);
+#else
+ #if defined(DOS)
+ void (*phase) (USHORT, UCHAR);
+ #else
+ void (*phase) (ULONG, UCHAR);
+ #endif
+#endif
+
+
+ DISABLE_AUTO(p_port);
+
+ phase_ref = (UCHAR) (RD_HARPOON(p_port+hp_scsisig) & S_SCSI_PHZ);
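+   /* Note: phase_ref is the masked MSG/CD/IO phase encoding (0-7) and is
+      used directly as an index into the eight-entry phase dispatch table. */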
+
+ phase = s_PhaseTbl[phase_ref];
+
+ (*phase)(p_port, p_card); /* Call the correct phase func */
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data Out Phase
+ *
+ * Description: Start up both the BusMaster and Xbow.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseDataOut(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseDataOut(USHORT port, UCHAR p_card)
+#else
+void phaseDataOut(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+
+ PSCCB currSCCB;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+ if (currSCCB == NULL)
+ {
+ return; /* Exit if No SCCB record */
+ }
+
+ currSCCB->Sccb_scsistat = DATA_OUT_ST;
+ currSCCB->Sccb_XferState &= ~(F_HOST_XFER_DIR | F_NO_DATA_YET);
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+ WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+
+ WR_HARPOON(port+hp_autostart_0, (END_DATA+END_DATA_START));
+
+ dataXferProcessor(port, &BL_Card[p_card]);
+
+#if defined(NOBUGBUG)
+ if (RDW_HARPOON((port+hp_intstat)) & XFER_CNT_0)
+ WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+
+#endif
+
+
+ if (currSCCB->Sccb_XferCnt == 0) {
+
+
+ if ((currSCCB->ControlByte & SCCB_DATA_XFER_OUT) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE))
+ currSCCB->HostStatus = SCCB_DATA_OVER_RUN;
+
+ sxfrp(port,p_card);
+ if (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | RESET)))
+ phaseDecode(port,p_card);
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data In Phase
+ *
+ * Description: Startup the BusMaster and the XBOW.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseDataIn(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseDataIn(USHORT port, UCHAR p_card)
+#else
+void phaseDataIn(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+
+ PSCCB currSCCB;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ if (currSCCB == NULL)
+ {
+ return; /* Exit if No SCCB record */
+ }
+
+
+ currSCCB->Sccb_scsistat = DATA_IN_ST;
+ currSCCB->Sccb_XferState |= F_HOST_XFER_DIR;
+ currSCCB->Sccb_XferState &= ~F_NO_DATA_YET;
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_PORT);
+
+ WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+
+ WR_HARPOON(port+hp_autostart_0, (END_DATA+END_DATA_START));
+
+ dataXferProcessor(port, &BL_Card[p_card]);
+
+ if (currSCCB->Sccb_XferCnt == 0) {
+
+
+ if ((currSCCB->ControlByte & SCCB_DATA_XFER_IN) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE))
+ currSCCB->HostStatus = SCCB_DATA_OVER_RUN;
+
+ sxfrp(port,p_card);
+ if (!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | RESET)))
+ phaseDecode(port,p_card);
+
+ }
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Command Phase
+ *
+ * Description: Load the CDB into the automation and start it up.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseCommand(ULONG p_port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseCommand(USHORT p_port, UCHAR p_card)
+#else
+void phaseCommand(ULONG p_port, UCHAR p_card)
+#endif
+#endif
+{
+ PSCCB currSCCB;
+#if defined(DOS)
+ USHORT cdb_reg;
+#else
+ ULONG cdb_reg;
+#endif
+ UCHAR i;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ if (currSCCB->OperationCode == RESET_COMMAND) {
+
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ currSCCB->CdbLength = SIX_BYTE_CMD;
+ }
+
+ WR_HARPOON(p_port+hp_scsisig, 0x00);
+
+ ARAM_ACCESS(p_port);
+
+
+ cdb_reg = p_port + CMD_STRT;
+
+ for (i=0; i < currSCCB->CdbLength; i++) {
+
+ if (currSCCB->OperationCode == RESET_COMMAND)
+
+ WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + 0x00));
+
+ else
+ WRW_HARPOON(cdb_reg, (MPM_OP + ACOMMAND + currSCCB->Cdb[i]));
+ cdb_reg +=2;
+ }
+
+ if (currSCCB->CdbLength != TWELVE_BYTE_CMD)
+ WRW_HARPOON(cdb_reg, (BRH_OP+ALWAYS+ NP));
+
+ WR_HARPOON(p_port+hp_portctrl_0,(SCSI_PORT));
+
+ currSCCB->Sccb_scsistat = COMMAND_ST;
+
+ WR_HARPOON(p_port+hp_autostart_3, (AUTO_IMMED | CMD_ONLY_STRT));
+ SGRAM_ACCESS(p_port);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Status phase
+ *
+ * Description: Bring in the status and command complete message bytes
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseStatus(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseStatus(USHORT port, UCHAR p_card)
+#else
+void phaseStatus(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+ /* Start-up the automation to finish off this command and let the
+ isr handle the interrupt for command complete when it comes in.
+ We could wait here for the interrupt to be generated?
+ */
+
+ WR_HARPOON(port+hp_scsisig, 0x00);
+
+ WR_HARPOON(port+hp_autostart_0, (AUTO_IMMED+END_DATA_START));
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Message Out
+ *
+ * Description: Send out our message (if we have one) and handle whatever
+ *              else is involved.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseMsgOut(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseMsgOut(USHORT port, UCHAR p_card)
+#else
+void phaseMsgOut(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+ UCHAR message,scsiID;
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ if (currSCCB != NULL) {
+
+ message = currSCCB->Sccb_scsimsg;
+ scsiID = currSCCB->TargID;
+
+ if (message == SMDEV_RESET)
+ {
+
+
+ currTar_Info = &sccbMgrTbl[p_card][scsiID];
+ currTar_Info->TarSyncCtrl = 0;
+ sssyncv(port, scsiID, NARROW_SCSI,currTar_Info);
+
+ if (sccbMgrTbl[p_card][scsiID].TarEEValue & EE_SYNC_MASK)
+ {
+
+ sccbMgrTbl[p_card][scsiID].TarStatus &= ~TAR_SYNC_MASK;
+
+ }
+
+ if (sccbMgrTbl[p_card][scsiID].TarEEValue & EE_WIDE_SCSI)
+ {
+
+ sccbMgrTbl[p_card][scsiID].TarStatus &= ~TAR_WIDE_MASK;
+ }
+
+
+ queueFlushSccb(p_card,SCCB_COMPLETE);
+ SccbMgrTableInitTarget(p_card,scsiID);
+ }
+ else if (currSCCB->Sccb_scsistat == ABORT_ST)
+ {
+ currSCCB->HostStatus = SCCB_COMPLETE;
+ if(BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] != NULL)
+ {
+ BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ sccbMgrTbl[p_card][scsiID].TarTagQ_Cnt--;
+ }
+
+ }
+
+ else if (currSCCB->Sccb_scsistat < COMMAND_ST)
+ {
+
+
+ if(message == SMNO_OP)
+ {
+ currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;
+
+ ssel(port,p_card);
+ return;
+ }
+ }
+ else
+ {
+
+
+ if (message == SMABORT)
+
+ queueFlushSccb(p_card,SCCB_COMPLETE);
+ }
+
+ }
+ else
+ {
+ message = SMABORT;
+ }
+
+ WRW_HARPOON((port+hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
+
+
+ WR_HARPOON(port+hp_portctrl_0, SCSI_BUS_EN);
+
+ WR_HARPOON(port+hp_scsidata_0,message);
+
+ WR_HARPOON(port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+
+ ACCEPT_MSG(port);
+
+ WR_HARPOON(port+hp_portctrl_0, 0x00);
+
+ if ((message == SMABORT) || (message == SMDEV_RESET) ||
+ (message == SMABORT_TAG) )
+ {
+
+ while(!(RDW_HARPOON((port+hp_intstat)) & (BUS_FREE | PHASE))) {}
+
+ if (RDW_HARPOON((port+hp_intstat)) & BUS_FREE)
+ {
+ WRW_HARPOON((port+hp_intstat), BUS_FREE);
+
+ if (currSCCB != NULL)
+ {
+
+ if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+ else
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+ queueCmdComplete(&BL_Card[p_card],currSCCB, p_card);
+ }
+
+ else
+ {
+ BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+ }
+ }
+
+ else
+ {
+
+ sxfrp(port,p_card);
+ }
+ }
+
+ else
+ {
+
+ if(message == SMPARITY)
+ {
+ currSCCB->Sccb_scsimsg = SMNO_OP;
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+ else
+ {
+ sxfrp(port,p_card);
+ }
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Message In phase
+ *
+ * Description: Bring in the message and determine what to do with it.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseMsgIn(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseMsgIn(USHORT port, UCHAR p_card)
+#else
+void phaseMsgIn(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+ UCHAR message;
+ PSCCB currSCCB;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ if (BL_Card[p_card].globalFlags & F_HOST_XFER_ACT)
+ {
+
+ phaseChkFifo(port, p_card);
+ }
+
+ message = RD_HARPOON(port+hp_scsidata_0);
+ if ((message == SMDISC) || (message == SMSAVE_DATA_PTR))
+ {
+
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+END_DATA_START));
+
+ }
+
+ else
+ {
+
+ message = sfm(port,currSCCB);
+ if (message)
+ {
+
+
+ sdecm(message,port,p_card);
+
+ }
+ else
+ {
+ if(currSCCB->Sccb_scsimsg != SMPARITY)
+ ACCEPT_MSG(port);
+ WR_HARPOON(port+hp_autostart_1, (AUTO_IMMED+DISCONNECT_START));
+ }
+ }
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Illegal phase
+ *
+ * Description: Target switched to some illegal phase, so all we can do
+ * is report an error back to the host (if that is possible)
+ * and send an ABORT message to the misbehaving target.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(OS2)
+void far phaseIllegal(ULONG port, UCHAR p_card)
+#else
+#if defined(DOS)
+void phaseIllegal(USHORT port, UCHAR p_card)
+#else
+void phaseIllegal(ULONG port, UCHAR p_card)
+#endif
+#endif
+{
+ PSCCB currSCCB;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ WR_HARPOON(port+hp_scsisig, RD_HARPOON(port+hp_scsisig));
+ if (currSCCB != NULL) {
+
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ currSCCB->Sccb_scsistat = ABORT_ST;
+ currSCCB->Sccb_scsimsg = SMABORT;
+ }
+
+ ACCEPT_MSG_ATN(port);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Check FIFO
+ *
+ * Description: Make sure data has been flushed from both FIFOs and abort
+ * the operations if necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void phaseChkFifo(USHORT port, UCHAR p_card)
+#else
+void phaseChkFifo(ULONG port, UCHAR p_card)
+#endif
+{
+ ULONG xfercnt;
+ PSCCB currSCCB;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ if (currSCCB->Sccb_scsistat == DATA_IN_ST)
+ {
+
+ while((!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)) &&
+ (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY)) {}
+
+
+ if (!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY))
+ {
+ currSCCB->Sccb_ATC += currSCCB->Sccb_XferCnt;
+
+ currSCCB->Sccb_XferCnt = 0;
+
+ if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE))
+ {
+ currSCCB->HostStatus = SCCB_PARITY_ERR;
+ WRW_HARPOON((port+hp_intstat), PARITY);
+ }
+
+ hostDataXferAbort(port,p_card,currSCCB);
+
+ dataXferProcessor(port, &BL_Card[p_card]);
+
+ while((!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY)) &&
+ (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY)) {}
+
+ }
+ } /*End Data In specific code. */
+
+
+
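+ /* Read the 24-bit hardware transfer count (hp_xfercnt_2..hp_xfercnt_0)
+    into xfercnt: the DOS build does it with inline port I/O, the other
+    builds use the GET_XFER_CNT macro. */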
+#if defined(DOS)
+ asm { mov dx,port;
+ add dx,hp_xfercnt_2;
+ in al,dx;
+ dec dx;
+ xor ah,ah;
+ mov word ptr xfercnt+2,ax;
+ in al,dx;
+ dec dx;
+ mov ah,al;
+ in al,dx;
+ mov word ptr xfercnt,ax;
+ }
+#else
+ GET_XFER_CNT(port,xfercnt);
+#endif
+
+
+ WR_HARPOON(port+hp_xfercnt_0, 0x00);
+
+
+ WR_HARPOON(port+hp_portctrl_0, 0x00);
+
+ currSCCB->Sccb_ATC += (currSCCB->Sccb_XferCnt - xfercnt);
+
+ currSCCB->Sccb_XferCnt = xfercnt;
+
+ if ((RDW_HARPOON((port+hp_intstat)) & PARITY) &&
+ (currSCCB->HostStatus == SCCB_COMPLETE)) {
+
+ currSCCB->HostStatus = SCCB_PARITY_ERR;
+ WRW_HARPOON((port+hp_intstat), PARITY);
+ }
+
+
+ hostDataXferAbort(port,p_card,currSCCB);
+
+
+ WR_HARPOON(port+hp_fifowrite, 0x00);
+ WR_HARPOON(port+hp_fiforead, 0x00);
+ WR_HARPOON(port+hp_xferstat, 0x00);
+
+ WRW_HARPOON((port+hp_intstat), XFER_CNT_0);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Phase Bus Free
+ *
+ * Description: We just went bus free, so figure out whether it was
+ * because of a command complete or a disconnect.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void phaseBusFree(USHORT port, UCHAR p_card)
+#else
+void phaseBusFree(ULONG port, UCHAR p_card)
+#endif
+{
+ PSCCB currSCCB;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ if (currSCCB != NULL)
+ {
+
+ DISABLE_AUTO(port);
+
+
+ if (currSCCB->OperationCode == RESET_COMMAND)
+ {
+
+ if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+ else
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+ queueCmdComplete(&BL_Card[p_card], currSCCB, p_card);
+
+ queueSearchSelect(&BL_Card[p_card],p_card);
+
+ }
+
+ else if(currSCCB->Sccb_scsistat == SELECT_SN_ST)
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
+ (UCHAR)SYNC_SUPPORTED;
+ sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_SYNC_MASK;
+ }
+
+ else if(currSCCB->Sccb_scsistat == SELECT_WN_ST)
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
+ (sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+ sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_WIDE_SCSI;
+ }
+
+#if !defined(DOS)
+ else if(currSCCB->Sccb_scsistat == SELECT_Q_ST)
+ {
+ /* Make sure this is not a phony BUS_FREE. If we were
+ reselected or if BUSY is NOT on then this is a
+ valid BUS FREE. SRR Wednesday, 5/10/1995. */
+
+ if ((!(RD_HARPOON(port+hp_scsisig) & SCSI_BSY)) ||
+ (RDW_HARPOON((port+hp_intstat)) & RSEL))
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus &= ~TAR_TAG_Q_MASK;
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |= TAG_Q_REJECT;
+ }
+
+ else
+ {
+ return;
+ }
+ }
+#endif
+
+ else
+ {
+
+ currSCCB->Sccb_scsistat = BUS_FREE_ST;
+
+ if (!currSCCB->HostStatus)
+ {
+ currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
+ }
+
+ if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+ else
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+ queueCmdComplete(&BL_Card[p_card], currSCCB, p_card);
+ return;
+ }
+
+
+ BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ } /*end if !=null */
+}
+
+
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: automate.c $
+ *
+ * Description: Functions relating to programming the automation of
+ * the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern SCCBCARD BL_Card[MAX_CARDS];
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Auto Load Default Map
+ *
+ * Description: Load the Automation RAM with the default map values.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void autoLoadDefaultMap(USHORT p_port)
+#else
+void autoLoadDefaultMap(ULONG p_port)
+#endif
+{
+#if defined(DOS)
+ USHORT map_addr;
+#else
+ ULONG map_addr;
+#endif
+
+ ARAM_ACCESS(p_port);
+ map_addr = p_port + hp_aramBase;
+
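+ /* Each automation instruction is a 16-bit word; write them sequentially
+    into the automation RAM starting at hp_aramBase. */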
+ WRW_HARPOON(map_addr, (MPM_OP+AMSG_OUT+ 0xC0)); /*ID MESSAGE */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+AMSG_OUT+ 0x20)); /*SIMPLE TAG QUEUEING MSG */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, RAT_OP); /*RESET ATTENTION */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+AMSG_OUT+ 0x00)); /*TAG ID MSG */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 0 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 1 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 2 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 3 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 4 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 5 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 6 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 7 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 8 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 9 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 10 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MPM_OP+ACOMMAND+ 0x00)); /*CDB BYTE 11 */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CPE_OP+ADATA_OUT+ DINT)); /*JUMP IF DATA OUT */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (TCB_OP+FIFO_0+ DI)); /*JUMP IF NO DATA IN FIFO */
+ map_addr +=2; /*This means ASYNC DATA IN */
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_IDO_STRT)); /*STOP AND INTERRUPT */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CPE_OP+ADATA_IN+DINT)); /*JUMP IF NOT DATA IN PHZ */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CPN_OP+AMSG_IN+ ST)); /*IF NOT MSG IN CHECK 4 DATA IN */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CRD_OP+SDATA+ 0x02)); /*SAVE DATA PTR MSG? */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (BRH_OP+NOT_EQ+ DC)); /*GO CHECK FOR DISCONNECT MSG */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_AR1)); /*SAVE DATA PTRS MSG */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CPN_OP+AMSG_IN+ ST)); /*IF NOT MSG IN CHECK DATA IN */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CRD_OP+SDATA+ 0x04)); /*DISCONNECT MSG? */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (BRH_OP+NOT_EQ+ UNKNWN));/*UNKNOWN MSG */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_BUCKET));/*XFER DISCONNECT MSG */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_ITAR_DISC));/*STOP AND INTERRUPT */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CPN_OP+ASTATUS+ UNKNWN));/*JUMP IF NOT STATUS PHZ. */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_AR0)); /*GET STATUS BYTE */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CPN_OP+AMSG_IN+ CC)); /*ERROR IF NOT MSG IN PHZ */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (CRD_OP+SDATA+ 0x00)); /*CHECK FOR CMD COMPLETE MSG. */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (BRH_OP+NOT_EQ+ CC)); /*ERROR IF NOT CMD COMPLETE MSG. */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (MRR_OP+SDATA+ D_BUCKET));/*GET CMD COMPLETE MSG */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_ICMD_COMP));/*END OF COMMAND */
+ map_addr +=2;
+
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_IUNKWN)); /*RECEIVED UNKNOWN MSG BYTE */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_ITICKLE)); /*BIOS Tickled the Mgr */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_IRFAIL)); /*EXPECTED ID/TAG MESSAGES AND */
+ map_addr +=2; /* DIDN'T GET ONE */
+ WRW_HARPOON(map_addr, (CRR_OP+AR3+ S_IDREG)); /* comp SCSI SEL ID & AR3*/
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (BRH_OP+EQUAL+ 0x00)); /*SEL ID OK then Continue. */
+ map_addr +=2;
+ WRW_HARPOON(map_addr, (SSI_OP+ SSI_INO_CC)); /*NO COMMAND COMPLETE AFTER STATUS */
+
+
+
+ SGRAM_ACCESS(p_port);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Auto Command Complete
+ *
+ * Description: Post command back to host and find another command
+ * to execute.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void autoCmdCmplt(USHORT p_port, UCHAR p_card)
+#else
+void autoCmdCmplt(ULONG p_port, UCHAR p_card)
+#endif
+{
+ PSCCB currSCCB;
+ UCHAR status_byte;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+
+ status_byte = RD_HARPOON(p_port+hp_gp_reg_0);
+
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = FALSE;
+
+ if (status_byte != SSGOOD) {
+
+ if (status_byte == SSQ_FULL) {
+
+
+ if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+ }
+ else
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+ if(currSCCB->Sccb_tag)
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ }else
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+
+ currSCCB->Sccb_MGRFlags |= F_STATUSLOADED;
+
+ queueSelectFail(&BL_Card[p_card],p_card);
+
+ return;
+ }
+
+ if(currSCCB->Sccb_scsistat == SELECT_SN_ST)
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus |=
+ (UCHAR)SYNC_SUPPORTED;
+
+ sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_SYNC_MASK;
+ BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+ }
+ else
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+ if(currSCCB->Sccb_tag)
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ }else
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ return;
+
+ }
+
+ if(currSCCB->Sccb_scsistat == SELECT_WN_ST)
+ {
+
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus =
+ (sccbMgrTbl[p_card][currSCCB->TargID].
+ TarStatus & ~WIDE_ENABLED) | WIDE_NEGOCIATED;
+
+ sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue &= ~EE_WIDE_SCSI;
+ BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+ }
+ else
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+ if(currSCCB->Sccb_tag)
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ }else
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ return;
+
+ }
+
+ if (status_byte == SSCHECK)
+ {
+ if(BL_Card[p_card].globalFlags & F_DO_RENEGO)
+ {
+ if (sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue & EE_SYNC_MASK)
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus &= ~TAR_SYNC_MASK;
+ }
+ if (sccbMgrTbl[p_card][currSCCB->TargID].TarEEValue & EE_WIDE_SCSI)
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarStatus &= ~TAR_WIDE_MASK;
+ }
+ }
+ }
+
+ if (!(currSCCB->Sccb_XferState & F_AUTO_SENSE)) {
+
+ currSCCB->SccbStatus = SCCB_ERROR;
+ currSCCB->TargetStatus = status_byte;
+
+ if (status_byte == SSCHECK) {
+
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA
+ = TRUE;
+
+
+#if (FW_TYPE==_SCCB_MGR_)
+ if (currSCCB->RequestSenseLength != NO_AUTO_REQUEST_SENSE) {
+
+ if (currSCCB->RequestSenseLength == 0)
+ currSCCB->RequestSenseLength = 14;
+
+ ssenss(&BL_Card[p_card]);
+ BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+ }
+ else
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+ if(currSCCB->Sccb_tag)
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ }else
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ return;
+ }
+#else
+ if ((!(currSCCB->Sccb_ucb_ptr->UCB_opcode & OPC_NO_AUTO_SENSE)) &&
+ (currSCCB->RequestSenseLength))
+ {
+ ssenss(&BL_Card[p_card]);
+ BL_Card[p_card].globalFlags |= F_NEW_SCCB_CMD;
+
+ if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = TRUE;
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[currSCCB->Lun]] = NULL;
+ }
+ else
+ {
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = TRUE;
+ if(currSCCB->Sccb_tag)
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[currSCCB->Sccb_tag] = NULL;
+ }else
+ {
+ if(BL_Card[p_card].discQCount != 0)
+ BL_Card[p_card].discQCount--;
+ BL_Card[p_card].discQ_Tbl[sccbMgrTbl[p_card][currSCCB->TargID].LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+ return;
+ }
+
+#endif
+ }
+ }
+ }
+
+
+ if((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((sccbMgrTbl[p_card][currSCCB->TargID].TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[currSCCB->Lun] = FALSE;
+ else
+ sccbMgrTbl[p_card][currSCCB->TargID].TarLUNBusy[0] = FALSE;
+
+
+ queueCmdComplete(&BL_Card[p_card], currSCCB, p_card);
+}
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: busmstr.c $
+ *
+ * Description: Functions to start, stop, and abort BusMaster operations.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+*/
+
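+/* Iteration counts for the software time-out loops used when polling the
+   bus master status registers. */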
+#define SHORT_WAIT 0x0000000F
+#define LONG_WAIT 0x0000FFFFL
+
+#if defined(BUGBUG)
+void Debug_Load(UCHAR p_card, UCHAR p_bug_data);
+#endif
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Data Transfer Processor
+ *
+ * Description: This routine performs two tasks.
+ * (1) It starts the data transfer by calling the
+ * HOST_DATA_XFER_START function. (2) Once the transfer is
+ * started, its behaviour depends on the transfer mode:
+ * Scatter/Gather or non-Scatter/Gather. In non-Scatter/Gather
+ * mode, it checks Sccb_MGRFlag (the F_HOST_XFER_ACT bit) to
+ * detect data transfer done. In Scatter/Gather mode, it
+ * checks the bus master command-complete and dual-rank-busy
+ * bits to keep chaining S/G transfer commands, and likewise
+ * checks Sccb_MGRFlag (the F_HOST_XFER_ACT bit) to detect
+ * data transfer done.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void dataXferProcessor(USHORT port, PSCCBcard pCurrCard)
+#else
+void dataXferProcessor(ULONG port, PSCCBcard pCurrCard)
+#endif
+{
+ PSCCB currSCCB;
+
+ currSCCB = pCurrCard->currentSCCB;
+
+ if (currSCCB->Sccb_XferState & F_SG_XFER)
+ {
+ if (pCurrCard->globalFlags & F_HOST_XFER_ACT)
+
+ {
+ currSCCB->Sccb_sgseg += (UCHAR)SG_BUF_CNT;
+ currSCCB->Sccb_SGoffset = 0x00;
+ }
+ pCurrCard->globalFlags |= F_HOST_XFER_ACT;
+
+ busMstrSGDataXferStart(port, currSCCB);
+ }
+
+ else
+ {
+ if (!(pCurrCard->globalFlags & F_HOST_XFER_ACT))
+ {
+ pCurrCard->globalFlags |= F_HOST_XFER_ACT;
+
+ busMstrDataXferStart(port, currSCCB);
+ }
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Scatter Gather Data Transfer Start
+ *
+ * Description: Load the next batch of scatter/gather elements into the
+ * automation RAM and start the bus master data transfer.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void busMstrSGDataXferStart(USHORT p_port, PSCCB pcurrSCCB)
+#else
+void busMstrSGDataXferStart(ULONG p_port, PSCCB pcurrSCCB)
+#endif
+{
+ ULONG count,addr,tmpSGCnt;
+ UINT sg_index;
+ UCHAR sg_count, i;
+#if defined(DOS)
+ USHORT reg_offset;
+#else
+ ULONG reg_offset;
+#endif
+
+
+ if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+ count = ((ULONG) HOST_RD_CMD)<<24;
+ }
+
+ else {
+ count = ((ULONG) HOST_WRT_CMD)<<24;
+ }
+
+ sg_count = 0;
+ tmpSGCnt = 0;
+ sg_index = pcurrSCCB->Sccb_sgseg;
+ reg_offset = hp_aramBase;
+
+
+ i = (UCHAR) (RD_HARPOON(p_port+hp_page_ctrl) & ~(SGRAM_ARAM|SCATTER_EN));
+
+
+ WR_HARPOON(p_port+hp_page_ctrl, i);
+
+ while ((sg_count < (UCHAR)SG_BUF_CNT) &&
+ ((ULONG)(sg_index * (UINT)SG_ELEMENT_SIZE) < pcurrSCCB->DataLength) ) {
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ tmpSGCnt += *(((ULONG far *)pcurrSCCB->DataPointer)+
+ (sg_index * 2));
+
+ count |= *(((ULONG far *)pcurrSCCB->DataPointer)+
+ (sg_index * 2));
+
+ addr = *(((ULONG far *)pcurrSCCB->DataPointer)+
+ ((sg_index * 2) + 1));
+
+#else
+ tmpSGCnt += *(((ULONG *)pcurrSCCB->DataPointer)+
+ (sg_index * 2));
+
+ count |= *(((ULONG *)pcurrSCCB->DataPointer)+
+ (sg_index * 2));
+
+ addr = *(((ULONG *)pcurrSCCB->DataPointer)+
+ ((sg_index * 2) + 1));
+#endif
+
+
+ if ((!sg_count) && (pcurrSCCB->Sccb_SGoffset)) {
+
+ addr += ((count & 0x00FFFFFFL) - pcurrSCCB->Sccb_SGoffset);
+ count = (count & 0xFF000000L) | pcurrSCCB->Sccb_SGoffset;
+
+ tmpSGCnt = count & 0x00FFFFFFL;
+ }
+
+ WR_HARP32(p_port,reg_offset,addr);
+ reg_offset +=4;
+
+ WR_HARP32(p_port,reg_offset,count);
+ reg_offset +=4;
+
+ count &= 0xFF000000L;
+ sg_index++;
+ sg_count++;
+
+ } /*End While */
+
+ pcurrSCCB->Sccb_XferCnt = tmpSGCnt;
+
+ WR_HARPOON(p_port+hp_sg_addr,(sg_count<<4));
+
+ if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+ WR_HARP32(p_port,hp_xfercnt_0,tmpSGCnt);
+
+
+ WR_HARPOON(p_port+hp_portctrl_0,(DMA_PORT | SCSI_PORT | SCSI_INBIT));
+ WR_HARPOON(p_port+hp_scsisig, S_DATAI_PH);
+ }
+
+ else {
+
+
+ if ((!(RD_HARPOON(p_port+hp_synctarg_0) & NARROW_SCSI)) &&
+ (tmpSGCnt & 0x000000001))
+ {
+
+ pcurrSCCB->Sccb_XferState |= F_ODD_BALL_CNT;
+ tmpSGCnt--;
+ }
+
+
+ WR_HARP32(p_port,hp_xfercnt_0,tmpSGCnt);
+
+ WR_HARPOON(p_port+hp_portctrl_0,(SCSI_PORT | DMA_PORT | DMA_RD));
+ WR_HARPOON(p_port+hp_scsisig, S_DATAO_PH);
+ }
+
+
+ WR_HARPOON(p_port+hp_page_ctrl, (UCHAR) (i | SCATTER_EN));
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Data Transfer Start
+ *
+ * Description: Program the transfer count and host address and start a
+ * non-scatter/gather bus master data transfer.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void busMstrDataXferStart(USHORT p_port, PSCCB pcurrSCCB)
+#else
+void busMstrDataXferStart(ULONG p_port, PSCCB pcurrSCCB)
+#endif
+{
+ ULONG addr,count;
+
+ if (!(pcurrSCCB->Sccb_XferState & F_AUTO_SENSE)) {
+
+ count = pcurrSCCB->Sccb_XferCnt;
+
+ addr = (ULONG) pcurrSCCB->DataPointer + pcurrSCCB->Sccb_ATC;
+ }
+
+ else {
+ addr = pcurrSCCB->SensePointer;
+ count = pcurrSCCB->RequestSenseLength;
+
+ }
+
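+ /* Program the bus master address and transfer-count registers: the DOS
+    build writes the byte-wide registers directly with port I/O, the other
+    builds use the HP_SETUP_ADDR_CNT macro. */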
+#if defined(DOS)
+ asm { mov dx,p_port;
+ mov ax,word ptr count;
+ add dx,hp_xfer_cnt_lo;
+ out dx,al;
+ inc dx;
+ xchg ah,al
+ out dx,al;
+ inc dx;
+ mov ax,word ptr count+2;
+ out dx,al;
+ inc dx;
+ inc dx;
+ mov ax,word ptr addr;
+ out dx,al;
+ inc dx;
+ xchg ah,al
+ out dx,al;
+ inc dx;
+ mov ax,word ptr addr+2;
+ out dx,al;
+ inc dx;
+ xchg ah,al
+ out dx,al;
+ }
+
+ WR_HARP32(p_port,hp_xfercnt_0,count);
+
+#else
+ HP_SETUP_ADDR_CNT(p_port,addr,count);
+#endif
+
+
+ if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) {
+
+ WR_HARPOON(p_port+hp_portctrl_0,(DMA_PORT | SCSI_PORT | SCSI_INBIT));
+ WR_HARPOON(p_port+hp_scsisig, S_DATAI_PH);
+
+ WR_HARPOON(p_port+hp_xfer_cmd,
+ (XFER_DMA_HOST | XFER_HOST_AUTO | XFER_DMA_8BIT));
+ }
+
+ else {
+
+ WR_HARPOON(p_port+hp_portctrl_0,(SCSI_PORT | DMA_PORT | DMA_RD));
+ WR_HARPOON(p_port+hp_scsisig, S_DATAO_PH);
+
+ WR_HARPOON(p_port+hp_xfer_cmd,
+ (XFER_HOST_DMA | XFER_HOST_AUTO | XFER_DMA_8BIT));
+
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMaster Timeout Handler
+ *
+ * Description: This function is called after a bus master command busy
+ * time-out is detected. It issues a halt to the state machine
+ * and waits, under a software time-out, for command busy to
+ * clear. If command busy is still asserted at the end of the
+ * time-out, it issues a hard abort with another software
+ * time-out. If command busy is still asserted after the hard
+ * abort, it gives up.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+UCHAR busMstrTimeOut(USHORT p_port)
+#else
+UCHAR busMstrTimeOut(ULONG p_port)
+#endif
+{
+ ULONG timeout;
+
+ timeout = LONG_WAIT;
+
+ WR_HARPOON(p_port+hp_sys_ctrl, HALT_MACH);
+
+ while ((!(RD_HARPOON(p_port+hp_ext_status) & CMD_ABORTED)) && timeout--) {}
+
+
+
+ if (RD_HARPOON(p_port+hp_ext_status) & BM_CMD_BUSY) {
+ WR_HARPOON(p_port+hp_sys_ctrl, HARD_ABORT);
+
+ timeout = LONG_WAIT;
+ while ((RD_HARPOON(p_port+hp_ext_status) & BM_CMD_BUSY) && timeout--) {}
+ }
+
+ RD_HARPOON(p_port+hp_int_status); /*Clear command complete */
+
+ if (RD_HARPOON(p_port+hp_ext_status) & BM_CMD_BUSY) {
+ return(TRUE);
+ }
+
+ else {
+ return(FALSE);
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Host Data Transfer Abort
+ *
+ * Description: Abort any in-progress transfer.
+ *
+ *---------------------------------------------------------------------*/
+#if defined(DOS)
+void hostDataXferAbort(USHORT port, UCHAR p_card, PSCCB pCurrSCCB)
+#else
+void hostDataXferAbort(ULONG port, UCHAR p_card, PSCCB pCurrSCCB)
+#endif
+{
+
+ ULONG timeout;
+ ULONG remain_cnt;
+ UINT sg_ptr;
+
+ BL_Card[p_card].globalFlags &= ~F_HOST_XFER_ACT;
+
+ if (pCurrSCCB->Sccb_XferState & F_AUTO_SENSE) {
+
+
+ if (!(RD_HARPOON(port+hp_int_status) & INT_CMD_COMPL)) {
+
+ WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) | FLUSH_XFER_CNTR));
+ timeout = LONG_WAIT;
+
+ while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) && timeout--) {}
+
+ WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) & ~FLUSH_XFER_CNTR));
+
+ if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+ if (busMstrTimeOut(port)) {
+
+ if (pCurrSCCB->HostStatus == 0x00)
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+
+ }
+
+ if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS)
+
+ if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS)
+
+ if (pCurrSCCB->HostStatus == 0x00)
+
+ {
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+ WR_HARPOON(port+hp_dual_addr_lo,
+ RD_HARPOON(port+hp_ext_status));
+#endif
+ }
+ }
+ }
+ }
+
+ else if (pCurrSCCB->Sccb_XferCnt) {
+
+ if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
+
+
+ WR_HARPOON(port+hp_page_ctrl, (RD_HARPOON(port+hp_page_ctrl) &
+ ~SCATTER_EN));
+
+ WR_HARPOON(port+hp_sg_addr,0x00);
+
+ sg_ptr = pCurrSCCB->Sccb_sgseg + SG_BUF_CNT;
+
+ if (sg_ptr > (UINT)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE)) {
+
+ sg_ptr = (UINT)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE);
+ }
+
+ remain_cnt = pCurrSCCB->Sccb_XferCnt;
+
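+ /* Walk backwards through the scatter/gather list, subtracting element
+    lengths from the untransferred count, to locate the element at which
+    the transfer stopped. */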
+ while (remain_cnt < 0x01000000L) {
+
+ sg_ptr--;
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ if (remain_cnt > (ULONG)(*(((ULONG far *)pCurrSCCB->
+ DataPointer) + (sg_ptr * 2)))) {
+
+ remain_cnt -= (ULONG)(*(((ULONG far *)pCurrSCCB->
+ DataPointer) + (sg_ptr * 2)));
+ }
+
+#else
+ if (remain_cnt > (ULONG)(*(((ULONG *)pCurrSCCB->
+ DataPointer) + (sg_ptr * 2)))) {
+
+ remain_cnt -= (ULONG)(*(((ULONG *)pCurrSCCB->
+ DataPointer) + (sg_ptr * 2)));
+ }
+#endif
+
+ else {
+
+ break;
+ }
+ }
+
+
+
+ if (remain_cnt < 0x01000000L) {
+
+
+ pCurrSCCB->Sccb_SGoffset = remain_cnt;
+
+ pCurrSCCB->Sccb_sgseg = (USHORT)sg_ptr;
+
+
+ if ((ULONG)(sg_ptr * SG_ELEMENT_SIZE) == pCurrSCCB->DataLength
+ && (remain_cnt == 0))
+
+ pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+ }
+
+ else {
+
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_GROSS_FW_ERR;
+ }
+ }
+ }
+
+
+ if (!(pCurrSCCB->Sccb_XferState & F_HOST_XFER_DIR)) {
+
+
+ if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+ busMstrTimeOut(port);
+ }
+
+ else {
+
+ if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS) {
+
+ if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+ WR_HARPOON(port+hp_dual_addr_lo,
+ RD_HARPOON(port+hp_ext_status));
+#endif
+ }
+ }
+ }
+
+ }
+ }
+
+ else {
+
+
+ if ((RD_HARPOON(port+hp_fifo_cnt)) >= BM_THRESHOLD) {
+
+ timeout = SHORT_WAIT;
+
+ while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) &&
+ ((RD_HARPOON(port+hp_fifo_cnt)) >= BM_THRESHOLD) &&
+ timeout--) {}
+ }
+
+ if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+ WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) |
+ FLUSH_XFER_CNTR));
+
+ timeout = LONG_WAIT;
+
+ while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) &&
+ timeout--) {}
+
+ WR_HARPOON(port+hp_bm_ctrl, (RD_HARPOON(port+hp_bm_ctrl) &
+ ~FLUSH_XFER_CNTR));
+
+
+ if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+ }
+
+ busMstrTimeOut(port);
+ }
+ }
+
+ if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS) {
+
+ if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+ WR_HARPOON(port+hp_dual_addr_lo,
+ RD_HARPOON(port+hp_ext_status));
+#endif
+ }
+ }
+ }
+ }
+
+ }
+
+ else {
+
+
+ if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+ timeout = LONG_WAIT;
+
+ while ((RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) && timeout--) {}
+
+ if (RD_HARPOON(port+hp_ext_status) & BM_CMD_BUSY) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+ }
+
+ busMstrTimeOut(port);
+ }
+ }
+
+
+ if (RD_HARPOON(port+hp_int_status) & INT_EXT_STATUS) {
+
+ if (RD_HARPOON(port+hp_ext_status) & BAD_EXT_STATUS) {
+
+ if (pCurrSCCB->HostStatus == 0x00) {
+
+ pCurrSCCB->HostStatus = SCCB_BM_ERR;
+#if defined(BUGBUG)
+ WR_HARPOON(port+hp_dual_addr_lo,
+ RD_HARPOON(port+hp_ext_status));
+#endif
+ }
+ }
+
+ }
+
+ if (pCurrSCCB->Sccb_XferState & F_SG_XFER) {
+
+ WR_HARPOON(port+hp_page_ctrl, (RD_HARPOON(port+hp_page_ctrl) &
+ ~SCATTER_EN));
+
+ WR_HARPOON(port+hp_sg_addr,0x00);
+
+ pCurrSCCB->Sccb_sgseg += SG_BUF_CNT;
+
+ pCurrSCCB->Sccb_SGoffset = 0x00;
+
+
+ if ((ULONG)(pCurrSCCB->Sccb_sgseg * SG_ELEMENT_SIZE) >=
+ pCurrSCCB->DataLength) {
+
+ pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+
+ pCurrSCCB->Sccb_sgseg = (USHORT)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE);
+
+ }
+ }
+
+ else {
+
+ if (!(pCurrSCCB->Sccb_XferState & F_AUTO_SENSE))
+
+ pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED;
+ }
+ }
+
+ WR_HARPOON(port+hp_int_mask,(INT_CMD_COMPL | SCSI_INTERRUPT));
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Host Data Transfer Restart
+ *
+ * Description: Reset the available count due to a restore data
+ * pointers message.
+ *
+ *---------------------------------------------------------------------*/
+void hostDataXferRestart(PSCCB currSCCB)
+{
+ ULONG data_count;
+ UINT sg_index;
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ ULONG far *sg_ptr;
+#else
+ ULONG *sg_ptr;
+#endif
+
+ if (currSCCB->Sccb_XferState & F_SG_XFER) {
+
+ currSCCB->Sccb_XferCnt = 0;
+
+ sg_index = 0xffff; /*Index by long words into sg list. */
+ data_count = 0; /*Running count of SG xfer counts. */
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ sg_ptr = (ULONG far *)currSCCB->DataPointer;
+#else
+ sg_ptr = (ULONG *)currSCCB->DataPointer;
+#endif
+
+ while (data_count < currSCCB->Sccb_ATC) {
+
+ sg_index++;
+ data_count += *(sg_ptr+(sg_index * 2));
+ }
+
+ if (data_count == currSCCB->Sccb_ATC) {
+
+ currSCCB->Sccb_SGoffset = 0;
+ sg_index++;
+ }
+
+ else {
+ currSCCB->Sccb_SGoffset = data_count - currSCCB->Sccb_ATC;
+ }
+
+ currSCCB->Sccb_sgseg = (USHORT)sg_index;
+ }
+
+ else {
+ currSCCB->Sccb_XferCnt = currSCCB->DataLength - currSCCB->Sccb_ATC;
+ }
+}
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: scam.c $
+ *
+ * Description: Functions relating to handling of the SCAM selection
+ * and the determination of the SCSI IDs to be assigned
+ * to all prospective SCSI targets.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <eeprom.h>*/
+/*#include <harpoon.h>*/
+
+
+
+/*
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBSCAM_INFO scamInfo[MAX_SCSI_TAR];
+extern NVRAMINFO nvRamInfo[MAX_MB_CARDS];
+#if defined(DOS) || defined(OS2)
+extern UCHAR temp_id_string[ID_STRING_LENGTH];
+#endif
+extern UCHAR scamHAString[];
+*/
+/*---------------------------------------------------------------------
+ *
+ * Function: scini
+ *
+ * Description: Set up all data structures necessary for SCAM selection.
+ *
+ *---------------------------------------------------------------------*/
+
+void scini(UCHAR p_card, UCHAR p_our_id, UCHAR p_power_up)
+{
+
+#if defined(SCAM_LEV_2)
+ UCHAR loser,assigned_id;
+#endif
+#if defined(DOS)
+
+ USHORT p_port;
+#else
+ ULONG p_port;
+#endif
+
+ UCHAR i,k,ScamFlg ;
+ PSCCBcard currCard;
+ PNVRamInfo pCurrNvRam;
+
+ currCard = &BL_Card[p_card];
+ p_port = currCard->ioPort;
+ pCurrNvRam = currCard->pNvRamInfo;
+
+
+ if(pCurrNvRam){
+ ScamFlg = pCurrNvRam->niScamConf;
+ i = pCurrNvRam->niSysConf;
+ }
+ else{
+ ScamFlg = (UCHAR) utilEERead(p_port, SCAM_CONFIG/2);
+ i = (UCHAR)(utilEERead(p_port, (SYSTEM_CONFIG/2)));
+ }
+ if(!(i & 0x02)) /* check whether the reset-bus AutoSCSI parameter is set */
+ return;
+
+ inisci(p_card,p_port, p_our_id);
+
+ /* Force a 1-second wait after SCSI bus reset. Some SCAM device firmware
+ is too slow to return to SCAM selection otherwise. */
+
+ /* if (p_power_up)
+ Wait1Second(p_port);
+ else
+ Wait(p_port, TO_250ms); */
+
+ Wait1Second(p_port);
+
+#if defined(SCAM_LEV_2)
+
+ if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2))
+ {
+ while (!(scarb(p_port,INIT_SELTD))) {}
+
+ scsel(p_port);
+
+ do {
+ scxferc(p_port,SYNC_PTRN);
+ scxferc(p_port,DOM_MSTR);
+ loser = scsendi(p_port,&scamInfo[p_our_id].id_string[0]);
+ } while ( loser == 0xFF );
+
+ scbusf(p_port);
+
+ if ((p_power_up) && (!loser))
+ {
+ sresb(p_port,p_card);
+ Wait(p_port, TO_250ms);
+
+ while (!(scarb(p_port,INIT_SELTD))) {}
+
+ scsel(p_port);
+
+ do {
+ scxferc(p_port, SYNC_PTRN);
+ scxferc(p_port, DOM_MSTR);
+ loser = scsendi(p_port,&scamInfo[p_our_id].
+ id_string[0]);
+ } while ( loser == 0xFF );
+
+ scbusf(p_port);
+ }
+ }
+
+ else
+ {
+ loser = FALSE;
+ }
+
+
+ if (!loser)
+ {
+
+#endif /* SCAM_LEV_2 */
+
+ scamInfo[p_our_id].state = ID_ASSIGNED;
+
+
+ if (ScamFlg & SCAM_ENABLED)
+ {
+
+ for (i=0; i < MAX_SCSI_TAR; i++)
+ {
+ if ((scamInfo[i].state == ID_UNASSIGNED) ||
+ (scamInfo[i].state == ID_UNUSED))
+ {
+ if (scsell(p_port,i))
+ {
+ scamInfo[i].state = LEGACY;
+ if ((scamInfo[i].id_string[0] != 0xFF) ||
+ (scamInfo[i].id_string[1] != 0xFA))
+ {
+
+ scamInfo[i].id_string[0] = 0xFF;
+ scamInfo[i].id_string[1] = 0xFA;
+ if(pCurrNvRam == NULL)
+ currCard->globalFlags |= F_UPDATE_EEPROM;
+ }
+ }
+ }
+ }
+
+ sresb(p_port,p_card);
+ Wait1Second(p_port);
+ while (!(scarb(p_port,INIT_SELTD))) {}
+ scsel(p_port);
+ scasid(p_card, p_port);
+ }
+
+#if defined(SCAM_LEV_2)
+
+ }
+
+ else if ((loser) && (ScamFlg & SCAM_ENABLED))
+ {
+ scamInfo[p_our_id].id_string[0] = SLV_TYPE_CODE0;
+ assigned_id = FALSE;
+ scwtsel(p_port);
+
+ do {
+ while (scxferc(p_port,0x00) != SYNC_PTRN) {}
+
+ i = scxferc(p_port,0x00);
+ if (i == ASSIGN_ID)
+ {
+ if (!(scsendi(p_port,&scamInfo[p_our_id].id_string[0])))
+ {
+ i = scxferc(p_port,0x00);
+ if (scvalq(i))
+ {
+ k = scxferc(p_port,0x00);
+
+ if (scvalq(k))
+ {
+ currCard->ourId =
+ ((UCHAR)(i<<3)+(k & (UCHAR)7)) & (UCHAR) 0x3F;
+ inisci(p_card, p_port, p_our_id);
+ scamInfo[currCard->ourId].state = ID_ASSIGNED;
+ scamInfo[currCard->ourId].id_string[0]
+ = SLV_TYPE_CODE0;
+ assigned_id = TRUE;
+ }
+ }
+ }
+ }
+
+ else if (i == SET_P_FLAG)
+ {
+ if (!(scsendi(p_port,
+ &scamInfo[p_our_id].id_string[0])))
+ scamInfo[p_our_id].id_string[0] |= 0x80;
+ }
+ }while (!assigned_id);
+
+ while (scxferc(p_port,0x00) != CFG_CMPLT) {}
+ }
+
+#endif /* SCAM_LEV_2 */
+ if (ScamFlg & SCAM_ENABLED)
+ {
+ scbusf(p_port);
+ if (currCard->globalFlags & F_UPDATE_EEPROM)
+ {
+ scsavdi(p_card, p_port);
+ currCard->globalFlags &= ~F_UPDATE_EEPROM;
+ }
+ }
+
+
+#if defined(DOS)
+ for (i=0; i < MAX_SCSI_TAR; i++)
+ {
+ if (((ScamFlg & SCAM_ENABLED) && (scamInfo[i].state == LEGACY))
+ || (i != p_our_id))
+ {
+ scsellDOS(p_port,i);
+ }
+ }
+#endif
+
+/*
+ for (i=0,k=0; i < MAX_SCSI_TAR; i++)
+ {
+ if ((scamInfo[i].state == ID_ASSIGNED) ||
+ (scamInfo[i].state == LEGACY))
+ k++;
+ }
+
+ if (k==2)
+ currCard->globalFlags |= F_SINGLE_DEVICE;
+ else
+ currCard->globalFlags &= ~F_SINGLE_DEVICE;
+*/
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scarb
+ *
+ * Description: Gain control of the bus and wait the SCAM selection time (250ms).
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+int scarb(USHORT p_port, UCHAR p_sel_type)
+#else
+int scarb(ULONG p_port, UCHAR p_sel_type)
+#endif
+{
+ if (p_sel_type == INIT_SELTD)
+ {
+
+ while (RD_HARPOON(p_port+hp_scsisig) & (SCSI_SEL | SCSI_BSY)) {}
+
+
+ if (RD_HARPOON(p_port+hp_scsisig) & SCSI_SEL)
+ return(FALSE);
+
+ if (RD_HARPOON(p_port+hp_scsidata_0) != 00)
+ return(FALSE);
+
+ WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) | SCSI_BSY));
+
+ if (RD_HARPOON(p_port+hp_scsisig) & SCSI_SEL) {
+
+ WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) &
+ ~SCSI_BSY));
+ return(FALSE);
+ }
+
+
+ WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) | SCSI_SEL));
+
+ if (RD_HARPOON(p_port+hp_scsidata_0) != 00) {
+
+ WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) &
+ ~(SCSI_BSY | SCSI_SEL)));
+ return(FALSE);
+ }
+ }
+
+
+ WR_HARPOON(p_port+hp_clkctrl_0, (RD_HARPOON(p_port+hp_clkctrl_0)
+ & ~ACTdeassert));
+ WR_HARPOON(p_port+hp_scsireset, SCAM_EN);
+ WR_HARPOON(p_port+hp_scsidata_0, 0x00);
+#if defined(WIDE_SCSI)
+ WR_HARPOON(p_port+hp_scsidata_1, 0x00);
+#endif
+ WR_HARPOON(p_port+hp_portctrl_0, SCSI_BUS_EN);
+
+ WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig) | SCSI_MSG));
+
+ WR_HARPOON(p_port+hp_scsisig, (RD_HARPOON(p_port+hp_scsisig)
+ & ~SCSI_BSY));
+
+ Wait(p_port,TO_250ms);
+
+ return(TRUE);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scbusf
+ *
+ * Description: Release the SCSI bus and disable SCAM selection.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scbusf(USHORT p_port)
+#else
+void scbusf(ULONG p_port)
+#endif
+{
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE));
+
+
+ WR_HARPOON(p_port+hp_scsidata_0, 0x00);
+
+ WR_HARPOON(p_port+hp_portctrl_0, (RD_HARPOON(p_port+hp_portctrl_0)
+ & ~SCSI_BUS_EN));
+
+ WR_HARPOON(p_port+hp_scsisig, 0x00);
+
+
+ WR_HARPOON(p_port+hp_scsireset, (RD_HARPOON(p_port+hp_scsireset)
+ & ~SCAM_EN));
+
+ WR_HARPOON(p_port+hp_clkctrl_0, (RD_HARPOON(p_port+hp_clkctrl_0)
+ | ACTdeassert));
+
+#if defined(SCAM_LEV_2)
+ WRW_HARPOON((p_port+hp_intstat), (BUS_FREE | AUTO_INT | SCAM_SEL));
+#else
+ WRW_HARPOON((p_port+hp_intstat), (BUS_FREE | AUTO_INT));
+#endif
+
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scasid
+ *
+ * Description: Assign an ID to all the SCAM devices.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scasid(UCHAR p_card, USHORT p_port)
+#else
+void scasid(UCHAR p_card, ULONG p_port)
+#endif
+{
+#if defined(DOS) || defined(OS2)
+ /* Use an external buffer defined in global data space instead of stack
+ space. The DOS box under Windows 95 does not work in TINY mode: the OS
+ does not initialize SS equal to DS, so an array allocated on the stack
+ does not get accessed correctly. */
+#else
+ UCHAR temp_id_string[ID_STRING_LENGTH];
+#endif
+
+ UCHAR i,k,scam_id;
+ UCHAR crcBytes[3];
+ PNVRamInfo pCurrNvRam;
+ ushort_ptr pCrcBytes;
+
+ pCurrNvRam = BL_Card[p_card].pNvRamInfo;
+
+ i=FALSE;
+
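+ /* Run the SCAM isolation sequence repeatedly, assigning one ID per pass,
+    until no device responds to the isolation phase. */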
+ while (!i)
+ {
+
+ for (k=0; k < ID_STRING_LENGTH; k++)
+ {
+ temp_id_string[k] = (UCHAR) 0x00;
+ }
+
+ scxferc(p_port,SYNC_PTRN);
+ scxferc(p_port,ASSIGN_ID);
+
+ if (!(sciso(p_port,&temp_id_string[0])))
+ {
+ if(pCurrNvRam){
+ pCrcBytes = (ushort_ptr)&crcBytes[0];
+ *pCrcBytes = CalcCrc16(&temp_id_string[0]);
+ crcBytes[2] = CalcLrc(&temp_id_string[0]);
+ temp_id_string[1] = crcBytes[2];
+ temp_id_string[2] = crcBytes[0];
+ temp_id_string[3] = crcBytes[1];
+ for(k = 4; k < ID_STRING_LENGTH; k++)
+ temp_id_string[k] = (UCHAR) 0x00;
+ }
+ i = scmachid(p_card,temp_id_string);
+
+ if (i == CLR_PRIORITY)
+ {
+ scxferc(p_port,MISC_CODE);
+ scxferc(p_port,CLR_P_FLAG);
+ i = FALSE; /*Not the last ID yet. */
+ }
+
+ else if (i != NO_ID_AVAIL)
+ {
+ if (i < 8 )
+ scxferc(p_port,ID_0_7);
+ else
+ scxferc(p_port,ID_8_F);
+
+ scam_id = (i & (UCHAR) 0x07);
+
+
+ for (k=1; k < 0x08; k <<= 1)
+ if (!( k & i ))
+ scam_id += 0x08; /*Count number of zeros in DB0-3. */
+
+ scxferc(p_port,scam_id);
+
+ i = FALSE; /*Not the last ID yet. */
+ }
+ }
+
+ else
+ {
+ i = TRUE;
+ }
+
+ } /*End while */
+
+ scxferc(p_port,SYNC_PTRN);
+ scxferc(p_port,CFG_CMPLT);
+}
+
+
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsel
+ *
+ * Description: Select all the SCAM devices.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scsel(USHORT p_port)
+#else
+void scsel(ULONG p_port)
+#endif
+{
+
+ WR_HARPOON(p_port+hp_scsisig, SCSI_SEL);
+ scwiros(p_port, SCSI_MSG);
+
+ WR_HARPOON(p_port+hp_scsisig, (SCSI_SEL | SCSI_BSY));
+
+
+ WR_HARPOON(p_port+hp_scsisig, (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+ WR_HARPOON(p_port+hp_scsidata_0, (UCHAR)(RD_HARPOON(p_port+hp_scsidata_0) |
+ (UCHAR)(BIT(7)+BIT(6))));
+
+
+ WR_HARPOON(p_port+hp_scsisig, (SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+ scwiros(p_port, SCSI_SEL);
+
+ WR_HARPOON(p_port+hp_scsidata_0, (UCHAR)(RD_HARPOON(p_port+hp_scsidata_0) &
+ ~(UCHAR)BIT(6)));
+ scwirod(p_port, BIT(6));
+
+ WR_HARPOON(p_port+hp_scsisig, (SCSI_SEL | SCSI_BSY | SCSI_IOBIT | SCSI_CD));
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scxferc
+ *
+ * Description: Handshake the p_data (DB4-0) across the bus.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR scxferc(USHORT p_port, UCHAR p_data)
+#else
+UCHAR scxferc(ULONG p_port, UCHAR p_data)
+#endif
+{
+ UCHAR curr_data, ret_data;
+
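+ /* One SCAM transfer cycle: drive the data on DB4-0 and sequence the
+    DB7/DB6/DB5 handshake lines, waiting (wired-OR) for every device to
+    release each handshake bit before advancing. */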
+ curr_data = p_data | BIT(7) | BIT(5); /*Start with DB7 & DB5 asserted. */
+
+ WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+ curr_data &= ~BIT(7);
+
+ WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+ scwirod(p_port,BIT(7)); /*Wait for DB7 to be released. */
+ while (!(RD_HARPOON(p_port+hp_scsidata_0) & BIT(5)));
+
+ ret_data = (RD_HARPOON(p_port+hp_scsidata_0) & (UCHAR) 0x1F);
+
+ curr_data |= BIT(6);
+
+ WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+ curr_data &= ~BIT(5);
+
+ WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+ scwirod(p_port,BIT(5)); /*Wait for DB5 to be released. */
+
+ curr_data &= ~(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0)); /*Release data bits */
+ curr_data |= BIT(7);
+
+ WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+ curr_data &= ~BIT(6);
+
+ WR_HARPOON(p_port+hp_scsidata_0, curr_data);
+
+ scwirod(p_port,BIT(6)); /*Wait for DB6 to be released. */
+
+ return(ret_data);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsendi
+ *
+ * Description: Transfer our Identification string to determine if we
+ * will be the dominant master.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR scsendi(USHORT p_port, UCHAR p_id_string[])
+#else
+UCHAR scsendi(ULONG p_port, UCHAR p_id_string[])
+#endif
+{
+ UCHAR ret_data,byte_cnt,bit_cnt,defer;
+
+ defer = FALSE;
+
+ for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
+
+ for (bit_cnt = 0x80; bit_cnt != 0 ; bit_cnt >>= 1) {
+
+ if (defer)
+ ret_data = scxferc(p_port,00);
+
+ else if (p_id_string[byte_cnt] & bit_cnt)
+
+ ret_data = scxferc(p_port,02);
+
+ else {
+
+ ret_data = scxferc(p_port,01);
+ if (ret_data & 02)
+ defer = TRUE;
+ }
+
+ if ((ret_data & 0x1C) == 0x10)
+ return(0x00); /*End of isolation stage, we won! */
+
+ if (ret_data & 0x1C)
+ return(0xFF);
+
+ if ((defer) && (!(ret_data & 0x1F)))
+ return(0x01); /*End of isolation stage, we lost. */
+
+ } /*bit loop */
+
+ } /*byte loop */
+
+ if (defer)
+ return(0x01); /*We lost */
+ else
+ return(0); /*We WON! Yeeessss! */
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: sciso
+ *
+ * Description: Transfer the Identification string.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR sciso(USHORT p_port, UCHAR p_id_string[])
+#else
+UCHAR sciso(ULONG p_port, UCHAR p_id_string[])
+#endif
+{
+ UCHAR ret_data,the_data,byte_cnt,bit_cnt;
+
+ the_data = 0;
+
+ for (byte_cnt = 0; byte_cnt < ID_STRING_LENGTH; byte_cnt++) {
+
+ for (bit_cnt = 0; bit_cnt < 8; bit_cnt++) {
+
+ ret_data = scxferc(p_port,0);
+
+ if (ret_data & 0xFC)
+ return(0xFF);
+
+ else {
+
+ the_data <<= 1;
+ if (ret_data & BIT(1)) {
+ the_data |= 1;
+ }
+ }
+
+ if ((ret_data & 0x1F) == 0)
+ {
+/*
+ if(bit_cnt != 0 || bit_cnt != 8)
+ {
+ byte_cnt = 0;
+ bit_cnt = 0;
+ scxferc(p_port, SYNC_PTRN);
+ scxferc(p_port, ASSIGN_ID);
+ continue;
+ }
+*/
+ if (byte_cnt)
+ return(0x00);
+ else
+ return(0xFF);
+ }
+
+ } /*bit loop */
+
+ p_id_string[byte_cnt] = the_data;
+
+ } /*byte loop */
+
+ return(0);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scwirod
+ *
+ * Description: Sample the SCSI data bus making sure the signal has been
+ * deasserted for the correct number of consecutive samples.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scwirod(USHORT p_port, UCHAR p_data_bit)
+#else
+void scwirod(ULONG p_port, UCHAR p_data_bit)
+#endif
+{
+ UCHAR i;
+
+ i = 0;
+ while ( i < MAX_SCSI_TAR ) {
+
+ if (RD_HARPOON(p_port+hp_scsidata_0) & p_data_bit)
+
+ i = 0;
+
+ else
+
+ i++;
+
+ }
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scwiros
+ *
+ * Description: Sample the SCSI Signal lines making sure the signal has been
+ * deasserted for the correct number of consecutive samples.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scwiros(USHORT p_port, UCHAR p_data_bit)
+#else
+void scwiros(ULONG p_port, UCHAR p_data_bit)
+#endif
+{
+ UCHAR i;
+
+ i = 0;
+ while ( i < MAX_SCSI_TAR ) {
+
+ if (RD_HARPOON(p_port+hp_scsisig) & p_data_bit)
+
+ i = 0;
+
+ else
+
+ i++;
+
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scvalq
+ *
+ * Description: Make sure we received a valid data byte.
+ *
+ *---------------------------------------------------------------------*/
+
+UCHAR scvalq(UCHAR p_quintet)
+{
+ UCHAR count;
+
+ for (count=1; count < 0x08; count<<=1) {
+ if (!(p_quintet & count))
+ p_quintet -= 0x80;
+ }
+
+ if (p_quintet & 0x18)
+ return(FALSE);
+
+ else
+ return(TRUE);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsell
+ *
+ * Description: Select the specified device ID using a selection timeout
+ * less than 4ms. If somebody responds then it is a legacy
+ * drive and this ID must be marked as such.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+UCHAR scsell(USHORT p_port, UCHAR targ_id)
+#else
+UCHAR scsell(ULONG p_port, UCHAR targ_id)
+#endif
+{
+#if defined(DOS)
+ USHORT i;
+#else
+ ULONG i;
+#endif
+
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE));
+
+ ARAM_ACCESS(p_port);
+
+ WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) | SCAM_TIMER));
+ WR_HARPOON(p_port+hp_seltimeout,TO_4ms);
+
+
+ for (i = p_port+CMD_STRT; i < p_port+CMD_STRT+12; i+=2) {
+ WRW_HARPOON(i, (MPM_OP+ACOMMAND));
+ }
+ WRW_HARPOON(i, (BRH_OP+ALWAYS+ NP));
+
+ WRW_HARPOON((p_port+hp_intstat),
+ (RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT));
+
+ WR_HARPOON(p_port+hp_select_id, targ_id);
+
+ WR_HARPOON(p_port+hp_portctrl_0, SCSI_PORT);
+ WR_HARPOON(p_port+hp_autostart_3, (SELECT | CMD_ONLY_STRT));
+ WR_HARPOON(p_port+hp_scsictrl_0, (SEL_TAR | ENA_RESEL));
+
+
+ while (!(RDW_HARPOON((p_port+hp_intstat)) &
+ (RESET | PROG_HLT | TIMEOUT | AUTO_INT))) {}
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & RESET)
+ Wait(p_port, TO_250ms);
+
+ DISABLE_AUTO(p_port);
+
+ WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) & ~SCAM_TIMER));
+ WR_HARPOON(p_port+hp_seltimeout,TO_290ms);
+
+ SGRAM_ACCESS(p_port);
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & (RESET | TIMEOUT) ) {
+
+ WRW_HARPOON((p_port+hp_intstat),
+ (RESET | TIMEOUT | SEL | BUS_FREE | PHASE));
+
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+ return(FALSE); /*No legacy device */
+ }
+
+ else {
+
+ while(!(RDW_HARPOON((p_port+hp_intstat)) & BUS_FREE)) {
+ if (RD_HARPOON(p_port+hp_scsisig) & SCSI_REQ)
+ {
+ WR_HARPOON(p_port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ ACCEPT_MSG(p_port);
+ }
+ }
+
+ WRW_HARPOON((p_port+hp_intstat), CLR_ALL_INT_1);
+
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+ return(TRUE); /*Found one of them oldies! */
+ }
+}
+
+#if defined(DOS)
+/*---------------------------------------------------------------------
+ *
+ * Function: scsell for DOS
+ *
+ * Description: Select the specified device ID using a selection timeout
+ * of less than 2ms. This was specifically required to work
+ * around a problem with the Plextor 12X CD-ROM drive, which
+ * responded to Selection right at the end of the 4ms window
+ * and hung the system.
+ *
+ *---------------------------------------------------------------------*/
+
+UCHAR scsellDOS(USHORT p_port, UCHAR targ_id)
+{
+ USHORT i;
+
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) | G_INT_DISABLE));
+
+ ARAM_ACCESS(p_port);
+
+ WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) | SCAM_TIMER));
+ WR_HARPOON(p_port+hp_seltimeout,TO_2ms);
+
+
+ for (i = p_port+CMD_STRT; i < p_port+CMD_STRT+12; i+=2) {
+ WRW_HARPOON(i, (MPM_OP+ACOMMAND));
+ }
+ WRW_HARPOON(i, (BRH_OP+ALWAYS+ NP));
+
+ WRW_HARPOON((p_port+hp_intstat),
+ (RESET | TIMEOUT | SEL | BUS_FREE | AUTO_INT));
+
+ WR_HARPOON(p_port+hp_select_id, targ_id);
+
+ WR_HARPOON(p_port+hp_portctrl_0, SCSI_PORT);
+ WR_HARPOON(p_port+hp_autostart_3, (SELECT | CMD_ONLY_STRT));
+ WR_HARPOON(p_port+hp_scsictrl_0, (SEL_TAR | ENA_RESEL));
+
+
+ while (!(RDW_HARPOON((p_port+hp_intstat)) &
+ (RESET | PROG_HLT | TIMEOUT | AUTO_INT))) {}
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & RESET)
+ Wait(p_port, TO_250ms);
+
+ DISABLE_AUTO(p_port);
+
+ WR_HARPOON(p_port+hp_addstat,(RD_HARPOON(p_port+hp_addstat) & ~SCAM_TIMER));
+ WR_HARPOON(p_port+hp_seltimeout,TO_290ms);
+
+ SGRAM_ACCESS(p_port);
+
+ if (RDW_HARPOON((p_port+hp_intstat)) & (RESET | TIMEOUT) ) {
+
+ WRW_HARPOON((p_port+hp_intstat),
+ (RESET | TIMEOUT | SEL | BUS_FREE | PHASE));
+
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+ return(FALSE); /*No legacy device */
+ }
+
+ else {
+
+ while(!(RDW_HARPOON((p_port+hp_intstat)) & BUS_FREE)) {
+ if (RD_HARPOON(p_port+hp_scsisig) & SCSI_REQ)
+ {
+ WR_HARPOON(p_port+hp_scsisig, (SCSI_ACK + S_ILL_PH));
+ ACCEPT_MSG(p_port);
+ }
+ }
+
+ WRW_HARPOON((p_port+hp_intstat), CLR_ALL_INT_1);
+
+ WR_HARPOON(p_port+hp_page_ctrl,
+ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE));
+
+ return(TRUE); /*Found one of them oldies! */
+ }
+}
+#endif /* DOS */
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scwtsel
+ *
+ * Description: Wait to be selected by another SCAM initiator.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scwtsel(USHORT p_port)
+#else
+void scwtsel(ULONG p_port)
+#endif
+{
+ while(!(RDW_HARPOON((p_port+hp_intstat)) & SCAM_SEL)) {}
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: inisci
+ *
+ * Description: Set up the data structure with the info from the EEPROM.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void inisci(UCHAR p_card, USHORT p_port, UCHAR p_our_id)
+#else
+void inisci(UCHAR p_card, ULONG p_port, UCHAR p_our_id)
+#endif
+{
+ UCHAR i,k,max_id;
+ USHORT ee_data;
+ PNVRamInfo pCurrNvRam;
+
+ pCurrNvRam = BL_Card[p_card].pNvRamInfo;
+
+ if (RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_id = 0x08;
+
+ else
+ max_id = 0x10;
+
+ if(pCurrNvRam){
+ for(i = 0; i < max_id; i++){
+
+ for(k = 0; k < 4; k++)
+ scamInfo[i].id_string[k] = pCurrNvRam->niScamTbl[i][k];
+ for(k = 4; k < ID_STRING_LENGTH; k++)
+ scamInfo[i].id_string[k] = (UCHAR) 0x00;
+
+ if(scamInfo[i].id_string[0] == 0x00)
+ scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */
+ else
+ scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */
+
+ }
+ }else {
+ for (i=0; i < max_id; i++)
+ {
+ for (k=0; k < ID_STRING_LENGTH; k+=2)
+ {
+ ee_data = utilEERead(p_port, (USHORT)((EE_SCAMBASE/2) +
+ (USHORT) (i*((USHORT)ID_STRING_LENGTH/2)) + (USHORT)(k/2)));
+ scamInfo[i].id_string[k] = (UCHAR) ee_data;
+ ee_data >>= 8;
+ scamInfo[i].id_string[k+1] = (UCHAR) ee_data;
+ }
+
+ if ((scamInfo[i].id_string[0] == 0x00) ||
+ (scamInfo[i].id_string[0] == 0xFF))
+
+ scamInfo[i].state = ID_UNUSED; /*Default to unused ID. */
+
+ else
+ scamInfo[i].state = ID_UNASSIGNED; /*Default to unassigned ID. */
+
+ }
+ }
+ for(k = 0; k < ID_STRING_LENGTH; k++)
+ scamInfo[p_our_id].id_string[k] = scamHAString[k];
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scmachid
+ *
+ * Description: Match the Device ID string with our values stored in
+ * the EEPROM.
+ *
+ *---------------------------------------------------------------------*/
+
+UCHAR scmachid(UCHAR p_card, UCHAR p_id_string[])
+{
+
+ UCHAR i,k,match;
+
+
+ for (i=0; i < MAX_SCSI_TAR; i++) {
+
+#if !defined(SCAM_LEV_2)
+ if (scamInfo[i].state == ID_UNASSIGNED)
+ {
+#endif
+ match = TRUE;
+
+ for (k=0; k < ID_STRING_LENGTH; k++)
+ {
+ if (p_id_string[k] != scamInfo[i].id_string[k])
+ match = FALSE;
+ }
+
+ if (match)
+ {
+ scamInfo[i].state = ID_ASSIGNED;
+ return(i);
+ }
+
+#if !defined(SCAM_LEV_2)
+ }
+#endif
+
+ }
+
+
+
+ if (p_id_string[0] & BIT(5))
+ i = 8;
+ else
+ i = MAX_SCSI_TAR;
+
+ if (((p_id_string[0] & 0x06) == 0x02) || ((p_id_string[0] & 0x06) == 0x04))
+ match = p_id_string[1] & (UCHAR) 0x1F;
+ else
+ match = 7;
+
+ while (i > 0)
+ {
+ i--;
+
+ if (scamInfo[match].state == ID_UNUSED)
+ {
+ for (k=0; k < ID_STRING_LENGTH; k++)
+ {
+ scamInfo[match].id_string[k] = p_id_string[k];
+ }
+
+ scamInfo[match].state = ID_ASSIGNED;
+
+ if(BL_Card[p_card].pNvRamInfo == NULL)
+ BL_Card[p_card].globalFlags |= F_UPDATE_EEPROM;
+ return(match);
+
+ }
+
+
+ match--;
+
+ if (match == 0xFF)
+ {
+ if (p_id_string[0] & BIT(5))
+ match = 7;
+ else
+ match = MAX_SCSI_TAR-1;
+ }
+ }
+
+
+
+ if (p_id_string[0] & BIT(7))
+ {
+ return(CLR_PRIORITY);
+ }
+
+
+ if (p_id_string[0] & BIT(5))
+ i = 8;
+ else
+ i = MAX_SCSI_TAR;
+
+ if (((p_id_string[0] & 0x06) == 0x02) || ((p_id_string[0] & 0x06) == 0x04))
+ match = p_id_string[1] & (UCHAR) 0x1F;
+ else
+ match = 7;
+
+ while (i > 0)
+ {
+
+ i--;
+
+ if (scamInfo[match].state == ID_UNASSIGNED)
+ {
+ for (k=0; k < ID_STRING_LENGTH; k++)
+ {
+ scamInfo[match].id_string[k] = p_id_string[k];
+ }
+
+ scamInfo[match].id_string[0] |= BIT(7);
+ scamInfo[match].state = ID_ASSIGNED;
+ if(BL_Card[p_card].pNvRamInfo == NULL)
+ BL_Card[p_card].globalFlags |= F_UPDATE_EEPROM;
+ return(match);
+
+ }
+
+
+ match--;
+
+ if (match == 0xFF)
+ {
+ if (p_id_string[0] & BIT(5))
+ match = 7;
+ else
+ match = MAX_SCSI_TAR-1;
+ }
+ }
+
+ return(NO_ID_AVAIL);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: scsavdi
+ *
+ * Description: Save off the device SCAM ID strings.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void scsavdi(UCHAR p_card, USHORT p_port)
+#else
+void scsavdi(UCHAR p_card, ULONG p_port)
+#endif
+{
+ UCHAR i,k,max_id;
+ USHORT ee_data,sum_data;
+
+
+ sum_data = 0x0000;
+
+ for (i = 1; i < EE_SCAMBASE/2; i++)
+ {
+ sum_data += utilEERead(p_port, i);
+ }
+
+
+ utilEEWriteOnOff(p_port,1); /* Enable write access to the EEPROM */
+
+ if (RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_id = 0x08;
+
+ else
+ max_id = 0x10;
+
+ for (i=0; i < max_id; i++)
+ {
+
+ for (k=0; k < ID_STRING_LENGTH; k+=2)
+ {
+ ee_data = scamInfo[i].id_string[k+1];
+ ee_data <<= 8;
+ ee_data |= scamInfo[i].id_string[k];
+ sum_data += ee_data;
+ utilEEWrite(p_port, ee_data, (USHORT)((EE_SCAMBASE/2) +
+ (USHORT)(i*((USHORT)ID_STRING_LENGTH/2)) + (USHORT)(k/2)));
+ }
+ }
+
+
+ utilEEWrite(p_port, sum_data, EEPROM_CHECK_SUM/2);
+ utilEEWriteOnOff(p_port,0); /* Turn off write access */
+}
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: diagnose.c $
+ *
+ * Description: Diagnostic functions for testing the integrity of
+ * the HARPOON.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <eeprom.h>*/
+/*#include <harpoon.h>*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: XbowInit
+ *
+ * Description: Setup the Xbow for normal operation.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void XbowInit(USHORT port, UCHAR ScamFlg)
+#else
+void XbowInit(ULONG port, UCHAR ScamFlg)
+#endif
+{
+UCHAR i;
+
+ i = RD_HARPOON(port+hp_page_ctrl);
+ WR_HARPOON(port+hp_page_ctrl, (UCHAR) (i | G_INT_DISABLE));
+
+ WR_HARPOON(port+hp_scsireset,0x00);
+ WR_HARPOON(port+hp_portctrl_1,HOST_MODE8);
+
+ WR_HARPOON(port+hp_scsireset,(DMA_RESET | HPSCSI_RESET | PROG_RESET | \
+ FIFO_CLR));
+
+ WR_HARPOON(port+hp_scsireset,SCSI_INI);
+
+ WR_HARPOON(port+hp_clkctrl_0,CLKCTRL_DEFAULT);
+
+ WR_HARPOON(port+hp_scsisig,0x00); /* Clear any signals we might */
+ WR_HARPOON(port+hp_scsictrl_0,ENA_SCAM_SEL);
+
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+#if defined(SCAM_LEV_2)
+ default_intena = RESET | RSEL | PROG_HLT | TIMEOUT |
+ BUS_FREE | XFER_CNT_0 | AUTO_INT;
+
+ if ((ScamFlg & SCAM_ENABLED) && (ScamFlg & SCAM_LEVEL2))
+ default_intena |= SCAM_SEL;
+
+#else
+ default_intena = RESET | RSEL | PROG_HLT | TIMEOUT |
+ BUS_FREE | XFER_CNT_0 | AUTO_INT;
+#endif
+ WRW_HARPOON((port+hp_intena), default_intena);
+
+ WR_HARPOON(port+hp_seltimeout,TO_290ms);
+
+ /* Turn on SCSI_MODE8 for narrow cards to fix the
+ strapping issue with the DUAL CHANNEL card */
+ if (RD_HARPOON(port+hp_page_ctrl) & NARROW_SCSI_CARD)
+ WR_HARPOON(port+hp_addstat,SCSI_MODE8);
+
+#if defined(NO_BIOS_OPTION)
+
+ WR_HARPOON(port+hp_synctarg_0,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_1,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_2,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_3,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_4,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_5,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_6,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_7,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_8,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_9,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_10,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_11,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_12,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_13,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_14,NARROW_SCSI);
+ WR_HARPOON(port+hp_synctarg_15,NARROW_SCSI);
+
+#endif
+ WR_HARPOON(port+hp_page_ctrl, i);
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: BusMasterInit
+ *
+ * Description: Initialize the BusMaster for normal operations.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void BusMasterInit(USHORT p_port)
+#else
+void BusMasterInit(ULONG p_port)
+#endif
+{
+
+
+ WR_HARPOON(p_port+hp_sys_ctrl, DRVR_RST);
+ WR_HARPOON(p_port+hp_sys_ctrl, 0x00);
+
+ WR_HARPOON(p_port+hp_host_blk_cnt, XFER_BLK64);
+
+
+ WR_HARPOON(p_port+hp_bm_ctrl, (BMCTRL_DEFAULT));
+
+ WR_HARPOON(p_port+hp_ee_ctrl, (SCSI_TERM_ENA_H));
+
+
+#if defined(NT)
+
+ WR_HARPOON(p_port+hp_pci_cmd_cfg, (RD_HARPOON(p_port+hp_pci_cmd_cfg)
+ & ~MEM_SPACE_ENA));
+
+#endif
+
+ RD_HARPOON(p_port+hp_int_status); /*Clear interrupts. */
+ WR_HARPOON(p_port+hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT));
+ WR_HARPOON(p_port+hp_page_ctrl, (RD_HARPOON(p_port+hp_page_ctrl) &
+ ~SCATTER_EN));
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: DiagXbow
+ *
+ * Description: Test Xbow integrity. Non-zero return indicates an error.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+int DiagXbow(USHORT port)
+#else
+int DiagXbow(ULONG port)
+#endif
+{
+ unsigned char fifo_cnt,loop_cnt;
+
+ unsigned char fifodata[5];
+ fifodata[0] = 0x00;
+ fifodata[1] = 0xFF;
+ fifodata[2] = 0x55;
+ fifodata[3] = 0xAA;
+ fifodata[4] = 0x00;
+
+
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+ WRW_HARPOON((port+hp_intena), 0x0000);
+
+ WR_HARPOON(port+hp_seltimeout,TO_5ms);
+
+ WR_HARPOON(port+hp_portctrl_0,START_TO);
+
+
+ for(fifodata[4] = 0x01; fifodata[4] != (UCHAR) 0; fifodata[4] = fifodata[4] << 1) {
+
+ WR_HARPOON(port+hp_selfid_0,fifodata[4]);
+ WR_HARPOON(port+hp_selfid_1,fifodata[4]);
+
+ if ((RD_HARPOON(port+hp_selfid_0) != fifodata[4]) ||
+ (RD_HARPOON(port+hp_selfid_1) != fifodata[4]))
+ return(1);
+ }
+
+
+ for(loop_cnt = 0; loop_cnt < 4; loop_cnt++) {
+
+ WR_HARPOON(port+hp_portctrl_0,(HOST_PORT | HOST_WRT | START_TO));
+
+
+ for (fifo_cnt = 0; fifo_cnt < FIFO_LEN; fifo_cnt++) {
+
+ WR_HARPOON(port+hp_fifodata_0, fifodata[loop_cnt]);
+ }
+
+
+ if (!(RD_HARPOON(port+hp_xferstat) & FIFO_FULL))
+ return(1);
+
+
+ WR_HARPOON(port+hp_portctrl_0,(HOST_PORT | START_TO));
+
+ for (fifo_cnt = 0; fifo_cnt < FIFO_LEN; fifo_cnt++) {
+
+ if (RD_HARPOON(port+hp_fifodata_0) != fifodata[loop_cnt])
+ return(1);
+ }
+
+
+ if (!(RD_HARPOON(port+hp_xferstat) & FIFO_EMPTY))
+ return(1);
+ }
+
+
+ while(!(RDW_HARPOON((port+hp_intstat)) & TIMEOUT)) {}
+
+
+ WR_HARPOON(port+hp_seltimeout,TO_290ms);
+
+ WRW_HARPOON((port+hp_intstat), CLR_ALL_INT);
+
+ WRW_HARPOON((port+hp_intena), default_intena);
+
+ return(0);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: DiagBusMaster
+ *
+ * Description: Test BusMaster integrity. Non-zero return indicates an
+ * error.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+int DiagBusMaster(USHORT port)
+#else
+int DiagBusMaster(ULONG port)
+#endif
+{
+ UCHAR testdata;
+
+ for(testdata = (UCHAR) 1; testdata != (UCHAR)0; testdata = testdata << 1) {
+
+ WR_HARPOON(port+hp_xfer_cnt_lo,testdata);
+ WR_HARPOON(port+hp_xfer_cnt_mi,testdata);
+ WR_HARPOON(port+hp_xfer_cnt_hi,testdata);
+ WR_HARPOON(port+hp_host_addr_lo,testdata);
+ WR_HARPOON(port+hp_host_addr_lmi,testdata);
+ WR_HARPOON(port+hp_host_addr_hmi,testdata);
+ WR_HARPOON(port+hp_host_addr_hi,testdata);
+
+ if ((RD_HARPOON(port+hp_xfer_cnt_lo) != testdata) ||
+ (RD_HARPOON(port+hp_xfer_cnt_mi) != testdata) ||
+ (RD_HARPOON(port+hp_xfer_cnt_hi) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_lo) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_lmi) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_hmi) != testdata) ||
+ (RD_HARPOON(port+hp_host_addr_hi) != testdata))
+
+ return(1);
+ }
+ RD_HARPOON(port+hp_int_status); /*Clear interrupts. */
+ return(0);
+}
+
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: DiagEEPROM
+ *
+ * Description: Verify the checksum and 'Key' and initialize the EEPROM if
+ * necessary.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void DiagEEPROM(USHORT p_port)
+#else
+void DiagEEPROM(ULONG p_port)
+#endif
+
+{
+ USHORT index,temp,max_wd_cnt;
+
+ if (RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD)
+ max_wd_cnt = EEPROM_WD_CNT;
+ else
+ max_wd_cnt = EEPROM_WD_CNT * 2;
+
+ temp = utilEERead(p_port, FW_SIGNATURE/2);
+
+ if (temp == 0x4641) {
+
+ for (index = 2; index < max_wd_cnt; index++) {
+
+ temp += utilEERead(p_port, index);
+
+ }
+
+ if (temp == utilEERead(p_port, EEPROM_CHECK_SUM/2)) {
+
+ return; /*EEPROM is Okay so return now! */
+ }
+ }
+
+
+ utilEEWriteOnOff(p_port,(UCHAR)1);
+
+ for (index = 0; index < max_wd_cnt; index++) {
+
+ utilEEWrite(p_port, 0x0000, index);
+ }
+
+ temp = 0;
+
+ utilEEWrite(p_port, 0x4641, FW_SIGNATURE/2);
+ temp += 0x4641;
+ utilEEWrite(p_port, 0x3920, MODEL_NUMB_0/2);
+ temp += 0x3920;
+ utilEEWrite(p_port, 0x3033, MODEL_NUMB_2/2);
+ temp += 0x3033;
+ utilEEWrite(p_port, 0x2020, MODEL_NUMB_4/2);
+ temp += 0x2020;
+ utilEEWrite(p_port, 0x70D3, SYSTEM_CONFIG/2);
+ temp += 0x70D3;
+ utilEEWrite(p_port, 0x0010, BIOS_CONFIG/2);
+ temp += 0x0010;
+ utilEEWrite(p_port, 0x0003, SCAM_CONFIG/2);
+ temp += 0x0003;
+ utilEEWrite(p_port, 0x0007, ADAPTER_SCSI_ID/2);
+ temp += 0x0007;
+
+ utilEEWrite(p_port, 0x0000, IGNORE_B_SCAN/2);
+ temp += 0x0000;
+ utilEEWrite(p_port, 0x0000, SEND_START_ENA/2);
+ temp += 0x0000;
+ utilEEWrite(p_port, 0x0000, DEVICE_ENABLE/2);
+ temp += 0x0000;
+
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL01/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL23/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL45/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL67/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBL89/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLab/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLcd/2);
+ temp += 0x4242;
+ utilEEWrite(p_port, 0x4242, SYNC_RATE_TBLef/2);
+ temp += 0x4242;
+
+
+ utilEEWrite(p_port, 0x6C46, 64/2); /*PRODUCT ID */
+ temp += 0x6C46;
+ utilEEWrite(p_port, 0x7361, 66/2); /* FlashPoint LT */
+ temp += 0x7361;
+ utilEEWrite(p_port, 0x5068, 68/2);
+ temp += 0x5068;
+ utilEEWrite(p_port, 0x696F, 70/2);
+ temp += 0x696F;
+ utilEEWrite(p_port, 0x746E, 72/2);
+ temp += 0x746E;
+ utilEEWrite(p_port, 0x4C20, 74/2);
+ temp += 0x4C20;
+ utilEEWrite(p_port, 0x2054, 76/2);
+ temp += 0x2054;
+ utilEEWrite(p_port, 0x2020, 78/2);
+ temp += 0x2020;
+
+ index = ((EE_SCAMBASE/2)+(7*16));
+ utilEEWrite(p_port, (0x0700+TYPE_CODE0), index);
+ temp += (0x0700+TYPE_CODE0);
+ index++;
+ utilEEWrite(p_port, 0x5542, index); /*Vendor ID code */
+ temp += 0x5542; /* BUSLOGIC */
+ index++;
+ utilEEWrite(p_port, 0x4C53, index);
+ temp += 0x4C53;
+ index++;
+ utilEEWrite(p_port, 0x474F, index);
+ temp += 0x474F;
+ index++;
+ utilEEWrite(p_port, 0x4349, index);
+ temp += 0x4349;
+ index++;
+ utilEEWrite(p_port, 0x5442, index); /*Vendor unique code */
+ temp += 0x5442; /* BT- 930 */
+ index++;
+ utilEEWrite(p_port, 0x202D, index);
+ temp += 0x202D;
+ index++;
+ utilEEWrite(p_port, 0x3339, index);
+ temp += 0x3339;
+ index++; /*Serial # */
+ utilEEWrite(p_port, 0x2030, index); /* 01234567 */
+ temp += 0x2030;
+ index++;
+ utilEEWrite(p_port, 0x5453, index);
+ temp += 0x5453;
+ index++;
+ utilEEWrite(p_port, 0x5645, index);
+ temp += 0x5645;
+ index++;
+ utilEEWrite(p_port, 0x2045, index);
+ temp += 0x2045;
+ index++;
+ utilEEWrite(p_port, 0x202F, index);
+ temp += 0x202F;
+ index++;
+ utilEEWrite(p_port, 0x4F4A, index);
+ temp += 0x4F4A;
+ index++;
+ utilEEWrite(p_port, 0x204E, index);
+ temp += 0x204E;
+ index++;
+ utilEEWrite(p_port, 0x3539, index);
+ temp += 0x3539;
+
+
+
+ utilEEWrite(p_port, temp, EEPROM_CHECK_SUM/2);
+
+ utilEEWriteOnOff(p_port,(UCHAR)0);
+
+}
+
+
+/*----------------------------------------------------------------------
+ *
+ *
+ * Copyright 1995-1996 by Mylex Corporation. All Rights Reserved
+ *
+ * This file is available under both the GNU General Public License
+ * and a BSD-style copyright; see LICENSE.FlashPoint for details.
+ *
+ * $Workfile: utility.c $
+ *
+ * Description: Utility functions relating to queueing and EEPROM
+ * manipulation and any other garbage functions.
+ *
+ * $Date: 1999/04/26 05:53:56 $
+ *
+ * $Revision: 1.1 $
+ *
+ *----------------------------------------------------------------------*/
+/*#include <globals.h>*/
+
+#if (FW_TYPE==_UCB_MGR_)
+ /*#include <budi.h>*/
+#endif
+
+/*#include <sccbmgr.h>*/
+/*#include <blx30.h>*/
+/*#include <target.h>*/
+/*#include <scsi2.h>*/
+/*#include <harpoon.h>*/
+
+
+/*
+extern SCCBCARD BL_Card[MAX_CARDS];
+extern SCCBMGR_TAR_INFO sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR];
+extern unsigned int SccbGlobalFlags;
+*/
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Search Select
+ *
+ * Description: Try to find a new command to execute.
+ *
+ *---------------------------------------------------------------------*/
+
+void queueSearchSelect(PSCCBcard pCurrCard, UCHAR p_card)
+{
+ UCHAR scan_ptr, lun;
+ PSCCBMgr_tar_info currTar_Info;
+ PSCCB pOldSccb;
+
+ scan_ptr = pCurrCard->scanIndex;
+ do
+ {
+ currTar_Info = &sccbMgrTbl[p_card][scan_ptr];
+ if((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING))
+ {
+ if (currTar_Info->TarSelQ_Cnt != 0)
+ {
+
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ scan_ptr = 0;
+
+ for(lun=0; lun < MAX_LUN; lun++)
+ {
+ if(currTar_Info->TarLUNBusy[lun] == FALSE)
+ {
+
+ pCurrCard->currentSCCB = currTar_Info->TarSelQ_Head;
+ pOldSccb = NULL;
+
+ while((pCurrCard->currentSCCB != NULL) &&
+ (lun != pCurrCard->currentSCCB->Lun))
+ {
+ pOldSccb = pCurrCard->currentSCCB;
+ pCurrCard->currentSCCB = (PSCCB)(pCurrCard->currentSCCB)->
+ Sccb_forwardlink;
+ }
+ if(pCurrCard->currentSCCB == NULL)
+ continue;
+ if(pOldSccb != NULL)
+ {
+ pOldSccb->Sccb_forwardlink = (PSCCB)(pCurrCard->currentSCCB)->
+ Sccb_forwardlink;
+ pOldSccb->Sccb_backlink = (PSCCB)(pCurrCard->currentSCCB)->
+ Sccb_backlink;
+ currTar_Info->TarSelQ_Cnt--;
+ }
+ else
+ {
+ currTar_Info->TarSelQ_Head = (PSCCB)(pCurrCard->currentSCCB)->Sccb_forwardlink;
+
+ if (currTar_Info->TarSelQ_Head == NULL)
+ {
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarSelQ_Cnt = 0;
+ }
+ else
+ {
+ currTar_Info->TarSelQ_Cnt--;
+ currTar_Info->TarSelQ_Head->Sccb_backlink = (PSCCB)NULL;
+ }
+ }
+ pCurrCard->scanIndex = scan_ptr;
+
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+
+ break;
+ }
+ }
+ }
+
+ else
+ {
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR) {
+ scan_ptr = 0;
+ }
+ }
+
+ }
+ else
+ {
+ if ((currTar_Info->TarSelQ_Cnt != 0) &&
+ (currTar_Info->TarLUNBusy[0] == FALSE))
+ {
+
+ pCurrCard->currentSCCB = currTar_Info->TarSelQ_Head;
+
+ currTar_Info->TarSelQ_Head = (PSCCB)(pCurrCard->currentSCCB)->Sccb_forwardlink;
+
+ if (currTar_Info->TarSelQ_Head == NULL)
+ {
+ currTar_Info->TarSelQ_Tail = NULL;
+ currTar_Info->TarSelQ_Cnt = 0;
+ }
+ else
+ {
+ currTar_Info->TarSelQ_Cnt--;
+ currTar_Info->TarSelQ_Head->Sccb_backlink = (PSCCB)NULL;
+ }
+
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ scan_ptr = 0;
+
+ pCurrCard->scanIndex = scan_ptr;
+
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+
+ break;
+ }
+
+ else
+ {
+ scan_ptr++;
+ if (scan_ptr == MAX_SCSI_TAR)
+ {
+ scan_ptr = 0;
+ }
+ }
+ }
+ } while (scan_ptr != pCurrCard->scanIndex);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Select Fail
+ *
+ * Description: Add the current SCCB to the head of the Queue.
+ *
+ *---------------------------------------------------------------------*/
+
+void queueSelectFail(PSCCBcard pCurrCard, UCHAR p_card)
+{
+ UCHAR thisTarg;
+ PSCCBMgr_tar_info currTar_Info;
+
+ if (pCurrCard->currentSCCB != NULL)
+ {
+ thisTarg = (UCHAR)(((PSCCB)(pCurrCard->currentSCCB))->TargID);
+ currTar_Info = &sccbMgrTbl[p_card][thisTarg];
+
+ pCurrCard->currentSCCB->Sccb_backlink = (PSCCB)NULL;
+
+ pCurrCard->currentSCCB->Sccb_forwardlink = currTar_Info->TarSelQ_Head;
+
+ if (currTar_Info->TarSelQ_Cnt == 0)
+ {
+ currTar_Info->TarSelQ_Tail = pCurrCard->currentSCCB;
+ }
+
+ else
+ {
+ currTar_Info->TarSelQ_Head->Sccb_backlink = pCurrCard->currentSCCB;
+ }
+
+
+ currTar_Info->TarSelQ_Head = pCurrCard->currentSCCB;
+
+ pCurrCard->currentSCCB = NULL;
+ currTar_Info->TarSelQ_Cnt++;
+ }
+}
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Command Complete
+ *
+ * Description: Call the callback function with the current SCCB.
+ *
+ *---------------------------------------------------------------------*/
+
+void queueCmdComplete(PSCCBcard pCurrCard, PSCCB p_sccb, UCHAR p_card)
+{
+
+#if (FW_TYPE==_UCB_MGR_)
+
+ u08bits SCSIcmd;
+ CALL_BK_FN callback;
+ PSCCBMgr_tar_info currTar_Info;
+
+ PUCB p_ucb;
+ p_ucb=p_sccb->Sccb_ucb_ptr;
+
+ SCSIcmd = p_sccb->Cdb[0];
+
+
+ if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED))
+ {
+
+ if ((p_ucb->UCB_opcode & OPC_CHK_UNDER_OVER_RUN) &&
+ (p_sccb->HostStatus == SCCB_COMPLETE) &&
+ (p_sccb->TargetStatus != SSCHECK))
+
+ if ((SCSIcmd == SCSI_READ) ||
+ (SCSIcmd == SCSI_WRITE) ||
+ (SCSIcmd == SCSI_READ_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
+ (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ (pCurrCard->globalFlags & F_NO_FILTER)
+ )
+ p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
+ }
+
+ p_ucb->UCB_status=SCCB_SUCCESS;
+
+ if ((p_ucb->UCB_hbastat=p_sccb->HostStatus) || (p_ucb->UCB_scsistat=p_sccb->TargetStatus))
+ {
+ p_ucb->UCB_status=SCCB_ERROR;
+ }
+
+ if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) ||
+ (p_sccb->OperationCode == RESIDUAL_COMMAND))
+ {
+
+ utilUpdateResidual(p_sccb);
+
+ p_ucb->UCB_datalen=p_sccb->DataLength;
+ }
+
+ pCurrCard->cmdCounter--;
+ if (!pCurrCard->cmdCounter)
+ {
+
+ if (pCurrCard->globalFlags & F_GREEN_PC)
+ {
+ WR_HARPOON(pCurrCard->ioPort+hp_clkctrl_0,(PWR_DWN | CLKCTRL_DEFAULT));
+ WR_HARPOON(pCurrCard->ioPort+hp_sys_ctrl, STOP_CLK);
+ }
+
+ WR_HARPOON(pCurrCard->ioPort+hp_semaphore,
+ (RD_HARPOON(pCurrCard->ioPort+hp_semaphore) & ~SCCB_MGR_ACTIVE));
+ }
+
+ if(pCurrCard->discQCount != 0)
+ {
+ currTar_Info = &sccbMgrTbl[p_card][p_sccb->TargID];
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[p_sccb->Lun]] = NULL;
+ }
+ else
+ {
+ if(p_sccb->Sccb_tag)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL;
+ }else
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+
+ }
+ callback = (CALL_BK_FN)p_ucb->UCB_callback;
+ callback(p_ucb);
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ pCurrCard->currentSCCB = NULL;
+}
+
+
+
+
+#else
+
+ UCHAR i, SCSIcmd;
+ CALL_BK_FN callback;
+ PSCCBMgr_tar_info currTar_Info;
+
+ SCSIcmd = p_sccb->Cdb[0];
+
+
+ if (!(p_sccb->Sccb_XferState & F_ALL_XFERRED)) {
+
+ if ((p_sccb->ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN)) &&
+ (p_sccb->HostStatus == SCCB_COMPLETE) &&
+ (p_sccb->TargetStatus != SSCHECK))
+
+ if ((SCSIcmd == SCSI_READ) ||
+ (SCSIcmd == SCSI_WRITE) ||
+ (SCSIcmd == SCSI_READ_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_EXTENDED) ||
+ (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
+ (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ (pCurrCard->globalFlags & F_NO_FILTER)
+ )
+ p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
+ }
+
+
+ if(p_sccb->SccbStatus == SCCB_IN_PROCESS)
+ {
+ if (p_sccb->HostStatus || p_sccb->TargetStatus)
+ p_sccb->SccbStatus = SCCB_ERROR;
+ else
+ p_sccb->SccbStatus = SCCB_SUCCESS;
+ }
+
+ if (p_sccb->Sccb_XferState & F_AUTO_SENSE) {
+
+ p_sccb->CdbLength = p_sccb->Save_CdbLen;
+ for (i=0; i < 6; i++) {
+ p_sccb->Cdb[i] = p_sccb->Save_Cdb[i];
+ }
+ }
+
+ if ((p_sccb->OperationCode == RESIDUAL_SG_COMMAND) ||
+ (p_sccb->OperationCode == RESIDUAL_COMMAND)) {
+
+ utilUpdateResidual(p_sccb);
+ }
+
+ pCurrCard->cmdCounter--;
+ if (!pCurrCard->cmdCounter) {
+
+ if (pCurrCard->globalFlags & F_GREEN_PC) {
+ WR_HARPOON(pCurrCard->ioPort+hp_clkctrl_0,(PWR_DWN | CLKCTRL_DEFAULT));
+ WR_HARPOON(pCurrCard->ioPort+hp_sys_ctrl, STOP_CLK);
+ }
+
+ WR_HARPOON(pCurrCard->ioPort+hp_semaphore,
+ (RD_HARPOON(pCurrCard->ioPort+hp_semaphore) & ~SCCB_MGR_ACTIVE));
+
+ }
+
+ if(pCurrCard->discQCount != 0)
+ {
+ currTar_Info = &sccbMgrTbl[p_card][p_sccb->TargID];
+ if(((pCurrCard->globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[p_sccb->Lun]] = NULL;
+ }
+ else
+ {
+ if(p_sccb->Sccb_tag)
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[p_sccb->Sccb_tag] = NULL;
+ }else
+ {
+ pCurrCard->discQCount--;
+ pCurrCard->discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]] = NULL;
+ }
+ }
+
+ }
+
+ callback = (CALL_BK_FN)p_sccb->SccbCallback;
+ callback(p_sccb);
+ pCurrCard->globalFlags |= F_NEW_SCCB_CMD;
+ pCurrCard->currentSCCB = NULL;
+}
+#endif /* ( if FW_TYPE==...) */
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Disconnect
+ *
+ * Description: Add SCCB to our disconnect array.
+ *
+ *---------------------------------------------------------------------*/
+void queueDisconnect(PSCCB p_sccb, UCHAR p_card)
+{
+ PSCCBMgr_tar_info currTar_Info;
+
+ currTar_Info = &sccbMgrTbl[p_card][p_sccb->TargID];
+
+ if(((BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
+ ((currTar_Info->TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)))
+ {
+ BL_Card[p_card].discQ_Tbl[currTar_Info->LunDiscQ_Idx[p_sccb->Lun]] = p_sccb;
+ }
+ else
+ {
+ if (p_sccb->Sccb_tag)
+ {
+ BL_Card[p_card].discQ_Tbl[p_sccb->Sccb_tag] = p_sccb;
+ sccbMgrTbl[p_card][p_sccb->TargID].TarLUNBusy[0] = FALSE;
+ sccbMgrTbl[p_card][p_sccb->TargID].TarTagQ_Cnt++;
+ }else
+ {
+ BL_Card[p_card].discQ_Tbl[currTar_Info->LunDiscQ_Idx[0]] = p_sccb;
+ }
+ }
+ BL_Card[p_card].currentSCCB = NULL;
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Flush SCCB
+ *
+ * Description: Flush all SCCB's for the current SCCB's target back to
+ * the host driver.
+ *
+ *---------------------------------------------------------------------*/
+
+void queueFlushSccb(UCHAR p_card, UCHAR error_code)
+{
+ UCHAR qtag,thisTarg;
+ PSCCB currSCCB;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currSCCB = BL_Card[p_card].currentSCCB;
+ if(currSCCB != NULL)
+ {
+ thisTarg = (UCHAR)currSCCB->TargID;
+ currTar_Info = &sccbMgrTbl[p_card][thisTarg];
+
+ for (qtag=0; qtag<QUEUE_DEPTH; qtag++) {
+
+ if (BL_Card[p_card].discQ_Tbl[qtag] &&
+ (BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg))
+ {
+
+ BL_Card[p_card].discQ_Tbl[qtag]->HostStatus = (UCHAR)error_code;
+
+ queueCmdComplete(&BL_Card[p_card],BL_Card[p_card].discQ_Tbl[qtag], p_card);
+
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ currTar_Info->TarTagQ_Cnt--;
+
+ }
+ }
+ }
+
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Flush Target SCCB
+ *
+ * Description: Flush all SCCB's back to the host driver for this target.
+ *
+ *---------------------------------------------------------------------*/
+
+void queueFlushTargSccb(UCHAR p_card, UCHAR thisTarg, UCHAR error_code)
+{
+ UCHAR qtag;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currTar_Info = &sccbMgrTbl[p_card][thisTarg];
+
+ for (qtag=0; qtag<QUEUE_DEPTH; qtag++) {
+
+ if (BL_Card[p_card].discQ_Tbl[qtag] &&
+ (BL_Card[p_card].discQ_Tbl[qtag]->TargID == thisTarg))
+ {
+
+ BL_Card[p_card].discQ_Tbl[qtag]->HostStatus = (UCHAR)error_code;
+
+ queueCmdComplete(&BL_Card[p_card],BL_Card[p_card].discQ_Tbl[qtag], p_card);
+
+ BL_Card[p_card].discQ_Tbl[qtag] = NULL;
+ currTar_Info->TarTagQ_Cnt--;
+
+ }
+ }
+
+}
+
+
+
+
+
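+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Add SCCB
+ *
+ * Description: Add an SCCB to the tail of the target's selection queue.
+ *
+ *---------------------------------------------------------------------*/
+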
+void queueAddSccb(PSCCB p_SCCB, UCHAR p_card)
+{
+ PSCCBMgr_tar_info currTar_Info;
+ currTar_Info = &sccbMgrTbl[p_card][p_SCCB->TargID];
+
+ p_SCCB->Sccb_forwardlink = NULL;
+
+ p_SCCB->Sccb_backlink = currTar_Info->TarSelQ_Tail;
+
+ if (currTar_Info->TarSelQ_Cnt == 0) {
+
+ currTar_Info->TarSelQ_Head = p_SCCB;
+ }
+
+ else {
+
+ currTar_Info->TarSelQ_Tail->Sccb_forwardlink = p_SCCB;
+ }
+
+
+ currTar_Info->TarSelQ_Tail = p_SCCB;
+ currTar_Info->TarSelQ_Cnt++;
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Queue Find SCCB
+ *
+ * Description: Search the target select Queue for this SCCB, and
+ * remove it if found.
+ *
+ *---------------------------------------------------------------------*/
+
+UCHAR queueFindSccb(PSCCB p_SCCB, UCHAR p_card)
+{
+ PSCCB q_ptr;
+ PSCCBMgr_tar_info currTar_Info;
+
+ currTar_Info = &sccbMgrTbl[p_card][p_SCCB->TargID];
+
+ q_ptr = currTar_Info->TarSelQ_Head;
+
+ while(q_ptr != NULL) {
+
+ if (q_ptr == p_SCCB) {
+
+
+ if (currTar_Info->TarSelQ_Head == q_ptr) {
+
+ currTar_Info->TarSelQ_Head = q_ptr->Sccb_forwardlink;
+ }
+
+ if (currTar_Info->TarSelQ_Tail == q_ptr) {
+
+ currTar_Info->TarSelQ_Tail = q_ptr->Sccb_backlink;
+ }
+
+ if (q_ptr->Sccb_forwardlink != NULL) {
+ q_ptr->Sccb_forwardlink->Sccb_backlink = q_ptr->Sccb_backlink;
+ }
+
+ if (q_ptr->Sccb_backlink != NULL) {
+ q_ptr->Sccb_backlink->Sccb_forwardlink = q_ptr->Sccb_forwardlink;
+ }
+
+ currTar_Info->TarSelQ_Cnt--;
+
+ return(TRUE);
+ }
+
+ else {
+ q_ptr = q_ptr->Sccb_forwardlink;
+ }
+ }
+
+
+ return(FALSE);
+
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Utility Update Residual Count
+ *
+ * Description: Update the XferCnt to the remaining byte count.
+ * If we transferred all the data then just write zero.
+ * If Non-SG transfer then report Total Cnt - Actual Transfer
+ * Cnt. For SG transfers add the count fields of all
+ * remaining SG elements, as well as any partial remaining
+ * element.
+ *
+ *---------------------------------------------------------------------*/
+
+void utilUpdateResidual(PSCCB p_SCCB)
+{
+ ULONG partial_cnt;
+ UINT sg_index;
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ ULONG far *sg_ptr;
+#else
+ ULONG *sg_ptr;
+#endif
+
+ if (p_SCCB->Sccb_XferState & F_ALL_XFERRED) {
+
+ p_SCCB->DataLength = 0x0000;
+ }
+
+ else if (p_SCCB->Sccb_XferState & F_SG_XFER) {
+
+ partial_cnt = 0x0000;
+
+ sg_index = p_SCCB->Sccb_sgseg;
+
+#if defined(COMPILER_16_BIT) && !defined(DOS)
+ sg_ptr = (ULONG far *)p_SCCB->DataPointer;
+#else
+ sg_ptr = (ULONG *)p_SCCB->DataPointer;
+#endif
+
+ if (p_SCCB->Sccb_SGoffset) {
+
+ partial_cnt = p_SCCB->Sccb_SGoffset;
+ sg_index++;
+ }
+
+ while ( ((ULONG)sg_index * (ULONG)SG_ELEMENT_SIZE) <
+ p_SCCB->DataLength ) {
+
+ partial_cnt += *(sg_ptr+(sg_index * 2));
+ sg_index++;
+ }
+
+ p_SCCB->DataLength = partial_cnt;
+ }
+
+ else {
+
+ p_SCCB->DataLength -= p_SCCB->Sccb_ATC;
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Wait 1 Second
+ *
+ * Description: Wait for 1 second.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void Wait1Second(USHORT p_port)
+#else
+void Wait1Second(ULONG p_port)
+#endif
+{
+ UCHAR i;
+
+ for(i=0; i < 4; i++) {
+
+ Wait(p_port, TO_250ms);
+
+ if ((RD_HARPOON(p_port+hp_scsictrl_0) & SCSI_RST))
+ break;
+
+ if((RDW_HARPOON((p_port+hp_intstat)) & SCAM_SEL))
+ break;
+ }
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Wait
+ *
+ * Description: Wait the desired delay by programming the selection
+ * timeout timer, aborting early on a SCSI bus reset or a SCAM selection.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void Wait(USHORT p_port, UCHAR p_delay)
+#else
+void Wait(ULONG p_port, UCHAR p_delay)
+#endif
+{
+ UCHAR old_timer;
+ UCHAR green_flag;
+
+ old_timer = RD_HARPOON(p_port+hp_seltimeout);
+
+ green_flag=RD_HARPOON(p_port+hp_clkctrl_0);
+ WR_HARPOON(p_port+hp_clkctrl_0, CLKCTRL_DEFAULT);
+
+ WR_HARPOON(p_port+hp_seltimeout,p_delay);
+ WRW_HARPOON((p_port+hp_intstat), TIMEOUT);
+ WRW_HARPOON((p_port+hp_intena), (default_intena & ~TIMEOUT));
+
+
+ WR_HARPOON(p_port+hp_portctrl_0,
+ (RD_HARPOON(p_port+hp_portctrl_0) | START_TO));
+
+ while (!(RDW_HARPOON((p_port+hp_intstat)) & TIMEOUT)) {
+
+ if ((RD_HARPOON(p_port+hp_scsictrl_0) & SCSI_RST))
+ break;
+
+ if ((RDW_HARPOON((p_port+hp_intstat)) & SCAM_SEL))
+ break;
+ }
+
+ WR_HARPOON(p_port+hp_portctrl_0,
+ (RD_HARPOON(p_port+hp_portctrl_0) & ~START_TO));
+
+ WRW_HARPOON((p_port+hp_intstat), TIMEOUT);
+ WRW_HARPOON((p_port+hp_intena), default_intena);
+
+ WR_HARPOON(p_port+hp_clkctrl_0,green_flag);
+
+ WR_HARPOON(p_port+hp_seltimeout,old_timer);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Enable/Disable Write to EEPROM
+ *
+ * Description: The EEPROM must first be enabled for writes.
+ * A total of 9 clocks are needed.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void utilEEWriteOnOff(USHORT p_port,UCHAR p_mode)
+#else
+void utilEEWriteOnOff(ULONG p_port,UCHAR p_mode)
+#endif
+{
+ UCHAR ee_value;
+
+ ee_value = (UCHAR)(RD_HARPOON(p_port+hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H));
+
+ if (p_mode)
+
+ utilEESendCmdAddr(p_port, EWEN, EWEN_ADDR);
+
+ else
+
+
+ utilEESendCmdAddr(p_port, EWDS, EWDS_ADDR);
+
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value); /*Turn off Master Select */
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Write EEPROM
+ *
+ * Description: Write a word to the EEPROM at the specified
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void utilEEWrite(USHORT p_port, USHORT ee_data, USHORT ee_addr)
+#else
+void utilEEWrite(ULONG p_port, USHORT ee_data, USHORT ee_addr)
+#endif
+{
+
+ UCHAR ee_value;
+ USHORT i;
+
+ ee_value = (UCHAR)((RD_HARPOON(p_port+hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H))|
+ (SEE_MS | SEE_CS));
+
+
+
+ utilEESendCmdAddr(p_port, EE_WRITE, ee_addr);
+
+
+ ee_value |= (SEE_MS + SEE_CS);
+
+ for(i = 0x8000; i != 0; i>>=1) {
+
+ if (i & ee_data)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ }
+ ee_value &= (EXT_ARB_ACK | SCSI_TERM_ENA_H);
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS));
+
+ Wait(p_port, TO_10ms);
+
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS | SEE_CS)); /* Set CS to EEPROM */
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS)); /* Turn off CS */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value); /* Turn off Master Select */
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Read EEPROM
+ *
+ * Description: Read a word from the EEPROM at the desired address,
+ * re-reading until two consecutive reads agree (up to a few
+ * retries) to guard against noisy reads.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+USHORT utilEERead(USHORT p_port, USHORT ee_addr)
+#else
+USHORT utilEERead(ULONG p_port, USHORT ee_addr)
+#endif
+{
+ USHORT i, ee_data1, ee_data2;
+
+ i = 0;
+ ee_data1 = utilEEReadOrg(p_port, ee_addr);
+ do
+ {
+ ee_data2 = utilEEReadOrg(p_port, ee_addr);
+
+ if(ee_data1 == ee_data2)
+ return(ee_data1);
+
+ ee_data1 = ee_data2;
+ i++;
+
+ }while(i < 4);
+
+ return(ee_data1);
+}
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Read EEPROM Original
+ *
+ * Description: Read a word from the EEPROM at the desired
+ * address.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+USHORT utilEEReadOrg(USHORT p_port, USHORT ee_addr)
+#else
+USHORT utilEEReadOrg(ULONG p_port, USHORT ee_addr)
+#endif
+{
+
+ UCHAR ee_value;
+ USHORT i, ee_data;
+
+ ee_value = (UCHAR)((RD_HARPOON(p_port+hp_ee_ctrl) & (EXT_ARB_ACK | SCSI_TERM_ENA_H))|
+ (SEE_MS | SEE_CS));
+
+
+ utilEESendCmdAddr(p_port, EE_READ, ee_addr);
+
+
+ ee_value |= (SEE_MS + SEE_CS);
+ ee_data = 0;
+
+ for(i = 1; i <= 16; i++) {
+
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+ ee_data <<= 1;
+
+ if (RD_HARPOON(p_port+hp_ee_ctrl) & SEE_DI)
+ ee_data |= 1;
+ }
+
+ ee_value &= ~(SEE_MS + SEE_CS);
+ WR_HARPOON(p_port+hp_ee_ctrl, (ee_value | SEE_MS)); /*Turn off CS */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value); /*Turn off Master Select */
+
+ return(ee_data);
+}
+
+
+/*---------------------------------------------------------------------
+ *
+ * Function: Send EE command and Address to the EEPROM
+ *
+ * Description: Transfers the correct command and sends the address
+ * to the eeprom.
+ *
+ *---------------------------------------------------------------------*/
+
+#if defined(DOS)
+void utilEESendCmdAddr(USHORT p_port, UCHAR ee_cmd, USHORT ee_addr)
+#else
+void utilEESendCmdAddr(ULONG p_port, UCHAR ee_cmd, USHORT ee_addr)
+#endif
+{
+ UCHAR ee_value;
+ UCHAR narrow_flg;
+
+ USHORT i;
+
+
+ narrow_flg= (UCHAR)(RD_HARPOON(p_port+hp_page_ctrl) & NARROW_SCSI_CARD);
+
+
+ ee_value = SEE_MS;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+ ee_value |= SEE_CS; /* Set CS to EEPROM */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+
+ for(i = 0x04; i != 0; i>>=1) {
+
+ if (i & ee_cmd)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ }
+
+
+ if (narrow_flg)
+ i = 0x0080;
+
+ else
+ i = 0x0200;
+
+
+ while (i != 0) {
+
+ if (i & ee_addr)
+ ee_value |= SEE_DO;
+ else
+ ee_value &= ~SEE_DO;
+
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value |= SEE_CLK; /* Clock data! */
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ ee_value &= ~SEE_CLK;
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+ WR_HARPOON(p_port+hp_ee_ctrl, ee_value);
+
+ i >>= 1;
+ }
+}
+
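+/*---------------------------------------------------------------------
+ *
+ * Function: Calculate CRC-16
+ *
+ * Description: Compute a 16-bit CRC over an ID_STRING_LENGTH byte
+ * buffer, one bit at a time, using the CRCMASK polynomial.
+ *
+ *---------------------------------------------------------------------*/
+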
+USHORT CalcCrc16(UCHAR buffer[])
+{
+ USHORT crc=0;
+ int i,j;
+ USHORT ch;
+ for (i=0; i < ID_STRING_LENGTH; i++)
+ {
+ ch = (USHORT) buffer[i];
+ for(j=0; j < 8; j++)
+ {
+ if ((crc ^ ch) & 1)
+ crc = (crc >> 1) ^ CRCMASK;
+ else
+ crc >>= 1;
+ ch >>= 1;
+ }
+ }
+ return(crc);
+}
+
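+/*---------------------------------------------------------------------
+ *
+ * Function: Calculate LRC
+ *
+ * Description: Compute the LRC (XOR of all bytes) of an
+ * ID_STRING_LENGTH byte buffer.
+ *
+ *---------------------------------------------------------------------*/
+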
+UCHAR CalcLrc(UCHAR buffer[])
+{
+ int i;
+ UCHAR lrc;
+ lrc = 0;
+ for(i = 0; i < ID_STRING_LENGTH; i++)
+ lrc ^= buffer[i];
+ return(lrc);
+}
+
+
+
+/*
+  The following inline definitions avoid type conflicts between the
+  BusLogic driver's FlashPoint_Info_T and BusLogic_CCB_T types and the
+  SCCB manager's internal PSCCBMGR_INFO and PSCCB types by casting at
+  the call boundary.
+*/
+
+static inline unsigned char
+FlashPoint__ProbeHostAdapter(FlashPoint_Info_T *FlashPointInfo)
+{
+ return FlashPoint_ProbeHostAdapter((PSCCBMGR_INFO) FlashPointInfo);
+}
+
+
+static inline FlashPoint_CardHandle_T
+FlashPoint__HardwareResetHostAdapter(FlashPoint_Info_T *FlashPointInfo)
+{
+ return FlashPoint_HardwareResetHostAdapter((PSCCBMGR_INFO) FlashPointInfo);
+}
+
+static inline void
+FlashPoint__ReleaseHostAdapter(FlashPoint_CardHandle_T CardHandle)
+{
+ FlashPoint_ReleaseHostAdapter(CardHandle);
+}
+
+
+static inline void
+FlashPoint__StartCCB(FlashPoint_CardHandle_T CardHandle, BusLogic_CCB_T *CCB)
+{
+ FlashPoint_StartCCB(CardHandle, (PSCCB) CCB);
+}
+
+
+static inline void
+FlashPoint__AbortCCB(FlashPoint_CardHandle_T CardHandle, BusLogic_CCB_T *CCB)
+{
+ FlashPoint_AbortCCB(CardHandle, (PSCCB) CCB);
+}
+
+
+static inline boolean
+FlashPoint__InterruptPending(FlashPoint_CardHandle_T CardHandle)
+{
+ return FlashPoint_InterruptPending(CardHandle);
+}
+
+
+static inline int
+FlashPoint__HandleInterrupt(FlashPoint_CardHandle_T CardHandle)
+{
+ return FlashPoint_HandleInterrupt(CardHandle);
+}
+
+
+#define FlashPoint_ProbeHostAdapter FlashPoint__ProbeHostAdapter
+#define FlashPoint_HardwareResetHostAdapter FlashPoint__HardwareResetHostAdapter
+#define FlashPoint_ReleaseHostAdapter FlashPoint__ReleaseHostAdapter
+#define FlashPoint_StartCCB FlashPoint__StartCCB
+#define FlashPoint_AbortCCB FlashPoint__AbortCCB
+#define FlashPoint_InterruptPending FlashPoint__InterruptPending
+#define FlashPoint_HandleInterrupt FlashPoint__HandleInterrupt
+
+
+/*
+ FlashPoint_InquireTargetInfo returns the Synchronous Period, Synchronous
+ Offset, and Wide Transfers Active information for TargetID on CardHandle.
+*/
+
+void FlashPoint_InquireTargetInfo(FlashPoint_CardHandle_T CardHandle,
+ int TargetID,
+ unsigned char *SynchronousPeriod,
+ unsigned char *SynchronousOffset,
+ unsigned char *WideTransfersActive)
+{
+ SCCBMGR_TAR_INFO *TargetInfo =
+ &sccbMgrTbl[((SCCBCARD *)CardHandle)->cardIndex][TargetID];
+ if ((TargetInfo->TarSyncCtrl & SYNC_OFFSET) > 0)
+ {
+ *SynchronousPeriod = 5 * ((TargetInfo->TarSyncCtrl >> 5) + 1);
+ *SynchronousOffset = TargetInfo->TarSyncCtrl & SYNC_OFFSET;
+ }
+ else
+ {
+ *SynchronousPeriod = 0;
+ *SynchronousOffset = 0;
+ }
+ *WideTransfersActive = (TargetInfo->TarSyncCtrl & NARROW_SCSI ? 0 : 1);
+}
+
+
+#else /* CONFIG_SCSI_OMIT_FLASHPOINT */
+
+
+/*
+ Define prototypes for the FlashPoint SCCB Manager Functions.
+*/
+
+extern unsigned char FlashPoint_ProbeHostAdapter(FlashPoint_Info_T *);
+extern FlashPoint_CardHandle_T
+ FlashPoint_HardwareResetHostAdapter(FlashPoint_Info_T *);
+extern void FlashPoint_StartCCB(FlashPoint_CardHandle_T, BusLogic_CCB_T *);
+extern int FlashPoint_AbortCCB(FlashPoint_CardHandle_T, BusLogic_CCB_T *);
+extern boolean FlashPoint_InterruptPending(FlashPoint_CardHandle_T);
+extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T);
+extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T);
+extern void FlashPoint_InquireTargetInfo(FlashPoint_CardHandle_T,
+ int, unsigned char *,
+ unsigned char *, unsigned char *);
+
+
+#endif /* CONFIG_SCSI_OMIT_FLASHPOINT */
diff --git a/linux/src/drivers/scsi/NCR5380.c b/linux/src/drivers/scsi/NCR5380.c
new file mode 100644
index 0000000..4f085e9
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR5380.c
@@ -0,0 +1,3246 @@
+#ifndef NDEBUG
+#define NDEBUG (NDEBUG_RESTART_SELECT | NDEBUG_ABORT)
+#endif
+/*
+ * NCR 5380 generic driver routines. These should make it *trivial*
+ * to implement 5380 SCSI drivers under Linux with a non-trantor
+ * architecture.
+ *
+ * Note that these routines also work with NCR53c400 family chips.
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * DISTRIBUTION RELEASE 6.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Revision 1.7 1996/3/2 Ray Van Tassle (rayvt@comm.mot.com)
+ * added proc_info
+ * added support needed for DTC 3180/3280
+ * fixed a couple of bugs
+ *
+
+ * Revision 1.5 1994/01/19 09:14:57 drew
+ * Fixed udelay() hack that was being used on DATAOUT phases
+ * instead of a proper wait for the final handshake.
+ *
+ * Revision 1.4 1994/01/19 06:44:25 drew
+ * *** empty log message ***
+ *
+ * Revision 1.3 1994/01/19 05:24:40 drew
+ * Added support for TCR LAST_BYTE_SENT bit.
+ *
+ * Revision 1.2 1994/01/15 06:14:11 drew
+ * REAL DMA support, bug fixes.
+ *
+ * Revision 1.1 1994/01/15 06:00:54 drew
+ * Initial revision
+ *
+ */
+
+/*
+ * Further development / testing that should be done :
+ * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete
+ * code so that everything does the same thing that's done at the
+ * end of a pseudo-DMA read operation.
+ *
+ * 2. Fix REAL_DMA (interrupt driven, polled works fine) -
+ * basically, transfer size needs to be reduced by one
+ * and the last byte read as is done with PSEUDO_DMA.
+ *
+ * 3. Test USLEEP code
+ *
+ * 4. Test SCSI-II tagged queueing (I have no devices which support
+ * tagged queueing)
+ *
+ * 5. Test linked command handling code after Eric is ready with
+ * the high level code.
+ */
+
+#if (NDEBUG & NDEBUG_LISTS)
+#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
+#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
+#else
+#define LIST(x,y)
+#define REMOVE(w,x,y,z)
+#endif
+
+#ifndef notyet
+#undef LINKED
+#undef USLEEP
+#undef REAL_DMA
+#endif
+
+#ifdef REAL_DMA_POLL
+#undef READ_OVERRUNS
+#define READ_OVERRUNS
+#endif
+
+/*
+ * Design
+ * Issues :
+ *
+ * The other Linux SCSI drivers were written when Linux was Intel PC-only,
+ * and specifically for each board rather than each chip. This makes their
+ * adaptation to platforms like the Mac (Some of which use NCR5380's)
+ * more difficult than it has to be.
+ *
+ * Also, many of the SCSI drivers were written before the command queuing
+ * routines were implemented, meaning their implementations of queued
+ * commands were hacked on rather than designed in from the start.
+ *
+ * When I designed the Linux SCSI drivers I figured that
+ * while having two different SCSI boards in a system might be useful
+ * for debugging things, two of the same type wouldn't be used.
+ * Well, I was wrong and a number of users have mailed me about running
+ * multiple high-performance SCSI boards in a server.
+ *
+ * Finally, when I get questions from users, I have no idea what
+ * revision of my driver they are running.
+ *
+ * This driver attempts to address these problems :
+ * This is a generic 5380 driver. To use it on a different platform,
+ * one simply writes appropriate system specific macros (ie, data
+ * transfer - some PC's will use the I/O bus, 68K's must use
+ * memory mapped) and drops this file in their 'C' wrapper.
+ *
+ * As far as command queueing, two queues are maintained for
+ * each 5380 in the system - commands that haven't been issued yet,
+ * and commands that are currently executing. This means that an
+ * unlimited number of commands may be queued, letting
+ * more commands propagate from the higher driver levels giving higher
+ * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
+ * allowing multiple commands to propagate all the way to a SCSI-II device
+ * while a command is already executing.
+ *
+ * To solve the multiple-boards-in-the-same-system problem,
+ * there is a separate instance structure for each instance
+ * of a 5380 in the system. So, multiple NCR5380 drivers will
+ * be able to coexist with appropriate changes to the high level
+ * SCSI code.
+ *
+ * A NCR5380_PUBLIC_REVISION macro is provided, with the release
+ * number (updated for each public release) printed by the
+ * NCR5380_print_options command, which should be called from the
+ * wrapper detect function, so that I know what release of the driver
+ * users are using.
+ *
+ * Issues specific to the NCR5380 :
+ *
+ * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
+ * piece of hardware that requires you to sit in a loop polling for
+ * the REQ signal as long as you are connected. Some devices are
+ * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
+ * while doing long seek operations.
+ *
+ * The workaround for this is to keep track of devices that have
+ * disconnected. If the device hasn't disconnected, for commands that
+ * should disconnect, we do something like
+ *
+ * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
+ *
+ * Some tweaking of N and M needs to be done. An algorithm based
+ * on "time to data" would give the best results as long as short time
+ * to datas (ie, on the same track) were considered, however these
+ * broken devices are the exception rather than the rule and I'd rather
+ * spend my time optimizing for the normal case.
+ *
+ * Architecture :
+ *
+ * At the heart of the design is a coroutine, NCR5380_main,
+ * which is started when not running by the interrupt handler,
+ * timer, and queue command function. It attempts to establish
+ * I_T_L or I_T_L_Q nexuses by removing the commands from the
+ * issue queue and calling NCR5380_select() if a nexus
+ * is not established.
+ *
+ * Once a nexus is established, the NCR5380_information_transfer()
+ * phase goes through the various phases as instructed by the target.
+ * If the target goes into MSG IN and sends a DISCONNECT message,
+ * the command structure is placed into the per instance disconnected
+ * queue, and NCR5380_main tries to find more work. If USLEEP
+ * was defined, and the target is idle for too long, the system
+ * will try to sleep.
+ *
+ * If a command has disconnected, eventually an interrupt will trigger,
+ * calling NCR5380_intr() which will in turn call NCR5380_reselect
+ * to reestablish a nexus. This will run main if necessary.
+ *
+ * On command termination, the done function will be called as
+ * appropriate.
+ *
+ * SCSI pointers are maintained in the SCp field of SCSI command
+ * structures, being initialized after the command is connected
+ * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
+ * Note that in violation of the standard, an implicit SAVE POINTERS operation
+ * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
+ */
+
+/*
+ * Using this file :
+ * This file is a skeleton Linux SCSI driver for the NCR 5380 series
+ * of chips.  To use it, you write architecture-specific functions
+ * and macros and include this file in your driver.
+ *
+ * These macros control options :
+ * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
+ * defined.
+ *
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
+ * transceivers.
+ *
+ * DONT_USE_INTR - if defined, never use interrupts, even if we probe or
+ * override-configure an IRQ.
+ *
+ * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
+ * bytes at a time. Since interrupts are disabled by default during
+ * these transfers, we might need this to give reasonable interrupt
+ * service time if the transfer size gets too large.
+ *
+ * LINKED - if defined, linked commands are supported.
+ *
+ * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
+ *
+ * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
+ *
+ * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
+ * rely on phase mismatch and EOP interrupts to determine end
+ * of phase.
+ *
+ * SCSI2 - if defined, SCSI-2 tagged queuing is used where possible
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
+ * only really want to use this if you're having a problem with
+ * dropped characters during high speed communications, and even
+ * then, you're going to be better off twiddling with transfersize
+ * in the high level code.
+ *
+ * USLEEP - if defined, on devices that aren't disconnecting from the
+ * bus, we will go to sleep so that the CPU can get real work done
+ * when we run a command that won't complete immediately.
+ *
+ * Note that if USLEEP is defined, NCR5380_TIMER *must* also be
+ * defined.
+ *
+ * Defaults for these will be provided if USLEEP is defined, although
+ * the user may want to adjust these to allocate CPU resources to
+ * the SCSI driver or "real" code.
+ *
+ * USLEEP_SLEEP - amount of time, in jiffies, to sleep
+ *
+ * USLEEP_POLL - amount of time, in jiffies, to poll
+ *
+ * These macros MUST be defined :
+ * NCR5380_local_declare() - declare any local variables needed for your
+ * transfer routines.
+ *
+ * NCR5380_setup(instance) - initialize any local variables needed from a given
+ * instance of the host adapter for NCR5380_{read,write,pread,pwrite}
+ *
+ * NCR5380_read(register) - read from the specified register
+ *
+ * NCR5380_write(register, value) - write to the specific register
+ *
+ * NCR5380_implementation_fields - additional fields needed for this
+ * specific implementation of the NCR5380
+ *
+ * Either real DMA *or* pseudo DMA may be implemented
+ * REAL functions :
+ * NCR5380_REAL_DMA should be defined if real DMA is to be used.
+ * Note that the DMA setup functions should return the number of bytes
+ * that they were able to program the controller for.
+ *
+ * Also note that generic i386/PC versions of these macros are
+ * available as NCR5380_i386_dma_write_setup,
+ * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
+ *
+ * NCR5380_dma_write_setup(instance, src, count) - initialize
+ * NCR5380_dma_read_setup(instance, dst, count) - initialize
+ * NCR5380_dma_residual(instance); - residual count
+ *
+ * PSEUDO functions :
+ * NCR5380_pwrite(instance, src, count)
+ * NCR5380_pread(instance, dst, count);
+ *
+ * If nothing specific to this implementation needs doing (ie, with external
+ * hardware), you must also define
+ *
+ * NCR5380_queue_command
+ * NCR5380_reset
+ * NCR5380_abort
+ * NCR5380_proc_info
+ *
+ * to be the global entry points into the specific driver, ie
+ * #define NCR5380_queue_command t128_queue_command.
+ *
+ * If this is not done, the routines will be defined as static functions
+ * with the NCR5380* names and the user must provide a globally
+ * accessible wrapper function.
+ *
+ * The generic driver is initialized by calling NCR5380_init(instance),
+ * after setting the appropriate host specific fields and ID. If the
+ * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
+ * possible) function may be used. Before the specific driver initialization
+ * code finishes, NCR5380_print_options should be called.
+ */
+
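+/*
+ * A minimal sketch of the wrapper glue described above, assuming a simple
+ * port-mapped i386 board.  The _base variable, the use of the host's
+ * io_port field, and the myboard_* names are illustrative only and are
+ * not part of this file:
+ *
+ *	#define NCR5380_local_declare()   unsigned int _base
+ *	#define NCR5380_setup(instance)   _base = (instance)->io_port
+ *	#define NCR5380_read(reg)         inb(_base + (reg))
+ *	#define NCR5380_write(reg, value) outb((value), _base + (reg))
+ *	#define NCR5380_implementation_fields
+ *	#define NCR5380_queue_command     myboard_queue_command
+ *	#define NCR5380_abort             myboard_abort
+ *	#define NCR5380_reset             myboard_reset
+ *
+ * The board driver then includes this file and calls NCR5380_init() from
+ * its detect routine.
+ */
+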
+static int do_abort (struct Scsi_Host *host);
+static void do_reset (struct Scsi_Host *host);
+static struct Scsi_Host *first_instance = NULL;
+static Scsi_Host_Template *the_template = NULL;
+
+/*
+ * Function : void initialize_SCp(Scsi_Cmnd *cmd)
+ *
+ * Purpose : initialize the saved data pointers for cmd to point to the
+ * start of the buffer.
+ *
+ * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
+ */
+
+static __inline__ void initialize_SCp(Scsi_Cmnd *cmd) {
+ /*
+ * Initialize the Scsi Pointer field so that all of the commands in the
+ * various queues are valid.
+ */
+
+ if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *) cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ } else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *) cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ }
+}
+
+#include <linux/delay.h>
+
+#ifdef NDEBUG
+static struct {
+ unsigned char mask;
+ const char * name;}
+signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
+ { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
+ { SR_SEL, "SEL" }, {0, NULL}},
+basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}},
+icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
+ {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
+ {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
+ {0, NULL}},
+mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
+ {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
+ "MODE PARITY INTR"}, {MR_MONITOR_BSY, "MODE MONITOR BSY"},
+ {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
+ {0, NULL}};
+
+/*
+ * Function : void NCR5380_print(struct Scsi_Host *instance)
+ *
+ * Purpose : print the SCSI bus signals for debugging purposes
+ *
+ * Input : instance - which NCR5380
+ */
+
+static void NCR5380_print(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ unsigned char status, data, basr, mr, icr, i;
+ NCR5380_setup(instance);
+ cli();
+ data = NCR5380_read(CURRENT_SCSI_DATA_REG);
+ status = NCR5380_read(STATUS_REG);
+ mr = NCR5380_read(MODE_REG);
+ icr = NCR5380_read(INITIATOR_COMMAND_REG);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ sti();
+ printk("STATUS_REG: %02x ", status);
+ for (i = 0; signals[i].mask ; ++i)
+ if (status & signals[i].mask)
+ printk(",%s", signals[i].name);
+ printk("\nBASR: %02x ", basr);
+ for (i = 0; basrs[i].mask ; ++i)
+ if (basr & basrs[i].mask)
+ printk(",%s", basrs[i].name);
+ printk("\nICR: %02x ", icr);
+ for (i = 0; icrs[i].mask; ++i)
+ if (icr & icrs[i].mask)
+ printk(",%s", icrs[i].name);
+ printk("\nMODE: %02x ", mr);
+ for (i = 0; mrs[i].mask; ++i)
+ if (mr & mrs[i].mask)
+ printk(",%s", mrs[i].name);
+ printk("\n");
+}
+
+static struct {
+ unsigned char value;
+ const char *name;
+} phases[] = {
+{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
+{PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
+{PHASE_UNKNOWN, "UNKNOWN"}};
+
+/*
+ * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
+ *
+ * Purpose : print the current SCSI phase for debugging purposes
+ *
+ * Input : instance - which NCR5380
+ */
+
+static void NCR5380_print_phase(struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ NCR5380_setup(instance);
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ printk("scsi%d : REQ not asserted, phase unknown.\n",
+ instance->host_no);
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i);
+ printk("scsi%d : phase %s\n", instance->host_no, phases[i].name);
+ }
+}
+#endif
+
+/*
+ * We need to have our coroutine active given these constraints :
+ * 1. The mutex flag, main_running, can only be set when the main
+ * routine can actually process data, otherwise SCSI commands
+ * will never get issued.
+ *
+ * 2. NCR5380_main() shouldn't be called before it has exited, because
+ * other drivers have had kernel stack overflows in similar
+ * situations.
+ *
+ * 3. We don't want to inline NCR5380_main() because of space concerns,
+ * even though it is only called in two places.
+ *
+ * So, the solution is to set the mutex in an inline wrapper for the
+ * main coroutine, and have the main coroutine exit with interrupts
+ * disabled after the final search through the queues so that no race
+ * conditions are possible.
+ */
+
+static volatile int main_running = 0;
+
+/*
+ * Function : run_main(void)
+ *
+ * Purpose : ensure that the coroutine is running and will process our
+ * request. main_running is checked/set here (in an inline function)
+ * rather than in NCR5380_main itself to reduce the chances of stack
+ * overflow.
+ *
+ */
+
+static __inline__ void run_main(void) {
+ cli();
+ if (!main_running) {
+ main_running = 1;
+ NCR5380_main();
+ /*
+ * main_running is cleared in NCR5380_main once it can't do
+ * more work, and NCR5380_main exits with interrupts disabled.
+ */
+ sti();
+ } else
+ sti();
+}
+
+#ifdef USLEEP
+#ifndef NCR5380_TIMER
+#error "NCR5380_TIMER must be defined so that this type of NCR5380 driver gets a unique timer."
+#endif
+
+/*
+ * These need tweaking, and would probably work best as per-device
+ * flags initialized differently for disk, tape, cd, etc devices.
+ * People with broken devices are free to experiment as to what gives
+ * the best results for them.
+ *
+ * USLEEP_SLEEP should be a minimum seek time.
+ *
+ * USLEEP_POLL should be a maximum rotational latency.
+ */
+#ifndef USLEEP_SLEEP
+/* 20 ms (reasonable hard disk speed) */
+#define USLEEP_SLEEP (20*HZ/1000)
+#endif
+/* 300 RPM (floppy speed) */
+#ifndef USLEEP_POLL
+#define USLEEP_POLL (200*HZ/1000)
+#endif
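+
+/*
+ * For example (assuming HZ is 100, as on i386), these defaults work out to
+ *   USLEEP_SLEEP = 20*100/1000  = 2 jiffies  (20 ms minimum seek time)
+ *   USLEEP_POLL  = 200*100/1000 = 20 jiffies (200 ms, one revolution at 300 RPM)
+ */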
+
+static struct Scsi_Host * expires_first = NULL;
+
+/*
+ * Function : int should_disconnect (unsigned char cmd)
+ *
+ * Purpose : decide whether a command would normally disconnect or
+ * not, since if it won't disconnect we should go to sleep.
+ *
+ * Input : cmd - opcode of SCSI command
+ *
+ * Returns : DISCONNECT_LONG if we should disconnect for a really long
+ * time (ie always, sleep, look for REQ active, sleep),
+ * DISCONNECT_TIME_TO_DATA if we would only disconnect for a normal
+ * time-to-data delay, DISCONNECT_NONE if this command would return
+ * immediately.
+ *
+ * Future sleep algorithms based on time to data can exploit
+ * something like this so they can differentiate between "normal"
+ * (ie, read, write, seek) and unusual commands (ie, format).
+ *
+ * Note : We don't deal with commands that handle an immediate disconnect.
+ *
+ */
+
+static int should_disconnect (unsigned char cmd) {
+ switch (cmd) {
+ case READ_6:
+ case WRITE_6:
+ case SEEK_6:
+ case READ_10:
+ case WRITE_10:
+ case SEEK_10:
+ return DISCONNECT_TIME_TO_DATA;
+ case FORMAT_UNIT:
+ case SEARCH_HIGH:
+ case SEARCH_LOW:
+ case SEARCH_EQUAL:
+ return DISCONNECT_LONG;
+ default:
+ return DISCONNECT_NONE;
+ }
+}
+
+/*
+ * Assumes the instance's hostdata->time_expires has been set in higher level code.
+ */
+
+static int NCR5380_set_timer (struct Scsi_Host *instance) {
+  struct Scsi_Host *tmp, **prev;
+  struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+    instance->hostdata;
+
+  cli();
+  if (hostdata->next_timer) {
+    sti();
+    return -1;
+  }
+
+  for (prev = &expires_first, tmp = expires_first; tmp;
+    prev = &(((struct NCR5380_hostdata *) tmp->hostdata)->next_timer),
+    tmp = ((struct NCR5380_hostdata *) tmp->hostdata)->next_timer)
+    if (hostdata->time_expires <
+	((struct NCR5380_hostdata *) tmp->hostdata)->time_expires)
+      break;
+
+  hostdata->next_timer = tmp;
+  *prev = instance;
+  timer_table[NCR5380_TIMER].expires =
+    ((struct NCR5380_hostdata *) expires_first->hostdata)->time_expires;
+  timer_active |= 1 << NCR5380_TIMER;
+  sti();
+  return 0;
+}
+
+/* Doing something about unwanted reentrancy here might be useful */
+void NCR5380_timer_fn(void) {
+  struct Scsi_Host *instance;
+  struct NCR5380_hostdata *hostdata;
+  cli();
+  while (expires_first && ((struct NCR5380_hostdata *)
+      expires_first->hostdata)->time_expires >= jiffies) {
+    hostdata = (struct NCR5380_hostdata *) expires_first->hostdata;
+    instance = hostdata->next_timer;
+    hostdata->next_timer = NULL;
+    hostdata->time_expires = 0;
+    expires_first = instance;
+  }
+
+  if (expires_first) {
+    timer_table[NCR5380_TIMER].expires = ((struct NCR5380_hostdata *)
+      expires_first->hostdata)->time_expires;
+    timer_active |= (1 << NCR5380_TIMER);
+  } else {
+    timer_table[NCR5380_TIMER].expires = 0;
+    timer_active &= ~(1 << NCR5380_TIMER);
+  }
+  sti();
+
+  run_main();
+}
+#endif /* def USLEEP */
+
+static void NCR5380_all_init (void) {
+ static int done = 0;
+ if (!done) {
+#if (NDEBUG & NDEBUG_INIT)
+ printk("scsi : NCR5380_all_init()\n");
+#endif
+ done = 1;
+#ifdef USLEEP
+ timer_table[NCR5380_TIMER].expires = 0;
+ timer_table[NCR5380_TIMER].fn = NCR5380_timer_fn;
+#endif
+ }
+}
+
+#ifdef AUTOPROBE_IRQ
+/*
+ * Function : int NCR5380_probe_irq (struct Scsi_Host *instance, int possible)
+ *
+ * Purpose : autoprobe for the IRQ line used by the NCR5380.
+ *
+ * Inputs : instance - pointer to this instance of the NCR5380 driver,
+ * possible - bitmask of permissible interrupts.
+ *
+ * Returns : number of the IRQ selected, IRQ_NONE if no interrupt fired.
+ *
+ * XXX no effort is made to deal with spurious interrupts.
+ */
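+
+/*
+ * Illustration only (hypothetical board code) : a detect routine would
+ * typically restrict the probe to the IRQs its hardware can generate, e.g.
+ *
+ *   instance->irq = NCR5380_probe_irq (instance, (1 << 3) | (1 << 5) | (1 << 7));
+ *   if (instance->irq == IRQ_NONE)
+ *	printk("scsi%d : no interrupt detected\n", instance->host_no);
+ */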
+
+
+static int probe_irq;
+static void probe_intr (int irq, void *dev_id, struct pt_regs * regs) {
+ probe_irq = irq;
+}
+
+static int NCR5380_probe_irq (struct Scsi_Host *instance, int possible) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned long timeout;
+ int trying_irqs, i, mask;
+ NCR5380_setup(instance);
+
+ for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+ if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe", NULL)
+ == 0))
+ trying_irqs |= mask;
+
+ timeout = jiffies + 250*HZ/1000;
+ probe_irq = IRQ_NONE;
+
+/*
+ * An interrupt is triggered whenever BSY = false, SEL = true
+ * and a bit set in the SELECT_ENABLE_REG is asserted on the
+ * SCSI bus.
+ *
+ * Note that the bus is only driven when the phase control signals
+ * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
+ * to zero.
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA |
+ ICR_ASSERT_SEL);
+
+ while (probe_irq == IRQ_NONE && jiffies < timeout)
+ barrier();
+
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ for (i = 0, mask = 1; i < 16; ++i, mask <<= 1)
+ if (trying_irqs & mask)
+ free_irq(i, NULL);
+
+ return probe_irq;
+}
+#endif /* AUTOPROBE_IRQ */
+
+/*
+ * Function : void NCR5380_print_options (struct Scsi_Host *instance)
+ *
+ * Purpose : called by probe code indicating the NCR5380 driver
+ * options that were selected.
+ *
+ * Inputs : instance, pointer to this instance. Unused.
+ */
+
+static void NCR5380_print_options (struct Scsi_Host *instance) {
+ printk(" generic options"
+#ifdef AUTOPROBE_IRQ
+ " AUTOPROBE_IRQ"
+#endif
+#ifdef AUTOSENSE
+ " AUTOSENSE"
+#endif
+#ifdef DIFFERENTIAL
+ " DIFFERENTIAL"
+#endif
+#ifdef REAL_DMA
+ " REAL DMA"
+#endif
+#ifdef REAL_DMA_POLL
+ " REAL DMA POLL"
+#endif
+#ifdef PARITY
+ " PARITY"
+#endif
+#ifdef PSEUDO_DMA
+ " PSEUDO DMA"
+#endif
+#ifdef SCSI2
+ " SCSI-2"
+#endif
+#ifdef UNSAFE
+ " UNSAFE "
+#endif
+ );
+#ifdef USLEEP
+ printk(" USLEEP, USLEEP_POLL=%d USLEEP_SLEEP=%d", USLEEP_POLL, USLEEP_SLEEP);
+#endif
+ printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
+ if (((struct NCR5380_hostdata *)instance->hostdata)->flags & FLAG_NCR53C400) {
+ printk(" ncr53c400 release=%d", NCR53C400_PUBLIC_RELEASE);
+ }
+}
+
+/*
+ * Function : void NCR5380_print_status (struct Scsi_Host *instance)
+ *
+ * Purpose : print commands in the various queues, called from
+ * NCR5380_abort and NCR5380_debug to aid debugging.
+ *
+ * Inputs : instance, pointer to this instance.
+ */
+
+static void NCR5380_print_status (struct Scsi_Host *instance) {
+ static char pr_bfr[512];
+ char *start;
+ int len;
+
+ printk("NCR5380 : coroutine is%s running.\n",
+ main_running ? "" : "n't");
+
+#ifdef NDEBUG
+ NCR5380_print (instance);
+ NCR5380_print_phase (instance);
+#endif
+
+ len = NCR5380_proc_info(pr_bfr, &start, 0, sizeof(pr_bfr),
+ instance->host_no, 0);
+ pr_bfr[len] = 0;
+ printk("\n%s\n", pr_bfr);
+}
+
+/******************************************/
+/*
+ * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
+ *
+ * *buffer: I/O buffer
+ * **start: if inout == FALSE pointer into buffer where user read should start
+ * offset: current offset
+ * length: length of buffer
+ * hostno: Scsi_Host host_no
+ * inout: TRUE - user is writing; FALSE - user is reading
+ *
+ * Return the number of bytes read from or written
+*/
+
+#undef SPRINTF
+#define SPRINTF(args...) do { if(pos < buffer + length-80) pos += sprintf(pos, ## args); } while(0)
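+/*
+ * SPRINTF appends formatted output at the 'pos' cursor only while roughly
+ * one more line (80 bytes) still fits in 'buffer', and silently drops any
+ * further output; a typical use below is
+ *   SPRINTF("IRQ: %d.\n", instance->irq);
+ */
+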
+static
+char *lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length);
+static
+char *lprint_command (unsigned char *cmd, char *pos, char *buffer, int len);
+static
+char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
+
+#ifndef NCR5380_proc_info
+static
+#endif
+int NCR5380_proc_info (
+ char *buffer, char **start,off_t offset,
+ int length,int hostno,int inout)
+{
+ char *pos = buffer;
+ struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
+ Scsi_Cmnd *ptr;
+
+ for (instance = first_instance; instance &&
+ instance->host_no != hostno; instance=instance->next)
+ ;
+ if (!instance)
+ return(-ESRCH);
+ hostdata = (struct NCR5380_hostdata *)instance->hostdata;
+
+ if (inout) { /* Has data been written to the file ? */
+#ifdef DTC_PUBLIC_RELEASE
+ dtc_wmaxi = dtc_maxi = 0;
+#endif
+#ifdef PAS16_PUBLIC_RELEASE
+ pas_wmaxi = pas_maxi = 0;
+#endif
+ return(-ENOSYS); /* Currently this is a no-op */
+ }
+ SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE);
+ if (((struct NCR5380_hostdata *)instance->hostdata)->flags & FLAG_NCR53C400)
+ SPRINTF("ncr53c400 release=%d. ", NCR53C400_PUBLIC_RELEASE);
+#ifdef DTC_PUBLIC_RELEASE
+ SPRINTF("DTC 3180/3280 release %d", DTC_PUBLIC_RELEASE);
+#endif
+#ifdef T128_PUBLIC_RELEASE
+ SPRINTF("T128 release %d", T128_PUBLIC_RELEASE);
+#endif
+#ifdef GENERIC_NCR5380_PUBLIC_RELEASE
+ SPRINTF("Generic5380 release %d", GENERIC_NCR5380_PUBLIC_RELEASE);
+#endif
+#ifdef PAS16_PUBLIC_RELEASE
+  SPRINTF("PAS16 release=%d", PAS16_PUBLIC_RELEASE);
+#endif
+
+ SPRINTF("\nBase Addr: 0x%05lX ", (long)instance->base);
+ SPRINTF("io_port: %04x ", (int)instance->io_port);
+ if (instance->irq == IRQ_NONE)
+ SPRINTF("IRQ: None.\n");
+ else
+ SPRINTF("IRQ: %d.\n", instance->irq);
+
+#ifdef DTC_PUBLIC_RELEASE
+ SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n",
+ dtc_wmaxi, dtc_maxi);
+#endif
+#ifdef PAS16_PUBLIC_RELEASE
+ SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n",
+ pas_wmaxi, pas_maxi);
+#endif
+ cli();
+ SPRINTF("NCR5380 : coroutine is%s running.\n", main_running ? "" : "n't");
+ if (!hostdata->connected)
+ SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
+ else
+ pos = lprint_Scsi_Cmnd ((Scsi_Cmnd *) hostdata->connected,
+ pos, buffer, length);
+ SPRINTF("scsi%d: issue_queue\n", instance->host_no);
+ for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
+
+ SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length);
+
+ sti();
+ *start=buffer;
+ if (pos - buffer < offset)
+ return 0;
+ else if (pos - buffer - offset < length)
+ return pos - buffer - offset;
+ return length;
+}
+
+static
+char *lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length) {
+ SPRINTF("scsi%d : destination target %d, lun %d\n",
+ cmd->host->host_no, cmd->target, cmd->lun);
+ SPRINTF(" command = ");
+ pos = lprint_command (cmd->cmnd, pos, buffer, length);
+ return (pos);
+}
+
+static
+char *lprint_command (unsigned char *command,
+ char *pos, char *buffer, int length) {
+ int i, s;
+ pos = lprint_opcode(command[0], pos, buffer, length);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ SPRINTF("%02x ", command[i]);
+ SPRINTF("\n");
+ return(pos);
+}
+
+static
+char *lprint_opcode(int opcode, char *pos, char *buffer, int length) {
+ SPRINTF("%2d (0x%02x)", opcode, opcode);
+ return(pos);
+}
+
+
+/*
+ * Function : void NCR5380_init (struct Scsi_Host *instance, flags)
+ *
+ * Purpose : initializes *instance and corresponding 5380 chip,
+ * with flags OR'd into the initial flags value.
+ *
+ * Inputs : instance - instantiation of the 5380 driver.
+ *
+ * Notes : I assume that the host, hostno, and id bits have been
+ * set correctly. I don't care about the irq and other fields.
+ *
+ */
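+
+/*
+ * Illustration only : as described in the overview comment earlier in this
+ * file, a (hypothetical) board detect routine would typically do
+ *
+ *   instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ *   ... set the host specific fields, this_id and irq ...
+ *   NCR5380_init (instance, 0);
+ *   NCR5380_print_options (instance);
+ */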
+
+static void NCR5380_init (struct Scsi_Host *instance, int flags) {
+ NCR5380_local_declare();
+ int i, pass;
+ unsigned long timeout;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+
+#ifdef NCR53C400
+ if (flags & FLAG_NCR53C400)
+ instance->NCR5380_instance_name += NCR53C400_address_adjust;
+#endif
+
+ NCR5380_setup(instance);
+
+ NCR5380_all_init();
+
+ hostdata->aborted = 0;
+ hostdata->id_mask = 1 << instance->this_id;
+ for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
+ if (i > hostdata->id_mask)
+ hostdata->id_higher_mask |= i;
+ for (i = 0; i < 8; ++i)
+ hostdata->busy[i] = 0;
+#ifdef REAL_DMA
+ hostdata->dmalen = 0;
+#endif
+ hostdata->targets_present = 0;
+ hostdata->connected = NULL;
+ hostdata->issue_queue = NULL;
+ hostdata->disconnected_queue = NULL;
+#ifdef NCR5380_STATS
+ for (i = 0; i < 8; ++i) {
+ hostdata->time_read[i] = 0;
+ hostdata->time_write[i] = 0;
+ hostdata->bytes_read[i] = 0;
+ hostdata->bytes_write[i] = 0;
+ }
+ hostdata->timebase = 0;
+ hostdata->pendingw = 0;
+ hostdata->pendingr = 0;
+#endif
+
+ /* The CHECK code seems to break the 53C400. Will check it later maybe */
+ if (flags & FLAG_NCR53C400)
+ hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags;
+ else
+ hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT | flags;
+
+ if (!the_template) {
+ the_template = instance->hostt;
+ first_instance = instance;
+ }
+
+
+#ifdef USLEEP
+ hostdata->time_expires = 0;
+ hostdata->next_timer = NULL;
+#endif
+
+#ifndef AUTOSENSE
+  if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
+ printk("scsi%d : WARNING : support for multiple outstanding commands enabled\n"
+ " without AUTOSENSE option, contingent allegiance conditions may\n"
+ " be incorrectly cleared.\n", instance->host_no);
+#endif /* ndef AUTOSENSE */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+#ifdef NCR53C400
+ if (hostdata->flags & FLAG_NCR53C400) {
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ }
+#endif
+
+ /*
+ * Detect and correct bus wedge problems.
+ *
+ * If the system crashed, it may have crashed in a state
+ * where a SCSI command was still executing, and the
+ * SCSI bus is not in a BUS FREE STATE.
+ *
+ * If this is the case, we'll try to abort the currently
+ * established nexus which we know nothing about, and that
+ * failing, do a hard reset of the SCSI bus
+ */
+
+ for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) &&
+ pass <= 6 ; ++pass) {
+ switch (pass) {
+ case 1:
+ case 3:
+ case 5:
+ printk("scsi%d: SCSI bus busy, waiting up to five seconds\n",
+ instance->host_no);
+ timeout = jiffies + 5*HZ;
+ while (jiffies < timeout && (NCR5380_read(STATUS_REG) & SR_BSY));
+ break;
+ case 2:
+ printk("scsi%d: bus busy, attempting abort\n",
+ instance->host_no);
+ do_abort (instance);
+ break;
+ case 4:
+ printk("scsi%d: bus busy, attempting reset\n",
+ instance->host_no);
+ do_reset (instance);
+ break;
+ case 6:
+ printk("scsi%d: bus locked solid or invalid override\n",
+ instance->host_no);
+ }
+ }
+}
+
+/*
+ * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
+ * void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose : enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ * a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ * Side effects :
+ * cmd is added to the per instance issue_queue, with minor
+ * twiddling done to the host specific fields of cmd. If the
+ * main coroutine is not running, it is restarted.
+ *
+ */
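+
+/*
+ * Illustration only : a board driver that does not #define
+ * NCR5380_queue_command would instead export a (hypothetical) wrapper
+ * around the static function below, e.g.
+ *
+ *   int myboard_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+ *   {
+ *	return NCR5380_queue_command (cmd, done);
+ *   }
+ */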
+
+/* Only make static if a wrapper function is used */
+#ifndef NCR5380_queue_command
+static
+#endif
+int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) {
+ struct Scsi_Host *instance = cmd->host;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ Scsi_Cmnd *tmp;
+
+#if (NDEBUG & NDEBUG_NO_WRITE)
+ switch (cmd->cmnd[0]) {
+ case WRITE_6:
+ case WRITE_10:
+ printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
+ instance->host_no);
+ cmd->result = (DID_ERROR << 16);
+ done(cmd);
+ return 0;
+ }
+#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
+
+#ifdef NCR5380_STATS
+# if 0
+ if (!hostdata->connected && !hostdata->issue_queue &&
+ !hostdata->disconnected_queue) {
+ hostdata->timebase = jiffies;
+ }
+# endif
+# ifdef NCR5380_STAT_LIMIT
+ if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
+# endif
+ switch (cmd->cmnd[0])
+ {
+ case WRITE:
+ case WRITE_6:
+ case WRITE_10:
+ hostdata->time_write[cmd->target] -= (jiffies - hostdata->timebase);
+ hostdata->bytes_write[cmd->target] += cmd->request_bufflen;
+ hostdata->pendingw++;
+ break;
+ case READ:
+ case READ_6:
+ case READ_10:
+ hostdata->time_read[cmd->target] -= (jiffies - hostdata->timebase);
+ hostdata->bytes_read[cmd->target] += cmd->request_bufflen;
+ hostdata->pendingr++;
+ break;
+ }
+#endif
+
+ /*
+ * We use the host_scribble field as a pointer to the next command
+ * in a queue
+ */
+
+ cmd->host_scribble = NULL;
+ cmd->scsi_done = done;
+
+ cmd->result = 0;
+
+
+ /*
+ * Insert the cmd into the issue queue. Note that REQUEST SENSE
+ * commands are added to the head of the queue since any command will
+ * clear the contingent allegiance condition that exists and the
+ * sense data is only guaranteed to be valid while the condition exists.
+ */
+
+ cli();
+ if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ LIST(cmd, hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *) hostdata->issue_queue;
+ hostdata->issue_queue = cmd;
+ } else {
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->host_scribble;
+ tmp = (Scsi_Cmnd *) tmp->host_scribble);
+ LIST(cmd, tmp);
+ tmp->host_scribble = (unsigned char *) cmd;
+ }
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command added to %s of queue\n", instance->host_no,
+ (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
+#endif
+
+/* Run the coroutine if it isn't already running. */
+ run_main();
+ return 0;
+}
+
+/*
+ * Function : NCR5380_main (void)
+ *
+ * Purpose : NCR5380_main is a coroutine that runs as long as more work can
+ * be done on the NCR5380 host adapters in a system. Both
+ * NCR5380_queue_command() and NCR5380_intr() will try to start it
+ * in case it is not running.
+ *
+ * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
+ * reenable them. This prevents reentrancy and kernel stack overflow.
+ */
+
+static void NCR5380_main (void) {
+ Scsi_Cmnd *tmp, *prev;
+ struct Scsi_Host *instance;
+ struct NCR5380_hostdata *hostdata;
+ int done;
+
+ /*
+ * We run (with interrupts disabled) until we're sure that none of
+ * the host adapters have anything that can be done, at which point
+ * we set main_running to 0 and exit.
+ *
+ * Interrupts are enabled before doing various other internal
+ * instructions, after we've decided that we need to run through
+ * the loop again.
+ *
+ * this should prevent any race conditions.
+ */
+
+ do {
+ cli(); /* Freeze request queues */
+ done = 1;
+ for (instance = first_instance; instance &&
+ instance->hostt == the_template; instance=instance->next) {
+ hostdata = (struct NCR5380_hostdata *) instance->hostdata;
+ cli();
+ if (!hostdata->connected) {
+#if (NDEBUG & NDEBUG_MAIN)
+ printk("scsi%d : not connected\n", instance->host_no);
+#endif
+ /*
+ * Search through the issue_queue for a command destined
+ * for a target that's not busy.
+ */
+#if (NDEBUG & NDEBUG_LISTS)
+ for (tmp= (Scsi_Cmnd *) hostdata->issue_queue, prev=NULL; tmp && (tmp != prev); prev=tmp, tmp=(Scsi_Cmnd*)tmp->host_scribble)
+ ;
+ /*printk("%p ", tmp);*/
+ if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
+#endif
+ for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
+ prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
+ tmp->host_scribble) {
+
+#if (NDEBUG & NDEBUG_LISTS)
+ if (prev != tmp)
+ printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun);
+#endif
+ /* When we find one, remove it from the issue queue. */
+ if (!(hostdata->busy[tmp->target] & (1 << tmp->lun))) {
+ if (prev) {
+ REMOVE(prev,prev->host_scribble,tmp,tmp->host_scribble);
+ prev->host_scribble = tmp->host_scribble;
+ } else {
+ REMOVE(-1,hostdata->issue_queue,tmp,tmp->host_scribble);
+ hostdata->issue_queue = (Scsi_Cmnd *) tmp->host_scribble;
+ }
+ tmp->host_scribble = NULL;
+
+ /* reenable interrupts after finding one */
+ sti();
+
+ /*
+ * Attempt to establish an I_T_L nexus here.
+ * On success, instance->hostdata->connected is set.
+ * On failure, we must add the command back to the
+ * issue queue so we can keep trying.
+ */
+#if (NDEBUG & (NDEBUG_MAIN | NDEBUG_QUEUES))
+ printk("scsi%d : main() : command for target %d lun %d removed from issue_queue\n",
+ instance->host_no, tmp->target, tmp->lun);
+#endif
+
+ /*
+ * A successful selection is defined as one that
+ * leaves us with the command connected and
+ * in hostdata->connected, OR has terminated the
+ * command.
+ *
+ * With successful commands, we fall through
+ * and see if we can do an information transfer,
+ * with failures we will restart.
+ */
+
+ if (!NCR5380_select(instance, tmp,
+ /*
+ * REQUEST SENSE commands are issued without tagged
+ * queueing, even on SCSI-II devices because the
+ * contingent allegiance condition exists for the
+ * entire unit.
+ */
+ (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
+ TAG_NEXT)) {
+ break;
+ } else {
+ cli();
+ LIST(tmp, hostdata->issue_queue);
+ tmp->host_scribble = (unsigned char *)
+ hostdata->issue_queue;
+ hostdata->issue_queue = tmp;
+ done = 0;
+ sti();
+#if (NDEBUG & (NDEBUG_MAIN | NDEBUG_QUEUES))
+ printk("scsi%d : main(): select() failed, returned to issue_queue\n",
+ instance->host_no);
+#endif
+ }
+ } /* if target/lun is not busy */
+ } /* for */
+ } /* if (!hostdata->connected) */
+
+ if (hostdata->connected
+#ifdef REAL_DMA
+ && !hostdata->dmalen
+#endif
+#ifdef USLEEP
+ && (!hostdata->time_expires || hostdata->time_expires >= jiffies)
+#endif
+ ) {
+ sti();
+#if (NDEBUG & NDEBUG_MAIN)
+ printk("scsi%d : main() : performing information transfer\n",
+ instance->host_no);
+#endif
+ NCR5380_information_transfer(instance);
+#if (NDEBUG & NDEBUG_MAIN)
+ printk("scsi%d : main() : done set false\n", instance->host_no);
+#endif
+ done = 0;
+ } else
+ break;
+ } /* for instance */
+ } while (!done);
+ main_running = 0;
+}
+
+#ifndef DONT_USE_INTR
+/*
+ * Function : void NCR5380_intr (int irq)
+ *
+ * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
+ * from the disconnected queue, and restarting NCR5380_main()
+ * as required.
+ *
+ * Inputs : int irq, irq that caused this interrupt.
+ *
+ */
+
+static void NCR5380_intr (int irq, void *dev_id, struct pt_regs * regs) {
+ NCR5380_local_declare();
+ struct Scsi_Host *instance;
+ int done;
+ unsigned char basr;
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi : NCR5380 irq %d triggered\n", irq);
+#endif
+ do {
+ done = 1;
+ for (instance = first_instance; instance && (instance->hostt ==
+ the_template); instance = instance->next)
+ if (instance->irq == irq) {
+
+ /* Look for pending interrupts */
+ NCR5380_setup(instance);
+ basr = NCR5380_read(BUS_AND_STATUS_REG);
+ /* XXX dispatch to appropriate routine if found and done=0 */
+ if (basr & BASR_IRQ) {
+#if (NDEBUG & NDEBUG_INTR)
+ NCR5380_print(instance);
+#endif
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
+ (SR_SEL | SR_IO)) {
+ done = 0;
+ sti();
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi%d : SEL interrupt\n", instance->host_no);
+#endif
+ NCR5380_reselect(instance);
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if (basr & BASR_PARITY_ERROR) {
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi%d : PARITY interrupt\n", instance->host_no);
+#endif
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi%d : RESET interrupt\n", instance->host_no);
+#endif
+ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+/*
+ * XXX the rest of the interrupt conditions should *only* occur during a
+ * DMA transfer, which I haven't gotten around to fixing yet.
+ */
+
+#if defined(REAL_DMA)
+ /*
+ * We should only get PHASE MISMATCH and EOP interrupts
+ * if we have DMA enabled, so do a sanity check based on
+ * the current setting of the MODE register.
+ */
+
+ if ((NCR5380_read(MODE_REG) & MR_DMA) && ((basr &
+ BASR_END_DMA_TRANSFER) ||
+ !(basr & BASR_PHASE_MATCH))) {
+	      int transferred;
+	      struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+		  instance->hostdata;
+
+	      if (!hostdata->connected)
+		  panic("scsi%d : received end of DMA interrupt with no connected cmd\n",
+		      instance->host_no);
+
+	      transferred = (hostdata->dmalen - NCR5380_dma_residual(instance));
+	      hostdata->connected->SCp.this_residual -= transferred;
+	      hostdata->connected->SCp.ptr += transferred;
+ hostdata->dmalen = 0;
+
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#if NCR_TIMEOUT
+ {
+ unsigned long timeout = jiffies + NCR_TIMEOUT;
+
+ while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK
+ && jiffies < timeout)
+ ;
+ if (jiffies >= timeout)
+ printk("scsi%d: timeout at NCR5380.c:%d\n",
+		      instance->host_no, __LINE__);
+ }
+#else /* NCR_TIMEOUT */
+ while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+#else
+#if (NDEBUG & NDEBUG_INTR)
+ printk("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
+#endif
+ (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+#endif
+ }
+ } /* if BASR_IRQ */
+ if (!done)
+ run_main();
+ } /* if (instance->irq == irq) */
+ } while (!done);
+}
+#endif
+
+#ifdef NCR5380_STATS
+static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd* cmd)
+{
+# ifdef NCR5380_STAT_LIMIT
+ if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
+# endif
+ switch (cmd->cmnd[0])
+ {
+ case WRITE:
+ case WRITE_6:
+ case WRITE_10:
+ hostdata->time_write[cmd->target] += (jiffies - hostdata->timebase);
+ /*hostdata->bytes_write[cmd->target] += cmd->request_bufflen;*/
+ hostdata->pendingw--;
+ break;
+ case READ:
+ case READ_6:
+ case READ_10:
+ hostdata->time_read[cmd->target] += (jiffies - hostdata->timebase);
+ /*hostdata->bytes_read[cmd->target] += cmd->request_bufflen;*/
+ hostdata->pendingr--;
+ break;
+ }
+}
+#endif
+
+/*
+ * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ * int tag);
+ *
+ * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
+ * including ARBITRATION, SELECTION, and initial message out for
+ * IDENTIFY and queue messages.
+ *
+ * Inputs : instance - instantiation of the 5380 driver on which this
+ * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
+ * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
+ * the command that is presently connected.
+ *
+ * Returns : -1 if selection could not execute for some reason,
+ * 0 if selection succeeded or failed because the target
+ * did not respond.
+ *
+ * Side effects :
+ * If bus busy, arbitration failed, etc, NCR5380_select() will exit
+ * with registers as they should have been on entry - ie
+ * SELECT_ENABLE will be set appropriately, the NCR5380
+ * will cease to drive any SCSI bus signals.
+ *
+ * If successful : I_T_L or I_T_L_Q nexus will be established,
+ * instance->connected will be set to cmd.
+ * SELECT interrupt will be disabled.
+ *
+ * If failed (no target) : cmd->scsi_done() will be called, and the
+ * cmd->result host byte set to DID_BAD_TARGET.
+ */
+
+static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
+ int tag) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata*)
+ instance->hostdata;
+ unsigned char tmp[3], phase;
+ unsigned char *data;
+ int len;
+ unsigned long timeout;
+ NCR5380_setup(instance);
+
+ hostdata->restart_select = 0;
+#if defined (NDEBUG) && (NDEBUG & NDEBUG_ARBITRATION)
+ NCR5380_print(instance);
+ printk("scsi%d : starting arbitration, id = %d\n", instance->host_no,
+ instance->this_id);
+#endif
+ cli();
+
+ /*
+ * Set the phase bits to 0, otherwise the NCR5380 won't drive the
+ * data bus during SELECTION.
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+
+ /*
+ * Start arbitration.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+ NCR5380_write(MODE_REG, MR_ARBITRATE);
+
+ sti();
+
+ /* Wait for arbitration logic to complete */
+#if NCR_TIMEOUT
+ {
+ unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
+
+ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
+ && jiffies < timeout)
+ ;
+ if (jiffies >= timeout)
+ {
+ printk("scsi: arbitration timeout at %d\n", __LINE__);
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ }
+#else /* NCR_TIMEOUT */
+ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS));
+#endif
+
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : arbitration complete\n", instance->host_no);
+/* Avoid GCC 2.4.5 asm needs too many reloads error */
+ __asm__("nop");
+#endif
+
+ /*
+ * The arbitration delay is 2.2us, but this is a minimum and there is
+ * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
+ * the integral nature of udelay().
+ *
+ */
+
+ udelay(3);
+
+ /* Check for lost arbitration */
+ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
+ (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
+ (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
+ NCR5380_write(MODE_REG, MR_BASE);
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n",
+ instance->host_no);
+#endif
+ return -1;
+ }
+
+
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
+
+ if (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) {
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n",
+ instance->host_no);
+#endif
+ return -1;
+ }
+
+ /*
+ * Again, bus clear + bus settle time is 1.2us, however, this is
+ * a minimum so we'll udelay ceil(1.2)
+ */
+
+ udelay(2);
+
+#if (NDEBUG & NDEBUG_ARBITRATION)
+ printk("scsi%d : won arbitration\n", instance->host_no);
+#endif
+
+
+ /*
+ * Now that we have won arbitration, start Selection process, asserting
+ * the host and target ID's on the SCSI bus.
+ */
+
+ NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->target)));
+
+ /*
+ * Raise ATN while SEL is true before BSY goes false from arbitration,
+ * since this is the only way to guarantee that we'll get a MESSAGE OUT
+ * phase immediately after selection.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL ));
+ NCR5380_write(MODE_REG, MR_BASE);
+
+ /*
+ * Reselect interrupts must be turned off prior to the dropping of BSY,
+ * otherwise we will trigger an interrupt.
+ */
+ NCR5380_write(SELECT_ENABLE_REG, 0);
+
+ /*
+ * The initiator shall then wait at least two deskew delays and release
+ * the BSY signal.
+ */
+ udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
+
+ /* Reset BSY */
+ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA |
+ ICR_ASSERT_ATN | ICR_ASSERT_SEL));
+
+ /*
+ * Something weird happens when we cease to drive BSY - looks
+ * like the board/chip is letting us do another read before the
+ * appropriate propagation delay has expired, and we're confusing
+ * a BSY signal from ourselves as the target's response to SELECTION.
+ *
+ * A small delay (the 'C++' frontend breaks the pipeline with an
+ * unnecessary jump, making it work on my 386-33/Trantor T128, the
+ * tighter 'C' code breaks and requires this) solves the problem -
+ * the 1 us delay is arbitrary, and only used because this delay will
+ * be the same on other platforms and since it works here, it should
+ * work there.
+ *
+ * wingel suggests that this could be due to failing to wait
+ * one deskew delay.
+ */
+
+ udelay(1);
+
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : selecting target %d\n", instance->host_no, cmd->target);
+#endif
+
+ /*
+ * The SCSI specification calls for a 250 ms timeout for the actual
+ * selection.
+ */
+
+ timeout = jiffies + 250*HZ/1000;
+
+ /*
+ * XXX very interesting - we're seeing a bounce where the BSY we
+ * asserted is being reflected / still asserted (propagation delay?)
+ * and it's detecting as true. Sigh.
+ */
+
+ while ((jiffies < timeout) && !(NCR5380_read(STATUS_REG) &
+ (SR_BSY | SR_IO)));
+
+ if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
+ (SR_SEL | SR_IO)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_reselect(instance);
+ printk ("scsi%d : reselection after won arbitration?\n",
+ instance->host_no);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+
+ /*
+ * No less than two deskew delays after the initiator detects the
+ * BSY signal is true, it shall release the SEL signal and may
+ * change the DATA BUS. -wingel
+ */
+
+ udelay(1);
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ if (hostdata->targets_present & (1 << cmd->target)) {
+ printk("scsi%d : weirdness\n", instance->host_no);
+ if (hostdata->restart_select)
+ printk("\trestart select\n");
+#ifdef NDEBUG
+ NCR5380_print (instance);
+#endif
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ cmd->result = DID_BAD_TARGET << 16;
+#ifdef NCR5380_STATS
+ collect_stats(hostdata, cmd);
+#endif
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : target did not respond within 250ms\n",
+ instance->host_no);
+#endif
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return 0;
+ }
+
+ hostdata->targets_present |= (1 << cmd->target);
+
+ /*
+ * Since we followed the SCSI spec, and raised ATN while SEL
+ * was true but before BSY was false during selection, the information
+ * transfer phase should be a MESSAGE OUT phase so that we can send the
+ * IDENTIFY message.
+ *
+ * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
+ * message (2 bytes) with a tag ID that we increment with every command
+ * until it wraps back to 0.
+ *
+ * XXX - it turns out that there are some broken SCSI-II devices,
+ * which claim to support tagged queuing but fail when more than
+ * some number of commands are issued at once.
+ */
+
+ /* Wait for start of REQ/ACK handshake */
+#ifdef NCR_TIMEOUT
+ {
+ unsigned long timeout = jiffies + NCR_TIMEOUT;
+
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ) && jiffies < timeout);
+
+ if (jiffies >= timeout) {
+      printk("scsi%d: timeout at NCR5380.c:%d\n", instance->host_no, __LINE__);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return -1;
+ }
+ }
+#else /* NCR_TIMEOUT */
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ));
+#endif /* def NCR_TIMEOUT */
+
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : target %d selected, going into MESSAGE OUT phase.\n",
+ instance->host_no, cmd->target);
+#endif
+ tmp[0] = IDENTIFY(((instance->irq == IRQ_NONE) ? 0 : 1), cmd->lun);
+#ifdef SCSI2
+ if (cmd->device->tagged_queue && (tag != TAG_NONE)) {
+ tmp[1] = SIMPLE_QUEUE_TAG;
+ if (tag == TAG_NEXT) {
+ /* 0 is TAG_NONE, used to imply no tag for this command */
+ if (cmd->device->current_tag == 0)
+ cmd->device->current_tag = 1;
+
+ cmd->tag = cmd->device->current_tag;
+ cmd->device->current_tag++;
+ } else
+ cmd->tag = (unsigned char) tag;
+
+ tmp[2] = cmd->tag;
+ hostdata->last_message = SIMPLE_QUEUE_TAG;
+ len = 3;
+ } else
+#endif /* def SCSI2 */
+ {
+ len = 1;
+ cmd->tag=0;
+ }
+
+ /* Send message(s) */
+ data = tmp;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+#if (NDEBUG & NDEBUG_SELECTION)
+ printk("scsi%d : nexus established.\n", instance->host_no);
+#endif
+ /* XXX need to handle errors here */
+ hostdata->connected = cmd;
+#ifdef SCSI2
+ if (!cmd->device->tagged_queue)
+#endif
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+
+ initialize_SCp(cmd);
+
+
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using polled I/O
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when a different phase is entered without transferring the
+ * maximum number of bytes, 0 if all bytes are transferred or the exit
+ * is in the same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ * XXX Note : handling for bus free may be useful.
+ */
+
+/*
+ * Note : this code is not as quick as it could be, however it
+ * IS 100% reliable, and for the actual data transfer where speed
+ * counts, we will always do a pseudo DMA or DMA transfer.
+ */
+
+static int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ register unsigned char p = *phase, tmp;
+ register int c = *count;
+ register unsigned char *d = *data;
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_PIO)
+ if (!(p & SR_IO))
+ printk("scsi%d : pio write %d bytes\n", instance->host_no, c);
+ else
+ printk("scsi%d : pio read %d bytes\n", instance->host_no, c);
+#endif
+
+ /*
+ * The NCR5380 chip will only drive the SCSI bus when the
+ * phase specified in the appropriate bits of the TARGET COMMAND
+ * REGISTER match the STATUS REGISTER
+ */
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+ do {
+ /*
+ * Wait for assertion of REQ, after which the phase bits will be
+ * valid
+ */
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
+
+#if (NDEBUG & NDEBUG_HANDSHAKE)
+ printk("scsi%d : REQ detected\n", instance->host_no);
+#endif
+
+ /* Check for phase mismatch */
+ if ((tmp & PHASE_MASK) != p) {
+#if (NDEBUG & NDEBUG_PIO)
+ printk("scsi%d : phase mismatch\n", instance->host_no);
+ NCR5380_print_phase(instance);
+#endif
+ break;
+ }
+
+ /* Do actual transfer from SCSI bus to / from memory */
+ if (!(p & SR_IO))
+ NCR5380_write(OUTPUT_DATA_REG, *d);
+ else
+ *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
+
+ ++d;
+
+ /*
+ * The SCSI standard suggests that in MSGOUT phase, the initiator
+ * should drop ATN on the last byte of the message phase
+ * after REQ has been asserted for the handshake but before
+ * the initiator raises ACK.
+ */
+
+ if (!(p & SR_IO)) {
+ if (!((p & SR_MSG) && c > 1)) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA);
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ACK);
+ } else {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN);
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
+ }
+ } else {
+#if (NDEBUG & NDEBUG_PIO)
+ NCR5380_print(instance);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+ }
+
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+
+#if (NDEBUG & NDEBUG_HANDSHAKE)
+ printk("scsi%d : req false, handshake complete\n", instance->host_no);
+#endif
+
+/*
+ * We have several special cases to consider during REQ/ACK handshaking :
+ * 1. We were in MSGOUT phase, and we are on the last byte of the
+ * message. ATN must be dropped as ACK is dropped.
+ *
+ * 2. We are in a MSGIN phase, and we are on the last byte of the
+ * message. We must exit with ACK asserted, so that the calling
+ * code may raise ATN before dropping ACK to reject the message.
+ *
+ * 3. ACK and ATN are clear and the target may proceed as normal.
+ */
+ if (!(p == PHASE_MSGIN && c == 1)) {
+ if (p == PHASE_MSGOUT && c > 1)
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ else
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ }
+ } while (--c);
+
+#if (NDEBUG & NDEBUG_PIO)
+ printk("scsi%d : residual %d\n", instance->host_no, c);
+#endif
+
+ *count = c;
+ *data = d;
+ tmp = NCR5380_read(STATUS_REG);
+ if (tmp & SR_REQ)
+ *phase = tmp & PHASE_MASK;
+ else
+ *phase = PHASE_UNKNOWN;
+
+ if (!c || (*phase == p))
+ return 0;
+ else
+ return -1;
+}
+
+static void do_reset (struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ NCR5380_setup(host);
+
+ cli();
+ NCR5380_write(TARGET_COMMAND_REG,
+ PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
+ udelay(25);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ sti();
+}
+
+/*
+ * Function : do_abort (Scsi_Host *host)
+ *
+ * Purpose : abort the currently established nexus. Should only be
+ * called from a routine which can drop into a
+ *
+ * Returns : 0 on success, -1 on failure.
+ */
+
+static int do_abort (struct Scsi_Host *host) {
+ NCR5380_local_declare();
+ unsigned char tmp, *msgptr, phase;
+ int len;
+ NCR5380_setup(host);
+
+
+ /* Request message out phase */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+
+ /*
+ * Wait for the target to indicate a valid phase by asserting
+ * REQ. Once this happens, we'll have either a MSGOUT phase
+ * and can immediately send the ABORT message, or we'll have some
+ * other phase and will have to source/sink data.
+ *
+ * We really don't care what value was on the bus or what value
+ * the target sees, so we just handshake.
+ */
+
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ if ((tmp & PHASE_MASK) != PHASE_MSGOUT) {
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
+ }
+
+ tmp = ABORT;
+ msgptr = &tmp;
+ len = 1;
+ phase = PHASE_MSGOUT;
+ NCR5380_transfer_pio (host, &phase, &len, &msgptr);
+
+ /*
+ * If we got here, and the command completed successfully,
+ * we're about to go into bus free state.
+ */
+
+ return len ? -1 : 0;
+}
+
+#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
+/*
+ * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ * unsigned char *phase, int *count, unsigned char **data)
+ *
+ * Purpose : transfers data in given phase using either real
+ * or pseudo DMA.
+ *
+ * Inputs : instance - instance of driver, *phase - pointer to
+ * what phase is expected, *count - pointer to number of
+ * bytes to transfer, **data - pointer to data pointer.
+ *
+ * Returns : -1 when a different phase is entered without transferring the
+ * maximum number of bytes, 0 if all bytes are transferred or the exit
+ * is in the same phase.
+ *
+ * Also, *phase, *count, *data are modified in place.
+ *
+ */
+
+
+static int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data) {
+ NCR5380_local_declare();
+ register int c = *count;
+ register unsigned char p = *phase;
+ register unsigned char *d = *data;
+ unsigned char tmp;
+ int foo;
+#if defined(REAL_DMA_POLL)
+ int cnt, toPIO;
+ unsigned char saved_data = 0, overrun = 0, residue;
+#endif
+
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+
+ NCR5380_setup(instance);
+
+ if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
+ *phase = tmp;
+ return -1;
+ }
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+#ifdef READ_OVERRUNS
+ if (p & SR_IO) {
+ c -= 2;
+ }
+#endif
+#if (NDEBUG & NDEBUG_DMA)
+ printk("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n",
+ instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" :
+ "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
+#endif
+  hostdata->dmalen = (p & SR_IO) ?
+ NCR5380_dma_read_setup(instance, d, c) :
+ NCR5380_dma_write_setup(instance, d, c);
+#endif
+
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
+
+#ifdef REAL_DMA
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
+#elif defined(REAL_DMA_POLL)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#else
+ /*
+ * Note : on my sample board, watch-dog timeouts occurred when interrupts
+ * were not disabled for the duration of a single DMA transfer, from
+ * before the setting of DMA mode to after transfer of the last byte.
+ */
+
+#if defined(PSEUDO_DMA) && !defined(UNSAFE)
+ cli();
+#endif
+ /* KLL May need eop and parity in 53c400 */
+ if (hostdata->flags & FLAG_NCR53C400)
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_PAR_CHECK
+ | MR_ENABLE_PAR_INTR | MR_ENABLE_EOP_INTR | MR_DMA_MODE
+ | MR_MONITOR_BSY);
+ else
+ NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
+#endif /* def REAL_DMA */
+
+#if (NDEBUG & NDEBUG_DMA) & 0
+ printk("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
+#endif
+
+/*
+ * FOO stuff. For some UNAPPARENT reason, I'm getting
+ * watchdog timers fired on bootup for NO APPARENT REASON, meaning it's
+ * probably a timing problem.
+ *
+ * Since this is the only place I have back-to-back writes, perhaps this
+ * is the problem?
+ */
+
+ if (p & SR_IO) {
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
+ } else {
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
+#ifndef FOO
+ udelay(1);
+#endif
+ NCR5380_write(START_DMA_SEND_REG, 0);
+#ifndef FOO
+ udelay(1);
+#endif
+ }
+
+#if defined(REAL_DMA_POLL)
+ do {
+ tmp = NCR5380_read(BUS_AND_STATUS_REG);
+ } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR |
+ BASR_END_DMA_TRANSFER)));
+
+/*
+ At this point, either we've completed DMA, or we have a phase mismatch,
+ or we've unexpectedly lost BUSY (which is a real error).
+
+ For write DMAs, we want to wait until the last byte has been
+ transferred out over the bus before we turn off DMA mode. Alas, there
+ seems to be no terribly good way of doing this on a 5380 under all
+ conditions. For non-scatter-gather operations, we can wait until REQ
+ and ACK both go false, or until a phase mismatch occurs. Gather-writes
+ are nastier, since the device will be expecting more data than we
+ are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
+ could test LAST BIT SENT to assure transfer (I imagine this is precisely
+ why this signal was added to the newer chips) but on the older 538[01]
+ this signal does not exist. The workaround for this lack is a watchdog;
+ we bail out of the wait-loop after a modest amount of wait-time if
+ the usual exit conditions are not met. Not a terribly clean or
+ correct solution :-%
+
+ Reads are equally tricky due to a nasty characteristic of the NCR5380.
+ If the chip is in DMA mode for an READ, it will respond to a target's
+ REQ by latching the SCSI data into the INPUT DATA register and asserting
+ ACK, even if it has _already_ been notified by the DMA controller that
+ the current DMA transfer has completed! If the NCR5380 is then taken
+ out of DMA mode, this already-acknowledged byte is lost.
+
+ This is not a problem for "one DMA transfer per command" reads, because
+ the situation will never arise... either all of the data is DMA'ed
+ properly, or the target switches to MESSAGE IN phase to signal a
+ disconnection (either operation bringing the DMA to a clean halt).
+ However, in order to handle scatter-reads, we must work around the
+ problem. The chosen fix is to DMA N-2 bytes, then check for the
+ condition before taking the NCR5380 out of DMA mode. One or two extra
+ bytes are transferred via PIO as necessary to fill out the original
+ request.
+*/
+
+ if (p & SR_IO) {
+#ifdef READ_OVERRUNS
+ udelay(10);
+ if (((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH|BASR_ACK)) ==
+ (BASR_PHASE_MATCH | BASR_ACK))) {
+	saved_data = NCR5380_read(INPUT_DATA_REG);
+ overrun = 1;
+ }
+#endif
+ } else {
+ int limit = 100;
+ while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) ||
+ (NCR5380_read(STATUS_REG) & SR_REQ)) {
+ if (!(tmp & BASR_PHASE_MATCH)) break;
+ if (--limit < 0) break;
+ }
+ }
+
+
+#if (NDEBUG & NDEBUG_DMA)
+ printk("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n",
+ instance->host_no, tmp, NCR5380_read(STATUS_REG));
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ residue = NCR5380_dma_residual(instance);
+ c -= residue;
+ *count -= c;
+ *data += c;
+ *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
+
+#ifdef READ_OVERRUNS
+ if (*phase == p && (p & SR_IO) && residue == 0) {
+ if (overrun) {
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Got an input overrun, using saved byte\n");
+#endif
+ **data = saved_data;
+ *data += 1;
+ *count -= 1;
+ cnt = toPIO = 1;
+ } else {
+ printk("No overrun??\n");
+ cnt = toPIO = 2;
+ }
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Doing %d-byte PIO to 0x%X\n", cnt, *data);
+#endif
+ NCR5380_transfer_pio(instance, phase, &cnt, data);
+ *count -= toPIO - cnt;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_DMA)
+ printk("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n",
+ *data, *count, *(*data+*count-1), *(*data+*count));
+#endif
+ return 0;
+
+#elif defined(REAL_DMA)
+ return 0;
+#else /* defined(REAL_DMA_POLL) */
+ if (p & SR_IO) {
+#ifdef DMA_WORKS_RIGHT
+ foo = NCR5380_pread(instance, d, c);
+#else
+ int diff = 1;
+ if (hostdata->flags & FLAG_NCR53C400) {
+ diff=0;
+ }
+
+ if (!(foo = NCR5380_pread(instance, d, c - diff))) {
+ /*
+ * We can't disable DMA mode after successfully transferring
+ * what we plan to be the last byte, since that would open up
+ * a race condition where if the target asserted REQ before
+ * we got the DMA mode reset, the NCR5380 would have latched
+ * an additional byte into the INPUT DATA register and we'd
+ * have dropped it.
+ *
+ * The workaround was to transfer one fewer bytes than we
+ * intended to with the pseudo-DMA read function, wait for
+ * the chip to latch the last byte, read it, and then disable
+ * pseudo-DMA mode.
+ *
+ * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
+ * REQ is deasserted when ACK is asserted, and not reasserted
+ * until ACK goes false. Since the NCR5380 won't lower ACK
+ * until DACK is asserted, which won't happen unless we twiddle
+ * the DMA port or we take the NCR5380 out of DMA mode, we
+ * can guarantee that we won't handshake another extra
+ * byte.
+ */
+
+ if (!(hostdata->flags & FLAG_NCR53C400)) {
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ));
+ /* Wait for clean handshake */
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ d[c - 1] = NCR5380_read(INPUT_DATA_REG);
+ }
+ }
+#endif
+ } else {
+#ifdef DMA_WORKS_RIGHT
+ foo = NCR5380_pwrite(instance, d, c);
+#else
+ int timeout;
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("About to pwrite %d bytes\n", c);
+#endif
+ if (!(foo = NCR5380_pwrite(instance, d, c))) {
+ /*
+ * Wait for the last byte to be sent. If REQ is being asserted for
+	     * the byte we're interested in, we'll ACK it and it will go false.
+ */
+ if (!(hostdata->flags & FLAG_HAS_LAST_BYTE_SENT)) {
+ timeout = 20000;
+#if 1
+#if 1
+ while (!(NCR5380_read(BUS_AND_STATUS_REG) &
+ BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) &
+ BASR_PHASE_MATCH));
+#else
+ if (NCR5380_read(STATUS_REG) & SR_REQ) {
+ for (; timeout &&
+ !(NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+ --timeout);
+ for (; timeout && (NCR5380_read(STATUS_REG) & SR_REQ);
+ --timeout);
+ }
+#endif
+
+
+#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
+ if (!timeout)
+ printk("scsi%d : timed out on last byte\n",
+ instance->host_no);
+#endif
+
+
+ if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
+ hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
+ if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
+ hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
+#if (NDEBUG & NDEBUG_LAST_BYTE_SENT)
+ printk("scsi%d : last bit sent works\n",
+ instance->host_no);
+#endif
+ }
+ }
+ } else {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("Waiting for LASTBYTE\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("Got LASTBYTE\n");
+#endif
+ }
+#else
+ udelay (5);
+#endif
+ }
+#endif
+ }
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Checking for IRQ\n");
+#endif
+ if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got it, reading reset interrupt reg\n");
+#endif
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ } else {
+ printk("53C400w: IRQ NOT THERE!\n");
+ }
+ }
+
+ *data = d + c;
+ *count = 0;
+ *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
+#if 0
+ NCR5380_print_phase(instance);
+#endif
+#if defined(PSEUDO_DMA) && !defined(UNSAFE)
+ sti();
+#endif /* defined(PSEUDO_DMA) && !defined(UNSAFE) */
+ return foo;
+#endif /* def REAL_DMA */
+}
+#endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */
+
+/*
+ * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
+ *
+ * Purpose : run through the various SCSI phases and do as the target
+ * directs us to. Operates on the currently connected command,
+ * instance->connected.
+ *
+ * Inputs : instance, instance for which we are doing commands
+ *
+ * Side effects : SCSI things happen, the disconnected queue will be
+ * modified if a command disconnects, *instance->connected will
+ * change.
+ *
+ * XXX Note : we need to watch for bus free or a reset condition here
+ * to recover from an unexpected bus free condition.
+ */
+
+static void NCR5380_information_transfer (struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned char msgout = NOP;
+ int sink = 0;
+ int len;
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ int transfersize;
+#endif
+ unsigned char *data;
+ unsigned char phase, tmp, extended_msg[10], old_phase=0xff;
+ Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected;
+ NCR5380_setup(instance);
+
+ while (1) {
+ tmp = NCR5380_read(STATUS_REG);
+ /* We only have a valid SCSI phase when REQ is asserted */
+ if (tmp & SR_REQ) {
+ phase = (tmp & PHASE_MASK);
+ if (phase != old_phase) {
+ old_phase = phase;
+#if (NDEBUG & NDEBUG_INFORMATION)
+ NCR5380_print_phase(instance);
+#endif
+ }
+
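+	/*
+	 * "sink" is set when we want to throw the rest of a transfer
+	 * away : handshake each byte with ATN asserted until the target
+	 * finally switches to the MSGOUT phase.
+	 */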
+ if (sink && (phase != PHASE_MSGOUT)) {
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
+ ICR_ASSERT_ACK);
+ while (NCR5380_read(STATUS_REG) & SR_REQ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 0;
+ continue;
+ }
+
+ switch (phase) {
+ case PHASE_DATAIN:
+ case PHASE_DATAOUT:
+#if (NDEBUG & NDEBUG_NO_DATAOUT)
+ printk("scsi%d : NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n",
+ instance->host_no);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->done(cmd);
+ return;
+#endif
+ /*
+ * If there is no room left in the current buffer in the
+ * scatter-gather list, move onto the next one.
+ */
+
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+#if (NDEBUG & NDEBUG_INFORMATION)
+ printk("scsi%d : %d bytes and %d buffers left\n",
+ instance->host_no, cmd->SCp.this_residual,
+ cmd->SCp.buffers_residual);
+#endif
+ }
+
+ /*
+ * The preferred transfer method is going to be
+ * PSEUDO-DMA for systems that are strictly PIO,
+ * since we can let the hardware do the handshaking.
+ *
+ * For this to work, we need to know the transfersize
+ * ahead of time, since the pseudo-DMA code will sit
+ * in an unconditional loop.
+ */
+
+#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ /* KLL
+ * PSEUDO_DMA is defined here. If this is the g_NCR5380
+ * driver then it will always be defined, so the
+ * FLAG_NO_PSEUDO_DMA is used to inhibit PDMA in the base
+ * NCR5380 case. I think this is a fairly clean solution.
+ * We supplement these 2 if's with the flag.
+ */
+#ifdef NCR5380_dma_xfer_len
+ if (!cmd->device->borken &&
+ !(hostdata->flags & FLAG_NO_PSEUDO_DMA) &&
+ (transfersize = NCR5380_dma_xfer_len(instance, cmd)) != 0) {
+#else
+ transfersize = cmd->transfersize;
+
+#ifdef LIMIT_TRANSFERSIZE /* If we have problems with interrupt service */
+ if( transfersize > 512 )
+ transfersize = 512;
+#endif /* LIMIT_TRANSFERSIZE */
+
+ if (!cmd->device->borken && transfersize &&
+ !(hostdata->flags & FLAG_NO_PSEUDO_DMA) &&
+ cmd->SCp.this_residual && !(cmd->SCp.this_residual %
+ transfersize)) {
+		  /* Limit transfers to 32K, for xx400 & xx406
+		   * pseudo-DMA that transfers in 128-byte blocks. */
+ if (transfersize > 32*1024)
+ transfersize = 32*1024;
+#endif
+ len = transfersize;
+ if (NCR5380_transfer_dma(instance, &phase,
+ &len, (unsigned char **) &cmd->SCp.ptr)) {
+ /*
+ * If the watchdog timer fires, all future accesses to this
+		 * device will use polled I/O.
+ */
+ printk("scsi%d : switching target %d lun %d to slow handshake\n",
+ instance->host_no, cmd->target, cmd->lun);
+ cmd->device->borken = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ sink = 1;
+ do_abort(instance);
+ cmd->result = DID_ERROR << 16;
+ cmd->done(cmd);
+ /* XXX - need to source or sink data here, as appropriate */
+ } else
+ cmd->SCp.this_residual -= transfersize - len;
+ } else
+#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
+ NCR5380_transfer_pio(instance, &phase,
+ (int *) &cmd->SCp.this_residual, (unsigned char **)
+ &cmd->SCp.ptr);
+ break;
+ case PHASE_MSGIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Message = tmp;
+
+ switch (tmp) {
+ /*
+ * Linking lets us reduce the time required to get the
+ * next command out to the device, hopefully this will
+ * mean we don't waste another revolution due to the delays
+ * required by ARBITRATION and another SELECTION.
+ *
+ * In the current implementation proposal, low level drivers
+ * merely have to start the next command, pointed to by
+ * next_link, done() is called as with unlinked commands.
+ */
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+#if (NDEBUG & NDEBUG_LINKED)
+ printk("scsi%d : target %d lun %d linked command complete.\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+ /*
+ * Sanity check : A linked command should only terminate with
+ * one of these messages if there are more linked commands
+ * available.
+ */
+
+ if (!cmd->next_link) {
+ printk("scsi%d : target %d lun %d linked command complete, no next_link\n"
+ instance->host_no, cmd->target, cmd->lun);
+ sink = 1;
+ do_abort (instance);
+ return;
+ }
+
+ initialize_SCp(cmd->next_link);
+ /* The next command is still part of this process */
+ cmd->next_link->tag = cmd->tag;
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+#if (NDEBUG & NDEBUG_LINKED)
+ printk("scsi%d : target %d lun %d linked request done, calling scsi_done().\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+#ifdef NCR5380_STATS
+ collect_stats(hostdata, cmd);
+#endif
+ cmd->scsi_done(cmd);
+ cmd = hostdata->connected;
+ break;
+#endif /* def LINKED */
+ case ABORT:
+ case COMMAND_COMPLETE:
+ /* Accept message by clearing ACK */
+ sink = 1;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ hostdata->connected = NULL;
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command for target %d, lun %d completed\n",
+ instance->host_no, cmd->target, cmd->lun);
+#endif
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+
+ /*
+ * I'm not sure what the correct thing to do here is :
+ *
+ * If the command that just executed is NOT a request
+ * sense, the obvious thing to do is to set the result
+ * code to the values of the stored parameters.
+ *
+ * If it was a REQUEST SENSE command, we need some way
+ * to differentiate between the failure code of the original
+ * and the failure code of the REQUEST sense - the obvious
+ * case is success, where we fall through and leave the result
+ * code unchanged.
+ *
+	 * The non-obvious case is when the REQUEST SENSE itself failed.
+ */
+
+ if (cmd->cmnd[0] != REQUEST_SENSE)
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ else if (cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+
+#ifdef AUTOSENSE
+ if ((cmd->cmnd[0] != REQUEST_SENSE) &&
+ (cmd->SCp.Status == CHECK_CONDITION)) {
+#if (NDEBUG & NDEBUG_AUTOSENSE)
+ printk("scsi%d : performing request sense\n",
+ instance->host_no);
+#endif
+ cmd->cmnd[0] = REQUEST_SENSE;
+ cmd->cmnd[1] &= 0xe0;
+ cmd->cmnd[2] = 0;
+ cmd->cmnd[3] = 0;
+ cmd->cmnd[4] = sizeof(cmd->sense_buffer);
+ cmd->cmnd[5] = 0;
+
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *) cmd->sense_buffer;
+ cmd->SCp.this_residual = sizeof(cmd->sense_buffer);
+
+ cli();
+ LIST(cmd,hostdata->issue_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->issue_queue;
+ hostdata->issue_queue = (Scsi_Cmnd *) cmd;
+ sti();
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : REQUEST SENSE added to head of issue queue\n",instance->host_no);
+#endif
+ } else {
+#endif /* def AUTOSENSE */
+#ifdef NCR5380_STATS
+ collect_stats(hostdata, cmd);
+#endif
+ cmd->scsi_done(cmd);
+ }
+
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+ return;
+ case MESSAGE_REJECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ switch (hostdata->last_message) {
+ case HEAD_OF_QUEUE_TAG:
+ case ORDERED_QUEUE_TAG:
+ case SIMPLE_QUEUE_TAG:
+ cmd->device->tagged_queue = 0;
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+ break;
+ default:
+ break;
+ }
+ case DISCONNECT:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ cmd->device->disconnect = 1;
+ cli();
+ LIST(cmd,hostdata->disconnected_queue);
+ cmd->host_scribble = (unsigned char *)
+ hostdata->disconnected_queue;
+ hostdata->connected = NULL;
+ hostdata->disconnected_queue = cmd;
+ sti();
+#if (NDEBUG & NDEBUG_QUEUES)
+ printk("scsi%d : command for target %d lun %d was moved from connected to"
+ " the disconnected_queue\n", instance->host_no,
+ cmd->target, cmd->lun);
+#endif
+ /*
+ * Restore phase bits to 0 so an interrupted selection,
+ * arbitration can resume.
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+ /* Enable reselect interrupts */
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ /* Wait for bus free to avoid nasty timeouts */
+ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
+ barrier();
+#if 0
+ NCR5380_print_status(instance);
+#endif
+ return;
+	/*
+	 * We save the SCSI data pointer *IMPLICITLY* on every disconnect.
+	 * This violates the SCSI spec, but some disks also violate it by
+	 * not issuing the required SAVE POINTERS message before
+	 * disconnecting, so we break the spec to remain compatible.
+	 * As a result we can safely ignore SAVE/RESTORE POINTERS messages
+	 * here.
+	 */
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ break;
+ case EXTENDED_MESSAGE:
+/*
+ * Extended messages are sent in the following format :
+ * Byte
+ * 0 EXTENDED_MESSAGE == 1
+ * 1 length (includes one byte for code, doesn't
+ * include first two bytes)
+ * 2 code
+ * 3..length+1 arguments
+ *
+ * Start the extended message buffer with the EXTENDED_MESSAGE
+ * byte, since print_msg() wants the whole thing.
+ */
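+/*
+ * For example, a synchronous transfer request (SDTR) arrives as the five
+ * bytes 0x01 0x03 0x01 <period> <offset> : EXTENDED_MESSAGE, a length of 3,
+ * the EXTENDED_SDTR code, then the two argument bytes.
+ */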
+ extended_msg[0] = EXTENDED_MESSAGE;
+ /* Accept first byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : receiving extended message\n",
+ instance->host_no);
+#endif
+
+ len = 2;
+ data = extended_msg + 1;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : length=%d, code=0x%02x\n",
+ instance->host_no, (int) extended_msg[1],
+ (int) extended_msg[2]);
+#endif
+
+ if (!len && extended_msg[1] <=
+ (sizeof (extended_msg) - 1)) {
+ /* Accept third byte by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ len = extended_msg[1] - 1;
+ data = extended_msg + 3;
+ phase = PHASE_MSGIN;
+
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+#if (NDEBUG & NDEBUG_EXTENDED)
+ printk("scsi%d : message received, residual %d\n",
+ instance->host_no, len);
+#endif
+
+ switch (extended_msg[2]) {
+ case EXTENDED_SDTR:
+ case EXTENDED_WDTR:
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ tmp = 0;
+ }
+ } else if (len) {
+ printk("scsi%d: error receiving extended message\n",
+ instance->host_no);
+ tmp = 0;
+ } else {
+ printk("scsi%d: extended message code %02x length %d is too long\n",
+ instance->host_no, extended_msg[2], extended_msg[1]);
+ tmp = 0;
+ }
+ /* Fall through to reject message */
+
+ /*
+ * If we get something weird that we aren't expecting,
+ * reject it.
+ */
+ default:
+ if (!tmp) {
+ printk("scsi%d: rejecting message ", instance->host_no);
+ print_msg (extended_msg);
+ printk("\n");
+ } else if (tmp != EXTENDED_MESSAGE)
+ printk("scsi%d: rejecting unknown message %02x from target %d, lun %d\n",
+ instance->host_no, tmp, cmd->target, cmd->lun);
+ else
+ printk("scsi%d: rejecting unknown extended message code %02x, length %d from target %d, lun %d\n",
+		   instance->host_no, extended_msg[2], extended_msg[1], cmd->target, cmd->lun);
+
+ msgout = MESSAGE_REJECT;
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
+ ICR_ASSERT_ATN);
+ break;
+ } /* switch (tmp) */
+ break;
+ case PHASE_MSGOUT:
+ len = 1;
+ data = &msgout;
+ hostdata->last_message = msgout;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ if (msgout == ABORT) {
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->connected = NULL;
+ cmd->result = DID_ERROR << 16;
+#ifdef NCR5380_STATS
+ collect_stats(hostdata, cmd);
+#endif
+ cmd->scsi_done(cmd);
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return;
+ }
+ msgout = NOP;
+ break;
+ case PHASE_CMDOUT:
+ len = cmd->cmd_len;
+ data = cmd->cmnd;
+ /*
+ * XXX for performance reasons, on machines with a
+ * PSEUDO-DMA architecture we should probably
+ * use the dma transfer function.
+ */
+ NCR5380_transfer_pio(instance, &phase, &len,
+ &data);
+#ifdef USLEEP
+ if (!disconnect && should_disconnect(cmd->cmnd[0])) {
+ hostdata->time_expires = jiffies + USLEEP_SLEEP;
+#if (NDEBUG & NDEBUG_USLEEP)
+ printk("scsi%d : issued command, sleeping until %ul\n", instance->host_no,
+ hostdata->time_expires);
+#endif
+ NCR5380_set_timer (instance);
+ return;
+ }
+#endif /* def USLEEP */
+ break;
+ case PHASE_STATIN:
+ len = 1;
+ data = &tmp;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+ cmd->SCp.Status = tmp;
+ break;
+ default:
+ printk("scsi%d : unknown phase\n", instance->host_no);
+#ifdef NDEBUG
+ NCR5380_print(instance);
+#endif
+ } /* switch(phase) */
+	} /* if (tmp & SR_REQ) */
+#ifdef USLEEP
+ else {
+ if (!disconnect && hostdata->time_expires && jiffies >
+ hostdata->time_expires) {
+ hostdata->time_expires = jiffies + USLEEP_SLEEP;
+#if (NDEBUG & NDEBUG_USLEEP)
+ printk("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no,
+ hostdata->time_expires);
+#endif
+ NCR5380_set_timer (instance);
+ return;
+ }
+ }
+#endif
+ } /* while (1) */
+}
+
+/*
+ * Function : void NCR5380_reselect (struct Scsi_Host *instance)
+ *
+ * Purpose : does reselection, initializing the instance->connected
+ * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q
+ * nexus has been reestablished.
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ */
+
+
+static void NCR5380_reselect (struct Scsi_Host *instance) {
+ NCR5380_local_declare();
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ unsigned char target_mask;
+ unsigned char lun, phase;
+ int len;
+#ifdef SCSI2
+ unsigned char tag;
+#endif
+ unsigned char msg[3];
+ unsigned char *data;
+ Scsi_Cmnd *tmp = NULL, *prev;
+ int abort = 0;
+ NCR5380_setup(instance);
+
+ /*
+ * Disable arbitration, etc. since the host adapter obviously
+ * lost, and tell an interrupted NCR5380_select() to restart.
+ */
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ hostdata->restart_select = 1;
+
+ target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
+
+#if (NDEBUG & NDEBUG_RESELECTION)
+ printk("scsi%d : reselect\n", instance->host_no);
+#endif
+
+ /*
+ * At this point, we have detected that our SCSI ID is on the bus,
+ * SEL is true and BSY was false for at least one bus settle delay
+ * (400 ns).
+ *
+ * We must assert BSY ourselves, until the target drops the SEL
+ * signal.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
+
+ while (NCR5380_read(STATUS_REG) & SR_SEL);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * Wait for target to go into MSGIN.
+ */
+
+ while (!(NCR5380_read(STATUS_REG) & SR_REQ));
+
+ len = 1;
+ data = msg;
+ phase = PHASE_MSGIN;
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+
+ if (!(msg[0] & 0x80)) {
+ printk("scsi%d : expecting IDENTIFY message, got ",
+ instance->host_no);
+ print_msg(msg);
+ abort = 1;
+ } else {
+ /* Accept message by clearing ACK */
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ lun = (msg[0] & 0x07);
+
+ /*
+ * We need to add code for SCSI-II to track which devices have
+ * I_T_L_Q nexuses established, and which have simple I_T_L
+ * nexuses so we can choose to do additional data transfer.
+ */
+
+#ifdef SCSI2
+#error "SCSI-II tagged queueing is not supported yet"
+#endif
+
+ /*
+ * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
+ * just reestablished, and remove it from the disconnected queue.
+ */
+
+
+ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL;
+ tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
+ if ((target_mask == (1 << tmp->target)) && (lun == tmp->lun)
+#ifdef SCSI2
+ && (tag == tmp->tag)
+#endif
+) {
+ if (prev) {
+ REMOVE(prev,prev->host_scribble,tmp,tmp->host_scribble);
+ prev->host_scribble = tmp->host_scribble;
+ } else {
+ REMOVE(-1,hostdata->disconnected_queue,tmp,tmp->host_scribble);
+ hostdata->disconnected_queue = (Scsi_Cmnd *) tmp->host_scribble;
+ }
+ tmp->host_scribble = NULL;
+ break;
+ }
+
+ if (!tmp) {
+#ifdef SCSI2
+ printk("scsi%d : warning : target bitmask %02x lun %d tag %d not in disconnect_queue.\n",
+ instance->host_no, target_mask, lun, tag);
+#else
+ printk("scsi%d : warning : target bitmask %02x lun %d not in disconnect_queue.\n",
+ instance->host_no, target_mask, lun);
+#endif
+ /*
+ * Since we have an established nexus that we can't do anything with,
+ * we must abort it.
+ */
+ abort = 1;
+ }
+ }
+
+ if (abort) {
+ do_abort (instance);
+ } else {
+ hostdata->connected = tmp;
+#if (NDEBUG & NDEBUG_RESELECTION)
+ printk("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n",
+ instance->host_no, tmp->target, tmp->lun, tmp->tag);
+#endif
+ }
+}
+
+/*
+ * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
+ *
+ * Purpose : called by interrupt handler when DMA finishes or a phase
+ * mismatch occurs (which would finish the DMA transfer).
+ *
+ * Inputs : instance - this instance of the NCR5380.
+ *
+ * Returns : nothing ; the connected command's data pointer and residual
+ * are adjusted by the number of bytes actually transferred.
+ */
+
+#ifdef REAL_DMA
+static void NCR5380_dma_complete (struct Scsi_Host *instance) {
+    NCR5380_local_declare();
+    struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+	instance->hostdata;
+ int transferred;
+ NCR5380_setup(instance);
+
+ /*
+ * XXX this might not be right.
+ *
+ * Wait for final byte to transfer, ie wait for ACK to go false.
+ *
+ * We should use the Last Byte Sent bit, unfortunately this is
+ * not available on the 5380/5381 (only the various CMOS chips)
+ */
+
+ while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK);
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+
+ /*
+ * The only places we should see a phase mismatch and have to send
+ * data from the same set of pointers will be the data transfer
+ * phases. So, residual, requested length are only important here.
+ */
+
+ if (!(hostdata->connected->SCp.phase & SR_CD)) {
+ transferred = instance->dmalen - NCR5380_dma_residual();
+ hostdata->connected->SCp.this_residual -= transferred;
+ hostdata->connected->SCp.ptr += transferred;
+ }
+}
+#endif /* def REAL_DMA */
+
+/*
+ * Function : int NCR5380_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : abort a command
+ *
+ * Inputs : cmd - the Scsi_Cmnd to abort.
+ *
+ * Returns : SCSI_ABORT_SUCCESS on success; SCSI_ABORT_NOT_RUNNING or
+ * SCSI_ABORT_BUSY on failure.
+ *
+ * XXX - there is no way to abort the command that is currently
+ * connected, you have to wait for it to complete. If this is
+ * a problem, we could implement longjmp() / setjmp(), setjmp()
+ * called where the loop started in NCR5380_main().
+ */
+
+#ifndef NCR5380_abort
+static
+#endif
+int NCR5380_abort (Scsi_Cmnd *cmd) {
+ NCR5380_local_declare();
+ struct Scsi_Host *instance = cmd->host;
+ struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
+ instance->hostdata;
+ Scsi_Cmnd *tmp, **prev;
+
+ printk("scsi%d : aborting command\n", instance->host_no);
+ print_Scsi_Cmnd (cmd);
+
+ NCR5380_print_status (instance);
+
+ cli();
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : abort called\n", instance->host_no);
+ printk(" basr 0x%X, sr 0x%X\n",
+ NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
+#endif
+
+#if 0
+/*
+ * Case 1 : If the command is the currently executing command,
+ * we'll set the aborted flag and return control so that
+ * information transfer routine can exit cleanly.
+ */
+
+ if (hostdata->connected == cmd) {
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : aborting connected command\n", instance->host_no);
+#endif
+ hostdata->aborted = 1;
+/*
+ * We should perform BSY checking, and make sure we haven't slipped
+ * into BUS FREE.
+ */
+
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN);
+/*
+ * Since we can't change phases until we've completed the current
+ * handshake, we have to source or sink a byte of data if the current
+ * phase is not MSGOUT.
+ */
+
+/*
+ * Return control to the executing NCR driver so we can clear the
+ * aborted flag and get back into our main loop.
+ */
+
+ return 0;
+ }
+#endif
+
+/*
+ * Case 2 : If the command hasn't been issued yet, we simply remove it
+ * from the issue queue.
+ */
+#if (NDEBUG & NDEBUG_ABORT)
+ /* KLL */
+ printk("scsi%d : abort going into loop.\n", instance->host_no);
+#endif
+ for (prev = (Scsi_Cmnd **) &(hostdata->issue_queue),
+ tmp = (Scsi_Cmnd *) hostdata->issue_queue;
+ tmp; prev = (Scsi_Cmnd **) &(tmp->host_scribble), tmp =
+ (Scsi_Cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ REMOVE(5,*prev,tmp,tmp->host_scribble);
+ (*prev) = (Scsi_Cmnd *) tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ sti();
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : abort removed command from issue queue.\n",
+ instance->host_no);
+#endif
+ tmp->done(tmp);
+ return SCSI_ABORT_SUCCESS;
+ }
+#if (NDEBUG & NDEBUG_ABORT)
+ /* KLL */
+ else if (prev == tmp) printk("scsi%d : LOOP\n", instance->host_no);
+#endif
+
+/*
+ * Case 3 : If any commands are connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case of noresets less efficient, and would pollute our code. So,
+ * we fail.
+ */
+
+ if (hostdata->connected) {
+ sti();
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : abort failed, command connected.\n", instance->host_no);
+#endif
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+/*
+ * Case 4: If the command is currently disconnected from the bus, and
+ * there are no connected commands, we reconnect the I_T_L or
+ * I_T_L_Q nexus associated with it, go into message out, and send
+ * an abort message.
+ *
+ * This case is especially ugly. In order to reestablish the nexus, we
+ * need to call NCR5380_select(). The easiest way to implement this
+ * function was to abort if the bus was busy, and let the interrupt
+ * handler triggered on the SEL for reselect take care of lost arbitrations
+ * where necessary, meaning interrupts need to be enabled.
+ *
+ * When interrupts are enabled, the queues may change - so we
+ * can't remove it from the disconnected queue before selecting it
+ * because that could cause a failure in hashing the nexus if that
+ * device reselected.
+ *
+ * Since the queues may change, we can't use the pointers from when we
+ * first locate it.
+ *
+ * So, we must first locate the command, and if NCR5380_select()
+ * succeeds, then issue the abort, relocate the command and remove
+ * it from the disconnected queue.
+ */
+
+ for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp;
+ tmp = (Scsi_Cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ sti();
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : aborting disconnected command.\n", instance->host_no);
+#endif
+
+ if (NCR5380_select (instance, cmd, (int) cmd->tag))
+ return SCSI_ABORT_BUSY;
+
+#if (NDEBUG & NDEBUG_ABORT)
+ printk("scsi%d : nexus reestablished.\n", instance->host_no);
+#endif
+
+ do_abort (instance);
+
+ cli();
+ for (prev = (Scsi_Cmnd **) &(hostdata->disconnected_queue),
+ tmp = (Scsi_Cmnd *) hostdata->disconnected_queue;
+ tmp; prev = (Scsi_Cmnd **) &(tmp->host_scribble), tmp =
+ (Scsi_Cmnd *) tmp->host_scribble)
+ if (cmd == tmp) {
+ REMOVE(5,*prev,tmp,tmp->host_scribble);
+ *prev = (Scsi_Cmnd *) tmp->host_scribble;
+ tmp->host_scribble = NULL;
+ tmp->result = DID_ABORT << 16;
+ sti();
+ tmp->done(tmp);
+ return SCSI_ABORT_SUCCESS;
+ }
+ }
+
+/*
+ * Case 5 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abortion code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+ sti();
+ printk("scsi%d : warning : SCSI command probably completed successfully\n"
+ " before abortion\n", instance->host_no);
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+
+/*
+ * Function : int NCR5380_reset (Scsi_Cmnd *cmd, unsigned int reset_flags)
+ *
+ * Purpose : reset the SCSI bus.
+ *
+ * Returns : SCSI_RESET_WAKEUP
+ *
+ */
+
+#ifndef NCR5380_reset
+static
+#endif
+int NCR5380_reset (Scsi_Cmnd *cmd, unsigned int dummy) {
+ NCR5380_local_declare();
+ NCR5380_setup(cmd->host);
+
+ NCR5380_print_status (cmd->host);
+ do_reset (cmd->host);
+
+ return SCSI_RESET_WAKEUP;
+}
+
diff --git a/linux/src/drivers/scsi/NCR5380.h b/linux/src/drivers/scsi/NCR5380.h
new file mode 100644
index 0000000..c2a7519
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR5380.h
@@ -0,0 +1,369 @@
+/*
+ * NCR 5380 defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * DISTRIBUTION RELEASE 7
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef NCR5380_H
+#define NCR5380_H
+
+#define NCR5380_PUBLIC_RELEASE 7
+#define NCR53C400_PUBLIC_RELEASE 2
+
+#define NDEBUG_ARBITRATION 0x1
+#define NDEBUG_AUTOSENSE 0x2
+#define NDEBUG_DMA 0x4
+#define NDEBUG_HANDSHAKE 0x8
+#define NDEBUG_INFORMATION 0x10
+#define NDEBUG_INIT 0x20
+#define NDEBUG_INTR 0x40
+#define NDEBUG_LINKED 0x80
+#define NDEBUG_MAIN 0x100
+#define NDEBUG_NO_DATAOUT 0x200
+#define NDEBUG_NO_WRITE 0x400
+#define NDEBUG_PIO 0x800
+#define NDEBUG_PSEUDO_DMA 0x1000
+#define NDEBUG_QUEUES 0x2000
+#define NDEBUG_RESELECTION 0x4000
+#define NDEBUG_SELECTION 0x8000
+#define NDEBUG_USLEEP 0x10000
+#define NDEBUG_LAST_BYTE_SENT 0x20000
+#define NDEBUG_RESTART_SELECT 0x40000
+#define NDEBUG_EXTENDED 0x80000
+#define NDEBUG_C400_PREAD 0x100000
+#define NDEBUG_C400_PWRITE 0x200000
+#define NDEBUG_LISTS 0x400000
+
+/*
+ * The contents of the OUTPUT DATA register are asserted on the bus when
+ * either arbitration is occurring or the phase-indicating signals
+ * (IO, CD, MSG) in the TARGET COMMAND register and the ASSERT DATA
+ * bit in the INITIATOR COMMAND register are set.
+ */
+
+#define OUTPUT_DATA_REG 0 /* wo DATA lines on SCSI bus */
+#define CURRENT_SCSI_DATA_REG 0 /* ro same */
+
+#define INITIATOR_COMMAND_REG 1 /* rw */
+#define ICR_ASSERT_RST 0x80 /* rw Set to assert RST */
+#define ICR_ARBITRATION_PROGRESS 0x40 /* ro Indicates arbitration complete */
+#define ICR_TRI_STATE 0x40 /* wo Set to tri-state drivers */
+#define ICR_ARBITRATION_LOST 0x20 /* ro Indicates arbitration lost */
+#define ICR_DIFF_ENABLE 0x20 /* wo Set to enable diff. drivers */
+#define ICR_ASSERT_ACK 0x10 /* rw ini Set to assert ACK */
+#define ICR_ASSERT_BSY 0x08 /* rw Set to assert BSY */
+#define ICR_ASSERT_SEL 0x04 /* rw Set to assert SEL */
+#define ICR_ASSERT_ATN 0x02 /* rw Set to assert ATN */
+#define ICR_ASSERT_DATA 0x01 /* rw SCSI_DATA_REG is asserted */
+
+#ifdef DIFFERENTIAL
+#define ICR_BASE ICR_DIFF_ENABLE
+#else
+#define ICR_BASE 0
+#endif
+
+#define MODE_REG 2
+/*
+ * Note : BLOCK_DMA code will keep DRQ asserted for the duration of the
+ * transfer, causing the chip to hog the bus. You probably don't want
+ * this.
+ */
+#define MR_BLOCK_DMA_MODE 0x80 /* rw block mode DMA */
+#define MR_TARGET 0x40 /* rw target mode */
+#define MR_ENABLE_PAR_CHECK 0x20 /* rw enable parity checking */
+#define MR_ENABLE_PAR_INTR 0x10 /* rw enable bad parity interrupt */
+#define MR_ENABLE_EOP_INTR 0x08 /* rw enable eop interrupt */
+#define MR_MONITOR_BSY 0x04 /* rw enable int on unexpected bsy fail */
+#define MR_DMA_MODE 0x02 /* rw DMA / pseudo DMA mode */
+#define MR_ARBITRATE 0x01 /* rw start arbitration */
+
+#ifdef PARITY
+#define MR_BASE MR_ENABLE_PAR_CHECK
+#else
+#define MR_BASE 0
+#endif
+
+#define TARGET_COMMAND_REG 3
+#define TCR_LAST_BYTE_SENT 0x80 /* ro DMA done */
+#define TCR_ASSERT_REQ 0x08 /* tgt rw assert REQ */
+#define TCR_ASSERT_MSG 0x04 /* tgt rw assert MSG */
+#define TCR_ASSERT_CD 0x02 /* tgt rw assert CD */
+#define TCR_ASSERT_IO 0x01 /* tgt rw assert IO */
+
+#define STATUS_REG 4 /* ro */
+/*
+ * Note : a set bit indicates an active signal, driven by us or another
+ * device.
+ */
+#define SR_RST 0x80
+#define SR_BSY 0x40
+#define SR_REQ 0x20
+#define SR_MSG 0x10
+#define SR_CD 0x08
+#define SR_IO 0x04
+#define SR_SEL 0x02
+#define SR_DBP 0x01
+
+/*
+ * Setting a bit in this register will cause an interrupt to be generated when
+ * BSY is false and SEL true and this bit is asserted on the bus.
+ */
+#define SELECT_ENABLE_REG 4 /* wo */
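+/*
+ * e.g. the driver writes hostdata->id_mask (1 << this host's SCSI ID) here
+ * so that a target reselecting us raises an interrupt.
+ */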
+
+#define BUS_AND_STATUS_REG 5 /* ro */
+#define BASR_END_DMA_TRANSFER 0x80 /* ro set on end of transfer */
+#define BASR_DRQ 0x40 /* ro mirror of DRQ pin */
+#define BASR_PARITY_ERROR 0x20 /* ro parity error detected */
+#define BASR_IRQ 0x10 /* ro mirror of IRQ pin */
+#define BASR_PHASE_MATCH 0x08 /* ro Set when MSG CD IO match TCR */
+#define BASR_BUSY_ERROR 0x04 /* ro Unexpected change to inactive state */
+#define BASR_ATN 0x02 /* ro BUS status */
+#define BASR_ACK 0x01 /* ro BUS status */
+
+/* Write any value to this register to start a DMA send */
+#define START_DMA_SEND_REG 5 /* wo */
+
+/*
+ * Used in DMA transfer mode, data is latched from the SCSI bus on
+ * the falling edge of REQ (ini) or ACK (tgt)
+ */
+#define INPUT_DATA_REG 6 /* ro */
+
+/* Write any value to this register to start a DMA receive */
+#define START_DMA_TARGET_RECEIVE_REG 6 /* wo */
+
+/* Read this register to clear interrupt conditions */
+#define RESET_PARITY_INTERRUPT_REG 7 /* ro */
+
+/* Write any value to this register to start an ini mode DMA receive */
+#define START_DMA_INITIATOR_RECEIVE_REG 7 /* wo */
+
+#define C400_CONTROL_STATUS_REG NCR53C400_register_offset-8 /* rw */
+
+#define CSR_RESET 0x80 /* wo Resets 53c400 */
+#define CSR_53C80_REG 0x80 /* ro 5380 registers busy */
+#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
+#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
+#define CSR_53C80_INTR 0x10 /* rw Enable 53c80 interrupts */
+#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
+#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Is Host buffer ready */
+#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer read */
+#define CSR_GATED_53C80_IRQ 0x01 /* ro Last block xferred */
+
+#if 0
+#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR
+#else
+#define CSR_BASE CSR_53C80_INTR
+#endif
+
+/* Number of 128-byte blocks to be transferred */
+#define C400_BLOCK_COUNTER_REG NCR53C400_register_offset-7 /* rw */
+
+/* Resume transfer after disconnect */
+#define C400_RESUME_TRANSFER_REG NCR53C400_register_offset-6 /* wo */
+
+/* Access to host buffer stack */
+#define C400_HOST_BUFFER NCR53C400_register_offset-4 /* rw */
+
+
+/* Note : PHASE_* macros are based on the values of the STATUS register */
+#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)
+
+#define PHASE_DATAOUT 0
+#define PHASE_DATAIN SR_IO
+#define PHASE_CMDOUT SR_CD
+#define PHASE_STATIN (SR_CD | SR_IO)
+#define PHASE_MSGOUT (SR_MSG | SR_CD)
+#define PHASE_MSGIN (SR_MSG | SR_CD | SR_IO)
+#define PHASE_UNKNOWN 0xff
+
+/*
+ * Convert status register phase to something we can use to set phase in
+ * the target register so we can get phase mismatch interrupts on DMA
+ * transfers.
+ */
+
+#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
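+/*
+ * e.g. PHASE_MSGIN = SR_MSG | SR_CD | SR_IO = 0x1c, and 0x1c >> 2 = 0x07 =
+ * TCR_ASSERT_MSG | TCR_ASSERT_CD | TCR_ASSERT_IO.
+ */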
+
+/*
+ * The internal should_disconnect() function returns these based on the
+ * expected length of a disconnect if a device supports disconnect/
+ * reconnect.
+ */
+
+#define DISCONNECT_NONE 0
+#define DISCONNECT_TIME_TO_DATA 1
+#define DISCONNECT_LONG 2
+
+/*
+ * These are "special" values for the tag parameter passed to NCR5380_select.
+ */
+
+#define TAG_NEXT -1 /* Use next free tag */
+#define TAG_NONE -2 /*
+ * Establish I_T_L nexus instead of I_T_L_Q
+ * even on SCSI-II devices.
+ */
+
+/*
+ * These are "special" values for the irq and dma_channel fields of the
+ * Scsi_Host structure
+ */
+
+#define IRQ_NONE 255
+#define DMA_NONE 255
+#define IRQ_AUTO 254
+#define DMA_AUTO 254
+
+#define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */
+#define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */
+#define FLAG_NCR53C400 4 /* NCR53c400 */
+#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */
+
+#ifndef ASM
+struct NCR5380_hostdata {
+ NCR5380_implementation_fields; /* implementation specific */
+ unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
+ unsigned char targets_present; /* targets we have connected
+ to, so we can call a select
+ failure a retryable condition */
+ volatile unsigned char busy[8]; /* index = target, bit = lun */
+#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+ volatile int dma_len; /* requested length of DMA */
+#endif
+ volatile unsigned char last_message; /* last message OUT */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */
+ volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */
+ volatile int restart_select; /* we have disconnected,
+ used to restart
+ NCR5380_select() */
+ volatile unsigned aborted:1; /* flag, says aborted */
+ int flags;
+#ifdef USLEEP
+ unsigned long time_expires; /* in jiffies, set prior to sleeping */
+ struct Scsi_Host *next_timer;
+#endif
+#ifdef NCR5380_STATS
+ unsigned timebase; /* Base for time calcs */
+ long time_read[8]; /* time to do reads */
+ long time_write[8]; /* time to do writes */
+ unsigned long bytes_read[8]; /* bytes read */
+ unsigned long bytes_write[8]; /* bytes written */
+ unsigned pendingr;
+ unsigned pendingw;
+#endif
+};
+
+#ifdef __KERNEL__
+static struct Scsi_Host *first_instance; /* linked list of 5380's */
+
+#if defined(AUTOPROBE_IRQ)
+static int NCR5380_probe_irq (struct Scsi_Host *instance, int possible);
+#endif
+static void NCR5380_init (struct Scsi_Host *instance, int flags);
+static void NCR5380_information_transfer (struct Scsi_Host *instance);
+#ifndef DONT_USE_INTR
+static void NCR5380_intr (int irq, void *dev_id, struct pt_regs * regs);
+#endif
+static void NCR5380_main (void);
+static void NCR5380_print_options (struct Scsi_Host *instance);
+static void NCR5380_print_phase (struct Scsi_Host *instance);
+static void NCR5380_print (struct Scsi_Host *instance);
+#ifndef NCR5380_abort
+static
+#endif
+int NCR5380_abort (Scsi_Cmnd *cmd);
+#ifndef NCR5380_reset
+static
+#endif
+int NCR5380_reset (Scsi_Cmnd *cmd, unsigned int reset_flags);
+#ifndef NCR5380_queue_command
+static
+#endif
+int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+
+
+static void NCR5380_reselect (struct Scsi_Host *instance);
+static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag);
+#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
+static int NCR5380_transfer_dma (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data);
+#endif
+static int NCR5380_transfer_pio (struct Scsi_Host *instance,
+ unsigned char *phase, int *count, unsigned char **data);
+
+#if (defined(REAL_DMA) || defined(REAL_DMA_POLL))
+
+#if defined(i386) || defined(__alpha__)
+
+static __inline__ int NCR5380_pc_dma_setup (struct Scsi_Host *instance,
+ unsigned char *ptr, unsigned int count, unsigned char mode) {
+ unsigned limit;
+ unsigned long bus_addr = virt_to_bus(ptr);
+
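+    /*
+     * 8-bit ISA DMA channels (0-3) cannot cross a 64 KiB physical boundary
+     * and the 16-bit channels (5-7) cannot cross 128 KiB, so clip the count
+     * to keep the transfer inside the current page.
+     */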
+ if (instance->dma_channel <=3) {
+ if (count > 65536)
+ count = 65536;
+ limit = 65536 - (bus_addr & 0xFFFF);
+ } else {
+ if (count > 65536 * 2)
+ count = 65536 * 2;
+ limit = 65536* 2 - (bus_addr & 0x1FFFF);
+ }
+
+ if (count > limit) count = limit;
+
+ if ((count & 1) || (bus_addr & 1))
+ panic ("scsi%d : attempted unaligned DMA transfer\n", instance->host_no);
+ cli();
+ disable_dma(instance->dma_channel);
+ clear_dma_ff(instance->dma_channel);
+ set_dma_addr(instance->dma_channel, bus_addr);
+ set_dma_count(instance->dma_channel, count);
+ set_dma_mode(instance->dma_channel, mode);
+ enable_dma(instance->dma_channel);
+ sti();
+ return count;
+}
+
+static __inline__ int NCR5380_pc_dma_write_setup (struct Scsi_Host *instance,
+ unsigned char *src, unsigned int count) {
+ return NCR5380_pc_dma_setup (instance, src, count, DMA_MODE_WRITE);
+}
+
+static __inline__ int NCR5380_pc_dma_read_setup (struct Scsi_Host *instance,
+ unsigned char *src, unsigned int count) {
+ return NCR5380_pc_dma_setup (instance, src, count, DMA_MODE_READ);
+}
+
+static __inline__ int NCR5380_pc_dma_residual (struct Scsi_Host *instance) {
+ register int tmp;
+ cli();
+ clear_dma_ff(instance->dma_channel);
+ tmp = get_dma_residue(instance->dma_channel);
+ sti();
+ return tmp;
+}
+#endif /* defined(i386) || defined(__alpha__) */
+#endif /* defined(REAL_DMA) */
+#endif /* __KERNEL__ */
+#endif /* ndef ASM */
+#endif /* NCR5380_H */
diff --git a/linux/src/drivers/scsi/NCR53c406a.c b/linux/src/drivers/scsi/NCR53c406a.c
new file mode 100644
index 0000000..7745f5a
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR53c406a.c
@@ -0,0 +1,1079 @@
+/*
+ * NCR53c406.c
+ * Low-level SCSI driver for NCR53c406a chip.
+ * Copyright (C) 1994, 1995, 1996 Normunds Saumanis (normunds@fi.ibm.com)
+ *
+ * LILO command line usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]
+ * Specify IRQ = 0 for non-interrupt driven mode.
+ * FASTPIO = 1 for fast pio mode, 0 for slow mode.
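+ * For example, "ncr53c406a=0x330,10,1" selects port base 0x330, IRQ 10 and
+ * fast PIO; the values must match entries in the ports[] and intrs[] tables.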
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#define NCR53C406A_DEBUG 0
+#define VERBOSE_NCR53C406A_DEBUG 0
+
+/* Set this to 1 for PIO mode (recommended) or to 0 for DMA mode */
+#define USE_PIO 1
+
+#define USE_BIOS 0
+/* #define BIOS_ADDR 0xD8000 */ /* define this if autoprobe fails */
+/* #define PORT_BASE 0x330 */ /* define this if autoprobe fails */
+/* #define IRQ_LEV 0 */ /* define this if autoprobe fails */
+#define DMA_CHAN 5 /* this is ignored if DMA is disabled */
+
+/* Set this to 0 if you encounter kernel lockups while transferring
+ * data in PIO mode */
+#define USE_FAST_PIO 1
+
+/* ============= End of user configurable parameters ============= */
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "NCR53c406a.h"
+
+/* ============================================================= */
+
+#define WATCHDOG 5000000
+
+#define SYNC_MODE 0 /* Synchronous transfer mode */
+
+#if DEBUG
+#undef NCR53C406A_DEBUG
+#define NCR53C406A_DEBUG 1
+#endif
+
+#if USE_PIO
+#define USE_DMA 0
+#else
+#define USE_DMA 1
+#endif
+
+/* Default configuration */
+#define C1_IMG 0x07 /* ID=7 */
+#define C2_IMG 0x48 /* FE SCSI2 */
+#if USE_DMA
+#define C3_IMG 0x21 /* CDB TE */
+#else
+#define C3_IMG 0x20 /* CDB */
+#endif
+#define C4_IMG 0x04 /* ANE */
+#define C5_IMG 0xb6 /* AA PI SIE POL */
+
+#define REG0 (outb(C4_IMG, CONFIG4))
+#define REG1 (outb(C5_IMG, CONFIG5))
+
+#if NCR53C406A_DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#if VERBOSE_NCR53C406A_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define LOAD_DMA_COUNT(count) \
+ outb(count & 0xff, TC_LSB); \
+ outb((count >> 8) & 0xff, TC_MSB); \
+ outb((count >> 16) & 0xff, TC_HIGH);
+
+/* Chip commands */
+#define DMA_OP 0x80
+
+#define SCSI_NOP 0x00
+#define FLUSH_FIFO 0x01
+#define CHIP_RESET 0x02
+#define SCSI_RESET 0x03
+#define RESELECT 0x40
+#define SELECT_NO_ATN 0x41
+#define SELECT_ATN 0x42
+#define SELECT_ATN_STOP 0x43
+#define ENABLE_SEL 0x44
+#define DISABLE_SEL 0x45
+#define SELECT_ATN3 0x46
+#define RESELECT3 0x47
+#define TRANSFER_INFO 0x10
+#define INIT_CMD_COMPLETE 0x11
+#define MSG_ACCEPT 0x12
+#define TRANSFER_PAD 0x18
+#define SET_ATN 0x1a
+#define RESET_ATN 0x1b
+#define SEND_MSG 0x20
+#define SEND_STATUS 0x21
+#define SEND_DATA 0x22
+#define DISCONN_SEQ 0x23
+#define TERMINATE_SEQ 0x24
+#define TARG_CMD_COMPLETE 0x25
+#define DISCONN 0x27
+#define RECV_MSG 0x28
+#define RECV_CMD 0x29
+#define RECV_DATA 0x2a
+#define RECV_CMD_SEQ 0x2b
+#define TARGET_ABORT_DMA 0x04
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+   where something crashed or got stuck) */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+
+#if NCR53C406A_DEBUG
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
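+/* e.g. rtrc(4) paints the border red; if the machine locks up, the colour
+   left on screen shows the last trace point that was reached. */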
+/*----------------------------------------------------------------*/
+
+enum Phase {
+ idle,
+ data_out,
+ data_in,
+ command_ph,
+ status_ph,
+ message_out,
+ message_in
+};
+
+/* Static function prototypes */
+static void NCR53c406a_intr(int, void *, struct pt_regs *);
+static void internal_done(Scsi_Cmnd *);
+static void wait_intr(void);
+static void chip_init(void);
+static void calc_port_addr(void);
+#ifndef IRQ_LEV
+static int irq_probe(void);
+#endif
+
+/* ================================================================= */
+
+#if USE_BIOS
+static void *bios_base = (void *)0;
+#endif
+
+#if PORT_BASE
+static int port_base = PORT_BASE;
+#else
+static int port_base = 0;
+#endif
+
+#if IRQ_LEV
+static int irq_level = IRQ_LEV;
+#else
+static int irq_level = -1; /* 0 is 'no irq', so use -1 for 'uninitialized'*/
+#endif
+
+#if USE_DMA
+static int dma_chan = 0;
+#endif
+
+#if USE_PIO
+static int fast_pio = USE_FAST_PIO;
+#endif
+
+static Scsi_Cmnd *current_SC = NULL;
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+static char info_msg[256];
+
+struct proc_dir_entry proc_scsi_NCR53c406a = {
+ PROC_SCSI_NCR53C406A, 7, "NCR53c406a",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+/* ================================================================= */
+
+/* possible BIOS locations */
+#if USE_BIOS
+static void *addresses[] = {
+ (void *)0xd8000,
+ (void *)0xc8000
+};
+#define ADDRESS_COUNT (sizeof( addresses ) / sizeof( unsigned ))
+#endif /* USE_BIOS */
+
+/* possible i/o port addresses */
+static unsigned short ports[] = { 0x230, 0x330 };
+#define PORT_COUNT (sizeof( ports ) / sizeof( unsigned short ))
+
+/* possible interrupt channels */
+static unsigned short intrs[] = { 10, 11, 12, 15 };
+#define INTR_COUNT (sizeof( intrs ) / sizeof( unsigned short ))
+
+/* signatures for NCR 53c406a based controllers */
+#if USE_BIOS
+struct signature {
+ char *signature;
+ int sig_offset;
+ int sig_length;
+} signatures[] = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ { "Copyright (C) Acculogic, Inc.\r\n2.8M Diskette Extension Bios ver 4.04.03 03/01/1993", 61, 82 },
+};
+#define SIGNATURE_COUNT (sizeof( signatures ) / sizeof( struct signature ))
+#endif /* USE_BIOS */
+
+/* ============================================================ */
+
+/* Control Register Set 0 */
+static int TC_LSB; /* transfer counter lsb */
+static int TC_MSB; /* transfer counter msb */
+static int SCSI_FIFO; /* scsi fifo register */
+static int CMD_REG; /* command register */
+static int STAT_REG; /* status register */
+static int DEST_ID; /* selection/reselection bus id */
+static int INT_REG; /* interrupt status register */
+static int SRTIMOUT; /* select/reselect timeout reg */
+static int SEQ_REG; /* sequence step register */
+static int SYNCPRD; /* synchronous transfer period */
+static int FIFO_FLAGS; /* indicates # of bytes in fifo */
+static int SYNCOFF; /* synchronous offset register */
+static int CONFIG1; /* configuration register */
+static int CLKCONV; /* clock conversion reg */
+/*static int TESTREG;*/ /* test mode register */
+static int CONFIG2; /* Configuration 2 Register */
+static int CONFIG3; /* Configuration 3 Register */
+static int CONFIG4; /* Configuration 4 Register */
+static int TC_HIGH; /* Transfer Counter High */
+/*static int FIFO_BOTTOM;*/ /* Reserve FIFO byte register */
+
+/* Control Register Set 1 */
+/*static int JUMPER_SENSE;*/ /* Jumper sense port reg (r/w) */
+/*static int SRAM_PTR;*/ /* SRAM address pointer reg (r/w) */
+/*static int SRAM_DATA;*/ /* SRAM data register (r/w) */
+static int PIO_FIFO; /* PIO FIFO registers (r/w) */
+/*static int PIO_FIFO1;*/ /* */
+/*static int PIO_FIFO2;*/ /* */
+/*static int PIO_FIFO3;*/ /* */
+static int PIO_STATUS; /* PIO status (r/w) */
+/*static int ATA_CMD;*/ /* ATA command/status reg (r/w) */
+/*static int ATA_ERR;*/ /* ATA features/error register (r/w)*/
+static int PIO_FLAG; /* PIO flag interrupt enable (r/w) */
+static int CONFIG5; /* Configuration 5 register (r/w) */
+/*static int SIGNATURE;*/ /* Signature Register (r) */
+/*static int CONFIG6;*/ /* Configuration 6 register (r) */
+
+/* ============================================================== */
+
+#if USE_DMA
+static __inline__ int
+NCR53c406a_dma_setup (unsigned char *ptr,
+ unsigned int count,
+ unsigned char mode) {
+ unsigned limit;
+ unsigned long flags = 0;
+
+ VDEB(printk("dma: before count=%d ", count));
+ if (dma_chan <=3) {
+ if (count > 65536)
+ count = 65536;
+ limit = 65536 - (((unsigned) ptr) & 0xFFFF);
+ } else {
+ if (count > (65536<<1))
+ count = (65536<<1);
+ limit = (65536<<1) - (((unsigned) ptr) & 0x1FFFF);
+ }
+
+ if (count > limit) count = limit;
+
+ VDEB(printk("after count=%d\n", count));
+ if ((count & 1) || (((unsigned) ptr) & 1))
+ panic ("NCR53c406a: attempted unaligned DMA transfer\n");
+
+ save_flags(flags);
+ cli();
+ disable_dma(dma_chan);
+ clear_dma_ff(dma_chan);
+ set_dma_addr(dma_chan, (long) ptr);
+ set_dma_count(dma_chan, count);
+ set_dma_mode(dma_chan, mode);
+ enable_dma(dma_chan);
+ restore_flags(flags);
+
+ return count;
+}
+
+static __inline__ int
+NCR53c406a_dma_write(unsigned char *src, unsigned int count) {
+ return NCR53c406a_dma_setup (src, count, DMA_MODE_WRITE);
+}
+
+static __inline__ int
+NCR53c406a_dma_read(unsigned char *src, unsigned int count) {
+ return NCR53c406a_dma_setup (src, count, DMA_MODE_READ);
+}
+
+static __inline__ int
+NCR53c406a_dma_residual (void) {
+ register int tmp;
+ unsigned long flags = 0;
+ save_flags(flags);
+ cli();
+ clear_dma_ff(dma_chan);
+ tmp = get_dma_residue(dma_chan);
+ restore_flags(flags);
+
+ return tmp;
+}
+#endif /* USE_DMA */
+
+#if USE_PIO
+static __inline__ int NCR53c406a_pio_read(unsigned char *request,
+ unsigned int reqlen)
+{
+ int i;
+ int len; /* current scsi fifo size */
+ unsigned long flags = 0;
+
+ REG1;
+ while (reqlen) {
+ i = inb(PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80)
+ return 0;
+
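+	/* Bits 1-4 of PIO_STATUS encode (roughly) how full the 128-byte PIO
+	   FIFO is; map them to a byte count that is safe to read below. */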
+ switch( i & 0x1e ) {
+ default:
+ case 0x10:
+ len=0; break;
+ case 0x0:
+ len=1; break;
+ case 0x8:
+ len=42; break;
+ case 0xc:
+ len=84; break;
+ case 0xe:
+ len=128; break;
+ }
+
+ if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */
+ return 0;
+ }
+
+ if (len) {
+ if( len > reqlen )
+ len = reqlen;
+
+ save_flags(flags);
+ cli();
+ if( fast_pio && len > 3 ) {
+ insl(PIO_FIFO,request,len>>2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ }
+ else {
+ while(len--) {
+ *request++ = inb(PIO_FIFO);
+ reqlen--;
+ }
+ }
+ restore_flags(flags);
+ }
+ }
+ return 0;
+}
+
+static __inline__ int NCR53c406a_pio_write(unsigned char *request,
+ unsigned int reqlen)
+{
+ int i = 0;
+ int len; /* current scsi fifo size */
+ unsigned long flags = 0;
+
+ REG1;
+ while (reqlen && !(i&0x40)) {
+ i = inb(PIO_STATUS);
+ /* VDEB(printk("pio_status=%x\n", i)); */
+ if (i & 0x80) /* error */
+ return 0;
+
+ switch( i & 0x1e ) {
+ case 0x10:
+ len=128; break;
+ case 0x0:
+ len=84; break;
+ case 0x8:
+ len=42; break;
+ case 0xc:
+ len=1; break;
+ default:
+ case 0xe:
+ len=0; break;
+ }
+
+ if (len) {
+ if( len > reqlen )
+ len = reqlen;
+
+ save_flags(flags);
+ cli();
+ if( fast_pio && len > 3 ) {
+ outsl(PIO_FIFO,request,len>>2);
+ request += len & 0xfc;
+ reqlen -= len & 0xfc;
+ }
+ else {
+ while(len--) {
+ outb(*request++, PIO_FIFO);
+ reqlen--;
+ }
+ }
+ restore_flags(flags);
+ }
+ }
+ return 0;
+}
+#endif /* USE_PIO */
+
+int
+NCR53c406a_detect(Scsi_Host_Template * tpnt){
+ struct Scsi_Host *shpnt;
+#ifndef PORT_BASE
+ int i;
+#endif
+
+#if USE_BIOS
+ int ii, jj;
+ bios_base = 0;
+ /* look for a valid signature */
+ for( ii=0; ii < ADDRESS_COUNT && !bios_base; ii++)
+ for( jj=0; (jj < SIGNATURE_COUNT) && !bios_base; jj++)
+ if(!memcmp((void *) addresses[ii]+signatures[jj].sig_offset,
+ (void *) signatures[jj].signature,
+ (int) signatures[jj].sig_length))
+ bios_base=addresses[ii];
+
+ if(!bios_base){
+ printk("NCR53c406a: BIOS signature not found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a BIOS found at %X\n", (unsigned int) bios_base););
+#endif /* USE_BIOS */
+
+#ifdef PORT_BASE
+ if (check_region(port_base, 0x10)) /* ports already snatched */
+ port_base = 0;
+
+#else /* autodetect */
+ if (port_base) { /* LILO override */
+ if (check_region(port_base, 0x10))
+ port_base = 0;
+ }
+ else {
+ for(i=0; i<PORT_COUNT && !port_base; i++){
+ if(check_region(ports[i], 0x10)){
+ DEB(printk("NCR53c406a: port %x in use\n", ports[i]));
+ }
+ else {
+ VDEB(printk("NCR53c406a: port %x available\n", ports[i]));
+ outb(C5_IMG, ports[i] + 0x0d); /* reg set 1 */
+ if( (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) ^ inb(ports[i] + 0x0e)) == 7
+ && (inb(ports[i] + 0x0e) & 0xf8) == 0x58 ) {
+ VDEB(printk("NCR53c406a: Sig register valid\n"));
+ VDEB(printk("port_base=%x\n", port_base));
+ port_base = ports[i];
+ }
+ }
+ }
+ }
+#endif /* PORT_BASE */
+
+ if(!port_base){ /* no ports found */
+ printk("NCR53c406a: no available ports found\n");
+ return 0;
+ }
+
+ DEB(printk("NCR53c406a detected\n"));
+
+ calc_port_addr();
+ chip_init();
+
+#ifndef IRQ_LEV
+ if (irq_level < 0) { /* LILO override if >= 0*/
+    irq_level = -1;	/* XXX IRQ probing disabled; was: irq_level = irq_probe(); */
+ if (irq_level < 0) { /* Trouble */
+ printk("NCR53c406a: IRQ problem, irq_level=%d, giving up\n", irq_level);
+ return 0;
+ }
+ }
+#endif
+
+ DEB(printk("NCR53c406a: using port_base %x\n", port_base));
+ request_region(port_base, 0x10, "NCR53c406a");
+
+ if(irq_level > 0) {
+ if(request_irq(irq_level, NCR53c406a_intr, 0, "NCR53c406a", NULL)){
+ printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level);
+ return 0;
+ }
+ tpnt->can_queue = 1;
+ DEB(printk("NCR53c406a: allocated IRQ %d\n", irq_level));
+ }
+ else if (irq_level == 0) {
+ tpnt->can_queue = 0;
+ DEB(printk("NCR53c406a: No interrupts detected\n"));
+#if USE_DMA
+ printk("NCR53c406a: No interrupts found and DMA mode defined. Giving up.\n");
+ return 0;
+#endif /* USE_DMA */
+ }
+ else {
+ DEB(printk("NCR53c406a: Shouldn't get here!\n"));
+ return 0;
+ }
+
+#if USE_DMA
+ dma_chan = DMA_CHAN;
+ if(request_dma(dma_chan, "NCR53c406a") != 0){
+ printk("NCR53c406a: unable to allocate DMA channel %d\n", dma_chan);
+ return 0;
+ }
+
+ DEB(printk("Allocated DMA channel %d\n", dma_chan));
+#endif /* USE_DMA */
+
+ tpnt->present = 1;
+ tpnt->proc_dir = &proc_scsi_NCR53c406a;
+
+ shpnt = scsi_register(tpnt, 0);
+ shpnt->irq = irq_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+#if USE_DMA
+ shpnt->dma = dma_chan;
+#endif
+
+#if USE_DMA
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, DMA channel %d.",
+ port_base, irq_level, dma_chan);
+#else
+ sprintf(info_msg, "NCR53c406a at 0x%x, IRQ %d, %s PIO mode.",
+ port_base, irq_level, fast_pio ? "fast" : "slow");
+#endif
+
+ return (tpnt->present);
+}
+
+/* called from init/main.c */
+void NCR53c406a_setup(char *str, int *ints)
+{
+ static size_t setup_idx = 0;
+ size_t i;
+
+ DEB(printk("NCR53c406a: Setup called\n"););
+
+ if (setup_idx >= PORT_COUNT - 1) {
+ printk("NCR53c406a: Setup called too many times. Bad LILO params?\n");
+ return;
+ }
+ if (ints[0] < 1 || ints[0] > 3) {
+ printk("NCR53c406a: Malformed command line\n");
+ printk("NCR53c406a: Usage: ncr53c406a=<PORTBASE>[,<IRQ>[,<FASTPIO>]]\n");
+ return;
+ }
+ for (i = 0; i < PORT_COUNT && !port_base; i++)
+ if (ports[i] == ints[1]) {
+ port_base = ints[1];
+ DEB(printk("NCR53c406a: Specified port_base 0x%X\n", port_base);)
+ }
+ if (!port_base) {
+ printk("NCR53c406a: Invalid PORTBASE 0x%X specified\n", ints[1]);
+ return;
+ }
+
+ if (ints[0] > 1) {
+ if (ints[2] == 0) {
+ irq_level = 0;
+ DEB(printk("NCR53c406a: Specified irq %d\n", irq_level);)
+ }
+ else
+ for (i = 0; i < INTR_COUNT && irq_level < 0; i++)
+ if (intrs[i] == ints[2]) {
+ irq_level = ints[2];
+ DEB(printk("NCR53c406a: Specified irq %d\n", port_base);)
+ }
+ if (irq_level < 0)
+ printk("NCR53c406a: Invalid IRQ %d specified\n", ints[2]);
+ }
+
+ if (ints[0] > 2)
+ fast_pio = ints[3];
+
+ DEB(printk("NCR53c406a: port_base=0x%X, irq=%d, fast_pio=%d\n",
+ port_base, irq_level, fast_pio);)
+}
+
+const char*
+NCR53c406a_info(struct Scsi_Host *SChost){
+ DEB(printk("NCR53c406a_info called\n"));
+ return (info_msg);
+}
+
+static void internal_done(Scsi_Cmnd *SCpnt) {
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+
+static void wait_intr() {
+ int i = jiffies + WATCHDOG;
+
+ while(i>jiffies && !(inb(STAT_REG)&0xe0)) /* wait for a pseudo-interrupt */
+ barrier();
+
+ if (i <= jiffies) { /* Timed out */
+ rtrc(0);
+ current_SC->result = DID_TIME_OUT << 16;
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ NCR53c406a_intr(0, NULL, NULL);
+}
+
+int NCR53c406a_command(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_command called\n"));
+ NCR53c406a_queue(SCpnt, internal_done);
+ if(irq_level)
+ while (!internal_done_flag);
+ else /* interrupts not supported */
+ while (!internal_done_flag)
+ wait_intr();
+
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+
+int
+NCR53c406a_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)){
+ int i;
+ unsigned long flags = 0;
+
+ VDEB(printk("NCR53c406a_queue called\n"));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
+ SCpnt->cmnd[0],
+ SCpnt->cmd_len,
+ SCpnt->target,
+ SCpnt->lun,
+ SCpnt->request_bufflen));
+
+#if 0
+ VDEB(for(i=0; i<SCpnt->cmd_len; i++)
+ printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
+ VDEB(printk("\n"));
+#endif
+
+ current_SC = SCpnt;
+ current_SC->scsi_done = done;
+ current_SC->SCp.phase = command_ph;
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+
+ save_flags(flags);
+ cli();
+ REG0;
+ outb(SCpnt->target, DEST_ID); /* set destination */
+ outb(FLUSH_FIFO, CMD_REG); /* reset the fifos */
+
+ for(i=0; i<SCpnt->cmd_len; i++){
+ outb(SCpnt->cmnd[i], SCSI_FIFO);
+ }
+ outb(SELECT_NO_ATN, CMD_REG);
+ restore_flags(flags);
+
+ rtrc(1);
+ return 0;
+}
+
+int
+NCR53c406a_abort(Scsi_Cmnd *SCpnt){
+ DEB(printk("NCR53c406a_abort called\n"));
+ return SCSI_ABORT_SNOOZE; /* Don't know how to abort */
+}
+
+int
+NCR53c406a_reset(Scsi_Cmnd *SCpnt, unsigned int flags){
+ DEB(printk("NCR53c406a_reset called\n"));
+ outb(C4_IMG, CONFIG4); /* Select reg set 0 */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG); /* required after reset */
+ outb(SCSI_RESET, CMD_REG);
+ chip_init();
+
+ rtrc(2);
+ if (irq_level)
+ return SCSI_RESET_PENDING; /* should get an interrupt */
+ else
+ return SCSI_RESET_WAKEUP; /* won't get any interrupts */
+}
+
+int
+NCR53c406a_biosparm(Scsi_Disk *disk, kdev_t dev, int* info_array){
+ int size;
+
+ DEB(printk("NCR53c406a_biosparm called\n"));
+
+ size = disk->capacity;
+ info_array[0] = 64; /* heads */
+ info_array[1] = 32; /* sectors */
+ info_array[2] = size>>11; /* cylinders */
+ if (info_array[2] > 1024) { /* big disk */
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = size / (255*63);
+ }
+ return 0;
+ }
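+/*
+ * Editorial note (worked example, not part of the original driver): with
+ * the default 64 heads x 32 sectors, size>>11 equals size/(64*32)
+ * cylinders.  A 2 GB disk of 4194304 512-byte sectors gives
+ * 4194304>>11 = 2048 cylinders; since that exceeds 1024, the code above
+ * falls back to 255 heads x 63 sectors, i.e. 4194304/(255*63) = 261
+ * cylinders.
+ */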
+
+ static void
+NCR53c406a_intr(int unused, void *dev_id, struct pt_regs *regs){
+ DEB(unsigned char fifo_size;)
+ DEB(unsigned char seq_reg;)
+ unsigned char status, int_reg;
+ unsigned long flags = 0;
+#if USE_PIO
+ unsigned char pio_status;
+ struct scatterlist *sglist;
+ unsigned int sgcount;
+#endif
+
+ VDEB(printk("NCR53c406a_intr called\n"));
+
+ save_flags(flags);
+ cli();
+#if USE_PIO
+ REG1;
+ pio_status = inb(PIO_STATUS);
+#endif
+ REG0;
+ status = inb(STAT_REG);
+ DEB(seq_reg = inb(SEQ_REG));
+ int_reg = inb(INT_REG);
+ DEB(fifo_size = inb(FIFO_FLAGS) & 0x1f);
+ restore_flags(flags);
+
+#if NCR53C406A_DEBUG
+ printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x",
+ status, seq_reg, int_reg, fifo_size);
+#if (USE_DMA)
+ printk("\n");
+#else
+ printk(", pio=%02x\n", pio_status);
+#endif /* USE_DMA */
+#endif /* NCR53C406A_DEBUG */
+
+ if(int_reg & 0x80){ /* SCSI reset intr */
+ rtrc(3);
+ DEB(printk("NCR53c406a: reset intr received\n"));
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_RESET << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+#if USE_PIO
+ if(pio_status & 0x80) {
+ printk("NCR53C406A: Warning: PIO error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+#endif /* USE_PIO */
+
+ if(status & 0x20) { /* Parity error */
+ printk("NCR53c406a: Warning: parity error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_PARITY << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(status & 0x40) { /* Gross error */
+ printk("NCR53c406a: Warning: gross error!\n");
+ current_SC->SCp.phase = idle;
+ current_SC->result = DID_ERROR << 16;
+ current_SC->scsi_done(current_SC);
+ return;
+ }
+
+ if(int_reg & 0x20){ /* Disconnect */
+ DEB(printk("NCR53c406a: disconnect intr received\n"));
+ if(current_SC->SCp.phase != message_in){ /* Unexpected disconnect */
+ current_SC->result = DID_NO_CONNECT << 16;
+ }
+ else{ /* Command complete, return status and message */
+ current_SC->result = (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ }
+
+ rtrc(0);
+ current_SC->SCp.phase = idle;
+ current_SC->scsi_done( current_SC );
+ return;
+ }
+
+ switch(status & 0x07){ /* scsi phase */
+ case 0x00: /* DATA-OUT */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(5);
+ current_SC->SCp.phase = data_out;
+ VDEB(printk("NCR53c406a: Data-Out phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif /* USE_DMA */
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_write(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_write(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif /* USE_PIO */
+ }
+ break;
+
+ case 0x01: /* DATA-IN */
+ if(int_reg & 0x10){ /* Target requesting info transfer */
+ rtrc(6);
+ current_SC->SCp.phase = data_in;
+ VDEB(printk("NCR53c406a: Data-In phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+#if USE_DMA /* No s/g support for DMA */
+ NCR53c406a_dma_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+#endif /* USE_DMA */
+ outb(TRANSFER_INFO | DMA_OP, CMD_REG);
+#if USE_PIO
+ if (!current_SC->use_sg) /* Don't use scatter-gather */
+ NCR53c406a_pio_read(current_SC->request_buffer,
+ current_SC->request_bufflen);
+ else { /* Use scatter-gather */
+ sgcount = current_SC->use_sg;
+ sglist = current_SC->request_buffer;
+ while( sgcount-- ) {
+ NCR53c406a_pio_read(sglist->address, sglist->length);
+ sglist++;
+ }
+ }
+ REG0;
+#endif /* USE_PIO */
+ }
+ break;
+
+ case 0x02: /* COMMAND */
+ current_SC->SCp.phase = command_ph;
+ printk("NCR53c406a: Warning: Unknown interrupt occurred in command phase!\n");
+ break;
+
+ case 0x03: /* STATUS */
+ rtrc(7);
+ current_SC->SCp.phase = status_ph;
+ VDEB(printk("NCR53c406a: Status phase\n"));
+ outb(FLUSH_FIFO, CMD_REG);
+ outb(INIT_CMD_COMPLETE, CMD_REG);
+ break;
+
+ case 0x04: /* Reserved */
+ case 0x05: /* Reserved */
+ printk("NCR53c406a: WARNING: Reserved phase!!!\n");
+ break;
+
+ case 0x06: /* MESSAGE-OUT */
+ DEB(printk("NCR53c406a: Message-Out phase\n"));
+ current_SC->SCp.phase = message_out;
+ outb(SET_ATN, CMD_REG); /* Reject the message */
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+
+ case 0x07: /* MESSAGE-IN */
+ rtrc(4);
+ VDEB(printk("NCR53c406a: Message-In phase\n"));
+ current_SC->SCp.phase = message_in;
+
+ current_SC->SCp.Status = inb(SCSI_FIFO);
+ current_SC->SCp.Message = inb(SCSI_FIFO);
+
+ VDEB(printk("SCSI FIFO size=%d\n", inb(FIFO_FLAGS) & 0x1f));
+ DEB(printk("Status = %02x Message = %02x\n",
+ current_SC->SCp.Status, current_SC->SCp.Message));
+
+ if(current_SC->SCp.Message == SAVE_POINTERS ||
+ current_SC->SCp.Message == DISCONNECT) {
+ outb(SET_ATN, CMD_REG); /* Reject message */
+ DEB(printk("Discarding SAVE_POINTERS message\n"));
+ }
+ outb(MSG_ACCEPT, CMD_REG);
+ break;
+ }
+}
+
+#ifndef IRQ_LEV
+static int irq_probe()
+{
+ int irqs, irq;
+ int i;
+
+ inb(INT_REG); /* clear the interrupt register */
+ sti();
+ irqs = probe_irq_on();
+
+ /* Invalid command will cause an interrupt */
+ REG0;
+ outb(0xff, CMD_REG);
+
+ /* Wait for the interrupt to occur */
+ i = jiffies + WATCHDOG;
+ while(i > jiffies && !(inb(STAT_REG) & 0x80))
+ barrier();
+ if (i <= jiffies) { /* Timed out, must be hardware trouble */
+ probe_irq_off(irqs);
+ return -1;
+ }
+
+ irq = probe_irq_off(irqs);
+
+ /* Kick the chip */
+ outb(CHIP_RESET, CMD_REG);
+ outb(SCSI_NOP, CMD_REG);
+ chip_init();
+
+ return irq;
+}
+#endif /* IRQ_LEV */
+
+static void chip_init()
+{
+ REG1;
+#if USE_DMA
+ outb(0x00, PIO_STATUS);
+#else /* USE_PIO */
+ outb(0x01, PIO_STATUS);
+#endif
+ outb(0x00, PIO_FLAG);
+
+ outb(C4_IMG, CONFIG4); /* REG0; */
+ outb(C3_IMG, CONFIG3);
+ outb(C2_IMG, CONFIG2);
+ outb(C1_IMG, CONFIG1);
+
+ outb(0x05, CLKCONV); /* clock conversion factor */
+ outb(0x9C, SRTIMOUT); /* Selection timeout */
+ outb(0x05, SYNCPRD); /* Synchronous transfer period */
+ outb(SYNC_MODE, SYNCOFF); /* synchronous mode */
+}
+
+void calc_port_addr()
+{
+ /* Control Register Set 0 */
+ TC_LSB = (port_base+0x00);
+ TC_MSB = (port_base+0x01);
+ SCSI_FIFO = (port_base+0x02);
+ CMD_REG = (port_base+0x03);
+ STAT_REG = (port_base+0x04);
+ DEST_ID = (port_base+0x04);
+ INT_REG = (port_base+0x05);
+ SRTIMOUT = (port_base+0x05);
+ SEQ_REG = (port_base+0x06);
+ SYNCPRD = (port_base+0x06);
+ FIFO_FLAGS = (port_base+0x07);
+ SYNCOFF = (port_base+0x07);
+ CONFIG1 = (port_base+0x08);
+ CLKCONV = (port_base+0x09);
+ /* TESTREG = (port_base+0x0A); */
+ CONFIG2 = (port_base+0x0B);
+ CONFIG3 = (port_base+0x0C);
+ CONFIG4 = (port_base+0x0D);
+ TC_HIGH = (port_base+0x0E);
+ /* FIFO_BOTTOM = (port_base+0x0F); */
+
+ /* Control Register Set 1 */
+ /* JUMPER_SENSE = (port_base+0x00);*/
+ /* SRAM_PTR = (port_base+0x01);*/
+ /* SRAM_DATA = (port_base+0x02);*/
+ PIO_FIFO = (port_base+0x04);
+ /* PIO_FIFO1 = (port_base+0x05);*/
+ /* PIO_FIFO2 = (port_base+0x06);*/
+ /* PIO_FIFO3 = (port_base+0x07);*/
+ PIO_STATUS = (port_base+0x08);
+ /* ATA_CMD = (port_base+0x09);*/
+ /* ATA_ERR = (port_base+0x0A);*/
+ PIO_FLAG = (port_base+0x0B);
+ CONFIG5 = (port_base+0x0D);
+ /* SIGNATURE = (port_base+0x0E);*/
+ /* CONFIG6 = (port_base+0x0F);*/
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = NCR53c406a;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/NCR53c406a.h b/linux/src/drivers/scsi/NCR53c406a.h
new file mode 100644
index 0000000..88e45e5
--- /dev/null
+++ b/linux/src/drivers/scsi/NCR53c406a.h
@@ -0,0 +1,83 @@
+#ifndef _NCR53C406A_H
+#define _NCR53C406A_H
+
+/*
+ * NCR53c406a.h
+ *
+ * Copyright (C) 1994 Normunds Saumanis (normunds@rx.tech.swh.lv)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* NOTE: scatter-gather support only works in PIO mode.
+ * Use SG_NONE if DMA mode is enabled!
+ */
+#define NCR53c406a { \
+ NULL /* next */, \
+ NULL /* usage count */, \
+ &proc_scsi_NCR53c406a /* proc_dir */, \
+ NULL /* proc_info */, \
+ "NCR53c406a" /* name */, \
+ NCR53c406a_detect /* detect */, \
+ NULL /* release */, \
+ NCR53c406a_info /* info */, \
+ NCR53c406a_command /* command */, \
+ NCR53c406a_queue /* queuecommand */, \
+ NCR53c406a_abort /* abort */, \
+ NCR53c406a_reset /* reset */, \
+ NULL /* slave_attach */, \
+ NCR53c406a_biosparm /* biosparm */, \
+ 1 /* can_queue */, \
+ 7 /* SCSI ID of the chip */, \
+ 32 /*SG_ALL*/ /*SG_NONE*/, \
+ 1 /* commands per lun */, \
+ 0 /* number of boards in system */, \
+ 1 /* unchecked_isa_dma */, \
+ ENABLE_CLUSTERING \
+}
+
+extern struct proc_dir_entry proc_scsi_NCR53c406a;
+
+int NCR53c406a_detect(Scsi_Host_Template *);
+const char* NCR53c406a_info(struct Scsi_Host *);
+
+int NCR53c406a_command(Scsi_Cmnd *);
+int NCR53c406a_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int NCR53c406a_abort(Scsi_Cmnd *);
+int NCR53c406a_reset(Scsi_Cmnd *, unsigned int);
+int NCR53c406a_biosparm(Disk *, kdev_t, int []);
+
+#endif /* _NCR53C406A_H */
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/advansys.c b/linux/src/drivers/scsi/advansys.c
new file mode 100644
index 0000000..7aea67c
--- /dev/null
+++ b/linux/src/drivers/scsi/advansys.c
@@ -0,0 +1,15554 @@
+/* $Id: advansys.c,v 1.1.4.1 2005/06/02 18:52:38 ams Exp $ */
+#define ASC_VERSION "3.1E" /* AdvanSys Driver Version */
+
+/*
+ * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
+ *
+ * Copyright (c) 1995-1998 Advanced System Products, Inc.
+ * All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ * There is an AdvanSys Linux WWW page at:
+ * http://www.advansys.com/linux.html
+ *
+ * The latest version of the AdvanSys driver is available at:
+ * ftp://ftp.advansys.com/pub/linux/linux.tgz
+ *
+ * Please send questions, comments, bug reports to:
+ * bobf@advansys.com (Bob Frey)
+ */
+
+/*
+
+ Documentation for the AdvanSys Driver
+
+ A. Linux Kernel Testing
+ B. Adapters Supported by this Driver
+ C. Linux v1.2.X - Directions for Adding the AdvanSys Driver
+ D. Linux v1.3.1 - v1.3.57 - Directions for Adding the AdvanSys Driver
+ E. Linux v1.3.58 and Newer - Upgrading the AdvanSys Driver
+ F. Source Comments
+ G. Driver Compile Time Options and Debugging
+ H. Driver LILO Option
+ I. Release History
+ J. Known Problems or Issues
+ K. Credits
+ L. AdvanSys Contact Information
+
+ A. Linux Kernel Testing
+
+ This driver has been tested in the following Linux kernels: v1.2.13,
+ v1.3.57, v2.0.33, v2.1.77. These kernel versions are major releases
+ of Linux or the latest Linux kernel versions available when this version
+ of the driver was released. The driver should also work in earlier
+ versions of the Linux kernel. Beginning with v1.3.58 the AdvanSys driver
+ is included with all Linux kernels. Please refer to sections C, D, and
+ E for instructions on adding or upgrading the AdvanSys driver.
+
+ B. Adapters Supported by this Driver
+
+ AdvanSys (Advanced System Products, Inc.) manufactures the following
+ RISC-based, Bus-Mastering, Fast (10 MHz) and Ultra (20 MHz) Narrow
+ (8-bit transfer) SCSI Host Adapters for the ISA, EISA, VL, and PCI
+ buses and RISC-based, Bus-Mastering, Ultra (20 MHz) Wide (16-bit
+ transfer) SCSI Host Adapters for the PCI bus.
+
+ The CDB counts below indicate the number of SCSI CDB (Command
+ Descriptor Block) requests that can be stored in the RISC chip
+ cache and board LRAM. A CDB is a single SCSI command. The driver
+ detect routine will display the number of CDBs available for each
+ adapter detected. The number of CDBs used by the driver can be
+ lowered in the BIOS by changing the 'Host Queue Size' adapter setting.
+
+ Connectivity Products:
+ ABP510/5150 - Bus-Master ISA (240 CDB) (Footnote 1)
+ ABP5140 - Bus-Master ISA PnP (16 CDB) (Footnote 1, 3)
+ ABP5142 - Bus-Master ISA PnP with floppy (16 CDB) (Footnote 4)
+ ABP920 - Bus-Master PCI (16 CDB)
+ ABP930 - Bus-Master PCI (16 CDB) (Footnote 5)
+ ABP930U - Bus-Master PCI Ultra (16 CDB)
+ ABP930UA - Bus-Master PCI Ultra (16 CDB)
+ ABP960 - Bus-Master PCI MAC/PC (16 CDB) (Footnote 2)
+ ABP960U - Bus-Master PCI MAC/PC Ultra (16 CDB) (Footnote 2)
+
+ Single Channel Products:
+ ABP542 - Bus-Master ISA with floppy (240 CDB)
+ ABP742 - Bus-Master EISA (240 CDB)
+ ABP842 - Bus-Master VL (240 CDB)
+ ABP940 - Bus-Master PCI (240 CDB)
+ ABP940U - Bus-Master PCI Ultra (240 CDB)
+ ABP970 - Bus-Master PCI MAC/PC (240 CDB)
+ ABP970U - Bus-Master PCI MAC/PC Ultra (240 CDB)
+ ABP940UW - Bus-Master PCI Ultra-Wide (240 CDB)
+
+ Multi Channel Products:
+ ABP752 - Dual Channel Bus-Master EISA (240 CDB Per Channel)
+ ABP852 - Dual Channel Bus-Master VL (240 CDB Per Channel)
+ ABP950 - Dual Channel Bus-Master PCI (240 CDB Per Channel)
+ ABP980 - Four Channel Bus-Master PCI (240 CDB Per Channel)
+ ABP980U - Four Channel Bus-Master PCI Ultra (240 CDB Per Channel)
+
+ Footnotes:
+ 1. This board has been shipped by HP with the 4020i CD-R drive.
+ The board has no BIOS so it cannot control a boot device, but
+ it can control any secondary SCSI device.
+ 2. This board has been sold by Iomega as a Jaz Jet PCI adapter.
+ 3. This board has been sold by SIIG as the i540 SpeedMaster.
+ 4. This board has been sold by SIIG as the i542 SpeedMaster.
+ 5. This board has been sold by SIIG as the Fast SCSI Pro PCI.
+
+ C. Linux v1.2.X - Directions for Adding the AdvanSys Driver
+
+ These directions apply to v1.2.13. For versions that follow v1.2.13
+ but precede v1.3.57, some of the changes for Linux v1.3.X listed
+ below may need to be modified or included. A patch is available
+ for v1.2.13 from the AdvanSys WWW and FTP sites.
+
+ There are two source files: advansys.h and advansys.c. Copy
+ both of these files to the directory /usr/src/linux/drivers/scsi.
+
+ 1. Add the following line to /usr/src/linux/arch/i386/config.in
+ after "comment 'SCSI low-level drivers'":
+
+ bool 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS y
+
+ 2. Add the following lines to /usr/src/linux/drivers/scsi/hosts.c
+ after "#include "hosts.h"":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ #include "advansys.h"
+ #endif
+
+ and after "static Scsi_Host_Template builtin_scsi_hosts[] =":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+ #endif
+
+ 3. Add the following lines to /usr/src/linux/drivers/scsi/Makefile:
+
+ ifdef CONFIG_SCSI_ADVANSYS
+ SCSI_SRCS := $(SCSI_SRCS) advansys.c
+ SCSI_OBJS := $(SCSI_OBJS) advansys.o
+ else
+ SCSI_MODULE_OBJS := $(SCSI_MODULE_OBJS) advansys.o
+ endif
+
+ 4. (Optional) If you would like to enable the LILO command line
+ and /etc/lilo.conf 'advansys' option, make the following changes.
+ This option can be used to disable I/O port scanning or to limit
+ I/O port scanning to specific addresses. Refer to the 'Driver
+ LILO Option' section below. Add the following lines to
+ /usr/src/linux/init/main.c in the prototype section:
+
+ extern void advansys_setup(char *str, int *ints);
+
+ and add the following lines to the bootsetups[] array.
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+ #endif
+
+ 5. If you have the HP 4020i CD-R driver and Linux v1.2.X you should
+ add a fix to the CD-ROM target driver. This fix will allow
+ you to mount CDs with the iso9660 file system. Linux v1.3.X
+ already has this fix. In the file /usr/src/linux/drivers/scsi/sr.c
+ and function get_sectorsize() after the line:
+
+ if(scsi_CDs[i].sector_size == 0) scsi_CDs[i].sector_size = 2048;
+
+ add the following line:
+
+ if(scsi_CDs[i].sector_size == 2340) scsi_CDs[i].sector_size = 2048;
+
+ 6. In the directory /usr/src/linux run 'make config' to configure
+ the AdvanSys driver, then run 'make vmlinux' or 'make zlilo' to
+ make the kernel. If the AdvanSys driver is not configured, then
+ a loadable module can be built by running 'make modules' and
+ 'make modules_install'. Use 'insmod' and 'rmmod' to install
+ and remove advansys.o.
+
+ D. Linux v1.3.1 - v1.3.57 - Directions for Adding the AdvanSys Driver
+
+ These directions apply to v1.3.57. For versions that precede v1.3.57
+ some of these changes may need to be modified or eliminated. A patch
+ is available for v1.3.57 from the AdvanSys WWW and FTP sites.
+ Beginning with v1.3.58 this driver is included with the Linux
+ distribution eliminating the need for making any changes.
+
+ There are two source files: advansys.h and advansys.c. Copy
+ both of these files to the directory /usr/src/linux/drivers/scsi.
+
+ 1. Add the following line to /usr/src/linux/drivers/scsi/Config.in
+ after "comment 'SCSI low-level drivers'":
+
+ dep_tristate 'AdvanSys SCSI support' CONFIG_SCSI_ADVANSYS $CONFIG_SCSI
+
+ 2. Add the following lines to /usr/src/linux/drivers/scsi/hosts.c
+ after "#include "hosts.h"":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ #include "advansys.h"
+ #endif
+
+ and after "static Scsi_Host_Template builtin_scsi_hosts[] =":
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+ #endif
+
+ 3. Add the following lines to /usr/src/linux/drivers/scsi/Makefile:
+
+ ifeq ($(CONFIG_SCSI_ADVANSYS),y)
+ L_OBJS += advansys.o
+ else
+ ifeq ($(CONFIG_SCSI_ADVANSYS),m)
+ M_OBJS += advansys.o
+ endif
+ endif
+
+ 4. Add the following line to /usr/src/linux/include/linux/proc_fs.h
+ in the enum scsi_directory_inos array:
+
+ PROC_SCSI_ADVANSYS,
+
+ 5. (Optional) If you would like to enable the LILO command line
+ and /etc/lilo.conf 'advansys' option, make the following changes.
+ This option can be used to disable I/O port scanning or to limit
+ I/O port scanning to specific addresses. Refer to the 'Driver
+ LILO Option' section below. Add the following lines to
+ /usr/src/linux/init/main.c in the prototype section:
+
+ extern void advansys_setup(char *str, int *ints);
+
+ and add the following lines to the bootsetups[] array.
+
+ #ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+ #endif
+
+ 6. In the directory /usr/src/linux run 'make config' to configure
+ the AdvanSys driver, then run 'make vmlinux' or 'make zlilo' to
+ make the kernel. If the AdvanSys driver is not configured, then
+ a loadable module can be built by running 'make modules' and
+ 'make modules_install'. Use 'insmod' and 'rmmod' to install
+ and remove advansys.o.
+
+ E. Linux v1.3.58 and Newer - Upgrading the AdvanSys Driver
+
+ To upgrade the AdvanSys driver in a Linux v1.3.58 and newer
+ kernel, first check the version of the current driver. The
+ version is defined by the manifest constant ASC_VERSION at
+ the beginning of advansys.c. The new driver should have a
+ ASC_VERSION value greater than the current version. To install
+ the new driver rename advansys.c and advansys.h in the Linux
+ kernel source tree drivers/scsi directory to different names
+ or save them to a different directory in case you want to revert
+ to the old version of the driver. After the old driver is saved
+ copy the new advansys.c and advansys.h to drivers/scsi, rebuild
+ the kernel, and install the new kernel. No other changes are needed.
+
+ F. Source Comments
+
+ 1. Use tab stops set to 4 for the source files. For vi use 'se tabstops=4'.
+
+ 2. This driver should be maintained in multiple files. But to make
+ it easier to include with Linux and to follow Linux conventions,
+ the whole driver is maintained in the source files advansys.h and
+ advansys.c. In this file logical sections of the driver begin with
+ a comment that contains '---'. The following are the logical sections
+ of the driver.
+
+ --- Linux Version
+ --- Linux Include Files
+ --- Driver Options
+ --- Debugging Header
+ --- Asc Library Constants and Macros
+ --- Adv Library Constants and Macros
+ --- Driver Constants and Macros
+ --- Driver Structures
+ --- Driver Data
+ --- Driver Function Prototypes
+ --- Linux 'Scsi_Host_Template' and advansys_setup() Functions
+ --- Loadable Driver Support
+ --- Miscellaneous Driver Functions
+ --- Functions Required by the Asc Library
+ --- Functions Required by the Adv Library
+ --- Tracing and Debugging Functions
+ --- Asc Library Functions
+ --- Adv Library Functions
+
+ 3. The string 'XXX' is used to flag code that needs to be re-written
+ or that contains a problem that needs to be addressed.
+
+ 4. I have stripped comments from and reformatted the source for the
+ Asc Library and Adv Library to reduce the size of this file. This
+ source can be found under the following headings. The Asc Library
+ is used to support Narrow Boards. The Adv Library is used to
+ support Wide Boards.
+
+ --- Asc Library Constants and Macros
+ --- Adv Library Constants and Macros
+ --- Asc Library Functions
+ --- Adv Library Functions
+
+ G. Driver Compile Time Options and Debugging
+
+ In this source file the following constants can be defined. They are
+ defined in the source below. Both of these options are enabled by
+ default.
+
+ 1. ADVANSYS_ASSERT - Enable driver assertions (Def: Enabled)
+
+ Enabling this option adds assertion logic statements to the
+ driver. If an assertion fails a message will be displayed to
+ the console, but the system will continue to operate. Any
+ assertions encountered should be reported to the person
+ responsible for the driver. Assertion statements may proactively
+ detect problems with the driver and facilitate fixing these
+ problems. Enabling assertions will add a small overhead to the
+ execution of the driver.
+
+ 2. ADVANSYS_DEBUG - Enable driver debugging (Def: Disabled)
+
+ Enabling this option adds tracing functions to the driver and
+ the ability to set a driver tracing level at boot time. This
+ option will also export symbols not required outside the driver to
+ the kernel name space. This option is very useful for debugging
+ the driver, but it will add to the size of the driver execution
+ image and add overhead to the execution of the driver.
+
+ The amount of debugging output can be controlled with the global
+ variable 'asc_dbglvl'. The higher the number the more output. By
+ default the debug level is 0.
+
+ If the driver is loaded at boot time and the LILO Driver Option
+ is included in the system, the debug level can be changed by
+ specifying a 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port. The
+ first three hex digits of the pseudo I/O Port must be set to
+ 'deb' and the fourth hex digit specifies the debug level: 0 - F.
+ The following command line will look for an adapter at 0x330
+ and set the debug level to 2.
+
+ linux advansys=0x330,0,0,0,0xdeb2
+
+ If the driver is built as a loadable module this variable can be
+ defined when the driver is loaded. The following insmod command
+ will set the debug level to one.
+
+ insmod advansys.o asc_dbglvl=1
+
+ Debugging Message Levels:
+ 0: Errors Only
+ 1: High-Level Tracing
+ 2-N: Verbose Tracing
+
+ I don't know the approved way for turning on printk()s to the
+ console. Here's a program I use to do this. Debug output is
+ logged in /var/adm/messages.
+
+ main()
+ {
+ syscall(103, 7, 0, 0);
+ }
+
+ I found that increasing LOG_BUF_LEN to 40960 in kernel/printk.c
+ prevents most level 1 debug messages from being lost.
+
+ 3. ADVANSYS_STATS - Enable statistics (Def: Enabled >= v1.3.0)
+
+ Enabling this option adds statistics collection and display
+ through /proc to the driver. The information is useful for
+ monitoring driver and device performance. It will add to the
+ size of the driver execution image and add minor overhead to
+ the execution of the driver.
+
+ Statistics are maintained on a per adapter basis. Driver entry
+ point call counts and transfer size counts are maintained.
+ Statistics are only available for kernels greater than or equal
+ to v1.3.0 with the CONFIG_PROC_FS (/proc) file system configured.
+
+ AdvanSys SCSI adapter files have the following path name format:
+
+ /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
+
+ This information can be displayed with cat. For example:
+
+ cat /proc/scsi/advansys/0
+
+ When ADVANSYS_STATS is not defined the AdvanSys /proc files only
+ contain adapter and device configuration information.
+
+ H. Driver LILO Option
+
+ If init/main.c is modified as described in the 'Directions for Adding
+ the AdvanSys Driver to Linux' sections (C.4. and D.5.) above, the driver will
+ recognize the 'advansys' LILO command line and /etc/lilo.conf option.
+ This option can be used to either disable I/O port scanning or to limit
+ scanning to 1 - 4 I/O ports. Regardless of the option setting EISA and
+ PCI boards will still be searched for and detected. This option only
+ affects searching for ISA and VL boards.
+
+ Examples:
+ 1. Eliminate I/O port scanning:
+ boot: linux advansys=
+ or
+ boot: linux advansys=0x0
+ 2. Limit I/O port scanning to one I/O port:
+ boot: linux advansys=0x110
+ 3. Limit I/O port scanning to four I/O ports:
+ boot: linux advansys=0x110,0x210,0x230,0x330
+
+ For a loadable module the same effect can be achieved by setting
+ the 'asc_iopflag' variable and 'asc_ioport' array when loading
+ the driver, e.g.
+
+ insmod advansys.o asc_iopflag=1 asc_ioport=0x110,0x330
+
+ If ADVANSYS_DEBUG is defined a 5th (ASC_NUM_IOPORT_PROBE + 1)
+ I/O Port may be added to specify the driver debug level. Refer to
+ the 'Driver Compile Time Options and Debugging' section above for
+ more information.
+
+ I. Release History
+
+ BETA-1.0 (12/23/95):
+ First Release
+
+ BETA-1.1 (12/28/95):
+ 1. Prevent advansys_detect() from being called twice.
+ 2. Add LILO 0xdeb[0-f] option to set 'asc_dbglvl'.
+
+ 1.2 (1/12/96):
+ 1. Prevent re-entrancy in the interrupt handler which
+ resulted in the driver hanging Linux.
+ 2. Fix problem that prevented ABP-940 cards from being
+ recognized on some PCI motherboards.
+ 3. Add support for the ABP-5140 PnP ISA card.
+ 4. Fix check condition return status.
+ 5. Add conditionally compiled code for Linux v1.3.X.
+
+ 1.3 (2/23/96):
+ 1. Fix problem in advansys_biosparam() that resulted in the
+ wrong drive geometry being returned for drives > 1GB with
+ extended translation enabled.
+ 2. Add additional tracing during device initialization.
+ 3. Change code that only applies to ISA PnP adapter.
+ 4. Eliminate 'make dep' warning.
+ 5. Try to fix problem with handling resets by increasing their
+ timeout value.
+
+ 1.4 (5/8/96):
+ 1. Change definitions to eliminate conflicts with other subsystems.
+ 2. Add versioning code for the shared interrupt changes.
+ 3. Eliminate problem in asc_rmqueue() with iterating after removing
+ a request.
+ 4. Remove reset request loop problem from the "Known Problems or
+ Issues" section. This problem was isolated and fixed in the
+ mid-level SCSI driver.
+
+ 1.5 (8/8/96):
+ 1. Add support for ABP-940U (PCI Ultra) adapter.
+ 2. Add support for IRQ sharing by setting the SA_SHIRQ flag for
+ request_irq and supplying a dev_id pointer to both request_irq()
+ and free_irq().
+ 3. In AscSearchIOPortAddr11() restore a call to check_region() which
+ should be used before I/O port probing.
+ 4. Fix bug in asc_prt_hex() which resulted in the displaying
+ the wrong data.
+ 5. Incorporate miscellaneous Asc Library bug fixes and new microcode.
+ 6. Change driver versioning to be specific to each Linux sub-level.
+ 7. Change statistics gathering to be per adapter instead of global
+ to the driver.
+ 8. Add more information and statistics to the adapter /proc file:
+ /proc/scsi/advansys[0...].
+ 9. Remove 'cmd_per_lun' from the "Known Problems or Issues" list.
+ This problem has been addressed with the SCSI mid-level changes
+ made in v1.3.89. The advansys_select_queue_depths() function
+ was added for the v1.3.89 changes.
+
+ 1.6 (9/10/96):
+ 1. Incorporate miscellaneous Asc Library bug fixes and new microcode.
+
+ 1.7 (9/25/96):
+ 1. Enable clustering and optimize the setting of the maximum number
+ of scatter gather elements for any particular board. Clustering
+ increases CPU utilization, but results in a relatively larger
+ increase in I/O throughput.
+ 2. Improve the performance of the request queuing functions by
+ adding a last pointer to the queue structure.
+ 3. Correct problems with reset and abort request handling that
+ could have hung or crashed Linux.
+ 4. Add more information to the adapter /proc file:
+ /proc/scsi/advansys[0...].
+ 5. Remove the request timeout issue from the driver issues list.
+ 6. Miscellaneous documentation additions and changes.
+
+ 1.8 (10/4/96):
+ 1. Make changes to handle the new v2.1.0 kernel memory mapping
+ in which a kernel virtual address may not be equivalent to its
+ bus or DMA memory address.
+ 2. Change abort and reset request handling to make it yet even
+ more robust.
+ 3. Try to mitigate request starvation by sending ordered requests
+ to heavily loaded, tag queuing enabled devices.
+ 4. Maintain statistics on request response time.
+ 5. Add request response time statistics and other information to
+ the adapter /proc file: /proc/scsi/advansys[0...].
+
+ 1.9 (10/21/96):
+ 1. Add conditionally compiled code (ASC_QUEUE_FLOW_CONTROL) to
+ make use of mid-level SCSI driver device queue depth flow
+ control mechanism. This will eliminate aborts caused by a
+ device being unable to keep up with requests and eliminate
+ repeat busy or QUEUE FULL status returned by a device.
+ 2. Incorporate miscellaneous Asc Library bug fixes.
+ 3. To allow the driver to work in kernels with broken module
+ support set 'cmd_per_lun' if the driver is compiled as a
+ module. This change affects kernels v1.3.89 to present.
+ 4. Remove PCI BIOS address from the driver banner. The PCI BIOS
+ is relocated by the motherboard BIOS and its new address can
+ not be determined by the driver.
+ 5. Add mid-level SCSI queue depth information to the adapter
+ /proc file: /proc/scsi/advansys[0...].
+
+ 2.0 (11/14/96):
+ 1. Change allocation of global structures used for device
+ initialization to guarantee they are in DMA-able memory.
+ Previously when the driver was loaded as a module these
+ structures might not have been in DMA-able memory, causing
+ device initialization to fail.
+
+ 2.1 (12/30/96):
+ 1. In advansys_reset(), if the request is a synchronous reset
+ request, even if the request serial number has changed, then
+ complete the request.
+ 2. Add Asc Library bug fixes including new microcode.
+ 3. Clear inquiry buffer before using it.
+ 4. Correct ifdef typo.
+
+ 2.2 (1/15/97):
+ 1. Add Asc Library bug fixes including new microcode.
+ 2. Add synchronous data transfer rate information to the
+ adapter /proc file: /proc/scsi/advansys[0...].
+ 3. Change ADVANSYS_DEBUG to be disabled by default. This
+ will reduce the size of the driver image, eliminate execution
+ overhead, and remove unneeded symbols from the kernel symbol
+ space that were previously added by the driver.
+ 4. Add new compile-time option ADVANSYS_ASSERT for assertion
+ code that used to be defined within ADVANSYS_DEBUG. This
+ option is enabled by default.
+
+ 2.8 (5/26/97):
+ 1. Change version number to 2.8 to synchronize the Linux driver
+ version numbering with other AdvanSys drivers.
+ 2. Reformat source files without tabs to present the same view
+ of the file to everyone regardless of the editor tab setting
+ being used.
+ 3. Add Asc Library bug fixes.
+
+ 3.1A (1/8/98):
+ 1. Change version number to 3.1 to indicate that support for
+ Ultra-Wide adapters (ABP-940UW) is included in this release.
+ 2. Add Asc Library (Narrow Board) bug fixes.
+ 3. Report an underrun condition with the host status byte set
+ to DID_UNDERRUN. Currently DID_UNDERRUN is defined to 0 which
+ causes the underrun condition to be ignored. When Linux defines
+ its own DID_UNDERRUN the constant defined in this file can be
+ removed.
+ 4. Add patch to AscWaitTixISRDone().
+ 5. Add support for up to 16 different AdvanSys host adapter SCSI
+ channels in one system. This allows four cards with four channels
+ to be used in one system.
+
+ 3.1B (1/9/98):
+ 1. Handle that PCI register base addresses are not always page
+ aligned even though ioremap() requires that the address argument
+ be page aligned.
+
+ 3.1C (1/10/98):
+ 1. Update latest BIOS version checked for from the /proc file.
+ 2. Don't set microcode SDTR variable at initialization. Instead
+ wait until device capabilities have been detected from an Inquiry
+ command.
+
+ 3.1D (1/21/98):
+ 1. Improve performance when the driver is compiled as module by
+ allowing up to 64 scatter-gather elements instead of 8.
+
+ 3.1E (5/1/98):
+ 1. Set time delay in AscWaitTixISRDone() to 1000 ms.
+ 2. Include SMP locking changes.
+ 3. For v2.1.93 and newer kernels use CONFIG_PCI and new PCI BIOS
+ access functions.
+ 4. Update board serial number printing.
+ 5. Try allocating an IRQ both with and without the SA_INTERRUPT
+ flag set to allow IRQ sharing with drivers that do not set
+ the SA_INTERRUPT flag. Also display a more descriptive error
+ message if request_irq() fails.
+ 6. Update to latest Asc and Adv Libraries.
+
+ J. Known Problems or Issues
+
+ 1. Remove conditional constants (ASC_QUEUE_FLOW_CONTROL) around
+ the queue depth flow control code when mid-level SCSI changes
+ are included in Linux.
+
+ K. Credits
+
+ Nathan Hartwell <mage@cdc3.cdc.net> provided the directions and
+ basis for the Linux v1.3.X changes which were included in the
+ 1.2 release.
+
+ Thomas E Zerucha <zerucha@shell.portal.com> pointed out a bug
+ in advansys_biosparam() which was fixed in the 1.3 release.
+
+ Erik Ratcliffe <erik@caldera.com> has done testing of the
+ AdvanSys driver in the Caldera releases.
+
+ Rik van Riel <H.H.vanRiel@fys.ruu.nl> provided a patch to
+ AscWaitTixISRDone() which he found necessary to make the
+ driver work with a SCSI-1 disk.
+
+ Mark Moran <mmoran@mmoran.com> has helped test Ultra-Wide
+ support in the 3.1A driver.
+
+ L. AdvanSys Contact Information
+
+ Mail: Advanced System Products, Inc.
+ 1150 Ringwood Court
+ San Jose, CA 95131
+ Operator: 1-408-383-9400
+ FAX: 1-408-383-9612
+ Tech Support: 1-800-525-7440/1-408-467-2930
+ BBS: 1-408-383-9540 (14400,N,8,1)
+ Interactive FAX: 1-408-383-9753
+ Customer Direct Sales: 1-800-525-7443/1-408-383-5777
+ Tech Support E-Mail: support@advansys.com
+ FTP Site: ftp.advansys.com (login: anonymous)
+ Web Site: http://www.advansys.com
+
+*/
+
+
+/*
+ * --- Linux Version
+ */
+
+/* Convert Linux Version, Patch-level, Sub-level to LINUX_VERSION_CODE. */
+#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
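+/*
+ * Editorial example: ASC_LINUX_VERSION(1,3,0) evaluates to
+ * 1*65536 + 3*256 + 0 = 66304, the same encoding the kernel uses for
+ * LINUX_VERSION_CODE, so the two can be compared directly in the
+ * version checks below.
+ */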
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif /* LINUX_VERSION_CODE */
+
+
+/*
+ * --- Linux Include Files
+ */
+
+#include <linux/config.h>
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+#ifdef MODULE
+#include <linux/module.h>
+#endif /* MODULE */
+#endif /* version >= v1.3.0 */
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+#include <linux/proc_fs.h>
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(2,1,23)
+#include <linux/init.h>
+#endif /* version >= v2.1.23 */
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#include "../block/blk.h"
+#else /* version >= v1.3.0 */
+#include <linux/blk.h>
+#include <linux/stat.h>
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(2,1,95)
+#include <asm/spinlock.h>
+#endif /* version >= 2.1.95 */
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include "advansys.h"
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(2,1,93)
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#endif /* CONFIG_PCI */
+#else /* version < v2.1.93 */
+/*
+ * For earlier than v2.1.93 the driver has its own PCI configuration.
+ * If PCI is not needed in a kernel before v2.1.93 this define can be
+ * turned-off to make the driver object smaller.
+ */
+#define ASC_CONFIG_PCI
+#endif /* version < v2.1.93 */
+
+/*
+ * If Linux eventually defines a DID_UNDERRUN, the constant here can be
+ * removed. The current value of zero for DID_UNDERRUN results in underrun
+ * conditions being ignored.
+ */
+#define DID_UNDERRUN 0
+
+
+/*
+ * --- Driver Options
+ */
+
+/* Enable driver assertions. */
+#define ADVANSYS_ASSERT
+
+/* Enable driver tracing. */
+/* #define ADVANSYS_DEBUG */
+
+/*
+ * Because of no /proc to display them, statistics are disabled
+ * for versions prior to v1.3.0.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#undef ADVANSYS_STATS /* Disable statistics */
+#else /* version >= v1.3.0 */
+#define ADVANSYS_STATS /* Enable statistics. */
+#endif /* version >= v1.3.0 */
+
+
+/*
+ * --- Debugging Header
+ */
+
+#ifdef ADVANSYS_DEBUG
+#define STATIC
+#else /* ADVANSYS_DEBUG */
+#define STATIC static
+#endif /* ADVANSYS_DEBUG */
+
+
+/*
+ * --- Asc Library Constants and Macros
+ */
+
+#define ASC_LIB_VERSION_MAJOR 1
+#define ASC_LIB_VERSION_MINOR 22
+#define ASC_LIB_SERIAL_NUMBER 113
+
+typedef unsigned char uchar;
+
+#ifndef NULL
+#define NULL (0)
+#endif
+#ifndef TRUE
+#define TRUE (1)
+#endif
+#ifndef FALSE
+#define FALSE (0)
+#endif
+#define REG register
+#define rchar REG __s8
+#define rshort REG __s16
+#define rint REG __s32
+#define rlong REG __s32
+#define ruchar REG __u8
+#define rushort REG __u16
+#define ruint REG __u32
+#define rulong REG __u32
+#define NULLPTR (void *)0
+#define FNULLPTR (void *)0UL
+#define EOF (-1)
+#define EOS '\0'
+#define ERR (-1)
+#define UB_ERR (uchar)(0xFF)
+#define UW_ERR (uint)(0xFFFF)
+#define UL_ERR (ulong)(0xFFFFFFFFUL)
+#define iseven_word(val) ((((uint)val) & (uint)0x0001) == 0)
+#define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0)
+#define toeven_word(val) (((uint)val) & (uint)0xFFFE)
+#define biton(val, bits) (((uint)(val >> bits) & (uint)0x0001) != 0)
+#define bitoff(val, bits) (((uint)(val >> bits) & (uint)0x0001) == 0)
+#define lbiton(val, bits) (((ulong)(val >> bits) & (ulong)0x00000001UL) != 0)
+#define lbitoff(val, bits) (((ulong)(val >> bits) & (ulong)0x00000001UL) == 0)
+#define absh(val) ((val) < 0 ? -(val) : (val))
+#define swapbyte(ch) ((((ch) << 4) | ((ch) >> 4)))
+#ifndef GBYTE
+#define GBYTE (0x40000000UL)
+#endif
+#ifndef MBYTE
+#define MBYTE (0x100000UL)
+#endif
+#ifndef KBYTE
+#define KBYTE (0x400)
+#endif
+#define HI_BYTE(x) (*((__u8 *)(&x)+1))
+#define LO_BYTE(x) (*((__u8 *)&x))
+#define HI_WORD(x) (*((__u16 *)(&x)+1))
+#define LO_WORD(x) (*((__u16 *)&x))
+#ifndef MAKEWORD
+#define MAKEWORD(lo, hi) ((__u16) (((__u16) lo) | ((__u16) hi << 8)))
+#endif
+#ifndef MAKELONG
+#define MAKELONG(lo, hi) ((__u32) (((__u32) lo) | ((__u32) hi << 16)))
+#endif
+#define SwapWords(dWord) ((__u32) ((dWord >> 16) | (dWord << 16)))
+#define SwapBytes(word) ((__u16) ((word >> 8) | (word << 8)))
+#define BigToLittle(dWord) ((__u32) (SwapWords(MAKELONG(SwapBytes(LO_WORD(dWord)), SwapBytes(HI_WORD(dWord))))))
+#define LittleToBig(dWord) BigToLittle(dWord)
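+/*
+ * Editorial example of the byte-order helpers above: SwapBytes(0x1234)
+ * yields 0x3412, SwapWords(0x12345678) yields 0x56781234, and
+ * BigToLittle(0x12345678) combines byte and word swaps to produce
+ * 0x78563412.
+ */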
+#define AscPCIConfigVendorIDRegister 0x0000
+#define AscPCIConfigDeviceIDRegister 0x0002
+#define AscPCIConfigCommandRegister 0x0004
+#define AscPCIConfigStatusRegister 0x0006
+#define AscPCIConfigRevisionIDRegister 0x0008
+#define AscPCIConfigCacheSize 0x000C
+#define AscPCIConfigLatencyTimer 0x000D
+#define AscPCIIOBaseRegister 0x0010
+#define AscPCICmdRegBits_IOMemBusMaster 0x0007
+#define ASC_PCI_ID2BUS(id) ((id) & 0xFF)
+#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F)
+#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7)
+#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF))
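+/*
+ * Editorial example: ASC_PCI_MKID(0, 9, 0) packs bus 0, device 9,
+ * function 0 into 0x4800; ASC_PCI_ID2DEV(0x4800) then recovers 9, and
+ * ASC_PCI_ID2BUS()/ASC_PCI_ID2FUNC() both return 0.
+ */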
+#define ASC_PCI_VENDORID 0x10CD
+#define ASC_PCI_DEVICEID_1200A 0x1100
+#define ASC_PCI_DEVICEID_1200B 0x1200
+#define ASC_PCI_DEVICEID_ULTRA 0x1300
+#define ASC_PCI_REVISION_3150 0x02
+#define ASC_PCI_REVISION_3050 0x03
+
+#define ASC_DVCLIB_CALL_DONE (1)
+#define ASC_DVCLIB_CALL_FAILED (0)
+#define ASC_DVCLIB_CALL_ERROR (-1)
+
+#define PortAddr unsigned short /* port address size */
+#define Ptr2Func ulong
+#define inp(port) inb(port)
+#define inpw(port) inw(port)
+#define inpl(port) inl(port)
+#define outp(port, byte) outb((byte), (port))
+#define outpw(port, word) outw((word), (port))
+#define outpl(port, long) outl((long), (port))
+#define ASC_MAX_SG_QUEUE 7
+#define ASC_MAX_SG_LIST SG_ALL
+
+#define ASC_CS_TYPE unsigned short
+#ifndef asc_ptr_type
+#define asc_ptr_type
+#endif
+
+#ifndef ASC_GET_PTR2FUNC
+#define ASC_GET_PTR2FUNC(fun) (Ptr2Func)(fun)
+#endif
+#define FLIP_BYTE_NIBBLE(x) (((x<<4)& 0xFF) | (x>>4))
+#define ASC_IS_ISA (0x0001)
+#define ASC_IS_ISAPNP (0x0081)
+#define ASC_IS_EISA (0x0002)
+#define ASC_IS_PCI (0x0004)
+#define ASC_IS_PCI_ULTRA (0x0104)
+#define ASC_IS_PCMCIA (0x0008)
+#define ASC_IS_MCA (0x0020)
+#define ASC_IS_VL (0x0040)
+#define ASC_ISA_PNP_PORT_ADDR (0x279)
+#define ASC_ISA_PNP_PORT_WRITE (ASC_ISA_PNP_PORT_ADDR+0x800)
+#define ASC_IS_WIDESCSI_16 (0x0100)
+#define ASC_IS_WIDESCSI_32 (0x0200)
+#define ASC_IS_BIG_ENDIAN (0x8000)
+#define ASC_CHIP_MIN_VER_VL (0x01)
+#define ASC_CHIP_MAX_VER_VL (0x07)
+#define ASC_CHIP_MIN_VER_PCI (0x09)
+#define ASC_CHIP_MAX_VER_PCI (0x0F)
+#define ASC_CHIP_VER_PCI_BIT (0x08)
+#define ASC_CHIP_MIN_VER_ISA (0x11)
+#define ASC_CHIP_MIN_VER_ISA_PNP (0x21)
+#define ASC_CHIP_MAX_VER_ISA (0x27)
+#define ASC_CHIP_VER_ISA_BIT (0x30)
+#define ASC_CHIP_VER_ISAPNP_BIT (0x20)
+#define ASC_CHIP_VER_ASYN_BUG (0x21)
+#define ASC_CHIP_VER_PCI 0x08
+#define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02)
+#define ASC_CHIP_VER_PCI_ULTRA_3050 (ASC_CHIP_VER_PCI | 0x03)
+#define ASC_CHIP_MIN_VER_EISA (0x41)
+#define ASC_CHIP_MAX_VER_EISA (0x47)
+#define ASC_CHIP_VER_EISA_BIT (0x40)
+#define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3)
+#define ASC_MAX_LIB_SUPPORTED_ISA_CHIP_VER 0x21
+#define ASC_MAX_LIB_SUPPORTED_PCI_CHIP_VER 0x0A
+#define ASC_MAX_VL_DMA_ADDR (0x07FFFFFFL)
+#define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL)
+#define ASC_MAX_PCI_DMA_ADDR (0xFFFFFFFFL)
+#define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL)
+#define ASC_MAX_ISA_DMA_ADDR (0x00FFFFFFL)
+#define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL)
+#define ASC_MAX_EISA_DMA_ADDR (0x07FFFFFFL)
+#define ASC_MAX_EISA_DMA_COUNT (0x07FFFFFFL)
+#ifndef inpw_noswap
+#define inpw_noswap(port) inpw(port)
+#endif
+#ifndef outpw_noswap
+#define outpw_noswap(port, data) outpw(port, data)
+#endif
+#define ASC_SCSI_ID_BITS 3
+#define ASC_SCSI_TIX_TYPE uchar
+#define ASC_ALL_DEVICE_BIT_SET 0xFF
+#ifdef ASC_WIDESCSI_16
+#undef ASC_SCSI_ID_BITS
+#define ASC_SCSI_ID_BITS 4
+#define ASC_ALL_DEVICE_BIT_SET 0xFFFF
+#endif
+#ifdef ASC_WIDESCSI_32
+#undef ASC_SCSI_ID_BITS
+#define ASC_SCSI_ID_BITS 5
+#define ASC_ALL_DEVICE_BIT_SET 0xFFFFFFFFL
+#endif
+#if ASC_SCSI_ID_BITS == 3
+#define ASC_SCSI_BIT_ID_TYPE uchar
+#define ASC_MAX_TID 7
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFF
+#elif ASC_SCSI_ID_BITS == 4
+#define ASC_SCSI_BIT_ID_TYPE ushort
+#define ASC_MAX_TID 15
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFFFF
+#elif ASC_SCSI_ID_BITS == 5
+#define ASC_SCSI_BIT_ID_TYPE ulong
+#define ASC_MAX_TID 31
+#define ASC_MAX_LUN 7
+#define ASC_SCSI_WIDTH_BIT_SET 0xFFFFFFFF
+#else
+#error ASC_SCSI_ID_BITS definition is wrong
+#endif
+#define ASC_MAX_SENSE_LEN 32
+#define ASC_MIN_SENSE_LEN 14
+#define ASC_MAX_CDB_LEN 12
+#define ASC_SCSI_RESET_HOLD_TIME_US 60
+#define SCSICMD_TestUnitReady 0x00
+#define SCSICMD_Rewind 0x01
+#define SCSICMD_Rezero 0x01
+#define SCSICMD_RequestSense 0x03
+#define SCSICMD_Format 0x04
+#define SCSICMD_FormatUnit 0x04
+#define SCSICMD_Read6 0x08
+#define SCSICMD_Write6 0x0A
+#define SCSICMD_Seek6 0x0B
+#define SCSICMD_Inquiry 0x12
+#define SCSICMD_Verify6 0x13
+#define SCSICMD_ModeSelect6 0x15
+#define SCSICMD_ModeSense6 0x1A
+#define SCSICMD_StartStopUnit 0x1B
+#define SCSICMD_LoadUnloadTape 0x1B
+#define SCSICMD_ReadCapacity 0x25
+#define SCSICMD_Read10 0x28
+#define SCSICMD_Write10 0x2A
+#define SCSICMD_Seek10 0x2B
+#define SCSICMD_Erase10 0x2C
+#define SCSICMD_WriteAndVerify10 0x2E
+#define SCSICMD_Verify10 0x2F
+#define SCSICMD_WriteBuffer 0x3B
+#define SCSICMD_ReadBuffer 0x3C
+#define SCSICMD_ReadLong 0x3E
+#define SCSICMD_WriteLong 0x3F
+#define SCSICMD_ReadTOC 0x43
+#define SCSICMD_ReadHeader 0x44
+#define SCSICMD_ModeSelect10 0x55
+#define SCSICMD_ModeSense10 0x5A
+#define SCSI_TYPE_DASD 0x00
+#define SCSI_TYPE_SASD 0x01
+#define SCSI_TYPE_PRN 0x02
+#define SCSI_TYPE_PROC 0x03
+#define SCSI_TYPE_WORM 0x04
+#define SCSI_TYPE_CDROM 0x05
+#define SCSI_TYPE_SCANNER 0x06
+#define SCSI_TYPE_OPTMEM 0x07
+#define SCSI_TYPE_MED_CHG 0x08
+#define SCSI_TYPE_COMM 0x09
+#define SCSI_TYPE_UNKNOWN 0x1F
+#define SCSI_TYPE_NO_DVC 0xFF
+#define ASC_SCSIDIR_NOCHK 0x00
+#define ASC_SCSIDIR_T2H 0x08
+#define ASC_SCSIDIR_H2T 0x10
+#define ASC_SCSIDIR_NODATA 0x18
+#define SCSI_SENKEY_NO_SENSE 0x00
+#define SCSI_SENKEY_UNDEFINED 0x01
+#define SCSI_SENKEY_NOT_READY 0x02
+#define SCSI_SENKEY_MEDIUM_ERR 0x03
+#define SCSI_SENKEY_HW_ERR 0x04
+#define SCSI_SENKEY_ILLEGAL 0x05
+#define SCSI_SENKEY_ATTENTION 0x06
+#define SCSI_SENKEY_PROTECTED 0x07
+#define SCSI_SENKEY_BLANK 0x08
+#define SCSI_SENKEY_V_UNIQUE 0x09
+#define SCSI_SENKEY_CPY_ABORT 0x0A
+#define SCSI_SENKEY_ABORT 0x0B
+#define SCSI_SENKEY_EQUAL 0x0C
+#define SCSI_SENKEY_VOL_OVERFLOW 0x0D
+#define SCSI_SENKEY_MISCOMP 0x0E
+#define SCSI_SENKEY_RESERVED 0x0F
+#define SCSI_ASC_NOMEDIA 0x3A
+#define ASC_SRB_HOST(x) ((uchar)((uchar)(x) >> 4))
+#define ASC_SRB_TID(x) ((uchar)((uchar)(x) & (uchar)0x0F))
+#define ASC_SRB_LUN(x) ((uchar)((uint)(x) >> 13))
+#define PUT_CDB1(x) ((uchar)((uint)(x) >> 8))
+#define SS_GOOD 0x00
+#define SS_CHK_CONDITION 0x02
+#define SS_CONDITION_MET 0x04
+#define SS_TARGET_BUSY 0x08
+#define SS_INTERMID 0x10
+#define SS_INTERMID_COND_MET 0x14
+#define SS_RSERV_CONFLICT 0x18
+#define SS_CMD_TERMINATED 0x22
+#define SS_QUEUE_FULL 0x28
+#define MS_CMD_DONE 0x00
+#define MS_EXTEND 0x01
+#define MS_SDTR_LEN 0x03
+#define MS_SDTR_CODE 0x01
+#define MS_WDTR_LEN 0x02
+#define MS_WDTR_CODE 0x03
+#define MS_MDP_LEN 0x05
+#define MS_MDP_CODE 0x00
+#define M1_SAVE_DATA_PTR 0x02
+#define M1_RESTORE_PTRS 0x03
+#define M1_DISCONNECT 0x04
+#define M1_INIT_DETECTED_ERR 0x05
+#define M1_ABORT 0x06
+#define M1_MSG_REJECT 0x07
+#define M1_NO_OP 0x08
+#define M1_MSG_PARITY_ERR 0x09
+#define M1_LINK_CMD_DONE 0x0A
+#define M1_LINK_CMD_DONE_WFLAG 0x0B
+#define M1_BUS_DVC_RESET 0x0C
+#define M1_ABORT_TAG 0x0D
+#define M1_CLR_QUEUE 0x0E
+#define M1_INIT_RECOVERY 0x0F
+#define M1_RELEASE_RECOVERY 0x10
+#define M1_KILL_IO_PROC 0x11
+#define M2_QTAG_MSG_SIMPLE 0x20
+#define M2_QTAG_MSG_HEAD 0x21
+#define M2_QTAG_MSG_ORDERED 0x22
+#define M2_IGNORE_WIDE_RESIDUE 0x23
+
+typedef struct {
+ uchar peri_dvc_type:5;
+ uchar peri_qualifier:3;
+} ASC_SCSI_INQ0;
+
+typedef struct {
+ uchar dvc_type_modifier:7;
+ uchar rmb:1;
+} ASC_SCSI_INQ1;
+
+typedef struct {
+ uchar ansi_apr_ver:3;
+ uchar ecma_ver:3;
+ uchar iso_ver:2;
+} ASC_SCSI_INQ2;
+
+typedef struct {
+ uchar rsp_data_fmt:4;
+ uchar res:2;
+ uchar TemIOP:1;
+ uchar aenc:1;
+} ASC_SCSI_INQ3;
+
+typedef struct {
+ uchar StfRe:1;
+ uchar CmdQue:1;
+ uchar Reserved:1;
+ uchar Linked:1;
+ uchar Sync:1;
+ uchar WBus16:1;
+ uchar WBus32:1;
+ uchar RelAdr:1;
+} ASC_SCSI_INQ7;
+
+typedef struct {
+ ASC_SCSI_INQ0 byte0;
+ ASC_SCSI_INQ1 byte1;
+ ASC_SCSI_INQ2 byte2;
+ ASC_SCSI_INQ3 byte3;
+ uchar add_len;
+ uchar res1;
+ uchar res2;
+ ASC_SCSI_INQ7 byte7;
+ uchar vendor_id[8];
+ uchar product_id[16];
+ uchar product_rev_level[4];
+} ASC_SCSI_INQUIRY;
+
+typedef struct asc_req_sense {
+ uchar err_code:7;
+ uchar info_valid:1;
+ uchar segment_no;
+ uchar sense_key:4;
+ uchar reserved_bit:1;
+ uchar sense_ILI:1;
+ uchar sense_EOM:1;
+ uchar file_mark:1;
+ uchar info1[4];
+ uchar add_sense_len;
+ uchar cmd_sp_info[4];
+ uchar asc;
+ uchar ascq;
+ uchar fruc;
+ uchar sks_byte0:7;
+ uchar sks_valid:1;
+ uchar sks_bytes[2];
+ uchar notused[2];
+ uchar ex_sense_code;
+ uchar info2[4];
+} ASC_REQ_SENSE;
+
+#define ASC_SG_LIST_PER_Q 7
+#define QS_FREE 0x00
+#define QS_READY 0x01
+#define QS_DISC1 0x02
+#define QS_DISC2 0x04
+#define QS_BUSY 0x08
+#define QS_ABORTED 0x40
+#define QS_DONE 0x80
+#define QC_NO_CALLBACK 0x01
+#define QC_SG_SWAP_QUEUE 0x02
+#define QC_SG_HEAD 0x04
+#define QC_DATA_IN 0x08
+#define QC_DATA_OUT 0x10
+#define QC_URGENT 0x20
+#define QC_MSG_OUT 0x40
+#define QC_REQ_SENSE 0x80
+#define QCSG_SG_XFER_LIST 0x02
+#define QCSG_SG_XFER_MORE 0x04
+#define QCSG_SG_XFER_END 0x08
+#define QD_IN_PROGRESS 0x00
+#define QD_NO_ERROR 0x01
+#define QD_ABORTED_BY_HOST 0x02
+#define QD_WITH_ERROR 0x04
+#define QD_INVALID_REQUEST 0x80
+#define QD_INVALID_HOST_NUM 0x81
+#define QD_INVALID_DEVICE 0x82
+#define QD_ERR_INTERNAL 0xFF
+#define QHSTA_NO_ERROR 0x00
+#define QHSTA_M_SEL_TIMEOUT 0x11
+#define QHSTA_M_DATA_OVER_RUN 0x12
+#define QHSTA_M_DATA_UNDER_RUN 0x12
+#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
+#define QHSTA_M_BAD_BUS_PHASE_SEQ 0x14
+#define QHSTA_D_QDONE_SG_LIST_CORRUPTED 0x21
+#define QHSTA_D_ASC_DVC_ERROR_CODE_SET 0x22
+#define QHSTA_D_HOST_ABORT_FAILED 0x23
+#define QHSTA_D_EXE_SCSI_Q_FAILED 0x24
+#define QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT 0x25
+#define QHSTA_D_ASPI_NO_BUF_POOL 0x26
+#define QHSTA_M_WTM_TIMEOUT 0x41
+#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42
+#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43
+#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44
+#define QHSTA_M_TARGET_STATUS_BUSY 0x45
+#define QHSTA_M_BAD_TAG_CODE 0x46
+#define QHSTA_M_BAD_QUEUE_FULL_OR_BUSY 0x47
+#define QHSTA_M_HUNG_REQ_SCSI_BUS_RESET 0x48
+#define QHSTA_D_LRAM_CMP_ERROR 0x81
+#define QHSTA_M_MICRO_CODE_ERROR_HALT 0xA1
+#define ASC_FLAG_SCSIQ_REQ 0x01
+#define ASC_FLAG_BIOS_SCSIQ_REQ 0x02
+#define ASC_FLAG_BIOS_ASYNC_IO 0x04
+#define ASC_FLAG_SRB_LINEAR_ADDR 0x08
+#define ASC_FLAG_WIN16 0x10
+#define ASC_FLAG_WIN32 0x20
+#define ASC_FLAG_ISA_OVER_16MB 0x40
+#define ASC_FLAG_DOS_VM_CALLBACK 0x80
+#define ASC_TAG_FLAG_EXTRA_BYTES 0x10
+#define ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04
+#define ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX 0x08
+#define ASC_TAG_FLAG_DISABLE_CHK_COND_INT_HOST 0x40
+#define ASC_SCSIQ_CPY_BEG 4
+#define ASC_SCSIQ_SGHD_CPY_BEG 2
+#define ASC_SCSIQ_B_FWD 0
+#define ASC_SCSIQ_B_BWD 1
+#define ASC_SCSIQ_B_STATUS 2
+#define ASC_SCSIQ_B_QNO 3
+#define ASC_SCSIQ_B_CNTL 4
+#define ASC_SCSIQ_B_SG_QUEUE_CNT 5
+#define ASC_SCSIQ_D_DATA_ADDR 8
+#define ASC_SCSIQ_D_DATA_CNT 12
+#define ASC_SCSIQ_B_SENSE_LEN 20
+#define ASC_SCSIQ_DONE_INFO_BEG 22
+#define ASC_SCSIQ_D_SRBPTR 22
+#define ASC_SCSIQ_B_TARGET_IX 26
+#define ASC_SCSIQ_B_CDB_LEN 28
+#define ASC_SCSIQ_B_TAG_CODE 29
+#define ASC_SCSIQ_W_VM_ID 30
+#define ASC_SCSIQ_DONE_STATUS 32
+#define ASC_SCSIQ_HOST_STATUS 33
+#define ASC_SCSIQ_SCSI_STATUS 34
+#define ASC_SCSIQ_CDB_BEG 36
+#define ASC_SCSIQ_DW_REMAIN_XFER_ADDR 56
+#define ASC_SCSIQ_DW_REMAIN_XFER_CNT 60
+#define ASC_SCSIQ_B_SG_WK_QP 49
+#define ASC_SCSIQ_B_SG_WK_IX 50
+#define ASC_SCSIQ_W_REQ_COUNT 52
+#define ASC_SCSIQ_B_LIST_CNT 6
+#define ASC_SCSIQ_B_CUR_LIST_CNT 7
+#define ASC_SGQ_B_SG_CNTL 4
+#define ASC_SGQ_B_SG_HEAD_QP 5
+#define ASC_SGQ_B_SG_LIST_CNT 6
+#define ASC_SGQ_B_SG_CUR_LIST_CNT 7
+#define ASC_SGQ_LIST_BEG 8
+#define ASC_DEF_SCSI1_QNG 4
+#define ASC_MAX_SCSI1_QNG 4
+#define ASC_DEF_SCSI2_QNG 16
+#define ASC_MAX_SCSI2_QNG 32
+#define ASC_TAG_CODE_MASK 0x23
+#define ASC_STOP_REQ_RISC_STOP 0x01
+#define ASC_STOP_ACK_RISC_STOP 0x03
+#define ASC_STOP_CLEAN_UP_BUSY_Q 0x10
+#define ASC_STOP_CLEAN_UP_DISC_Q 0x20
+#define ASC_STOP_HOST_REQ_RISC_HALT 0x40
+#define ASC_TIDLUN_TO_IX(tid, lun) (ASC_SCSI_TIX_TYPE)((tid) + ((lun)<<ASC_SCSI_ID_BITS))
+#define ASC_TID_TO_TARGET_ID(tid) (ASC_SCSI_BIT_ID_TYPE)(0x01 << (tid))
+#define ASC_TIX_TO_TARGET_ID(tix) (0x01 << ((tix) & ASC_MAX_TID))
+#define ASC_TIX_TO_TID(tix) ((tix) & ASC_MAX_TID)
+#define ASC_TID_TO_TIX(tid) ((tid) & ASC_MAX_TID)
+#define ASC_TIX_TO_LUN(tix) (((tix) >> ASC_SCSI_ID_BITS) & ASC_MAX_LUN)
+#define ASC_QNO_TO_QADDR(q_no) ((ASC_QADR_BEG)+((int)(q_no) << 6))
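+/*
+ * Editorial example: with ASC_SCSI_ID_BITS == 3, ASC_TIDLUN_TO_IX(3, 1)
+ * packs target 3, LUN 1 into index 3 + (1 << 3) = 11; ASC_TIX_TO_TID(11)
+ * gives back 3 and ASC_TIX_TO_LUN(11) gives back 1.
+ */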
+
+typedef struct asc_scisq_1 {
+ uchar status;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_queue_cnt;
+ uchar target_id;
+ uchar target_lun;
+ ulong data_addr;
+ ulong data_cnt;
+ ulong sense_addr;
+ uchar sense_len;
+ uchar extra_bytes;
+} ASC_SCSIQ_1;
+
+typedef struct asc_scisq_2 {
+ ulong srb_ptr;
+ uchar target_ix;
+ uchar flag;
+ uchar cdb_len;
+ uchar tag_code;
+ ushort vm_id;
+} ASC_SCSIQ_2;
+
+typedef struct asc_scsiq_3 {
+ uchar done_stat;
+ uchar host_stat;
+ uchar scsi_stat;
+ uchar scsi_msg;
+} ASC_SCSIQ_3;
+
+typedef struct asc_scsiq_4 {
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar y_first_sg_list_qp;
+ uchar y_working_sg_qp;
+ uchar y_working_sg_ix;
+ uchar y_res;
+ ushort x_req_count;
+ ushort x_reconnect_rtn;
+ ulong x_saved_data_addr;
+ ulong x_saved_data_cnt;
+} ASC_SCSIQ_4;
+
+typedef struct asc_q_done_info {
+ ASC_SCSIQ_2 d2;
+ ASC_SCSIQ_3 d3;
+ uchar q_status;
+ uchar q_no;
+ uchar cntl;
+ uchar sense_len;
+ uchar extra_bytes;
+ uchar res;
+ ulong remain_bytes;
+} ASC_QDONE_INFO;
+
+typedef struct asc_sg_list {
+ ulong addr;
+ ulong bytes;
+} ASC_SG_LIST;
+
+typedef struct asc_sg_head {
+ ushort entry_cnt;
+ ushort queue_cnt;
+ ushort entry_to_copy;
+ ushort res;
+ ASC_SG_LIST sg_list[ASC_MAX_SG_LIST];
+} ASC_SG_HEAD;
+
+#define ASC_MIN_SG_LIST 2
+
+typedef struct asc_min_sg_head {
+ ushort entry_cnt;
+ ushort queue_cnt;
+ ushort entry_to_copy;
+ ushort res;
+ ASC_SG_LIST sg_list[ASC_MIN_SG_LIST];
+} ASC_MIN_SG_HEAD;
+
+#define QCX_SORT (0x0001)
+#define QCX_COALEASE (0x0002)
+
+typedef struct asc_scsi_q {
+ ASC_SCSIQ_1 q1;
+ ASC_SCSIQ_2 q2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+} ASC_SCSI_Q;
+
+typedef struct asc_scsi_req_q {
+ ASC_SCSIQ_1 r1;
+ ASC_SCSIQ_2 r2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+ uchar *sense_ptr;
+ ASC_SCSIQ_3 r3;
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar sense[ASC_MIN_SENSE_LEN];
+} ASC_SCSI_REQ_Q;
+
+typedef struct asc_scsi_bios_req_q {
+ ASC_SCSIQ_1 r1;
+ ASC_SCSIQ_2 r2;
+ uchar *cdbptr;
+ ASC_SG_HEAD *sg_head;
+ uchar *sense_ptr;
+ ASC_SCSIQ_3 r3;
+ uchar cdb[ASC_MAX_CDB_LEN];
+ uchar sense[ASC_MIN_SENSE_LEN];
+} ASC_SCSI_BIOS_REQ_Q;
+
+typedef struct asc_risc_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SCSIQ_1 i1;
+ ASC_SCSIQ_2 i2;
+ ASC_SCSIQ_3 i3;
+ ASC_SCSIQ_4 i4;
+} ASC_RISC_Q;
+
+typedef struct asc_sg_list_q {
+ uchar seq_no;
+ uchar q_no;
+ uchar cntl;
+ uchar sg_head_qp;
+ uchar sg_list_cnt;
+ uchar sg_cur_list_cnt;
+} ASC_SG_LIST_Q;
+
+typedef struct asc_risc_sg_list_q {
+ uchar fwd;
+ uchar bwd;
+ ASC_SG_LIST_Q sg;
+ ASC_SG_LIST sg_list[7];
+} ASC_RISC_SG_LIST_Q;
+
+#define ASC_EXE_SCSI_IO_MAX_IDLE_LOOP 0x1000000UL
+#define ASC_EXE_SCSI_IO_MAX_WAIT_LOOP 1024
+#define ASCQ_ERR_NO_ERROR 0
+#define ASCQ_ERR_IO_NOT_FOUND 1
+#define ASCQ_ERR_LOCAL_MEM 2
+#define ASCQ_ERR_CHKSUM 3
+#define ASCQ_ERR_START_CHIP 4
+#define ASCQ_ERR_INT_TARGET_ID 5
+#define ASCQ_ERR_INT_LOCAL_MEM 6
+#define ASCQ_ERR_HALT_RISC 7
+#define ASCQ_ERR_GET_ASPI_ENTRY 8
+#define ASCQ_ERR_CLOSE_ASPI 9
+#define ASCQ_ERR_HOST_INQUIRY 0x0A
+#define ASCQ_ERR_SAVED_SRB_BAD 0x0B
+#define ASCQ_ERR_QCNTL_SG_LIST 0x0C
+#define ASCQ_ERR_Q_STATUS 0x0D
+#define ASCQ_ERR_WR_SCSIQ 0x0E
+#define ASCQ_ERR_PC_ADDR 0x0F
+#define ASCQ_ERR_SYN_OFFSET 0x10
+#define ASCQ_ERR_SYN_XFER_TIME 0x11
+#define ASCQ_ERR_LOCK_DMA 0x12
+#define ASCQ_ERR_UNLOCK_DMA 0x13
+#define ASCQ_ERR_VDS_CHK_INSTALL 0x14
+#define ASCQ_ERR_MICRO_CODE_HALT 0x15
+#define ASCQ_ERR_SET_LRAM_ADDR 0x16
+#define ASCQ_ERR_CUR_QNG 0x17
+#define ASCQ_ERR_SG_Q_LINKS 0x18
+#define ASCQ_ERR_SCSIQ_PTR 0x19
+#define ASCQ_ERR_ISR_RE_ENTRY 0x1A
+#define ASCQ_ERR_CRITICAL_RE_ENTRY 0x1B
+#define ASCQ_ERR_ISR_ON_CRITICAL 0x1C
+#define ASCQ_ERR_SG_LIST_ODD_ADDRESS 0x1D
+#define ASCQ_ERR_XFER_ADDRESS_TOO_BIG 0x1E
+#define ASCQ_ERR_SCSIQ_NULL_PTR 0x1F
+#define ASCQ_ERR_SCSIQ_BAD_NEXT_PTR 0x20
+#define ASCQ_ERR_GET_NUM_OF_FREE_Q 0x21
+#define ASCQ_ERR_SEND_SCSI_Q 0x22
+#define ASCQ_ERR_HOST_REQ_RISC_HALT 0x23
+#define ASCQ_ERR_RESET_SDTR 0x24
+#define ASC_WARN_NO_ERROR 0x0000
+#define ASC_WARN_IO_PORT_ROTATE 0x0001
+#define ASC_WARN_EEPROM_CHKSUM 0x0002
+#define ASC_WARN_IRQ_MODIFIED 0x0004
+#define ASC_WARN_AUTO_CONFIG 0x0008
+#define ASC_WARN_CMD_QNG_CONFLICT 0x0010
+#define ASC_WARN_EEPROM_RECOVER 0x0020
+#define ASC_WARN_CFG_MSW_RECOVER 0x0040
+#define ASC_WARN_SET_PCI_CONFIG_SPACE 0x0080
+#define ASC_IERR_WRITE_EEPROM 0x0001
+#define ASC_IERR_MCODE_CHKSUM 0x0002
+#define ASC_IERR_SET_PC_ADDR 0x0004
+#define ASC_IERR_START_STOP_CHIP 0x0008
+#define ASC_IERR_IRQ_NO 0x0010
+#define ASC_IERR_SET_IRQ_NO 0x0020
+#define ASC_IERR_CHIP_VERSION 0x0040
+#define ASC_IERR_SET_SCSI_ID 0x0080
+#define ASC_IERR_GET_PHY_ADDR 0x0100
+#define ASC_IERR_BAD_SIGNATURE 0x0200
+#define ASC_IERR_NO_BUS_TYPE 0x0400
+#define ASC_IERR_SCAM 0x0800
+#define ASC_IERR_SET_SDTR 0x1000
+#define ASC_IERR_RW_LRAM 0x8000
+#define ASC_DEF_IRQ_NO 10
+#define ASC_MAX_IRQ_NO 15
+#define ASC_MIN_IRQ_NO 10
+#define ASC_MIN_REMAIN_Q (0x02)
+#define ASC_DEF_MAX_TOTAL_QNG (0xF0)
+#define ASC_MIN_TAG_Q_PER_DVC (0x04)
+#define ASC_DEF_TAG_Q_PER_DVC (0x04)
+#define ASC_MIN_FREE_Q ASC_MIN_REMAIN_Q
+#define ASC_MIN_TOTAL_QNG ((ASC_MAX_SG_QUEUE)+(ASC_MIN_FREE_Q))
+#define ASC_MAX_TOTAL_QNG 240
+#define ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG 16
+#define ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG 8
+#define ASC_MAX_PCI_INRAM_TOTAL_QNG 20
+#define ASC_MAX_INRAM_TAG_QNG 16
+#define ASC_IOADR_TABLE_MAX_IX 11
+#define ASC_IOADR_GAP 0x10
+#define ASC_SEARCH_IOP_GAP 0x10
+#define ASC_MIN_IOP_ADDR (PortAddr)0x0100
+#define ASC_MAX_IOP_ADDR (PortAddr)0x3F0
+#define ASC_IOADR_1 (PortAddr)0x0110
+#define ASC_IOADR_2 (PortAddr)0x0130
+#define ASC_IOADR_3 (PortAddr)0x0150
+#define ASC_IOADR_4 (PortAddr)0x0190
+#define ASC_IOADR_5 (PortAddr)0x0210
+#define ASC_IOADR_6 (PortAddr)0x0230
+#define ASC_IOADR_7 (PortAddr)0x0250
+#define ASC_IOADR_8 (PortAddr)0x0330
+#define ASC_IOADR_DEF ASC_IOADR_8
+#define ASC_LIB_SCSIQ_WK_SP 256
+#define ASC_MAX_SYN_XFER_NO 16
+#define ASC_SYN_MAX_OFFSET 0x0F
+#define ASC_DEF_SDTR_OFFSET 0x0F
+#define ASC_DEF_SDTR_INDEX 0x00
+#define ASC_SDTR_ULTRA_PCI_10MB_INDEX 0x02
+#define SYN_XFER_NS_0 25
+#define SYN_XFER_NS_1 30
+#define SYN_XFER_NS_2 35
+#define SYN_XFER_NS_3 40
+#define SYN_XFER_NS_4 50
+#define SYN_XFER_NS_5 60
+#define SYN_XFER_NS_6 70
+#define SYN_XFER_NS_7 85
+#define SYN_ULTRA_XFER_NS_0 12
+#define SYN_ULTRA_XFER_NS_1 19
+#define SYN_ULTRA_XFER_NS_2 25
+#define SYN_ULTRA_XFER_NS_3 32
+#define SYN_ULTRA_XFER_NS_4 38
+#define SYN_ULTRA_XFER_NS_5 44
+#define SYN_ULTRA_XFER_NS_6 50
+#define SYN_ULTRA_XFER_NS_7 57
+#define SYN_ULTRA_XFER_NS_8 63
+#define SYN_ULTRA_XFER_NS_9 69
+#define SYN_ULTRA_XFER_NS_10 75
+#define SYN_ULTRA_XFER_NS_11 82
+#define SYN_ULTRA_XFER_NS_12 88
+#define SYN_ULTRA_XFER_NS_13 94
+#define SYN_ULTRA_XFER_NS_14 100
+#define SYN_ULTRA_XFER_NS_15 107
+
+typedef struct ext_msg {
+ uchar msg_type;
+ uchar msg_len;
+ uchar msg_req;
+ union {
+ struct {
+ uchar sdtr_xfer_period;
+ uchar sdtr_req_ack_offset;
+ } sdtr;
+ struct {
+ uchar wdtr_width;
+ } wdtr;
+ struct {
+ uchar mdp_b3;
+ uchar mdp_b2;
+ uchar mdp_b1;
+ uchar mdp_b0;
+ } mdp;
+ } u_ext_msg;
+ uchar res;
+} EXT_MSG;
+
+#define xfer_period u_ext_msg.sdtr.sdtr_xfer_period
+#define req_ack_offset u_ext_msg.sdtr.sdtr_req_ack_offset
+#define wdtr_width u_ext_msg.wdtr.wdtr_width
+#define mdp_b3 u_ext_msg.mdp_b3
+#define mdp_b2 u_ext_msg.mdp_b2
+#define mdp_b1 u_ext_msg.mdp_b1
+#define mdp_b0 u_ext_msg.mdp_b0
+
+typedef struct asc_dvc_cfg {
+ ASC_SCSI_BIT_ID_TYPE can_tagged_qng;
+ ASC_SCSI_BIT_ID_TYPE cmd_qng_enabled;
+ ASC_SCSI_BIT_ID_TYPE disc_enable;
+ ASC_SCSI_BIT_ID_TYPE sdtr_enable;
+ uchar chip_scsi_id:4;
+ uchar isa_dma_speed:4;
+ uchar isa_dma_channel;
+ uchar chip_version;
+ ushort pci_device_id;
+ ushort lib_serial_no;
+ ushort lib_version;
+ ushort mcode_date;
+ ushort mcode_version;
+ uchar max_tag_qng[ASC_MAX_TID + 1];
+ uchar *overrun_buf;
+ uchar sdtr_period_offset[ASC_MAX_TID + 1];
+ ushort pci_slot_info;
+ uchar adapter_info[6];
+} ASC_DVC_CFG;
+
+#define ASC_DEF_DVC_CNTL 0xFFFF
+#define ASC_DEF_CHIP_SCSI_ID 7
+#define ASC_DEF_ISA_DMA_SPEED 4
+#define ASC_INIT_STATE_NULL 0x0000
+#define ASC_INIT_STATE_BEG_GET_CFG 0x0001
+#define ASC_INIT_STATE_END_GET_CFG 0x0002
+#define ASC_INIT_STATE_BEG_SET_CFG 0x0004
+#define ASC_INIT_STATE_END_SET_CFG 0x0008
+#define ASC_INIT_STATE_BEG_LOAD_MC 0x0010
+#define ASC_INIT_STATE_END_LOAD_MC 0x0020
+#define ASC_INIT_STATE_BEG_INQUIRY 0x0040
+#define ASC_INIT_STATE_END_INQUIRY 0x0080
+#define ASC_INIT_RESET_SCSI_DONE 0x0100
+#define ASC_INIT_STATE_WITHOUT_EEP 0x8000
+#define ASC_PCI_DEVICE_ID_REV_A 0x1100
+#define ASC_PCI_DEVICE_ID_REV_B 0x1200
+#define ASC_BUG_FIX_IF_NOT_DWB 0x0001
+#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002
+#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
+#define ASC_MIN_TAGGED_CMD 7
+#define ASC_MAX_SCSI_RESET_WAIT 30
+
+typedef struct asc_dvc_var {
+ PortAddr iop_base;
+ ushort err_code;
+ ushort dvc_cntl;
+ ushort bug_fix_cntl;
+ ushort bus_type;
+ Ptr2Func isr_callback;
+ Ptr2Func exe_callback;
+ ASC_SCSI_BIT_ID_TYPE init_sdtr;
+ ASC_SCSI_BIT_ID_TYPE sdtr_done;
+ ASC_SCSI_BIT_ID_TYPE use_tagged_qng;
+ ASC_SCSI_BIT_ID_TYPE unit_not_ready;
+ ASC_SCSI_BIT_ID_TYPE queue_full_or_busy;
+ ASC_SCSI_BIT_ID_TYPE start_motor;
+ uchar scsi_reset_wait;
+ uchar chip_no;
+ char is_in_int;
+ uchar max_total_qng;
+ uchar cur_total_qng;
+ uchar in_critical_cnt;
+ uchar irq_no;
+ uchar last_q_shortage;
+ ushort init_state;
+ uchar cur_dvc_qng[ASC_MAX_TID + 1];
+ uchar max_dvc_qng[ASC_MAX_TID + 1];
+ ASC_SCSI_Q *scsiq_busy_head[ASC_MAX_TID + 1];
+ ASC_SCSI_Q *scsiq_busy_tail[ASC_MAX_TID + 1];
+ uchar sdtr_period_tbl[ASC_MAX_SYN_XFER_NO];
+ ASC_DVC_CFG *cfg;
+ Ptr2Func saved_ptr2func;
+ ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer_always;
+ char redo_scam;
+ ushort res2;
+ uchar dos_int13_table[ASC_MAX_TID + 1];
+ ulong max_dma_count;
+ ASC_SCSI_BIT_ID_TYPE no_scam;
+ ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer;
+ uchar max_sdtr_index;
+ uchar host_init_sdtr_index;
+ ulong drv_ptr;
+ ulong uc_break;
+ ulong res7;
+ ulong res8;
+} ASC_DVC_VAR;
+
+typedef int (* ASC_ISR_CALLBACK) (ASC_DVC_VAR asc_ptr_type *, ASC_QDONE_INFO *);
+typedef int (* ASC_EXE_CALLBACK) (ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q *);
+
+typedef struct asc_dvc_inq_info {
+ uchar type[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_DVC_INQ_INFO;
+
+typedef struct asc_cap_info {
+ ulong lba;
+ ulong blk_size;
+} ASC_CAP_INFO;
+
+typedef struct asc_cap_info_array {
+ ASC_CAP_INFO cap_info[ASC_MAX_TID + 1][ASC_MAX_LUN + 1];
+} ASC_CAP_INFO_ARRAY;
+
+#define ASC_MCNTL_NO_SEL_TIMEOUT (ushort)0x0001
+#define ASC_MCNTL_NULL_TARGET (ushort)0x0002
+#define ASC_CNTL_INITIATOR (ushort)0x0001
+#define ASC_CNTL_BIOS_GT_1GB (ushort)0x0002
+#define ASC_CNTL_BIOS_GT_2_DISK (ushort)0x0004
+#define ASC_CNTL_BIOS_REMOVABLE (ushort)0x0008
+#define ASC_CNTL_NO_SCAM (ushort)0x0010
+#define ASC_CNTL_INT_MULTI_Q (ushort)0x0080
+#define ASC_CNTL_NO_LUN_SUPPORT (ushort)0x0040
+#define ASC_CNTL_NO_VERIFY_COPY (ushort)0x0100
+#define ASC_CNTL_RESET_SCSI (ushort)0x0200
+#define ASC_CNTL_INIT_INQUIRY (ushort)0x0400
+#define ASC_CNTL_INIT_VERBOSE (ushort)0x0800
+#define ASC_CNTL_SCSI_PARITY (ushort)0x1000
+#define ASC_CNTL_BURST_MODE (ushort)0x2000
+#define ASC_CNTL_SDTR_ENABLE_ULTRA (ushort)0x4000
+#define ASC_EEP_DVC_CFG_BEG_VL 2
+#define ASC_EEP_MAX_DVC_ADDR_VL 15
+#define ASC_EEP_DVC_CFG_BEG 32
+#define ASC_EEP_MAX_DVC_ADDR 45
+#define ASC_EEP_DEFINED_WORDS 10
+#define ASC_EEP_MAX_ADDR 63
+#define ASC_EEP_RES_WORDS 0
+#define ASC_EEP_MAX_RETRY 20
+#define ASC_MAX_INIT_BUSY_RETRY 8
+#define ASC_EEP_ISA_PNP_WSIZE 16
+
+typedef struct asceep_config {
+ ushort cfg_lsw;
+ ushort cfg_msw;
+ uchar init_sdtr;
+ uchar disc_enable;
+ uchar use_cmd_qng;
+ uchar start_motor;
+ uchar max_total_qng;
+ uchar max_tag_qng;
+ uchar bios_scan;
+ uchar power_up_wait;
+ uchar no_scam;
+ uchar chip_scsi_id:4;
+ uchar isa_dma_speed:4;
+ uchar dos_int13_table[ASC_MAX_TID + 1];
+ uchar adapter_info[6];
+ ushort cntl;
+ ushort chksum;
+} ASCEEP_CONFIG;
+
+#define ASC_PCI_CFG_LSW_SCSI_PARITY 0x0800
+#define ASC_PCI_CFG_LSW_BURST_MODE 0x0080
+#define ASC_PCI_CFG_LSW_INTR_ABLE 0x0020
+
+#define ASC_EEP_CMD_READ 0x80
+#define ASC_EEP_CMD_WRITE 0x40
+#define ASC_EEP_CMD_WRITE_ABLE 0x30
+#define ASC_EEP_CMD_WRITE_DISABLE 0x00
+#define ASC_OVERRUN_BSIZE 0x00000048UL
+#define ASC_CTRL_BREAK_ONCE 0x0001
+#define ASC_CTRL_BREAK_STAY_IDLE 0x0002
+#define ASCV_MSGOUT_BEG 0x0000
+#define ASCV_MSGOUT_SDTR_PERIOD (ASCV_MSGOUT_BEG+3)
+#define ASCV_MSGOUT_SDTR_OFFSET (ASCV_MSGOUT_BEG+4)
+#define ASCV_BREAK_SAVED_CODE (ushort)0x0006
+#define ASCV_MSGIN_BEG (ASCV_MSGOUT_BEG+8)
+#define ASCV_MSGIN_SDTR_PERIOD (ASCV_MSGIN_BEG+3)
+#define ASCV_MSGIN_SDTR_OFFSET (ASCV_MSGIN_BEG+4)
+#define ASCV_SDTR_DATA_BEG (ASCV_MSGIN_BEG+8)
+#define ASCV_SDTR_DONE_BEG (ASCV_SDTR_DATA_BEG+8)
+#define ASCV_MAX_DVC_QNG_BEG (ushort)0x0020
+#define ASCV_BREAK_ADDR (ushort)0x0028
+#define ASCV_BREAK_NOTIFY_COUNT (ushort)0x002A
+#define ASCV_BREAK_CONTROL (ushort)0x002C
+#define ASCV_BREAK_HIT_COUNT (ushort)0x002E
+
+#define ASCV_ASCDVC_ERR_CODE_W (ushort)0x0030
+#define ASCV_MCODE_CHKSUM_W (ushort)0x0032
+#define ASCV_MCODE_SIZE_W (ushort)0x0034
+#define ASCV_STOP_CODE_B (ushort)0x0036
+#define ASCV_DVC_ERR_CODE_B (ushort)0x0037
+#define ASCV_OVERRUN_PADDR_D (ushort)0x0038
+#define ASCV_OVERRUN_BSIZE_D (ushort)0x003C
+#define ASCV_HALTCODE_W (ushort)0x0040
+#define ASCV_CHKSUM_W (ushort)0x0042
+#define ASCV_MC_DATE_W (ushort)0x0044
+#define ASCV_MC_VER_W (ushort)0x0046
+#define ASCV_NEXTRDY_B (ushort)0x0048
+#define ASCV_DONENEXT_B (ushort)0x0049
+#define ASCV_USE_TAGGED_QNG_B (ushort)0x004A
+#define ASCV_SCSIBUSY_B (ushort)0x004B
+#define ASCV_Q_DONE_IN_PROGRESS_B (ushort)0x004C
+#define ASCV_CURCDB_B (ushort)0x004D
+#define ASCV_RCLUN_B (ushort)0x004E
+#define ASCV_BUSY_QHEAD_B (ushort)0x004F
+#define ASCV_DISC1_QHEAD_B (ushort)0x0050
+#define ASCV_DISC_ENABLE_B (ushort)0x0052
+#define ASCV_CAN_TAGGED_QNG_B (ushort)0x0053
+#define ASCV_HOSTSCSI_ID_B (ushort)0x0055
+#define ASCV_MCODE_CNTL_B (ushort)0x0056
+#define ASCV_NULL_TARGET_B (ushort)0x0057
+#define ASCV_FREE_Q_HEAD_W (ushort)0x0058
+#define ASCV_DONE_Q_TAIL_W (ushort)0x005A
+#define ASCV_FREE_Q_HEAD_B (ushort)(ASCV_FREE_Q_HEAD_W+1)
+#define ASCV_DONE_Q_TAIL_B (ushort)(ASCV_DONE_Q_TAIL_W+1)
+#define ASCV_HOST_FLAG_B (ushort)0x005D
+#define ASCV_TOTAL_READY_Q_B (ushort)0x0064
+#define ASCV_VER_SERIAL_B (ushort)0x0065
+#define ASCV_HALTCODE_SAVED_W (ushort)0x0066
+#define ASCV_WTM_FLAG_B (ushort)0x0068
+#define ASCV_RISC_FLAG_B (ushort)0x006A
+#define ASCV_REQ_SG_LIST_QP (ushort)0x006B
+#define ASC_HOST_FLAG_IN_ISR 0x01
+#define ASC_HOST_FLAG_ACK_INT 0x02
+#define ASC_RISC_FLAG_GEN_INT 0x01
+#define ASC_RISC_FLAG_REQ_SG_LIST 0x02
+#define IOP_CTRL (0x0F)
+#define IOP_STATUS (0x0E)
+#define IOP_INT_ACK IOP_STATUS
+#define IOP_REG_IFC (0x0D)
+#define IOP_SYN_OFFSET (0x0B)
+#define IOP_EXTRA_CONTROL (0x0D)
+#define IOP_REG_PC (0x0C)
+#define IOP_RAM_ADDR (0x0A)
+#define IOP_RAM_DATA (0x08)
+#define IOP_EEP_DATA (0x06)
+#define IOP_EEP_CMD (0x07)
+#define IOP_VERSION (0x03)
+#define IOP_CONFIG_HIGH (0x04)
+#define IOP_CONFIG_LOW (0x02)
+#define IOP_SIG_BYTE (0x01)
+#define IOP_SIG_WORD (0x00)
+#define IOP_REG_DC1 (0x0E)
+#define IOP_REG_DC0 (0x0C)
+#define IOP_REG_SB (0x0B)
+#define IOP_REG_DA1 (0x0A)
+#define IOP_REG_DA0 (0x08)
+#define IOP_REG_SC (0x09)
+#define IOP_DMA_SPEED (0x07)
+#define IOP_REG_FLAG (0x07)
+#define IOP_FIFO_H (0x06)
+#define IOP_FIFO_L (0x04)
+#define IOP_REG_ID (0x05)
+#define IOP_REG_QP (0x03)
+#define IOP_REG_IH (0x02)
+#define IOP_REG_IX (0x01)
+#define IOP_REG_AX (0x00)
+#define IFC_REG_LOCK (0x00)
+#define IFC_REG_UNLOCK (0x09)
+#define IFC_WR_EN_FILTER (0x10)
+#define IFC_RD_NO_EEPROM (0x10)
+#define IFC_SLEW_RATE (0x20)
+#define IFC_ACT_NEG (0x40)
+#define IFC_INP_FILTER (0x80)
+#define IFC_INIT_DEFAULT (IFC_ACT_NEG | IFC_REG_UNLOCK)
+#define SC_SEL (uchar)(0x80)
+#define SC_BSY (uchar)(0x40)
+#define SC_ACK (uchar)(0x20)
+#define SC_REQ (uchar)(0x10)
+#define SC_ATN (uchar)(0x08)
+#define SC_IO (uchar)(0x04)
+#define SC_CD (uchar)(0x02)
+#define SC_MSG (uchar)(0x01)
+#define SEC_SCSI_CTL (uchar)(0x80)
+#define SEC_ACTIVE_NEGATE (uchar)(0x40)
+#define SEC_SLEW_RATE (uchar)(0x20)
+#define SEC_ENABLE_FILTER (uchar)(0x10)
+#define ASC_HALT_EXTMSG_IN (ushort)0x8000
+#define ASC_HALT_CHK_CONDITION (ushort)0x8100
+#define ASC_HALT_SS_QUEUE_FULL (ushort)0x8200
+#define ASC_HALT_DISABLE_ASYN_USE_SYN_FIX (ushort)0x8300
+#define ASC_HALT_ENABLE_ASYN_USE_SYN_FIX (ushort)0x8400
+#define ASC_HALT_SDTR_REJECTED (ushort)0x4000
+#define ASC_MAX_QNO 0xF8
+#define ASC_DATA_SEC_BEG (ushort)0x0080
+#define ASC_DATA_SEC_END (ushort)0x0080
+#define ASC_CODE_SEC_BEG (ushort)0x0080
+#define ASC_CODE_SEC_END (ushort)0x0080
+#define ASC_QADR_BEG (0x4000)
+#define ASC_QADR_USED (ushort)(ASC_MAX_QNO * 64)
+#define ASC_QADR_END (ushort)0x7FFF
+#define ASC_QLAST_ADR (ushort)0x7FC0
+#define ASC_QBLK_SIZE 0x40
+#define ASC_BIOS_DATA_QBEG 0xF8
+#define ASC_MIN_ACTIVE_QNO 0x01
+#define ASC_QLINK_END 0xFF
+#define ASC_EEPROM_WORDS 0x10
+#define ASC_MAX_MGS_LEN 0x10
+#define ASC_BIOS_ADDR_DEF 0xDC00
+#define ASC_BIOS_SIZE 0x3800
+#define ASC_BIOS_RAM_OFF 0x3800
+#define ASC_BIOS_RAM_SIZE 0x800
+#define ASC_BIOS_MIN_ADDR 0xC000
+#define ASC_BIOS_MAX_ADDR 0xEC00
+#define ASC_BIOS_BANK_SIZE 0x0400
+#define ASC_MCODE_START_ADDR 0x0080
+#define ASC_CFG0_HOST_INT_ON 0x0020
+#define ASC_CFG0_BIOS_ON 0x0040
+#define ASC_CFG0_VERA_BURST_ON 0x0080
+#define ASC_CFG0_SCSI_PARITY_ON 0x0800
+#define ASC_CFG1_SCSI_TARGET_ON 0x0080
+#define ASC_CFG1_LRAM_8BITS_ON 0x0800
+#define ASC_CFG_MSW_CLR_MASK 0x3080
+#define CSW_TEST1 (ASC_CS_TYPE)0x8000
+#define CSW_AUTO_CONFIG (ASC_CS_TYPE)0x4000
+#define CSW_RESERVED1 (ASC_CS_TYPE)0x2000
+#define CSW_IRQ_WRITTEN (ASC_CS_TYPE)0x1000
+#define CSW_33MHZ_SELECTED (ASC_CS_TYPE)0x0800
+#define CSW_TEST2 (ASC_CS_TYPE)0x0400
+#define CSW_TEST3 (ASC_CS_TYPE)0x0200
+#define CSW_RESERVED2 (ASC_CS_TYPE)0x0100
+#define CSW_DMA_DONE (ASC_CS_TYPE)0x0080
+#define CSW_FIFO_RDY (ASC_CS_TYPE)0x0040
+#define CSW_EEP_READ_DONE (ASC_CS_TYPE)0x0020
+#define CSW_HALTED (ASC_CS_TYPE)0x0010
+#define CSW_SCSI_RESET_ACTIVE (ASC_CS_TYPE)0x0008
+#define CSW_PARITY_ERR (ASC_CS_TYPE)0x0004
+#define CSW_SCSI_RESET_LATCH (ASC_CS_TYPE)0x0002
+#define CSW_INT_PENDING (ASC_CS_TYPE)0x0001
+#define CIW_CLR_SCSI_RESET_INT (ASC_CS_TYPE)0x1000
+#define CIW_INT_ACK (ASC_CS_TYPE)0x0100
+#define CIW_TEST1 (ASC_CS_TYPE)0x0200
+#define CIW_TEST2 (ASC_CS_TYPE)0x0400
+#define CIW_SEL_33MHZ (ASC_CS_TYPE)0x0800
+#define CIW_IRQ_ACT (ASC_CS_TYPE)0x1000
+#define CC_CHIP_RESET (uchar)0x80
+#define CC_SCSI_RESET (uchar)0x40
+#define CC_HALT (uchar)0x20
+#define CC_SINGLE_STEP (uchar)0x10
+#define CC_DMA_ABLE (uchar)0x08
+#define CC_TEST (uchar)0x04
+#define CC_BANK_ONE (uchar)0x02
+#define CC_DIAG (uchar)0x01
+#define ASC_1000_ID0W 0x04C1
+#define ASC_1000_ID0W_FIX 0x00C1
+#define ASC_1000_ID1B 0x25
+#define ASC_EISA_BIG_IOP_GAP (0x1C30-0x0C50)
+#define ASC_EISA_SMALL_IOP_GAP (0x0020)
+#define ASC_EISA_MIN_IOP_ADDR (0x0C30)
+#define ASC_EISA_MAX_IOP_ADDR (0xFC50)
+#define ASC_EISA_REV_IOP_MASK (0x0C83)
+#define ASC_EISA_PID_IOP_MASK (0x0C80)
+#define ASC_EISA_CFG_IOP_MASK (0x0C86)
+#define ASC_GET_EISA_SLOT(iop) (PortAddr)((iop) & 0xF000)
+#define ASC_EISA_ID_740 0x01745004UL
+#define ASC_EISA_ID_750 0x01755004UL
+#define INS_HALTINT (ushort)0x6281
+#define INS_HALT (ushort)0x6280
+#define INS_SINT (ushort)0x6200
+#define INS_RFLAG_WTM (ushort)0x7380
+#define ASC_MC_SAVE_CODE_WSIZE 0x500
+#define ASC_MC_SAVE_DATA_WSIZE 0x40
+
+typedef struct asc_mc_saved {
+ ushort data[ASC_MC_SAVE_DATA_WSIZE];
+ ushort code[ASC_MC_SAVE_CODE_WSIZE];
+} ASC_MC_SAVED;
+
+#define AscGetQDoneInProgress(port) AscReadLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B)
+#define AscPutQDoneInProgress(port, val) AscWriteLramByte((port), ASCV_Q_DONE_IN_PROGRESS_B, val)
+#define AscGetVarFreeQHead(port) AscReadLramWord((port), ASCV_FREE_Q_HEAD_W)
+#define AscGetVarDoneQTail(port) AscReadLramWord((port), ASCV_DONE_Q_TAIL_W)
+#define AscPutVarFreeQHead(port, val) AscWriteLramWord((port), ASCV_FREE_Q_HEAD_W, val)
+#define AscPutVarDoneQTail(port, val) AscWriteLramWord((port), ASCV_DONE_Q_TAIL_W, val)
+#define AscGetRiscVarFreeQHead(port) AscReadLramByte((port), ASCV_NEXTRDY_B)
+#define AscGetRiscVarDoneQTail(port) AscReadLramByte((port), ASCV_DONENEXT_B)
+#define AscPutRiscVarFreeQHead(port, val) AscWriteLramByte((port), ASCV_NEXTRDY_B, val)
+#define AscPutRiscVarDoneQTail(port, val) AscWriteLramByte((port), ASCV_DONENEXT_B, val)
+#define AscPutMCodeSDTRDoneAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)(id)), (data))
+#define AscGetMCodeSDTRDoneAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DONE_BEG+(ushort)(id)))
+#define AscPutMCodeInitSDTRAtID(port, id, data) AscWriteLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)(id)), (data))
+#define AscGetMCodeInitSDTRAtID(port, id) AscReadLramByte((port), (ushort)((ushort)ASCV_SDTR_DATA_BEG+(ushort)(id)))
+#define AscSynIndexToPeriod(index) (uchar)(asc_dvc->sdtr_period_tbl[ (index) ])
+#define AscGetChipSignatureByte(port) (uchar)inp((port)+IOP_SIG_BYTE)
+#define AscGetChipSignatureWord(port) (ushort)inpw((port)+IOP_SIG_WORD)
+#define AscGetChipVerNo(port) (uchar)inp((port)+IOP_VERSION)
+#define AscGetChipCfgLsw(port) (ushort)inpw((port)+IOP_CONFIG_LOW)
+#define AscGetChipCfgMsw(port) (ushort)inpw((port)+IOP_CONFIG_HIGH)
+#define AscSetChipCfgLsw(port, data) outpw((port)+IOP_CONFIG_LOW, data)
+#define AscSetChipCfgMsw(port, data) outpw((port)+IOP_CONFIG_HIGH, data)
+#define AscGetChipEEPCmd(port) (uchar)inp((port)+IOP_EEP_CMD)
+#define AscSetChipEEPCmd(port, data) outp((port)+IOP_EEP_CMD, data)
+#define AscGetChipEEPData(port) (ushort)inpw((port)+IOP_EEP_DATA)
+#define AscSetChipEEPData(port, data) outpw((port)+IOP_EEP_DATA, data)
+#define AscGetChipLramAddr(port) (ushort)inpw((PortAddr)((port)+IOP_RAM_ADDR))
+#define AscSetChipLramAddr(port, addr) outpw((PortAddr)((port)+IOP_RAM_ADDR), addr)
+#define AscGetChipLramData(port) (ushort)inpw((port)+IOP_RAM_DATA)
+#define AscSetChipLramData(port, data) outpw((port)+IOP_RAM_DATA, data)
+#define AscGetChipLramDataNoSwap(port) (ushort)inpw_noswap((port)+IOP_RAM_DATA)
+#define AscSetChipLramDataNoSwap(port, data) outpw_noswap((port)+IOP_RAM_DATA, data)
+#define AscGetChipIFC(port) (uchar)inp((port)+IOP_REG_IFC)
+#define AscSetChipIFC(port, data) outp((port)+IOP_REG_IFC, data)
+#define AscGetChipStatus(port) (ASC_CS_TYPE)inpw((port)+IOP_STATUS)
+#define AscSetChipStatus(port, cs_val) outpw((port)+IOP_STATUS, cs_val)
+#define AscGetChipControl(port) (uchar)inp((port)+IOP_CTRL)
+#define AscSetChipControl(port, cc_val) outp((port)+IOP_CTRL, cc_val)
+#define AscGetChipSyn(port) (uchar)inp((port)+IOP_SYN_OFFSET)
+#define AscSetChipSyn(port, data) outp((port)+IOP_SYN_OFFSET, data)
+#define AscSetPCAddr(port, data) outpw((port)+IOP_REG_PC, data)
+#define AscGetPCAddr(port) (ushort)inpw((port)+IOP_REG_PC)
+#define AscIsIntPending(port) (AscGetChipStatus(port) & (CSW_INT_PENDING | CSW_SCSI_RESET_LATCH))
+#define AscGetChipScsiID(port) ((AscGetChipCfgLsw(port) >> 8) & ASC_MAX_TID)
+#define AscGetExtraControl(port) (uchar)inp((port)+IOP_EXTRA_CONTROL)
+#define AscSetExtraControl(port, data) outp((port)+IOP_EXTRA_CONTROL, data)
+#define AscReadChipAX(port) (ushort)inpw((port)+IOP_REG_AX)
+#define AscWriteChipAX(port, data) outpw((port)+IOP_REG_AX, data)
+#define AscReadChipIX(port) (uchar)inp((port)+IOP_REG_IX)
+#define AscWriteChipIX(port, data) outp((port)+IOP_REG_IX, data)
+#define AscReadChipIH(port) (ushort)inpw((port)+IOP_REG_IH)
+#define AscWriteChipIH(port, data) outpw((port)+IOP_REG_IH, data)
+#define AscReadChipQP(port) (uchar)inp((port)+IOP_REG_QP)
+#define AscWriteChipQP(port, data) outp((port)+IOP_REG_QP, data)
+#define AscReadChipFIFO_L(port) (ushort)inpw((port)+IOP_FIFO_L)
+#define AscWriteChipFIFO_L(port, data) outpw((port)+IOP_FIFO_L, data)
+#define AscReadChipFIFO_H(port) (ushort)inpw((port)+IOP_FIFO_H)
+#define AscWriteChipFIFO_H(port, data) outpw((port)+IOP_FIFO_H, data)
+#define AscReadChipDmaSpeed(port) (uchar)inp((port)+IOP_DMA_SPEED)
+#define AscWriteChipDmaSpeed(port, data) outp((port)+IOP_DMA_SPEED, data)
+#define AscReadChipDA0(port) (ushort)inpw((port)+IOP_REG_DA0)
+#define AscWriteChipDA0(port, data) outpw((port)+IOP_REG_DA0, data)
+#define AscReadChipDA1(port) (ushort)inpw((port)+IOP_REG_DA1)
+#define AscWriteChipDA1(port, data) outpw((port)+IOP_REG_DA1, data)
+#define AscReadChipDC0(port) (ushort)inpw((port)+IOP_REG_DC0)
+#define AscWriteChipDC0(port, data) outpw((port)+IOP_REG_DC0, data)
+#define AscReadChipDC1(port) (ushort)inpw((port)+IOP_REG_DC1)
+#define AscWriteChipDC1(port, data) outpw((port)+IOP_REG_DC1, data)
+#define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID)
+#define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data)
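+
+/*
+ * Illustrative sketch (not part of the original driver): the LRAM access
+ * routines declared below follow a two-step protocol - latch the word
+ * address in IOP_RAM_ADDR, then transfer the data word through
+ * IOP_RAM_DATA.  ExampleReadLramWord() is a hypothetical helper name that
+ * shows the read direction only, built from the register macros above.
+ */
+#define ExampleReadLramWord(iop_base, addr) \
+    (AscSetChipLramAddr((iop_base), (addr)), /* latch LRAM word address */ \
+     AscGetChipLramData(iop_base))           /* read the 16-bit data word */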
+
+STATIC int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg);
+STATIC int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg);
+STATIC void AscWaitEEPRead(void);
+STATIC void AscWaitEEPWrite(void);
+STATIC ushort AscReadEEPWord(PortAddr, uchar);
+STATIC ushort AscWriteEEPWord(PortAddr, uchar, ushort);
+STATIC ushort AscGetEEPConfig(PortAddr, ASCEEP_CONFIG *, ushort);
+STATIC int AscSetEEPConfigOnce(PortAddr, ASCEEP_CONFIG *, ushort);
+STATIC int AscSetEEPConfig(PortAddr, ASCEEP_CONFIG *, ushort);
+STATIC int AscStartChip(PortAddr);
+STATIC int AscStopChip(PortAddr);
+STATIC void AscSetChipIH(PortAddr, ushort);
+STATIC int AscIsChipHalted(PortAddr);
+STATIC void AscAckInterrupt(PortAddr);
+STATIC void AscDisableInterrupt(PortAddr);
+STATIC void AscEnableInterrupt(PortAddr);
+STATIC void AscSetBank(PortAddr, uchar);
+STATIC int AscResetChipAndScsiBus(ASC_DVC_VAR *);
+STATIC ushort AscGetIsaDmaChannel(PortAddr);
+STATIC ushort AscSetIsaDmaChannel(PortAddr, ushort);
+STATIC uchar AscSetIsaDmaSpeed(PortAddr, uchar);
+STATIC uchar AscGetIsaDmaSpeed(PortAddr);
+STATIC uchar AscReadLramByte(PortAddr, ushort);
+STATIC ushort AscReadLramWord(PortAddr, ushort);
+STATIC ulong AscReadLramDWord(PortAddr, ushort);
+STATIC void AscWriteLramWord(PortAddr, ushort, ushort);
+STATIC void AscWriteLramDWord(PortAddr, ushort, ulong);
+STATIC void AscWriteLramByte(PortAddr, ushort, uchar);
+STATIC ulong AscMemSumLramWord(PortAddr, ushort, rint);
+STATIC void AscMemWordSetLram(PortAddr, ushort, ushort, rint);
+STATIC void AscMemWordCopyToLram(PortAddr, ushort, ushort *, int);
+STATIC void AscMemDWordCopyToLram(PortAddr, ushort, ulong *, int);
+STATIC void AscMemWordCopyFromLram(PortAddr, ushort, ushort *, int);
+STATIC ushort AscInitAscDvcVar(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitFromEEP(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitFromAscDvcVar(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitMicroCodeVar(ASC_DVC_VAR asc_ptr_type * asc_dvc);
+STATIC int AscTestExternalLram(ASC_DVC_VAR asc_ptr_type *);
+STATIC uchar AscMsgOutSDTR(ASC_DVC_VAR asc_ptr_type *, uchar, uchar);
+STATIC uchar AscCalSDTRData(ASC_DVC_VAR asc_ptr_type *, uchar, uchar);
+STATIC void AscSetChipSDTR(PortAddr, uchar, uchar);
+STATIC uchar AscGetSynPeriodIndex(ASC_DVC_VAR asc_ptr_type *, ruchar);
+STATIC uchar AscAllocFreeQueue(PortAddr, uchar);
+STATIC uchar AscAllocMultipleFreeQueue(PortAddr, uchar, uchar);
+STATIC int AscRiscHaltedAbortSRB(ASC_DVC_VAR asc_ptr_type *, ulong);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int AscRiscHaltedAbortTIX(ASC_DVC_VAR asc_ptr_type *, uchar);
+#endif /* version >= v1.3.89 */
+STATIC int AscHostReqRiscHalt(PortAddr);
+STATIC int AscStopQueueExe(PortAddr);
+STATIC int AscStartQueueExe(PortAddr);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int AscCleanUpDiscQueue(PortAddr);
+#endif /* version >= v1.3.89 */
+STATIC int AscCleanUpBusyQueue(PortAddr);
+STATIC int AscWaitTixISRDone(ASC_DVC_VAR asc_ptr_type *, uchar);
+STATIC int AscWaitISRDone(ASC_DVC_VAR asc_ptr_type *);
+STATIC ulong AscGetOnePhyAddr(ASC_DVC_VAR asc_ptr_type *, uchar *,
+ ulong);
+STATIC int AscSendScsiQueue(ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ASC_SCSI_Q * scsiq,
+ uchar n_q_required);
+STATIC int AscPutReadyQueue(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_Q *, uchar);
+STATIC int AscPutReadySgListQueue(ASC_DVC_VAR asc_ptr_type *,
+ ASC_SCSI_Q *, uchar);
+STATIC int AscSetChipSynRegAtID(PortAddr, uchar, uchar);
+STATIC int AscSetRunChipSynRegAtID(PortAddr, uchar, uchar);
+STATIC ushort AscInitLram(ASC_DVC_VAR asc_ptr_type *);
+STATIC int AscReInitLram(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitQLinkVar(ASC_DVC_VAR asc_ptr_type *);
+STATIC int AscSetLibErrorCode(ASC_DVC_VAR asc_ptr_type *, ushort);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int _AscWaitQDone(PortAddr, ASC_SCSI_Q *);
+#endif /* version >= v1.3.89 */
+STATIC int AscIsrChipHalted(ASC_DVC_VAR asc_ptr_type *);
+STATIC uchar _AscCopyLramScsiDoneQ(PortAddr, ushort,
+ ASC_QDONE_INFO *, ulong);
+STATIC int AscIsrQDone(ASC_DVC_VAR asc_ptr_type *);
+STATIC int AscCompareString(uchar *, uchar *, int);
+STATIC ushort AscGetEisaChipCfg(PortAddr);
+STATIC ulong AscGetEisaProductID(PortAddr);
+STATIC PortAddr AscSearchIOPortAddrEISA(PortAddr);
+STATIC uchar AscGetChipScsiCtrl(PortAddr);
+STATIC uchar AscSetChipScsiID(PortAddr, uchar);
+STATIC uchar AscGetChipVersion(PortAddr, ushort);
+STATIC ushort AscGetChipBusType(PortAddr);
+STATIC ulong AscLoadMicroCode(PortAddr, ushort, ushort *, ushort);
+STATIC int AscFindSignature(PortAddr);
+STATIC PortAddr AscSearchIOPortAddr11(PortAddr);
+STATIC void AscToggleIRQAct(PortAddr);
+STATIC void AscSetISAPNPWaitForKey(void);
+STATIC uchar AscGetChipIRQ(PortAddr, ushort);
+STATIC uchar AscSetChipIRQ(PortAddr, uchar, ushort);
+STATIC ushort AscGetChipBiosAddress(PortAddr, ushort);
+STATIC long DvcEnterCritical(void);
+STATIC void DvcLeaveCritical(long);
+STATIC void DvcInPortWords(PortAddr, ushort *, int);
+STATIC void DvcOutPortWords(PortAddr, ushort *, int);
+STATIC void DvcOutPortDWords(PortAddr, ulong *, int);
+STATIC uchar DvcReadPCIConfigByte(ASC_DVC_VAR asc_ptr_type *, ushort);
+STATIC void DvcWritePCIConfigByte(ASC_DVC_VAR asc_ptr_type *,
+ ushort, uchar);
+STATIC void DvcSleepMilliSecond(ulong);
+STATIC void DvcDelayNanoSecond(ASC_DVC_VAR asc_ptr_type *, ulong);
+STATIC ulong DvcGetSGList(ASC_DVC_VAR asc_ptr_type *, uchar *,
+ ulong, ASC_SG_HEAD *);
+STATIC void DvcPutScsiQ(PortAddr, ushort, ushort *, int);
+STATIC void DvcGetQinfo(PortAddr, ushort, ushort *, int);
+STATIC PortAddr AscSearchIOPortAddr(PortAddr, ushort);
+STATIC ushort AscInitGetConfig(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitSetConfig(ASC_DVC_VAR asc_ptr_type *);
+STATIC ushort AscInitAsc1000Driver(ASC_DVC_VAR asc_ptr_type *);
+STATIC void AscAsyncFix(ASC_DVC_VAR asc_ptr_type *, uchar,
+ ASC_SCSI_INQUIRY *);
+STATIC int AscTagQueuingSafe(ASC_SCSI_INQUIRY *);
+STATIC void AscInquiryHandling(ASC_DVC_VAR asc_ptr_type *,
+ uchar, ASC_SCSI_INQUIRY *);
+STATIC int AscExeScsiQueue(ASC_DVC_VAR asc_ptr_type *, ASC_SCSI_Q *);
+STATIC int AscISR(ASC_DVC_VAR asc_ptr_type *);
+STATIC uint AscGetNumOfFreeQueue(ASC_DVC_VAR asc_ptr_type *, uchar,
+ uchar);
+STATIC int AscSgListToQueue(int);
+STATIC int AscAbortSRB(ASC_DVC_VAR asc_ptr_type *, ulong);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int AscResetDevice(ASC_DVC_VAR asc_ptr_type *, uchar);
+#endif /* version >= v1.3.89 */
+STATIC int AscResetSB(ASC_DVC_VAR asc_ptr_type *);
+STATIC void AscEnableIsaDma(uchar);
+STATIC ulong AscGetMaxDmaCount(ushort);
+
+
+/*
+ * --- Adv Library Constants and Macros
+ */
+
+#define ADV_LIB_VERSION_MAJOR 3
+#define ADV_LIB_VERSION_MINOR 45
+
+/* d_os_dep.h */
+#define ADV_OS_LINUX
+
+/*
+ * Define Adv Library required special types.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#define AdvPortAddr unsigned short /* I/O Port address size */
+#else /* version >= v1,3,0 */
+#define AdvPortAddr unsigned long /* Virtual memory address size */
+#endif /* version >= v1,3,0 */
+
+/*
+ * Define Adv Library required memory access macros.
+ */
+#define ADV_MEM_READB(addr) readb(addr)
+#define ADV_MEM_READW(addr) readw(addr)
+#define ADV_MEM_WRITEB(addr, byte) writeb(byte, addr)
+#define ADV_MEM_WRITEW(addr, word) writew(word, addr)
+
+/*
+ * The I/O memory mapping function names changed in 2.1.X.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif /* version < v2.1.0 */
+
+/*
+ * Define total number of simultaneous maximum element scatter-gather
+ * requests, i.e. ADV_TOT_SG_LIST * ADV_MAX_SG_LIST is the total number
+ * of simultaneous scatter-gather elements supported per wide adapter.
+ */
+#define ADV_TOT_SG_LIST 64
+
+/*
+ * Define Adv Library required per request scatter-gather element limit.
+ */
+#define ADV_MAX_SG_LIST 64
+
+/*
+ * Scatter-Gather Definitions per request.
+ *
+ * Because SG block memory is allocated in virtual memory but is
+ * referenced by the microcode as physical memory, we need to do
+ * calculations to ensure there will be enough physically contiguous
+ * memory to support ADV_MAX_SG_LIST SG entries.
+ */
+
+/* Number of SG blocks needed. */
+#define ADV_NUM_SG_BLOCK \
+ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK)
+
+/* Total contiguous memory needed for SG blocks. */
+#define ADV_SG_TOTAL_MEM_SIZE \
+ (sizeof(ADV_SG_BLOCK) * ADV_NUM_SG_BLOCK)
+
+#define ASC_PAGE_SIZE PAGE_SIZE
+
+/*
+ * Number of page crossings possible for the total contiguous virtual memory
+ * needed for SG blocks.
+ *
+ * We need to allocate this many additional SG blocks in virtual memory to
+ * ensure there will be space for ADV_NUM_SG_BLOCK physically contiguous
+ * scatter-gather blocks.
+ */
+#define ADV_NUM_PAGE_CROSSING \
+ ((ADV_SG_TOTAL_MEM_SIZE + (ASC_PAGE_SIZE - 1))/ASC_PAGE_SIZE)
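+
+/*
+ * Worked example of the calculations above (illustrative only): with
+ * ADV_MAX_SG_LIST == 64 and NO_OF_SG_PER_BLOCK == 15 (defined further
+ * below), and assuming a 32-bit build where sizeof(ADV_SG_BLOCK) is
+ * 128 bytes and PAGE_SIZE is 4096:
+ *
+ *   ADV_NUM_SG_BLOCK      = (64 + 14) / 15      = 5 blocks
+ *   ADV_SG_TOTAL_MEM_SIZE = 128 * 5             = 640 bytes
+ *   ADV_NUM_PAGE_CROSSING = (640 + 4095) / 4096 = 1 extra block
+ */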
+
+/*
+ * Define Adv Library Assertion Macro.
+ */
+
+#define ADV_ASSERT(a) ASC_ASSERT(a)
+
+/* a_condor.h */
+#define ADV_PCI_VENDOR_ID 0x10CD
+#define ADV_PCI_DEVICE_ID_REV_A 0x2300
+
+#define ASC_EEP_DVC_CFG_BEGIN (0x00)
+#define ASC_EEP_DVC_CFG_END (0x15)
+#define ASC_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */
+#define ASC_EEP_MAX_WORD_ADDR (0x1E)
+
+#define ASC_EEP_DELAY_MS 100
+
+/*
+ * EEPROM bits reference by the RISC after initialization.
+ */
+#define ADV_EEPROM_BIG_ENDIAN 0x8000 /* EEPROM Bit 15 */
+#define ADV_EEPROM_BIOS_ENABLE 0x4000 /* EEPROM Bit 14 */
+#define ADV_EEPROM_TERM_POL 0x2000 /* EEPROM Bit 13 */
+
+/*
+ * EEPROM configuration format
+ *
+ * Field naming convention:
+ *
+ * *_enable indicates the field enables or disables the feature. The
+ * value is never reset.
+ *
+ * *_able indicates both whether a feature should be enabled or disabled
+ * and whether a device is capable of the feature. At initialization
+ * this field may be set, but later if a device is found to be incapable
+ * of the feature, the field is cleared.
+ *
+ * Default values are maintained in a_init.c in the structure
+ * Default_EEPROM_Config.
+ */
+typedef struct adveep_config
+{
+ /* Word Offset, Description */
+
+ ushort cfg_lsw; /* 00 power up initialization */
+ /* bit 13 set - Term Polarity Control */
+ /* bit 14 set - BIOS Enable */
+ /* bit 15 set - Big Endian Mode */
+ ushort cfg_msw; /* 01 unused */
+ ushort disc_enable; /* 02 disconnect enable */
+ ushort wdtr_able; /* 03 Wide DTR able */
+ ushort sdtr_able; /* 04 Synchronous DTR able */
+ ushort start_motor; /* 05 send start up motor */
+ ushort tagqng_able; /* 06 tag queuing able */
+ ushort bios_scan; /* 07 BIOS device control */
+ ushort scam_tolerant; /* 08 no scam */
+
+ uchar adapter_scsi_id; /* 09 Host Adapter ID */
+ uchar bios_boot_delay; /* power up wait */
+
+ uchar scsi_reset_delay; /* 10 reset delay */
+ uchar bios_id_lun; /* first boot device scsi id & lun */
+ /* high nibble is lun */
+ /* low nibble is scsi id */
+
+ uchar termination; /* 11 0 - automatic */
+ /* 1 - low off / high off */
+ /* 2 - low off / high on */
+ /* 3 - low on / high on */
+ /* There is no low on / high off */
+
+ uchar reserved1; /* reserved byte (not used) */
+
+ ushort bios_ctrl; /* 12 BIOS control bits */
+ /* bit 0 set: BIOS does not act as initiator. */
+ /* bit 1 set: BIOS > 1 GB support */
+ /* bit 2 set: BIOS > 2 Disk Support */
+ /* bit 3 set: BIOS does not support removables */
+ /* bit 4 set: BIOS support bootable CD */
+ /* bit 5 set: */
+ /* bit 6 set: BIOS support multiple LUNs */
+ /* bit 7 set: BIOS display of message */
+ /* bit 8 set: */
+ /* bit 9 set: Reset SCSI bus during init. */
+ /* bit 10 set: */
+ /* bit 11 set: No verbose initialization. */
+ /* bit 12 set: SCSI parity enabled */
+ /* bit 13 set: */
+ /* bit 14 set: */
+ /* bit 15 set: */
+ ushort ultra_able; /* 13 ULTRA speed able */
+ ushort reserved2; /* 14 reserved */
+ uchar max_host_qng; /* 15 maximum host queuing */
+ uchar max_dvc_qng; /* maximum per device queuing */
+ ushort dvc_cntl; /* 16 control bit for driver */
+ ushort bug_fix; /* 17 control bit for bug fix */
+ ushort serial_number_word1; /* 18 Board serial number word 1 */
+ ushort serial_number_word2; /* 19 Board serial number word 2 */
+ ushort serial_number_word3; /* 20 Board serial number word 3 */
+ ushort check_sum; /* 21 EEP check sum */
+ uchar oem_name[16]; /* 22 OEM name */
+ ushort dvc_err_code; /* 30 last device driver error code */
+ ushort adv_err_code; /* 31 last uc and Adv Lib error code */
+ ushort adv_err_addr; /* 32 last uc error address */
+ ushort saved_dvc_err_code; /* 33 saved last dev. driver error code */
+ ushort saved_adv_err_code; /* 34 saved last uc and Adv Lib error code */
+ ushort saved_adv_err_addr; /* 35 saved last uc error address */
+ ushort num_of_err; /* 36 number of error */
+} ADVEEP_CONFIG;
+
+/*
+ * EEPROM Commands
+ */
+#define ASC_EEP_CMD_DONE 0x0200
+#define ASC_EEP_CMD_DONE_ERR 0x0001
+
+/* cfg_word */
+#define EEP_CFG_WORD_BIG_ENDIAN 0x8000
+
+/* bios_ctrl */
+#define BIOS_CTRL_BIOS 0x0001
+#define BIOS_CTRL_EXTENDED_XLAT 0x0002
+#define BIOS_CTRL_GT_2_DISK 0x0004
+#define BIOS_CTRL_BIOS_REMOVABLE 0x0008
+#define BIOS_CTRL_BOOTABLE_CD 0x0010
+#define BIOS_CTRL_MULTIPLE_LUN 0x0040
+#define BIOS_CTRL_DISPLAY_MSG 0x0080
+#define BIOS_CTRL_NO_SCAM 0x0100
+#define BIOS_CTRL_RESET_SCSI_BUS 0x0200
+#define BIOS_CTRL_INIT_VERBOSE 0x0800
+#define BIOS_CTRL_SCSI_PARITY 0x1000
+
+/*
+ * ASC 3550 Internal Memory Size - 8KB
+ */
+#define ADV_CONDOR_MEMSIZE 0x2000 /* 8 KB Internal Memory */
+
+/*
+ * ASC 3550 I/O Length - 64 bytes
+ */
+#define ADV_CONDOR_IOLEN 0x40 /* I/O Port Range in bytes */
+
+/*
+ * Byte I/O register address from base of 'iop_base'.
+ */
+#define IOPB_INTR_STATUS_REG 0x00
+#define IOPB_CHIP_ID_1 0x01
+#define IOPB_INTR_ENABLES 0x02
+#define IOPB_CHIP_TYPE_REV 0x03
+#define IOPB_RES_ADDR_4 0x04
+#define IOPB_RES_ADDR_5 0x05
+#define IOPB_RAM_DATA 0x06
+#define IOPB_RES_ADDR_7 0x07
+#define IOPB_FLAG_REG 0x08
+#define IOPB_RES_ADDR_9 0x09
+#define IOPB_RISC_CSR 0x0A
+#define IOPB_RES_ADDR_B 0x0B
+#define IOPB_RES_ADDR_C 0x0C
+#define IOPB_RES_ADDR_D 0x0D
+#define IOPB_RES_ADDR_E 0x0E
+#define IOPB_RES_ADDR_F 0x0F
+#define IOPB_MEM_CFG 0x10
+#define IOPB_RES_ADDR_11 0x11
+#define IOPB_RES_ADDR_12 0x12
+#define IOPB_RES_ADDR_13 0x13
+#define IOPB_FLASH_PAGE 0x14
+#define IOPB_RES_ADDR_15 0x15
+#define IOPB_RES_ADDR_16 0x16
+#define IOPB_RES_ADDR_17 0x17
+#define IOPB_FLASH_DATA 0x18
+#define IOPB_RES_ADDR_19 0x19
+#define IOPB_RES_ADDR_1A 0x1A
+#define IOPB_RES_ADDR_1B 0x1B
+#define IOPB_RES_ADDR_1C 0x1C
+#define IOPB_RES_ADDR_1D 0x1D
+#define IOPB_RES_ADDR_1E 0x1E
+#define IOPB_RES_ADDR_1F 0x1F
+#define IOPB_DMA_CFG0 0x20
+#define IOPB_DMA_CFG1 0x21
+#define IOPB_TICKLE 0x22
+#define IOPB_DMA_REG_WR 0x23
+#define IOPB_SDMA_STATUS 0x24
+#define IOPB_SCSI_BYTE_CNT 0x25
+#define IOPB_HOST_BYTE_CNT 0x26
+#define IOPB_BYTE_LEFT_TO_XFER 0x27
+#define IOPB_BYTE_TO_XFER_0 0x28
+#define IOPB_BYTE_TO_XFER_1 0x29
+#define IOPB_BYTE_TO_XFER_2 0x2A
+#define IOPB_BYTE_TO_XFER_3 0x2B
+#define IOPB_ACC_GRP 0x2C
+#define IOPB_RES_ADDR_2D 0x2D
+#define IOPB_DEV_ID 0x2E
+#define IOPB_RES_ADDR_2F 0x2F
+#define IOPB_SCSI_DATA 0x30
+#define IOPB_RES_ADDR_31 0x31
+#define IOPB_RES_ADDR_32 0x32
+#define IOPB_SCSI_DATA_HSHK 0x33
+#define IOPB_SCSI_CTRL 0x34
+#define IOPB_RES_ADDR_35 0x35
+#define IOPB_RES_ADDR_36 0x36
+#define IOPB_RES_ADDR_37 0x37
+#define IOPB_RES_ADDR_38 0x38
+#define IOPB_RES_ADDR_39 0x39
+#define IOPB_RES_ADDR_3A 0x3A
+#define IOPB_RES_ADDR_3B 0x3B
+#define IOPB_RFIFO_CNT 0x3C
+#define IOPB_RES_ADDR_3D 0x3D
+#define IOPB_RES_ADDR_3E 0x3E
+#define IOPB_RES_ADDR_3F 0x3F
+
+/*
+ * Word I/O register address from base of 'iop_base'.
+ */
+#define IOPW_CHIP_ID_0 0x00 /* CID0 */
+#define IOPW_CTRL_REG 0x02 /* CC */
+#define IOPW_RAM_ADDR 0x04 /* LA */
+#define IOPW_RAM_DATA 0x06 /* LD */
+#define IOPW_RES_ADDR_08 0x08
+#define IOPW_RISC_CSR 0x0A /* CSR */
+#define IOPW_SCSI_CFG0 0x0C /* CFG0 */
+#define IOPW_SCSI_CFG1 0x0E /* CFG1 */
+#define IOPW_RES_ADDR_10 0x10
+#define IOPW_SEL_MASK 0x12 /* SM */
+#define IOPW_RES_ADDR_14 0x14
+#define IOPW_FLASH_ADDR 0x16 /* FA */
+#define IOPW_RES_ADDR_18 0x18
+#define IOPW_EE_CMD 0x1A /* EC */
+#define IOPW_EE_DATA 0x1C /* ED */
+#define IOPW_SFIFO_CNT 0x1E /* SFC */
+#define IOPW_RES_ADDR_20 0x20
+#define IOPW_Q_BASE 0x22 /* QB */
+#define IOPW_QP 0x24 /* QP */
+#define IOPW_IX 0x26 /* IX */
+#define IOPW_SP 0x28 /* SP */
+#define IOPW_PC 0x2A /* PC */
+#define IOPW_RES_ADDR_2C 0x2C
+#define IOPW_RES_ADDR_2E 0x2E
+#define IOPW_SCSI_DATA 0x30 /* SD */
+#define IOPW_SCSI_DATA_HSHK 0x32 /* SDH */
+#define IOPW_SCSI_CTRL 0x34 /* SC */
+#define IOPW_HSHK_CFG 0x36 /* HCFG */
+#define IOPW_SXFR_STATUS 0x36 /* SXS */
+#define IOPW_SXFR_CNTL 0x38 /* SXL */
+#define IOPW_SXFR_CNTH 0x3A /* SXH */
+#define IOPW_RES_ADDR_3C 0x3C
+#define IOPW_RFIFO_DATA 0x3E /* RFD */
+
+/*
+ * Doubleword I/O register address from base of 'iop_base'.
+ */
+#define IOPDW_RES_ADDR_0 0x00
+#define IOPDW_RAM_DATA 0x04
+#define IOPDW_RES_ADDR_8 0x08
+#define IOPDW_RES_ADDR_C 0x0C
+#define IOPDW_RES_ADDR_10 0x10
+#define IOPDW_RES_ADDR_14 0x14
+#define IOPDW_RES_ADDR_18 0x18
+#define IOPDW_RES_ADDR_1C 0x1C
+#define IOPDW_SDMA_ADDR0 0x20
+#define IOPDW_SDMA_ADDR1 0x24
+#define IOPDW_SDMA_COUNT 0x28
+#define IOPDW_SDMA_ERROR 0x2C
+#define IOPDW_RDMA_ADDR0 0x30
+#define IOPDW_RDMA_ADDR1 0x34
+#define IOPDW_RDMA_COUNT 0x38
+#define IOPDW_RDMA_ERROR 0x3C
+
+#define ADV_CHIP_ID_BYTE 0x25
+#define ADV_CHIP_ID_WORD 0x04C1
+
+#define ADV_SC_SCSI_BUS_RESET 0x2000
+
+#define ADV_INTR_ENABLE_HOST_INTR 0x01
+#define ADV_INTR_ENABLE_SEL_INTR 0x02
+#define ADV_INTR_ENABLE_DPR_INTR 0x04
+#define ADV_INTR_ENABLE_RTA_INTR 0x08
+#define ADV_INTR_ENABLE_RMA_INTR 0x10
+#define ADV_INTR_ENABLE_RST_INTR 0x20
+#define ADV_INTR_ENABLE_DPE_INTR 0x40
+#define ADV_INTR_ENABLE_GLOBAL_INTR 0x80
+
+#define ADV_INTR_STATUS_INTRA 0x01
+#define ADV_INTR_STATUS_INTRB 0x02
+#define ADV_INTR_STATUS_INTRC 0x04
+
+#define ADV_RISC_CSR_STOP (0x0000)
+#define ADV_RISC_TEST_COND (0x2000)
+#define ADV_RISC_CSR_RUN (0x4000)
+#define ADV_RISC_CSR_SINGLE_STEP (0x8000)
+
+#define ADV_CTRL_REG_HOST_INTR 0x0100
+#define ADV_CTRL_REG_SEL_INTR 0x0200
+#define ADV_CTRL_REG_DPR_INTR 0x0400
+#define ADV_CTRL_REG_RTA_INTR 0x0800
+#define ADV_CTRL_REG_RMA_INTR 0x1000
+#define ADV_CTRL_REG_RES_BIT14 0x2000
+#define ADV_CTRL_REG_DPE_INTR 0x4000
+#define ADV_CTRL_REG_POWER_DONE 0x8000
+#define ADV_CTRL_REG_ANY_INTR 0xFF00
+
+#define ADV_CTRL_REG_CMD_RESET 0x00C6
+#define ADV_CTRL_REG_CMD_WR_IO_REG 0x00C5
+#define ADV_CTRL_REG_CMD_RD_IO_REG 0x00C4
+#define ADV_CTRL_REG_CMD_WR_PCI_CFG_SPACE 0x00C3
+#define ADV_CTRL_REG_CMD_RD_PCI_CFG_SPACE 0x00C2
+
+#define ADV_SCSI_CTRL_RSTOUT 0x2000
+
+#define AdvIsIntPending(port) \
+ (AdvReadWordRegister(port, IOPW_CTRL_REG) & ADV_CTRL_REG_HOST_INTR)
+
+/*
+ * SCSI_CFG0 Register bit definitions
+ */
+#define TIMER_MODEAB 0xC000 /* Watchdog, Second, and Select. Timer Ctrl. */
+#define PARITY_EN 0x2000 /* Enable SCSI Parity Error detection */
+#define EVEN_PARITY 0x1000 /* Select Even Parity */
+#define WD_LONG 0x0800 /* Watchdog Interval, 1: 57 min, 0: 13 sec */
+#define QUEUE_128 0x0400 /* Queue Size, 1: 128 byte, 0: 64 byte */
+#define PRIM_MODE 0x0100 /* Primitive SCSI mode */
+#define SCAM_EN 0x0080 /* Enable SCAM selection */
+#define SEL_TMO_LONG 0x0040 /* Sel/Resel Timeout, 1: 400 ms, 0: 1.6 ms */
+#define CFRM_ID 0x0020 /* SCAM id sel. confirm., 1: fast, 0: 6.4 ms */
+#define OUR_ID_EN 0x0010 /* Enable OUR_ID bits */
+#define OUR_ID 0x000F /* SCSI ID */
+
+/*
+ * SCSI_CFG1 Register bit definitions
+ */
+#define BIG_ENDIAN 0x8000 /* Enable Big Endian Mode MIO:15, EEP:15 */
+#define TERM_POL 0x2000 /* Terminator Polarity Ctrl. MIO:13, EEP:13 */
+#define SLEW_RATE 0x1000 /* SCSI output buffer slew rate */
+#define FILTER_SEL 0x0C00 /* Filter Period Selection */
+#define FLTR_DISABLE 0x0000 /* Input Filtering Disabled */
+#define FLTR_11_TO_20NS 0x0800 /* Input Filtering 11ns to 20ns */
+#define FLTR_21_TO_39NS 0x0C00 /* Input Filtering 21ns to 39ns */
+#define ACTIVE_DBL 0x0200 /* Disable Active Negation */
+#define DIFF_MODE 0x0100 /* SCSI differential Mode (Read-Only) */
+#define DIFF_SENSE 0x0080 /* 1: No SE cables, 0: SE cable (Read-Only) */
+#define TERM_CTL_SEL 0x0040 /* Enable TERM_CTL_H and TERM_CTL_L */
+#define TERM_CTL 0x0030 /* External SCSI Termination Bits */
+#define TERM_CTL_H 0x0020 /* Enable External SCSI Upper Termination */
+#define TERM_CTL_L 0x0010 /* Enable External SCSI Lower Termination */
+#define CABLE_DETECT 0x000F /* External SCSI Cable Connection Status */
+
+#define CABLE_ILLEGAL_A 0x7
+ /* x 0 0 0 | on on | Illegal (all 3 connectors are used) */
+
+#define CABLE_ILLEGAL_B 0xB
+ /* 0 x 0 0 | on on | Illegal (all 3 connectors are used) */
+
+/*
+ The following table details the SCSI_CFG1 Termination Polarity,
+ Termination Control and Cable Detect bits.
+
+ Cable Detect | Termination
+ Bit 3 2 1 0 | 5 4 | Notes
+ _____________|________|____________________
+ 1 1 1 0 | on on | Internal wide only
+ 1 1 0 1 | on on | Internal narrow only
+ 1 0 1 1 | on on | External narrow only
+ 0 x 1 1 | on on | External wide only
+ 1 1 0 0 | on off| Internal wide and internal narrow
+ 1 0 1 0 | on off| Internal wide and external narrow
+ 0 x 1 0 | off off| Internal wide and external wide
+ 1 0 0 1 | on off| Internal narrow and external narrow
+ 0 x 0 1 | on off| Internal narrow and external wide
+ 1 1 1 1 | on on | No devices are attached
+ x 0 0 0 | on on | Illegal (all 3 connectors are used)
+ 0 x 0 0 | on on | Illegal (all 3 connectors are used)
+
+ x means don't-care (either '0' or '1')
+
+ If term_pol (bit 13) is '0' (active-low terminator enable), then:
+ 'on' is '0' and 'off' is '1'.
+
+ If term_pol bit is '1' (meaning active-hi terminator enable), then:
+ 'on' is '1' and 'off' is '0'.
+ */
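+
+/*
+ * Illustrative helper (hypothetical name, not part of the original driver):
+ * interpret one of the TERM_CTL_H/TERM_CTL_L bits according to the polarity
+ * rule described in the table above.  Evaluates non-zero if the selected
+ * terminator is 'on' for the given SCSI_CFG1 value.
+ */
+#define ADV_TERM_IS_ON(cfg1, term_bit) \
+    (((cfg1) & TERM_POL) ? (((cfg1) & (term_bit)) != 0) \
+                         : (((cfg1) & (term_bit)) == 0))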
+
+/*
+ * MEM_CFG Register bit definitions
+ */
+#define BIOS_EN 0x40 /* BIOS Enable MIO:14,EEP:14 */
+#define FAST_EE_CLK 0x20 /* Diagnostic Bit */
+#define RAM_SZ 0x1C /* Specify size of RAM to RISC */
+#define RAM_SZ_2KB 0x00 /* 2 KB */
+#define RAM_SZ_4KB 0x04 /* 4 KB */
+#define RAM_SZ_8KB 0x08 /* 8 KB */
+#define RAM_SZ_16KB 0x0C /* 16 KB */
+#define RAM_SZ_32KB 0x10 /* 32 KB */
+#define RAM_SZ_64KB 0x14 /* 64 KB */
+
+/*
+ * DMA_CFG0 Register bit definitions
+ *
+ * This register is only accessible to the host.
+ */
+#define BC_THRESH_ENB 0x80 /* PCI DMA Start Conditions */
+#define FIFO_THRESH 0x70 /* PCI DMA FIFO Threshold */
+#define FIFO_THRESH_16B 0x00 /* 16 bytes */
+#define FIFO_THRESH_32B 0x20 /* 32 bytes */
+#define FIFO_THRESH_48B 0x30 /* 48 bytes */
+#define FIFO_THRESH_64B 0x40 /* 64 bytes */
+#define FIFO_THRESH_80B 0x50 /* 80 bytes (default) */
+#define FIFO_THRESH_96B 0x60 /* 96 bytes */
+#define FIFO_THRESH_112B 0x70 /* 112 bytes */
+#define START_CTL 0x0C /* DMA start conditions */
+#define START_CTL_TH 0x00 /* Wait threshold level (default) */
+#define START_CTL_ID 0x04 /* Wait SDMA/SBUS idle */
+#define START_CTL_THID 0x08 /* Wait threshold and SDMA/SBUS idle */
+#define START_CTL_EMFU 0x0C /* Wait SDMA FIFO empty/full */
+#define READ_CMD 0x03 /* Memory Read Method */
+#define READ_CMD_MR 0x00 /* Memory Read */
+#define READ_CMD_MRL 0x02 /* Memory Read Long */
+#define READ_CMD_MRM 0x03 /* Memory Read Multiple (default) */
+
+/* a_advlib.h */
+
+/*
+ * Adv Library Status Definitions
+ */
+#define ADV_TRUE 1
+#define ADV_FALSE 0
+#define ADV_NOERROR 1
+#define ADV_SUCCESS 1
+#define ADV_BUSY 0
+#define ADV_ERROR (-1)
+
+
+/*
+ * ASC_DVC_VAR 'warn_code' values
+ */
+#define ASC_WARN_EEPROM_CHKSUM 0x0002 /* EEP check sum error */
+#define ASC_WARN_EEPROM_TERMINATION 0x0004 /* EEP termination bad field */
+#define ASC_WARN_SET_PCI_CONFIG_SPACE 0x0080 /* PCI config space set error */
+#define ASC_WARN_ERROR 0xFFFF /* ADV_ERROR return */
+
+#define ADV_MAX_TID 15 /* max. target identifier */
+#define ADV_MAX_LUN 7 /* max. logical unit number */
+
+
+/*
+ * AscInitGetConfig() and AscInitAsc1000Driver() Definitions
+ *
+ * Error code values are set in ASC_DVC_VAR 'err_code'.
+ */
+#define ASC_IERR_WRITE_EEPROM 0x0001 /* write EEPROM error */
+#define ASC_IERR_MCODE_CHKSUM 0x0002 /* micro code check sum error */
+#define ASC_IERR_START_STOP_CHIP 0x0008 /* start/stop chip failed */
+#define ASC_IERR_CHIP_VERSION 0x0040 /* wrong chip version */
+#define ASC_IERR_SET_SCSI_ID 0x0080 /* set SCSI ID failed */
+#define ASC_IERR_BAD_SIGNATURE 0x0200 /* signature not found */
+#define ASC_IERR_ILLEGAL_CONNECTION 0x0400 /* Illegal cable connection */
+#define ASC_IERR_SINGLE_END_DEVICE 0x0800 /* Single-end used w/differential */
+#define ASC_IERR_REVERSED_CABLE 0x1000 /* Narrow flat cable reversed */
+#define ASC_IERR_RW_LRAM 0x8000 /* read/write local RAM error */
+
+/*
+ * Fixed locations of microcode operating variables.
+ */
+#define ASC_MC_CODE_BEGIN_ADDR 0x0028 /* microcode start address */
+#define ASC_MC_CODE_END_ADDR 0x002A /* microcode end address */
+#define ASC_MC_CODE_CHK_SUM 0x002C /* microcode code checksum */
+#define ASC_MC_STACK_BEGIN 0x002E /* microcode stack begin */
+#define ASC_MC_STACK_END 0x0030 /* microcode stack end */
+#define ASC_MC_VERSION_DATE 0x0038 /* microcode version */
+#define ASC_MC_VERSION_NUM 0x003A /* microcode number */
+#define ASCV_VER_SERIAL_W 0x003C /* used in dos_init */
+#define ASC_MC_BIOSMEM 0x0040 /* BIOS RISC Memory Start */
+#define ASC_MC_BIOSLEN 0x0050 /* BIOS RISC Memory Length */
+#define ASC_MC_HALTCODE 0x0094 /* microcode halt code */
+#define ASC_MC_CALLERPC 0x0096 /* microcode halt caller PC */
+#define ASC_MC_ADAPTER_SCSI_ID 0x0098 /* one ID byte + reserved */
+#define ASC_MC_ULTRA_ABLE 0x009C
+#define ASC_MC_SDTR_ABLE 0x009E
+#define ASC_MC_TAGQNG_ABLE 0x00A0
+#define ASC_MC_DISC_ENABLE 0x00A2
+#define ASC_MC_IDLE_CMD 0x00A6
+#define ASC_MC_IDLE_PARA_STAT 0x00A8
+#define ASC_MC_DEFAULT_SCSI_CFG0 0x00AC
+#define ASC_MC_DEFAULT_SCSI_CFG1 0x00AE
+#define ASC_MC_DEFAULT_MEM_CFG 0x00B0
+#define ASC_MC_DEFAULT_SEL_MASK 0x00B2
+#define ASC_MC_RISC_NEXT_READY 0x00B4
+#define ASC_MC_RISC_NEXT_DONE 0x00B5
+#define ASC_MC_SDTR_DONE 0x00B6
+#define ASC_MC_NUMBER_OF_QUEUED_CMD 0x00C0
+#define ASC_MC_NUMBER_OF_MAX_CMD 0x00D0
+#define ASC_MC_DEVICE_HSHK_CFG_TABLE 0x0100
+#define ASC_MC_WDTR_ABLE 0x0120 /* Wide Transfer TID bitmask. */
+#define ASC_MC_CONTROL_FLAG 0x0122 /* Microcode control flag. */
+#define ASC_MC_WDTR_DONE 0x0124
+#define ASC_MC_HOST_NEXT_READY 0x0128 /* Host Next Ready RQL Entry. */
+#define ASC_MC_HOST_NEXT_DONE 0x0129 /* Host Next Done RQL Entry. */
+
+/*
+ * BIOS LRAM variable absolute offsets.
+ */
+#define BIOS_CODESEG 0x54
+#define BIOS_CODELEN 0x56
+#define BIOS_SIGNATURE 0x58
+#define BIOS_VERSION 0x5A
+
+/*
+ * Microcode Control Flags
+ *
+ * Flags set by the Adv Library in RISC variable 'control_flag' (0x122)
+ * and handled by the microcode.
+ */
+#define CONTROL_FLAG_IGNORE_PERR 0x0001 /* Ignore DMA Parity Errors */
+
+/*
+ * ASC_MC_DEVICE_HSHK_CFG_TABLE microcode table or HSHK_CFG register format
+ */
+#define HSHK_CFG_WIDE_XFR 0x8000
+#define HSHK_CFG_RATE 0x0F00
+#define HSHK_CFG_OFFSET 0x001F
+
+/*
+ * LRAM RISC Queue Lists (LRAM addresses 0x1200 - 0x19FF)
+ *
+ * Each of the 255 Adv Library/Microcode RISC queue lists or mailboxes
+ * starting at LRAM address 0x1200 is 8 bytes and has the following
+ * structure. Only 253 of these are actually used for command queues.
+ */
+
+#define ASC_MC_RISC_Q_LIST_BASE 0x1200
+#define ASC_MC_RISC_Q_LIST_SIZE 0x0008
+#define ASC_MC_RISC_Q_TOTAL_CNT 0x00FF /* Num. queue slots in LRAM. */
+#define ASC_MC_RISC_Q_FIRST 0x0001
+#define ASC_MC_RISC_Q_LAST 0x00FF
+
+#define ASC_DEF_MAX_HOST_QNG 0xFD /* Max. number of host commands (253) */
+#define ASC_DEF_MIN_HOST_QNG 0x10 /* Min. number of host commands (16) */
+#define ASC_DEF_MAX_DVC_QNG 0x3F /* Max. number commands per device (63) */
+#define ASC_DEF_MIN_DVC_QNG 0x04 /* Min. number commands per device (4) */
+
+/* RISC Queue List structure - 8 bytes */
+#define RQL_FWD 0 /* forward pointer (1 byte) */
+#define RQL_BWD 1 /* backward pointer (1 byte) */
+#define RQL_STATE 2 /* state byte - free, ready, done, aborted (1 byte) */
+#define RQL_TID 3 /* request target id (1 byte) */
+#define RQL_PHYADDR 4 /* request physical pointer (4 bytes) */
+
+/* RISC Queue List state values */
+#define ASC_MC_QS_FREE 0x00
+#define ASC_MC_QS_READY 0x01
+#define ASC_MC_QS_DONE 0x40
+#define ASC_MC_QS_ABORTED 0x80
+
+/* RISC Queue List pointer values */
+#define ASC_MC_NULL_Q 0x00 /* NULL_Q == 0 */
+#define ASC_MC_BIOS_Q 0xFF /* BIOS_Q = 255 */
+
+/* ASC_SCSI_REQ_Q 'cntl' field values */
+#define ASC_MC_QC_START_MOTOR 0x02 /* Issue start motor. */
+#define ASC_MC_QC_NO_OVERRUN 0x04 /* Don't report overrun. */
+#define ASC_MC_QC_FIRST_DMA 0x08 /* Internal microcode flag. */
+#define ASC_MC_QC_ABORTED 0x10 /* Request aborted by host. */
+#define ASC_MC_QC_REQ_SENSE 0x20 /* Auto-Request Sense. */
+#define ASC_MC_QC_DOS_REQ 0x80 /* Request issued by DOS. */
+
+
+/*
+ * ASC_SCSI_REQ_Q 'a_flag' definitions
+ *
+ * The Adv Library should limit use to the lower nibble (4 bits) of
+ * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag.
+ */
+#define ADV_POLL_REQUEST 0x01 /* poll for request completion */
+#define ADV_SCSIQ_DONE 0x02 /* request done */
+
+/*
+ * Adapter temporary configuration structure
+ *
+ * This structure can be discarded after initialization. Don't add
+ * fields here needed after initialization.
+ *
+ * Field naming convention:
+ *
+ * *_enable indicates the field enables or disables a feature. The
+ * value of the field is never reset.
+ */
+typedef struct adv_dvc_cfg {
+ ushort disc_enable; /* enable disconnection */
+ uchar chip_version; /* chip version */
+ uchar termination; /* Term. Ctrl. bits 6-5 of SCSI_CFG1 register */
+ ushort pci_device_id; /* PCI device code number */
+ ushort lib_version; /* Adv Library version number */
+ ushort control_flag; /* Microcode Control Flag */
+ ushort mcode_date; /* Microcode date */
+ ushort mcode_version; /* Microcode version */
+ ushort pci_slot_info; /* high byte device/function number */
+ /* bits 7-3 device num., bits 2-0 function num. */
+ /* low byte bus num. */
+ ushort bios_boot_wait; /* BIOS boot time delay */
+ ushort serial1; /* EEPROM serial number word 1 */
+ ushort serial2; /* EEPROM serial number word 2 */
+ ushort serial3; /* EEPROM serial number word 3 */
+} ADV_DVC_CFG;
+
+/*
+ * Adapter operation variable structure.
+ *
+ * One structure is required per host adapter.
+ *
+ * Field naming convention:
+ *
+ * *_able indicates both whether a feature should be enabled or disabled
+ * and whether a device is capable of the feature. At initialization
+ * this field may be set, but later if a device is found to be incapable
+ * of the feature, the field is cleared.
+ */
+typedef struct adv_dvc_var {
+ AdvPortAddr iop_base; /* I/O port address */
+ ushort err_code; /* fatal error code */
+ ushort bios_ctrl; /* BIOS control word, EEPROM word 12 */
+ Ptr2Func isr_callback; /* pointer to function, called in AdvISR() */
+ Ptr2Func sbreset_callback; /* pointer to function, called in AdvISR() */
+ ushort wdtr_able; /* try WDTR for a device */
+ ushort sdtr_able; /* try SDTR for a device */
+ ushort ultra_able; /* try SDTR Ultra speed for a device */
+ ushort tagqng_able; /* try tagged queuing with a device */
+ uchar max_dvc_qng; /* maximum number of tagged commands per device */
+ ushort start_motor; /* start motor command allowed */
+ uchar scsi_reset_wait; /* delay in seconds after scsi bus reset */
+ uchar chip_no; /* should be assigned by caller */
+ uchar max_host_qng; /* maximum number of queued commands allowed */
+ uchar cur_host_qng; /* current total of queued commands */
+ uchar irq_no; /* IRQ number */
+ ushort no_scam; /* scam_tolerant of EEPROM */
+ ushort idle_cmd_done; /* microcode idle command done set by AdvISR() */
+ ulong drv_ptr; /* driver pointer to private structure */
+ uchar chip_scsi_id; /* chip SCSI target ID */
+ /*
+ * Note: The following fields will not be used after initialization. The
+ * driver may discard the buffer after initialization is done.
+ */
+ ADV_DVC_CFG *cfg; /* temporary configuration structure */
+} ADV_DVC_VAR;
+
+#define NO_OF_SG_PER_BLOCK 15
+
+typedef struct asc_sg_block {
+ uchar reserved1;
+ uchar reserved2;
+ uchar first_entry_no; /* starting entry number */
+ uchar last_entry_no; /* last entry number */
+ struct asc_sg_block *sg_ptr; /* links to the next sg block */
+ struct {
+ ulong sg_addr; /* SG element address */
+ ulong sg_count; /* SG element count */
+ } sg_list[NO_OF_SG_PER_BLOCK];
+} ADV_SG_BLOCK;
+
+/*
+ * ASC_SCSI_REQ_Q - microcode request structure
+ *
+ * All fields in this structure up to byte 60 are used by the microcode.
+ * The microcode makes assumptions about the size and ordering of fields
+ * in this structure. Do not change the structure definition here without
+ * coordinating the change with the microcode.
+ */
+typedef struct adv_scsi_req_q {
+ uchar cntl; /* Ucode flags and state (ASC_MC_QC_*). */
+ uchar sg_entry_cnt; /* SG element count. Zero for no SG. */
+ uchar target_id; /* Device target identifier. */
+ uchar target_lun; /* Device target logical unit number. */
+ ulong data_addr; /* Data buffer physical address. */
+ ulong data_cnt; /* Data count. Ucode sets to residual. */
+ ulong sense_addr; /* Sense buffer physical address. */
+ ulong srb_ptr; /* Driver request pointer. */
+ uchar a_flag; /* Adv Library flag field. */
+ uchar sense_len; /* Auto-sense length. Ucode sets to residual. */
+ uchar cdb_len; /* SCSI CDB length. */
+ uchar tag_code; /* SCSI-2 Tag Queue Code: 00, 20-22. */
+ uchar done_status; /* Completion status. */
+ uchar scsi_status; /* SCSI status byte. */
+ uchar host_status; /* Ucode host status. */
+ uchar ux_sg_ix; /* Ucode working SG variable. */
+ uchar cdb[12]; /* SCSI command block. */
+ ulong sg_real_addr; /* SG list physical address. */
+ struct adv_scsi_req_q *free_scsiq_link;
+ ulong ux_wk_data_cnt; /* Saved data count at disconnection. */
+ struct adv_scsi_req_q *scsiq_ptr;
+ ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */
+ /*
+ * End of microcode structure - 60 bytes. The rest of the structure
+ * is used by the Adv Library and ignored by the microcode.
+ */
+ ulong vsense_addr; /* Sense buffer virtual address. */
+ ulong vdata_addr; /* Data buffer virtual address. */
+ uchar orig_sense_len; /* Original length of sense buffer. */
+} ADV_SCSI_REQ_Q; /* BIOS - 70 bytes, DOS - 76 bytes, W95, WNT - 69 bytes */
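+
+/*
+ * A driver may optionally verify the 60-byte microcode boundary noted
+ * above (hypothetical sanity check, assuming a 32-bit build where
+ * pointers and ulongs are 4 bytes; not part of the Adv Library):
+ *
+ *   ADV_ASSERT(offsetof(ADV_SCSI_REQ_Q, vsense_addr) == 60);
+ */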
+
+/*
+ * Microcode idle loop commands
+ */
+#define IDLE_CMD_COMPLETED 0
+#define IDLE_CMD_STOP_CHIP 0x0001
+#define IDLE_CMD_STOP_CHIP_SEND_INT 0x0002
+#define IDLE_CMD_SEND_INT 0x0004
+#define IDLE_CMD_ABORT 0x0008
+#define IDLE_CMD_DEVICE_RESET 0x0010
+#define IDLE_CMD_SCSI_RESET 0x0020
+
+/*
+ * AdvSendIdleCmd() flag definitions.
+ */
+#define ADV_NOWAIT 0x01
+
+/*
+ * Wait loop time out values.
+ */
+#define SCSI_WAIT_10_SEC 10 /* 10 seconds */
+#define SCSI_MS_PER_SEC 1000 /* milliseconds per second */
+
+/*
+ * Device drivers must define the following functions.
+ */
+STATIC long DvcEnterCritical(void);
+STATIC void DvcLeaveCritical(long);
+STATIC void DvcSleepMilliSecond(ulong);
+STATIC uchar DvcAdvReadPCIConfigByte(ADV_DVC_VAR *, ushort);
+STATIC void DvcAdvWritePCIConfigByte(ADV_DVC_VAR *, ushort, uchar);
+STATIC ulong DvcGetPhyAddr(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *,
+ uchar *, long *, int);
+STATIC void DvcDelayMicroSecond(ADV_DVC_VAR *, ushort);
+
+/*
+ * Adv Library functions available to drivers.
+ */
+STATIC int AdvExeScsiQueue(ADV_DVC_VAR *,
+ ADV_SCSI_REQ_Q *);
+STATIC int AdvISR(ADV_DVC_VAR *);
+STATIC int AdvInitGetConfig(ADV_DVC_VAR *);
+STATIC int AdvInitAsc3550Driver(ADV_DVC_VAR *);
+STATIC int AdvResetSB(ADV_DVC_VAR *);
+
+/*
+ * Internal Adv Library functions.
+ */
+STATIC int AdvSendIdleCmd(ADV_DVC_VAR *, ushort, ulong, int);
+STATIC void AdvResetChip(ADV_DVC_VAR *);
+STATIC int AdvSendScsiCmd(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+STATIC void AdvInquiryHandling(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+STATIC int AdvInitFromEEP(ADV_DVC_VAR *);
+STATIC ushort AdvGetEEPConfig(AdvPortAddr, ADVEEP_CONFIG *);
+STATIC void AdvSetEEPConfig(AdvPortAddr, ADVEEP_CONFIG *);
+STATIC void AdvWaitEEPCmd(AdvPortAddr);
+STATIC ushort AdvReadEEPWord(AdvPortAddr, int);
+STATIC void AdvResetSCSIBus(ADV_DVC_VAR *);
+
+/*
+ * PCI Bus Definitions
+ */
+#define AscPCICmdRegBits_BusMastering 0x0007
+#define AscPCICmdRegBits_ParErrRespCtrl 0x0040
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+
+/* Read byte from a register. */
+#define AdvReadByteRegister(iop_base, reg_off) \
+ (inp((iop_base) + (reg_off)))
+
+/* Write byte to a register. */
+#define AdvWriteByteRegister(iop_base, reg_off, byte) \
+ (outp((iop_base) + (reg_off), (byte)))
+
+/* Read word (2 bytes) from a register. */
+#define AdvReadWordRegister(iop_base, reg_off) \
+ (inpw((iop_base) + (reg_off)))
+
+/* Write word (2 bytes) to a register. */
+#define AdvWriteWordRegister(iop_base, reg_off, word) \
+ (outpw((iop_base) + (reg_off), (word)))
+
+/* Read byte from LRAM. */
+#define AdvReadByteLram(iop_base, addr, byte) \
+do { \
+ outpw((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (byte) = inp((iop_base) + IOPB_RAM_DATA); \
+} while (0)
+
+/* Write byte to LRAM. */
+#define AdvWriteByteLram(iop_base, addr, byte) \
+ (outpw((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ outp((iop_base) + IOPB_RAM_DATA, (byte)))
+
+/* Read word (2 bytes) from LRAM. */
+#define AdvReadWordLram(iop_base, addr, word) \
+do { \
+ outpw((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (word) = inpw((iop_base) + IOPW_RAM_DATA); \
+} while (0)
+
+/* Write word (2 bytes) to LRAM. */
+#define AdvWriteWordLram(iop_base, addr, word) \
+ (outpw((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ outpw((iop_base) + IOPW_RAM_DATA, (word)))
+
+/* Write double word (4 bytes) to LRAM */
+/* Because of unspecified C language ordering, don't use auto-increment. */
+#define AdvWriteDWordLram(iop_base, addr, dword) \
+ ((outpw((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ outpw((iop_base) + IOPW_RAM_DATA, (ushort) ((dword) & 0xFFFF))), \
+ (outpw((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \
+ outpw((iop_base) + IOPW_RAM_DATA, (ushort) ((dword >> 16) & 0xFFFF))))
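+
+/*
+ * Example (illustrative values only): AdvWriteDWordLram(iop_base, 0x40,
+ * 0x12345678) writes 0x5678 to the LRAM word at address 0x40 and 0x1234
+ * to the word at address 0x42, low word first.
+ */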
+
+/* Read word (2 bytes) from LRAM assuming that the address is already set. */
+#define AdvReadWordAutoIncLram(iop_base) \
+ (inpw((iop_base) + IOPW_RAM_DATA))
+
+/* Write word (2 bytes) to LRAM assuming that the address is already set. */
+#define AdvWriteWordAutoIncLram(iop_base, word) \
+ (outpw((iop_base) + IOPW_RAM_DATA, (word)))
+
+#else /* version >= v1,3,0 */
+
+/* Read byte from a register. */
+#define AdvReadByteRegister(iop_base, reg_off) \
+ (ADV_MEM_READB((iop_base) + (reg_off)))
+
+/* Write byte to a register. */
+#define AdvWriteByteRegister(iop_base, reg_off, byte) \
+ (ADV_MEM_WRITEB((iop_base) + (reg_off), (byte)))
+
+/* Read word (2 bytes) from a register. */
+#define AdvReadWordRegister(iop_base, reg_off) \
+ (ADV_MEM_READW((iop_base) + (reg_off)))
+
+/* Write word (2 bytes) to a register. */
+#define AdvWriteWordRegister(iop_base, reg_off, word) \
+ (ADV_MEM_WRITEW((iop_base) + (reg_off), (word)))
+
+/* Read byte from LRAM. */
+#define AdvReadByteLram(iop_base, addr, byte) \
+do { \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (byte) = ADV_MEM_READB((iop_base) + IOPB_RAM_DATA); \
+} while (0)
+
+/* Write byte to LRAM. */
+#define AdvWriteByteLram(iop_base, addr, byte) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEB((iop_base) + IOPB_RAM_DATA, (byte)))
+
+/* Read word (2 bytes) from LRAM. */
+#define AdvReadWordLram(iop_base, addr, word) \
+do { \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)); \
+ (word) = ADV_MEM_READW((iop_base) + IOPW_RAM_DATA); \
+} while (0)
+
+/* Write word (2 bytes) to LRAM. */
+#define AdvWriteWordLram(iop_base, addr, word) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))
+
+/* Write double word (4 bytes) to LRAM */
+/* Because of unspecified C language ordering, don't use auto-increment. */
+#define AdvWriteDWordLram(iop_base, addr, dword) \
+ ((ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr)), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
+ (ushort) ((dword) & 0xFFFF))), \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_ADDR, (addr) + 2), \
+ ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, \
+ (ushort) ((dword >> 16) & 0xFFFF))))
+
+/* Read word (2 bytes) from LRAM assuming that the address is already set. */
+#define AdvReadWordAutoIncLram(iop_base) \
+ (ADV_MEM_READW((iop_base) + IOPW_RAM_DATA))
+
+/* Write word (2 bytes) to LRAM assuming that the address is already set. */
+#define AdvWriteWordAutoIncLram(iop_base, word) \
+ (ADV_MEM_WRITEW((iop_base) + IOPW_RAM_DATA, (word)))
+
+#endif /* version >= v1,3,0 */
+
+/*
+ * Define macro to check for Condor signature.
+ *
+ * Evaluates to ADV_TRUE if a Condor chip is found at the specified port
+ * address 'iop_base'. Otherwise evaluates to ADV_FALSE.
+ */
+#define AdvFindSignature(iop_base) \
+ (((AdvReadByteRegister((iop_base), IOPB_CHIP_ID_1) == \
+ ADV_CHIP_ID_BYTE) && \
+ (AdvReadWordRegister((iop_base), IOPW_CHIP_ID_0) == \
+ ADV_CHIP_ID_WORD)) ? ADV_TRUE : ADV_FALSE)
+
+/*
+ * Define macro to return the version number of the chip at 'iop_base'.
+ *
+ * The second parameter 'bus_type' is currently unused.
+ */
+#define AdvGetChipVersion(iop_base, bus_type) \
+ AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV)
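+
+/*
+ * Usage sketch (hypothetical probe code, not part of the driver):
+ *
+ *   if (AdvFindSignature(iop_base) == ADV_TRUE) {
+ *       uchar rev = AdvGetChipVersion(iop_base, bus_type);
+ *       ...
+ *   }
+ */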
+
+/*
+ * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must
+ * match the ASC_SCSI_REQ_Q 'srb_ptr' field.
+ *
+ * If the request has not yet been sent to the device it will simply be
+ * aborted from RISC memory. If the request is disconnected it will be
+ * aborted on reselection by sending an Abort Message to the target ID.
+ *
+ * Return value:
+ * ADV_TRUE(1) - Queue was successfully aborted.
+ * ADV_FALSE(0) - Queue was not found on the active queue list.
+ */
+#define AdvAbortSRB(asc_dvc, srb_ptr) \
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \
+ (ulong) (srb_ptr), 0)
+
+/*
+ * Send a Bus Device Reset Message to the specified target ID.
+ *
+ * All outstanding commands will be purged if sending the
+ * Bus Device Reset Message is successful.
+ *
+ * Return Value:
+ * ADV_TRUE(1) - All requests on the target are purged.
+ * ADV_FALSE(0) - Couldn't issue Bus Device Reset Message; Requests
+ * are not purged.
+ */
+#define AdvResetDevice(asc_dvc, target_id) \
+ AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \
+ (ulong) (target_id), 0)
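+
+/*
+ * Usage sketch (hypothetical error-recovery path; 'srb_ptr' and 'tid'
+ * are placeholder names, not driver code):
+ *
+ *   if (AdvAbortSRB(adv_dvc_varp, (ulong) srb_ptr) == ADV_FALSE) {
+ *       (void) AdvResetDevice(adv_dvc_varp, tid);
+ *   }
+ *
+ * Both macros expand to AdvSendIdleCmd() with a flags argument of 0,
+ * i.e. without ADV_NOWAIT.
+ */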
+
+/*
+ * SCSI Wide Type definition.
+ */
+#define ADV_SCSI_BIT_ID_TYPE ushort
+
+/*
+ * AdvInitScsiTarget() 'cntl_flag' options.
+ */
+#define ADV_SCAN_LUN 0x01
+#define ADV_CAPINFO_NOLUN 0x02
+
+/*
+ * Convert target id to target id bit mask.
+ */
+#define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID))
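+
+/*
+ * Example (illustrative only, assuming ADV_MAX_TID is 15, as on wide
+ * boards): for target id 5,
+ *   ADV_TID_TO_TIDMASK(5) == (0x01 << (5 & ADV_MAX_TID)) == 0x0020,
+ * i.e. bit 5 of an ADV_SCSI_BIT_ID_TYPE mask such as 'init_tidmask'.
+ */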
+
+/*
+ * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values.
+ */
+
+#define QD_NO_STATUS 0x00 /* Request not completed yet. */
+#define QD_NO_ERROR 0x01
+#define QD_ABORTED_BY_HOST 0x02
+#define QD_WITH_ERROR 0x04
+
+#define QHSTA_NO_ERROR 0x00
+#define QHSTA_M_SEL_TIMEOUT 0x11
+#define QHSTA_M_DATA_OVER_RUN 0x12
+#define QHSTA_M_UNEXPECTED_BUS_FREE 0x13
+#define QHSTA_M_QUEUE_ABORTED 0x15
+#define QHSTA_M_SXFR_SDMA_ERR 0x16 /* SXFR_STATUS SCSI DMA Error */
+#define QHSTA_M_SXFR_SXFR_PERR 0x17 /* SXFR_STATUS SCSI Bus Parity Error */
+#define QHSTA_M_RDMA_PERR 0x18 /* RISC PCI DMA parity error */
+#define QHSTA_M_SXFR_OFF_UFLW 0x19 /* SXFR_STATUS Offset Underflow */
+#define QHSTA_M_SXFR_OFF_OFLW 0x20 /* SXFR_STATUS Offset Overflow */
+#define QHSTA_M_SXFR_WD_TMO 0x21 /* SXFR_STATUS Watchdog Timeout */
+#define QHSTA_M_SXFR_DESELECTED 0x22 /* SXFR_STATUS Deselected */
+/* Note: QHSTA_M_SXFR_XFR_OFLW is identical to QHSTA_M_DATA_OVER_RUN. */
+#define QHSTA_M_SXFR_XFR_OFLW 0x12 /* SXFR_STATUS Transfer Overflow */
+#define QHSTA_M_SXFR_XFR_PH_ERR 0x24 /* SXFR_STATUS Transfer Phase Error */
+#define QHSTA_M_SXFR_UNKNOWN_ERROR 0x25 /* SXFR_STATUS Unknown Error */
+#define QHSTA_M_WTM_TIMEOUT 0x41
+#define QHSTA_M_BAD_CMPL_STATUS_IN 0x42
+#define QHSTA_M_NO_AUTO_REQ_SENSE 0x43
+#define QHSTA_M_AUTO_REQ_SENSE_FAIL 0x44
+#define QHSTA_M_INVALID_DEVICE 0x45 /* Bad target ID */
+
+typedef int (* ADV_ISR_CALLBACK)
+ (ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+
+typedef int (* ADV_SBRESET_CALLBACK)
+ (ADV_DVC_VAR *);
+
+/*
+ * Default EEPROM Configuration structure defined in a_init.c.
+ */
+STATIC ADVEEP_CONFIG Default_EEPROM_Config;
+
+/*
+ * DvcGetPhyAddr() flag arguments
+ */
+#define ADV_IS_SCSIQ_FLAG 0x01 /* 'addr' is ASC_SCSI_REQ_Q pointer */
+#define ADV_ASCGETSGLIST_VADDR 0x02 /* 'addr' is AscGetSGList() virtual addr */
+#define ADV_IS_SENSE_FLAG 0x04 /* 'addr' is sense virtual pointer */
+#define ADV_IS_DATA_FLAG 0x08 /* 'addr' is data virtual pointer */
+#define ADV_IS_SGLIST_FLAG 0x10 /* 'addr' is sglist virtual pointer */
+
+/* Return the address aligned to the next doubleword boundary >= 'addr'. */
+#define ADV_DWALIGN(addr) (((ulong) (addr) + 0x3) & ~0x3)
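+
+/*
+ * Example (illustrative only): ADV_DWALIGN(0x1001) == 0x1004, while
+ * ADV_DWALIGN(0x1004) == 0x1004; addresses already on a doubleword
+ * boundary are returned unchanged.
+ */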
+
+/*
+ * Total contiguous memory needed for driver SG blocks.
+ *
+ * ADV_MAX_SG_LIST must be defined by a driver. It is the maximum
+ * number of scatter-gather elements the driver supports in a
+ * single request.
+ */
+
+#ifndef ADV_MAX_SG_LIST
+Forced Error: Driver must define ADV_MAX_SG_LIST.
+#endif /* ADV_MAX_SG_LIST */
+
+#define ADV_SG_LIST_MAX_BYTE_SIZE \
+ (sizeof(ADV_SG_BLOCK) * \
+ ((ADV_MAX_SG_LIST + (NO_OF_SG_PER_BLOCK - 1))/NO_OF_SG_PER_BLOCK))
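+
+/*
+ * Worked example (assuming a driver that defines ADV_MAX_SG_LIST as 64):
+ * 64 elements at NO_OF_SG_PER_BLOCK (15) per block require
+ * (64 + 14)/15 = 5 ADV_SG_BLOCK structures, so
+ * ADV_SG_LIST_MAX_BYTE_SIZE evaluates to 5 * sizeof(ADV_SG_BLOCK).
+ */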
+
+/*
+ * A driver may optionally define the assertion macro ADV_ASSERT() in
+ * its d_os_dep.h file. If the macro has not already been defined,
+ * then define the macro to a no-op.
+ */
+#ifndef ADV_ASSERT
+#define ADV_ASSERT(a)
+#endif /* ADV_ASSERT */
+
+
+/*
+ * --- Driver Constants and Macros
+ */
+
+#define ASC_NUM_BOARD_SUPPORTED 16
+#define ASC_NUM_IOPORT_PROBE 4
+#define ASC_NUM_BUS 4
+
+/* Reference Scsi_Host hostdata */
+#define ASC_BOARDP(host) ((asc_board_t *) &((host)->hostdata))
+
+/* asc_board_t flags */
+#define ASC_HOST_IN_RESET 0x01
+#define ASC_HOST_IN_ABORT 0x02
+#define ASC_IS_WIDE_BOARD 0x04 /* AdvanSys Wide Board */
+#define ASC_SELECT_QUEUE_DEPTHS 0x08
+
+#define ASC_NARROW_BOARD(boardp) (((boardp)->flags & ASC_IS_WIDE_BOARD) == 0)
+#define ASC_WIDE_BOARD(boardp) ((boardp)->flags & ASC_IS_WIDE_BOARD)
+
+#define NO_ISA_DMA 0xff /* No ISA DMA Channel Used */
+
+/*
+ * If the Linux kernel version supports freeing initialization code
+ * and data after loading, define macros for this purpose. These macros
+ * are not used when the driver is built as a module, cf. linux/init.h.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,23)
+#define ASC_INITFUNC(func) func
+#define ASC_INITDATA
+#define ASC_INIT
+#else /* version >= v2.1.23 */
+#define ASC_INITFUNC(func) __initfunc(func)
+#define ASC_INITDATA __initdata
+#define ASC_INIT __init
+#endif /* version >= v2.1.23 */
+
+#define ASC_INFO_SIZE 128 /* advansys_info() line size */
+
+/* /proc/scsi/advansys/[0...] related definitions */
+#define ASC_PRTBUF_SIZE 2048
+#define ASC_PRTLINE_SIZE 160
+
+#define ASC_PRT_NEXT() \
+ if (cp) { \
+ totlen += len; \
+ leftlen -= len; \
+ if (leftlen == 0) { \
+ return totlen; \
+ } \
+ cp += len; \
+ }
+
+#define ASC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+/* Asc Library return codes */
+#define ASC_TRUE 1
+#define ASC_FALSE 0
+#define ASC_NOERROR 1
+#define ASC_BUSY 0
+#define ASC_ERROR (-1)
+
+/* Scsi_Cmnd function return codes */
+#define STATUS_BYTE(byte) (byte)
+#define MSG_BYTE(byte) ((byte) << 8)
+#define HOST_BYTE(byte) ((byte) << 16)
+#define DRIVER_BYTE(byte) ((byte) << 24)
+
+/*
+ * The following definitions and macros are OS independent interfaces to
+ * the queue functions:
+ * REQ - SCSI request structure
+ * REQP - pointer to SCSI request structure
+ * REQPTID(reqp) - reqp's target id
+ * REQPNEXT(reqp) - reqp's next pointer
+ * REQPNEXTP(reqp) - pointer to reqp's next pointer
+ * REQPTIME(reqp) - reqp's time stamp value
+ * REQTIMESTAMP() - system time stamp value
+ */
+typedef Scsi_Cmnd REQ, *REQP;
+#define REQPNEXT(reqp) ((reqp)->host_scribble)
+#define REQPNEXTP(reqp) ((REQP *) &((reqp)->host_scribble))
+#define REQPTID(reqp) ((reqp)->target)
+#define REQPTIME(reqp) ((reqp)->SCp.this_residual)
+#define REQTIMESTAMP() (jiffies)
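+
+/*
+ * Example (illustrative only, assuming an asc_queue_t pointer 'ascq'
+ * and a target id 'tid'): walking a request list built with these
+ * macros.
+ *
+ *   REQP reqp;
+ *   for (reqp = ascq->q_first[tid]; reqp != NULL;
+ *        reqp = (REQP) REQPNEXT(reqp)) {
+ *       ...
+ *   }
+ */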
+
+#define REQTIMESTAT(function, ascq, reqp, tid) \
+{ \
+ /*
+     * If the request time stamp is greater than the system time stamp, \
+     * then the system time stamp may have wrapped. Set the request time \
+     * to zero. \
+ */ \
+ if (REQPTIME(reqp) <= REQTIMESTAMP()) { \
+ REQPTIME(reqp) = REQTIMESTAMP() - REQPTIME(reqp); \
+ } else { \
+ /* Indicate an error occurred with the assertion. */ \
+ ASC_ASSERT(REQPTIME(reqp) <= REQTIMESTAMP()); \
+ REQPTIME(reqp) = 0; \
+ } \
+ /* Handle first minimum time case without external initialization. */ \
+ if (((ascq)->q_tot_cnt[tid] == 1) || \
+ (REQPTIME(reqp) < (ascq)->q_min_tim[tid])) { \
+ (ascq)->q_min_tim[tid] = REQPTIME(reqp); \
+ ASC_DBG3(1, "%s: new q_min_tim[%d] %u\n", \
+ (function), (tid), (ascq)->q_min_tim[tid]); \
+ } \
+ if (REQPTIME(reqp) > (ascq)->q_max_tim[tid]) { \
+ (ascq)->q_max_tim[tid] = REQPTIME(reqp); \
+ ASC_DBG3(1, "%s: new q_max_tim[%d] %u\n", \
+ (function), tid, (ascq)->q_max_tim[tid]); \
+ } \
+ (ascq)->q_tot_tim[tid] += REQPTIME(reqp); \
+ /* Reset the time stamp field. */ \
+ REQPTIME(reqp) = 0; \
+}
+
+/* asc_enqueue() flags */
+#define ASC_FRONT 1
+#define ASC_BACK 2
+
+/* asc_dequeue_list() argument */
+#define ASC_TID_ALL (-1)
+
+/* Return non-zero if the queue is empty. */
+#define ASC_QUEUE_EMPTY(ascq) ((ascq)->q_tidmask == 0)
+
+/* PCI configuration declarations */
+
+#define PCI_BASE_CLASS_PREDEFINED 0x00
+#define PCI_BASE_CLASS_MASS_STORAGE 0x01
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_BASE_CLASS_MEMORY_CONTROLLER 0x05
+#define PCI_BASE_CLASS_BRIDGE_DEVICE 0x06
+
+/* MASS STORAGE */
+#define PCI_SUB_CLASS_SCSI_CONTROLLER 0x00
+#define PCI_SUB_CLASS_IDE_CONTROLLER 0x01
+#define PCI_SUB_CLASS_FLOPPY_DISK_CONTROLLER 0x02
+#define PCI_SUB_CLASS_IPI_BUS_CONTROLLER 0x03
+#define PCI_SUB_CLASS_OTHER_MASS_CONTROLLER 0x80
+
+/* NETWORK CONTROLLER */
+#define PCI_SUB_CLASS_ETHERNET_CONTROLLER 0x00
+#define PCI_SUB_CLASS_TOKEN_RING_CONTROLLER 0x01
+#define PCI_SUB_CLASS_FDDI_CONTROLLER 0x02
+#define PCI_SUB_CLASS_OTHER_NETWORK_CONTROLLER 0x80
+
+/* DISPLAY CONTROLLER */
+#define PCI_SUB_CLASS_VGA_CONTROLLER 0x00
+#define PCI_SUB_CLASS_XGA_CONTROLLER 0x01
+#define PCI_SUB_CLASS_OTHER_DISPLAY_CONTROLLER 0x80
+
+/* MULTIMEDIA CONTROLLER */
+#define PCI_SUB_CLASS_VIDEO_DEVICE 0x00
+#define PCI_SUB_CLASS_AUDIO_DEVICE 0x01
+#define PCI_SUB_CLASS_OTHER_MULTIMEDIA_DEVICE 0x80
+
+/* MEMORY CONTROLLER */
+#define PCI_SUB_CLASS_RAM_CONTROLLER 0x00
+#define PCI_SUB_CLASS_FLASH_CONTROLLER 0x01
+#define PCI_SUB_CLASS_OTHER_MEMORY_CONTROLLER 0x80
+
+/* BRIDGE CONTROLLER */
+#define PCI_SUB_CLASS_HOST_BRIDGE_CONTROLLER 0x00
+#define PCI_SUB_CLASS_ISA_BRIDGE_CONTROLLER 0x01
+#define PCI_SUB_CLASS_EISA_BRIDGE_CONTROLLER 0x02
+#define PCI_SUB_CLASS_MC_BRIDGE_CONTROLLER 0x03
+#define PCI_SUB_CLASS_PCI_TO_PCI_BRIDGE_CONTROLLER 0x04
+#define PCI_SUB_CLASS_PCMCIA_BRIDGE_CONTROLLER 0x05
+#define PCI_SUB_CLASS_OTHER_BRIDGE_CONTROLLER 0x80
+
+#define PCI_MAX_SLOT 0x1F
+#define PCI_MAX_BUS 0xFF
+#define PCI_IOADDRESS_MASK 0xFFFE
+#define ASC_PCI_VENDORID 0x10CD
+#define ASC_PCI_DEVICE_ID_CNT 4 /* PCI Device ID count. */
+#define ASC_PCI_DEVICE_ID_1100 0x1100
+#define ASC_PCI_DEVICE_ID_1200 0x1200
+#define ASC_PCI_DEVICE_ID_1300 0x1300
+#define ASC_PCI_DEVICE_ID_2300 0x2300
+
+/* PCI IO Port Addresses to generate special cycle */
+
+#define PCI_CONFIG_ADDRESS_MECH1 0x0CF8
+#define PCI_CONFIG_DATA_MECH1 0x0CFC
+
+#define PCI_CONFIG_FORWARD_REGISTER 0x0CFA /* 0=type 0; 1=type 1; */
+
+#define PCI_CONFIG_BUS_NUMBER_MASK 0x00FF0000
+#define PCI_CONFIG_DEVICE_FUNCTION_MASK 0x0000FF00
+#define PCI_CONFIG_REGISTER_NUMBER_MASK 0x000000F8
+
+#define PCI_DEVICE_FOUND 0x0000
+#define PCI_DEVICE_NOT_FOUND 0xffff
+
+#define SUBCLASS_OFFSET 0x0A
+#define CLASSCODE_OFFSET 0x0B
+#define VENDORID_OFFSET 0x00
+#define DEVICEID_OFFSET 0x02
+
+#ifndef ADVANSYS_STATS
+#define ASC_STATS(shp, counter)
+#define ASC_STATS_ADD(shp, counter, count)
+#else /* ADVANSYS_STATS */
+#define ASC_STATS(shp, counter) \
+ (ASC_BOARDP(shp)->asc_stats.counter++)
+
+#define ASC_STATS_ADD(shp, counter, count) \
+ (ASC_BOARDP(shp)->asc_stats.counter += (count))
+#endif /* ADVANSYS_STATS */
+
+#define ASC_CEILING(val, unit) (((val) + ((unit) - 1))/(unit))
+
+/* If the result wraps when calculating tenths, return 0. */
+#define ASC_TENTHS(num, den) \
+ (((10 * ((num)/(den))) > (((num) * 10)/(den))) ? \
+ 0 : ((((num) * 10)/(den)) - (10 * ((num)/(den)))))
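+
+/*
+ * Example (illustrative only): with num = 7 and den = 2,
+ *   ASC_TENTHS(7, 2) == ((7 * 10)/2) - (10 * (7/2)) == 35 - 30 == 5,
+ * so combined with the integer quotient 7/2 == 3 the value prints
+ * as "3.5".
+ */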
+
+/*
+ * Display a message to the console.
+ */
+#define ASC_PRINT(s) \
+ { \
+ printk("advansys: "); \
+ printk(s); \
+ }
+
+#define ASC_PRINT1(s, a1) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1)); \
+ }
+
+#define ASC_PRINT2(s, a1, a2) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2)); \
+ }
+
+#define ASC_PRINT3(s, a1, a2, a3) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2), (a3)); \
+ }
+
+#define ASC_PRINT4(s, a1, a2, a3, a4) \
+ { \
+ printk("advansys: "); \
+ printk((s), (a1), (a2), (a3), (a4)); \
+ }
+
+
+#ifndef ADVANSYS_DEBUG
+
+#define ASC_DBG(lvl, s)
+#define ASC_DBG1(lvl, s, a1)
+#define ASC_DBG2(lvl, s, a1, a2)
+#define ASC_DBG3(lvl, s, a1, a2, a3)
+#define ASC_DBG4(lvl, s, a1, a2, a3, a4)
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s)
+#define ASC_DBG_PRT_SCSI_CMND(lvl, s)
+#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone)
+#define ADV_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp)
+#define ASC_DBG_PRT_HEX(lvl, name, start, length)
+#define ASC_DBG_PRT_CDB(lvl, cdb, len)
+#define ASC_DBG_PRT_SENSE(lvl, sense, len)
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len)
+
+#else /* ADVANSYS_DEBUG */
+
+/*
+ * Debugging Message Levels:
+ * 0: Errors Only
+ * 1: High-Level Tracing
+ * 2-N: Verbose Tracing
+ */
+
+#define ASC_DBG(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk(s); \
+ } \
+ }
+
+#define ASC_DBG1(lvl, s, a1) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1)); \
+ } \
+ }
+
+#define ASC_DBG2(lvl, s, a1, a2) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2)); \
+ } \
+ }
+
+#define ASC_DBG3(lvl, s, a1, a2, a3) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2), (a3)); \
+ } \
+ }
+
+#define ASC_DBG4(lvl, s, a1, a2, a3, a4) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ printk((s), (a1), (a2), (a3), (a4)); \
+ } \
+ }
+
+#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_scsi_host(s); \
+ } \
+ }
+
+#define ASC_DBG_PRT_SCSI_CMND(lvl, s) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_scsi_cmnd(s); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ASC_SCSI_Q(lvl, scsiqp) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_asc_scsi_q(scsiqp); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ASC_QDONE_INFO(lvl, qdone) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_asc_qdone_info(qdone); \
+ } \
+ }
+
+#define ASC_DBG_PRT_ADV_SCSI_REQ_Q(lvl, scsiqp) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_adv_scsi_req_q(scsiqp); \
+ } \
+ }
+
+#define ASC_DBG_PRT_HEX(lvl, name, start, length) \
+ { \
+ if (asc_dbglvl >= (lvl)) { \
+ asc_prt_hex((name), (start), (length)); \
+ } \
+ }
+
+#define ASC_DBG_PRT_CDB(lvl, cdb, len) \
+ ASC_DBG_PRT_HEX((lvl), "CDB", (uchar *) (cdb), (len));
+
+#define ASC_DBG_PRT_SENSE(lvl, sense, len) \
+ ASC_DBG_PRT_HEX((lvl), "SENSE", (uchar *) (sense), (len));
+
+#define ASC_DBG_PRT_INQUIRY(lvl, inq, len) \
+ ASC_DBG_PRT_HEX((lvl), "INQUIRY", (uchar *) (inq), (len));
+#endif /* ADVANSYS_DEBUG */
+
+#ifndef ADVANSYS_ASSERT
+#define ASC_ASSERT(a)
+#else /* ADVANSYS_ASSERT */
+
+#define ASC_ASSERT(a) \
+ { \
+ if (!(a)) { \
+ printk("ASC_ASSERT() Failure: file %s, line %d\n", \
+ __FILE__, __LINE__); \
+ } \
+ }
+
+#endif /* ADVANSYS_ASSERT */
+
+
+/*
+ * --- Driver Structures
+ */
+
+#ifdef ADVANSYS_STATS
+
+/* Per board statistics structure */
+struct asc_stats {
+ /* Driver Entrypoint Statistics */
+ ulong command; /* # calls to advansys_command() */
+ ulong queuecommand; /* # calls to advansys_queuecommand() */
+ ulong abort; /* # calls to advansys_abort() */
+ ulong reset; /* # calls to advansys_reset() */
+ ulong biosparam; /* # calls to advansys_biosparam() */
+ ulong interrupt; /* # advansys_interrupt() calls */
+ ulong callback; /* # calls to asc/adv_isr_callback() */
+ ulong done; /* # calls to request's scsi_done function */
+ ulong build_error; /* # asc/adv_build_req() ASC_ERROR returns. */
+ ulong adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */
+ ulong adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */
+ /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */
+ ulong exe_noerror; /* # ASC_NOERROR returns. */
+ ulong exe_busy; /* # ASC_BUSY returns. */
+ ulong exe_error; /* # ASC_ERROR returns. */
+ ulong exe_unknown; /* # unknown returns. */
+ /* Data Transfer Statistics */
+ ulong cont_cnt; /* # non-scatter-gather I/O requests received */
+ ulong cont_xfer; /* # contiguous transfer 512-bytes */
+ ulong sg_cnt; /* # scatter-gather I/O requests received */
+ ulong sg_elem; /* # scatter-gather elements */
+ ulong sg_xfer; /* # scatter-gather transfer 512-bytes */
+};
+#endif /* ADVANSYS_STATS */
+
+/*
+ * Request queuing structure
+ */
+typedef struct asc_queue {
+ ADV_SCSI_BIT_ID_TYPE q_tidmask; /* queue mask */
+ REQP q_first[ADV_MAX_TID+1]; /* first queued request */
+ REQP q_last[ADV_MAX_TID+1]; /* last queued request */
+#ifdef ADVANSYS_STATS
+ short q_cur_cnt[ADV_MAX_TID+1]; /* current queue count */
+ short q_max_cnt[ADV_MAX_TID+1]; /* maximum queue count */
+ ulong q_tot_cnt[ADV_MAX_TID+1]; /* total enqueue count */
+ ulong q_tot_tim[ADV_MAX_TID+1]; /* total time queued */
+ ushort q_max_tim[ADV_MAX_TID+1]; /* maximum time queued */
+ ushort q_min_tim[ADV_MAX_TID+1]; /* minimum time queued */
+#endif /* ADVANSYS_STATS */
+} asc_queue_t;
+
+/*
+ * Adv Library Request Structures
+ *
+ * The following two structures are used to process Wide Board requests.
+ * One structure is needed for each command received from the Mid-Level SCSI
+ * driver.
+ *
+ * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library
+ * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the
+ * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the
+ * Mid-Level SCSI request structure.
+ *
+ * The adv_sgblk_t structure is used to handle requests that include
+ * scatter-gather elements.
+ */
+typedef struct adv_sgblk {
+ ADV_SG_BLOCK sg_block[ADV_NUM_SG_BLOCK + ADV_NUM_PAGE_CROSSING];
+ uchar align2[4]; /* Sgblock structure padding. */
+ struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */
+} adv_sgblk_t;
+
+typedef struct adv_req {
+ ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */
+ uchar align1[4]; /* Request structure padding. */
+ Scsi_Cmnd *cmndp; /* Mid-Level SCSI command pointer. */
+ adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */
+ struct adv_req *next_reqp; /* Next Request Structure. */
+} adv_req_t;
+
+/*
+ * Structure allocated for each board.
+ *
+ * This structure is allocated by scsi_register() at the end
+ * of the 'Scsi_Host' structure starting at the 'hostdata'
+ * field. It is guaranteed to be allocated from DMA-able memory.
+ */
+typedef struct asc_board {
+ int id; /* Board Id */
+ uint flags; /* Board flags */
+ union {
+ ASC_DVC_VAR asc_dvc_var; /* Narrow board */
+ ADV_DVC_VAR adv_dvc_var; /* Wide board */
+ } dvc_var;
+ union {
+ ASC_DVC_CFG asc_dvc_cfg; /* Narrow board */
+ ADV_DVC_CFG adv_dvc_cfg; /* Wide board */
+ } dvc_cfg;
+ asc_queue_t active; /* Active command queue */
+ asc_queue_t waiting; /* Waiting command queue */
+ asc_queue_t done; /* Done command queue */
+ ADV_SCSI_BIT_ID_TYPE init_tidmask; /* Target init./valid mask */
+ Scsi_Device *device[ADV_MAX_TID+1]; /* Mid-Level Scsi Device */
+ ushort reqcnt[ADV_MAX_TID+1]; /* Starvation request count */
+#if ASC_QUEUE_FLOW_CONTROL
+ ushort nerrcnt[ADV_MAX_TID+1]; /* No error request count */
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ ADV_SCSI_BIT_ID_TYPE queue_full; /* Queue full mask */
+ ushort queue_full_cnt[ADV_MAX_TID+1]; /* Queue full count */
+ union {
+ ASCEEP_CONFIG asc_eep; /* Narrow EEPROM config. */
+ ADVEEP_CONFIG adv_eep; /* Wide EEPROM config. */
+ } eep_config;
+ ulong last_reset; /* Saved last reset time */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ /* /proc/scsi/advansys/[0...] */
+ char *prtbuf; /* Statistics Print Buffer */
+#endif /* version >= v1.3.0 */
+#ifdef ADVANSYS_STATS
+ struct asc_stats asc_stats; /* Board statistics */
+#endif /* ADVANSYS_STATS */
+ /*
+ * The following fields are used only for Narrow Boards.
+ */
+ /* The following three structures must be in DMA-able memory. */
+ ASC_SCSI_REQ_Q scsireqq;
+ ASC_CAP_INFO cap_info;
+ ASC_SCSI_INQUIRY inquiry;
+ uchar sdtr_data[ASC_MAX_TID+1]; /* SDTR information */
+ /*
+ * The following fields are used only for Wide Boards.
+ */
+ void *ioremap_addr; /* I/O Memory remap address. */
+ ushort ioport; /* I/O Port address. */
+ adv_req_t *orig_reqp; /* adv_req_t memory block. */
+ adv_req_t *adv_reqp; /* Request structures. */
+ adv_sgblk_t *orig_sgblkp; /* adv_sgblk_t memory block. */
+ adv_sgblk_t *adv_sgblkp; /* Scatter-gather structures. */
+ ushort bios_signature; /* BIOS Signature. */
+ ushort bios_version; /* BIOS Version. */
+ ushort bios_codeseg; /* BIOS Code Segment. */
+ ushort bios_codelen; /* BIOS Code Segment Length. */
+} asc_board_t;
+
+/*
+ * PCI configuration structures
+ */
+typedef struct _PCI_DATA_
+{
+ uchar type;
+ uchar bus;
+ uchar slot;
+ uchar func;
+ uchar offset;
+} PCI_DATA;
+
+typedef struct _PCI_DEVICE_
+{
+ ushort vendorID;
+ ushort deviceID;
+ ushort slotNumber;
+ ushort slotFound;
+ uchar busNumber;
+ uchar maxBusNumber;
+ uchar devFunc;
+ ushort startSlot;
+ ushort endSlot;
+ uchar bridge;
+ uchar type;
+} PCI_DEVICE;
+
+typedef struct _PCI_CONFIG_SPACE_
+{
+ ushort vendorID;
+ ushort deviceID;
+ ushort command;
+ ushort status;
+ uchar revision;
+ uchar classCode[3];
+ uchar cacheSize;
+ uchar latencyTimer;
+ uchar headerType;
+ uchar bist;
+ ulong baseAddress[6];
+ ushort reserved[4];
+ ulong optionRomAddr;
+ ushort reserved2[4];
+ uchar irqLine;
+ uchar irqPin;
+ uchar minGnt;
+ uchar maxLatency;
+} PCI_CONFIG_SPACE;
+
+
+/*
+ * --- Driver Data
+ */
+
+/* Note: All driver global data should be initialized. */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+struct proc_dir_entry proc_scsi_advansys =
+{
+ PROC_SCSI_ADVANSYS, /* unsigned short low_ino */
+ 8, /* unsigned short namelen */
+ "advansys", /* const char *name */
+ S_IFDIR | S_IRUGO | S_IXUGO, /* mode_t mode */
+ 2 /* nlink_t nlink */
+};
+#endif /* version >= v1.3.0 */
+
+/* Number of boards detected in system. */
+STATIC int asc_board_count = 0;
+STATIC struct Scsi_Host *asc_host[ASC_NUM_BOARD_SUPPORTED] = { 0 };
+
+/* Overrun buffer shared between all boards. */
+STATIC uchar overrun_buf[ASC_OVERRUN_BSIZE] = { 0 };
+
+/*
+ * Global structures required to issue a command.
+ */
+STATIC ASC_SCSI_Q asc_scsi_q = { { 0 } };
+STATIC ASC_SG_HEAD asc_sg_head = { 0 };
+
+/* List of supported bus types. */
+STATIC ushort asc_bus[ASC_NUM_BUS] ASC_INITDATA = {
+ ASC_IS_ISA,
+ ASC_IS_VL,
+ ASC_IS_EISA,
+ ASC_IS_PCI,
+};
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+STATIC int pci_scan_method ASC_INITDATA = -1;
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+
+/*
+ * Used with the LILO 'advansys' option to eliminate or
+ * limit I/O port probing at boot time, cf. advansys_setup().
+ */
+STATIC int asc_iopflag = ASC_FALSE;
+STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 };
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+/*
+ * In kernels earlier than v1.3.0, kmalloc() does not work
+ * during driver initialization. Therefore statically declare
+ * 16 elements of each structure. Pre-v1.3.0 kernels will probably
+ * not need more than this number.
+ */
+uchar adv_req_buf[16 * sizeof(adv_req_t)] = { 0 };
+uchar adv_sgblk_buf[16 * sizeof(adv_sgblk_t)] = { 0 };
+#endif /* version < v1.3.0 */
+
+#ifdef ADVANSYS_DEBUG
+STATIC char *
+asc_bus_name[ASC_NUM_BUS] = {
+ "ASC_IS_ISA",
+ "ASC_IS_VL",
+ "ASC_IS_EISA",
+ "ASC_IS_PCI",
+};
+
+STATIC int asc_dbglvl = 0;
+#endif /* ADVANSYS_DEBUG */
+
+/* Declaration for Asc Library internal data referenced by driver. */
+STATIC PortAddr _asc_def_iop_base[];
+
+
+/*
+ * --- Driver Function Prototypes
+ *
+ * advansys.h contains function prototypes for functions global to Linux.
+ */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+STATIC int asc_proc_copy(off_t, off_t, char *, int, char *, int);
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+STATIC void advansys_interrupt(int, struct pt_regs *);
+#else /* version >= v1.3.70 */
+STATIC void advansys_interrupt(int, void *, struct pt_regs *);
+#endif /* version >= v1.3.70 */
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC void advansys_select_queue_depths(struct Scsi_Host *,
+ Scsi_Device *);
+#endif /* version >= v1.3.89 */
+STATIC void advansys_command_done(Scsi_Cmnd *);
+STATIC void asc_scsi_done_list(Scsi_Cmnd *);
+STATIC int asc_execute_scsi_cmnd(Scsi_Cmnd *);
+STATIC int asc_build_req(asc_board_t *, Scsi_Cmnd *);
+STATIC int adv_build_req(asc_board_t *, Scsi_Cmnd *, ADV_SCSI_REQ_Q **);
+STATIC int adv_get_sglist(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *, Scsi_Cmnd *);
+STATIC void asc_isr_callback(ASC_DVC_VAR *, ASC_QDONE_INFO *);
+STATIC void adv_isr_callback(ADV_DVC_VAR *, ADV_SCSI_REQ_Q *);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+STATIC int asc_srch_pci_dev(PCI_DEVICE *);
+STATIC uchar asc_scan_method(void);
+STATIC int asc_pci_find_dev(PCI_DEVICE *);
+STATIC void asc_get_pci_cfg(PCI_DEVICE *, PCI_CONFIG_SPACE *);
+STATIC ushort asc_get_cfg_word(PCI_DATA *);
+STATIC uchar asc_get_cfg_byte(PCI_DATA *);
+STATIC void asc_put_cfg_byte(PCI_DATA *, uchar);
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+STATIC void asc_enqueue(asc_queue_t *, REQP, int);
+STATIC REQP asc_dequeue(asc_queue_t *, int);
+STATIC REQP asc_dequeue_list(asc_queue_t *, REQP *, int);
+STATIC int asc_rmqueue(asc_queue_t *, REQP);
+STATIC int asc_isqueued(asc_queue_t *, REQP);
+STATIC void asc_execute_queue(asc_queue_t *);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+STATIC int asc_prt_board_devices(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_adv_bios(struct Scsi_Host *, char *, int);
+STATIC int asc_get_eeprom_string(ushort *serialnum, uchar *cp);
+STATIC int asc_prt_asc_board_eeprom(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_adv_board_eeprom(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_driver_conf(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_asc_board_info(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_adv_board_info(struct Scsi_Host *, char *, int);
+STATIC int asc_prt_line(char *, int, char *fmt, ...);
+#endif /* version >= v1.3.0 */
+
+/* Declaration for Asc Library internal functions referenced by driver. */
+STATIC int AscFindSignature(PortAddr);
+STATIC ushort AscGetEEPConfig(PortAddr, ASCEEP_CONFIG *, ushort);
+
+#ifdef ADVANSYS_STATS
+STATIC int asc_prt_board_stats(struct Scsi_Host *, char *, int);
+#endif /* ADVANSYS_STATS */
+
+#ifdef ADVANSYS_DEBUG
+STATIC void asc_prt_scsi_host(struct Scsi_Host *);
+STATIC void asc_prt_scsi_cmnd(Scsi_Cmnd *);
+STATIC void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *);
+STATIC void asc_prt_asc_dvc_var(ASC_DVC_VAR *);
+STATIC void asc_prt_asc_scsi_q(ASC_SCSI_Q *);
+STATIC void asc_prt_asc_qdone_info(ASC_QDONE_INFO *);
+STATIC void asc_prt_adv_dvc_cfg(ADV_DVC_CFG *);
+STATIC void asc_prt_adv_dvc_var(ADV_DVC_VAR *);
+STATIC void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *);
+STATIC void asc_prt_adv_sgblock(int, ADV_SG_BLOCK *);
+STATIC void asc_prt_hex(char *f, uchar *, int);
+#endif /* ADVANSYS_DEBUG */
+
+#ifdef ADVANSYS_ASSERT
+STATIC int interrupts_enabled(void);
+#endif /* ADVANSYS_ASSERT */
+
+
+/*
+ * --- Linux 'Scsi_Host_Template' and advansys_setup() Functions
+ */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+/*
+ * advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
+ *
+ * *buffer: I/O buffer
+ * **start: if inout == FALSE pointer into buffer where user read should start
+ * offset: current offset into a /proc/scsi/advansys/[0...] file
+ * length: length of buffer
+ * hostno: Scsi_Host host_no
+ * inout: TRUE - user is writing; FALSE - user is reading
+ *
+ * Return the number of bytes read from or written to a
+ * /proc/scsi/advansys/[0...] file.
+ *
+ * Note: This function uses the per-board buffer 'prtbuf', which is
+ * allocated when the board is initialized in advansys_detect(). The
+ * buffer is ASC_PRTBUF_SIZE bytes. The function asc_proc_copy() is
+ * used to write to the buffer. Because of the way asc_proc_copy() is
+ * written, 'prtbuf' will not be overrun if it is too small; the user
+ * just won't get all the available statistics.
+ */
+int
+advansys_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ int i;
+ char *cp;
+ int cplen;
+ int cnt;
+ int totcnt;
+ int leftlen;
+ char *curbuf;
+ off_t advoffset;
+ Scsi_Device *scd;
+
+ ASC_DBG(1, "advansys_proc_info: begin\n");
+
+ /*
+ * User write not supported.
+ */
+ if (inout == TRUE) {
+ return(-ENOSYS);
+ }
+
+ /*
+ * User read of /proc/scsi/advansys/[0...] file.
+ */
+
+ /* Find the specified board. */
+ for (i = 0; i < asc_board_count; i++) {
+ if (asc_host[i]->host_no == hostno) {
+ break;
+ }
+ }
+ if (i == asc_board_count) {
+ return(-ENOENT);
+ }
+
+ shp = asc_host[i];
+ boardp = ASC_BOARDP(shp);
+
+ /* Copy read data starting at the beginning of the buffer. */
+ *start = buffer;
+ curbuf = buffer;
+ advoffset = 0;
+ totcnt = 0;
+ leftlen = length;
+
+ /*
+ * Get board configuration information.
+ *
+ * advansys_info() returns the board string from its own static buffer.
+ */
+ cp = (char *) advansys_info(shp);
+ strcat(cp, "\n");
+ cplen = strlen(cp);
+ /* Copy board information. */
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ /*
+ * Display Wide Board BIOS Information.
+ */
+ if (ASC_WIDE_BOARD(boardp)) {
+ cp = boardp->prtbuf;
+ cplen = asc_prt_adv_bios(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+ }
+
+ /*
+ * Display driver information for each device attached to the board.
+ */
+ cp = boardp->prtbuf;
+ cplen = asc_prt_board_devices(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ /*
+ * Display target driver information for each device attached
+ * to the board.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,75)
+ for (scd = scsi_devices; scd; scd = scd->next)
+#else /* version >= v2.1.75 */
+ for (scd = shp->host_queue; scd; scd = scd->next)
+#endif /* version >= v2.1.75 */
+ {
+ if (scd->host == shp) {
+ cp = boardp->prtbuf;
+ /*
+ * Note: If proc_print_scsidevice() writes more than
+ * ASC_PRTBUF_SIZE bytes, it will overrun 'prtbuf'.
+ */
+ proc_print_scsidevice(scd, cp, &cplen, 0);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+ }
+ }
+
+ /*
+ * Display EEPROM configuration for the board.
+ */
+ cp = boardp->prtbuf;
+ if (ASC_NARROW_BOARD(boardp)) {
+ cplen = asc_prt_asc_board_eeprom(shp, cp, ASC_PRTBUF_SIZE);
+ } else {
+ cplen = asc_prt_adv_board_eeprom(shp, cp, ASC_PRTBUF_SIZE);
+ }
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ /*
+ * Display driver configuration and information for the board.
+ */
+ cp = boardp->prtbuf;
+ cplen = asc_prt_driver_conf(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+#ifdef ADVANSYS_STATS
+ /*
+ * Display driver statistics for the board.
+ */
+ cp = boardp->prtbuf;
+ cplen = asc_prt_board_stats(shp, cp, ASC_PRTBUF_SIZE);
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+#endif /* ADVANSYS_STATS */
+
+ /*
+ * Display Asc Library dynamic configuration information
+ * for the board.
+ */
+ cp = boardp->prtbuf;
+ if (ASC_NARROW_BOARD(boardp)) {
+ cplen = asc_prt_asc_board_info(shp, cp, ASC_PRTBUF_SIZE);
+ } else {
+ cplen = asc_prt_adv_board_info(shp, cp, ASC_PRTBUF_SIZE);
+ }
+ ASC_ASSERT(cplen < ASC_PRTBUF_SIZE);
+ cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen);
+ totcnt += cnt;
+ leftlen -= cnt;
+ if (leftlen == 0) {
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+ return totcnt;
+ }
+ advoffset += cplen;
+ curbuf += cnt;
+
+ ASC_DBG1(1, "advansys_proc_info: totcnt %d\n", totcnt);
+
+ return totcnt;
+}
+#endif /* version >= v1.3.0 */
+
+/*
+ * advansys_detect()
+ *
+ * Detect function for AdvanSys adapters.
+ *
+ * Argument is a pointer to the host driver's scsi_hosts entry.
+ *
+ * Return number of adapters found.
+ *
+ * Note: Because this function is called during system initialization
+ * it must not call SCSI mid-level functions including scsi_malloc()
+ * and scsi_free().
+ */
+ASC_INITFUNC(
+int
+advansys_detect(Scsi_Host_Template *tpnt)
+)
+{
+ static int detect_called = ASC_FALSE;
+ int iop;
+ int bus;
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp = NULL;
+ ADV_DVC_VAR *adv_dvc_varp = NULL;
+ int ioport = 0;
+ int share_irq = FALSE;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DEVICE pciDevice;
+ PCI_CONFIG_SPACE pciConfig;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ unsigned long pci_memory_address;
+#endif /* version >= v1,3,0 */
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ struct pci_dev *pci_devp = NULL;
+ int pci_device_id_cnt = 0;
+ unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = {
+ ASC_PCI_DEVICE_ID_1100,
+ ASC_PCI_DEVICE_ID_1200,
+ ASC_PCI_DEVICE_ID_1300,
+ ASC_PCI_DEVICE_ID_2300
+ };
+ unsigned long pci_memory_address;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+ int warn_code, err_code;
+ int ret;
+
+ if (detect_called == ASC_FALSE) {
+ detect_called = ASC_TRUE;
+ } else {
+ printk("AdvanSys SCSI: advansys_detect() multiple calls ignored\n");
+ return 0;
+ }
+
+ ASC_DBG(1, "advansys_detect: begin\n");
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ tpnt->proc_dir = &proc_scsi_advansys;
+#endif /* version >= v1.3.0 */
+
+ asc_board_count = 0;
+
+ /*
+ * If I/O port probing has been modified, then verify and
+ * clean-up the 'asc_ioport' list.
+ */
+ if (asc_iopflag == ASC_TRUE) {
+ for (ioport = 0; ioport < ASC_NUM_IOPORT_PROBE; ioport++) {
+ ASC_DBG2(1, "advansys_detect: asc_ioport[%d] %x\n",
+ ioport, asc_ioport[ioport]);
+ if (asc_ioport[ioport] != 0) {
+ for (iop = 0; iop < ASC_IOADR_TABLE_MAX_IX; iop++) {
+ if (_asc_def_iop_base[iop] == asc_ioport[ioport]) {
+ break;
+ }
+ }
+ if (iop == ASC_IOADR_TABLE_MAX_IX) {
+ printk(
+"AdvanSys SCSI: specified I/O Port 0x%X is invalid\n",
+ asc_ioport[ioport]);
+ asc_ioport[ioport] = 0;
+ }
+ }
+ }
+ ioport = 0;
+ }
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ memset(&pciDevice, 0, sizeof(PCI_DEVICE));
+ memset(&pciConfig, 0, sizeof(PCI_CONFIG_SPACE));
+ pciDevice.maxBusNumber = PCI_MAX_BUS;
+ pciDevice.endSlot = PCI_MAX_SLOT;
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+
+ for (bus = 0; bus < ASC_NUM_BUS; bus++) {
+
+ ASC_DBG2(1, "advansys_detect: bus search type %d (%s)\n",
+ bus, asc_bus_name[bus]);
+ iop = 0;
+
+ while (asc_board_count < ASC_NUM_BOARD_SUPPORTED) {
+
+ ASC_DBG1(2, "advansys_detect: asc_board_count %d\n",
+ asc_board_count);
+
+ switch (asc_bus[bus]) {
+ case ASC_IS_ISA:
+ case ASC_IS_VL:
+ if (asc_iopflag == ASC_FALSE) {
+ iop = AscSearchIOPortAddr(iop, asc_bus[bus]);
+ } else {
+ /*
+ * ISA and VL I/O port scanning has either been
+ * eliminated or limited to selected ports on
+ * the LILO command line, /etc/lilo.conf, or
+ * by setting variables when the module was loaded.
+ */
+ ASC_DBG(1, "advansys_detect: I/O port scanning modified\n");
+ ioport_try_again:
+ iop = 0;
+ for (; ioport < ASC_NUM_IOPORT_PROBE; ioport++) {
+ if ((iop = asc_ioport[ioport]) != 0) {
+ break;
+ }
+ }
+ if (iop) {
+ ASC_DBG1(1, "advansys_detect: probing I/O port %x...\n",
+ iop);
+ if (check_region(iop, ASC_IOADR_GAP) != 0) {
+ printk(
+"AdvanSys SCSI: specified I/O Port 0x%X is busy\n", iop);
+ /* Don't try this I/O port twice. */
+ asc_ioport[ioport] = 0;
+ goto ioport_try_again;
+ } else if (AscFindSignature(iop) == ASC_FALSE) {
+ printk(
+"AdvanSys SCSI: specified I/O Port 0x%X has no adapter\n", iop);
+ /* Don't try this I/O port twice. */
+ asc_ioport[ioport] = 0;
+ goto ioport_try_again;
+ } else {
+ /*
+ * If this isn't an ISA board, then it must be
+                         * a VL board. If an ISA board is currently
+                         * being looked for, then try for another
+                         * ISA board in 'asc_ioport'.
+ */
+ if (asc_bus[bus] == ASC_IS_ISA &&
+ (AscGetChipVersion(iop, ASC_IS_ISA) &
+ ASC_CHIP_VER_ISA_BIT) == 0) {
+ /*
+ * Don't clear 'asc_ioport[ioport]'. Try
+ * this board again for VL. Increment
+ * 'ioport' past this board.
+ */
+ ioport++;
+ goto ioport_try_again;
+ }
+ }
+ /*
+                     * This board appears good. Clear its entry so the
+                     * I/O port isn't tried again, and increment
+                     * 'ioport' for the next iteration.
+ */
+ asc_ioport[ioport++] = 0;
+ }
+ }
+ break;
+
+ case ASC_IS_EISA:
+ iop = AscSearchIOPortAddr(iop, asc_bus[bus]);
+ break;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ case ASC_IS_PCI:
+ if (asc_srch_pci_dev(&pciDevice) != PCI_DEVICE_FOUND) {
+ iop = 0;
+ } else {
+ ASC_DBG2(2,
+ "advansys_detect: slotFound %d, busNumber %d\n",
+ pciDevice.slotFound, pciDevice.busNumber);
+ asc_get_pci_cfg(&pciDevice, &pciConfig);
+ iop = pciConfig.baseAddress[0] & PCI_IOADDRESS_MASK;
+ ASC_DBG2(1,
+ "advansys_detect: vendorID %X, deviceID %X\n",
+ pciConfig.vendorID, pciConfig.deviceID);
+ ASC_DBG2(2, "advansys_detect: iop %X, irqLine %d\n",
+ iop, pciConfig.irqLine);
+ }
+ break;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ case ASC_IS_PCI:
+ while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) {
+ if ((pci_devp = pci_find_device(ASC_PCI_VENDORID,
+ pci_device_id[pci_device_id_cnt], pci_devp)) == NULL) {
+ pci_device_id_cnt++;
+ } else {
+ break;
+ }
+ }
+ if (pci_devp == NULL) {
+ iop = 0;
+ } else {
+ ASC_DBG2(2,
+ "advansys_detect: devfn %d, bus number %d\n",
+ pci_devp->devfn, pci_devp->bus->number);
+ iop = pci_devp->base_address[0] & PCI_IOADDRESS_MASK;
+ ASC_DBG2(1,
+ "advansys_detect: vendorID %X, deviceID %X\n",
+ pci_devp->vendor, pci_devp->device);
+ ASC_DBG2(2, "advansys_detect: iop %X, irqLine %d\n",
+ iop, pci_devp->irq);
+ }
+ break;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+
+ default:
+ ASC_PRINT1("advansys_detect: unknown bus type: %d\n",
+ asc_bus[bus]);
+ break;
+ }
+ ASC_DBG1(1, "advansys_detect: iop %x\n", iop);
+
+ /*
+ * Adapter not found, try next bus type.
+ */
+ if (iop == 0) {
+ break;
+ }
+
+ /*
+ * Adapter found.
+ *
+ * Register the adapter, get its configuration, and
+ * initialize it.
+ */
+ ASC_DBG(2, "advansys_detect: scsi_register()\n");
+ shp = scsi_register(tpnt, sizeof(asc_board_t));
+
+ /* Save a pointer to the Scsi_host of each board found. */
+ asc_host[asc_board_count++] = shp;
+
+ /* Initialize private per board data */
+ boardp = ASC_BOARDP(shp);
+ memset(boardp, 0, sizeof(asc_board_t));
+ boardp->id = asc_board_count - 1;
+
+ /*
+ * Handle both narrow and wide boards.
+ *
+ * If a Wide board was detected, set the board structure
+ * wide board flag. Set-up the board structure based on
+ * the board type.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ if (asc_bus[bus] == ASC_IS_PCI &&
+ pciConfig.deviceID == ASC_PCI_DEVICE_ID_2300) {
+ boardp->flags |= ASC_IS_WIDE_BOARD;
+ }
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ if (asc_bus[bus] == ASC_IS_PCI &&
+ pci_devp->device == ASC_PCI_DEVICE_ID_2300) {
+ boardp->flags |= ASC_IS_WIDE_BOARD;
+ }
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DBG(1, "advansys_detect: narrow board\n");
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ asc_dvc_varp->bus_type = asc_bus[bus];
+ asc_dvc_varp->drv_ptr = (ulong) boardp;
+ asc_dvc_varp->cfg = &boardp->dvc_cfg.asc_dvc_cfg;
+ asc_dvc_varp->cfg->overrun_buf = &overrun_buf[0];
+ asc_dvc_varp->iop_base = iop;
+ asc_dvc_varp->isr_callback = (Ptr2Func) asc_isr_callback;
+ } else {
+ ASC_DBG(1, "advansys_detect: wide board\n");
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ adv_dvc_varp->drv_ptr = (ulong) boardp;
+ adv_dvc_varp->cfg = &boardp->dvc_cfg.adv_dvc_cfg;
+ adv_dvc_varp->isr_callback = (Ptr2Func) adv_isr_callback;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+ adv_dvc_varp->iop_base = iop;
+#else /* version >= v1,3,0 */
+ /*
+ * Map the board's registers into virtual memory for
+ * PCI slave access. Only memory accesses are used to
+ * access the board's registers.
+ *
+ * Note: The PCI register base address is not always
+ * page aligned, but the address passed to ioremap()
+ * must be page aligned. It is guaranteed that the
+ * PCI register base address will not cross a page
+ * boundary.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ pci_memory_address = pciConfig.baseAddress[1];
+ if ((boardp->ioremap_addr =
+ ioremap(pci_memory_address & PAGE_MASK,
+ PAGE_SIZE)) == 0) {
+ ASC_PRINT3(
+"advansys_detect: board %d: ioremap(%lx, %d) returned NULL\n",
+ boardp->id, pci_memory_address, ADV_CONDOR_IOLEN);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ adv_dvc_varp->iop_base = (AdvPortAddr)
+ (boardp->ioremap_addr +
+ (pci_memory_address - (pci_memory_address & PAGE_MASK)));
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ pci_memory_address = pci_devp->base_address[1];
+ if ((boardp->ioremap_addr =
+ ioremap(pci_memory_address & PAGE_MASK,
+ PAGE_SIZE)) == 0) {
+ ASC_PRINT3(
+"advansys_detect: board %d: ioremap(%lx, %d) returned NULL\n",
+ boardp->id, pci_memory_address, ADV_CONDOR_IOLEN);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ adv_dvc_varp->iop_base = (AdvPortAddr)
+ (boardp->ioremap_addr +
+ (pci_memory_address - (pci_memory_address & PAGE_MASK)));
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+#endif /* version >= v1,3,0 */
+
+ /*
+ * Even though it isn't used to access the board in
+ * kernels greater than or equal to v1.3.0, save
+ * the I/O Port address so that it can be reported and
+ * displayed.
+ */
+ boardp->ioport = iop;
+ }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ /*
+ * Allocate buffer for printing information from
+ * /proc/scsi/advansys/[0...].
+ */
+ if ((boardp->prtbuf =
+ kmalloc(ASC_PRTBUF_SIZE, GFP_ATOMIC)) == NULL) {
+ ASC_PRINT3(
+"advansys_detect: board %d: kmalloc(%d, %d) returned NULL\n",
+ boardp->id, ASC_PRTBUF_SIZE, GFP_ATOMIC);
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+#endif /* version >= v1.3.0 */
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Set the board bus type and PCI IRQ before
+ * calling AscInitGetConfig().
+ */
+ switch (asc_dvc_varp->bus_type) {
+ case ASC_IS_ISA:
+ shp->unchecked_isa_dma = TRUE;
+ share_irq = FALSE;
+ break;
+ case ASC_IS_VL:
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = FALSE;
+ break;
+ case ASC_IS_EISA:
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+ break;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ case ASC_IS_PCI:
+ shp->irq = asc_dvc_varp->irq_no = pciConfig.irqLine;
+ asc_dvc_varp->cfg->pci_device_id = pciConfig.deviceID;
+ asc_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pciDevice.busNumber,
+ pciDevice.slotFound,
+ pciDevice.devFunc);
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+ break;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ case ASC_IS_PCI:
+ shp->irq = asc_dvc_varp->irq_no = pci_devp->irq;
+ asc_dvc_varp->cfg->pci_device_id = pci_devp->device;
+ asc_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pci_devp->bus->number,
+ PCI_SLOT(pci_devp->devfn),
+ PCI_FUNC(pci_devp->devfn));
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+ break;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+ default:
+ ASC_PRINT2(
+"advansys_detect: board %d: unknown adapter type: %d\n",
+ boardp->id, asc_dvc_varp->bus_type);
+ shp->unchecked_isa_dma = TRUE;
+ share_irq = FALSE;
+ break;
+ }
+ } else {
+ /*
+ * For Wide boards set PCI information before calling
+ * AdvInitGetConfig().
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ shp->irq = adv_dvc_varp->irq_no = pciConfig.irqLine;
+ adv_dvc_varp->cfg->pci_device_id = pciConfig.deviceID;
+ adv_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pciDevice.busNumber,
+ pciDevice.slotFound,
+ pciDevice.devFunc);
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ shp->irq = adv_dvc_varp->irq_no = pci_devp->irq;
+ adv_dvc_varp->cfg->pci_device_id = pci_devp->device;
+ adv_dvc_varp->cfg->pci_slot_info =
+ ASC_PCI_MKID(pci_devp->bus->number,
+ PCI_SLOT(pci_devp->devfn),
+ PCI_FUNC(pci_devp->devfn));
+ shp->unchecked_isa_dma = FALSE;
+ share_irq = TRUE;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+ }
+
+ /*
+ * Read the board configuration.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * NOTE: AscInitGetConfig() may change the board's
+ * bus_type value. The asc_bus[bus] value should no
+ * longer be used. If the bus_type field must be
+ * referenced only use the bit-wise AND operator "&".
+ */
+ ASC_DBG(2, "advansys_detect: AscInitGetConfig()\n");
+ switch(ret = AscInitGetConfig(asc_dvc_varp)) {
+ case 0: /* No error */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: I/O port address modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_AUTO_CONFIG:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: I/O port increment switch enabled\n",
+ boardp->id);
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: EEPROM checksum error\n",
+ boardp->id);
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: IRQ modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ ASC_PRINT1(
+"AscInitGetConfig: board %d: tag queuing enabled w/o disconnects\n",
+ boardp->id);
+ break;
+ default:
+ ASC_PRINT2(
+"AscInitGetConfig: board %d: unknown warning: %x\n",
+ boardp->id, ret);
+ break;
+ }
+ if ((err_code = asc_dvc_varp->err_code) != 0) {
+ ASC_PRINT3(
+"AscInitGetConfig: board %d error: init_state %x, err_code %x\n",
+ boardp->id, asc_dvc_varp->init_state,
+ asc_dvc_varp->err_code);
+ }
+ } else {
+ ASC_DBG(2, "advansys_detect: AdvInitGetConfig()\n");
+ if ((ret = AdvInitGetConfig(adv_dvc_varp)) != 0) {
+ ASC_PRINT2("AdvInitGetConfig: board %d: warning: %x\n",
+ boardp->id, ret);
+ }
+ if ((err_code = adv_dvc_varp->err_code) != 0) {
+ ASC_PRINT2(
+"AdvInitGetConfig: board %d error: err_code %x\n",
+ boardp->id, adv_dvc_varp->err_code);
+ }
+ }
+
+ if (err_code != 0) {
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Save the EEPROM configuration so that it can be displayed
+ * from /proc/scsi/advansys/[0...].
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+
+ ASCEEP_CONFIG *ep;
+
+ /*
+ * Set the adapter's target id bit in the 'init_tidmask' field.
+ */
+ boardp->init_tidmask |=
+ ADV_TID_TO_TIDMASK(asc_dvc_varp->cfg->chip_scsi_id);
+
+ /*
+ * Save EEPROM settings for the board.
+ */
+ ep = &boardp->eep_config.asc_eep;
+
+ ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable;
+ ep->disc_enable = asc_dvc_varp->cfg->disc_enable;
+ ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled;
+ ep->isa_dma_speed = asc_dvc_varp->cfg->isa_dma_speed;
+ ep->start_motor = asc_dvc_varp->start_motor;
+ ep->cntl = asc_dvc_varp->dvc_cntl;
+ ep->no_scam = asc_dvc_varp->no_scam;
+ ep->max_total_qng = asc_dvc_varp->max_total_qng;
+ ep->chip_scsi_id = asc_dvc_varp->cfg->chip_scsi_id;
+ /* 'max_tag_qng' is set to the same value for every device. */
+ ep->max_tag_qng = asc_dvc_varp->cfg->max_tag_qng[0];
+ ep->adapter_info[0] = asc_dvc_varp->cfg->adapter_info[0];
+ ep->adapter_info[1] = asc_dvc_varp->cfg->adapter_info[1];
+ ep->adapter_info[2] = asc_dvc_varp->cfg->adapter_info[2];
+ ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3];
+ ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4];
+ ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5];
+
+ /*
+ * Modify board configuration.
+ */
+ ASC_DBG(2, "advansys_detect: AscInitSetConfig()\n");
+ switch (ret = AscInitSetConfig(asc_dvc_varp)) {
+ case 0: /* No error. */
+ break;
+ case ASC_WARN_IO_PORT_ROTATE:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: I/O port address modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_AUTO_CONFIG:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: I/O port increment switch enabled\n",
+ boardp->id);
+ break;
+ case ASC_WARN_EEPROM_CHKSUM:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: EEPROM checksum error\n",
+ boardp->id);
+ break;
+ case ASC_WARN_IRQ_MODIFIED:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: IRQ modified\n",
+ boardp->id);
+ break;
+ case ASC_WARN_CMD_QNG_CONFLICT:
+ ASC_PRINT1(
+"AscInitSetConfig: board %d: tag queuing w/o disconnects\n",
+ boardp->id);
+ break;
+ default:
+ ASC_PRINT2(
+"AscInitSetConfig: board %d: unknown warning: %x\n",
+ boardp->id, ret);
+ break;
+ }
+ if (asc_dvc_varp->err_code != 0) {
+ ASC_PRINT3(
+"AscInitSetConfig: board %d error: init_state %x, err_code %x\n",
+ boardp->id, asc_dvc_varp->init_state,
+ asc_dvc_varp->err_code);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Finish initializing the 'Scsi_Host' structure.
+ */
+ /* AscInitSetConfig() will set the IRQ for non-PCI boards. */
+ if ((asc_dvc_varp->bus_type & ASC_IS_PCI) == 0) {
+ shp->irq = asc_dvc_varp->irq_no;
+ }
+ } else {
+
+ ADVEEP_CONFIG *ep;
+
+ /*
+ * Save Wide EEP Configuration Information.
+ */
+ ep = &boardp->eep_config.adv_eep;
+
+ ep->adapter_scsi_id = adv_dvc_varp->chip_scsi_id;
+ ep->max_host_qng = adv_dvc_varp->max_host_qng;
+ ep->max_dvc_qng = adv_dvc_varp->max_dvc_qng;
+ ep->termination = adv_dvc_varp->cfg->termination;
+ ep->disc_enable = adv_dvc_varp->cfg->disc_enable;
+ ep->bios_ctrl = adv_dvc_varp->bios_ctrl;
+ ep->wdtr_able = adv_dvc_varp->wdtr_able;
+ ep->sdtr_able = adv_dvc_varp->sdtr_able;
+ ep->ultra_able = adv_dvc_varp->ultra_able;
+ ep->tagqng_able = adv_dvc_varp->tagqng_able;
+ ep->start_motor = adv_dvc_varp->start_motor;
+ ep->scsi_reset_delay = adv_dvc_varp->scsi_reset_wait;
+ ep->bios_boot_delay = adv_dvc_varp->cfg->bios_boot_wait;
+ ep->serial_number_word1 = adv_dvc_varp->cfg->serial1;
+ ep->serial_number_word2 = adv_dvc_varp->cfg->serial2;
+ ep->serial_number_word3 = adv_dvc_varp->cfg->serial3;
+
+ /*
+ * Set the adapter's target id bit in the 'init_tidmask' field.
+ */
+ boardp->init_tidmask |=
+ ADV_TID_TO_TIDMASK(adv_dvc_varp->chip_scsi_id);
+
+ /*
+ * Finish initializing the 'Scsi_Host' structure.
+ */
+ shp->irq = adv_dvc_varp->irq_no;
+ }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ /*
+     * Channels are numbered beginning with 0. For AdvanSys, one host
+     * structure supports one channel. Multi-channel boards have a
+ * separate host structure for each channel.
+ */
+ shp->max_channel = 0;
+#endif /* version >= v1.3.89 */
+ if (ASC_NARROW_BOARD(boardp)) {
+ shp->max_id = ASC_MAX_TID + 1;
+ shp->max_lun = ASC_MAX_LUN + 1;
+
+ shp->io_port = asc_dvc_varp->iop_base;
+ shp->n_io_port = ASC_IOADR_GAP;
+ shp->this_id = asc_dvc_varp->cfg->chip_scsi_id;
+
+ /* Set maximum number of queues the adapter can handle. */
+ shp->can_queue = asc_dvc_varp->max_total_qng;
+ } else {
+ shp->max_id = ADV_MAX_TID + 1;
+ shp->max_lun = ADV_MAX_LUN + 1;
+
+ /*
+         * Save the I/O Port address and length even though in
+         * v1.3.0 and greater kernels the region is not used
+ * by a Wide board. Instead the board is accessed with
+ * Memory Mapped I/O.
+ */
+ shp->io_port = iop;
+ shp->n_io_port = ADV_CONDOR_IOLEN;
+
+ shp->this_id = adv_dvc_varp->chip_scsi_id;
+
+ /* Set maximum number of queues the adapter can handle. */
+ shp->can_queue = adv_dvc_varp->max_host_qng;
+ }
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+ /*
+ * In old kernels without tag queuing support and with memory
+     * allocation problems, set a conservative 'cmd_per_lun' value.
+ */
+#ifdef MODULE
+ shp->cmd_per_lun = 1;
+#else /* MODULE */
+ shp->cmd_per_lun = 4;
+#endif /* MODULE */
+ ASC_DBG1(1, "advansys_detect: cmd_per_lun: %d\n", shp->cmd_per_lun);
+#else /* version >= v1.3.89 */
+ /*
+ * Following v1.3.89, 'cmd_per_lun' is no longer needed
+ * and should be set to zero.
+ *
+ * But because of a bug introduced in v1.3.89 if the driver is
+ * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level
+ * SCSI function 'allocate_device' will panic. To allow the driver
+ * to work as a module in these kernels set 'cmd_per_lun' to 1.
+ */
+#ifdef MODULE
+ shp->cmd_per_lun = 1;
+#else /* MODULE */
+ shp->cmd_per_lun = 0;
+#endif /* MODULE */
+ /*
+ * Use the host 'select_queue_depths' function to determine
+ * the number of commands to queue per device.
+ */
+ shp->select_queue_depths = advansys_select_queue_depths;
+#endif /* version >= v1.3.89 */
+
+ /*
+ * Set the maximum number of scatter-gather elements the
+ * adapter can handle.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Allow two commands with 'sg_tablesize' scatter-gather
+ * elements to be executed simultaneously. This value is
+ * the theoretical hardware limit. It may be decreased
+ * below.
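+         *
+         * For illustration only (the numbers are hypothetical): with
+         * max_total_qng = 20 and ASC_SG_LIST_PER_Q = 7 this would give
+         * ((20 - 2) / 2) * 7 + 1 = 64 scatter-gather elements.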
+ */
+ shp->sg_tablesize =
+ (((asc_dvc_varp->max_total_qng - 2) / 2) *
+ ASC_SG_LIST_PER_Q) + 1;
+ } else {
+ shp->sg_tablesize = ADV_MAX_SG_LIST;
+ }
+
+#ifdef MODULE
+ /*
+ * If the driver is compiled as a module, set a limit on the
+ * 'sg_tablesize' value to prevent memory allocation failures.
+ * Memory allocation errors are more likely to occur at module
+     * load time than at driver initialization time.
+ */
+ if (shp->sg_tablesize > 64) {
+ shp->sg_tablesize = 64;
+ }
+#endif /* MODULE */
+
+ /*
+     * The value of 'sg_tablesize' cannot exceed the SCSI
+     * mid-level driver definition of SG_ALL, which is also used
+     * to define the size of the scatter-gather table in
+     * 'struct asc_sg_head'.
+ */
+ if (shp->sg_tablesize > SG_ALL) {
+ shp->sg_tablesize = SG_ALL;
+ }
+
+ ASC_DBG1(1, "advansys_detect: sg_tablesize: %d\n",
+ shp->sg_tablesize);
+
+ /* BIOS start address. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ shp->base = (char *) ((ulong) AscGetChipBiosAddress(
+ asc_dvc_varp->iop_base,
+ asc_dvc_varp->bus_type));
+ } else {
+ /*
+ * Fill-in BIOS board variables. The Wide BIOS saves
+ * information in LRAM that is used by the driver.
+ */
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_SIGNATURE,
+ boardp->bios_signature);
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_VERSION,
+ boardp->bios_version);
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODESEG,
+ boardp->bios_codeseg);
+ AdvReadWordLram(adv_dvc_varp->iop_base, BIOS_CODELEN,
+ boardp->bios_codelen);
+
+ ASC_DBG2(1,
+ "advansys_detect: bios_signature %x, bios_version %x\n",
+ boardp->bios_signature, boardp->bios_version);
+
+ ASC_DBG2(1,
+ "advansys_detect: bios_codeseg %x, bios_codelen %x\n",
+ boardp->bios_codeseg, boardp->bios_codelen);
+
+ /*
+ * If the BIOS saved a valid signature, then fill in
+ * the BIOS code segment base address.
+ */
+ if (boardp->bios_signature == 0x55AA) {
+ /*
+ * Convert x86 realmode code segment to a linear
+ * address by shifting left 4.
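+             * For example, code segment 0xC800 maps to linear
+             * address 0xC8000.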
+ */
+ shp->base = (uchar *) (boardp->bios_codeseg << 4);
+ } else {
+ shp->base = 0;
+ }
+ }
+
+ /*
+ * Register Board Resources - I/O Port, DMA, IRQ
+ */
+
+ /* Register I/O port range. */
+ ASC_DBG(2, "advansys_detect: request_region()\n");
+ request_region(shp->io_port, shp->n_io_port, "advansys");
+
+ /* Register DMA Channel for Narrow boards. */
+ shp->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */
+ if (ASC_NARROW_BOARD(boardp)) {
+ /* Register DMA channel for ISA bus. */
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ shp->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
+ if ((ret =
+ request_dma(shp->dma_channel, "advansys")) != 0) {
+ ASC_PRINT3(
+"advansys_detect: board %d: request_dma() %d failed %d\n",
+ boardp->id, shp->dma_channel, ret);
+ release_region(shp->io_port, shp->n_io_port);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ AscEnableIsaDma(shp->dma_channel);
+ }
+ }
+
+ /* Register IRQ Number. */
+ ASC_DBG1(2, "advansys_detect: request_irq() %d\n", shp->irq);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+ if ((ret = request_irq(shp->irq, advansys_interrupt,
+ SA_INTERRUPT, "advansys")) != 0)
+#else /* version >= v1.3.70 */
+ /*
+ * If request_irq() fails with the SA_INTERRUPT flag set,
+ * then try again without the SA_INTERRUPT flag set. This
+ * allows IRQ sharing to work even with other drivers that
+ * do not set the SA_INTERRUPT flag.
+ *
+ * If SA_INTERRUPT is not set, then interrupts are enabled
+ * before the driver interrupt function is called.
+ */
+ if (((ret = request_irq(shp->irq, advansys_interrupt,
+ SA_INTERRUPT | (share_irq == TRUE ? SA_SHIRQ : 0),
+ "advansys", boardp)) != 0) &&
+ ((ret = request_irq(shp->irq, advansys_interrupt,
+ (share_irq == TRUE ? SA_SHIRQ : 0),
+ "advansys", boardp)) != 0))
+#endif /* version >= v1.3.70 */
+ {
+ if (ret == -EBUSY) {
+ ASC_PRINT2(
+"advansys_detect: board %d: request_irq(): IRQ %d already in use.\n",
+ boardp->id, shp->irq);
+ } else if (ret == -EINVAL) {
+ ASC_PRINT2(
+"advansys_detect: board %d: request_irq(): IRQ %d not valid.\n",
+ boardp->id, shp->irq);
+ } else {
+ ASC_PRINT3(
+"advansys_detect: board %d: request_irq(): IRQ %d failed with %d\n",
+ boardp->id, shp->irq, ret);
+ }
+ release_region(shp->io_port, shp->n_io_port);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ iounmap(boardp->ioremap_addr);
+#endif /* version >= v1.3.0 */
+ if (shp->dma_channel != NO_ISA_DMA) {
+ free_dma(shp->dma_channel);
+ }
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+
+ /*
+ * Initialize board RISC chip and enable interrupts.
+ */
+ if (ASC_NARROW_BOARD(boardp)) {
+ ASC_DBG(2, "advansys_detect: AscInitAsc1000Driver()\n");
+ warn_code = AscInitAsc1000Driver(asc_dvc_varp);
+ err_code = asc_dvc_varp->err_code;
+
+ if (warn_code || err_code) {
+ ASC_PRINT4(
+"AscInitAsc1000Driver: board %d: error: init_state %x, warn %x error %x\n",
+ boardp->id, asc_dvc_varp->init_state,
+ warn_code, err_code);
+ }
+ } else {
+ int req_cnt;
+ adv_req_t *reqp = NULL;
+ int sg_cnt;
+ adv_sgblk_t *sgp = NULL;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+ req_cnt = sizeof(adv_req_buf)/sizeof(adv_req_t);
+ sg_cnt = sizeof(adv_sgblk_buf)/sizeof(adv_sgblk_t);
+ reqp = (adv_req_t *) &adv_req_buf[0];
+ sgp = (adv_sgblk_t *) &adv_sgblk_buf[0];
+#else /* version >= v1.3.0 */
+ /*
+ * Allocate up to 'max_host_qng' request structures for
+ * the Wide board.
+ */
+ for (req_cnt = adv_dvc_varp->max_host_qng;
+ req_cnt > 0; req_cnt--) {
+
+ reqp = (adv_req_t *)
+ kmalloc(sizeof(adv_req_t) * req_cnt, GFP_ATOMIC);
+
+ ASC_DBG3(1,
+ "advansys_detect: reqp %x, req_cnt %d, bytes %d\n",
+ (unsigned) reqp, req_cnt, sizeof(adv_req_t) * req_cnt);
+
+ if (reqp != NULL) {
+ break;
+ }
+ }
+
+ /*
+             * Allocate up to ADV_TOT_SG_LIST scatter-gather block
+             * structures for the Wide board.
+ */
+ for (sg_cnt = ADV_TOT_SG_LIST; sg_cnt > 0; sg_cnt--) {
+
+ sgp = (adv_sgblk_t *)
+ kmalloc(sizeof(adv_sgblk_t) * sg_cnt, GFP_ATOMIC);
+
+ ASC_DBG3(1,
+ "advansys_detect: sgp %x, sg_cnt %d, bytes %d\n",
+ (unsigned) sgp, sg_cnt, sizeof(adv_sgblk_t) * sg_cnt);
+
+ if (sgp != NULL) {
+ break;
+ }
+ }
+#endif /* version >= v1.3.0 */
+
+ /*
+ * If no request structures or scatter-gather structures could
+ * be allocated, then return an error. Otherwise continue with
+ * initialization.
+ */
+ if (reqp == NULL) {
+ ASC_PRINT1(
+"advansys_detect: board %d: error: failed to kmalloc() adv_req_t buffer.\n",
+ boardp->id);
+ err_code = ADV_ERROR;
+ } else if (sgp == NULL) {
+ kfree(reqp);
+ ASC_PRINT1(
+"advansys_detect: board %d: error: failed to kmalloc() adv_sgblk_t buffer.\n",
+ boardp->id);
+ err_code = ADV_ERROR;
+ } else {
+
+ /*
+ * Save original pointer for kfree() in case the
+ * driver is built as a module and can be unloaded.
+ */
+ boardp->orig_reqp = reqp;
+
+ /*
+ * Point 'adv_reqp' to the request structures and
+ * link them together.
+ */
+ req_cnt--;
+ reqp[req_cnt].next_reqp = NULL;
+ for (; req_cnt > 0; req_cnt--) {
+ reqp[req_cnt - 1].next_reqp = &reqp[req_cnt];
+ }
+ boardp->adv_reqp = &reqp[0];
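+                /*
+                 * 'adv_reqp' now heads a singly linked free list:
+                 * reqp[0] -> reqp[1] -> ... -> NULL. adv_build_req()
+                 * allocates request structures from the head of this
+                 * list.
+                 */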
+
+ /*
+ * Save original pointer for kfree() in case the
+ * driver is built as a module and can be unloaded.
+ */
+ boardp->orig_sgblkp = sgp;
+
+ /*
+                 * Point 'adv_sgblkp' to the scatter-gather block
+                 * structures and link them together.
+ */
+ sg_cnt--;
+ sgp[sg_cnt].next_sgblkp = NULL;
+ for (; sg_cnt > 0; sg_cnt--) {
+ sgp[sg_cnt - 1].next_sgblkp = &sgp[sg_cnt];
+ }
+ boardp->adv_sgblkp = &sgp[0];
+
+ ASC_DBG(2, "advansys_detect: AdvInitAsc3550Driver()\n");
+ warn_code = AdvInitAsc3550Driver(adv_dvc_varp);
+ err_code = adv_dvc_varp->err_code;
+
+ if (warn_code || err_code) {
+ ASC_PRINT3(
+"AdvInitAsc3550Driver: board %d: error: warn %x, error %x\n",
+ boardp->id, warn_code, adv_dvc_varp->err_code);
+ }
+ }
+ }
+
+ if (err_code != 0) {
+ release_region(shp->io_port, shp->n_io_port);
+ if (ASC_WIDE_BOARD(boardp)) {
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ iounmap(boardp->ioremap_addr);
+#endif /* version >= v1.3.0 */
+ if (boardp->orig_reqp) {
+ kfree(boardp->orig_reqp);
+ boardp->orig_reqp = boardp->adv_reqp = NULL;
+ }
+ if (boardp->orig_sgblkp) {
+ kfree(boardp->orig_sgblkp);
+ boardp->orig_sgblkp = boardp->adv_sgblkp = NULL;
+ }
+ }
+ if (shp->dma_channel != NO_ISA_DMA) {
+ free_dma(shp->dma_channel);
+ }
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+ free_irq(shp->irq);
+#else /* version >= v1.3.70 */
+ free_irq(shp->irq, boardp);
+#endif /* version >= v1.3.70 */
+ scsi_unregister(shp);
+ asc_board_count--;
+ continue;
+ }
+ ASC_DBG_PRT_SCSI_HOST(2, shp);
+ }
+ }
+ ASC_DBG1(1, "advansys_detect: done: asc_board_count %d\n", asc_board_count);
+ return asc_board_count;
+}
+
+/*
+ * advansys_release()
+ *
+ * Release resources allocated for a single AdvanSys adapter.
+ */
+int
+advansys_release(struct Scsi_Host *shp)
+{
+ asc_board_t *boardp;
+
+ ASC_DBG(1, "advansys_release: begin\n");
+ boardp = ASC_BOARDP(shp);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+ free_irq(shp->irq);
+#else /* version >= v1.3.70 */
+ free_irq(shp->irq, boardp);
+#endif /* version >= v1.3.70 */
+ if (shp->dma_channel != NO_ISA_DMA) {
+ ASC_DBG(1, "advansys_release: free_dma()\n");
+ free_dma(shp->dma_channel);
+ }
+ release_region(shp->io_port, shp->n_io_port);
+ if (ASC_WIDE_BOARD(boardp)) {
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ iounmap(boardp->ioremap_addr);
+#endif /* version >= v1.3.0 */
+ if (boardp->orig_reqp) {
+ kfree(boardp->orig_reqp);
+ boardp->orig_reqp = boardp->adv_reqp = NULL;
+ }
+ if (boardp->orig_sgblkp) {
+ kfree(boardp->orig_sgblkp);
+ boardp->orig_sgblkp = boardp->adv_sgblkp = NULL;
+ }
+ }
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+ ASC_ASSERT(boardp->prtbuf != NULL);
+ kfree(boardp->prtbuf);
+#endif /* version >= v1.3.0 */
+ scsi_unregister(shp);
+ ASC_DBG(1, "advansys_release: end\n");
+ return 0;
+}
+
+/*
+ * advansys_info()
+ *
+ * Return a string, suitable for printing on the console, with the
+ * argument adapter's configuration information.
+ *
+ * Note: The information line should not exceed ASC_INFO_SIZE bytes,
+ * otherwise the static 'info' array will be overrun.
+ */
+const char *
+advansys_info(struct Scsi_Host *shp)
+{
+ static char info[ASC_INFO_SIZE];
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ char *busname;
+
+ boardp = ASC_BOARDP(shp);
+ if (ASC_NARROW_BOARD(boardp)) {
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ ASC_DBG(1, "advansys_info: begin\n");
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) == ASC_IS_ISAPNP) {
+ busname = "ISA PnP";
+ } else {
+ busname = "ISA";
+ }
+ sprintf(info,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,92)
+"AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %X/%X, IRQ %u, DMA %u",
+#else /* version >= v2.1.92 */
+"AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %lX/%X, IRQ %u, DMA %u",
+#endif /* version >= v2.1.92 */
+ ASC_VERSION, busname, asc_dvc_varp->max_total_qng,
+ (unsigned) shp->base,
+ shp->io_port, shp->n_io_port - 1,
+ shp->irq, shp->dma_channel);
+ } else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
+ if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
+ == ASC_IS_PCI_ULTRA) {
+ busname = "PCI Ultra";
+ } else {
+ busname = "PCI";
+ }
+ sprintf(info,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,92)
+ "AdvanSys SCSI %s: %s %u CDB: IO %X/%X, IRQ %u",
+#else /* version >= v2.1.92 */
+ "AdvanSys SCSI %s: %s %u CDB: IO %lX/%X, IRQ %u",
+#endif /* version >= v2.1.92 */
+ ASC_VERSION, busname, asc_dvc_varp->max_total_qng,
+ shp->io_port, shp->n_io_port - 1, shp->irq);
+ } else {
+ if (asc_dvc_varp->bus_type & ASC_IS_VL) {
+ busname = "VL";
+ } else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
+ busname = "EISA";
+ } else {
+ busname = "?";
+ ASC_PRINT2(
+ "advansys_info: board %d: unknown bus type %d\n",
+ boardp->id, asc_dvc_varp->bus_type);
+ }
+ sprintf(info,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,92)
+ "AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %X/%X, IRQ %u",
+#else /* version >= v2.1.92 */
+ "AdvanSys SCSI %s: %s %u CDB: BIOS %X, IO %lX/%X, IRQ %u",
+#endif /* version >= v2.1.92 */
+ ASC_VERSION, busname, asc_dvc_varp->max_total_qng,
+            (unsigned) shp->base, shp->io_port,
+            shp->n_io_port - 1, shp->irq);
+ }
+ } else {
+ /*
+ * Wide Adapter Information
+ *
+ * Memory-mapped I/O is used instead of I/O space to access
+ * the adapter, but display the I/O Port range. The Memory
+ * I/O address is displayed through the driver /proc file.
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ if (boardp->bios_signature == 0x55AA) {
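+            /*
+             * 'bios_codelen' appears to be in 512-byte units; it is
+             * shifted left 9 below to display the BIOS length in bytes.
+             */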
+ sprintf(info,
+"AdvanSys SCSI %s: PCI Ultra-Wide: BIOS %X/%X, IO %X/%X, IRQ %u",
+ ASC_VERSION,
+ boardp->bios_codeseg << 4,
+ boardp->bios_codelen > 0 ?
+ (boardp->bios_codelen << 9) - 1 : 0,
+ (unsigned) boardp->ioport, ADV_CONDOR_IOLEN - 1,
+ shp->irq);
+ } else {
+ sprintf(info,
+"AdvanSys SCSI %s: PCI Ultra-Wide: IO %X/%X, IRQ %u",
+ ASC_VERSION,
+ (unsigned) boardp->ioport,
+ (ADV_CONDOR_IOLEN - 1),
+ shp->irq);
+ }
+ }
+ ASC_ASSERT(strlen(info) < ASC_INFO_SIZE);
+ ASC_DBG(1, "advansys_info: end\n");
+ return info;
+}
+
+/*
+ * advansys_command() - polled I/O entrypoint.
+ *
+ * Apparently host drivers shouldn't return until the command
+ * is finished.
+ *
+ * Note: This is an old interface that is no longer used by the SCSI
+ * mid-level driver. The new interface, advansys_queuecommand(),
+ * currently handles all requests.
+ */
+int
+advansys_command(Scsi_Cmnd *scp)
+{
+ ASC_DBG1(1, "advansys_command: scp %x\n", (unsigned) scp);
+ ASC_STATS(scp->host, command);
+ scp->SCp.Status = 0; /* Set to a known state */
+ advansys_queuecommand(scp, advansys_command_done);
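+    /* Busy-wait until advansys_command_done() sets SCp.Status non-zero. */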
+ while (scp->SCp.Status == 0) {
+ continue;
+ }
+ ASC_DBG1(1, "advansys_command: result %x\n", scp->result);
+ return scp->result;
+}
+
+/*
+ * advansys_queuecommand() - interrupt-driven I/O entrypoint.
+ *
+ * This function always returns 0. Command return status is saved
+ * in the 'scp' result field.
+ */
+int
+advansys_queuecommand(Scsi_Cmnd *scp, void (*done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ long flags;
+ Scsi_Cmnd *done_scp;
+
+ shp = scp->host;
+ boardp = ASC_BOARDP(shp);
+ ASC_STATS(shp, queuecommand);
+
+ /*
+ * Disable interrupts to preserve request ordering and provide
+ * mutually exclusive access to global structures used to initiate
+ * a request.
+ */
+ save_flags(flags);
+ cli();
+
+ /*
+ * Block new commands while handling a reset or abort request.
+ */
+ if (boardp->flags & (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) {
+ if (boardp->flags & ASC_HOST_IN_RESET) {
+ ASC_DBG1(1,
+ "advansys_queuecommand: scp %x blocked for reset request\n",
+ (unsigned) scp);
+ scp->result = HOST_BYTE(DID_RESET);
+ } else {
+ ASC_DBG1(1,
+ "advansys_queuecommand: scp %x blocked for abort request\n",
+ (unsigned) scp);
+ scp->result = HOST_BYTE(DID_ABORT);
+ }
+
+ /*
+ * Add blocked requests to the board's 'done' queue. The queued
+ * requests will be completed at the end of the abort or reset
+ * handling.
+ */
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ restore_flags(flags);
+ return 0;
+ }
+
+ /*
+ * Attempt to execute any waiting commands for the board.
+ */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1,
+ "advansys_queuecommand: before asc_execute_queue() waiting\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+
+ /*
+ * Save the function pointer to Linux mid-level 'done' function
+ * and attempt to execute the command.
+ *
+ * If ASC_ERROR is returned the request has been added to the
+ * board's 'active' queue and will be completed by the interrupt
+ * handler.
+ *
+ * If ASC_BUSY is returned add the request to the board's per
+ * target waiting list.
+ *
+ * If an error occurred, the request will have been placed on the
+ * board's 'done' queue and must be completed before returning.
+ */
+ scp->scsi_done = done;
+ switch (asc_execute_scsi_cmnd(scp)) {
+ case ASC_NOERROR:
+ break;
+ case ASC_BUSY:
+ asc_enqueue(&boardp->waiting, scp, ASC_BACK);
+ break;
+ case ASC_ERROR:
+ default:
+ done_scp = asc_dequeue_list(&boardp->done, NULL, ASC_TID_ALL);
+ /* Interrupts could be enabled here. */
+ asc_scsi_done_list(done_scp);
+ break;
+ }
+
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * advansys_abort()
+ *
+ * Abort the command specified by 'scp'.
+ */
+int
+advansys_abort(Scsi_Cmnd *scp)
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ long flags;
+ int do_scsi_done;
+ int scp_found;
+ Scsi_Cmnd *done_scp = NULL;
+ int ret;
+
+ /* Save current flags and disable interrupts. */
+ save_flags(flags);
+ cli();
+
+ ASC_DBG1(1, "advansys_abort: scp %x\n", (unsigned) scp);
+
+#ifdef ADVANSYS_STATS
+ if (scp->host != NULL) {
+ ASC_STATS(scp->host, abort);
+ }
+#endif /* ADVANSYS_STATS */
+
+#ifdef ADVANSYS_ASSERT
+ do_scsi_done = ASC_ERROR;
+ scp_found = ASC_ERROR;
+ ret = ASC_ERROR;
+#endif /* ADVANSYS_ASSERT */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if (scp->serial_number != scp->serial_number_at_timeout) {
+ ASC_PRINT1(
+"advansys_abort: timeout serial number changed for request %x\n",
+ (unsigned) scp);
+ do_scsi_done = ASC_FALSE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_ABORT_NOT_RUNNING;
+ } else
+#endif /* version >= v1.3.89 */
+ if ((shp = scp->host) == NULL) {
+ scp->result = HOST_BYTE(DID_ERROR);
+ do_scsi_done = ASC_TRUE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_ABORT_ERROR;
+ } else if ((boardp = ASC_BOARDP(shp))->flags &
+ (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) {
+ ASC_PRINT2(
+"advansys_abort: board %d: Nested host reset or abort, flags 0x%x\n",
+ boardp->id, boardp->flags);
+ do_scsi_done = ASC_TRUE;
+ if ((asc_rmqueue(&boardp->active, scp) == ASC_TRUE) ||
+ (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE)) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_ABORT_ERROR;
+ } else {
+ /* Set abort flag to avoid nested reset or abort requests. */
+ boardp->flags |= ASC_HOST_IN_ABORT;
+
+ do_scsi_done = ASC_TRUE;
+ if (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE) {
+ /*
+ * If asc_rmqueue() found the command on the waiting
+ * queue, it had not been sent to the device. After
+ * the queue is removed, no other handling is required.
+ */
+ ASC_DBG1(1, "advansys_abort: scp %x found on waiting queue\n",
+ (unsigned) scp);
+ scp_found = ASC_TRUE;
+ scp->result = HOST_BYTE(DID_ABORT);
+ ret = SCSI_ABORT_SUCCESS;
+ } else if (asc_isqueued(&boardp->active, scp) == ASC_TRUE) {
+ /*
+ * If asc_isqueued() found the command on the active
+ * queue, it has been sent to the device. The command
+ * will be returned through the interrupt handler after
+ * it has been aborted.
+ */
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Narrow Board
+ */
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ scp->result = HOST_BYTE(DID_ABORT);
+
+ sti(); /* Enable interrupts for AscAbortSRB(). */
+ ASC_DBG1(1, "advansys_abort: before AscAbortSRB(), scp %x\n",
+ (unsigned) scp);
+ switch (AscAbortSRB(asc_dvc_varp, (ulong) scp)) {
+ case ASC_TRUE:
+ /* asc_isr_callback() will be called */
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() TRUE\n");
+ ret = SCSI_ABORT_PENDING;
+ break;
+ case ASC_FALSE:
+ /* Request has apparently already completed. */
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() FALSE\n");
+ ret = SCSI_ABORT_NOT_RUNNING;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_abort: AscAbortSRB() ERROR\n");
+ ret = SCSI_ABORT_ERROR;
+ break;
+ }
+ cli();
+ } else {
+ /*
+ * Wide Board
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ scp->result = HOST_BYTE(DID_ABORT);
+
+ ASC_DBG1(1, "advansys_abort: before AdvAbortSRB(), scp %x\n",
+ (unsigned) scp);
+ switch (AdvAbortSRB(adv_dvc_varp, (ulong) scp)) {
+ case ASC_TRUE:
+ /* asc_isr_callback() will be called */
+ ASC_DBG(1, "advansys_abort: AdvAbortSRB() TRUE\n");
+ ret = SCSI_ABORT_PENDING;
+ break;
+ case ASC_FALSE:
+ /* Request has apparently already completed. */
+ ASC_DBG(1, "advansys_abort: AdvAbortSRB() FALSE\n");
+ ret = SCSI_ABORT_NOT_RUNNING;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_abort: AdvAbortSRB() ERROR\n");
+ ret = SCSI_ABORT_ERROR;
+ break;
+ }
+ /*
+ * Ensure all requests completed by the microcode have
+ * been processed by calling AdvISR().
+ */
+ (void) AdvISR(adv_dvc_varp);
+ }
+
+ /*
+ * The request will either still be on the active queue
+ * or have been added to the board's done queue.
+ */
+ if (asc_rmqueue(&boardp->active, scp) == ASC_TRUE) {
+ scp->result = HOST_BYTE(DID_ABORT);
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = asc_rmqueue(&boardp->done, scp);
+ ASC_ASSERT(scp_found == ASC_TRUE);
+ }
+
+ } else {
+ /*
+ * The command was not found on the active or waiting queues.
+ */
+ do_scsi_done = ASC_TRUE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_ABORT_NOT_RUNNING;
+ }
+
+ /* Clear abort flag. */
+ boardp->flags &= ~ASC_HOST_IN_ABORT;
+
+ /*
+ * Because the ASC_HOST_IN_ABORT flag causes both
+ * 'advansys_interrupt' and 'asc_isr_callback' to
+ * queue requests to the board's 'done' queue and
+ * prevents waiting commands from being executed,
+ * these queued requests must be handled here.
+ */
+ done_scp = asc_dequeue_list(&boardp->done, NULL, ASC_TID_ALL);
+
+ /*
+ * Start any waiting commands for the board.
+ */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1, "advansys_interrupt: before asc_execute_queue()\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+ }
+
+ /* Interrupts could be enabled here. */
+
+ /*
+ * Complete the request to be aborted, unless it has been
+ * restarted as detected above, even if it was not found on
+ * the device active or waiting queues.
+ */
+ ASC_ASSERT(do_scsi_done != ASC_ERROR);
+ ASC_ASSERT(scp_found != ASC_ERROR);
+ if (do_scsi_done == ASC_TRUE) {
+ if (scp->scsi_done == NULL) {
+ ASC_PRINT1(
+"advansys_abort: aborted request scsi_done() is NULL, %x\n",
+ (unsigned) scp);
+ } else {
+ if (scp_found == ASC_FALSE) {
+ ASC_PRINT1(
+"advansys_abort: abort request not active or waiting, completing anyway %x\n",
+ (unsigned) scp);
+ }
+ ASC_STATS(scp->host, done);
+ scp->scsi_done(scp);
+ }
+ }
+
+ /*
+ * It is possible for the request done function to re-enable
+ * interrupts without confusing the driver. But here interrupts
+ * aren't enabled until all requests have been completed.
+ */
+ if (done_scp != NULL) {
+ asc_scsi_done_list(done_scp);
+ }
+
+ ASC_DBG1(1, "advansys_abort: ret %d\n", ret);
+
+ /* Re-enable interrupts, if they were enabled on entry. */
+ restore_flags(flags);
+
+ ASC_ASSERT(ret != ASC_ERROR);
+ return ret;
+}
+
+/*
+ * advansys_reset()
+ *
+ * Reset the device associated with the command 'scp'.
+ */
+int
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+advansys_reset(Scsi_Cmnd *scp)
+#else /* version >= v1.3.89 */
+advansys_reset(Scsi_Cmnd *scp, unsigned int reset_flags)
+#endif /* version >= v1.3.89 */
+{
+ struct Scsi_Host *shp;
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ long flags;
+ Scsi_Cmnd *done_scp = NULL, *last_scp = NULL;
+ Scsi_Cmnd *tscp, *new_last_scp;
+ int do_scsi_done;
+ int scp_found;
+ int status;
+ int target;
+ int ret;
+ int device_reset = ASC_FALSE;
+
+ /* Save current flags and disable interrupts. */
+ save_flags(flags);
+ cli();
+
+ ASC_DBG1(1, "advansys_reset: %x\n", (unsigned) scp);
+
+#ifdef ADVANSYS_STATS
+ if (scp->host != NULL) {
+ ASC_STATS(scp->host, reset);
+ }
+#endif /* ADVANSYS_STATS */
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if ((reset_flags & SCSI_RESET_ASYNCHRONOUS) &&
+ (scp->serial_number != scp->serial_number_at_timeout)) {
+ ASC_PRINT1(
+"advansys_reset: timeout serial number changed for request %x\n",
+ (unsigned) scp);
+ do_scsi_done = ASC_FALSE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_RESET_NOT_RUNNING;
+ } else
+#endif /* version >= v1.3.89 */
+ if ((shp = scp->host) == NULL) {
+ scp->result = HOST_BYTE(DID_ERROR);
+ do_scsi_done = ASC_TRUE;
+ scp_found = ASC_FALSE;
+ ret = SCSI_RESET_ERROR;
+ } else if ((boardp = ASC_BOARDP(shp))->flags &
+ (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) {
+ ASC_PRINT2(
+"advansys_reset: board %d: Nested host reset or abort, flags 0x%x\n",
+ boardp->id, boardp->flags);
+ do_scsi_done = ASC_TRUE;
+ if ((asc_rmqueue(&boardp->active, scp) == ASC_TRUE) ||
+ (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE)) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_RESET_ERROR;
+ } else if (jiffies >= boardp->last_reset &&
+ jiffies < (boardp->last_reset + (10 * HZ))) {
+ /*
+ * Don't allow a reset to be attempted within 10 seconds
+ * of the last reset.
+ *
+ * If 'jiffies' wrapping occurs, the reset request will go
+ * through, because a wrapped 'jiffies' would not pass the
+ * test above.
+ */
+ ASC_DBG(1,
+ "advansys_reset: reset within 10 sec of last reset ignored\n");
+ do_scsi_done = ASC_TRUE;
+ if ((asc_rmqueue(&boardp->active, scp) == ASC_TRUE) ||
+ (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE)) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+ scp->result = HOST_BYTE(DID_ERROR);
+ ret = SCSI_RESET_ERROR;
+ } else {
+ do_scsi_done = ASC_TRUE;
+
+ /* Set reset flag to avoid nested reset or abort requests. */
+ boardp->flags |= ASC_HOST_IN_RESET;
+
+ /*
+ * If the request is on the target waiting or active queue
+ * or the board done queue, then remove it and note that it
+ * was found.
+ */
+ if (asc_rmqueue(&boardp->active, scp) == ASC_TRUE) {
+ ASC_DBG(1, "advansys_reset: active scp_found = TRUE\n");
+ scp_found = ASC_TRUE;
+ } else if (asc_rmqueue(&boardp->waiting, scp) == ASC_TRUE) {
+ ASC_DBG(1, "advansys_reset: waiting scp_found = TRUE\n");
+ scp_found = ASC_TRUE;
+ } else if (asc_rmqueue(&boardp->done, scp) == ASC_TRUE) {
+ scp_found = ASC_TRUE;
+ } else {
+ scp_found = ASC_FALSE;
+ }
+
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Narrow Board
+ *
+ * If the suggest reset bus flags are set, then reset the bus.
+ * Otherwise only reset the device.
+ */
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if (reset_flags &
+ (SCSI_RESET_SUGGEST_BUS_RESET |
+ SCSI_RESET_SUGGEST_HOST_RESET)) {
+#endif /* version >= v1.3.89 */
+
+ /*
+ * Reset the target's SCSI bus.
+ */
+ ASC_DBG(1, "advansys_reset: before AscResetSB()\n");
+ sti(); /* Enable interrupts for AscResetSB(). */
+ status = AscResetSB(asc_dvc_varp);
+ cli();
+ switch (status) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AscResetSB() success\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_reset: AscResetSB() failed\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ } else {
+ /*
+ * Reset the specified device. If the device reset fails,
+ * then reset the SCSI bus.
+ */
+
+ ASC_DBG1(1,
+ "advansys_reset: before AscResetDevice(), target %d\n",
+ scp->target);
+ sti(); /* Enable interrupts for AscResetDevice(). */
+ status = AscResetDevice(asc_dvc_varp, scp->target);
+ cli();
+
+ switch (status) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AscResetDevice() success\n");
+ device_reset = ASC_TRUE;
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1,
+"advansys_reset: AscResetDevice() failed; Calling AscResetSB()\n");
+ sti(); /* Enable interrupts for AscResetSB(). */
+ status = AscResetSB(asc_dvc_varp);
+ cli();
+ switch (status) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AscResetSB() TRUE\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "advansys_reset: AscResetSB() ERROR\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+ break;
+ }
+ }
+#endif /* version >= v1.3.89 */
+ } else {
+ /*
+ * Wide Board
+ *
+ * If the suggest reset bus flags are set, then reset the bus.
+ * Otherwise only reset the device.
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if (reset_flags &
+ (SCSI_RESET_SUGGEST_BUS_RESET |
+ SCSI_RESET_SUGGEST_HOST_RESET)) {
+#endif /* version >= v1.3.89 */
+
+ /*
+ * Reset the target's SCSI bus.
+ */
+ ASC_DBG(1, "advansys_reset: before AdvResetSB()\n");
+ switch (AdvResetSB(adv_dvc_varp)) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() success\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_FALSE:
+ default:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() failed\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+ /*
+ * Ensure all requests completed by the microcode have
+ * been processed by calling AdvISR().
+ */
+ (void) AdvISR(adv_dvc_varp);
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ } else {
+ /*
+ * Reset the specified device. If the device reset fails,
+ * then reset the SCSI bus.
+ */
+
+ ASC_DBG1(1,
+ "advansys_reset: before AdvResetDevice(), target %d\n",
+ scp->target);
+
+ switch (AdvResetDevice(adv_dvc_varp, scp->target)) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AdvResetDevice() success\n");
+ device_reset = ASC_TRUE;
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_FALSE:
+ default:
+ ASC_DBG(1,
+"advansys_reset: AdvResetDevice() failed; Calling AdvResetSB()\n");
+
+ switch (AdvResetSB(adv_dvc_varp)) {
+ case ASC_TRUE:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() TRUE\n");
+ ret = SCSI_RESET_SUCCESS;
+ break;
+ case ASC_FALSE:
+ default:
+ ASC_DBG(1, "advansys_reset: AdvResetSB() ERROR\n");
+ ret = SCSI_RESET_ERROR;
+ break;
+ }
+ break;
+ }
+ /*
+ * Ensure all requests completed by the microcode have
+ * been processed by calling AdvISR().
+ */
+ (void) AdvISR(adv_dvc_varp);
+ }
+#endif /* version >= v1.3.89 */
+ }
+
+ /*
+ * Because the ASC_HOST_IN_RESET flag causes both
+ * 'advansys_interrupt' and 'asc_isr_callback' to
+ * queue requests to the board's 'done' queue and
+ * prevents waiting commands from being executed,
+ * these queued requests must be handled here.
+ */
+ done_scp = asc_dequeue_list(&boardp->done, &last_scp,
+ ASC_TID_ALL);
+
+ /*
+ * If a device reset was performed dequeue all waiting
+ * and active requests for the device and set the request
+ * status to DID_RESET.
+ *
+ * If a SCSI bus reset was performed dequeue all waiting
+ * and active requests for all devices and set the request
+ * status to DID_RESET.
+ */
+ if (device_reset == ASC_TRUE) {
+ target = scp->target;
+ } else {
+ target = ASC_TID_ALL;
+ }
+
+ /*
+ * Add active requests to 'done_scp' and set the request status
+ * to DID_RESET.
+ */
+ if (done_scp == NULL) {
+ done_scp = asc_dequeue_list(&boardp->active, &last_scp, target);
+ for (tscp = done_scp; tscp; tscp = (REQP) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ } else {
+ ASC_ASSERT(last_scp != NULL);
+ REQPNEXT(last_scp) =
+ (unsigned char *) asc_dequeue_list(&boardp->active,
+ &new_last_scp, target);
+ if (new_last_scp != (Scsi_Cmnd *) NULL) {
+ ASC_ASSERT((REQP) REQPNEXT(last_scp) != NULL);
+ for (tscp = (Scsi_Cmnd *) REQPNEXT(last_scp);
+ tscp;
+ tscp = (Scsi_Cmnd *) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ last_scp = new_last_scp;
+ }
+ }
+
+ /*
+ * Add waiting requests to 'done_scp' and set the request status
+ * to DID_RESET.
+ */
+ if (done_scp == NULL) {
+ done_scp = asc_dequeue_list(&boardp->waiting, &last_scp, target);
+ for (tscp = done_scp; tscp; tscp = (REQP) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ } else {
+ ASC_ASSERT(last_scp != NULL);
+ REQPNEXT(last_scp) =
+ (unsigned char *) asc_dequeue_list(&boardp->waiting,
+ &new_last_scp, target);
+ if (new_last_scp != NULL) {
+ ASC_ASSERT((REQP) REQPNEXT(last_scp) != NULL);
+ for (tscp = (REQP) REQPNEXT(last_scp);
+ tscp;
+ tscp = (REQP) REQPNEXT(tscp)) {
+ tscp->result = HOST_BYTE(DID_RESET);
+ }
+ last_scp = new_last_scp;
+ }
+ }
+
+ /* Save the time of the most recently completed reset. */
+ boardp->last_reset = jiffies;
+
+ /* Clear reset flag. */
+ boardp->flags &= ~ASC_HOST_IN_RESET;
+
+ /*
+ * Start any waiting commands for the board.
+ */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1, "advansys_interrupt: before asc_execute_queue()\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+ ret = SCSI_RESET_SUCCESS;
+ }
+
+ /* Interrupts could be enabled here. */
+
+ ASC_ASSERT(do_scsi_done != ASC_ERROR);
+ ASC_ASSERT(scp_found != ASC_ERROR);
+ if (do_scsi_done == ASC_TRUE) {
+ if (scp->scsi_done == NULL) {
+ ASC_PRINT1(
+"advansys_reset: reset request scsi_done() is NULL, %x\n",
+ (unsigned) scp);
+ } else {
+ if (scp_found == ASC_FALSE) {
+ ASC_PRINT1(
+"advansys_reset: reset request not active or waiting, completing anyway %x\n",
+ (unsigned) scp);
+ }
+ ASC_STATS(scp->host, done);
+ scp->scsi_done(scp);
+ }
+ }
+
+ /*
+ * It is possible for the request done function to re-enable
+ * interrupts without confusing the driver. But here interrupts
+ * aren't enabled until requests have been completed.
+ */
+ if (done_scp != NULL) {
+ asc_scsi_done_list(done_scp);
+ }
+
+ ASC_DBG1(1, "advansys_reset: ret %d\n", ret);
+
+ /* Re-enable interrupts, if they were enabled on entry. */
+ restore_flags(flags);
+
+ ASC_ASSERT(ret != ASC_ERROR);
+ return ret;
+}
+
+/*
+ * advansys_biosparam()
+ *
+ * Translate disk drive geometry if the "BIOS greater than 1 GB"
+ * support is enabled for a drive.
+ *
+ * ip (information pointer) is an int array with the following definition:
+ * ip[0]: heads
+ * ip[1]: sectors
+ * ip[2]: cylinders
+ */
+int
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+advansys_biosparam(Disk *dp, int dep, int ip[])
+#else /* version >= v1.3.0 */
+advansys_biosparam(Disk *dp, kdev_t dep, int ip[])
+#endif /* version >= v1.3.0 */
+{
+ asc_board_t *boardp;
+
+ ASC_DBG(1, "advansys_biosparam: begin\n");
+ ASC_STATS(dp->device->host, biosparam);
+ boardp = ASC_BOARDP(dp->device->host);
+ if (ASC_NARROW_BOARD(boardp)) {
+ if ((boardp->dvc_var.asc_dvc_var.dvc_cntl &
+ ASC_CNTL_BIOS_GT_1GB) && dp->capacity > 0x200000) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+ } else {
+ if ((boardp->dvc_var.adv_dvc_var.bios_ctrl &
+ BIOS_CTRL_EXTENDED_XLAT) && dp->capacity > 0x200000) {
+ ip[0] = 255;
+ ip[1] = 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ }
+ }
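+    /*
+     * Cylinders = total sectors / (heads * sectors per track).
+     * For example, 8388608 sectors with 255 heads and 63 sectors
+     * per track yields 522 cylinders.
+     */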
+ ip[2] = dp->capacity / (ip[0] * ip[1]);
+ ASC_DBG(1, "advansys_biosparam: end\n");
+ return 0;
+}
+
+/*
+ * advansys_setup()
+ *
+ * This function is called from init/main.c at boot time.
+ * It is passed LILO parameters that can be set from the
+ * LILO command line or in /etc/lilo.conf.
+ *
+ * It is used by the AdvanSys driver to either disable I/O
+ * port scanning or to limit scanning to 1 - 4 I/O ports.
+ * Regardless of the option setting, EISA and PCI boards
+ * will still be searched for and detected. This option
+ * only affects searching for ISA and VL boards.
+ *
+ * If ADVANSYS_DEBUG is defined the driver debug level may
+ * be set using the 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port.
+ *
+ * Examples:
+ * 1. Eliminate I/O port scanning:
+ * boot: linux advansys=
+ * or
+ * boot: linux advansys=0x0
+ * 2. Limit I/O port scanning to one I/O port:
+ * boot: linux advansys=0x110
+ * 3. Limit I/O port scanning to four I/O ports:
+ * boot: linux advansys=0x110,0x210,0x230,0x330
+ * 4. If ADVANSYS_DEBUG, limit I/O port scanning to four I/O ports and
+ * set the driver debug level to 2.
+ * boot: linux advansys=0x110,0x210,0x230,0x330,0xdeb2
+ *
+ * ints[0] - number of arguments
+ * ints[1] - first argument
+ * ints[2] - second argument
+ * ...
+ */
+ASC_INITFUNC(
+void
+advansys_setup(char *str, int *ints)
+)
+{
+ int i;
+
+ if (asc_iopflag == ASC_TRUE) {
+ printk("AdvanSys SCSI: 'advansys' LILO option may appear only once\n");
+ return;
+ }
+
+ asc_iopflag = ASC_TRUE;
+
+ if (ints[0] > ASC_NUM_IOPORT_PROBE) {
+#ifdef ADVANSYS_DEBUG
+ if ((ints[0] == ASC_NUM_IOPORT_PROBE + 1) &&
+ (ints[ASC_NUM_IOPORT_PROBE + 1] >> 4 == 0xdeb)) {
+ asc_dbglvl = ints[ASC_NUM_IOPORT_PROBE + 1] & 0xf;
+ } else {
+#endif /* ADVANSYS_DEBUG */
+ printk("AdvanSys SCSI: only %d I/O ports accepted\n",
+ ASC_NUM_IOPORT_PROBE);
+#ifdef ADVANSYS_DEBUG
+ }
+#endif /* ADVANSYS_DEBUG */
+ }
+
+#ifdef ADVANSYS_DEBUG
+ ASC_DBG1(1, "advansys_setup: ints[0] %d\n", ints[0]);
+ for (i = 1; i < ints[0]; i++) {
+ ASC_DBG2(1, " ints[%d] %x", i, ints[i]);
+ }
+ ASC_DBG(1, "\n");
+#endif /* ADVANSYS_DEBUG */
+
+ for (i = 1; i <= ints[0] && i <= ASC_NUM_IOPORT_PROBE; i++) {
+ asc_ioport[i-1] = ints[i];
+ ASC_DBG2(1, "advansys_setup: asc_ioport[%d] %x\n",
+ i - 1, asc_ioport[i-1]);
+ }
+}
+
+
+/*
+ * --- Loadable Driver Support
+ */
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = ADVANSYS;
+# include "scsi_module.c"
+#endif /* MODULE */
+
+
+/*
+ * --- Miscellaneous Driver Functions
+ */
+
+/*
+ * First-level interrupt handler.
+ *
+ * For versions >= v1.3.70, 'dev_id' is a pointer to the interrupting
+ * adapter's asc_board_t. Because all boards are currently checked
+ * for interrupts on each interrupt, 'dev_id' is not referenced. 'dev_id'
+ * could be used to identify an interrupt passed to the AdvanSys driver,
+ * which is for a device sharing an interrupt with an AdvanSys adapter.
+ */
+STATIC void
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,70)
+advansys_interrupt(int irq, struct pt_regs *regs)
+#else /* version >= v1.3.70 */
+advansys_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+#endif /* version >= v1.3.70 */
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,95)
+ long flags;
+#else /* version >= v2.1.95 */
+ unsigned long flags;
+#endif /* version >= v2.1.95 */
+ int i;
+ asc_board_t *boardp;
+ Scsi_Cmnd *done_scp = NULL, *last_scp = NULL;
+ Scsi_Cmnd *new_last_scp;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,95)
+ /* Disable interrupts, if they aren't already disabled. */
+ save_flags(flags);
+ cli();
+#else /* version >= v2.1.95 */
+ /*
+ * Disable interrupts, if they aren't already disabled and acquire
+ * the I/O spinlock.
+ */
+ spin_lock_irqsave(&io_request_lock, flags);
+#endif /* version >= v2.1.95 */
+
+ ASC_DBG(1, "advansys_interrupt: begin\n");
+
+ /*
+ * Check for interrupts on all boards.
+ * AscISR() will call asc_isr_callback().
+ */
+ for (i = 0; i < asc_board_count; i++) {
+ boardp = ASC_BOARDP(asc_host[i]);
+ ASC_DBG2(2, "advansys_interrupt: i %d, boardp %lx\n",
+ i, (ulong) boardp)
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Narrow Board
+ */
+ if (AscIsIntPending(asc_host[i]->io_port)) {
+ ASC_STATS(asc_host[i], interrupt);
+ ASC_DBG(1, "advansys_interrupt: before AscISR()\n");
+ AscISR(&boardp->dvc_var.asc_dvc_var);
+ }
+ } else {
+ /*
+ * Wide Board
+ */
+ ASC_DBG(1, "advansys_interrupt: before AdvISR()\n");
+ if (AdvISR(&boardp->dvc_var.adv_dvc_var)) {
+ ASC_STATS(asc_host[i], interrupt);
+ }
+ }
+
+ /*
+ * Start waiting requests and create a list of completed requests.
+ *
+ * If a reset or abort request is being performed for the board,
+ * the reset or abort handler will complete pending requests after
+ * it has completed.
+ */
+ if ((boardp->flags & (ASC_HOST_IN_RESET | ASC_HOST_IN_ABORT)) == 0) {
+ ASC_DBG2(1, "advansys_interrupt: done_scp %lx, last_scp %lx\n",
+ (ulong) done_scp, (ulong) last_scp);
+
+ /* Start any waiting commands for the board. */
+ if (!ASC_QUEUE_EMPTY(&boardp->waiting)) {
+ ASC_DBG(1, "advansys_interrupt: before asc_execute_queue()\n");
+ asc_execute_queue(&boardp->waiting);
+ }
+
+ /*
+ * Add to the list of requests that must be completed.
+ *
+ * 'done_scp' will always be NULL on the first iteration
+ * of this loop. 'last_scp' is set at the same time as
+ * 'done_scp'.
+ */
+ if (done_scp == NULL) {
+ done_scp = asc_dequeue_list(&boardp->done, &last_scp,
+ ASC_TID_ALL);
+ } else {
+ ASC_ASSERT(last_scp != NULL);
+ REQPNEXT(last_scp) =
+ (unsigned char *) asc_dequeue_list(&boardp->done,
+ &new_last_scp,
+ ASC_TID_ALL);
+ if (new_last_scp != NULL) {
+ ASC_ASSERT(REQPNEXT(last_scp) != NULL);
+ last_scp = new_last_scp;
+ }
+ }
+ }
+ }
+
+ /* Interrupts could be enabled here. */
+
+ /*
+ * It is possible for the request done function to re-enable
+ * interrupts without confusing the driver. But here the
+ * original flags aren't restored until all requests have been
+ * completed.
+ */
+ asc_scsi_done_list(done_scp);
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,95)
+ /*
+ * Restore the original flags which will enable interrupts
+ * if and only if they were enabled on entry.
+ */
+ restore_flags(flags);
+#else /* version >= v2.1.95 */
+ /*
+ * Release the I/O spinlock and restore the original flags
+ * which will enable interrupts if and only if they were
+ * enabled on entry.
+ */
+ spin_unlock_irqrestore(&io_request_lock, flags);
+#endif /* version >= v2.1.95 */
+
+ ASC_DBG(1, "advansys_interrupt: end\n");
+ return;
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+/*
+ * Set the number of commands to queue per device for the
+ * specified host adapter.
+ */
+STATIC void
+advansys_select_queue_depths(struct Scsi_Host *shp, Scsi_Device *devicelist)
+{
+ Scsi_Device *device;
+ asc_board_t *boardp;
+
+ boardp = ASC_BOARDP(shp);
+ boardp->flags |= ASC_SELECT_QUEUE_DEPTHS;
+ for (device = devicelist; device != NULL; device = device->next) {
+ if (device->host != shp) {
+ continue;
+ }
+ /*
+ * Save a pointer to the device and set its initial/maximum
+ * queue depth.
+ */
+ boardp->device[device->id] = device;
+ if (ASC_NARROW_BOARD(boardp)) {
+ device->queue_depth =
+ boardp->dvc_var.asc_dvc_var.max_dvc_qng[device->id];
+ } else {
+ device->queue_depth =
+ boardp->dvc_var.adv_dvc_var.max_dvc_qng;
+ }
+ ASC_DBG3(1, "advansys_select_queue_depths: shp %x, id %d, depth %d\n",
+ (unsigned) shp, device->id, device->queue_depth);
+ }
+}
+#endif /* version >= v1.3.89 */
+
+/*
+ * Function used only with polled I/O requests that are initiated by
+ * advansys_command().
+ */
+STATIC void
+advansys_command_done(Scsi_Cmnd *scp)
+{
+ ASC_DBG1(1, "advansys_command_done: scp %x\n", (unsigned) scp);
+ scp->SCp.Status = 1;
+}
+
+/*
+ * Complete all requests on the singly linked list pointed
+ * to by 'scp'.
+ *
+ * Interrupts can be enabled on entry.
+ */
+STATIC void
+asc_scsi_done_list(Scsi_Cmnd *scp)
+{
+ Scsi_Cmnd *tscp;
+
+ ASC_DBG(2, "asc_scsi_done_list: begin\n");
+ while (scp != NULL) {
+ ASC_DBG1(3, "asc_scsi_done_list: scp %x\n", (unsigned) scp);
+ tscp = (REQP) REQPNEXT(scp);
+ REQPNEXT(scp) = NULL;
+ ASC_STATS(scp->host, done);
+ ASC_ASSERT(scp->scsi_done != NULL);
+ scp->scsi_done(scp);
+ scp = tscp;
+ }
+ ASC_DBG(2, "asc_scsi_done_list: done\n");
+ return;
+}
+
+/*
+ * Execute a single 'Scsi_Cmnd'.
+ *
+ * The function 'done' is called when the request has been completed.
+ *
+ * Scsi_Cmnd:
+ *
+ * host - board controlling device
+ * device - device to send command
+ * target - target of device
+ * lun - lun of device
+ * cmd_len - length of SCSI CDB
+ * cmnd - buffer for SCSI 8, 10, or 12 byte CDB
+ * use_sg - if non-zero indicates scatter-gather request with use_sg elements
+ *
+ * if (use_sg == 0) {
+ * request_buffer - buffer address for request
+ * request_bufflen - length of request buffer
+ * } else {
+ * request_buffer - pointer to scatterlist structure
+ * }
+ *
+ * sense_buffer - sense command buffer
+ *
+ * result (4 bytes of an int):
+ * Byte Meaning
+ * 0 SCSI Status Byte Code
+ * 1 SCSI One Byte Message Code
+ * 2 Host Error Code
+ * 3 Mid-Level Error Code
+ *
+ * host driver fields:
+ * SCp - Scsi_Pointer used for command processing status
+ * scsi_done - used to save caller's done function
+ * host_scribble - used for pointer to another Scsi_Cmnd
+ *
+ * If this function returns ASC_NOERROR or ASC_ERROR the request
+ * has been enqueued on the board's 'done' queue and must be
+ * completed by the caller.
+ *
+ * If ASC_BUSY is returned the request must be enqueued by the
+ * caller and re-tried later.
+ */
+STATIC int
+asc_execute_scsi_cmnd(Scsi_Cmnd *scp)
+{
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ ADV_SCSI_REQ_Q *adv_scsiqp;
+ Scsi_Device *device;
+ int ret;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_DBG2(1, "asc_execute_scsi_cmnd: scp %x, done %x\n",
+ (unsigned) scp, (unsigned) scp->scsi_done);
+
+ boardp = ASC_BOARDP(scp->host);
+ device = boardp->device[scp->target];
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ /*
+ * Build and execute Narrow Board request.
+ */
+
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+
+ /*
+ * Build Asc Library request structure using the
+ * global structures 'asc_scsi_req' and 'asc_sg_head'.
+ *
+ * asc_build_req() can not return ASC_BUSY.
+ */
+ if (asc_build_req(boardp, scp) == ASC_ERROR) {
+ ASC_STATS(scp->host, build_error);
+ return ASC_ERROR;
+ }
+
+ /*
+ * Execute the command. If there is no error, add the command
+ * to the active queue.
+ */
+ switch (ret = AscExeScsiQueue(asc_dvc_varp, &asc_scsi_q)) {
+ case ASC_NOERROR:
+ ASC_STATS(scp->host, exe_noerror);
+ /*
+ * Increment monotonically increasing per device successful
+ * request counter. Wrapping doesn't matter.
+ */
+ boardp->reqcnt[scp->target]++;
+
+#if ASC_QUEUE_FLOW_CONTROL
+ /*
+ * Conditionally increment the device queue depth.
+ *
+ * If no error occurred and there have been 100 consecutive
+ * successful requests and the current queue depth is less
+ * than the maximum queue depth, then increment the current
+ * queue depth.
+ */
+ if (boardp->nerrcnt[scp->target]++ > 100) {
+ boardp->nerrcnt[scp->target] = 0;
+ if (device != NULL &&
+ (device->queue_curr_depth < device->queue_depth) &&
+ (!(boardp->queue_full &
+ ADV_TID_TO_TIDMASK(scp->target)) ||
+ (boardp->queue_full_cnt[scp->target] >
+ device->queue_curr_depth))) {
+ device->queue_curr_depth++;
+ }
+ }
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ asc_enqueue(&boardp->active, scp, ASC_BACK);
+ ASC_DBG(1,
+ "asc_execute_scsi_cmnd: AscExeScsiQueue(), ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ /* Caller must enqueue request and retry later. */
+ ASC_STATS(scp->host, exe_busy);
+#if ASC_QUEUE_FLOW_CONTROL
+ /*
+ * Clear consecutive no error counter and if possible decrement
+ * queue depth.
+ */
+ boardp->nerrcnt[scp->target] = 0;
+ if (device != NULL && device->queue_curr_depth > 1) {
+ device->queue_curr_depth--;
+ }
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ break;
+ case ASC_ERROR:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AscExeScsiQueue() ASC_ERROR, err_code %x\n",
+ boardp->id, asc_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_error);
+#if ASC_QUEUE_FLOW_CONTROL
+ /* Clear consecutive no error counter. */
+ boardp->nerrcnt[scp->target] = 0;
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ default:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AscExeScsiQueue() unknown, err_code %x\n",
+ boardp->id, asc_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_unknown);
+#if ASC_QUEUE_FLOW_CONTROL
+ /* Clear consecutive no error counter. */
+ boardp->nerrcnt[scp->target] = 0;
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ }
+ } else {
+ /*
+ * Build and execute Wide Board request.
+ */
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+
+ /*
+ * Build and get a pointer to an Adv Library request structure.
+ *
+ * If the request is successfully built then send it below,
+ * otherwise return with an error.
+ */
+ switch (adv_build_req(boardp, scp, &adv_scsiqp)) {
+ case ASC_NOERROR:
+ ASC_DBG(3, "asc_execute_scsi_cmnd: adv_build_req ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ ASC_DBG(1, "asc_execute_scsi_cmnd: adv_build_req ASC_BUSY\n");
+ return ASC_BUSY;
+ case ASC_ERROR:
+ default:
+ ASC_DBG(1, "asc_execute_scsi_cmnd: adv_build_req ASC_ERROR\n");
+ ASC_STATS(scp->host, build_error);
+ return ASC_ERROR;
+ }
+
+ /*
+ * Execute the command. If there is no error, add the command
+ * to the active queue.
+ */
+ switch (ret = AdvExeScsiQueue(adv_dvc_varp, adv_scsiqp)) {
+ case ASC_NOERROR:
+ ASC_STATS(scp->host, exe_noerror);
+ /*
+ * Increment monotonically increasing per device successful
+ * request counter. Wrapping doesn't matter.
+ */
+ boardp->reqcnt[scp->target]++;
+ asc_enqueue(&boardp->active, scp, ASC_BACK);
+ ASC_DBG(1,
+ "asc_execute_scsi_cmnd: AdvExeScsiQueue(), ASC_NOERROR\n");
+ break;
+ case ASC_BUSY:
+ /* Caller must enqueue request and retry later. */
+ ASC_STATS(scp->host, exe_busy);
+ break;
+ case ASC_ERROR:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AdvExeScsiQueue() ASC_ERROR, err_code %x\n",
+ boardp->id, adv_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_error);
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ default:
+ ASC_PRINT2(
+"asc_execute_scsi_cmnd: board %d: AdvExeScsiQueue() unknown, err_code %x\n",
+ boardp->id, adv_dvc_varp->err_code);
+ ASC_STATS(scp->host, exe_unknown);
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ break;
+ }
+ }
+
+ ASC_DBG(1, "asc_execute_scsi_cmnd: end\n");
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ return ret;
+}
+
+/*
+ * Build a request structure for the Asc Library (Narrow Board).
+ *
+ * The global structures 'asc_scsi_q' and 'asc_sg_head' are
+ * used to build the request.
+ *
+ * If an error occurs, then return ASC_ERROR.
+ */
+STATIC int
+asc_build_req(asc_board_t *boardp, Scsi_Cmnd *scp)
+{
+ /*
+ * Mutually exclusive access is required to 'asc_scsi_q' and
+ * 'asc_sg_head' until after the request is started.
+ */
+ memset(&asc_scsi_q, 0, sizeof(ASC_SCSI_Q));
+
+ /*
+ * Point the ASC_SCSI_Q to the 'Scsi_Cmnd'.
+ */
+ asc_scsi_q.q2.srb_ptr = (ulong) scp;
+
+ /*
+ * Build the ASC_SCSI_Q request.
+ */
+ ASC_ASSERT(scp->cmd_len <= ASC_MAX_CDB_LEN);
+ if (scp->cmd_len > ASC_MAX_CDB_LEN) {
+ scp->cmd_len = ASC_MAX_CDB_LEN;
+ }
+ asc_scsi_q.cdbptr = &scp->cmnd[0];
+ asc_scsi_q.q2.cdb_len = scp->cmd_len;
+ asc_scsi_q.q1.target_id = ASC_TID_TO_TARGET_ID(scp->target);
+ asc_scsi_q.q1.target_lun = scp->lun;
+ asc_scsi_q.q2.target_ix = ASC_TIDLUN_TO_IX(scp->target, scp->lun);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_scsi_q.q1.sense_addr = (ulong) &scp->sense_buffer[0];
+#else /* version >= v2.0.0 */
+ asc_scsi_q.q1.sense_addr = virt_to_bus(&scp->sense_buffer[0]);
+#endif /* version >= v2.0.0 */
+ asc_scsi_q.q1.sense_len = sizeof(scp->sense_buffer);
+
+ /*
+ * If there are any outstanding requests for the current target,
+ * then every 255th request send an ORDERED request. This heuristic
+ * tries to retain the benefit of request sorting while preventing
+ * request starvation. 255 is the max number of tags or pending commands
+ * a device may have outstanding.
+ *
+ * The request count is incremented below for every successfully
+ * started request.
+ *
+ */
+ if ((boardp->dvc_var.asc_dvc_var.cur_dvc_qng[scp->target] > 0) &&
+ (boardp->reqcnt[scp->target] % 255) == 0) {
+ asc_scsi_q.q2.tag_code = M2_QTAG_MSG_ORDERED;
+ } else {
+ asc_scsi_q.q2.tag_code = M2_QTAG_MSG_SIMPLE;
+ }
+
+ /*
+ * Build ASC_SCSI_Q for a contiguous buffer or a scatter-gather
+ * buffer command.
+ */
+ if (scp->use_sg == 0) {
+ /*
+ * CDB request of single contiguous buffer.
+ */
+ ASC_STATS(scp->host, cont_cnt);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_scsi_q.q1.data_addr = (ulong) scp->request_buffer;
+#else /* version >= v2.0.0 */
+ asc_scsi_q.q1.data_addr = virt_to_bus(scp->request_buffer);
+#endif /* version >= v2.0.0 */
+ asc_scsi_q.q1.data_cnt = scp->request_bufflen;
+ ASC_STATS_ADD(scp->host, cont_xfer,
+ ASC_CEILING(scp->request_bufflen, 512));
+ asc_scsi_q.q1.sg_queue_cnt = 0;
+ asc_scsi_q.sg_head = NULL;
+ } else {
+ /*
+ * CDB scatter-gather request list.
+ */
+ int sgcnt;
+ struct scatterlist *slp;
+
+ if (scp->use_sg > scp->host->sg_tablesize) {
+ ASC_PRINT3(
+"asc_build_req: board %d: use_sg %d > sg_tablesize %d\n",
+ boardp->id, scp->use_sg, scp->host->sg_tablesize);
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+ return ASC_ERROR;
+ }
+
+ ASC_STATS(scp->host, sg_cnt);
+
+ /*
+ * Use global ASC_SG_HEAD structure and set the ASC_SCSI_Q
+ * structure to point to it.
+ */
+ memset(&asc_sg_head, 0, sizeof(ASC_SG_HEAD));
+
+ asc_scsi_q.q1.cntl |= QC_SG_HEAD;
+ asc_scsi_q.sg_head = &asc_sg_head;
+ asc_scsi_q.q1.data_cnt = 0;
+ asc_scsi_q.q1.data_addr = 0;
+ asc_sg_head.entry_cnt = asc_scsi_q.q1.sg_queue_cnt = scp->use_sg;
+ ASC_STATS_ADD(scp->host, sg_elem, asc_sg_head.entry_cnt);
+
+ /*
+ * Convert scatter-gather list into ASC_SG_HEAD list.
+ */
+ slp = (struct scatterlist *) scp->request_buffer;
+ for (sgcnt = 0; sgcnt < scp->use_sg; sgcnt++, slp++) {
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_sg_head.sg_list[sgcnt].addr = (ulong) slp->address;
+#else /* version >= v2.0.0 */
+ asc_sg_head.sg_list[sgcnt].addr = virt_to_bus(slp->address);
+#endif /* version >= v2.0.0 */
+ asc_sg_head.sg_list[sgcnt].bytes = slp->length;
+ ASC_STATS_ADD(scp->host, sg_xfer, ASC_CEILING(slp->length, 512));
+ }
+ }
+
+ ASC_DBG_PRT_ASC_SCSI_Q(2, &asc_scsi_q);
+ ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
+
+ return ASC_NOERROR;
+}
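+
+/*
+ * Illustrative sketch (not part of the driver source): the tag-code
+ * heuristic used in asc_build_req() above, shown in isolation. The
+ * helper name and parameters are hypothetical; 'queued' stands in for
+ * cur_dvc_qng[scp->target] and 'reqcnt' for boardp->reqcnt[scp->target].
+ */
+#if 0
+STATIC uchar
+pick_tag_code(int queued, unsigned int reqcnt)
+{
+    /* Every 255th request to a busy device is sent ORDERED so that a
+     * sorted request cannot be bypassed indefinitely by SIMPLE tags. */
+    if (queued > 0 && (reqcnt % 255) == 0) {
+        return M2_QTAG_MSG_ORDERED;
+    }
+    return M2_QTAG_MSG_SIMPLE;
+}
+#endif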
+
+/*
+ * Build a request structure for the Adv Library (Wide Board).
+ *
+ * If an adv_req_t can not be allocated to issue the request,
+ * then return ASC_BUSY. If an error occurs, then return ASC_ERROR.
+ */
+STATIC int
+adv_build_req(asc_board_t *boardp, Scsi_Cmnd *scp,
+ ADV_SCSI_REQ_Q **adv_scsiqpp)
+{
+ adv_req_t *reqp;
+ ADV_SCSI_REQ_Q *scsiqp;
+ int i;
+
+ /*
+ * Allocate an adv_req_t structure from the board to execute
+ * the command.
+ */
+ if (boardp->adv_reqp == NULL) {
+ ASC_DBG(1, "adv_build_req: no free adv_req_t\n");
+ ASC_STATS(scp->host, adv_build_noreq);
+ return ASC_BUSY;
+ } else {
+ reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp->next_reqp;
+ reqp->next_reqp = NULL;
+ }
+
+ /*
+ * Get 4-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers.
+ */
+ scsiqp = (ADV_SCSI_REQ_Q *) ADV_DWALIGN(&reqp->scsi_req_q);
+ memset(scsiqp, 0, sizeof(ADV_SCSI_REQ_Q));
+
+ /*
+ * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure.
+ */
+ scsiqp->srb_ptr = (ulong) reqp;
+
+ /*
+ * Set the adv_req_t 'cmndp' to point to the Scsi_Cmnd structure.
+ */
+ reqp->cmndp = scp;
+
+ /*
+ * Build the ADV_SCSI_REQ_Q request.
+ */
+
+ /*
+ * Set CDB length and copy it to the request structure.
+ */
+ ASC_ASSERT(scp->cmd_len <= ASC_MAX_CDB_LEN);
+ if (scp->cmd_len > ASC_MAX_CDB_LEN) {
+ scp->cmd_len = ASC_MAX_CDB_LEN;
+ }
+ scsiqp->cdb_len = scp->cmd_len;
+ for (i = 0; i < scp->cmd_len; i++) {
+ scsiqp->cdb[i] = scp->cmnd[i];
+ }
+
+ scsiqp->target_id = scp->target;
+ scsiqp->target_lun = scp->lun;
+
+ scsiqp->vsense_addr = (ulong) &scp->sense_buffer[0];
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ scsiqp->sense_addr = (ulong) &scp->sense_buffer[0];
+#else /* version >= v2.0.0 */
+ scsiqp->sense_addr = virt_to_bus(&scp->sense_buffer[0]);
+#endif /* version >= v2.0.0 */
+ scsiqp->sense_len = sizeof(scp->sense_buffer);
+
+ /*
+ * Build ADV_SCSI_REQ_Q for a contiguous buffer or a scatter-gather
+ * buffer command.
+ */
+ scsiqp->data_cnt = scp->request_bufflen;
+ scsiqp->vdata_addr = (ulong) scp->request_buffer;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ scsiqp->data_addr = (ulong) scp->request_buffer;
+#else /* version >= v2.0.0 */
+ scsiqp->data_addr = virt_to_bus(scp->request_buffer);
+#endif /* version >= v2.0.0 */
+
+ if (scp->use_sg == 0) {
+ /*
+ * CDB request of single contiguous buffer.
+ */
+ reqp->sgblkp = NULL;
+ scsiqp->sg_list_ptr = NULL;
+ ASC_STATS(scp->host, cont_cnt);
+ ASC_STATS_ADD(scp->host, cont_xfer,
+ ASC_CEILING(scp->request_bufflen, 512));
+ } else {
+ /*
+ * CDB scatter-gather request list.
+ */
+ if (scp->use_sg > ADV_MAX_SG_LIST) {
+ ASC_PRINT3(
+"adv_build_req: board %d: use_sg %d > ADV_MAX_SG_LIST %d\n",
+ boardp->id, scp->use_sg, ADV_MAX_SG_LIST);
+ scp->result = HOST_BYTE(DID_ERROR);
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+
+ /*
+ * Free the 'adv_req_t' structure by adding it back to the
+ * board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+
+ return ASC_ERROR;
+ }
+
+ /*
+ * Allocate an 'adv_sgblk_t' structure from the board to
+ * execute the command.
+ */
+ if (boardp->adv_sgblkp == NULL) {
+ ASC_DBG(1, "adv_build_req: no free adv_sgblk_t\n");
+ ASC_STATS(scp->host, adv_build_nosg);
+ /*
+ * Free the 'adv_req_t' structure by adding it back to the
+ * board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+ return ASC_BUSY;
+ } else {
+ reqp->sgblkp = boardp->adv_sgblkp;
+ boardp->adv_sgblkp = reqp->sgblkp->next_sgblkp;
+ reqp->sgblkp->next_sgblkp = NULL;
+ }
+
+ /*
+ * Build scatter-gather list.
+ */
+ scsiqp->sg_list_ptr = (ADV_SG_BLOCK *)
+ ADV_DWALIGN(&reqp->sgblkp->sg_block[0]);
+
+ memset(scsiqp->sg_list_ptr, 0, sizeof(ADV_SG_BLOCK) *
+ (ADV_NUM_SG_BLOCK + ADV_NUM_PAGE_CROSSING));
+
+ if (adv_get_sglist(&boardp->dvc_var.adv_dvc_var, scsiqp, scp) ==
+ ADV_ERROR) {
+
+ /*
+ * Free the adv_sgblk_t structure, if any, by adding it back
+ * to the board free list.
+ */
+ ASC_ASSERT(reqp->sgblkp != NULL);
+ reqp->sgblkp->next_sgblkp = boardp->adv_sgblkp;
+ boardp->adv_sgblkp = reqp->sgblkp;
+
+ /*
+ * Free the adv_req_t structure by adding it back to the
+ * board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+
+ return ADV_ERROR;
+ }
+
+ ASC_STATS(scp->host, sg_cnt);
+ ASC_STATS_ADD(scp->host, sg_elem, scp->use_sg);
+ }
+
+ ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
+ ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len);
+
+ *adv_scsiqpp = scsiqp;
+
+ return ASC_NOERROR;
+}
+
+/*
+ * Build scatter-gather list for Adv Library (Wide Board).
+ *
+ * Return:
+ * ADV_SUCCESS(1) - SG List successfully created
+ * ADV_ERROR(-1) - SG List creation failed
+ */
+STATIC int
+adv_get_sglist(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp,
+ Scsi_Cmnd *scp)
+{
+ ADV_SG_BLOCK *sg_block; /* virtual address of a SG */
+ ulong sg_block_next_addr; /* block and its next */
+ ulong sg_block_physical_addr;
+ int sg_block_index, i; /* how many SG entries */
+ struct scatterlist *slp;
+ int sg_elem_cnt;
+
+ slp = (struct scatterlist *) scp->request_buffer;
+ sg_elem_cnt = scp->use_sg;
+
+ sg_block = scsiqp->sg_list_ptr;
+ sg_block_next_addr = (ulong) sg_block; /* allow math operation */
+ sg_block_physical_addr =
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ (ulong) scsiqp->sg_list_ptr;
+#else /* version >= v2.0.0 */
+ virt_to_bus(scsiqp->sg_list_ptr);
+#endif /* version >= v2.0.0 */
+ ADV_ASSERT(ADV_DWALIGN(sg_block_physical_addr) ==
+ sg_block_physical_addr);
+ scsiqp->sg_real_addr = sg_block_physical_addr;
+
+ sg_block_index = 0;
+ do
+ {
+ sg_block->first_entry_no = sg_block_index;
+ for (i = 0; i < NO_OF_SG_PER_BLOCK; i++)
+ {
+ sg_block->sg_list[i].sg_addr =
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ (ulong) slp->address;
+#else /* version >= v2.0.0 */
+ virt_to_bus(slp->address);
+#endif /* version >= v2.0.0 */
+ sg_block->sg_list[i].sg_count = slp->length;
+ ASC_STATS_ADD(scp->host, sg_xfer, ASC_CEILING(slp->length, 512));
+
+ if (--sg_elem_cnt == 0)
+ { /* last entry, get out */
+ scsiqp->sg_entry_cnt = sg_block_index + i + 1;
+ sg_block->last_entry_no = sg_block_index + i;
+ sg_block->sg_ptr = 0L; /* next link = NULL */
+ return ADV_SUCCESS;
+ }
+ slp++;
+ }
+ sg_block_next_addr += sizeof(ADV_SG_BLOCK);
+ sg_block_physical_addr += sizeof(ADV_SG_BLOCK);
+ ADV_ASSERT(ADV_DWALIGN(sg_block_physical_addr) ==
+ sg_block_physical_addr);
+
+ sg_block_index += NO_OF_SG_PER_BLOCK;
+ sg_block->sg_ptr = (ADV_SG_BLOCK *) sg_block_physical_addr;
+ sg_block->last_entry_no = sg_block_index - 1;
+ sg_block = (ADV_SG_BLOCK *) sg_block_next_addr; /* virtual addr */
+ }
+ while (1);
+ /* NOTREACHED */
+}
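+
+/*
+ * Worked example of the chaining above (illustrative; NO_OF_SG_PER_BLOCK
+ * is assumed to be 15 here). A request with use_sg == 20 consumes two
+ * ADV_SG_BLOCK structures: the first block holds entries 0-14
+ * (last_entry_no 14) and its sg_ptr holds the physical address of the
+ * second block; the second block holds entries 15-19 (last_entry_no 19)
+ * and terminates the chain with sg_ptr == 0. scsiqp->sg_entry_cnt ends
+ * up as 20.
+ */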
+
+/*
+ * asc_isr_callback() - Second Level Interrupt Handler called by AscISR().
+ *
+ * Interrupt callback function for the Narrow SCSI Asc Library.
+ */
+STATIC void
+asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
+{
+ asc_board_t *boardp;
+ Scsi_Cmnd *scp;
+ struct Scsi_Host *shp;
+ int underrun = ASC_FALSE;
+ int i;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_DBG2(1, "asc_isr_callback: asc_dvc_varp %x, qdonep %x\n",
+ (unsigned) asc_dvc_varp, (unsigned) qdonep);
+ ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep);
+
+ /*
+ * Get the Scsi_Cmnd structure and Scsi_Host structure for the
+ * command that has been completed.
+ */
+ scp = (Scsi_Cmnd *) qdonep->d2.srb_ptr;
+ ASC_DBG1(1, "asc_isr_callback: scp %x\n", (unsigned) scp);
+
+ if (scp == NULL) {
+ ASC_PRINT("asc_isr_callback: scp is NULL\n");
+ return;
+ }
+ ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
+
+ /*
+ * If the request's host pointer is not valid, display a
+ * message and return.
+ */
+ shp = scp->host;
+ for (i = 0; i < asc_board_count; i++) {
+ if (asc_host[i] == shp) {
+ break;
+ }
+ }
+ if (i == asc_board_count) {
+ ASC_PRINT2("asc_isr_callback: scp %x has bad host pointer, host %x\n",
+ (unsigned) scp, (unsigned) shp);
+ return;
+ }
+
+ ASC_STATS(shp, callback);
+ ASC_DBG1(1, "asc_isr_callback: shp %x\n", (unsigned) shp);
+
+ /*
+ * If the request isn't found on the active queue, it may
+ * have been removed to handle a reset or abort request.
+ * Display a message and return.
+ */
+ boardp = ASC_BOARDP(shp);
+ ASC_ASSERT(asc_dvc_varp == &boardp->dvc_var.asc_dvc_var);
+ if (asc_rmqueue(&boardp->active, scp) == ASC_FALSE) {
+ ASC_PRINT2("asc_isr_callback: board %d: scp %x not on active queue\n",
+ boardp->id, (unsigned) scp);
+ return;
+ }
+
+ /*
+ * Check for an underrun condition.
+ */
+ if (scp->request_bufflen != 0 && qdonep->remain_bytes != 0 &&
+ qdonep->remain_bytes <= scp->request_bufflen) {
+ ASC_DBG1(1, "asc_isr_callback: underrun condition %u bytes\n",
+ (unsigned) qdonep->remain_bytes);
+ underrun = ASC_TRUE;
+ }
+
+ /*
+ * 'qdonep' contains the command's ending status.
+ */
+ switch (qdonep->d3.done_stat) {
+ case QD_NO_ERROR:
+ ASC_DBG(2, "asc_isr_callback: QD_NO_ERROR\n");
+ switch (qdonep->d3.host_stat) {
+ case QHSTA_NO_ERROR:
+ scp->result = 0;
+ break;
+ default:
+ /* QHSTA error occurred */
+ scp->result = HOST_BYTE(DID_ERROR);
+ break;
+ }
+
+ /*
+ * If an INQUIRY command completed successfully, then call
+ * the AscInquiryHandling() function to set-up the device.
+ */
+ if (scp->cmnd[0] == SCSICMD_Inquiry && scp->lun == 0 &&
+ (scp->request_bufflen - qdonep->remain_bytes) >= 8)
+ {
+ AscInquiryHandling(asc_dvc_varp, scp->target & 0x7,
+ (ASC_SCSI_INQUIRY *) scp->request_buffer);
+ }
+
+ /*
+ * If there was an underrun without any other error,
+ * set DID_UNDERRUN to indicate the underrun error.
+ *
+ * Note: There is no way yet to indicate the number
+ * of underrun bytes.
+ */
+ if (scp->result == 0 && underrun == ASC_TRUE) {
+ scp->result = HOST_BYTE(DID_UNDERRUN);
+ }
+ break;
+
+ case QD_WITH_ERROR:
+ ASC_DBG(2, "asc_isr_callback: QD_WITH_ERROR\n");
+ switch (qdonep->d3.host_stat) {
+ case QHSTA_NO_ERROR:
+ if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
+ ASC_DBG(2, "asc_isr_callback: SS_CHK_CONDITION\n");
+ ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
+ sizeof(scp->sense_buffer));
+ /*
+ * Note: The 'status_byte()' macro used by target drivers
+ * defined in scsi.h shifts the status byte returned by
+ * host drivers right by 1 bit. This is why target drivers
+ * also use right shifted status byte definitions. For
+ * instance target drivers use CHECK_CONDITION, defined to
+ * 0x1, instead of the SCSI defined check condition value
+ * of 0x2. Host drivers are supposed to return the status
+ * byte as it is defined by SCSI.
+ */
+ scp->result = DRIVER_BYTE(DRIVER_SENSE) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ } else {
+ scp->result = STATUS_BYTE(qdonep->d3.scsi_stat);
+ }
+ break;
+
+ default:
+ /* QHSTA error occurred */
+ ASC_DBG1(1, "asc_isr_callback: host_stat %x\n",
+ qdonep->d3.host_stat);
+ scp->result = HOST_BYTE(DID_BAD_TARGET);
+ break;
+ }
+ break;
+
+ case QD_ABORTED_BY_HOST:
+ ASC_DBG(1, "asc_isr_callback: QD_ABORTED_BY_HOST\n");
+ scp->result = HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3.scsi_msg) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ break;
+
+ default:
+ ASC_DBG1(1, "asc_isr_callback: done_stat %x\n", qdonep->d3.done_stat);
+ scp->result = HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.scsi_msg) |
+ STATUS_BYTE(qdonep->d3.scsi_stat);
+ break;
+ }
+
+ /*
+ * If the 'init_tidmask' bit isn't already set for the target and the
+ * current request finished normally, then set the bit for the target
+ * to indicate that a device is present.
+ */
+ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->target)) == 0 &&
+ qdonep->d3.done_stat == QD_NO_ERROR &&
+ qdonep->d3.host_stat == QHSTA_NO_ERROR) {
+ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->target);
+ }
+
+ /*
+ * Because interrupts may be enabled by the 'Scsi_Cmnd' done
+ * function, add the command to the end of the board's done queue.
+ * The done function for the command will be called from
+ * advansys_interrupt().
+ */
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+
+ return;
+}
+
+/*
+ * adv_isr_callback() - Second Level Interrupt Handler called by AdvISR().
+ *
+ * Callback function for the Wide SCSI Adv Library.
+ */
+STATIC void
+adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
+{
+ asc_board_t *boardp;
+ adv_req_t *reqp;
+ Scsi_Cmnd *scp;
+ struct Scsi_Host *shp;
+ int underrun = ASC_FALSE;
+ int i;
+
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_DBG2(1, "adv_isr_callback: adv_dvc_varp %x, scsiqp %x\n",
+ (unsigned) adv_dvc_varp, (unsigned) scsiqp);
+ ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp);
+
+ /*
+ * Get the adv_req_t structure for the command that has been
+ * completed. The adv_req_t structure actually contains the
+ * completed ADV_SCSI_REQ_Q structure.
+ */
+ reqp = (adv_req_t *) scsiqp->srb_ptr;
+ ASC_DBG1(1, "adv_isr_callback: reqp %x\n", (unsigned) reqp);
+ if (reqp == NULL) {
+ ASC_PRINT("adv_isr_callback: reqp is NULL\n");
+ return;
+ }
+
+ /*
+ * Get the Scsi_Cmnd structure and Scsi_Host structure for the
+ * command that has been completed.
+ *
+ * Note: The adv_req_t request structure and adv_sgblk_t structure,
+ * if any, are dropped, because a board structure pointer cannot be
+ * determined.
+ */
+ scp = reqp->cmndp;
+ ASC_DBG1(1, "adv_isr_callback: scp %x\n", (unsigned) scp);
+ if (scp == NULL) {
+ ASC_PRINT("adv_isr_callback: scp is NULL; adv_req_t dropped.\n");
+ return;
+ }
+ ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len);
+
+ /*
+ * If the request's host pointer is not valid, display a message
+ * and return.
+ */
+ shp = scp->host;
+ for (i = 0; i < asc_board_count; i++) {
+ if (asc_host[i] == shp) {
+ break;
+ }
+ }
+ /*
+ * Note: If the host structure is not found, the adv_req_t request
+ * structure and adv_sgblk_t structure, if any, are dropped.
+ */
+ if (i == asc_board_count) {
+ ASC_PRINT2("adv_isr_callback: scp %x has bad host pointer, host %x\n",
+ (unsigned) scp, (unsigned) shp);
+ return;
+ }
+
+ ASC_STATS(shp, callback);
+ ASC_DBG1(1, "adv_isr_callback: shp %x\n", (unsigned) shp);
+
+ /*
+ * If the request isn't found on the active queue, it may have been
+ * removed to handle a reset or abort request. Display a message and
+ * return.
+ *
+ * Note: Because the structure may still be in use don't attempt
+ * to free the adv_req_t and adv_sgblk_t, if any, structures.
+ */
+ boardp = ASC_BOARDP(shp);
+ ASC_ASSERT(adv_dvc_varp == &boardp->dvc_var.adv_dvc_var);
+ if (asc_rmqueue(&boardp->active, scp) == ASC_FALSE) {
+ ASC_PRINT2("adv_isr_callback: board %d: scp %x not on active queue\n",
+ boardp->id, (unsigned) scp);
+ return;
+ }
+
+ /*
+ * Check for an underrun condition.
+ */
+ if (scp->request_bufflen != 0 && scsiqp->data_cnt != 0) {
+ ASC_DBG1(1, "adv_isr_callback: underrun condition %lu bytes\n",
+ scsiqp->data_cnt);
+ underrun = ASC_TRUE;
+ }
+
+ /*
+ * 'done_status' contains the command's ending status.
+ */
+ switch (scsiqp->done_status) {
+ case QD_NO_ERROR:
+ ASC_DBG(2, "adv_isr_callback: QD_NO_ERROR\n");
+ switch (scsiqp->host_status) {
+ case QHSTA_NO_ERROR:
+ scp->result = 0;
+ break;
+ default:
+ /* QHSTA error occurred. */
+ ASC_DBG1(2, "adv_isr_callback: host_status %x\n",
+ scsiqp->host_status);
+ scp->result = HOST_BYTE(DID_ERROR);
+ break;
+ }
+ /*
+ * If there was an underrun without any other error,
+ * set DID_UNDERRUN to indicate the underrun error.
+ *
+ * Note: There is no way yet to indicate the number
+ * of underrun bytes.
+ */
+ if (scp->result == 0 && underrun == ASC_TRUE) {
+ scp->result = HOST_BYTE(DID_UNDERRUN);
+ }
+ break;
+
+ case QD_WITH_ERROR:
+ ASC_DBG(2, "adv_isr_callback: QD_WITH_ERROR\n");
+ switch (scsiqp->host_status) {
+ case QHSTA_NO_ERROR:
+ if (scsiqp->scsi_status == SS_CHK_CONDITION) {
+ ASC_DBG(2, "adv_isr_callback: SS_CHK_CONDITION\n");
+ ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
+ sizeof(scp->sense_buffer));
+ /*
+ * Note: The 'status_byte()' macro used by target drivers
+ * defined in scsi.h shifts the status byte returned by
+ * host drivers right by 1 bit. This is why target drivers
+ * also use right shifted status byte definitions. For
+ * instance target drivers use CHECK_CONDITION, defined to
+ * 0x1, instead of the SCSI defined check condition value
+ * of 0x2. Host drivers are supposed to return the status
+ * byte as it is defined by SCSI.
+ */
+ scp->result = DRIVER_BYTE(DRIVER_SENSE) |
+ STATUS_BYTE(scsiqp->scsi_status);
+ } else {
+ scp->result = STATUS_BYTE(scsiqp->scsi_status);
+ }
+ break;
+
+ default:
+ /* Some other QHSTA error occurred. */
+ ASC_DBG1(1, "adv_isr_callback: host_status %x\n",
+ scsiqp->host_status);
+ scp->result = HOST_BYTE(DID_BAD_TARGET);
+ break;
+ }
+ break;
+
+ case QD_ABORTED_BY_HOST:
+ ASC_DBG(1, "adv_isr_callback: QD_ABORTED_BY_HOST\n");
+ scp->result = HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
+ break;
+
+ default:
+ ASC_DBG1(1, "adv_isr_callback: done_status %x\n", scsiqp->done_status);
+ scp->result = HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
+ break;
+ }
+
+ /*
+ * If the 'init_tidmask' bit isn't already set for the target and the
+ * current request finished normally, then set the bit for the target
+ * to indicate that a device is present.
+ */
+ if ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(scp->target)) == 0 &&
+ scsiqp->done_status == QD_NO_ERROR &&
+ scsiqp->host_status == QHSTA_NO_ERROR) {
+ boardp->init_tidmask |= ADV_TID_TO_TIDMASK(scp->target);
+ }
+
+ /*
+ * Because interrupts may be enabled by the 'Scsi_Cmnd' done
+ * function, add the command to the end of the board's done queue.
+ * The done function for the command will be called from
+ * advansys_interrupt().
+ */
+ asc_enqueue(&boardp->done, scp, ASC_BACK);
+
+ /*
+ * Free the adv_sgblk_t structure, if any, by adding it back
+ * to the board free list.
+ */
+ if (reqp->sgblkp != NULL) {
+ reqp->sgblkp->next_sgblkp = boardp->adv_sgblkp;
+ boardp->adv_sgblkp = reqp->sgblkp;
+ }
+
+ /*
+ * Free the adv_req_t structure used with the command by adding
+ * it back to the board free list.
+ */
+ reqp->next_reqp = boardp->adv_reqp;
+ boardp->adv_reqp = reqp;
+
+ ASC_DBG(1, "adv_isr_callback: done\n");
+
+ return;
+}
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+/*
+ * Search for an AdvanSys PCI device in the PCI configuration space.
+ */
+ASC_INITFUNC(
+STATIC int
+asc_srch_pci_dev(PCI_DEVICE *pciDevice)
+)
+{
+ int ret = PCI_DEVICE_NOT_FOUND;
+
+ ASC_DBG(2, "asc_srch_pci_dev: begin\n");
+
+ if (pci_scan_method == -1) {
+ pci_scan_method = asc_scan_method();
+ }
+ pciDevice->type = pci_scan_method;
+ ASC_DBG1(2, "asc_srch_pci_dev: type %d\n", pciDevice->type);
+
+ ret = asc_pci_find_dev(pciDevice);
+ ASC_DBG1(2, "asc_srch_pci_dev: asc_pci_find_dev() return %d\n", ret);
+ if (ret == PCI_DEVICE_FOUND) {
+ pciDevice->slotNumber = pciDevice->slotFound + 1;
+ pciDevice->startSlot = pciDevice->slotFound + 1;
+ } else {
+ if (pciDevice->bridge > pciDevice->busNumber) {
+ ASC_DBG2(2, "asc_srch_pci_dev: bridge %x, busNumber %x\n",
+ pciDevice->bridge, pciDevice->busNumber);
+ pciDevice->busNumber++;
+ pciDevice->slotNumber = 0;
+ pciDevice->startSlot = 0;
+ pciDevice->endSlot = 0x0f;
+ ret = asc_srch_pci_dev(pciDevice);
+ ASC_DBG1(2, "asc_srch_pci_dev: recursive call return %d\n", ret);
+ }
+ }
+
+ ASC_DBG1(2, "asc_srch_pci_dev: return %d\n", ret);
+ return ret;
+}
+
+/*
+ * Determine the access method to be used for 'pciDevice'.
+ */
+ASC_INITFUNC(
+STATIC uchar
+asc_scan_method(void)
+)
+{
+ ushort data;
+ PCI_DATA pciData;
+ uchar type;
+ uchar slot;
+
+ ASC_DBG(2, "asc_scan_method: begin\n");
+ memset(&pciData, 0, sizeof(pciData));
+ for (type = 1; type < 3; type++) {
+ pciData.type = type;
+ for (slot = 0; slot < PCI_MAX_SLOT; slot++) {
+ pciData.slot = slot;
+ data = asc_get_cfg_word(&pciData);
+ if ((data != 0xFFFF) && (data != 0x0000)) {
+ ASC_DBG2(4, "asc_scan_method: data %x, type %d\n", data, type);
+ return (type);
+ }
+ }
+ }
+ ASC_DBG1(4, "asc_scan_method: type %d\n", type);
+ return (type);
+}
+
+/*
+ * Check for an AdvanSys PCI device in 'pciDevice'.
+ *
+ * Return PCI_DEVICE_FOUND if found, otherwise return PCI_DEVICE_NOT_FOUND.
+ */
+ASC_INITFUNC(
+STATIC int
+asc_pci_find_dev(PCI_DEVICE *pciDevice)
+)
+{
+ PCI_DATA pciData;
+ ushort vendorid, deviceid;
+ uchar classcode, subclass;
+ uchar lslot;
+
+ ASC_DBG(3, "asc_pci_find_dev: begin\n");
+ pciData.type = pciDevice->type;
+ pciData.bus = pciDevice->busNumber;
+ pciData.func = pciDevice->devFunc;
+ lslot = pciDevice->startSlot;
+ for (; lslot < pciDevice->endSlot; lslot++) {
+ pciData.slot = lslot;
+ pciData.offset = VENDORID_OFFSET;
+ vendorid = asc_get_cfg_word(&pciData);
+ ASC_DBG1(3, "asc_pci_find_dev: vendorid %x\n", vendorid);
+ if (vendorid != 0xffff) {
+ pciData.offset = DEVICEID_OFFSET;
+ deviceid = asc_get_cfg_word(&pciData);
+ ASC_DBG1(3, "asc_pci_find_dev: deviceid %x\n", deviceid);
+ if ((vendorid == ASC_PCI_VENDORID) &&
+ ((deviceid == ASC_PCI_DEVICE_ID_1100) ||
+ (deviceid == ASC_PCI_DEVICE_ID_1200) ||
+ (deviceid == ASC_PCI_DEVICE_ID_1300) ||
+ (deviceid == ASC_PCI_DEVICE_ID_2300))) {
+ pciDevice->slotFound = lslot;
+ ASC_DBG(3, "asc_pci_find_dev: PCI_DEVICE_FOUND\n");
+ return PCI_DEVICE_FOUND;
+ } else {
+ pciData.offset = SUBCLASS_OFFSET;
+ subclass = asc_get_cfg_byte(&pciData);
+ pciData.offset = CLASSCODE_OFFSET;
+ classcode = asc_get_cfg_byte(&pciData);
+ if ((classcode & PCI_BASE_CLASS_BRIDGE_DEVICE) &&
+ (subclass & PCI_SUB_CLASS_PCI_TO_PCI_BRIDGE_CONTROLLER)) {
+ pciDevice->bridge++;
+ }
+ ASC_DBG2(3, "asc_pci_find_dev: subclass %x, classcode %x\n",
+ subclass, classcode);
+ }
+ }
+ }
+ return PCI_DEVICE_NOT_FOUND;
+}
+
+/*
+ * Read PCI configuration data into 'pciConfig'.
+ */
+ASC_INITFUNC(
+STATIC void
+asc_get_pci_cfg(PCI_DEVICE *pciDevice, PCI_CONFIG_SPACE *pciConfig)
+)
+{
+ PCI_DATA pciData;
+ uchar counter;
+ uchar *localConfig;
+
+ ASC_DBG1(4, "asc_get_pci_cfg: slotFound %d\n ",
+ pciDevice->slotFound);
+
+ pciData.type = pciDevice->type;
+ pciData.bus = pciDevice->busNumber;
+ pciData.slot = pciDevice->slotFound;
+ pciData.func = pciDevice->devFunc;
+ localConfig = (uchar *) pciConfig;
+
+ for (counter = 0; counter < sizeof(PCI_CONFIG_SPACE); counter++) {
+ pciData.offset = counter;
+ *localConfig = asc_get_cfg_byte(&pciData);
+ ASC_DBG1(4, "asc_get_pci_cfg: byte %x\n", *localConfig);
+ localConfig++;
+ }
+ ASC_DBG1(4, "asc_get_pci_cfg: counter %d\n", counter);
+}
+
+/*
+ * Read a word (16 bits) from the PCI configuration space.
+ *
+ * The configuration mechanism is checked for the correct access method.
+ */
+ASC_INITFUNC(
+STATIC ushort
+asc_get_cfg_word(PCI_DATA *pciData)
+)
+{
+ ushort tmp;
+ ulong address;
+ ulong lbus = pciData->bus;
+ ulong lslot = pciData->slot;
+ ulong lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ulong t1CF8, t1CFC;
+
+ ASC_DBG4(4, "asc_get_cfg_word: type %d, bus %lu, slot %lu, func %lu\n",
+ pciData->type, lbus, lslot, lfunc);
+
+ /*
+ * Check type of configuration mechanism.
+ */
+ if (pciData->type == 2) {
+ /*
+ * Save registers to be restored later.
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * Write the bus and enable registers.
+ */
+ /* set for type 1 cycle, if needed */
+ outp(0xCFA, pciData->bus);
+ /* set the function number */
+ outp(0xCF8, 0x10 | (pciData->func << 1));
+
+ /*
+ * Read the configuration space type 2 locations.
+ */
+ tmp = (ushort) inpw(0xC000 | ((pciData->slot << 8) + pciData->offset));
+
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ outp(0xCF8, t2CF8); /* restore config space enable register */
+ } else {
+ /*
+ * Type 1 or 3 configuration mechanism.
+ *
+ * Save the CONFIG_ADDRESS and CONFIG_DATA register values.
+ */
+ t1CF8 = inpl(0xCF8);
+ t1CFC = inpl(0xCFC);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>,
+ * func = <10:8>, reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) |
+ (lfunc << 8) | (pciData->offset & 0xFC) | 0x80000000L);
+
+ /*
+ * Write out the address to CONFIG_ADDRESS.
+ */
+ outpl(0xCF8, address);
+
+ /*
+ * Read in word from CONFIG_DATA.
+ */
+ tmp = (ushort) ((inpl(0xCFC) >>
+ ((pciData->offset & 2) * 8)) & 0xFFFF);
+
+ /*
+ * Restore registers.
+ */
+ outpl(0xCF8, t1CF8);
+ outpl(0xCFC, t1CFC);
+ }
+ ASC_DBG1(4, "asc_get_cfg_word: config data: %x\n", tmp);
+ return tmp;
+}
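+
+/*
+ * Worked example of the mechanism #1 encoding above (illustrative):
+ * bus 0, slot (device) 5, function 0, offset 0x10 yields
+ * CONFIG_ADDRESS = 0x80000000 | (0 << 16) | (5 << 11) | (0 << 8) | 0x10
+ * = 0x80002810. A read of offset 0x12 uses the same dword address and
+ * takes the upper 16 bits of the value read from CONFIG_DATA (0xCFC).
+ */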
+
+/*
+ * Read a byte from the PCI configuration space.
+ *
+ * The configuration mechanism is checked for the correct access method.
+ */
+ASC_INITFUNC(
+STATIC uchar
+asc_get_cfg_byte(PCI_DATA *pciData)
+)
+{
+ uchar tmp;
+ ulong address;
+ ulong lbus = pciData->bus, lslot = pciData->slot, lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ulong t1CF8, t1CFC;
+
+ ASC_DBG1(4, "asc_get_cfg_byte: type: %d\n", pciData->type);
+
+ /*
+ * Check type of configuration mechanism.
+ */
+ if (pciData->type == 2) {
+ /*
+ * Save registers to be restored later.
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * Write the bus and enable registers.
+ */
+ /* set for type 1 cycle, if needed */
+ outp(0xCFA, pciData->bus);
+ /* set the function number */
+ outp(0xCF8, 0x10 | (pciData->func << 1));
+
+ /*
+ * Read configuration space type 2 locations.
+ */
+ tmp = inp(0xC000 | ((pciData->slot << 8) + pciData->offset));
+
+ /*
+ * Restore registers.
+ */
+ outp(0xCF8, t2CF8); /* restore the enable register */
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ } else {
+ /*
+ * Type 1 or 3 configuration mechanism.
+ *
+ * Save CONFIG_ADDRESS and CONFIG_DATA register values.
+ */
+ t1CF8 = inpl(0xCF8);
+ t1CFC = inpl(0xCFC);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>, func = <10:8>,
+ * reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) |
+ (lfunc << 8) | (pciData->offset & 0xFC) | 0x80000000L);
+
+ /*
+ * Write out address to CONFIG_ADDRESS.
+ */
+ outpl(0xCF8, address);
+
+ /*
+ * Read in word from CONFIG_DATA.
+ */
+ tmp = (uchar) ((inpl(0xCFC) >> ((pciData->offset & 3) * 8)) & 0xFF);
+
+ /*
+ * Restore registers.
+ */
+ outpl(0xCF8, t1CF8);
+ outpl(0xCFC, t1CFC);
+ }
+ ASC_DBG1(4, "asc_get_cfg_byte: config data: %x\n", tmp);
+ return tmp;
+}
+
+/*
+ * Write a byte to the PCI configuration space.
+ */
+ASC_INITFUNC(
+STATIC void
+asc_put_cfg_byte(PCI_DATA *pciData, uchar byte_data)
+)
+{
+ ulong tmpl;
+ ulong address;
+ ulong lbus = pciData->bus, lslot = pciData->slot, lfunc = pciData->func;
+ uchar t2CFA, t2CF8;
+ ulong t1CF8, t1CFC;
+
+ ASC_DBG2(4, "asc_put_cfg_byte: type: %d, byte_data %x\n",
+ pciData->type, byte_data);
+
+ /*
+ * Check type of configuration mechanism.
+ */
+ if (pciData->type == 2) {
+
+ /*
+ * Save registers to be restored later.
+ */
+ t2CFA = inp(0xCFA); /* save PCI bus register */
+ t2CF8 = inp(0xCF8); /* save config space enable register */
+
+ /*
+ * Write bus and enable registers.
+ */
+ outp(0xCFA, pciData->bus);
+
+ /*
+ * Set the function number.
+ */
+ outp(0xCF8, 0x10 | (pciData->func << 1));
+
+ /*
+ * Write the configuration space type 2 locations.
+ */
+ outp(0xC000 | ((pciData->slot << 8) + pciData->offset), byte_data);
+
+ /*
+ * Restore registers.
+ */
+ outp(0xCF8, t2CF8); /* restore the enable register */
+ outp(0xCFA, t2CFA); /* restore PCI bus register */
+ } else {
+
+ /*
+ * Type 1 or 3 configuration mechanism.
+ *
+ * Save the CONFIG_ADDRESS and CONFIG_DATA register values.
+ */
+ t1CF8 = inpl(0xCF8);
+ t1CFC = inpl(0xCFC);
+
+ /*
+ * enable <31>, bus = <23:16>, slot = <15:11>, func = <10:8>,
+ * reg = <7:2>
+ */
+ address = (ulong) ((lbus << 16) | (lslot << 11) | (lfunc << 8) |
+ (pciData->offset & 0xFC) | 0x80000000L);
+ /*
+ * Write out address to CONFIG_ADDRESS.
+ */
+ outpl(0xCF8, address);
+
+ /*
+ * Write double word to CONFIG_DATA preserving the bytes
+ * in the double word that are not written.
+ */
+ tmpl = inpl(0xCFC) & ~(0xFF << ((pciData->offset & 3) * 8));
+ outpl(0xCFC, tmpl | (byte_data << ((pciData->offset & 3) * 8)));
+
+ /*
+ * Restore registers.
+ */
+ outpl(0xCF8, t1CF8);
+ outpl(0xCFC, t1CFC);
+ }
+ ASC_DBG(4, "asc_put_cfg_byte: end\n");
+}
+#endif /* ASC_CONFIG_PCI */
+#endif /* version < v2.1.93 */
+
+/*
+ * Add a 'REQP' to the front or back of the specified queue. Set 'tidmask'
+ * to indicate a command is queued for the device.
+ *
+ * 'flag' may be either ASC_FRONT or ASC_BACK.
+ *
+ * 'REQPNEXT(reqp)' returns reqp's next pointer.
+ */
+STATIC void
+asc_enqueue(asc_queue_t *ascq, REQP reqp, int flag)
+{
+ int tid;
+
+ ASC_DBG3(3, "asc_enqueue: ascq %x, reqp %x, flag %d\n",
+ (unsigned) ascq, (unsigned) reqp, flag);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(reqp != NULL);
+ ASC_ASSERT(flag == ASC_FRONT || flag == ASC_BACK);
+ tid = REQPTID(reqp);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+ if (flag == ASC_FRONT) {
+ REQPNEXT(reqp) = (unsigned char *) ascq->q_first[tid];
+ ascq->q_first[tid] = reqp;
+ /* If the queue was empty, set the last pointer. */
+ if (ascq->q_last[tid] == NULL) {
+ ascq->q_last[tid] = reqp;
+ }
+ } else { /* ASC_BACK */
+ if (ascq->q_last[tid] != NULL) {
+ REQPNEXT(ascq->q_last[tid]) = (unsigned char *) reqp;
+ }
+ ascq->q_last[tid] = reqp;
+ REQPNEXT(reqp) = NULL;
+ /* If the queue was empty, set the first pointer. */
+ if (ascq->q_first[tid] == NULL) {
+ ascq->q_first[tid] = reqp;
+ }
+ }
+ /* The queue has at least one entry, set its bit. */
+ ascq->q_tidmask |= ADV_TID_TO_TIDMASK(tid);
+#ifdef ADVANSYS_STATS
+ /* Maintain request queue statistics. */
+ ascq->q_tot_cnt[tid]++;
+ ascq->q_cur_cnt[tid]++;
+ if (ascq->q_cur_cnt[tid] > ascq->q_max_cnt[tid]) {
+ ascq->q_max_cnt[tid] = ascq->q_cur_cnt[tid];
+ ASC_DBG2(2, "asc_enqueue: new q_max_cnt[%d] %d\n",
+ tid, ascq->q_max_cnt[tid]);
+ }
+ REQPTIME(reqp) = REQTIMESTAMP();
+#endif /* ADVANSYS_STATS */
+ ASC_DBG1(3, "asc_enqueue: reqp %x\n", (unsigned) reqp);
+ return;
+}
+
+/*
+ * Return first queued 'REQP' on the specified queue for
+ * the specified target device. Clear the 'tidmask' bit for
+ * the device if no more commands are left queued for it.
+ *
+ * 'REQPNEXT(reqp)' returns reqp's next pointer.
+ */
+STATIC REQP
+asc_dequeue(asc_queue_t *ascq, int tid)
+{
+ REQP reqp;
+
+ ASC_DBG2(3, "asc_dequeue: ascq %x, tid %d\n", (unsigned) ascq, tid);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+ if ((reqp = ascq->q_first[tid]) != NULL) {
+ ASC_ASSERT(ascq->q_tidmask & ADV_TID_TO_TIDMASK(tid));
+ ascq->q_first[tid] = (REQP) REQPNEXT(reqp);
+ /* If the queue is empty, clear its bit and the last pointer. */
+ if (ascq->q_first[tid] == NULL) {
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(tid);
+ ASC_ASSERT(ascq->q_last[tid] == reqp);
+ ascq->q_last[tid] = NULL;
+ }
+#ifdef ADVANSYS_STATS
+ /* Maintain request queue statistics. */
+ ascq->q_cur_cnt[tid]--;
+ ASC_ASSERT(ascq->q_cur_cnt[tid] >= 0);
+ REQTIMESTAT("asc_dequeue", ascq, reqp, tid);
+#endif /* ADVANSYS_STATS */
+ }
+ ASC_DBG1(3, "asc_dequeue: reqp %x\n", (unsigned) reqp);
+ return reqp;
+}
+
+/*
+ * Return a pointer to a singly linked list of all the requests queued
+ * for 'tid' on the 'asc_queue_t' pointed to by 'ascq'.
+ *
+ * If 'lastpp' is not NULL, '*lastpp' will be set to point to the
+ * last request returned in the singly linked list.
+ *
+ * 'tid' should either be a valid target id or if it is ASC_TID_ALL,
+ * then all queued requests are concatenated into one list and
+ * returned.
+ *
+ * Note: If 'lastpp' is used to append a new list to the end of
+ * an old list, only change the old list last pointer if '*lastpp'
+ * (or the function return value) is not NULL, i.e. use a temporary
+ * variable for 'lastpp' and check its value after the function return
+ * before assigning it to the list last pointer. A short sketch of this
+ * pattern follows the function below.
+ *
+ * Unfortunately collecting queuing time statistics adds overhead to
+ * the function that isn't inherent to the function's algorithm.
+ */
+STATIC REQP
+asc_dequeue_list(asc_queue_t *ascq, REQP *lastpp, int tid)
+{
+ REQP firstp, lastp;
+ int i;
+
+ ASC_DBG2(3, "asc_dequeue_list: ascq %x, tid %d\n", (unsigned) ascq, tid);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT((tid == ASC_TID_ALL) || (tid >= 0 && tid <= ADV_MAX_TID));
+
+ /*
+ * If 'tid' is not ASC_TID_ALL, return requests only for
+ * the specified 'tid'. If 'tid' is ASC_TID_ALL, return all
+ * requests for all tids.
+ */
+ if (tid != ASC_TID_ALL) {
+ /* Return all requests for the specified 'tid'. */
+ if ((ascq->q_tidmask & ADV_TID_TO_TIDMASK(tid)) == 0) {
+ /* List is empty; Set first and last return pointers to NULL. */
+ firstp = lastp = NULL;
+ } else {
+ firstp = ascq->q_first[tid];
+ lastp = ascq->q_last[tid];
+ ascq->q_first[tid] = ascq->q_last[tid] = NULL;
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(tid);
+#ifdef ADVANSYS_STATS
+ {
+ REQP reqp;
+ ascq->q_cur_cnt[tid] = 0;
+ for (reqp = firstp; reqp; reqp = (REQP) REQPNEXT(reqp)) {
+ REQTIMESTAT("asc_dequeue_list", ascq, reqp, tid);
+ }
+ }
+#endif /* ADVANSYS_STATS */
+ }
+ } else {
+ /* Return all requests for all tids. */
+ firstp = lastp = NULL;
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if (ascq->q_tidmask & ADV_TID_TO_TIDMASK(i)) {
+ if (firstp == NULL) {
+ firstp = ascq->q_first[i];
+ lastp = ascq->q_last[i];
+ } else {
+ ASC_ASSERT(lastp != NULL);
+ REQPNEXT(lastp) = (unsigned char *) ascq->q_first[i];
+ lastp = ascq->q_last[i];
+ }
+ ascq->q_first[i] = ascq->q_last[i] = NULL;
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(i);
+#ifdef ADVANSYS_STATS
+ ascq->q_cur_cnt[i] = 0;
+#endif /* ADVANSYS_STATS */
+ }
+ }
+#ifdef ADVANSYS_STATS
+ {
+ REQP reqp;
+ for (reqp = firstp; reqp; reqp = (REQP) REQPNEXT(reqp)) {
+ REQTIMESTAT("asc_dequeue_list", ascq, reqp, reqp->target);
+ }
+ }
+#endif /* ADVANSYS_STATS */
+ }
+ if (lastpp) {
+ *lastpp = lastp;
+ }
+ ASC_DBG1(3, "asc_dequeue_list: firstp %x\n", (unsigned) firstp);
+ return firstp;
+}
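+
+/*
+ * Illustrative sketch (not part of the driver) of the append pattern
+ * described in the note above: use a temporary for 'lastpp' and only
+ * update the old list's tail pointer when the dequeued list is
+ * non-empty. 'oldlast' is a hypothetical tail pointer of an existing
+ * list.
+ */
+#if 0
+    REQP newfirst, newlast;
+
+    newfirst = asc_dequeue_list(&boardp->active, &newlast, ASC_TID_ALL);
+    if (newfirst != NULL) {
+        REQPNEXT(oldlast) = (unsigned char *) newfirst; /* append new list */
+        oldlast = newlast;                              /* only now move the tail */
+    }
+#endif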
+
+/*
+ * Remove the specified 'REQP' from the specified queue for
+ * the specified target device. Clear the 'tidmask' bit for the
+ * device if no more commands are left queued for it.
+ *
+ * 'REQPNEXT(reqp)' returns reqp's next pointer.
+ *
+ * Return ASC_TRUE if the command was found and removed,
+ * otherwise return ASC_FALSE.
+ */
+STATIC int
+asc_rmqueue(asc_queue_t *ascq, REQP reqp)
+{
+ REQP currp, prevp;
+ int tid;
+ int ret = ASC_FALSE;
+
+ ASC_DBG2(3, "asc_rmqueue: ascq %x, reqp %x\n",
+ (unsigned) ascq, (unsigned) reqp);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(reqp != NULL);
+
+ tid = REQPTID(reqp);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+
+ /*
+ * Handle the common case of 'reqp' being the first
+ * entry on the queue.
+ */
+ if (reqp == ascq->q_first[tid]) {
+ ret = ASC_TRUE;
+ ascq->q_first[tid] = (REQP) REQPNEXT(reqp);
+ /* If the queue is now empty, clear its bit and the last pointer. */
+ if (ascq->q_first[tid] == NULL) {
+ ascq->q_tidmask &= ~ADV_TID_TO_TIDMASK(tid);
+ ASC_ASSERT(ascq->q_last[tid] == reqp);
+ ascq->q_last[tid] = NULL;
+ }
+ } else if (ascq->q_first[tid] != NULL) {
+ ASC_ASSERT(ascq->q_last[tid] != NULL);
+ /*
+ * Because the case of 'reqp' being the first entry has been
+ * handled above and it is known the queue is not empty, if
+ * 'reqp' is found on the queue it is guaranteed the queue will
+ * not become empty and that 'q_first[tid]' will not be changed.
+ *
+ * Set 'prevp' to the first entry, 'currp' to the second entry,
+ * and search for 'reqp'.
+ */
+ for (prevp = ascq->q_first[tid], currp = (REQP) REQPNEXT(prevp);
+ currp; prevp = currp, currp = (REQP) REQPNEXT(currp)) {
+ if (currp == reqp) {
+ ret = ASC_TRUE;
+ REQPNEXT(prevp) = REQPNEXT(currp);
+ REQPNEXT(reqp) = NULL;
+ if (ascq->q_last[tid] == reqp) {
+ ascq->q_last[tid] = prevp;
+ }
+ break;
+ }
+ }
+ }
+#ifdef ADVANSYS_STATS
+ /* Maintain request queue statistics. */
+ if (ret == ASC_TRUE) {
+ ascq->q_cur_cnt[tid]--;
+ REQTIMESTAT("asc_rmqueue", ascq, reqp, tid);
+ }
+ ASC_ASSERT(ascq->q_cur_cnt[tid] >= 0);
+#endif /* ADVANSYS_STATS */
+ ASC_DBG2(3, "asc_rmqueue: reqp %x, ret %d\n", (unsigned) reqp, ret);
+ return ret;
+}
+
+/*
+ * If the specified 'REQP' is queued on the specified queue for
+ * the specified target device, return ASC_TRUE.
+ */
+STATIC int
+asc_isqueued(asc_queue_t *ascq, REQP reqp)
+{
+ REQP treqp;
+ int tid;
+ int ret = ASC_FALSE;
+
+ ASC_DBG2(3, "asc_isqueued: ascq %x, reqp %x\n",
+ (unsigned) ascq, (unsigned) reqp);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ ASC_ASSERT(reqp != NULL);
+
+ tid = REQPTID(reqp);
+ ASC_ASSERT(tid >= 0 && tid <= ADV_MAX_TID);
+
+ for (treqp = ascq->q_first[tid]; treqp; treqp = (REQP) REQPNEXT(treqp)) {
+ ASC_ASSERT(ascq->q_tidmask & ADV_TID_TO_TIDMASK(tid));
+ if (treqp == reqp) {
+ ret = ASC_TRUE;
+ break;
+ }
+ }
+ ASC_DBG1(3, "asc_isqueued: ret %x\n", ret);
+ return ret;
+}
+
+/*
+ * Execute as many queued requests as possible for the specified queue.
+ *
+ * Calls asc_execute_scsi_cmnd() to execute a REQP/Scsi_Cmnd.
+ */
+STATIC void
+asc_execute_queue(asc_queue_t *ascq)
+{
+ ADV_SCSI_BIT_ID_TYPE scan_tidmask;
+ REQP reqp;
+ int i;
+
+ ASC_DBG1(1, "asc_execute_queue: ascq %x\n", (unsigned) ascq);
+ ASC_ASSERT(interrupts_enabled() == ASC_FALSE);
+ /*
+ * Execute queued commands for devices attached to
+ * the current board in round-robin fashion.
+ */
+ scan_tidmask = ascq->q_tidmask;
+ do {
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if (scan_tidmask & ADV_TID_TO_TIDMASK(i)) {
+ if ((reqp = asc_dequeue(ascq, i)) == NULL) {
+ scan_tidmask &= ~ADV_TID_TO_TIDMASK(i);
+ } else if (asc_execute_scsi_cmnd((Scsi_Cmnd *) reqp)
+ == ASC_BUSY) {
+ scan_tidmask &= ~ADV_TID_TO_TIDMASK(i);
+ /* Put the request back at front of the list. */
+ asc_enqueue(ascq, reqp, ASC_FRONT);
+ }
+ }
+ }
+ } while (scan_tidmask);
+ return;
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,0)
+/*
+ * asc_prt_board_devices()
+ *
+ * Print driver information for devices attached to the board.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_board_devices(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ int chip_scsi_id;
+ int i;
+
+ boardp = ASC_BOARDP(shp);
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nDevice Information for AdvanSys SCSI Host %d:\n", shp->host_no);
+ ASC_PRT_NEXT();
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+ } else {
+ chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+ }
+
+ len = asc_prt_line(cp, leftlen, "Target IDs Detected:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) {
+ len = asc_prt_line(cp, leftlen, " %X,", i);
+ ASC_PRT_NEXT();
+ }
+ }
+ len = asc_prt_line(cp, leftlen, " (%X=Host Adapter)\n", chip_scsi_id);
+ ASC_PRT_NEXT();
+
+ return totlen;
+}
+
+/*
+ * Display Wide Board BIOS Information.
+ */
+STATIC int
+asc_prt_adv_bios(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ int upgrade = ASC_FALSE;
+ ushort major, minor, letter;
+
+ boardp = ASC_BOARDP(shp);
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen, "\nROM BIOS Version: ");
+ ASC_PRT_NEXT();
+
+ /*
+ * If the BIOS saved a valid signature, then display the BIOS
+ * version; otherwise note that the BIOS pre-dates version 3.1.
+ */
+ if (boardp->bios_signature != 0x55AA) {
+ len = asc_prt_line(cp, leftlen, "Pre-3.1\n");
+ ASC_PRT_NEXT();
+ upgrade = ASC_TRUE;
+ } else {
+ major = (boardp->bios_version >> 12) & 0xF;
+ minor = (boardp->bios_version >> 8) & 0xF;
+ letter = (boardp->bios_version & 0xFF);
+
+ len = asc_prt_line(cp, leftlen, "%d.%d%c\n",
+ major, minor, letter >= 26 ? '?' : letter + 'A');
+ ASC_PRT_NEXT();
+
+ /* Current available ROM BIOS release is 3.1C. */
+ if (major < 3 || (major <= 3 && minor < 1) ||
+ (major <= 3 && minor <= 1 && letter < ('C'- 'A'))) {
+ upgrade = ASC_TRUE;
+ }
+ }
+ if (upgrade == ASC_TRUE) {
+ len = asc_prt_line(cp, leftlen,
+"Newer version of ROM BIOS available: ftp://ftp.advansys.com/pub\n");
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * Add the serial number to the information bar if the signature AAh
+ * is found at bits 15-9 (7 bits) of word 1.
+ *
+ * The serial number consists of 12 alphanumeric digits.
+ *
+ * 1 - Product type (A,B,C,D..) Word0: 15-13 (3 bits)
+ * 2 - MFG Location (A,B,C,D..) Word0: 12-10 (3 bits)
+ * 3-4 - Product ID (0-99) Word0: 9-0 (10 bits)
+ * 5 - Product revision (A-J) Word0: " "
+ *
+ * Signature Word1: 15-9 (7 bits)
+ * 6 - Year (0-9) Word1: 8-6 (3 bits) & Word2: 15 (1 bit)
+ * 7-8 - Week of the year (1-52) Word1: 5-0 (6 bits)
+ *
+ * 9-12 - Serial Number (A001-Z999) Word2: 14-0 (15 bits)
+ *
+ * Note 1: Only production cards will have a serial number.
+ *
+ * Note 2: Signature is most significant 7 bits (0xFE).
+ *
+ * Returns ASC_TRUE if serial number found, otherwise returns ASC_FALSE.
+ */
+STATIC int
+asc_get_eeprom_string(ushort *serialnum, uchar *cp)
+{
+ ushort w, num;
+
+ if ((serialnum[1] & 0xFE00) != ((ushort) 0xAA << 8)) {
+ return ASC_FALSE;
+ } else {
+ /*
+ * First word - 6 digits.
+ */
+ w = serialnum[0];
+
+ /* Product type - 1st digit. */
+ if ((*cp = 'A' + ((w & 0xE000) >> 13)) == 'H') {
+ /* Product type is P=Prototype */
+ *cp += 0x8;
+ }
+ cp++;
+
+ /* Manufacturing location - 2nd digit. */
+ *cp++ = 'A' + ((w & 0x1C00) >> 10);
+
+ /* Product ID - 3rd, 4th digits. */
+ num = w & 0x3FF;
+ *cp++ = '0' + (num / 100);
+ num %= 100;
+ *cp++ = '0' + (num / 10);
+
+ /* Product revision - 5th digit. */
+ *cp++ = 'A' + (num % 10);
+
+ /*
+ * Second word
+ */
+ w = serialnum[1];
+
+ /*
+ * Year - 6th digit.
+ *
+ * If bit 15 of third word is set, then the
+ * last digit of the year is greater than 7.
+ */
+ if (serialnum[2] & 0x8000) {
+ *cp++ = '8' + ((w & 0x1C0) >> 6);
+ } else {
+ *cp++ = '0' + ((w & 0x1C0) >> 6);
+ }
+
+ /* Week of year - 7th, 8th digits. */
+ num = w & 0x003F;
+ *cp++ = '0' + num / 10;
+ num %= 10;
+ *cp++ = '0' + num;
+
+ /*
+ * Third word
+ */
+ w = serialnum[2] & 0x7FFF;
+
+ /* Serial number - 9th digit. */
+ *cp++ = 'A' + (w / 1000);
+
+ /* 10th, 11th, 12th digits. */
+ num = w % 1000;
+ *cp++ = '0' + num / 100;
+ num %= 100;
+ *cp++ = '0' + num / 10;
+ num %= 10;
+ *cp++ = '0' + num;
+
+ *cp = '\0'; /* Null Terminate the string. */
+ return ASC_TRUE;
+ }
+}
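+
+/*
+ * Worked example (illustrative): for the three EEPROM words
+ * { 0x2535, 0xAA5A, 0x1234 } the signature check passes
+ * (0xAA5A & 0xFE00 == 0xAA00) and the routine above produces the
+ * 12-character string "BB30J126E660": product type 'B', manufacturing
+ * location 'B', product ID "30", revision 'J', year digit '1',
+ * week "26", and serial number "E660".
+ */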
+
+/*
+ * asc_prt_asc_board_eeprom()
+ *
+ * Print board EEPROM configuration.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_asc_board_eeprom(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ ASC_DVC_VAR *asc_dvc_varp;
+ int leftlen;
+ int totlen;
+ int len;
+ ASCEEP_CONFIG *ep;
+ int i;
+ int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
+ uchar serialstr[13];
+
+ boardp = ASC_BOARDP(shp);
+ asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
+ ep = &boardp->eep_config.asc_eep;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nEEPROM Settings for AdvanSys SCSI Host %d:\n", shp->host_no);
+ ASC_PRT_NEXT();
+
+ if (asc_get_eeprom_string((ushort *) &ep->adapter_info[0], serialstr) ==
+ ASC_TRUE) {
+ len = asc_prt_line(cp, leftlen, " Serial Number: %s\n", serialstr);
+ ASC_PRT_NEXT();
+ } else {
+ if (ep->adapter_info[5] == 0xBB) {
+ len = asc_prt_line(cp, leftlen,
+ " Default Settings Used for EEPROM-less Adapter.\n");
+ ASC_PRT_NEXT();
+ } else {
+ len = asc_prt_line(cp, leftlen,
+ " Serial Number Signature Not Present.\n");
+ ASC_PRT_NEXT();
+ }
+ }
+
+ len = asc_prt_line(cp, leftlen,
+" Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep->chip_scsi_id, ep->max_total_qng, ep->max_tag_qng);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" cntl %x, no_scam %x\n",
+ ep->cntl, ep->no_scam);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Target ID: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %d", i);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Disconnects: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Command Queuing: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Start Motor: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Transfer:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
+ len = asc_prt_line(cp, leftlen,
+" Host ISA DMA speed: %d MB/S\n",
+ isa_dma_speed[ep->isa_dma_speed]);
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * asc_prt_adv_board_eeprom()
+ *
+ * Print board EEPROM configuration.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_adv_board_eeprom(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ ADV_DVC_VAR *adv_dvc_varp;
+ int leftlen;
+ int totlen;
+ int len;
+ int i;
+ char *termstr;
+ uchar serialstr[13];
+ ADVEEP_CONFIG *ep;
+
+ boardp = ASC_BOARDP(shp);
+ adv_dvc_varp = &boardp->dvc_var.adv_dvc_var;
+ ep = &boardp->eep_config.adv_eep;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nEEPROM Settings for AdvanSys SCSI Host %d:\n", shp->host_no);
+ ASC_PRT_NEXT();
+
+ if (asc_get_eeprom_string(&ep->serial_number_word1, serialstr) ==
+ ASC_TRUE) {
+ len = asc_prt_line(cp, leftlen, " Serial Number: %s\n", serialstr);
+ ASC_PRT_NEXT();
+ } else {
+ len = asc_prt_line(cp, leftlen,
+ " Serial Number Signature Not Present.\n");
+ ASC_PRT_NEXT();
+ }
+
+ len = asc_prt_line(cp, leftlen,
+" Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n",
+ ep->adapter_scsi_id, ep->max_host_qng, ep->max_dvc_qng);
+ ASC_PRT_NEXT();
+
+ switch (ep->termination) {
+ case 1:
+ termstr = "Low Off/High Off";
+ break;
+ case 2:
+ termstr = "Low Off/High On";
+ break;
+ case 3:
+ termstr = "Low On/High On";
+ break;
+ default:
+ case 0:
+ termstr = "Automatic";
+ break;
+ }
+
+ len = asc_prt_line(cp, leftlen,
+" termination: %u (%s), bios_ctrl: %x\n",
+ ep->termination, termstr, ep->bios_ctrl);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Target ID: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %X", i);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Disconnects: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Command Queuing: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Start Motor: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Transfer:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Ultra Transfer: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->ultra_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Wide Transfer: ");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ len = asc_prt_line(cp, leftlen, " %c",
+ (ep->wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ return totlen;
+}
+
+/*
+ * asc_prt_driver_conf()
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_driver_conf(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ int chip_scsi_id;
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ int i;
+#endif /* version >= v1.3.89 */
+
+ boardp = ASC_BOARDP(shp);
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n",
+ shp->host_no);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+" host_busy %u, last_reset %u, max_id %u, max_lun %u\n",
+ shp->host_busy, shp->last_reset, shp->max_id, shp->max_lun);
+#else /* version >= v1.3.89 */
+" host_busy %u, last_reset %u, max_id %u, max_lun %u, max_channel %u\n",
+ shp->host_busy, shp->last_reset, shp->max_id, shp->max_lun,
+ shp->max_channel);
+#endif /* version >= v1.3.89 */
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,57)
+" can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
+ shp->can_queue, shp->this_id, shp->sg_tablesize, shp->cmd_per_lun);
+#else /* version >= v1.3.57 */
+" unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n",
+ shp->unique_id, shp->can_queue, shp->this_id, shp->sg_tablesize,
+ shp->cmd_per_lun);
+#endif /* version >= v1.3.57 */
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,57)
+" unchecked_isa_dma %d, loaded_as_module %d\n",
+ shp->unchecked_isa_dma, shp->loaded_as_module);
+#else /* version >= v1.3.57 */
+" unchecked_isa_dma %d, use_clustering %d, loaded_as_module %d\n",
+ shp->unchecked_isa_dma, shp->use_clustering, shp->loaded_as_module);
+#endif /* version >= v1.3.57 */
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, " flags %x, last_reset %x, jiffies %x\n",
+ boardp->flags, boardp->last_reset, jiffies);
+ ASC_PRT_NEXT();
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+ } else {
+ chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+ }
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+ if (boardp->flags & ASC_SELECT_QUEUE_DEPTHS) {
+ len = asc_prt_line(cp, leftlen, " queue_depth:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if (boardp->device[i] == NULL) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %X:%d",
+ i, boardp->device[i]->queue_depth);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+ }
+#endif /* version >= v1.3.89 */
+
+#if ASC_QUEUE_FLOW_CONTROL
+ if (ASC_NARROW_BOARD(boardp)) {
+ len = asc_prt_line(cp, leftlen, " queue_curr_depth:");
+ ASC_PRT_NEXT();
+ /* Use ASC_MAX_TID for Narrow Board. */
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if (boardp->device[i] == NULL) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%d",
+ i, boardp->device[i]->queue_curr_depth);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, " queue_count:");
+ ASC_PRT_NEXT();
+ /* Use ASC_MAX_TID for Narrow Board. */
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if (boardp->device[i] == NULL) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%d",
+ i, boardp->device[i]->queue_count);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+ }
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+
+ return totlen;
+}
+
+/*
+ * asc_prt_asc_board_info()
+ *
+ * Print dynamic board configuration information.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_asc_board_info(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ ASC_DVC_VAR *v;
+ ASC_DVC_CFG *c;
+ int i;
+
+ boardp = ASC_BOARDP(shp);
+ v = &boardp->dvc_var.asc_dvc_var;
+ c = &boardp->dvc_cfg.asc_dvc_cfg;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shp->host_no);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" chip_version %u, lib_version %x, lib_serial_no %u, mcode_date %x\n",
+ c->chip_version, c->lib_version, c->lib_serial_no, c->mcode_date);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" mcode_version %x, err_code %u\n",
+ c->mcode_version, v->err_code);
+ ASC_PRT_NEXT();
+
+ /* Current number of commands waiting for the host. */
+ len = asc_prt_line(cp, leftlen,
+" Total Command Pending: %d\n", v->cur_total_qng);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Command Queuing:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%c",
+ i, (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Current number of commands waiting for a device. */
+ len = asc_prt_line(cp, leftlen,
+" Command Queue Pending:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%u", i, v->cur_dvc_qng[i]);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Current limit on number of commands that can be sent to a device. */
+ len = asc_prt_line(cp, leftlen,
+" Command Queue Limit:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%u", i, v->max_dvc_qng[i]);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ /* Indicate whether the device has returned queue full status. */
+ len = asc_prt_line(cp, leftlen,
+" Command Queue Full:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) {
+ len = asc_prt_line(cp, leftlen, " %d:Y-%d",
+ i, boardp->queue_full_cnt[i]);
+ } else {
+ len = asc_prt_line(cp, leftlen, " %d:N", i);
+ }
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Transfer:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ len = asc_prt_line(cp, leftlen, " %d:%c",
+ i, (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ uchar syn_period_ix;
+
+ if ((boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+ if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) {
+ continue;
+ }
+ syn_period_ix = (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index - 1);
+ len = asc_prt_line(cp, leftlen, " %d:", i);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+        " Transfer Period Factor: %d (%d.%d MHz),",
+ v->sdtr_period_tbl[syn_period_ix],
+ 250 / v->sdtr_period_tbl[syn_period_ix],
+ ASC_TENTHS(250, v->sdtr_period_tbl[syn_period_ix]));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d\n",
+ boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET);
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * asc_prt_adv_board_info()
+ *
+ * Print dynamic board configuration information.
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_adv_board_info(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ asc_board_t *boardp;
+ int leftlen;
+ int totlen;
+ int len;
+ int i;
+ ADV_DVC_VAR *v;
+ ADV_DVC_CFG *c;
+ AdvPortAddr iop_base;
+ ushort chip_scsi_id;
+ ushort lramword;
+ uchar lrambyte;
+ ushort sdtr_able;
+ ushort period;
+
+ boardp = ASC_BOARDP(shp);
+ v = &boardp->dvc_var.adv_dvc_var;
+ c = &boardp->dvc_cfg.adv_dvc_cfg;
+ iop_base = v->iop_base;
+ chip_scsi_id = v->chip_scsi_id;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ len = asc_prt_line(cp, leftlen,
+"\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n",
+ shp->host_no);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" iop_base %lx, cable_detect: %X, err_code %u, idle_cmd_done %u\n",
+ v->iop_base,
+ AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1) & CABLE_DETECT,
+ v->err_code, v->idle_cmd_done);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" chip_version %u, lib_version %x, mcode_date %x, mcode_version %x\n",
+ c->chip_version, c->lib_version, c->mcode_date, c->mcode_version);
+ ASC_PRT_NEXT();
+
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, lramword);
+ len = asc_prt_line(cp, leftlen,
+" Queuing Enabled:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:%c",
+ i, (lramword & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Queue Limit:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i, lrambyte);
+
+ len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Command Pending:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i, lrambyte);
+
+ len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, lramword);
+ len = asc_prt_line(cp, leftlen,
+" Wide Enabled:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:%c",
+ i, (lramword & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" Transfer Bit Width:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ AdvReadWordLram(iop_base, ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
+ lramword);
+ len = asc_prt_line(cp, leftlen, " %X:%d",
+ i, (lramword & 0x8000) ? 16 : 8);
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able);
+ len = asc_prt_line(cp, leftlen,
+" Synchronous Enabled:");
+ ASC_PRT_NEXT();
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:%c",
+ i, (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
+ ASC_PRT_NEXT();
+ }
+ len = asc_prt_line(cp, leftlen, "\n");
+ ASC_PRT_NEXT();
+
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+
+ AdvReadWordLram(iop_base, ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i),
+ lramword);
+ lramword &= ~0x8000;
+
+ if ((chip_scsi_id == i) ||
+ ((sdtr_able & ADV_TID_TO_TIDMASK(i)) == 0) ||
+ (lramword == 0)) {
+ continue;
+ }
+
+ len = asc_prt_line(cp, leftlen, " %X:", i);
+ ASC_PRT_NEXT();
+
+ period = (((lramword >> 8) * 25) + 50)/4;
+
+ len = asc_prt_line(cp, leftlen,
+            " Transfer Period Factor: %d (%d.%d MHz),",
+ period, 250/period, ASC_TENTHS(250, period));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d\n",
+ lramword & 0x1F);
+ ASC_PRT_NEXT();
+ }
+
+ return totlen;
+}
+
+/*
+ * asc_proc_copy()
+ *
+ * Copy proc information to a read buffer taking into account the current
+ * read offset in the file and the remaining space in the read buffer.
+ */
+STATIC int
+asc_proc_copy(off_t advoffset, off_t offset, char *curbuf, int leftlen,
+ char *cp, int cplen)
+{
+ int cnt = 0;
+
+ ASC_DBG3(2, "asc_proc_copy: offset %d, advoffset %d, cplen %d\n",
+ (unsigned) offset, (unsigned) advoffset, cplen);
+ if (offset <= advoffset) {
+        /* Read offset at or below the current offset, copy everything. */
+ cnt = ASC_MIN(cplen, leftlen);
+ ASC_DBG3(2, "asc_proc_copy: curbuf %x, cp %x, cnt %d\n",
+ (unsigned) curbuf, (unsigned) cp, cnt);
+ memcpy(curbuf, cp, cnt);
+ } else if (offset < advoffset + cplen) {
+ /* Read offset within current range, partial copy. */
+ cnt = (advoffset + cplen) - offset;
+ cp = (cp + cplen) - cnt;
+ cnt = ASC_MIN(cnt, leftlen);
+ ASC_DBG3(2, "asc_proc_copy: curbuf %x, cp %x, cnt %d\n",
+ (unsigned) curbuf, (unsigned) cp, cnt);
+ memcpy(curbuf, cp, cnt);
+ }
+ return cnt;
+}
+
+/*
+ * asc_prt_line()
+ *
+ * If 'cp' is NULL print to the console, otherwise print to a buffer.
+ *
+ * Return 0 if printing to the console, otherwise return the number of
+ * bytes written to the buffer.
+ *
+ * Note: If any single line is greater than ASC_PRTLINE_SIZE bytes the stack
+ * will be corrupted. 's[]' is defined to be ASC_PRTLINE_SIZE bytes.
+ */
+STATIC int
+asc_prt_line(char *buf, int buflen, char *fmt, ...)
+{
+ va_list args;
+ int ret;
+ char s[ASC_PRTLINE_SIZE];
+
+ va_start(args, fmt);
+ ret = vsprintf(s, fmt, args);
+ ASC_ASSERT(ret < ASC_PRTLINE_SIZE);
+ if (buf == NULL) {
+ (void) printk("%s", s);
+ ret = 0;
+ } else {
+ ret = ASC_MIN(buflen, ret);
+ memcpy(buf, s, ret);
+ }
+ va_end(args);
+ return ret;
+}
+#endif /* version >= v1.3.0 */
+
+
+/*
+ * --- Functions Required by the Asc Library
+ */
+
+/*
+ * Delay for 'n' milliseconds. Don't use the 'jiffies'
+ * global variable which is incremented once every 5 ms
+ * from a timer interrupt, because this function may be
+ * called when interrupts are disabled.
+ */
+STATIC void
+DvcSleepMilliSecond(ulong n)
+{
+ ulong i;
+
+ ASC_DBG1(4, "DvcSleepMilliSecond: %lu\n", n);
+ for (i = 0; i < n; i++) {
+ udelay(1000);
+ }
+}
+
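+/*
+ * Enter a critical section: save the processor flags and disable
+ * interrupts.  The saved flags are returned so DvcLeaveCritical()
+ * can restore the previous interrupt state.
+ */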
+STATIC long
+DvcEnterCritical(void)
+{
+ long flags;
+
+ save_flags(flags);
+ cli();
+ return flags;
+}
+
+STATIC void
+DvcLeaveCritical(long flags)
+{
+ restore_flags(flags);
+}
+
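+/*
+ * Build a single-entry scatter-gather list covering the whole buffer:
+ * the Asc Library is handed one physically contiguous segment of
+ * 'buf_len' bytes starting at 'buf_addr'.
+ */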
+STATIC ulong
+DvcGetSGList(ASC_DVC_VAR *asc_dvc_sg, uchar *buf_addr, ulong buf_len,
+ ASC_SG_HEAD *asc_sg_head_ptr)
+{
+ ulong buf_size;
+
+ buf_size = buf_len;
+ asc_sg_head_ptr->entry_cnt = 1;
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ asc_sg_head_ptr->sg_list[0].addr = (ulong) buf_addr;
+#else /* version >= v2.0.0 */
+ asc_sg_head_ptr->sg_list[0].addr = virt_to_bus(buf_addr);
+#endif /* version >= v2.0.0 */
+ asc_sg_head_ptr->sg_list[0].bytes = buf_size;
+ return buf_size;
+}
+
+/*
+ * void
+ * DvcPutScsiQ(PortAddr iop_base, ushort s_addr, ushort *outbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Output an ASC_SCSI_Q structure to the chip
+ */
+STATIC void
+DvcPutScsiQ(PortAddr iop_base, ushort s_addr, ushort *outbuf, int words)
+{
+ int i;
+
+ ASC_DBG_PRT_HEX(2, "DvcPutScsiQ", (uchar *) outbuf, 2 * words);
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++, outbuf++) {
+ if (i == 2 || i == 10) {
+ continue;
+ }
+ AscSetChipLramDataNoSwap(iop_base, *outbuf);
+ }
+}
+
+/*
+ * void
+ * DvcGetQinfo(PortAddr iop_base, ushort s_addr, ushort *inbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * Input an ASC_QDONE_INFO structure from the chip
+ */
+STATIC void
+DvcGetQinfo(PortAddr iop_base, ushort s_addr, ushort *inbuf, int words)
+{
+ int i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++, inbuf++) {
+ if (i == 5) {
+ continue;
+ }
+ *inbuf = AscGetChipLramDataNoSwap(iop_base);
+ }
+ ASC_DBG_PRT_HEX(2, "DvcGetQinfo", (uchar *) inbuf, 2 * words);
+}
+
+/*
+ * void DvcOutPortWords(ushort iop_base, ushort *outbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * output a buffer to an i/o port address
+ */
+STATIC void
+DvcOutPortWords(ushort iop_base, ushort *outbuf, int words)
+{
+ int i;
+
+ for (i = 0; i < words; i++, outbuf++)
+ outpw(iop_base, *outbuf);
+}
+
+/*
+ * void DvcInPortWords(ushort iop_base, ushort *inbuf, int words)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * input a buffer from an i/o port address
+ */
+STATIC void
+DvcInPortWords(ushort iop_base, ushort *inbuf, int words)
+{
+ int i;
+
+ for (i = 0; i < words; i++, inbuf++)
+ *inbuf = inpw(iop_base);
+}
+
+/*
+ * void DvcOutPortDWords(PortAddr port, ulong *pdw, int dwords)
+ *
+ * Calling/Exit State:
+ * none
+ *
+ * Description:
+ * output a buffer of 32-bit integers to an i/o port address in
+ * 16 bit integer units
+ */
+STATIC void
+DvcOutPortDWords(PortAddr port, ulong *pdw, int dwords)
+{
+ int i;
+ int words;
+ ushort *pw;
+
+ pw = (ushort *) pdw;
+ words = dwords << 1;
+ for(i = 0; i < words; i++, pw++) {
+ outpw(port, *pw);
+ }
+ return;
+}
+
+/*
+ * Read a PCI configuration byte.
+ */
+ASC_INITFUNC(
+STATIC uchar
+DvcReadPCIConfigByte(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ ushort offset)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ return asc_get_cfg_byte(&pciData);
+#else /* ASC_CONFIG_PCI */
+ return 0;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ uchar byte_data;
+ pcibios_read_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, &byte_data);
+ return byte_data;
+#else /* CONFIG_PCI */
+ return 0;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * Write a PCI configuration byte.
+ */
+ASC_INITFUNC(
+STATIC void
+DvcWritePCIConfigByte(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ ushort offset,
+ uchar byte_data)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ asc_put_cfg_byte(&pciData, byte_data);
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ pcibios_write_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, byte_data);
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * Return the BIOS address of the adapter at the specified
+ * I/O port and with the specified bus type.
+ */
+ASC_INITFUNC(
+STATIC ushort
+AscGetChipBiosAddress(
+ PortAddr iop_base,
+ ushort bus_type
+)
+)
+{
+    ushort cfg_lsw;
+    ushort bios_addr;
+
+    /*
+     * The PCI BIOS is relocated by the motherboard BIOS, so the driver
+     * cannot determine where a PCI BIOS has been loaded or where it
+     * executes.
+     */
+    if (bus_type & ASC_IS_PCI)
+    {
+        return (0);
+    }
+
+    if ((bus_type & ASC_IS_EISA) != 0)
+    {
+        cfg_lsw = AscGetEisaChipCfg(iop_base);
+        cfg_lsw &= 0x000F;
+        bios_addr = (ushort) (ASC_BIOS_MIN_ADDR +
+                              (cfg_lsw * ASC_BIOS_BANK_SIZE));
+        return (bios_addr);
+    }
+
+    cfg_lsw = AscGetChipCfgLsw(iop_base);
+
+    /*
+     * ISA PnP uses the top bit as the 32K BIOS flag.
+     */
+    if (bus_type == ASC_IS_ISAPNP)
+    {
+        cfg_lsw &= 0x7FFF;
+    }
+
+    bios_addr = (ushort) (((cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE) +
+                          ASC_BIOS_MIN_ADDR);
+    return (bios_addr);
+}
+
+
+/*
+ * --- Functions Required by the Adv Library
+ */
+
+/*
+ * DvcGetPhyAddr()
+ *
+ * Return the physical address of 'vaddr' and set '*lenp' to the
+ * number of physically contiguous bytes that follow 'vaddr'.
+ * 'flag' indicates the type of structure whose physical address
+ * is being translated.
+ *
+ * Note: Because Linux currently doesn't page the kernel and all
+ * kernel buffers are physically contiguous, leave '*lenp' unchanged.
+ */
+ulong
+DvcGetPhyAddr(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq,
+ uchar *vaddr, long *lenp, int flag)
+{
+ ulong paddr;
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,0,0)
+ paddr = (ulong) vaddr;
+#else /* version >= v2.0.0 */
+ paddr = virt_to_bus(vaddr);
+#endif /* version >= v2.0.0 */
+
+ ASC_DBG4(4,
+ "DvcGetPhyAddr: vaddr 0x%lx, lenp 0x%lx *lenp %lu, paddr 0x%lx\n",
+ (ulong) vaddr, (ulong) lenp, (ulong) *((ulong *) lenp), paddr);
+
+ return paddr;
+}
+
+/*
+ * Read a PCI configuration byte.
+ */
+ASC_INITFUNC(
+STATIC uchar
+DvcAdvReadPCIConfigByte(
+ ADV_DVC_VAR *asc_dvc,
+ ushort offset)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ return asc_get_cfg_byte(&pciData);
+#else /* ASC_CONFIG_PCI */
+ return 0;
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ uchar byte_data;
+ pcibios_read_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, &byte_data);
+ return byte_data;
+#else /* CONFIG_PCI */
+ return 0;
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * Write a PCI configuration byte.
+ */
+ASC_INITFUNC(
+STATIC void
+DvcAdvWritePCIConfigByte(
+ ADV_DVC_VAR *asc_dvc,
+ ushort offset,
+ uchar byte_data)
+)
+{
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,93)
+#ifdef ASC_CONFIG_PCI
+ PCI_DATA pciData;
+
+ pciData.bus = ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info);
+ pciData.slot = ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info);
+ pciData.func = ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info);
+ pciData.offset = offset;
+ pciData.type = pci_scan_method;
+ asc_put_cfg_byte(&pciData, byte_data);
+#endif /* ASC_CONFIG_PCI */
+#else /* version >= v2.1.93 */
+#ifdef CONFIG_PCI
+ pcibios_write_config_byte(ASC_PCI_ID2BUS(asc_dvc->cfg->pci_slot_info),
+ PCI_DEVFN(ASC_PCI_ID2DEV(asc_dvc->cfg->pci_slot_info),
+ ASC_PCI_ID2FUNC(asc_dvc->cfg->pci_slot_info)),
+ offset, byte_data);
+#endif /* CONFIG_PCI */
+#endif /* version >= v2.1.93 */
+}
+
+/*
+ * --- Tracing and Debugging Functions
+ */
+
+#ifdef ADVANSYS_STATS
+/*
+ * asc_prt_board_stats()
+ *
+ * Note: no single line should be greater than ASC_PRTLINE_SIZE,
+ * cf. asc_prt_line().
+ *
+ * Return the number of characters copied into 'cp'. No more than
+ * 'cplen' characters will be copied to 'cp'.
+ */
+STATIC int
+asc_prt_board_stats(struct Scsi_Host *shp, char *cp, int cplen)
+{
+ int leftlen;
+ int totlen;
+ int len;
+ struct asc_stats *s;
+ int i;
+ ushort chip_scsi_id;
+ asc_board_t *boardp;
+ asc_queue_t *active;
+ asc_queue_t *waiting;
+
+ leftlen = cplen;
+ totlen = len = 0;
+
+ boardp = ASC_BOARDP(shp);
+ s = &boardp->asc_stats;
+
+ len = asc_prt_line(cp, leftlen,
+"\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", shp->host_no);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" command %lu, queuecommand %lu, abort %lu, reset %lu, biosparam %lu\n",
+ s->command, s->queuecommand, s->abort, s->reset, s->biosparam);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" interrupt %lu, callback %lu, done %lu\n",
+ s->interrupt, s->callback, s->done);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" exe_noerror %lu, exe_busy %lu, exe_error %lu, exe_unknown %lu\n",
+ s->exe_noerror, s->exe_busy, s->exe_error, s->exe_unknown);
+ ASC_PRT_NEXT();
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ len = asc_prt_line(cp, leftlen,
+" build_error %lu\n",
+ s->build_error);
+ } else {
+ len = asc_prt_line(cp, leftlen,
+" build_error %lu, build_noreq %lu, build_nosg %lu\n",
+ s->build_error, s->adv_build_noreq, s->adv_build_nosg);
+ }
+ ASC_PRT_NEXT();
+
+ /*
+ * Display data transfer statistics.
+ */
+ if (s->cont_cnt > 0) {
+ len = asc_prt_line(cp, leftlen, " cont_cnt %lu, ", s->cont_cnt);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "cont_xfer %lu.%01lu kb ",
+ s->cont_xfer/2,
+ ASC_TENTHS(s->cont_xfer, 2));
+ ASC_PRT_NEXT();
+
+ /* Contiguous transfer average size */
+ len = asc_prt_line(cp, leftlen, "avg_xfer %lu.%01lu kb\n",
+ (s->cont_xfer/2)/s->cont_cnt,
+ ASC_TENTHS((s->cont_xfer/2), s->cont_cnt));
+ ASC_PRT_NEXT();
+ }
+
+ if (s->sg_cnt > 0) {
+
+ len = asc_prt_line(cp, leftlen, " sg_cnt %lu, sg_elem %lu, ",
+ s->sg_cnt, s->sg_elem);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "sg_xfer %lu.%01lu kb\n",
+ s->sg_xfer/2,
+ ASC_TENTHS(s->sg_xfer, 2));
+ ASC_PRT_NEXT();
+
+ /* Scatter gather transfer statistics */
+ len = asc_prt_line(cp, leftlen, " avg_num_elem %lu.%01lu, ",
+ s->sg_elem/s->sg_cnt,
+ ASC_TENTHS(s->sg_elem, s->sg_cnt));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "avg_elem_size %lu.%01lu kb, ",
+ (s->sg_xfer/2)/s->sg_elem,
+ ASC_TENTHS((s->sg_xfer/2), s->sg_elem));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen, "avg_xfer_size %lu.%01lu kb\n",
+ (s->sg_xfer/2)/s->sg_cnt,
+ ASC_TENTHS((s->sg_xfer/2), s->sg_cnt));
+ ASC_PRT_NEXT();
+ }
+
+ /*
+ * Display request queuing statistics.
+ */
+ len = asc_prt_line(cp, leftlen,
+" Active and Waiting Request Queues (Time Unit: %d HZ):\n", HZ);
+ ASC_PRT_NEXT();
+
+ active = &ASC_BOARDP(shp)->active;
+ waiting = &ASC_BOARDP(shp)->waiting;
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id;
+ } else {
+ chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id;
+ }
+
+ for (i = 0; i <= ADV_MAX_TID; i++) {
+
+ if ((chip_scsi_id == i) ||
+ ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) {
+ continue;
+ }
+
+ if (active->q_tot_cnt[i] > 0 || waiting->q_tot_cnt[i] > 0) {
+ len = asc_prt_line(cp, leftlen, " target %d\n", i);
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" active: cnt [cur %d, max %d, tot %u], time [min %d, max %d, avg %lu.%01lu]\n",
+ active->q_cur_cnt[i], active->q_max_cnt[i],
+ active->q_tot_cnt[i],
+ active->q_min_tim[i], active->q_max_tim[i],
+ (active->q_tot_cnt[i] == 0) ? 0 :
+ (active->q_tot_tim[i]/active->q_tot_cnt[i]),
+ (active->q_tot_cnt[i] == 0) ? 0 :
+ ASC_TENTHS(active->q_tot_tim[i], active->q_tot_cnt[i]));
+ ASC_PRT_NEXT();
+
+ len = asc_prt_line(cp, leftlen,
+" waiting: cnt [cur %d, max %d, tot %u], time [min %u, max %u, avg %lu.%01lu]\n",
+ waiting->q_cur_cnt[i], waiting->q_max_cnt[i],
+ waiting->q_tot_cnt[i],
+ waiting->q_min_tim[i], waiting->q_max_tim[i],
+ (waiting->q_tot_cnt[i] == 0) ? 0 :
+ (waiting->q_tot_tim[i]/waiting->q_tot_cnt[i]),
+ (waiting->q_tot_cnt[i] == 0) ? 0 :
+ ASC_TENTHS(waiting->q_tot_tim[i], waiting->q_tot_cnt[i]));
+ ASC_PRT_NEXT();
+ }
+ }
+
+ return totlen;
+}
+#endif /* ADVANSYS_STATS */
+
+#ifdef ADVANSYS_DEBUG
+/*
+ * asc_prt_scsi_host()
+ */
+STATIC void
+asc_prt_scsi_host(struct Scsi_Host *s)
+{
+ asc_board_t *boardp;
+
+ boardp = ASC_BOARDP(s);
+
+ printk("Scsi_Host at addr %x\n", (unsigned) s);
+ printk(
+" next %x, extra_bytes %u, host_busy %u, host_no %d, last_reset %d,\n",
+ (unsigned) s->next, s->extra_bytes, s->host_busy, s->host_no,
+ (unsigned) s->last_reset);
+
+ printk(
+" host_wait %x, host_queue %x, hostt %x, block %x,\n",
+ (unsigned) s->host_wait, (unsigned) s->host_queue,
+ (unsigned) s->hostt, (unsigned) s->block);
+
+ printk(
+" wish_block %d, base %x, io_port %d, n_io_port %d, irq %d, dma_channel %d,\n",
+ s->wish_block, (unsigned) s->base, s->io_port, s->n_io_port,
+ s->irq, s->dma_channel);
+
+ printk(
+" this_id %d, can_queue %d,\n", s->this_id, s->can_queue);
+
+ printk(
+" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d, loaded_as_module %d\n",
+ s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma,
+ s->loaded_as_module);
+
+ if (ASC_NARROW_BOARD(boardp)) {
+ asc_prt_asc_dvc_var(&ASC_BOARDP(s)->dvc_var.asc_dvc_var);
+ asc_prt_asc_dvc_cfg(&ASC_BOARDP(s)->dvc_cfg.asc_dvc_cfg);
+ } else {
+ asc_prt_adv_dvc_var(&ASC_BOARDP(s)->dvc_var.adv_dvc_var);
+ asc_prt_adv_dvc_cfg(&ASC_BOARDP(s)->dvc_cfg.adv_dvc_cfg);
+ }
+}
+
+/*
+ * asc_prt_scsi_cmnd()
+ */
+STATIC void
+asc_prt_scsi_cmnd(Scsi_Cmnd *s)
+{
+ printk("Scsi_Cmnd at addr %x\n", (unsigned) s);
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+ printk(
+" host %x, device %x, target %u, lun %u\n",
+ (unsigned) s->host, (unsigned) s->device, s->target, s->lun);
+#else /* version >= v1.3.0 */
+ printk(
+" host %x, device %x, target %u, lun %u, channel %u,\n",
+ (unsigned) s->host, (unsigned) s->device, s->target, s->lun,
+ s->channel);
+#endif /* version >= v1.3.0 */
+
+ asc_prt_hex(" CDB", s->cmnd, s->cmd_len);
+
+ printk(
+" use_sg %u, sglist_len %u, abort_reason %x\n",
+ s->use_sg, s->sglist_len, s->abort_reason);
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+ printk(
+" retries %d, allowed %d\n",
+ s->retries, s->allowed);
+#else /* version >= v1.3.89 */
+ printk(
+" serial_number %x, serial_number_at_timeout %x, retries %d, allowed %d\n",
+ (unsigned) s->serial_number, (unsigned) s->serial_number_at_timeout,
+ s->retries, s->allowed);
+#endif /* version >= v1.3.89 */
+
+ printk(
+" timeout_per_command %d, timeout_total %d, timeout %d\n",
+ s->timeout_per_command, s->timeout_total, s->timeout);
+
+ printk(
+" internal_timeout %u, flags %u, this_count %d\n",
+ s->internal_timeout, s->flags, s->this_count);
+
+ printk(
+" scsi_done %x, done %x, host_scribble %x, result %x\n",
+ (unsigned) s->scsi_done, (unsigned) s->done,
+ (unsigned) s->host_scribble, s->result);
+
+ printk(
+" tag %u, pid %u\n",
+ (unsigned) s->tag, (unsigned) s->pid);
+}
+
+/*
+ * asc_prt_asc_dvc_var()
+ */
+STATIC void
+asc_prt_asc_dvc_var(ASC_DVC_VAR *h)
+{
+ printk("ASC_DVC_VAR at addr %x\n", (unsigned) h);
+
+ printk(
+" iop_base %x, err_code %x, dvc_cntl %x, bug_fix_cntl %d,\n",
+ h->iop_base, h->err_code, h->dvc_cntl, h->bug_fix_cntl);
+
+ printk(
+" bus_type %d, isr_callback %x, exe_callback %x, init_sdtr %x,\n",
+ h->bus_type, (unsigned) h->isr_callback, (unsigned) h->exe_callback,
+ (unsigned) h->init_sdtr);
+
+ printk(
+" sdtr_done %x, use_tagged_qng %x, unit_not_ready %x, chip_no %x,\n",
+ (unsigned) h->sdtr_done, (unsigned) h->use_tagged_qng,
+ (unsigned) h->unit_not_ready, (unsigned) h->chip_no);
+
+ printk(
+" queue_full_or_busy %x, start_motor %x, scsi_reset_wait %x, irq_no %x,\n",
+ (unsigned) h->queue_full_or_busy, (unsigned) h->start_motor,
+ (unsigned) h->scsi_reset_wait, (unsigned) h->irq_no);
+
+ printk(
+" is_in_int %x, max_total_qng %x, cur_total_qng %x, in_critical_cnt %x,\n",
+ (unsigned) h->is_in_int, (unsigned) h->max_total_qng,
+ (unsigned) h->cur_total_qng, (unsigned) h->in_critical_cnt);
+
+ printk(
+" last_q_shortage %x, init_state %x, no_scam %x, pci_fix_asyn_xfer %x,\n",
+ (unsigned) h->last_q_shortage, (unsigned) h->init_state,
+ (unsigned) h->no_scam, (unsigned) h->pci_fix_asyn_xfer);
+
+ printk(
+" cfg %x, saved_ptr2func %x\n",
+ (unsigned) h->cfg, (unsigned) h->saved_ptr2func);
+}
+
+/*
+ * asc_prt_asc_dvc_cfg()
+ */
+STATIC void
+asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h)
+{
+ printk("ASC_DVC_CFG at addr %x\n", (unsigned) h);
+
+ printk(
+" can_tagged_qng %x, cmd_qng_enabled %x, disc_enable %x, sdtr_enable %x,\n",
+ h->can_tagged_qng, h->cmd_qng_enabled, h->disc_enable,
+ h->sdtr_enable);
+
+ printk(
+" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, chip_version %d,\n",
+ h->chip_scsi_id, h->isa_dma_speed, h->isa_dma_channel,
+ h->chip_version);
+
+ printk(
+" pci_device_id %d, lib_serial_no %x, lib_version %x, mcode_date %x,\n",
+ h->pci_device_id, h->lib_serial_no, h->lib_version, h->mcode_date);
+
+ printk(
+" mcode_version %d, overrun_buf %x\n",
+ h->mcode_version, (unsigned) h->overrun_buf);
+}
+
+/*
+ * asc_prt_asc_scsi_q()
+ */
+STATIC void
+asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
+{
+ ASC_SG_HEAD *sgp;
+ int i;
+
+ printk("ASC_SCSI_Q at addr %x\n", (unsigned) q);
+
+ printk(
+" target_ix %u, target_lun %u, srb_ptr %x, tag_code %u,\n",
+ q->q2.target_ix, q->q1.target_lun,
+ (unsigned) q->q2.srb_ptr, q->q2.tag_code);
+
+ printk(
+" data_addr %x, data_cnt %lu, sense_addr %x, sense_len %u,\n",
+ (unsigned) q->q1.data_addr, q->q1.data_cnt,
+ (unsigned) q->q1.sense_addr, q->q1.sense_len);
+
+ printk(
+" cdbptr %x, cdb_len %u, sg_head %x, sg_queue_cnt %u\n",
+ (unsigned) q->cdbptr, q->q2.cdb_len,
+ (unsigned) q->sg_head, q->q1.sg_queue_cnt);
+
+ if (q->sg_head) {
+ sgp = q->sg_head;
+ printk("ASC_SG_HEAD at addr %x\n", (unsigned) sgp);
+ printk(" entry_cnt %u, queue_cnt %u\n", sgp->entry_cnt, sgp->queue_cnt);
+ for (i = 0; i < sgp->entry_cnt; i++) {
+ printk(" [%u]: addr %x, bytes %lu\n",
+ i, (unsigned) sgp->sg_list[i].addr, sgp->sg_list[i].bytes);
+ }
+
+ }
+}
+
+/*
+ * asc_prt_asc_qdone_info()
+ */
+STATIC void
+asc_prt_asc_qdone_info(ASC_QDONE_INFO *q)
+{
+ printk("ASC_QDONE_INFO at addr %x\n", (unsigned) q);
+ printk(
+" srb_ptr %x, target_ix %u, cdb_len %u, tag_code %u, done_stat %x\n",
+ (unsigned) q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len,
+ q->d2.tag_code, q->d3.done_stat);
+ printk(
+" host_stat %x, scsi_stat %x, scsi_msg %x\n",
+ q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg);
+}
+
+/*
+ * asc_prt_adv_dvc_var()
+ *
+ * Display an ADV_DVC_VAR structure.
+ */
+STATIC void
+asc_prt_adv_dvc_var(ADV_DVC_VAR *h)
+{
+ printk(" ADV_DVC_VAR at addr 0x%lx\n", (ulong) h);
+
+ printk(
+" iop_base 0x%lx, err_code 0x%x, ultra_able 0x%x\n",
+ (ulong) h->iop_base, h->err_code, (unsigned) h->ultra_able);
+
+ printk(
+" isr_callback 0x%x, sdtr_able 0x%x, wdtr_able 0x%x\n",
+    (unsigned) h->isr_callback, (unsigned) h->sdtr_able,
+    (unsigned) h->wdtr_able);
+
+ printk(
+" start_motor 0x%x, scsi_reset_wait 0x%x, irq_no 0x%x,\n",
+ (unsigned) h->start_motor,
+ (unsigned) h->scsi_reset_wait, (unsigned) h->irq_no);
+
+ printk(
+" max_host_qng 0x%x, cur_host_qng 0x%x, max_dvc_qng 0x%x\n",
+ (unsigned) h->max_host_qng, (unsigned) h->cur_host_qng,
+ (unsigned) h->max_dvc_qng);
+
+ printk(
+" no_scam 0x%x, tagqng_able 0x%x, chip_scsi_id 0x%x, cfg 0x%lx\n",
+ (unsigned) h->no_scam, (unsigned) h->tagqng_able,
+ (unsigned) h->chip_scsi_id, (ulong) h->cfg);
+
+}
+
+/*
+ * asc_prt_adv_dvc_cfg()
+ *
+ * Display an ADV_DVC_CFG structure.
+ */
+STATIC void
+asc_prt_adv_dvc_cfg(ADV_DVC_CFG *h)
+{
+ printk(" ADV_DVC_CFG at addr 0x%lx\n", (ulong) h);
+
+ printk(
+" disc_enable 0x%x, termination 0x%x\n",
+ h->disc_enable, h->termination);
+
+ printk(
+" chip_version 0x%x, mcode_date 0x%x\n",
+ h->chip_version, h->mcode_date);
+
+ printk(
+" mcode_version 0x%x, pci_device_id 0x%x, lib_version 0x%x\n",
+ h->mcode_version, h->pci_device_id, h->lib_version);
+
+ printk(
+" control_flag 0x%x, pci_slot_info 0x%x\n",
+ h->control_flag, h->pci_slot_info);
+}
+
+/*
+ * asc_prt_adv_scsi_req_q()
+ *
+ * Display an ADV_SCSI_REQ_Q structure.
+ */
+STATIC void
+asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
+{
+ int i;
+ struct asc_sg_block *sg_ptr;
+
+ printk("ADV_SCSI_REQ_Q at addr %x\n", (unsigned) q);
+
+ printk(
+" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n",
+ q->target_id, q->target_lun, q->srb_ptr, q->a_flag);
+
+ printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n",
+ q->cntl, q->data_addr, q->vdata_addr);
+
+ printk(
+" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n",
+ q->data_cnt, q->sense_addr, q->sense_len);
+
+ printk(
+" cdb_len %u, done_status 0x%x, host_status 0x%x, scsi_status 0x%x\n",
+ q->cdb_len, q->done_status, q->host_status, q->scsi_status);
+
+ printk(
+" vsense_addr 0x%lx, scsiq_ptr 0x%lx, ux_wk_data_cnt %lu\n",
+ (ulong) q->vsense_addr, (ulong) q->scsiq_ptr,
+ (ulong) q->ux_wk_data_cnt);
+
+ printk(
+" sg_list_ptr 0x%lx, sg_real_addr 0x%lx, sg_entry_cnt %u\n",
+ (ulong) q->sg_list_ptr, (ulong) q->sg_real_addr, q->sg_entry_cnt);
+
+ printk(
+" ux_sg_ix %u, orig_sense_len %u\n",
+ q->ux_sg_ix, q->orig_sense_len);
+
+ /* Display the request's ADV_SG_BLOCK structures. */
+ for (sg_ptr = q->sg_list_ptr, i = 0; sg_ptr != NULL;
+ sg_ptr = sg_ptr->sg_ptr, i++) {
+ /*
+ * 'sg_ptr' is a physical address. Convert it to a virtual
+ * address by indexing 'i' into the virtual address array
+ * 'sg_list_ptr'.
+ *
+         * At the end of each iteration of the loop 'sg_ptr' is
+ * converted back into a physical address by setting 'sg_ptr'
+ * to the next pointer 'sg_ptr->sg_ptr'.
+ */
+ sg_ptr = &(((ADV_SG_BLOCK *) (q->sg_list_ptr))[i]);
+ asc_prt_adv_sgblock(i, sg_ptr);
+ }
+}
+
+/*
+ * asc_prt_adv_sgblock()
+ *
+ * Display an ADV_SG_BLOCK structure.
+ */
+STATIC void
+asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b)
+{
+ int i, s;
+
+ /* Calculate starting entry number for the current block. */
+ s = sgblockno * NO_OF_SG_PER_BLOCK;
+
+ printk(" ADV_SG_BLOCK at addr 0x%lx (sgblockno %lu)\n",
+ (ulong) b, (ulong) sgblockno);
+ printk(
+" first_entry_no %lu, last_entry_no %lu, sg_ptr 0x%lx\n",
+ (ulong) b->first_entry_no, (ulong) b->last_entry_no, (ulong) b->sg_ptr);
+ ASC_ASSERT(b->first_entry_no - s >= 0);
+ ASC_ASSERT(b->last_entry_no - s >= 0);
+ ASC_ASSERT(b->last_entry_no - s <= NO_OF_SG_PER_BLOCK);
+ ASC_ASSERT(b->first_entry_no - s <= NO_OF_SG_PER_BLOCK);
+ ASC_ASSERT(b->first_entry_no - s <= b->last_entry_no - s);
+ for (i = b->first_entry_no - s; i <= b->last_entry_no - s; i++) {
+ printk(" [%lu]: sg_addr 0x%lx, sg_count 0x%lx\n",
+ (ulong) i, (ulong) b->sg_list[i].sg_addr,
+ (ulong) b->sg_list[i].sg_count);
+ }
+}
+
+/*
+ * asc_prt_hex()
+ *
+ * Print hexadecimal output in 4-byte groupings, 32 bytes
+ * or 8 double-words per line.
+ */
+STATIC void
+asc_prt_hex(char *f, uchar *s, int l)
+{
+ int i;
+ int j;
+ int k;
+ int m;
+
+ printk("%s: (%d bytes)\n", f, l);
+
+ for (i = 0; i < l; i += 32) {
+
+ /* Display a maximum of 8 double-words per line. */
+ if ((k = (l - i) / 4) >= 8) {
+ k = 8;
+ m = 0;
+ } else {
+ m = (l - i) % 4 ;
+ }
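+        /*
+         * 'm' is the number of trailing bytes (0-3) that follow the
+         * last full double-word on this line; it is 0 when a full
+         * line of 8 double-words is printed.
+         */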
+
+ for (j = 0; j < k; j++) {
+ printk(" %2.2X%2.2X%2.2X%2.2X",
+ (unsigned) s[i+(j*4)], (unsigned) s[i+(j*4)+1],
+ (unsigned) s[i+(j*4)+2], (unsigned) s[i+(j*4)+3]);
+ }
+
+ switch (m) {
+ case 0:
+ default:
+ break;
+ case 1:
+ printk(" %2.2X",
+ (unsigned) s[i+(j*4)]);
+ break;
+ case 2:
+ printk(" %2.2X%2.2X",
+ (unsigned) s[i+(j*4)],
+ (unsigned) s[i+(j*4)+1]);
+ break;
+        case 3:
+            /* Three trailing bytes remain on this line. */
+            printk(" %2.2X%2.2X%2.2X",
+                (unsigned) s[i+(j*4)],
+                (unsigned) s[i+(j*4)+1],
+                (unsigned) s[i+(j*4)+2]);
+ break;
+ }
+
+ printk("\n");
+ }
+}
+#endif /* ADVANSYS_DEBUG */
+
+#ifdef ADVANSYS_ASSERT
+/*
+ * interrupts_enabled()
+ *
+ * Return 1 if interrupts are enabled, otherwise return 0.
+ */
+STATIC int
+interrupts_enabled(void)
+{
+ long flags;
+
+ save_flags(flags);
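+    /*
+     * Bit 0x0200 of the saved flags is the x86 EFLAGS IF (interrupt
+     * enable) flag.
+     */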
+ if (flags & 0x0200) {
+ return ASC_TRUE;
+ } else {
+ return ASC_FALSE;
+ }
+}
+#endif /* ADVANSYS_ASSERT */
+
+
+/*
+ * --- Asc Library Functions
+ */
+
+ASC_INITFUNC(
+STATIC ushort
+AscGetEisaChipCfg(
+ PortAddr iop_base
+)
+)
+{
+ PortAddr eisa_cfg_iop;
+
+ eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) (ASC_EISA_CFG_IOP_MASK);
+ return (inpw(eisa_cfg_iop));
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscSetChipScsiID(
+ PortAddr iop_base,
+ uchar new_host_id
+)
+)
+{
+ ushort cfg_lsw;
+
+ if (AscGetChipScsiID(iop_base) == new_host_id) {
+ return (new_host_id);
+ }
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ cfg_lsw &= 0xF8FF;
+ cfg_lsw |= (ushort) ((new_host_id & ASC_MAX_TID) << 8);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetChipScsiID(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscGetChipScsiCtrl(
+ PortAddr iop_base
+)
+)
+{
+ uchar sc;
+
+ AscSetBank(iop_base, 1);
+ sc = inp(iop_base + IOP_REG_SC);
+ AscSetBank(iop_base, 0);
+ return (sc);
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscGetChipVersion(
+ PortAddr iop_base,
+ ushort bus_type
+)
+)
+{
+ if ((bus_type & ASC_IS_EISA) != 0) {
+ PortAddr eisa_iop;
+ uchar revision;
+ eisa_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
+ (PortAddr) ASC_EISA_REV_IOP_MASK;
+ revision = inp(eisa_iop);
+ return ((uchar) ((ASC_CHIP_MIN_VER_EISA - 1) + revision));
+ }
+ return (AscGetChipVerNo(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscGetChipBusType(
+ PortAddr iop_base
+)
+)
+{
+ ushort chip_ver;
+
+ chip_ver = AscGetChipVerNo(iop_base);
+    if ((chip_ver >= ASC_CHIP_MIN_VER_VL) &&
+        (chip_ver <= ASC_CHIP_MAX_VER_VL)) {
+        if (((iop_base & 0x0C30) == 0x0C30) ||
+            ((iop_base & 0x0C50) == 0x0C50)) {
+ return (ASC_IS_EISA);
+ }
+ return (ASC_IS_VL);
+ }
+ if ((chip_ver >= ASC_CHIP_MIN_VER_ISA) &&
+ (chip_ver <= ASC_CHIP_MAX_VER_ISA)) {
+ if (chip_ver >= ASC_CHIP_MIN_VER_ISA_PNP) {
+ return (ASC_IS_ISAPNP);
+ }
+ return (ASC_IS_ISA);
+ } else if ((chip_ver >= ASC_CHIP_MIN_VER_PCI) &&
+ (chip_ver <= ASC_CHIP_MAX_VER_PCI)) {
+ return (ASC_IS_PCI);
+ }
+ return (0);
+}
+
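+/*
+ * Load the microcode image into chip LRAM: clear the code area, copy
+ * 'mcode_buf' in, then store the word checksum and size in LRAM so the
+ * running microcode can be verified later.  Returns the checksum of
+ * the copied image.
+ */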
+ASC_INITFUNC(
+STATIC ulong
+AscLoadMicroCode(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort *mcode_buf,
+ ushort mcode_size
+)
+)
+{
+ ulong chksum;
+ ushort mcode_word_size;
+ ushort mcode_chksum;
+
+ mcode_word_size = (ushort) (mcode_size >> 1);
+ AscMemWordSetLram(iop_base, s_addr, 0, mcode_word_size);
+ AscMemWordCopyToLram(iop_base, s_addr, mcode_buf, mcode_word_size);
+ chksum = AscMemSumLramWord(iop_base, s_addr, mcode_word_size);
+ mcode_chksum = (ushort) AscMemSumLramWord(iop_base,
+ (ushort) ASC_CODE_SEC_BEG,
+ (ushort) ((mcode_size - s_addr - (ushort) ASC_CODE_SEC_BEG) / 2));
+ AscWriteLramWord(iop_base, ASCV_MCODE_CHKSUM_W, mcode_chksum);
+ AscWriteLramWord(iop_base, ASCV_MCODE_SIZE_W, mcode_size);
+ return (chksum);
+}
+
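+/*
+ * Probe for an AdvanSys adapter at 'iop_base' by comparing the chip's
+ * signature byte and signature word against the known ASC-1000 IDs.
+ * Returns 1 if the signature matches, 0 otherwise.
+ */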
+ASC_INITFUNC(
+STATIC int
+AscFindSignature(
+ PortAddr iop_base
+)
+)
+{
+ ushort sig_word;
+
+ if (AscGetChipSignatureByte(iop_base) == (uchar) ASC_1000_ID1B) {
+ sig_word = AscGetChipSignatureWord(iop_base);
+ if ((sig_word == (ushort) ASC_1000_ID0W) ||
+ (sig_word == (ushort) ASC_1000_ID0W_FIX)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
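+/*
+ * '_isa_pnp_inited' records whether AscSetISAPNPWaitForKey() has been
+ * called.  '_asc_def_iop_base' is the table of default I/O port
+ * addresses probed for ISA and VL adapters by AscSearchIOPortAddr11().
+ */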
+STATIC uchar _isa_pnp_inited ASC_INITDATA = 0;
+STATIC PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] ASC_INITDATA =
+{
+ 0x100, ASC_IOADR_1, 0x120, ASC_IOADR_2, 0x140, ASC_IOADR_3, ASC_IOADR_4,
+ ASC_IOADR_5, ASC_IOADR_6, ASC_IOADR_7, ASC_IOADR_8
+};
+
+ASC_INITFUNC(
+STATIC PortAddr
+AscSearchIOPortAddr(
+ PortAddr iop_beg,
+ ushort bus_type
+)
+)
+{
+ if (bus_type & ASC_IS_VL) {
+ while ((iop_beg = AscSearchIOPortAddr11(iop_beg)) != 0) {
+ if (AscGetChipVersion(iop_beg, bus_type) <= ASC_CHIP_MAX_VER_VL) {
+ return (iop_beg);
+ }
+ }
+ return (0);
+ }
+ if (bus_type & ASC_IS_ISA) {
+ if (_isa_pnp_inited == 0) {
+ AscSetISAPNPWaitForKey();
+ _isa_pnp_inited++;
+ }
+ while ((iop_beg = AscSearchIOPortAddr11(iop_beg)) != 0) {
+ if ((AscGetChipVersion(iop_beg, bus_type) & ASC_CHIP_VER_ISA_BIT) != 0) {
+ return (iop_beg);
+ }
+ }
+ return (0);
+ }
+ if (bus_type & ASC_IS_EISA) {
+ if ((iop_beg = AscSearchIOPortAddrEISA(iop_beg)) != 0) {
+ return (iop_beg);
+ }
+ return (0);
+ }
+ return (0);
+}
+
+ASC_INITFUNC(
+STATIC PortAddr
+AscSearchIOPortAddr11(
+ PortAddr s_addr
+)
+)
+{
+ int i;
+ PortAddr iop_base;
+
+ for (i = 0; i < ASC_IOADR_TABLE_MAX_IX; i++) {
+ if (_asc_def_iop_base[i] > s_addr) {
+ break;
+ }
+ }
+ for (; i < ASC_IOADR_TABLE_MAX_IX; i++) {
+ iop_base = _asc_def_iop_base[i];
+ if (check_region(iop_base, ASC_IOADR_GAP) != 0) {
+ ASC_DBG1(1,
+ "AscSearchIOPortAddr11: check_region() failed I/O port %x\n",
+ iop_base);
+ continue;
+ }
+ ASC_DBG1(1, "AscSearchIOPortAddr11: probing I/O port %x\n", iop_base);
+ if (AscFindSignature(iop_base)) {
+ return (iop_base);
+ }
+ }
+ return (0);
+}
+
+ASC_INITFUNC(
+STATIC void
+AscToggleIRQAct(
+ PortAddr iop_base
+)
+)
+{
+ AscSetChipStatus(iop_base, CIW_IRQ_ACT);
+ AscSetChipStatus(iop_base, 0);
+ return;
+}
+
+ASC_INITFUNC(
+STATIC void
+AscSetISAPNPWaitForKey(
+ void)
+)
+{
+ outp(ASC_ISA_PNP_PORT_ADDR, 0x02);
+ outp(ASC_ISA_PNP_PORT_WRITE, 0x02);
+ return;
+}
+
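+/*
+ * Decode the adapter's IRQ from its configuration registers.  The IRQ
+ * number is packed into different bit fields of the configuration word
+ * depending on the bus type (EISA, VL or ISA); encodings that do not
+ * map to a usable IRQ return 0.
+ */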
+ASC_INITFUNC(
+STATIC uchar
+AscGetChipIRQ(
+ PortAddr iop_base,
+ ushort bus_type
+)
+)
+{
+ ushort cfg_lsw;
+ uchar chip_irq;
+
+ if ((bus_type & ASC_IS_EISA) != 0) {
+ cfg_lsw = AscGetEisaChipCfg(iop_base);
+ chip_irq = (uchar) (((cfg_lsw >> 8) & 0x07) + 10);
+ if ((chip_irq == 13) || (chip_irq > 15)) {
+ return (0);
+ }
+ return (chip_irq);
+ }
+ if ((bus_type & ASC_IS_VL) != 0) {
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ chip_irq = (uchar) (((cfg_lsw >> 2) & 0x07));
+ if ((chip_irq == 0) ||
+ (chip_irq == 4) ||
+ (chip_irq == 7)) {
+ return (0);
+ }
+ return ((uchar) (chip_irq + (ASC_MIN_IRQ_NO - 1)));
+ }
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ chip_irq = (uchar) (((cfg_lsw >> 2) & 0x03));
+ if (chip_irq == 3)
+ chip_irq += (uchar) 2;
+ return ((uchar) (chip_irq + ASC_MIN_IRQ_NO));
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscSetChipIRQ(
+ PortAddr iop_base,
+ uchar irq_no,
+ ushort bus_type
+)
+)
+{
+ ushort cfg_lsw;
+
+ if ((bus_type & ASC_IS_VL) != 0) {
+ if (irq_no != 0) {
+ if ((irq_no < ASC_MIN_IRQ_NO) || (irq_no > ASC_MAX_IRQ_NO)) {
+ irq_no = 0;
+ } else {
+ irq_no -= (uchar) ((ASC_MIN_IRQ_NO - 1));
+ }
+ }
+ cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFE3);
+ cfg_lsw |= (ushort) 0x0010;
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ AscToggleIRQAct(iop_base);
+ cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFE0);
+ cfg_lsw |= (ushort) ((irq_no & 0x07) << 2);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ AscToggleIRQAct(iop_base);
+ return (AscGetChipIRQ(iop_base, bus_type));
+ }
+ if ((bus_type & (ASC_IS_ISA)) != 0) {
+ if (irq_no == 15)
+ irq_no -= (uchar) 2;
+ irq_no -= (uchar) ASC_MIN_IRQ_NO;
+ cfg_lsw = (ushort) (AscGetChipCfgLsw(iop_base) & 0xFFF3);
+ cfg_lsw |= (ushort) ((irq_no & 0x03) << 2);
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetChipIRQ(iop_base, bus_type));
+ }
+ return (0);
+}
+
+ASC_INITFUNC(
+STATIC void
+AscEnableIsaDma(
+ uchar dma_channel
+)
+)
+{
+ if (dma_channel < 4) {
+ outp(0x000B, (ushort) (0xC0 | dma_channel));
+ outp(0x000A, dma_channel);
+ } else if (dma_channel < 8) {
+ outp(0x00D6, (ushort) (0xC0 | (dma_channel - 4)));
+ outp(0x00D4, (ushort) (dma_channel - 4));
+ }
+ return;
+}
+
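+/*
+ * Handle a microcode halt condition.  The halt code left in LRAM at
+ * ASCV_HALTCODE_W selects the action: enabling or disabling the
+ * asynchronous transfer fix, extended message (SDTR/WDTR) negotiation,
+ * CHECK CONDITION handling (queue a REQUEST SENSE and renegotiate
+ * SDTR), a rejected SDTR message, or SCSI QUEUE FULL (shrink the
+ * target's queue depth to the number of commands it accepted).  The
+ * halt code is cleared before returning so the microcode can continue.
+ */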
+STATIC int
+AscIsrChipHalted(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ EXT_MSG ext_msg;
+ EXT_MSG out_msg;
+ ushort halt_q_addr;
+ int sdtr_accept;
+ ushort int_halt_code;
+ ASC_SCSI_BIT_ID_TYPE scsi_busy;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ PortAddr iop_base;
+ uchar tag_code;
+ uchar q_status;
+ uchar halt_qp;
+ uchar sdtr_data;
+ uchar target_ix;
+ uchar q_cntl, tid_no;
+ uchar cur_dvc_qng;
+ uchar asyn_sdtr;
+ uchar scsi_status;
+ asc_board_t *boardp;
+
+ ASC_ASSERT(asc_dvc->drv_ptr != 0);
+ boardp = (asc_board_t *) asc_dvc->drv_ptr;
+
+ iop_base = asc_dvc->iop_base;
+ int_halt_code = AscReadLramWord(iop_base, ASCV_HALTCODE_W);
+
+ halt_qp = AscReadLramByte(iop_base, ASCV_CURCDB_B);
+ halt_q_addr = ASC_QNO_TO_QADDR(halt_qp);
+ target_ix = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TARGET_IX));
+ q_cntl = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL));
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ target_id = (uchar) ASC_TID_TO_TARGET_ID(tid_no);
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+
+ asyn_sdtr = ASYN_SDTR_DATA_FIX_PCI_REV_AB;
+ } else {
+ asyn_sdtr = 0;
+ }
+ if (int_halt_code == ASC_HALT_DISABLE_ASYN_USE_SYN_FIX) {
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+ AscSetChipSDTR(iop_base, 0, tid_no);
+ boardp->sdtr_data[tid_no] = 0;
+ }
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) {
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ boardp->sdtr_data[tid_no] = asyn_sdtr;
+ }
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_EXTMSG_IN) {
+
+ AscMemWordCopyFromLram(iop_base,
+ ASCV_MSGIN_BEG,
+ (ushort *) & ext_msg,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+
+ if (ext_msg.msg_type == MS_EXTEND &&
+ ext_msg.msg_req == MS_SDTR_CODE &&
+ ext_msg.msg_len == MS_SDTR_LEN) {
+ sdtr_accept = TRUE;
+ if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) {
+
+ sdtr_accept = FALSE;
+ ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET;
+ }
+ if ((ext_msg.xfer_period <
+ asc_dvc->sdtr_period_tbl[asc_dvc->host_init_sdtr_index]) ||
+ (ext_msg.xfer_period >
+ asc_dvc->sdtr_period_tbl[asc_dvc->max_sdtr_index])) {
+ sdtr_accept = FALSE;
+ ext_msg.xfer_period =
+ asc_dvc->sdtr_period_tbl[asc_dvc->host_init_sdtr_index];
+ }
+ if (sdtr_accept) {
+ sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
+ ext_msg.req_ack_offset);
+ if ((sdtr_data == 0xFF)) {
+
+ q_cntl |= QC_MSG_OUT;
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ boardp->sdtr_data[tid_no] = asyn_sdtr;
+ }
+ }
+ if (ext_msg.req_ack_offset == 0) {
+
+ q_cntl &= ~QC_MSG_OUT;
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ } else {
+ if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
+
+ q_cntl &= ~QC_MSG_OUT;
+ asc_dvc->sdtr_done |= target_id;
+ asc_dvc->init_sdtr |= target_id;
+ asc_dvc->pci_fix_asyn_xfer &= ~target_id;
+ sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
+ ext_msg.req_ack_offset);
+ AscSetChipSDTR(iop_base, sdtr_data, tid_no);
+ boardp->sdtr_data[tid_no] = sdtr_data;
+ } else {
+
+ q_cntl |= QC_MSG_OUT;
+ AscMsgOutSDTR(asc_dvc,
+ ext_msg.xfer_period,
+ ext_msg.req_ack_offset);
+ asc_dvc->pci_fix_asyn_xfer &= ~target_id;
+ sdtr_data = AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
+ ext_msg.req_ack_offset);
+ AscSetChipSDTR(iop_base, sdtr_data, tid_no);
+ boardp->sdtr_data[tid_no] = sdtr_data;
+ asc_dvc->sdtr_done |= target_id;
+ asc_dvc->init_sdtr |= target_id;
+ }
+ }
+
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (ext_msg.msg_type == MS_EXTEND &&
+ ext_msg.msg_req == MS_WDTR_CODE &&
+ ext_msg.msg_len == MS_WDTR_LEN) {
+
+ ext_msg.wdtr_width = 0;
+ AscMemWordCopyToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort *) & ext_msg,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+ q_cntl |= QC_MSG_OUT;
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else {
+
+ ext_msg.msg_type = M1_MSG_REJECT;
+ AscMemWordCopyToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort *) & ext_msg,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+ q_cntl |= QC_MSG_OUT;
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ }
+ } else if (int_halt_code == ASC_HALT_CHK_CONDITION) {
+
+ q_cntl |= QC_REQ_SENSE;
+
+ if ((asc_dvc->init_sdtr & target_id) != 0) {
+
+ asc_dvc->sdtr_done &= ~target_id;
+
+ sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+ q_cntl |= QC_MSG_OUT;
+ AscMsgOutSDTR(asc_dvc,
+ asc_dvc->sdtr_period_tbl[(sdtr_data >> 4) &
+ (uchar) (asc_dvc->max_sdtr_index - 1)],
+ (uchar) (sdtr_data & (uchar) ASC_SYN_MAX_OFFSET));
+ }
+
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+
+ tag_code = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE));
+ tag_code &= 0xDC;
+        if ((asc_dvc->pci_fix_asyn_xfer & target_id) &&
+            !(asc_dvc->pci_fix_asyn_xfer_always & target_id)) {
+
+ tag_code |= (ASC_TAG_FLAG_DISABLE_DISCONNECT
+ | ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
+
+ }
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_TAG_CODE),
+ tag_code);
+
+ q_status = AscReadLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS));
+ q_status |= (QS_READY | QS_BUSY);
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ q_status);
+
+ scsi_busy = AscReadLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B);
+ scsi_busy &= ~target_id;
+ AscWriteLramByte(iop_base, (ushort) ASCV_SCSIBUSY_B, scsi_busy);
+
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_SDTR_REJECTED) {
+
+ AscMemWordCopyFromLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort *) & out_msg,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+
+ if ((out_msg.msg_type == MS_EXTEND) &&
+ (out_msg.msg_len == MS_SDTR_LEN) &&
+ (out_msg.msg_req == MS_SDTR_CODE)) {
+
+ asc_dvc->init_sdtr &= ~target_id;
+ asc_dvc->sdtr_done &= ~target_id;
+ AscSetChipSDTR(iop_base, asyn_sdtr, tid_no);
+ boardp->sdtr_data[tid_no] = asyn_sdtr;
+ }
+ q_cntl &= ~QC_MSG_OUT;
+ AscWriteLramByte(iop_base,
+ (ushort) (halt_q_addr + (ushort) ASC_SCSIQ_B_CNTL),
+ q_cntl);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) {
+
+ scsi_status = AscReadLramByte(iop_base,
+ (ushort) ((ushort) halt_q_addr + (ushort) ASC_SCSIQ_SCSI_STATUS));
+ cur_dvc_qng = AscReadLramByte(iop_base,
+ (ushort) ((ushort) ASC_QADR_BEG + (ushort) target_ix));
+ if ((cur_dvc_qng > 0) &&
+ (asc_dvc->cur_dvc_qng[tid_no] > 0)) {
+
+ scsi_busy = AscReadLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B);
+ scsi_busy |= target_id;
+ AscWriteLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B, scsi_busy);
+ asc_dvc->queue_full_or_busy |= target_id;
+
+ if (scsi_status == SS_QUEUE_FULL) {
+ if (cur_dvc_qng > ASC_MIN_TAGGED_CMD) {
+ cur_dvc_qng -= 1;
+ asc_dvc->max_dvc_qng[tid_no] = cur_dvc_qng;
+
+ AscWriteLramByte(iop_base,
+ (ushort) ((ushort) ASCV_MAX_DVC_QNG_BEG +
+ (ushort) tid_no),
+ cur_dvc_qng);
+
+ /*
+ * Set the device queue depth to the number of
+ * active requests when the QUEUE FULL condition
+ * was encountered.
+ */
+ boardp->queue_full |= target_id;
+ boardp->queue_full_cnt[tid_no] = cur_dvc_qng;
+#if ASC_QUEUE_FLOW_CONTROL
+ if (boardp->device[tid_no] != NULL &&
+ boardp->device[tid_no]->queue_curr_depth >
+ cur_dvc_qng) {
+ boardp->device[tid_no]->queue_curr_depth =
+ cur_dvc_qng;
+ }
+#endif /* ASC_QUEUE_FLOW_CONTROL */
+ }
+ }
+ }
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ return (0);
+ }
+ return (0);
+}
+
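+/*
+ * Copy a completed request's ASC_QDONE_INFO out of chip LRAM at
+ * 'q_addr'.  Returns the number of scatter-gather queue entries linked
+ * to the request, taken from the high byte of the queue control word.
+ */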
+STATIC uchar
+_AscCopyLramScsiDoneQ(
+ PortAddr iop_base,
+ ushort q_addr,
+ REG ASC_QDONE_INFO * scsiq,
+ ulong max_dma_count
+)
+{
+ ushort _val;
+ uchar sg_queue_cnt;
+
+ DvcGetQinfo(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_DONE_INFO_BEG),
+ (ushort *) scsiq,
+ (ushort) ((sizeof (ASC_SCSIQ_2) + sizeof (ASC_SCSIQ_3)) / 2));
+ _val = AscReadLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS));
+ scsiq->q_status = (uchar) _val;
+ scsiq->q_no = (uchar) (_val >> 8);
+ _val = AscReadLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_CNTL));
+ scsiq->cntl = (uchar) _val;
+ sg_queue_cnt = (uchar) (_val >> 8);
+ _val = AscReadLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_SENSE_LEN));
+ scsiq->sense_len = (uchar) _val;
+ scsiq->extra_bytes = (uchar) (_val >> 8);
+ scsiq->remain_bytes = AscReadLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_DW_REMAIN_XFER_CNT));
+ scsiq->remain_bytes &= max_dma_count;
+ return (sg_queue_cnt);
+}
+
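+/*
+ * Service one entry on the microcode done queue: advance the done
+ * queue tail, copy the completion information out of LRAM, free any
+ * scatter-gather queue entries, update the per-target and total queue
+ * counts, and invoke the ISR callback.  Returns 1 when a request
+ * completed, 0x11 for an aborted request or one without an SRB
+ * pointer, 0x80 on a fatal error, and 0 when the done queue is empty.
+ */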
+STATIC int
+AscIsrQDone(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ uchar next_qp;
+ uchar n_q_used;
+ uchar sg_list_qp;
+ uchar sg_queue_cnt;
+ uchar q_cnt;
+ uchar done_q_tail;
+ uchar tid_no;
+ ASC_SCSI_BIT_ID_TYPE scsi_busy;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ PortAddr iop_base;
+ ushort q_addr;
+ ushort sg_q_addr;
+ uchar cur_target_qng;
+ ASC_QDONE_INFO scsiq_buf;
+ REG ASC_QDONE_INFO *scsiq;
+ int false_overrun;
+ ASC_ISR_CALLBACK asc_isr_callback;
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ n_q_used = 1;
+ scsiq = (ASC_QDONE_INFO *) & scsiq_buf;
+ done_q_tail = (uchar) AscGetVarDoneQTail(iop_base);
+ q_addr = ASC_QNO_TO_QADDR(done_q_tail);
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_FWD));
+ if (next_qp != ASC_QLINK_END) {
+ AscPutVarDoneQTail(iop_base, next_qp);
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+ sg_queue_cnt = _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq,
+ asc_dvc->max_dma_count);
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ (uchar) (scsiq->q_status & (uchar) ~ (QS_READY | QS_ABORTED)));
+ tid_no = ASC_TIX_TO_TID(scsiq->d2.target_ix);
+ target_id = ASC_TIX_TO_TARGET_ID(scsiq->d2.target_ix);
+ if ((scsiq->cntl & QC_SG_HEAD) != 0) {
+ sg_q_addr = q_addr;
+ sg_list_qp = next_qp;
+ for (q_cnt = 0; q_cnt < sg_queue_cnt; q_cnt++) {
+ sg_list_qp = AscReadLramByte(iop_base,
+ (ushort) (sg_q_addr + (ushort) ASC_SCSIQ_B_FWD));
+ sg_q_addr = ASC_QNO_TO_QADDR(sg_list_qp);
+ if (sg_list_qp == ASC_QLINK_END) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SG_Q_LINKS);
+ scsiq->d3.done_stat = QD_WITH_ERROR;
+ scsiq->d3.host_stat = QHSTA_D_QDONE_SG_LIST_CORRUPTED;
+ goto FATAL_ERR_QDONE;
+ }
+ AscWriteLramByte(iop_base,
+ (ushort) (sg_q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ QS_FREE);
+ }
+ n_q_used = sg_queue_cnt + 1;
+ AscPutVarDoneQTail(iop_base, sg_list_qp);
+ }
+ if (asc_dvc->queue_full_or_busy & target_id) {
+ cur_target_qng = AscReadLramByte(iop_base,
+ (ushort) ((ushort) ASC_QADR_BEG + (ushort) scsiq->d2.target_ix));
+ if (cur_target_qng < asc_dvc->max_dvc_qng[tid_no]) {
+ scsi_busy = AscReadLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B);
+ scsi_busy &= ~target_id;
+ AscWriteLramByte(iop_base,
+ (ushort) ASCV_SCSIBUSY_B, scsi_busy);
+ asc_dvc->queue_full_or_busy &= ~target_id;
+ }
+ }
+ if (asc_dvc->cur_total_qng >= n_q_used) {
+ asc_dvc->cur_total_qng -= n_q_used;
+ if (asc_dvc->cur_dvc_qng[tid_no] != 0) {
+ asc_dvc->cur_dvc_qng[tid_no]--;
+ }
+ } else {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CUR_QNG);
+ scsiq->d3.done_stat = QD_WITH_ERROR;
+ goto FATAL_ERR_QDONE;
+ }
+ if ((scsiq->d2.srb_ptr == 0UL) ||
+ ((scsiq->q_status & QS_ABORTED) != 0)) {
+ return (0x11);
+ } else if (scsiq->q_status == QS_DONE) {
+ false_overrun = FALSE;
+ if (scsiq->extra_bytes != 0) {
+ scsiq->remain_bytes += (ulong) scsiq->extra_bytes;
+ }
+ if (scsiq->d3.done_stat == QD_WITH_ERROR) {
+ if (scsiq->d3.host_stat == QHSTA_M_DATA_OVER_RUN) {
+ if ((scsiq->cntl & (QC_DATA_IN | QC_DATA_OUT)) == 0) {
+ scsiq->d3.done_stat = QD_NO_ERROR;
+ scsiq->d3.host_stat = QHSTA_NO_ERROR;
+ } else if (false_overrun) {
+ scsiq->d3.done_stat = QD_NO_ERROR;
+ scsiq->d3.host_stat = QHSTA_NO_ERROR;
+ }
+ } else if (scsiq->d3.host_stat ==
+ QHSTA_M_HUNG_REQ_SCSI_BUS_RESET) {
+ AscStopChip(iop_base);
+ AscSetChipControl(iop_base,
+ (uchar) (CC_SCSI_RESET | CC_HALT));
+ DvcDelayNanoSecond(asc_dvc, 60000);
+ AscSetChipControl(iop_base, CC_HALT);
+ AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
+ AscSetChipStatus(iop_base, 0);
+ AscSetChipControl(iop_base, 0);
+ }
+ }
+ if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ } else {
+ if ((AscReadLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CDB_BEG)) ==
+ SCSICMD_StartStopUnit)) {
+ asc_dvc->unit_not_ready &= ~target_id;
+ if (scsiq->d3.done_stat != QD_NO_ERROR) {
+ asc_dvc->start_motor &= ~target_id;
+ }
+ }
+ }
+ return (1);
+ } else {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_Q_STATUS);
+ FATAL_ERR_QDONE:
+ if ((scsiq->cntl & QC_NO_CALLBACK) == 0) {
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ }
+ return (0x80);
+ }
+ }
+ return (0);
+}
+
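+/*
+ * Asc Library interrupt handler.  Saves the chip control register and
+ * LRAM address, handles a latched SCSI bus reset, acknowledges the
+ * interrupt, and then either services a halted chip through
+ * AscIsrChipHalted() or drains the done queue through AscIsrQDone().
+ * Returns TRUE if an interrupt was pending, FALSE if not, or ERR on a
+ * fatal condition (re-entry, inside a critical section, or an
+ * uninitialized device structure).
+ */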
+STATIC int
+AscISR(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ ASC_CS_TYPE chipstat;
+ PortAddr iop_base;
+ ushort saved_ram_addr;
+ uchar ctrl_reg;
+ uchar saved_ctrl_reg;
+ int int_pending;
+ int status;
+ uchar host_flag;
+
+ iop_base = asc_dvc->iop_base;
+ int_pending = FALSE;
+    if (((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) ||
+        (asc_dvc->isr_callback == 0)) {
+ return (ERR);
+ }
+ if (asc_dvc->in_critical_cnt != 0) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL);
+ return (ERR);
+ }
+ if (asc_dvc->is_in_int) {
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY);
+ return (ERR);
+ }
+ asc_dvc->is_in_int = TRUE;
+ ctrl_reg = AscGetChipControl(iop_base);
+ saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET |
+ CC_SINGLE_STEP | CC_DIAG | CC_TEST));
+ chipstat = AscGetChipStatus(iop_base);
+ if (chipstat & CSW_SCSI_RESET_LATCH) {
+ if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) {
+ int_pending = TRUE;
+ asc_dvc->sdtr_done = 0;
+ saved_ctrl_reg &= (uchar) (~CC_HALT);
+ while (AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) ;
+ AscSetChipControl(iop_base, (CC_CHIP_RESET | CC_HALT));
+ AscSetChipControl(iop_base, CC_HALT);
+ AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
+ AscSetChipStatus(iop_base, 0);
+ chipstat = AscGetChipStatus(iop_base);
+ }
+ }
+ saved_ram_addr = AscGetChipLramAddr(iop_base);
+ host_flag = AscReadLramByte(iop_base,
+ ASCV_HOST_FLAG_B) & (uchar) (~ASC_HOST_FLAG_IN_ISR);
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
+ (uchar) (host_flag | (uchar) ASC_HOST_FLAG_IN_ISR));
+    if ((chipstat & CSW_INT_PENDING) || (int_pending)) {
+ AscAckInterrupt(iop_base);
+ int_pending = TRUE;
+ if ((chipstat & CSW_HALTED) &&
+ (ctrl_reg & CC_SINGLE_STEP)) {
+ if (AscIsrChipHalted(asc_dvc) == ERR) {
+ goto ISR_REPORT_QDONE_FATAL_ERROR;
+ } else {
+ saved_ctrl_reg &= (uchar) (~CC_HALT);
+ }
+ } else {
+ ISR_REPORT_QDONE_FATAL_ERROR:
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) {
+ while (((status = AscIsrQDone(asc_dvc)) & 0x01) != 0) {
+ }
+ } else {
+ do {
+ if ((status = AscIsrQDone(asc_dvc)) == 1) {
+ break;
+ }
+ } while (status == 0x11);
+ }
+ if ((status & 0x80) != 0)
+ int_pending = ERR;
+ }
+ }
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
+ AscSetChipLramAddr(iop_base, saved_ram_addr);
+ AscSetChipControl(iop_base, saved_ctrl_reg);
+ asc_dvc->is_in_int = FALSE;
+ return (int_pending);
+}
+
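+/*
+ * Microcode image that AscInitAsc1000Driver() downloads into board LRAM
+ * with AscLoadMicroCode() and verifies against _asc_mcode_chksum below.
+ */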
+STATIC uchar _asc_mcode_buf[] ASC_INITDATA =
+{
+ 0x01, 0x03, 0x01, 0x19, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x91, 0x10, 0x0A, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0x80, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x23, 0x00, 0x24, 0x00, 0x00, 0x00, 0x07, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE2, 0x88, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x73, 0x48, 0x04, 0x36, 0x00, 0x00, 0xA2, 0xC2, 0x00, 0x80, 0x73, 0x03, 0x23, 0x36, 0x40,
+ 0xB6, 0x00, 0x36, 0x00, 0x05, 0xD6, 0x0C, 0xD2, 0x12, 0xDA, 0x00, 0xA2, 0xC2, 0x00, 0x92, 0x80,
+ 0x1E, 0x98, 0x50, 0x00, 0xF5, 0x00, 0x48, 0x98, 0xDF, 0x23, 0x36, 0x60, 0xB6, 0x00, 0x92, 0x80,
+ 0x4F, 0x00, 0xF5, 0x00, 0x48, 0x98, 0xEF, 0x23, 0x36, 0x60, 0xB6, 0x00, 0x92, 0x80, 0x80, 0x62,
+ 0x92, 0x80, 0x00, 0x46, 0x17, 0xEE, 0x13, 0xEA, 0x02, 0x01, 0x09, 0xD8, 0xCD, 0x04, 0x4D, 0x00,
+ 0x00, 0xA3, 0xD6, 0x00, 0xA6, 0x97, 0x7F, 0x23, 0x04, 0x61, 0x84, 0x01, 0xE6, 0x84, 0xD2, 0xC1,
+ 0x80, 0x73, 0xCD, 0x04, 0x4D, 0x00, 0x00, 0xA3, 0xE2, 0x01, 0xA6, 0x97, 0xCE, 0x81, 0x00, 0x33,
+ 0x02, 0x00, 0xC0, 0x88, 0x80, 0x73, 0x80, 0x77, 0x00, 0x01, 0x01, 0xA1, 0x02, 0x01, 0x4F, 0x00,
+ 0x84, 0x97, 0x07, 0xA6, 0x0C, 0x01, 0x00, 0x33, 0x03, 0x00, 0xC0, 0x88, 0x03, 0x03, 0x03, 0xDE,
+ 0x00, 0x33, 0x05, 0x00, 0xC0, 0x88, 0xCE, 0x00, 0x69, 0x60, 0xCE, 0x00, 0x02, 0x03, 0x4A, 0x60,
+ 0x00, 0xA2, 0x80, 0x01, 0x80, 0x63, 0x07, 0xA6, 0x2C, 0x01, 0x80, 0x81, 0x03, 0x03, 0x80, 0x63,
+ 0xE2, 0x00, 0x07, 0xA6, 0x3C, 0x01, 0x00, 0x33, 0x04, 0x00, 0xC0, 0x88, 0x03, 0x07, 0x02, 0x01,
+ 0x04, 0xCA, 0x0D, 0x23, 0x68, 0x98, 0x4D, 0x04, 0x04, 0x85, 0x05, 0xD8, 0x0D, 0x23, 0x68, 0x98,
+ 0xCD, 0x04, 0x15, 0x23, 0xF6, 0x88, 0xFB, 0x23, 0x02, 0x61, 0x82, 0x01, 0x80, 0x63, 0x02, 0x03,
+ 0x06, 0xA3, 0x6A, 0x01, 0x00, 0x33, 0x0A, 0x00, 0xC0, 0x88, 0x4E, 0x00, 0x07, 0xA3, 0x76, 0x01,
+ 0x00, 0x33, 0x0B, 0x00, 0xC0, 0x88, 0xCD, 0x04, 0x36, 0x2D, 0x00, 0x33, 0x1A, 0x00, 0xC0, 0x88,
+ 0x50, 0x04, 0x90, 0x81, 0x06, 0xAB, 0x8A, 0x01, 0x90, 0x81, 0x4E, 0x00, 0x07, 0xA3, 0x9A, 0x01,
+ 0x50, 0x00, 0x00, 0xA3, 0x44, 0x01, 0x00, 0x05, 0x84, 0x81, 0x46, 0x97, 0x02, 0x01, 0x05, 0xC6,
+ 0x04, 0x23, 0xA0, 0x01, 0x15, 0x23, 0xA1, 0x01, 0xC6, 0x81, 0xFD, 0x23, 0x02, 0x61, 0x82, 0x01,
+ 0x0A, 0xDA, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA0, 0xBC, 0x01, 0x80, 0x63, 0xCD, 0x04, 0x36, 0x2D,
+ 0x00, 0x33, 0x1B, 0x00, 0xC0, 0x88, 0x06, 0x23, 0x68, 0x98, 0xCD, 0x04, 0xE6, 0x84, 0x06, 0x01,
+ 0x00, 0xA2, 0xDC, 0x01, 0x57, 0x60, 0x00, 0xA0, 0xE2, 0x01, 0xE6, 0x84, 0x80, 0x23, 0xA0, 0x01,
+ 0xE6, 0x84, 0x80, 0x73, 0x4B, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x08, 0x02, 0x04, 0x01, 0x0C, 0xDE,
+ 0x02, 0x01, 0x03, 0xCC, 0x4F, 0x00, 0x84, 0x97, 0x04, 0x82, 0x08, 0x23, 0x02, 0x41, 0x82, 0x01,
+ 0x4F, 0x00, 0x62, 0x97, 0x48, 0x04, 0x84, 0x80, 0xF0, 0x97, 0x00, 0x46, 0x56, 0x00, 0x03, 0xC0,
+ 0x01, 0x23, 0xE8, 0x00, 0x81, 0x73, 0x06, 0x29, 0x03, 0x42, 0x06, 0xE2, 0x03, 0xEE, 0x67, 0xEB,
+ 0x11, 0x23, 0xF6, 0x88, 0x04, 0x98, 0xF4, 0x80, 0x80, 0x73, 0x80, 0x77, 0x07, 0xA4, 0x32, 0x02,
+ 0x7C, 0x95, 0x06, 0xA6, 0x3C, 0x02, 0x03, 0xA6, 0x4C, 0x04, 0xC0, 0x88, 0x04, 0x01, 0x03, 0xD8,
+ 0xB2, 0x98, 0x6A, 0x96, 0x4E, 0x82, 0xFE, 0x95, 0x80, 0x67, 0x83, 0x03, 0x80, 0x63, 0xB6, 0x2D,
+ 0x02, 0xA6, 0x78, 0x02, 0x07, 0xA6, 0x66, 0x02, 0x06, 0xA6, 0x6A, 0x02, 0x03, 0xA6, 0x6E, 0x02,
+ 0x00, 0x33, 0x10, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0x50, 0x82, 0x60, 0x96, 0x50, 0x82, 0x04, 0x23,
+ 0xA0, 0x01, 0x14, 0x23, 0xA1, 0x01, 0x3C, 0x84, 0x04, 0x01, 0x0C, 0xDC, 0xE0, 0x23, 0x25, 0x61,
+ 0xEF, 0x00, 0x14, 0x01, 0x4F, 0x04, 0xA8, 0x01, 0x6F, 0x00, 0xA5, 0x01, 0x03, 0x23, 0xA4, 0x01,
+ 0x06, 0x23, 0x9C, 0x01, 0x24, 0x2B, 0x1C, 0x01, 0x02, 0xA6, 0xB6, 0x02, 0x07, 0xA6, 0x66, 0x02,
+ 0x06, 0xA6, 0x6A, 0x02, 0x03, 0xA6, 0x20, 0x04, 0x01, 0xA6, 0xC0, 0x02, 0x00, 0xA6, 0xC0, 0x02,
+ 0x00, 0x33, 0x12, 0x00, 0xC0, 0x88, 0x00, 0x0E, 0x80, 0x63, 0x00, 0x43, 0x00, 0xA0, 0x98, 0x02,
+ 0x4D, 0x04, 0x04, 0x01, 0x0B, 0xDC, 0xE7, 0x23, 0x04, 0x61, 0x84, 0x01, 0x10, 0x31, 0x12, 0x35,
+ 0x14, 0x01, 0xEC, 0x00, 0x6C, 0x38, 0x00, 0x3F, 0x00, 0x00, 0xF6, 0x82, 0x18, 0x23, 0x04, 0x61,
+ 0x18, 0xA0, 0xEE, 0x02, 0x04, 0x01, 0x9C, 0xC8, 0x00, 0x33, 0x1F, 0x00, 0xC0, 0x88, 0x08, 0x31,
+ 0x0A, 0x35, 0x0C, 0x39, 0x0E, 0x3D, 0x7E, 0x98, 0xB6, 0x2D, 0x01, 0xA6, 0x20, 0x03, 0x00, 0xA6,
+ 0x20, 0x03, 0x07, 0xA6, 0x18, 0x03, 0x06, 0xA6, 0x1C, 0x03, 0x03, 0xA6, 0x20, 0x04, 0x02, 0xA6,
+ 0x78, 0x02, 0x00, 0x33, 0x33, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0xFA, 0x82, 0x60, 0x96, 0xFA, 0x82,
+ 0x82, 0x98, 0x80, 0x42, 0x7E, 0x98, 0x60, 0xE4, 0x04, 0x01, 0x29, 0xC8, 0x31, 0x05, 0x07, 0x01,
+ 0x00, 0xA2, 0x60, 0x03, 0x00, 0x43, 0x87, 0x01, 0x05, 0x05, 0x86, 0x98, 0x7E, 0x98, 0x00, 0xA6,
+ 0x22, 0x03, 0x07, 0xA6, 0x58, 0x03, 0x03, 0xA6, 0x3C, 0x04, 0x06, 0xA6, 0x5C, 0x03, 0x01, 0xA6,
+ 0x22, 0x03, 0x00, 0x33, 0x25, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0x3E, 0x83, 0x60, 0x96, 0x3E, 0x83,
+ 0x04, 0x01, 0x0C, 0xCE, 0x03, 0xC8, 0x00, 0x33, 0x42, 0x00, 0xC0, 0x88, 0x00, 0x01, 0x05, 0x05,
+ 0xFF, 0xA2, 0x7E, 0x03, 0xB1, 0x01, 0x08, 0x23, 0xB2, 0x01, 0x3A, 0x83, 0x05, 0x05, 0x15, 0x01,
+ 0x00, 0xA2, 0x9E, 0x03, 0xEC, 0x00, 0x6E, 0x00, 0x95, 0x01, 0x6C, 0x38, 0x00, 0x3F, 0x00, 0x00,
+ 0x01, 0xA6, 0x9A, 0x03, 0x00, 0xA6, 0x9A, 0x03, 0x12, 0x84, 0x80, 0x42, 0x7E, 0x98, 0x01, 0xA6,
+ 0xA8, 0x03, 0x00, 0xA6, 0xC0, 0x03, 0x12, 0x84, 0xA6, 0x98, 0x80, 0x42, 0x01, 0xA6, 0xA8, 0x03,
+ 0x07, 0xA6, 0xB6, 0x03, 0xD8, 0x83, 0x7C, 0x95, 0xAC, 0x83, 0x00, 0x33, 0x2F, 0x00, 0xC0, 0x88,
+ 0xA6, 0x98, 0x80, 0x42, 0x00, 0xA6, 0xC0, 0x03, 0x07, 0xA6, 0xCE, 0x03, 0xD8, 0x83, 0x7C, 0x95,
+ 0xC4, 0x83, 0x00, 0x33, 0x26, 0x00, 0xC0, 0x88, 0x38, 0x2B, 0x80, 0x32, 0x80, 0x36, 0x04, 0x23,
+ 0xA0, 0x01, 0x12, 0x23, 0xA1, 0x01, 0x12, 0x84, 0x06, 0xF0, 0x06, 0xA4, 0xF6, 0x03, 0x80, 0x6B,
+ 0x05, 0x23, 0x83, 0x03, 0x80, 0x63, 0x03, 0xA6, 0x10, 0x04, 0x07, 0xA6, 0x08, 0x04, 0x06, 0xA6,
+ 0x0C, 0x04, 0x00, 0x33, 0x17, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0xF6, 0x83, 0x60, 0x96, 0xF6, 0x83,
+ 0x20, 0x84, 0x06, 0xF0, 0x06, 0xA4, 0x20, 0x04, 0x80, 0x6B, 0x05, 0x23, 0x83, 0x03, 0x80, 0x63,
+ 0xB6, 0x2D, 0x03, 0xA6, 0x3C, 0x04, 0x07, 0xA6, 0x34, 0x04, 0x06, 0xA6, 0x38, 0x04, 0x00, 0x33,
+ 0x30, 0x00, 0xC0, 0x88, 0x7C, 0x95, 0x20, 0x84, 0x60, 0x96, 0x20, 0x84, 0x1D, 0x01, 0x06, 0xCC,
+ 0x00, 0x33, 0x00, 0x84, 0xC0, 0x20, 0x00, 0x23, 0xEA, 0x00, 0x81, 0x62, 0xA2, 0x0D, 0x80, 0x63,
+ 0x07, 0xA6, 0x5A, 0x04, 0x00, 0x33, 0x18, 0x00, 0xC0, 0x88, 0x03, 0x03, 0x80, 0x63, 0xA3, 0x01,
+ 0x07, 0xA4, 0x64, 0x04, 0x23, 0x01, 0x00, 0xA2, 0x86, 0x04, 0x0A, 0xA0, 0x76, 0x04, 0xE0, 0x00,
+ 0x00, 0x33, 0x1D, 0x00, 0xC0, 0x88, 0x0B, 0xA0, 0x82, 0x04, 0xE0, 0x00, 0x00, 0x33, 0x1E, 0x00,
+ 0xC0, 0x88, 0x42, 0x23, 0xF6, 0x88, 0x00, 0x23, 0x22, 0xA3, 0xE6, 0x04, 0x08, 0x23, 0x22, 0xA3,
+ 0xA2, 0x04, 0x28, 0x23, 0x22, 0xA3, 0xAE, 0x04, 0x02, 0x23, 0x22, 0xA3, 0xC4, 0x04, 0x42, 0x23,
+ 0xF6, 0x88, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA0, 0xAE, 0x04, 0x45, 0x23, 0xF6, 0x88, 0x04, 0x98,
+ 0x00, 0xA2, 0xC0, 0x04, 0xB2, 0x98, 0x00, 0x33, 0x00, 0x82, 0xC0, 0x20, 0x81, 0x62, 0xF0, 0x81,
+ 0x47, 0x23, 0xF6, 0x88, 0x04, 0x01, 0x0B, 0xDE, 0x04, 0x98, 0xB2, 0x98, 0x00, 0x33, 0x00, 0x81,
+ 0xC0, 0x20, 0x81, 0x62, 0x14, 0x01, 0x00, 0xA0, 0x08, 0x02, 0x43, 0x23, 0xF6, 0x88, 0x04, 0x23,
+ 0xA0, 0x01, 0x44, 0x23, 0xA1, 0x01, 0x80, 0x73, 0x4D, 0x00, 0x03, 0xA3, 0xF4, 0x04, 0x00, 0x33,
+ 0x27, 0x00, 0xC0, 0x88, 0x04, 0x01, 0x04, 0xDC, 0x02, 0x23, 0xA2, 0x01, 0x04, 0x23, 0xA0, 0x01,
+ 0x04, 0x98, 0x26, 0x95, 0x4B, 0x00, 0xF6, 0x00, 0x4F, 0x04, 0x4F, 0x00, 0x00, 0xA3, 0x22, 0x05,
+ 0x00, 0x05, 0x76, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x1C, 0x05, 0x0A, 0x85, 0x46, 0x97, 0xCD, 0x04,
+ 0x24, 0x85, 0x48, 0x04, 0x84, 0x80, 0x02, 0x01, 0x03, 0xDA, 0x80, 0x23, 0x82, 0x01, 0x34, 0x85,
+ 0x02, 0x23, 0xA0, 0x01, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA2, 0x40, 0x05, 0x1D, 0x01, 0x04, 0xD6,
+ 0xFF, 0x23, 0x86, 0x41, 0x4B, 0x60, 0xCB, 0x00, 0xFF, 0x23, 0x80, 0x01, 0x49, 0x00, 0x81, 0x01,
+ 0x04, 0x01, 0x02, 0xC8, 0x30, 0x01, 0x80, 0x01, 0xF7, 0x04, 0x03, 0x01, 0x49, 0x04, 0x80, 0x01,
+ 0xC9, 0x00, 0x00, 0x05, 0x00, 0x01, 0xFF, 0xA0, 0x60, 0x05, 0x77, 0x04, 0x01, 0x23, 0xEA, 0x00,
+ 0x5D, 0x00, 0xFE, 0xC7, 0x00, 0x62, 0x00, 0x23, 0xEA, 0x00, 0x00, 0x63, 0x07, 0xA4, 0xF8, 0x05,
+ 0x03, 0x03, 0x02, 0xA0, 0x8E, 0x05, 0xF4, 0x85, 0x00, 0x33, 0x2D, 0x00, 0xC0, 0x88, 0x04, 0xA0,
+ 0xB8, 0x05, 0x80, 0x63, 0x00, 0x23, 0xDF, 0x00, 0x4A, 0x00, 0x06, 0x61, 0x00, 0xA2, 0xA4, 0x05,
+ 0x1D, 0x01, 0x06, 0xD6, 0x02, 0x23, 0x02, 0x41, 0x82, 0x01, 0x50, 0x00, 0x62, 0x97, 0x04, 0x85,
+ 0x04, 0x23, 0x02, 0x41, 0x82, 0x01, 0x04, 0x85, 0x08, 0xA0, 0xBE, 0x05, 0xF4, 0x85, 0x03, 0xA0,
+ 0xC4, 0x05, 0xF4, 0x85, 0x01, 0xA0, 0xCE, 0x05, 0x88, 0x00, 0x80, 0x63, 0xCC, 0x86, 0x07, 0xA0,
+ 0xEE, 0x05, 0x5F, 0x00, 0x00, 0x2B, 0xDF, 0x08, 0x00, 0xA2, 0xE6, 0x05, 0x80, 0x67, 0x80, 0x63,
+ 0x01, 0xA2, 0x7A, 0x06, 0x7C, 0x85, 0x06, 0x23, 0x68, 0x98, 0x48, 0x23, 0xF6, 0x88, 0x07, 0x23,
+ 0x80, 0x00, 0x06, 0x87, 0x80, 0x63, 0x7C, 0x85, 0x00, 0x23, 0xDF, 0x00, 0x00, 0x63, 0x4A, 0x00,
+ 0x06, 0x61, 0x00, 0xA2, 0x36, 0x06, 0x1D, 0x01, 0x16, 0xD4, 0xC0, 0x23, 0x07, 0x41, 0x83, 0x03,
+ 0x80, 0x63, 0x06, 0xA6, 0x1C, 0x06, 0x00, 0x33, 0x37, 0x00, 0xC0, 0x88, 0x1D, 0x01, 0x01, 0xD6,
+ 0x20, 0x23, 0x63, 0x60, 0x83, 0x03, 0x80, 0x63, 0x02, 0x23, 0xDF, 0x00, 0x07, 0xA6, 0x7C, 0x05,
+ 0xEF, 0x04, 0x6F, 0x00, 0x00, 0x63, 0x4B, 0x00, 0x06, 0x41, 0xCB, 0x00, 0x52, 0x00, 0x06, 0x61,
+ 0x00, 0xA2, 0x4E, 0x06, 0x1D, 0x01, 0x03, 0xCA, 0xC0, 0x23, 0x07, 0x41, 0x00, 0x63, 0x1D, 0x01,
+ 0x04, 0xCC, 0x00, 0x33, 0x00, 0x83, 0xC0, 0x20, 0x81, 0x62, 0x80, 0x23, 0x07, 0x41, 0x00, 0x63,
+ 0x80, 0x67, 0x08, 0x23, 0x83, 0x03, 0x80, 0x63, 0x00, 0x63, 0x01, 0x23, 0xDF, 0x00, 0x06, 0xA6,
+ 0x84, 0x06, 0x07, 0xA6, 0x7C, 0x05, 0x80, 0x67, 0x80, 0x63, 0x00, 0x33, 0x00, 0x40, 0xC0, 0x20,
+ 0x81, 0x62, 0x00, 0x63, 0x00, 0x00, 0xFE, 0x95, 0x83, 0x03, 0x80, 0x63, 0x06, 0xA6, 0x94, 0x06,
+ 0x07, 0xA6, 0x7C, 0x05, 0x00, 0x00, 0x01, 0xA0, 0x14, 0x07, 0x00, 0x2B, 0x40, 0x0E, 0x80, 0x63,
+ 0x01, 0x00, 0x06, 0xA6, 0xAA, 0x06, 0x07, 0xA6, 0x7C, 0x05, 0x40, 0x0E, 0x80, 0x63, 0x00, 0x43,
+ 0x00, 0xA0, 0xA2, 0x06, 0x06, 0xA6, 0xBC, 0x06, 0x07, 0xA6, 0x7C, 0x05, 0x80, 0x67, 0x40, 0x0E,
+ 0x80, 0x63, 0x07, 0xA6, 0x7C, 0x05, 0x00, 0x23, 0xDF, 0x00, 0x00, 0x63, 0x07, 0xA6, 0xD6, 0x06,
+ 0x00, 0x33, 0x2A, 0x00, 0xC0, 0x88, 0x03, 0x03, 0x80, 0x63, 0x89, 0x00, 0x0A, 0x2B, 0x07, 0xA6,
+ 0xE8, 0x06, 0x00, 0x33, 0x29, 0x00, 0xC0, 0x88, 0x00, 0x43, 0x00, 0xA2, 0xF4, 0x06, 0xC0, 0x0E,
+ 0x80, 0x63, 0xDE, 0x86, 0xC0, 0x0E, 0x00, 0x33, 0x00, 0x80, 0xC0, 0x20, 0x81, 0x62, 0x04, 0x01,
+ 0x02, 0xDA, 0x80, 0x63, 0x7C, 0x85, 0x80, 0x7B, 0x80, 0x63, 0x06, 0xA6, 0x8C, 0x06, 0x00, 0x33,
+ 0x2C, 0x00, 0xC0, 0x88, 0x0C, 0xA2, 0x2E, 0x07, 0xFE, 0x95, 0x83, 0x03, 0x80, 0x63, 0x06, 0xA6,
+ 0x2C, 0x07, 0x07, 0xA6, 0x7C, 0x05, 0x00, 0x33, 0x3D, 0x00, 0xC0, 0x88, 0x00, 0x00, 0x80, 0x67,
+ 0x83, 0x03, 0x80, 0x63, 0x0C, 0xA0, 0x44, 0x07, 0x07, 0xA6, 0x7C, 0x05, 0xBF, 0x23, 0x04, 0x61,
+ 0x84, 0x01, 0xE6, 0x84, 0x00, 0x63, 0xF0, 0x04, 0x01, 0x01, 0xF1, 0x00, 0x00, 0x01, 0xF2, 0x00,
+ 0x01, 0x05, 0x80, 0x01, 0x72, 0x04, 0x71, 0x00, 0x81, 0x01, 0x70, 0x04, 0x80, 0x05, 0x81, 0x05,
+ 0x00, 0x63, 0xF0, 0x04, 0xF2, 0x00, 0x72, 0x04, 0x01, 0x01, 0xF1, 0x00, 0x70, 0x00, 0x81, 0x01,
+ 0x70, 0x04, 0x71, 0x00, 0x81, 0x01, 0x72, 0x00, 0x80, 0x01, 0x71, 0x04, 0x70, 0x00, 0x80, 0x01,
+ 0x70, 0x04, 0x00, 0x63, 0xF0, 0x04, 0xF2, 0x00, 0x72, 0x04, 0x00, 0x01, 0xF1, 0x00, 0x70, 0x00,
+ 0x80, 0x01, 0x70, 0x04, 0x71, 0x00, 0x80, 0x01, 0x72, 0x00, 0x81, 0x01, 0x71, 0x04, 0x70, 0x00,
+ 0x81, 0x01, 0x70, 0x04, 0x00, 0x63, 0x00, 0x23, 0xB3, 0x01, 0x83, 0x05, 0xA3, 0x01, 0xA2, 0x01,
+ 0xA1, 0x01, 0x01, 0x23, 0xA0, 0x01, 0x00, 0x01, 0xC8, 0x00, 0x03, 0xA1, 0xC4, 0x07, 0x00, 0x33,
+ 0x07, 0x00, 0xC0, 0x88, 0x80, 0x05, 0x81, 0x05, 0x04, 0x01, 0x11, 0xC8, 0x48, 0x00, 0xB0, 0x01,
+ 0xB1, 0x01, 0x08, 0x23, 0xB2, 0x01, 0x05, 0x01, 0x48, 0x04, 0x00, 0x43, 0x00, 0xA2, 0xE4, 0x07,
+ 0x00, 0x05, 0xDA, 0x87, 0x00, 0x01, 0xC8, 0x00, 0xFF, 0x23, 0x80, 0x01, 0x05, 0x05, 0x00, 0x63,
+ 0xF7, 0x04, 0x1A, 0x09, 0xF6, 0x08, 0x6E, 0x04, 0x00, 0x02, 0x80, 0x43, 0x76, 0x08, 0x80, 0x02,
+ 0x77, 0x04, 0x00, 0x63, 0xF7, 0x04, 0x1A, 0x09, 0xF6, 0x08, 0x6E, 0x04, 0x00, 0x02, 0x00, 0xA0,
+ 0x14, 0x08, 0x16, 0x88, 0x00, 0x43, 0x76, 0x08, 0x80, 0x02, 0x77, 0x04, 0x00, 0x63, 0xF3, 0x04,
+ 0x00, 0x23, 0xF4, 0x00, 0x74, 0x00, 0x80, 0x43, 0xF4, 0x00, 0xCF, 0x40, 0x00, 0xA2, 0x44, 0x08,
+ 0x74, 0x04, 0x02, 0x01, 0xF7, 0xC9, 0xF6, 0xD9, 0x00, 0x01, 0x01, 0xA1, 0x24, 0x08, 0x04, 0x98,
+ 0x26, 0x95, 0x24, 0x88, 0x73, 0x04, 0x00, 0x63, 0xF3, 0x04, 0x75, 0x04, 0x5A, 0x88, 0x02, 0x01,
+ 0x04, 0xD8, 0x46, 0x97, 0x04, 0x98, 0x26, 0x95, 0x4A, 0x88, 0x75, 0x00, 0x00, 0xA3, 0x64, 0x08,
+ 0x00, 0x05, 0x4E, 0x88, 0x73, 0x04, 0x00, 0x63, 0x80, 0x7B, 0x80, 0x63, 0x06, 0xA6, 0x76, 0x08,
+ 0x00, 0x33, 0x3E, 0x00, 0xC0, 0x88, 0x80, 0x67, 0x83, 0x03, 0x80, 0x63, 0x00, 0x63, 0x38, 0x2B,
+ 0x9C, 0x88, 0x38, 0x2B, 0x92, 0x88, 0x32, 0x09, 0x31, 0x05, 0x92, 0x98, 0x05, 0x05, 0xB2, 0x09,
+ 0x00, 0x63, 0x00, 0x32, 0x00, 0x36, 0x00, 0x3A, 0x00, 0x3E, 0x00, 0x63, 0x80, 0x32, 0x80, 0x36,
+ 0x80, 0x3A, 0x80, 0x3E, 0x00, 0x63, 0x38, 0x2B, 0x40, 0x32, 0x40, 0x36, 0x40, 0x3A, 0x40, 0x3E,
+ 0x00, 0x63, 0x5A, 0x20, 0xC9, 0x40, 0x00, 0xA0, 0xB2, 0x08, 0x5D, 0x00, 0xFE, 0xC3, 0x00, 0x63,
+ 0x80, 0x73, 0xE6, 0x20, 0x02, 0x23, 0xE8, 0x00, 0x82, 0x73, 0xFF, 0xFD, 0x80, 0x73, 0x13, 0x23,
+ 0xF6, 0x88, 0x66, 0x20, 0xC0, 0x20, 0x04, 0x23, 0xA0, 0x01, 0xA1, 0x23, 0xA1, 0x01, 0x81, 0x62,
+ 0xE0, 0x88, 0x80, 0x73, 0x80, 0x77, 0x68, 0x00, 0x00, 0xA2, 0x80, 0x00, 0x03, 0xC2, 0xF1, 0xC7,
+ 0x41, 0x23, 0xF6, 0x88, 0x11, 0x23, 0xA1, 0x01, 0x04, 0x23, 0xA0, 0x01, 0xE6, 0x84,
+};
+
+STATIC ushort _asc_mcode_size ASC_INITDATA = sizeof(_asc_mcode_buf);
+STATIC ulong _asc_mcode_chksum ASC_INITDATA = 0x012B5442UL;
+
+#define ASC_SYN_OFFSET_ONE_DISABLE_LIST 16
+STATIC uchar _syn_offset_one_disable_cmd[ASC_SYN_OFFSET_ONE_DISABLE_LIST] =
+{
+ SCSICMD_Inquiry,
+ SCSICMD_RequestSense,
+ SCSICMD_ReadCapacity,
+ SCSICMD_ReadTOC,
+ SCSICMD_ModeSelect6,
+ SCSICMD_ModeSense6,
+ SCSICMD_ModeSelect10,
+ SCSICMD_ModeSense10,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF,
+ 0xFF
+};
+
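+/*
+ * AscExeScsiQueue() - queue one SCSI request for execution by the
+ * microcode.
+ *
+ * A REQUEST SENSE to a target with synchronous transfers enabled forces
+ * a fresh SDTR message-out.  The ASC_BUG_FIX_IF_NOT_DWB and "syn offset
+ * one" workarounds are applied, the number of queue blocks needed for
+ * the scatter-gather list is computed, and the request is handed to
+ * AscSendScsiQueue() when enough free blocks are available.  Returns 1
+ * if the request was accepted, 0 if it should be retried later, or ERR.
+ */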
+STATIC int
+AscExeScsiQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ REG ASC_SCSI_Q * scsiq
+)
+{
+ PortAddr iop_base;
+ int last_int_level;
+ int sta;
+ int n_q_required;
+ int disable_syn_offset_one_fix;
+ int i;
+ ulong addr;
+ ASC_EXE_CALLBACK asc_exe_callback;
+ ushort sg_entry_cnt = 0;
+ ushort sg_entry_cnt_minus_one = 0;
+ uchar target_ix;
+ uchar tid_no;
+ uchar sdtr_data;
+ uchar extra_bytes;
+ uchar scsi_cmd;
+ uchar disable_cmd;
+ ASC_SG_HEAD *sg_head;
+ ulong data_cnt;
+
+    iop_base = asc_dvc->iop_base;
+    if (asc_dvc->err_code != 0)
+        return (ERR);
+    if (scsiq == (ASC_SCSI_Q *) 0L) {
+        AscSetLibErrorCode(asc_dvc, ASCQ_ERR_SCSIQ_NULL_PTR);
+        return (ERR);
+    }
+    /* Only dereference the request after the null check above. */
+    sg_head = scsiq->sg_head;
+    asc_exe_callback = (ASC_EXE_CALLBACK) asc_dvc->exe_callback;
+ scsiq->q1.q_no = 0;
+ if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) {
+ scsiq->q1.extra_bytes = 0;
+ }
+ sta = 0;
+ target_ix = scsiq->q2.target_ix;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ n_q_required = 1;
+ if (scsiq->cdbptr[0] == SCSICMD_RequestSense) {
+ if ((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) {
+ asc_dvc->sdtr_done &= ~scsiq->q1.target_id ;
+ sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+ AscMsgOutSDTR(asc_dvc,
+ asc_dvc->sdtr_period_tbl[(sdtr_data >> 4) &
+ (uchar) (asc_dvc->max_sdtr_index - 1)],
+ (uchar) (sdtr_data & (uchar) ASC_SYN_MAX_OFFSET));
+ scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
+ }
+ }
+ last_int_level = DvcEnterCritical();
+ if (asc_dvc->in_critical_cnt != 0) {
+ DvcLeaveCritical(last_int_level);
+ AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY);
+ return (ERR);
+ }
+ asc_dvc->in_critical_cnt++;
+ if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
+ if ((sg_entry_cnt = sg_head->entry_cnt) == 0) {
+ asc_dvc->in_critical_cnt--;
+ DvcLeaveCritical(last_int_level);
+ return (ERR);
+ }
+        if (sg_entry_cnt > ASC_MAX_SG_LIST) {
+            /* Leave the critical section, as the empty-list path does. */
+            asc_dvc->in_critical_cnt--;
+            DvcLeaveCritical(last_int_level);
+            return (ERR);
+        }
+ if (sg_entry_cnt == 1) {
+ scsiq->q1.data_addr = sg_head->sg_list[0].addr;
+ scsiq->q1.data_cnt = sg_head->sg_list[0].bytes;
+ scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE);
+ }
+ sg_entry_cnt_minus_one = sg_entry_cnt - 1;
+ }
+ scsi_cmd = scsiq->cdbptr[0];
+ disable_syn_offset_one_fix = FALSE;
+ if ((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) &&
+ !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) {
+ if (scsiq->q1.cntl & QC_SG_HEAD) {
+ data_cnt = 0;
+ for (i = 0; i < sg_entry_cnt; i++) {
+ data_cnt += sg_head->sg_list[i].bytes;
+ }
+ } else {
+ data_cnt = scsiq->q1.data_cnt;
+ }
+ if (data_cnt != 0UL) {
+ if (data_cnt < 512UL) {
+ disable_syn_offset_one_fix = TRUE;
+ } else {
+ for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST; i++) {
+ disable_cmd = _syn_offset_one_disable_cmd[i];
+ if (disable_cmd == 0xFF) {
+ break;
+ }
+ if (scsi_cmd == disable_cmd) {
+ disable_syn_offset_one_fix = TRUE;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (disable_syn_offset_one_fix) {
+ scsiq->q2.tag_code &= ~M2_QTAG_MSG_SIMPLE;
+ scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
+ ASC_TAG_FLAG_DISABLE_DISCONNECT);
+ } else {
+ scsiq->q2.tag_code &= 0x23;
+ }
+ if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
+ if (asc_dvc->bug_fix_cntl) {
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
+ if ((scsi_cmd == SCSICMD_Read6) ||
+ (scsi_cmd == SCSICMD_Read10)) {
+ addr = sg_head->sg_list[sg_entry_cnt_minus_one].addr +
+ sg_head->sg_list[sg_entry_cnt_minus_one].bytes;
+ extra_bytes = (uchar) ((ushort) addr & 0x0003);
+ if ((extra_bytes != 0) &&
+ ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES)
+ == 0)) {
+ scsiq->q2.tag_code |= ASC_TAG_FLAG_EXTRA_BYTES;
+ scsiq->q1.extra_bytes = extra_bytes;
+ sg_head->sg_list[sg_entry_cnt_minus_one].bytes -=
+ (ulong) extra_bytes;
+ }
+ }
+ }
+ }
+ sg_head->entry_to_copy = sg_head->entry_cnt;
+ n_q_required = AscSgListToQueue(sg_entry_cnt);
+ if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >=
+ (uint) n_q_required) || ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+ if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
+ n_q_required)) == 1) {
+ asc_dvc->in_critical_cnt--;
+ if (asc_exe_callback != 0) {
+ (*asc_exe_callback) (asc_dvc, scsiq);
+ }
+ DvcLeaveCritical(last_int_level);
+ return (sta);
+ }
+ }
+ } else {
+ if (asc_dvc->bug_fix_cntl) {
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) {
+ if ((scsi_cmd == SCSICMD_Read6) ||
+ (scsi_cmd == SCSICMD_Read10)) {
+ addr = scsiq->q1.data_addr + scsiq->q1.data_cnt;
+ extra_bytes = (uchar) ((ushort) addr & 0x0003);
+ if ((extra_bytes != 0) &&
+ ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES)
+ == 0)) {
+ if (((ushort) scsiq->q1.data_cnt & 0x01FF) == 0) {
+ scsiq->q2.tag_code |= ASC_TAG_FLAG_EXTRA_BYTES;
+ scsiq->q1.data_cnt -= (ulong) extra_bytes;
+ scsiq->q1.extra_bytes = extra_bytes;
+ }
+ }
+ }
+ }
+ }
+ n_q_required = 1;
+ if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, 1) >= 1) ||
+ ((scsiq->q1.cntl & QC_URGENT) != 0)) {
+ if ((sta = AscSendScsiQueue(asc_dvc, scsiq,
+ n_q_required)) == 1) {
+ asc_dvc->in_critical_cnt--;
+ if (asc_exe_callback != 0) {
+ (*asc_exe_callback) (asc_dvc, scsiq);
+ }
+ DvcLeaveCritical(last_int_level);
+ return (sta);
+ }
+ }
+ }
+ asc_dvc->in_critical_cnt--;
+ DvcLeaveCritical(last_int_level);
+ return (sta);
+}
+
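+/*
+ * AscSendScsiQueue() - copy a ready request into free LRAM queue blocks.
+ *
+ * Allocates n_q_required blocks from the free-queue list and writes the
+ * request with AscPutReadyQueue() or AscPutReadySgListQueue(), updating
+ * the total and per-target outstanding counts on success.
+ */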
+STATIC int
+AscSendScsiQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ REG ASC_SCSI_Q * scsiq,
+ uchar n_q_required
+)
+{
+ PortAddr iop_base;
+ uchar free_q_head;
+ uchar next_qp;
+ uchar tid_no;
+ uchar target_ix;
+ int sta;
+
+ iop_base = asc_dvc->iop_base;
+ target_ix = scsiq->q2.target_ix;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ sta = 0;
+ free_q_head = (uchar) AscGetVarFreeQHead(iop_base);
+ if (n_q_required > 1) {
+ if ((next_qp = AscAllocMultipleFreeQueue(iop_base,
+ free_q_head, (uchar) (n_q_required)))
+ != (uchar) ASC_QLINK_END) {
+ asc_dvc->last_q_shortage = 0;
+ scsiq->sg_head->queue_cnt = n_q_required - 1;
+ scsiq->q1.q_no = free_q_head;
+ if ((sta = AscPutReadySgListQueue(asc_dvc, scsiq,
+ free_q_head)) == 1) {
+ AscPutVarFreeQHead(iop_base, next_qp);
+ asc_dvc->cur_total_qng += (uchar) (n_q_required);
+ asc_dvc->cur_dvc_qng[tid_no]++;
+ }
+ return (sta);
+ }
+ } else if (n_q_required == 1) {
+ if ((next_qp = AscAllocFreeQueue(iop_base,
+ free_q_head)) != ASC_QLINK_END) {
+ scsiq->q1.q_no = free_q_head;
+ if ((sta = AscPutReadyQueue(asc_dvc, scsiq,
+ free_q_head)) == 1) {
+ AscPutVarFreeQHead(iop_base, next_qp);
+ asc_dvc->cur_total_qng++;
+ asc_dvc->cur_dvc_qng[tid_no]++;
+ }
+ return (sta);
+ }
+ }
+ return (sta);
+}
+
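+/*
+ * AscSgListToQueue() - number of queue blocks needed for a request with
+ * 'sg_list' scatter-gather entries: one head block plus one block per
+ * ASC_SG_LIST_PER_Q additional entries (rounded up).
+ */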
+STATIC int
+AscSgListToQueue(
+ int sg_list
+)
+{
+ int n_sg_list_qs;
+
+ n_sg_list_qs = ((sg_list - 1) / ASC_SG_LIST_PER_Q);
+ if (((sg_list - 1) % ASC_SG_LIST_PER_Q) != 0)
+ n_sg_list_qs++;
+ return (n_sg_list_qs + 1);
+}
+
+
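+/*
+ * AscGetNumOfFreeQueue() - free queue blocks available to a target.
+ *
+ * Returns 0 if the target is not ready or queue-full/busy, if it is at
+ * its per-device limit, or if granting n_qs blocks would dip below the
+ * ASC_MIN_FREE_Q reserve; otherwise returns the free block count.  A
+ * multi-block shortage is recorded in last_q_shortage so that later
+ * single-block requests leave room for the retry.
+ */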
+STATIC uint
+AscGetNumOfFreeQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix,
+ uchar n_qs
+)
+{
+ uint cur_used_qs;
+ uint cur_free_qs;
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ uchar tid_no;
+
+ target_id = ASC_TIX_TO_TARGET_ID(target_ix);
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ if ((asc_dvc->unit_not_ready & target_id) ||
+ (asc_dvc->queue_full_or_busy & target_id)) {
+ return (0);
+ }
+ if (n_qs == 1) {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) asc_dvc->last_q_shortage +
+ (uint) ASC_MIN_FREE_Q;
+ } else {
+ cur_used_qs = (uint) asc_dvc->cur_total_qng +
+ (uint) ASC_MIN_FREE_Q;
+ }
+ if ((uint) (cur_used_qs + n_qs) <= (uint) asc_dvc->max_total_qng) {
+ cur_free_qs = (uint) asc_dvc->max_total_qng - cur_used_qs;
+ if (asc_dvc->cur_dvc_qng[tid_no] >=
+ asc_dvc->max_dvc_qng[tid_no]) {
+ return (0);
+ }
+ return (cur_free_qs);
+ }
+ if (n_qs > 1) {
+ if ((n_qs > asc_dvc->last_q_shortage) && (n_qs <= (asc_dvc->max_total_qng - ASC_MIN_FREE_Q))) {
+ asc_dvc->last_q_shortage = n_qs;
+ }
+ }
+ return (0);
+}
+
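+/*
+ * AscPutReadyQueue() - write one request into LRAM queue block 'q_no'.
+ *
+ * Emits an SDTR message first if synchronous negotiation is still
+ * pending for the target, copies the CDB and queue header into LRAM,
+ * and marks the block QS_READY for the microcode.
+ */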
+STATIC int
+AscPutReadyQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ REG ASC_SCSI_Q * scsiq,
+ uchar q_no
+)
+{
+ ushort q_addr;
+ uchar tid_no;
+ uchar sdtr_data;
+ uchar syn_period_ix;
+ uchar syn_offset;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ if (((asc_dvc->init_sdtr & scsiq->q1.target_id) != 0) &&
+ ((asc_dvc->sdtr_done & scsiq->q1.target_id) == 0)) {
+ tid_no = ASC_TIX_TO_TID(scsiq->q2.target_ix);
+ sdtr_data = AscGetMCodeInitSDTRAtID(iop_base, tid_no);
+ syn_period_ix = (sdtr_data >> 4) & (asc_dvc->max_sdtr_index - 1);
+ syn_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
+ AscMsgOutSDTR(asc_dvc,
+ asc_dvc->sdtr_period_tbl[syn_period_ix],
+ syn_offset);
+ scsiq->q1.cntl |= QC_MSG_OUT;
+ }
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
+ scsiq->q2.tag_code &= ~M2_QTAG_MSG_SIMPLE;
+ }
+ scsiq->q1.status = QS_FREE;
+ AscMemWordCopyToLram(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CDB_BEG),
+ (ushort *) scsiq->cdbptr,
+ (ushort) ((ushort) scsiq->q2.cdb_len >> 1));
+ DvcPutScsiQ(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_CPY_BEG),
+ (ushort *) & scsiq->q1.cntl,
+ (ushort) ((((sizeof (ASC_SCSIQ_1) + sizeof (ASC_SCSIQ_2)) / 2) - 1)));
+ AscWriteLramWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ (ushort) (((ushort) scsiq->q1.q_no << 8) | (ushort) QS_READY));
+ return (1);
+}
+
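+/*
+ * AscPutReadySgListQueue() - write a scatter-gather request into LRAM.
+ *
+ * The first element goes into the head queue block; the remaining
+ * elements are spread across the chained SG list blocks (queue_cnt of
+ * them) before the head block is made ready with AscPutReadyQueue().
+ */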
+STATIC int
+AscPutReadySgListQueue(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ REG ASC_SCSI_Q * scsiq,
+ uchar q_no
+)
+{
+ int sta;
+ int i;
+ ASC_SG_HEAD *sg_head;
+ ASC_SG_LIST_Q scsi_sg_q;
+ ulong saved_data_addr;
+ ulong saved_data_cnt;
+ PortAddr iop_base;
+ ushort sg_list_dwords;
+ ushort sg_index;
+ ushort sg_entry_cnt;
+ ushort q_addr;
+ uchar next_qp;
+
+ iop_base = asc_dvc->iop_base;
+ sg_head = scsiq->sg_head;
+ saved_data_addr = scsiq->q1.data_addr;
+ saved_data_cnt = scsiq->q1.data_cnt;
+ scsiq->q1.data_addr = sg_head->sg_list[0].addr;
+ scsiq->q1.data_cnt = sg_head->sg_list[0].bytes;
+ sg_entry_cnt = sg_head->entry_cnt - 1;
+ if (sg_entry_cnt != 0) {
+ scsiq->q1.cntl |= QC_SG_HEAD;
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ sg_index = 1;
+ scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
+ scsi_sg_q.sg_head_qp = q_no;
+ scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
+ for (i = 0; i < sg_head->queue_cnt; i++) {
+ scsi_sg_q.seq_no = i + 1;
+ if (sg_entry_cnt > ASC_SG_LIST_PER_Q) {
+ sg_list_dwords = (uchar) (ASC_SG_LIST_PER_Q * 2);
+ sg_entry_cnt -= ASC_SG_LIST_PER_Q;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q;
+ scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q;
+ } else {
+ scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1;
+ scsi_sg_q.sg_cur_list_cnt = ASC_SG_LIST_PER_Q - 1;
+ }
+ } else {
+ scsi_sg_q.cntl |= QCSG_SG_XFER_END;
+ sg_list_dwords = sg_entry_cnt << 1;
+ if (i == 0) {
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt;
+ scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt;
+ } else {
+ scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1;
+ scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1;
+ }
+ sg_entry_cnt = 0;
+ }
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_FWD));
+ scsi_sg_q.q_no = next_qp;
+ q_addr = ASC_QNO_TO_QADDR(next_qp);
+ AscMemWordCopyToLram(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_SGHD_CPY_BEG),
+ (ushort *) & scsi_sg_q,
+ (ushort) (sizeof (ASC_SG_LIST_Q) >> 1));
+ AscMemDWordCopyToLram(iop_base,
+ (ushort) (q_addr + ASC_SGQ_LIST_BEG),
+ (ulong *) & sg_head->sg_list[sg_index],
+ (ushort) sg_list_dwords);
+ sg_index += ASC_SG_LIST_PER_Q;
+ }
+ } else {
+ scsiq->q1.cntl &= ~QC_SG_HEAD;
+ }
+ sta = AscPutReadyQueue(asc_dvc, scsiq, q_no);
+ scsiq->q1.data_addr = saved_data_addr;
+ scsiq->q1.data_cnt = saved_data_cnt;
+ return (sta);
+}
+
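+/*
+ * AscAbortSRB() - abort the request identified by 'srb_ptr'.
+ *
+ * Marks all targets not ready, waits for the ISR to drain, halts queue
+ * execution, and lets AscRiscHaltedAbortSRB() abort the matching queue
+ * block before restarting the microcode.  Returns 1 if the request was
+ * aborted, 0 if it was not found, or ERR if the microcode could not be
+ * stopped.
+ */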
+STATIC int
+AscAbortSRB(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ulong srb_ptr
+)
+{
+ int sta;
+ ASC_SCSI_BIT_ID_TYPE saved_unit_not_ready;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ sta = ERR;
+ saved_unit_not_ready = asc_dvc->unit_not_ready;
+ asc_dvc->unit_not_ready = 0xFF;
+ AscWaitISRDone(asc_dvc);
+ if (AscStopQueueExe(iop_base) == 1) {
+ if (AscRiscHaltedAbortSRB(asc_dvc, srb_ptr) == 1) {
+ sta = 1;
+ AscCleanUpBusyQueue(iop_base);
+ AscStartQueueExe(iop_base);
+ } else {
+ sta = 0;
+ AscStartQueueExe(iop_base);
+ }
+ }
+ asc_dvc->unit_not_ready = saved_unit_not_ready;
+ return (sta);
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int
+AscResetDevice(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ PortAddr iop_base;
+ int sta;
+ uchar tid_no;
+
+ ASC_SCSI_BIT_ID_TYPE target_id;
+ int i;
+ ASC_SCSI_REQ_Q scsiq_buf;
+ ASC_SCSI_REQ_Q *scsiq;
+ uchar *buf;
+ ASC_SCSI_BIT_ID_TYPE saved_unit_not_ready;
+ iop_base = asc_dvc->iop_base;
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ target_id = ASC_TID_TO_TARGET_ID(tid_no);
+ saved_unit_not_ready = asc_dvc->unit_not_ready;
+ asc_dvc->unit_not_ready = target_id;
+ sta = ERR;
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ if (AscStopQueueExe(iop_base) == 1) {
+ if (AscRiscHaltedAbortTIX(asc_dvc, target_ix) == 1) {
+ AscCleanUpBusyQueue(iop_base);
+ AscStartQueueExe(iop_base);
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ sta = TRUE;
+ scsiq = (ASC_SCSI_REQ_Q *) & scsiq_buf;
+ buf = (uchar *) & scsiq_buf;
+ for (i = 0; i < sizeof (ASC_SCSI_REQ_Q); i++) {
+ *buf++ = 0x00;
+ }
+ scsiq->r1.status = (uchar) QS_READY;
+ scsiq->r2.cdb_len = 6;
+ scsiq->r2.tag_code = M2_QTAG_MSG_SIMPLE;
+ scsiq->r1.target_id = target_id;
+ scsiq->r2.target_ix = ASC_TIDLUN_TO_IX(tid_no, 0);
+ scsiq->cdbptr = (uchar *) scsiq->cdb;
+ scsiq->r1.cntl = QC_NO_CALLBACK | QC_MSG_OUT | QC_URGENT;
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_MSGOUT_BEG,
+ M1_BUS_DVC_RESET);
+ asc_dvc->unit_not_ready &= ~target_id;
+ asc_dvc->sdtr_done |= target_id;
+ if (AscExeScsiQueue(asc_dvc, (ASC_SCSI_Q *) scsiq)
+ == 1) {
+ asc_dvc->unit_not_ready = target_id;
+ DvcSleepMilliSecond(1000);
+ _AscWaitQDone(iop_base, (ASC_SCSI_Q *) scsiq);
+ if (AscStopQueueExe(iop_base) == 1) {
+ AscCleanUpDiscQueue(iop_base);
+ AscStartQueueExe(iop_base);
+ if (asc_dvc->pci_fix_asyn_xfer & target_id) {
+ AscSetRunChipSynRegAtID(iop_base, tid_no,
+ ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ AscWaitTixISRDone(asc_dvc, target_ix);
+ }
+ } else {
+ sta = 0;
+ }
+ asc_dvc->sdtr_done &= ~target_id;
+ } else {
+ sta = ERR;
+ AscStartQueueExe(iop_base);
+ }
+ }
+ asc_dvc->unit_not_ready = saved_unit_not_ready;
+ return (sta);
+}
+#endif /* version >= v1.3.89 */
+
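+/*
+ * AscResetSB() - reset the SCSI bus and re-initialize the board.
+ *
+ * Stops queue execution, resets the chip and bus, rebuilds LRAM and the
+ * queue links, restores any per-target asynchronous-transfer fixups,
+ * and restarts the microcode.  Returns TRUE on success or ERR if the
+ * chip fails to restart.
+ */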
+STATIC int
+AscResetSB(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int sta;
+ int i;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ asc_dvc->unit_not_ready = 0xFF;
+ sta = TRUE;
+ AscWaitISRDone(asc_dvc);
+ AscStopQueueExe(iop_base);
+ asc_dvc->sdtr_done = 0;
+ AscResetChipAndScsiBus(asc_dvc);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ AscReInitLram(asc_dvc);
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cur_dvc_qng[i] = 0;
+ if (asc_dvc->pci_fix_asyn_xfer & (ASC_SCSI_BIT_ID_TYPE) (0x01 << i)) {
+ AscSetChipSynRegAtID(iop_base, i, ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ }
+ asc_dvc->err_code = 0;
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ sta = ERR;
+ }
+ if (AscStartChip(iop_base) == 0) {
+ sta = ERR;
+ }
+ AscStartQueueExe(iop_base);
+ asc_dvc->unit_not_ready = 0;
+ asc_dvc->queue_full_or_busy = 0;
+ return (sta);
+}
+
+STATIC int
+AscSetRunChipSynRegAtID(
+ PortAddr iop_base,
+ uchar tid_no,
+ uchar sdtr_data
+)
+{
+ int sta = FALSE;
+
+ if (AscHostReqRiscHalt(iop_base)) {
+ sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
+ AscStartChip(iop_base);
+ return (sta);
+ }
+ return (sta);
+}
+
+STATIC int
+AscSetChipSynRegAtID(
+ PortAddr iop_base,
+ uchar id,
+ uchar sdtr_data
+)
+{
+ ASC_SCSI_BIT_ID_TYPE org_id;
+ int i;
+ int sta = TRUE;
+
+ AscSetBank(iop_base, 1);
+ org_id = AscReadChipDvcID(iop_base);
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ if (org_id == (0x01 << i))
+ break;
+ }
+ org_id = i;
+ AscWriteChipDvcID(iop_base, id);
+ if (AscReadChipDvcID(iop_base) == (0x01 << id)) {
+ AscSetBank(iop_base, 0);
+ AscSetChipSyn(iop_base, sdtr_data);
+ if (AscGetChipSyn(iop_base) != sdtr_data) {
+ sta = FALSE;
+ }
+ } else {
+ sta = FALSE;
+ }
+ AscSetBank(iop_base, 1);
+ AscWriteChipDvcID(iop_base, org_id);
+ AscSetBank(iop_base, 0);
+ return (sta);
+}
+
+STATIC int
+AscReInitLram(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ AscInitLram(asc_dvc);
+ AscInitQLinkVar(asc_dvc);
+ return (0);
+}
+
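+/*
+ * AscInitLram() - build the free-queue list in board LRAM.
+ *
+ * Clears the queue area, links blocks 1..max_total_qng into a doubly
+ * linked free list, and terminates with a few self-linked bookkeeping
+ * blocks.
+ */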
+STATIC ushort
+AscInitLram(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ uchar i;
+ ushort s_addr;
+ PortAddr iop_base;
+ ushort warn_code;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+    AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0,
+        (ushort) (((int) (asc_dvc->max_total_qng + 2 + 1) * 64) >> 1));
+ i = ASC_MIN_ACTIVE_QNO;
+ s_addr = ASC_QADR_BEG + ASC_QBLK_SIZE;
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) (i + 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (asc_dvc->max_total_qng));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) i);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+ for (; i < asc_dvc->max_total_qng; i++, s_addr += ASC_QBLK_SIZE) {
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) (i + 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (i - 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) i);
+ }
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_FWD),
+ (uchar) ASC_QLINK_END);
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_BWD),
+ (uchar) (asc_dvc->max_total_qng - 1));
+ AscWriteLramByte(iop_base, (ushort) (s_addr + ASC_SCSIQ_B_QNO),
+ (uchar) asc_dvc->max_total_qng);
+ i++;
+ s_addr += ASC_QBLK_SIZE;
+ for (; i <= (uchar) (asc_dvc->max_total_qng + 3);
+ i++, s_addr += ASC_QBLK_SIZE) {
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_FWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_BWD), i);
+ AscWriteLramByte(iop_base,
+ (ushort) (s_addr + (ushort) ASC_SCSIQ_B_QNO), i);
+ }
+ return (warn_code);
+}
+
+STATIC ushort
+AscInitQLinkVar(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ PortAddr iop_base;
+ int i;
+ ushort lram_addr;
+
+ iop_base = asc_dvc->iop_base;
+ AscPutRiscVarFreeQHead(iop_base, 1);
+ AscPutRiscVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+ AscPutVarFreeQHead(iop_base, 1);
+ AscPutVarDoneQTail(iop_base, asc_dvc->max_total_qng);
+ AscWriteLramByte(iop_base, ASCV_BUSY_QHEAD_B,
+ (uchar) ((int) asc_dvc->max_total_qng + 1));
+ AscWriteLramByte(iop_base, ASCV_DISC1_QHEAD_B,
+ (uchar) ((int) asc_dvc->max_total_qng + 2));
+ AscWriteLramByte(iop_base, (ushort) ASCV_TOTAL_READY_Q_B,
+ asc_dvc->max_total_qng);
+ AscWriteLramWord(iop_base, ASCV_ASCDVC_ERR_CODE_W, 0);
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
+ AscWriteLramByte(iop_base, ASCV_SCSIBUSY_B, 0);
+ AscWriteLramByte(iop_base, ASCV_WTM_FLAG_B, 0);
+ AscPutQDoneInProgress(iop_base, 0);
+ lram_addr = ASC_QADR_BEG;
+ for (i = 0; i < 32; i++, lram_addr += 2) {
+ AscWriteLramWord(iop_base, lram_addr, 0);
+ }
+ return (0);
+}
+
+STATIC int
+AscSetLibErrorCode(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ushort err_code
+)
+{
+ if (asc_dvc->err_code == 0) {
+ asc_dvc->err_code = err_code;
+ AscWriteLramWord(asc_dvc->iop_base, ASCV_ASCDVC_ERR_CODE_W,
+ err_code);
+ }
+ return (err_code);
+}
+
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int
+_AscWaitQDone(
+ PortAddr iop_base,
+ REG ASC_SCSI_Q * scsiq
+)
+{
+ ushort q_addr;
+ uchar q_status;
+ int count = 0;
+
+ while (scsiq->q1.q_no == 0) ;
+ q_addr = ASC_QNO_TO_QADDR(scsiq->q1.q_no);
+ do {
+ q_status = AscReadLramByte(iop_base, q_addr + ASC_SCSIQ_B_STATUS);
+ DvcSleepMilliSecond(100L);
+ if (count++ > 30) {
+ return (0);
+ }
+ } while ((q_status & QS_READY) != 0);
+ return (1);
+}
+#endif /* version >= v1.3.89 */
+
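+/*
+ * AscMsgOutSDTR() - place a synchronous data transfer request (SDTR)
+ * extended message in the LRAM message-out buffer.  Returns the packed
+ * period-index/offset byte, or 0 (asynchronous) when the requested
+ * period is outside the table.
+ */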
+STATIC uchar
+AscMsgOutSDTR(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar sdtr_period,
+ uchar sdtr_offset
+)
+{
+ EXT_MSG sdtr_buf;
+ uchar sdtr_period_index;
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ sdtr_buf.msg_type = MS_EXTEND;
+ sdtr_buf.msg_len = MS_SDTR_LEN;
+ sdtr_buf.msg_req = MS_SDTR_CODE;
+ sdtr_buf.xfer_period = sdtr_period;
+ sdtr_offset &= ASC_SYN_MAX_OFFSET;
+ sdtr_buf.req_ack_offset = sdtr_offset;
+ if ((sdtr_period_index =
+ AscGetSynPeriodIndex(asc_dvc, sdtr_period)) <=
+ asc_dvc->max_sdtr_index) {
+ AscMemWordCopyToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort *) & sdtr_buf,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+ return ((sdtr_period_index << 4) | sdtr_offset);
+ } else {
+
+ sdtr_buf.req_ack_offset = 0;
+ AscMemWordCopyToLram(iop_base,
+ ASCV_MSGOUT_BEG,
+ (ushort *) & sdtr_buf,
+ (ushort) (sizeof (EXT_MSG) >> 1));
+ return (0);
+ }
+}
+
+STATIC uchar
+AscCalSDTRData(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar sdtr_period,
+ uchar syn_offset
+)
+{
+ uchar byte;
+ uchar sdtr_period_ix;
+
+ sdtr_period_ix = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+    if (sdtr_period_ix > asc_dvc->max_sdtr_index) {
+ return (0xFF);
+ }
+ byte = (sdtr_period_ix << 4) | (syn_offset & ASC_SYN_MAX_OFFSET);
+ return (byte);
+}
+
+STATIC void
+AscSetChipSDTR(
+ PortAddr iop_base,
+ uchar sdtr_data,
+ uchar tid_no
+)
+{
+ AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data);
+ AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data);
+ return;
+}
+
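+/*
+ * AscGetSynPeriodIndex() - map a transfer period to an index into
+ * sdtr_period_tbl[].  Periods slower than the table maximum return
+ * max_sdtr_index + 1, which the callers treat as out of range.
+ */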
+STATIC uchar
+AscGetSynPeriodIndex(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ruchar syn_time
+)
+{
+ ruchar *period_table;
+ int max_index;
+ int min_index;
+ int i;
+
+ period_table = asc_dvc->sdtr_period_tbl;
+ max_index = (int) asc_dvc->max_sdtr_index;
+ min_index = (int)asc_dvc->host_init_sdtr_index ;
+ if ((syn_time <= period_table[max_index])) {
+ for (i = min_index; i < (max_index - 1); i++) {
+ if (syn_time <= period_table[i]) {
+ return ((uchar) i);
+ }
+ }
+ return ((uchar) max_index);
+ } else {
+ return ((uchar) (max_index + 1));
+ }
+}
+
+STATIC uchar
+AscAllocFreeQueue(
+ PortAddr iop_base,
+ uchar free_q_head
+)
+{
+ ushort q_addr;
+ uchar next_qp;
+ uchar q_status;
+
+ q_addr = ASC_QNO_TO_QADDR(free_q_head);
+ q_status = (uchar) AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_STATUS));
+ next_qp = AscReadLramByte(iop_base,
+ (ushort) (q_addr + ASC_SCSIQ_B_FWD));
+ if (((q_status & QS_READY) == 0) && (next_qp != ASC_QLINK_END)) {
+ return (next_qp);
+ }
+ return (ASC_QLINK_END);
+}
+
+STATIC uchar
+AscAllocMultipleFreeQueue(
+ PortAddr iop_base,
+ uchar free_q_head,
+ uchar n_free_q
+)
+{
+ uchar i;
+
+ for (i = 0; i < n_free_q; i++) {
+ if ((free_q_head = AscAllocFreeQueue(iop_base, free_q_head))
+ == ASC_QLINK_END) {
+ return (ASC_QLINK_END);
+ }
+ }
+ return (free_q_head);
+}
+
+STATIC int
+AscRiscHaltedAbortSRB(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ ulong srb_ptr
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ uchar q_no;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO *scsiq;
+ ASC_ISR_CALLBACK asc_isr_callback;
+ int last_int_level;
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ last_int_level = DvcEnterCritical();
+ scsiq = (ASC_QDONE_INFO *) & scsiq_buf;
+ for (q_no = ASC_MIN_ACTIVE_QNO; q_no <= asc_dvc->max_total_qng;
+ q_no++) {
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ scsiq->d2.srb_ptr = AscReadLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR));
+ if (scsiq->d2.srb_ptr == srb_ptr) {
+ _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count);
+ if (((scsiq->q_status & QS_READY) != 0)
+ && ((scsiq->q_status & QS_ABORTED) == 0)
+ && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)) {
+ scsiq->q_status |= QS_ABORTED;
+ scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR),
+ 0L);
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ scsiq->q_status);
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ return (1);
+ }
+ }
+ }
+ DvcLeaveCritical(last_int_level);
+ return (0);
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int
+AscRiscHaltedAbortTIX(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ uchar q_no;
+ ASC_QDONE_INFO scsiq_buf;
+ ASC_QDONE_INFO *scsiq;
+ ASC_ISR_CALLBACK asc_isr_callback;
+ int last_int_level;
+
+ iop_base = asc_dvc->iop_base;
+ asc_isr_callback = (ASC_ISR_CALLBACK) asc_dvc->isr_callback;
+ last_int_level = DvcEnterCritical();
+ scsiq = (ASC_QDONE_INFO *) & scsiq_buf;
+ for (q_no = ASC_MIN_ACTIVE_QNO; q_no <= asc_dvc->max_total_qng;
+ q_no++) {
+ q_addr = ASC_QNO_TO_QADDR(q_no);
+ _AscCopyLramScsiDoneQ(iop_base, q_addr, scsiq, asc_dvc->max_dma_count);
+ if (((scsiq->q_status & QS_READY) != 0) &&
+ ((scsiq->q_status & QS_ABORTED) == 0) &&
+ ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)) {
+ if (scsiq->d2.target_ix == target_ix) {
+ scsiq->q_status |= QS_ABORTED;
+ scsiq->d3.done_stat = QD_ABORTED_BY_HOST;
+ AscWriteLramDWord(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_D_SRBPTR),
+ 0L);
+ AscWriteLramByte(iop_base,
+ (ushort) (q_addr + (ushort) ASC_SCSIQ_B_STATUS),
+ scsiq->q_status);
+ (*asc_isr_callback) (asc_dvc, scsiq);
+ }
+ }
+ }
+ DvcLeaveCritical(last_int_level);
+ return (1);
+}
+#endif /* version >= v1.3.89 */
+
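+/*
+ * AscHostReqRiscHalt() - ask the running microcode to halt the RISC and
+ * poll (up to about two seconds) until it does.  Returns 1 when halted.
+ */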
+STATIC int
+AscHostReqRiscHalt(
+ PortAddr iop_base
+)
+{
+ int count = 0;
+ int sta = 0;
+ uchar saved_stop_code;
+
+ if (AscIsChipHalted(iop_base))
+ return (1);
+ saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+    AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+        ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP);
+ do {
+ if (AscIsChipHalted(iop_base)) {
+ sta = 1;
+ break;
+ }
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code);
+ return (sta);
+}
+
+STATIC int
+AscStopQueueExe(
+ PortAddr iop_base
+)
+{
+ int count = 0;
+
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) == 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_REQ_RISC_STOP);
+ do {
+            if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) &
+                ASC_STOP_ACK_RISC_STOP) {
+ return (1);
+ }
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (0);
+}
+
+STATIC int
+AscStartQueueExe(
+ PortAddr iop_base
+)
+{
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, 0);
+ }
+ return (1);
+}
+
+STATIC int
+AscCleanUpBusyQueue(
+ PortAddr iop_base
+)
+{
+ int count;
+ uchar stop_code;
+
+ count = 0;
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_CLEAN_UP_BUSY_Q);
+ do {
+ stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ if ((stop_code & ASC_STOP_CLEAN_UP_BUSY_Q) == 0)
+ break;
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (1);
+}
+
+#if LINUX_VERSION_CODE >= ASC_LINUX_VERSION(1,3,89)
+STATIC int
+AscCleanUpDiscQueue(
+ PortAddr iop_base
+)
+{
+ int count;
+ uchar stop_code;
+
+ count = 0;
+ if (AscReadLramByte(iop_base, ASCV_STOP_CODE_B) != 0) {
+ AscWriteLramByte(iop_base, ASCV_STOP_CODE_B,
+ ASC_STOP_CLEAN_UP_DISC_Q);
+ do {
+ stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B);
+ if ((stop_code & ASC_STOP_CLEAN_UP_DISC_Q) == 0)
+ break;
+ DvcSleepMilliSecond(100);
+ } while (count++ < 20);
+ }
+ return (1);
+}
+#endif /* version >= v1.3.89 */
+
+STATIC int
+AscWaitTixISRDone(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar target_ix
+)
+{
+ uchar cur_req;
+ uchar tid_no;
+ int i = 0;
+
+ tid_no = ASC_TIX_TO_TID(target_ix);
+ while (i++ < 10) {
+ if ((cur_req = asc_dvc->cur_dvc_qng[tid_no]) == 0) {
+ break;
+ }
+ DvcSleepMilliSecond(1000L);
+ if (asc_dvc->cur_dvc_qng[tid_no] == cur_req) {
+ break;
+ }
+ }
+ return (1);
+}
+
+STATIC int
+AscWaitISRDone(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+{
+ int tid;
+
+ for (tid = 0; tid <= ASC_MAX_TID; tid++) {
+ AscWaitTixISRDone(asc_dvc, ASC_TID_TO_TIX(tid));
+ }
+ return (1);
+}
+
+STATIC ulong
+AscGetOnePhyAddr(
+ REG ASC_DVC_VAR asc_ptr_type * asc_dvc,
+ uchar * buf_addr,
+ ulong buf_size
+)
+{
+ ASC_MIN_SG_HEAD sg_head;
+
+ sg_head.entry_cnt = ASC_MIN_SG_LIST;
+ if (DvcGetSGList(asc_dvc, (uchar *) buf_addr,
+ buf_size, (ASC_SG_HEAD *) & sg_head) != buf_size) {
+ return (0L);
+ }
+ if (sg_head.entry_cnt > 1) {
+ return (0L);
+ }
+ return (sg_head.sg_list[0].addr);
+}
+
+STATIC void
+DvcDelayMicroSecond(ADV_DVC_VAR *asc_dvc, ushort micro_sec)
+{
+ udelay(micro_sec);
+}
+
+STATIC void
+DvcDelayNanoSecond(ASC_DVC_VAR asc_ptr_type * asc_dvc, ulong nano_sec)
+{
+ udelay((nano_sec + 999)/1000);
+}
+
+ASC_INITFUNC(
+STATIC ulong
+AscGetEisaProductID(
+ PortAddr iop_base
+)
+)
+{
+ PortAddr eisa_iop;
+ ushort product_id_high, product_id_low;
+ ulong product_id;
+
+ eisa_iop = ASC_GET_EISA_SLOT(iop_base) | ASC_EISA_PID_IOP_MASK;
+ product_id_low = inpw(eisa_iop);
+ product_id_high = inpw(eisa_iop + 2);
+ product_id = ((ulong) product_id_high << 16) | (ulong) product_id_low;
+ return (product_id);
+}
+
+ASC_INITFUNC(
+STATIC PortAddr
+AscSearchIOPortAddrEISA(
+ PortAddr iop_base
+)
+)
+{
+ ulong eisa_product_id;
+
+ if (iop_base == 0) {
+ iop_base = ASC_EISA_MIN_IOP_ADDR;
+ } else {
+ if (iop_base == ASC_EISA_MAX_IOP_ADDR)
+ return (0);
+ if ((iop_base & 0x0050) == 0x0050) {
+ iop_base += ASC_EISA_BIG_IOP_GAP;
+ } else {
+ iop_base += ASC_EISA_SMALL_IOP_GAP;
+ }
+ }
+ while (iop_base <= ASC_EISA_MAX_IOP_ADDR) {
+ eisa_product_id = AscGetEisaProductID(iop_base);
+ if ((eisa_product_id == ASC_EISA_ID_740) ||
+ (eisa_product_id == ASC_EISA_ID_750)) {
+ if (AscFindSignature(iop_base)) {
+ inpw(iop_base + 4);
+ return (iop_base);
+ }
+ }
+ if (iop_base == ASC_EISA_MAX_IOP_ADDR)
+ return (0);
+ if ((iop_base & 0x0050) == 0x0050) {
+ iop_base += ASC_EISA_BIG_IOP_GAP;
+ } else {
+ iop_base += ASC_EISA_SMALL_IOP_GAP;
+ }
+ }
+ return (0);
+}
+
+STATIC int
+AscStartChip(
+ PortAddr iop_base
+)
+{
+ AscSetChipControl(iop_base, 0);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ return (0);
+ }
+ return (1);
+}
+
+STATIC int
+AscStopChip(
+ PortAddr iop_base
+)
+{
+ uchar cc_val;
+
+ cc_val = AscGetChipControl(iop_base) & (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG));
+ AscSetChipControl(iop_base, (uchar) (cc_val | CC_HALT));
+ AscSetChipIH(iop_base, INS_HALT);
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) {
+ return (0);
+ }
+ return (1);
+}
+
+STATIC int
+AscIsChipHalted(
+ PortAddr iop_base
+)
+{
+ if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) {
+ if ((AscGetChipControl(iop_base) & CC_HALT) != 0) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
+STATIC void
+AscSetChipIH(
+ PortAddr iop_base,
+ ushort ins_code
+)
+{
+ AscSetBank(iop_base, 1);
+ AscWriteChipIH(iop_base, ins_code);
+ AscSetBank(iop_base, 0);
+ return;
+}
+
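+/*
+ * AscAckInterrupt() - acknowledge the chip interrupt.
+ *
+ * Waits for the microcode to finish raising the interrupt, sets
+ * ASC_HOST_FLAG_ACK_INT, and writes CIW_INT_ACK until CSW_INT_PENDING
+ * clears (with bounded retries).
+ */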
+STATIC void
+AscAckInterrupt(
+ PortAddr iop_base
+)
+{
+ uchar host_flag;
+ uchar risc_flag;
+ ushort loop;
+
+ loop = 0;
+ do {
+ risc_flag = AscReadLramByte(iop_base, ASCV_RISC_FLAG_B);
+ if (loop++ > 0x7FFF) {
+ break;
+ }
+ } while ((risc_flag & ASC_RISC_FLAG_GEN_INT) != 0);
+ host_flag = AscReadLramByte(iop_base, ASCV_HOST_FLAG_B) & (~ASC_HOST_FLAG_ACK_INT);
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B,
+ (uchar) (host_flag | ASC_HOST_FLAG_ACK_INT));
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ loop = 0;
+ while (AscGetChipStatus(iop_base) & CSW_INT_PENDING) {
+ AscSetChipStatus(iop_base, CIW_INT_ACK);
+ if (loop++ > 3) {
+ break;
+ }
+ }
+ AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag);
+ return;
+}
+
+STATIC void
+AscDisableInterrupt(
+ PortAddr iop_base
+)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg & (~ASC_CFG0_HOST_INT_ON));
+ return;
+}
+
+STATIC void
+AscEnableInterrupt(
+ PortAddr iop_base
+)
+{
+ ushort cfg;
+
+ cfg = AscGetChipCfgLsw(iop_base);
+ AscSetChipCfgLsw(iop_base, cfg | ASC_CFG0_HOST_INT_ON);
+ return;
+}
+
+
+
+STATIC void
+AscSetBank(
+ PortAddr iop_base,
+ uchar bank
+)
+{
+ uchar val;
+
+ val = AscGetChipControl(iop_base) &
+ (~(CC_SINGLE_STEP | CC_TEST | CC_DIAG | CC_SCSI_RESET | CC_CHIP_RESET));
+ if (bank == 1) {
+ val |= CC_BANK_ONE;
+ } else if (bank == 2) {
+ val |= CC_DIAG | CC_BANK_ONE;
+ } else {
+ val &= ~CC_BANK_ONE;
+ }
+ AscSetChipControl(iop_base, val);
+ return;
+}
+
+STATIC int
+AscResetChipAndScsiBus(
+ ASC_DVC_VAR *asc_dvc
+)
+{
+ PortAddr iop_base;
+
+ iop_base = asc_dvc->iop_base;
+ while (AscGetChipStatus(iop_base) & CSW_SCSI_RESET_ACTIVE) ;
+ AscStopChip(iop_base);
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_SCSI_RESET | CC_HALT);
+ DvcDelayNanoSecond(asc_dvc, 60000);
+ AscSetChipIH(iop_base, INS_RFLAG_WTM);
+ AscSetChipIH(iop_base, INS_HALT);
+ AscSetChipControl(iop_base, CC_CHIP_RESET | CC_HALT);
+ AscSetChipControl(iop_base, CC_HALT);
+ DvcSleepMilliSecond(200);
+ AscSetChipStatus(iop_base, CIW_CLR_SCSI_RESET_INT);
+ AscSetChipStatus(iop_base, 0);
+ return (AscIsChipHalted(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC ulong
+AscGetMaxDmaCount(
+ ushort bus_type
+)
+)
+{
+ if (bus_type & ASC_IS_ISA)
+ return (ASC_MAX_ISA_DMA_COUNT);
+ else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
+ return (ASC_MAX_VL_DMA_COUNT);
+ return (ASC_MAX_PCI_DMA_COUNT);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscGetIsaDmaChannel(
+ PortAddr iop_base
+)
+)
+{
+ ushort channel;
+
+ channel = AscGetChipCfgLsw(iop_base) & 0x0003;
+ if (channel == 0x03)
+ return (0);
+ else if (channel == 0x00)
+ return (7);
+ return (channel + 4);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscSetIsaDmaChannel(
+ PortAddr iop_base,
+ ushort dma_channel
+)
+)
+{
+ ushort cfg_lsw;
+ uchar value;
+
+ if ((dma_channel >= 5) && (dma_channel <= 7)) {
+ if (dma_channel == 7)
+ value = 0x00;
+ else
+ value = dma_channel - 4;
+ cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
+ cfg_lsw |= value;
+ AscSetChipCfgLsw(iop_base, cfg_lsw);
+ return (AscGetIsaDmaChannel(iop_base));
+ }
+ return (0);
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscSetIsaDmaSpeed(
+ PortAddr iop_base,
+ uchar speed_value
+)
+)
+{
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 1);
+ AscWriteChipDmaSpeed(iop_base, speed_value);
+ AscSetBank(iop_base, 0);
+ return (AscGetIsaDmaSpeed(iop_base));
+}
+
+ASC_INITFUNC(
+STATIC uchar
+AscGetIsaDmaSpeed(
+ PortAddr iop_base
+)
+)
+{
+ uchar speed_value;
+
+ AscSetBank(iop_base, 1);
+ speed_value = AscReadChipDmaSpeed(iop_base);
+ speed_value &= 0x07;
+ AscSetBank(iop_base, 0);
+ return (speed_value);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscReadPCIConfigWord(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ ushort pci_config_offset)
+)
+{
+ uchar lsb, msb;
+
+ lsb = DvcReadPCIConfigByte(asc_dvc, pci_config_offset);
+ msb = DvcReadPCIConfigByte(asc_dvc, pci_config_offset + 1);
+ return ((ushort) ((msb << 8) | lsb));
+}
+
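+/*
+ * AscInitGetConfig() - first stage of board initialization.
+ *
+ * For PCI boards the vendor ID is checked, I/O, memory and bus-master
+ * access are enabled in the command register, and the latency timer is
+ * adjusted for the chip revision.  The board configuration is then read
+ * from EEPROM via AscInitFromEEP().  Returns a warning-code bit mask,
+ * or UW_ERR if err_code is already set.
+ */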
+ASC_INITFUNC(
+STATIC ushort
+AscInitGetConfig(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ushort warn_code;
+ PortAddr iop_base;
+ ushort PCIDeviceID;
+ ushort PCIVendorID;
+ uchar PCIRevisionID;
+ uchar prevCmdRegBits;
+
+ warn_code = 0;
+ iop_base = asc_dvc->iop_base;
+ asc_dvc->init_state = ASC_INIT_STATE_BEG_GET_CFG;
+ if (asc_dvc->err_code != 0) {
+ return (UW_ERR);
+ }
+ if (asc_dvc->bus_type == ASC_IS_PCI) {
+ PCIVendorID = AscReadPCIConfigWord(asc_dvc,
+ AscPCIConfigVendorIDRegister);
+
+ PCIDeviceID = AscReadPCIConfigWord(asc_dvc,
+ AscPCIConfigDeviceIDRegister);
+
+ PCIRevisionID = DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigRevisionIDRegister);
+
+ if (PCIVendorID != ASC_PCI_VENDORID) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister);
+
+ if ((prevCmdRegBits & AscPCICmdRegBits_IOMemBusMaster) !=
+ AscPCICmdRegBits_IOMemBusMaster) {
+ DvcWritePCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister,
+ (prevCmdRegBits |
+ AscPCICmdRegBits_IOMemBusMaster));
+
+ if ((DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister)
+ & AscPCICmdRegBits_IOMemBusMaster)
+ != AscPCICmdRegBits_IOMemBusMaster) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+ if ((PCIDeviceID == ASC_PCI_DEVICEID_1200A) ||
+ (PCIDeviceID == ASC_PCI_DEVICEID_1200B)) {
+ DvcWritePCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer, 0x00);
+ if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer)
+ != 0x00) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ } else if (PCIDeviceID == ASC_PCI_DEVICEID_ULTRA) {
+ if (DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer) < 0x20) {
+ DvcWritePCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer, 0x20);
+
+ if (DvcReadPCIConfigByte(asc_dvc,
+ AscPCIConfigLatencyTimer) < 0x20) {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+ }
+ }
+
+ if (AscFindSignature(iop_base)) {
+ warn_code |= AscInitAscDvcVar(asc_dvc);
+ warn_code |= AscInitFromEEP(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG;
+ if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) {
+ asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
+ }
+ } else {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ }
+ return(warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitSetConfig(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ushort warn_code = 0;
+
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_SET_CFG;
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (AscFindSignature(asc_dvc->iop_base)) {
+ warn_code |= AscInitFromAscDvcVar(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
+ } else {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ }
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitFromAscDvcVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ PortAddr iop_base;
+ ushort cfg_msw;
+ ushort warn_code;
+ ushort pci_device_id;
+
+ iop_base = asc_dvc->iop_base;
+ pci_device_id = asc_dvc->cfg->pci_device_id;
+ warn_code = 0;
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= (~(ASC_CFG_MSW_CLR_MASK));
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ if ((asc_dvc->cfg->cmd_qng_enabled & asc_dvc->cfg->disc_enable) !=
+ asc_dvc->cfg->cmd_qng_enabled) {
+ asc_dvc->cfg->disc_enable = asc_dvc->cfg->cmd_qng_enabled;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+ }
+ if ((asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_VL)) != 0) {
+ if (AscSetChipIRQ(iop_base, asc_dvc->irq_no, asc_dvc->bus_type)
+ != asc_dvc->irq_no) {
+ asc_dvc->err_code |= ASC_IERR_SET_IRQ_NO;
+ }
+ }
+ if (asc_dvc->bus_type & ASC_IS_PCI) {
+ cfg_msw &= 0xFFC0;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) {
+ } else {
+ if ((pci_device_id == ASC_PCI_DEVICE_ID_REV_A) ||
+ (pci_device_id == ASC_PCI_DEVICE_ID_REV_B)) {
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB;
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
+ }
+ }
+ } else if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
+ if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
+ == ASC_CHIP_VER_ASYN_BUG) {
+ asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
+ }
+ }
+ if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
+ asc_dvc->cfg->chip_scsi_id) {
+ asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
+ }
+ if (asc_dvc->bus_type & ASC_IS_ISA) {
+ AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
+ AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
+ }
+ return (warn_code);
+}
+
+ASC_INITFUNC(
+STATIC ushort
+AscInitAsc1000Driver(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ushort warn_code;
+ PortAddr iop_base;
+ extern ushort _asc_mcode_size;
+ extern ulong _asc_mcode_chksum;
+ extern uchar _asc_mcode_buf[];
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ if ((asc_dvc->dvc_cntl & ASC_CNTL_RESET_SCSI) &&
+ !(asc_dvc->init_state & ASC_INIT_RESET_SCSI_DONE)) {
+ AscResetChipAndScsiBus(asc_dvc);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ }
+ asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC;
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (!AscFindSignature(asc_dvc->iop_base)) {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return (warn_code);
+ }
+ AscDisableInterrupt(iop_base);
+ warn_code |= AscInitLram(asc_dvc);
+ if (asc_dvc->err_code != 0)
+ return (UW_ERR);
+ if (AscLoadMicroCode(iop_base, 0, (ushort *) _asc_mcode_buf,
+ _asc_mcode_size) != _asc_mcode_chksum) {
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ return (warn_code);
+ }
+ warn_code |= AscInitMicroCodeVar(asc_dvc);
+ asc_dvc->init_state |= ASC_INIT_STATE_END_LOAD_MC;
+ AscEnableInterrupt(iop_base);
+ return (warn_code);
+}
+
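+/*
+ * AscInitAscDvcVar() - set the ASC_DVC_VAR soft state to its defaults:
+ * clears the run-time counters and flags, fills in the synchronous
+ * period table (the Ultra table for PCI Ultra chips), and initializes
+ * the per-target queue limits.
+ */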
+ASC_INITFUNC(
+STATIC ushort
+AscInitAscDvcVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ int i;
+ PortAddr iop_base;
+ ushort warn_code;
+ uchar chip_version;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ asc_dvc->err_code = 0;
+ if ((asc_dvc->bus_type &
+ (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
+ asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
+ }
+ AscSetChipControl(iop_base, CC_HALT);
+ AscSetChipStatus(iop_base, 0);
+ asc_dvc->bug_fix_cntl = 0;
+ asc_dvc->pci_fix_asyn_xfer = 0;
+ asc_dvc->pci_fix_asyn_xfer_always = 0;
+ asc_dvc->init_state = 0;
+ asc_dvc->sdtr_done = 0;
+ asc_dvc->cur_total_qng = 0;
+ asc_dvc->is_in_int = 0;
+ asc_dvc->in_critical_cnt = 0;
+ asc_dvc->last_q_shortage = 0;
+ asc_dvc->use_tagged_qng = 0;
+ asc_dvc->no_scam = 0;
+ asc_dvc->unit_not_ready = 0;
+ asc_dvc->queue_full_or_busy = 0;
+ asc_dvc->redo_scam = 0 ;
+ asc_dvc->res2 = 0 ;
+ asc_dvc->host_init_sdtr_index = 0 ;
+ asc_dvc->res7 = 0 ;
+ asc_dvc->res8 = 0 ;
+ asc_dvc->cfg->can_tagged_qng = 0 ;
+ asc_dvc->cfg->cmd_qng_enabled = 0;
+ asc_dvc->dvc_cntl = ASC_DEF_DVC_CNTL;
+ asc_dvc->init_sdtr = 0;
+ asc_dvc->max_total_qng = ASC_DEF_MAX_TOTAL_QNG;
+ asc_dvc->scsi_reset_wait = 3;
+ asc_dvc->start_motor = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->max_dma_count = AscGetMaxDmaCount(asc_dvc->bus_type);
+ asc_dvc->cfg->sdtr_enable = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->disc_enable = ASC_SCSI_WIDTH_BIT_SET;
+ asc_dvc->cfg->chip_scsi_id = ASC_DEF_CHIP_SCSI_ID;
+ asc_dvc->cfg->lib_serial_no = ASC_LIB_SERIAL_NUMBER;
+ asc_dvc->cfg->lib_version = (ASC_LIB_VERSION_MAJOR << 8) |
+ ASC_LIB_VERSION_MINOR;
+ chip_version = AscGetChipVersion(iop_base, asc_dvc->bus_type);
+ asc_dvc->cfg->chip_version = chip_version;
+ asc_dvc->sdtr_period_tbl[0] = SYN_XFER_NS_0;
+ asc_dvc->sdtr_period_tbl[1] = SYN_XFER_NS_1;
+ asc_dvc->sdtr_period_tbl[2] = SYN_XFER_NS_2;
+ asc_dvc->sdtr_period_tbl[3] = SYN_XFER_NS_3;
+ asc_dvc->sdtr_period_tbl[4] = SYN_XFER_NS_4;
+ asc_dvc->sdtr_period_tbl[5] = SYN_XFER_NS_5;
+ asc_dvc->sdtr_period_tbl[6] = SYN_XFER_NS_6;
+ asc_dvc->sdtr_period_tbl[7] = SYN_XFER_NS_7;
+ asc_dvc->max_sdtr_index = 7;
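+ /*
+ * PCI Ultra chips support faster synchronous transfer rates; the 16-entry
+ * Ultra SDTR period table replaces the standard one below.
+ */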
+ if ((asc_dvc->bus_type & ASC_IS_PCI) &&
+ (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3150)) {
+ asc_dvc->bus_type = ASC_IS_PCI_ULTRA;
+ asc_dvc->sdtr_period_tbl[0] = SYN_ULTRA_XFER_NS_0;
+ asc_dvc->sdtr_period_tbl[1] = SYN_ULTRA_XFER_NS_1;
+ asc_dvc->sdtr_period_tbl[2] = SYN_ULTRA_XFER_NS_2;
+ asc_dvc->sdtr_period_tbl[3] = SYN_ULTRA_XFER_NS_3;
+ asc_dvc->sdtr_period_tbl[4] = SYN_ULTRA_XFER_NS_4;
+ asc_dvc->sdtr_period_tbl[5] = SYN_ULTRA_XFER_NS_5;
+ asc_dvc->sdtr_period_tbl[6] = SYN_ULTRA_XFER_NS_6;
+ asc_dvc->sdtr_period_tbl[7] = SYN_ULTRA_XFER_NS_7;
+ asc_dvc->sdtr_period_tbl[8] = SYN_ULTRA_XFER_NS_8;
+ asc_dvc->sdtr_period_tbl[9] = SYN_ULTRA_XFER_NS_9;
+ asc_dvc->sdtr_period_tbl[10] = SYN_ULTRA_XFER_NS_10;
+ asc_dvc->sdtr_period_tbl[11] = SYN_ULTRA_XFER_NS_11;
+ asc_dvc->sdtr_period_tbl[12] = SYN_ULTRA_XFER_NS_12;
+ asc_dvc->sdtr_period_tbl[13] = SYN_ULTRA_XFER_NS_13;
+ asc_dvc->sdtr_period_tbl[14] = SYN_ULTRA_XFER_NS_14;
+ asc_dvc->sdtr_period_tbl[15] = SYN_ULTRA_XFER_NS_15;
+ asc_dvc->max_sdtr_index = 15;
+ if (chip_version == ASC_CHIP_VER_PCI_ULTRA_3150)
+ {
+ AscSetExtraControl(iop_base,
+ (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
+ } else if (chip_version >= ASC_CHIP_VER_PCI_ULTRA_3050) {
+ AscSetExtraControl(iop_base,
+ (SEC_ACTIVE_NEGATE | SEC_ENABLE_FILTER));
+ }
+ }
+ if (asc_dvc->bus_type == ASC_IS_PCI) {
+ AscSetExtraControl(iop_base, (SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
+ }
+
+ asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
+ if (AscGetChipBusType(iop_base) == ASC_IS_ISAPNP) {
+ AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
+ asc_dvc->bus_type = ASC_IS_ISAPNP;
+ }
+ if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
+ asc_dvc->cfg->isa_dma_channel = (uchar) AscGetIsaDmaChannel(iop_base);
+ }
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->cur_dvc_qng[i] = 0;
+ asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
+ asc_dvc->scsiq_busy_head[i] = (ASC_SCSI_Q *) 0L;
+ asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *) 0L;
+ asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG;
+ }
+ return (warn_code);
+}
+
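+/*
+ * Halt the chip, read the EEPROM configuration, validate its checksum
+ * (falling back to built-in defaults or rewriting the EEPROM when it is
+ * bad), and copy the settings into the driver's soft state.
+ */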
+ASC_INITFUNC(
+STATIC ushort
+AscInitFromEEP(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ ASCEEP_CONFIG eep_config_buf;
+ ASCEEP_CONFIG *eep_config;
+ PortAddr iop_base;
+ ushort chksum;
+ ushort warn_code;
+ ushort cfg_msw, cfg_lsw;
+ int i;
+ int write_eep = 0;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE);
+ AscStopQueueExe(iop_base);
+ if ((AscStopChip(iop_base) == FALSE) ||
+ (AscGetChipScsiCtrl(iop_base) != 0)) {
+ asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE;
+ AscResetChipAndScsiBus(asc_dvc);
+ DvcSleepMilliSecond((ulong) ((ushort) asc_dvc->scsi_reset_wait * 1000));
+ }
+ if (AscIsChipHalted(iop_base) == FALSE) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ return (warn_code);
+ }
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ return (warn_code);
+ }
+ eep_config = (ASCEEP_CONFIG *) & eep_config_buf;
+ cfg_msw = AscGetChipCfgMsw(iop_base);
+ cfg_lsw = AscGetChipCfgLsw(iop_base);
+ if ((cfg_msw & ASC_CFG_MSW_CLR_MASK) != 0) {
+ cfg_msw &= (~(ASC_CFG_MSW_CLR_MASK));
+ warn_code |= ASC_WARN_CFG_MSW_RECOVER;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ }
+ chksum = AscGetEEPConfig(iop_base, eep_config, asc_dvc->bus_type);
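+ /*
+ * A returned sum of zero presumably means a blank or missing EEPROM;
+ * force a non-zero value so the comparison against the stored checksum
+ * below is treated as a mismatch.
+ */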
+ if (chksum == 0) {
+ chksum = 0xaa55;
+ }
+ if (AscGetChipStatus(iop_base) & CSW_AUTO_CONFIG) {
+ warn_code |= ASC_WARN_AUTO_CONFIG;
+ if (asc_dvc->cfg->chip_version == 3) {
+ if (eep_config->cfg_lsw != cfg_lsw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_lsw = AscGetChipCfgLsw(iop_base);
+ }
+ if (eep_config->cfg_msw != cfg_msw) {
+ warn_code |= ASC_WARN_EEPROM_RECOVER;
+ eep_config->cfg_msw = AscGetChipCfgMsw(iop_base);
+ }
+ }
+ }
+ eep_config->cfg_msw &= ~ASC_CFG_MSW_CLR_MASK;
+ eep_config->cfg_lsw |= ASC_CFG0_HOST_INT_ON;
+ if (chksum != eep_config->chksum) {
+ if (AscGetChipVersion(iop_base, asc_dvc->bus_type) ==
+ ASC_CHIP_VER_PCI_ULTRA_3050 )
+ {
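+ /*
+ * The ASC-3050 is assumed here to be an EEPROM-less design, so a bad
+ * checksum is answered with hard-coded defaults rather than a warning.
+ */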
+ eep_config->init_sdtr = 0xFF;
+ eep_config->disc_enable = 0xFF;
+ eep_config->start_motor = 0xFF;
+ eep_config->use_cmd_qng = 0;
+ eep_config->max_total_qng = 0xF0;
+ eep_config->max_tag_qng = 0x20;
+ eep_config->cntl = 0xBFFF;
+ eep_config->chip_scsi_id = 7;
+ eep_config->no_scam = 0;
+ eep_config->adapter_info[0] = 0;
+ eep_config->adapter_info[1] = 0;
+ eep_config->adapter_info[2] = 0;
+ eep_config->adapter_info[3] = 0;
+ eep_config->adapter_info[4] = 0;
+ /* Indicate EEPROM-less board. */
+ eep_config->adapter_info[5] = 0xBB;
+ } else {
+ write_eep = 1;
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+ }
+ }
+ asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr;
+ asc_dvc->cfg->disc_enable = eep_config->disc_enable;
+ asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
+ asc_dvc->cfg->isa_dma_speed = eep_config->isa_dma_speed;
+ asc_dvc->start_motor = eep_config->start_motor;
+ asc_dvc->dvc_cntl = eep_config->cntl;
+ asc_dvc->no_scam = eep_config->no_scam;
+ asc_dvc->cfg->adapter_info[0] = eep_config->adapter_info[0];
+ asc_dvc->cfg->adapter_info[1] = eep_config->adapter_info[1];
+ asc_dvc->cfg->adapter_info[2] = eep_config->adapter_info[2];
+ asc_dvc->cfg->adapter_info[3] = eep_config->adapter_info[3];
+ asc_dvc->cfg->adapter_info[4] = eep_config->adapter_info[4];
+ asc_dvc->cfg->adapter_info[5] = eep_config->adapter_info[5];
+ if (!AscTestExternalLram(asc_dvc)) {
+ if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA)) {
+ eep_config->max_total_qng = ASC_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
+ eep_config->max_tag_qng = ASC_MAX_PCI_ULTRA_INRAM_TAG_QNG;
+ } else {
+ eep_config->cfg_msw |= 0x0800;
+ cfg_msw |= 0x0800;
+ AscSetChipCfgMsw(iop_base, cfg_msw);
+ eep_config->max_total_qng = ASC_MAX_PCI_INRAM_TOTAL_QNG;
+ eep_config->max_tag_qng = ASC_MAX_INRAM_TAG_QNG;
+ }
+ }
+ if (eep_config->max_total_qng < ASC_MIN_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MIN_TOTAL_QNG;
+ }
+ if (eep_config->max_total_qng > ASC_MAX_TOTAL_QNG) {
+ eep_config->max_total_qng = ASC_MAX_TOTAL_QNG;
+ }
+ if (eep_config->max_tag_qng > eep_config->max_total_qng) {
+ eep_config->max_tag_qng = eep_config->max_total_qng;
+ }
+ if (eep_config->max_tag_qng < ASC_MIN_TAG_Q_PER_DVC) {
+ eep_config->max_tag_qng = ASC_MIN_TAG_Q_PER_DVC;
+ }
+ asc_dvc->max_total_qng = eep_config->max_total_qng;
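+ /*
+ * Tagged queuing requires the disconnect privilege; when the EEPROM
+ * enables queuing on a target without disconnect, disc_enable is forced
+ * to match use_cmd_qng and the conflict is reported.
+ */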
+ if ((eep_config->use_cmd_qng & eep_config->disc_enable) !=
+ eep_config->use_cmd_qng) {
+ eep_config->disc_enable = eep_config->use_cmd_qng;
+ warn_code |= ASC_WARN_CMD_QNG_CONFLICT;
+ }
+ if (asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_VL | ASC_IS_EISA)) {
+ asc_dvc->irq_no = AscGetChipIRQ(iop_base, asc_dvc->bus_type);
+ }
+ eep_config->chip_scsi_id &= ASC_MAX_TID;
+ asc_dvc->cfg->chip_scsi_id = eep_config->chip_scsi_id;
+ if (((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) &&
+ !(asc_dvc->dvc_cntl & ASC_CNTL_SDTR_ENABLE_ULTRA)) {
+ asc_dvc->host_init_sdtr_index = ASC_SDTR_ULTRA_PCI_10MB_INDEX;
+ }
+
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ asc_dvc->dos_int13_table[i] = eep_config->dos_int13_table[i];
+ asc_dvc->cfg->max_tag_qng[i] = eep_config->max_tag_qng;
+ asc_dvc->cfg->sdtr_period_offset[i] =
+ (uchar) (ASC_DEF_SDTR_OFFSET |
+ (asc_dvc->host_init_sdtr_index << 4));
+ }
+ eep_config->cfg_msw = AscGetChipCfgMsw(iop_base);
+ if (write_eep) {
+ (void) AscSetEEPConfig(iop_base, eep_config, asc_dvc->bus_type);
+ }
+ return (warn_code);
+}
+
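+/*
+ * Seed the freshly loaded microcode's LRAM variables (per-target SDTR
+ * values, disconnect mask, host SCSI ID, overrun buffer address) and
+ * start the RISC at the microcode entry point.
+ */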
+ASC_INITFUNC(
+STATIC ushort
+AscInitMicroCodeVar(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ int i;
+ ushort warn_code;
+ PortAddr iop_base;
+ ulong phy_addr;
+
+ iop_base = asc_dvc->iop_base;
+ warn_code = 0;
+ for (i = 0; i <= ASC_MAX_TID; i++) {
+ AscPutMCodeInitSDTRAtID(iop_base, i,
+ asc_dvc->cfg->sdtr_period_offset[i]);
+ }
+ AscInitQLinkVar(asc_dvc);
+ AscWriteLramByte(iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(iop_base, ASCV_HOSTSCSI_ID_B,
+ ASC_TID_TO_TARGET_ID(asc_dvc->cfg->chip_scsi_id));
+ if ((phy_addr = AscGetOnePhyAddr(asc_dvc,
+ (uchar *) asc_dvc->cfg->overrun_buf,
+ ASC_OVERRUN_BSIZE)) == 0L) {
+ asc_dvc->err_code |= ASC_IERR_GET_PHY_ADDR;
+ } else {
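+ /*
+ * Round the overrun buffer's physical address up to an 8-byte boundary
+ * and shrink the size handed to the microcode to match.
+ */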
+ phy_addr = (phy_addr & 0xFFFFFFF8UL) + 8;
+ AscWriteLramDWord(iop_base, ASCV_OVERRUN_PADDR_D, phy_addr);
+ AscWriteLramDWord(iop_base, ASCV_OVERRUN_BSIZE_D,
+ ASC_OVERRUN_BSIZE - 8);
+ }
+ asc_dvc->cfg->mcode_date = AscReadLramWord(iop_base,
+ (ushort) ASCV_MC_DATE_W);
+ asc_dvc->cfg->mcode_version = AscReadLramWord(iop_base,
+ (ushort) ASCV_MC_VER_W);
+ AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR);
+ if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) {
+ asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR;
+ return (warn_code);
+ }
+ if (AscStartChip(iop_base) != 1) {
+ asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP;
+ return (warn_code);
+ }
+ return (warn_code);
+}
+
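+/*
+ * Probe for external LRAM: a test pattern is written to the address of
+ * queue 241, which appears to lie beyond the internal RAM range, and only
+ * reads back intact when external LRAM is fitted (the saved word is then
+ * restored).
+ */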
+ASC_INITFUNC(
+STATIC int
+AscTestExternalLram(
+ ASC_DVC_VAR asc_ptr_type * asc_dvc
+)
+)
+{
+ PortAddr iop_base;
+ ushort q_addr;
+ ushort saved_word;
+ int sta;
+
+ iop_base = asc_dvc->iop_base;
+ sta = 0;
+ q_addr = ASC_QNO_TO_QADDR(241);
+ saved_word = AscReadLramWord(iop_base, q_addr);
+ AscSetChipLramAddr(iop_base, q_addr);
+ AscSetChipLramData(iop_base, 0x55AA);
+ DvcSleepMilliSecond(10);
+ AscSetChipLramAddr(iop_base, q_addr);
+ if (AscGetChipLramData(iop_base) == 0x55AA) {
+ sta = 1;
+ AscWriteLramWord(iop_base, q_addr, saved_word);
+ }
+ return (sta);
+}
+
+ASC_INITFUNC(
+STATIC int
+AscWriteEEPCmdReg(
+ PortAddr iop_base,
+ uchar cmd_reg
+)
+)
+{
+ uchar read_back;
+ int retry;
+
+ retry = 0;
+ while (TRUE) {
+ AscSetChipEEPCmd(iop_base, cmd_reg);
+ DvcSleepMilliSecond(1);
+ read_back = AscGetChipEEPCmd(iop_base);
+ if (read_back == cmd_reg) {
+ return (1);
+ }
+ if (retry++ > ASC_EEP_MAX_RETRY) {
+ return (0);
+ }
+ }
+}
+
+ASC_INITFUNC(
+STATIC int
+AscWriteEEPDataReg(
+ PortAddr iop_base,
+ ushort data_reg
+)
+)
+{
+ ushort read_back;
+ int retry;
+
+ retry = 0;
+ while (TRUE) {
+ AscSetChipEEPData(iop_base, data_reg);
+ DvcSleepMilliSecond(1);
+ read_back = AscGetChipEEPData(iop_base);
+ if (read_back == data_reg) {
+ return (1);
+ }
+ if (retry++ > ASC_EEP_MAX_RETRY) {
+ return (0);
+ }
+ }
+}
+
+ASC_INITFUNC(
+STATIC void
+AscWaitEEPRead(
+ void
+)
+)
+{
+ DvcSleepMilliSecond(1);
+ return;
+}
+
+ASC_INITFUNC(
+STATIC void
+AscWaitEEPWrite(
+ void
+)
+)
+{
+ DvcSleepMilliSecond(20);
+ return;
+}
+
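+/*
+ * Read one word from the serial EEPROM: disable writes, issue a READ
+ * command for the given address, and sample the data register after
+ * giving the part time to respond.
+ */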
+ASC_INITFUNC(
+STATIC ushort
+AscReadEEPWord(
+ PortAddr iop_base,
+ uchar addr
+)
+)
+{
+ ushort read_wval;
+ uchar cmd_reg;
+
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ cmd_reg = addr | ASC_EEP_CMD_READ;
+ AscWriteEEPCmdReg(iop_base, cmd_reg);
+ AscWaitEEPRead();
+ read_wval = AscGetChipEEPData(iop_base);
+ AscWaitEEPRead();
+ return (read_wval);
+}
+
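+/*
+ * Write an EEPROM word only if it differs from the current contents,
+ * sparing unnecessary write cycles, and read it back so the caller can
+ * verify the update.
+ */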
+ASC_INITFUNC(
+STATIC ushort
+AscWriteEEPWord(
+ PortAddr iop_base,
+ uchar addr,
+ ushort word_val
+)
+)
+{
+ ushort read_wval;
+
+ read_wval = AscReadEEPWord(iop_base, addr);
+ if (read_wval != word_val) {
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_ABLE);
+ AscWaitEEPRead();
+ AscWriteEEPDataReg(iop_base, word_val);
+ AscWaitEEPRead();
+ AscWriteEEPCmdReg(iop_base,
+ (uchar) ((uchar) ASC_EEP_CMD_WRITE | addr));
+ AscWaitEEPWrite();
+ AscWriteEEPCmdReg(iop_base, ASC_EEP_CMD_WRITE_DISABLE);
+ AscWaitEEPRead();
+ return (AscReadEEPWord(iop_base, addr));
+ }
+ return (read_wval);
+}
+
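+/*
+ * Read the EEPROM configuration into cfg_buf and return the 16-bit sum of
+ * the words read; the final word (the stored checksum) is copied into the
+ * buffer but left out of the sum so the caller can compare the two.
+ */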
+ASC_INITFUNC(
+STATIC ushort
+AscGetEEPConfig(
+ PortAddr iop_base,
+ ASCEEP_CONFIG * cfg_buf, ushort bus_type
+)
+)
+{
+ ushort wval;
+ ushort sum;
+ ushort *wbuf;
+ int cfg_beg;
+ int cfg_end;
+ int s_addr;
+ int isa_pnp_wsize;
+
+ wbuf = (ushort *) cfg_buf;
+ sum = 0;
+ isa_pnp_wsize = 0;
+ for (s_addr = 0; s_addr < (2 + isa_pnp_wsize); s_addr++, wbuf++) {
+ wval = AscReadEEPWord(iop_base, (uchar) s_addr);
+ sum += wval;
+ *wbuf = wval;
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1);
+ s_addr++, wbuf++) {
+ wval = AscReadEEPWord(iop_base, (uchar) s_addr);
+ sum += wval;
+ *wbuf = wval;
+ }
+ *wbuf = AscReadEEPWord(iop_base, (uchar) s_addr);
+ return (sum);
+}
+
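+/*
+ * Write the configuration to the EEPROM in a single pass: store the data
+ * words while accumulating their sum, store the sum as the checksum word,
+ * then read everything back and return the number of mismatches.
+ */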
+ASC_INITFUNC(
+STATIC int
+AscSetEEPConfigOnce(
+ PortAddr iop_base,
+ ASCEEP_CONFIG * cfg_buf, ushort bus_type
+)
+)
+{
+ int n_error;
+ ushort *wbuf;
+ ushort sum;
+ int s_addr;
+ int cfg_beg;
+ int cfg_end;
+
+ wbuf = (ushort *) cfg_buf;
+ n_error = 0;
+ sum = 0;
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ sum += *wbuf;
+ if (*wbuf != AscWriteEEPWord(iop_base, (uchar) s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ if (bus_type & ASC_IS_VL) {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG_VL;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR_VL;
+ } else {
+ cfg_beg = ASC_EEP_DVC_CFG_BEG;
+ cfg_end = ASC_EEP_MAX_DVC_ADDR;
+ }
+ for (s_addr = cfg_beg; s_addr <= (cfg_end - 1);
+ s_addr++, wbuf++) {
+ sum += *wbuf;
+ if (*wbuf != AscWriteEEPWord(iop_base, (uchar) s_addr, *wbuf)) {
+ n_error++;
+ }
+ }
+ *wbuf = sum;
+ if (sum != AscWriteEEPWord(iop_base, (uchar) s_addr, sum)) {
+ n_error++;
+ }
+ wbuf = (ushort *) cfg_buf;
+ for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
+ if (*wbuf != AscReadEEPWord(iop_base, (uchar) s_addr)) {
+ n_error++;
+ }
+ }
+ for (s_addr = cfg_beg; s_addr <= cfg_end;
+ s_addr++, wbuf++) {
+ if (*wbuf != AscReadEEPWord(iop_base, (uchar) s_addr)) {
+ n_error++;
+ }
+ }
+ return (n_error);
+}
+
+ASC_INITFUNC(
+STATIC int
+AscSetEEPConfig(
+ PortAddr iop_base,
+ ASCEEP_CONFIG * cfg_buf, ushort bus_type
+)
+)
+{
+ int retry;
+ int n_error;
+
+ retry = 0;
+ while (TRUE) {
+ if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf,
+ bus_type)) == 0) {
+ break;
+ }
+ if (++retry > ASC_EEP_MAX_RETRY) {
+ break;
+ }
+ }
+ return (n_error);
+}
+
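+/*
+ * Work around an asynchronous transfer problem on early PCI chip
+ * revisions: targets that did not negotiate SDTR are run with a fixed
+ * synchronous register setting instead, except for device types and
+ * specific vendor/product strings that appear to be exempted as safe.
+ */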
+STATIC void
+AscAsyncFix(
+ ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ uchar tid_no,
+ ASC_SCSI_INQUIRY *inq)
+{
+ uchar dvc_type;
+ ASC_SCSI_BIT_ID_TYPE tid_bits;
+
+ dvc_type = inq->byte0.peri_dvc_type;
+ tid_bits = ASC_TIX_TO_TARGET_ID(tid_no);
+
+ if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_ASYN_USE_SYN) {
+ if (!(asc_dvc->init_sdtr & tid_bits)) {
+ if ((dvc_type == SCSI_TYPE_CDROM) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "HP ", 3) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer_always |= tid_bits;
+ }
+ asc_dvc->pci_fix_asyn_xfer |= tid_bits;
+ if ((dvc_type == SCSI_TYPE_PROC) ||
+ (dvc_type == SCSI_TYPE_SCANNER)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+ if ((dvc_type == SCSI_TYPE_SASD) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "TANDBERG", 8) == 0) &&
+ (AscCompareString((uchar *) inq->product_id,
+ (uchar *) " TDC 36", 7) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+ if ((dvc_type == SCSI_TYPE_SASD) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "WANGTEK ", 8) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+
+ if ((dvc_type == SCSI_TYPE_CDROM) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "NEC ", 8) == 0) &&
+ (AscCompareString((uchar *) inq->product_id,
+ (uchar *) "CD-ROM DRIVE ", 16) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+
+ if ((dvc_type == SCSI_TYPE_CDROM) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "YAMAHA", 6) == 0) &&
+ (AscCompareString((uchar *) inq->product_id,
+ (uchar *) "CDR400", 6) == 0)) {
+ asc_dvc->pci_fix_asyn_xfer &= ~tid_bits;
+ }
+ if (asc_dvc->pci_fix_asyn_xfer & tid_bits) {
+ AscSetRunChipSynRegAtID(asc_dvc->iop_base, tid_no,
+ ASYN_SDTR_DATA_FIX_PCI_REV_AB);
+ }
+ }
+ }
+ return;
+}
+
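+/*
+ * Tag-queuing blacklist: the QUANTUM XP34301 at firmware revision 1071 is
+ * the one device refused tagged commands here, presumably because it
+ * misbehaves with them.
+ */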
+STATIC int
+AscTagQueuingSafe(ASC_SCSI_INQUIRY *inq)
+{
+ if ((inq->add_len >= 32) &&
+ (AscCompareString((uchar *) inq->vendor_id,
+ (uchar *) "QUANTUM XP34301", 15) == 0) &&
+ (AscCompareString((uchar *) inq->product_rev_level,
+ (uchar *) "1071", 4) == 0))
+ {
+ return 0;
+ }
+ return 1;
+}
+
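+/*
+ * Process a target's INQUIRY data: SDTR and tagged queuing are enabled per
+ * target only when the EEPROM allows it and the device advertises the
+ * capability; changed masks are written back to LRAM, and the async fix is
+ * re-evaluated if the SDTR decision changed.
+ */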
+STATIC void
+AscInquiryHandling(ASC_DVC_VAR asc_ptr_type *asc_dvc,
+ uchar tid_no, ASC_SCSI_INQUIRY *inq)
+{
+ ASC_SCSI_BIT_ID_TYPE tid_bit = ASC_TIX_TO_TARGET_ID(tid_no);
+ ASC_SCSI_BIT_ID_TYPE orig_init_sdtr, orig_use_tagged_qng;
+
+ orig_init_sdtr = asc_dvc->init_sdtr;
+ orig_use_tagged_qng = asc_dvc->use_tagged_qng;
+
+ asc_dvc->init_sdtr &= ~tid_bit;
+ asc_dvc->cfg->can_tagged_qng &= ~tid_bit;
+ asc_dvc->use_tagged_qng &= ~tid_bit;
+
+ if (inq->byte3.rsp_data_fmt >= 2 || inq->byte2.ansi_apr_ver >= 2) {
+ if ((asc_dvc->cfg->sdtr_enable & tid_bit) && inq->byte7.Sync) {
+ asc_dvc->init_sdtr |= tid_bit;
+ }
+ if ((asc_dvc->cfg->cmd_qng_enabled & tid_bit) && inq->byte7.CmdQue) {
+ if (AscTagQueuingSafe(inq)) {
+ asc_dvc->use_tagged_qng |= tid_bit;
+ asc_dvc->cfg->can_tagged_qng |= tid_bit;
+ }
+ }
+ }
+ if (orig_use_tagged_qng != asc_dvc->use_tagged_qng) {
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_DISC_ENABLE_B,
+ asc_dvc->cfg->disc_enable);
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_USE_TAGGED_QNG_B,
+ asc_dvc->use_tagged_qng);
+ AscWriteLramByte(asc_dvc->iop_base, ASCV_CAN_TAGGED_QNG_B,
+ asc_dvc->cfg->can_tagged_qng);
+
+ asc_dvc->max_dvc_qng[tid_no] =
+ asc_dvc->cfg->max_tag_qng[tid_no];
+ AscWriteLramByte(asc_dvc->iop_base,
+ (ushort) (ASCV_MAX_DVC_QNG_BEG + tid_no),
+ asc_dvc->max_dvc_qng[tid_no]);
+ }
+ if (orig_init_sdtr != asc_dvc->init_sdtr) {
+ AscAsyncFix(asc_dvc, tid_no, inq);
+ }
+ return;
+}
+
+STATIC int
+AscCompareString(
+ ruchar * str1,
+ ruchar * str2,
+ int len
+)
+{
+ int i;
+ int diff;
+
+ for (i = 0; i < len; i++) {
+ diff = (int) (str1[i] - str2[i]);
+ if (diff != 0)
+ return (diff);
+ }
+ return (0);
+}
+
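+/*
+ * LRAM is accessed a word at a time: a byte read fetches the word
+ * containing 'addr' and extracts the high byte for odd addresses or the
+ * low byte for even ones.
+ */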
+STATIC uchar
+AscReadLramByte(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ uchar byte_data;
+ ushort word_data;
+
+ if (isodd_word(addr)) {
+ AscSetChipLramAddr(iop_base, addr - 1);
+ word_data = AscGetChipLramData(iop_base);
+ byte_data = (uchar) ((word_data >> 8) & 0xFF);
+ } else {
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+ byte_data = (uchar) (word_data & 0xFF);
+ }
+ return (byte_data);
+}
+
+STATIC ushort
+AscReadLramWord(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ ushort word_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+ word_data = AscGetChipLramData(iop_base);
+ return (word_data);
+}
+
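+/*
+ * The LRAM data port appears to auto-increment, so two consecutive word
+ * reads after setting the address yield a 32-bit value, low word first.
+ */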
+STATIC ulong
+AscReadLramDWord(
+ PortAddr iop_base,
+ ushort addr
+)
+{
+ ushort val_low, val_high;
+ ulong dword_data;
+
+ AscSetChipLramAddr(iop_base, addr);
+ val_low = AscGetChipLramData(iop_base);
+ val_high = AscGetChipLramData(iop_base);
+ dword_data = ((ulong) val_high << 16) | (ulong) val_low;
+ return (dword_data);
+}
+
+STATIC void
+AscWriteLramWord(
+ PortAddr iop_base,
+ ushort addr,
+ ushort word_val
+)
+{
+ AscSetChipLramAddr(iop_base, addr);
+ AscSetChipLramData(iop_base, word_val);
+ return;
+}
+
+STATIC void
+AscWriteLramDWord(
+ PortAddr iop_base,
+ ushort addr,
+ ulong dword_val
+)
+{
+ ushort word_val;
+
+ AscSetChipLramAddr(iop_base, addr);
+ word_val = (ushort) dword_val;
+ AscSetChipLramData(iop_base, word_val);
+ word_val = (ushort) (dword_val >> 16);
+ AscSetChipLramData(iop_base, word_val);
+ return;
+}
+
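+/*
+ * Byte writes to LRAM are read-modify-write: the containing word is read,
+ * the appropriate half replaced, and the whole word written back.
+ */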
+STATIC void
+AscWriteLramByte(
+ PortAddr iop_base,
+ ushort addr,
+ uchar byte_val
+)
+{
+ ushort word_data;
+
+ if (isodd_word(addr)) {
+ addr--;
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0x00FF;
+ word_data |= (((ushort) byte_val << 8) & 0xFF00);
+ } else {
+ word_data = AscReadLramWord(iop_base, addr);
+ word_data &= 0xFF00;
+ word_data |= ((ushort) byte_val & 0x00FF);
+ }
+ AscWriteLramWord(iop_base, addr, word_data);
+ return;
+}
+
+STATIC void
+AscMemWordCopyToLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort * s_buffer,
+ int words
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcOutPortWords(iop_base + IOP_RAM_DATA, s_buffer, words);
+ return;
+}
+
+STATIC void
+AscMemDWordCopyToLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ulong * s_buffer,
+ int dwords
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcOutPortDWords(iop_base + IOP_RAM_DATA, s_buffer, dwords);
+ return;
+}
+
+STATIC void
+AscMemWordCopyFromLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort * d_buffer,
+ int words
+)
+{
+ AscSetChipLramAddr(iop_base, s_addr);
+ DvcInPortWords(iop_base + IOP_RAM_DATA, d_buffer, words);
+ return;
+}
+
+STATIC ulong
+AscMemSumLramWord(
+ PortAddr iop_base,
+ ushort s_addr,
+ rint words
+)
+{
+ ulong sum;
+ int i;
+
+ sum = 0L;
+ for (i = 0; i < words; i++, s_addr += 2) {
+ sum += AscReadLramWord(iop_base, s_addr);
+ }
+ return (sum);
+}
+
+STATIC void
+AscMemWordSetLram(
+ PortAddr iop_base,
+ ushort s_addr,
+ ushort set_wval,
+ rint words
+)
+{
+ rint i;
+
+ AscSetChipLramAddr(iop_base, s_addr);
+ for (i = 0; i < words; i++) {
+ AscSetChipLramData(iop_base, set_wval);
+ }
+ return;
+}
+
+
+/*
+ * --- Adv Library Functions
+ */
+
+/* a_qswap.h */
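+/*
+ * RISC microcode image for the ASC-3550 ("Condor") chip.
+ * AdvInitAsc3550Driver() below copies it into the chip's internal RAM and
+ * verifies it against _adv_mcode_chksum.
+ */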
+STATIC unsigned char _adv_mcode_buf[] ASC_INITDATA = {
+ 0x9C, 0xF0, 0x80, 0x01, 0x00, 0xF0, 0x44, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x01, 0xD6, 0x11, 0x00, 0x00, 0x70, 0x01,
+ 0x30, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x10, 0x2D, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x56, 0x34, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0xF7, 0x70, 0x01, 0x0C, 0x1C, 0x06, 0xF7, 0x02, 0x00, 0x00, 0xF2, 0xD6, 0x0A,
+ 0x04, 0xF7, 0x70, 0x01, 0x06, 0xF7, 0x02, 0x00, 0x3E, 0x57, 0x3C, 0x56, 0x0C, 0x1C, 0x00, 0xFC,
+ 0xA6, 0x00, 0x01, 0x58, 0xAA, 0x13, 0x20, 0xF0, 0xA6, 0x03, 0x06, 0xEC, 0xB9, 0x00, 0x0E, 0x47,
+ 0x03, 0xE6, 0x10, 0x00, 0xCE, 0x45, 0x02, 0x13, 0x3E, 0x57, 0x06, 0xEA, 0xB9, 0x00, 0x47, 0x4B,
+ 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x01, 0x48, 0x4E, 0x12, 0x03, 0xF6, 0xC0, 0x00,
+ 0x00, 0xF2, 0x68, 0x0A, 0x41, 0x58, 0x03, 0xF6, 0xD0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x49, 0x44,
+ 0x59, 0xF0, 0x0A, 0x02, 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x44, 0x58, 0x00, 0xF2,
+ 0xE2, 0x0D, 0x02, 0xCC, 0x4A, 0xE4, 0x01, 0x00, 0x55, 0xF0, 0x08, 0x03, 0x45, 0xF4, 0x02, 0x00,
+ 0x83, 0x5A, 0x04, 0xCC, 0x01, 0x4A, 0x12, 0x12, 0x00, 0xF2, 0xE2, 0x0D, 0x00, 0xCD, 0x48, 0xE4,
+ 0x01, 0x00, 0xE9, 0x13, 0x00, 0xF2, 0xC6, 0x0F, 0xFA, 0x10, 0x0E, 0x47, 0x03, 0xE6, 0x10, 0x00,
+ 0xCE, 0x45, 0x02, 0x13, 0x3E, 0x57, 0xCE, 0x47, 0x97, 0x13, 0x04, 0xEC, 0xB4, 0x00, 0x00, 0xF2,
+ 0xE2, 0x0D, 0x00, 0xCD, 0x48, 0xE4, 0x00, 0x00, 0x12, 0x12, 0x3E, 0x57, 0x06, 0xCC, 0x45, 0xF4,
+ 0x02, 0x00, 0x83, 0x5A, 0x00, 0xCC, 0x00, 0xEA, 0xB4, 0x00, 0x92, 0x10, 0x00, 0xF0, 0x8C, 0x01,
+ 0x43, 0xF0, 0x5C, 0x02, 0x44, 0xF0, 0x60, 0x02, 0x45, 0xF0, 0x64, 0x02, 0x46, 0xF0, 0x68, 0x02,
+ 0x47, 0xF0, 0x6E, 0x02, 0x48, 0xF0, 0x9E, 0x02, 0xB9, 0x54, 0x62, 0x10, 0x00, 0x1C, 0x5A, 0x10,
+ 0x02, 0x1C, 0x56, 0x10, 0x1E, 0x1C, 0x52, 0x10, 0x00, 0xF2, 0x1E, 0x11, 0x50, 0x10, 0x06, 0xFC,
+ 0xA8, 0x00, 0x03, 0xF6, 0xBE, 0x00, 0x00, 0xF2, 0x4E, 0x0A, 0x8C, 0x10, 0x01, 0xF6, 0x01, 0x00,
+ 0x01, 0xFA, 0xA8, 0x00, 0x00, 0xF2, 0x2C, 0x0B, 0x06, 0x10, 0xB9, 0x54, 0x01, 0xFA, 0xA8, 0x00,
+ 0x03, 0xF6, 0xBE, 0x00, 0x00, 0xF2, 0x58, 0x0A, 0x01, 0xFC, 0xA8, 0x00, 0x20, 0x10, 0x58, 0x1C,
+ 0x00, 0xF2, 0x1C, 0x0B, 0x5A, 0x1C, 0x01, 0xF6, 0x01, 0x00, 0x38, 0x54, 0x00, 0xFA, 0xA6, 0x00,
+ 0x01, 0xFA, 0xA8, 0x00, 0x20, 0x1C, 0x00, 0xF0, 0x72, 0x01, 0x01, 0xF6, 0x01, 0x00, 0x38, 0x54,
+ 0x00, 0xFA, 0xA6, 0x00, 0x01, 0xFA, 0xA8, 0x00, 0x20, 0x1C, 0x00, 0xF0, 0x80, 0x01, 0x03, 0xF6,
+ 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x01, 0x48, 0x0A, 0x13, 0x00, 0xF2, 0x38, 0x10, 0x00, 0xF2,
+ 0x54, 0x0F, 0x24, 0x10, 0x03, 0xF6, 0xC0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x02, 0xF6, 0xD0, 0x00,
+ 0x02, 0x57, 0x03, 0x59, 0x01, 0xCC, 0x49, 0x44, 0x5B, 0xF0, 0x04, 0x03, 0x00, 0xF2, 0x9C, 0x0F,
+ 0x00, 0xF0, 0x80, 0x01, 0x00, 0xF2, 0x14, 0x10, 0x0C, 0x1C, 0x02, 0x4B, 0xBF, 0x57, 0x9E, 0x43,
+ 0x77, 0x57, 0x07, 0x4B, 0x20, 0xF0, 0xA6, 0x03, 0x40, 0x1C, 0x1E, 0xF0, 0x30, 0x03, 0x26, 0xF0,
+ 0x2C, 0x03, 0xA0, 0xF0, 0x1A, 0x03, 0x11, 0xF0, 0xA6, 0x03, 0x12, 0x10, 0x9F, 0xF0, 0x3E, 0x03,
+ 0x46, 0x1C, 0x82, 0xE7, 0x05, 0x00, 0x9E, 0xE7, 0x11, 0x00, 0x00, 0xF0, 0x06, 0x0A, 0x0C, 0x1C,
+ 0x48, 0x1C, 0x46, 0x1C, 0x38, 0x54, 0x00, 0xEC, 0xBA, 0x00, 0x08, 0x44, 0x00, 0xEA, 0xBA, 0x00,
+ 0x03, 0xF6, 0xC0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x08, 0x44, 0x00, 0x4C, 0x82, 0xE7, 0x02, 0x00,
+ 0x00, 0xF2, 0x12, 0x11, 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0x70, 0x03, 0x00, 0xF2, 0x60, 0x0B,
+ 0x06, 0xF0, 0x80, 0x03, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0, 0xFC, 0x09, 0x00, 0xF0, 0x02, 0x0A,
+ 0x00, 0xFC, 0xBE, 0x00, 0x98, 0x57, 0x55, 0xF0, 0xAC, 0x04, 0x01, 0xE6, 0x0C, 0x00, 0x00, 0xF2,
+ 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11, 0x01, 0xF0,
+ 0x7C, 0x02, 0x00, 0xF0, 0x8A, 0x02, 0x46, 0x1C, 0x0C, 0x1C, 0x67, 0x1B, 0xBF, 0x57, 0x77, 0x57,
+ 0x02, 0x4B, 0x48, 0x1C, 0x32, 0x1C, 0x00, 0xF2, 0x92, 0x0D, 0x30, 0x1C, 0x96, 0xF0, 0xBC, 0x03,
+ 0xB1, 0xF0, 0xC0, 0x03, 0x1E, 0xF0, 0xFC, 0x09, 0x85, 0xF0, 0x02, 0x0A, 0x00, 0xFC, 0xBE, 0x00,
+ 0x98, 0x57, 0x14, 0x12, 0x01, 0xE6, 0x0C, 0x00, 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11,
+ 0x01, 0xF0, 0x7C, 0x02, 0x00, 0xF0, 0x8A, 0x02, 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A,
+ 0x01, 0x48, 0x55, 0xF0, 0x98, 0x04, 0x03, 0x82, 0x03, 0xFC, 0xA0, 0x00, 0x9B, 0x57, 0x40, 0x12,
+ 0x69, 0x18, 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0x42, 0x04, 0x69, 0x08, 0x00, 0xF2, 0x12, 0x11,
+ 0x85, 0xF0, 0x02, 0x0A, 0x68, 0x08, 0x4C, 0x44, 0x28, 0x12, 0x44, 0x48, 0x03, 0xF6, 0xE0, 0x00,
+ 0x00, 0xF2, 0x68, 0x0A, 0x45, 0x58, 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0xCC, 0x01, 0x48, 0x55, 0xF0,
+ 0x98, 0x04, 0x4C, 0x44, 0xEF, 0x13, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2, 0x14, 0x10, 0x08, 0x10,
+ 0x68, 0x18, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D, 0x04, 0x80, 0x18, 0xE4, 0x10, 0x00, 0x28, 0x12,
+ 0x01, 0xE6, 0x06, 0x00, 0x04, 0x80, 0x18, 0xE4, 0x01, 0x00, 0x04, 0x12, 0x01, 0xE6, 0x0D, 0x00,
+ 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11, 0x04, 0xE6, 0x02, 0x00, 0x9E, 0xE7, 0x15, 0x00,
+ 0x01, 0xF0, 0x1C, 0x0A, 0x00, 0xF0, 0x02, 0x0A, 0x69, 0x08, 0x05, 0x80, 0x48, 0xE4, 0x00, 0x00,
+ 0x0C, 0x12, 0x00, 0xE6, 0x11, 0x00, 0x00, 0xEA, 0xB8, 0x00, 0x00, 0xF2, 0xB6, 0x10, 0x82, 0xE7,
+ 0x02, 0x00, 0x1C, 0x90, 0x40, 0x5C, 0x00, 0x16, 0x01, 0xE6, 0x06, 0x00, 0x00, 0xF2, 0x4E, 0x0D,
+ 0x01, 0xF0, 0x80, 0x01, 0x1E, 0xF0, 0x80, 0x01, 0x00, 0xF0, 0xA0, 0x04, 0x42, 0x5B, 0x06, 0xF7,
+ 0x03, 0x00, 0x46, 0x59, 0xBF, 0x57, 0x77, 0x57, 0x01, 0xE6, 0x80, 0x00, 0x07, 0x80, 0x31, 0x44,
+ 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00, 0x56, 0x13, 0x20, 0x80, 0x48, 0xE4, 0x03, 0x00, 0x4E, 0x12,
+ 0x00, 0xFC, 0xA2, 0x00, 0x98, 0x57, 0x55, 0xF0, 0x1C, 0x05, 0x31, 0xE4, 0x40, 0x00, 0x00, 0xFC,
+ 0xA0, 0x00, 0x98, 0x57, 0x36, 0x12, 0x4C, 0x1C, 0x00, 0xF2, 0x12, 0x11, 0x89, 0x48, 0x00, 0xF2,
+ 0x12, 0x11, 0x86, 0xF0, 0x2E, 0x05, 0x82, 0xE7, 0x06, 0x00, 0x1B, 0x80, 0x48, 0xE4, 0x22, 0x00,
+ 0x5B, 0xF0, 0x0C, 0x05, 0x48, 0xE4, 0x20, 0x00, 0x59, 0xF0, 0x10, 0x05, 0x00, 0xE6, 0x20, 0x00,
+ 0x09, 0x48, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0x2E, 0x05, 0x83, 0x80, 0x04, 0x10, 0x00, 0xF2,
+ 0xA2, 0x0D, 0x00, 0xE6, 0x01, 0x00, 0x00, 0xEA, 0x26, 0x01, 0x01, 0xEA, 0x27, 0x01, 0x04, 0x80,
+ 0x18, 0xE4, 0x10, 0x00, 0x36, 0x12, 0xB9, 0x54, 0x00, 0xF2, 0xF6, 0x0E, 0x01, 0xE6, 0x06, 0x00,
+ 0x04, 0x80, 0x18, 0xE4, 0x01, 0x00, 0x04, 0x12, 0x01, 0xE6, 0x0D, 0x00, 0x00, 0xF2, 0x4E, 0x0D,
+ 0x00, 0xF2, 0x12, 0x11, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11, 0x04, 0xE6, 0x02, 0x00,
+ 0x9E, 0xE7, 0x15, 0x00, 0x01, 0xF0, 0x1C, 0x0A, 0x00, 0xF0, 0x02, 0x0A, 0x00, 0xFC, 0x20, 0x01,
+ 0x98, 0x57, 0x34, 0x12, 0x00, 0xFC, 0x24, 0x01, 0x98, 0x57, 0x2C, 0x13, 0xB9, 0x54, 0x00, 0xF2,
+ 0xF6, 0x0E, 0x86, 0xF0, 0xA8, 0x05, 0x03, 0xF6, 0x01, 0x00, 0x00, 0xF2, 0x8C, 0x0E, 0x85, 0xF0,
+ 0x9E, 0x05, 0x82, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x60, 0x0B, 0x82, 0xE7, 0x02, 0x00, 0x00, 0xFC,
+ 0x24, 0x01, 0xB0, 0x57, 0x00, 0xFA, 0x24, 0x01, 0x00, 0xFC, 0x9E, 0x00, 0x98, 0x57, 0x5A, 0x12,
+ 0x00, 0xFC, 0xB6, 0x00, 0x98, 0x57, 0x52, 0x13, 0x03, 0xE6, 0x0C, 0x00, 0x00, 0xFC, 0x9C, 0x00,
+ 0x98, 0x57, 0x04, 0x13, 0x03, 0xE6, 0x19, 0x00, 0x05, 0xE6, 0x08, 0x00, 0x00, 0xF6, 0x00, 0x01,
+ 0x00, 0x57, 0x00, 0x57, 0x03, 0x58, 0x00, 0xDC, 0x18, 0xF4, 0x00, 0x80, 0x04, 0x13, 0x05, 0xE6,
+ 0x0F, 0x00, 0xB9, 0x54, 0x00, 0xF2, 0xF6, 0x0E, 0x86, 0xF0, 0x0A, 0x06, 0x00, 0xF2, 0xBA, 0x0E,
+ 0x85, 0xF0, 0x00, 0x06, 0x82, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x60, 0x0B, 0x82, 0xE7, 0x02, 0x00,
+ 0x00, 0xFC, 0xB6, 0x00, 0xB0, 0x57, 0x00, 0xFA, 0xB6, 0x00, 0x01, 0xF6, 0x01, 0x00, 0x00, 0xF2,
+ 0xF6, 0x0E, 0x9C, 0x32, 0x4E, 0x1C, 0x32, 0x1C, 0x00, 0xF2, 0x92, 0x0D, 0x30, 0x1C, 0x82, 0xE7,
+ 0x04, 0x00, 0xB1, 0xF0, 0x22, 0x06, 0x0A, 0xF0, 0x3E, 0x06, 0x05, 0xF0, 0xD6, 0x06, 0x06, 0xF0,
+ 0xDC, 0x06, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0, 0xFC, 0x09, 0x00, 0xF0, 0x02, 0x0A, 0x04, 0x80,
+ 0x18, 0xE4, 0x20, 0x00, 0x30, 0x12, 0x09, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x21, 0x80,
+ 0x18, 0xE4, 0xE0, 0x00, 0x09, 0x48, 0x00, 0xF2, 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2,
+ 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x99, 0xA4, 0x00, 0xF2, 0x12, 0x11,
+ 0x09, 0xE7, 0x00, 0x00, 0x9A, 0x10, 0x04, 0x80, 0x18, 0xE4, 0x02, 0x00, 0x34, 0x12, 0x09, 0xE7,
+ 0x1B, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x21, 0x80, 0x18, 0xE4, 0xE0, 0x00, 0x09, 0x48, 0x00, 0xF2,
+ 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF2,
+ 0x12, 0x11, 0x09, 0xE7, 0x01, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x09, 0xE7, 0x00, 0x00, 0x00, 0xF0,
+ 0x0C, 0x09, 0xBB, 0x55, 0x9A, 0x81, 0x03, 0xF7, 0x20, 0x00, 0x09, 0x6F, 0x93, 0x45, 0x55, 0xF0,
+ 0xE2, 0x06, 0xB1, 0xF0, 0xC2, 0x06, 0x0A, 0xF0, 0xBA, 0x06, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0,
+ 0xFC, 0x09, 0x00, 0xF0, 0x02, 0x0A, 0x00, 0xF2, 0x60, 0x0B, 0x47, 0x10, 0x09, 0xE7, 0x08, 0x00,
+ 0x41, 0x10, 0x05, 0x80, 0x48, 0xE4, 0x00, 0x00, 0x1E, 0x12, 0x00, 0xE6, 0x11, 0x00, 0x00, 0xEA,
+ 0xB8, 0x00, 0x00, 0xF2, 0xB6, 0x10, 0x2C, 0x90, 0xAE, 0x90, 0x08, 0x50, 0x8A, 0x50, 0x38, 0x54,
+ 0x1F, 0x40, 0x00, 0xF2, 0xB4, 0x0D, 0x08, 0x10, 0x08, 0x90, 0x8A, 0x90, 0x30, 0x50, 0xB2, 0x50,
+ 0x9C, 0x32, 0x0C, 0x92, 0x8E, 0x92, 0x38, 0x54, 0x04, 0x80, 0x30, 0xE4, 0x08, 0x00, 0x04, 0x40,
+ 0x0C, 0x1C, 0x00, 0xF6, 0x03, 0x00, 0xB1, 0xF0, 0x26, 0x07, 0x9E, 0xF0, 0x3A, 0x07, 0x01, 0x48,
+ 0x55, 0xF0, 0xFC, 0x09, 0x0C, 0x1C, 0x10, 0x44, 0xED, 0x10, 0x0B, 0xF0, 0x5E, 0x07, 0x0C, 0xF0,
+ 0x62, 0x07, 0x05, 0xF0, 0x52, 0x07, 0x06, 0xF0, 0x58, 0x07, 0x09, 0xF0, 0x24, 0x09, 0x00, 0xF0,
+ 0x02, 0x0A, 0x00, 0xF2, 0x60, 0x0B, 0xCF, 0x10, 0x09, 0xE7, 0x08, 0x00, 0xC9, 0x10, 0x2E, 0x1C,
+ 0x02, 0x10, 0x2C, 0x1C, 0xAA, 0xF0, 0x64, 0x07, 0xAC, 0xF0, 0x72, 0x07, 0x40, 0x10, 0x34, 0x1C,
+ 0xF3, 0x10, 0xAD, 0xF0, 0x7C, 0x07, 0xC8, 0x10, 0x36, 0x1C, 0xE9, 0x10, 0x2B, 0xF0, 0x82, 0x08,
+ 0x6B, 0x18, 0x18, 0xF4, 0x00, 0xFE, 0x20, 0x12, 0x01, 0x58, 0xD2, 0xF0, 0x82, 0x08, 0x76, 0x18,
+ 0x18, 0xF4, 0x03, 0x00, 0xEC, 0x12, 0x00, 0xFC, 0x22, 0x01, 0x18, 0xF4, 0x01, 0x00, 0xE2, 0x12,
+ 0x0B, 0xF0, 0x64, 0x07, 0x0C, 0xF0, 0x64, 0x07, 0x36, 0x1C, 0x34, 0x1C, 0xB7, 0x10, 0x38, 0x54,
+ 0xB9, 0x54, 0x84, 0x80, 0x19, 0xE4, 0x20, 0x00, 0xB2, 0x13, 0x85, 0x80, 0x81, 0x48, 0x66, 0x12,
+ 0x04, 0x80, 0x18, 0xE4, 0x08, 0x00, 0x58, 0x13, 0x1F, 0x80, 0x08, 0x44, 0xC8, 0x44, 0x9F, 0x12,
+ 0x1F, 0x40, 0x34, 0x91, 0xB6, 0x91, 0x44, 0x55, 0xE5, 0x55, 0x02, 0xEC, 0xB8, 0x00, 0x02, 0x49,
+ 0xBB, 0x55, 0x82, 0x81, 0xC0, 0x55, 0x48, 0xF4, 0x0F, 0x00, 0x5A, 0xF0, 0x1A, 0x08, 0x4A, 0xE4,
+ 0x17, 0x00, 0xD5, 0xF0, 0xFA, 0x07, 0x02, 0xF6, 0x0F, 0x00, 0x02, 0xF4, 0x02, 0x00, 0x02, 0xEA,
+ 0xB8, 0x00, 0x04, 0x91, 0x86, 0x91, 0x02, 0x4B, 0x2C, 0x90, 0x08, 0x50, 0x2E, 0x90, 0x0A, 0x50,
+ 0x2C, 0x51, 0xAE, 0x51, 0x00, 0xF2, 0xB6, 0x10, 0x38, 0x54, 0x00, 0xF2, 0xB4, 0x0D, 0x56, 0x10,
+ 0x34, 0x91, 0xB6, 0x91, 0x0C, 0x10, 0x04, 0x80, 0x18, 0xE4, 0x08, 0x00, 0x41, 0x12, 0x0C, 0x91,
+ 0x8E, 0x91, 0x04, 0x80, 0x18, 0xE4, 0xF7, 0x00, 0x04, 0x40, 0x30, 0x90, 0xB2, 0x90, 0x36, 0x10,
+ 0x02, 0x80, 0x48, 0xE4, 0x10, 0x00, 0x31, 0x12, 0x82, 0xE7, 0x10, 0x00, 0x84, 0x80, 0x19, 0xE4,
+ 0x20, 0x00, 0x10, 0x13, 0x0C, 0x90, 0x8E, 0x90, 0x5D, 0xF0, 0x78, 0x07, 0x0C, 0x58, 0x8D, 0x58,
+ 0x00, 0xF0, 0x64, 0x07, 0x38, 0x54, 0xB9, 0x54, 0x19, 0x80, 0xF1, 0x10, 0x3A, 0x55, 0x19, 0x81,
+ 0xBB, 0x55, 0x10, 0x90, 0x92, 0x90, 0x10, 0x58, 0x91, 0x58, 0x14, 0x59, 0x95, 0x59, 0x00, 0xF0,
+ 0x64, 0x07, 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00, 0x06, 0x12, 0x6C, 0x19, 0x19, 0x41, 0x7C, 0x10,
+ 0x6C, 0x19, 0x0C, 0x51, 0xED, 0x19, 0x8E, 0x51, 0x6B, 0x18, 0x18, 0xF4, 0x00, 0xFF, 0x02, 0x13,
+ 0x6A, 0x10, 0x01, 0x58, 0xD2, 0xF0, 0xC0, 0x08, 0x76, 0x18, 0x18, 0xF4, 0x03, 0x00, 0x0A, 0x12,
+ 0x00, 0xFC, 0x22, 0x01, 0x18, 0xF4, 0x01, 0x00, 0x06, 0x13, 0x9E, 0xE7, 0x16, 0x00, 0x4C, 0x10,
+ 0xD1, 0xF0, 0xCA, 0x08, 0x9E, 0xE7, 0x17, 0x00, 0x42, 0x10, 0xD0, 0xF0, 0xD4, 0x08, 0x9E, 0xE7,
+ 0x19, 0x00, 0x38, 0x10, 0xCF, 0xF0, 0xDE, 0x08, 0x9E, 0xE7, 0x20, 0x00, 0x2E, 0x10, 0xCE, 0xF0,
+ 0xE8, 0x08, 0x9E, 0xE7, 0x21, 0x00, 0x24, 0x10, 0xCD, 0xF0, 0xF2, 0x08, 0x9E, 0xE7, 0x22, 0x00,
+ 0x1A, 0x10, 0xCC, 0xF0, 0x04, 0x09, 0x84, 0x80, 0x19, 0xE4, 0x04, 0x00, 0x06, 0x12, 0x9E, 0xE7,
+ 0x12, 0x00, 0x08, 0x10, 0xCB, 0xF0, 0x0C, 0x09, 0x9E, 0xE7, 0x24, 0x00, 0xB1, 0xF0, 0x0C, 0x09,
+ 0x05, 0xF0, 0x1E, 0x09, 0x09, 0xF0, 0x24, 0x09, 0x1E, 0xF0, 0xFC, 0x09, 0xE4, 0x10, 0x00, 0xF2,
+ 0x60, 0x0B, 0xE9, 0x10, 0x9C, 0x32, 0x82, 0xE7, 0x20, 0x00, 0x32, 0x1C, 0xE9, 0x09, 0x00, 0xF2,
+ 0x12, 0x11, 0x85, 0xF0, 0x02, 0x0A, 0x69, 0x08, 0x01, 0xF0, 0x44, 0x09, 0x1E, 0xF0, 0xFC, 0x09,
+ 0x00, 0xF0, 0x38, 0x09, 0x30, 0x44, 0x06, 0x12, 0x9E, 0xE7, 0x42, 0x00, 0xB8, 0x10, 0x04, 0xF6,
+ 0x01, 0x00, 0xB3, 0x45, 0x74, 0x12, 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00, 0x22, 0x13, 0x4B, 0xE4,
+ 0x02, 0x00, 0x36, 0x12, 0x4B, 0xE4, 0x28, 0x00, 0xAC, 0x13, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2,
+ 0xC8, 0x11, 0x03, 0xF6, 0xD0, 0x00, 0xFA, 0x14, 0x82, 0xE7, 0x01, 0x00, 0x00, 0xF0, 0x80, 0x01,
+ 0x9E, 0xE7, 0x44, 0x00, 0x4B, 0xE4, 0x02, 0x00, 0x06, 0x12, 0x03, 0xE6, 0x02, 0x00, 0x76, 0x10,
+ 0x00, 0xF2, 0xA2, 0x0D, 0x03, 0xE6, 0x02, 0x00, 0x6C, 0x10, 0x00, 0xF2, 0xA2, 0x0D, 0x19, 0x82,
+ 0x34, 0x46, 0x0A, 0x13, 0x03, 0xE6, 0x02, 0x00, 0x9E, 0xE7, 0x43, 0x00, 0x68, 0x10, 0x04, 0x80,
+ 0x30, 0xE4, 0x20, 0x00, 0x04, 0x40, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11, 0x82, 0xE7,
+ 0x01, 0x00, 0x06, 0xF7, 0x02, 0x00, 0x00, 0xF0, 0x08, 0x03, 0x04, 0x80, 0x18, 0xE4, 0x20, 0x00,
+ 0x06, 0x12, 0x03, 0xE6, 0x02, 0x00, 0x3E, 0x10, 0x04, 0x80, 0x18, 0xE4, 0x02, 0x00, 0x3A, 0x12,
+ 0x04, 0x80, 0x18, 0xE4, 0xFD, 0x00, 0x04, 0x40, 0x1C, 0x1C, 0x9D, 0xF0, 0xEA, 0x09, 0x1C, 0x1C,
+ 0x9D, 0xF0, 0xF0, 0x09, 0xC1, 0x10, 0x9E, 0xE7, 0x13, 0x00, 0x0A, 0x10, 0x9E, 0xE7, 0x41, 0x00,
+ 0x04, 0x10, 0x9E, 0xE7, 0x24, 0x00, 0x00, 0xFC, 0xBE, 0x00, 0x98, 0x57, 0xD5, 0xF0, 0x8A, 0x02,
+ 0x04, 0xE6, 0x04, 0x00, 0x06, 0x10, 0x04, 0xE6, 0x04, 0x00, 0x9D, 0x41, 0x1C, 0x42, 0x9F, 0xE7,
+ 0x00, 0x00, 0x06, 0xF7, 0x02, 0x00, 0x03, 0xF6, 0xE0, 0x00, 0x3C, 0x14, 0x44, 0x58, 0x45, 0x58,
+ 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0xF2, 0x7E, 0x10, 0x00, 0xF2, 0xC6, 0x0F, 0x3C, 0x14, 0x1E, 0x1C,
+ 0x00, 0xF0, 0x80, 0x01, 0x12, 0x1C, 0x22, 0x1C, 0xD2, 0x14, 0x00, 0xF0, 0x72, 0x01, 0x83, 0x59,
+ 0x03, 0xDC, 0x73, 0x57, 0x80, 0x5D, 0x00, 0x16, 0x83, 0x59, 0x03, 0xDC, 0x38, 0x54, 0x70, 0x57,
+ 0x33, 0x54, 0x3B, 0x54, 0x80, 0x5D, 0x00, 0x16, 0x03, 0x57, 0x83, 0x59, 0x38, 0x54, 0x00, 0xCC,
+ 0x00, 0x16, 0x03, 0x57, 0x83, 0x59, 0x00, 0x4C, 0x00, 0x16, 0x02, 0x80, 0x48, 0xE4, 0x01, 0x00,
+ 0x0E, 0x12, 0x48, 0xE4, 0x05, 0x00, 0x08, 0x12, 0x00, 0xF2, 0xBC, 0x11, 0x00, 0xF2, 0xC8, 0x11,
+ 0xC1, 0x5A, 0x3A, 0x55, 0x02, 0xEC, 0xB5, 0x00, 0x45, 0x59, 0x00, 0xF2, 0xF6, 0x0D, 0x83, 0x58,
+ 0x30, 0xE7, 0x00, 0x00, 0x10, 0x4D, 0x30, 0xE7, 0x40, 0x00, 0x10, 0x4F, 0x38, 0x90, 0xBA, 0x90,
+ 0x10, 0x5C, 0x80, 0x5C, 0x83, 0x5A, 0x10, 0x4E, 0x04, 0xEA, 0xB5, 0x00, 0x43, 0x5B, 0x03, 0xF4,
+ 0xE0, 0x00, 0x83, 0x59, 0x04, 0xCC, 0x01, 0x4A, 0x0A, 0x12, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D,
+ 0x00, 0xF2, 0x38, 0x10, 0x00, 0x16, 0x08, 0x1C, 0x00, 0xFC, 0xAC, 0x00, 0x06, 0x58, 0x67, 0x18,
+ 0x18, 0xF4, 0x8F, 0xE1, 0x01, 0xFC, 0xAE, 0x00, 0x19, 0xF4, 0x70, 0x1E, 0xB0, 0x54, 0x07, 0x58,
+ 0x00, 0xFC, 0xB0, 0x00, 0x08, 0x58, 0x00, 0xFC, 0xB2, 0x00, 0x09, 0x58, 0x0A, 0x1C, 0x00, 0xE6,
+ 0x0F, 0x00, 0x00, 0xEA, 0xB9, 0x00, 0x38, 0x54, 0x00, 0xFA, 0x24, 0x01, 0x00, 0xFA, 0xB6, 0x00,
+ 0x18, 0x1C, 0x14, 0x1C, 0x10, 0x1C, 0x32, 0x1C, 0x12, 0x1C, 0x00, 0x16, 0x3E, 0x57, 0x0C, 0x14,
+ 0x0E, 0x47, 0x07, 0xE6, 0x10, 0x00, 0xCE, 0x47, 0xF5, 0x13, 0x00, 0x16, 0x00, 0xF2, 0xA2, 0x0D,
+ 0x02, 0x4B, 0x03, 0xF6, 0xE0, 0x00, 0x00, 0xF2, 0x68, 0x0A, 0x01, 0x48, 0x20, 0x12, 0x44, 0x58,
+ 0x45, 0x58, 0x9E, 0xE7, 0x15, 0x00, 0x9C, 0xE7, 0x04, 0x00, 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0xF2,
+ 0x7E, 0x10, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2, 0x7A, 0x0A, 0x1E, 0x1C, 0xD5, 0x10, 0x00, 0x16,
+ 0x69, 0x08, 0x48, 0xE4, 0x04, 0x00, 0x64, 0x12, 0x48, 0xE4, 0x02, 0x00, 0x20, 0x12, 0x48, 0xE4,
+ 0x03, 0x00, 0x1A, 0x12, 0x48, 0xE4, 0x08, 0x00, 0x14, 0x12, 0x48, 0xE4, 0x01, 0x00, 0xF0, 0x12,
+ 0x48, 0xE4, 0x07, 0x00, 0x12, 0x12, 0x01, 0xE6, 0x07, 0x00, 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2,
+ 0x12, 0x11, 0x05, 0xF0, 0x60, 0x0B, 0x00, 0x16, 0x00, 0xE6, 0x01, 0x00, 0x00, 0xEA, 0x99, 0x00,
+ 0x02, 0x80, 0x48, 0xE4, 0x03, 0x00, 0xE7, 0x12, 0x48, 0xE4, 0x06, 0x00, 0xE1, 0x12, 0x01, 0xE6,
+ 0x06, 0x00, 0x00, 0xF2, 0x4E, 0x0D, 0x00, 0xF2, 0x12, 0x11, 0x04, 0xE6, 0x02, 0x00, 0x9E, 0xE7,
+ 0x15, 0x00, 0x01, 0xF0, 0x1C, 0x0A, 0x00, 0xF0, 0x02, 0x0A, 0x00, 0x16, 0x02, 0x80, 0x48, 0xE4,
+ 0x10, 0x00, 0x1C, 0x12, 0x82, 0xE7, 0x08, 0x00, 0x3C, 0x56, 0x03, 0x82, 0x00, 0xF2, 0xE2, 0x0D,
+ 0x30, 0xE7, 0x08, 0x00, 0x04, 0xF7, 0x70, 0x01, 0x06, 0xF7, 0x02, 0x00, 0x00, 0xF0, 0x80, 0x01,
+ 0x6C, 0x19, 0xED, 0x19, 0x5D, 0xF0, 0xD4, 0x0B, 0x44, 0x55, 0xE5, 0x55, 0x59, 0xF0, 0x52, 0x0C,
+ 0x04, 0x55, 0xA5, 0x55, 0x1F, 0x80, 0x01, 0xEC, 0xB8, 0x00, 0x82, 0x48, 0x82, 0x80, 0x49, 0x44,
+ 0x2E, 0x13, 0x01, 0xEC, 0xB8, 0x00, 0x41, 0xE4, 0x02, 0x00, 0x01, 0xEA, 0xB8, 0x00, 0x49, 0xE4,
+ 0x11, 0x00, 0x59, 0xF0, 0x2E, 0x0C, 0x01, 0xE6, 0x17, 0x00, 0x01, 0xEA, 0xB8, 0x00, 0x02, 0x4B,
+ 0x88, 0x90, 0xAC, 0x50, 0x8A, 0x90, 0xAE, 0x50, 0x01, 0xEC, 0xB8, 0x00, 0x82, 0x48, 0x82, 0x80,
+ 0x10, 0x44, 0x02, 0x4B, 0x1F, 0x40, 0xC0, 0x44, 0x00, 0xF2, 0xB4, 0x0D, 0x04, 0x55, 0xA5, 0x55,
+ 0x9F, 0x10, 0x0C, 0x51, 0x8E, 0x51, 0x30, 0x90, 0xB2, 0x90, 0x00, 0x56, 0xA1, 0x56, 0x30, 0x50,
+ 0xB2, 0x50, 0x34, 0x90, 0xB6, 0x90, 0x40, 0x56, 0xE1, 0x56, 0x34, 0x50, 0xB6, 0x50, 0x65, 0x10,
+ 0xB1, 0xF0, 0x70, 0x0C, 0x85, 0xF0, 0xCA, 0x0B, 0xE9, 0x09, 0x4B, 0xE4, 0x03, 0x00, 0x78, 0x12,
+ 0x4B, 0xE4, 0x02, 0x00, 0x01, 0x13, 0xB1, 0xF0, 0x86, 0x0C, 0x85, 0xF0, 0xCA, 0x0B, 0x69, 0x08,
+ 0x48, 0xE4, 0x03, 0x00, 0xD5, 0xF0, 0x86, 0x0B, 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0xCA, 0x0B,
+ 0xE8, 0x09, 0x3C, 0x56, 0x00, 0xFC, 0x20, 0x01, 0x98, 0x57, 0x02, 0x13, 0xBB, 0x45, 0x4B, 0xE4,
+ 0x00, 0x00, 0x08, 0x12, 0x03, 0xE6, 0x01, 0x00, 0x04, 0xF6, 0x00, 0x80, 0xA8, 0x14, 0xD2, 0x14,
+ 0x30, 0x1C, 0x02, 0x80, 0x48, 0xE4, 0x03, 0x00, 0x10, 0x13, 0x00, 0xFC, 0xB6, 0x00, 0x98, 0x57,
+ 0x02, 0x13, 0x4C, 0x1C, 0x3E, 0x1C, 0x00, 0xF0, 0x8E, 0x0B, 0x00, 0xFC, 0x24, 0x01, 0xB0, 0x57,
+ 0x00, 0xFA, 0x24, 0x01, 0x4C, 0x1C, 0x3E, 0x1C, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0x8E, 0x0B,
+ 0x00, 0xF2, 0x8C, 0x0E, 0x00, 0xF0, 0x8E, 0x0B, 0xB1, 0xF0, 0xF8, 0x0C, 0x85, 0xF0, 0x86, 0x0B,
+ 0x69, 0x08, 0x48, 0xE4, 0x01, 0x00, 0xD5, 0xF0, 0x86, 0x0B, 0xFC, 0x14, 0x42, 0x58, 0x6C, 0x14,
+ 0x80, 0x14, 0x30, 0x1C, 0x4A, 0xF4, 0x02, 0x00, 0x55, 0xF0, 0x86, 0x0B, 0x4A, 0xF4, 0x01, 0x00,
+ 0x0E, 0x12, 0x02, 0x80, 0x48, 0xE4, 0x03, 0x00, 0x06, 0x13, 0x3E, 0x1C, 0x00, 0xF0, 0x8E, 0x0B,
+ 0x00, 0xFC, 0xB6, 0x00, 0xB0, 0x57, 0x00, 0xFA, 0xB6, 0x00, 0x4C, 0x1C, 0x3E, 0x1C, 0x00, 0xF2,
+ 0x12, 0x11, 0x86, 0xF0, 0x8E, 0x0B, 0x00, 0xF2, 0xBA, 0x0E, 0x00, 0xF0, 0x8E, 0x0B, 0x4C, 0x1C,
+ 0xB1, 0xF0, 0x50, 0x0D, 0x85, 0xF0, 0x5C, 0x0D, 0x69, 0x08, 0xF3, 0x10, 0x86, 0xF0, 0x64, 0x0D,
+ 0x4E, 0x1C, 0x89, 0x48, 0x00, 0x16, 0x00, 0xF6, 0x00, 0x01, 0x00, 0x57, 0x00, 0x57, 0x03, 0x58,
+ 0x00, 0xDC, 0x18, 0xF4, 0xFF, 0x7F, 0x30, 0x56, 0x00, 0x5C, 0x00, 0x16, 0x00, 0xF6, 0x00, 0x01,
+ 0x00, 0x57, 0x00, 0x57, 0x03, 0x58, 0x00, 0xDC, 0x18, 0xF4, 0x00, 0x80, 0x30, 0x56, 0x00, 0x5C,
+ 0x00, 0x16, 0x00, 0xF6, 0x00, 0x01, 0x00, 0x57, 0x00, 0x57, 0x03, 0x58, 0x00, 0xDC, 0x0B, 0x58,
+ 0x00, 0x16, 0x03, 0xF6, 0x24, 0x01, 0x00, 0xF2, 0x58, 0x0A, 0x03, 0xF6, 0xB6, 0x00, 0x00, 0xF2,
+ 0x58, 0x0A, 0x00, 0x16, 0x02, 0xEC, 0xB8, 0x00, 0x02, 0x49, 0x18, 0xF4, 0xFF, 0x00, 0x00, 0x54,
+ 0x00, 0x54, 0x00, 0x54, 0x00, 0xF4, 0x08, 0x00, 0xE1, 0x18, 0x80, 0x54, 0x03, 0x58, 0x00, 0xDD,
+ 0x01, 0xDD, 0x02, 0xDD, 0x03, 0xDC, 0x02, 0x4B, 0x30, 0x50, 0xB2, 0x50, 0x34, 0x51, 0xB6, 0x51,
+ 0x00, 0x16, 0x45, 0x5A, 0x1D, 0xF4, 0xFF, 0x00, 0x85, 0x56, 0x85, 0x56, 0x85, 0x56, 0x05, 0xF4,
+ 0x02, 0x12, 0x83, 0x5A, 0x00, 0x16, 0x1D, 0xF4, 0xFF, 0x00, 0x85, 0x56, 0x85, 0x56, 0x85, 0x56,
+ 0x05, 0xF4, 0x00, 0x12, 0x83, 0x5A, 0x00, 0x16, 0x38, 0x54, 0xBB, 0x55, 0x3C, 0x56, 0xBD, 0x56,
+ 0x00, 0xF2, 0x12, 0x11, 0x85, 0xF0, 0x82, 0x0E, 0xE9, 0x09, 0xC1, 0x59, 0x00, 0xF2, 0x12, 0x11,
+ 0x85, 0xF0, 0x82, 0x0E, 0xE8, 0x0A, 0x83, 0x55, 0x83, 0x55, 0x4B, 0xF4, 0x90, 0x01, 0x5C, 0xF0,
+ 0x36, 0x0E, 0xBD, 0x56, 0x40, 0x10, 0x4B, 0xF4, 0x30, 0x00, 0x59, 0xF0, 0x48, 0x0E, 0x01, 0xF6,
+ 0x0C, 0x00, 0x00, 0xF6, 0x01, 0x00, 0x2E, 0x10, 0x02, 0xFC, 0x9C, 0x00, 0x9A, 0x57, 0x14, 0x13,
+ 0x4B, 0xF4, 0x64, 0x00, 0x59, 0xF0, 0x64, 0x0E, 0x03, 0xF6, 0x64, 0x00, 0x01, 0xF6, 0x19, 0x00,
+ 0x00, 0xF6, 0x01, 0x00, 0x43, 0xF4, 0x33, 0x00, 0x56, 0xF0, 0x76, 0x0E, 0x04, 0xF4, 0x00, 0x01,
+ 0x43, 0xF4, 0x19, 0x00, 0xF3, 0x10, 0xB4, 0x56, 0xC3, 0x58, 0x02, 0xFC, 0x9E, 0x00, 0x9A, 0x57,
+ 0x08, 0x13, 0x3C, 0x56, 0x00, 0xF6, 0x02, 0x00, 0x00, 0x16, 0x00, 0x16, 0x09, 0xE7, 0x01, 0x00,
+ 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xB8, 0x0E, 0x09, 0xE7, 0x02, 0x00, 0x00, 0xF2, 0x12, 0x11,
+ 0x86, 0xF0, 0xB8, 0x0E, 0x09, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xB8, 0x0E,
+ 0x4E, 0x1C, 0x89, 0x49, 0x00, 0xF2, 0x12, 0x11, 0x00, 0x16, 0x09, 0xE7, 0x01, 0x00, 0x00, 0xF2,
+ 0x12, 0x11, 0x86, 0xF0, 0xF2, 0x0E, 0x09, 0xE7, 0x03, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0,
+ 0xF2, 0x0E, 0x09, 0xE7, 0x01, 0x00, 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xF2, 0x0E, 0x89, 0x49,
+ 0x00, 0xF2, 0x12, 0x11, 0x86, 0xF0, 0xF2, 0x0E, 0x4E, 0x1C, 0x89, 0x4A, 0x00, 0xF2, 0x12, 0x11,
+ 0x00, 0x16, 0x3C, 0x56, 0x00, 0x16, 0x00, 0xEC, 0x26, 0x01, 0x48, 0xE4, 0x01, 0x00, 0x1E, 0x13,
+ 0x38, 0x44, 0x00, 0xEA, 0x26, 0x01, 0x49, 0xF4, 0x00, 0x00, 0x04, 0x12, 0x4E, 0x1C, 0x02, 0x10,
+ 0x4C, 0x1C, 0x01, 0xEC, 0x27, 0x01, 0x89, 0x48, 0x00, 0xF2, 0x12, 0x11, 0x02, 0x14, 0x00, 0x16,
+ 0x85, 0xF0, 0x52, 0x0F, 0x38, 0x54, 0x00, 0xEA, 0x99, 0x00, 0x00, 0xF2, 0x60, 0x0B, 0x02, 0x80,
+ 0x48, 0xE4, 0x06, 0x00, 0x1C, 0x13, 0x00, 0xEC, 0x99, 0x00, 0x48, 0xE4, 0x01, 0x00, 0x0A, 0x12,
+ 0x04, 0x80, 0x30, 0xE4, 0x01, 0x00, 0x04, 0x40, 0x08, 0x10, 0x04, 0x80, 0x18, 0xE4, 0xFE, 0x00,
+ 0x04, 0x40, 0x00, 0x16, 0x02, 0xF6, 0xE0, 0x00, 0x02, 0x57, 0x03, 0x59, 0x01, 0xCC, 0x81, 0x48,
+ 0x22, 0x12, 0x00, 0x4E, 0x83, 0x5A, 0x90, 0x4C, 0x20, 0xE7, 0x00, 0x00, 0xC3, 0x58, 0x1B, 0xF4,
+ 0xFF, 0x00, 0x83, 0x55, 0x83, 0x55, 0x83, 0x55, 0x03, 0xF4, 0x00, 0x12, 0x8B, 0x55, 0x83, 0x59,
+ 0x00, 0x4E, 0x00, 0x16, 0x00, 0x4E, 0x02, 0xF6, 0xF0, 0x00, 0x02, 0x57, 0x03, 0x59, 0x00, 0x4E,
+ 0x83, 0x5A, 0x30, 0xE7, 0x00, 0x00, 0x20, 0xE7, 0x00, 0x00, 0x00, 0x16, 0x02, 0xF6, 0xF0, 0x00,
+ 0x02, 0x57, 0x03, 0x59, 0x01, 0xCC, 0x00, 0x4E, 0x83, 0x5A, 0x30, 0xE7, 0x00, 0x00, 0x80, 0x4C,
+ 0xC3, 0x58, 0x1B, 0xF4, 0xFF, 0x00, 0x83, 0x55, 0x83, 0x55, 0x83, 0x55, 0x03, 0xF4, 0x00, 0x12,
+ 0x83, 0x59, 0x00, 0x4E, 0x00, 0x16, 0x03, 0xF6, 0xE0, 0x00, 0x03, 0x57, 0x83, 0x59, 0x3A, 0x55,
+ 0x02, 0xCC, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D, 0xC0, 0x5A, 0x40, 0x5C, 0x38, 0x54, 0x00, 0xCD,
+ 0x01, 0xCC, 0x4A, 0x46, 0x0A, 0x13, 0x83, 0x59, 0x00, 0x4C, 0x01, 0x48, 0x16, 0x13, 0x0C, 0x10,
+ 0xC5, 0x58, 0x00, 0xF2, 0xF6, 0x0D, 0x00, 0x4C, 0x01, 0x48, 0x08, 0x13, 0x05, 0xF6, 0xF0, 0x00,
+ 0x05, 0x57, 0x08, 0x10, 0x45, 0x58, 0x00, 0xF2, 0xF6, 0x0D, 0x8D, 0x56, 0x83, 0x5A, 0x80, 0x4C,
+ 0x05, 0x17, 0x00, 0x16, 0x02, 0x4B, 0x06, 0xF7, 0x04, 0x00, 0x62, 0x0B, 0x03, 0x82, 0x00, 0xF2,
+ 0xE2, 0x0D, 0x02, 0x80, 0x00, 0x4C, 0x45, 0xF4, 0x02, 0x00, 0x52, 0x14, 0x06, 0xF7, 0x02, 0x00,
+ 0x06, 0x14, 0x00, 0xF2, 0x54, 0x0F, 0x00, 0x16, 0x02, 0x4B, 0x01, 0xF6, 0xFF, 0x00, 0x38, 0x1C,
+ 0x05, 0xF4, 0x04, 0x00, 0x83, 0x5A, 0x18, 0xDF, 0x19, 0xDF, 0x1D, 0xF7, 0x3C, 0x00, 0xB8, 0xF0,
+ 0x4E, 0x10, 0x9C, 0x14, 0x01, 0x48, 0x1C, 0x13, 0x0E, 0xF7, 0x3C, 0x00, 0x03, 0xF7, 0x04, 0x00,
+ 0xAF, 0x19, 0x03, 0x42, 0x45, 0xF4, 0x02, 0x00, 0x83, 0x5A, 0x02, 0xCC, 0x02, 0x41, 0x45, 0xF4,
+ 0x02, 0x00, 0x00, 0x16, 0x91, 0x44, 0xD5, 0xF0, 0x3E, 0x10, 0x00, 0xF0, 0x9E, 0x02, 0x01, 0xF6,
+ 0xFF, 0x00, 0x38, 0x1C, 0x05, 0xF4, 0x04, 0x00, 0x83, 0x5A, 0x18, 0xDF, 0x19, 0xDF, 0x0E, 0xF7,
+ 0x3C, 0x00, 0x03, 0xF7, 0x04, 0x00, 0x0F, 0x79, 0x1C, 0xF7, 0x3C, 0x00, 0xB8, 0xF0, 0x9C, 0x10,
+ 0x4E, 0x14, 0x01, 0x48, 0x06, 0x13, 0x45, 0xF4, 0x04, 0x00, 0x00, 0x16, 0x91, 0x44, 0xD5, 0xF0,
+ 0x82, 0x10, 0x00, 0xF0, 0x9E, 0x02, 0x02, 0xF6, 0xFF, 0x00, 0x38, 0x1C, 0x2C, 0xBC, 0xAE, 0xBC,
+ 0xE2, 0x08, 0x00, 0xEC, 0xB8, 0x00, 0x02, 0x48, 0x1D, 0xF7, 0x80, 0x00, 0xB8, 0xF0, 0xCC, 0x10,
+ 0x1E, 0x14, 0x01, 0x48, 0x0E, 0x13, 0x0E, 0xF7, 0x80, 0x00, 0x38, 0x54, 0x03, 0x58, 0xAF, 0x19,
+ 0x82, 0x48, 0x00, 0x16, 0x82, 0x48, 0x12, 0x45, 0xD5, 0xF0, 0xBA, 0x10, 0x00, 0xF0, 0x9E, 0x02,
+ 0x39, 0xF0, 0xF8, 0x10, 0x38, 0x44, 0x00, 0x16, 0x7E, 0x18, 0x18, 0xF4, 0x03, 0x00, 0x04, 0x13,
+ 0x61, 0x18, 0x00, 0x16, 0x38, 0x1C, 0x00, 0xFC, 0x22, 0x01, 0x18, 0xF4, 0x01, 0x00, 0xF1, 0x12,
+ 0xE3, 0x10, 0x30, 0x44, 0x30, 0x44, 0x30, 0x44, 0xB1, 0xF0, 0x18, 0x11, 0x00, 0x16, 0x3E, 0x57,
+ 0x03, 0xF6, 0xE0, 0x00, 0x03, 0x57, 0x83, 0x59, 0x04, 0xCC, 0x01, 0x4A, 0x6A, 0x12, 0x45, 0x5A,
+ 0x00, 0xF2, 0xF6, 0x0D, 0x02, 0x4B, 0x70, 0x14, 0x34, 0x13, 0x02, 0x80, 0x48, 0xE4, 0x08, 0x00,
+ 0x18, 0x12, 0x9C, 0xE7, 0x02, 0x00, 0x9E, 0xE7, 0x15, 0x00, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2,
+ 0x7A, 0x0A, 0x1E, 0x1C, 0x01, 0xF6, 0x01, 0x00, 0x00, 0x16, 0x30, 0xE4, 0x10, 0x00, 0x04, 0x40,
+ 0x00, 0xF2, 0xE2, 0x0D, 0x20, 0xE7, 0x01, 0x00, 0x01, 0xF6, 0x01, 0x00, 0x00, 0x16, 0x04, 0xDC,
+ 0x01, 0x4A, 0x24, 0x12, 0x45, 0x5A, 0x00, 0xF2, 0xF6, 0x0D, 0x43, 0x5B, 0x06, 0xEC, 0x98, 0x00,
+ 0x00, 0xF2, 0x38, 0x10, 0xC6, 0x59, 0x20, 0x14, 0x0A, 0x13, 0x00, 0xF2, 0xC6, 0x0F, 0x00, 0xF2,
+ 0x14, 0x10, 0xA7, 0x10, 0x83, 0x5A, 0xD7, 0x10, 0x0E, 0x47, 0x07, 0xE6, 0x10, 0x00, 0xCE, 0x47,
+ 0x5A, 0xF0, 0x20, 0x11, 0xB9, 0x54, 0x00, 0x16, 0x14, 0x90, 0x96, 0x90, 0x02, 0xFC, 0xA8, 0x00,
+ 0x03, 0xFC, 0xAA, 0x00, 0x48, 0x55, 0x02, 0x13, 0xC9, 0x55, 0x00, 0x16, 0x00, 0xEC, 0xBA, 0x00,
+ 0x10, 0x44, 0x00, 0xEA, 0xBA, 0x00, 0x00, 0x16, 0x03, 0xF6, 0xC0, 0x00, 0x00, 0xF2, 0x68, 0x0A,
+ 0x10, 0x44, 0x00, 0x4C, 0x00, 0x16
+};
+
+unsigned short _adv_mcode_size ASC_INITDATA =
+ sizeof(_adv_mcode_buf); /* 0x11D6 */
+unsigned long _adv_mcode_chksum ASC_INITDATA = 0x03494981UL;
+
+/* a_init.c */
+/*
+ * EEPROM Configuration.
+ *
+ * All drivers should use this structure to set the default EEPROM
+ * configuration. The BIOS now uses this structure when it is built.
+ * Additional structure information can be found in a_condor.h where
+ * the structure is defined.
+ */
+STATIC ADVEEP_CONFIG
+Default_EEPROM_Config ASC_INITDATA = {
+ ADV_EEPROM_BIOS_ENABLE, /* cfg_msw */
+ 0x0000, /* cfg_lsw */
+ 0xFFFF, /* disc_enable */
+ 0xFFFF, /* wdtr_able */
+ 0xFFFF, /* sdtr_able */
+ 0xFFFF, /* start_motor */
+ 0xFFFF, /* tagqng_able */
+ 0xFFFF, /* bios_scan */
+ 0, /* scam_tolerant */
+ 7, /* adapter_scsi_id */
+ 0, /* bios_boot_delay */
+ 3, /* scsi_reset_delay */
+ 0, /* bios_id_lun */
+ 0, /* termination */
+ 0, /* reserved1 */
+ 0xFFEF, /* bios_ctrl */
+ 0xFFFF, /* ultra_able */
+ 0, /* reserved2 */
+ ASC_DEF_MAX_HOST_QNG, /* max_host_qng */
+ ASC_DEF_MAX_DVC_QNG, /* max_dvc_qng */
+ 0, /* dvc_cntl */
+ 0, /* bug_fix */
+ 0, /* serial_number_word1 */
+ 0, /* serial_number_word2 */
+ 0, /* serial_number_word3 */
+ 0, /* check_sum */
+ { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 }, /* oem_name[16] */
+ 0, /* dvc_err_code */
+ 0, /* adv_err_code */
+ 0, /* adv_err_addr */
+ 0, /* saved_dvc_err_code */
+ 0, /* saved_adv_err_code */
+ 0, /* saved_adv_err_addr */
+ 0 /* num_of_err */
+};
+
+/*
+ * Initialize the ADV_DVC_VAR structure.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ */
+ASC_INITFUNC(
+int
+AdvInitGetConfig(ADV_DVC_VAR *asc_dvc)
+)
+{
+ ushort warn_code;
+ AdvPortAddr iop_base;
+ uchar pci_cmd_reg;
+ int status;
+
+ warn_code = 0;
+ asc_dvc->err_code = 0;
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * PCI Command Register
+ */
+
+ if (((pci_cmd_reg = DvcAdvReadPCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister))
+ & AscPCICmdRegBits_BusMastering)
+ != AscPCICmdRegBits_BusMastering)
+ {
+ pci_cmd_reg |= AscPCICmdRegBits_BusMastering;
+
+ DvcAdvWritePCIConfigByte(asc_dvc,
+ AscPCIConfigCommandRegister, pci_cmd_reg);
+
+ if (((DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigCommandRegister))
+ & AscPCICmdRegBits_BusMastering)
+ != AscPCICmdRegBits_BusMastering)
+ {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+
+ /*
+ * PCI Latency Timer
+ *
+ * If the "latency timer" register is 0x20 or above, then we don't need
+ * to change it. Otherwise, set it to 0x20 (i.e. set it to 0x20 if it
+ * comes up less than 0x20).
+ */
+ if (DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) < 0x20) {
+ DvcAdvWritePCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer, 0x20);
+ if (DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) < 0x20)
+ {
+ warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
+ }
+ }
+
+ /*
+ * Save the state of the PCI Configuration Command Register
+ * "Parity Error Response Control" Bit. If the bit is clear (0),
+ * in AdvInitAsc3550Driver() tell the microcode to ignore DMA
+ * parity errors.
+ */
+ asc_dvc->cfg->control_flag = 0;
+ if (((DvcAdvReadPCIConfigByte(asc_dvc, AscPCIConfigCommandRegister)
+ & AscPCICmdRegBits_ParErrRespCtrl)) == 0)
+ {
+ asc_dvc->cfg->control_flag |= CONTROL_FLAG_IGNORE_PERR;
+ }
+
+ asc_dvc->cur_host_qng = 0;
+
+ asc_dvc->cfg->lib_version = (ADV_LIB_VERSION_MAJOR << 8) |
+ ADV_LIB_VERSION_MINOR;
+ asc_dvc->cfg->chip_version =
+ AdvGetChipVersion(iop_base, asc_dvc->bus_type);
+
+ /*
+ * Reset the chip to start and allow register writes.
+ */
+ if (AdvFindSignature(iop_base) == 0)
+ {
+ asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE;
+ return ADV_ERROR;
+ }
+ else {
+
+ AdvResetChip(asc_dvc);
+
+ if ((status = AdvInitFromEEP(asc_dvc)) == ADV_ERROR)
+ {
+ return ADV_ERROR;
+ }
+ warn_code |= status;
+
+ /*
+ * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
+ * Resets should be performed.
+ */
+ if (asc_dvc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
+ {
+ AdvResetSCSIBus(asc_dvc);
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Initialize the ASC3550.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ */
+ASC_INITFUNC(
+int
+AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
+)
+{
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ ulong sum;
+ int begin_addr;
+ int end_addr;
+ int code_sum;
+ int word;
+ int rql_addr; /* RISC Queue List address */
+ int i;
+ ushort scsi_cfg1;
+ uchar biosmem[ASC_MC_BIOSLEN]; /* BIOS RISC Memory 0x40-0x8F. */
+
+ /* If there is already an error, don't continue. */
+ if (asc_dvc->err_code != 0)
+ {
+ return ADV_ERROR;
+ }
+
+ warn_code = 0;
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Save the RISC memory BIOS region before writing the microcode.
+ * The BIOS may already be loaded and using its RISC LRAM region
+ * so its region must be saved and restored.
+ *
+ * Note: This code makes the assumption, which is currently true,
+ * that a chip reset does not clear RISC LRAM.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN; i++)
+ {
+ AdvReadByteLram(iop_base, ASC_MC_BIOSMEM + i, biosmem[i]);
+ }
+
+ /*
+ * Load the Microcode
+ *
+ * Write the microcode image to RISC memory starting at address 0.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
+ for (word = 0; word < _adv_mcode_size; word += 2)
+ {
+ AdvWriteWordAutoIncLram(iop_base,
+ *((ushort *) (&_adv_mcode_buf[word])));
+ }
+
+ /*
+ * Clear the rest of Condor's Internal RAM (8KB).
+ */
+ for (; word < ADV_CONDOR_MEMSIZE; word += 2)
+ {
+ AdvWriteWordAutoIncLram(iop_base, 0);
+ }
+
+ /*
+ * Verify the microcode checksum.
+ */
+ sum = 0;
+ AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0);
+ for (word = 0; word < _adv_mcode_size; word += 2)
+ {
+ sum += AdvReadWordAutoIncLram(iop_base);
+ }
+
+ if (sum != _adv_mcode_chksum)
+ {
+ asc_dvc->err_code |= ASC_IERR_MCODE_CHKSUM;
+ return ADV_ERROR;
+ }
+
+ /*
+ * Restore the RISC memory BIOS region.
+ */
+ for (i = 0; i < ASC_MC_BIOSLEN; i++)
+ {
+ AdvWriteByteLram(iop_base, ASC_MC_BIOSMEM + i, biosmem[i]);
+ }
+
+ /*
+ * Calculate and write the microcode code checksum to the microcode
+ * code checksum location ASC_MC_CODE_CHK_SUM (0x2C).
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, begin_addr);
+ AdvReadWordLram(iop_base, ASC_MC_CODE_END_ADDR, end_addr);
+ code_sum = 0;
+ for (word = begin_addr; word < end_addr; word += 2)
+ {
+ code_sum += *((ushort *) (&_adv_mcode_buf[word]));
+ }
+ AdvWriteWordLram(iop_base, ASC_MC_CODE_CHK_SUM, code_sum);
+
+ /*
+ * Read microcode version and date.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_DATE, asc_dvc->cfg->mcode_date);
+ AdvReadWordLram(iop_base, ASC_MC_VERSION_NUM, asc_dvc->cfg->mcode_version);
+
+ /*
+ * Initialize microcode operating variables
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_ADAPTER_SCSI_ID,
+ asc_dvc->chip_scsi_id);
+
+ /*
+ * If the PCI Configuration Command Register "Parity Error Response
+ * Control" Bit was clear (0), then set the microcode variable
+ * 'control_flag' CONTROL_FLAG_IGNORE_PERR flag to tell the microcode
+ * to ignore DMA parity errors.
+ */
+ if (asc_dvc->cfg->control_flag & CONTROL_FLAG_IGNORE_PERR)
+ {
+ /*
+ * Note: Don't remove the use of a temporary variable in
+ * the following code, otherwise the Microsoft C compiler
+ * will turn the following lines into a no-op.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ word |= CONTROL_FLAG_IGNORE_PERR;
+ AdvWriteWordLram(iop_base, ASC_MC_CONTROL_FLAG, word);
+ }
+
+ /*
+ * Set default microcode operating variables for WDTR, SDTR, and
+ * command tag queuing based on the EEPROM configuration values.
+ *
+ * These ADV_DVC_VAR fields and the microcode variables will be
+ * changed in AdvInquiryHandling() if it is found a device is
+ * incapable of a particular feature.
+ */
+
+ /*
+ * Set the microcode ULTRA target mask from EEPROM value. The
+ * SDTR target mask overrides the ULTRA target mask in the
+ * microcode so it is safe to set this value without determining
+ * whether the device supports SDTR.
+ *
+ * Note: There is no way to know whether a device supports ULTRA
+ * speed without attempting a SDTR ULTRA speed negotiation with
+ * the device. The device will reject the speed if it does not
+ * support it by responding with an SDTR message containing a
+ * slower speed.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_ULTRA_ABLE, asc_dvc->ultra_able);
+ AdvWriteWordLram(iop_base, ASC_MC_DISC_ENABLE, asc_dvc->cfg->disc_enable);
+
+
+ /*
+ * Set SCSI_CFG0 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG0 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG0,
+ PARITY_EN | SEL_TMO_LONG | OUR_ID_EN | asc_dvc->chip_scsi_id);
+
+ /*
+ * Determine SCSI_CFG1 Microcode Default Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+
+ /* Read current SCSI_CFG1 Register value. */
+ scsi_cfg1 = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+
+ /*
+ * If all three connectors are in use, return an error.
+ */
+ if ((scsi_cfg1 & CABLE_ILLEGAL_A) == 0 ||
+ (scsi_cfg1 & CABLE_ILLEGAL_B) == 0)
+ {
+ asc_dvc->err_code |= ASC_IERR_ILLEGAL_CONNECTION;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If the internal narrow cable is reversed all of the SCSI_CTRL
+ * register signals will be set. Check for and return an error if
+ * this condition is found.
+ */
+ if ((AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL) & 0x3F07) == 0x3F07)
+ {
+ asc_dvc->err_code |= ASC_IERR_REVERSED_CABLE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If this is a differential board and a single-ended device
+ * is attached to one of the connectors, return an error.
+ */
+ if ((scsi_cfg1 & DIFF_MODE) && (scsi_cfg1 & DIFF_SENSE) == 0)
+ {
+ asc_dvc->err_code |= ASC_IERR_SINGLE_END_DEVICE;
+ return ADV_ERROR;
+ }
+
+ /*
+ * If automatic termination control is enabled, then set the
+ * termination value based on a table listed in a_condor.h.
+ *
+ * If manual termination was specified with an EEPROM setting
+ * then 'termination' was set-up in AdvInitFromEEP() and
+ * is ready to be 'ored' into SCSI_CFG1.
+ */
+ if (asc_dvc->cfg->termination == 0)
+ {
+ /*
+ * The software always controls termination by setting TERM_CTL_SEL.
+ * If TERM_CTL_SEL were set to 0, the hardware would set termination.
+ */
+ asc_dvc->cfg->termination |= TERM_CTL_SEL;
+
+ switch(scsi_cfg1 & CABLE_DETECT)
+ {
+ /* TERM_CTL_H: on, TERM_CTL_L: on */
+ case 0x3: case 0x7: case 0xB: case 0xD: case 0xE: case 0xF:
+ asc_dvc->cfg->termination |= (TERM_CTL_H | TERM_CTL_L);
+ break;
+
+ /* TERM_CTL_H: on, TERM_CTL_L: off */
+ case 0x1: case 0x5: case 0x9: case 0xA: case 0xC:
+ asc_dvc->cfg->termination |= TERM_CTL_H;
+ break;
+
+ /* TERM_CTL_H: off, TERM_CTL_L: off */
+ case 0x2: case 0x6:
+ break;
+ }
+ }
+
+ /*
+ * Clear any set TERM_CTL_H and TERM_CTL_L bits.
+ */
+ scsi_cfg1 &= ~TERM_CTL;
+
+ /*
+ * Invert the TERM_CTL_H and TERM_CTL_L bits and then
+ * set 'scsi_cfg1'. The TERM_POL bit does not need to be
+ * referenced, because the hardware internally inverts
+ * the Termination High and Low bits if TERM_POL is set.
+ */
+ scsi_cfg1 |= (TERM_CTL_SEL | (~asc_dvc->cfg->termination & TERM_CTL));
+
+ /*
+ * Set SCSI_CFG1 Microcode Default Value
+ *
+ * Set filter value and possibly modified termination control
+ * bits in the Microcode SCSI_CFG1 Register Value.
+ *
+ * The microcode will set the SCSI_CFG1 register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SCSI_CFG1,
+ FLTR_11_TO_20NS | scsi_cfg1);
+
+ /*
+ * Set SEL_MASK Microcode Default Value
+ *
+ * The microcode will set the SEL_MASK register using this value
+ * after it is started below.
+ */
+ AdvWriteWordLram(iop_base, ASC_MC_DEFAULT_SEL_MASK,
+ ADV_TID_TO_TIDMASK(asc_dvc->chip_scsi_id));
+
+ /*
+ * Link all the RISC Queue Lists together in a doubly-linked
+ * NULL terminated list.
+ *
+ * Skip the NULL (0) queue which is not used.
+ */
+ for (i = 1, rql_addr = ASC_MC_RISC_Q_LIST_BASE + ASC_MC_RISC_Q_LIST_SIZE;
+ i < ASC_MC_RISC_Q_TOTAL_CNT;
+ i++, rql_addr += ASC_MC_RISC_Q_LIST_SIZE)
+ {
+ /*
+ * Set the current RISC Queue List's RQL_FWD and RQL_BWD pointers
+ * in a one word write and set the state (RQL_STATE) to free.
+ */
+ AdvWriteWordLram(iop_base, rql_addr, ((i + 1) + ((i - 1) << 8)));
+ AdvWriteByteLram(iop_base, rql_addr + RQL_STATE, ASC_MC_QS_FREE);
+ }
+
+ /*
+ * Set the Host and RISC Queue List pointers.
+ *
+ * Both sets of pointers are initialized with the same values:
+ * ASC_MC_RISC_Q_FIRST(0x01) and ASC_MC_RISC_Q_LAST (0xFF).
+ */
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_READY, ASC_MC_RISC_Q_FIRST);
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_DONE, ASC_MC_RISC_Q_LAST);
+
+ AdvWriteByteLram(iop_base, ASC_MC_RISC_NEXT_READY, ASC_MC_RISC_Q_FIRST);
+ AdvWriteByteLram(iop_base, ASC_MC_RISC_NEXT_DONE, ASC_MC_RISC_Q_LAST);
+
+ /*
+ * Finally, set up the last RISC Queue List (255) with
+ * a NULL forward pointer.
+ */
+ AdvWriteWordLram(iop_base, rql_addr, (ASC_MC_NULL_Q + ((i - 1) << 8)));
+ AdvWriteByteLram(iop_base, rql_addr + RQL_STATE, ASC_MC_QS_FREE);
+
+ AdvWriteByteRegister(iop_base, IOPB_INTR_ENABLES,
+ (ADV_INTR_ENABLE_HOST_INTR | ADV_INTR_ENABLE_GLOBAL_INTR));
+
+ /*
+ * Note: Don't remove the use of a temporary variable in
+ * the following code, otherwise the Microsoft C compiler
+ * will turn the following lines into a no-op.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_CODE_BEGIN_ADDR, word);
+ AdvWriteWordRegister(iop_base, IOPW_PC, word);
+
+ /* finally, finally, gentlemen, start your engine */
+ AdvWriteWordRegister(iop_base, IOPW_RISC_CSR, ADV_RISC_CSR_RUN);
+
+ return warn_code;
+}
+
+/*
+ * Read the board's EEPROM configuration. Set fields in ADV_DVC_VAR and
+ * ADV_DVC_CFG based on the EEPROM settings. The chip is stopped while
+ * all of this is done.
+ *
+ * On failure set the ADV_DVC_VAR field 'err_code' and return ADV_ERROR.
+ *
+ * For a non-fatal error return a warning code. If there are no warnings
+ * then 0 is returned.
+ *
+ * Note: Chip is stopped on entry.
+ */
+ASC_INITFUNC(
+STATIC int
+AdvInitFromEEP(ADV_DVC_VAR *asc_dvc)
+)
+{
+ AdvPortAddr iop_base;
+ ushort warn_code;
+ ADVEEP_CONFIG eep_config;
+ int i;
+
+ iop_base = asc_dvc->iop_base;
+
+ warn_code = 0;
+
+ /*
+ * Read the board's EEPROM configuration.
+ *
+ * Set default values if a bad checksum is found.
+ */
+ if (AdvGetEEPConfig(iop_base, &eep_config) != eep_config.check_sum)
+ {
+ warn_code |= ASC_WARN_EEPROM_CHKSUM;
+
+ /*
+ * Set EEPROM default values.
+ */
+ for (i = 0; i < sizeof(ADVEEP_CONFIG); i++)
+ {
+ *((uchar *) &eep_config + i) =
+ *((uchar *) &Default_EEPROM_Config + i);
+ }
+
+ /*
+ * Assume the 6 byte board serial number that was read
+ * from EEPROM is correct even if the EEPROM checksum
+ * failed.
+ */
+ eep_config.serial_number_word3 =
+ AdvReadEEPWord(iop_base, ASC_EEP_DVC_CFG_END - 1);
+ eep_config.serial_number_word2 =
+ AdvReadEEPWord(iop_base, ASC_EEP_DVC_CFG_END - 2);
+ eep_config.serial_number_word1 =
+ AdvReadEEPWord(iop_base, ASC_EEP_DVC_CFG_END - 3);
+ AdvSetEEPConfig(iop_base, &eep_config);
+ }
+
+ /*
+ * Set ADV_DVC_VAR and ADV_DVC_CFG variables from the
+ * EEPROM configuration that was read.
+ *
+ * This is the mapping of EEPROM fields to Adv Library fields.
+ */
+ asc_dvc->wdtr_able = eep_config.wdtr_able;
+ asc_dvc->sdtr_able = eep_config.sdtr_able;
+ asc_dvc->ultra_able = eep_config.ultra_able;
+ asc_dvc->tagqng_able = eep_config.tagqng_able;
+ asc_dvc->cfg->disc_enable = eep_config.disc_enable;
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+ asc_dvc->chip_scsi_id = (eep_config.adapter_scsi_id & ADV_MAX_TID);
+ asc_dvc->start_motor = eep_config.start_motor;
+ asc_dvc->scsi_reset_wait = eep_config.scsi_reset_delay;
+ asc_dvc->cfg->bios_boot_wait = eep_config.bios_boot_delay;
+ asc_dvc->bios_ctrl = eep_config.bios_ctrl;
+ asc_dvc->no_scam = eep_config.scam_tolerant;
+ asc_dvc->cfg->serial1 = eep_config.serial_number_word1;
+ asc_dvc->cfg->serial2 = eep_config.serial_number_word2;
+ asc_dvc->cfg->serial3 = eep_config.serial_number_word3;
+
+ /*
+ * Set the host maximum queuing (max. 253, min. 16) and the per device
+ * maximum queuing (max. 63, min. 4).
+ */
+ if (eep_config.max_host_qng > ASC_DEF_MAX_HOST_QNG)
+ {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else if (eep_config.max_host_qng < ASC_DEF_MIN_HOST_QNG)
+ {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_host_qng == 0)
+ {
+ eep_config.max_host_qng = ASC_DEF_MAX_HOST_QNG;
+ } else
+ {
+ eep_config.max_host_qng = ASC_DEF_MIN_HOST_QNG;
+ }
+ }
+
+ if (eep_config.max_dvc_qng > ASC_DEF_MAX_DVC_QNG)
+ {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else if (eep_config.max_dvc_qng < ASC_DEF_MIN_DVC_QNG)
+ {
+ /* If the value is zero, assume it is uninitialized. */
+ if (eep_config.max_dvc_qng == 0)
+ {
+ eep_config.max_dvc_qng = ASC_DEF_MAX_DVC_QNG;
+ } else
+ {
+ eep_config.max_dvc_qng = ASC_DEF_MIN_DVC_QNG;
+ }
+ }
+
+ /*
+ * If 'max_dvc_qng' is greater than 'max_host_qng', then
+ * set 'max_dvc_qng' to 'max_host_qng'.
+ */
+ if (eep_config.max_dvc_qng > eep_config.max_host_qng)
+ {
+ eep_config.max_dvc_qng = eep_config.max_host_qng;
+ }
+
+ /*
+ * Set ADV_DVC_VAR 'max_host_qng' and ADV_DVC_CFG 'max_dvc_qng'
+ * values based on possibly adjusted EEPROM values.
+ */
+ asc_dvc->max_host_qng = eep_config.max_host_qng;
+ asc_dvc->max_dvc_qng = eep_config.max_dvc_qng;
+
+
+ /*
+ * If the EEPROM 'termination' field is set to automatic (0), then set
+ * the ADV_DVC_CFG 'termination' field to automatic also.
+ *
+ * If the termination is specified with a non-zero 'termination'
+ * value check that a legal value is set and set the ADV_DVC_CFG
+ * 'termination' field appropriately.
+ */
+ if (eep_config.termination == 0)
+ {
+ asc_dvc->cfg->termination = 0; /* auto termination */
+ } else
+ {
+ /* Enable manual control with low off / high off. */
+ if (eep_config.termination == 1)
+ {
+ asc_dvc->cfg->termination = TERM_CTL_SEL;
+
+ /* Enable manual control with low off / high on. */
+ } else if (eep_config.termination == 2)
+ {
+ asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H;
+
+ /* Enable manual control with low on / high on. */
+ } else if (eep_config.termination == 3)
+ {
+ asc_dvc->cfg->termination = TERM_CTL_SEL | TERM_CTL_H | TERM_CTL_L;
+ } else
+ {
+ /*
+ * The EEPROM 'termination' field contains a bad value. Use
+ * automatic termination instead.
+ */
+ asc_dvc->cfg->termination = 0;
+ warn_code |= ASC_WARN_EEPROM_TERMINATION;
+ }
+ }
+
+ return warn_code;
+}
+
+/*
+ * Read EEPROM configuration into the specified buffer.
+ *
+ * Return a checksum based on the EEPROM configuration read.
+ */
+ASC_INITFUNC(
+STATIC ushort
+AdvGetEEPConfig(AdvPortAddr iop_base, ADVEEP_CONFIG *cfg_buf)
+)
+{
+ ushort wval, chksum;
+ ushort *wbuf;
+ int eep_addr;
+
+ wbuf = (ushort *) cfg_buf;
+ chksum = 0;
+
+ for (eep_addr = ASC_EEP_DVC_CFG_BEGIN;
+ eep_addr < ASC_EEP_DVC_CFG_END;
+ eep_addr++, wbuf++)
+ {
+ wval = AdvReadEEPWord(iop_base, eep_addr);
+ chksum += wval;
+ *wbuf = wval;
+ }
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ wbuf++;
+ for (eep_addr = ASC_EEP_DVC_CTL_BEGIN;
+ eep_addr < ASC_EEP_MAX_WORD_ADDR;
+ eep_addr++, wbuf++)
+ {
+ *wbuf = AdvReadEEPWord(iop_base, eep_addr);
+ }
+ return chksum;
+}
+
+/*
+ * Read the EEPROM from specified location
+ */
+ASC_INITFUNC(
+STATIC ushort
+AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
+)
+{
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
+ ASC_EEP_CMD_READ | eep_word_addr);
+ AdvWaitEEPCmd(iop_base);
+ return AdvReadWordRegister(iop_base, IOPW_EE_DATA);
+}
+
+/*
+ * Wait for EEPROM command to complete
+ */
+ASC_INITFUNC(
+STATIC void
+AdvWaitEEPCmd(AdvPortAddr iop_base)
+)
+{
+ int eep_delay_ms;
+
+ for (eep_delay_ms = 0; eep_delay_ms < ASC_EEP_DELAY_MS; eep_delay_ms++)
+ {
+ if (AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE)
+ {
+ break;
+ }
+ DvcSleepMilliSecond(1);
+ }
+ if ((AdvReadWordRegister(iop_base, IOPW_EE_CMD) & ASC_EEP_CMD_DONE) == 0)
+ {
+ ADV_ASSERT(0);
+ }
+ return;
+}
+
+/*
+ * Write the EEPROM from 'cfg_buf'.
+ */
+ASC_INITFUNC(
+STATIC void
+AdvSetEEPConfig(AdvPortAddr iop_base, ADVEEP_CONFIG *cfg_buf)
+)
+{
+ ushort *wbuf;
+ ushort addr, chksum;
+
+ wbuf = (ushort *) cfg_buf;
+ chksum = 0;
+
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_ABLE);
+ AdvWaitEEPCmd(iop_base);
+
+ /*
+ * Write EEPROM from word 0 to word 15
+ */
+ for (addr = ASC_EEP_DVC_CFG_BEGIN;
+ addr < ASC_EEP_DVC_CFG_END; addr++, wbuf++)
+ {
+ chksum += *wbuf;
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, *wbuf);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ DvcSleepMilliSecond(ASC_EEP_DELAY_MS);
+ }
+
+ /*
+ * Write EEPROM checksum at word 18
+ */
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, chksum);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ wbuf++; /* skip over check_sum */
+
+ /*
+ * Write EEPROM OEM name at words 19 to 26
+ */
+ for (addr = ASC_EEP_DVC_CTL_BEGIN;
+ addr < ASC_EEP_MAX_WORD_ADDR; addr++, wbuf++)
+ {
+ AdvWriteWordRegister(iop_base, IOPW_EE_DATA, *wbuf);
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE | addr);
+ AdvWaitEEPCmd(iop_base);
+ }
+ AdvWriteWordRegister(iop_base, IOPW_EE_CMD, ASC_EEP_CMD_WRITE_DISABLE);
+ AdvWaitEEPCmd(iop_base);
+ return;
+}
+
+/*
+ * This function resets the chip and SCSI bus
+ *
+ * It is up to the caller to add a delay to let the bus settle after
+ * calling this function.
+ *
+ * The SCSI_CFG0, SCSI_CFG1, and MEM_CFG registers are set up in
+ * AdvInitAsc3550Driver(). Here, when writing to one of these registers,
+ * read the current value first and then write back the modified value.
+ *
+ * Note: A SCSI Bus Reset can not be done until after the EEPROM
+ * configuration is read to determine whether SCSI Bus Resets
+ * should be performed.
+ */
+ASC_INITFUNC(
+STATIC void
+AdvResetChip(ADV_DVC_VAR *asc_dvc)
+)
+{
+ AdvPortAddr iop_base;
+ ushort word;
+ uchar byte;
+
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Reset Chip.
+ */
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_RESET);
+ DvcSleepMilliSecond(100);
+ AdvWriteWordRegister(iop_base, IOPW_CTRL_REG, ADV_CTRL_REG_CMD_WR_IO_REG);
+
+ /*
+ * Initialize Chip registers.
+ *
+ * Note: Don't remove the use of a temporary variable in the following
+ * code, otherwise the Microsoft C compiler will turn the following lines
+ * into a no-op.
+ */
+ byte = AdvReadByteRegister(iop_base, IOPB_MEM_CFG);
+ byte |= RAM_SZ_8KB;
+ AdvWriteByteRegister(iop_base, IOPB_MEM_CFG, byte);
+
+ word = AdvReadWordRegister(iop_base, IOPW_SCSI_CFG1);
+ word &= ~BIG_ENDIAN;
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CFG1, word);
+
+ /*
+ * Setting the START_CTL_EMFU 3:2 bits sets a FIFO threshold
+ * of 128 bytes. This register is only accessible to the host.
+ */
+ AdvWriteByteRegister(iop_base, IOPB_DMA_CFG0,
+ START_CTL_EMFU | READ_CMD_MRM);
+}
+
+/* a_advlib.c */
+/*
+ * Description:
+ * Send a SCSI request to the ASC3550 chip
+ *
+ * If there is no SG list for the request, set 'sg_entry_cnt' to 0.
+ *
+ * If 'sg_real_addr' is non-zero on entry, AscGetSGList() will not be
+ * called. It is assumed the caller has already initialized 'sg_real_addr'.
+ *
+ * Return:
+ * ADV_SUCCESS(1) - the request is in the mailbox
+ * ADV_BUSY(0) - total request count > 253, try later
+ * ADV_ERROR(-1) - invalid scsi request Q
+ */
+STATIC int
+AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc,
+ ADV_SCSI_REQ_Q *scsiq)
+{
+ if (scsiq == (ADV_SCSI_REQ_Q *) 0L)
+ {
+ /* 'scsiq' should never be NULL. */
+ ADV_ASSERT(0);
+ return ADV_ERROR;
+ }
+
+ return AdvSendScsiCmd(asc_dvc, scsiq);
+}
+
+/*
+ * Reset SCSI Bus and purge all outstanding requests.
+ *
+ * Return Value:
+ * ADV_TRUE(1) - All requests are purged and SCSI Bus is reset.
+ *
+ * Note: Should always return ADV_TRUE.
+ */
+STATIC int
+AdvResetSB(ADV_DVC_VAR *asc_dvc)
+{
+ int status;
+
+ status = AdvSendIdleCmd(asc_dvc, (ushort) IDLE_CMD_SCSI_RESET, 0L, 0);
+
+ AdvResetSCSIBus(asc_dvc);
+
+ return status;
+}
+
+/*
+ * Reset SCSI Bus and delay.
+ */
+STATIC void
+AdvResetSCSIBus(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ ushort scsi_ctrl;
+
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * The microcode currently sets the SCSI Bus Reset signal while
+ * handling the AdvSendIdleCmd() IDLE_CMD_SCSI_RESET command above.
+ * But the SCSI Bus Reset Hold Time in the microcode is not deterministic
+ * (it may in fact be for less than the SCSI Spec. minimum of 25 us).
+ * Therefore on return the Adv Library sets the SCSI Bus Reset signal
+ * for ASC_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
+ * than 25 us.
+ */
+ scsi_ctrl = AdvReadWordRegister(iop_base, IOPW_SCSI_CTRL);
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CTRL,
+ scsi_ctrl | ADV_SCSI_CTRL_RSTOUT);
+ DvcDelayMicroSecond(asc_dvc, (ushort) ASC_SCSI_RESET_HOLD_TIME_US);
+ AdvWriteWordRegister(iop_base, IOPW_SCSI_CTRL,
+ scsi_ctrl & ~ADV_SCSI_CTRL_RSTOUT);
+
+ DvcSleepMilliSecond((ulong) asc_dvc->scsi_reset_wait * 1000);
+}
+
+
+/*
+ * Adv Library Interrupt Service Routine
+ *
+ * This function is called by a driver's interrupt service routine.
+ * The function disables and re-enables interrupts.
+ *
+ * When a microcode idle command is completed, the ADV_DVC_VAR
+ * 'idle_cmd_done' field is set to ADV_TRUE.
+ *
+ * Note: AdvISR() can be called when interrupts are disabled or even
+ * when there is no hardware interrupt condition present. It will
+ * always check for completed idle commands and microcode requests.
+ * This is an important feature that shouldn't be changed because it
+ * allows commands to be completed from polling mode loops.
+ *
+ * Return:
+ * ADV_TRUE(1) - interrupt was pending
+ * ADV_FALSE(0) - no interrupt was pending
+ */
+STATIC int
+AdvISR(ADV_DVC_VAR *asc_dvc)
+{
+ AdvPortAddr iop_base;
+ uchar int_stat;
+ ushort next_done_loc, target_bit;
+ int completed_q;
+ long flags;
+ ADV_SCSI_REQ_Q *scsiq;
+ ASC_REQ_SENSE *sense_data;
+ int ret;
+
+ flags = DvcEnterCritical();
+ iop_base = asc_dvc->iop_base;
+
+ if (AdvIsIntPending(iop_base))
+ {
+ ret = ADV_TRUE;
+ } else
+ {
+ ret = ADV_FALSE;
+ }
+
+ /* Reading the register clears the interrupt. */
+ int_stat = AdvReadByteRegister(iop_base, IOPB_INTR_STATUS_REG);
+
+ if (int_stat & ADV_INTR_STATUS_INTRB)
+ {
+ asc_dvc->idle_cmd_done = ADV_TRUE;
+ }
+
+ /*
+ * Notify the driver of a hardware detected SCSI Bus Reset.
+ */
+ if (int_stat & ADV_INTR_STATUS_INTRC)
+ {
+ if (asc_dvc->sbreset_callback != 0)
+ {
+ (*(ADV_SBRESET_CALLBACK) asc_dvc->sbreset_callback)(asc_dvc);
+ }
+ }
+
+ /*
+ * ASC_MC_HOST_NEXT_DONE (0x129) is actually the last completed RISC
+ * Queue List request. Its forward pointer (RQL_FWD) points to the
+ * current completed RISC Queue List request.
+ */
+ AdvReadByteLram(iop_base, ASC_MC_HOST_NEXT_DONE, next_done_loc);
+ next_done_loc = ASC_MC_RISC_Q_LIST_BASE +
+ (next_done_loc * ASC_MC_RISC_Q_LIST_SIZE) + RQL_FWD;
+
+ AdvReadByteLram(iop_base, next_done_loc, completed_q);
+
+ /* Loop until all completed Q's are processed. */
+ while (completed_q != ASC_MC_NULL_Q)
+ {
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_DONE, completed_q);
+
+ next_done_loc = ASC_MC_RISC_Q_LIST_BASE +
+ (completed_q * ASC_MC_RISC_Q_LIST_SIZE);
+
+ /*
+ * Read the ADV_SCSI_REQ_Q virtual address pointer from
+ * the RISC list entry. The microcode has changed the
+ * ADV_SCSI_REQ_Q physical address to its virtual address.
+ *
+ * Refer to comments at the end of AdvSendScsiCmd() for
+ * more information on the RISC list structure.
+ */
+ {
+ ushort lsw, msw;
+ AdvReadWordLram(iop_base, next_done_loc + RQL_PHYADDR, lsw);
+ AdvReadWordLram(iop_base, next_done_loc + RQL_PHYADDR + 2, msw);
+
+ scsiq = (ADV_SCSI_REQ_Q *) (((ulong) msw << 16) | lsw);
+ }
+ ADV_ASSERT(scsiq != NULL);
+
+ target_bit = ADV_TID_TO_TIDMASK(scsiq->target_id);
+
+ /*
+ * Clear request microcode control flag.
+ */
+ scsiq->cntl = 0;
+
+ /*
+ * Check Condition handling
+ */
+ if ((scsiq->done_status == QD_WITH_ERROR) &&
+ (scsiq->scsi_status == SS_CHK_CONDITION) &&
+ (sense_data = (ASC_REQ_SENSE *) scsiq->vsense_addr) != 0 &&
+ (scsiq->orig_sense_len - scsiq->sense_len) >= ASC_MIN_SENSE_LEN)
+ {
+ /*
+ * Command returned with a check condition and valid
+ * sense data.
+ */
+ }
+ /*
+ * If the command that completed was a SCSI INQUIRY and
+ * LUN 0 was sent the command, then process the INQUIRY
+ * command information for the device.
+ */
+ else if (scsiq->done_status == QD_NO_ERROR &&
+ scsiq->cdb[0] == SCSICMD_Inquiry &&
+ scsiq->target_lun == 0)
+ {
+ AdvInquiryHandling(asc_dvc, scsiq);
+ }
+
+
+ /* Change the RISC Queue List state to free. */
+ AdvWriteByteLram(iop_base, next_done_loc + RQL_STATE, ASC_MC_QS_FREE);
+
+ /* Get the RISC Queue List forward pointer. */
+ AdvReadByteLram(iop_base, next_done_loc + RQL_FWD, completed_q);
+
+ /*
+ * Notify the driver of the completed request by passing
+ * the ADV_SCSI_REQ_Q pointer to its callback function.
+ */
+ ADV_ASSERT(asc_dvc->cur_host_qng > 0);
+ asc_dvc->cur_host_qng--;
+ scsiq->a_flag |= ADV_SCSIQ_DONE;
+ (*(ADV_ISR_CALLBACK) asc_dvc->isr_callback)(asc_dvc, scsiq);
+ /*
+ * Note: After the driver callback function is called, 'scsiq'
+ * can no longer be referenced.
+ *
+ * Fall through and continue processing other completed
+ * requests...
+ */
+
+ /*
+ * Disable interrupts again in case the driver inadvertently
+ * enabled interrupts in its callback function.
+ *
+ * The DvcEnterCritical() return value is ignored, because
+ * the 'flags' saved when AdvISR() was first entered will be
+ * used to restore the interrupt flag on exit.
+ */
+ (void) DvcEnterCritical();
+ }
+ DvcLeaveCritical(flags);
+ return ret;
+}
+
+/*
+ * Send an idle command to the chip and wait for completion.
+ *
+ * Interrupts do not have to be enabled on entry.
+ *
+ * Return Values:
+ * ADV_TRUE - command completed successfully
+ * ADV_FALSE - command failed
+ */
+STATIC int
+AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc,
+ ushort idle_cmd,
+ ulong idle_cmd_parameter,
+ int flags)
+{
+ int last_int_level;
+ ulong i;
+ AdvPortAddr iop_base;
+ int ret;
+
+ asc_dvc->idle_cmd_done = 0;
+
+ last_int_level = DvcEnterCritical();
+ iop_base = asc_dvc->iop_base;
+
+ /*
+ * Write the idle command value after the idle command parameter
+ * has been written to avoid a race condition. If the order is not
+ * followed, the microcode may process the idle command before the
+ * parameters have been written to LRAM.
+ */
+ AdvWriteDWordLram(iop_base, ASC_MC_IDLE_PARA_STAT, idle_cmd_parameter);
+ AdvWriteWordLram(iop_base, ASC_MC_IDLE_CMD, idle_cmd);
+ DvcLeaveCritical(last_int_level);
+
+ /*
+ * If the 'flags' argument contains the ADV_NOWAIT flag, then
+ * return with success.
+ */
+ if (flags & ADV_NOWAIT)
+ {
+ return ADV_TRUE;
+ }
+
+ for (i = 0; i < SCSI_WAIT_10_SEC * SCSI_MS_PER_SEC; i++)
+ {
+ /*
+ * 'idle_cmd_done' is set by AdvISR().
+ */
+ if (asc_dvc->idle_cmd_done)
+ {
+ break;
+ }
+ DvcSleepMilliSecond(1);
+
+ /*
+ * If interrupts were disabled on entry to AdvSendIdleCmd(),
+ * then they will still be disabled here. Call AdvISR() to
+ * check for the idle command completion.
+ */
+ (void) AdvISR(asc_dvc);
+ }
+
+ last_int_level = DvcEnterCritical();
+
+ if (asc_dvc->idle_cmd_done == ADV_FALSE)
+ {
+ ADV_ASSERT(0); /* The idle command should never timeout. */
+ return ADV_FALSE;
+ } else
+ {
+ AdvReadWordLram(iop_base, ASC_MC_IDLE_PARA_STAT, ret);
+ return ret;
+ }
+}
+
+/*
+ * Send the SCSI request block to the adapter
+ *
+ * Each of the 255 Adv Library/Microcode RISC Lists or mailboxes has the
+ * following structure:
+ *
+ * 0: RQL_FWD - RISC list forward pointer (1 byte)
+ * 1: RQL_BWD - RISC list backward pointer (1 byte)
+ * 2: RQL_STATE - RISC list state byte - free, ready, done, aborted (1 byte)
+ * 3: RQL_TID - request target id (1 byte)
+ * 4: RQL_PHYADDR - ADV_SCSI_REQ_Q physical pointer (4 bytes)
+ *
+ * Return:
+ * ADV_SUCCESS(1) - the request is in the mailbox
+ * ADV_BUSY(0) - total request count > 253, try later
+ */
+STATIC int
+AdvSendScsiCmd(
+ ADV_DVC_VAR *asc_dvc,
+ ADV_SCSI_REQ_Q *scsiq)
+{
+ ushort next_ready_loc;
+ uchar next_ready_loc_fwd;
+ int last_int_level;
+ AdvPortAddr iop_base;
+ long req_size;
+ ulong q_phy_addr;
+
+ /*
+ * The ADV_SCSI_REQ_Q 'target_id' field should never be equal
+ * to the host adapter ID or exceed ADV_MAX_TID.
+ */
+ if (scsiq->target_id == asc_dvc->chip_scsi_id ||
+ scsiq->target_id > ADV_MAX_TID)
+ {
+ scsiq->host_status = QHSTA_M_INVALID_DEVICE;
+ scsiq->done_status = QD_WITH_ERROR;
+ return ADV_ERROR;
+ }
+
+ iop_base = asc_dvc->iop_base;
+
+ last_int_level = DvcEnterCritical();
+
+ if (asc_dvc->cur_host_qng >= asc_dvc->max_host_qng)
+ {
+ DvcLeaveCritical(last_int_level);
+ return ADV_BUSY;
+ } else
+ {
+ ADV_ASSERT(asc_dvc->cur_host_qng < ASC_MC_RISC_Q_TOTAL_CNT);
+ asc_dvc->cur_host_qng++;
+ }
+
+ /*
+ * Clear the ADV_SCSI_REQ_Q done flag.
+ */
+ scsiq->a_flag &= ~ADV_SCSIQ_DONE;
+
+ /*
+ * Save the original sense buffer length.
+ *
+ * After the request completes, 'sense_len' will be set to the residual
+ * byte count of the Auto-Request Sense if a command returns CHECK
+ * CONDITION and the Sense Data is valid, as indicated by 'host_status'
+ * not being set to QHSTA_M_AUTO_REQ_SENSE_FAIL. To determine the valid
+ * Sense Data Length, subtract 'sense_len' from 'orig_sense_len'.
+ */
+ scsiq->orig_sense_len = scsiq->sense_len;
+
+ AdvReadByteLram(iop_base, ASC_MC_HOST_NEXT_READY, next_ready_loc);
+ next_ready_loc = ASC_MC_RISC_Q_LIST_BASE +
+ (next_ready_loc * ASC_MC_RISC_Q_LIST_SIZE);
+
+ /*
+ * Write the physical address of the Q to the mailbox.
+ * We need to skip the first four bytes, because the microcode
+ * uses them internally for linking Q's together.
+ */
+ req_size = sizeof(ADV_SCSI_REQ_Q);
+ q_phy_addr = DvcGetPhyAddr(asc_dvc, scsiq,
+ (uchar *) scsiq, &req_size,
+ ADV_IS_SCSIQ_FLAG);
+ ADV_ASSERT(ADV_DWALIGN(q_phy_addr) == q_phy_addr);
+ ADV_ASSERT(req_size >= sizeof(ADV_SCSI_REQ_Q));
+
+ scsiq->scsiq_ptr = (ADV_SCSI_REQ_Q *) scsiq;
+
+ /*
+ * The RISC list structure, which 'next_ready_loc' is a pointer
+ * to in microcode LRAM, has the format detailed in the comment
+ * header for this function.
+ *
+ * Write the ADV_SCSI_REQ_Q physical pointer to 'next_ready_loc' request.
+ */
+ AdvWriteDWordLram(iop_base, next_ready_loc + RQL_PHYADDR, q_phy_addr);
+
+ /* Write target_id to 'next_ready_loc' request. */
+ AdvWriteByteLram(iop_base, next_ready_loc + RQL_TID, scsiq->target_id);
+
+ /*
+ * Set the ASC_MC_HOST_NEXT_READY (0x128) microcode variable to
+ * the 'next_ready_loc' request forward pointer.
+ *
+ * Do this *before* changing the 'next_ready_loc' queue to QS_READY.
+ * After the state is changed to QS_READY 'RQL_FWD' will be changed
+ * by the microcode.
+ *
+ * NOTE: The temporary variable 'next_ready_loc_fwd' is required to
+ * prevent some compilers from optimizing out 'AdvReadByteLram()' if
+ * it were used as the 3rd argument to 'AdvWriteByteLram()'.
+ */
+ AdvReadByteLram(iop_base, next_ready_loc + RQL_FWD, next_ready_loc_fwd);
+ AdvWriteByteLram(iop_base, ASC_MC_HOST_NEXT_READY, next_ready_loc_fwd);
+
+ /*
+ * Change the state of 'next_ready_loc' request from QS_FREE to
+ * QS_READY which will cause the microcode to pick it up and
+ * execute it.
+ *
+ * Can't reference 'next_ready_loc' after changing the request
+ * state to QS_READY. The microcode now owns the request.
+ */
+ AdvWriteByteLram(iop_base, next_ready_loc + RQL_STATE, ASC_MC_QS_READY);
+
+ DvcLeaveCritical(last_int_level);
+ return ADV_SUCCESS;
+}
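+
+/*
+ * Illustrative sketch only (not part of the original driver): assuming
+ * ASC_MC_RISC_Q_LIST_SIZE is 8 bytes, each RISC list entry written above
+ * can be pictured as the following packed layout, with offsets matching
+ * the header comment of this function:
+ *
+ *   struct adv_risc_q_list {
+ *       uchar rql_fwd;       offset 0, RQL_FWD:     forward pointer
+ *       uchar rql_bwd;       offset 1, RQL_BWD:     backward pointer
+ *       uchar rql_state;     offset 2, RQL_STATE:   free/ready/done/aborted
+ *       uchar rql_tid;       offset 3, RQL_TID:     target id
+ *       ulong rql_phyaddr;   offset 4, RQL_PHYADDR: ADV_SCSI_REQ_Q physical address
+ *   };
+ */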
+
+/*
+ * Inquiry Information Byte 7 Handling
+ *
+ * Handle SCSI Inquiry Command information for a device by setting
+ * microcode operating variables that affect WDTR, SDTR, and Tag
+ * Queuing.
+ */
+STATIC void
+AdvInquiryHandling(
+ ADV_DVC_VAR *asc_dvc,
+ ADV_SCSI_REQ_Q *scsiq)
+{
+ AdvPortAddr iop_base;
+ uchar tid;
+ ASC_SCSI_INQUIRY *inq;
+ ushort tidmask;
+ ushort cfg_word;
+
+ /*
+ * AdvInquiryHandling() requires up to INQUIRY information Byte 7
+ * to be available.
+ *
+ * If less than 8 bytes of INQUIRY information were requested or less
+ * than 8 bytes were transferred, then return. cdb[4] is the request
+ * length and the ADV_SCSI_REQ_Q 'data_cnt' field is set by the
+ * microcode to the transfer residual count.
+ */
+ if (scsiq->cdb[4] < 8 || (scsiq->cdb[4] - scsiq->data_cnt) < 8)
+ {
+ return;
+ }
+
+ iop_base = asc_dvc->iop_base;
+ tid = scsiq->target_id;
+ inq = (ASC_SCSI_INQUIRY *) scsiq->vdata_addr;
+
+ /*
+ * WDTR, SDTR, and Tag Queuing cannot be enabled for old devices.
+ */
+ if (inq->byte3.rsp_data_fmt < 2 && inq->byte2.ansi_apr_ver < 2)
+ {
+ return;
+ } else
+ {
+ /*
+ * INQUIRY Byte 7 Handling
+ *
+ * Use a device's INQUIRY byte 7 to determine whether it
+ * supports WDTR, SDTR, and Tag Queuing. If the feature
+ * is enabled in the EEPROM and the device supports the
+ * feature, then enable it in the microcode.
+ */
+
+ tidmask = ADV_TID_TO_TIDMASK(tid);
+
+ /*
+ * Wide Transfers
+ *
+ * If the EEPROM enabled WDTR for the device and the device
+ * supports wide bus (16 bit) transfers, then turn on the
+ * device's 'wdtr_able' bit and write the new value to the
+ * microcode.
+ */
+ if ((asc_dvc->wdtr_able & tidmask) && inq->byte7.WBus16)
+ {
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
+ if ((cfg_word & tidmask) == 0)
+ {
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_ABLE, cfg_word);
+
+ /*
+ * Clear the microcode "WDTR negotiation" done indicator
+ * for the target to cause it to negotiate with the new
+ * setting set above.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+ cfg_word &= ~tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_WDTR_DONE, cfg_word);
+ }
+ }
+
+ /*
+ * Synchronous Transfers
+ *
+ * If the EEPROM enabled SDTR for the device and the device
+ * supports synchronous transfers, then turn on the device's
+ * 'sdtr_able' bit. Write the new value to the microcode.
+ */
+ if ((asc_dvc->sdtr_able & tidmask) && inq->byte7.Sync)
+ {
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+ if ((cfg_word & tidmask) == 0)
+ {
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_ABLE, cfg_word);
+
+ /*
+ * Clear the microcode "SDTR negotiation" done indicator
+ * for the target to cause it to negotiate with the new
+ * setting set above.
+ */
+ AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+ cfg_word &= ~tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_SDTR_DONE, cfg_word);
+ }
+ }
+
+ /*
+ * If the EEPROM enabled Tag Queuing for the device and the
+ * device supports Tag Queuing, then turn on the device's
+ * 'tagqng_enable' bit in the microcode and set the microcode
+ * maximum command count to the ADV_DVC_VAR 'max_dvc_qng'
+ * value.
+ *
+ * Tag Queuing is disabled for the BIOS, which runs in polled
+ * mode and would see no benefit from Tag Queuing. Also, by
+ * disabling Tag Queuing in the BIOS, devices with Tag Queuing
+ * bugs will at least work with the BIOS.
+ */
+ if ((asc_dvc->tagqng_able & tidmask) && inq->byte7.CmdQue)
+ {
+ AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
+ cfg_word |= tidmask;
+ AdvWriteWordLram(iop_base, ASC_MC_TAGQNG_ABLE, cfg_word);
+ AdvWriteByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + tid,
+ asc_dvc->max_dvc_qng);
+ }
+ }
+}
diff --git a/linux/src/drivers/scsi/advansys.h b/linux/src/drivers/scsi/advansys.h
new file mode 100644
index 0000000..72e8aef
--- /dev/null
+++ b/linux/src/drivers/scsi/advansys.h
@@ -0,0 +1,174 @@
+/* $Id: advansys.h,v 1.1 1999/04/26 05:54:08 tb Exp $ */
+
+/*
+ * advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
+ *
+ * Copyright (c) 1995-1998 Advanced System Products, Inc.
+ * All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ * There is an AdvanSys Linux WWW page at:
+ * http://www.advansys.com/linux.html
+ *
+ * The latest version of the AdvanSys driver is available at:
+ * ftp://ftp.advansys.com/pub/linux
+ *
+ * Please send questions, comments, bug reports to:
+ * bobf@advansys.com (Bob Frey)
+ */
+
+#ifndef _ADVANSYS_H
+#define _ADVANSYS_H
+
+/* Convert Linux Version, Patch-level, Sub-level to LINUX_VERSION_CODE. */
+#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
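+/*
+ * For example, ASC_LINUX_VERSION(1,3,89) evaluates to
+ * 1*65536 + 3*256 + 89 = 66393, the code for kernel 1.3.89; the version
+ * checks below compare LINUX_VERSION_CODE against such values.
+ */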
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif /* LINUX_VERSION_CODE */
+
+/*
+ * Scsi_Host_Template function prototypes.
+ */
+int advansys_detect(Scsi_Host_Template *);
+int advansys_release(struct Scsi_Host *);
+const char *advansys_info(struct Scsi_Host *);
+int advansys_command(Scsi_Cmnd *);
+int advansys_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int advansys_abort(Scsi_Cmnd *);
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,89)
+int advansys_reset(Scsi_Cmnd *);
+#else /* version >= v1.3.89 */
+int advansys_reset(Scsi_Cmnd *, unsigned int);
+#endif /* version >= v1.3.89 */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+int advansys_biosparam(Disk *, int, int[]);
+#else /* version >= v1.3.0 */
+int advansys_biosparam(Disk *, kdev_t, int[]);
+extern struct proc_dir_entry proc_scsi_advansys;
+int advansys_proc_info(char *, char **, off_t, int, int, int);
+#endif /* version >= v1.3.0 */
+
+/* init/main.c setup function */
+void advansys_setup(char *, int *);
+
+/*
+ * AdvanSys Host Driver Scsi_Host_Template (struct SHT) from hosts.h.
+ */
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,0)
+#define ADVANSYS { \
+ NULL, /* struct SHT *next */ \
+ NULL, /* int *usage_count */ \
+ "advansys", /* char *name */ \
+ advansys_detect, /* int (*detect)(struct SHT *) */ \
+ advansys_release, /* int (*release)(struct Scsi_Host *) */ \
+ advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
+ advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
+ advansys_queuecommand, \
+ /* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
+ advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
+ advansys_reset, /* int (*reset)(Scsi_Cmnd *) */ \
+ NULL, /* int (*slave_attach)(int, int) */ \
+ advansys_biosparam, /* int (* bios_param)(Disk *, int, int []) */ \
+ /* \
+ * The following fields are set per adapter in advansys_detect(). \
+ */ \
+ 0, /* int can_queue */ \
+ 0, /* int this_id */ \
+ 0, /* short unsigned int sg_tablesize */ \
+ 0, /* short cmd_per_lun */ \
+ 0, /* unsigned char present */ \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ 1, /* unsigned unchecked_isa_dma:1 */ \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. According to the mid-level SCSI documentation \
+ * this obviates any performance gain provided by setting \
+ * 'use_clustering'. But empirically while CPU utilization is increased \
+ * by enabling clustering, I/O throughput increases as well. \
+ */ \
+ ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
+}
+#elif LINUX_VERSION_CODE < ASC_LINUX_VERSION(2,1,75)
+#define ADVANSYS { \
+ NULL, /* struct SHT *next */ \
+ NULL, \
+ /* version < v2.1.23 long *usage_count */ \
+ /* version >= v2.1.23 struct module * */ \
+ &proc_scsi_advansys, /* struct proc_dir_entry *proc_dir */ \
+ advansys_proc_info, \
+ /* int (*proc_info)(char *, char **, off_t, int, int, int) */ \
+ "advansys", /* const char *name */ \
+ advansys_detect, /* int (*detect)(struct SHT *) */ \
+ advansys_release, /* int (*release)(struct Scsi_Host *) */ \
+ advansys_info, /* const char *(*info)(struct Scsi_Host *) */ \
+ advansys_command, /* int (*command)(Scsi_Cmnd *) */ \
+ advansys_queuecommand, \
+ /* int (*queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)) */ \
+ advansys_abort, /* int (*abort)(Scsi_Cmnd *) */ \
+ advansys_reset, \
+ /* version < v1.3.89 int (*reset)(Scsi_Cmnd *) */ \
+ /* version >= v1.3.89 int (*reset)(Scsi_Cmnd *, unsigned int) */ \
+ NULL, /* int (*slave_attach)(int, int) */ \
+ advansys_biosparam, /* int (* bios_param)(Disk *, kdev_t, int []) */ \
+ /* \
+ * The following fields are set per adapter in advansys_detect(). \
+ */ \
+ 0, /* int can_queue */ \
+ 0, /* int this_id */ \
+ 0, /* short unsigned int sg_tablesize */ \
+ 0, /* short cmd_per_lun */ \
+ 0, /* unsigned char present */ \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ 1, /* unsigned unchecked_isa_dma:1 */ \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. According to the mid-level SCSI documentation \
+ * this obviates any performance gain provided by setting \
+ * 'use_clustering'. But empirically while CPU utilization is increased \
+ * by enabling clustering, I/O throughput increases as well. \
+ */ \
+ ENABLE_CLUSTERING, /* unsigned use_clustering:1 */ \
+}
+#else /* version >= v2.1.75 */
+#define ADVANSYS { \
+ proc_dir: &proc_scsi_advansys, \
+ proc_info: advansys_proc_info, \
+ name: "advansys", \
+ detect: advansys_detect, \
+ release: advansys_release, \
+ info: advansys_info, \
+ command: advansys_command, \
+ queuecommand: advansys_queuecommand, \
+ abort: advansys_abort, \
+ reset: advansys_reset, \
+ bios_param: advansys_biosparam, \
+ /* \
+ * Because the driver may control an ISA adapter 'unchecked_isa_dma' \
+ * must be set. The flag will be cleared in advansys_detect for non-ISA \
+ * adapters. Refer to the comment in scsi_module.c for more information. \
+ */ \
+ unchecked_isa_dma: 1, \
+ /* \
+ * All adapters controlled by this driver are capable of large \
+ * scatter-gather lists. According to the mid-level SCSI documentation \
+ * this obviates any performance gain provided by setting \
+ * 'use_clustering'. But empirically while CPU utilization is increased \
+ * by enabling clustering, I/O throughput increases as well. \
+ */ \
+ use_clustering: ENABLE_CLUSTERING, \
+}
+#endif /* version >= v2.1.75 */
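+/*
+ * Illustrative usage sketch (not part of the original header): in kernels
+ * of this era a low-level driver typically instantiates the template and
+ * lets the mid-level SCSI code register it, e.g.:
+ *
+ *   Scsi_Host_Template driver_template = ADVANSYS;
+ *   #include "scsi_module.c"
+ */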
+#endif /* _ADVANSYS_H */
diff --git a/linux/src/drivers/scsi/aha152x.c b/linux/src/drivers/scsi/aha152x.c
new file mode 100644
index 0000000..44fe1b0
--- /dev/null
+++ b/linux/src/drivers/scsi/aha152x.c
@@ -0,0 +1,3280 @@
+/* aha152x.c -- Adaptec AHA-152x driver
+ * Author: Jürgen E. Fischer, fischer@et-inf.fho-emden.de
+ * Copyright 1993, 1994, 1995, 1996 Jürgen E. Fischer
+ *
+ *
+ * This driver is based on
+ * fdomain.c -- Future Domain TMC-16x0 driver
+ * which is
+ * Copyright 1992, 1993 Rickard E. Faith (faith@cs.unc.edu)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ *
+ * $Id: aha152x.c,v 1.1.4.2 2007/03/27 21:04:30 tschwinge Exp $
+ *
+ * Revision 1.18 1996/09/07 20:10:40 fischer
+ * - fixed can_queue handling (multiple outstanding commands working again)
+ *
+ * Revision 1.17 1996/08/17 16:05:14 fischer
+ * - biosparam improved
+ * - interrupt verification
+ * - updated documentation
+ * - cleanups
+ *
+ * Revision 1.16 1996/06/09 00:04:56 root
+ * - added configuration symbols for insmod (aha152x/aha152x1)
+ *
+ * Revision 1.15 1996/04/30 14:52:06 fischer
+ * - proc info fixed
+ * - support for extended translation for >1GB disks
+ *
+ * Revision 1.14 1996/01/17 15:11:20 fischer
+ * - fixed lockup in MESSAGE IN phase after reconnection
+ *
+ * Revision 1.13 1996/01/09 02:15:53 fischer
+ * - some cleanups
+ * - moved request_irq behind controller initialization
+ * (to avoid spurious interrupts)
+ *
+ * Revision 1.12 1995/12/16 12:26:07 fischer
+ * - barrier()s added
+ * - configurable RESET delay added
+ *
+ * Revision 1.11 1995/12/06 21:18:35 fischer
+ * - some minor updates
+ *
+ * Revision 1.10 1995/07/22 19:18:45 fischer
+ * - support for 2 controllers
+ * - started synchronous data transfers (not working yet)
+ *
+ * Revision 1.9 1995/03/18 09:20:24 root
+ * - patches for PCMCIA and modules
+ *
+ * Revision 1.8 1995/01/21 22:07:19 root
+ * - snarf_region => request_region
+ * - aha152x_intr interface change
+ *
+ * Revision 1.7 1995/01/02 23:19:36 root
+ * - updated COMMAND_SIZE to cmd_len
+ * - changed sti() to restore_flags()
+ * - fixed some #ifdef which generated warnings
+ *
+ * Revision 1.6 1994/11/24 20:35:27 root
+ * - problem with odd number of bytes in fifo fixed
+ *
+ * Revision 1.5 1994/10/30 14:39:56 root
+ * - abort code fixed
+ * - debugging improved
+ *
+ * Revision 1.4 1994/09/12 11:33:01 root
+ * - irqaction to request_irq
+ * - abortion updated
+ *
+ * Revision 1.3 1994/08/04 13:53:05 root
+ * - updates for mid-level-driver changes
+ * - accept unexpected BUSFREE phase as error condition
+ * - parity check now configurable
+ *
+ * Revision 1.2 1994/07/03 12:56:36 root
+ * - cleaned up debugging code
+ * - more tweaking on reset delays
+ * - updated abort/reset code (pretty untested...)
+ *
+ * Revision 1.1 1994/05/28 21:18:49 root
+ * - update for mid-level interface change (abort-reset)
+ * - delays after resets adjusted for some slow devices
+ *
+ * Revision 1.0 1994/03/25 12:52:00 root
+ * - Fixed "more data than expected" problem
+ * - added new BIOS signatures
+ *
+ * Revision 0.102 1994/01/31 20:44:12 root
+ * - minor changes in insw/outsw handling
+ *
+ * Revision 0.101 1993/12/13 01:16:27 root
+ * - fixed STATUS phase (non-GOOD stati were dropped sometimes;
+ * fixes problems with CD-ROM sector size detection & media change)
+ *
+ * Revision 0.100 1993/12/10 16:58:47 root
+ * - fix for unsuccessful selections in case of non-continuous id assignments
+ * on the scsi bus.
+ *
+ * Revision 0.99 1993/10/24 16:19:59 root
+ * - fixed DATA IN (rare read errors gone)
+ *
+ * Revision 0.98 1993/10/17 12:54:44 root
+ * - fixed some recent fixes (shame on me)
+ * - moved initialization of scratch area to aha152x_queue
+ *
+ * Revision 0.97 1993/10/09 18:53:53 root
+ * - DATA IN fixed. Rarely left data in the fifo.
+ *
+ * Revision 0.96 1993/10/03 00:53:59 root
+ * - minor changes on DATA IN
+ *
+ * Revision 0.95 1993/09/24 10:36:01 root
+ * - change handling of MSGI after reselection
+ * - fixed sti/cli
+ * - minor changes
+ *
+ * Revision 0.94 1993/09/18 14:08:22 root
+ * - fixed bug in multiple outstanding command code
+ * - changed detection
+ * - support for kernel command line configuration
+ * - reset corrected
+ * - changed message handling
+ *
+ * Revision 0.93 1993/09/15 20:41:19 root
+ * - fixed bugs with multiple outstanding commands
+ *
+ * Revision 0.92 1993/09/13 02:46:33 root
+ * - multiple outstanding commands work (no problems with IBM drive)
+ *
+ * Revision 0.91 1993/09/12 20:51:46 root
+ * added multiple outstanding commands
+ * (some problem with this $%&? IBM device remain)
+ *
+ * Revision 0.9 1993/09/12 11:11:22 root
+ * - corrected auto-configuration
+ * - changed the auto-configuration (added some '#define's)
+ * - added support for dis-/reconnection
+ *
+ * Revision 0.8 1993/09/06 23:09:39 root
+ * - added support for the drive activity light
+ * - minor changes
+ *
+ * Revision 0.7 1993/09/05 14:30:15 root
+ * - improved phase detection
+ * - now using the new snarf_region code of 0.99pl13
+ *
+ * Revision 0.6 1993/09/02 11:01:38 root
+ * first public release; added some signatures and biosparam()
+ *
+ * Revision 0.5 1993/08/30 10:23:30 root
+ * fixed timing problems with my IBM drive
+ *
+ * Revision 0.4 1993/08/29 14:06:52 root
+ * fixed some problems with timeouts due to incomplete commands
+ *
+ * Revision 0.3 1993/08/28 15:55:03 root
+ * writing data works too. mounted and worked on a dos partition
+ *
+ * Revision 0.2 1993/08/27 22:42:07 root
+ * reading data works. Mounted a msdos partition.
+ *
+ * Revision 0.1 1993/08/25 13:38:30 root
+ * first "damn thing doesn't work" version
+ *
+ * Revision 0.0 1993/08/14 19:54:25 root
+ * empty function bodies; detect() works.
+ *
+ *
+ **************************************************************************
+
+
+
+ DESCRIPTION:
+
+ This is the Linux low-level SCSI driver for Adaptec AHA-1520/1522 SCSI
+ host adapters.
+
+
+ CONFIGURATION ARGUMENTS:
+
+ IOPORT base io address (0x340/0x140)
+ IRQ interrupt level (9-12; default 11)
+ SCSI_ID scsi id of controller (0-7; default 7)
+ RECONNECT allow targets to disconnect from the bus (0/1; default 1 [on])
+ PARITY enable parity checking (0/1; default 1 [on])
+ SYNCHRONOUS enable synchronous transfers (0/1; default 0 [off])
+ (NOT WORKING YET)
+ DELAY bus reset delay (default 100)
+ EXT_TRANS enable extended translation (0/1; default 0 [off])
+ (see NOTES below)
+
+ COMPILE TIME CONFIGURATION (put into AHA152X in drivers/scsi/Makefile):
+
+ -DAUTOCONF
+ use configuration the controller reports (AHA-152x only)
+
+ -DSKIP_BIOSTEST
+ Don't test for BIOS signature (AHA-1510 or disabled BIOS)
+
+ -DSETUP0="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
+ override for the first controller
+
+ -DSETUP1="{ IOPORT, IRQ, SCSI_ID, RECONNECT, PARITY, SYNCHRONOUS, DELAY, EXT_TRANS }"
+ override for the second controller
+
+
+ LILO COMMAND LINE OPTIONS:
+
+ aha152x=<IOPORT>[,<IRQ>[,<SCSI-ID>[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY> [,<EXT_TRANS]]]]]]]
+
+ The normal configuration can be overridden by specifying a command line.
+ When you do this, the BIOS test is skipped. Entered values have to be
+ valid (known). Don't use values that aren't supported under normal
+ operation. If you think that you need other values: contact me.
+ For two controllers use the aha152x statement twice.
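+
+ For example (illustrative values only, matching the defaults above):
+
+ aha152x=0x340,11,7,1,1,0,100,0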
+
+
+ SYMBOLS FOR MODULE CONFIGURATION:
+
+ aha152x=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
+ configuration override of first controller
+
+
+ aha152x1=IOPORT,IRQ,SCSI_ID,RECONNECT,PARITY,SYNCHRONOUS,DELAY,EXT_TRANS
+ configuration override of second controller
+
+
+ NOTES ON EXT_TRANS:
+
+ SCSI uses block numbers to address blocks/sectors on a device.
+ The BIOS uses a cylinder/head/sector addressing scheme (C/H/S)
+ scheme instead. DOS expects a BIOS or driver that understands this
+ C/H/S addressing.
+
+ The number of cylinders/heads/sectors is called the geometry and is
+ required as the basis for requests in C/H/S addressing. SCSI only knows
+ about the total capacity of disks in blocks (sectors).
+
+ Therefore the SCSI BIOS/DOS driver has to calculate a logical/virtual
+ geometry just to be able to support that addressing scheme. The geometry
+ returned by the SCSI BIOS is a pure calculation and has nothing to
+ do with the real/physical geometry of the disk (which is usually
+ irrelevant anyway).
+
+ Basically this has no impact at all on Linux, because it also uses block
+ instead of C/H/S addressing. Unfortunately C/H/S addressing is also used
+ in the partition table and therefore every operating system has to know
+ the right geometry to be able to interpret it.
+
+ Moreover there are certain limitations to the C/H/S addressing scheme,
+ namely the address space is limited to up to 255 heads, up to 63 sectors
+ and a maximum of 1023 cylinders.
+
+ The AHA-1522 BIOS calculates the geometry by fixing the number of heads
+ to 64, the number of sectors to 32 and by calculating the number of
+ cylinders by dividing the capacity reported by the disk by 64*32 (1 MB).
+ This is considered to be the default translation.
+
+ With respect to the limit of 1023 cylinders, using C/H/S you can only
+ address the first GB of your disk in the partition table. Therefore
+ BIOSes of some newer controllers based on the AIC-6260/6360 support
+ extended translation. This means that the BIOS uses 255 for heads,
+ 63 for sectors and then divides the capacity of the disk by 255*63
+ (about 8 MB) as soon as it sees a disk greater than 1 GB. That results
+ in a maximum of about 8 GB of addressable disk space in the partition
+ table (but there are already bigger disks out there today).
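+
+ A worked example (illustrative numbers): a 2 GB disk reports about
+ 4194304 sectors. With the default translation (64 heads, 32 sectors)
+ this would need 4194304/(64*32) = 2048 cylinders, beyond the 1023
+ cylinder limit; with extended translation (255 heads, 63 sectors) it
+ needs only about 4194304/(255*63) = 261 cylinders.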
+
+ To make it even more complicated, the translation mode may or may
+ not be configurable in certain BIOS setups.
+
+ This driver does some more or less failsafe guessing to get the
+ geometry right in most cases:
+
+ - for disks<1GB: use default translation (C/32/64)
+ - for disks>1GB:
+ - take current geometry from the partition table
+ (using scsicam_bios_param and accepting only `valid' geometries,
+ i.e. either (C/32/64) or (C/63/255)). This can be extended
+ translation even if it's not enabled in the driver.
+ - if that fails, take extended translation if enabled by override,
+ kernel or module parameter, otherwise take default translation and
+ ask the user for verification. This might on not yet partitioned
+ disks or
+
+
+ REFERENCES USED:
+
+ "AIC-6260 SCSI Chip Specification", Adaptec Corporation.
+
+ "SCSI COMPUTER SYSTEM INTERFACE - 2 (SCSI-2)", X3T9.2/86-109 rev. 10h
+
+ "Writing a SCSI device driver for Linux", Rik Faith (faith@cs.unc.edu)
+
+ "Kernel Hacker's Guide", Michael K. Johnson (johnsonm@sunsite.unc.edu)
+
+ "Adaptec 1520/1522 User's Guide", Adaptec Corporation.
+
+ Michael K. Johnson (johnsonm@sunsite.unc.edu)
+
+ Drew Eckhardt (drew@cs.colorado.edu)
+
+ Eric Youngdale (ericy@cais.com)
+
+ special thanks to Eric Youngdale for freely(!) supplying the
+ documentation on the chip.
+
+ **************************************************************************/
+
+#ifdef MACH
+#define AUTOCONF 1
+#endif
+
+#ifdef PCMCIA
+#define MODULE
+#endif
+
+#include <linux/module.h>
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include "constants.h"
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+
+#include "aha152x.h"
+#include <linux/stat.h>
+
+#include <scsi/scsicam.h>
+
+struct proc_dir_entry proc_scsi_aha152x = {
+ PROC_SCSI_AHA152X, 7, "aha152x",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* DEFINES */
+
+/* For PCMCIA cards, always use AUTOCONF */
+#if defined(PCMCIA) || defined(MODULE)
+#if !defined(AUTOCONF)
+#define AUTOCONF
+#endif
+#endif
+
+#if !defined(AUTOCONF) && !defined(SETUP0)
+#error define AUTOCONF or SETUP0
+#endif
+
+#if defined(DEBUG_AHA152X)
+
+#undef SKIP_PORTS /* don't display ports */
+
+#undef DEBUG_QUEUE /* debug queue() */
+#undef DEBUG_RESET /* debug reset() */
+#undef DEBUG_INTR /* debug intr() */
+#undef DEBUG_SELECTION /* debug selection part in intr() */
+#undef DEBUG_MSGO /* debug message out phase in intr() */
+#undef DEBUG_MSGI /* debug message in phase in intr() */
+#undef DEBUG_STATUS /* debug status phase in intr() */
+#undef DEBUG_CMD /* debug command phase in intr() */
+#undef DEBUG_DATAI /* debug data in phase in intr() */
+#undef DEBUG_DATAO /* debug data out phase in intr() */
+#undef DEBUG_ABORT /* debug abort() */
+#undef DEBUG_DONE /* debug done() */
+#undef DEBUG_BIOSPARAM /* debug biosparam() */
+
+#undef DEBUG_RACE /* debug race conditions */
+#undef DEBUG_PHASES /* debug phases (useful to trace) */
+#undef DEBUG_QUEUES /* debug reselection */
+
+/* recently used for debugging */
+#if 0
+#endif
+
+#define DEBUG_SELECTION
+#define DEBUG_PHASES
+#define DEBUG_RESET
+#define DEBUG_ABORT
+
+#define DEBUG_DEFAULT (debug_reset|debug_abort)
+
+#endif
+
+/* END OF DEFINES */
+
+extern unsigned long loops_per_sec;
+
+#define DELAY_DEFAULT 100
+
+/* some additional "phases" for getphase() */
+#define P_BUSFREE 1
+#define P_PARITY 2
+
+/* possible irq range */
+#define IRQ_MIN 9
+#define IRQ_MAX 12
+#define IRQS IRQ_MAX-IRQ_MIN+1
+
+enum {
+ not_issued = 0x0001,
+ in_selection = 0x0002,
+ disconnected = 0x0004,
+ aborted = 0x0008,
+ sent_ident = 0x0010,
+ in_other = 0x0020,
+ in_sync = 0x0040,
+ sync_ok = 0x0080,
+};
+
+#if defined(MODULE)
+#if defined(DEBUG_AHA152X)
+int aha152x[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0, DEBUG_DEFAULT };
+int aha152x1[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0, DEBUG_DEFAULT };
+#else
+int aha152x[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0 };
+int aha152x1[] = { 0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0 };
+#endif
+#endif
+
+/* set by aha152x_setup according to the command line */
+static int setup_count=0;
+static struct aha152x_setup {
+ int io_port;
+ int irq;
+ int scsiid;
+ int reconnect;
+ int parity;
+ int synchronous;
+ int delay;
+ int ext_trans;
+#ifdef DEBUG_AHA152X
+ int debug;
+#endif
+ char *conf;
+} setup[2];
+
+static struct Scsi_Host *aha152x_host[IRQS];
+
+#define HOSTDATA(shpnt) ((struct aha152x_hostdata *) &shpnt->hostdata)
+#define CURRENT_SC (HOSTDATA(shpnt)->current_SC)
+#define ISSUE_SC (HOSTDATA(shpnt)->issue_SC)
+#define DISCONNECTED_SC (HOSTDATA(shpnt)->disconnected_SC)
+#define DELAY (HOSTDATA(shpnt)->delay)
+#define EXT_TRANS (HOSTDATA(shpnt)->ext_trans)
+#define SYNCRATE (HOSTDATA(shpnt)->syncrate[CURRENT_SC->target])
+#define MSG(i) (HOSTDATA(shpnt)->message[i])
+#define MSGLEN (HOSTDATA(shpnt)->message_len)
+#define ADDMSG(x) (MSG(MSGLEN++)=x)
+
+struct aha152x_hostdata {
+ Scsi_Cmnd *issue_SC;
+ Scsi_Cmnd *current_SC;
+ Scsi_Cmnd *disconnected_SC;
+ int aborting;
+ int abortion_complete;
+ int abort_result;
+ int commands;
+
+ int reconnect;
+ int parity;
+ int synchronous;
+ int delay;
+ int ext_trans;
+
+ int swint;
+
+ unsigned char syncrate[8];
+
+ unsigned char message[256];
+ int message_len;
+
+#ifdef DEBUG_AHA152X
+ int debug;
+#endif
+};
+
+void aha152x_intr(int irq, void *dev_id, struct pt_regs *);
+void aha152x_done(struct Scsi_Host *shpnt, int error);
+void aha152x_setup(char *str, int *ints);
+int aha152x_checksetup(struct aha152x_setup *setup);
+
+static void aha152x_reset_ports(struct Scsi_Host *shpnt);
+static void aha152x_panic(struct Scsi_Host *shpnt, char *msg);
+
+static void disp_ports(struct Scsi_Host *shpnt);
+static void show_command(Scsi_Cmnd *ptr);
+static void show_queues(struct Scsi_Host *shpnt);
+static void disp_enintr(struct Scsi_Host *shpnt);
+
+#if defined(DEBUG_RACE)
+static void enter_driver(const char *);
+static void leave_driver(const char *);
+#endif
+
+/* possible i/o addresses for the AIC-6260 */
+static unsigned short ports[] =
+{
+ 0x340, /* default first */
+ 0x140
+};
+#define PORT_COUNT (sizeof(ports) / sizeof(unsigned short))
+
+#if !defined(SKIP_BIOSTEST)
+/* possible locations for the Adaptec BIOS */
+static void *addresses[] =
+{
+ (void *) 0xdc000, /* default first */
+ (void *) 0xc8000,
+ (void *) 0xcc000,
+ (void *) 0xd0000,
+ (void *) 0xd4000,
+ (void *) 0xd8000,
+ (void *) 0xe0000,
+ (void *) 0xeb800, /* VTech Platinum SMP */
+ (void *) 0xf0000,
+};
+#define ADDRESS_COUNT (sizeof(addresses) / sizeof(void *))
+
+/* signatures for various AIC-6[23]60 based controllers.
+ The point in detecting signatures is to avoid useless and maybe
+ harmful probes on ports. I'm not sure that all listed boards pass
+ auto-configuration. For those which fail, the BIOS signature is
+ obsolete, because user intervention to supply the configuration is
+ needed anyway. Information on whether or not the BIOS supports
+ extended translation might also be useful here. */
+static struct signature {
+ char *signature;
+ int sig_offset;
+ int sig_length;
+} signatures[] =
+{
+ { "Adaptec AHA-1520 BIOS", 0x102e, 21 }, /* Adaptec 152x */
+ { "Adaptec AHA-1520B", 0x0b, 17 }, /* Adaptec 152x rev B */
+ { "Adaptec AHA-1520B/1522B", 0x3e20, 23 }, /* Adaptec 1520B/1522B */
+ { "Adaptec ASW-B626 BIOS", 0x1029, 21 }, /* on-board controller */
+ { "Adaptec BIOS: ASW-B626", 0x0f, 22 }, /* on-board controller */
+ { "Adaptec ASW-B626 S2", 0x2e6c, 19 }, /* on-board controller */
+ { "Adaptec BIOS:AIC-6360", 0xc, 21 }, /* on-board controller */
+ { "ScsiPro SP-360 BIOS", 0x2873, 19 }, /* ScsiPro-Controller */
+ { "GA-400 LOCAL BUS SCSI BIOS", 0x102e, 26 }, /* Gigabyte Local-Bus-SCSI */
+ { "Adaptec BIOS:AVA-282X", 0xc, 21 }, /* Adaptec 282x */
+ { "Adaptec IBM Dock II SCSI", 0x2edd, 24 }, /* IBM Thinkpad Dock II */
+ { "Adaptec BIOS:AHA-1532P", 0x1c, 22 }, /* IBM Thinkpad Dock II SCSI */
+};
+#define SIGNATURE_COUNT (sizeof(signatures) / sizeof(struct signature))
+#endif
+
+
+static void do_pause(unsigned amount) /* Pause for amount*10 milliseconds */
+{
+ unsigned long the_time = jiffies + amount; /* 0.01 seconds per jiffy */
+
+ while (jiffies < the_time)
+ barrier();
+}
+
+/*
+ * queue services:
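+ * commands are kept in singly-linked lists chained through Scsi_Cmnd->host_scribble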
+ */
+static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
+{
+ Scsi_Cmnd *end;
+
+ new_SC->host_scribble = (unsigned char *) NULL;
+ if(!*SC)
+ *SC=new_SC;
+ else {
+ for(end=*SC; end->host_scribble; end = (Scsi_Cmnd *) end->host_scribble)
+ ;
+ end->host_scribble = (unsigned char *) new_SC;
+ }
+}
+
+static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC)
+{
+ Scsi_Cmnd *ptr;
+
+ ptr=*SC;
+ if(ptr)
+ *SC= (Scsi_Cmnd *) (*SC)->host_scribble;
+ return ptr;
+}
+
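+/* remove and return the queued command for the given target/lun, if any */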
+static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun)
+{
+ Scsi_Cmnd *ptr, *prev;
+
+ for(ptr=*SC, prev=NULL;
+ ptr && ((ptr->target!=target) || (ptr->lun!=lun));
+ prev = ptr, ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(ptr){
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ *SC= (Scsi_Cmnd *) ptr->host_scribble;
+ }
+
+ return ptr;
+}
+
+/*
+ * read inbound byte and wait for ACK to get low
+ */
+static void make_acklow(struct Scsi_Host *shpnt)
+{
+ SETPORT(SXFRCTL0, CH1|SPIOEN);
+ GETPORT(SCSIDAT);
+ SETPORT(SXFRCTL0, CH1);
+
+ while(TESTHI(SCSISIG, ACKI))
+ barrier();
+}
+
+/*
+ * detect the current phase more reliably:
+ * a phase is valid when the target asserts REQ after we've deasserted ACK.
+ *
+ * the return value is a valid phase or an error code.
+ *
+ * error codes:
+ * P_BUSFREE BUS FREE phase detected
+ * P_PARITY parity error in DATA phase
+ */
+static int getphase(struct Scsi_Host *shpnt)
+{
+ int phase, sstat1;
+
+ while(1) {
+ do {
+ while(!((sstat1 = GETPORT(SSTAT1)) & (BUSFREE|SCSIRSTI|REQINIT)))
+ barrier();
+ if(sstat1 & BUSFREE)
+ return P_BUSFREE;
+ if(sstat1 & SCSIRSTI) {
+ printk("aha152x: RESET IN\n");
+ SETPORT(SSTAT1, SCSIRSTI);
+ }
+ } while(TESTHI(SCSISIG, ACKI) || TESTLO(SSTAT1, REQINIT));
+
+ SETPORT(SSTAT1, CLRSCSIPERR);
+
+ phase = GETPORT(SCSISIG) & P_MASK ;
+
+ if(TESTHI(SSTAT1, SCSIPERR)) {
+ if((phase & (CDO|MSGO))==0) /* DATA phase */
+ return P_PARITY;
+
+ make_acklow(shpnt);
+ } else
+ return phase;
+ }
+}
+
+/* called from init/main.c */
+void aha152x_setup(char *str, int *ints)
+{
+ if(setup_count>2)
+ panic("aha152x: you can only configure up to two controllers\n");
+
+ setup[setup_count].conf = str;
+ setup[setup_count].io_port = ints[0] >= 1 ? ints[1] : 0x340;
+ setup[setup_count].irq = ints[0] >= 2 ? ints[2] : 11;
+ setup[setup_count].scsiid = ints[0] >= 3 ? ints[3] : 7;
+ setup[setup_count].reconnect = ints[0] >= 4 ? ints[4] : 1;
+ setup[setup_count].parity = ints[0] >= 5 ? ints[5] : 1;
+ setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 0 /* FIXME: 1 */;
+ setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
+ setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = ints[0] >= 9 ? ints[9] : DEBUG_DEFAULT;
+ if(ints[0]>9) {
+ printk("aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>[,<DEBUG>]]]]]]]]\n");
+#else
+ if(ints[0]>8) {
+ printk("aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
+ "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
+#endif
+ } else
+ setup_count++;
+}
+
+/*
+ * Test whether port_base is valid.
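+ * (writes the values 0..15 into the chip's stack register and checks that they read back)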
+ */
+static int aha152x_porttest(int io_port)
+{
+ int i;
+
+ if(check_region(io_port, IO_RANGE))
+ return 0;
+
+ SETPORT(io_port+O_DMACNTRL1, 0); /* reset stack pointer */
+ for(i=0; i<16; i++)
+ SETPORT(io_port+O_STACK, i);
+
+ SETPORT(io_port+O_DMACNTRL1, 0); /* reset stack pointer */
+ for(i=0; i<16 && GETPORT(io_port+O_STACK)==i; i++)
+ ;
+
+ return(i==16);
+}
+
+int aha152x_checksetup(struct aha152x_setup *setup)
+{
+ int i;
+
+#ifndef PCMCIA
+ for(i=0; i<PORT_COUNT && (setup->io_port != ports[i]); i++)
+ ;
+
+ if(i==PORT_COUNT)
+ return 0;
+#endif
+
+ if(!aha152x_porttest(setup->io_port))
+ return 0;
+
+ if(setup->irq<IRQ_MIN || setup->irq>IRQ_MAX)
+ return 0;
+
+ if((setup->scsiid < 0) || (setup->scsiid > 7))
+ return 0;
+
+ if((setup->reconnect < 0) || (setup->reconnect > 1))
+ return 0;
+
+ if((setup->parity < 0) || (setup->parity > 1))
+ return 0;
+
+ if((setup->synchronous < 0) || (setup->synchronous > 1))
+ return 0;
+
+ if((setup->ext_trans < 0) || (setup->ext_trans > 1))
+ return 0;
+
+
+ return 1;
+}
+
+void aha152x_swintr(int irqno, void *dev_id, struct pt_regs * regs)
+{
+ struct Scsi_Host *shpnt = aha152x_host[irqno-IRQ_MIN];
+
+ if(!shpnt)
+ panic("aha152x: caught software interrupt for unknown controller.\n");
+
+ HOSTDATA(shpnt)->swint++;
+}
+
+
+int aha152x_detect(Scsi_Host_Template * tpnt)
+{
+ int i, j, ok;
+#if defined(AUTOCONF)
+ aha152x_config conf;
+#endif
+
+ tpnt->proc_dir = &proc_scsi_aha152x;
+
+ for(i=0; i<IRQS; i++)
+ aha152x_host[i] = (struct Scsi_Host *) NULL;
+
+ if(setup_count) {
+ printk("aha152x: processing commandline: ");
+
+ for(i=0; i<setup_count; i++)
+ if(!aha152x_checksetup(&setup[i])) {
+ printk("\naha152x: %s\n", setup[i].conf);
+ printk("aha152x: invalid line (controller=%d)\n", i+1);
+ }
+
+ printk("ok\n");
+ }
+
+#ifdef SETUP0
+ if(setup_count<2) {
+ struct aha152x_setup override = SETUP0;
+
+ if(setup_count==0 || (override.io_port != setup[0].io_port))
+ if(!aha152x_checksetup(&override)) {
+ printk("\naha152x: invalid override SETUP0={0x%x,%d,%d,%d,%d,%d,%d,%d}\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay,
+ override.ext_trans);
+ } else
+ setup[setup_count++] = override;
+ }
+#endif
+
+#ifdef SETUP1
+ if(setup_count<2) {
+ struct aha152x_setup override = SETUP1;
+
+ if(setup_count==0 || (override.io_port != setup[0].io_port))
+ if(!aha152x_checksetup(&override)) {
+ printk("\naha152x: invalid override SETUP1={0x%x,%d,%d,%d,%d,%d,%d,%d}\n",
+ override.io_port,
+ override.irq,
+ override.scsiid,
+ override.reconnect,
+ override.parity,
+ override.synchronous,
+ override.delay,
+ override.ext_trans);
+ } else
+ setup[setup_count++] = override;
+ }
+#endif
+
+#if defined(MODULE)
+ if(setup_count<2 && aha152x[0]!=0) {
+ setup[setup_count].conf = "";
+ setup[setup_count].io_port = aha152x[0];
+ setup[setup_count].irq = aha152x[1];
+ setup[setup_count].scsiid = aha152x[2];
+ setup[setup_count].reconnect = aha152x[3];
+ setup[setup_count].parity = aha152x[4];
+ setup[setup_count].synchronous = aha152x[5];
+ setup[setup_count].delay = aha152x[6];
+ setup[setup_count].ext_trans = aha152x[7];
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = aha152x[8];
+#endif
+ if(aha152x_checksetup(&setup[setup_count]))
+ setup_count++;
+ else
+ printk("\naha152x: invalid module argument aha152x=0x%x,%d,%d,%d,%d,%d,%d,%d\n",
+ setup[setup_count].io_port,
+ setup[setup_count].irq,
+ setup[setup_count].scsiid,
+ setup[setup_count].reconnect,
+ setup[setup_count].parity,
+ setup[setup_count].synchronous,
+ setup[setup_count].delay,
+ setup[setup_count].ext_trans);
+ }
+
+ if(setup_count<2 && aha152x1[0]!=0) {
+ setup[setup_count].conf = "";
+ setup[setup_count].io_port = aha152x1[0];
+ setup[setup_count].irq = aha152x1[1];
+ setup[setup_count].scsiid = aha152x1[2];
+ setup[setup_count].reconnect = aha152x1[3];
+ setup[setup_count].parity = aha152x1[4];
+ setup[setup_count].synchronous = aha152x1[5];
+ setup[setup_count].delay = aha152x1[6];
+ setup[setup_count].ext_trans = aha152x1[7];
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = aha152x1[8];
+#endif
+ if(aha152x_checksetup(&setup[setup_count]))
+ setup_count++;
+ else
+ printk("\naha152x: invalid module argument aha152x1=0x%x,%d,%d,%d,%d,%d,%d,%d\n",
+ setup[setup_count].io_port,
+ setup[setup_count].irq,
+ setup[setup_count].scsiid,
+ setup[setup_count].reconnect,
+ setup[setup_count].parity,
+ setup[setup_count].synchronous,
+ setup[setup_count].delay,
+ setup[setup_count].ext_trans);
+ }
+#endif
+
+#if defined(AUTOCONF)
+ if(setup_count<2) {
+#if !defined(SKIP_BIOSTEST)
+ ok=0;
+ for(i=0; i < ADDRESS_COUNT && !ok; i++)
+ for(j=0; (j < SIGNATURE_COUNT) && !ok; j++)
+ ok=!memcmp((void *) addresses[i]+signatures[j].sig_offset,
+ (void *) signatures[j].signature,
+ (int) signatures[j].sig_length);
+
+ if(!ok && setup_count==0)
+ return 0;
+
+ printk("aha152x: BIOS test: passed, ");
+#else
+ printk("aha152x: ");
+#endif /* !SKIP_BIOSTEST */
+
+ ok=0;
+ for(i=0; i<PORT_COUNT && setup_count<2; i++) {
+ if((setup_count==1) && (setup[0].io_port == ports[i]))
+ continue;
+
+ if(aha152x_porttest(ports[i])) {
+ ok++;
+ setup[setup_count].io_port = ports[i];
+
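+ /* read the board's configuration word from ports A and B of the controller */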
+ conf.cf_port =
+ (GETPORT(ports[i]+O_PORTA)<<8) + GETPORT(ports[i]+O_PORTB);
+
+ setup[setup_count].irq = IRQ_MIN + conf.cf_irq;
+ setup[setup_count].scsiid = conf.cf_id;
+ setup[setup_count].reconnect = conf.cf_tardisc;
+ setup[setup_count].parity = !conf.cf_parity;
+ setup[setup_count].synchronous = 0 /* FIXME: conf.cf_syncneg */;
+ setup[setup_count].delay = DELAY_DEFAULT;
+ setup[setup_count].ext_trans = 0;
+#ifdef DEBUG_AHA152X
+ setup[setup_count].debug = DEBUG_DEFAULT;
+#endif
+ setup_count++;
+ }
+ }
+
+ if(ok)
+ printk("auto configuration: ok, ");
+ }
+#endif
+
+ printk("detected %d controller(s)\n", setup_count);
+
+ for(i=0; i<setup_count; i++) {
+ struct Scsi_Host *shpnt;
+ unsigned long int the_time;
+
+ shpnt = aha152x_host[setup[i].irq-IRQ_MIN] =
+ scsi_register(tpnt, sizeof(struct aha152x_hostdata));
+
+ shpnt->io_port = setup[i].io_port;
+ shpnt->n_io_port = IO_RANGE;
+ shpnt->irq = setup[i].irq;
+
+ ISSUE_SC = (Scsi_Cmnd *) NULL;
+ CURRENT_SC = (Scsi_Cmnd *) NULL;
+ DISCONNECTED_SC = (Scsi_Cmnd *) NULL;
+
+ HOSTDATA(shpnt)->reconnect = setup[i].reconnect;
+ HOSTDATA(shpnt)->parity = setup[i].parity;
+ HOSTDATA(shpnt)->synchronous = setup[i].synchronous;
+ HOSTDATA(shpnt)->delay = setup[i].delay;
+ HOSTDATA(shpnt)->ext_trans = setup[i].ext_trans;
+#ifdef DEBUG_AHA152X
+ HOSTDATA(shpnt)->debug = setup[i].debug;
+#endif
+
+ HOSTDATA(shpnt)->aborting = 0;
+ HOSTDATA(shpnt)->abortion_complete = 0;
+ HOSTDATA(shpnt)->abort_result = 0;
+ HOSTDATA(shpnt)->commands = 0;
+
+ HOSTDATA(shpnt)->message_len = 0;
+
+ for(j=0; j<8; j++)
+ HOSTDATA(shpnt)->syncrate[j] = 0;
+
+ SETPORT(SCSIID, setup[i].scsiid << 4);
+ shpnt->this_id=setup[i].scsiid;
+
+ if(setup[i].reconnect)
+ shpnt->can_queue=AHA152X_MAXQUEUE;
+
+ /* RESET OUT */
+ SETBITS(SCSISEQ, SCSIRSTO);
+ do_pause(30);
+ CLRBITS(SCSISEQ, SCSIRSTO);
+ do_pause(setup[i].delay);
+
+ aha152x_reset_ports(shpnt);
+
+ printk("aha152x%d: vital data: PORTBASE=0x%03x, IRQ=%d, SCSI ID=%d,"
+ " reconnect=%s, parity=%s, synchronous=%s, delay=%d, extended translation=%s\n",
+ i,
+ shpnt->io_port,
+ shpnt->irq,
+ shpnt->this_id,
+ HOSTDATA(shpnt)->reconnect ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->parity ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->synchronous ? "enabled" : "disabled",
+ HOSTDATA(shpnt)->delay,
+ HOSTDATA(shpnt)->ext_trans ? "enabled" : "disabled");
+
+ request_region(shpnt->io_port, IO_RANGE, "aha152x"); /* Register */
+
+ /* not expecting any interrupts */
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ ok = request_irq(shpnt->irq, aha152x_swintr, SA_INTERRUPT, "aha152x", NULL);
+ if(ok<0) {
+ if(ok == -EINVAL)
+ printk("aha152x%d: bad IRQ %d.\n", i, shpnt->irq);
+ else if(ok == -EBUSY)
+ printk("aha152x%d: IRQ %d already in use.\n", i, shpnt->irq);
+ else
+ printk("\naha152x%d: Unexpected error code %d on requesting IRQ %d.\n", i, ok, shpnt->irq);
+ printk("aha152x: driver needs an IRQ.\n");
+
+ scsi_unregister(shpnt);
+ shpnt=aha152x_host[shpnt->irq-IRQ_MIN]=0;
+ continue;
+ }
+
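+ /* verify the IRQ assignment: raise a software interrupt and wait up to 100 jiffies for aha152x_swintr() to run */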
+ HOSTDATA(shpnt)->swint=0;
+
+ printk("aha152x: trying software interrupt, ");
+ SETBITS(DMACNTRL0, SWINT);
+
+ the_time=jiffies+100;
+ while(!HOSTDATA(shpnt)->swint && jiffies<the_time)
+ barrier();
+
+ free_irq(shpnt->irq,0);
+
+ if(!HOSTDATA(shpnt)->swint) {
+ if(TESTHI(DMASTAT, INTSTAT)) {
+ printk("lost.\n");
+ } else {
+ printk("failed.\n");
+ }
+
+ printk("aha152x: IRQ %d possibly wrong. Please verify.\n", shpnt->irq);
+
+ scsi_unregister(shpnt);
+ shpnt=aha152x_host[shpnt->irq-IRQ_MIN]=0;
+ continue;
+ }
+
+ printk("ok.\n");
+
+ CLRBITS(DMACNTRL0, SWINT);
+
+ /* clear interrupts */
+ SETPORT(SSTAT0, 0x7f);
+ SETPORT(SSTAT1, 0xef);
+
+ if(request_irq(shpnt->irq,aha152x_intr,SA_INTERRUPT,"aha152x",NULL)<0) {
+ printk("aha152x: failed to reassign interrupt.\n");
+ }
+ }
+
+ return (setup_count>0);
+}
+
+/*
+ * Queue a command and setup interrupts for a free bus.
+ */
+int aha152x_queue(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+
+#if defined(DEBUG_RACE)
+ enter_driver("queue");
+#else
+#if defined(DEBUG_QUEUE)
+ if(HOSTDATA(shpnt)->debug & debug_queue)
+ printk("aha152x: queue(), ");
+#endif
+#endif
+
+#if defined(DEBUG_QUEUE)
+ if(HOSTDATA(shpnt)->debug & debug_queue) {
+ printk("SCpnt (target = %d lun = %d cmnd = ",
+ SCpnt->target, SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ printk(", cmd_len=%d, pieces = %d size = %u), ",
+ SCpnt->cmd_len, SCpnt->use_sg, SCpnt->request_bufflen);
+ disp_ports(shpnt);
+ }
+#endif
+
+ SCpnt->scsi_done = done;
+
+ /* setup scratch area
+ SCp.ptr : buffer pointer
+ SCp.this_residual : buffer length
+ SCp.buffer : next buffer
+ SCp.buffers_residual : left buffers in list
+ SCp.phase : current state of the command */
+ SCpnt->SCp.phase = not_issued;
+ if (SCpnt->use_sg) {
+ SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
+ SCpnt->SCp.ptr = SCpnt->SCp.buffer->address;
+ SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
+ } else {
+ SCpnt->SCp.ptr = (char *)SCpnt->request_buffer;
+ SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+ SCpnt->SCp.buffer = NULL;
+ SCpnt->SCp.buffers_residual = 0;
+ }
+
+ SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Message = 0;
+ SCpnt->SCp.have_data_in = 0;
+ SCpnt->SCp.sent_command = 0;
+
+ /* Turn led on, when this is the first command. */
+ save_flags(flags);
+ cli();
+ HOSTDATA(shpnt)->commands++;
+ if(HOSTDATA(shpnt)->commands==1)
+ SETPORT(PORTA, 1);
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i+ (%d), ", HOSTDATA(shpnt)->commands);
+#endif
+ append_SC(&ISSUE_SC, SCpnt);
+
+ /* Enable bus free interrupt, when we aren't currently on the bus */
+ if(!CURRENT_SC) {
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ }
+ restore_flags(flags);
+
+#if defined(DEBUG_RACE)
+ leave_driver("queue");
+#endif
+
+ return 0;
+}
+
+/*
+ * We only support commands in interrupt-driven fashion
+ */
+int aha152x_command(Scsi_Cmnd *SCpnt)
+{
+ printk("aha152x: interrupt driven driver; use aha152x_queue()\n");
+ return -1;
+}
+
+/*
+ * Abort a queued command
+ * (commands that are on the bus can't be aborted easily)
+ */
+int aha152x_abort(Scsi_Cmnd *SCpnt)
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+ Scsi_Cmnd *ptr, *prev;
+
+ save_flags(flags);
+ cli();
+
+#if defined(DEBUG_ABORT)
+ if(HOSTDATA(shpnt)->debug & debug_abort) {
+ printk("aha152x: abort(), SCpnt=0x%08x, ", (unsigned int) SCpnt);
+ show_queues(shpnt);
+ }
+#endif
+
+ /* look for command in issue queue */
+ for(ptr=ISSUE_SC, prev=NULL;
+ ptr && ptr!=SCpnt;
+ prev=ptr, ptr=(Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(ptr) {
+ /* dequeue */
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ ISSUE_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ HOSTDATA(shpnt)->commands--;
+
+ restore_flags(flags);
+
+ ptr->host_scribble = NULL;
+ ptr->result = DID_ABORT << 16;
+ ptr->scsi_done(ptr);
+
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ /* if the bus is busy or a command is currently processed,
+ we can't do anything more */
+ if (TESTLO(SSTAT1, BUSFREE) || (CURRENT_SC && CURRENT_SC!=SCpnt)) {
+ /* fail the abort if the bus is busy */
+
+ if(!CURRENT_SC)
+ printk("bus busy w/o current command, ");
+
+ restore_flags(flags);
+
+ return SCSI_ABORT_BUSY;
+ }
+
+ /* bus is free */
+
+ if(CURRENT_SC) {
+ HOSTDATA(shpnt)->commands--;
+
+ /* target entered bus free before COMMAND COMPLETE, nothing to abort */
+ restore_flags(flags);
+ CURRENT_SC->result = DID_ERROR << 16;
+ CURRENT_SC->scsi_done(CURRENT_SC);
+ CURRENT_SC = (Scsi_Cmnd *) NULL;
+
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ /* look for command in disconnected queue */
+ for(ptr=DISCONNECTED_SC, prev=NULL;
+ ptr && ptr!=SCpnt;
+ prev=ptr, ptr=(Scsi_Cmnd *) ptr->host_scribble)
+ ;
+
+ if(!ptr) {
+ /* command wasn't found */
+ printk("command not found\n");
+ restore_flags(flags);
+
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if(!HOSTDATA(shpnt)->aborting) {
+ /* dequeue */
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ DISCONNECTED_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ HOSTDATA(shpnt)->commands--;
+
+ /* set command current and initiate selection,
+ let the interrupt routine take care of the abort */
+ CURRENT_SC = ptr;
+ ptr->SCp.phase = in_selection|aborted;
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->target);
+
+ ADDMSG(ABORT);
+
+ /* enable interrupts for SELECTION OUT DONE and SELECTION TIME OUT */
+ SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
+ SETPORT(SIMODE1, ENSELTIMO);
+
+ /* Enable SELECTION OUT sequence */
+ SETBITS(SCSISEQ, ENSELO | ENAUTOATNO);
+
+ SETBITS(DMACNTRL0, INTEN);
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_SUCCESS;
+ HOSTDATA(shpnt)->aborting++;
+ HOSTDATA(shpnt)->abortion_complete=0;
+
+ sti(); /* Hi Eric, guess what ;-) */
+
+ /* busy-wait until the abort is complete */
+ while(!HOSTDATA(shpnt)->abortion_complete)
+ barrier();
+ HOSTDATA(shpnt)->aborting=0;
+
+ return HOSTDATA(shpnt)->abort_result;
+ } else {
+ /* we're already aborting a command */
+ restore_flags(flags);
+
+ return SCSI_ABORT_BUSY;
+ }
+}
+
+/*
+ * Restore default values to the AIC-6260 registers and reset the fifos
+ */
+static void aha152x_reset_ports(struct Scsi_Host *shpnt)
+{
+ /* disable interrupts */
+ SETPORT(DMACNTRL0, RSTFIFO);
+
+ SETPORT(SCSISEQ, 0);
+
+ SETPORT(SXFRCTL1, 0);
+ SETPORT(SCSISIG, 0);
+ SETPORT(SCSIRATE, 0);
+
+ /* clear all interrupt conditions */
+ SETPORT(SSTAT0, 0x7f);
+ SETPORT(SSTAT1, 0xef);
+
+ SETPORT(SSTAT4, SYNCERR|FWERR|FRERR);
+
+ SETPORT(DMACNTRL0, 0);
+ SETPORT(DMACNTRL1, 0);
+
+ SETPORT(BRSTCNTRL, 0xf1);
+
+ /* clear SCSI fifo and transfer count */
+ SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
+ SETPORT(SXFRCTL0, CH1);
+
+ /* enable interrupts */
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+}
+
+/*
+ * Reset registers, reset a hanging bus and
+ * kill active and disconnected commands for targets w/o soft reset
+ */
+int aha152x_reset(Scsi_Cmnd *SCpnt, unsigned int unused)
+{
+ struct Scsi_Host *shpnt = SCpnt->host;
+ unsigned long flags;
+ Scsi_Cmnd *ptr, *prev, *next;
+
+ aha152x_reset_ports(shpnt);
+
+ /* Reset if the bus hangs */
+ if(TESTLO(SSTAT1, BUSFREE)) {
+ CLRBITS(DMACNTRL0, INTEN);
+
+#if defined(DEBUG_RESET)
+ if(HOSTDATA(shpnt)->debug & debug_reset) {
+ printk("aha152x: reset(), bus not free: SCSI RESET OUT\n");
+ show_queues(shpnt);
+ }
+#endif
+
+ ptr=CURRENT_SC;
+ if(ptr && !ptr->device->soft_reset) {
+ ptr->host_scribble = NULL;
+ ptr->result = DID_RESET << 16;
+ ptr->scsi_done(CURRENT_SC);
+ CURRENT_SC=NULL;
+ }
+
+ save_flags(flags);
+ cli();
+ prev=NULL; ptr=DISCONNECTED_SC;
+ while(ptr) {
+ if(!ptr->device->soft_reset) {
+ if(prev)
+ prev->host_scribble = ptr->host_scribble;
+ else
+ DISCONNECTED_SC = (Scsi_Cmnd *) ptr->host_scribble;
+
+ next = (Scsi_Cmnd *) ptr->host_scribble;
+
+ ptr->host_scribble = NULL;
+ ptr->result = DID_RESET << 16;
+ ptr->scsi_done(ptr);
+
+ ptr = next;
+ } else {
+ prev=ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble;
+ }
+ }
+ restore_flags(flags);
+
+#if defined(DEBUG_RESET)
+ if(HOSTDATA(shpnt)->debug & debug_reset) {
+ printk("commands on targets w/ soft-resets:\n");
+ show_queues(shpnt);
+ }
+#endif
+
+ /* RESET OUT */
+ SETPORT(SCSISEQ, SCSIRSTO);
+ do_pause(30);
+ SETPORT(SCSISEQ, 0);
+ do_pause(DELAY);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+ SETPORT(DMACNTRL0, INTEN);
+ }
+
+ return SCSI_RESET_SUCCESS;
+}
+
+/*
+ * Return the "logical geometry"
+ */
+int aha152x_biosparam(Scsi_Disk * disk, kdev_t dev, int *info_array)
+{
+ struct Scsi_Host *shpnt=disk->device->host;
+
+#if defined(DEBUG_BIOSPARAM)
+ if(HOSTDATA(shpnt)->debug & debug_biosparam)
+ printk("aha152x_biosparam: dev=%s, size=%d, ",
+ kdevname(dev), disk->capacity);
+#endif
+
+ /* try default translation */
+ info_array[0]=64;
+ info_array[1]=32;
+ info_array[2]=disk->capacity / (64 * 32);
+
+ /* for disks >1GB do some guessing */
+ if(info_array[2]>=1024) {
+ int info[3];
+
+ /* try to figure out the geometry from the partition table */
+ if(scsicam_bios_param(disk, dev, info)<0 ||
+ !((info[0]==64 && info[1]==32) || (info[0]==255 && info[1]==63))) {
+ if(EXT_TRANS) {
+ printk("aha152x: unable to verify geometry for disk with >1GB.\n"
+ " using extended translation.\n");
+ info_array[0] = 255;
+ info_array[1] = 63;
+ info_array[2] = disk->capacity / (255 * 63);
+ } else {
+ printk("aha152x: unable to verify geometry for disk with >1GB.\n"
+ " Using default translation. Please verify yourself.\n"
+ " Perhaps you need to enable extended translation in the driver.\n"
+ " See /usr/src/linux/drivers/scsi/aha152x.c for details.\n");
+ }
+ } else {
+ info_array[0]=info[0];
+ info_array[1]=info[1];
+ info_array[2]=info[2];
+
+ if(info[0]==255 && !EXT_TRANS) {
+ printk("aha152x: current partition table is using extended translation.\n"
+ " using it also, although it's not explicitly enabled.\n");
+ }
+ }
+ }
+
+#if defined(DEBUG_BIOSPARAM)
+ if(HOSTDATA(shpnt)->debug & debug_biosparam) {
+ printk("bios geometry: head=%d, sec=%d, cyl=%d\n",
+ info_array[0], info_array[1], info_array[2]);
+ printk("WARNING: check, if the bios geometry is correct.\n");
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Internal done function
+ */
+void aha152x_done(struct Scsi_Host *shpnt, int error)
+{
+ unsigned long flags;
+ Scsi_Cmnd *done_SC;
+
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done) {
+ printk("\naha152x: done(), ");
+ disp_ports(shpnt);
+ }
+#endif
+
+ if(CURRENT_SC) {
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("done(%x), ", error);
+#endif
+
+ save_flags(flags);
+ cli();
+
+ done_SC = CURRENT_SC;
+ CURRENT_SC = NULL;
+
+ /* turn led off, when no commands are in the driver */
+ HOSTDATA(shpnt)->commands--;
+ if(!HOSTDATA(shpnt)->commands)
+ SETPORT(PORTA, 0); /* turn led off */
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("ok (%d), ", HOSTDATA(shpnt)->commands);
+#endif
+ restore_flags(flags);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+#if 0
+/* Why poll for the BUS FREE phase, when we have set up the interrupt!? */
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("BUS FREE loop, ");
+#endif
+ while(TESTLO(SSTAT1, BUSFREE))
+ barrier();
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("BUS FREE\n");
+#endif
+#endif
+
+ done_SC->result = error;
+ if(done_SC->scsi_done) {
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("calling scsi_done, ");
+#endif
+ done_SC->scsi_done(done_SC);
+#if defined(DEBUG_DONE)
+ if(HOSTDATA(shpnt)->debug & debug_done)
+ printk("done returned, ");
+#endif
+ } else
+ panic("aha152x: current_SC->scsi_done() == NULL");
+ } else
+ aha152x_panic(shpnt, "done() called outside of command");
+}
+
+/*
+ * Interrupts handler (main routine of the driver)
+ */
+void aha152x_intr(int irqno, void *dev_id, struct pt_regs * regs)
+{
+ struct Scsi_Host *shpnt = aha152x_host[irqno-IRQ_MIN];
+ unsigned long flags;
+ int done=0, phase;
+
+#if defined(DEBUG_RACE)
+ enter_driver("intr");
+#else
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ printk("\naha152x: intr(), ");
+#endif
+#endif
+
+ if(!shpnt)
+ panic("aha152x: caught interrupt for unknown controller.\n");
+
+ /* no more interrupts from the controller, while we're busy.
+ INTEN has to be restored, when we're ready to leave
+ intr(). To avoid race conditions, we have to return
+ immediately afterwards. */
+ CLRBITS(DMACNTRL0, INTEN);
+ sti(); /* Yes, sti() really needs to be here */
+
+ /* a disconnected target is trying to reconnect.
+ This is only possible if we have disconnected nexuses and
+ nothing else is occupying the bus.
+ */
+ if(TESTHI(SSTAT0, SELDI) &&
+ DISCONNECTED_SC &&
+ (!CURRENT_SC || (CURRENT_SC->SCp.phase & in_selection)) ) {
+ int identify_msg, target, i;
+
+ /* Avoid conflicts when a target reconnects
+ while we are trying to connect to another. */
+ if(CURRENT_SC) {
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i+, ");
+#endif
+ save_flags(flags);
+ cli();
+ append_SC(&ISSUE_SC, CURRENT_SC);
+ CURRENT_SC=NULL;
+ restore_flags(flags);
+ }
+
+ /* disable sequences */
+ SETPORT(SCSISEQ, 0);
+ SETPORT(SSTAT0, CLRSELDI);
+ SETPORT(SSTAT1, CLRBUSFREE);
+
+#if defined(DEBUG_QUEUES) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_queues|debug_phases))
+ printk("reselected, ");
+#endif
+
+ i = GETPORT(SELID) & ~(1 << shpnt->this_id);
+ target=0;
+
+ if(i==0)
+ aha152x_panic(shpnt, "reconnecting target unknown");
+
+ for(; (i & 1)==0; target++, i>>=1)
+ ;
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("SELID=%02x, target=%d, ", GETPORT(SELID), target);
+#endif
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | target);
+ SETPORT(SCSISEQ, ENRESELI);
+
+ if(TESTLO(SSTAT0, SELDI))
+ aha152x_panic(shpnt, "RESELI failed");
+
+ SETPORT(SCSIRATE, HOSTDATA(shpnt)->syncrate[target]&0x7f);
+
+ SETPORT(SCSISIG, P_MSGI);
+
+ /* Get identify message */
+ if((i=getphase(shpnt))!=P_MSGI) {
+ printk("target doesn't enter MSGI to identify (phase=%02x)\n", i);
+ aha152x_panic(shpnt, "unknown lun");
+ }
+ SETPORT(SCSISEQ, 0);
+
+ SETPORT(SXFRCTL0, CH1);
+
+ identify_msg = GETPORT(SCSIBUS);
+
+ if(!(identify_msg & IDENTIFY_BASE)) {
+ printk("target=%d, inbound message (%02x) != IDENTIFY\n",
+ target, identify_msg);
+ aha152x_panic(shpnt, "unknown lun");
+ }
+
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("identify=%02x, lun=%d, ", identify_msg, identify_msg & 0x3f);
+#endif
+
+ save_flags(flags);
+ cli();
+
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("d-, ");
+#endif
+ CURRENT_SC = remove_SC(&DISCONNECTED_SC, target, identify_msg & 0x3f);
+
+ if(!CURRENT_SC) {
+ printk("lun=%d, ", identify_msg & 0x3f);
+ aha152x_panic(shpnt, "no disconnected command for that lun");
+ }
+
+ CURRENT_SC->SCp.phase &= ~disconnected;
+ restore_flags(flags);
+
+ make_acklow(shpnt);
+ if(getphase(shpnt)!=P_MSGI) {
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+#if defined(DEBUG_RACE)
+ leave_driver("(reselected) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+ }
+
+ /* check whether we are already busy with a command */
+ if(!CURRENT_SC) {
+ /* bus is free to issue a queued command */
+ if(TESTHI(SSTAT1, BUSFREE) && ISSUE_SC) {
+ save_flags(flags);
+ cli();
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("i-, ");
+#endif
+ CURRENT_SC = remove_first_SC(&ISSUE_SC);
+ restore_flags(flags);
+
+#if defined(DEBUG_INTR) || defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_selection|debug_phases))
+ printk("issuing command, ");
+#endif
+ CURRENT_SC->SCp.phase = in_selection;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_selection|debug_phases))
+ printk("selecting %d, ", CURRENT_SC->target);
+#endif
+ SETPORT(SCSIID, (shpnt->this_id << OID_) | CURRENT_SC->target);
+
+ /* Enable interrupts for SELECTION OUT DONE and SELECTION OUT INITIATED */
+ SETPORT(SXFRCTL1, HOSTDATA(shpnt)->parity ? (ENSPCHK|ENSTIMER) : ENSTIMER);
+
+ /* enable interrupts for SELECTION OUT DONE and SELECTION TIME OUT */
+ SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
+ SETPORT(SIMODE1, ENSELTIMO);
+
+ /* Enable SELECTION OUT sequence */
+ SETBITS(SCSISEQ, ENSELO | ENAUTOATNO);
+
+ } else {
+ /* no command in progress and none queued to issue */
+ printk("aha152x: ignoring spurious interrupt, nothing to do\n");
+ if(TESTHI(DMACNTRL0, SWINT)) {
+ printk("aha152x: SWINT is set! Why?\n");
+ CLRBITS(DMACNTRL0, SWINT);
+ }
+ show_queues(shpnt);
+ }
+
+#if defined(DEBUG_RACE)
+ leave_driver("(selecting) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+ }
+
+ /* the bus is busy with something */
+
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ disp_ports(shpnt);
+#endif
+
+ /* we are waiting for the result of a selection attempt */
+ if(CURRENT_SC->SCp.phase & in_selection) {
+ if(TESTLO(SSTAT1, SELTO)) {
+ /* no timeout */
+ if(TESTHI(SSTAT0, SELDO)) {
+ /* clear BUS FREE interrupt */
+ SETPORT(SSTAT1, CLRBUSFREE);
+
+ /* Disable SELECTION OUT sequence */
+ CLRBITS(SCSISEQ, ENSELO|ENAUTOATNO);
+
+ /* Disable SELECTION OUT DONE interrupt */
+ CLRBITS(SIMODE0, ENSELDO);
+ CLRBITS(SIMODE1, ENSELTIMO);
+
+ if(TESTLO(SSTAT0, SELDO)) {
+ printk("aha152x: passing bus free condition\n");
+
+#if defined(DEBUG_RACE)
+ leave_driver("(passing bus free) intr");
+#endif
+ SETBITS(DMACNTRL0, INTEN);
+
+ if(CURRENT_SC->SCp.phase & aborted) {
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_ERROR;
+ HOSTDATA(shpnt)->abortion_complete++;
+ }
+
+ aha152x_done(shpnt, DID_NO_CONNECT << 16);
+
+ return;
+ }
+#if defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_selection|debug_phases))
+ printk("SELDO (SELID=%x), ", GETPORT(SELID));
+#endif
+
+ /* selection was done */
+ SETPORT(SSTAT0, CLRSELDO);
+
+#if defined(DEBUG_ABORT)
+ if((HOSTDATA(shpnt)->debug & debug_abort) && (CURRENT_SC->SCp.phase & aborted))
+ printk("(ABORT) target selected, ");
+#endif
+
+ CURRENT_SC->SCp.phase &= ~in_selection;
+ CURRENT_SC->SCp.phase |= in_other;
+
+ ADDMSG(IDENTIFY(HOSTDATA(shpnt)->reconnect,CURRENT_SC->lun));
+
+ if(!(SYNCRATE&0x80) && HOSTDATA(shpnt)->synchronous) {
+ ADDMSG(EXTENDED_MESSAGE);
+ ADDMSG(3);
+ ADDMSG(EXTENDED_SDTR);
+ ADDMSG(50);
+ ADDMSG(8);
+
+ printk("outbound SDTR: ");
+ print_msg(&MSG(MSGLEN-5));
+
+ SYNCRATE=0x80;
+ CURRENT_SC->SCp.phase |= in_sync;
+ }
+
+#if defined(DEBUG_RACE)
+ leave_driver("(SELDO) intr");
+#endif
+ SETPORT(SCSIRATE, SYNCRATE&0x7f);
+
+ SETPORT(SCSISIG, P_MSGO);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENREQINIT|ENBUSFREE);
+ SETBITS(DMACNTRL0, INTEN);
+
+ return;
+ } else
+ aha152x_panic(shpnt, "neither timeout nor selection\007");
+ } else {
+#if defined(DEBUG_SELECTION) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_selection|debug_phases))
+ printk("SELTO, ");
+#endif
+ /* end selection attempt */
+ CLRBITS(SCSISEQ, ENSELO|ENAUTOATNO);
+
+ /* timeout */
+ SETPORT(SSTAT1, CLRSELTIMO);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ SETBITS(DMACNTRL0, INTEN);
+#if defined(DEBUG_RACE)
+ leave_driver("(SELTO) intr");
+#endif
+
+ if(CURRENT_SC->SCp.phase & aborted) {
+#if defined(DEBUG_ABORT)
+ if(HOSTDATA(shpnt)->debug & debug_abort)
+ printk("(ABORT) selection timeout, ");
+#endif
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_ERROR;
+ HOSTDATA(shpnt)->abortion_complete++;
+ }
+
+ if(TESTLO(SSTAT0, SELINGO))
+ /* ARBITRATION not won */
+ aha152x_done(shpnt, DID_BUS_BUSY << 16);
+ else
+ /* ARBITRATION won, but SELECTION failed */
+ aha152x_done(shpnt, DID_NO_CONNECT << 16);
+
+ return;
+ }
+ }
+
+ /* enable an interrupt when the target leaves the current phase */
+ phase = getphase(shpnt);
+ if(!(phase & ~P_MASK)) /* "real" phase */
+ SETPORT(SCSISIG, phase);
+ SETPORT(SSTAT1, CLRPHASECHG);
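+ /* record the current bus phase in the upper 16 bits of SCp.phase */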
+ CURRENT_SC->SCp.phase =
+ (CURRENT_SC->SCp.phase & ~((P_MASK|1)<<16)) | (phase << 16);
+
+ /* information transfer phase */
+ switch(phase) {
+ case P_MSGO: /* MESSAGE OUT */
+ {
+ int i, identify=0, abort=0;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_MSGO) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_msgo|debug_phases))
+ printk("MESSAGE OUT, ");
+#endif
+ if(MSGLEN==0) {
+ ADDMSG(MESSAGE_REJECT);
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo)
+ printk("unexpected MESSAGE OUT phase; rejecting, ");
+#endif
+ }
+
+ CLRBITS(SXFRCTL0, ENDMA);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENREQINIT|ENBUSFREE);
+
+ /* wait for data latch to become ready or a phase change */
+ while(TESTLO(DMASTAT, INTSTAT))
+ barrier();
+
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo) {
+ int i;
+
+ printk("messages (");
+ for(i=0; i<MSGLEN; i+=print_msg(&MSG(i)), printk(" "))
+ ;
+ printk("), ");
+ }
+#endif
+
+ for(i=0; i<MSGLEN && TESTLO(SSTAT1, PHASEMIS); i++) {
+#if defined(DEBUG_MSGO)
+ if(HOSTDATA(shpnt)->debug & debug_msgo)
+ printk("%x ", MSG(i));
+#endif
+ if(i==MSGLEN-1) {
+ /* Leave MESSAGE OUT after transfer */
+ SETPORT(SSTAT1, CLRATNO);
+ }
+
+ SETPORT(SCSIDAT, MSG(i));
+
+ make_acklow(shpnt);
+ getphase(shpnt);
+
+ if(MSG(i)==IDENTIFY(HOSTDATA(shpnt)->reconnect,CURRENT_SC->lun))
+ identify++;
+
+ if(MSG(i)==ABORT)
+ abort++;
+
+ }
+
+ MSGLEN=0;
+
+ if(identify)
+ CURRENT_SC->SCp.phase |= sent_ident;
+
+ if(abort) {
+ /* revive abort(); abort() enables interrupts */
+ HOSTDATA(shpnt)->abort_result=SCSI_ABORT_SUCCESS;
+ HOSTDATA(shpnt)->abortion_complete++;
+
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ /* exit */
+ SETBITS(DMACNTRL0, INTEN);
+#if defined(DEBUG_RACE)
+ leave_driver("(ABORT) intr");
+#endif
+ aha152x_done(shpnt, DID_ABORT<<16);
+
+ return;
+ }
+ }
+ break;
+
+ case P_CMD: /* COMMAND phase */
+#if defined(DEBUG_INTR) || defined(DEBUG_CMD) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_cmd|debug_phases))
+ printk("COMMAND, ");
+#endif
+ if(!(CURRENT_SC->SCp.sent_command)) {
+ int i;
+
+ CLRBITS(SXFRCTL0, ENDMA);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENREQINIT|ENBUSFREE);
+
+ /* wait for data latch to become ready or a phase change */
+ while(TESTLO(DMASTAT, INTSTAT))
+ barrier();
+
+ for(i=0; i<CURRENT_SC->cmd_len && TESTLO(SSTAT1, PHASEMIS); i++) {
+ SETPORT(SCSIDAT, CURRENT_SC->cmnd[i]);
+
+ make_acklow(shpnt);
+ getphase(shpnt);
+ }
+
+ if(i<CURRENT_SC->cmd_len && TESTHI(SSTAT1, PHASEMIS))
+ aha152x_panic(shpnt, "target left COMMAND");
+
+ CURRENT_SC->SCp.sent_command++;
+ } else
+ aha152x_panic(shpnt, "Nothing to send while in COMMAND");
+ break;
+
+ case P_MSGI: /* MESSAGE IN phase */
+ {
+ int start_sync=0;
+
+#if defined(DEBUG_INTR) || defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_intr|debug_msgi|debug_phases))
+ printk("MESSAGE IN, ");
+#endif
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENBUSFREE);
+
+ while(phase == P_MSGI) {
+ CURRENT_SC->SCp.Message = GETPORT(SCSIDAT);
+ switch(CURRENT_SC->SCp.Message) {
+ case DISCONNECT:
+#if defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_msgi|debug_phases))
+ printk("target disconnected, ");
+#endif
+ CURRENT_SC->SCp.Message = 0;
+ CURRENT_SC->SCp.phase |= disconnected;
+ if(!HOSTDATA(shpnt)->reconnect)
+ aha152x_panic(shpnt, "target was not allowed to disconnect");
+
+ break;
+
+ case COMMAND_COMPLETE:
+#if defined(DEBUG_MSGI) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_msgi|debug_phases))
+ printk("inbound message (COMMAND COMPLETE), ");
+#endif
+ done++;
+ break;
+
+ case MESSAGE_REJECT:
+ if(CURRENT_SC->SCp.phase & in_sync) {
+ CURRENT_SC->SCp.phase &= ~in_sync;
+ SYNCRATE=0x80;
+ printk("synchronous rejected, ");
+ } else
+ printk("inbound message (MESSAGE REJECT), ");
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (MESSAGE REJECT), ");
+#endif
+ break;
+
+ case SAVE_POINTERS:
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (SAVE DATA POINTERS), ");
+#endif
+ break;
+
+ case RESTORE_POINTERS:
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (RESTORE DATA POINTERS), ");
+#endif
+ break;
+
+ case EXTENDED_MESSAGE:
+ {
+ char buffer[16];
+ int i;
+
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ printk("inbound message (EXTENDED MESSAGE), ");
+#endif
+ make_acklow(shpnt);
+ if(getphase(shpnt)!=P_MSGI)
+ break;
+
+ buffer[0]=EXTENDED_MESSAGE;
+ buffer[1]=GETPORT(SCSIDAT);
+
+ for(i=0; i<buffer[1] &&
+ (make_acklow(shpnt), getphase(shpnt)==P_MSGI); i++)
+ buffer[2+i]=GETPORT(SCSIDAT);
+
+#if defined(DEBUG_MSGI)
+ if(HOSTDATA(shpnt)->debug & debug_msgi)
+ print_msg(buffer);
+#endif
+
+ switch(buffer [2]) {
+ case EXTENDED_SDTR:
+ {
+ long ticks;
+
+ if(buffer[1]!=3)
+ aha152x_panic(shpnt, "SDTR message length != 3");
+
+ if(!HOSTDATA(shpnt)->synchronous)
+ break;
+
+ printk("inbound SDTR: "); print_msg(buffer);
+
+ ticks=(buffer[3]*4+49)/50;
+
+ if(CURRENT_SC->SCp.phase & in_sync) {
+ /* we initiated SDTR */
+ if(ticks>9 || buffer[4]<1 || buffer[4]>8)
+ aha152x_panic(shpnt, "received SDTR invalid");
+
+ SYNCRATE |= ((ticks-2)<<4) + buffer[4];
+ } else if(ticks<=9 && buffer[4]>=1) {
+ if(buffer[4]>8)
+ buffer[4]=8;
+
+ ADDMSG(EXTENDED_MESSAGE);
+ ADDMSG(3);
+ ADDMSG(EXTENDED_SDTR);
+ if(ticks<4) {
+ ticks=4;
+ ADDMSG(50);
+ } else
+ ADDMSG(buffer[3]);
+
+ ADDMSG(buffer[4]);
+
+ printk("outbound SDTR: ");
+ print_msg(&MSG(MSGLEN-5));
+
+ CURRENT_SC->SCp.phase |= in_sync;
+
+ SYNCRATE |= ((ticks-2)<<4) + buffer[4];
+
+ start_sync++;
+ } else {
+ /* requested SDTR is too slow, do it asynchronously */
+ ADDMSG(MESSAGE_REJECT);
+ SYNCRATE = 0;
+ }
+
+ SETPORT(SCSIRATE, SYNCRATE&0x7f);
+ }
+ break;
+
+ case EXTENDED_MODIFY_DATA_POINTER:
+ case EXTENDED_EXTENDED_IDENTIFY:
+ case EXTENDED_WDTR:
+ default:
+ ADDMSG(MESSAGE_REJECT);
+ break;
+ }
+ }
+ break;
+
+ default:
+ printk("unsupported inbound message %x, ", CURRENT_SC->SCp.Message);
+ break;
+
+ }
+
+ make_acklow(shpnt);
+ phase=getphase(shpnt);
+ }
+
+ if(start_sync)
+ CURRENT_SC->SCp.phase |= in_sync;
+ else
+ CURRENT_SC->SCp.phase &= ~in_sync;
+
+ if(MSGLEN>0)
+ SETPORT(SCSISIG, P_MSGI|ATNO);
+
+ /* clear SCSI fifo on BUSFREE */
+ if(phase==P_BUSFREE)
+ SETPORT(SXFRCTL0, CH1|CLRCH1);
+
+ if(CURRENT_SC->SCp.phase & disconnected) {
+ save_flags(flags);
+ cli();
+#if defined(DEBUG_QUEUES)
+ if(HOSTDATA(shpnt)->debug & debug_queues)
+ printk("d+, ");
+#endif
+ append_SC(&DISCONNECTED_SC, CURRENT_SC);
+ CURRENT_SC->SCp.phase |= 1<<16;
+ CURRENT_SC = NULL;
+ restore_flags(flags);
+
+ SETBITS(SCSISEQ, ENRESELI);
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ return;
+ }
+ }
+ break;
+
+ case P_STATUS: /* STATUS IN phase */
+#if defined(DEBUG_STATUS) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_status|debug_intr|debug_phases))
+ printk("STATUS, ");
+#endif
+ SETPORT(SXFRCTL0, CH1);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENREQINIT|ENBUSFREE);
+
+ if(TESTHI(SSTAT1, PHASEMIS))
+ printk("aha152x: passing STATUS phase");
+
+ CURRENT_SC->SCp.Status = GETPORT(SCSIBUS);
+ make_acklow(shpnt);
+ getphase(shpnt);
+
+#if defined(DEBUG_STATUS)
+ if(HOSTDATA(shpnt)->debug & debug_status) {
+ printk("inbound status ");
+ print_status(CURRENT_SC->SCp.Status);
+ printk(", ");
+ }
+#endif
+ break;
+
+ case P_DATAI: /* DATA IN phase */
+ {
+ int fifodata, data_count, done;
+
+#if defined(DEBUG_DATAI) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_datai|debug_intr|debug_phases))
+ printk("DATA IN, ");
+#endif
+
+#if 0
+ if(GETPORT(FIFOSTAT) || GETPORT(SSTAT2) & (SFULL|SFCNT))
+ printk("aha152x: P_DATAI: %d(%d) bytes left in FIFO, resetting\n",
+ GETPORT(FIFOSTAT), GETPORT(SSTAT2) & (SFULL|SFCNT));
+#endif
+
+ /* reset host fifo */
+ SETPORT(DMACNTRL0, RSTFIFO);
+ SETPORT(DMACNTRL0, RSTFIFO|ENDMA);
+
+ SETPORT(SXFRCTL0, CH1|SCSIEN|DMAEN);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+
+ /* done is set when the FIFO is empty after the target left DATA IN */
+ done=0;
+
+ /* while the target stays in DATA to transfer data */
+ while (!done) {
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("expecting data, ");
+#endif
+ /* wait for PHASEMIS or full FIFO */
+ while(TESTLO(DMASTAT, DFIFOFULL|INTSTAT))
+ barrier();
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("ok, ");
+#endif
+
+ if(TESTHI(DMASTAT, DFIFOFULL))
+ fifodata=GETPORT(FIFOSTAT);
+ else {
+ /* wait for SCSI fifo to get empty */
+ while(TESTLO(SSTAT2, SEMPTY))
+ barrier();
+
+ /* rest of data in FIFO */
+ fifodata=GETPORT(FIFOSTAT);
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("last transfer, ");
+#endif
+ done=1;
+ }
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("fifodata=%d, ", fifodata);
+#endif
+
+ while(fifodata && CURRENT_SC->SCp.this_residual) {
+ data_count=fifodata;
+
+ /* limit data transfer to the size of the current sg buffer */
+ if(data_count > CURRENT_SC->SCp.this_residual)
+ data_count = CURRENT_SC->SCp.this_residual;
+
+ fifodata -= data_count;
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ printk("data_count=%d, ", data_count);
+#endif
+
+ if(data_count&1) {
+ /* get a single byte in byte mode */
+ SETBITS(DMACNTRL0, _8BIT);
+ *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
+ CURRENT_SC->SCp.this_residual--;
+ }
+
+ if(data_count>1) {
+ CLRBITS(DMACNTRL0, _8BIT);
+ data_count >>= 1; /* Number of words */
+ insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ /* show what comes with the last transfer */
+ if(done) {
+#if 0
+ int i;
+ unsigned char *data;
+#endif
+
+ printk("data on last transfer (%d bytes) ",
+ 2*data_count);
+#if 0
+ printk("data on last transfer (%d bytes: ",
+ 2*data_count);
+ data = (unsigned char *) CURRENT_SC->SCp.ptr;
+ for(i=0; i<2*data_count; i++)
+ printk("%2x ", *data++);
+ printk("), ");
+#endif
+ }
+#endif
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ }
+
+ /* if this buffer is full and there are more buffers left */
+ if(!CURRENT_SC->SCp.this_residual &&
+ CURRENT_SC->SCp.buffers_residual) {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr = CURRENT_SC->SCp.buffer->address;
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ }
+ }
+
+ /*
+ * FIFO should be empty
+ */
+ if(fifodata>0) {
+ printk("aha152x: more data than expected (%d bytes)\n",
+ GETPORT(FIFOSTAT));
+ SETBITS(DMACNTRL0, _8BIT);
+ printk("aha152x: data (");
+ while(fifodata--)
+ printk("%2x ", GETPORT(DATAPORT));
+ printk(")\n");
+ }
+
+#if defined(DEBUG_DATAI)
+ if(HOSTDATA(shpnt)->debug & debug_datai)
+ if(!fifodata)
+ printk("fifo empty, ");
+ else
+ printk("something left in fifo, ");
+#endif
+ }
+
+#if defined(DEBUG_DATAI)
+ if((HOSTDATA(shpnt)->debug & debug_datai) &&
+ (CURRENT_SC->SCp.buffers_residual ||
+ CURRENT_SC->SCp.this_residual))
+ printk("left buffers (buffers=%d, bytes=%d), ",
+ CURRENT_SC->SCp.buffers_residual, CURRENT_SC->SCp.this_residual);
+#endif
+ /* transfer can be considered ended, when SCSIEN reads back zero */
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+ while(TESTHI(SXFRCTL0, SCSIEN))
+ barrier();
+ CLRBITS(DMACNTRL0, ENDMA);
+
+#if defined(DEBUG_DATAI) || defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & (debug_datai|debug_intr))
+ printk("got %d bytes, ", GETSTCNT());
+#endif
+
+ CURRENT_SC->SCp.have_data_in++;
+ }
+ break;
+
+ case P_DATAO: /* DATA OUT phase */
+ {
+ int data_count;
+
+#if defined(DEBUG_DATAO) || defined(DEBUG_INTR) || defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & (debug_datao|debug_intr|debug_phases))
+ printk("DATA OUT, ");
+#endif
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("got data to send (bytes=%d, buffers=%d), ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+
+ if(GETPORT(FIFOSTAT) || GETPORT(SSTAT2) & (SFULL|SFCNT)) {
+ printk("%d(%d) left in FIFO, ",
+ GETPORT(FIFOSTAT), GETPORT(SSTAT2) & (SFULL|SFCNT));
+ aha152x_panic(shpnt, "FIFO should be empty");
+ }
+
+ SETPORT(SXFRCTL0, CH1|CLRSTCNT|CLRCH1);
+ SETPORT(SXFRCTL0, SCSIEN|DMAEN|CH1);
+
+ SETPORT(DMACNTRL0, WRITE_READ|RSTFIFO);
+ SETPORT(DMACNTRL0, ENDMA|WRITE_READ);
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+
+ /* while current buffer is not empty or
+ there are more buffers to transfer */
+ while(TESTLO(SSTAT1, PHASEMIS) &&
+ (CURRENT_SC->SCp.this_residual ||
+ CURRENT_SC->SCp.buffers_residual)) {
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("sending data (left: bytes=%d, buffers=%d), waiting, ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+ /* transfer the rest of the buffer, but at most 128 bytes */
+ data_count =
+ CURRENT_SC->SCp.this_residual > 128 ?
+ 128 : CURRENT_SC->SCp.this_residual ;
+
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("data_count=%d, ", data_count);
+#endif
+
+ if(data_count&1) {
+ /* put a single byte in byte mode */
+ SETBITS(DMACNTRL0, _8BIT);
+ SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
+ CURRENT_SC->SCp.this_residual--;
+ }
+ if(data_count>1) {
+ CLRBITS(DMACNTRL0, _8BIT);
+ data_count >>= 1; /* number of words */
+ outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
+ CURRENT_SC->SCp.ptr += 2 * data_count;
+ CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ }
+
+ /* wait for FIFO to get empty */
+ while(TESTLO(DMASTAT, DFIFOEMP|INTSTAT))
+ barrier();
+
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("fifo (%d bytes), transferred (%d bytes), ",
+ GETPORT(FIFOSTAT), GETSTCNT());
+#endif
+
+ /* if this buffer is empty and there are more buffers left */
+ if(TESTLO(SSTAT1, PHASEMIS) &&
+ !CURRENT_SC->SCp.this_residual &&
+ CURRENT_SC->SCp.buffers_residual) {
+ /* advance to next buffer */
+ CURRENT_SC->SCp.buffers_residual--;
+ CURRENT_SC->SCp.buffer++;
+ CURRENT_SC->SCp.ptr = CURRENT_SC->SCp.buffer->address;
+ CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ }
+ }
+
+ if(CURRENT_SC->SCp.this_residual || CURRENT_SC->SCp.buffers_residual) {
+ /* the target left DATA OUT for another phase (perhaps disconnect) */
+
+ /* data left in the fifos has to be resent */
+ data_count = GETPORT(SSTAT2) & (SFULL|SFCNT);
+
+ data_count += GETPORT(FIFOSTAT) ;
+ CURRENT_SC->SCp.ptr -= data_count;
+ CURRENT_SC->SCp.this_residual += data_count;
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("left data (bytes=%d, buffers=%d), fifos (bytes=%d), "
+ "transfer incomplete, resetting fifo, ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual,
+ data_count);
+#endif
+ SETPORT(DMACNTRL0, WRITE_READ|RSTFIFO);
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+ CLRBITS(DMACNTRL0, ENDMA);
+ } else {
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("waiting for SCSI fifo to get empty, ");
+#endif
+ /* wait for SCSI fifo to get empty */
+ while(TESTLO(SSTAT2, SEMPTY))
+ barrier();
+#if defined(DEBUG_DATAO)
+ if(HOSTDATA(shpnt)->debug & debug_datao)
+ printk("ok, left data (bytes=%d, buffers=%d) ",
+ CURRENT_SC->SCp.this_residual,
+ CURRENT_SC->SCp.buffers_residual);
+#endif
+ CLRBITS(SXFRCTL0, SCSIEN|DMAEN);
+
+ /* transfer can be considered ended, when SCSIEN reads back zero */
+ while(TESTHI(SXFRCTL0, SCSIEN))
+ barrier();
+
+ CLRBITS(DMACNTRL0, ENDMA);
+ }
+
+#if defined(DEBUG_DATAO) || defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & (debug_datao|debug_intr))
+ printk("sent %d data bytes, ", GETSTCNT());
+#endif
+ }
+ break;
+
+ case P_BUSFREE: /* BUSFREE */
+#if defined(DEBUG_RACE)
+ leave_driver("(BUSFREE) intr");
+#endif
+#if defined(DEBUG_PHASES)
+ if(HOSTDATA(shpnt)->debug & debug_phases)
+ printk("unexpected BUS FREE, ");
+#endif
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ aha152x_done(shpnt, DID_ERROR << 16); /* Don't know any better */
+ return;
+ break;
+
+ case P_PARITY: /* parity error in DATA phase */
+#if defined(DEBUG_RACE)
+ leave_driver("(DID_PARITY) intr");
+#endif
+ printk("PARITY error in DATA phase, ");
+
+ CURRENT_SC->SCp.phase &= ~(P_MASK<<16);
+
+ SETBITS(DMACNTRL0, INTEN);
+ aha152x_done(shpnt, DID_PARITY << 16);
+ return;
+ break;
+
+ default:
+ printk("aha152x: unexpected phase\n");
+ break;
+ }
+
+ if(done) {
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ printk("command done.\n");
+#endif
+#if defined(DEBUG_RACE)
+ leave_driver("(done) intr");
+#endif
+
+ SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0);
+ SETPORT(SIMODE1, ISSUE_SC ? ENBUSFREE : 0);
+ SETPORT(SCSISEQ, DISCONNECTED_SC ? ENRESELI : 0);
+
+ SETBITS(DMACNTRL0, INTEN);
+
+ aha152x_done(shpnt,
+ (CURRENT_SC->SCp.Status & 0xff)
+ | ((CURRENT_SC->SCp.Message & 0xff) << 8)
+ | (DID_OK << 16));
+
+#if defined(DEBUG_RACE)
+ printk("done returned (DID_OK: Status=%x; Message=%x).\n",
+ CURRENT_SC->SCp.Status, CURRENT_SC->SCp.Message);
+#endif
+ return;
+ }
+
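+ /* mark the phase end in bit 16 of SCp.phase (shown as "phaseend" by show_command()) */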
+ if(CURRENT_SC)
+ CURRENT_SC->SCp.phase |= 1<<16;
+
+ SETPORT(SIMODE0, 0);
+ SETPORT(SIMODE1, ENPHASEMIS|ENBUSFREE);
+#if defined(DEBUG_INTR)
+ if(HOSTDATA(shpnt)->debug & debug_intr)
+ disp_enintr(shpnt);
+#endif
+#if defined(DEBUG_RACE)
+ leave_driver("(PHASEEND) intr");
+#endif
+
+ SETBITS(DMACNTRL0, INTEN);
+ return;
+}
+
+/*
+ * Dump the current driver status and panic...
+ */
+static void aha152x_panic(struct Scsi_Host *shpnt, char *msg)
+{
+ printk("\naha152x: %s\n", msg);
+ show_queues(shpnt);
+ panic("aha152x panic");
+}
+
+/*
+ * Display registers of AIC-6260
+ */
+static void disp_ports(struct Scsi_Host *shpnt)
+{
+#ifdef DEBUG_AHA152X
+ int s;
+
+#ifdef SKIP_PORTS
+ if(HOSTDATA(shpnt)->debug & debug_skipports)
+ return;
+#endif
+
+ printk("\n%s: ", CURRENT_SC ? "on bus" : "waiting");
+
+ s=GETPORT(SCSISEQ);
+ printk("SCSISEQ (");
+ if(s & TEMODEO) printk("TARGET MODE ");
+ if(s & ENSELO) printk("SELO ");
+ if(s & ENSELI) printk("SELI ");
+ if(s & ENRESELI) printk("RESELI ");
+ if(s & ENAUTOATNO) printk("AUTOATNO ");
+ if(s & ENAUTOATNI) printk("AUTOATNI ");
+ if(s & ENAUTOATNP) printk("AUTOATNP ");
+ if(s & SCSIRSTO) printk("SCSIRSTO ");
+ printk(");");
+
+ printk(" SCSISIG (");
+ s=GETPORT(SCSISIG);
+ switch(s & P_MASK) {
+ case P_DATAO:
+ printk("DATA OUT");
+ break;
+ case P_DATAI:
+ printk("DATA IN");
+ break;
+ case P_CMD:
+ printk("COMMAND");
+ break;
+ case P_STATUS:
+ printk("STATUS");
+ break;
+ case P_MSGO:
+ printk("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ printk("MESSAGE IN");
+ break;
+ default:
+ printk("*illegal*");
+ break;
+ }
+
+ printk("); ");
+
+ printk("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+
+ printk("SSTAT (");
+ s=GETPORT(SSTAT0);
+ if(s & TARGET) printk("TARGET ");
+ if(s & SELDO) printk("SELDO ");
+ if(s & SELDI) printk("SELDI ");
+ if(s & SELINGO) printk("SELINGO ");
+ if(s & SWRAP) printk("SWRAP ");
+ if(s & SDONE) printk("SDONE ");
+ if(s & SPIORDY) printk("SPIORDY ");
+ if(s & DMADONE) printk("DMADONE ");
+
+ s=GETPORT(SSTAT1);
+ if(s & SELTO) printk("SELTO ");
+ if(s & ATNTARG) printk("ATNTARG ");
+ if(s & SCSIRSTI) printk("SCSIRSTI ");
+ if(s & PHASEMIS) printk("PHASEMIS ");
+ if(s & BUSFREE) printk("BUSFREE ");
+ if(s & SCSIPERR) printk("SCSIPERR ");
+ if(s & PHASECHG) printk("PHASECHG ");
+ if(s & REQINIT) printk("REQINIT ");
+ printk("); ");
+
+
+ printk("SSTAT (");
+
+ s=GETPORT(SSTAT0) & GETPORT(SIMODE0);
+
+ if(s & TARGET) printk("TARGET ");
+ if(s & SELDO) printk("SELDO ");
+ if(s & SELDI) printk("SELDI ");
+ if(s & SELINGO) printk("SELINGO ");
+ if(s & SWRAP) printk("SWRAP ");
+ if(s & SDONE) printk("SDONE ");
+ if(s & SPIORDY) printk("SPIORDY ");
+ if(s & DMADONE) printk("DMADONE ");
+
+ s=GETPORT(SSTAT1) & GETPORT(SIMODE1);
+
+ if(s & SELTO) printk("SELTO ");
+ if(s & ATNTARG) printk("ATNTARG ");
+ if(s & SCSIRSTI) printk("SCSIRSTI ");
+ if(s & PHASEMIS) printk("PHASEMIS ");
+ if(s & BUSFREE) printk("BUSFREE ");
+ if(s & SCSIPERR) printk("SCSIPERR ");
+ if(s & PHASECHG) printk("PHASECHG ");
+ if(s & REQINIT) printk("REQINIT ");
+ printk("); ");
+
+ printk("SXFRCTL0 (");
+
+ s=GETPORT(SXFRCTL0);
+ if(s & SCSIEN) printk("SCSIEN ");
+ if(s & DMAEN) printk("DMAEN ");
+ if(s & CH1) printk("CH1 ");
+ if(s & CLRSTCNT) printk("CLRSTCNT ");
+ if(s & SPIOEN) printk("SPIOEN ");
+ if(s & CLRCH1) printk("CLRCH1 ");
+ printk("); ");
+
+ printk("SIGNAL (");
+
+ s=GETPORT(SCSISIG);
+ if(s & ATNI) printk("ATNI ");
+ if(s & SELI) printk("SELI ");
+ if(s & BSYI) printk("BSYI ");
+ if(s & REQI) printk("REQI ");
+ if(s & ACKI) printk("ACKI ");
+ printk("); ");
+
+ printk("SELID (%02x), ", GETPORT(SELID));
+
+ printk("SSTAT2 (");
+
+ s=GETPORT(SSTAT2);
+ if(s & SOFFSET) printk("SOFFSET ");
+ if(s & SEMPTY) printk("SEMPTY ");
+ if(s & SFULL) printk("SFULL ");
+ printk("); SFCNT (%d); ", s & (SFULL|SFCNT));
+
+ s=GETPORT(SSTAT3);
+ printk("SCSICNT (%d), OFFCNT(%d), ", (s&0xf0)>>4, s&0x0f);
+
+ printk("SSTAT4 (");
+ s=GETPORT(SSTAT4);
+ if(s & SYNCERR) printk("SYNCERR ");
+ if(s & FWERR) printk("FWERR ");
+ if(s & FRERR) printk("FRERR ");
+ printk("); ");
+
+ printk("DMACNTRL0 (");
+ s=GETPORT(DMACNTRL0);
+ printk("%s ", s & _8BIT ? "8BIT" : "16BIT");
+ printk("%s ", s & DMA ? "DMA" : "PIO" );
+ printk("%s ", s & WRITE_READ ? "WRITE" : "READ" );
+ if(s & ENDMA) printk("ENDMA ");
+ if(s & INTEN) printk("INTEN ");
+ if(s & RSTFIFO) printk("RSTFIFO ");
+ if(s & SWINT) printk("SWINT ");
+ printk("); ");
+
+ printk("DMASTAT (");
+ s=GETPORT(DMASTAT);
+ if(s & ATDONE) printk("ATDONE ");
+ if(s & WORDRDY) printk("WORDRDY ");
+ if(s & DFIFOFULL) printk("DFIFOFULL ");
+ if(s & DFIFOEMP) printk("DFIFOEMP ");
+ printk(")");
+
+ printk("\n");
+#endif
+}
+
+/*
+ * display enabled interrupts
+ */
+static void disp_enintr(struct Scsi_Host *shpnt)
+{
+ int s;
+
+ printk("enabled interrupts (");
+
+ s=GETPORT(SIMODE0);
+ if(s & ENSELDO) printk("ENSELDO ");
+ if(s & ENSELDI) printk("ENSELDI ");
+ if(s & ENSELINGO) printk("ENSELINGO ");
+ if(s & ENSWRAP) printk("ENSWRAP ");
+ if(s & ENSDONE) printk("ENSDONE ");
+ if(s & ENSPIORDY) printk("ENSPIORDY ");
+ if(s & ENDMADONE) printk("ENDMADONE ");
+
+ s=GETPORT(SIMODE1);
+ if(s & ENSELTIMO) printk("ENSELTIMO ");
+ if(s & ENATNTARG) printk("ENATNTARG ");
+ if(s & ENPHASEMIS) printk("ENPHASEMIS ");
+ if(s & ENBUSFREE) printk("ENBUSFREE ");
+ if(s & ENSCSIPERR) printk("ENSCSIPERR ");
+ if(s & ENPHASECHG) printk("ENPHASECHG ");
+ if(s & ENREQINIT) printk("ENREQINIT ");
+ printk(")\n");
+}
+
+#if defined(DEBUG_RACE)
+
+static const char *should_leave;
+static int in_driver=0;
+
+/*
+ * Only one routine can be in the driver at once.
+ */
+static void enter_driver(const char *func)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ printk("aha152x: entering %s() (%x)\n", func, jiffies);
+ if(in_driver) {
+ printk("%s should leave first.\n", should_leave);
+ panic("aha152x: already in driver\n");
+ }
+
+ in_driver++;
+ should_leave=func;
+ restore_flags(flags);
+}
+
+static void leave_driver(const char *func)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ printk("\naha152x: leaving %s() (%x)\n", func, jiffies);
+ if(!in_driver) {
+ printk("aha152x: %s already left.\n", should_leave);
+ panic("aha152x: %s already left driver.\n", should_leave);
+ }
+
+ in_driver--;
+ should_leave=func;
+ restore_flags(flags);
+}
+#endif
+
+/*
+ * Show the command data of a command
+ */
+static void show_command(Scsi_Cmnd *ptr)
+{
+ printk("0x%08x: target=%d; lun=%d; cmnd=(",
+ (unsigned int) ptr, ptr->target, ptr->lun);
+
+ print_command(ptr->cmnd);
+
+ printk("); residual=%d; buffers=%d; phase |",
+ ptr->SCp.this_residual, ptr->SCp.buffers_residual);
+
+ if(ptr->SCp.phase & not_issued ) printk("not issued|");
+ if(ptr->SCp.phase & in_selection) printk("in selection|");
+ if(ptr->SCp.phase & disconnected) printk("disconnected|");
+ if(ptr->SCp.phase & aborted ) printk("aborted|");
+ if(ptr->SCp.phase & sent_ident ) printk("send_ident|");
+ if(ptr->SCp.phase & in_other) {
+ printk("; in other(");
+ switch((ptr->SCp.phase >> 16) & P_MASK) {
+ case P_DATAO:
+ printk("DATA OUT");
+ break;
+ case P_DATAI:
+ printk("DATA IN");
+ break;
+ case P_CMD:
+ printk("COMMAND");
+ break;
+ case P_STATUS:
+ printk("STATUS");
+ break;
+ case P_MSGO:
+ printk("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ printk("MESSAGE IN");
+ break;
+ default:
+ printk("*illegal*");
+ break;
+ }
+ printk(")");
+ if(ptr->SCp.phase & (1<<16))
+ printk("; phaseend");
+ }
+ printk("; next=0x%08x\n", (unsigned int) ptr->host_scribble);
+}
+
+/*
+ * Dump the queued data
+ */
+static void show_queues(struct Scsi_Host *shpnt)
+{
+ unsigned long flags;
+ Scsi_Cmnd *ptr;
+
+ save_flags(flags);
+ cli();
+ printk("QUEUE STATUS:\nissue_SC:\n");
+ for(ptr=ISSUE_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ show_command(ptr);
+
+ printk("current_SC:\n");
+ if(CURRENT_SC)
+ show_command(CURRENT_SC);
+ else
+ printk("none\n");
+
+ printk("disconnected_SC:\n");
+ for(ptr=DISCONNECTED_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ show_command(ptr);
+
+ disp_ports(shpnt);
+ disp_enintr(shpnt);
+ restore_flags(flags);
+}
+
+int aha152x_set_info(char *buffer, int length, struct Scsi_Host *shpnt)
+{
+ return(-ENOSYS); /* Currently this is a no-op */
+}
+
+#undef SPRINTF
+#define SPRINTF(args...) pos += sprintf(pos, ## args)
+
+static int get_command(char *pos, Scsi_Cmnd *ptr)
+{
+ char *start = pos;
+ int i;
+
+ SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ",
+ (unsigned int) ptr, ptr->target, ptr->lun);
+
+ for(i=0; i<COMMAND_SIZE(ptr->cmnd[0]); i++)
+ SPRINTF("0x%02x ", ptr->cmnd[i]);
+
+ SPRINTF("); residual=%d; buffers=%d; phase |",
+ ptr->SCp.this_residual, ptr->SCp.buffers_residual);
+
+ if(ptr->SCp.phase & not_issued ) SPRINTF("not issued|");
+ if(ptr->SCp.phase & in_selection) SPRINTF("in selection|");
+ if(ptr->SCp.phase & disconnected) SPRINTF("disconnected|");
+ if(ptr->SCp.phase & aborted ) SPRINTF("aborted|");
+ if(ptr->SCp.phase & sent_ident ) SPRINTF("sent_ident|");
+ if(ptr->SCp.phase & in_other) {
+ SPRINTF("; in other(");
+ switch((ptr->SCp.phase >> 16) & P_MASK) {
+ case P_DATAO:
+ SPRINTF("DATA OUT");
+ break;
+ case P_DATAI:
+ SPRINTF("DATA IN");
+ break;
+ case P_CMD:
+ SPRINTF("COMMAND");
+ break;
+ case P_STATUS:
+ SPRINTF("STATUS");
+ break;
+ case P_MSGO:
+ SPRINTF("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ SPRINTF("MESSAGE IN");
+ break;
+ default:
+ SPRINTF("*illegal*");
+ break;
+ }
+ SPRINTF(")");
+ if(ptr->SCp.phase & (1<<16))
+ SPRINTF("; phaseend");
+ }
+ SPRINTF("; next=0x%08x\n", (unsigned int) ptr->host_scribble);
+
+ return(pos-start);
+}
+
+static int get_ports(struct Scsi_Host *shpnt, char *pos)
+{
+ char *start = pos;
+ int s;
+
+#ifdef SKIP_PORTS
+ if(HOSTDATA(shpnt)->debug & debug_skipports)
+    return 0;
+#endif
+
+ SPRINTF("\n%s: ", CURRENT_SC ? "on bus" : "waiting");
+
+ s=GETPORT(SCSISEQ);
+ SPRINTF("SCSISEQ (");
+ if(s & TEMODEO) SPRINTF("TARGET MODE ");
+ if(s & ENSELO) SPRINTF("SELO ");
+ if(s & ENSELI) SPRINTF("SELI ");
+ if(s & ENRESELI) SPRINTF("RESELI ");
+ if(s & ENAUTOATNO) SPRINTF("AUTOATNO ");
+ if(s & ENAUTOATNI) SPRINTF("AUTOATNI ");
+ if(s & ENAUTOATNP) SPRINTF("AUTOATNP ");
+ if(s & SCSIRSTO) SPRINTF("SCSIRSTO ");
+ SPRINTF(");");
+
+ SPRINTF(" SCSISIG (");
+ s=GETPORT(SCSISIG);
+ switch(s & P_MASK) {
+ case P_DATAO:
+ SPRINTF("DATA OUT");
+ break;
+ case P_DATAI:
+ SPRINTF("DATA IN");
+ break;
+ case P_CMD:
+ SPRINTF("COMMAND");
+ break;
+ case P_STATUS:
+ SPRINTF("STATUS");
+ break;
+ case P_MSGO:
+ SPRINTF("MESSAGE OUT");
+ break;
+ case P_MSGI:
+ SPRINTF("MESSAGE IN");
+ break;
+ default:
+ SPRINTF("*illegal*");
+ break;
+ }
+
+ SPRINTF("); ");
+
+ SPRINTF("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo");
+
+ SPRINTF("SSTAT (");
+ s=GETPORT(SSTAT0);
+ if(s & TARGET) SPRINTF("TARGET ");
+ if(s & SELDO) SPRINTF("SELDO ");
+ if(s & SELDI) SPRINTF("SELDI ");
+ if(s & SELINGO) SPRINTF("SELINGO ");
+ if(s & SWRAP) SPRINTF("SWRAP ");
+ if(s & SDONE) SPRINTF("SDONE ");
+ if(s & SPIORDY) SPRINTF("SPIORDY ");
+ if(s & DMADONE) SPRINTF("DMADONE ");
+
+ s=GETPORT(SSTAT1);
+ if(s & SELTO) SPRINTF("SELTO ");
+ if(s & ATNTARG) SPRINTF("ATNTARG ");
+ if(s & SCSIRSTI) SPRINTF("SCSIRSTI ");
+ if(s & PHASEMIS) SPRINTF("PHASEMIS ");
+ if(s & BUSFREE) SPRINTF("BUSFREE ");
+ if(s & SCSIPERR) SPRINTF("SCSIPERR ");
+ if(s & PHASECHG) SPRINTF("PHASECHG ");
+ if(s & REQINIT) SPRINTF("REQINIT ");
+ SPRINTF("); ");
+
+
+ SPRINTF("SSTAT (");
+
+ s=GETPORT(SSTAT0) & GETPORT(SIMODE0);
+
+ if(s & TARGET) SPRINTF("TARGET ");
+ if(s & SELDO) SPRINTF("SELDO ");
+ if(s & SELDI) SPRINTF("SELDI ");
+ if(s & SELINGO) SPRINTF("SELINGO ");
+ if(s & SWRAP) SPRINTF("SWRAP ");
+ if(s & SDONE) SPRINTF("SDONE ");
+ if(s & SPIORDY) SPRINTF("SPIORDY ");
+ if(s & DMADONE) SPRINTF("DMADONE ");
+
+ s=GETPORT(SSTAT1) & GETPORT(SIMODE1);
+
+ if(s & SELTO) SPRINTF("SELTO ");
+ if(s & ATNTARG) SPRINTF("ATNTARG ");
+ if(s & SCSIRSTI) SPRINTF("SCSIRSTI ");
+ if(s & PHASEMIS) SPRINTF("PHASEMIS ");
+ if(s & BUSFREE) SPRINTF("BUSFREE ");
+ if(s & SCSIPERR) SPRINTF("SCSIPERR ");
+ if(s & PHASECHG) SPRINTF("PHASECHG ");
+ if(s & REQINIT) SPRINTF("REQINIT ");
+ SPRINTF("); ");
+
+ SPRINTF("SXFRCTL0 (");
+
+ s=GETPORT(SXFRCTL0);
+ if(s & SCSIEN) SPRINTF("SCSIEN ");
+ if(s & DMAEN) SPRINTF("DMAEN ");
+ if(s & CH1) SPRINTF("CH1 ");
+ if(s & CLRSTCNT) SPRINTF("CLRSTCNT ");
+ if(s & SPIOEN) SPRINTF("SPIOEN ");
+ if(s & CLRCH1) SPRINTF("CLRCH1 ");
+ SPRINTF("); ");
+
+ SPRINTF("SIGNAL (");
+
+ s=GETPORT(SCSISIG);
+ if(s & ATNI) SPRINTF("ATNI ");
+ if(s & SELI) SPRINTF("SELI ");
+ if(s & BSYI) SPRINTF("BSYI ");
+ if(s & REQI) SPRINTF("REQI ");
+ if(s & ACKI) SPRINTF("ACKI ");
+ SPRINTF("); ");
+
+ SPRINTF("SELID (%02x), ", GETPORT(SELID));
+
+ SPRINTF("SSTAT2 (");
+
+ s=GETPORT(SSTAT2);
+ if(s & SOFFSET) SPRINTF("SOFFSET ");
+ if(s & SEMPTY) SPRINTF("SEMPTY ");
+ if(s & SFULL) SPRINTF("SFULL ");
+ SPRINTF("); SFCNT (%d); ", s & (SFULL|SFCNT));
+
+ s=GETPORT(SSTAT3);
+ SPRINTF("SCSICNT (%d), OFFCNT(%d), ", (s&0xf0)>>4, s&0x0f);
+
+ SPRINTF("SSTAT4 (");
+ s=GETPORT(SSTAT4);
+ if(s & SYNCERR) SPRINTF("SYNCERR ");
+ if(s & FWERR) SPRINTF("FWERR ");
+ if(s & FRERR) SPRINTF("FRERR ");
+ SPRINTF("); ");
+
+ SPRINTF("DMACNTRL0 (");
+ s=GETPORT(DMACNTRL0);
+ SPRINTF("%s ", s & _8BIT ? "8BIT" : "16BIT");
+ SPRINTF("%s ", s & DMA ? "DMA" : "PIO" );
+ SPRINTF("%s ", s & WRITE_READ ? "WRITE" : "READ" );
+ if(s & ENDMA) SPRINTF("ENDMA ");
+ if(s & INTEN) SPRINTF("INTEN ");
+ if(s & RSTFIFO) SPRINTF("RSTFIFO ");
+ if(s & SWINT) SPRINTF("SWINT ");
+ SPRINTF("); ");
+
+ SPRINTF("DMASTAT (");
+ s=GETPORT(DMASTAT);
+ if(s & ATDONE) SPRINTF("ATDONE ");
+ if(s & WORDRDY) SPRINTF("WORDRDY ");
+ if(s & DFIFOFULL) SPRINTF("DFIFOFULL ");
+ if(s & DFIFOEMP) SPRINTF("DFIFOEMP ");
+ SPRINTF(")\n\n");
+
+ SPRINTF("enabled interrupts (");
+
+ s=GETPORT(SIMODE0);
+ if(s & ENSELDO) SPRINTF("ENSELDO ");
+ if(s & ENSELDI) SPRINTF("ENSELDI ");
+ if(s & ENSELINGO) SPRINTF("ENSELINGO ");
+ if(s & ENSWRAP) SPRINTF("ENSWRAP ");
+ if(s & ENSDONE) SPRINTF("ENSDONE ");
+ if(s & ENSPIORDY) SPRINTF("ENSPIORDY ");
+ if(s & ENDMADONE) SPRINTF("ENDMADONE ");
+
+ s=GETPORT(SIMODE1);
+ if(s & ENSELTIMO) SPRINTF("ENSELTIMO ");
+ if(s & ENATNTARG) SPRINTF("ENATNTARG ");
+ if(s & ENPHASEMIS) SPRINTF("ENPHASEMIS ");
+ if(s & ENBUSFREE) SPRINTF("ENBUSFREE ");
+ if(s & ENSCSIPERR) SPRINTF("ENSCSIPERR ");
+ if(s & ENPHASECHG) SPRINTF("ENPHASECHG ");
+ if(s & ENREQINIT) SPRINTF("ENREQINIT ");
+ SPRINTF(")\n");
+
+ return (pos-start);
+}
+
+#undef SPRINTF
+#define SPRINTF(args...) do { if(pos < buffer + length) pos += sprintf(pos, ## args); } while(0)
+
+int aha152x_proc_info(char *buffer, char **start,
+ off_t offset, int length, int hostno, int inout)
+{
+ int i;
+ char *pos = buffer;
+ struct Scsi_Host *shpnt;
+ unsigned long flags;
+ Scsi_Cmnd *ptr;
+
+ for(i=0, shpnt= (struct Scsi_Host *) NULL; i<IRQS; i++)
+ if(aha152x_host[i] && aha152x_host[i]->host_no == hostno)
+ shpnt=aha152x_host[i];
+
+ if(!shpnt)
+ return(-ESRCH);
+
+ if(inout) /* Has data been written to the file ? */
+ return(aha152x_set_info(buffer, length, shpnt));
+
+ SPRINTF(AHA152X_REVID "\n");
+
+ save_flags(flags);
+ cli();
+
+ SPRINTF("ioports 0x%04x to 0x%04x\n",
+ shpnt->io_port, shpnt->io_port+shpnt->n_io_port-1);
+ SPRINTF("interrupt 0x%02x\n", shpnt->irq);
+ SPRINTF("disconnection/reconnection %s\n",
+ HOSTDATA(shpnt)->reconnect ? "enabled" : "disabled");
+ SPRINTF("parity checking %s\n",
+ HOSTDATA(shpnt)->parity ? "enabled" : "disabled");
+ SPRINTF("synchronous transfers %s\n",
+ HOSTDATA(shpnt)->synchronous ? "enabled" : "disabled");
+ SPRINTF("%d commands currently queued\n", HOSTDATA(shpnt)->commands);
+
+ if(HOSTDATA(shpnt)->synchronous) {
+#if 0
+ SPRINTF("synchronously operating targets (tick=%ld ns):\n",
+ 250000000/loops_per_sec);
+ for(i=0; i<8; i++)
+ if(HOSTDATA(shpnt)->syncrate[i]&0x7f)
+ SPRINTF("target %d: period %dT/%ldns; req/ack offset %d\n",
+ i,
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2),
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2)*
+ 250000000/loops_per_sec,
+ HOSTDATA(shpnt)->syncrate[i]&0x0f);
+#else
+ SPRINTF("synchronously operating targets (tick=50 ns):\n");
+ for(i=0; i<8; i++)
+ if(HOSTDATA(shpnt)->syncrate[i]&0x7f)
+ SPRINTF("target %d: period %dT/%dns; req/ack offset %d\n",
+ i,
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2),
+ (((HOSTDATA(shpnt)->syncrate[i]&0x70)>>4)+2)*50,
+ HOSTDATA(shpnt)->syncrate[i]&0x0f);
+#endif
+ }
+
+#ifdef DEBUG_AHA152X
+#define PDEBUG(flags,txt) if(HOSTDATA(shpnt)->debug & flags) SPRINTF("(%s) ", txt);
+
+ SPRINTF("enabled debugging options: ");
+
+ PDEBUG(debug_skipports, "skip ports");
+ PDEBUG(debug_queue, "queue");
+ PDEBUG(debug_intr, "interrupt");
+ PDEBUG(debug_selection, "selection");
+ PDEBUG(debug_msgo, "message out");
+ PDEBUG(debug_msgi, "message in");
+ PDEBUG(debug_status, "status");
+ PDEBUG(debug_cmd, "command");
+ PDEBUG(debug_datai, "data in");
+ PDEBUG(debug_datao, "data out");
+ PDEBUG(debug_abort, "abort");
+ PDEBUG(debug_done, "done");
+ PDEBUG(debug_biosparam, "bios parameters");
+ PDEBUG(debug_phases, "phases");
+ PDEBUG(debug_queues, "queues");
+ PDEBUG(debug_reset, "reset");
+
+ SPRINTF("\n");
+#endif
+
+ SPRINTF("\nqueue status:\n");
+ if(ISSUE_SC) {
+ SPRINTF("not yet issued commands:\n");
+ for(ptr=ISSUE_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos += get_command(pos, ptr);
+ } else
+ SPRINTF("no not yet issued commands\n");
+
+ if(CURRENT_SC) {
+ SPRINTF("current command:\n");
+ pos += get_command(pos, CURRENT_SC);
+ } else
+ SPRINTF("no current command\n");
+
+ if(DISCONNECTED_SC) {
+ SPRINTF("disconnected commands:\n");
+ for(ptr=DISCONNECTED_SC; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ pos += get_command(pos, ptr);
+ } else
+ SPRINTF("no disconnected commands\n");
+
+ restore_flags(flags);
+
+ pos += get_ports(shpnt, pos);
+
+ *start=buffer+offset;
+ if (pos - buffer < offset)
+ return 0;
+ else if (pos - buffer - offset < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AHA152X;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/aha152x.h b/linux/src/drivers/scsi/aha152x.h
new file mode 100644
index 0000000..ca1a202
--- /dev/null
+++ b/linux/src/drivers/scsi/aha152x.h
@@ -0,0 +1,357 @@
+#ifndef _AHA152X_H
+#define _AHA152X_H
+
+/*
+ * $Id: aha152x.h,v 1.1 1999/04/26 05:54:10 tb Exp $
+ */
+
+#if defined(__KERNEL__)
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include <asm/io.h>
+
+int aha152x_detect(Scsi_Host_Template *);
+int aha152x_command(Scsi_Cmnd *);
+int aha152x_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha152x_abort(Scsi_Cmnd *);
+int aha152x_reset(Scsi_Cmnd *, unsigned int);
+int aha152x_biosparam(Disk *, kdev_t, int*);
+int aha152x_proc_info(char *buffer, char **start, off_t offset, int length, int hostno, int inout);
+
+/* number of queueable commands
+ (unless we support more than 1 cmd_per_lun this should do) */
+#define AHA152X_MAXQUEUE 7
+
+#define AHA152X_REVID "Adaptec 152x SCSI driver; $Revision: 1.1 $"
+
+extern struct proc_dir_entry proc_scsi_aha152x;
+
+/* Initial value of Scsi_Host entry */
+#define AHA152X { /* next */ 0, \
+ /* usage_count */ 0, \
+ /* proc_dir */ &proc_scsi_aha152x, \
+ /* proc_info */ aha152x_proc_info, \
+ /* name */ AHA152X_REVID, \
+ /* detect */ aha152x_detect, \
+ /* release */ 0, \
+ /* info */ 0, \
+ /* command */ aha152x_command, \
+ /* queuecommand */ aha152x_queue, \
+ /* abort */ aha152x_abort, \
+ /* reset */ aha152x_reset, \
+ /* slave_attach */ 0, \
+ /* bios_param */ aha152x_biosparam, \
+ /* can_queue */ 1, \
+ /* this_id */ 7, \
+ /* sg_tablesize */ SG_ALL, \
+ /* cmd_per_lun */ 1, \
+ /* present */ 0, \
+ /* unchecked_isa_dma */ 0, \
+ /* use_clustering */ DISABLE_CLUSTERING }
+#endif
+
+
+/* port addresses */
+#define SCSISEQ (shpnt->io_port+0x00) /* SCSI sequence control */
+#define SXFRCTL0 (shpnt->io_port+0x01) /* SCSI transfer control 0 */
+#define SXFRCTL1 (shpnt->io_port+0x02) /* SCSI transfer control 1 */
+#define SCSISIG (shpnt->io_port+0x03) /* SCSI signal in/out */
+#define SCSIRATE (shpnt->io_port+0x04) /* SCSI rate control */
+#define SELID (shpnt->io_port+0x05) /* selection/reselection ID */
+#define SCSIID SELID /* SCSI ID */
+#define SCSIDAT (shpnt->io_port+0x06) /* SCSI latched data */
+#define SCSIBUS (shpnt->io_port+0x07) /* SCSI data bus */
+#define STCNT0 (shpnt->io_port+0x08) /* SCSI transfer count 0 */
+#define STCNT1 (shpnt->io_port+0x09) /* SCSI transfer count 1 */
+#define STCNT2 (shpnt->io_port+0x0a) /* SCSI transfer count 2 */
+#define SSTAT0 (shpnt->io_port+0x0b) /* SCSI interrupt status 0 */
+#define SSTAT1 (shpnt->io_port+0x0c) /* SCSI interrupt status 1 */
+#define SSTAT2 (shpnt->io_port+0x0d) /* SCSI interrupt status 2 */
+#define SCSITEST (shpnt->io_port+0x0e) /* SCSI test control */
+#define SSTAT3 SCSITEST /* SCSI interrupt status 3 */
+#define SSTAT4 (shpnt->io_port+0x0f) /* SCSI status 4 */
+#define SIMODE0 (shpnt->io_port+0x10) /* SCSI interrupt mode 0 */
+#define SIMODE1 (shpnt->io_port+0x11) /* SCSI interrupt mode 1 */
+#define DMACNTRL0 (shpnt->io_port+0x12) /* DMA control 0 */
+#define DMACNTRL1 (shpnt->io_port+0x13) /* DMA control 1 */
+#define DMASTAT (shpnt->io_port+0x14) /* DMA status */
+#define FIFOSTAT (shpnt->io_port+0x15) /* FIFO status */
+#define DATAPORT (shpnt->io_port+0x16) /* DATA port */
+#define BRSTCNTRL (shpnt->io_port+0x18) /* burst control */
+#define PORTA (shpnt->io_port+0x1a) /* PORT A */
+#define PORTB (shpnt->io_port+0x1b) /* PORT B */
+#define REV (shpnt->io_port+0x1c) /* revision */
+#define STACK (shpnt->io_port+0x1d) /* stack */
+#define TEST (shpnt->io_port+0x1e) /* test register */
+
+/* used in aha152x_porttest */
+#define O_PORTA 0x1a /* PORT A */
+#define O_PORTB 0x1b /* PORT B */
+#define O_DMACNTRL1 0x13 /* DMA control 1 */
+#define O_STACK 0x1d /* stack */
+#define IO_RANGE 0x20
+
+/* bits and bitmasks to ports */
+
+/* SCSI sequence control */
+#define TEMODEO 0x80
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRESELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+#define SCSIRSTO 0x01
+
+/* SCSI transfer control 0 */
+#define SCSIEN 0x80
+#define DMAEN 0x40
+#define CH1 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define CLRCH1 0x02
+
+/* SCSI transfer control 1 */
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18 /* mask */
+#define STIMESEL_ 3
+#define ENSTIMER 0x04
+#define BYTEALIGN 0x02
+
+/* SCSI signal IN */
+#define CDI 0x80
+#define IOI 0x40
+#define MSGI 0x20
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+/* SCSI Phases */
+#define P_MASK (MSGI|CDI|IOI)
+#define P_DATAO (0)
+#define P_DATAI (IOI)
+#define P_CMD (CDI)
+#define P_STATUS (CDI|IOI)
+#define P_MSGO (MSGI|CDI)
+#define P_MSGI (MSGI|CDI|IOI)
+
+/* SCSI signal OUT */
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+/* SCSI rate control */
+#define SXFR 0x70 /* mask */
+#define SXFR_ 4
+#define SOFS 0x0f /* mask */
+
+/* SCSI ID */
+#define OID 0x70
+#define OID_ 4
+#define TID 0x07
+
+/* SCSI transfer count */
+#define GETSTCNT() ( (GETPORT(STCNT2)<<16) \
+ + (GETPORT(STCNT1)<< 8) \
+ + GETPORT(STCNT0) )
+
+#define SETSTCNT(X) { SETPORT(STCNT2, ((X) & 0xFF0000) >> 16); \
+ SETPORT(STCNT1, ((X) & 0x00FF00) >> 8); \
+ SETPORT(STCNT0, ((X) & 0x0000FF) ); }
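GETSTCNT/SETSTCNT above split the 24-bit SCSI transfer count across the three 8-bit STCNT registers. A minimal user-space sketch of the same pack/unpack round trip, with a plain array standing in for the registers and a made-up count:

#include <stdio.h>

static unsigned char stcnt[3];               /* stands in for STCNT0..STCNT2 */

static void set_stcnt(unsigned long x)
{
  stcnt[2] = (x & 0xFF0000) >> 16;
  stcnt[1] = (x & 0x00FF00) >> 8;
  stcnt[0] =  x & 0x0000FF;
}

static unsigned long get_stcnt(void)
{
  return ((unsigned long) stcnt[2] << 16)
       | ((unsigned long) stcnt[1] << 8)
       |   stcnt[0];
}

int main(void)
{
  set_stcnt(0x012345);                       /* made-up transfer count */
  printf("%06lx\n", get_stcnt());            /* prints 012345 */
  return 0;
}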
+
+/* SCSI interrupt status */
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define SWRAP 0x08
+#define SDONE 0x04
+#define SPIORDY 0x02
+#define DMADONE 0x01
+
+#define SETSDONE 0x80
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRSWRAP 0x08
+#define CLRSDONE 0x04
+#define CLRSPIORDY 0x02
+#define CLRDMADONE 0x01
+
+/* SCSI status 1 */
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+#define CLRSELTIMO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+/* SCSI status 2 */
+#define SOFFSET 0x20
+#define SEMPTY 0x10
+#define SFULL 0x08
+#define SFCNT 0x07 /* mask */
+
+/* SCSI status 3 */
+#define SCSICNT 0xf0 /* mask */
+#define SCSICNT_ 4
+#define OFFCNT 0x0f /* mask */
+
+/* SCSI TEST control */
+#define SCTESTU 0x08
+#define SCTESTD 0x04
+#define STCTEST 0x01
+
+/* SCSI status 4 */
+#define SYNCERR 0x04
+#define FWERR 0x02
+#define FRERR 0x01
+
+#define CLRSYNCERR 0x04
+#define CLRFWERR 0x02
+#define CLRFRERR 0x01
+
+/* SCSI interrupt mode 0 */
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENSWRAP 0x08
+#define ENSDONE 0x04
+#define ENSPIORDY 0x02
+#define ENDMADONE 0x01
+
+/* SCSI interrupt mode 1 */
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+/* DMA control 0 */
+#define ENDMA 0x80
+#define _8BIT 0x40
+#define DMA 0x20
+#define WRITE_READ 0x08
+#define INTEN 0x04
+#define RSTFIFO 0x02
+#define SWINT 0x01
+
+/* DMA control 1 */
+#define PWRDWN 0x80
+#define STK 0x07 /* mask */
+
+/* DMA status */
+#define ATDONE 0x80
+#define WORDRDY 0x40
+#define INTSTAT 0x20
+#define DFIFOFULL 0x10
+#define DFIFOEMP 0x08
+
+/* BURST control */
+#define BON 0xf0
+#define BOFF 0x0f
+
+/* TEST REGISTER */
+#define BOFFTMR 0x40
+#define BONTMR 0x20
+#define STCNTH 0x10
+#define STCNTM 0x08
+#define STCNTL 0x04
+#define SCSIBLK 0x02
+#define DMABLK 0x01
+
+/* On the AHA-152x board PORTA and PORTB contain
+ some information about the board's configuration. */
+typedef union {
+ struct {
+ unsigned reserved:2; /* reserved */
+ unsigned tardisc:1; /* Target disconnect: 0=disabled, 1=enabled */
+ unsigned syncneg:1; /* Initial sync neg: 0=disabled, 1=enabled */
+ unsigned msgclasses:2; /* Message classes
+ 0=#4
+ 1=#0, #1, #2, #3, #4
+ 2=#0, #3, #4
+ 3=#0, #4
+ */
+ unsigned boot:1; /* boot: 0=disabled, 1=enabled */
+ unsigned dma:1; /* Transfer mode: 0=PIO; 1=DMA */
+ unsigned id:3; /* SCSI-id */
+ unsigned irq:2; /* IRQ-Channel: 0,3=12, 1=10, 2=11 */
+ unsigned dmachan:2; /* DMA-Channel: 0=0, 1=5, 2=6, 3=7 */
+ unsigned parity:1; /* SCSI-parity: 1=enabled 0=disabled */
+ } fields;
+ unsigned short port;
+} aha152x_config ;
+
+#define cf_parity fields.parity
+#define cf_dmachan fields.dmachan
+#define cf_irq fields.irq
+#define cf_id fields.id
+#define cf_dma fields.dma
+#define cf_boot fields.boot
+#define cf_msgclasses fields.msgclasses
+#define cf_syncneg fields.syncneg
+#define cf_tardisc fields.tardisc
+#define cf_port port
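A small, purely hypothetical illustration of reading the union above once cf_port has been filled from PORTA/PORTB by the probe code (the helper name is an assumption, not taken from the driver):

/* Hypothetical helper: report a few configuration bits from a filled-in union. */
static void show_conf(aha152x_config conf)
{
  printk("aha152x: SCSI id %d, %s transfers, parity %s, target disconnect %s\n",
         conf.cf_id,
         conf.cf_dma     ? "DMA"     : "PIO",
         conf.cf_parity  ? "enabled" : "disabled",
         conf.cf_tardisc ? "enabled" : "disabled");
}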
+
+/* Some macros to manipulate ports and their bits */
+
+#define SETPORT(PORT, VAL) outb( (VAL), (PORT) )
+#define SETPORTP(PORT, VAL) outb_p( (VAL), (PORT) )
+#define SETPORTW(PORT, VAL) outw( (VAL), (PORT) )
+
+#define GETPORT(PORT) inb( PORT )
+#define GETPORTW(PORT) inw( PORT )
+
+#define SETBITS(PORT, BITS) outb( (inb(PORT) | (BITS)), (PORT) )
+#define CLRBITS(PORT, BITS) outb( (inb(PORT) & ~(BITS)), (PORT) )
+#define CLRSETBITS(PORT, CLR, SET) outb( (inb(PORT) & ~(CLR)) | (SET) , (PORT) )
+
+#define TESTHI(PORT, BITS) ((inb(PORT) & (BITS)) == BITS)
+#define TESTLO(PORT, BITS) ((inb(PORT) & (BITS)) == 0)
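SETBITS/CLRBITS/CLRSETBITS are plain read-modify-write cycles on an I/O register. A self-contained sketch of the same bit arithmetic on an ordinary byte (mock names and made-up values, only to show the effect):

#include <stdio.h>

static unsigned char reg;                        /* stands in for an I/O port */

#define MOCK_SETBITS(BITS)        (reg |=  (BITS))
#define MOCK_CLRBITS(BITS)        (reg &= ~(BITS))
#define MOCK_CLRSETBITS(CLR, SET) (reg = (reg & ~(CLR)) | (SET))

int main(void)
{
  reg = 0x10;
  MOCK_SETBITS(0x04);                  /* cf. SETBITS(SXFRCTL1, ENSPCHK) */
  MOCK_CLRSETBITS(0x18, 0x08);         /* clear a 2-bit field, set one value */
  printf("%02x\n", reg);               /* prints 0c */
  return 0;
}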
+
+#ifdef DEBUG_AHA152X
+enum {
+ debug_skipports = 0x0001,
+ debug_queue = 0x0002,
+ debug_intr = 0x0004,
+ debug_selection = 0x0008,
+ debug_msgo = 0x0010,
+ debug_msgi = 0x0020,
+ debug_status = 0x0040,
+ debug_cmd = 0x0080,
+ debug_datai = 0x0100,
+ debug_datao = 0x0200,
+ debug_abort = 0x0400,
+ debug_done = 0x0800,
+ debug_biosparam = 0x1000,
+ debug_phases = 0x2000,
+ debug_queues = 0x4000,
+ debug_reset = 0x8000,
+};
+#endif
+
+#endif /* _AHA152X_H */
diff --git a/linux/src/drivers/scsi/aha1542.c b/linux/src/drivers/scsi/aha1542.c
new file mode 100644
index 0000000..cc27e5c
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1542.c
@@ -0,0 +1,1325 @@
+/* $Id: aha1542.c,v 1.1 1999/04/26 05:54:11 tb Exp $
+ * linux/kernel/aha1542.c
+ *
+ * Copyright (C) 1992 Tommy Thorn
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * Modified by Eric Youngdale
+ * Use request_irq and request_dma to help prevent unexpected conflicts
+ * Set up on-board DMA controller, such that we do not have to
+ * have the bios enabled to use the aha1542.
+ * Modified by David Gentzel
+ * Don't call request_dma if dma mask is 0 (for BusLogic BT-445S VL-Bus
+ * controller).
+ * Modified by Matti Aarnio
+ * Accept parameters from LILO cmd-line. -- 1-Oct-94
+ * Modified by Mike McLagan <mike.mclagan@linux.org>
+ * Recognise extended mode on AHA1542CP, different bit than 1542CF
+ * 1-Jan-97
+ * Modified by Bjorn L. Thordarson and Einar Thor Einarsson
+ * Recognize that DMA0 is valid DMA channel -- 13-Jul-98
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+
+#include "aha1542.h"
+
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_aha1542 = {
+ PROC_SCSI_AHA1542, 7, "aha1542",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/aha1542.c,v 1.1 1999/04/26 05:54:11 tb Exp $";
+*/
+
+/* The adaptec can be configured for quite a number of addresses, but
+I generally do not want the card poking around at random. We allow
+two addresses - this allows people to use the Adaptec with a Midi
+card, which also used 0x330 -- can be overridden with LILO! */
+
+#define MAXBOARDS 2 /* Increase this and the sizes of the
+ arrays below, if you need more.. */
+
+static unsigned int bases[MAXBOARDS]={0x330, 0x334};
+
+/* set by aha1542_setup according to the command line */
+static int setup_called[MAXBOARDS] = {0,0};
+static int setup_buson[MAXBOARDS] = {0,0};
+static int setup_busoff[MAXBOARDS] = {0,0};
+static int setup_dmaspeed[MAXBOARDS] = {-1,-1};
+
+static char *setup_str[MAXBOARDS] = {(char *)NULL,(char *)NULL};
+
+/*
+ * LILO params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]
+ *
+ * Where: <PORTBASE> is any of the valid AHA addresses:
+ * 0x130, 0x134, 0x230, 0x234, 0x330, 0x334
+ * <BUSON> is the time (in microsecs) that AHA spends on the AT-bus
+ * when transferring data. 1542A power-on default is 11us,
+ * valid values are in range: 2..15 (decimal)
+ *         <BUSOFF> is the time that the AHA spends OFF THE BUS in between
+ *              bursts while it is transferring data (so as not to
+ *              monopolize the bus).
+ * Power-on default is 4us, valid range: 1..64 microseconds.
+ * <DMASPEED> Default is jumper selected (1542A: on the J1),
+ * but experimenter can alter it with this.
+ * Valid values: 5, 6, 7, 8, 10 (MB/s)
+ * Factory default is 5 MB/s.
+ */
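As a concrete example of the syntax documented above (values made up, but within the stated ranges), a second board at 0x334 with an 11 us bus-on time, a 4 us bus-off time and 7 MB/s DMA would be configured with a boot parameter like:

aha1542=0x334,11,4,7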
+
+#define BIOS_TRANSLATION_1632 0 /* Used by some old 1542A boards */
+#define BIOS_TRANSLATION_6432 1 /* Default case these days */
+#define BIOS_TRANSLATION_25563 2 /* Big disk case */
+
+struct aha1542_hostdata{
+ /* This will effectively start both of them at the first mailbox */
+ int bios_translation; /* Mapping bios uses - for compatibility */
+ int aha1542_last_mbi_used;
+ int aha1542_last_mbo_used;
+ Scsi_Cmnd * SCint[AHA1542_MAILBOXES];
+ struct mailbox mb[2*AHA1542_MAILBOXES];
+ struct ccb ccb[AHA1542_MAILBOXES];
+};
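For orientation, and inferred from setup_mailboxes() and the interrupt handler below rather than stated by the original author: mb[] holds the outgoing mailboxes in slots 0..AHA1542_MAILBOXES-1 and the incoming ones in slots AHA1542_MAILBOXES..2*AHA1542_MAILBOXES-1, which is why the interrupt handler wraps its index back to AHA1542_MAILBOXES instead of 0. A hypothetical helper that makes the split explicit:

/* Sketch only: return the i-th incoming (MBI) slot, 0 <= i < AHA1542_MAILBOXES. */
static inline struct mailbox *aha1542_mbi_slot(struct aha1542_hostdata *hd, int i)
{
  return &hd->mb[AHA1542_MAILBOXES + (i % AHA1542_MAILBOXES)];
}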
+
+#define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
+
+static struct Scsi_Host * aha_host[7] = {NULL,}; /* One for each IRQ level (9-15) */
+
+
+
+
+#define WAITnexttimeout 3000000
+
+static void setup_mailboxes(int base_io, struct Scsi_Host * shpnt);
+static int aha1542_restart(struct Scsi_Host * shost);
+
+#define aha1542_intr_reset(base) outb(IRST, CONTROL(base))
+
+#define WAIT(port, mask, allof, noneof) \
+ { register int WAITbits; \
+ register int WAITtimeout = WAITnexttimeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ if (--WAITtimeout == 0) goto fail; \
+ } \
+ }
+
+/* Similar to WAIT, except we use the udelay call to regulate the
+ amount of time we wait. */
+#define WAITd(port, mask, allof, noneof, timeout) \
+ { register int WAITbits; \
+ register int WAITtimeout = timeout; \
+ while (1) { \
+ WAITbits = inb(port) & (mask); \
+ if ((WAITbits & (allof)) == (allof) && ((WAITbits & (noneof)) == 0)) \
+ break; \
+ udelay(1000); \
+ if (--WAITtimeout == 0) goto fail; \
+ } \
+ }
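Both macros above busy-wait until (status & mask) has every bit of allof set and no bit of noneof, jumping to a fail label in the calling function on timeout. Written out as a function purely for clarity (a function cannot reach the caller's fail label, which is why the driver keeps the macro form), the loop amounts to:

/* Illustrative rewrite of WAIT(); returns 0 on success, 1 on timeout. */
static int wait_for_bits(unsigned int port, int mask, int allof, int noneof)
{
  int timeout = WAITnexttimeout;

  while (timeout--) {
    int bits = inb(port) & mask;
    if ((bits & allof) == allof && (bits & noneof) == 0)
      return 0;
  }
  return 1;
}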
+
+static void aha1542_stat(void)
+{
+/* int s = inb(STATUS), i = inb(INTRFLAGS);
+ printk("status=%x intrflags=%x\n", s, i, WAITnexttimeout-WAITtimeout); */
+}
+
+/* This is a bit complicated, but we need to make sure that an interrupt
+ routine does not send something out while we are in the middle of this.
+ Fortunately, it is only at boot time that multi-byte messages
+ are ever sent. */
+static int aha1542_out(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags = 0;
+
+ save_flags(flags);
+ if(len == 1) {
+ while(1==1){
+ WAIT(STATUS(base), CDF, 0, CDF);
+ cli();
+ if(inb(STATUS(base)) & CDF) {restore_flags(flags); continue;}
+ outb(*cmdp, DATA(base));
+ restore_flags(flags);
+ return 0;
+ }
+ } else {
+ cli();
+ while (len--)
+ {
+ WAIT(STATUS(base), CDF, 0, CDF);
+ outb(*cmdp++, DATA(base));
+ }
+ restore_flags(flags);
+ }
+ return 0;
+ fail:
+ restore_flags(flags);
+ printk("aha1542_out failed(%d): ", len+1); aha1542_stat();
+ return 1;
+}
+
+/* Only used at boot time, so we do not need to worry about latency as much
+ here */
+static int aha1542_in(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ while (len--)
+ {
+ WAIT(STATUS(base), DF, DF, 0);
+ *cmdp++ = inb(DATA(base));
+ }
+ restore_flags(flags);
+ return 0;
+ fail:
+ restore_flags(flags);
+ printk("aha1542_in failed(%d): ", len+1); aha1542_stat();
+ return 1;
+}
+
+/* Similar to aha1542_in, except that we wait a very short period of time.
+ We use this if we know the board is alive and awake, but we are not sure
+ if the board will respond to the command we are about to send or not */
+static int aha1542_in1(unsigned int base, unchar *cmdp, int len)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ while (len--)
+ {
+ WAITd(STATUS(base), DF, DF, 0, 100);
+ *cmdp++ = inb(DATA(base));
+ }
+ restore_flags(flags);
+ return 0;
+ fail:
+ restore_flags(flags);
+ return 1;
+}
+
+static int makecode(unsigned hosterr, unsigned scsierr)
+{
+ switch (hosterr) {
+ case 0x0:
+ case 0xa: /* Linked command complete without error and linked normally */
+ case 0xb: /* Linked command complete without error, interrupt generated */
+ hosterr = 0;
+ break;
+
+ case 0x11: /* Selection time out-The initiator selection or target
+ reselection was not complete within the SCSI Time out period */
+ hosterr = DID_TIME_OUT;
+ break;
+
+ case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
+ than was allocated by the Data Length field or the sum of the
+ Scatter / Gather Data Length fields. */
+
+ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
+
+ case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was
+ invalid. This usually indicates a software failure. */
+
+ case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid.
+ This usually indicates a software failure. */
+
+ case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set
+ of linked CCB's does not specify the same logical unit number as
+ the first. */
+ case 0x18: /* Invalid Target Direction received from Host-The direction of a
+ Target Mode CCB was invalid. */
+
+  case 0x19: /* Duplicate CCB Received in Target Mode-More than one CCB was
+ received to service data transfer between the same target LUN
+ and initiator SCSI ID in the same direction. */
+
+ case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
+ length segment or invalid segment list boundaries was received.
+ A CCB parameter was invalid. */
+ DEB(printk("Aha1542: %x %x\n", hosterr, scsierr));
+ hosterr = DID_ERROR; /* Couldn't find any better */
+ break;
+
+ case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
+ phase sequence was requested by the target. The host adapter
+ will generate a SCSI Reset Condition, notifying the host with
+ a SCRD interrupt */
+ hosterr = DID_RESET;
+ break;
+ default:
+ printk("makecode: unknown hoststatus %x\n", hosterr);
+ break;
+ }
+ return scsierr|(hosterr << 16);
+}
+
+static int aha1542_test_port(int bse, struct Scsi_Host * shpnt)
+{
+ int i;
+ unchar inquiry_cmd[] = {CMD_INQUIRY };
+ unchar inquiry_result[4];
+ unchar *cmdp;
+ int len;
+ volatile int debug = 0;
+
+ /* Quick and dirty test for presence of the card. */
+ if(inb(STATUS(bse)) == 0xff) return 0;
+
+ /* Reset the adapter. I ought to make a hard reset, but it's not really necessary */
+
+ /* DEB(printk("aha1542_test_port called \n")); */
+
+ /* In case some other card was probing here, reset interrupts */
+ aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+
+ outb(SRST|IRST/*|SCRST*/, CONTROL(bse));
+
+ i = jiffies + 2;
+ while (i>jiffies); /* Wait a little bit for things to settle down. */
+
+ debug = 1;
+ /* Expect INIT and IDLE, any of the others are bad */
+ WAIT(STATUS(bse), STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
+
+ debug = 2;
+ /* Shouldn't have generated any interrupts during reset */
+ if (inb(INTRFLAGS(bse))&INTRMASK) goto fail;
+
+
+ /* Perform a host adapter inquiry instead so we do not need to set
+ up the mailboxes ahead of time */
+
+ aha1542_out(bse, inquiry_cmd, 1);
+
+ debug = 3;
+ len = 4;
+ cmdp = &inquiry_result[0];
+
+ while (len--)
+ {
+ WAIT(STATUS(bse), DF, DF, 0);
+ *cmdp++ = inb(DATA(bse));
+ }
+
+ debug = 8;
+ /* Reading port should reset DF */
+ if (inb(STATUS(bse)) & DF) goto fail;
+
+ debug = 9;
+  /* When HACC is set, the command is complete and we're through testing */
+ WAIT(INTRFLAGS(bse), HACC, HACC, 0);
+ /* now initialize adapter */
+
+ debug = 10;
+ /* Clear interrupts */
+ outb(IRST, CONTROL(bse));
+
+ debug = 11;
+
+  return debug; /* non-zero = ok */
+ fail:
+ return 0; /* 0 = not ok */
+}
+
+/* A "high" level interrupt handler */
+static void aha1542_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+ void (*my_done)(Scsi_Cmnd *) = NULL;
+ int errstatus, mbi, mbo, mbistatus;
+ int number_serviced;
+ unsigned long flags;
+ struct Scsi_Host * shost;
+ Scsi_Cmnd * SCtmp;
+ int flag;
+ int needs_restart;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ shost = aha_host[irq - 9];
+ if(!shost) panic("Splunge!");
+
+ mb = HOSTDATA(shost)->mb;
+ ccb = HOSTDATA(shost)->ccb;
+
+#ifdef DEBUG
+ {
+ flag = inb(INTRFLAGS(shost->io_port));
+ printk("aha1542_intr_handle: ");
+ if (!(flag&ANYINTR)) printk("no interrupt?");
+ if (flag&MBIF) printk("MBIF ");
+ if (flag&MBOA) printk("MBOF ");
+ if (flag&HACC) printk("HACC ");
+ if (flag&SCRD) printk("SCRD ");
+ printk("status %02x\n", inb(STATUS(shost->io_port)));
+ };
+#endif
+ number_serviced = 0;
+ needs_restart = 0;
+
+ while(1==1){
+ flag = inb(INTRFLAGS(shost->io_port));
+
+ /* Check for unusual interrupts. If any of these happen, we should
+ probably do something special, but for now just printing a message
+ is sufficient. A SCSI reset detected is something that we really
+ need to deal with in some way. */
+ if (flag & ~MBIF) {
+ if (flag&MBOA) printk("MBOF ");
+ if (flag&HACC) printk("HACC ");
+ if (flag&SCRD) {
+ needs_restart = 1;
+ printk("SCRD ");
+ }
+ }
+
+ aha1542_intr_reset(shost->io_port);
+
+ save_flags(flags);
+ cli();
+ mbi = HOSTDATA(shost)->aha1542_last_mbi_used + 1;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+
+ do{
+ if(mb[mbi].status != 0) break;
+ mbi++;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+ } while (mbi != HOSTDATA(shost)->aha1542_last_mbi_used);
+
+ if(mb[mbi].status == 0){
+ restore_flags(flags);
+ /* Hmm, no mail. Must have read it the last time around */
+ if (!number_serviced && !needs_restart)
+ printk("aha1542.c: interrupt received, but no mail.\n");
+ /* We detected a reset. Restart all pending commands for
+ devices that use the hard reset option */
+ if(needs_restart) aha1542_restart(shost);
+ return;
+ };
+
+ mbo = (scsi2int(mb[mbi].ccbptr) - ((unsigned int) &ccb[0])) / sizeof(struct ccb);
+ mbistatus = mb[mbi].status;
+ mb[mbi].status = 0;
+ HOSTDATA(shost)->aha1542_last_mbi_used = mbi;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ {
+ if (ccb[mbo].tarstat|ccb[mbo].hastat)
+ printk("aha1542_command: returning %x (status %d)\n",
+ ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
+ };
+#endif
+
+ if(mbistatus == 3) continue; /* Aborted command not found */
+
+#ifdef DEBUG
+ printk("...done %d %d\n",mbo, mbi);
+#endif
+
+ SCtmp = HOSTDATA(shost)->SCint[mbo];
+
+ if (!SCtmp || !SCtmp->scsi_done) {
+ printk("aha1542_intr_handle: Unexpected interrupt\n");
+ printk("tarstat=%x, hastat=%x idlun=%x ccb#=%d \n", ccb[mbo].tarstat,
+ ccb[mbo].hastat, ccb[mbo].idlun, mbo);
+ return;
+ }
+
+ my_done = SCtmp->scsi_done;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+
+ /* Fetch the sense data, and tuck it away, in the required slot. The
+ Adaptec automatically fetches it, and there is no guarantee that
+ we will still have it in the cdb when we come back */
+ if (ccb[mbo].tarstat == 2)
+ memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
+ sizeof(SCtmp->sense_buffer));
+
+
+ /* is there mail :-) */
+
+ /* more error checking left out here */
+ if (mbistatus != 1)
+ /* This is surely wrong, but I don't know what's right */
+ errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat);
+ else
+ errstatus = 0;
+
+#ifdef DEBUG
+ if(errstatus) printk("(aha1542 error:%x %x %x) ",errstatus,
+ ccb[mbo].hastat, ccb[mbo].tarstat);
+#endif
+
+ if (ccb[mbo].tarstat == 2) {
+#ifdef DEBUG
+ int i;
+#endif
+ DEB(printk("aha1542_intr_handle: sense:"));
+#ifdef DEBUG
+ for (i = 0; i < 12; i++)
+ printk("%02x ", ccb[mbo].cdb[ccb[mbo].cdblen+i]);
+ printk("\n");
+#endif
+ /*
+ DEB(printk("aha1542_intr_handle: buf:"));
+ for (i = 0; i < bufflen; i++)
+ printk("%02x ", ((unchar *)buff)[i]);
+ printk("\n");
+ */
+ }
+ DEB(if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus));
+ SCtmp->result = errstatus;
+ HOSTDATA(shost)->SCint[mbo] = NULL; /* This effectively frees up the mailbox slot, as
+ far as queuecommand is concerned */
+ my_done(SCtmp);
+ number_serviced++;
+ };
+}
+
+int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar ahacmd = CMD_START_SCSI;
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = SCpnt->target;
+ unchar lun = SCpnt->lun;
+ unsigned long flags;
+ void *buff = SCpnt->request_buffer;
+ int bufflen = SCpnt->request_bufflen;
+ int mbo;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ DEB(int i);
+
+ mb = HOSTDATA(SCpnt->host)->mb;
+ ccb = HOSTDATA(SCpnt->host)->ccb;
+
+ DEB(if (target > 1) {
+ SCpnt->result = DID_TIME_OUT << 16;
+ done(SCpnt); return 0;});
+
+ if(*cmd == REQUEST_SENSE){
+#ifndef DEBUG
+ if (bufflen != sizeof(SCpnt->sense_buffer)) {
+ printk("Wrong buffer length supplied for request sense (%d)\n",bufflen);
+ };
+#endif
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ };
+
+#ifdef DEBUG
+ if (*cmd == READ_10 || *cmd == WRITE_10)
+ i = xscsi2int(cmd+2);
+ else if (*cmd == READ_6 || *cmd == WRITE_6)
+ i = scsi2int(cmd+2);
+ else
+ i = -1;
+ if (done)
+ printk("aha1542_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
+ else
+ printk("aha1542_command: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen);
+ aha1542_stat();
+ printk("aha1542_queuecommand: dumping scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+ if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ return 0; /* we are still testing, so *don't* write */
+#endif
+/* Use the outgoing mailboxes in a round-robin fashion, because this
+ is how the host adapter will scan for them */
+
+ save_flags(flags);
+ cli();
+ mbo = HOSTDATA(SCpnt->host)->aha1542_last_mbo_used + 1;
+ if (mbo >= AHA1542_MAILBOXES) mbo = 0;
+
+ do{
+ if(mb[mbo].status == 0 && HOSTDATA(SCpnt->host)->SCint[mbo] == NULL)
+ break;
+ mbo++;
+ if (mbo >= AHA1542_MAILBOXES) mbo = 0;
+ } while (mbo != HOSTDATA(SCpnt->host)->aha1542_last_mbo_used);
+
+ if(mb[mbo].status || HOSTDATA(SCpnt->host)->SCint[mbo])
+ panic("Unable to find empty mailbox for aha1542.\n");
+
+ HOSTDATA(SCpnt->host)->SCint[mbo] = SCpnt; /* This will effectively prevent someone else from
+ screwing with this cdb. */
+
+ HOSTDATA(SCpnt->host)->aha1542_last_mbo_used = mbo;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ printk("Sending command (%d %x)...",mbo, done);
+#endif
+
+ any2scsi(mb[mbo].ccbptr, &ccb[mbo]); /* This gets trashed for some reason*/
+
+ memset(&ccb[mbo], 0, sizeof(struct ccb));
+
+ ccb[mbo].cdblen = SCpnt->cmd_len;
+
+ direction = 0;
+ if (*cmd == READ_10 || *cmd == READ_6)
+ direction = 8;
+ else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 16;
+
+ memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
+
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ struct chain * cptr;
+#ifdef DEBUG
+ unsigned char * ptr;
+#endif
+ int i;
+ ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather*/
+ SCpnt->host_scribble = (unsigned char *) scsi_malloc(512);
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+ cptr = (struct chain *) SCpnt->host_scribble;
+ if (cptr == NULL) panic("aha1542.c: unable to allocate DMA memory\n");
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if(sgpnt[i].length == 0 || SCpnt->use_sg > 16 ||
+ (((int)sgpnt[i].address) & 1) || (sgpnt[i].length & 1)){
+ unsigned char * ptr;
+ printk("Bad segment list supplied to aha1542.c (%d, %d)\n",SCpnt->use_sg,i);
+ for(i=0;i<SCpnt->use_sg;i++){
+ printk("%d: %x %x %d\n",i,(unsigned int) sgpnt[i].address, (unsigned int) sgpnt[i].alt_address,
+ sgpnt[i].length);
+ };
+ printk("cptr %x: ",(unsigned int) cptr);
+ ptr = (unsigned char *) &cptr[i];
+ for(i=0;i<18;i++) printk("%02x ", ptr[i]);
+ panic("Foooooooood fight!");
+ };
+ any2scsi(cptr[i].dataptr, sgpnt[i].address);
+ if(((unsigned int) sgpnt[i].address) & 0xff000000) goto baddma;
+ any2scsi(cptr[i].datalen, sgpnt[i].length);
+ };
+ any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
+ any2scsi(ccb[mbo].dataptr, cptr);
+#ifdef DEBUG
+ printk("cptr %x: ",cptr);
+ ptr = (unsigned char *) cptr;
+ for(i=0;i<18;i++) printk("%02x ", ptr[i]);
+#endif
+ } else {
+ ccb[mbo].op = 0; /* SCSI Initiator Command */
+ SCpnt->host_scribble = NULL;
+ any2scsi(ccb[mbo].datalen, bufflen);
+ if(((unsigned int) buff & 0xff000000)) goto baddma;
+ any2scsi(ccb[mbo].dataptr, buff);
+ };
+ ccb[mbo].idlun = (target&7)<<5 | direction | (lun & 7); /*SCSI Target Id*/
+ ccb[mbo].rsalen = 16;
+ ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
+ ccb[mbo].commlinkid = 0;
+
+#ifdef DEBUG
+ { int i;
+ printk("aha1542_command: sending.. ");
+ for (i = 0; i < sizeof(ccb[mbo])-10; i++)
+ printk("%02x ", ((unchar *)&ccb[mbo])[i]);
+ };
+#endif
+
+ if (done) {
+ DEB(printk("aha1542_queuecommand: now waiting for interrupt "); aha1542_stat());
+ SCpnt->scsi_done = done;
+ mb[mbo].status = 1;
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
+ DEB(aha1542_stat());
+ }
+ else
+ printk("aha1542_queuecommand: done can't be NULL\n");
+
+ return 0;
+ baddma:
+ panic("Buffer at address > 16Mb used for 1542B");
+}
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ SCpnt->SCp.Status++;
+}
+
+int aha1542_command(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("aha1542_command: ..calling aha1542_queuecommand\n"));
+
+ aha1542_queuecommand(SCpnt, internal_done);
+
+ SCpnt->SCp.Status = 0;
+ while (!SCpnt->SCp.Status)
+ barrier();
+ return SCpnt->result;
+}
+
+/* Initialize mailboxes */
+static void setup_mailboxes(int bse, struct Scsi_Host * shpnt)
+{
+ int i;
+ struct mailbox * mb;
+ struct ccb *ccb;
+
+ unchar cmd[5] = {CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+
+ mb = HOSTDATA(shpnt)->mb;
+ ccb = HOSTDATA(shpnt)->ccb;
+
+ for(i=0; i<AHA1542_MAILBOXES; i++){
+ mb[i].status = mb[AHA1542_MAILBOXES+i].status = 0;
+ any2scsi(mb[i].ccbptr, &ccb[i]);
+ };
+ aha1542_intr_reset(bse); /* reset interrupts, so they don't block */
+ any2scsi((cmd+2), mb);
+ aha1542_out(bse, cmd, 5);
+ WAIT(INTRFLAGS(bse), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+ printk("aha1542_detect: failed setting up mailboxes\n");
+ }
+ aha1542_intr_reset(bse);
+}
+
+static int aha1542_getconfig(int base_io, unsigned char * irq_level, unsigned char * dma_chan, unsigned char * scsi_id)
+{
+ unchar inquiry_cmd[] = {CMD_RETCONF };
+ unchar inquiry_result[3];
+ int i;
+ i = inb(STATUS(base_io));
+ if (i & DF) {
+ i = inb(DATA(base_io));
+ };
+ aha1542_out(base_io, inquiry_cmd, 1);
+ aha1542_in(base_io, inquiry_result, 3);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+    printk("aha1542_detect: failed to query board settings\n");
+ }
+ aha1542_intr_reset(base_io);
+ switch(inquiry_result[0]){
+ case 0x80:
+ *dma_chan = 7;
+ break;
+ case 0x40:
+ *dma_chan = 6;
+ break;
+ case 0x20:
+ *dma_chan = 5;
+ break;
+ case 0x01:
+ *dma_chan = 0;
+ break;
+ case 0:
+ /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
+ Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
+ *dma_chan = 0xFF;
+ break;
+ default:
+ printk("Unable to determine Adaptec DMA priority. Disabling board\n");
+ return -1;
+ };
+ switch(inquiry_result[1]){
+ case 0x40:
+ *irq_level = 15;
+ break;
+ case 0x20:
+ *irq_level = 14;
+ break;
+ case 0x8:
+ *irq_level = 12;
+ break;
+ case 0x4:
+ *irq_level = 11;
+ break;
+ case 0x2:
+ *irq_level = 10;
+ break;
+ case 0x1:
+ *irq_level = 9;
+ break;
+ default:
+ printk("Unable to determine Adaptec IRQ level. Disabling board\n");
+ return -1;
+ };
+ *scsi_id=inquiry_result[2] & 7;
+ return 0;
+}
+
+/* This function should only be called for 1542C boards - we can detect
+ the special firmware settings and unlock the board */
+
+static int aha1542_mbenable(int base)
+{
+ static unchar mbenable_cmd[3];
+ static unchar mbenable_result[2];
+ int retval;
+
+ retval = BIOS_TRANSLATION_6432;
+
+ mbenable_cmd[0]=CMD_EXTBIOS;
+ aha1542_out(base,mbenable_cmd,1);
+ if(aha1542_in1(base,mbenable_result,2))
+ return retval;
+ WAITd(INTRFLAGS(base),INTRMASK,HACC,0,100);
+ aha1542_intr_reset(base);
+
+ if ((mbenable_result[0] & 0x08) || mbenable_result[1]) {
+ mbenable_cmd[0]=CMD_MBENABLE;
+ mbenable_cmd[1]=0;
+ mbenable_cmd[2]=mbenable_result[1];
+
+ if((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03)) retval = BIOS_TRANSLATION_25563;
+
+ aha1542_out(base,mbenable_cmd,3);
+ WAIT(INTRFLAGS(base),INTRMASK,HACC,0);
+ };
+ while(0) {
+fail:
+ printk("aha1542_mbenable: Mailbox init failed\n");
+ }
+  aha1542_intr_reset(base);
+  return retval;
+}
+
+/* Query the board to find out if it is a 1542 or a 1740, or whatever. */
+static int aha1542_query(int base_io, int * transl)
+{
+ unchar inquiry_cmd[] = {CMD_INQUIRY };
+ unchar inquiry_result[4];
+ int i;
+ i = inb(STATUS(base_io));
+ if (i & DF) {
+ i = inb(DATA(base_io));
+ };
+ aha1542_out(base_io, inquiry_cmd, 1);
+ aha1542_in(base_io, inquiry_result, 4);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ while (0) {
+ fail:
+    printk("aha1542_detect: failed to query card type\n");
+ }
+ aha1542_intr_reset(base_io);
+
+ *transl = BIOS_TRANSLATION_6432; /* Default case */
+
+/* For an AHA1740 series board, we ignore the board since there is a
+ hardware bug which can lead to wrong blocks being returned if the board
+ is operating in the 1542 emulation mode. Since there is an extended mode
+ driver, we simply ignore the board and let the 1740 driver pick it up.
+*/
+
+ if (inquiry_result[0] == 0x43) {
+ printk("aha1542.c: Emulation mode not supported for AHA 174N hardware.\n");
+ return 1;
+ };
+
+ /* Always call this - boards that do not support extended bios translation
+ will ignore the command, and we will set the proper default */
+
+ *transl = aha1542_mbenable(base_io);
+
+ return 0;
+}
+
+/* called from init/main.c */
+void aha1542_setup( char *str, int *ints)
+{
+ const char *ahausage = "aha1542: usage: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]\n";
+ static int setup_idx = 0;
+ int setup_portbase;
+
+ if(setup_idx >= MAXBOARDS)
+ {
+ printk("aha1542: aha1542_setup called too many times! Bad LILO params ?\n");
+ printk(" Entryline 1: %s\n",setup_str[0]);
+ printk(" Entryline 2: %s\n",setup_str[1]);
+ printk(" This line: %s\n",str);
+ return;
+ }
+ if (ints[0] < 1 || ints[0] > 4)
+ {
+ printk("aha1542: %s\n", str );
+ printk("%s", ahausage);
+ printk("aha1542: Wrong parameters may cause system malfunction.. We try anyway..\n");
+ }
+
+ setup_called[setup_idx]=ints[0];
+ setup_str[setup_idx]=str;
+
+ setup_portbase = ints[0] >= 1 ? ints[1] : 0; /* Preserve the default value.. */
+ setup_buson [setup_idx] = ints[0] >= 2 ? ints[2] : 7;
+ setup_busoff [setup_idx] = ints[0] >= 3 ? ints[3] : 5;
+ if (ints[0] >= 4) {
+ int atbt = -1;
+ switch (ints[4]) {
+ case 5:
+ atbt = 0x00;
+ break;
+ case 6:
+ atbt = 0x04;
+ break;
+ case 7:
+ atbt = 0x01;
+ break;
+ case 8:
+ atbt = 0x02;
+ break;
+ case 10:
+ atbt = 0x03;
+ break;
+ default:
+ printk("aha1542: %s\n", str );
+ printk("%s", ahausage);
+ printk("aha1542: Valid values for DMASPEED are 5-8, 10 MB/s. Using jumper defaults.\n");
+ break;
+ }
+ setup_dmaspeed[setup_idx] = atbt;
+ }
+
+ if (setup_portbase != 0)
+ bases[setup_idx] = setup_portbase;
+
+ ++setup_idx;
+}
+
+/* return non-zero on detection */
+int aha1542_detect(Scsi_Host_Template * tpnt)
+{
+ unsigned char dma_chan;
+ unsigned char irq_level;
+ unsigned char scsi_id;
+ unsigned long flags;
+ unsigned int base_io;
+ int trans;
+ struct Scsi_Host * shpnt = NULL;
+ int count = 0;
+ int indx;
+
+ DEB(printk("aha1542_detect: \n"));
+
+ tpnt->proc_dir = &proc_scsi_aha1542;
+
+ for(indx = 0; indx < sizeof(bases)/sizeof(bases[0]); indx++)
+ if(bases[indx] != 0 && !check_region(bases[indx], 4)) {
+ shpnt = scsi_register(tpnt,
+ sizeof(struct aha1542_hostdata));
+
+ /* For now we do this - until kmalloc is more intelligent
+ we are resigned to stupid hacks like this */
+ if ((unsigned int) shpnt > 0xffffff) {
+ printk("Invalid address for shpnt with 1542.\n");
+ goto unregister;
+ }
+
+ if(!aha1542_test_port(bases[indx], shpnt)) goto unregister;
+
+
+ base_io = bases[indx];
+
+      /* Set the Bus on/off-times so as not to ruin floppy performance */
+ {
+ unchar oncmd[] = {CMD_BUSON_TIME, 7};
+ unchar offcmd[] = {CMD_BUSOFF_TIME, 5};
+
+ if(setup_called[indx])
+ {
+ oncmd[1] = setup_buson[indx];
+ offcmd[1] = setup_busoff[indx];
+ }
+
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, oncmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, offcmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ if (setup_dmaspeed[indx] >= 0)
+ {
+ unchar dmacmd[] = {CMD_DMASPEED, 0};
+ dmacmd[1] = setup_dmaspeed[indx];
+ aha1542_intr_reset(base_io);
+ aha1542_out(base_io, dmacmd, 2);
+ WAIT(INTRFLAGS(base_io), INTRMASK, HACC, 0);
+ }
+ while (0) {
+ fail:
+ printk("aha1542_detect: setting bus on/off-time failed\n");
+ }
+ aha1542_intr_reset(base_io);
+ }
+ if(aha1542_query(base_io, &trans)) goto unregister;
+
+ if (aha1542_getconfig(base_io, &irq_level, &dma_chan, &scsi_id) == -1) goto unregister;
+
+ printk("Configuring Adaptec (SCSI-ID %d) at IO:%x, IRQ %d", scsi_id, base_io, irq_level);
+ if (dma_chan != 0xFF)
+ printk(", DMA priority %d", dma_chan);
+ printk("\n");
+
+ DEB(aha1542_stat());
+ setup_mailboxes(base_io, shpnt);
+
+ DEB(aha1542_stat());
+
+ DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
+ save_flags(flags);
+ cli();
+ if (request_irq(irq_level,aha1542_intr_handle, 0, "aha1542", NULL)) {
+ printk("Unable to allocate IRQ for adaptec controller.\n");
+ restore_flags(flags);
+ goto unregister;
+ }
+
+ if (dma_chan != 0xFF) {
+ if (request_dma(dma_chan,"aha1542")) {
+ printk("Unable to allocate DMA channel for Adaptec.\n");
+ free_irq(irq_level, NULL);
+ restore_flags(flags);
+ goto unregister;
+ }
+
+ if (dma_chan == 0 || dma_chan >= 5) {
+ set_dma_mode(dma_chan, DMA_MODE_CASCADE);
+ enable_dma(dma_chan);
+ }
+ }
+ aha_host[irq_level - 9] = shpnt;
+ shpnt->this_id = scsi_id;
+ shpnt->unique_id = base_io;
+ shpnt->io_port = base_io;
+ shpnt->n_io_port = 4; /* Number of bytes of I/O space used */
+ shpnt->dma_channel = dma_chan;
+ shpnt->irq = irq_level;
+ HOSTDATA(shpnt)->bios_translation = trans;
+ if(trans == BIOS_TRANSLATION_25563)
+ printk("aha1542.c: Using extended bios translation\n");
+ HOSTDATA(shpnt)->aha1542_last_mbi_used = (2*AHA1542_MAILBOXES - 1);
+ HOSTDATA(shpnt)->aha1542_last_mbo_used = (AHA1542_MAILBOXES - 1);
+ memset(HOSTDATA(shpnt)->SCint, 0, sizeof(HOSTDATA(shpnt)->SCint));
+ restore_flags(flags);
+#if 0
+ DEB(printk(" *** READ CAPACITY ***\n"));
+
+ {
+ unchar buf[8];
+ static unchar cmd[] = { READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ int i;
+
+ for (i = 0; i < sizeof(buf); ++i) buf[i] = 0x87;
+ for (i = 0; i < 2; ++i)
+ if (!aha1542_command(i, cmd, buf, sizeof(buf))) {
+ printk("aha_detect: LU %d sector_size %d device_size %d\n",
+ i, xscsi2int(buf+4), xscsi2int(buf));
+ }
+ }
+
+ DEB(printk(" *** NOW RUNNING MY OWN TEST *** \n"));
+
+ for (i = 0; i < 4; ++i)
+ {
+ unsigned char cmd[10];
+ static buffer[512];
+
+ cmd[0] = READ_10;
+ cmd[1] = 0;
+ xany2scsi(cmd+2, i);
+ cmd[6] = 0;
+ cmd[7] = 0;
+ cmd[8] = 1;
+ cmd[9] = 0;
+ aha1542_command(0, cmd, buffer, 512);
+ }
+#endif
+ request_region(bases[indx], 4,"aha1542"); /* Register the IO ports that we use */
+ count++;
+ continue;
+ unregister:
+ scsi_unregister(shpnt);
+ continue;
+
+ };
+
+ return count;
+}
+
+static int aha1542_restart(struct Scsi_Host * shost)
+{
+ int i;
+ int count = 0;
+#if 0
+ unchar ahacmd = CMD_START_SCSI;
+#endif
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(shost)->SCint[i] &&
+ !(HOSTDATA(shost)->SCint[i]->device->soft_reset))
+ {
+#if 0
+ HOSTDATA(shost)->mb[i].status = 1; /* Indicate ready to restart... */
+#endif
+ count++;
+ }
+
+ printk("Potential to restart %d stalled commands...\n", count);
+#if 0
+ /* start scsi command */
+ if (count) aha1542_out(shost->io_port, &ahacmd, 1);
+#endif
+ return 0;
+}
+
+/* The abort command does not leave the device in a clean state where
+ it is available to be used again. Until this gets worked out, we will
+ leave it commented out. */
+
+int aha1542_abort(Scsi_Cmnd * SCpnt)
+{
+#if 0
+ unchar ahacmd = CMD_START_SCSI;
+ unsigned long flags;
+ struct mailbox * mb;
+ int mbi, mbo, i;
+
+ printk("In aha1542_abort: %x %x\n",
+ inb(STATUS(SCpnt->host->io_port)),
+ inb(INTRFLAGS(SCpnt->host->io_port)));
+
+ save_flags(flags);
+ cli();
+ mb = HOSTDATA(SCpnt->host)->mb;
+ mbi = HOSTDATA(SCpnt->host)->aha1542_last_mbi_used + 1;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+
+ do{
+ if(mb[mbi].status != 0) break;
+ mbi++;
+ if (mbi >= 2*AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES;
+ } while (mbi != HOSTDATA(SCpnt->host)->aha1542_last_mbi_used);
+ restore_flags(flags);
+
+ if(mb[mbi].status) {
+ printk("Lost interrupt discovered on irq %d - attempting to recover\n",
+ SCpnt->host->irq);
+ aha1542_intr_handle(SCpnt->host->irq, NULL);
+ return 0;
+ }
+
+ /* OK, no lost interrupt. Try looking to see how many pending commands
+ we think we have. */
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i])
+ {
+ if(HOSTDATA(SCpnt->host)->SCint[i] == SCpnt) {
+ printk("Timed out command pending for %s\n",
+ kdevname(SCpnt->request.rq_dev));
+ if (HOSTDATA(SCpnt->host)->mb[i].status) {
+ printk("OGMB still full - restarting\n");
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
+ };
+ } else
+ printk("Other pending command %s\n",
+ kdevname(SCpnt->request.rq_dev));
+ }
+
+#endif
+
+ DEB(printk("aha1542_abort\n"));
+#if 0
+ save_flags(flags);
+ cli();
+ for(mbo = 0; mbo < AHA1542_MAILBOXES; mbo++)
+ if (SCpnt == HOSTDATA(SCpnt->host)->SCint[mbo]){
+ mb[mbo].status = 2; /* Abort command */
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1); /* start scsi command */
+ restore_flags(flags);
+ break;
+ };
+#endif
+ return SCSI_ABORT_SNOOZE;
+}
+
+/* We do not implement a reset function here, but the upper level code
+ assumes that it will get some kind of response for the command in
+ SCpnt. We must oblige, or the command will hang the scsi system.
+ For a first go, we assume that the 1542 notifies us with all of the
+ pending commands (it does implement soft reset, after all). */
+
+int aha1542_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ unchar ahacmd = CMD_START_SCSI;
+ int i;
+
+ /*
+ * See if a bus reset was suggested.
+ */
+ if( reset_flags & SCSI_RESET_SUGGEST_BUS_RESET )
+ {
+ /*
+ * This does a scsi reset for all devices on the bus.
+ * In principle, we could also reset the 1542 - should
+ * we do this? Try this first, and we can add that later
+ * if it turns out to be useful.
+ */
+ outb(HRST | SCRST, CONTROL(SCpnt->host->io_port));
+
+ /*
+ * Wait for the thing to settle down a bit. Unfortunately
+ * this is going to basically lock up the machine while we
+ * wait for this to complete. To be 100% correct, we need to
+ * check for timeout, and if we are doing something like this
+ * we are pretty desperate anyways.
+ */
+ WAIT(STATUS(SCpnt->host->io_port),
+ STATMASK, INIT|IDLE, STST|DIAGF|INVDCMD|DF|CDF);
+
+ /*
+ * We need to do this too before the 1542 can interact with
+ * us again.
+ */
+ setup_mailboxes(SCpnt->host->io_port, SCpnt->host);
+
+ /*
+ * Now try to pick up the pieces. Restart all commands
+ * that are currently active on the bus, and reset all of
+ * the datastructures. We have some time to kill while
+ * things settle down, so print a nice message.
+ */
+ printk("Sent BUS RESET to scsi host %d\n", SCpnt->host->host_no);
+
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] != NULL)
+ {
+ Scsi_Cmnd * SCtmp;
+ SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
+ SCtmp->result = DID_RESET << 16;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+ printk("Sending DID_RESET for target %d\n", SCpnt->target);
+	      SCtmp->scsi_done(SCtmp);
+
+ HOSTDATA(SCpnt->host)->SCint[i] = NULL;
+ HOSTDATA(SCpnt->host)->mb[i].status = 0;
+ }
+ /*
+ * Now tell the mid-level code what we did here. Since
+ * we have restarted all of the outstanding commands,
+ * then report SUCCESS.
+ */
+ return (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET);
+fail:
+ printk("aha1542.c: Unable to perform hard reset.\n");
+ printk("Power cycle machine to reset\n");
+ return (SCSI_RESET_ERROR | SCSI_RESET_BUS_RESET);
+
+
+ }
+ else
+ {
+ /* This does a selective reset of just the one device */
+ /* First locate the ccb for this command */
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] == SCpnt)
+ {
+ HOSTDATA(SCpnt->host)->ccb[i].op = 0x81; /* BUS DEVICE RESET */
+ /* Now tell the 1542 to flush all pending commands for this target */
+ aha1542_out(SCpnt->host->io_port, &ahacmd, 1);
+
+ /* Here is the tricky part. What to do next. Do we get an interrupt
+ for the commands that we aborted with the specified target, or
+ do we generate this on our own? Try it without first and see
+ what happens */
+ printk("Sent BUS DEVICE RESET to target %d\n", SCpnt->target);
+
+ /* If the first does not work, then try the second. I think the
+ first option is more likely to be correct. Free the command
+ block for all commands running on this target... */
+ for(i=0; i< AHA1542_MAILBOXES; i++)
+ if(HOSTDATA(SCpnt->host)->SCint[i] &&
+ HOSTDATA(SCpnt->host)->SCint[i]->target == SCpnt->target)
+ {
+ Scsi_Cmnd * SCtmp;
+ SCtmp = HOSTDATA(SCpnt->host)->SCint[i];
+ SCtmp->result = DID_RESET << 16;
+ if (SCtmp->host_scribble) scsi_free(SCtmp->host_scribble, 512);
+ printk("Sending DID_RESET for target %d\n", SCpnt->target);
+	  SCtmp->scsi_done(SCtmp);
+
+ HOSTDATA(SCpnt->host)->SCint[i] = NULL;
+ HOSTDATA(SCpnt->host)->mb[i].status = 0;
+ }
+ return SCSI_RESET_SUCCESS;
+ }
+ }
+  /* No active command at this time, which means every command got some
+     kind of response the last time through.  Tell the mid-level code
+     to request sense information in order to decide what to do next. */
+ return SCSI_RESET_PUNT;
+}
+
+#include "sd.h"
+
+int aha1542_biosparam(Scsi_Disk * disk, kdev_t dev, int * ip)
+{
+ int translation_algorithm;
+ int size = disk->capacity;
+
+ translation_algorithm = HOSTDATA(disk->device->host)->bios_translation;
+
+ if((size>>11) > 1024 && translation_algorithm == BIOS_TRANSLATION_25563) {
+ /* Please verify that this is the same as what DOS returns */
+ ip[0] = 255;
+ ip[1] = 63;
+    ip[2] = size / 255 / 63;
+ } else {
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ }
+
+ return 0;
+}
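A minimal stand-alone sketch of the same geometry arithmetic, using an arbitrary 2 GB example capacity (in the driver the 255/63 branch is additionally gated on the extended-translation BIOS setting):

    #include <stdio.h>

    int main(void)
    {
        unsigned long capacity = 4194304;   /* hypothetical 2 GB disk, in 512-byte sectors */
        int heads, sectors, cylinders;

        if ((capacity >> 11) > 1024) {      /* would need more than 1024 cylinders at 64/32 */
            heads = 255; sectors = 63;      /* extended translation, as above */
            cylinders = capacity / (255 * 63);
        } else {
            heads = 64; sectors = 32;
            cylinders = capacity >> 11;     /* capacity / (64 * 32) */
        }
        printf("C/H/S = %d/%d/%d\n", cylinders, heads, sectors);
        return 0;
    }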
+
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AHA1542;
+
+#include "scsi_module.c"
+#endif
+
diff --git a/linux/src/drivers/scsi/aha1542.h b/linux/src/drivers/scsi/aha1542.h
new file mode 100644
index 0000000..4f90c1c
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1542.h
@@ -0,0 +1,170 @@
+#ifndef _AHA1542_H
+
+/* $Id: aha1542.h,v 1.1.4.1 2007/03/27 21:04:30 tschwinge Exp $
+ *
+ * Header file for the adaptec 1542 driver for Linux
+ *
+ * Revision 1.1 1992/07/24 06:27:38 root
+ * Initial revision
+ *
+ * Revision 1.2 1992/07/04 18:41:49 root
+ * Replaced distribution with current drivers
+ *
+ * Revision 1.3 1992/06/23 23:58:20 root
+ * Fixes.
+ *
+ * Revision 1.2 1992/05/26 22:13:23 root
+ * Changed bug that prevented DMA above first 2 mbytes.
+ *
+ * Revision 1.1 1992/05/22 21:00:29 root
+ * Initial revision
+ *
+ * Revision 1.1 1992/04/24 18:01:50 root
+ * Initial revision
+ *
+ * Revision 1.1 1992/04/02 03:23:13 drew
+ * Initial revision
+ *
+ * Revision 1.3 1992/01/27 14:46:29 tthorn
+ * *** empty log message ***
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+/* I/O Port interface 4.2 */
+/* READ */
+#define STATUS(base) base
+#define STST 0x80 /* Self Test in Progress */
+#define DIAGF 0x40 /* Internal Diagnostic Failure */
+#define INIT 0x20 /* Mailbox Initialization Required */
+#define IDLE 0x10 /* SCSI Host Adapter Idle */
+#define CDF 0x08 /* Command/Data Out Port Full */
+#define DF 0x04 /* Data In Port Full */
+#define INVDCMD 0x01 /* Invalid H A Command */
+#define STATMASK 0xfd /* 0x02 is reserved */
+
+#define INTRFLAGS(base) (STATUS(base)+2)
+#define ANYINTR 0x80 /* Any Interrupt */
+#define SCRD 0x08 /* SCSI Reset Detected */
+#define HACC 0x04 /* HA Command Complete */
+#define MBOA 0x02 /* MBO Empty */
+#define MBIF 0x01 /* MBI Full */
+#define INTRMASK 0x8f
+
+/* WRITE */
+#define CONTROL(base) STATUS(base)
+#define HRST 0x80 /* Hard Reset */
+#define SRST 0x40 /* Soft Reset */
+#define IRST 0x20 /* Interrupt Reset */
+#define SCRST 0x10 /* SCSI Bus Reset */
+
+/* READ/WRITE */
+#define DATA(base) (STATUS(base)+1)
+#define CMD_NOP 0x00 /* No Operation */
+#define CMD_MBINIT 0x01 /* Mailbox Initialization */
+#define CMD_START_SCSI 0x02 /* Start SCSI Command */
+#define CMD_INQUIRY 0x04 /* Adapter Inquiry */
+#define CMD_EMBOI 0x05 /* Enable MailBox Out Interrupt */
+#define CMD_BUSON_TIME 0x07 /* Set Bus-On Time */
+#define CMD_BUSOFF_TIME 0x08 /* Set Bus-Off Time */
+#define CMD_DMASPEED 0x09 /* Set AT Bus Transfer Speed */
+#define CMD_RETDEVS 0x0a /* Return Installed Devices */
+#define CMD_RETCONF 0x0b /* Return Configuration Data */
+#define CMD_RETSETUP 0x0d /* Return Setup Data */
+#define CMD_ECHO 0x1f /* ECHO Command Data */
+
+#define CMD_EXTBIOS 0x28 /* Return extended BIOS information (1542C only) */
+#define CMD_MBENABLE 0x29 /* Set mailbox interface enable (1542C only) */
+
+/* Mailbox Definition 5.2.1 and 5.2.2 */
+struct mailbox {
+ unchar status; /* Command/Status */
+ unchar ccbptr[3]; /* msb, .., lsb */
+};
+
+/* This is used with scatter-gather */
+struct chain {
+ unchar datalen[3]; /* Size of this part of chain */
+ unchar dataptr[3]; /* Location of data */
+};
+
+/* These belong in scsi.h also */
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16) ; \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
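These macros store and read 24-bit values most-significant-byte first (the x-prefixed pair handles 32 bits). A quick stand-alone round trip, with the macros reproduced and an arbitrary example address:

    #include <stdio.h>

    typedef unsigned char unchar;

    /* Same definitions as above, copied here so the test builds on its own. */
    #define any2scsi(up, p) \
    (up)[0] = (((unsigned long)(p)) >> 16) ; \
    (up)[1] = (((unsigned long)(p)) >> 8); \
    (up)[2] = ((unsigned long)(p));

    #define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )

    int main(void)
    {
        unchar buf[3];
        unsigned long addr = 0x12A4C0;      /* arbitrary 24-bit bus address */

        any2scsi(buf, addr);                /* stored msb first: 0x12 0xa4 0xc0 */
        printf("bytes: %02x %02x %02x, back: %06lx\n",
               buf[0], buf[1], buf[2], (unsigned long)scsi2int(buf));
        return 0;
    }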
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+
+struct ccb { /* Command Control Block 5.3 */
+ unchar op; /* Command Control Block Operation Code */
+ unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked*/
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ unchar cdblen; /* SCSI Command Length */
+ unchar rsalen; /* Request Sense Allocation Length/Disable */
+ unchar datalen[3]; /* Data Length (msb, .., lsb) */
+ unchar dataptr[3]; /* Data Pointer */
+ unchar linkptr[3]; /* Link Pointer */
+ unchar commlinkid; /* Command Linking Identifier */
+ unchar hastat; /* Host Adapter Status (HASTAT) */
+ unchar tarstat; /* Target Device Status */
+ unchar reserved[2];
+ unchar cdb[MAX_CDB+MAX_SENSE];/* SCSI Command Descriptor Block */
+ /* REQUEST SENSE */
+};
+
+int aha1542_detect(Scsi_Host_Template *);
+int aha1542_command(Scsi_Cmnd *);
+int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha1542_abort(Scsi_Cmnd *);
+int aha1542_reset(Scsi_Cmnd *, unsigned int);
+int aha1542_biosparam(Disk *, kdev_t, int*);
+
+#define AHA1542_MAILBOXES 8
+#define AHA1542_SCATTER 16
+#define AHA1542_CMDLUN 1
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+extern struct proc_dir_entry proc_scsi_aha1542;
+
+#define AHA1542 { NULL, NULL, \
+ &proc_scsi_aha1542,/* proc_dir_entry */ \
+ NULL, \
+ "Adaptec 1542", \
+ aha1542_detect, \
+ NULL, \
+ NULL, \
+ aha1542_command, \
+ aha1542_queuecommand, \
+ aha1542_abort, \
+ aha1542_reset, \
+ NULL, \
+ aha1542_biosparam, \
+ AHA1542_MAILBOXES, \
+ 7, \
+ AHA1542_SCATTER, \
+ AHA1542_CMDLUN, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING}
+
+#endif
diff --git a/linux/src/drivers/scsi/aha1740.c b/linux/src/drivers/scsi/aha1740.c
new file mode 100644
index 0000000..013218c
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1740.c
@@ -0,0 +1,614 @@
+/* $Id: aha1740.c,v 1.1 1999/04/26 05:54:13 tb Exp $
+ * 1993/03/31
+ * linux/kernel/aha1740.c
+ *
+ * Based loosely on aha1542.c which is
+ * Copyright (C) 1992 Tommy Thorn and
+ * Modified by Eric Youngdale
+ *
+ * This file is aha1740.c, written and
+ * Copyright (C) 1992,1993 Brad McLean
+ *
+ * Modifications to makecode and queuecommand
+ * for proper handling of multiple devices courteously
+ * provided by Michael Weller, March, 1993
+ *
+ * Multiple adapter support, extended translation detection,
+ * update to current scsi subsystem changes, proc fs support,
+ * working (!) module support based on patches from Andreas Arens,
+ * by Andreas Degert <ad@papyrus.hamburg.com>, 2/1997
+ *
+ * aha1740_makecode may still need even more work
+ * if it doesn't work for your devices, take a look.
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/dma.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "aha1740.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_aha1740 = {
+ PROC_SCSI_AHA1740, 7, "aha1740",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
+ IT WORK, THEN:
+#define DEBUG
+*/
+#ifdef DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/aha1740.c,v 1.1 1999/04/26 05:54:13 tb Exp $";
+*/
+
+struct aha1740_hostdata {
+ unsigned int slot;
+ unsigned int translation;
+ unsigned int last_ecb_used;
+ struct ecb ecb[AHA1740_ECBS];
+};
+
+#define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata)
+
+/* One for each IRQ level (9-15) */
+static struct Scsi_Host * aha_host[8] = {NULL, };
+
+int aha1740_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout)
+{
+ int len;
+ struct Scsi_Host * shpnt;
+ struct aha1740_hostdata *host;
+
+ if (inout)
+ return(-ENOSYS);
+
+ for (len = 0; len < 8; len++) {
+ shpnt = aha_host[len];
+ if (shpnt && shpnt->host_no == hostno)
+ break;
+ }
+ host = HOSTDATA(shpnt);
+
+ len = sprintf(buffer, "aha174x at IO:%x, IRQ %d, SLOT %d.\n"
+ "Extended translation %sabled.\n",
+ shpnt->io_port, shpnt->irq, host->slot,
+ host->translation ? "en" : "dis");
+
+ if (offset > len) {
+ *start = buffer;
+ return 0;
+ }
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ return len;
+}
+
+
+int aha1740_makecode(unchar *sense, unchar *status)
+{
+ struct statusword
+ {
+ ushort don:1, /* Command Done - No Error */
+ du:1, /* Data underrun */
+ :1, qf:1, /* Queue full */
+ sc:1, /* Specification Check */
+ dor:1, /* Data overrun */
+ ch:1, /* Chaining Halted */
+ intr:1, /* Interrupt issued */
+ asa:1, /* Additional Status Available */
+ sns:1, /* Sense information Stored */
+ :1, ini:1, /* Initialization Required */
+ me:1, /* Major error or exception */
+        :1, eca:1,      /* Extended Contingent Allegiance */
+ :1;
+ } status_word;
+ int retval = DID_OK;
+
+ status_word = * (struct statusword *) status;
+#ifdef DEBUG
+ printk("makecode from %x,%x,%x,%x %x,%x,%x,%x",
+ status[0], status[1], status[2], status[3],
+ sense[0], sense[1], sense[2], sense[3]);
+#endif
+ if (!status_word.don) /* Anything abnormal was detected */
+ {
+ if ( (status[1]&0x18) || status_word.sc ) /*Additional info available*/
+ {
+ /* Use the supplied info for further diagnostics */
+ switch ( status[2] )
+ {
+ case 0x12:
+ if ( status_word.dor )
+ retval=DID_ERROR; /* It's an Overrun */
+ /* If not overrun, assume underrun and ignore it! */
+ case 0x00: /* No info, assume no error, should not occur */
+ break;
+ case 0x11:
+ case 0x21:
+ retval=DID_TIME_OUT;
+ break;
+ case 0x0a:
+ retval=DID_BAD_TARGET;
+ break;
+ case 0x04:
+ case 0x05:
+ retval=DID_ABORT;
+ /* Either by this driver or the AHA1740 itself */
+ break;
+ default:
+ retval=DID_ERROR; /* No further diagnostics possible */
+ }
+ }
+ else
+ { /* Michael suggests, and Brad concurs: */
+ if ( status_word.qf )
+ {
+ retval = DID_TIME_OUT; /* forces a redo */
+ /* I think this specific one should not happen -Brad */
+ printk("aha1740.c: WARNING: AHA1740 queue overflow!\n");
+ }
+ else if ( status[0]&0x60 )
+ {
+ retval = DID_ERROR; /* Didn't find a better error */
+ }
+ /* In any other case return DID_OK so for example
+ CONDITION_CHECKS make it through to the appropriate
+ device driver */
+ }
+ }
+ /* Under all circumstances supply the target status -Michael */
+ return status[3] | retval << 16;
+}
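The value handed back to the mid-level code packs the raw target status byte in the low byte and the driver-level DID_* code in bits 16-23. A small sketch of the packing and unpacking, using DID_TIME_OUT (0x03 in scsi.h) and the on-the-wire CHECK CONDITION status 0x02 as example values:

    #include <stdio.h>

    #define DID_TIME_OUT 0x03               /* host (driver) byte, value as in scsi.h */

    int main(void)
    {
        unsigned char target_status = 0x02; /* CHECK CONDITION as seen on the bus */
        int result = target_status | (DID_TIME_OUT << 16);

        printf("result = 0x%06x\n", result);
        printf("host   = 0x%02x\n", (result >> 16) & 0xff); /* what host_byte() would extract */
        printf("status = 0x%02x\n", result & 0xff);         /* what the target returned */
        return 0;
    }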
+
+int aha1740_test_port(unsigned int base)
+{
+ char name[4], tmp;
+
+ /* Okay, look for the EISA ID's */
+ name[0]= 'A' -1 + ((tmp = inb(HID0(base))) >> 2); /* First character */
+ name[1]= 'A' -1 + ((tmp & 3) << 3);
+ name[1]+= ((tmp = inb(HID1(base))) >> 5)&0x7; /* Second Character */
+ name[2]= 'A' -1 + (tmp & 0x1f); /* Third Character */
+ name[3]=0;
+ tmp = inb(HID2(base));
+ if ( strcmp ( name, HID_MFG ) || inb(HID2(base)) != HID_PRD )
+ return 0; /* Not an Adaptec 174x */
+
+/* if ( inb(HID3(base)) != HID_REV )
+ printk("aha174x: Warning; board revision of %d; expected %d\n",
+ inb(HID3(base)),HID_REV); */
+
+ if ( inb(EBCNTRL(base)) != EBCNTRL_VALUE )
+ {
+ printk("aha174x: Board detected, but EBCNTRL = %x, so disabled it.\n",
+ inb(EBCNTRL(base)));
+ return 0;
+ }
+
+ if ( inb(PORTADR(base)) & PORTADDR_ENH )
+ return 1; /* Okay, we're all set */
+
+ printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n");
+ return 0;
+}
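The EISA manufacturer ID is three letters packed five bits each across HID0/HID1, with each letter stored as character - 'A' + 1. A stand-alone sketch of the same unpacking, fed example bytes 0x04/0x90, which decode to "ADP" under this scheme:

    #include <stdio.h>

    int main(void)
    {
        unsigned char hid0 = 0x04, hid1 = 0x90;   /* example ID bytes; decode to "ADP" */
        char name[4];

        name[0] = 'A' - 1 + (hid0 >> 2);                              /* bits 6..2 of HID0 */
        name[1] = 'A' - 1 + (((hid0 & 3) << 3) | ((hid1 >> 5) & 7));  /* split across both */
        name[2] = 'A' - 1 + (hid1 & 0x1f);                            /* bits 4..0 of HID1 */
        name[3] = '\0';

        printf("EISA manufacturer ID: %s\n", name);
        return 0;
    }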
+
+/* A "high" level interrupt handler */
+void aha1740_intr_handle(int irq, void *dev_id, struct pt_regs * regs)
+{
+ void (*my_done)(Scsi_Cmnd *);
+ int errstatus, adapstat;
+ int number_serviced;
+ struct ecb *ecbptr;
+ Scsi_Cmnd *SCtmp;
+ unsigned int base;
+
+ if (!aha_host[irq - 9])
+ panic("aha1740.c: Irq from unknown host!\n");
+ base = aha_host[irq - 9]->io_port;
+ number_serviced = 0;
+
+ while(inb(G2STAT(base)) & G2STAT_INTPEND)
+ {
+ DEB(printk("aha1740_intr top of loop.\n"));
+ adapstat = inb(G2INTST(base));
+ ecbptr = (struct ecb *) bus_to_virt(inl(MBOXIN0(base)));
+ outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */
+
+ switch ( adapstat & G2INTST_MASK )
+ {
+ case G2INTST_CCBRETRY:
+ case G2INTST_CCBERROR:
+ case G2INTST_CCBGOOD:
+ /* Host Ready -> Mailbox in complete */
+ outb(G2CNTRL_HRDY,G2CNTRL(base));
+ if (!ecbptr)
+ {
+ printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT(base)),adapstat,
+ inb(G2INTST(base)), number_serviced++);
+ continue;
+ }
+ SCtmp = ecbptr->SCpnt;
+ if (!SCtmp)
+ {
+ printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n",
+ inb(G2STAT(base)),adapstat,
+ inb(G2INTST(base)), number_serviced++);
+ continue;
+ }
+ if (SCtmp->host_scribble)
+ scsi_free(SCtmp->host_scribble, 512);
+ /* Fetch the sense data, and tuck it away, in the required slot.
+ The Adaptec automatically fetches it, and there is no
+ guarantee that we will still have it in the cdb when we come
+ back */
+ if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR )
+ {
+ memcpy(SCtmp->sense_buffer, ecbptr->sense,
+ sizeof(SCtmp->sense_buffer));
+ errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
+ }
+ else
+ errstatus = 0;
+ DEB(if (errstatus) printk("aha1740_intr_handle: returning %6x\n",
+ errstatus));
+ SCtmp->result = errstatus;
+ my_done = ecbptr->done;
+ memset(ecbptr,0,sizeof(struct ecb));
+ if ( my_done )
+ my_done(SCtmp);
+ break;
+ case G2INTST_HARDFAIL:
+ printk(KERN_ALERT "aha1740 hardware failure!\n");
+ panic("aha1740.c"); /* Goodbye */
+ case G2INTST_ASNEVENT:
+ printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n",
+ adapstat, inb(MBOXIN0(base)), inb(MBOXIN1(base)),
+ inb(MBOXIN2(base)), inb(MBOXIN3(base))); /* Say What? */
+ /* Host Ready -> Mailbox in complete */
+ outb(G2CNTRL_HRDY,G2CNTRL(base));
+ break;
+ case G2INTST_CMDGOOD:
+ /* set immediate command success flag here: */
+ break;
+ case G2INTST_CMDERROR:
+ /* Set immediate command failure flag here: */
+ break;
+ }
+ number_serviced++;
+ }
+}
+
+int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar direction;
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ unchar target = SCpnt->target;
+ struct aha1740_hostdata *host = HOSTDATA(SCpnt->host);
+ unsigned long flags;
+ void *buff = SCpnt->request_buffer;
+ int bufflen = SCpnt->request_bufflen;
+ int ecbno;
+ DEB(int i);
+
+ if(*cmd == REQUEST_SENSE)
+ {
+ if (bufflen != sizeof(SCpnt->sense_buffer))
+ {
+ printk("Wrong buffer length supplied for request sense (%d)\n",
+ bufflen);
+ }
+ SCpnt->result = 0;
+ done(SCpnt);
+ return 0;
+ }
+
+#ifdef DEBUG
+ if (*cmd == READ_10 || *cmd == WRITE_10)
+ i = xscsi2int(cmd+2);
+ else if (*cmd == READ_6 || *cmd == WRITE_6)
+ i = scsi2int(cmd+2);
+ else
+ i = -1;
+ printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ",
+ target, *cmd, i, bufflen);
+ printk("scsi cmd:");
+ for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
+ printk("\n");
+#endif
+
+ /* locate an available ecb */
+ save_flags(flags);
+ cli();
+ ecbno = host->last_ecb_used + 1; /* An optimization */
+ if (ecbno >= AHA1740_ECBS)
+ ecbno = 0;
+ do {
+ if (!host->ecb[ecbno].cmdw)
+ break;
+ ecbno++;
+ if (ecbno >= AHA1740_ECBS)
+ ecbno = 0;
+ } while (ecbno != host->last_ecb_used);
+
+ if (host->ecb[ecbno].cmdw)
+ panic("Unable to find empty ecb for aha1740.\n");
+
+ host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command
+ doubles as reserved flag */
+
+ host->last_ecb_used = ecbno;
+ restore_flags(flags);
+
+#ifdef DEBUG
+ printk("Sending command (%d %x)...", ecbno, done);
+#endif
+
+ host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command Descriptor Block Length */
+
+ direction = 0;
+ if (*cmd == READ_10 || *cmd == READ_6)
+ direction = 1;
+ else if (*cmd == WRITE_10 || *cmd == WRITE_6)
+ direction = 0;
+
+ memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len);
+
+ if (SCpnt->use_sg)
+ {
+ struct scatterlist * sgpnt;
+ struct aha1740_chain * cptr;
+ int i;
+ DEB(unsigned char * ptr);
+
+ host->ecb[ecbno].sg = 1; /* SCSI Initiator Command w/scatter-gather*/
+ SCpnt->host_scribble = (unsigned char *) scsi_malloc(512);
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+ cptr = (struct aha1740_chain *) SCpnt->host_scribble;
+ if (cptr == NULL) panic("aha1740.c: unable to allocate DMA memory\n");
+ for(i=0; i<SCpnt->use_sg; i++)
+ {
+ cptr[i].datalen = sgpnt[i].length;
+ cptr[i].dataptr = virt_to_bus(sgpnt[i].address);
+ }
+ host->ecb[ecbno].datalen = SCpnt->use_sg * sizeof(struct aha1740_chain);
+ host->ecb[ecbno].dataptr = virt_to_bus(cptr);
+#ifdef DEBUG
+ printk("cptr %x: ",cptr);
+ ptr = (unsigned char *) cptr;
+ for(i=0;i<24;i++) printk("%02x ", ptr[i]);
+#endif
+ }
+ else
+ {
+ SCpnt->host_scribble = NULL;
+ host->ecb[ecbno].datalen = bufflen;
+ host->ecb[ecbno].dataptr = virt_to_bus(buff);
+ }
+ host->ecb[ecbno].lun = SCpnt->lun;
+ host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
+ host->ecb[ecbno].dir = direction;
+ host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */
+ host->ecb[ecbno].senselen = 12;
+ host->ecb[ecbno].senseptr = virt_to_bus(host->ecb[ecbno].sense);
+ host->ecb[ecbno].statusptr = virt_to_bus(host->ecb[ecbno].status);
+ host->ecb[ecbno].done = done;
+ host->ecb[ecbno].SCpnt = SCpnt;
+#ifdef DEBUG
+ {
+ int i;
+ printk("aha1740_command: sending.. ");
+ for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++)
+ printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]);
+ }
+ printk("\n");
+#endif
+ if (done)
+ { /* The Adaptec Spec says the card is so fast that the loops will
+ only be executed once in the code below. Even if this was true
+ with the fastest processors when the spec was written, it doesn't
+	 seem to be true with today's fast processors. We print a warning
+ if the code is executed more often than LOOPCNT_WARN. If this
+ happens, it should be investigated. If the count reaches
+ LOOPCNT_MAX, we assume something is broken; since there is no
+ way to return an error (the return value is ignored by the
+ mid-level scsi layer) we have to panic (and maybe that's the
+ best thing we can do then anyhow). */
+
+#define LOOPCNT_WARN 10 /* excessive mbxout wait -> syslog-msg */
+#define LOOPCNT_MAX 1000000 /* mbxout deadlock -> panic() after ~ 2 sec. */
+ int loopcnt;
+ unsigned int base = SCpnt->host->io_port;
+ DEB(printk("aha1740[%d] critical section\n",ecbno));
+ save_flags(flags);
+ cli();
+ for (loopcnt = 0; ; loopcnt++) {
+ if (inb(G2STAT(base)) & G2STAT_MBXOUT) break;
+ if (loopcnt == LOOPCNT_WARN) {
+ printk("aha1740[%d]_mbxout wait!\n",ecbno);
+ cli(); /* printk may have done a sti()! */
+ }
+ if (loopcnt == LOOPCNT_MAX)
+ panic("aha1740.c: mbxout busy!\n");
+ }
+ outl(virt_to_bus(host->ecb + ecbno), MBOXOUT0(base));
+ for (loopcnt = 0; ; loopcnt++) {
+ if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break;
+ if (loopcnt == LOOPCNT_WARN) {
+ printk("aha1740[%d]_attn wait!\n",ecbno);
+ cli();
+ }
+ if (loopcnt == LOOPCNT_MAX)
+ panic("aha1740.c: attn wait failed!\n");
+ }
+ outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */
+ restore_flags(flags);
+ DEB(printk("aha1740[%d] request queued.\n",ecbno));
+ }
+ else
+ printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n");
+ return 0;
+}
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ SCpnt->SCp.Status++;
+}
+
+int aha1740_command(Scsi_Cmnd * SCpnt)
+{
+ aha1740_queuecommand(SCpnt, internal_done);
+ SCpnt->SCp.Status = 0;
+ while (!SCpnt->SCp.Status)
+ barrier();
+ return SCpnt->result;
+}
+
+/* Query the board for its irq_level. Nothing else matters
+ in enhanced mode on an EISA bus. */
+
+void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
+ unsigned int *translation)
+{
+ static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 };
+
+ *irq_level = intab[inb(INTDEF(base)) & 0x7];
+ *translation = inb(RESV1(base)) & 0x1;
+ outb(inb(INTDEF(base)) | 0x10, INTDEF(base));
+}
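The low three bits of INTDEF index a fixed table of IRQ levels (the zeros mark encodings the board does not use). A quick sketch of the lookup with a hypothetical register value:

    #include <stdio.h>

    int main(void)
    {
        static const int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 };
        unsigned char intdef = 0x02;        /* hypothetical INTDEF readback */

        printf("IRQ level = %d\n", intab[intdef & 0x7]);   /* prints 11 for this value */
        return 0;
    }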
+
+int aha1740_detect(Scsi_Host_Template * tpnt)
+{
+ int count = 0, slot;
+
+ DEB(printk("aha1740_detect: \n"));
+
+ for ( slot=MINEISA; slot <= MAXEISA; slot++ )
+ {
+ int slotbase;
+ unsigned int irq_level, translation;
+ struct Scsi_Host *shpnt;
+ struct aha1740_hostdata *host;
+ slotbase = SLOTBASE(slot);
+ /*
+ * The ioports for eisa boards are generally beyond that used in the
+ * check/allocate region code, but this may change at some point,
+ * so we go through the motions.
+ */
+ if (check_region(slotbase, SLOTSIZE)) /* See if in use */
+ continue;
+ if (!aha1740_test_port(slotbase))
+ continue;
+ aha1740_getconfig(slotbase,&irq_level,&translation);
+ if ((inb(G2STAT(slotbase)) &
+ (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT)
+ { /* If the card isn't ready, hard reset it */
+ outb(G2CNTRL_HRST, G2CNTRL(slotbase));
+ outb(0, G2CNTRL(slotbase));
+ }
+ printk("Configuring aha174x at IO:%x, IRQ %d\n", slotbase, irq_level);
+ printk("aha174x: Extended translation %sabled.\n",
+ translation ? "en" : "dis");
+ DEB(printk("aha1740_detect: enable interrupt channel %d\n",irq_level));
+ if (request_irq(irq_level,aha1740_intr_handle,0,"aha1740",NULL)) {
+ printk("Unable to allocate IRQ for adaptec controller.\n");
+ continue;
+ }
+ shpnt = scsi_register(tpnt, sizeof(struct aha1740_hostdata));
+ request_region(slotbase, SLOTSIZE, "aha1740");
+ shpnt->base = 0;
+ shpnt->io_port = slotbase;
+ shpnt->n_io_port = SLOTSIZE;
+ shpnt->irq = irq_level;
+ shpnt->dma_channel = 0xff;
+ host = HOSTDATA(shpnt);
+ host->slot = slot;
+ host->translation = translation;
+ aha_host[irq_level - 9] = shpnt;
+ count++;
+ }
+ return count;
+}
+
+/* Note: The following two functions do not apply very well to the Adaptec,
+ which basically manages its own affairs quite well without our interference,
+ so I haven't put anything into them. I can faintly imagine someone with a
+ *very* badly behaved SCSI target (perhaps an old tape?) wanting the abort(),
+ but it hasn't happened yet, and doing aborts brings the Adaptec to its
+ knees. I cannot (at this moment in time) think of any reason to reset the
+ card once it's running. So there. */
+
+int aha1740_abort(Scsi_Cmnd * SCpnt)
+{
+ DEB(printk("aha1740_abort called\n"));
+ return SCSI_ABORT_SNOOZE;
+}
+
+/* We do not implement a reset function here, but the upper level code assumes
+ that it will get some kind of response for the command in SCpnt. We must
+ oblige, or the command will hang the scsi system */
+
+int aha1740_reset(Scsi_Cmnd * SCpnt, unsigned int ignored)
+{
+ DEB(printk("aha1740_reset called\n"));
+ return SCSI_RESET_PUNT;
+}
+
+int aha1740_biosparam(Disk * disk, kdev_t dev, int* ip)
+{
+ int size = disk->capacity;
+ int extended = HOSTDATA(disk->device->host)->translation;
+
+ DEB(printk("aha1740_biosparam\n"));
+ if (extended && (ip[2] > 1024))
+ {
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = size / (255 * 63);
+ }
+ else
+ {
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ }
+ return 0;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = AHA1740;
+
+#include "scsi_module.c"
+#endif
+
+/* Okay, you made it all the way through. As of this writing, 3/31/93, I'm
+brad@saturn.gaylord.com or brad@bradpc.gaylord.com. I'll try to help as time
+permits if you have any trouble with this driver. Happy Linuxing! */
diff --git a/linux/src/drivers/scsi/aha1740.h b/linux/src/drivers/scsi/aha1740.h
new file mode 100644
index 0000000..478e59a
--- /dev/null
+++ b/linux/src/drivers/scsi/aha1740.h
@@ -0,0 +1,196 @@
+#ifndef _AHA1740_H
+
+/* $Id: aha1740.h,v 1.1 1999/04/26 05:54:13 tb Exp $
+ *
+ * Header file for the adaptec 1740 driver for Linux
+ *
+ * With minor revisions 3/31/93
+ * Written and (C) 1992,1993 Brad McLean. See aha1740.c
+ * for more info
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+/* Eisa Enhanced mode operation - slot locating and addressing */
+#define MINEISA 1 /* I don't have an EISA Spec to know these ranges, so I */
+#define MAXEISA 8 /* Just took my machine's specifications. Adjust to fit.*/
+ /* I just saw an ad, and bumped this from 6 to 8 */
+#define SLOTBASE(x) ((x << 12) + 0xc80)
+#define SLOTSIZE 0x5c
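Each EISA slot owns a 4 KB I/O window and the aha174x registers start at offset 0xc80 inside it, so the slot number maps directly to a port range. A tiny sketch of that mapping over the slot range used above:

    #include <stdio.h>

    #define SLOTBASE(x) (((x) << 12) + 0xc80)   /* same mapping as above, argument parenthesised */
    #define SLOTSIZE    0x5c

    int main(void)
    {
        int slot;

        for (slot = 1; slot <= 8; slot++)       /* MINEISA..MAXEISA from above */
            printf("slot %d -> I/O 0x%04x..0x%04x\n",
                   slot, SLOTBASE(slot), SLOTBASE(slot) + SLOTSIZE - 1);
        return 0;
    }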
+
+/* EISA configuration registers & values */
+#define HID0(base) (base + 0x0)
+#define HID1(base) (base + 0x1)
+#define HID2(base) (base + 0x2)
+#define HID3(base) (base + 0x3)
+#define EBCNTRL(base) (base + 0x4)
+#define PORTADR(base) (base + 0x40)
+#define BIOSADR(base) (base + 0x41)
+#define INTDEF(base) (base + 0x42)
+#define SCSIDEF(base) (base + 0x43)
+#define BUSDEF(base) (base + 0x44)
+#define RESV0(base) (base + 0x45)
+#define RESV1(base) (base + 0x46)
+#define RESV2(base) (base + 0x47)
+
+#define HID_MFG "ADP"
+#define HID_PRD 0
+#define HID_REV 2
+#define EBCNTRL_VALUE 1
+#define PORTADDR_ENH 0x80
+/* READ */
+#define G2INTST(base) (base + 0x56)
+#define G2STAT(base) (base + 0x57)
+#define MBOXIN0(base) (base + 0x58)
+#define MBOXIN1(base) (base + 0x59)
+#define MBOXIN2(base) (base + 0x5a)
+#define MBOXIN3(base) (base + 0x5b)
+#define G2STAT2(base) (base + 0x5c)
+
+#define G2INTST_MASK 0xf0 /* isolate the status */
+#define G2INTST_CCBGOOD 0x10 /* CCB Completed */
+#define G2INTST_CCBRETRY 0x50 /* CCB Completed with a retry */
+#define G2INTST_HARDFAIL 0x70 /* Adapter Hardware Failure */
+#define G2INTST_CMDGOOD 0xa0 /* Immediate command success */
+#define G2INTST_CCBERROR 0xc0 /* CCB Completed with error */
+#define G2INTST_ASNEVENT 0xd0 /* Asynchronous Event Notification */
+#define G2INTST_CMDERROR 0xe0 /* Immediate command error */
+
+#define G2STAT_MBXOUT 4 /* Mailbox Out Empty Bit */
+#define G2STAT_INTPEND 2 /* Interrupt Pending Bit */
+#define G2STAT_BUSY 1 /* Busy Bit (attention pending) */
+
+#define G2STAT2_READY 0 /* Host Ready Bit */
+
+/* WRITE (and ReadBack) */
+#define MBOXOUT0(base) (base + 0x50)
+#define MBOXOUT1(base) (base + 0x51)
+#define MBOXOUT2(base) (base + 0x52)
+#define MBOXOUT3(base) (base + 0x53)
+#define ATTN(base) (base + 0x54)
+#define G2CNTRL(base) (base + 0x55)
+
+#define ATTN_IMMED 0x10 /* Immediate Command */
+#define ATTN_START 0x40 /* Start CCB */
+#define ATTN_ABORT 0x50 /* Abort CCB */
+
+#define G2CNTRL_HRST 0x80 /* Hard Reset */
+#define G2CNTRL_IRST 0x40 /* Clear EISA Interrupt */
+#define G2CNTRL_HRDY 0x20 /* Sets HOST ready */
+
+/* This is used with scatter-gather */
+struct aha1740_chain {
+ u32 dataptr; /* Location of data */
+ u32 datalen; /* Size of this part of chain */
+};
+
+/* These belong in scsi.h */
+#define any2scsi(up, p) \
+(up)[0] = (((unsigned long)(p)) >> 16) ; \
+(up)[1] = (((unsigned long)(p)) >> 8); \
+(up)[2] = ((unsigned long)(p));
+
+#define scsi2int(up) ( (((long)*(up)) << 16) + (((long)(up)[1]) << 8) + ((long)(up)[2]) )
+
+#define xany2scsi(up, p) \
+(up)[0] = ((long)(p)) >> 24; \
+(up)[1] = ((long)(p)) >> 16; \
+(up)[2] = ((long)(p)) >> 8; \
+(up)[3] = ((long)(p));
+
+#define xscsi2int(up) ( (((long)(up)[0]) << 24) + (((long)(up)[1]) << 16) \
+ + (((long)(up)[2]) << 8) + ((long)(up)[3]) )
+
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define MAX_STATUS 32
+
+struct ecb { /* Enhanced Control Block 6.1 */
+ u16 cmdw; /* Command Word */
+ /* Flag Word 1 */
+ u16 cne:1, /* Control Block Chaining */
+ :6, di:1, /* Disable Interrupt */
+ :2, ses:1, /* Suppress Underrun error */
+ :1, sg:1, /* Scatter/Gather */
+ :1, dsb:1, /* Disable Status Block */
+ ars:1; /* Automatic Request Sense */
+ /* Flag Word 2 */
+ u16 lun:3, /* Logical Unit */
+ tag:1, /* Tagged Queuing */
+ tt:2, /* Tag Type */
+ nd:1, /* No Disconnect */
+ :1, dat:1, /* Data transfer - check direction */
+ dir:1, /* Direction of transfer 1 = datain */
+ st:1, /* Suppress Transfer */
+ chk:1, /* Calculate Checksum */
+ :2, rec:1, :1; /* Error Recovery */
+ u16 nil0; /* nothing */
+ u32 dataptr; /* Data or Scatter List ptr */
+ u32 datalen; /* Data or Scatter List len */
+ u32 statusptr; /* Status Block ptr */
+ u32 linkptr; /* Chain Address */
+ u32 nil1; /* nothing */
+ u32 senseptr; /* Sense Info Pointer */
+ u8 senselen; /* Sense Length */
+ u8 cdblen; /* CDB Length */
+ u16 datacheck; /* Data checksum */
+ u8 cdb[MAX_CDB]; /* CDB area */
+/* Hardware defined portion ends here, rest is driver defined */
+ u8 sense[MAX_SENSE]; /* Sense area */
+ u8 status[MAX_STATUS]; /* Status area */
+ Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
+ void (*done)(Scsi_Cmnd *); /* Completion Function */
+};
+
+#define AHA1740CMD_NOP 0x00 /* No OP */
+#define AHA1740CMD_INIT 0x01 /* Initiator SCSI Command */
+#define AHA1740CMD_DIAG 0x05 /* Run Diagnostic Command */
+#define AHA1740CMD_SCSI 0x06 /* Initialize SCSI */
+#define AHA1740CMD_SENSE 0x08 /* Read Sense Information */
+#define AHA1740CMD_DOWN 0x09 /* Download Firmware (yeah, I bet!) */
+#define AHA1740CMD_RINQ 0x0a /* Read Host Adapter Inquiry Data */
+#define AHA1740CMD_TARG 0x10 /* Target SCSI Command */
+
+int aha1740_detect(Scsi_Host_Template *);
+int aha1740_command(Scsi_Cmnd *);
+int aha1740_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int aha1740_abort(Scsi_Cmnd *);
+int aha1740_reset(Scsi_Cmnd *, unsigned int);
+int aha1740_biosparam(Disk *, kdev_t, int*);
+int aha1740_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#define AHA1740_ECBS 32
+#define AHA1740_SCATTER 16
+#define AHA1740_CMDLUN 1
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+extern struct proc_dir_entry proc_scsi_aha1740;
+
+#define AHA1740 {NULL, NULL, \
+ &proc_scsi_aha1740, \
+ aha1740_proc_info, \
+ "Adaptec 174x (EISA)", \
+ aha1740_detect, \
+ NULL, \
+ NULL, \
+ aha1740_command, \
+ aha1740_queuecommand, \
+ aha1740_abort, \
+ aha1740_reset, \
+ NULL, \
+ aha1740_biosparam, \
+ AHA1740_ECBS, \
+ 7, \
+ AHA1740_SCATTER, \
+ AHA1740_CMDLUN, \
+ 0, \
+ 0, \
+ ENABLE_CLUSTERING}
+
+#endif
diff --git a/linux/src/drivers/scsi/aic7xxx.c b/linux/src/drivers/scsi/aic7xxx.c
new file mode 100644
index 0000000..93bed41
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx.c
@@ -0,0 +1,11404 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F
+ * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA
+ * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide,
+ * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux,
+ * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file
+ * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual,
+ * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the
+ * ANSI SCSI-2 specification (draft 10c), ...
+ *
+ * --------------------------------------------------------------------------
+ *
+ * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org):
+ *
+ * Substantially modified to include support for wide and twin bus
+ * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes,
+ * SCB paging, and other rework of the code.
+ *
+ * Parts of this driver were also based on the FreeBSD driver by
+ * Justin T. Gibbs. His copyright follows:
+ *
+ * --------------------------------------------------------------------------
+ * Copyright (c) 1994-1997 Justin Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Where this Software is combined with software released under the terms of
+ * the GNU Public License ("GPL") and the terms of the GPL would require the
+ * combined work to also be released under the terms of the GPL, the terms
+ * and conditions of this License will apply in addition to those of the
+ * GPL with the exception of any terms or conditions of this License that
+ * conflict with, or are expressly prohibited by, the GPL.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: aic7xxx.c,v 1.1.4.1 2004/01/16 22:41:26 roland Exp $
+ *---------------------------------------------------------------------------
+ *
+ * Thanks also go to (in alphabetical order) the following:
+ *
+ * Rory Bolt - Sequencer bug fixes
+ * Jay Estabrook - Initial DEC Alpha support
+ * Doug Ledford - Much needed abort/reset bug fixes
+ * Kai Makisara - DMAing of SCBs
+ *
+ * A Boot time option was also added for not resetting the scsi bus.
+ *
+ * Form: aic7xxx=extended
+ * aic7xxx=no_reset
+ * aic7xxx=ultra
+ * aic7xxx=irq_trigger:[0,1] # 0 edge, 1 level
+ * aic7xxx=verbose
+ *
+ * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97
+ *
+ *-M*************************************************************************/
+
+/*+M**************************************************************************
+ *
+ * Further driver modifications made by Doug Ledford <dledford@redhat.com>
+ *
+ * Copyright (c) 1997-1998 Doug Ledford
+ *
+ * These changes are released under the same licensing terms as the FreeBSD
+ * driver written by Justin Gibbs. Please see his Copyright notice above
+ * for the exact terms and conditions covering my changes as well as the
+ * warranty statement.
+ *
+ * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include
+ * but are not limited to:
+ *
+ * 1: Import of the latest FreeBSD sequencer code for this driver
+ *  2: Modification of kernel code to accommodate different sequencer semantics
+ *  3: Extensive changes throughout kernel portion of driver to improve
+ *     abort/reset processing and error handling
+ * 4: Other work contributed by various people on the Internet
+ * 5: Changes to printk information and verbosity selection code
+ * 6: General reliability related changes, especially in IRQ management
+ * 7: Modifications to the default probe/attach order for supported cards
+ * 8: SMP friendliness has been improved
+ *
+ * Overall, this driver represents a significant departure from the official
+ * aic7xxx driver released by Dan Eischen in two ways. First, in the code
+ * itself. A diff between the two version of the driver is now a several
+ * thousand line diff. Second, in approach to solving the same problem. The
+ * problem is importing the FreeBSD aic7xxx driver code to linux can be a
+ * difficult and time consuming process, that also can be error prone. Dan
+ * Eischen's official driver uses the approach that the linux and FreeBSD
+ * drivers should be as identical as possible. To that end, his next version
+ * of this driver will be using a mid-layer code library that he is developing
+ * to moderate communications between the linux mid-level SCSI code and the
+ * low level FreeBSD driver. He intends to be able to essentially drop the
+ * FreeBSD driver into the linux kernel with only a few minor tweaks to some
+ * include files and the like and get things working, making for fast easy
+ * imports of the FreeBSD code into linux.
+ *
+ * I disagree with Dan's approach. Not that I don't think his way of doing
+ * things would be nice, easy to maintain, and create a more uniform driver
+ * between FreeBSD and Linux. I have no objection to those issues. My
+ * disagreement is on the needed functionality. There simply are certain
+ * things that are done differently in FreeBSD than linux that will cause
+ * problems for this driver regardless of any middle ware Dan implements.
+ * The biggest example of this at the moment is interrupt semantics. Linux
+ * doesn't provide the same protection techniques as FreeBSD does, nor can
+ * they be easily implemented in any middle ware code since they would truly
+ * belong in the kernel proper and would affect all drivers. For the time
+ * being, I see issues such as these as major stumbling blocks to the
+ * reliability of code based upon such middle ware. Therefore, I choose to
+ * use a different approach to importing the FreeBSD code that doesn't
+ * involve any middle ware type code. My approach is to import the sequencer
+ * code from FreeBSD wholesale. Then, to only make changes in the kernel
+ * portion of the driver as they are needed for the new sequencer semantics.
+ * In this way, the portion of the driver that speaks to the rest of the
+ * linux kernel is fairly static and can be changed/modified to solve
+ * any problems one might encounter without concern for the FreeBSD driver.
+ *
+ * Note: If time and experience should prove me wrong that the middle ware
+ * code Dan writes is reliable in its operation, then I'll retract my above
+ * statements. But, for those that don't know, I'm from Missouri (in the US)
+ * and our state motto is "The Show-Me State". Well, before I will put
+ * faith into it, you'll have to show me that it works :)
+ *
+ *_M*************************************************************************/
+
+/*
+ * The next three defines are user configurable. These should be the only
+ * defines a user might need to get in here and change. There are other
+ * defines buried deeper in the code, but those really shouldn't need to be
+ * touched under normal conditions.
+ * under normal conditions.
+ */
+
+/*
+ * AIC7XXX_FAKE_NEGOTIATION_CMDS
+ * We now have two distinctly different methods of device negotiation
+ * in this code. The two methods are selected by either defining or not
+ * defining this option. The difference is as follows:
+ *
+ * With AIC7XXX_FAKE_NEGOTIATION_CMDS not set (commented out)
+ * When the driver is in need of issuing a negotiation command for any
+ * given device, it will add the negotiation message on to part of a
+ * regular SCSI command for the device. In the process, if the device
+ * is configured for and using tagged queueing, then the code will
+ * also issue that single command as a non-tagged command, attach the
+ * negotiation message to that one command, and use a temporary
+ * queue depth of one to keep the untagged and tagged commands from
+ * overlapping.
+ * Pros: This doesn't use any extra SCB structures, it's simple, it
+ * works most of the time (if not all of the time now), and
+ *        since we get the device capability info from the INQUIRY data
+ * now, shouldn't cause any problems.
+ * Cons: When we need to send a negotiation command to a device, we
+ * must use a command that is being sent to LUN 0 of the device.
+ * If we try sending one to high LUN numbers, then some devices
+ * get noticeably upset. Since we have to wait for a command with
+ * LUN == 0 to come along, we may not be able to renegotiate when
+ * we want if the user is actually using say LUN 1 of a CD Changer
+ * instead of using LUN 0 for an extended period of time.
+ *
+ * With AIC7XXX_FAKE_NEGOTIATION_CMDS defined
+ * When we need to negotiate with a device, instead of attaching our
+ * negotiation message to an existing command, we insert our own
+ * fictional Scsi_Cmnd into the chain that has the negotiation message
+ * attached to it. We send this one command as untagged regardless
+ * of the device type, and we fiddle with the queue depth the same as
+ * we would with the option unset to avoid overlapping commands. The
+ * primary difference between this and the unset option is that the
+ * negotiation message is no longer attached to a specific command,
+ * instead it is its own command and is merely triggered by a
+ * combination of both A) We need to negotiate and B) The mid level
+ * SCSI code has sent us a command. We still don't do any negotiation
+ * unless there is a valid SCSI command to be processed.
+ * Pros: This fixes the problem above in the Cons section. Since we
+ * issue our own fake command, we can set the LUN to 0 regardless
+ * of what the LUN is in the real command. It also means that if
+ *        the device gets nasty over negotiation issues, it won't be
+ * showing up on a regular command, so we won't get any SENSE buffer
+ * data or STATUS_BYTE returns to the mid level code that are caused
+ * by snits in the negotiation code.
+ * Cons: We add more code, and more complexity. This means more ways
+ * in which things could break. It means a larger driver. It means
+ * more resource consumption for the fake commands. However, the
+ * biggest problem is this. Take a system where there is a CD-ROM
+ * on the SCSI bus. Someone has a CD in the CD-ROM and is using it.
+ * For some reason the SCSI bus gets reset. We don't touch the
+ * CD-ROM again for quite a period of time (so we don't renegotiate
+ * after the reset until we do touch the CD-ROM again). In the
+ * time while we aren't using the CD-ROM, the current disc is
+ * removed and a new one put in. When we go to check that disc, we
+ * will first have to renegotiate. In so doing, we issue our fake
+ * SCSI command, which happens to be TEST_UNIT_READY. The CD-ROM
+ * negotiates with us, then responds to our fake command with a
+ * CHECK_CONDITION status. We REQUEST_SENSE from the CD-ROM, it
+ * then sends the SENSE data to our fake command to tell it that
+ * it has been through a disc change. There, now we've cleared out
+ * the SENSE data along with our negotiation command, and when the
+ * real command executes, it won't pick up that the CD was changed.
+ * That's the biggest Con to this approach. In the future, I could
+ * probably code around this problem though, so this option is still
+ * viable.
+ *
+ * So, which command style should you use? I would appreciate it if people
+ * could try out both types. I want to know about any cases where one
+ * method works and the other doesn't. If one method works on significantly
+ * more systems than another, then it will become the default. If the second
+ * option turns out to work best, then I'll find a way to work around that
+ * big con I listed.
+ *
+ * -- July 7, 02:33
+ * OK...I just added some code that should make the Con listed for the
+ * fake commands a non issue now. However, it needs testing. For now,
+ * I'm going to make the default to use the fake commands, we'll see how
+ * it goes.
+ */
+
+#define AIC7XXX_FAKE_NEGOTIATION_CMDS
+
+/*
+ * AIC7XXX_STRICT_PCI_SETUP
+ * Should we assume the PCI config options on our controllers are set with
+ * sane and proper values, or should we be anal about our PCI config
+ * registers and force them to what we want? The main advantage to
+ * defining this option is on non-Intel hardware where the BIOS may not
+ * have been run to set things up, or if you have one of the BIOSless
+ * Adaptec controllers, such as a 2910, that don't get set up by the
+ * BIOS. However, keep in mind that we really do set the most important
+ *   items in the driver regardless of this setting; this only controls some
+ * of the more esoteric PCI options on these cards. In that sense, I
+ * would default to leaving this off. However, if people wish to try
+ * things both ways, that would also help me to know if there are some
+ * machines where it works one way but not another.
+ *
+ * -- July 7, 17:09
+ * OK...I need this on my machine for testing, so the default is to
+ * leave it defined.
+ *
+ * -- July 7, 18:49
+ * I needed it for testing, but it didn't make any difference, so back
+ * off she goes.
+ *
+ * -- July 16, 23:04
+ * I turned it back on to try and compensate for the 2.1.x PCI code
+ * which no longer relies solely on the BIOS and now tries to set
+ * things itself.
+ */
+
+#define AIC7XXX_STRICT_PCI_SETUP
+
+/*
+ * AIC7XXX_VERBOSE_DEBUGGING
+ * This option enables a lot of extra printk();s in the code, surrounded
+ * by if (aic7xxx_verbose ...) statements. Executing all of those if
+ * statements and the extra checks can get to where it actually does have
+ * an impact on CPU usage and such, as well as code size. Disabling this
+ * define will keep some of those from becoming part of the code.
+ *
+ * NOTE: Currently, this option has no real effect, I will be adding the
+ * various #ifdef's in the code later when I've decided a section is
+ * complete and no longer needs debugging. OK...a lot of things are now
+ * surrounded by this define, so turning this off does have an impact.
+ */
+
+/*
+ * #define AIC7XXX_VERBOSE_DEBUGGING
+ */
+
+#if defined(MODULE) || defined(PCMCIA)
+#include <linux/module.h>
+#endif
+
+#if defined(PCMCIA)
+# undef MODULE
+#endif
+
+#include <stdarg.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include <linux/tqueue.h>
+#include <linux/tasks.h>
+#include "sd.h"
+#include "scsi.h"
+#include "hosts.h"
+#include "aic7xxx.h"
+
+#include "aic7xxx/sequencer.h"
+#include "aic7xxx/scsi_message.h"
+#include "aic7xxx_reg.h"
+#include <scsi/scsicam.h>
+
+#include <linux/stat.h>
+#include <linux/malloc.h> /* for kmalloc() */
+
+#include <linux/config.h> /* for CONFIG_PCI */
+
+/*
+ * To generate the correct addresses for the controller to issue
+ * on the bus. Originally added for DEC Alpha support.
+ */
+#define VIRT_TO_BUS(a) (unsigned int)virt_to_bus((void *)(a))
+
+struct proc_dir_entry proc_scsi_aic7xxx = {
+ PROC_SCSI_AIC7XXX, 7, "aic7xxx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2,
+ 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+};
+
+#define AIC7XXX_C_VERSION "5.1.13"
+
+#define NUMBER(arr) (sizeof(arr) / sizeof(arr[0]))
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define ALL_TARGETS -1
+#define ALL_CHANNELS -1
+#define ALL_LUNS -1
+#define MAX_TARGETS 16
+#define MAX_LUNS 8
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+#ifndef KERNEL_VERSION
+# define KERNEL_VERSION(x,y,z) (((x)<<16)+((y)<<8)+(z))
+#endif
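KERNEL_VERSION packs major/minor/patch into one integer (major in bits 16 and up, minor in bits 8-15, patch in the low byte) so kernel versions compare numerically. A stand-alone sketch of the encoding and the comparison used just below:

    #include <stdio.h>

    #define KERNEL_VERSION(x,y,z) (((x)<<16)+((y)<<8)+(z))

    int main(void)
    {
        unsigned long code = KERNEL_VERSION(2, 1, 92);   /* 0x02015c */

        printf("2.1.92 encodes to %lu (0x%06lx)\n", code, code);
        printf("2.1.92 <= 2.1.92: %d\n", code <= KERNEL_VERSION(2, 1, 92));
        printf("2.1.93 <= 2.1.92: %d\n",
               (unsigned long)KERNEL_VERSION(2, 1, 93) <= code);
        return 0;
    }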
+
+/*
+ * We need the bios32.h file if we are kernel version 2.1.92 or less. The
+ * full set of pci_* changes wasn't in place until 2.1.93
+ */
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,1,92)
+# if defined(__sparc_v9__) || defined(__powerpc__)
+# error "PPC and Sparc platforms are only support under 2.1.92 and above"
+# endif
+# include <linux/bios32.h>
+#endif
+
+#if defined(__powerpc__)
+# define MMAPIO
+# ifdef mb
+# undef mb
+# endif
+# define mb() \
+ __asm__ __volatile__("eieio" ::: "memory")
+#elif defined(__i386__)
+# define MMAPIO
+# ifdef mb
+# undef mb
+# endif
+# define mb() \
+ __asm__ __volatile__("lock ; addl $0,0(%%esp)": : :"memory")
+#elif defined(__alpha__)
+# ifdef mb
+# undef mb
+# endif
+# define mb() \
+ __asm__ __volatile__("mb": : :"memory")
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0)
+# include <asm/spinlock.h>
+# include <linux/smp.h>
+# define cpuid smp_processor_id()
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+# define DRIVER_LOCK_INIT \
+ spin_lock_init(&p->spin_lock);
+# define DRIVER_LOCK \
+ if(!p->cpu_lock_count[cpuid]) { \
+ spin_lock_irqsave(&p->spin_lock, cpu_flags); \
+ p->cpu_lock_count[cpuid]++; \
+ } else { \
+ p->cpu_lock_count[cpuid]++; \
+ }
+# define DRIVER_UNLOCK \
+ if(--p->cpu_lock_count[cpuid] == 0) \
+ spin_unlock_irqrestore(&p->spin_lock, cpu_flags);
+# else
+# define DRIVER_LOCK_INIT
+# define DRIVER_LOCK
+# define DRIVER_UNLOCK
+# endif
+#else
+# define cpuid 0
+# define DRIVER_LOCK_INIT
+# define DRIVER_LOCK \
+ save_flags(cpu_flags); \
+ cli();
+# define DRIVER_UNLOCK \
+ restore_flags(cpu_flags);
+# define le32_to_cpu(x) (x)
+# define cpu_to_le32(x) (x)
+#endif
+
+/*
+ * You can try raising me if tagged queueing is enabled, or lowering
+ * me if you only have 4 SCBs.
+ */
+#ifdef CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#define AIC7XXX_CMDS_PER_DEVICE CONFIG_AIC7XXX_CMDS_PER_DEVICE
+#else
+#define AIC7XXX_CMDS_PER_DEVICE 8
+#endif
+
+/* Set this to the delay in seconds after SCSI bus reset. */
+#ifdef CONFIG_AIC7XXX_RESET_DELAY
+#define AIC7XXX_RESET_DELAY CONFIG_AIC7XXX_RESET_DELAY
+#else
+#define AIC7XXX_RESET_DELAY 5
+#endif
+
+/*
+ * Control collection of SCSI transfer statistics for the /proc filesystem.
+ *
+ * NOTE: Do NOT enable this when running on kernels version 1.2.x and below.
+ * NOTE: This does affect performance since it has to maintain statistics.
+ */
+#ifdef CONFIG_AIC7XXX_PROC_STATS
+#define AIC7XXX_PROC_STATS
+#endif
+
+/*
+ * NOTE: Uncommenting the define below no longer has any effect, the
+ * tagged queue value array is always active now. I've added
+ * a setup option to set this particular array and I'm hoping
+ * insmod will be smart enough to set it properly as well. It's
+ * by use of this array that a person can enable tagged queueing.
+ * The DEFAULT_TAG_COMMANDS define has been changed to disable
+ * tagged queueing by default, so if your devices can handle tagged
+ * queueing you will need to add a line to their lilo.conf file like:
+ * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
+ * which will result in the first four devices on the first two
+ * controllers being set to a tagged queue depth of 32.
+ *
+ * Set this for defining the number of tagged commands on a device
+ * by device, and controller by controller basis. The first set
+ * of tagged commands will be used for the first detected aic7xxx
+ * controller, the second set will be used for the second detected
+ * aic7xxx controller, and so on. These values will *only* be used
+ * for targets that are tagged queueing capable; these values will
+ * be ignored in all other cases. The tag_commands is an array of
+ * 16 to allow for wide and twin adapters. Twin adapters will use
+ * indexes 0-7 for channel 0, and indexes 8-15 for channel 1.
+ *
+ * *** Determining commands per LUN ***
+ *
+ * When AIC7XXX_CMDS_PER_DEVICE is not defined, the driver will use its
+ * own algorithm to determine the commands/LUN. If SCB paging is
+ * enabled (which it always is now), the default is 8 commands per LUN for
+ * devices that indicate support for tagged queueing.  All non-tagged devices
+ * use an internal queue depth of 3, with no more than one of those
+ * three commands active at one time.
+ */
+/* #define AIC7XXX_TAGGED_QUEUEING_BY_DEVICE */
+
+typedef struct
+{
+ unsigned char tag_commands[16]; /* Allow for wide/twin adapters. */
+} adapter_tag_info_t;
+
+/*
+ * Make a define that will tell the driver not to use tagged queueing
+ * by default.
+ */
+#ifdef CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+#define DEFAULT_TAG_COMMANDS {0, 0, 0, 0, 0, 0, 0, 0,\
+ 0, 0, 0, 0, 0, 0, 0, 0}
+#else
+#define DEFAULT_TAG_COMMANDS {255, 255, 255, 255, 255, 255, 255, 255,\
+ 255, 255, 255, 255, 255, 255, 255, 255}
+#endif
+
+/*
+ * Modify this as you see fit for your system. By setting tag_commands
+ * to 0, the driver will use its own algorithm for determining the
+ * number of commands to use (see above). When 255, the driver will
+ * not enable tagged queueing for that particular device. When positive
+ * (> 0) and (< 255) the values in the array are used for the queue_depth.
+ * Note that the maximum value for an entry is 254, but you're insane if
+ * you try to use that many commands on one device.
+ *
+ * In this example, the first line will disable tagged queueing for all
+ * the devices on the first probed aic7xxx adapter.
+ *
+ * The second line enables tagged queueing with 4 commands/LUN for IDs
+ * (1, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
+ * driver to use its own algorithm for ID 1.
+ *
+ * The third line is the same as the first line.
+ *
+ * The fourth line disables tagged queueing for devices 0 and 3. It
+ * enables tagged queueing for the other IDs, with 16 commands/LUN
+ * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
+ * IDs 2, 5-7, and 9-15.
+ */
+
+/*
+ * NOTE: The below structure is for reference only, the actual structure
+ * to modify in order to change things is located around line
+ * number 1305
+adapter_tag_info_t aic7xxx_tag_info[] =
+{
+ {DEFAULT_TAG_COMMANDS},
+ {{4, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 255, 4, 4, 4}},
+ {DEFAULT_TAG_COMMANDS},
+ {{255, 16, 4, 255, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
+};
+*/
+
+static adapter_tag_info_t aic7xxx_tag_info[] =
+{
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS},
+ {DEFAULT_TAG_COMMANDS}
+};
+
+
+/*
+ * Define an array of board names that can be indexed by aha_type.
+ * Don't forget to change this when changing the types!
+ */
+static const char *board_names[] = {
+ "AIC-7xxx Unknown", /* AIC_NONE */
+ "Adaptec AIC-7810 Hardware RAID Controller", /* AIC_7810 */
+ "Adaptec AIC-7770 SCSI host adapter", /* AIC_7770 */
+ "Adaptec AHA-274X SCSI host adapter", /* AIC_7771 */
+ "Adaptec AHA-284X SCSI host adapter", /* AIC_284x */
+ "Adaptec AIC-7850 SCSI host adapter", /* AIC_7850 */
+ "Adaptec AIC-7855 SCSI host adapter", /* AIC_7855 */
+ "Adaptec AIC-7860 Ultra SCSI host adapter", /* AIC_7860 */
+ "Adaptec AHA-2940A Ultra SCSI host adapter", /* AIC_7861 */
+ "Adaptec AIC-7870 SCSI host adapter", /* AIC_7870 */
+ "Adaptec AHA-294X SCSI host adapter", /* AIC_7871 */
+ "Adaptec AHA-394X SCSI host adapter", /* AIC_7872 */
+ "Adaptec AHA-398X SCSI host adapter", /* AIC_7873 */
+ "Adaptec AHA-2944 SCSI host adapter", /* AIC_7874 */
+ "Adaptec AIC-7880 Ultra SCSI host adapter", /* AIC_7880 */
+ "Adaptec AHA-294X Ultra SCSI host adapter", /* AIC_7881 */
+ "Adaptec AHA-394X Ultra SCSI host adapter", /* AIC_7882 */
+ "Adaptec AHA-398X Ultra SCSI host adapter", /* AIC_7883 */
+ "Adaptec AHA-2944 Ultra SCSI host adapter", /* AIC_7884 */
+ "Adaptec AIC-7895 Ultra SCSI host adapter", /* AIC_7895 */
+ "Adaptec AIC-7890/1 Ultra2 SCSI host adapter", /* AIC_7890 */
+ "Adaptec AHA-293X Ultra2 SCSI host adapter", /* AIC_7890 */
+ "Adaptec AHA-294X Ultra2 SCSI host adapter", /* AIC_7890 */
+ "Adaptec AIC-7896/7 Ultra2 SCSI host adapter", /* AIC_7896 */
+ "Adaptec AHA-394X Ultra2 SCSI host adapter", /* AIC_7897 */
+ "Adaptec AHA-395X Ultra2 SCSI host adapter", /* AIC_7897 */
+ "Adaptec PCMCIA SCSI controller", /* card bus stuff */
+ "Adaptec AIC-7892 Ultra 160/m SCSI host adapter", /* AIC_7892 */
+ "Adaptec AIC-7899 Ultra 160/m SCSI host adapter", /* AIC_7899 */
+};
+
+/*
+ * There should be a specific return value for this in scsi.h, but
+ * it seems that most drivers ignore it.
+ */
+#define DID_UNDERFLOW DID_ERROR
+
+/*
+ * What we want to do is have the higher level scsi driver requeue
+ * the command to us. There is no specific driver status for this
+ * condition, but the higher level scsi driver will requeue the
+ * command on a DID_BUS_BUSY error.
+ *
+ * Upon further inspection and testing, it seems that DID_BUS_BUSY
+ * will *always* retry the command. We can get into an infinite loop
+ * if this happens, when what we really want is some sort of counter that
+ * will automatically abort/reset the command after so many retries.
+ * Using DID_ERROR will do just that. (Made per a suggestion by
+ * Doug Ledford 8/1/96)
+ */
+#define DID_RETRY_COMMAND DID_ERROR
+
+#define HSCSIID 0x07
+#define SCSI_RESET 0x040
+
+/*
+ * EISA/VL-bus stuff
+ */
+#define MINSLOT 1
+#define MAXSLOT 15
+#define SLOTBASE(x) ((x) << 12)
+#define BASE_TO_SLOT(x) ((x) >> 12)
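+/*
+ * Illustrative arithmetic (not used by the code directly): since each
+ * EISA/VL slot maps its I/O space at (slot << 12), SLOTBASE(1) yields
+ * 0x1000 and SLOTBASE(15) yields 0xF000, while BASE_TO_SLOT(0xF000)
+ * recovers slot 15.
+ */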
+
+/*
+ * Standard EISA Host ID regs (Offset from slot base)
+ */
+#define AHC_HID0 0x80 /* 0,1: msb of ID2, 2-7: ID1 */
+#define AHC_HID1 0x81 /* 0-4: ID3, 5-7: LSB ID2 */
+#define AHC_HID2 0x82 /* product */
+#define AHC_HID3 0x83 /* firmware revision */
+
+/*
+ * AIC-7770 I/O range to reserve for a card
+ */
+#define MINREG 0xC00
+#define MAXREG 0xCBF
+
+#define INTDEF 0x5C /* Interrupt Definition Register */
+
+/*
+ * AIC-78X0 PCI registers
+ */
+#define CLASS_PROGIF_REVID 0x08
+#define DEVREVID 0x000000FFul
+#define PROGINFC 0x0000FF00ul
+#define SUBCLASS 0x00FF0000ul
+#define BASECLASS 0xFF000000ul
+
+#define CSIZE_LATTIME 0x0C
+#define CACHESIZE 0x0000003Ful /* only 5 bits */
+#define LATTIME 0x0000FF00ul
+
+#define DEVCONFIG 0x40
+#define SCBSIZE32 0x00010000ul /* aic789X only */
+#define MPORTMODE 0x00000400ul /* aic7870 only */
+#define RAMPSM 0x00000200ul /* aic7870 only */
+#define RAMPSM_ULTRA2 0x00000004
+#define VOLSENSE 0x00000100ul
+#define SCBRAMSEL 0x00000080ul
+#define SCBRAMSEL_ULTRA2 0x00000008
+#define MRDCEN 0x00000040ul
+#define EXTSCBTIME 0x00000020ul /* aic7870 only */
+#define EXTSCBPEN 0x00000010ul /* aic7870 only */
+#define BERREN 0x00000008ul
+#define DACEN 0x00000004ul
+#define STPWLEVEL 0x00000002ul
+#define DIFACTNEGEN 0x00000001ul /* aic7870 only */
+
+#define SCAMCTL 0x1a /* Ultra2 only */
+#define CCSCBBADDR 0xf0 /* aic7895/6/7 */
+
+/*
+ * Define the different types of SEEPROMs on aic7xxx adapters
+ * and make it also represent the address size used in accessing
+ * its registers. The 93C46 chips have 1024 bits organized into
+ * 64 16-bit words, while the 93C56 chips have 2048 bits organized
+ * into 128 16-bit words. The C46 chips use 6 bits to address
+ * each word, while the C56 and C66 (4096 bits) use 8 bits to
+ * address each word.
+ */
+typedef enum {C46 = 6, C56_66 = 8} seeprom_chip_type;
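+/*
+ * Illustrative note: the enum value doubles as the number of address bits
+ * the SEEPROM code has to clock out, so a C46 (64 x 16-bit words) takes
+ * C46 == 6 address bits to reach word 63, while a C56/C66 (128/256 words)
+ * takes C56_66 == 8 bits per the comment above.
+ */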
+
+/*
+ *
+ * Define the format of the SEEPROM registers (16 bits).
+ *
+ */
+struct seeprom_config {
+
+/*
+ * SCSI ID Configuration Flags
+ */
+#define CFXFER 0x0007 /* synchronous transfer rate */
+#define CFSYNCH 0x0008 /* enable synchronous transfer */
+#define CFDISC 0x0010 /* enable disconnection */
+#define CFWIDEB 0x0020 /* wide bus device (wide card) */
+#define CFSYNCHISULTRA 0x0040 /* CFSYNC is an ultra offset */
+#define CFNEWULTRAFORMAT 0x0080 /* Use the Ultra2 SEEPROM format */
+#define CFSTART 0x0100 /* send start unit SCSI command */
+#define CFINCBIOS 0x0200 /* include in BIOS scan */
+#define CFRNFOUND 0x0400 /* report even if not found */
+#define CFMULTILUN 0x0800 /* probe mult luns in BIOS scan */
+#define CFWBCACHEYES 0x4000 /* Enable W-Behind Cache on drive */
+#define CFWBCACHENC 0xc000 /* Don't change W-Behind Cache */
+/* UNUSED 0x3000 */
+ unsigned short device_flags[16]; /* words 0-15 */
+
+/*
+ * BIOS Control Bits
+ */
+#define CFSUPREM 0x0001 /* support all removable drives */
+#define CFSUPREMB 0x0002 /* support removable drives for boot only */
+#define CFBIOSEN 0x0004 /* BIOS enabled */
+/* UNUSED 0x0008 */
+#define CFSM2DRV 0x0010 /* support more than two drives */
+#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */
+/* UNUSED 0x0040 */
+#define CFEXTEND 0x0080 /* extended translation enabled */
+/* UNUSED 0xFF00 */
+ unsigned short bios_control; /* word 16 */
+
+/*
+ * Host Adapter Control Bits
+ */
+#define CFAUTOTERM 0x0001 /* Perform Auto termination */
+#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable (Ultra cards) */
+#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */
+#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */
+#define CFSTERM 0x0004 /* SCSI low byte termination */
+#define CFWSTERM 0x0008 /* SCSI high byte termination (wide card) */
+#define CFSPARITY 0x0010 /* SCSI parity */
+#define CF284XSTERM 0x0020 /* SCSI low byte termination (284x cards) */
+#define CFRESETB 0x0040 /* reset SCSI bus at boot */
+#define CFBPRIMARY 0x0100 /* Channel B primary on 7895 chipsets */
+#define CFSEAUTOTERM 0x0400 /* aic7890 Perform SE Auto Term */
+#define CFLVDSTERM 0x0800 /* aic7890 LVD Termination */
+/* UNUSED 0xF280 */
+ unsigned short adapter_control; /* word 17 */
+
+/*
+ * Bus Release, Host Adapter ID
+ */
+#define CFSCSIID 0x000F /* host adapter SCSI ID */
+/* UNUSED 0x00F0 */
+#define CFBRTIME 0xFF00 /* bus release time */
+ unsigned short brtime_id; /* word 18 */
+
+/*
+ * Maximum targets
+ */
+#define CFMAXTARG 0x00FF /* maximum targets */
+/* UNUSED 0xFF00 */
+ unsigned short max_targets; /* word 19 */
+
+ unsigned short res_1[11]; /* words 20-30 */
+ unsigned short checksum; /* word 31 */
+};
+
+#define SELBUS_MASK 0x0a
+#define SELNARROW 0x00
+#define SELBUSB 0x08
+#define SINGLE_BUS 0x00
+
+#define SCB_TARGET(scb) \
+ (((scb)->hscb->target_channel_lun & TID) >> 4)
+#define SCB_LUN(scb) \
+ ((scb)->hscb->target_channel_lun & LID)
+#define SCB_IS_SCSIBUS_B(scb) \
+ (((scb)->hscb->target_channel_lun & SELBUSB) != 0)
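+/*
+ * Illustrative decode (assuming TID == 0xF0 and LID == 0x07, matching the
+ * 4/1/3 bit split noted in struct aic7xxx_hwscb below): bits 7-4 hold the
+ * target ID, bit 3 the channel (SELBUSB), and bits 2-0 the LUN, so a
+ * target_channel_lun of 0x5A would decode to target 5, channel B, LUN 2.
+ */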
+
+/*
+ * If an error occurs during a data transfer phase, run the command
+ * to completion - it's easier that way - making a note of the error
+ * condition in this location. This then will modify a DID_OK status
+ * into an appropriate error for the higher-level SCSI code.
+ */
+#define aic7xxx_error(cmd) ((cmd)->SCp.Status)
+
+/*
+ * Keep track of the targets returned status.
+ */
+#define aic7xxx_status(cmd) ((cmd)->SCp.sent_command)
+
+/*
+ * The position of the SCSI commands scb within the scb array.
+ */
+#define aic7xxx_position(cmd) ((cmd)->SCp.have_data_in)
+
+/*
+ * So we can keep track of our host structs
+ */
+static struct aic7xxx_host *first_aic7xxx = NULL;
+
+/*
+ * As of Linux 2.1, the mid-level SCSI code uses virtual addresses
+ * in the scatter-gather lists. We need to convert the virtual
+ * addresses to physical addresses.
+ */
+struct hw_scatterlist {
+ unsigned int address;
+ unsigned int length;
+};
+
+/*
+ * Maximum number of SG segments these cards can support.
+ */
+#define AIC7XXX_MAX_SG 128
+
+/*
+ * The maximum number of SCBs we could have for ANY type
+ * of card. DON'T FORGET TO CHANGE THE SCB MASK IN THE
+ * SEQUENCER CODE IF THIS IS MODIFIED!
+ */
+#define AIC7XXX_MAXSCB 255
+
+
+struct aic7xxx_hwscb {
+/* ------------ Begin hardware supported fields ---------------- */
+/* 0*/ unsigned char control;
+/* 1*/ unsigned char target_channel_lun; /* 4/1/3 bits */
+/* 2*/ unsigned char target_status;
+/* 3*/ unsigned char SG_segment_count;
+/* 4*/ unsigned int SG_list_pointer;
+/* 8*/ unsigned char residual_SG_segment_count;
+/* 9*/ unsigned char residual_data_count[3];
+/*12*/ unsigned int data_pointer;
+/*16*/ unsigned int data_count;
+/*20*/ unsigned int SCSI_cmd_pointer;
+/*24*/ unsigned char SCSI_cmd_length;
+/*25*/ unsigned char tag; /* Index into our kernel SCB array.
+ * Also used as the tag for tagged I/O
+ */
+#define SCB_PIO_TRANSFER_SIZE 26 /* amount we need to upload/download
+ * via PIO to initialize a transaction.
+ */
+/*26*/ unsigned char next; /* Used to thread SCBs awaiting selection
+ * or disconnected down in the sequencer.
+ */
+/*27*/ unsigned char prev;
+/*28*/ unsigned int pad; /*
+ * Unused by the kernel, but we require
+ * the padding so that the array of
+ * hardware SCBs is aligned on 32 byte
+ * boundaries so the sequencer can index
+ * them easily.
+ */
+};
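+/*
+ * Informational size check: the fields above total 26 bytes of state that
+ * gets PIOed to the card (SCB_PIO_TRANSFER_SIZE), and next, prev plus the
+ * 4-byte pad bring each hardware SCB to exactly 32 bytes, giving the
+ * 32-byte alignment the sequencer relies on.
+ */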
+
+typedef enum {
+ SCB_FREE = 0x0000,
+ SCB_WAITINGQ = 0x0002,
+ SCB_ACTIVE = 0x0004,
+ SCB_SENSE = 0x0008,
+ SCB_ABORT = 0x0010,
+ SCB_DEVICE_RESET = 0x0020,
+ SCB_RESET = 0x0040,
+ SCB_RECOVERY_SCB = 0x0080,
+ SCB_WAS_BUSY = 0x0100,
+ SCB_MSGOUT_SENT = 0x0200,
+ SCB_MSGOUT_SDTR = 0x0400,
+ SCB_MSGOUT_WDTR = 0x0800,
+ SCB_MSGOUT_BITS = SCB_MSGOUT_SENT |
+ SCB_MSGOUT_SDTR |
+ SCB_MSGOUT_WDTR,
+ SCB_QUEUED_ABORT = 0x1000,
+ SCB_QUEUED_FOR_DONE = 0x2000
+} scb_flag_type;
+
+typedef enum {
+ AHC_FNONE = 0x00000000,
+ AHC_PAGESCBS = 0x00000001,
+ AHC_CHANNEL_B_PRIMARY = 0x00000002,
+ AHC_USEDEFAULTS = 0x00000004,
+ AHC_INDIRECT_PAGING = 0x00000008,
+ AHC_CHNLB = 0x00000020,
+ AHC_CHNLC = 0x00000040,
+ AHC_EXTEND_TRANS_A = 0x00000100,
+ AHC_EXTEND_TRANS_B = 0x00000200,
+ AHC_TERM_ENB_A = 0x00000400,
+ AHC_TERM_ENB_SE_LOW = 0x00000400,
+ AHC_TERM_ENB_B = 0x00000800,
+ AHC_TERM_ENB_SE_HIGH = 0x00000800,
+ AHC_HANDLING_REQINITS = 0x00001000,
+ AHC_TARGETMODE = 0x00002000,
+ AHC_NEWEEPROM_FMT = 0x00004000,
+ /*
+ * Here end the FreeBSD defined flags and here begin the Linux defined
+ * flags. NOTE: I did not preserve the old flag names during this change
+ * specifically to force me to evaluate what flags were being used properly
+ * and what flags weren't. This way, I could clean up the flag usage on
+ * a use by use basis. Doug Ledford
+ */
+ AHC_RESET_DELAY = 0x00080000,
+ AHC_A_SCANNED = 0x00100000,
+ AHC_B_SCANNED = 0x00200000,
+ AHC_MULTI_CHANNEL = 0x00400000,
+ AHC_BIOS_ENABLED = 0x00800000,
+ AHC_SEEPROM_FOUND = 0x01000000,
+ AHC_TERM_ENB_LVD = 0x02000000,
+ AHC_ABORT_PENDING = 0x04000000,
+ AHC_RESET_PENDING = 0x08000000,
+#define AHC_IN_ISR_BIT 28
+ AHC_IN_ISR = 0x10000000,
+ AHC_IN_ABORT = 0x20000000,
+ AHC_IN_RESET = 0x40000000,
+ AHC_EXTERNAL_SRAM = 0x80000000
+} ahc_flag_type;
+
+typedef enum {
+ AHC_NONE = 0x0000,
+ AHC_CHIPID_MASK = 0x00ff,
+ AHC_AIC7770 = 0x0001,
+ AHC_AIC7850 = 0x0002,
+ AHC_AIC7860 = 0x0003,
+ AHC_AIC7870 = 0x0004,
+ AHC_AIC7880 = 0x0005,
+ AHC_AIC7890 = 0x0006,
+ AHC_AIC7895 = 0x0007,
+ AHC_AIC7896 = 0x0008,
+ AHC_AIC7892 = 0x0009,
+ AHC_AIC7899 = 0x000a,
+ AHC_VL = 0x0100,
+ AHC_EISA = 0x0200,
+ AHC_PCI = 0x0400,
+} ahc_chip;
+
+typedef enum {
+ AHC_FENONE = 0x0000,
+ AHC_ULTRA = 0x0001,
+ AHC_ULTRA2 = 0x0002,
+ AHC_WIDE = 0x0004,
+ AHC_TWIN = 0x0008,
+ AHC_MORE_SRAM = 0x0010,
+ AHC_CMD_CHAN = 0x0020,
+ AHC_QUEUE_REGS = 0x0040,
+ AHC_SG_PRELOAD = 0x0080,
+ AHC_SPIOCAP = 0x0100,
+ AHC_ULTRA160 = 0x0200,
+ AHC_AIC7770_FE = AHC_FENONE,
+ AHC_AIC7850_FE = AHC_SPIOCAP,
+ AHC_AIC7860_FE = AHC_ULTRA|AHC_SPIOCAP,
+ AHC_AIC7870_FE = AHC_FENONE,
+ AHC_AIC7880_FE = AHC_ULTRA,
+ AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2|
+ AHC_QUEUE_REGS|AHC_SG_PRELOAD,
+ AHC_AIC7895_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA,
+ AHC_AIC7896_FE = AHC_AIC7890_FE,
+ AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_ULTRA160,
+ AHC_AIC7899_FE = AHC_AIC7890_FE|AHC_ULTRA160,
+} ahc_feature;
+
+struct aic7xxx_scb {
+ struct aic7xxx_hwscb *hscb; /* corresponding hardware scb */
+ Scsi_Cmnd *cmd; /* Scsi_Cmnd for this scb */
+ struct aic7xxx_scb *q_next; /* next scb in queue */
+ volatile scb_flag_type flags; /* current state of scb */
+ struct hw_scatterlist *sg_list; /* SG list in adapter format */
+ unsigned char tag_action;
+ unsigned char sg_count;
+ unsigned char sense_cmd[6]; /*
+ * Allocate 6 characters for
+ * sense command.
+ */
+ unsigned int sg_length; /* We init this during buildscb so we
+ * don't have to calculate anything
+ * during underflow/overflow/stat code
+ */
+ void *kmalloc_ptr;
+};
+
+/*
+ * Define a linked list of SCBs.
+ */
+typedef struct {
+ struct aic7xxx_scb *head;
+ struct aic7xxx_scb *tail;
+} scb_queue_type;
+
+static struct {
+ unsigned char errno;
+ const char *errmesg;
+} hard_error[] = {
+ { ILLHADDR, "Illegal Host Access" },
+ { ILLSADDR, "Illegal Sequencer Address referenced" },
+ { ILLOPCODE, "Illegal Opcode in sequencer program" },
+ { SQPARERR, "Sequencer Ram Parity Error" },
+ { DPARERR, "Data-Path Ram Parity Error" },
+ { MPARERR, "Scratch Ram/SCB Array Ram Parity Error" },
+ { PCIERRSTAT,"PCI Error detected" },
+ { CIOPARERR, "CIOBUS Parity Error" }
+};
+
+static unsigned char
+generic_sense[] = { REQUEST_SENSE, 0, 0, 0, 255, 0 };
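+/*
+ * Informational: generic_sense is a 6-byte REQUEST SENSE CDB template with
+ * the allocation length (byte 4) set to 255; presumably it gets copied into
+ * scb->sense_cmd[] when the driver builds an automatic request-sense for a
+ * command that returned CHECK CONDITION.
+ */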
+
+typedef struct {
+ scb_queue_type free_scbs; /*
+ * SCBs assigned to free slot on
+ * card (no paging required)
+ */
+ struct aic7xxx_scb *scb_array[AIC7XXX_MAXSCB];
+ struct aic7xxx_hwscb *hscbs;
+ unsigned char numscbs; /* current number of scbs */
+ unsigned char maxhscbs; /* hardware scbs */
+ unsigned char maxscbs; /* max scbs including pageable scbs */
+ void *hscb_kmalloc_ptr;
+} scb_data_type;
+
+struct target_cmd {
+ unsigned char mesg_bytes[4];
+ unsigned char command[28];
+};
+
+#define AHC_TRANS_CUR 0x0001
+#define AHC_TRANS_ACTIVE 0x0002
+#define AHC_TRANS_GOAL 0x0004
+#define AHC_TRANS_USER 0x0008
+#define AHC_TRANS_QUITE 0x0010
+typedef struct {
+ unsigned char cur_width;
+ unsigned char goal_width;
+ unsigned char cur_period;
+ unsigned char goal_period;
+ unsigned char cur_offset;
+ unsigned char goal_offset;
+ unsigned char user_width;
+ unsigned char user_period;
+ unsigned char user_offset;
+} transinfo_type;
+
+/*
+ * Define a structure used for each host adapter. Note, in order to avoid
+ * problems with architectures I can't test on (because I don't have one,
+ * such as the Alpha based systems) which happen to give faults for
+ * non-aligned memory accesses, care was taken to align this structure
+ * in a way that guaranteed all accesses larger than 8 bits were aligned
+ * on the appropriate boundary. It's also organized to try and be more
+ * cache line efficient. Be careful when changing this lest you hurt
+ * overall performance and bring down the wrath of the masses.
+ */
+struct aic7xxx_host {
+ /*
+ * This is the first 64 bytes in the host struct
+ */
+
+ /*
+ * We are grouping things here....first, items that get either read or
+ * written with nearly every interrupt
+ */
+ volatile ahc_flag_type flags;
+ ahc_feature features; /* chip features */
+ unsigned long base; /* card base address */
+ volatile unsigned char *maddr; /* memory mapped address */
+ unsigned long isr_count; /* Interrupt count */
+ unsigned long spurious_int;
+ scb_data_type *scb_data;
+ volatile unsigned short needsdtr;
+ volatile unsigned short sdtr_pending;
+ volatile unsigned short needwdtr;
+ volatile unsigned short wdtr_pending;
+ struct aic7xxx_cmd_queue {
+ Scsi_Cmnd *head;
+ Scsi_Cmnd *tail;
+ } completeq;
+
+ /*
+ * Things read/written on nearly every entry into aic7xxx_queue()
+ */
+ volatile scb_queue_type waiting_scbs;
+ unsigned short discenable; /* Targets allowed to disconnect */
+ unsigned short tagenable; /* Targets using tagged I/O */
+ unsigned short orderedtag; /* Ordered Q tags allowed */
+ unsigned char unpause; /* unpause value for HCNTRL */
+ unsigned char pause; /* pause value for HCNTRL */
+ volatile unsigned char qoutfifonext;
+ volatile unsigned char activescbs; /* active scbs */
+ volatile unsigned char max_activescbs;
+ volatile unsigned char qinfifonext;
+
+#define DEVICE_PRESENT 0x01
+#define BUS_DEVICE_RESET_PENDING 0x02
+#define DEVICE_RESET_DELAY 0x04
+#define DEVICE_PRINT_SDTR 0x08
+#define DEVICE_PRINT_WDTR 0x10
+#define DEVICE_WAS_BUSY 0x20
+#define DEVICE_SCANNED 0x80
+ volatile unsigned char dev_flags[MAX_TARGETS];
+ volatile unsigned char dev_active_cmds[MAX_TARGETS];
+ volatile unsigned char dev_temp_queue_depth[MAX_TARGETS];
+ unsigned char dev_commands_sent[MAX_TARGETS];
+
+ unsigned int dev_timer_active; /* Which devs have a timer set */
+ struct timer_list dev_timer;
+ unsigned long dev_expires[MAX_TARGETS];
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0)
+ spinlock_t spin_lock;
+ volatile unsigned char cpu_lock_count[NR_CPUS];
+#endif
+
+
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+ Scsi_Cmnd *dev_wdtr_cmnd[MAX_TARGETS];
+ Scsi_Cmnd *dev_sdtr_cmnd[MAX_TARGETS];
+#endif
+
+ unsigned char dev_last_queue_full[MAX_TARGETS];
+ unsigned char dev_last_queue_full_count[MAX_TARGETS];
+ unsigned char dev_max_queue_depth[MAX_TARGETS];
+
+ volatile scb_queue_type delayed_scbs[MAX_TARGETS];
+
+
+ unsigned char msg_buf[9]; /* The message for the target */
+ unsigned char msg_type;
+#define MSG_TYPE_NONE 0x00
+#define MSG_TYPE_INITIATOR_MSGOUT 0x01
+#define MSG_TYPE_INITIATOR_MSGIN 0x02
+ unsigned char msg_len; /* Length of message */
+ unsigned char msg_index; /* Index into msg_buf array */
+ transinfo_type transinfo[MAX_TARGETS];
+
+
+ /*
+ * We put the less frequently used host structure items after the more
+ * frequently used items to try and ease the burden on the cache subsystem.
+ * These entries are not *commonly* accessed, whereas the preceding entries
+ * are accessed very often. The only exceptions are the qinfifo, qoutfifo,
+ * and untagged_scbs array. But, they are often accessed only once and each
+ * access into these arrays is likely to blow a cache line, so they are put
+ * down here so we can minimize the number of cache lines required to hold
+ * the preceding entries.
+ */
+
+ volatile unsigned char untagged_scbs[256];
+ volatile unsigned char qoutfifo[256];
+ volatile unsigned char qinfifo[256];
+ unsigned int irq; /* IRQ for this adapter */
+ int instance; /* aic7xxx instance number */
+ int scsi_id; /* host adapter SCSI ID */
+ int scsi_id_b; /* channel B for twin adapters */
+ unsigned int bios_address;
+ int board_name_index;
+ unsigned short needsdtr_copy; /* default config */
+ unsigned short needwdtr_copy; /* default config */
+ unsigned short ultraenb; /* Ultra mode target list */
+ unsigned short bios_control; /* bios control - SEEPROM */
+ unsigned short adapter_control; /* adapter control - SEEPROM */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ struct pci_dev *pdev;
+#endif
+ unsigned char pci_bus;
+ unsigned char pci_device_fn;
+ struct seeprom_config sc;
+ unsigned short sc_type;
+ unsigned short sc_size;
+ struct aic7xxx_host *next; /* allow for multiple IRQs */
+ struct Scsi_Host *host; /* pointer to scsi host */
+ int host_no; /* SCSI host number */
+ unsigned long mbase; /* I/O memory address */
+ ahc_chip chip; /* chip type */
+
+ /*
+ * Statistics Kept:
+ *
+ * Total Xfers (count for each command that has a data xfer),
+ * broken down further by reads && writes.
+ *
+ * Binned sizes, writes && reads:
+ * < 512, 512, 1-2K, 2-4K, 4-8K, 8-16K, 16-32K, 32-64K, 64K-128K, > 128K
+ *
+ * Total amounts read/written above 512 bytes (amts under ignored)
+ *
+ * NOTE: Enabling this feature is likely to cause a noticeable performance
+ * decrease as the accesses into the stats structures blow apart multiple
+ * cache lines and are CPU time consuming.
+ *
+ * NOTE: Since it doesn't really buy us much, but consumes *tons* of RAM
+ * and blows apart all sorts of cache lines, I modified this so that we
+ * no longer look at the LUN. All LUNs now go into the same bin on each
+ * device for stats purposes.
+ */
+ struct aic7xxx_xferstats {
+ long w_total; /* total writes */
+ long r_total; /* total reads */
+#ifdef AIC7XXX_PROC_STATS
+ long w_bins[8]; /* binned write */
+ long r_bins[8]; /* binned reads */
+#endif /* AIC7XXX_PROC_STATS */
+ } stats[MAX_TARGETS]; /* [(channel << 3)|target] */
+
+#if 0
+ struct target_cmd *targetcmds;
+ unsigned int num_targetcmds;
+#endif
+
+};
+
+/*
+ * Valid SCSIRATE values. (p. 3-17)
+ * Provides a mapping of transfer periods in ns/4 to the proper value to
+ * stick in the SCSIRATE reg to use that transfer rate.
+ */
+#define AHC_SYNCRATE_ULTRA2 0
+#define AHC_SYNCRATE_ULTRA 2
+#define AHC_SYNCRATE_FAST 5
+static struct aic7xxx_syncrate {
+ /* Rates in Ultra mode have bit 8 of sxfr set */
+#define ULTRA_SXFR 0x100
+ int sxfr_ultra2;
+ int sxfr;
+ unsigned char period;
+ const char *rate[2];
+} aic7xxx_syncrates[] = {
+ { 0x13, 0x000, 10, {"40.0", "80.0"} },
+ { 0x14, 0x000, 11, {"33.0", "66.6"} },
+ { 0x15, 0x100, 12, {"20.0", "40.0"} },
+ { 0x16, 0x110, 15, {"16.0", "32.0"} },
+ { 0x17, 0x120, 18, {"13.4", "26.8"} },
+ { 0x18, 0x000, 25, {"10.0", "20.0"} },
+ { 0x19, 0x010, 31, {"8.0", "16.0"} },
+ { 0x1a, 0x020, 37, {"6.67", "13.3"} },
+ { 0x1b, 0x030, 43, {"5.7", "11.4"} },
+ { 0x10, 0x040, 50, {"5.0", "10.0"} },
+ { 0x00, 0x050, 56, {"4.4", "8.8" } },
+ { 0x00, 0x060, 62, {"4.0", "8.0" } },
+ { 0x00, 0x070, 68, {"3.6", "7.2" } },
+ { 0x00, 0x000, 0, {NULL, NULL} },
+};
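+/*
+ * Illustrative reading of the table above: the entry with period 25
+ * (100ns, since periods are stored in ns/4) programs an sxfr of 0x000 and
+ * reports as "10.0" MB/sec on a narrow bus or "20.0" MB/sec wide, which is
+ * what the rate[0]/rate[1] strings are used for in the negotiation printks.
+ */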
+
+#define CTL_OF_SCB(scb) (((scb->hscb)->target_channel_lun >> 3) & 0x1), \
+ (((scb->hscb)->target_channel_lun >> 4) & 0xf), \
+ ((scb->hscb)->target_channel_lun & 0x07)
+
+#define CTL_OF_CMD(cmd) ((cmd->channel) & 0x01), \
+ ((cmd->target) & 0x0f), \
+ ((cmd->lun) & 0x07)
+
+#define TARGET_INDEX(cmd) ((cmd)->target | ((cmd)->channel << 3))
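+/*
+ * Illustrative only: a command for target 3 on channel B gives
+ * TARGET_INDEX == 3 | (1 << 3) == 11, the same "(channel << 3)|target"
+ * index used for the per-device arrays in struct aic7xxx_host
+ * (dev_flags[], transinfo[], stats[], and friends).
+ */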
+
+/*
+ * A nice little define to make doing our printks a little easier
+ */
+
+#define WARN_LEAD KERN_WARNING "(scsi%d:%d:%d:%d) "
+#define INFO_LEAD KERN_INFO "(scsi%d:%d:%d:%d) "
+
+/*
+ * XXX - these options apply unilaterally to _all_ 274x/284x/294x
+ * cards in the system. This should be fixed. Exceptions to this
+ * rule are noted in the comments.
+ */
+
+
+/*
+ * Skip the scsi bus reset. Non-zero makes us skip the reset at startup. This
+ * has no effect on any later resets that might occur due to things like
+ * SCSI bus timeouts.
+ */
+static unsigned int aic7xxx_no_reset = 0;
+/*
+ * Certain PCI motherboards will scan PCI devices from highest to lowest,
+ * others scan from lowest to highest, and they tend to do all kinds of
+ * strange things when they come into contact with PCI bridge chips. The
+ * net result of all this is that the PCI card that is actually used to boot
+ * the machine is very hard to detect. Most motherboards go from lowest
+ * PCI slot number to highest, and the first SCSI controller found is the
+ * one you boot from. The only exceptions to this are when a controller
+ * has its BIOS disabled. So, we by default sort all of our SCSI controllers
+ * from lowest PCI slot number to highest PCI slot number. We also force
+ * all controllers with their BIOS disabled to the end of the list. This
+ * works on *almost* all computers. Where it doesn't work, we have this
+ * option. Setting this option to non-0 will reverse the order of the sort
+ * to highest first, then lowest, but will still leave cards with their BIOS
+ * disabled at the very end. That should fix everyone up unless there are
+ * really strange circumstances.
+ */
+static int aic7xxx_reverse_scan = 0;
+/*
+ * Should we force EXTENDED translation on a controller.
+ * 0 == Use whatever is in the SEEPROM or default to off
+ * 1 == Use whatever is in the SEEPROM or default to on
+ */
+static unsigned int aic7xxx_extended = 0;
+/*
+ * The IRQ trigger method used on EISA controllers. Does not affect PCI cards.
+ * -1 = Use detected settings.
+ * 0 = Force Edge triggered mode.
+ * 1 = Force Level triggered mode.
+ */
+static int aic7xxx_irq_trigger = -1;
+/*
+ * This variable is used to override the termination settings on a controller.
+ * This should not be used under normal conditions. However, in the case
+ * that a controller does not have a readable SEEPROM (so that we can't
+ * read the SEEPROM settings directly) and that a controller has a buggered
+ * version of the cable detection logic, this can be used to force the
+ * correct termination. It is preferable to use the manual termination
+ * settings in the BIOS if possible, but some motherboard controllers store
+ * those settings in a format we can't read. In other cases, auto term
+ * should also work, but the chipset was put together with no auto term
+ * logic (common on motherboard controllers). In those cases, we have
+ * 32 bits here to work with. That's good for 8 controllers/channels. The
+ * bits are organized as 4 bits per channel, with scsi0 getting the lowest
+ * 4 bits in the int. A 1 in a bit position indicates the termination setting
+ * that corresponds to that bit should be enabled, a 0 is disabled.
+ * It looks something like this:
+ *
+ * 0x0f = 1111-Single Ended Low Byte Termination on/off
+ *        ||\--Single Ended High Byte Termination on/off
+ *        |\---LVD Low Byte Termination on/off
+ *        \----LVD High Byte Termination on/off
+ *
+ * For non-Ultra2 controllers, the upper 2 bits are not important. So, to
+ * enable both high byte and low byte termination on scsi0, I would need to
+ * make sure that the override_term variable was set to 0x03 (bits 0011).
+ * To make sure that all termination is enabled on an Ultra2 controller at
+ * scsi2 and only high byte termination on scsi1 and high and low byte
+ * termination on scsi0, I would set override_term=0xf23 (bits 1111 0010 0011)
+ *
+ * For the most part, users should never have to use this, that's why I
+ * left it fairly cryptic instead of easy to understand. If you need it,
+ * most likely someone will be telling you what yours needs to be set to.
+ */
+static int aic7xxx_override_term = -1;
+/*
+ * Certain motherboard chipset controllers tend to screw
+ * up the polarity of the term enable output pin. Use this variable
+ * to force the correct polarity for your system. This is a bitfield variable
+ * similar to the previous one, but this one has one bit per channel instead
+ * of four.
+ * 0 = Force the setting to active low.
+ * 1 = Force setting to active high.
+ * Most Adaptec cards are active high, several motherboards are active low.
+ * To force a 2940 card at SCSI 0 to active high and a motherboard 7895
+ * controller at scsi1 and scsi2 to active low, and a 2910 card at scsi3
+ * to active high, you would need to set stpwlev=0x9 (bits 1001).
+ *
+ * People shouldn't need to use this, but if you are experiencing lots of
+ * SCSI timeout problems, this may help. There is one sure way to test what
+ * this option needs to be. Using a boot floppy to boot the system, configure
+ * your system to enable all SCSI termination (in the Adaptec SCSI BIOS) and
+ * if needed then also pass a value to override_term to make sure that the
+ * driver is enabling SCSI termination, then set this variable to either 0
+ * or 1. When the driver boots, make sure there are *NO* SCSI cables
+ * connected to your controller. If it finds and inits the controller
+ * without problem, then the setting you passed to stpwlev was correct. If
+ * the driver goes into a reset loop and hangs the system, then you need the
+ * other setting for this variable. If neither setting lets the machine
+ * boot then you have definite termination problems that may not be fixable.
+ */
+static int aic7xxx_stpwlev = -1;
+/*
+ * Set this to non-0 in order to force the driver to panic the kernel
+ * and print out debugging info on a SCSI abort or reset cycle.
+ */
+static int aic7xxx_panic_on_abort = 0;
+/*
+ * PCI bus parity checking of the Adaptec controllers. This is somewhat
+ * dubious at best. To my knowledge, this option has never actually
+ * solved a PCI parity problem, but on certain machines with broken PCI
+ * chipset configurations, it can generate tons of false error messages.
+ * It's included in the driver for completeness.
+ * 0 = Shut off PCI parity check
+ * -1 = Normal polarity pci parity checking
+ * 1 = reverse polarity pci parity checking
+ *
+ * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this
+ * variable to -1 you would actually want to simply pass the variable
+ * name without a number. That will invert the 0 which will result in
+ * -1.
+ */
+static int aic7xxx_pci_parity = 0;
+/*
+ * Set this to any non-0 value to cause us to dump the contents of all
+ * the card's registers in a hex dump format tailored to each model of
+ * controller.
+ *
+ * NOTE: THE CONTROLLER IS LEFT IN AN UNUSABLE STATE BY THIS OPTION.
+ * YOU CANNOT BOOT UP WITH THIS OPTION, IT IS FOR DEBUGGING PURPOSES
+ * ONLY
+ */
+static int aic7xxx_dump_card = 0;
+/*
+ * Set this to a non-0 value to make us dump out the 32 bit instruction
+ * registers on the card after completing the sequencer download. This
+ * allows the actual sequencer download to be verified. It is possible
+ * to use this option and still boot up and run your system. This is
+ * only intended for debugging purposes.
+ */
+static int aic7xxx_dump_sequencer = 0;
+/*
+ * Certain newer motherboards have put new PCI based devices into the
+ * IO spaces that used to typically be occupied by VLB or EISA cards.
+ * This overlap can cause these newer motherboards to lock up when scanned
+ * for older EISA and VLB devices. Setting this option to non-0 will
+ * cause the driver to skip scanning for any VLB or EISA controllers and
+ * only support the PCI controllers. NOTE: this means that if the kernel
+ * is compiled with PCI support disabled, then setting this to non-0
+ * would result in never finding any devices :)
+ */
+static int aic7xxx_no_probe = 0;
+
+/*
+ * So that insmod can find the variable and make it point to something
+ */
+#ifdef MODULE
+static char * aic7xxx = NULL;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,18)
+MODULE_PARM(aic7xxx, "s");
+#endif
+
+/*
+ * Just in case someone uses commas to separate items on the insmod
+ * command line, we define a dummy buffer here to avoid having insmod
+ * write wild stuff into our code segment
+ */
+static char dummy_buffer[60] = "Please don't trounce on me insmod!!\n";
+
+#endif
+
+#define VERBOSE_NORMAL 0x0000
+#define VERBOSE_NEGOTIATION 0x0001
+#define VERBOSE_SEQINT 0x0002
+#define VERBOSE_SCSIINT 0x0004
+#define VERBOSE_PROBE 0x0008
+#define VERBOSE_PROBE2 0x0010
+#define VERBOSE_NEGOTIATION2 0x0020
+#define VERBOSE_MINOR_ERROR 0x0040
+#define VERBOSE_TRACING 0x0080
+#define VERBOSE_ABORT 0x0f00
+#define VERBOSE_ABORT_MID 0x0100
+#define VERBOSE_ABORT_FIND 0x0200
+#define VERBOSE_ABORT_PROCESS 0x0400
+#define VERBOSE_ABORT_RETURN 0x0800
+#define VERBOSE_RESET 0xf000
+#define VERBOSE_RESET_MID 0x1000
+#define VERBOSE_RESET_FIND 0x2000
+#define VERBOSE_RESET_PROCESS 0x4000
+#define VERBOSE_RESET_RETURN 0x8000
+static int aic7xxx_verbose = VERBOSE_NORMAL | VERBOSE_NEGOTIATION |
+ VERBOSE_PROBE; /* verbose messages */
+
+
+/****************************************************************************
+ *
+ * We're going to start putting in function declarations so that order of
+ * functions is no longer important. As needed, they are added here.
+ *
+ ***************************************************************************/
+
+static void aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd);
+static void aic7xxx_print_card(struct aic7xxx_host *p);
+static void aic7xxx_print_scratch_ram(struct aic7xxx_host *p);
+static void aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+static void aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer);
+#endif
+
+/****************************************************************************
+ *
+ * These functions are now used. They happen to be wrapped in useless
+ * inb/outb port read/writes around the real reads and writes because it
+ * seems that certain very fast CPUs have a problem dealing with us when
+ * going at full speed.
+ *
+ ***************************************************************************/
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+static inline void
+mdelay(int milliseconds)
+{
+ int i;
+
+ for(i=0; i<milliseconds; i++)
+ udelay(1000);
+}
+
+static inline int
+time_after_eq(unsigned long a, unsigned long b)
+{
+ return((long)((a) - (b)) >= 0L);
+}
+
+static inline int
+timer_pending(struct timer_list *timer)
+{
+ return( timer->prev != NULL );
+}
+
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+
+#endif
+
+static inline unsigned char
+aic_inb(struct aic7xxx_host *p, long port)
+{
+#ifdef MMAPIO
+ unsigned char x;
+ if(p->maddr)
+ {
+ x = p->maddr[port];
+ }
+ else
+ {
+ x = inb(p->base + port);
+ }
+ mb();
+ return(x);
+#else
+ return(inb(p->base + port));
+#endif
+}
+
+static inline void
+aic_outb(struct aic7xxx_host *p, unsigned char val, long port)
+{
+#ifdef MMAPIO
+ if(p->maddr)
+ {
+ p->maddr[port] = val;
+ }
+ else
+ {
+ outb(val, p->base + port);
+ }
+ mb();
+#else
+ outb(val, p->base + port);
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_setup
+ *
+ * Description:
+ * Handle Linux boot parameters. This routine allows for assigning a value
+ * to a parameter with a ':' between the parameter and the value.
+ * e.g. aic7xxx=unpause:0x0A,extended
+ *-F*************************************************************************/
+void
+aic7xxx_setup(char *s, int *dummy)
+{
+ int i, n;
+ char *p;
+ char *end;
+
+ static struct {
+ const char *name;
+ unsigned int *flag;
+ } options[] = {
+ { "extended", &aic7xxx_extended },
+ { "no_reset", &aic7xxx_no_reset },
+ { "irq_trigger", &aic7xxx_irq_trigger },
+ { "verbose", &aic7xxx_verbose },
+ { "reverse_scan",&aic7xxx_reverse_scan },
+ { "override_term", &aic7xxx_override_term },
+ { "stpwlev", &aic7xxx_stpwlev },
+ { "no_probe", &aic7xxx_no_probe },
+ { "panic_on_abort", &aic7xxx_panic_on_abort },
+ { "pci_parity", &aic7xxx_pci_parity },
+ { "dump_card", &aic7xxx_dump_card },
+ { "dump_sequencer", &aic7xxx_dump_sequencer },
+ { "tag_info", NULL }
+ };
+
+ end = strchr(s, '\0');
+
+ for (p = strtok(s, ",."); p; p = strtok(NULL, ",."))
+ {
+ for (i = 0; i < NUMBER(options); i++)
+ {
+ n = strlen(options[i].name);
+ if (!strncmp(options[i].name, p, n))
+ {
+ if (!strncmp(p, "tag_info", n))
+ {
+ if (p[n] == ':')
+ {
+ char *base;
+ char *tok, *tok_end, *tok_end2;
+ char tok_list[] = { '.', ',', '{', '}', '\0' };
+ int i, instance = -1, device = -1;
+ unsigned char done = FALSE;
+
+ base = p;
+ tok = base + n + 1; /* Forward us just past the ':' */
+ tok_end = strchr(tok, '\0');
+ if (tok_end < end)
+ *tok_end = ',';
+ while(!done)
+ {
+ switch(*tok)
+ {
+ case '{':
+ if (instance == -1)
+ instance = 0;
+ else if (device == -1)
+ device = 0;
+ tok++;
+ break;
+ case '}':
+ if (device != -1)
+ device = -1;
+ else if (instance != -1)
+ instance = -1;
+ tok++;
+ break;
+ case ',':
+ case '.':
+ if (instance == -1)
+ done = TRUE;
+ else if (device >= 0)
+ device++;
+ else if (instance >= 0)
+ instance++;
+ if ( (device >= MAX_TARGETS) ||
+ (instance >= NUMBER(aic7xxx_tag_info)) )
+ done = TRUE;
+ tok++;
+ if (!done)
+ {
+ base = tok;
+ }
+ break;
+ case '\0':
+ done = TRUE;
+ break;
+ default:
+ done = TRUE;
+ tok_end = strchr(tok, '\0');
+ for(i=0; tok_list[i]; i++)
+ {
+ tok_end2 = strchr(tok, tok_list[i]);
+ if ( (tok_end2) && (tok_end2 < tok_end) )
+ {
+ tok_end = tok_end2;
+ done = FALSE;
+ }
+ }
+ if ( (instance >= 0) && (device >= 0) &&
+ (instance < NUMBER(aic7xxx_tag_info)) &&
+ (device < MAX_TARGETS) )
+ aic7xxx_tag_info[instance].tag_commands[device] =
+ simple_strtoul(tok, NULL, 0) & 0xff;
+ tok = tok_end;
+ break;
+ }
+ }
+ while((p != base) && (p != NULL))
+ p = strtok(NULL, ",.");
+ }
+ }
+ else if (p[n] == ':')
+ {
+ *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
+ }
+ else if (!strncmp(p, "verbose", n))
+ {
+ *(options[i].flag) = 0xff09;
+ }
+ else
+ {
+ *(options[i].flag) = ~(*(options[i].flag));
+ }
+ }
+ }
+ }
+}
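+/*
+ * Illustrative usage (a sketch based on the parsing above, not an
+ * exhaustive reference): a boot line such as
+ *
+ *   aic7xxx=verbose,extended,override_term:0x3
+ *
+ * turns on extra messages, turns on extended translation, and enables SE
+ * low/high byte termination on the first controller, while
+ *
+ *   aic7xxx=tag_info:{{8,12,,0,,255}}
+ *
+ * sets the queue depth on the first probed adapter to 8 for ID 0 and 12
+ * for ID 1, leaves IDs 2 and 4 at their defaults, lets the driver pick a
+ * depth for ID 3, and disables tagged queueing for ID 5.
+ */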
+
+/*+F*************************************************************************
+ * Function:
+ * pause_sequencer
+ *
+ * Description:
+ * Pause the sequencer and wait for it to actually stop - this
+ * is important since the sequencer can disable pausing for critical
+ * sections.
+ *-F*************************************************************************/
+static inline void
+pause_sequencer(struct aic7xxx_host *p)
+{
+ aic_outb(p, p->pause, HCNTRL);
+ while ((aic_inb(p, HCNTRL) & PAUSE) == 0)
+ {
+ ;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * unpause_sequencer
+ *
+ * Description:
+ * Unpause the sequencer. Unremarkable, yet done often enough to
+ * warrant an easy way to do it.
+ *-F*************************************************************************/
+static inline void
+unpause_sequencer(struct aic7xxx_host *p, int unpause_always)
+{
+ if (unpause_always ||
+ ( !(aic_inb(p, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) &&
+ !(p->flags & AHC_HANDLING_REQINITS) ) )
+ {
+ aic_outb(p, p->unpause, HCNTRL);
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * restart_sequencer
+ *
+ * Description:
+ * Restart the sequencer program from address zero. This assumes
+ * that the sequencer is already paused.
+ *-F*************************************************************************/
+static inline void
+restart_sequencer(struct aic7xxx_host *p)
+{
+ aic_outb(p, 0, SEQADDR0);
+ aic_outb(p, 0, SEQADDR1);
+ aic_outb(p, FASTMODE, SEQCTL);
+}
+
+/*
+ * We include the aic7xxx_seq.c file here so that the other defines have
+ * already been made, and so that it comes before the code that actually
+ * downloads the instructions (since we don't typically use function
+ * prototypes, our code has to be ordered that way; it's a left-over from
+ * the original driver days.....I should fix it some time DL).
+ */
+#include "aic7xxx_seq.c"
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_check_patch
+ *
+ * Description:
+ * See if the next patch to download should be downloaded.
+ *-F*************************************************************************/
+static int
+aic7xxx_check_patch(struct aic7xxx_host *p,
+ struct sequencer_patch **start_patch, int start_instr, int *skip_addr)
+{
+ struct sequencer_patch *cur_patch;
+ struct sequencer_patch *last_patch;
+ int num_patches;
+
+ num_patches = sizeof(sequencer_patches)/sizeof(struct sequencer_patch);
+ last_patch = &sequencer_patches[num_patches];
+ cur_patch = *start_patch;
+
+ while ((cur_patch < last_patch) && (start_instr == cur_patch->begin))
+ {
+ if (cur_patch->patch_func(p) == 0)
+ {
+ /*
+ * Start rejecting code.
+ */
+ *skip_addr = start_instr + cur_patch->skip_instr;
+ cur_patch += cur_patch->skip_patch;
+ }
+ else
+ {
+ /*
+ * Found an OK patch. Advance the patch pointer to the next patch
+ * and wait for our instruction pointer to get here.
+ */
+ cur_patch++;
+ }
+ }
+
+ *start_patch = cur_patch;
+ if (start_instr < *skip_addr)
+ /*
+ * Still skipping
+ */
+ return (0);
+ return(1);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_download_instr
+ *
+ * Description:
+ * Download one sequencer instruction, adjusting jump addresses to
+ * account for any patched-out instructions.
+ *-F*************************************************************************/
+static void
+aic7xxx_download_instr(struct aic7xxx_host *p, int instrptr,
+ unsigned char *dconsts)
+{
+ union ins_formats instr;
+ struct ins_format1 *fmt1_ins;
+ struct ins_format3 *fmt3_ins;
+ unsigned char opcode;
+
+ instr = *(union ins_formats*) &seqprog[instrptr * 4];
+
+ instr.integer = le32_to_cpu(instr.integer);
+
+ fmt1_ins = &instr.format1;
+ fmt3_ins = NULL;
+
+ /* Pull the opcode */
+ opcode = instr.format1.opcode;
+ switch (opcode)
+ {
+ case AIC_OP_JMP:
+ case AIC_OP_JC:
+ case AIC_OP_JNC:
+ case AIC_OP_CALL:
+ case AIC_OP_JNE:
+ case AIC_OP_JNZ:
+ case AIC_OP_JE:
+ case AIC_OP_JZ:
+ {
+ struct sequencer_patch *cur_patch;
+ int address_offset;
+ unsigned int address;
+ int skip_addr;
+ int i;
+
+ fmt3_ins = &instr.format3;
+ address_offset = 0;
+ address = fmt3_ins->address;
+ cur_patch = sequencer_patches;
+ skip_addr = 0;
+
+ for (i = 0; i < address;)
+ {
+ aic7xxx_check_patch(p, &cur_patch, i, &skip_addr);
+ if (skip_addr > i)
+ {
+ int end_addr;
+
+ end_addr = MIN(address, skip_addr);
+ address_offset += end_addr - i;
+ i = skip_addr;
+ }
+ else
+ {
+ i++;
+ }
+ }
+ address -= address_offset;
+ fmt3_ins->address = address;
+ /* Fall Through to the next code section */
+ }
+ case AIC_OP_OR:
+ case AIC_OP_AND:
+ case AIC_OP_XOR:
+ case AIC_OP_ADD:
+ case AIC_OP_ADC:
+ case AIC_OP_BMOV:
+ if (fmt1_ins->parity != 0)
+ {
+ fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
+ }
+ fmt1_ins->parity = 0;
+ /* Fall Through to the next code section */
+ case AIC_OP_ROL:
+ if ((p->features & AHC_ULTRA2) != 0)
+ {
+ int i, count;
+
+ /* Calculate odd parity for the instruction */
+ for ( i=0, count=0; i < 31; i++)
+ {
+ unsigned int mask;
+
+ mask = 0x01 << i;
+ if ((instr.integer & mask) != 0)
+ count++;
+ }
+ if (!(count & 0x01))
+ instr.format1.parity = 1;
+ }
+ else
+ {
+ if (fmt3_ins != NULL)
+ {
+ instr.integer = fmt3_ins->immediate |
+ (fmt3_ins->source << 8) |
+ (fmt3_ins->address << 16) |
+ (fmt3_ins->opcode << 25);
+ }
+ else
+ {
+ instr.integer = fmt1_ins->immediate |
+ (fmt1_ins->source << 8) |
+ (fmt1_ins->destination << 16) |
+ (fmt1_ins->ret << 24) |
+ (fmt1_ins->opcode << 25);
+ }
+ }
+ aic_outb(p, (instr.integer & 0xff), SEQRAM);
+ aic_outb(p, ((instr.integer >> 8) & 0xff), SEQRAM);
+ aic_outb(p, ((instr.integer >> 16) & 0xff), SEQRAM);
+ aic_outb(p, ((instr.integer >> 24) & 0xff), SEQRAM);
+ break;
+
+ default:
+ panic("aic7xxx: Unknown opcode encountered in sequencer program.");
+ break;
+ }
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_loadseq
+ *
+ * Description:
+ * Load the sequencer code into the controller memory.
+ *-F*************************************************************************/
+static void
+aic7xxx_loadseq(struct aic7xxx_host *p)
+{
+ struct sequencer_patch *cur_patch;
+ int i;
+ int downloaded;
+ int skip_addr;
+ unsigned char download_consts[4] = {0, 0, 0, 0};
+
+ if (aic7xxx_verbose & VERBOSE_PROBE)
+ {
+ printk(KERN_INFO "(scsi%d) Downloading sequencer code...", p->host_no);
+ }
+#if 0
+ download_consts[TMODE_NUMCMDS] = p->num_targetcmds;
+#endif
+ download_consts[TMODE_NUMCMDS] = 0;
+ cur_patch = &sequencer_patches[0];
+ downloaded = 0;
+ skip_addr = 0;
+
+ aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL);
+ aic_outb(p, 0, SEQADDR0);
+ aic_outb(p, 0, SEQADDR1);
+
+ for (i = 0; i < sizeof(seqprog) / 4; i++)
+ {
+ if (aic7xxx_check_patch(p, &cur_patch, i, &skip_addr) == 0)
+ {
+ /* Skip this instruction for this configuration. */
+ continue;
+ }
+ aic7xxx_download_instr(p, i, &download_consts[0]);
+ downloaded++;
+ }
+
+ aic_outb(p, 0, SEQADDR0);
+ aic_outb(p, 0, SEQADDR1);
+ aic_outb(p, FASTMODE | FAILDIS, SEQCTL);
+ unpause_sequencer(p, TRUE);
+ mdelay(1);
+ pause_sequencer(p);
+ aic_outb(p, FASTMODE, SEQCTL);
+ if (aic7xxx_verbose & VERBOSE_PROBE)
+ {
+ printk(" %d instructions downloaded\n", downloaded);
+ }
+ if (aic7xxx_dump_sequencer)
+ aic7xxx_print_sequencer(p, downloaded);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_print_sequencer
+ *
+ * Description:
+ * Print the contents of the sequencer memory to the screen.
+ *-F*************************************************************************/
+static void
+aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded)
+{
+ int i, k, temp;
+
+ aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL);
+ aic_outb(p, 0, SEQADDR0);
+ aic_outb(p, 0, SEQADDR1);
+
+ k = 0;
+ for (i=0; i < downloaded; i++)
+ {
+ if ( k == 0 )
+ printk("%03x: ", i);
+ temp = aic_inb(p, SEQRAM);
+ temp |= (aic_inb(p, SEQRAM) << 8);
+ temp |= (aic_inb(p, SEQRAM) << 16);
+ temp |= (aic_inb(p, SEQRAM) << 24);
+ printk("%08x", temp);
+ if ( ++k == 8 )
+ {
+ printk("\n");
+ k = 0;
+ }
+ else
+ printk(" ");
+ }
+ aic_outb(p, 0, SEQADDR0);
+ aic_outb(p, 0, SEQADDR1);
+ aic_outb(p, FASTMODE | FAILDIS, SEQCTL);
+ unpause_sequencer(p, TRUE);
+ mdelay(1);
+ pause_sequencer(p);
+ aic_outb(p, FASTMODE, SEQCTL);
+ printk("\n");
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_delay
+ *
+ * Description:
+ * Delay for specified amount of time. We use mdelay because the timer
+ * interrupt is not guaranteed to be enabled; a jiffies-based wait could
+ * then loop forever since jiffies (clock ticks) would never be updated.
+ *-F*************************************************************************/
+static void
+aic7xxx_delay(int seconds)
+{
+ mdelay(seconds * 1000);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_info
+ *
+ * Description:
+ * Return a string describing the driver.
+ *-F*************************************************************************/
+const char *
+aic7xxx_info(struct Scsi_Host *dooh)
+{
+ static char buffer[256];
+ char *bp;
+ struct aic7xxx_host *p;
+
+ bp = &buffer[0];
+ p = (struct aic7xxx_host *)dooh->hostdata;
+ memset(bp, 0, sizeof(buffer));
+ strcpy(bp, "Adaptec AHA274x/284x/294x (EISA/VLB/PCI-Fast SCSI) ");
+ strcat(bp, AIC7XXX_C_VERSION);
+ strcat(bp, "/");
+ strcat(bp, AIC7XXX_H_VERSION);
+ strcat(bp, "\n");
+ strcat(bp, " <");
+ strcat(bp, board_names[p->board_name_index]);
+ strcat(bp, ">");
+
+ return(bp);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_find_syncrate
+ *
+ * Description:
+ * Look up the valid period to SCSIRATE conversion in our table
+ *-F*************************************************************************/
+static struct aic7xxx_syncrate *
+aic7xxx_find_syncrate(struct aic7xxx_host *p, unsigned int *period,
+ unsigned int maxsync)
+{
+ struct aic7xxx_syncrate *syncrate;
+
+ syncrate = &aic7xxx_syncrates[maxsync];
+ while ( (syncrate->rate[0] != NULL) &&
+ (!(p->features & AHC_ULTRA2) || syncrate->sxfr_ultra2) )
+ {
+ if ( *period <= syncrate->period )
+ {
+ /*
+ * When responding to a target that requests sync, the requested rate
+ * may fall between two rates that we can output, but still be a rate
+ * that we can receive. Because of this, we want to respond with the
+ * same rate that it sent to us even if the period we use to send
+ * data to it is lower. Only lower the response period if we must.
+ */
+ if(syncrate == &aic7xxx_syncrates[maxsync])
+ {
+ *period = syncrate->period;
+ }
+ break;
+ }
+ syncrate++;
+ }
+ if ( (*period == 0) || (syncrate->rate[0] == NULL) ||
+ ((p->features & AHC_ULTRA2) && (syncrate->sxfr_ultra2 == 0)) )
+ {
+ /*
+ * Use async transfers for this target
+ */
+ *period = 0;
+ syncrate = NULL;
+ }
+ return (syncrate);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_find_period
+ *
+ * Description:
+ * Look up the valid SCSIRATE to period conversion in our table
+ *-F*************************************************************************/
+static unsigned int
+aic7xxx_find_period(struct aic7xxx_host *p, unsigned int scsirate,
+ unsigned int maxsync)
+{
+ struct aic7xxx_syncrate *syncrate;
+
+ if ((p->features & AHC_ULTRA2) != 0)
+ {
+ scsirate &= SXFR_ULTRA2;
+ }
+ else
+ {
+ scsirate &= SXFR;
+ }
+
+ syncrate = &aic7xxx_syncrates[maxsync];
+ while (syncrate->rate[0] != NULL)
+ {
+ if ((p->features & AHC_ULTRA2) != 0)
+ {
+ if (syncrate->sxfr_ultra2 == 0)
+ break;
+ else if (scsirate == syncrate->sxfr_ultra2)
+ return (syncrate->period);
+ }
+ else if (scsirate == (syncrate->sxfr & ~ULTRA_SXFR))
+ {
+ return (syncrate->period);
+ }
+ syncrate++;
+ }
+ return (0); /* async */
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_validate_offset
+ *
+ * Description:
+ * Set a valid offset value for the particular card and transfer
+ * settings in use.
+ *-F*************************************************************************/
+static void
+aic7xxx_validate_offset(struct aic7xxx_host *p,
+ struct aic7xxx_syncrate *syncrate, unsigned int *offset, int wide)
+{
+ unsigned int maxoffset;
+
+ /* Limit offset to what the card (and device) can do */
+ if (syncrate == NULL)
+ {
+ maxoffset = 0;
+ }
+ else if (p->features & AHC_ULTRA2)
+ {
+ maxoffset = MAX_OFFSET_ULTRA2;
+ }
+ else
+ {
+ if (wide)
+ maxoffset = MAX_OFFSET_16BIT;
+ else
+ maxoffset = MAX_OFFSET_8BIT;
+ }
+ *offset = MIN(*offset, maxoffset);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_set_syncrate
+ *
+ * Description:
+ * Set the actual syncrate down in the card and in our host structs
+ *-F*************************************************************************/
+static void
+aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate,
+ int target, int channel, unsigned int period, unsigned int offset,
+ unsigned int type)
+{
+ unsigned char tindex;
+ unsigned short target_mask;
+ unsigned char lun;
+ unsigned int old_period, old_offset;
+
+ tindex = target | (channel << 3);
+ target_mask = 0x01 << tindex;
+ lun = aic_inb(p, SCB_TCL) & 0x07;
+
+ if (syncrate == NULL)
+ {
+ period = 0;
+ offset = 0;
+ }
+
+ old_period = p->transinfo[tindex].cur_period;
+ old_offset = p->transinfo[tindex].cur_offset;
+
+
+ if (type & AHC_TRANS_CUR)
+ {
+ unsigned int scsirate;
+
+ scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
+ if (p->features & AHC_ULTRA2)
+ {
+ scsirate &= ~SXFR_ULTRA2;
+ if (syncrate != NULL)
+ {
+ scsirate |= syncrate->sxfr_ultra2;
+ }
+ if (type & AHC_TRANS_ACTIVE)
+ {
+ aic_outb(p, offset, SCSIOFFSET);
+ }
+ aic_outb(p, offset, TARG_OFFSET + tindex);
+ }
+ else /* Not an Ultra2 controller */
+ {
+ scsirate &= ~(SXFR|SOFS);
+ p->ultraenb &= ~target_mask;
+ if (syncrate != NULL)
+ {
+ if (syncrate->sxfr & ULTRA_SXFR)
+ {
+ p->ultraenb |= target_mask;
+ }
+ scsirate |= (syncrate->sxfr & SXFR);
+ scsirate |= (offset & SOFS);
+ }
+ if (type & AHC_TRANS_ACTIVE)
+ {
+ unsigned char sxfrctl0;
+
+ sxfrctl0 = aic_inb(p, SXFRCTL0);
+ sxfrctl0 &= ~FAST20;
+ if (p->ultraenb & target_mask)
+ sxfrctl0 |= FAST20;
+ aic_outb(p, sxfrctl0, SXFRCTL0);
+ }
+ aic_outb(p, p->ultraenb & 0xff, ULTRA_ENB);
+ aic_outb(p, (p->ultraenb >> 8) & 0xff, ULTRA_ENB + 1 );
+ }
+ if (type & AHC_TRANS_ACTIVE)
+ {
+ aic_outb(p, scsirate, SCSIRATE);
+ }
+ aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
+ p->transinfo[tindex].cur_period = period;
+ p->transinfo[tindex].cur_offset = offset;
+ if ( !(type & AHC_TRANS_QUITE) &&
+ (aic7xxx_verbose & VERBOSE_NEGOTIATION) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_SDTR) )
+ {
+ if (offset)
+ {
+ int rate_mod = (scsirate & WIDEXFER) ? 1 : 0;
+
+ printk(INFO_LEAD "Synchronous at %s Mbyte/sec, "
+ "offset %d.\n", p->host_no, channel, target, lun,
+ syncrate->rate[rate_mod], offset);
+ }
+ else
+ {
+ printk(INFO_LEAD "Using asynchronous transfers.\n",
+ p->host_no, channel, target, lun);
+ }
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_SDTR;
+ }
+ }
+
+ if (type & AHC_TRANS_GOAL)
+ {
+ p->transinfo[tindex].goal_period = period;
+ p->transinfo[tindex].goal_offset = offset;
+ }
+
+ if (type & AHC_TRANS_USER)
+ {
+ p->transinfo[tindex].user_period = period;
+ p->transinfo[tindex].user_offset = offset;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_set_width
+ *
+ * Description:
+ * Set the actual width down in the card and in our host structs
+ *-F*************************************************************************/
+static void
+aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun,
+ unsigned int width, unsigned int type)
+{
+ unsigned char tindex;
+ unsigned short target_mask;
+ unsigned int old_width, new_offset;
+
+ tindex = target | (channel << 3);
+ target_mask = 1 << tindex;
+
+ old_width = p->transinfo[tindex].cur_width;
+
+ if (p->features & AHC_ULTRA2)
+ new_offset = MAX_OFFSET_ULTRA2;
+ else if (width == MSG_EXT_WDTR_BUS_16_BIT)
+ new_offset = MAX_OFFSET_16BIT;
+ else
+ new_offset = MAX_OFFSET_8BIT;
+
+ if (type & AHC_TRANS_CUR)
+ {
+ unsigned char scsirate;
+
+ scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
+
+ scsirate &= ~WIDEXFER;
+ if (width == MSG_EXT_WDTR_BUS_16_BIT)
+ scsirate |= WIDEXFER;
+
+ aic_outb(p, scsirate, TARG_SCSIRATE + tindex);
+
+ if (type & AHC_TRANS_ACTIVE)
+ aic_outb(p, scsirate, SCSIRATE);
+
+ p->transinfo[tindex].cur_width = width;
+
+ if ((aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_WDTR))
+ {
+ printk(INFO_LEAD "Using %s transfers\n", p->host_no, channel, target,
+ lun, (scsirate & WIDEXFER) ? "Wide(16bit)" : "Narrow(8bit)" );
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
+ }
+ }
+
+ if (type & AHC_TRANS_GOAL)
+ p->transinfo[tindex].goal_width = width;
+ if (type & AHC_TRANS_USER)
+ p->transinfo[tindex].user_width = width;
+
+ /*
+ * Having just set the width, the SDTR should come next, and we need a valid
+ * offset for the SDTR. So, we make sure we put a valid one in here now as
+ * the goal_offset.
+ */
+ if (p->transinfo[tindex].goal_offset)
+ p->transinfo[tindex].goal_offset = new_offset;
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_init
+ *
+ * Description:
+ * SCB queue initialization.
+ *
+ *-F*************************************************************************/
+static void
+scbq_init(volatile scb_queue_type *queue)
+{
+ queue->head = NULL;
+ queue->tail = NULL;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_insert_head
+ *
+ * Description:
+ * Add an SCB to the head of the list.
+ *
+ *-F*************************************************************************/
+static inline void
+scbq_insert_head(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags;
+#endif
+
+ DRIVER_LOCK
+ scb->q_next = queue->head;
+ queue->head = scb;
+ if (queue->tail == NULL) /* If list was empty, update tail. */
+ queue->tail = queue->head;
+ DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_remove_head
+ *
+ * Description:
+ * Remove an SCB from the head of the list.
+ *
+ *-F*************************************************************************/
+static inline struct aic7xxx_scb *
+scbq_remove_head(volatile scb_queue_type *queue)
+{
+ struct aic7xxx_scb * scbp;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags;
+#endif
+
+ DRIVER_LOCK
+ scbp = queue->head;
+ if (queue->head != NULL)
+ queue->head = queue->head->q_next;
+ if (queue->head == NULL) /* If list is now empty, update tail. */
+ queue->tail = NULL;
+ DRIVER_UNLOCK
+ return(scbp);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_remove
+ *
+ * Description:
+ * Removes an SCB from the list.
+ *
+ *-F*************************************************************************/
+static inline void
+scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags;
+#endif
+
+ DRIVER_LOCK
+ if (queue->head == scb)
+ {
+ /* At beginning of queue, remove from head. */
+ scbq_remove_head(queue);
+ }
+ else
+ {
+ struct aic7xxx_scb *curscb = queue->head;
+
+ /*
+ * Search until the next scb is the one we're looking for, or
+ * we run out of queue.
+ */
+ while ((curscb != NULL) && (curscb->q_next != scb))
+ {
+ curscb = curscb->q_next;
+ }
+ if (curscb != NULL)
+ {
+ /* Found it. */
+ curscb->q_next = scb->q_next;
+ if (scb->q_next == NULL)
+ {
+ /* Update the tail when removing the tail. */
+ queue->tail = curscb;
+ }
+ }
+ }
+ DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * scbq_insert_tail
+ *
+ * Description:
+ * Add an SCB at the tail of the list.
+ *
+ *-F*************************************************************************/
+static inline void
+scbq_insert_tail(volatile scb_queue_type *queue, struct aic7xxx_scb *scb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags;
+#endif
+
+ DRIVER_LOCK
+ scb->q_next = NULL;
+ if (queue->tail != NULL) /* Add the scb at the end of the list. */
+ queue->tail->q_next = scb;
+ queue->tail = scb; /* Update the tail. */
+ if (queue->head == NULL) /* If list was empty, update head. */
+ queue->head = queue->tail;
+ DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_match_scb
+ *
+ * Description:
+ * Checks to see if an scb matches the target/channel as specified.
+ * If target is ALL_TARGETS (-1), then we're looking for any device
+ * on the specified channel; this happens when a channel is going
+ * to be reset and all devices on that channel must be aborted.
+ *-F*************************************************************************/
+static int
+aic7xxx_match_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
+ int target, int channel, int lun, unsigned char tag)
+{
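+ /*
+ * The hardware SCB packs target, channel and LUN into a single byte:
+ * target in bits 7:4, channel in bit 3, LUN in bits 2:0. The shifts
+ * and masks below unpack that encoding.
+ */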
+ int targ = (scb->hscb->target_channel_lun >> 4) & 0x0F;
+ int chan = (scb->hscb->target_channel_lun >> 3) & 0x01;
+ int slun = scb->hscb->target_channel_lun & 0x07;
+ int match;
+
+ match = ((chan == channel) || (channel == ALL_CHANNELS));
+ if (match != 0)
+ match = ((targ == target) || (target == ALL_TARGETS));
+ if (match != 0)
+ match = ((lun == slun) || (lun == ALL_LUNS));
+ if (match != 0)
+ match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
+
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ {
+ printk(KERN_INFO "(scsi%d:%d:%d:%d:tag%d) %s search criteria"
+ " (scsi%d:%d:%d:%d:tag%d)\n", p->host_no, CTL_OF_SCB(scb),
+ scb->hscb->tag, (match) ? "matches" : "doesn't match",
+ p->host_no, channel, target, lun, tag);
+ }
+
+ return (match);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_add_curscb_to_free_list
+ *
+ * Description:
+ * Adds the current scb (in SCBPTR) to the list of free SCBs.
+ *-F*************************************************************************/
+static void
+aic7xxx_add_curscb_to_free_list(struct aic7xxx_host *p)
+{
+ /*
+ * Invalidate the tag so that aic7xxx_find_scb doesn't think
+ * it's active
+ */
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic_outb(p, 0, SCB_CONTROL);
+
+ aic_outb(p, aic_inb(p, FREE_SCBH), SCB_NEXT);
+ aic_outb(p, aic_inb(p, SCBPTR), FREE_SCBH);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_rem_scb_from_disc_list
+ *
+ * Description:
+ * Removes the current SCB from the disconnected list and adds it
+ * to the free list.
+ *-F*************************************************************************/
+static unsigned char
+aic7xxx_rem_scb_from_disc_list(struct aic7xxx_host *p, unsigned char scbptr)
+{
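+ /*
+ * The disconnected list lives in the card's SCB RAM as a doubly linked
+ * list threaded through SCB_NEXT/SCB_PREV; unlink the entry at scbptr,
+ * splice its neighbors back together, and update DISCONNECTED_SCBH when
+ * the head itself is removed.
+ */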
+ unsigned char next;
+ unsigned char prev;
+
+ aic_outb(p, scbptr, SCBPTR);
+ next = aic_inb(p, SCB_NEXT);
+ prev = aic_inb(p, SCB_PREV);
+ aic7xxx_add_curscb_to_free_list(p);
+
+ if (prev != SCB_LIST_NULL)
+ {
+ aic_outb(p, prev, SCBPTR);
+ aic_outb(p, next, SCB_NEXT);
+ }
+ else
+ {
+ aic_outb(p, next, DISCONNECTED_SCBH);
+ }
+
+ if (next != SCB_LIST_NULL)
+ {
+ aic_outb(p, next, SCBPTR);
+ aic_outb(p, prev, SCB_PREV);
+ }
+ return next;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_busy_target
+ *
+ * Description:
+ * Set the specified target busy.
+ *-F*************************************************************************/
+static inline void
+aic7xxx_busy_target(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
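+ /*
+ * untagged_scbs[] is indexed by the packed target/channel/lun byte;
+ * recording the SCB's tag here marks the device as busy with an
+ * untagged command.
+ */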
+ p->untagged_scbs[scb->hscb->target_channel_lun] = scb->hscb->tag;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_index_busy_target
+ *
+ * Description:
+ * Returns the SCB tag that currently holds the given target busy, and
+ * optionally marks the target as no longer busy.
+ *-F*************************************************************************/
+static inline unsigned char
+aic7xxx_index_busy_target(struct aic7xxx_host *p, unsigned char tcl,
+ int unbusy)
+{
+ unsigned char busy_scbid;
+
+ busy_scbid = p->untagged_scbs[tcl];
+ if (unbusy)
+ {
+ p->untagged_scbs[tcl] = SCB_LIST_NULL;
+ }
+ return (busy_scbid);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_find_scb
+ *
+ * Description:
+ * Look through the SCB array of the card and attempt to find the
+ * hardware SCB that corresponds to the passed in SCB. Return
+ * SCB_LIST_NULL if unsuccessful. This routine assumes that the
+ * card is already paused.
+ *-F*************************************************************************/
+static unsigned char
+aic7xxx_find_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ unsigned char saved_scbptr;
+ unsigned char curindex;
+
+ saved_scbptr = aic_inb(p, SCBPTR);
+ curindex = 0;
+ for (curindex = 0; curindex < p->scb_data->maxhscbs; curindex++)
+ {
+ aic_outb(p, curindex, SCBPTR);
+ if (aic_inb(p, SCB_TAG) == scb->hscb->tag)
+ {
+ break;
+ }
+ }
+ aic_outb(p, saved_scbptr, SCBPTR);
+ if (curindex >= p->scb_data->maxhscbs)
+ {
+ curindex = SCB_LIST_NULL;
+ }
+
+ return (curindex);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_allocate_scb
+ *
+ * Description:
+ * Get an SCB from the free list or by allocating a new one.
+ *-F*************************************************************************/
+static int
+aic7xxx_allocate_scb(struct aic7xxx_host *p)
+{
+ struct aic7xxx_scb *scbp = NULL;
+ int scb_size = sizeof(struct aic7xxx_scb) +
+ sizeof (struct hw_scatterlist) * AIC7XXX_MAX_SG;
+ int i;
+ int step = PAGE_SIZE / 1024;
+ unsigned long scb_count = 0;
+ struct hw_scatterlist *hsgp;
+ struct aic7xxx_scb *scb_ap;
+ unsigned long temp;
+
+
+ if (p->scb_data->numscbs < p->scb_data->maxscbs)
+ {
+ /*
+ * Calculate the optimal number of SCBs to allocate.
+ *
+ * NOTE: This formula works because the sizeof(sg_array) is always
+ * 1024. Therefore, scb_size * i would always be > PAGE_SIZE *
+ * (i/step). The (i-1) allows the left hand side of the equation
+ * to grow into the right hand side to a point of near perfect
+ * efficiency since scb_size * (i - 1) grows slightly faster than
+ * the right hand side. If the number of SG array elements is
+ * changed, this function may no longer be as efficient.
+ */
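+ /*
+ * Worked example (an illustration, not from the original source):
+ * with a 4 KB PAGE_SIZE, step is 4, so the loop tries i = 4, 8, 16, ...
+ * and doubles i until scb_size * (i - 1) would no longer fit inside
+ * (i / step) pages minus 64 bytes of slack, then backs off to the
+ * previous i before computing scb_count below.
+ */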
+ for ( i=step;; i *= 2 )
+ {
+ if ( (scb_size * (i-1)) >= ( (PAGE_SIZE * (i/step)) - 64 ) )
+ {
+ i /= 2;
+ break;
+ }
+ }
+ scb_count = MIN( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs);
+ scb_ap = (struct aic7xxx_scb *)kmalloc(scb_size * scb_count, GFP_ATOMIC);
+ if (scb_ap != NULL)
+ {
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ {
+ if (p->scb_data->numscbs == 0)
+ printk(INFO_LEAD "Allocating initial %ld SCB structures.\n",
+ p->host_no, -1, -1, -1, scb_count);
+ else
+ printk(INFO_LEAD "Allocating %ld additional SCB structures.\n",
+ p->host_no, -1, -1, -1, scb_count);
+ }
+#endif
+ memset(scb_ap, 0, scb_count * scb_size);
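+ /*
+ * The scatter-gather arrays are carved out of the same allocation,
+ * directly after the SCB structures; round the address up to the next
+ * 1024-byte boundary since each per-SCB SG array occupies 1024 bytes
+ * (see the sizing note above).
+ */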
+ temp = (unsigned long) &scb_ap[scb_count];
+ temp += 1023;
+ temp &= ~1023;
+ hsgp = (struct hw_scatterlist *)temp;
+ for (i=0; i < scb_count; i++)
+ {
+ scbp = &scb_ap[i];
+ scbp->hscb = &p->scb_data->hscbs[p->scb_data->numscbs];
+ scbp->sg_list = &hsgp[i * AIC7XXX_MAX_SG];
+ memset(scbp->hscb, 0, sizeof(struct aic7xxx_hwscb));
+ scbp->hscb->tag = p->scb_data->numscbs;
+ /*
+ * Place it in the scb array; it is never removed from there.
+ */
+ p->scb_data->scb_array[p->scb_data->numscbs++] = scbp;
+ scbq_insert_head(&p->scb_data->free_scbs, scbp);
+ }
+ scbp->kmalloc_ptr = scb_ap;
+ }
+ else
+ {
+ return(0);
+ }
+ }
+ return(scb_count);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_queue_cmd_complete
+ *
+ * Description:
+ * Due to race conditions present in the SCSI subsystem, it is easier
+ * to queue completed commands, then call scsi_done() on them when
+ * we're finished. This function queues the completed commands.
+ *-F*************************************************************************/
+static void
+aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+{
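+ /*
+ * The command's host_scribble field doubles as the "next" pointer of a
+ * singly linked completion list headed at p->completeq.head; the list
+ * is drained by aic7xxx_done_cmds_complete().
+ */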
+ cmd->host_scribble = (char *)p->completeq.head;
+ p->completeq.head = cmd;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_done_cmds_complete
+ *
+ * Description:
+ * Process the completed command queue.
+ *-F*************************************************************************/
+static void
+aic7xxx_done_cmds_complete(struct aic7xxx_host *p)
+{
+ Scsi_Cmnd *cmd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags = 0;
+#endif
+
+ DRIVER_LOCK
+ while (p->completeq.head != NULL)
+ {
+ cmd = p->completeq.head;
+ p->completeq.head = (Scsi_Cmnd *)cmd->host_scribble;
+ cmd->host_scribble = NULL;
+ cmd->scsi_done(cmd);
+ }
+ DRIVER_UNLOCK
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_free_scb
+ *
+ * Description:
+ * Free the scb and insert into the free scb list.
+ *-F*************************************************************************/
+static void
+aic7xxx_free_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+
+ scb->flags = SCB_FREE;
+ scb->cmd = NULL;
+ scb->sg_count = 0;
+ scb->sg_length = 0;
+ scb->tag_action = 0;
+ scb->hscb->control = 0;
+ scb->hscb->target_status = 0;
+ scb->hscb->target_channel_lun = SCB_LIST_NULL;
+
+ scbq_insert_head(&p->scb_data->free_scbs, scb);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_done
+ *
+ * Description:
+ * Calls the higher level scsi done function and frees the scb.
+ *-F*************************************************************************/
+static void
+aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ Scsi_Cmnd *cmd = scb->cmd;
+ int tindex = TARGET_INDEX(cmd);
+ struct aic7xxx_scb *scbp;
+ unsigned char queue_depth;
+
+ if (scb->flags & SCB_RECOVERY_SCB)
+ {
+ p->flags &= ~AHC_ABORT_PENDING;
+ }
+ if (scb->flags & SCB_RESET)
+ {
+ cmd->result = (DID_RESET << 16) | (cmd->result & 0xffff);
+ }
+ else if (scb->flags & SCB_ABORT)
+ {
+ cmd->result = (DID_RESET << 16) | (cmd->result & 0xffff);
+ }
+ else if (!(p->dev_flags[tindex] & DEVICE_SCANNED))
+ {
+ if ( (cmd->cmnd[0] == INQUIRY) && (cmd->result == DID_OK) )
+ {
+ char *buffer;
+
+ p->dev_flags[tindex] |= DEVICE_PRESENT;
+ if(cmd->use_sg)
+ {
+ struct scatterlist *sg;
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ buffer = (char *)sg[0].address;
+ }
+ else
+ {
+ buffer = (char *)cmd->request_buffer;
+ }
+#define WIDE_INQUIRY_BITS 0x60
+#define SYNC_INQUIRY_BITS 0x10
+ if ( (buffer[7] & WIDE_INQUIRY_BITS) &&
+ (p->features & AHC_WIDE) )
+ {
+ p->needwdtr |= (1<<tindex);
+ p->needwdtr_copy |= (1<<tindex);
+ if ( (p->flags & AHC_SEEPROM_FOUND) &&
+ (p->transinfo[tindex].user_width != MSG_EXT_WDTR_BUS_16_BIT) )
+ p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
+ else
+ p->transinfo[tindex].goal_width = MSG_EXT_WDTR_BUS_16_BIT;
+ }
+ else
+ {
+ p->needwdtr &= ~(1<<tindex);
+ p->needwdtr_copy &= ~(1<<tindex);
+ pause_sequencer(p);
+ aic7xxx_set_width(p, cmd->target, cmd->channel, cmd->lun,
+ MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE |
+ AHC_TRANS_GOAL |
+ AHC_TRANS_CUR) );
+ unpause_sequencer(p, FALSE);
+ }
+ if (buffer[7] & SYNC_INQUIRY_BITS)
+ {
+ p->needsdtr |= (1<<tindex);
+ p->needsdtr_copy |= (1<<tindex);
+
+ if (p->flags & AHC_SEEPROM_FOUND)
+ {
+ p->transinfo[tindex].goal_period = p->transinfo[tindex].user_period;
+ p->transinfo[tindex].goal_offset = p->transinfo[tindex].user_offset;
+ }
+ else
+ {
+ if (p->features & AHC_ULTRA2)
+ {
+ p->transinfo[tindex].goal_period =
+ aic7xxx_syncrates[AHC_SYNCRATE_ULTRA2].period;
+ }
+ else if (p->features & AHC_ULTRA)
+ {
+ p->transinfo[tindex].goal_period =
+ aic7xxx_syncrates[AHC_SYNCRATE_ULTRA].period;
+ }
+ else
+ {
+ p->transinfo[tindex].goal_period =
+ aic7xxx_syncrates[AHC_SYNCRATE_FAST].period;
+ }
+ if (p->features & AHC_ULTRA2)
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ else if (p->transinfo[tindex].goal_width == MSG_EXT_WDTR_BUS_16_BIT)
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ else
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ }
+ }
+ else
+ {
+ p->needsdtr &= ~(1<<tindex);
+ p->needsdtr_copy &= ~(1<<tindex);
+ p->transinfo[tindex].goal_period = 0;
+ p->transinfo[tindex].goal_offset = 0;
+ }
+ p->dev_flags[tindex] |= DEVICE_SCANNED;
+ p->dev_flags[tindex] |= DEVICE_PRINT_WDTR | DEVICE_PRINT_SDTR;
+#undef WIDE_INQUIRY_BITS
+#undef SYNC_INQUIRY_BITS
+ }
+ }
+ else if ((scb->flags & (SCB_MSGOUT_WDTR | SCB_MSGOUT_SDTR)) != 0)
+ {
+ unsigned short mask;
+ int message_error = FALSE;
+
+ mask = 0x01 << tindex;
+
+ /*
+ * Check to see if we get an invalid message or a message error
+ * after failing to negotiate a wide or sync transfer message.
+ */
+ if ((scb->flags & SCB_SENSE) &&
+ ((scb->cmd->sense_buffer[12] == 0x43) || /* INVALID_MESSAGE */
+ (scb->cmd->sense_buffer[12] == 0x49))) /* MESSAGE_ERROR */
+ {
+ message_error = TRUE;
+ }
+
+ if (scb->flags & SCB_MSGOUT_WDTR)
+ {
+ p->wdtr_pending &= ~mask;
+ if (message_error)
+ {
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_WDTR) )
+ {
+ printk(INFO_LEAD "Device failed to complete Wide Negotiation "
+ "processing and\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "returned a sense error code for invalid message, "
+ "disabling future\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "Wide negotiation to this device.\n", p->host_no,
+ CTL_OF_SCB(scb));
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
+ }
+ p->needwdtr &= ~mask;
+ p->needwdtr_copy &= ~mask;
+ }
+ }
+ if (scb->flags & SCB_MSGOUT_SDTR)
+ {
+ p->sdtr_pending &= ~mask;
+ if (message_error)
+ {
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ (p->dev_flags[tindex] & DEVICE_PRINT_SDTR) )
+ {
+ printk(INFO_LEAD "Device failed to complete Sync Negotiation "
+ "processing and\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "returned a sense error code for invalid message, "
+ "disabling future\n", p->host_no, CTL_OF_SCB(scb));
+ printk(INFO_LEAD "Sync negotiation to this device.\n", p->host_no,
+ CTL_OF_SCB(scb));
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_SDTR;
+ }
+ p->needsdtr &= ~mask;
+ p->needsdtr_copy &= ~mask;
+ }
+ }
+ }
+ queue_depth = p->dev_temp_queue_depth[tindex];
+ if (queue_depth >= p->dev_active_cmds[tindex])
+ {
+ scbp = scbq_remove_head(&p->delayed_scbs[tindex]);
+ if (scbp)
+ {
+ if (queue_depth == 1)
+ {
+ /*
+ * Give extra preference to untagged devices, such as CD-R devices
+ * This makes it more likely that a drive *won't* stuff up while
+ * waiting on data at a critical time, such as CD-R writing and
+ * audio CD ripping operations. Should also benefit tape drives.
+ */
+ scbq_insert_head(&p->waiting_scbs, scbp);
+ }
+ else
+ {
+ scbq_insert_tail(&p->waiting_scbs, scbp);
+ }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Moving SCB from delayed to waiting queue.\n",
+ p->host_no, CTL_OF_SCB(scbp));
+#endif
+ if (queue_depth > p->dev_active_cmds[tindex])
+ {
+ scbp = scbq_remove_head(&p->delayed_scbs[tindex]);
+ if (scbp)
+ scbq_insert_tail(&p->waiting_scbs, scbp);
+ }
+ }
+ }
+ if ( !(scb->tag_action) && (p->tagenable & (1<<tindex)) )
+ {
+ p->dev_temp_queue_depth[tindex] = p->dev_max_queue_depth[tindex];
+ }
+ p->dev_active_cmds[tindex]--;
+ p->activescbs--;
+
+ /*
+ * If this was an untagged I/O, unbusy the target so the sequencer won't
+ * mistake things later
+ */
+ if (aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, FALSE) ==
+ scb->hscb->tag)
+ {
+ aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, TRUE);
+ }
+
+ {
+ int actual;
+
+ /*
+ * XXX: we should actually know how much was actually transferred
+ * XXX: for each command, but apparently that's too difficult.
+ *
+ * We set a lower limit of 512 bytes on the transfer length. We
+ * ignore anything less than this because we don't have a real
+ * reason to count it. Reads/writes to tapes are usually about 20K
+ * and disks are a minimum of 512 bytes, unless you want to count
+ * non-read/write commands (such as TEST_UNIT_READY), which we don't.
+ */
+ actual = scb->sg_length;
+ if ((actual >= 512) && (((cmd->result >> 16) & 0xf) == DID_OK))
+ {
+ struct aic7xxx_xferstats *sp;
+#ifdef AIC7XXX_PROC_STATS
+ long *ptr;
+ int x;
+#endif /* AIC7XXX_PROC_STATS */
+
+ sp = &p->stats[TARGET_INDEX(cmd)];
+
+ /*
+ * For block devices, cmd->request.cmd is always == either READ or
+ * WRITE. For character devices, this isn't always set properly, so
+ * we check data_cmnd[0]. This catches the conditions for st.c, but
+ * I'm still not sure if request.cmd is valid for sg devices.
+ */
+ if ( (cmd->request.cmd == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
+ (cmd->data_cmnd[0] == WRITE_FILEMARKS) )
+ {
+ sp->w_total++;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if ( (sp->w_total > 16) && (aic7xxx_verbose > 0xffff) )
+ aic7xxx_verbose &= 0xffff;
+#endif
+#ifdef AIC7XXX_PROC_STATS
+ ptr = sp->w_bins;
+#endif /* AIC7XXX_PROC_STATS */
+ }
+ else
+ {
+ sp->r_total++;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if ( (sp->r_total > 16) && (aic7xxx_verbose > 0xffff) )
+ aic7xxx_verbose &= 0xffff;
+#endif
+#ifdef AIC7XXX_PROC_STATS
+ ptr = sp->r_bins;
+#endif /* AIC7XXX_PROC_STATS */
+ }
+#ifdef AIC7XXX_PROC_STATS
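+ /*
+ * Bin the transfer size into a power-of-two histogram: bin 0 holds
+ * transfers under 1 KB, each following bin doubles the range, and
+ * anything of 64 KB or more lands in bin 7.
+ */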
+ x = -10;
+ while(actual)
+ {
+ actual >>= 1;
+ x++;
+ }
+ if (x < 0)
+ {
+ ptr[0]++;
+ }
+ else if (x > 7)
+ {
+ ptr[7]++;
+ }
+ else
+ {
+ ptr[x]++;
+ }
+#endif /* AIC7XXX_PROC_STATS */
+ }
+ }
+ aic7xxx_free_scb(p, scb);
+ aic7xxx_queue_cmd_complete(p, cmd);
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_run_done_queue
+ *
+ * Description:
+ * Calls aic7xxx_done() for the Scsi_Cmnd of each scb in the
+ * aborted list, and adds each scb to the free list. If complete
+ * is TRUE, we also process the commands complete list.
+ *-F*************************************************************************/
+static void
+aic7xxx_run_done_queue(struct aic7xxx_host *p, /*complete*/ int complete)
+{
+ struct aic7xxx_scb *scb;
+ int i, found = 0;
+
+ for (i = 0; i < p->scb_data->numscbs; i++)
+ {
+ scb = p->scb_data->scb_array[i];
+ if (scb->flags & SCB_QUEUED_FOR_DONE)
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Aborting scb %d\n",
+ p->host_no, CTL_OF_SCB(scb), scb->hscb->tag);
+ found++;
+ aic7xxx_done(p, scb);
+ }
+ }
+ if (aic7xxx_verbose & (VERBOSE_ABORT_RETURN | VERBOSE_RESET_RETURN))
+ {
+ printk(INFO_LEAD "%d commands found and queued for "
+ "completion.\n", p->host_no, -1, -1, -1, found);
+ }
+ if (complete)
+ {
+ aic7xxx_done_cmds_complete(p);
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort_waiting_scb
+ *
+ * Description:
+ * Manipulate the waiting for selection list and return the
+ * scb that follows the one that we remove.
+ *-F*************************************************************************/
+static unsigned char
+aic7xxx_abort_waiting_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb,
+ unsigned char scbpos, unsigned char prev)
+{
+ unsigned char curscb, next;
+
+ /*
+ * Select the SCB we want to abort and pull the next pointer out of it.
+ */
+ curscb = aic_inb(p, SCBPTR);
+ aic_outb(p, scbpos, SCBPTR);
+ next = aic_inb(p, SCB_NEXT);
+
+ aic7xxx_add_curscb_to_free_list(p);
+
+ /*
+ * Update the waiting list
+ */
+ if (prev == SCB_LIST_NULL)
+ {
+ /*
+ * First in the list
+ */
+ aic_outb(p, next, WAITING_SCBH);
+ }
+ else
+ {
+ /*
+ * Select the scb that pointed to us and update its next pointer.
+ */
+ aic_outb(p, prev, SCBPTR);
+ aic_outb(p, next, SCB_NEXT);
+ }
+ /*
+ * Point us back at the original scb position and inform the SCSI
+ * system that the command has been aborted.
+ */
+ aic_outb(p, curscb, SCBPTR);
+ return (next);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_search_qinfifo
+ *
+ * Description:
+ * Search the queue-in FIFO for matching SCBs and conditionally
+ * requeue. Returns the number of matching SCBs.
+ *-F*************************************************************************/
+static int
+aic7xxx_search_qinfifo(struct aic7xxx_host *p, int target, int channel,
+ int lun, unsigned char tag, int flags, int requeue,
+ volatile scb_queue_type *queue)
+{
+ int found;
+ unsigned char qinpos, qintail;
+ struct aic7xxx_scb *scbp;
+
+ found = 0;
+ qinpos = aic_inb(p, QINPOS);
+ qintail = p->qinfifonext;
+
+ p->qinfifonext = qinpos;
+
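+ /*
+ * Walk the queue-in FIFO from the sequencer's read position up to our
+ * write position, rewriting it in place: tags that don't match are put
+ * back, while matching SCBs are either requeued or flagged so
+ * aic7xxx_run_done_queue() can finish them off.
+ */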
+ while (qinpos != qintail)
+ {
+ scbp = p->scb_data->scb_array[p->qinfifo[qinpos++]];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ /*
+ * We found an scb that needs to be removed.
+ */
+ if (requeue && (queue != NULL))
+ {
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ scbq_remove(queue, scbp);
+ scbq_remove(&p->waiting_scbs, scbp);
+ scbq_remove(&p->delayed_scbs[TARGET_INDEX(scbp->cmd)], scbp);
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbq_insert_tail(queue, scbp);
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]--;
+ p->activescbs--;
+ scbp->flags |= SCB_WAITINGQ;
+ if ( !(scbp->tag_action & TAG_ENB) )
+ {
+ aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
+ TRUE);
+ }
+ }
+ else if (requeue)
+ {
+ p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
+ }
+ else
+ {
+ /*
+ * Preserve any SCB_RECOVERY_SCB flags on this scb then set the
+ * flags we were called with, presumably so aic7xxx_run_done_queue
+ * can find this scb
+ */
+ scbp->flags = flags | (scbp->flags & SCB_RECOVERY_SCB);
+ if (aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
+ FALSE) == scbp->hscb->tag)
+ {
+ aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
+ TRUE);
+ }
+ }
+ found++;
+ }
+ else
+ {
+ p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
+ }
+ }
+ /*
+ * Now that we've done the work, clear out any left over commands in the
+ * qinfifo and update the KERNEL_QINPOS down on the card.
+ *
+ * NOTE: This routine expects the sequencer to already be paused when
+ * it is run....make sure it's that way!
+ */
+ qinpos = p->qinfifonext;
+ while(qinpos != qintail)
+ {
+ p->qinfifo[qinpos++] = SCB_LIST_NULL;
+ }
+ if (p->features & AHC_QUEUE_REGS)
+ aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+ else
+ aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+
+ return (found);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_scb_on_qoutfifo
+ *
+ * Description:
+ * Is the scb that was passed to us currently on the qoutfifo?
+ *-F*************************************************************************/
+static int
+aic7xxx_scb_on_qoutfifo(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ int i=0;
+
+ while(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] != SCB_LIST_NULL)
+ {
+ if(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] == scb->hscb->tag)
+ return TRUE;
+ else
+ i++;
+ }
+ return FALSE;
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_device
+ *
+ * Description:
+ * The device at the given target/channel has been reset. Abort
+ * all active and queued scbs for that target/channel. This function
+ * need not worry about linked next pointers, because if it was a MSG_ABORT_TAG
+ * then we had a tagged command (no linked next), if it was MSG_ABORT or
+ * MSG_BUS_DEV_RESET then the device won't know about any commands any more
+ * and no busy commands will exist, and if it was a bus reset, then nothing
+ * knows about any linked next commands any more. In all cases, we don't
+ * need to worry about the linked next or busy scb, we just need to clear
+ * them.
+ *-F*************************************************************************/
+static void
+aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
+ int lun, unsigned char tag)
+{
+ struct aic7xxx_scb *scbp;
+ unsigned char active_scb, tcl;
+ int i = 0, j, init_lists = FALSE;
+
+ /*
+ * Restore this when we're done
+ */
+ active_scb = aic_inb(p, SCBPTR);
+
+ if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS))
+ printk(INFO_LEAD "Reset device, active_scb %d\n",
+ p->host_no, channel, target, lun, active_scb);
+ /*
+ * Deal with the busy target and linked next issues.
+ */
+ {
+ int min_target, max_target;
+ struct aic7xxx_scb *scbp, *prev_scbp;
+
+ /* Make all targets 'relative' to bus A. */
+ if (target == ALL_TARGETS)
+ {
+ switch (channel)
+ {
+ case 0:
+ min_target = 0;
+ max_target = (p->features & AHC_WIDE) ? 15 : 7;
+ break;
+ case 1:
+ min_target = 8;
+ max_target = 15;
+ break;
+ case ALL_CHANNELS:
+ default:
+ min_target = 0;
+ max_target = (p->features & (AHC_TWIN|AHC_WIDE)) ? 15 : 7;
+ break;
+ }
+ }
+ else
+ {
+ min_target = target | (channel << 3);
+ max_target = min_target;
+ }
+
+
+ for (i = min_target; i <= max_target; i++)
+ {
+ if ( i == p->scsi_id )
+ {
+ continue;
+ }
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning up status information "
+ "and delayed_scbs.\n", p->host_no, channel, i, lun);
+ p->dev_flags[i] &= ~BUS_DEVICE_RESET_PENDING;
+ if ( tag == SCB_LIST_NULL )
+ {
+ p->dev_flags[i] |= DEVICE_PRINT_WDTR | DEVICE_PRINT_SDTR |
+ DEVICE_RESET_DELAY;
+ p->dev_expires[i] = jiffies + (4 * HZ);
+ p->dev_timer_active |= (0x01 << i);
+ p->dev_last_queue_full_count[i] = 0;
+ p->dev_last_queue_full[i] = 0;
+ p->dev_temp_queue_depth[i] =
+ p->dev_max_queue_depth[i];
+ }
+ for(j=0; j<MAX_LUNS; j++)
+ {
+ if (channel == 1)
+ tcl = ((i << 4) & 0x70) | (channel << 3) | j;
+ else
+ tcl = (i << 4) | (channel << 3) | j;
+ if ( (aic7xxx_index_busy_target(p, tcl, FALSE) == tag) ||
+ (tag == SCB_LIST_NULL) )
+ aic7xxx_index_busy_target(p, tcl, /* unbusy */ TRUE);
+ }
+ j = 0;
+ prev_scbp = NULL;
+ scbp = p->delayed_scbs[i].head;
+ while ( (scbp != NULL) && (j++ <= (p->scb_data->numscbs + 1)) )
+ {
+ prev_scbp = scbp;
+ scbp = scbp->q_next;
+ if ( prev_scbp == scbp )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! scb->q_next == scb "
+ "in the delayed_scbs queue!\n", p->host_no, channel, i, lun);
+ scbp = NULL;
+ prev_scbp->q_next = NULL;
+ p->delayed_scbs[i].tail = prev_scbp;
+ }
+ if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
+ {
+ scbq_remove(&p->delayed_scbs[i], prev_scbp);
+ if (prev_scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[i]++;
+ p->activescbs++;
+ }
+ prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! There's a loop in the "
+ "delayed_scbs queue!\n", p->host_no, channel, i, lun);
+ scbq_init(&p->delayed_scbs[i]);
+ }
+ if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
+ time_after_eq(p->dev_timer.expires, p->dev_expires[i]) )
+ {
+ del_timer(&p->dev_timer);
+ p->dev_timer.expires = p->dev_expires[i];
+ add_timer(&p->dev_timer);
+ p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ }
+ }
+ }
+
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning QINFIFO.\n", p->host_no, channel, target, lun );
+ aic7xxx_search_qinfifo(p, target, channel, lun, tag,
+ SCB_RESET | SCB_QUEUED_FOR_DONE, /* requeue */ FALSE, NULL);
+
+/*
+ * Search the waiting_scbs queue for matches; this catches any SCB_QUEUED
+ * ABORT/RESET commands.
+ */
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning waiting_scbs.\n", p->host_no, channel,
+ target, lun );
+ {
+ struct aic7xxx_scb *scbp, *prev_scbp;
+
+ j = 0;
+ prev_scbp = NULL;
+ scbp = p->waiting_scbs.head;
+ while ( (scbp != NULL) && (j++ <= (p->scb_data->numscbs + 1)) )
+ {
+ prev_scbp = scbp;
+ scbp = scbp->q_next;
+ if ( prev_scbp == scbp )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! scb->q_next == scb "
+ "in the waiting_scbs queue!\n", p->host_no, CTL_OF_SCB(scbp));
+ scbp = NULL;
+ prev_scbp->q_next = NULL;
+ p->waiting_scbs.tail = prev_scbp;
+ }
+ if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag))
+ {
+ scbq_remove(&p->waiting_scbs, prev_scbp);
+ if (prev_scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[TARGET_INDEX(prev_scbp->cmd)]++;
+ p->activescbs++;
+ }
+ prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ if (aic7xxx_verbose & (VERBOSE_ABORT | VERBOSE_RESET))
+ printk(WARN_LEAD "Yikes!! There's a loop in the "
+ "waiting_scbs queue!\n", p->host_no, channel, target, lun);
+ scbq_init(&p->waiting_scbs);
+ }
+ }
+
+
+ /*
+ * Search waiting for selection list.
+ */
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning waiting for selection "
+ "list.\n", p->host_no, channel, target, lun);
+ {
+ unsigned char next, prev, scb_index;
+
+ next = aic_inb(p, WAITING_SCBH); /* Start at head of list. */
+ prev = SCB_LIST_NULL;
+ j = 0;
+ while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ {
+ aic_outb(p, next, SCBPTR);
+ scb_index = aic_inb(p, SCB_TAG);
+ if (scb_index >= p->scb_data->numscbs)
+ {
+ /*
+ * No aic7xxx_verbose check here.....we want to see this since it
+ * means either the kernel driver or the sequencer screwed things up
+ */
+ printk(WARN_LEAD "Waiting List inconsistency; SCB index=%d, "
+ "numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
+ p->scb_data->numscbs);
+ next = aic_inb(p, SCB_NEXT);
+ aic7xxx_add_curscb_to_free_list(p);
+ }
+ else
+ {
+ scbp = p->scb_data->scb_array[scb_index];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ next = aic7xxx_abort_waiting_scb(p, scbp, next, prev);
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ if (prev == SCB_LIST_NULL)
+ {
+ /*
+ * This is either the first scb on the waiting list, or we
+ * have already yanked the first and haven't left any behind.
+ * Either way, we need to turn off the selection hardware if
+ * it isn't already off.
+ */
+ aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
+ aic_outb(p, CLRSELTIMEO, CLRSINT1);
+ }
+ }
+ else
+ {
+ prev = next;
+ next = aic_inb(p, SCB_NEXT);
+ }
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ printk(WARN_LEAD "Yikes!! There is a loop in the waiting for "
+ "selection list!\n", p->host_no, channel, target, lun);
+ init_lists = TRUE;
+ }
+ }
+
+ /*
+ * Go through disconnected list and remove any entries we have queued
+ * for completion, zeroing their control byte too.
+ */
+ if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
+ printk(INFO_LEAD "Cleaning disconnected scbs "
+ "list.\n", p->host_no, channel, target, lun);
+ if (p->flags & AHC_PAGESCBS)
+ {
+ unsigned char next, prev, scb_index;
+
+ next = aic_inb(p, DISCONNECTED_SCBH);
+ prev = SCB_LIST_NULL;
+ j = 0;
+ while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ {
+ aic_outb(p, next, SCBPTR);
+ scb_index = aic_inb(p, SCB_TAG);
+ if (scb_index > p->scb_data->numscbs)
+ {
+ printk(WARN_LEAD "Disconnected List inconsistency; SCB index=%d, "
+ "numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
+ p->scb_data->numscbs);
+ next = aic7xxx_rem_scb_from_disc_list(p, next);
+ }
+ else
+ {
+ scbp = p->scb_data->scb_array[scb_index];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ next = aic7xxx_rem_scb_from_disc_list(p, next);
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ scbp->hscb->control = 0;
+ }
+ else
+ {
+ prev = next;
+ next = aic_inb(p, SCB_NEXT);
+ }
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ printk(WARN_LEAD "Yikes!! There is a loop in the disconnected list!\n",
+ p->host_no, channel, target, lun);
+ init_lists = TRUE;
+ }
+ }
+
+ /*
+ * Walk the free list making sure no entries on the free list have
+ * a valid SCB_TAG value or SCB_CONTROL byte.
+ */
+ if (p->flags & AHC_PAGESCBS)
+ {
+ unsigned char next;
+
+ j = 0;
+ next = aic_inb(p, FREE_SCBH);
+ if ( (next >= p->scb_data->maxhscbs) && (next != SCB_LIST_NULL) )
+ {
+ printk(WARN_LEAD "Bogus FREE_SCBH!.\n", p->host_no, channel,
+ target, lun);
+ init_lists = TRUE;
+ next = SCB_LIST_NULL;
+ }
+ while ( (next != SCB_LIST_NULL) && (j++ <= (p->scb_data->maxscbs + 1)) )
+ {
+ aic_outb(p, next, SCBPTR);
+ if (aic_inb(p, SCB_TAG) < p->scb_data->numscbs)
+ {
+ printk(WARN_LEAD "Free list inconsistency!.\n", p->host_no, channel,
+ target, lun);
+ init_lists = TRUE;
+ next = SCB_LIST_NULL;
+ }
+ else
+ {
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic_outb(p, 0, SCB_CONTROL);
+ next = aic_inb(p, SCB_NEXT);
+ }
+ }
+ if ( j > (p->scb_data->maxscbs + 1) )
+ {
+ printk(WARN_LEAD "Yikes!! There is a loop in the free list!\n",
+ p->host_no, channel, target, lun);
+ init_lists = TRUE;
+ }
+ }
+
+ /*
+ * Go through the hardware SCB array looking for commands that
+ * were active but not on any list.
+ */
+ if (init_lists)
+ {
+ aic_outb(p, SCB_LIST_NULL, FREE_SCBH);
+ aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
+ aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
+ }
+ for (i = p->scb_data->maxhscbs - 1; i >= 0; i--)
+ {
+ unsigned char scbid;
+
+ aic_outb(p, i, SCBPTR);
+ if (init_lists)
+ {
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
+ aic_outb(p, SCB_LIST_NULL, SCB_PREV);
+ aic_outb(p, 0, SCB_CONTROL);
+ aic7xxx_add_curscb_to_free_list(p);
+ }
+ else
+ {
+ scbid = aic_inb(p, SCB_TAG);
+ if (scbid < p->scb_data->numscbs)
+ {
+ scbp = p->scb_data->scb_array[scbid];
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
+ {
+ aic_outb(p, 0, SCB_CONTROL);
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic7xxx_add_curscb_to_free_list(p);
+ }
+ }
+ }
+ }
+
+ /*
+ * Go through the entire SCB array now and look for commands for
+ * this target that are still active. These are other (most likely
+ * tagged) commands that were disconnected when the reset occurred.
+ * For any command we find here, we know this much: it wasn't on any queue,
+ * it wasn't in the qinfifo, and it wasn't in the disconnected or waiting
+ * lists, so it really must have been a paged-out SCB. In that case,
+ * we shouldn't need to bother with updating any counters, just mark
+ * the correct flags and go on.
+ */
+ for (i = 0; i < p->scb_data->numscbs; i++)
+ {
+ scbp = p->scb_data->scb_array[i];
+ if ((scbp->flags & SCB_ACTIVE) &&
+ aic7xxx_match_scb(p, scbp, target, channel, lun, tag) &&
+ !aic7xxx_scb_on_qoutfifo(p, scbp))
+ {
+ if (scbp->flags & SCB_WAITINGQ)
+ {
+ scbq_remove(&p->waiting_scbs, scbp);
+ scbq_remove(&p->delayed_scbs[TARGET_INDEX(scbp->cmd)], scbp);
+ p->dev_active_cmds[TARGET_INDEX(scbp->cmd)]++;
+ p->activescbs++;
+ }
+ scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
+ scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
+ }
+ }
+
+ aic_outb(p, active_scb, SCBPTR);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_clear_intstat
+ *
+ * Description:
+ * Clears the interrupt status.
+ *-F*************************************************************************/
+static void
+aic7xxx_clear_intstat(struct aic7xxx_host *p)
+{
+ /* Clear any interrupt conditions this may have caused. */
+ aic_outb(p, CLRSELDO | CLRSELDI | CLRSELINGO, CLRSINT0);
+ aic_outb(p, CLRSELTIMEO | CLRATNO | CLRSCSIRSTI | CLRBUSFREE | CLRSCSIPERR |
+ CLRPHASECHG | CLRREQINIT, CLRSINT1);
+ aic_outb(p, CLRSCSIINT | CLRSEQINT | CLRBRKADRINT | CLRPARERR, CLRINT);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_current_bus
+ *
+ * Description:
+ * Reset the current SCSI bus.
+ *-F*************************************************************************/
+static void
+aic7xxx_reset_current_bus(struct aic7xxx_host *p)
+{
+
+ /* Disable reset interrupts. */
+ aic_outb(p, aic_inb(p, SIMODE1) & ~ENSCSIRST, SIMODE1);
+
+ /* Turn off the bus' current operations; after all, we shouldn't have any
+ * valid commands left to cause a RSELI and SELO once we've tossed the
+ * bus away with this reset, so we might as well shut down the sequencer
+ * until the bus is restarted, as opposed to saving the current settings
+ * and restoring them (which makes no sense to me). */
+
+ /* Turn on the bus reset. */
+ aic_outb(p, aic_inb(p, SCSISEQ) | SCSIRSTO, SCSISEQ);
+ while ( (aic_inb(p, SCSISEQ) & SCSIRSTO) == 0)
+ mdelay(5);
+
+ mdelay(10);
+
+ /* Turn off the bus reset. */
+ aic_outb(p, 0, SCSISEQ);
+ mdelay(5);
+
+ aic7xxx_clear_intstat(p);
+ /* Re-enable reset interrupts. */
+ aic_outb(p, aic_inb(p, SIMODE1) | ENSCSIRST, SIMODE1);
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset_channel
+ *
+ * Description:
+ * Reset the channel.
+ *-F*************************************************************************/
+static void
+aic7xxx_reset_channel(struct aic7xxx_host *p, int channel, int initiate_reset)
+{
+ unsigned long offset_min, offset_max;
+ unsigned char sblkctl;
+ int cur_channel;
+
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Reset channel called, %s initiate reset.\n",
+ p->host_no, channel, -1, -1, (initiate_reset==TRUE) ? "will" : "won't" );
+
+
+ if (channel == 1)
+ {
+ p->needsdtr |= (p->needsdtr_copy & 0xFF00);
+ p->sdtr_pending &= 0x00FF;
+ offset_min = 8;
+ offset_max = 16;
+ }
+ else
+ {
+ if (p->features & AHC_WIDE)
+ {
+ p->needsdtr = p->needsdtr_copy;
+ p->needwdtr = p->needwdtr_copy;
+ p->sdtr_pending = 0x0;
+ p->wdtr_pending = 0x0;
+ offset_min = 0;
+ offset_max = 16;
+ }
+ else
+ {
+ /* Channel A */
+ p->needsdtr |= (p->needsdtr_copy & 0x00FF);
+ p->sdtr_pending &= 0xFF00;
+ offset_min = 0;
+ offset_max = 8;
+ }
+ }
+
+ while (offset_min < offset_max)
+ {
+ /*
+ * Revert to async/narrow transfers until we renegotiate.
+ */
+ aic_outb(p, 0, TARG_SCSIRATE + offset_min);
+ if (p->features & AHC_ULTRA2)
+ {
+ aic_outb(p, 0, TARG_OFFSET + offset_min);
+ }
+ offset_min++;
+ }
+
+ /*
+ * Reset the bus and unpause/restart the controller
+ */
+ sblkctl = aic_inb(p, SBLKCTL);
+ if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+ cur_channel = (sblkctl & SELBUSB) >> 3;
+ else
+ cur_channel = 0;
+ if ( (cur_channel != channel) && (p->features & AHC_TWIN) )
+ {
+ /*
+ * Case 1: Command for another bus is active
+ */
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Stealthily resetting idle channel.\n", p->host_no,
+ channel, -1, -1);
+ /*
+ * Stealthily reset the other bus without upsetting the current bus.
+ */
+ aic_outb(p, sblkctl ^ SELBUSB, SBLKCTL);
+ aic_outb(p, aic_inb(p, SIMODE1) & ~ENBUSFREE, SIMODE1);
+ if (initiate_reset)
+ {
+ aic7xxx_reset_current_bus(p);
+ }
+ aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ);
+ aic7xxx_clear_intstat(p);
+ aic_outb(p, sblkctl, SBLKCTL);
+ }
+ else
+ {
+ /*
+ * Case 2: A command from this bus is active or we're idle.
+ */
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Resetting currently active channel.\n", p->host_no,
+ channel, -1, -1);
+ aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT),
+ SIMODE1);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+ p->msg_type = MSG_TYPE_NONE;
+ p->msg_len = 0;
+ if (initiate_reset)
+ {
+ aic7xxx_reset_current_bus(p);
+ }
+ aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ);
+ aic7xxx_clear_intstat(p);
+ }
+ if (aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "Channel reset\n", p->host_no, channel, -1, -1);
+ /*
+ * Clean up all the state information for the pending transactions
+ * on this bus.
+ */
+ aic7xxx_reset_device(p, ALL_TARGETS, channel, ALL_LUNS, SCB_LIST_NULL);
+
+ if ( !(p->features & AHC_TWIN) )
+ {
+ restart_sequencer(p);
+ }
+
+ return;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_run_waiting_queues
+ *
+ * Description:
+ * Scan the waiting_scbs queue, downloading and starting as many
+ * scbs as we can.
+ *-F*************************************************************************/
+static void
+aic7xxx_run_waiting_queues(struct aic7xxx_host *p)
+{
+ struct aic7xxx_scb *scb;
+ int tindex;
+ int sent;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags = 0;
+#endif
+
+
+ if (p->waiting_scbs.head == NULL)
+ return;
+
+ sent = 0;
+
+ /*
+ * First handle SCBs that are waiting but have been assigned a slot.
+ */
+ DRIVER_LOCK
+ while ((scb = scbq_remove_head(&p->waiting_scbs)) != NULL)
+ {
+ tindex = TARGET_INDEX(scb->cmd);
+ if ( !scb->tag_action && (p->tagenable & (1<<tindex)) )
+ {
+ p->dev_temp_queue_depth[tindex] = 1;
+ }
+ if ( (p->dev_active_cmds[tindex] >=
+ p->dev_temp_queue_depth[tindex]) ||
+ (p->dev_flags[tindex] & (DEVICE_RESET_DELAY|DEVICE_WAS_BUSY)) ||
+ (p->flags & AHC_RESET_DELAY) )
+ {
+ scbq_insert_tail(&p->delayed_scbs[tindex], scb);
+ }
+ else
+ {
+ scb->flags &= ~SCB_WAITINGQ;
+ p->dev_active_cmds[tindex]++;
+ p->activescbs++;
+ if ( !(scb->tag_action) )
+ {
+ aic7xxx_busy_target(p, scb);
+ }
+ p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
+ sent++;
+ }
+ }
+ if (sent)
+ {
+ if (p->features & AHC_QUEUE_REGS)
+ aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+ else
+ {
+ pause_sequencer(p);
+ aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+ unpause_sequencer(p, FALSE);
+ }
+ if (p->activescbs > p->max_activescbs)
+ p->max_activescbs = p->activescbs;
+ }
+ DRIVER_UNLOCK
+}
+
+#ifdef CONFIG_PCI
+
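+/*
+ * Bits in the upper byte of the PCI status register (read below from
+ * PCI_STATUS + 1); each flags a PCI error condition latched by the
+ * host adapter.
+ */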
+#define DPE 0x80
+#define SSE 0x40
+#define RMA 0x20
+#define RTA 0x10
+#define STA 0x08
+#define DPR 0x01
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_pci_intr
+ *
+ * Description:
+ * Check the scsi card for PCI errors and clear the interrupt
+ *
+ * NOTE: If you don't have this function and a 2940 card encounters
+ * a PCI error condition, the machine will end up locked as the
+ * interrupt handler gets slammed with non-stop PCI error interrupts
+ *-F*************************************************************************/
+static void
+aic7xxx_pci_intr(struct aic7xxx_host *p)
+{
+ unsigned char status1;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_byte(p->pdev, PCI_STATUS + 1, &status1);
+#else
+ pcibios_read_config_byte(p->pci_bus, p->pci_device_fn,
+ PCI_STATUS + 1, &status1);
+#endif
+
+ if ( (status1 & DPE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Data Parity Error during PCI address or PCI write "
+ "phase.\n", p->host_no, -1, -1, -1);
+ if ( (status1 & SSE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Signal System Error Detected\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & RMA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Received a PCI Master Abort\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & RTA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Received a PCI Target Abort\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & STA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Signaled a PCI Target Abort\n", p->host_no,
+ -1, -1, -1);
+ if ( (status1 & DPR) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
+ printk(WARN_LEAD "Data Parity Error has been reported via PCI pin "
+ "PERR#\n", p->host_no, -1, -1, -1);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_write_config_byte(p->pdev, PCI_STATUS + 1, status1);
+#else
+ pcibios_write_config_byte(p->pci_bus, p->pci_device_fn,
+ PCI_STATUS + 1, status1);
+#endif
+ if (status1 & (DPR|RMA|RTA))
+ aic_outb(p, CLRPARERR, CLRINT);
+
+ if ( (aic7xxx_panic_on_abort) && (p->spurious_int > 500) )
+ aic7xxx_panic_abort(p, NULL);
+
+}
+#endif /* CONFIG_PCI */
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_timer
+ *
+ * Description:
+ * Take expired entries off the delayed queues, place them on the
+ * waiting queue, and then run the waiting queue to start commands.
+ *-F*************************************************************************/
+static void
+aic7xxx_timer(struct aic7xxx_host *p)
+{
+ int i, j;
+ unsigned long cpu_flags = 0;
+ struct aic7xxx_scb *scb;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ DRIVER_LOCK
+#else
+ spin_lock_irqsave(&io_request_lock, cpu_flags);
+#endif
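+ /*
+ * Bit MAX_TARGETS of dev_timer_active records whether the kernel timer
+ * itself is armed; the lower bits track the individual per-target
+ * reset/busy delays.
+ */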
+ p->dev_timer_active &= ~(0x01 << MAX_TARGETS);
+ if ( (p->dev_timer_active & (0x01 << p->scsi_id)) &&
+ time_after_eq(jiffies, p->dev_expires[p->scsi_id]) )
+ {
+ p->flags &= ~AHC_RESET_DELAY;
+ p->dev_timer_active &= ~(0x01 << p->scsi_id);
+ }
+ for(i=0; i<MAX_TARGETS; i++)
+ {
+ if ( (i != p->scsi_id) &&
+ (p->dev_timer_active & (0x01 << i)) &&
+ time_after_eq(jiffies, p->dev_expires[i]) )
+ {
+ p->dev_timer_active &= ~(0x01 << i);
+ p->dev_flags[i] &= ~(DEVICE_RESET_DELAY|DEVICE_WAS_BUSY);
+ p->dev_temp_queue_depth[i] = p->dev_max_queue_depth[i];
+ j = 0;
+ while ( ((scb = scbq_remove_head(&p->delayed_scbs[i])) != NULL) &&
+ (j++ < p->scb_data->numscbs) )
+ {
+ scbq_insert_tail(&p->waiting_scbs, scb);
+ }
+ if (j == p->scb_data->numscbs)
+ {
+ printk(INFO_LEAD "timer: Yikes, loop in delayed_scbs list.\n",
+ p->host_no, 0, i, -1);
+ scbq_init(&p->delayed_scbs[i]);
+ scbq_init(&p->waiting_scbs);
+ /*
+ * Well, things are screwed now, wait for a reset to clean the junk
+ * out.
+ */
+ }
+ }
+ else if ( p->dev_timer_active & (0x01 << i) )
+ {
+ if ( p->dev_timer_active & (0x01 << MAX_TARGETS) )
+ {
+ if ( time_after_eq(p->dev_timer.expires, p->dev_expires[i]) )
+ {
+ p->dev_timer.expires = p->dev_expires[i];
+ }
+ }
+ else
+ {
+ p->dev_timer.expires = p->dev_expires[i];
+ p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ }
+ }
+ }
+ if ( p->dev_timer_active & (0x01 << MAX_TARGETS) )
+ {
+ add_timer(&p->dev_timer);
+ }
+
+ aic7xxx_run_waiting_queues(p);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ DRIVER_UNLOCK
+#else
+ spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_construct_sdtr
+ *
+ * Description:
+ * Constructs a synchronous data transfer message in the message
+ * buffer on the sequencer.
+ *-F*************************************************************************/
+static void
+aic7xxx_construct_sdtr(struct aic7xxx_host *p, unsigned char period,
+ unsigned char offset)
+{
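+ /*
+ * Builds the five-byte SCSI extended message by hand: EXTENDED MESSAGE,
+ * length, SDTR code, transfer period and REQ/ACK offset.
+ */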
+ p->msg_buf[p->msg_index++] = MSG_EXTENDED;
+ p->msg_buf[p->msg_index++] = MSG_EXT_SDTR_LEN;
+ p->msg_buf[p->msg_index++] = MSG_EXT_SDTR;
+ p->msg_buf[p->msg_index++] = period;
+ p->msg_buf[p->msg_index++] = offset;
+ p->msg_len += 5;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_construct_wdtr
+ *
+ * Description:
+ * Constructs a wide data transfer message in the message buffer
+ * on the sequencer.
+ *-F*************************************************************************/
+static void
+aic7xxx_construct_wdtr(struct aic7xxx_host *p, unsigned char bus_width)
+{
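+ /*
+ * Builds the four-byte SCSI extended message by hand: EXTENDED MESSAGE,
+ * length, WDTR code and the requested bus width.
+ */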
+ p->msg_buf[p->msg_index++] = MSG_EXTENDED;
+ p->msg_buf[p->msg_index++] = MSG_EXT_WDTR_LEN;
+ p->msg_buf[p->msg_index++] = MSG_EXT_WDTR;
+ p->msg_buf[p->msg_index++] = bus_width;
+ p->msg_len += 4;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_calculate_residual
+ *
+ * Description:
+ * Calculate the residual data not yet transferred.
+ *-F*************************************************************************/
+static void
+aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ struct aic7xxx_hwscb *hscb;
+ Scsi_Cmnd *cmd;
+ int actual, i;
+
+ cmd = scb->cmd;
+ hscb = scb->hscb;
+
+ /*
+ * Don't destroy valid residual information with
+ * residual coming from a check sense operation.
+ */
+ if (((scb->hscb->control & DISCONNECTED) == 0) &&
+ (scb->flags & SCB_SENSE) == 0)
+ {
+ /*
+ * We had an underflow. At this time, there's only
+ * one other driver that bothers to check for this,
+ * and cmd->underflow seems to be set rather half-
+ * heartedly in the higher-level SCSI code.
+ */
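+ /*
+ * Compute the bytes actually transferred: start from the full request
+ * length, subtract the SG segments that were never reached, then
+ * subtract the 24-bit residual count of the segment that was in
+ * progress when the transfer stopped.
+ */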
+ actual = scb->sg_length;
+ for (i=1; i < hscb->residual_SG_segment_count; i++)
+ {
+ actual -= scb->sg_list[scb->sg_count - i].length;
+ }
+ actual -= (hscb->residual_data_count[2] << 16) |
+ (hscb->residual_data_count[1] << 8) |
+ hscb->residual_data_count[0];
+
+ if (actual < cmd->underflow)
+ {
+ if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
+ printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
+ "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
+ (cmd->request.cmd == WRITE) ? "wrote" : "read", actual,
+ hscb->residual_SG_segment_count);
+ aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+ aic7xxx_status(cmd) = hscb->target_status;
+ }
+ }
+
+ /*
+ * Clean out the residual information in the SCB for the
+ * next consumer.
+ */
+ hscb->residual_data_count[2] = 0;
+ hscb->residual_data_count[1] = 0;
+ hscb->residual_data_count[0] = 0;
+ hscb->residual_SG_segment_count = 0;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_handle_device_reset
+ *
+ * Description:
+ * Cleans up driver and controller state after a Bus Device Reset has
+ * been delivered to the given target and channel.
+ *-F*************************************************************************/
+static void
+aic7xxx_handle_device_reset(struct aic7xxx_host *p, int target, int channel)
+{
+ unsigned short targ_mask;
+ unsigned char tindex = target;
+
+ tindex |= ((channel & 0x01) << 3);
+
+ targ_mask = (0x01 << tindex);
+ /*
+ * Go back to async/narrow transfers and renegotiate.
+ */
+ p->needsdtr |= (p->needsdtr_copy & targ_mask);
+ p->needwdtr |= (p->needwdtr_copy & targ_mask);
+ p->sdtr_pending &= ~targ_mask;
+ p->wdtr_pending &= ~targ_mask;
+ aic_outb(p, 0, TARG_SCSIRATE + tindex);
+ if (p->features & AHC_ULTRA2)
+ aic_outb(p, 0, TARG_OFFSET + tindex);
+ aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL);
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Bus Device Reset delivered.\n", p->host_no, channel,
+ target, -1);
+ aic7xxx_run_done_queue(p, /*complete*/ FALSE);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_handle_seqint
+ *
+ * Description:
+ * Interrupt handler for sequencer interrupts (SEQINT).
+ *-F*************************************************************************/
+static void
+aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
+{
+ struct aic7xxx_scb *scb;
+ unsigned short target_mask;
+ unsigned char target, lun, tindex;
+ unsigned char queue_flag = FALSE;
+ char channel;
+
+ target = ((aic_inb(p, SAVED_TCL) >> 4) & 0x0f);
+ if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+ channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
+ else
+ channel = 0;
+ tindex = target + (channel << 3);
+ lun = aic_inb(p, SAVED_TCL) & 0x07;
+ target_mask = (0x01 << tindex);
+
+ /*
+ * Go ahead and clear the SEQINT now; that avoids any interrupt race
+ * conditions later on in case we enable some other interrupt.
+ */
+ aic_outb(p, CLRSEQINT, CLRINT);
+ switch (intstat & SEQINT_MASK)
+ {
+ case NO_MATCH:
+ {
+ aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP),
+ SCSISEQ);
+ printk(WARN_LEAD "No active SCB for reconnecting target - Issuing "
+ "BUS DEVICE RESET.\n", p->host_no, channel, target, lun);
+ printk(WARN_LEAD " SAVED_TCL=0x%x, ARG_1=0x%x, SEQADDR=0x%x\n",
+ p->host_no, channel, target, lun,
+ aic_inb(p, SAVED_TCL), aic_inb(p, ARG_1),
+ (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
+ }
+ break;
+
+ case SEND_REJECT:
+ {
+ if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
+ printk(INFO_LEAD "Rejecting unknown message (0x%x) received from "
+ "target, SEQ_FLAGS=0x%x\n", p->host_no, channel, target, lun,
+ aic_inb(p, ACCUM), aic_inb(p, SEQ_FLAGS));
+ }
+ break;
+
+ case NO_IDENT:
+ {
+ /*
+ * The reconnecting target either did not send an identify
+ * message, or did, but we didn't find an SCB to match and
+ * before it could respond to our ATN/abort, it hit a dataphase.
+ * The only safe thing to do is to blow it away with a bus
+ * reset.
+ */
+ if (aic7xxx_verbose & (VERBOSE_SEQINT | VERBOSE_RESET_MID))
+ printk(INFO_LEAD "Target did not send an IDENTIFY message; "
+ "LASTPHASE 0x%x, SAVED_TCL 0x%x\n", p->host_no, channel, target,
+ lun, aic_inb(p, LASTPHASE), aic_inb(p, SAVED_TCL));
+
+ aic7xxx_reset_channel(p, channel, /*initiate reset*/ TRUE);
+ aic7xxx_run_done_queue(p, FALSE);
+
+ }
+ break;
+
+ case BAD_PHASE:
+ if (aic_inb(p, LASTPHASE) == P_BUSFREE)
+ {
+ if (aic7xxx_verbose & VERBOSE_SEQINT)
+ printk(INFO_LEAD "Missed busfree.\n", p->host_no, channel,
+ target, lun);
+ restart_sequencer(p);
+ }
+ else
+ {
+ if (aic7xxx_verbose & VERBOSE_SEQINT)
+ printk(INFO_LEAD "Unknown scsi bus phase, continuing\n", p->host_no,
+ channel, target, lun);
+ }
+ break;
+
+ case EXTENDED_MSG:
+ {
+ p->msg_type = MSG_TYPE_INITIATOR_MSGIN;
+ p->msg_len = 0;
+ p->msg_index = 0;
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Enabling REQINITs for MSG_IN\n", p->host_no,
+ channel, target, lun);
+#endif
+
+ /*
+ * To actually receive the message, simply turn on
+ * REQINIT interrupts and let our interrupt handler
+ * do the rest (REQINIT should already be true).
+ */
+ p->flags |= AHC_HANDLING_REQINITS;
+ aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1);
+
+ /*
+ * We don't want the sequencer unpaused yet so we return early
+ */
+ return;
+ }
+
+ case REJECT_MSG:
+ {
+ /*
+ * What we care about here is if we had an outstanding SDTR
+ * or WDTR message for this target. If we did, this is a
+ * signal that the target is refusing negotiation.
+ */
+ unsigned char scb_index;
+ unsigned char last_msg;
+
+ scb_index = aic_inb(p, SCB_TAG);
+ scb = p->scb_data->scb_array[scb_index];
+ last_msg = aic_inb(p, LAST_MSG);
+
+ if ( (last_msg == MSG_IDENTIFYFLAG) &&
+ (scb->tag_action) &&
+ !(scb->flags & SCB_MSGOUT_BITS) )
+ {
+ if (scb->tag_action == MSG_ORDERED_Q_TAG)
+ {
+ /*
+ * OK...the device seems able to accept tagged commands, but
+ * not ordered tag commands, only simple tag commands. So, we
+ * disable ordered tag commands and go on with life just like
+ * normal.
+ */
+ p->orderedtag &= ~target_mask;
+ scb->tag_action = MSG_SIMPLE_Q_TAG;
+ scb->hscb->control &= ~SCB_TAG_TYPE;
+ scb->hscb->control |= MSG_SIMPLE_Q_TAG;
+ aic_outb(p, scb->hscb->control, SCB_CONTROL);
+ /*
+ * OK..we set the tag type to simple tag command, now we re-assert
+ * ATNO and hope this will take us into the identify phase again
+ * so we can resend the tag type and info to the device.
+ */
+ aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
+ }
+ else if (scb->tag_action == MSG_SIMPLE_Q_TAG)
+ {
+ unsigned char i, reset = 0;
+ struct aic7xxx_scb *scbp;
+ int old_verbose;
+ /*
+ * Hmmmm....the device is flaking out on tagged commands. The
+ * bad thing is that we already have tagged commands enabled in
+ * the device struct in the mid level code. We also have a queue
+ * set according to the tagged queue depth. Gonna have to live
+ * with it by controlling our queue depth internally and making
+ * sure we don't set the tagged command flag any more.
+ */
+ p->tagenable &= ~target_mask;
+ p->orderedtag &= ~target_mask;
+ p->dev_max_queue_depth[tindex] =
+ p->dev_temp_queue_depth[tindex] = 1;
+ /*
+ * We set this command up as a bus device reset. However, we have
+ * to clear the tag type as it's causing us problems. We shouldn't
+ * have to worry about any other commands being active, since if
+ * the device is refusing tagged commands, this should be the
+ * first tagged command sent to the device, however, we do have
+ * to worry about any other tagged commands that may already be
+ * in the qinfifo. The easiest way to do this is to issue a BDR,
+ * send all the commands back to the mid level code, then let them
+ * come back and get rebuilt as untagged commands.
+ */
+ scb->tag_action = 0;
+ scb->hscb->control &= ~(TAG_ENB | SCB_TAG_TYPE);
+ aic_outb(p, scb->hscb->control, SCB_CONTROL);
+
+ old_verbose = aic7xxx_verbose;
+ aic7xxx_verbose &= ~(VERBOSE_RESET|VERBOSE_ABORT);
+ for (i=0; i!=p->scb_data->numscbs; i++)
+ {
+ scbp = p->scb_data->scb_array[i];
+ if ((scbp->flags & SCB_ACTIVE) && (scbp != scb))
+ {
+ if (aic7xxx_match_scb(p, scbp, target, channel, lun, i))
+ {
+ aic7xxx_reset_device(p, target, channel, lun, i);
+ reset++;
+ }
+ aic7xxx_run_done_queue(p, FALSE);
+ }
+ }
+ aic7xxx_verbose = old_verbose;
+ /*
+ * Wait until after the for loop to set the busy index since
+ * aic7xxx_reset_device will clear the busy index during its
+ * operation.
+ */
+ aic7xxx_busy_target(p, scb);
+ printk(INFO_LEAD "Device is refusing tagged commands, using "
+ "untagged I/O.\n", p->host_no, channel, target, lun);
+ aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
+ }
+ }
+ else if (scb->flags & SCB_MSGOUT_WDTR)
+ {
+ /*
+ * note 8bit xfers and clear flag
+ */
+ p->needwdtr &= ~target_mask;
+ p->needwdtr_copy &= ~target_mask;
+ p->wdtr_pending &= ~target_mask;
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT,
+ (AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR));
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
+ if ( (p->needsdtr_copy & target_mask) &&
+ !(p->sdtr_pending & target_mask) )
+ {
+ p->sdtr_pending |= target_mask;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
+ }
+ else if (scb->flags & SCB_MSGOUT_SDTR)
+ {
+ /*
+ * note asynch xfers and clear flag
+ */
+ p->needsdtr &= ~target_mask;
+ p->needsdtr_copy &= ~target_mask;
+ p->sdtr_pending &= ~target_mask;
+ scb->flags &= ~SCB_MSGOUT_SDTR;
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+ (AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL));
+ }
+ else if (aic7xxx_verbose & VERBOSE_SEQINT)
+ {
+ /*
+ * Otherwise, we ignore it.
+ */
+ printk(INFO_LEAD "Received MESSAGE_REJECT for unknown cause. "
+ "Ignoring.\n", p->host_no, channel, target, lun);
+ }
+ }
+ break;
+
+ case BAD_STATUS:
+ {
+ unsigned char scb_index;
+ struct aic7xxx_hwscb *hscb;
+ Scsi_Cmnd *cmd;
+
+ /* The sequencer will notify us when a command has an error that
+ * would be of interest to the kernel. This allows us to leave
+ * the sequencer running in the common case of commands completing
+ * without error. The sequencer will have DMA'd the SCB back
+ * up to us, so we can reference the driver's SCB array.
+ *
+ * Set the default return value to 0 indicating not to send
+ * sense. The sense code will change this if needed and this
+ * reduces code duplication.
+ */
+ aic_outb(p, 0, RETURN_1);
+ scb_index = aic_inb(p, SCB_TAG);
+ if (scb_index > p->scb_data->numscbs)
+ {
+ printk(WARN_LEAD "Invalid SCB during SEQINT 0x%02x, SCB_TAG %d.\n",
+ p->host_no, channel, target, lun, intstat, scb_index);
+ break;
+ }
+ scb = p->scb_data->scb_array[scb_index];
+ hscb = scb->hscb;
+
+ if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk(WARN_LEAD "Invalid SCB during SEQINT 0x%x, scb %d, flags 0x%x,"
+ " cmd 0x%lx.\n", p->host_no, channel, target, lun, intstat,
+ scb_index, scb->flags, (unsigned long) scb->cmd);
+ }
+ else
+ {
+ cmd = scb->cmd;
+ hscb->target_status = aic_inb(p, SCB_TARGET_STATUS);
+ aic7xxx_status(cmd) = hscb->target_status;
+
+ cmd->result = hscb->target_status;
+
+ switch (status_byte(hscb->target_status))
+ {
+ case GOOD:
+ if (aic7xxx_verbose & VERBOSE_SEQINT)
+ printk(INFO_LEAD "Interrupted for status of GOOD???\n",
+ p->host_no, CTL_OF_SCB(scb));
+ break;
+
+ case COMMAND_TERMINATED:
+ case CHECK_CONDITION:
+ if ( !(scb->flags & SCB_SENSE) )
+ {
+ /*
+ * XXX - How do we save the residual (if there is one)?
+ */
+ if ( hscb->residual_SG_segment_count != 0 )
+ aic7xxx_calculate_residual(p, scb);
+
+ /*
+ * Send a sense command to the requesting target.
+ * XXX - revisit this and get rid of the memcopys.
+ */
+ memcpy(&scb->sense_cmd[0], &generic_sense[0],
+ sizeof(generic_sense));
+
+ scb->sense_cmd[1] = (cmd->lun << 5);
+ scb->sense_cmd[4] = sizeof(cmd->sense_buffer);
+
+ scb->sg_list[0].address =
+ cpu_to_le32(VIRT_TO_BUS(&cmd->sense_buffer[0]));
+ scb->sg_list[0].length =
+ cpu_to_le32(sizeof(cmd->sense_buffer));
+
+ /*
+ * XXX - We should allow disconnection, but can't as it
+ * might allow overlapped tagged commands.
+ */
+ /* hscb->control &= DISCENB; */
+ hscb->control = 0;
+ hscb->target_status = 0;
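+ /*
+ * Point the hardware SCB at the single-entry SG list covering the
+ * command's sense buffer and at the sense CDB we just built above.
+ */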
+ hscb->SG_list_pointer =
+ cpu_to_le32(VIRT_TO_BUS(&scb->sg_list[0]));
+ hscb->data_pointer = scb->sg_list[0].address;
+ hscb->data_count = scb->sg_list[0].length;
+ hscb->SCSI_cmd_pointer =
+ cpu_to_le32(VIRT_TO_BUS(&scb->sense_cmd[0]));
+ hscb->SCSI_cmd_length = COMMAND_SIZE(scb->sense_cmd[0]);
+ hscb->residual_SG_segment_count = 0;
+ hscb->residual_data_count[0] = 0;
+ hscb->residual_data_count[1] = 0;
+ hscb->residual_data_count[2] = 0;
+
+ scb->sg_count = hscb->SG_segment_count = 1;
+ scb->sg_length = sizeof(cmd->sense_buffer);
+ scb->tag_action = 0;
+ /*
+ * This problem could be caused if the target has lost power
+ * or found some other way to lose the negotiation settings,
+ * so if needed, we'll re-negotiate while doing the sense cmd.
+ * However, if this SCB already was attempting to negotiate,
+ * then we assume this isn't the problem and skip this part.
+ */
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+ if ( (scb->cmd->cmnd[0] != TEST_UNIT_READY) &&
+ (p->dev_flags[tindex] & DEVICE_SCANNED) &&
+ !(p->wdtr_pending & target_mask) &&
+ !(p->sdtr_pending & target_mask) )
+ {
+ p->needwdtr |= (p->needwdtr_copy & target_mask);
+ p->needsdtr |= (p->needsdtr_copy & target_mask);
+ }
+ else if ( (scb->cmd == p->dev_wdtr_cmnd[tindex]) ||
+ (scb->cmd == p->dev_sdtr_cmnd[tindex]) )
+ {
+ /*
+ * This is already a negotiation command, so we must have
+ * already done either WDTR or SDTR (or maybe both). So
+ * we simply check sdtr_pending and needsdtr to see if we
+ * should throw out SDTR on this command.
+ *
+ * Note: Don't check the needsdtr_copy here, instead just
+ * check to see if WDTR wiped out our SDTR and set needsdtr.
+ * Even if WDTR did wipe out SDTR and set needsdtr, if
+ * parse_msg() then turned around and started our SDTR
+ * in back-to-back fashion, then the conclusion of that should
+ * have negated any needsdtr setting. That's why we only
+ * check needsdtr and sdtr_pending.
+ */
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ if ( (scb->cmd == p->dev_wdtr_cmnd[tindex]) &&
+ !(p->sdtr_pending & target_mask) &&
+ (p->needsdtr & target_mask) )
+ {
+ p->sdtr_pending |= target_mask;
+ hscb->control |= MK_MESSAGE;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ }
+
+ /*
+ * This is the important part though. We are getting sense
+ * info back from this device. It's going into a fake
+ * command. We need to put that into the real command
+ * instead so that the mid level SCSI code can act upon it.
+ * So, when we set up these fake commands, the next pointer
+ * is used to point to the real command. Use that to change
+ * the address of our sense_buffer[] to the real command.
+ * However, don't do this if the real command is also a
+ * TEST_UNIT_READY as it will most likely pull down its own
+ * SENSE information anyway.
+ */
+ if (cmd->next->cmnd[0] != TEST_UNIT_READY)
+ {
+ scb->sg_list[0].address =
+ cpu_to_le32(VIRT_TO_BUS(&cmd->next->sense_buffer[0]));
+ hscb->data_pointer = scb->sg_list[0].address;
+ }
+ }
+#else
+ if ( (scb->cmd->cmnd[0] != TEST_UNIT_READY) &&
+ !(scb->flags & SCB_MSGOUT_BITS) &&
+ (scb->cmd->lun == 0) &&
+ (p->dev_flags[TARGET_INDEX(scb->cmd)] & DEVICE_SCANNED) )
+ {
+ if ( (p->needwdtr_copy & target_mask) &&
+ !(p->wdtr_pending & target_mask) &&
+ !(p->sdtr_pending & target_mask) )
+ {
+ p->needwdtr |= target_mask;
+ p->wdtr_pending |= target_mask;
+ hscb->control |= MK_MESSAGE;
+ scb->flags |= SCB_MSGOUT_WDTR;
+ }
+ if ( p->needsdtr_copy & target_mask )
+ {
+ p->needsdtr |= target_mask;
+ if ( !(p->wdtr_pending & target_mask) &&
+ !(p->sdtr_pending & target_mask) )
+ {
+ p->sdtr_pending |= target_mask;
+ hscb->control |= MK_MESSAGE;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ }
+ }
+ }
+ else
+ scb->flags &= ~SCB_MSGOUT_BITS;
+#endif /* AIC7XXX_FAKE_NEGOTIATION_CMDS */
+ scb->flags |= SCB_SENSE;
+ /*
+ * Ensure the target is busy since this will be an
+ * untagged request.
+ */
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ {
+ if (scb->flags & SCB_MSGOUT_BITS)
+ printk(INFO_LEAD "Requesting SENSE with %s\n", p->host_no,
+ CTL_OF_SCB(scb), (scb->flags & SCB_MSGOUT_SDTR) ?
+ "SDTR" : "WDTR");
+ else
+ printk(INFO_LEAD "Requesting SENSE, no MSG\n", p->host_no,
+ CTL_OF_SCB(scb));
+ }
+#endif
+ aic7xxx_busy_target(p, scb);
+ aic_outb(p, SEND_SENSE, RETURN_1);
+ aic7xxx_error(cmd) = DID_OK;
+ break;
+ } /* first time sense, no errors */
+ aic7xxx_error(cmd) = DID_OK;
+ scb->flags &= ~SCB_SENSE;
+ break;
+
+ case QUEUE_FULL:
+ queue_flag = TRUE; /* Mark that this is a QUEUE_FULL and */
+ case BUSY: /* drop through to here */
+ {
+ struct aic7xxx_scb *next_scbp, *prev_scbp;
+ unsigned char active_hscb, next_hscb, prev_hscb, scb_index;
+ /*
+ * We have to look three places for queued commands:
+ * 1: QINFIFO
+ * 2: p->waiting_scbs queue
+ * 3: WAITING_SCBS list on card (for commands that are started
+ * but haven't yet made it to the device)
+ */
+ aic7xxx_search_qinfifo(p, target, channel, lun,
+ SCB_LIST_NULL, 0, TRUE,
+ &p->delayed_scbs[tindex]);
+ next_scbp = p->waiting_scbs.head;
+ while ( next_scbp != NULL )
+ {
+ prev_scbp = next_scbp;
+ next_scbp = next_scbp->q_next;
+ if ( aic7xxx_match_scb(p, prev_scbp, target, channel, lun,
+ SCB_LIST_NULL) )
+ {
+ scbq_remove(&p->waiting_scbs, prev_scbp);
+ scbq_insert_tail(&p->delayed_scbs[tindex],
+ prev_scbp);
+ }
+ }
+ next_scbp = NULL;
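+ /*
+ * Now walk the sequencer's WAITING_SCBH list on the card. Any SCB
+ * that matches this target is unlinked from the hardware list,
+ * returned to the free list, and pushed onto the head of our
+ * delayed queue so it can be reissued later.
+ */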
+ active_hscb = aic_inb(p, SCBPTR);
+ prev_hscb = next_hscb = scb_index = SCB_LIST_NULL;
+ next_hscb = aic_inb(p, WAITING_SCBH);
+ while (next_hscb != SCB_LIST_NULL)
+ {
+ aic_outb(p, next_hscb, SCBPTR);
+ scb_index = aic_inb(p, SCB_TAG);
+ if (scb_index < p->scb_data->numscbs)
+ {
+ next_scbp = p->scb_data->scb_array[scb_index];
+ if (aic7xxx_match_scb(p, next_scbp, target, channel, lun,
+ SCB_LIST_NULL) )
+ {
+ if (next_scbp->flags & SCB_WAITINGQ)
+ {
+ p->dev_active_cmds[tindex]++;
+ p->activescbs--;
+ scbq_remove(&p->delayed_scbs[tindex], next_scbp);
+ scbq_remove(&p->waiting_scbs, next_scbp);
+ }
+ scbq_insert_head(&p->delayed_scbs[tindex],
+ next_scbp);
+ next_scbp->flags |= SCB_WAITINGQ;
+ p->dev_active_cmds[tindex]--;
+ p->activescbs--;
+ next_hscb = aic_inb(p, SCB_NEXT);
+ aic_outb(p, 0, SCB_CONTROL);
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic7xxx_add_curscb_to_free_list(p);
+ if (prev_hscb == SCB_LIST_NULL)
+ {
+ /* We were first on the list,
+ * so we kill the selection
+ * hardware. Let the sequencer
+ * re-init the hardware itself
+ */
+ aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
+ aic_outb(p, CLRSELTIMEO, CLRSINT1);
+ aic_outb(p, next_hscb, WAITING_SCBH);
+ }
+ else
+ {
+ aic_outb(p, prev_hscb, SCBPTR);
+ aic_outb(p, next_hscb, SCB_NEXT);
+ }
+ }
+ else
+ {
+ prev_hscb = next_hscb;
+ next_hscb = aic_inb(p, SCB_NEXT);
+ }
+ } /* scb_index >= p->scb_data->numscbs */
+ }
+ aic_outb(p, active_hscb, SCBPTR);
+ if (scb->flags & SCB_WAITINGQ)
+ {
+ scbq_remove(&p->delayed_scbs[tindex], scb);
+ scbq_remove(&p->waiting_scbs, scb);
+ p->dev_active_cmds[tindex]++;
+ p->activescbs++;
+ }
+ scbq_insert_head(&p->delayed_scbs[tindex], scb);
+ p->dev_active_cmds[tindex]--;
+ p->activescbs--;
+ scb->flags |= SCB_WAITINGQ | SCB_WAS_BUSY;
+
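+ /*
+ * Arm the per-target busy timer so the commands we just moved to
+ * the delayed queue get restarted later: roughly one second from
+ * now if the target still has commands active, or a tenth of a
+ * second if it is idle. The global timer is started if it isn't
+ * running, or pulled forward if it would otherwise fire later
+ * than our new expiry.
+ */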
+ if ( !(p->dev_timer_active & (0x01 << tindex)) )
+ {
+ p->dev_timer_active |= (0x01 << tindex);
+ if ( p->dev_active_cmds[tindex] )
+ {
+ p->dev_expires[tindex] = jiffies + HZ;
+ }
+ else
+ {
+ p->dev_expires[tindex] = jiffies + (HZ / 10);
+ }
+ if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) )
+ {
+ p->dev_timer.expires = p->dev_expires[tindex];
+ p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ add_timer(&p->dev_timer);
+ }
+ else if ( time_after_eq(p->dev_timer.expires,
+ p->dev_expires[tindex]) )
+ {
+ del_timer(&p->dev_timer);
+ p->dev_timer.expires = p->dev_expires[tindex];
+ add_timer(&p->dev_timer);
+ }
+ }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
+ {
+ if (queue_flag)
+ printk(INFO_LEAD "Queue full received; queue depth %d, "
+ "active %d\n", p->host_no, CTL_OF_SCB(scb),
+ p->dev_max_queue_depth[tindex],
+ p->dev_active_cmds[tindex]);
+ else
+ printk(INFO_LEAD "Target busy\n", p->host_no, CTL_OF_SCB(scb));
+
+ }
+#endif
+ if (queue_flag)
+ {
+ p->dev_temp_queue_depth[tindex] =
+ p->dev_active_cmds[tindex];
+ if ( p->dev_last_queue_full[tindex] !=
+ p->dev_active_cmds[tindex] )
+ {
+ p->dev_last_queue_full[tindex] =
+ p->dev_active_cmds[tindex];
+ p->dev_last_queue_full_count[tindex] = 0;
+ }
+ else
+ {
+ p->dev_last_queue_full_count[tindex]++;
+ }
+ if ( (p->dev_last_queue_full_count[tindex] > 14) &&
+ (p->dev_active_cmds[tindex] > 4) )
+ {
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ printk(INFO_LEAD "Queue depth reduced to %d\n", p->host_no,
+ CTL_OF_SCB(scb), p->dev_active_cmds[tindex]);
+ p->dev_max_queue_depth[tindex] =
+ p->dev_active_cmds[tindex];
+ p->dev_last_queue_full[tindex] = 0;
+ p->dev_last_queue_full_count[tindex] = 0;
+ }
+ else
+ {
+ p->dev_flags[tindex] |= DEVICE_WAS_BUSY;
+ }
+ }
+ break;
+ }
+
+ default:
+ if (aic7xxx_verbose & VERBOSE_SEQINT)
+ printk(INFO_LEAD "Unexpected target status 0x%x.\n", p->host_no,
+ CTL_OF_SCB(scb), scb->hscb->target_status);
+ if (!aic7xxx_error(cmd))
+ {
+ aic7xxx_error(cmd) = DID_RETRY_COMMAND;
+ }
+ break;
+ } /* end switch */
+ } /* end else of */
+ }
+ break;
+
+ case AWAITING_MSG:
+ {
+ unsigned char scb_index, msg_out;
+
+ scb_index = aic_inb(p, SCB_TAG);
+ msg_out = aic_inb(p, MSG_OUT);
+ scb = p->scb_data->scb_array[scb_index];
+ p->msg_index = p->msg_len = 0;
+ /*
+ * This SCB had a MK_MESSAGE set in its control byte informing
+ * the sequencer that we wanted to send a special message to
+ * this target.
+ */
+
+ if ( !(scb->flags & SCB_DEVICE_RESET) &&
+ (aic_inb(p, MSG_OUT) == MSG_IDENTIFYFLAG) &&
+ (scb->hscb->control & TAG_ENB) )
+ {
+ p->msg_buf[p->msg_index++] = scb->tag_action;
+ p->msg_buf[p->msg_index++] = scb->hscb->tag;
+ p->msg_len += 2;
+ }
+
+ if (scb->flags & SCB_DEVICE_RESET)
+ {
+ p->msg_buf[p->msg_index++] = MSG_BUS_DEV_RESET;
+ p->msg_len++;
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Bus device reset mailed.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+ else if (scb->flags & SCB_ABORT)
+ {
+ if (scb->tag_action)
+ {
+ p->msg_buf[p->msg_index++] = MSG_ABORT_TAG;
+ }
+ else
+ {
+ p->msg_buf[p->msg_index++] = MSG_ABORT;
+ }
+ p->msg_len++;
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "Abort message mailed.\n", p->host_no,
+ CTL_OF_SCB(scb));
+ }
+ else if (scb->flags & SCB_MSGOUT_WDTR)
+ {
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Sending WDTR message.\n", p->host_no,
+ CTL_OF_SCB(scb));
+#endif
+ aic7xxx_construct_wdtr(p,
+ p->transinfo[TARGET_INDEX(scb->cmd)].goal_width);
+ }
+ else if (scb->flags & SCB_MSGOUT_SDTR)
+ {
+ unsigned int max_sync, period;
+ /*
+ * We need to set an accurate goal_offset instead of
+ * the ridiculously high one we default to. We should
+ * now know if we are wide. Plus, the WDTR code will
+ * set our goal_offset for us as well.
+ */
+ if (p->transinfo[tindex].goal_offset)
+ {
+ if (p->features & AHC_ULTRA2)
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_ULTRA2;
+ else if (p->transinfo[tindex].cur_width == MSG_EXT_WDTR_BUS_16_BIT)
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_16BIT;
+ else
+ p->transinfo[tindex].goal_offset = MAX_OFFSET_8BIT;
+ }
+ /*
+ * Now that the device is selected, use the bits in SBLKCTL and
+ * SSTAT2 to determine the max sync rate for this device.
+ */
+ if (p->features & AHC_ULTRA2)
+ {
+ if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
+ !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
+ {
+ max_sync = AHC_SYNCRATE_ULTRA2;
+ }
+ else
+ {
+ max_sync = AHC_SYNCRATE_ULTRA;
+ }
+ }
+ else if (p->features & AHC_ULTRA)
+ {
+ max_sync = AHC_SYNCRATE_ULTRA;
+ }
+ else
+ {
+ max_sync = AHC_SYNCRATE_FAST;
+ }
+ period = p->transinfo[tindex].goal_period;
+ aic7xxx_find_syncrate(p, &period, max_sync);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Sending SDTR %d/%d message.\n", p->host_no,
+ CTL_OF_SCB(scb),
+ p->transinfo[tindex].goal_period,
+ p->transinfo[tindex].goal_offset);
+#endif
+ aic7xxx_construct_sdtr(p, period,
+ p->transinfo[tindex].goal_offset);
+ }
+ else
+ {
+ sti();
+ panic("aic7xxx: AWAITING_MSG for an SCB that does "
+ "not have a waiting message.\n");
+ }
+ /*
+ * We've set everything up to send our message, now to actually do
+ * so we need to enable reqinit interrupts and let the interrupt
+ * handler do the rest. We don't want to unpause the sequencer yet
+ * though so we'll return early. We also have to make sure that
+ * we clear the SEQINT *BEFORE* we set the REQINIT handler active
+ * or else it's possible on VLB cards to lose the first REQINIT
+ * interrupt. Edge-triggered EISA cards could also lose this
+ * interrupt, although PCI and level-triggered cards should not
+ * have this problem since they continually interrupt the kernel
+ * until we take care of the situation.
+ */
+ scb->flags |= SCB_MSGOUT_SENT;
+ p->msg_index = 0;
+ p->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
+ p->flags |= AHC_HANDLING_REQINITS;
+ aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1);
+ return;
+ }
+ break;
+
+ case DATA_OVERRUN:
+ {
+ unsigned char scb_index = aic_inb(p, SCB_TAG);
+ unsigned char lastphase = aic_inb(p, LASTPHASE);
+ unsigned int i;
+
+ scb = (p->scb_data->scb_array[scb_index]);
+ /*
+ * XXX - What do we really want to do on an overrun? The
+ * mid-level SCSI code should handle this, but for now,
+ * we'll just indicate that the command should be retried.
+ * If we retrieved sense info on this target, then the
+ * base SENSE info should have been saved prior to the
+ * overrun error. In that case, we return DID_OK and let
+ * the mid level code pick up on the sense info. Otherwise
+ * we return DID_ERROR so the command will get retried.
+ */
+ if ( !(scb->flags & SCB_SENSE) )
+ {
+ printk(WARN_LEAD "Data overrun detected in %s phase, tag %d;\n",
+ p->host_no, CTL_OF_SCB(scb),
+ (lastphase == P_DATAIN) ? "Data-In" : "Data-Out", scb->hscb->tag);
+ printk(KERN_WARNING " %s seen Data Phase. Length=%d, NumSGs=%d.\n",
+ (aic_inb(p, SEQ_FLAGS) & DPHASE) ? "Have" : "Haven't",
+ scb->sg_length, scb->sg_count);
+ for (i = 0; i < scb->sg_count; i++)
+ {
+ printk(KERN_WARNING " sg[%d] - Addr 0x%x : Length %d\n",
+ i,
+ le32_to_cpu(scb->sg_list[i].address),
+ le32_to_cpu(scb->sg_list[i].length) );
+ }
+ aic7xxx_error(scb->cmd) = DID_ERROR;
+ }
+ else
+ printk(INFO_LEAD "Data Overrun during SEND_SENSE operation.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+ break;
+
+#if AIC7XXX_NOT_YET
+ case TRACEPOINT:
+ {
+ printk(INFO_LEAD "Tracepoint #1 reached.\n", p->host_no, channel,
+ target, lun);
+ }
+ break;
+
+ case TRACEPOINT2:
+ {
+ printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no, channel,
+ target, lun);
+ }
+ break;
+
+ /* XXX Fill these in later */
+ case MSG_BUFFER_BUSY:
+ printk("aic7xxx: Message buffer busy.\n");
+ break;
+ case MSGIN_PHASEMIS:
+ printk("aic7xxx: Message-in phasemis.\n");
+ break;
+#endif
+
+ default: /* unknown */
+ printk(WARN_LEAD "Unknown SEQINT, INTSTAT 0x%x, SCSISIGI 0x%x.\n",
+ p->host_no, channel, target, lun, intstat,
+ aic_inb(p, SCSISIGI));
+ break;
+ }
+
+ /*
+ * Clear the sequencer interrupt and unpause the sequencer.
+ */
+ unpause_sequencer(p, /* unpause always */ TRUE);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_parse_msg
+ *
+ * Description:
+ * Parses incoming messages into actions on behalf of
+ * aic7xxx_handle_reqinit
+ *_F*************************************************************************/
+static int
+aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ int reject, reply, done;
+ unsigned char target_scsirate, tindex;
+ unsigned short target_mask;
+ unsigned char target, channel, lun;
+
+ target = scb->cmd->target;
+ channel = scb->cmd->channel;
+ lun = scb->cmd->lun;
+ reply = reject = done = FALSE;
+ tindex = TARGET_INDEX(scb->cmd);
+ target_scsirate = aic_inb(p, TARG_SCSIRATE + tindex);
+ target_mask = (0x01 << tindex);
+
+ /*
+ * Parse as much of the message as is available,
+ * rejecting it if we don't support it. When
+ * the entire message is available and has been
+ * handled, return TRUE indicating that we have
+ * parsed an entire message.
+ */
+
+ if (p->msg_buf[0] != MSG_EXTENDED)
+ {
+ reject = TRUE;
+ }
+
+ /*
+ * Just accept the length byte outright and perform
+ * more checking once we know the message type.
+ */
+
+ if ( !reject && (p->msg_len > 2) )
+ {
+ switch(p->msg_buf[2])
+ {
+ case MSG_EXT_SDTR:
+ {
+ unsigned int period, offset;
+ unsigned char maxsync, saved_offset;
+ struct aic7xxx_syncrate *syncrate;
+
+ if (p->msg_buf[1] != MSG_EXT_SDTR_LEN)
+ {
+ reject = TRUE;
+ break;
+ }
+
+ if (p->msg_len < (MSG_EXT_SDTR_LEN + 2))
+ {
+ break;
+ }
+
+ period = p->msg_buf[3];
+ saved_offset = offset = p->msg_buf[4];
+
+ if (p->features & AHC_ULTRA2)
+ {
+ if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
+ !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
+ {
+ maxsync = AHC_SYNCRATE_ULTRA2;
+ }
+ else
+ {
+ maxsync = AHC_SYNCRATE_ULTRA;
+ }
+ }
+ else if (p->features & AHC_ULTRA)
+ {
+ maxsync = AHC_SYNCRATE_ULTRA;
+ }
+ else
+ {
+ maxsync = AHC_SYNCRATE_FAST;
+ }
+ /*
+ * We might have a device that is starting negotiation with us
+ * before we can start up negotiation with it....be prepared to
+ * have a device ask for a higher speed than we want to give it
+ * in that case.
+ */
+ if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) )
+ {
+ if (!(p->dev_flags[tindex] & DEVICE_SCANNED))
+ {
+ /*
+ * Not only is the device starting this up, but it also hasn't
+ * been scanned yet, so this would likely be our TUR or our
+ * INQUIRY command at scan time, so we need to use the
+ * settings from the SEEPROM if they existed. Of course, even
+ * if we didn't find a SEEPROM, we stuffed default values into
+ * the user settings anyway, so use those in all cases.
+ */
+ p->transinfo[tindex].goal_period =
+ p->transinfo[tindex].user_period;
+ p->transinfo[tindex].goal_offset =
+ p->transinfo[tindex].user_offset;
+ p->needsdtr_copy |= target_mask;
+ }
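+ /*
+ * Clamp our reply: if we have no sync offset to offer, force the
+ * slowest period (255, effectively async); otherwise never agree
+ * to a period faster (smaller) than our own goal period.
+ */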
+ if ( !p->transinfo[tindex].goal_offset )
+ period = 255;
+ if ( p->transinfo[tindex].goal_period > period )
+ period = p->transinfo[tindex].goal_period;
+ }
+
+ syncrate = aic7xxx_find_syncrate(p, &period, maxsync);
+ aic7xxx_validate_offset(p, syncrate, &offset,
+ target_scsirate & WIDEXFER);
+ aic7xxx_set_syncrate(p, syncrate, target, channel, period,
+ offset, AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+
+ /*
+ * Did we drop to async? If so, are we sending a reply? If we are,
+ * then we have to make sure that the reply value reflects the proper
+ * settings so we need to set the goal values according to what
+ * we need to send.
+ */
+ if ( (offset == 0) || (offset != saved_offset) ||
+ ((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) ) )
+ {
+ aic7xxx_set_syncrate(p, syncrate, target, channel, period,
+ offset, AHC_TRANS_GOAL|AHC_TRANS_QUITE);
+ if ( offset == 0 )
+ {
+ p->needsdtr_copy &= ~target_mask;
+ }
+ }
+
+ /*
+ * Did we start this? If not, or if we went too low and had to
+ * go async, then send an SDTR back to the target.
+ */
+ p->needsdtr &= ~target_mask;
+ p->sdtr_pending &= ~target_mask;
+ if ( ((scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) ==
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) &&
+ (offset == saved_offset) )
+ {
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ }
+ else
+ {
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
+ done = TRUE;
+ break;
+ }
+ case MSG_EXT_WDTR:
+ {
+ unsigned char bus_width;
+
+ if (p->msg_buf[1] != MSG_EXT_WDTR_LEN)
+ {
+ reject = TRUE;
+ break;
+ }
+
+ if (p->msg_len < (MSG_EXT_WDTR_LEN + 2))
+ {
+ break;
+ }
+
+ bus_width = p->msg_buf[3];
+ if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR)) ==
+ (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR) )
+ {
+ switch(bus_width)
+ {
+ default:
+ {
+ reject = TRUE;
+ if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&
+ ((p->dev_flags[tindex] & DEVICE_PRINT_WDTR) ||
+ (aic7xxx_verbose > 0xffff)) )
+ {
+ printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n",
+ p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width));
+ p->dev_flags[tindex] &= ~DEVICE_PRINT_WDTR;
+ }
+ } /* We fall through on purpose */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ {
+ bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ p->needwdtr_copy &= ~target_mask;
+ break;
+ }
+ case MSG_EXT_WDTR_BUS_16_BIT:
+ {
+ break;
+ }
+ }
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ p->wdtr_pending &= ~target_mask;
+ p->needwdtr &= ~target_mask;
+ }
+ else
+ {
+ scb->flags &= ~SCB_MSGOUT_BITS;
+ scb->flags |= SCB_MSGOUT_WDTR;
+ reply = TRUE;
+ if ( !(p->dev_flags[tindex] & DEVICE_SCANNED) )
+ {
+ /*
+ * Well, we now know the WDTR and SYNC caps of this device since
+ * it contacted us first, mark it as such and copy the user stuff
+ * over to the goal stuff.
+ */
+ p->transinfo[tindex].goal_period =
+ p->transinfo[tindex].user_period;
+ p->transinfo[tindex].goal_offset =
+ p->transinfo[tindex].user_offset;
+ p->transinfo[tindex].goal_width =
+ p->transinfo[tindex].user_width;
+ p->needwdtr_copy |= target_mask;
+ p->needsdtr_copy |= target_mask;
+ }
+ switch(bus_width)
+ {
+ default:
+ {
+ if ( (p->features & AHC_WIDE) &&
+ (p->transinfo[tindex].goal_width ==
+ MSG_EXT_WDTR_BUS_16_BIT) )
+ {
+ bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+ break;
+ }
+ } /* Fall through if we aren't a wide card */
+ case MSG_EXT_WDTR_BUS_8_BIT:
+ {
+ p->needwdtr_copy &= ~target_mask;
+ bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+ aic7xxx_set_width(p, target, channel, lun, bus_width,
+ AHC_TRANS_GOAL|AHC_TRANS_QUITE);
+ break;
+ }
+ }
+ p->needwdtr &= ~target_mask;
+ p->wdtr_pending &= ~target_mask;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
+ aic7xxx_set_width(p, target, channel, lun, bus_width,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR);
+
+ /*
+ * By virtue of the SCSI spec, a WDTR message negates any existing
+ * SDTR negotiations. So, even if needsdtr isn't marked for this
+ * device, we still have to do a new SDTR message if the device
+ * supports SDTR at all. Therefore, we check needsdtr_copy instead
+ * of needsdtr.
+ */
+ aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0,
+ AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE);
+ if ( (p->needsdtr_copy & target_mask) &&
+ !(p->sdtr_pending & target_mask))
+ {
+ p->needsdtr |= target_mask;
+ if ( !reject && !reply )
+ {
+ scb->flags &= ~SCB_MSGOUT_WDTR;
+ if (p->transinfo[tindex].goal_period)
+ {
+ p->sdtr_pending |= target_mask;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ }
+ }
+ }
+ done = TRUE;
+ break;
+ }
+ default:
+ {
+ reject = TRUE;
+ break;
+ }
+ } /* end of switch(p->msg_type) */
+ } /* end of if (!reject && (p->msg_len > 2)) */
+
+ if (reject)
+ {
+ aic_outb(p, MSG_MESSAGE_REJECT, MSG_OUT);
+ aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
+ done = TRUE;
+ }
+ return(done);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_handle_reqinit
+ *
+ * Description:
+ * Interrupt handler for REQINIT interrupts (used to transfer messages to
+ * and from devices).
+ *_F*************************************************************************/
+static void
+aic7xxx_handle_reqinit(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ unsigned char lastbyte;
+ unsigned char phasemis;
+ int done = FALSE;
+
+ switch(p->msg_type)
+ {
+ case MSG_TYPE_INITIATOR_MSGOUT:
+ {
+ if (p->msg_len == 0)
+ panic("aic7xxx: REQINIT with no active message!\n");
+
+ lastbyte = (p->msg_index == (p->msg_len - 1));
+ phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK) != P_MESGOUT;
+
+ if (lastbyte || phasemis)
+ {
+ /* Time to end the message */
+ p->msg_len = 0;
+ p->msg_type = MSG_TYPE_NONE;
+ /*
+ * NOTE-TO-MYSELF: If you clear the REQINIT after you
+ * disable REQINITs, then cases of REJECT_MSG stop working
+ * and hang the bus
+ */
+ aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+
+ if (phasemis == 0)
+ {
+ aic_outb(p, p->msg_buf[p->msg_index], SINDEX);
+ aic_outb(p, 0, RETURN_1);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Completed sending of REQINIT message.\n",
+ p->host_no, CTL_OF_SCB(scb));
+#endif
+ }
+ else
+ {
+ aic_outb(p, MSGOUT_PHASEMIS, RETURN_1);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "PHASEMIS while sending REQINIT message.\n",
+ p->host_no, CTL_OF_SCB(scb));
+#endif
+ }
+ unpause_sequencer(p, TRUE);
+ }
+ else
+ {
+ /*
+ * Present the byte on the bus (clearing REQINIT) but don't
+ * unpause the sequencer.
+ */
+ aic_outb(p, CLRREQINIT, CLRSINT1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ aic_outb(p, p->msg_buf[p->msg_index++], SCSIDATL);
+ }
+ break;
+ }
+ case MSG_TYPE_INITIATOR_MSGIN:
+ {
+ phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK ) != P_MESGIN;
+
+ if (phasemis == 0)
+ {
+ p->msg_len++;
+ /* Pull the byte in without acking it */
+ p->msg_buf[p->msg_index] = aic_inb(p, SCSIBUSL);
+ done = aic7xxx_parse_msg(p, scb);
+ /* Ack the byte */
+ aic_outb(p, CLRREQINIT, CLRSINT1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ aic_inb(p, SCSIDATL);
+ p->msg_index++;
+ }
+ if (phasemis || done)
+ {
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ {
+ if (phasemis)
+ printk(INFO_LEAD "PHASEMIS while receiving REQINIT message.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ else
+ printk(INFO_LEAD "Completed receipt of REQINIT message.\n",
+ p->host_no, CTL_OF_SCB(scb));
+ }
+#endif
+ /* Time to end our message session */
+ p->msg_len = 0;
+ p->msg_type = MSG_TYPE_NONE;
+ aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+ unpause_sequencer(p, TRUE);
+ }
+ break;
+ }
+ default:
+ {
+ panic("aic7xxx: Unknown REQINIT message type.\n");
+ break;
+ }
+ } /* End of switch(p->msg_type) */
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_handle_scsiint
+ *
+ * Description:
+ * Interrupt handler for SCSI interrupts (SCSIINT).
+ *-F*************************************************************************/
+static void
+aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat)
+{
+ unsigned char scb_index;
+ unsigned char status;
+ struct aic7xxx_scb *scb;
+
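+ /*
+ * Look up the SCB the sequencer was working on. If the tag is out
+ * of range or the SCB isn't active, treat this interrupt as not
+ * being associated with any particular command.
+ */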
+ scb_index = aic_inb(p, SCB_TAG);
+ status = aic_inb(p, SSTAT1);
+
+ if (scb_index < p->scb_data->numscbs)
+ {
+ scb = p->scb_data->scb_array[scb_index];
+ if ((scb->flags & SCB_ACTIVE) == 0)
+ {
+ scb = NULL;
+ }
+ }
+ else
+ {
+ scb = NULL;
+ }
+
+
+ if ((status & SCSIRSTI) != 0)
+ {
+ int channel;
+
+ if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+ channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
+ else
+ channel = 0;
+
+ if (aic7xxx_verbose & VERBOSE_RESET)
+ printk(WARN_LEAD "Someone else reset the channel!!\n",
+ p->host_no, channel, -1, -1);
+ /*
+ * Go through and abort all commands for the channel, but do not
+ * reset the channel again.
+ */
+ aic7xxx_reset_channel(p, channel, /* Initiate Reset */ FALSE);
+ aic7xxx_run_done_queue(p, FALSE);
+ scb = NULL;
+ }
+ else if ( ((status & BUSFREE) != 0) && ((status & SELTO) == 0) )
+ {
+ /*
+ * First look at what phase we were last in. If it's message-out,
+ * chances are pretty good that the bus free was in response to
+ * one of our abort requests.
+ */
+ unsigned char lastphase = aic_inb(p, LASTPHASE);
+ unsigned char saved_tcl = aic_inb(p, SAVED_TCL);
+ unsigned char target = (saved_tcl >> 4) & 0x0F;
+ int channel;
+ int printerror = TRUE;
+
+ if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+ channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
+ else
+ channel = 0;
+
+ aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP),
+ SCSISEQ);
+ if (lastphase == P_MESGOUT)
+ {
+ unsigned char message;
+
+ message = aic_inb(p, SINDEX);
+
+ if ((message == MSG_ABORT) || (message == MSG_ABORT_TAG))
+ {
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "SCB %d abort delivered.\n", p->host_no,
+ CTL_OF_SCB(scb), scb->hscb->tag);
+ aic7xxx_reset_device(p, target, channel, ALL_LUNS,
+ (message == MSG_ABORT) ? SCB_LIST_NULL : scb->hscb->tag );
+ aic7xxx_run_done_queue(p, FALSE);
+ scb = NULL;
+ printerror = 0;
+ }
+ else if (message == MSG_BUS_DEV_RESET)
+ {
+ aic7xxx_handle_device_reset(p, target, channel);
+ scb = NULL;
+ printerror = 0;
+ }
+ }
+ if (printerror != 0)
+ {
+ if (scb != NULL)
+ {
+ unsigned char tag;
+
+ if ((scb->hscb->control & TAG_ENB) != 0)
+ {
+ tag = scb->hscb->tag;
+ }
+ else
+ {
+ tag = SCB_LIST_NULL;
+ }
+ aic7xxx_reset_device(p, target, channel, ALL_LUNS, tag);
+ aic7xxx_run_done_queue(p, FALSE);
+ }
+ printk(INFO_LEAD "Unexpected busfree, LASTPHASE = 0x%x, "
+ "SEQADDR = 0x%x\n", p->host_no, channel, target, -1, lastphase,
+ (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
+ scb = NULL;
+ }
+ aic_outb(p, MSG_NOOP, MSG_OUT);
+ aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT),
+ SIMODE1);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+ aic_outb(p, CLRBUSFREE, CLRSINT1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ restart_sequencer(p);
+ unpause_sequencer(p, TRUE);
+ }
+ else if ((status & SELTO) != 0)
+ {
+ unsigned char scbptr;
+ unsigned char nextscb;
+ Scsi_Cmnd *cmd;
+
+ scbptr = aic_inb(p, WAITING_SCBH);
+ if (scbptr > p->scb_data->maxhscbs)
+ {
+ /*
+ * I'm still trying to track down exactly how this happens, but until
+ * I find it, this code will make sure we aren't passing bogus values
+ * into the SCBPTR register. Even if that register will just wrap
+ * things around, we still don't like having out-of-range variables.
+ *
+ * NOTE: Don't check the aic7xxx_verbose variable, I want this message
+ * to always be displayed.
+ */
+ printk(INFO_LEAD "Invalid WAITING_SCBH value %d, improvising.\n",
+ p->host_no, -1, -1, -1, scbptr);
+ if (p->scb_data->maxhscbs > 4)
+ scbptr &= (p->scb_data->maxhscbs - 1);
+ else
+ scbptr &= 0x03;
+ }
+ aic_outb(p, scbptr, SCBPTR);
+ scb_index = aic_inb(p, SCB_TAG);
+
+ scb = NULL;
+ if (scb_index < p->scb_data->numscbs)
+ {
+ scb = p->scb_data->scb_array[scb_index];
+ if ((scb->flags & SCB_ACTIVE) == 0)
+ {
+ scb = NULL;
+ }
+ }
+ if (scb == NULL)
+ {
+ printk(WARN_LEAD "Referenced SCB %d not valid during SELTO.\n",
+ p->host_no, -1, -1, -1, scb_index);
+ printk(KERN_WARNING " SCSISEQ = 0x%x SEQADDR = 0x%x SSTAT0 = 0x%x "
+ "SSTAT1 = 0x%x\n", aic_inb(p, SCSISEQ),
+ aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
+ aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
+ if (aic7xxx_panic_on_abort)
+ aic7xxx_panic_abort(p, NULL);
+ }
+ else
+ {
+ cmd = scb->cmd;
+ cmd->result = (DID_TIME_OUT << 16);
+
+ /*
+ * Clear out this hardware SCB
+ */
+ aic_outb(p, 0, SCB_CONTROL);
+
+ /*
+ * Clear out a few values in the card that are in an undetermined
+ * state.
+ */
+ aic_outb(p, MSG_NOOP, MSG_OUT);
+
+ /*
+ * Shift the waiting for selection queue forward
+ */
+ nextscb = aic_inb(p, SCB_NEXT);
+ aic_outb(p, nextscb, WAITING_SCBH);
+
+ /*
+ * Put this SCB back on the free list.
+ */
+ aic7xxx_add_curscb_to_free_list(p);
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Selection Timeout.\n", p->host_no, CTL_OF_SCB(scb));
+#endif
+ if (scb->flags & SCB_QUEUED_ABORT)
+ {
+ /*
+ * We know that this particular SCB had to be the queued abort since
+ * the disconnected SCB would have gotten a reconnect instead.
+ * What we need to do then is let the command time out again so
+ * we get a reset, since this abort just failed.
+ */
+ cmd->result = 0;
+ scb = NULL;
+ }
+ }
+ /*
+ * Restarting the sequencer will stop the selection and make sure devices
+ * are allowed to reselect in.
+ */
+ aic_outb(p, 0, SCSISEQ);
+ aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+ aic_outb(p, CLRSELTIMEO | CLRBUSFREE, CLRSINT1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ restart_sequencer(p);
+ unpause_sequencer(p, TRUE);
+ }
+ else if (scb == NULL)
+ {
+ printk(WARN_LEAD "aic7xxx_isr - referenced scb not valid "
+ "during scsiint 0x%x scb(%d)\n"
+ " SIMODE0 0x%x, SIMODE1 0x%x, SSTAT0 0x%x, SEQADDR 0x%x\n",
+ p->host_no, -1, -1, -1, status, scb_index, aic_inb(p, SIMODE0),
+ aic_inb(p, SIMODE1), aic_inb(p, SSTAT0),
+ (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
+ /*
+ * Turn off the interrupt and set status to zero, so that it
+ * falls through the rest of the SCSIINT code.
+ */
+ aic_outb(p, status, CLRSINT1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ unpause_sequencer(p, /* unpause always */ TRUE);
+ scb = NULL;
+ }
+ else if (status & SCSIPERR)
+ {
+ /*
+ * Determine the bus phase and queue an appropriate message.
+ */
+ char *phase;
+ Scsi_Cmnd *cmd;
+ unsigned char mesg_out = MSG_NOOP;
+ unsigned char lastphase = aic_inb(p, LASTPHASE);
+
+ cmd = scb->cmd;
+ switch (lastphase)
+ {
+ case P_DATAOUT:
+ phase = "Data-Out";
+ break;
+ case P_DATAIN:
+ phase = "Data-In";
+ mesg_out = MSG_INITIATOR_DET_ERR;
+ break;
+ case P_COMMAND:
+ phase = "Command";
+ break;
+ case P_MESGOUT:
+ phase = "Message-Out";
+ break;
+ case P_STATUS:
+ phase = "Status";
+ mesg_out = MSG_INITIATOR_DET_ERR;
+ break;
+ case P_MESGIN:
+ phase = "Message-In";
+ mesg_out = MSG_PARITY_ERROR;
+ break;
+ default:
+ phase = "unknown";
+ break;
+ }
+
+ /*
+ * A parity error has occurred during a data
+ * transfer phase. Flag it and continue.
+ */
+ printk(WARN_LEAD "Parity error during %s phase.\n",
+ p->host_no, CTL_OF_SCB(scb), phase);
+
+ /*
+ * We've set the hardware to assert ATN if we get a parity
+ * error on "in" phases, so all we need to do is stuff the
+ * message buffer with the appropriate message. "In" phases
+ * have set mesg_out to something other than MSG_NOOP.
+ */
+ if (mesg_out != MSG_NOOP)
+ {
+ aic_outb(p, mesg_out, MSG_OUT);
+ scb = NULL;
+ }
+ aic_outb(p, CLRSCSIPERR, CLRSINT1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ unpause_sequencer(p, /* unpause_always */ TRUE);
+ }
+ else if ( (status & REQINIT) &&
+ (p->flags & AHC_HANDLING_REQINITS) )
+ {
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Handling REQINIT, SSTAT1=0x%x.\n", p->host_no,
+ CTL_OF_SCB(scb), aic_inb(p, SSTAT1));
+#endif
+ aic7xxx_handle_reqinit(p, scb);
+ return;
+ }
+ else
+ {
+ /*
+ * We don't know what's going on. Turn off the
+ * interrupt source and try to continue.
+ */
+ if (aic7xxx_verbose & VERBOSE_SCSIINT)
+ printk(INFO_LEAD "Unknown SCSIINT status, SSTAT1(0x%x).\n",
+ p->host_no, -1, -1, -1, status);
+ aic_outb(p, status, CLRSINT1);
+ aic_outb(p, CLRSCSIINT, CLRINT);
+ unpause_sequencer(p, /* unpause always */ TRUE);
+ scb = NULL;
+ }
+ if (scb != NULL)
+ {
+ aic7xxx_done(p, scb);
+ }
+}
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+static void
+aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer)
+{
+ unsigned char saved_scbptr, free_scbh, dis_scbh, wait_scbh, temp;
+ int i, bogus, lost;
+ static unsigned char scb_status[AIC7XXX_MAXSCB];
+
+#define SCB_NO_LIST 0
+#define SCB_FREE_LIST 1
+#define SCB_WAITING_LIST 2
+#define SCB_DISCONNECTED_LIST 4
+#define SCB_CURRENTLY_ACTIVE 8
+
+ /*
+ * Note, these checks will fail on a regular basis once the machine moves
+ * beyond the bus scan phase. The problem is race conditions concerning
+ * the scbs and where they are linked in. When you have 30 or so commands
+ * outstanding on the bus, and run this twice with every interrupt, the
+ * chances get pretty good that you'll catch the sequencer with an SCB
+ * only partially linked in. Therefore, once we pass the scan phase
+ * of the bus, we really should disable this function.
+ */
+ bogus = FALSE;
+ memset(&scb_status[0], 0, sizeof(scb_status));
+ pause_sequencer(p);
+ saved_scbptr = aic_inb(p, SCBPTR);
+ if (saved_scbptr >= p->scb_data->maxhscbs)
+ {
+ printk("Bogus SCBPTR %d\n", saved_scbptr);
+ bogus = TRUE;
+ }
+ scb_status[saved_scbptr] = SCB_CURRENTLY_ACTIVE;
+ free_scbh = aic_inb(p, FREE_SCBH);
+ if ( (free_scbh != SCB_LIST_NULL) &&
+ (free_scbh >= p->scb_data->maxhscbs) )
+ {
+ printk("Bogus FREE_SCBH %d\n", free_scbh);
+ bogus = TRUE;
+ }
+ else
+ {
+ temp = free_scbh;
+ while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
+ {
+ if(scb_status[temp] & 0x07)
+ {
+ printk("HSCB %d on multiple lists, status 0x%02x", temp,
+ scb_status[temp] | SCB_FREE_LIST);
+ bogus = TRUE;
+ }
+ scb_status[temp] |= SCB_FREE_LIST;
+ aic_outb(p, temp, SCBPTR);
+ temp = aic_inb(p, SCB_NEXT);
+ }
+ }
+
+ dis_scbh = aic_inb(p, DISCONNECTED_SCBH);
+ if ( (dis_scbh != SCB_LIST_NULL) &&
+ (dis_scbh >= p->scb_data->maxhscbs) )
+ {
+ printk("Bogus DISCONNECTED_SCBH %d\n", dis_scbh);
+ bogus = TRUE;
+ }
+ else
+ {
+ temp = dis_scbh;
+ while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
+ {
+ if(scb_status[temp] & 0x07)
+ {
+ printk("HSCB %d on multiple lists, status 0x%02x", temp,
+ scb_status[temp] | SCB_DISCONNECTED_LIST);
+ bogus = TRUE;
+ }
+ scb_status[temp] |= SCB_DISCONNECTED_LIST;
+ aic_outb(p, temp, SCBPTR);
+ temp = aic_inb(p, SCB_NEXT);
+ }
+ }
+
+ wait_scbh = aic_inb(p, WAITING_SCBH);
+ if ( (wait_scbh != SCB_LIST_NULL) &&
+ (wait_scbh >= p->scb_data->maxhscbs) )
+ {
+ printk("Bogus WAITING_SCBH %d\n", wait_scbh);
+ bogus = TRUE;
+ }
+ else
+ {
+ temp = wait_scbh;
+ while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) )
+ {
+ if(scb_status[temp] & 0x07)
+ {
+ printk("HSCB %d on multiple lists, status 0x%02x", temp,
+ scb_status[temp] | SCB_WAITING_LIST);
+ bogus = TRUE;
+ }
+ scb_status[temp] |= SCB_WAITING_LIST;
+ aic_outb(p, temp, SCBPTR);
+ temp = aic_inb(p, SCB_NEXT);
+ }
+ }
+
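+ /*
+ * Finally, sanity check every hardware SCB: SCB_NEXT and SCB_PREV
+ * must stay in range, SCB_NEXT must not point back at the SCB
+ * itself, and at most one SCB may be absent from all of the lists
+ * walked above.
+ */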
+ lost=0;
+ for(i=0; i < p->scb_data->maxhscbs; i++)
+ {
+ aic_outb(p, i, SCBPTR);
+ temp = aic_inb(p, SCB_NEXT);
+ if ( ((temp != SCB_LIST_NULL) &&
+ (temp >= p->scb_data->maxhscbs)) )
+ {
+ printk("HSCB %d bad, SCB_NEXT invalid(%d).\n", i, temp);
+ bogus = TRUE;
+ }
+ if ( temp == i )
+ {
+ printk("HSCB %d bad, SCB_NEXT points to self.\n", i);
+ bogus = TRUE;
+ }
+ temp = aic_inb(p, SCB_PREV);
+ if ((temp != SCB_LIST_NULL) &&
+ (temp >= p->scb_data->maxhscbs))
+ {
+ printk("HSCB %d bad, SCB_PREV invalid(%d).\n", i, temp);
+ bogus = TRUE;
+ }
+ if (scb_status[i] == 0)
+ lost++;
+ if (lost > 1)
+ {
+ printk("Too many lost scbs.\n");
+ bogus=TRUE;
+ }
+ }
+ aic_outb(p, saved_scbptr, SCBPTR);
+ unpause_sequencer(p, FALSE);
+ if (bogus)
+ {
+ printk("Bogus parameters found in card SCB array structures.\n");
+ printk("%s\n", buffer);
+ aic7xxx_panic_abort(p, NULL);
+ }
+ return;
+}
+#endif
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_isr
+ *
+ * Description:
+ * SCSI controller interrupt handler.
+ *-F*************************************************************************/
+static void
+aic7xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct aic7xxx_host *p;
+ unsigned char intstat;
+
+ p = (struct aic7xxx_host *)dev_id;
+
+ /*
+ * Just a few sanity checks. Make sure that we have an int pending.
+ * Also, if PCI, then we are going to check for a PCI bus error status
+ * should we get too many spurious interrupts.
+ */
+ if (!((intstat = aic_inb(p, INTSTAT)) & INT_PEND))
+ {
+#ifdef CONFIG_PCI
+ if ( (p->chip & AHC_PCI) && (p->spurious_int > 500) &&
+ !(p->flags & AHC_HANDLING_REQINITS) )
+ {
+ if ( aic_inb(p, ERROR) & PCIERRSTAT )
+ {
+ aic7xxx_pci_intr(p);
+ }
+ p->spurious_int = 0;
+ }
+ else if ( !(p->flags & AHC_HANDLING_REQINITS) )
+ {
+ p->spurious_int++;
+ }
+#endif
+ return;
+ }
+
+ p->spurious_int = 0;
+
+ /*
+ * Keep track of interrupts for /proc/scsi
+ */
+ p->isr_count++;
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) &&
+ (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) )
+ aic7xxx_check_scbs(p, "Bogus settings at start of interrupt.");
+#endif
+
+ /*
+ * Handle all the interrupt sources - especially for SCSI
+ * interrupts, we won't get a second chance at them.
+ */
+ if (intstat & CMDCMPLT)
+ {
+ struct aic7xxx_scb *scb = NULL;
+ Scsi_Cmnd *cmd;
+ unsigned char scb_index;
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if(aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Command Complete Int.\n", p->host_no, -1, -1, -1);
+#endif
+
+ /*
+ * Clear interrupt status before running the completion loop.
+ * This eliminates a race condition whereby a command could
+ * complete between the last check of qoutfifo and the
+ * CLRCMDINT statement. This would result in us thinking the
+ * qoutfifo was empty when it wasn't, which would in actuality be a
+ * lost completion interrupt. With multiple devices or tagged queueing
+ * this could be very bad if we caught all but the last completion
+ * and no more are immediately sent.
+ */
+ aic_outb(p, CLRCMDINT, CLRINT);
+ /*
+ * The sequencer will continue running when it
+ * issues this interrupt. There may be >1 commands
+ * finished, so loop until we've processed them all.
+ */
+
+ while (p->qoutfifo[p->qoutfifonext] != SCB_LIST_NULL)
+ {
+ scb_index = p->qoutfifo[p->qoutfifonext];
+ p->qoutfifo[p->qoutfifonext++] = SCB_LIST_NULL;
+ if ( scb_index >= p->scb_data->numscbs )
+ scb = NULL;
+ else
+ scb = p->scb_data->scb_array[scb_index];
+ if (scb == NULL)
+ {
+ printk(WARN_LEAD "CMDCMPLT with invalid SCB index %d\n", p->host_no,
+ -1, -1, -1, scb_index);
+ continue;
+ }
+ else if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
+ {
+ printk(WARN_LEAD "CMDCMPLT without command for SCB %d, SCB flags "
+ "0x%x, cmd 0x%lx\n", p->host_no, -1, -1, -1, scb_index, scb->flags,
+ (unsigned long) scb->cmd);
+ continue;
+ }
+ else if (scb->flags & SCB_QUEUED_ABORT)
+ {
+ pause_sequencer(p);
+ if ( ((aic_inb(p, LASTPHASE) & PHASE_MASK) != P_BUSFREE) &&
+ (aic_inb(p, SCB_TAG) == scb->hscb->tag) )
+ {
+ unpause_sequencer(p, FALSE);
+ continue;
+ }
+ aic7xxx_reset_device(p, scb->cmd->target, scb->cmd->channel,
+ scb->cmd->lun, scb->hscb->tag);
+ scb->flags &= ~(SCB_QUEUED_FOR_DONE | SCB_RESET | SCB_ABORT |
+ SCB_QUEUED_ABORT);
+ unpause_sequencer(p, FALSE);
+ }
+ else if (scb->flags & SCB_ABORT)
+ {
+ /*
+ * We started to abort this, but it completed on us, let it
+ * through as successful
+ */
+ scb->flags &= ~(SCB_ABORT|SCB_RESET);
+ }
+ switch (status_byte(scb->hscb->target_status))
+ {
+ case QUEUE_FULL:
+ case BUSY:
+ scb->hscb->target_status = 0;
+ scb->cmd->result = 0;
+ aic7xxx_error(scb->cmd) = DID_OK;
+ break;
+ default:
+ cmd = scb->cmd;
+ if (scb->hscb->residual_SG_segment_count != 0)
+ {
+ aic7xxx_calculate_residual(p, scb);
+ }
+ cmd->result |= (aic7xxx_error(cmd) << 16);
+ aic7xxx_done(p, scb);
+ break;
+ }
+ }
+ }
+
+ if (intstat & BRKADRINT)
+ {
+ int i;
+ unsigned char errno = aic_inb(p, ERROR);
+
+ printk(KERN_ERR "(scsi%d) BRKADRINT error(0x%x):\n", p->host_no, errno);
+ for (i = 0; i < NUMBER(hard_error); i++)
+ {
+ if (errno & hard_error[i].errno)
+ {
+ printk(KERN_ERR " %s\n", hard_error[i].errmesg);
+ }
+ }
+ printk(KERN_ERR "(scsi%d) SEQADDR=0x%x\n", p->host_no,
+ (((aic_inb(p, SEQADDR1) << 8) & 0x100) | aic_inb(p, SEQADDR0)));
+ if (aic7xxx_panic_on_abort)
+ aic7xxx_panic_abort(p, NULL);
+#ifdef CONFIG_PCI
+ if (errno & PCIERRSTAT)
+ aic7xxx_pci_intr(p);
+#endif
+ if (errno & (SQPARERR | ILLOPCODE | ILLSADDR))
+ {
+ sti();
+ panic("aic7xxx: unrecoverable BRKADRINT.\n");
+ }
+ if (errno & ILLHADDR)
+ {
+ printk(KERN_ERR "(scsi%d) BUG! Driver accessed chip without first "
+ "pausing controller!\n", p->host_no);
+ }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (errno & DPARERR)
+ {
+ if (aic_inb(p, DMAPARAMS) & DIRECTION)
+ printk("(scsi%d) while DMAing SCB from host to card.\n", p->host_no);
+ else
+ printk("(scsi%d) while DMAing SCB from card to host.\n", p->host_no);
+ }
+#endif
+ aic_outb(p, CLRPARERR | CLRBRKADRINT, CLRINT);
+ unpause_sequencer(p, FALSE);
+ }
+
+ if (intstat & SEQINT)
+ {
+ aic7xxx_handle_seqint(p, intstat);
+ }
+
+ if (intstat & SCSIINT)
+ {
+ aic7xxx_handle_scsiint(p, intstat);
+ }
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) &&
+ (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) )
+ aic7xxx_check_scbs(p, "Bogus settings at end of interrupt.");
+#endif
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * do_aic7xxx_isr
+ *
+ * Description:
+ * This is a gross hack to solve a problem in linux kernels 2.1.85 and
+ * above. Please, children, do not try this at home, and if you ever see
+ * anything like it, please inform the Gross Hack Police immediately
+ *-F*************************************************************************/
+static void
+do_aic7xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned long cpu_flags;
+ struct aic7xxx_host *p;
+
+ p = (struct aic7xxx_host *)dev_id;
+ if(!p)
+ return;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,95)
+ spin_lock_irqsave(&io_request_lock, cpu_flags);
+ if(test_and_set_bit(AHC_IN_ISR_BIT, &p->flags))
+ {
+ return;
+ }
+ do
+ {
+ aic7xxx_isr(irq, dev_id, regs);
+ } while ( (aic_inb(p, INTSTAT) & INT_PEND) );
+ aic7xxx_done_cmds_complete(p);
+ aic7xxx_run_waiting_queues(p);
+ clear_bit(AHC_IN_ISR_BIT, &p->flags);
+ spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+#else
+ if(set_bit(AHC_IN_ISR_BIT, (int *)&p->flags))
+ {
+ return;
+ }
+ DRIVER_LOCK
+ do
+ {
+ aic7xxx_isr(irq, dev_id, regs);
+ } while ( (aic_inb(p, INTSTAT) & INT_PEND) );
+ DRIVER_UNLOCK
+ aic7xxx_done_cmds_complete(p);
+ aic7xxx_run_waiting_queues(p);
+ clear_bit(AHC_IN_ISR_BIT, (int *)&p->flags);
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_device_queue_depth
+ *
+ * Description:
+ * Determines the queue depth for a given device. There are two ways
+ * a queue depth can be obtained for a tagged queueing device. One
+ * way is the default queue depth which is determined by whether
+ * AIC7XXX_CMDS_PER_DEVICE is defined. If it is defined, then it is used
+ * as the default queue depth. Otherwise, we use either 4 or 8 as the
+ * default queue depth (dependent on the number of hardware SCBs).
+ * The other way we determine queue depth is through the use of the
+ * aic7xxx_tag_info array which is enabled by defining
+ * AIC7XXX_TAGGED_QUEUEING_BY_DEVICE. This array can be initialized
+ * with queue depths for individual devices. It also allows tagged
+ * queueing to be [en|dis]abled for a specific adapter.
+ *-F*************************************************************************/
+static void
+aic7xxx_device_queue_depth(struct aic7xxx_host *p, Scsi_Device *device)
+{
+ int default_depth = 3;
+ unsigned char tindex;
+ unsigned short target_mask;
+
+ tindex = device->id | (device->channel << 3);
+ target_mask = (1 << tindex);
+
+ device->queue_depth = default_depth;
+ p->dev_temp_queue_depth[tindex] = 1;
+ p->dev_max_queue_depth[tindex] = 1;
+ p->tagenable &= ~target_mask;
+
+ if (device->tagged_supported)
+ {
+ int tag_enabled = TRUE;
+
+ default_depth = AIC7XXX_CMDS_PER_DEVICE;
+
+ if (!(p->discenable & target_mask))
+ {
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ printk(INFO_LEAD "Disconnection disabled, unable to "
+ "enable tagged queueing.\n",
+ p->host_no, device->channel, device->id, device->lun);
+ }
+ else
+ {
+ if (p->instance >= NUMBER(aic7xxx_tag_info))
+ {
+ static int print_warning = TRUE;
+ if(print_warning)
+ {
+ printk(KERN_INFO "aic7xxx: WARNING, insufficient tag_info instances for"
+ " installed controllers.\n");
+ printk(KERN_INFO "aic7xxx: Please update the aic7xxx_tag_info array in"
+ " the aic7xxx.c source file.\n");
+ print_warning = FALSE;
+ }
+ device->queue_depth = default_depth;
+ }
+ else
+ {
+
+ if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 255)
+ {
+ tag_enabled = FALSE;
+ device->queue_depth = 3; /* Tagged queueing is disabled. */
+ }
+ else if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 0)
+ {
+ device->queue_depth = default_depth;
+ }
+ else
+ {
+ device->queue_depth =
+ aic7xxx_tag_info[p->instance].tag_commands[tindex];
+ }
+ }
+ if ((device->tagged_queue == 0) && tag_enabled)
+ {
+ if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
+ {
+ printk(INFO_LEAD "Enabled tagged queuing, queue depth %d.\n",
+ p->host_no, device->channel, device->id,
+ device->lun, device->queue_depth);
+ }
+ p->dev_max_queue_depth[tindex] = device->queue_depth;
+ p->dev_temp_queue_depth[tindex] = device->queue_depth;
+ p->tagenable |= target_mask;
+ p->orderedtag |= target_mask;
+ device->tagged_queue = 1;
+ device->current_tag = SCB_LIST_NULL;
+ }
+ }
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_select_queue_depth
+ *
+ * Description:
+ * Sets the queue depth for each SCSI device hanging off the input
+ * host adapter. We use a queue depth of 3 for devices that do not
+ * support tagged queueing. If AIC7XXX_CMDS_PER_DEVICE is defined, we
+ * use that for tagged queueing devices; otherwise we use our own
+ * algorithm for determining the queue depth based on the maximum
+ * SCBs for the controller.
+ *-F*************************************************************************/
+static void
+aic7xxx_select_queue_depth(struct Scsi_Host *host,
+ Scsi_Device *scsi_devs)
+{
+ Scsi_Device *device;
+ struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata;
+ int scbnum;
+
+ scbnum = 0;
+ for (device = scsi_devs; device != NULL; device = device->next)
+ {
+ if (device->host == host)
+ {
+ aic7xxx_device_queue_depth(p, device);
+ scbnum += device->queue_depth;
+ }
+ }
+ while (scbnum > p->scb_data->numscbs)
+ {
+ /*
+ * Pre-allocate the needed SCBs to get around the possibility of having
+ * to allocate some when memory is more or less exhausted and we need
+ * the SCB in order to perform a swap operation (possible deadlock)
+ */
+ if ( aic7xxx_allocate_scb(p) == 0 )
+ return;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_probe
+ *
+ * Description:
+ * Probing for EISA boards: it looks like the first two bytes
+ * are a manufacturer code - three characters, five bits each:
+ *
+ * BYTE 0 BYTE 1 BYTE 2 BYTE 3
+ * ?1111122 22233333 PPPPPPPP RRRRRRRR
+ *
+ * The characters are baselined off ASCII '@', so add that value
+ * to each to get the real ASCII code for it. The next two bytes
+ * appear to be a product and revision number, probably vendor-
+ * specific. This is what is being searched for at each port,
+ * and what should probably correspond to the ID= field in the
+ * ECU's .cfg file for the card - if your card is not detected,
+ * make sure your signature is listed in the array.
+ *
+ * The fourth byte's lowest bit seems to be an enabled/disabled
+ * flag (rest of the bits are reserved?).
+ *
+ * NOTE: This function is only needed on Intel and Alpha platforms,
+ * the other platforms we support don't have EISA/VLB busses. So,
+ * we #ifdef this entire function to avoid compiler warnings about
+ * an unused function.
+ *-F*************************************************************************/
+#if defined(__i386__) || defined(__alpha__)
+static int
+aic7xxx_probe(int slot, int base, ahc_flag_type *flags)
+{
+ int i;
+ unsigned char buf[4];
+
+ static struct {
+ int n;
+ unsigned char signature[sizeof(buf)];
+ ahc_chip type;
+ int bios_disabled;
+ } AIC7xxx[] = {
+ { 4, { 0x04, 0x90, 0x77, 0x70 },
+ AHC_AIC7770|AHC_EISA, FALSE }, /* mb 7770 */
+ { 4, { 0x04, 0x90, 0x77, 0x71 },
+ AHC_AIC7770|AHC_EISA, FALSE }, /* host adapter 274x */
+ { 4, { 0x04, 0x90, 0x77, 0x56 },
+ AHC_AIC7770|AHC_VL, FALSE }, /* 284x BIOS enabled */
+ { 4, { 0x04, 0x90, 0x77, 0x57 },
+ AHC_AIC7770|AHC_VL, TRUE } /* 284x BIOS disabled */
+ };
+
+ /*
+ * The VL-bus cards need to be primed by
+ * writing before a signature check.
+ */
+ for (i = 0; i < sizeof(buf); i++)
+ {
+ outb(0x80 + i, base);
+ buf[i] = inb(base + i);
+ }
+
+ for (i = 0; i < NUMBER(AIC7xxx); i++)
+ {
+ /*
+ * Signature match on enabled card?
+ */
+ if (!memcmp(buf, AIC7xxx[i].signature, AIC7xxx[i].n))
+ {
+ if (inb(base + 4) & 1)
+ {
+ if (AIC7xxx[i].bios_disabled)
+ {
+ *flags |= AHC_USEDEFAULTS;
+ }
+ else
+ {
+ *flags |= AHC_BIOS_ENABLED;
+ }
+ return (i);
+ }
+
+ printk("aic7xxx: <Adaptec 7770 SCSI Host Adapter> "
+ "disabled at slot %d, ignored.\n", slot);
+ }
+ }
+
+ return (-1);
+}
+#endif /* (__i386__) || (__alpha__) */
+
+
+/*+F*************************************************************************
+ * Function:
+ * read_284x_seeprom
+ *
+ * Description:
+ * Reads the 2840 serial EEPROM and returns 1 if successful and 0 if
+ * not successful.
+ *
+ * See read_seeprom (for the 2940) for the instruction set of the 93C46
+ * chip.
+ *
+ * The 2840 interface to the 93C46 serial EEPROM is through the
+ * STATUS_2840 and SEECTL_2840 registers. The CS_2840, CK_2840, and
+ * DO_2840 bits of the SEECTL_2840 register are connected to the chip
+ * select, clock, and data out lines respectively of the serial EEPROM.
+ * The DI_2840 bit of the STATUS_2840 is connected to the data in line
+ * of the serial EEPROM. The EEPROM_TF bit of STATUS_2840 register is
+ * useful in that it gives us an 800 nsec timer. After a read from the
+ * SEECTL_2840 register the timing flag is cleared and goes high 800 nsec
+ * later.
+ *-F*************************************************************************/
+static int
+read_284x_seeprom(struct aic7xxx_host *p, struct seeprom_config *sc)
+{
+ int i = 0, k = 0;
+ unsigned char temp;
+ unsigned short checksum = 0;
+ unsigned short *seeprom = (unsigned short *) sc;
+ struct seeprom_cmd {
+ unsigned char len;
+ unsigned char bits[3];
+ };
+ struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+
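+/*
+ * CLOCK_PULSE: wait for the EEPROM_TF timing flag in STATUS_2840 to go high
+ * (it does so roughly 800 nsec after the previous access), then read
+ * SEECTL_2840, which clears the flag and re-arms the timer for the next bit.
+ */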
+#define CLOCK_PULSE(p) \
+ while ((aic_inb(p, STATUS_2840) & EEPROM_TF) == 0) \
+ { \
+ ; /* Do nothing */ \
+ } \
+ (void) aic_inb(p, SEECTL_2840);
+
+ /*
+ * Read the first 32 registers of the seeprom. For the 2840,
+ * the 93C46 SEEPROM is a 1024-bit device with 64 16-bit registers
+ * but only the first 32 are used by Adaptec BIOS. The loop
+ * will range from 0 to 31.
+ */
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ /*
+ * Send chip select for one clock cycle.
+ */
+ aic_outb(p, CK_2840 | CS_2840, SEECTL_2840);
+ CLOCK_PULSE(p);
+
+ /*
+ * Now we're ready to send the read command followed by the
+ * address of the 16-bit register we want to read.
+ */
+ for (i = 0; i < seeprom_read.len; i++)
+ {
+ temp = CS_2840 | seeprom_read.bits[i];
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ temp = temp ^ CK_2840;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+ /*
+ * Send the 6 bit address (MSB first, LSB last).
+ */
+ for (i = 5; i >= 0; i--)
+ {
+ temp = k;
+ temp = (temp >> i) & 1; /* Mask out all but lower bit. */
+ temp = CS_2840 | temp;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ temp = temp ^ CK_2840;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * Now read the 16 bit register. An initial 0 precedes the
+ * register contents which begins with bit 15 (MSB) and ends
+ * with bit 0 (LSB). The initial 0 will be shifted off the
+ * top of our word as we let the loop run from 0 to 16.
+ */
+ for (i = 0; i <= 16; i++)
+ {
+ temp = CS_2840;
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ temp = temp ^ CK_2840;
+ seeprom[k] = (seeprom[k] << 1) | (aic_inb(p, STATUS_2840) & DI_2840);
+ aic_outb(p, temp, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+ /*
+ * The serial EEPROM has a checksum in the last word. Keep a
+ * running checksum for all words read except for the last
+ * word. We'll verify the checksum after all words have been
+ * read.
+ */
+ if (k < (sizeof(*sc) / 2) - 1)
+ {
+ checksum = checksum + seeprom[k];
+ }
+
+ /*
+ * Reset the chip select for the next command cycle.
+ */
+ aic_outb(p, 0, SEECTL_2840);
+ CLOCK_PULSE(p);
+ aic_outb(p, CK_2840, SEECTL_2840);
+ CLOCK_PULSE(p);
+ aic_outb(p, 0, SEECTL_2840);
+ CLOCK_PULSE(p);
+ }
+
+#if 0
+ printk("Computed checksum 0x%x, checksum read 0x%x\n", checksum, sc->checksum);
+ printk("Serial EEPROM:");
+ for (k = 0; k < (sizeof(*sc) / 2); k++)
+ {
+ if (((k % 8) == 0) && (k != 0))
+ {
+ printk("\n ");
+ }
+ printk(" 0x%x", seeprom[k]);
+ }
+ printk("\n");
+#endif
+
+ if (checksum != sc->checksum)
+ {
+ printk("aic7xxx: SEEPROM checksum error, ignoring SEEPROM settings.\n");
+ return (0);
+ }
+
+ return (1);
+#undef CLOCK_PULSE
+}
+
+/*+F*************************************************************************
+ * Function:
+ * acquire_seeprom
+ *
+ * Description:
+ * Acquires access to the memory port on PCI controllers.
+ *-F*************************************************************************/
+static int
+acquire_seeprom(struct aic7xxx_host *p)
+{
+ int wait;
+
+ /*
+ * Request access of the memory port. When access is
+ * granted, SEERDY will go high. We use a 1 second
+ * timeout which should be near 1 second more than
+ * is needed. Reason: after the 7870 chip reset, there
+ * should be no contention.
+ */
+ aic_outb(p, SEEMS, SEECTL);
+ wait = 1000; /* 1000 msec = 1 second */
+ while ((wait > 0) && ((aic_inb(p, SEECTL) & SEERDY) == 0))
+ {
+ wait--;
+ mdelay(1); /* 1 msec */
+ }
+ if ((aic_inb(p, SEECTL) & SEERDY) == 0)
+ {
+ aic_outb(p, 0, SEECTL);
+ return (0);
+ }
+ return (1);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * release_seeprom
+ *
+ * Description:
+ * Releases access to the memory port on PCI controllers.
+ *-F*************************************************************************/
+static void
+release_seeprom(struct aic7xxx_host *p)
+{
+ aic_outb(p, 0, SEECTL);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * read_seeprom
+ *
+ * Description:
+ * Reads the serial EEPROM and returns 1 if successful and 0 if
+ * not successful.
+ *
+ * The instruction set of the 93C46/56/66 chips is as follows:
+ *
+ * Start OP
+ * Function Bit Code Address Data Description
+ * -------------------------------------------------------------------
+ * READ 1 10 A5 - A0 Reads data stored in memory,
+ * starting at specified address
+ * EWEN 1 00 11XXXX Write enable must precede
+ * all programming modes
+ * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0
+ * WRITE 1 01 A5 - A0 D15 - D0 Writes register
+ * ERAL 1 00 10XXXX Erase all registers
+ * WRAL 1 00 01XXXX D15 - D0 Writes to all registers
+ * EWDS 1 00 00XXXX Disables all programming
+ * instructions
+ * *Note: A value of X for address is a don't care condition.
+ * *Note: The 93C56 and 93C66 have 8 address bits.
+ *
+ *
+ * The 93C46 has a four wire interface: clock, chip select, data in, and
+ * data out. In order to perform one of the above functions, you need
+ * to enable the chip select for a clock period (typically a minimum of
+ * 1 usec, with the clock high and low a minimum of 750 and 250 nsec
+ * respectively). While the chip select remains high, you can clock in
+ * the instructions (above) starting with the start bit, followed by the
+ * OP code, Address, and Data (if needed). For the READ instruction, the
+ * requested 16-bit register contents are read from the data out line but
+ * is preceded by an initial zero (leading 0, followed by 16-bits, MSB
+ * first). The clock cycling from low to high initiates the next data
+ * bit to be sent from the chip.
+ *
+ * The 78xx interface to the 93C46 serial EEPROM is through the SEECTL
+ * register. After successful arbitration for the memory port, the
+ * SEECS bit of the SEECTL register is connected to the chip select.
+ * The SEECK, SEEDO, and SEEDI are connected to the clock, data out,
+ * and data in lines respectively. The SEERDY bit of SEECTL is useful
+ * in that it gives us an 800 nsec timer. After a write to the SEECTL
+ * register, the SEERDY goes high 800 nsec later. The one exception
+ * to this is when we first request access to the memory port. The
+ * SEERDY goes high to signify that access has been granted and, for
+ * this case, has no implied timing.
+ *-F*************************************************************************/
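+
+/*
+ * Worked example of the READ instruction above: to read 16-bit register 5
+ * of a 93C46 (6 address bits), the bits clocked into the chip are
+ *
+ *     start  opcode   address (MSB first)
+ *       1     1 0      0 0 0 1 0 1
+ *
+ * after which 17 bits are clocked out of the data-out line: a leading 0
+ * followed by D15..D0 of the register.  The loops in read_seeprom() below
+ * implement exactly this via the seeprom_read command table.
+ */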
+static int
+read_seeprom(struct aic7xxx_host *p, int offset,
+ unsigned short *scarray, unsigned int len, seeprom_chip_type chip)
+{
+ int i = 0, k;
+ unsigned char temp;
+ unsigned short checksum = 0;
+ struct seeprom_cmd {
+ unsigned char len;
+ unsigned char bits[3];
+ };
+ struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
+
+#define CLOCK_PULSE(p) \
+ while ((aic_inb(p, SEECTL) & SEERDY) == 0) \
+ { \
+ ; /* Do nothing */ \
+ }
+
+ /*
+ * Request access of the memory port.
+ */
+ if (acquire_seeprom(p) == 0)
+ {
+ return (0);
+ }
+
+ /*
+ * Read 'len' registers of the seeprom. For the 7870, the 93C46
+ * SEEPROM is a 1024-bit device with 64 16-bit registers but only
+ * the first 32 are used by Adaptec BIOS. Some adapters use the
+ * 93C56 SEEPROM which is a 2048-bit device. The loop will range
+ * from 0 to 'len' - 1.
+ */
+ for (k = 0; k < len; k++)
+ {
+ /*
+ * Send chip select for one clock cycle.
+ */
+ aic_outb(p, SEEMS | SEECK | SEECS, SEECTL);
+ CLOCK_PULSE(p);
+
+ /*
+ * Now we're ready to send the read command followed by the
+ * address of the 16-bit register we want to read.
+ */
+ for (i = 0; i < seeprom_read.len; i++)
+ {
+ temp = SEEMS | SEECS | (seeprom_read.bits[i] << 1);
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ temp = temp ^ SEECK;
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ }
+ /*
+ * Send the 6 or 8 bit address (MSB first, LSB last).
+ */
+ for (i = ((int) chip - 1); i >= 0; i--)
+ {
+ temp = k + offset;
+ temp = (temp >> i) & 1; /* Mask out all but lower bit. */
+ temp = SEEMS | SEECS | (temp << 1);
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ temp = temp ^ SEECK;
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * Now read the 16 bit register. An initial 0 precedes the
+ * register contents which begins with bit 15 (MSB) and ends
+ * with bit 0 (LSB). The initial 0 will be shifted off the
+ * top of our word as we let the loop run from 0 to 16.
+ */
+ for (i = 0; i <= 16; i++)
+ {
+ temp = SEEMS | SEECS;
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ temp = temp ^ SEECK;
+ scarray[k] = (scarray[k] << 1) | (aic_inb(p, SEECTL) & SEEDI);
+ aic_outb(p, temp, SEECTL);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * The serial EEPROM should have a checksum in the last word.
+ * Keep a running checksum for all words read except for the
+ * last word. We'll verify the checksum after all words have
+ * been read.
+ */
+ if (k < (len - 1))
+ {
+ checksum = checksum + scarray[k];
+ }
+
+ /*
+ * Reset the chip select for the next command cycle.
+ */
+ aic_outb(p, SEEMS, SEECTL);
+ CLOCK_PULSE(p);
+ aic_outb(p, SEEMS | SEECK, SEECTL);
+ CLOCK_PULSE(p);
+ aic_outb(p, SEEMS, SEECTL);
+ CLOCK_PULSE(p);
+ }
+
+ /*
+ * Release access to the memory port and the serial EEPROM.
+ */
+ release_seeprom(p);
+
+#if 0
+ printk("Computed checksum 0x%x, checksum read 0x%x\n",
+ checksum, scarray[len - 1]);
+ printk("Serial EEPROM:");
+ for (k = 0; k < len; k++)
+ {
+ if (((k % 8) == 0) && (k != 0))
+ {
+ printk("\n ");
+ }
+ printk(" 0x%x", scarray[k]);
+ }
+ printk("\n");
+#endif
+ if ( (checksum != scarray[len - 1]) || (checksum == 0) )
+ {
+ return (0);
+ }
+
+ return (1);
+#undef CLOCK_PULSE
+}
+
+/*+F*************************************************************************
+ * Function:
+ * write_brdctl
+ *
+ * Description:
+ * Writes a value to the BRDCTL register.
+ *-F*************************************************************************/
+static void
+write_brdctl(struct aic7xxx_host *p, unsigned char value)
+{
+ unsigned char brdctl;
+
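+ /*
+  * Bit-bang 'value' out through the BRDCTL register: set up the chip
+  * select/strobe bits appropriate to this chip family, present the value,
+  * toggle the strobe to latch it, then deassert everything again.
+  */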
+ if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ brdctl = BRDSTB;
+ if (p->flags & AHC_CHNLB)
+ brdctl |= BRDCS;
+ }
+ else if (p->features & AHC_ULTRA2)
+ brdctl = 0;
+ else
+ brdctl = BRDSTB | BRDCS;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ brdctl |= value;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ if (p->features & AHC_ULTRA2)
+ brdctl |= BRDSTB_ULTRA2;
+ else
+ brdctl &= ~BRDSTB;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ if (p->features & AHC_ULTRA2)
+ brdctl = 0;
+ else
+ brdctl &= ~BRDCS;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * read_brdctl
+ *
+ * Description:
+ * Reads the BRDCTL register.
+ *-F*************************************************************************/
+static unsigned char
+read_brdctl(struct aic7xxx_host *p)
+{
+ unsigned char brdctl, value;
+
+ if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ brdctl = BRDRW;
+ if (p->flags & AHC_CHNLB)
+ brdctl |= BRDCS;
+ }
+ else if (p->features & AHC_ULTRA2)
+ brdctl = BRDRW_ULTRA2;
+ else
+ brdctl = BRDRW | BRDCS;
+ aic_outb(p, brdctl, BRDCTL);
+ udelay(1);
+ value = aic_inb(p, BRDCTL);
+ aic_outb(p, 0, BRDCTL);
+ udelay(1);
+ return (value);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic785x_cable_detect
+ *
+ * Description:
+ * Detect the cables that are present on aic785x class controller chips
+ *-F*************************************************************************/
+static void
+aic785x_cable_detect(struct aic7xxx_host *p, int *int_50,
+ int *ext_present, int *eeprom)
+{
+ unsigned char brdctl;
+
+ aic_outb(p, BRDRW | BRDCS, BRDCTL);
+ udelay(1);
+ aic_outb(p, 0, BRDCTL);
+ udelay(1);
+ brdctl = aic_inb(p, BRDCTL);
+ udelay(1);
+ *int_50 = !(brdctl & BRDDAT5);
+ *ext_present = !(brdctl & BRDDAT6);
+ *eeprom = (aic_inb(p, SPIOCAP) & EEPROM);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic787x_cable_detect
+ *
+ * Description:
+ * Detect the cables that are present on aic787x class controller chips
+ *
+ * NOTE: This function assumes the SEEPROM will have already been acquired
+ * prior to invocation of this function.
+ *-F*************************************************************************/
+static void
+aic787x_cable_detect(struct aic7xxx_host *p, int *int_50, int *int_68,
+ int *ext_present, int *eeprom)
+{
+ unsigned char brdctl;
+
+ /*
+ * First read the status of our cables. Set the rom bank to
+ * 0 since the bank setting serves as a multiplexor for the
+ * cable detection logic. BRDDAT5 controls the bank switch.
+ */
+ write_brdctl(p, 0);
+
+ /*
+ * Now we read the state of the two internal connectors. BRDDAT6
+ * is internal 50, BRDDAT7 is internal 68. For each, the cable is
+ * present if the bit is 0
+ */
+ brdctl = read_brdctl(p);
+ *int_50 = !(brdctl & BRDDAT6);
+ *int_68 = !(brdctl & BRDDAT7);
+
+ /*
+ * Set the bank bit in brdctl and then read the external cable state
+ * and the EEPROM status
+ */
+ write_brdctl(p, BRDDAT5);
+ brdctl = read_brdctl(p);
+
+ *ext_present = !(brdctl & BRDDAT6);
+ *eeprom = !(brdctl & BRDDAT7);
+
+ /*
+ * We're done, the calling function will release the SEEPROM for us
+ */
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_ultra2_term_detect
+ *
+ * Description:
+ * Detect the termination settings present on ultra2 class controllers
+ *
+ * NOTE: This function assumes the SEEPROM will have already been acquired
+ * prior to invocation of this function.
+ *-F*************************************************************************/
+static void
+aic7xxx_ultra2_term_detect(struct aic7xxx_host *p, int *enableSE_low,
+ int *enableSE_high, int *enableLVD_low,
+ int *enableLVD_high, int *eprom_present)
+{
+ unsigned char brdctl;
+
+ brdctl = read_brdctl(p);
+
+ *eprom_present = (brdctl & BRDDAT7);
+ *enableSE_high = (brdctl & BRDDAT6);
+ *enableSE_low = (brdctl & BRDDAT5);
+ *enableLVD_high = (brdctl & BRDDAT4);
+ *enableLVD_low = (brdctl & BRDDAT3);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * configure_termination
+ *
+ * Description:
+ * Configures the termination settings on PCI adapters that have
+ * SEEPROMs available.
+ *-F*************************************************************************/
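+
+/*
+ * Rough flow of the code below: if the SEEPROM requested automatic
+ * termination (CFAUTOTERM), or this is an Ultra2 chip, probe which cable
+ * connectors are in use and enable SE low/high (and LVD) termination
+ * accordingly; otherwise simply honor the CFSTERM/CFWSTERM bits from the
+ * SEEPROM.  The result is written out through the BRDCTL data lines and
+ * the STPWEN bit of SXFRCTL1.
+ */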
+static void
+configure_termination(struct aic7xxx_host *p)
+{
+ int internal50_present = 0;
+ int internal68_present = 0;
+ int external_present = 0;
+ int eprom_present = 0;
+ int enableSE_low = 0;
+ int enableSE_high = 0;
+ int enableLVD_low = 0;
+ int enableLVD_high = 0;
+ unsigned char brddat = 0;
+ unsigned char max_target = 0;
+ unsigned char sxfrctl1 = aic_inb(p, SXFRCTL1);
+
+ if (acquire_seeprom(p))
+ {
+ if (p->features & (AHC_WIDE|AHC_TWIN))
+ max_target = 16;
+ else
+ max_target = 8;
+ aic_outb(p, SEEMS | SEECS, SEECTL);
+ sxfrctl1 &= ~STPWEN;
+ if ( (p->adapter_control & CFAUTOTERM) ||
+ (p->features & AHC_ULTRA2) )
+ {
+ if ( (p->adapter_control & CFAUTOTERM) && !(p->features & AHC_ULTRA2) )
+ {
+ printk(KERN_INFO "(scsi%d) Warning - detected auto-termination\n",
+ p->host_no);
+ printk(KERN_INFO "(scsi%d) Please verify driver detected settings are "
+ "correct.\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) If not, then please properly set the device "
+ "termination\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) in the Adaptec SCSI BIOS by hitting CTRL-A "
+ "when prompted\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) during machine bootup.\n", p->host_no);
+ }
+ /* Configure auto termination. */
+
+ if (p->features & AHC_ULTRA2)
+ {
+ if (aic7xxx_override_term == -1)
+ aic7xxx_ultra2_term_detect(p, &enableSE_low, &enableSE_high,
+ &enableLVD_low, &enableLVD_high,
+ &eprom_present);
+ if (!(p->adapter_control & CFSEAUTOTERM))
+ {
+ enableSE_low = (p->adapter_control & CFSTERM);
+ enableSE_high = (p->adapter_control & CFWSTERM);
+ }
+ if (!(p->adapter_control & CFAUTOTERM))
+ {
+ enableLVD_low = enableLVD_high = (p->adapter_control & CFLVDSTERM);
+ }
+ internal50_present = 0;
+ internal68_present = 1;
+ external_present = 1;
+ }
+ else if ( (p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870 )
+ {
+ aic787x_cable_detect(p, &internal50_present, &internal68_present,
+ &external_present, &eprom_present);
+ }
+ else
+ {
+ aic785x_cable_detect(p, &internal50_present, &external_present,
+ &eprom_present);
+ }
+
+ if (max_target <= 8)
+ internal68_present = 0;
+
+ if ( !(p->features & AHC_ULTRA2) )
+ {
+ if (max_target > 8)
+ {
+ printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, "
+ "Ext-68 %s)\n", p->host_no,
+ internal50_present ? "YES" : "NO",
+ internal68_present ? "YES" : "NO",
+ external_present ? "YES" : "NO");
+ }
+ else
+ {
+ printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Ext-50 %s)\n",
+ p->host_no,
+ internal50_present ? "YES" : "NO",
+ external_present ? "YES" : "NO");
+ }
+ }
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no,
+ eprom_present ? "is" : "is not");
+
+ /*
+ * Now set the termination based on what we found. BRDDAT6
+ * controls wide termination enable.
+ * Flash Enable = BRDDAT7
+ * SE High Term Enable = BRDDAT6
+ * SE Low Term Enable = BRDDAT5 (7890)
+ * LVD High Term Enable = BRDDAT4 (7890)
+ */
+ if ( !(p->features & AHC_ULTRA2) &&
+ (internal50_present && internal68_present && external_present) )
+ {
+ printk(KERN_INFO "(scsi%d) Illegal cable configuration!! Only two\n",
+ p->host_no);
+ printk(KERN_INFO "(scsi%d) connectors on the SCSI controller may be "
+ "in use at a time!\n", p->host_no);
+ /*
+ * Force termination (low and high byte) on. This is safer than
+ * leaving it completely off, especially since this message comes
+ * most often from motherboard controllers that don't even have 3
+ * connectors, but instead are failing the cable detection.
+ */
+ internal50_present = external_present = 0;
+ enableSE_high = enableSE_low = 1;
+ }
+
+ if ((max_target > 8) &&
+ ((external_present == 0) || (internal68_present == 0) ||
+ (enableSE_high != 0)))
+ {
+ brddat |= BRDDAT6;
+ p->flags |= AHC_TERM_ENB_SE_HIGH;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
+ p->host_no);
+ }
+
+ if ( (((internal50_present ? 1 : 0) +
+ (internal68_present ? 1 : 0) +
+ (external_present ? 1 : 0)) <= 1) ||
+ (enableSE_low != 0) )
+ {
+ if (p->features & AHC_ULTRA2)
+ brddat |= BRDDAT5;
+ else
+ sxfrctl1 |= STPWEN;
+ p->flags |= AHC_TERM_ENB_SE_LOW;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
+ p->host_no);
+ }
+
+ if (enableLVD_low != 0)
+ {
+ sxfrctl1 |= STPWEN;
+ p->flags |= AHC_TERM_ENB_LVD;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) LVD Low byte termination Enabled\n",
+ p->host_no);
+ }
+
+ if (enableLVD_high != 0)
+ {
+ brddat |= BRDDAT4;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) LVD High byte termination Enabled\n",
+ p->host_no);
+ }
+ }
+ else
+ {
+ if (p->adapter_control & CFSTERM)
+ {
+ if (p->features & AHC_ULTRA2)
+ brddat |= BRDDAT5;
+ else
+ sxfrctl1 |= STPWEN;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
+ p->host_no);
+ }
+
+ if (p->adapter_control & CFWSTERM)
+ {
+ brddat |= BRDDAT6;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
+ p->host_no);
+ }
+ }
+ write_brdctl(p, brddat);
+ release_seeprom(p);
+ aic_outb(p, sxfrctl1, SXFRCTL1);
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * detect_maxscb
+ *
+ * Description:
+ * Detects the maximum number of hardware SCBs for the controller and
+ * records the count in p->scb_data->maxhscbs.
+ *-F*************************************************************************/
+static void
+detect_maxscb(struct aic7xxx_host *p)
+{
+ int i;
+
+ /*
+ * It's possible that we've already done this for multichannel
+ * adapters.
+ */
+ if (p->scb_data->maxhscbs == 0)
+ {
+ /*
+ * We haven't initialized the SCB settings yet. Walk the SCBs to
+ * determine how many there are.
+ */
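+ /*
+  * The probe writes each candidate index i into that SCB's control byte
+  * and reads it back; it then re-checks SCB 0's control byte to catch the
+  * case where the index aliased back onto SCB 0 because we ran past the
+  * end of the SCB RAM.  Either failure ends the walk.
+  */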
+ aic_outb(p, 0, FREE_SCBH);
+
+ for (i = 0; i < AIC7XXX_MAXSCB; i++)
+ {
+ aic_outb(p, i, SCBPTR);
+ aic_outb(p, i, SCB_CONTROL);
+ if (aic_inb(p, SCB_CONTROL) != i)
+ break;
+ aic_outb(p, 0, SCBPTR);
+ if (aic_inb(p, SCB_CONTROL) != 0)
+ break;
+
+ aic_outb(p, i, SCBPTR);
+ aic_outb(p, 0, SCB_CONTROL); /* Clear the control byte. */
+ aic_outb(p, i + 1, SCB_NEXT); /* Set the next pointer. */
+ aic_outb(p, i - 1, SCB_PREV); /* Set the prev pointer. */
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG); /* Make the tag invalid. */
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS); /* no busy untagged */
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+1);/* targets active yet */
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+2);
+ aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+3);
+ }
+
+ /* Make sure the last SCB terminates the free list. */
+ aic_outb(p, i - 1, SCBPTR);
+ aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
+
+ /* Ensure we clear the first SCB's (index 0) control byte. */
+ aic_outb(p, 0, SCBPTR);
+ aic_outb(p, 0, SCB_CONTROL);
+
+ p->scb_data->maxhscbs = i;
+ /*
+  * If every SCB fits in hardware, disable SCB paging and use direct
+  * indexing instead for speed.
+  */
+ if ( i == AIC7XXX_MAXSCB )
+ p->flags &= ~AHC_PAGESCBS;
+ }
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_register
+ *
+ * Description:
+ * Register an Adaptec aic7xxx chip SCSI controller with the kernel.
+ *-F*************************************************************************/
+static int
+aic7xxx_register(Scsi_Host_Template *template, struct aic7xxx_host *p,
+ int reset_delay)
+{
+ int i, result;
+ int max_targets;
+ int found = 1;
+ unsigned char term, scsi_conf;
+ struct Scsi_Host *host;
+
+ /*
+ * Lock out other contenders for our i/o space.
+ */
+ request_region(p->base, MAXREG - MINREG, "aic7xxx");
+
+
+ host = p->host;
+
+ p->scb_data->maxscbs = AIC7XXX_MAXSCB;
+ host->can_queue = AIC7XXX_MAXSCB;
+ host->cmd_per_lun = 3;
+ host->sg_tablesize = AIC7XXX_MAX_SG;
+ host->select_queue_depths = aic7xxx_select_queue_depth;
+ host->this_id = p->scsi_id;
+ host->io_port = p->base;
+ host->n_io_port = 0xFF;
+ host->base = (unsigned char *) p->mbase;
+ host->irq = p->irq;
+ if (p->features & AHC_WIDE)
+ {
+ host->max_id = 16;
+ }
+ if (p->features & AHC_TWIN)
+ {
+ host->max_channel = 1;
+ }
+
+ p->host = host;
+ p->host_no = host->host_no;
+ host->unique_id = p->instance;
+ p->isr_count = 0;
+ p->next = NULL;
+ p->completeq.head = NULL;
+ p->completeq.tail = NULL;
+ scbq_init(&p->scb_data->free_scbs);
+ scbq_init(&p->waiting_scbs);
+ init_timer(&p->dev_timer);
+ p->dev_timer.data = (unsigned long)p;
+ p->dev_timer.function = (void *)aic7xxx_timer;
+ p->dev_timer_active = 0;
+
+ for (i = 0; i < NUMBER(p->untagged_scbs); i++)
+ {
+ p->untagged_scbs[i] = SCB_LIST_NULL;
+ p->qinfifo[i] = SCB_LIST_NULL;
+ p->qoutfifo[i] = SCB_LIST_NULL;
+ }
+ /*
+ * We currently have no commands of any type
+ */
+ p->qinfifonext = 0;
+ p->qoutfifonext = 0;
+
+ for (i = 0; i < MAX_TARGETS; i++)
+ {
+ p->dev_commands_sent[i] = 0;
+ p->dev_flags[i] = 0;
+ p->dev_active_cmds[i] = 0;
+ p->dev_last_queue_full[i] = 0;
+ p->dev_last_queue_full_count[i] = 0;
+ p->dev_max_queue_depth[i] = 1;
+ p->dev_temp_queue_depth[i] = 1;
+ p->dev_expires[i] = 0;
+ scbq_init(&p->delayed_scbs[i]);
+ }
+
+ printk(KERN_INFO "(scsi%d) <%s> found at ", p->host_no,
+ board_names[p->board_name_index]);
+ switch(p->chip)
+ {
+ case (AHC_AIC7770|AHC_EISA):
+ printk("EISA slot %d\n", p->pci_device_fn);
+ break;
+ case (AHC_AIC7770|AHC_VL):
+ printk("VLB slot %d\n", p->pci_device_fn);
+ break;
+ default:
+ printk("PCI %d/%d\n", PCI_SLOT(p->pci_device_fn),
+ PCI_FUNC(p->pci_device_fn));
+ break;
+ }
+ if (p->features & AHC_TWIN)
+ {
+ printk(KERN_INFO "(scsi%d) Twin Channel, A SCSI ID %d, B SCSI ID %d, ",
+ p->host_no, p->scsi_id, p->scsi_id_b);
+ }
+ else
+ {
+ char *channel;
+
+ channel = "";
+
+ if ((p->flags & AHC_MULTI_CHANNEL) != 0)
+ {
+ channel = " A";
+
+ if ( (p->flags & (AHC_CHNLB|AHC_CHNLC)) != 0 )
+ {
+ channel = (p->flags & AHC_CHNLB) ? " B" : " C";
+ }
+ }
+ if (p->features & AHC_WIDE)
+ {
+ printk(KERN_INFO "(scsi%d) Wide ", p->host_no);
+ }
+ else
+ {
+ printk(KERN_INFO "(scsi%d) Narrow ", p->host_no);
+ }
+ printk("Channel%s, SCSI ID=%d, ", channel, p->scsi_id);
+ }
+ aic_outb(p, 0, SEQ_FLAGS);
+
+ detect_maxscb(p);
+
+
+ printk("%d/%d SCBs\n", p->scb_data->maxhscbs, p->scb_data->maxscbs);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk(KERN_INFO "(scsi%d) BIOS %sabled, IO Port 0x%lx, IRQ %d\n",
+ p->host_no, (p->flags & AHC_BIOS_ENABLED) ? "en" : "dis",
+ p->base, p->irq);
+ printk(KERN_INFO "(scsi%d) IO Memory at 0x%lx, MMAP Memory at 0x%lx\n",
+ p->host_no, p->mbase, (unsigned long)p->maddr);
+ }
+
+#ifdef CONFIG_PCI
+ /*
+ * Now that we know our instance number, we can set the flags we need to
+ * force termination if need be.
+ */
+ if (aic7xxx_stpwlev != -1)
+ {
+ /*
+ * This option only applies to PCI controllers.
+ */
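+ /*
+  * aic7xxx_stpwlev is treated as a bit mask indexed by controller
+  * instance: for example, a value of 0x05 forces STPWLEV on for instances
+  * 0 and 2 and forces it off for every other instance.
+  */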
+ if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI)
+ {
+ unsigned char devconfig;
+
+#if LINUX_KERNEL_VERSION > KERNEL_VERSION(2,1,92)
+ pci_read_config_byte(p->pdev, DEVCONFIG, &devconfig);
+#else
+ pcibios_read_config_byte(p->pci_bus, p->pci_device_fn,
+ DEVCONFIG, &devconfig);
+#endif
+ if ( (aic7xxx_stpwlev >> p->instance) & 0x01 )
+ {
+ devconfig |= 0x02;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("(scsi%d) Force setting STPWLEV bit\n", p->host_no);
+ }
+ else
+ {
+ devconfig &= ~0x02;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("(scsi%d) Force clearing STPWLEV bit\n", p->host_no);
+ }
+#if LINUX_KERNEL_VERSION > KERNEL_VERSION(2,1,92)
+ pci_write_config_byte(p->pdev, DEVCONFIG, devconfig);
+#else
+ pcibios_write_config_byte(p->pci_bus, p->pci_device_fn,
+ DEVCONFIG, devconfig);
+#endif
+ }
+ }
+#endif
+
+ /*
+ * That took care of devconfig and stpwlev, now for the actual termination
+ * settings.
+ */
+ if (aic7xxx_override_term != -1)
+ {
+ /*
+ * Again, this only applies to PCI controllers. We don't have problems
+ * with the termination on 274x controllers to the best of my knowledge.
+ */
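+ /*
+  * aic7xxx_override_term packs one 4-bit nibble per controller instance:
+  * bit 0 enables SE low byte termination (CFSTERM), bit 1 enables SE
+  * high/wide termination (CFWSTERM), and bits 2-3 enable LVD termination
+  * (CFLVDSTERM) on Ultra2 chips.
+  */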
+ if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI)
+ {
+ unsigned char term_override;
+
+ term_override = ( (aic7xxx_override_term >> (p->instance * 4)) & 0x0f);
+ p->adapter_control &=
+ ~(CFSTERM|CFWSTERM|CFLVDSTERM|CFAUTOTERM|CFSEAUTOTERM);
+ if ( (p->features & AHC_ULTRA2) && (term_override & 0x0c) )
+ {
+ p->adapter_control |= CFLVDSTERM;
+ }
+ if (term_override & 0x02)
+ {
+ p->adapter_control |= CFWSTERM;
+ }
+ if (term_override & 0x01)
+ {
+ p->adapter_control |= CFSTERM;
+ }
+ }
+ }
+
+ if ( (p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1) )
+ {
+ if (p->features & AHC_SPIOCAP)
+ {
+ if ( aic_inb(p, SPIOCAP) & SSPIOCPS )
+ /*
+ * Update the settings in sxfrctl1 to match the termination
+ * settings.
+ */
+ configure_termination(p);
+ }
+ else if ((p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870)
+ {
+ configure_termination(p);
+ }
+ }
+
+ /*
+ * Clear out any possible pending interrupts.
+ */
+ aic7xxx_clear_intstat(p);
+
+ /*
+ * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels
+ */
+ if (p->features & AHC_TWIN)
+ {
+ /* Select channel B */
+ aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL);
+
+ term = ((p->flags & AHC_TERM_ENB_B) != 0) ? STPWEN : 0;
+ aic_outb(p, p->scsi_id_b, SCSIID);
+ scsi_conf = aic_inb(p, SCSICONF + 1);
+ aic_outb(p, DFON | SPIOEN, SXFRCTL0);
+ aic_outb(p, (scsi_conf & ENSPCHK) | STIMESEL | term |
+ ENSTIMER | ACTNEGEN, SXFRCTL1);
+ aic_outb(p, 0, SIMODE0);
+ aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1);
+ aic_outb(p, 0, SCSIRATE);
+
+ /* Select channel A */
+ aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL);
+ }
+
+ term = ((p->flags & AHC_TERM_ENB_SE_LOW) != 0) ? STPWEN : 0;
+ if (p->features & AHC_ULTRA2)
+ aic_outb(p, p->scsi_id, SCSIID_ULTRA2);
+ else
+ aic_outb(p, p->scsi_id, SCSIID);
+ scsi_conf = aic_inb(p, SCSICONF);
+ aic_outb(p, DFON | SPIOEN, SXFRCTL0);
+ aic_outb(p, (scsi_conf & ENSPCHK) | STIMESEL | term |
+ ENSTIMER | ACTNEGEN, SXFRCTL1);
+ aic_outb(p, 0, SIMODE0);
+ aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1);
+ aic_outb(p, 0, SCSIRATE);
+ if ( p->features & AHC_ULTRA2)
+ aic_outb(p, 0, SCSIOFFSET);
+
+ /*
+ * Look at the information that board initialization or the board
+ * BIOS has left us. In the lower four bits of each target's
+ * scratch space any value other than 0 indicates that we should
+ * initiate synchronous transfers. If it's zero, the user or the
+ * BIOS has decided to disable synchronous negotiation to that
+ * target so we don't activate the needsdtr flag.
+ */
+ if ((p->features & (AHC_TWIN|AHC_WIDE)) == 0)
+ {
+ max_targets = 8;
+ }
+ else
+ {
+ max_targets = 16;
+ }
+
+ if (!(aic7xxx_no_reset))
+ {
+ /*
+ * If we reset the bus, then clear the transfer settings, else leave
+ * them be
+ */
+ for (i = 0; i < max_targets; i++)
+ {
+ aic_outb(p, 0, TARG_SCSIRATE + i);
+ if (p->features & AHC_ULTRA2)
+ {
+ aic_outb(p, 0, TARG_OFFSET + i);
+ }
+ p->transinfo[i].cur_offset = 0;
+ p->transinfo[i].cur_period = 0;
+ p->transinfo[i].cur_width = MSG_EXT_WDTR_BUS_8_BIT;
+ }
+
+ /*
+ * Likewise, since we are resetting the bus, clear the Ultra transfer
+ * enables rather than leaving them be.
+ */
+ aic_outb(p, 0, ULTRA_ENB);
+ aic_outb(p, 0, ULTRA_ENB + 1);
+ p->ultraenb = 0;
+ }
+
+ /*
+ * Allocate enough hardware scbs to handle the maximum number of
+ * concurrent transactions we can have. We have to make sure that
+ * the allocated memory is contiguous memory. The Linux kmalloc
+ * routine should only allocate contiguous memory, but note that
+ * this could be a problem if kmalloc() is changed.
+ */
+ {
+ size_t array_size;
+ unsigned int hscb_physaddr;
+ unsigned long temp;
+
+ array_size = p->scb_data->maxscbs * sizeof(struct aic7xxx_hwscb);
+ if (p->scb_data->hscbs == NULL)
+ {
+ /*
+ * A little padding so we can align things the way we want
+ */
+ p->scb_data->hscbs = kmalloc(array_size + 0x1f, GFP_ATOMIC);
+ }
+ if (p->scb_data->hscbs == NULL)
+ {
+ printk("(scsi%d) Unable to allocate hardware SCB array; "
+ "failing detection.\n", p->host_no);
+ p->irq = 0;
+ return(0);
+ }
+ /*
+ * Save the actual kmalloc buffer pointer off, then align our
+ * buffer to a 32 byte boundary
+ */
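+ /*
+  * For example, a kmalloc() return ending in 0x1234 becomes one ending in
+  * 0x1240 once 0x1f is added and the low five bits are masked off.
+  */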
+ p->scb_data->hscb_kmalloc_ptr = p->scb_data->hscbs;
+ temp = (unsigned long)p->scb_data->hscbs;
+ temp += 0x1f;
+ temp &= ~0x1f;
+ p->scb_data->hscbs = (struct aic7xxx_hwscb *)temp;
+ /* At least the control byte of each SCB needs to be 0. */
+ memset(p->scb_data->hscbs, 0, array_size);
+
+ /* Tell the sequencer where it can find the hardware SCB array. */
+ hscb_physaddr = VIRT_TO_BUS(p->scb_data->hscbs);
+ aic_outb(p, hscb_physaddr & 0xFF, HSCB_ADDR);
+ aic_outb(p, (hscb_physaddr >> 8) & 0xFF, HSCB_ADDR + 1);
+ aic_outb(p, (hscb_physaddr >> 16) & 0xFF, HSCB_ADDR + 2);
+ aic_outb(p, (hscb_physaddr >> 24) & 0xFF, HSCB_ADDR + 3);
+
+ /* Set up the fifo areas at the same time */
+ hscb_physaddr = VIRT_TO_BUS(&p->untagged_scbs[0]);
+ aic_outb(p, hscb_physaddr & 0xFF, SCBID_ADDR);
+ aic_outb(p, (hscb_physaddr >> 8) & 0xFF, SCBID_ADDR + 1);
+ aic_outb(p, (hscb_physaddr >> 16) & 0xFF, SCBID_ADDR + 2);
+ aic_outb(p, (hscb_physaddr >> 24) & 0xFF, SCBID_ADDR + 3);
+ }
+
+ /* The Q-FIFOs we just set up are all empty */
+ aic_outb(p, 0, QINPOS);
+ aic_outb(p, 0, KERNEL_QINPOS);
+ aic_outb(p, 0, QOUTPOS);
+
+ if(p->features & AHC_QUEUE_REGS)
+ {
+ aic_outb(p, SCB_QSIZE_256, QOFF_CTLSTA);
+ aic_outb(p, 0, SDSCB_QOFF);
+ aic_outb(p, 0, SNSCB_QOFF);
+ aic_outb(p, 0, HNSCB_QOFF);
+ }
+
+ /*
+ * We don't have any waiting selections or disconnected SCBs.
+ */
+ aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
+ aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
+
+ /*
+ * Message out buffer starts empty
+ */
+ aic_outb(p, MSG_NOOP, MSG_OUT);
+ aic_outb(p, MSG_NOOP, LAST_MSG);
+
+ /*
+ * Set all the other sundry items that haven't been set yet. This mostly
+ * means dumping initial values into a lot of registers simply to make
+ * sure they've been touched and their parity is in a known state.
+ */
+ aic_outb(p, 0, TMODE_CMDADDR);
+ aic_outb(p, 0, TMODE_CMDADDR + 1);
+ aic_outb(p, 0, TMODE_CMDADDR + 2);
+ aic_outb(p, 0, TMODE_CMDADDR + 3);
+ aic_outb(p, 0, TMODE_CMDADDR_NEXT);
+
+ /*
+ * Link us into the list of valid hosts
+ */
+ p->next = first_aic7xxx;
+ first_aic7xxx = p;
+
+ /*
+ * Clear out any possible pending interrupts, again.
+ */
+ aic7xxx_clear_intstat(p);
+
+ /*
+ * Allocate the first set of SCBs for this controller.  This streamlines
+ * code elsewhere in the driver, since having to check for the existence
+ * of SCBs in certain code sections slows things down.  More importantly,
+ * as soon as we register the IRQ for this card we could get an interrupt,
+ * possibly including SCSI_RSTI, and handling it with no SCBs allocated
+ * would likely fault.  Rather than adding checks throughout the reset
+ * code for that invalid state, it's preferable to simply pre-allocate the
+ * first chunk of SCBs here.
+ */
+ aic7xxx_allocate_scb(p);
+
+ /*
+ * Load the sequencer program, then re-enable the board -
+ * resetting the AIC-7770 disables it, leaving the lights
+ * on with nobody home.
+ */
+ aic7xxx_loadseq(p);
+
+ /*
+ * Make sure the AUTOFLUSHDIS bit is *not* set in the SBLKCTL register
+ */
+ aic_outb(p, aic_inb(p, SBLKCTL) & ~AUTOFLUSHDIS, SBLKCTL);
+
+ if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
+ {
+ aic_outb(p, ENABLE, BCTL); /* Enable the boards BUS drivers. */
+ }
+
+ if ( !(aic7xxx_no_reset) )
+ {
+ if (p->features & AHC_TWIN)
+ {
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(KERN_INFO "(scsi%d) Resetting channel B\n", p->host_no);
+ aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL);
+ aic7xxx_reset_current_bus(p);
+ aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL);
+ }
+ /* Reset SCSI bus A. */
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ { /* In case we are a 3940, 3985, or 7895, print the right channel */
+ char *channel = "";
+ if (p->flags & AHC_MULTI_CHANNEL)
+ {
+ channel = " A";
+ if (p->flags & (AHC_CHNLB|AHC_CHNLC))
+ channel = (p->flags & AHC_CHNLB) ? " B" : " C";
+ }
+ printk(KERN_INFO "(scsi%d) Resetting channel%s\n", p->host_no, channel);
+ }
+
+ /*
+ * Some of the new Ultra2 chipsets need a longer delay after a chip
+ * reset than just the init setup creates, so we have to delay here
+ * before we go into a reset in order to make the chips happy.
+ */
+ if (p->features & AHC_ULTRA2)
+ mdelay(250);
+ aic7xxx_reset_current_bus(p);
+
+ /*
+ * Delay for the reset delay.
+ */
+ if (!reset_delay)
+ aic7xxx_delay(AIC7XXX_RESET_DELAY);
+ }
+ else
+ {
+ if (!reset_delay)
+ {
+ printk(KERN_INFO "(scsi%d) Not resetting SCSI bus. Note: Don't use "
+ "the no_reset\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) option unless you have a verifiable need "
+ "for it.\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) The no_reset option is known to break some "
+ "systems,\n", p->host_no);
+ printk(KERN_INFO "(scsi%d) and is not supported by the driver author\n",
+ p->host_no);
+ aic7xxx_delay(AIC7XXX_RESET_DELAY);
+ }
+ }
+
+ /*
+ * Register IRQ with the kernel. Only allow sharing IRQs with
+ * PCI devices.
+ */
+ if (!(p->chip & AHC_PCI))
+ {
+ result = (request_irq(p->irq, do_aic7xxx_isr, 0, "aic7xxx", p));
+ }
+ else
+ {
+ result = (request_irq(p->irq, do_aic7xxx_isr, SA_SHIRQ,
+ "aic7xxx", p));
+ if (result < 0)
+ {
+ result = (request_irq(p->irq, do_aic7xxx_isr, SA_INTERRUPT | SA_SHIRQ,
+ "aic7xxx", p));
+ }
+ }
+ if (result < 0)
+ {
+ printk(KERN_WARNING "(scsi%d) Couldn't register IRQ %d, ignoring "
+ "controller.\n", p->host_no, p->irq);
+ p->irq = 0;
+ return (0);
+ }
+
+ unpause_sequencer(p, /* unpause_always */ TRUE);
+
+ return (found);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_chip_reset
+ *
+ * Description:
+ * Perform a chip reset on the aic7xxx SCSI controller. The controller
+ * is paused upon return.
+ *-F*************************************************************************/
+int
+aic7xxx_chip_reset(struct aic7xxx_host *p)
+{
+ unsigned char sblkctl;
+ int wait;
+
+ /*
+ * For some 274x boards, we must clear the CHIPRST bit and pause
+ * the sequencer. For some reason, this makes the driver work.
+ */
+ aic_outb(p, PAUSE | CHIPRST, HCNTRL);
+
+ /*
+ * In the future, we may call this function as a last resort for
+ * error handling. Let's be nice and not do any unnecessary delays.
+ */
+ wait = 1000; /* 1 second (1000 * 1 msec) */
+ while (--wait && !(aic_inb(p, HCNTRL) & CHIPRSTACK))
+ {
+ mdelay(1); /* 1 msec */
+ }
+
+ pause_sequencer(p);
+
+ sblkctl = aic_inb(p, SBLKCTL) & (SELBUSB|SELWIDE);
+ if (p->chip & AHC_PCI)
+ sblkctl &= ~SELBUSB;
+ switch( sblkctl )
+ {
+ case 0: /* normal narrow card */
+ break;
+ case 2: /* Wide card */
+ p->features |= AHC_WIDE;
+ break;
+ case 8: /* Twin card */
+ p->features |= AHC_TWIN;
+ p->flags |= AHC_MULTI_CHANNEL;
+ break;
+ default: /* hmmm...we don't know what this is */
+ printk(KERN_WARNING "aic7xxx: Unsupported adapter type %d, ignoring.\n",
+ aic_inb(p, SBLKCTL) & 0x0a);
+ return(-1);
+ }
+ return(0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_alloc
+ *
+ * Description:
+ * Allocate and initialize a host structure. Returns NULL upon error
+ * and a pointer to an aic7xxx_host struct upon success.
+ *-F*************************************************************************/
+static struct aic7xxx_host *
+aic7xxx_alloc(Scsi_Host_Template *sht, struct aic7xxx_host *temp)
+{
+ struct aic7xxx_host *p = NULL;
+ struct Scsi_Host *host;
+ int i;
+
+ /*
+ * Allocate a storage area by registering us with the mid-level
+ * SCSI layer.
+ */
+ host = scsi_register(sht, sizeof(struct aic7xxx_host));
+
+ if (host != NULL)
+ {
+ p = (struct aic7xxx_host *) host->hostdata;
+ memset(p, 0, sizeof(struct aic7xxx_host));
+ *p = *temp;
+ p->host = host;
+
+ p->scb_data = kmalloc(sizeof(scb_data_type), GFP_ATOMIC);
+ if (p->scb_data != NULL)
+ {
+ memset(p->scb_data, 0, sizeof(scb_data_type));
+ scbq_init (&p->scb_data->free_scbs);
+ }
+ else
+ {
+ /*
+ * For some reason we don't have enough memory. Free the
+ * allocated memory for the aic7xxx_host struct, and return NULL.
+ */
+ release_region(p->base, MAXREG - MINREG);
+ scsi_unregister(host);
+ return(NULL);
+ }
+ p->host_no = host->host_no;
+ p->tagenable = 0;
+ p->orderedtag = 0;
+ for (i=0; i<MAX_TARGETS; i++)
+ {
+ p->transinfo[i].goal_period = 0;
+ p->transinfo[i].goal_offset = 0;
+ p->transinfo[i].goal_width = MSG_EXT_WDTR_BUS_8_BIT;
+ }
+ DRIVER_LOCK_INIT
+ }
+ return (p);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_free
+ *
+ * Description:
+ * Frees and releases all resources associated with an instance of
+ * the driver (struct aic7xxx_host *).
+ *-F*************************************************************************/
+static void
+aic7xxx_free(struct aic7xxx_host *p)
+{
+ int i;
+
+ /*
+ * Free the allocated hardware SCB space.
+ */
+ if (p->scb_data != NULL)
+ {
+ if (p->scb_data->hscbs != NULL)
+ {
+ kfree(p->scb_data->hscb_kmalloc_ptr);
+ p->scb_data->hscbs = p->scb_data->hscb_kmalloc_ptr = NULL;
+ }
+ /*
+ * Free the driver SCBs. These were allocated on an as-needed
+ * basis. We allocated these in groups depending on how many
+ * we could fit into a given amount of RAM. The tail SCB for
+ * these allocations has a pointer to the alloced area.
+ */
+ for (i = 0; i < p->scb_data->numscbs; i++)
+ {
+ if (p->scb_data->scb_array[i]->kmalloc_ptr != NULL)
+ kfree(p->scb_data->scb_array[i]->kmalloc_ptr);
+ p->scb_data->scb_array[i] = NULL;
+ }
+
+ /*
+ * Free the SCB data area.
+ */
+ kfree(p->scb_data);
+ }
+
+ /*
+ * Free any alloced Scsi_Cmnd structures that might be around for
+ * negotiation purposes....
+ */
+ for (i = 0; i < MAX_TARGETS; i++)
+ {
+ if(p->dev_wdtr_cmnd[i])
+ kfree(p->dev_wdtr_cmnd[i]);
+ if(p->dev_sdtr_cmnd[i])
+ kfree(p->dev_sdtr_cmnd[i]);
+ }
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_load_seeprom
+ *
+ * Description:
+ * Load the seeprom and configure adapter and target settings.
+ * Falls back to leftover BIOS settings or built-in defaults when no
+ * SEEPROM can be read.
+ *-F*************************************************************************/
+static void
+aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1)
+{
+ int have_seeprom = 0;
+ int i, max_targets, mask;
+ unsigned char scsirate, scsi_conf;
+ unsigned short scarray[128];
+ struct seeprom_config *sc = (struct seeprom_config *) scarray;
+
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk(KERN_INFO "aic7xxx: Loading serial EEPROM...");
+ }
+ switch (p->chip)
+ {
+ case (AHC_AIC7770|AHC_EISA): /* None of these adapters have seeproms. */
+ if (aic_inb(p, SCSICONF) & TERM_ENB)
+ p->flags |= AHC_TERM_ENB_A;
+ if ( (p->features & AHC_TWIN) && (aic_inb(p, SCSICONF + 1) & TERM_ENB) )
+ p->flags |= AHC_TERM_ENB_B;
+ aic_outb(p, 0, DISC_DSB);
+ aic_outb(p, 0, DISC_DSB + 1);
+ break;
+
+ case (AHC_AIC7770|AHC_VL):
+ have_seeprom = read_284x_seeprom(p, (struct seeprom_config *) scarray);
+ break;
+
+ default:
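+ /*
+  * Try the SEEPROM as configured; if that fails, retry with the other
+  * chip type (C46 vs. C56/66), and then retry both types with a 128-word
+  * size in case the part is larger than the default.
+  */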
+ have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+ scarray, p->sc_size, p->sc_type);
+ if (!have_seeprom)
+ {
+ if(p->sc_type == C46)
+ have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+ scarray, p->sc_size, C56_66);
+ else
+ have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+ scarray, p->sc_size, C46);
+ }
+ if (!have_seeprom)
+ {
+ p->sc_size = 128;
+ have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+ scarray, p->sc_size, p->sc_type);
+ if (!have_seeprom)
+ {
+ if(p->sc_type == C46)
+ have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+ scarray, p->sc_size, C56_66);
+ else
+ have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)),
+ scarray, p->sc_size, C46);
+ }
+ }
+ break;
+ }
+
+ if (!have_seeprom)
+ {
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("\naic7xxx: No SEEPROM available.\n");
+ }
+ p->flags |= AHC_NEWEEPROM_FMT;
+ if (aic_inb(p, SCSISEQ) == 0)
+ {
+ p->flags |= AHC_USEDEFAULTS;
+ p->flags &= ~AHC_BIOS_ENABLED;
+ p->scsi_id = p->scsi_id_b = 7;
+ *sxfrctl1 |= STPWEN;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Using default values.\n");
+ }
+ }
+ else if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Using leftover BIOS values.\n");
+ }
+ if ( ((p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) && (*sxfrctl1 & STPWEN) )
+ {
+ p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
+ sc->adapter_control &= ~CFAUTOTERM;
+ sc->adapter_control |= CFSTERM | CFWSTERM | CFLVDSTERM;
+ }
+ if (aic7xxx_extended)
+ p->flags |= (AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
+ else
+ p->flags &= ~(AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B);
+ }
+ else
+ {
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("done\n");
+ }
+
+ /*
+ * Note things in our flags
+ */
+ p->flags |= AHC_SEEPROM_FOUND;
+
+ /*
+ * Update the settings in sxfrctl1 to match the termination settings.
+ */
+ *sxfrctl1 = 0;
+
+ /*
+ * Get our SCSI ID from the SEEPROM setting...
+ */
+ p->scsi_id = (sc->brtime_id & CFSCSIID);
+
+ /*
+ * First process the settings that are different between the VLB
+ * and PCI adapter seeproms.
+ */
+ if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7770)
+ {
+ /* VLB adapter seeproms */
+ if (sc->bios_control & CF284XEXTEND)
+ p->flags |= AHC_EXTEND_TRANS_A;
+
+ if (sc->adapter_control & CF284XSTERM)
+ {
+ *sxfrctl1 |= STPWEN;
+ p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
+ }
+ }
+ else
+ {
+ /* PCI adapter seeproms */
+ if (sc->bios_control & CFEXTEND)
+ p->flags |= AHC_EXTEND_TRANS_A;
+ if (sc->bios_control & CFBIOSEN)
+ p->flags |= AHC_BIOS_ENABLED;
+ else
+ p->flags &= ~AHC_BIOS_ENABLED;
+
+ if (sc->adapter_control & CFSTERM)
+ {
+ *sxfrctl1 |= STPWEN;
+ p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH;
+ }
+ }
+ p->sc = *sc;
+ }
+
+ p->discenable = 0;
+
+ /*
+ * Limit to 16 targets just in case. The 2842 for one is known to
+ * blow the max_targets setting; future cards might as well.
+ */
+ max_targets = MIN(sc->max_targets & CFMAXTARG,
+ ((p->features & (AHC_TWIN | AHC_WIDE)) ? 16 : 8));
+
+ if (have_seeprom)
+ {
+ for (i = 0; i < max_targets; i++)
+ {
+ if( ((p->features & AHC_ULTRA) &&
+ !(sc->adapter_control & CFULTRAEN) &&
+ (sc->device_flags[i] & CFSYNCHISULTRA)) ||
+ (sc->device_flags[i] & CFNEWULTRAFORMAT) )
+ {
+ p->flags |= AHC_NEWEEPROM_FMT;
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < max_targets; i++)
+ {
+ mask = (0x01 << i);
+ if (!have_seeprom)
+ {
+ if (aic_inb(p, SCSISEQ) != 0)
+ {
+ /*
+ * OK...the BIOS set things up and left behind the settings we need.
+ * Just make our sc->device_flags[i] entry match what the card has
+ * set for this device.
+ */
+ p->discenable =
+ ~(aic_inb(p, DISC_DSB) | (aic_inb(p, DISC_DSB + 1) << 8) );
+ p->ultraenb =
+ (aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8) );
+ sc->device_flags[i] = (p->discenable & mask) ? CFDISC : 0;
+ if (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER)
+ sc->device_flags[i] |= CFWIDEB;
+ if (p->features & AHC_ULTRA2)
+ {
+ if (aic_inb(p, TARG_OFFSET + i))
+ {
+ sc->device_flags[i] |= CFSYNCH;
+ sc->device_flags[i] |= (aic_inb(p, TARG_SCSIRATE + i) & 0x07);
+ if ( (aic_inb(p, TARG_SCSIRATE + i) & 0x18) == 0x18 )
+ sc->device_flags[i] |= CFSYNCHISULTRA;
+ }
+ }
+ else
+ {
+ if (aic_inb(p, TARG_SCSIRATE + i) & ~WIDEXFER)
+ {
+ sc->device_flags[i] |= CFSYNCH;
+ if (p->features & AHC_ULTRA)
+ sc->device_flags[i] |= ((p->ultraenb & mask) ?
+ CFSYNCHISULTRA : 0);
+ }
+ }
+ }
+ else
+ {
+ /*
+ * Assume the BIOS has NOT been run on this card and nothing between
+ * the card and the devices is configured yet.
+ */
+ sc->device_flags[i] = CFDISC;
+ if (p->features & AHC_WIDE)
+ sc->device_flags[i] |= CFWIDEB;
+ if (p->features & AHC_ULTRA2)
+ sc->device_flags[i] |= 3;
+ else if (p->features & AHC_ULTRA)
+ sc->device_flags[i] |= CFSYNCHISULTRA;
+ sc->device_flags[i] |= CFSYNCH;
+ aic_outb(p, 0, TARG_SCSIRATE + i);
+ if (p->features & AHC_ULTRA2)
+ aic_outb(p, 0, TARG_OFFSET + i);
+ }
+ }
+ if (sc->device_flags[i] & CFDISC)
+ {
+ p->discenable |= mask;
+ }
+ if (p->flags & AHC_NEWEEPROM_FMT)
+ {
+ if ( (sc->device_flags[i] & CFNEWULTRAFORMAT) &&
+ !(p->features & AHC_ULTRA2) )
+ {
+ /*
+ * I know of two different Ultra BIOSes that encode this differently.
+ * The one on the Gigabyte 6BXU motherboard wants flags[i] & CFXFER == 0x03
+ * with CFSYNCHISULTRA set to mean 40MByte/s, while the IBM Netfinity 5000
+ * wants flags[i] & CFXFER == 0x03 with CFSYNCHISULTRA clear to mean
+ * 40MByte/s. So, we treat both as 40MByte/s and the lower speeds be
+ * damned. People will have to select around the conversely mapped lower
+ * speeds in order to get lower speeds on these boards.
+ */
+ if ((sc->device_flags[i] & (CFXFER)) == 0x03)
+ {
+ sc->device_flags[i] &= ~CFXFER;
+ sc->device_flags[i] |= CFSYNCHISULTRA;
+ }
+ }
+ if (sc->device_flags[i] & CFSYNCHISULTRA)
+ {
+ p->ultraenb |= mask;
+ }
+ }
+ else if (sc->adapter_control & CFULTRAEN)
+ {
+ p->ultraenb |= mask;
+ }
+ if ( (sc->device_flags[i] & CFSYNCH) == 0)
+ {
+ sc->device_flags[i] &= ~CFXFER;
+ p->ultraenb &= ~mask;
+ p->transinfo[i].user_offset = 0;
+ p->transinfo[i].user_period = 0;
+ p->transinfo[i].cur_offset = 0;
+ p->transinfo[i].cur_period = 0;
+ p->needsdtr_copy &= ~mask;
+ }
+ else
+ {
+ if (p->features & AHC_ULTRA2)
+ {
+ p->transinfo[i].user_offset = MAX_OFFSET_ULTRA2;
+ p->transinfo[i].cur_offset = aic_inb(p, TARG_OFFSET + i);
+ scsirate = (sc->device_flags[i] & CFXFER) |
+ ((p->ultraenb & mask) ? 0x18 : 0x10);
+ p->transinfo[i].user_period = aic7xxx_find_period(p, scsirate,
+ AHC_SYNCRATE_ULTRA2);
+ p->transinfo[i].cur_period = aic7xxx_find_period(p,
+ aic_inb(p, TARG_SCSIRATE + i),
+ AHC_SYNCRATE_ULTRA2);
+ }
+ else
+ {
+ scsirate = (sc->device_flags[i] & CFXFER) << 4;
+ if (sc->device_flags[i] & CFWIDEB)
+ p->transinfo[i].user_offset = MAX_OFFSET_16BIT;
+ else
+ p->transinfo[i].user_offset = MAX_OFFSET_8BIT;
+ if (p->features & AHC_ULTRA)
+ {
+ short ultraenb;
+ ultraenb = aic_inb(p, ULTRA_ENB) |
+ (aic_inb(p, ULTRA_ENB + 1) << 8);
+ p->transinfo[i].user_period = aic7xxx_find_period(p,
+ scsirate,
+ (p->ultraenb & mask) ?
+ AHC_SYNCRATE_ULTRA :
+ AHC_SYNCRATE_FAST);
+ p->transinfo[i].cur_period = aic7xxx_find_period(p,
+ aic_inb(p, TARG_SCSIRATE + i),
+ (ultraenb & mask) ?
+ AHC_SYNCRATE_ULTRA :
+ AHC_SYNCRATE_FAST);
+ }
+ else
+ p->transinfo[i].user_period = aic7xxx_find_period(p,
+ scsirate, AHC_SYNCRATE_FAST);
+ }
+ p->needsdtr_copy |= mask;
+ }
+ if ( (sc->device_flags[i] & CFWIDEB) && (p->features & AHC_WIDE) )
+ {
+ p->transinfo[i].user_width = MSG_EXT_WDTR_BUS_16_BIT;
+ p->needwdtr_copy |= mask;
+ }
+ else
+ {
+ p->transinfo[i].user_width = MSG_EXT_WDTR_BUS_8_BIT;
+ p->needwdtr_copy &= ~mask;
+ }
+ p->transinfo[i].cur_width =
+ (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER) ?
+ MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
+ }
+ aic_outb(p, ~(p->discenable & 0xFF), DISC_DSB);
+ aic_outb(p, ~((p->discenable >> 8) & 0xFF), DISC_DSB + 1);
+ p->needwdtr = p->needwdtr_copy;
+ p->needsdtr = p->needsdtr_copy;
+ p->wdtr_pending = p->sdtr_pending = 0;
+
+ /*
+ * We set the p->ultraenb from the SEEPROM to begin with, but now we make
+ * it match what is already down in the card. If we are doing a reset
+ * on the card then this will get put back to a default state anyway.
+ * This allows us to not have to pre-emptively negotiate when using the
+ * no_reset option.
+ */
+ if (p->features & AHC_ULTRA)
+ p->ultraenb = aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8);
+
+
+ scsi_conf = (p->scsi_id & HSCSIID);
+
+ if(have_seeprom)
+ {
+ p->adapter_control = sc->adapter_control;
+ p->bios_control = sc->bios_control;
+
+ switch (p->chip & AHC_CHIPID_MASK)
+ {
+ case AHC_AIC7895:
+ case AHC_AIC7896:
+ if (p->adapter_control & CFBPRIMARY)
+ p->flags |= AHC_CHANNEL_B_PRIMARY;
+ default:
+ break;
+ }
+
+ if (sc->adapter_control & CFSPARITY)
+ scsi_conf |= ENSPCHK;
+ }
+ else
+ {
+ scsi_conf |= ENSPCHK | RESET_SCSI;
+ }
+
+ /*
+ * Only set the SCSICONF and SCSICONF + 1 registers if we are a PCI card.
+ * The 2842 and 2742 cards already have these registers set and we don't
+ * want to muck with them since we don't set all the bits they do.
+ */
+ if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI )
+ {
+ /* Set the host ID */
+ aic_outb(p, scsi_conf, SCSICONF);
+ /* In case we are a wide card */
+ aic_outb(p, p->scsi_id, SCSICONF + 1);
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_detect
+ *
+ * Description:
+ * Try to detect and register an Adaptec 7770 or 7870 SCSI controller.
+ *
+ * XXX - This should really be called aic7xxx_probe(). A sequence of
+ * probe(), attach()/detach(), and init() makes more sense than
+ * one do-it-all function. This may be useful when (and if) the
+ * mid-level SCSI code is overhauled.
+ *-F*************************************************************************/
+int
+aic7xxx_detect(Scsi_Host_Template *template)
+{
+ struct aic7xxx_host *temp_p = NULL;
+ struct aic7xxx_host *current_p = NULL;
+ struct aic7xxx_host *list_p = NULL;
+ int found = 0;
+#if defined(__i386__) || defined(__alpha__)
+ ahc_flag_type flags = 0;
+ int type;
+#endif
+ unsigned char sxfrctl1;
+#if defined(__i386__) || defined(__alpha__)
+ unsigned char hcntrl, hostconf;
+ unsigned int slot, base;
+#endif
+
+#ifdef MODULE
+ /*
+ * If we are called as a module, the aic7xxx pointer may not be NULL;
+ * in that case it points to our bootup string, just like on the lilo
+ * command line. If it is not NULL, process the config string with
+ * aic7xxx_setup().
+ */
+ if(aic7xxx)
+ aic7xxx_setup(aic7xxx, NULL);
+ if(dummy_buffer[0] != 'P')
+ printk(KERN_WARNING "aic7xxx: Please read the file /usr/src/linux/drivers"
+ "/scsi/README.aic7xxx\n"
+ "aic7xxx: to see the proper way to specify options to the aic7xxx "
+ "module\n"
+ "aic7xxx: Specifically, don't use any commas when passing arguments to\n"
+ "aic7xxx: insmod or else it might trash certain memory areas.\n");
+#endif
+
+ template->proc_dir = &proc_scsi_aic7xxx;
+ template->sg_tablesize = AIC7XXX_MAX_SG;
+
+
+#if defined(__i386__) || defined(__alpha__)
+ /*
+ * EISA/VL-bus card signature probe.
+ */
+ slot = MINSLOT;
+ while ( (slot <= MAXSLOT) && !(aic7xxx_no_probe) )
+ {
+ base = SLOTBASE(slot) + MINREG;
+
+ if (check_region(base, MAXREG - MINREG))
+ {
+ /*
+ * Some other driver has staked a
+ * claim to this i/o region already.
+ */
+ slot++;
+ continue; /* back to the beginning of the while loop */
+ }
+ flags = 0;
+ type = aic7xxx_probe(slot, base + AHC_HID0, &flags);
+ if (type == -1)
+ {
+ slot++;
+ continue;
+ }
+ temp_p = kmalloc(sizeof(struct aic7xxx_host), GFP_ATOMIC);
+ if (temp_p == NULL)
+ {
+ printk(KERN_WARNING "aic7xxx: Unable to allocate device space.\n");
+ slot++;
+ continue; /* back to the beginning of the while loop */
+ }
+ /*
+ * Pause the card preserving the IRQ type. Allow the operator
+ * to override the IRQ trigger.
+ */
+ if (aic7xxx_irq_trigger == 1)
+ hcntrl = IRQMS; /* Level */
+ else if (aic7xxx_irq_trigger == 0)
+ hcntrl = 0; /* Edge */
+ else
+ hcntrl = inb(base + HCNTRL) & IRQMS; /* Default */
+ memset(temp_p, 0, sizeof(struct aic7xxx_host));
+ temp_p->unpause = hcntrl | INTEN;
+ temp_p->pause = hcntrl | PAUSE | INTEN;
+ temp_p->base = base;
+ temp_p->mbase = 0;
+ temp_p->maddr = 0;
+ temp_p->pci_bus = 0;
+ temp_p->pci_device_fn = slot;
+ aic_outb(temp_p, hcntrl | PAUSE, HCNTRL);
+ while( (aic_inb(temp_p, HCNTRL) & PAUSE) == 0 ) ;
+ if (aic7xxx_chip_reset(temp_p) == -1)
+ temp_p->irq = 0;
+ else
+ temp_p->irq = aic_inb(temp_p, INTDEF) & 0x0F;
+ temp_p->flags |= AHC_PAGESCBS;
+
+ switch (temp_p->irq)
+ {
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 14:
+ case 15:
+ break;
+
+ default:
+ printk(KERN_WARNING "aic7xxx: Host adapter uses unsupported IRQ "
+ "level %d, ignoring.\n", temp_p->irq);
+ kfree(temp_p);
+ slot++;
+ continue; /* back to the beginning of the while loop */
+ }
+
+ /*
+ * We are committed now; everything has been checked and this card
+ * has been found, so now we just set it up.
+ */
+
+ /*
+ * Insert our new struct into the list at the end
+ */
+ if (list_p == NULL)
+ {
+ list_p = current_p = temp_p;
+ }
+ else
+ {
+ current_p = list_p;
+ while (current_p->next != NULL)
+ current_p = current_p->next;
+ current_p->next = temp_p;
+ }
+
+ switch (type)
+ {
+ case 0:
+ temp_p->board_name_index = 2;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at EISA %d\n",
+ board_names[2], slot);
+ /* FALLTHROUGH */
+ case 1:
+ {
+ temp_p->chip = AHC_AIC7770 | AHC_EISA;
+ temp_p->features |= AHC_AIC7770_FE;
+ temp_p->bios_control = aic_inb(temp_p, HA_274_BIOSCTRL);
+
+ /*
+ * Get the primary channel information. Right now we don't
+ * do anything with this, but someday we will be able to inform
+ * the mid-level SCSI code which channel is primary.
+ */
+ if (temp_p->board_name_index == 0)
+ {
+ temp_p->board_name_index = 3;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at EISA %d\n",
+ board_names[3], slot);
+ }
+ if (temp_p->bios_control & CHANNEL_B_PRIMARY)
+ {
+ temp_p->flags |= AHC_CHANNEL_B_PRIMARY;
+ }
+
+ if ((temp_p->bios_control & BIOSMODE) == BIOSDISABLED)
+ {
+ temp_p->flags &= ~AHC_BIOS_ENABLED;
+ }
+ else
+ {
+ temp_p->flags &= ~AHC_USEDEFAULTS;
+ temp_p->flags |= AHC_BIOS_ENABLED;
+ if ( (temp_p->bios_control & 0x20) == 0 )
+ {
+ temp_p->bios_address = 0xcc000;
+ temp_p->bios_address += (0x4000 * (temp_p->bios_control & 0x07));
+ }
+ else
+ {
+ temp_p->bios_address = 0xd0000;
+ temp_p->bios_address += (0x8000 * (temp_p->bios_control & 0x06));
+ }
+ }
+ temp_p->adapter_control = aic_inb(temp_p, SCSICONF) << 8;
+ temp_p->adapter_control |= aic_inb(temp_p, SCSICONF + 1);
+ if (temp_p->features & AHC_WIDE)
+ {
+ temp_p->scsi_id = temp_p->adapter_control & HWSCSIID;
+ temp_p->scsi_id_b = temp_p->scsi_id;
+ }
+ else
+ {
+ temp_p->scsi_id = (temp_p->adapter_control >> 8) & HSCSIID;
+ temp_p->scsi_id_b = temp_p->adapter_control & HSCSIID;
+ }
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ }
+
+ case 2:
+ case 3:
+ temp_p->chip = AHC_AIC7770 | AHC_VL;
+ temp_p->features |= AHC_AIC7770_FE;
+ if (type == 2)
+ temp_p->flags |= AHC_BIOS_ENABLED;
+ else
+ temp_p->flags &= ~AHC_BIOS_ENABLED;
+ if (aic_inb(temp_p, SCSICONF) & TERM_ENB)
+ sxfrctl1 = STPWEN;
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ temp_p->board_name_index = 4;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at VLB %d\n",
+ board_names[2], slot);
+ switch( aic_inb(temp_p, STATUS_2840) & BIOS_SEL )
+ {
+ case 0x00:
+ temp_p->bios_address = 0xe0000;
+ break;
+ case 0x20:
+ temp_p->bios_address = 0xc8000;
+ break;
+ case 0x40:
+ temp_p->bios_address = 0xd0000;
+ break;
+ case 0x60:
+ temp_p->bios_address = 0xd8000;
+ break;
+ default:
+ break; /* can't get here */
+ }
+ break;
+
+ default: /* Won't get here. */
+ break;
+ }
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk(KERN_INFO "aic7xxx: BIOS %sabled, IO Port 0x%lx, IRQ %d (%s)\n",
+ (temp_p->flags & AHC_USEDEFAULTS) ? "dis" : "en", temp_p->base,
+ temp_p->irq,
+ (temp_p->pause & IRQMS) ? "level sensitive" : "edge triggered");
+ printk(KERN_INFO "aic7xxx: Extended translation %sabled.\n",
+ (temp_p->flags & AHC_EXTEND_TRANS_A) ? "en" : "dis");
+ }
+
+ /*
+ * Set the FIFO threshold and the bus off time.
+ */
+ hostconf = aic_inb(temp_p, HOSTCONF);
+ aic_outb(temp_p, hostconf & DFTHRSH, BUSSPD);
+ aic_outb(temp_p, (hostconf << 2) & BOFF, BUSTIME);
+ slot++;
+ found++;
+ }
+
+#endif /* defined(__i386__) || defined(__alpha__) */
+
+#ifdef CONFIG_PCI
+ /*
+ * PCI-bus probe.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ if (pci_present())
+#else
+ if (pcibios_present())
+#endif
+ {
+ struct
+ {
+ unsigned short vendor_id;
+ unsigned short device_id;
+ ahc_chip chip;
+ ahc_flag_type flags;
+ ahc_feature features;
+ int board_name_index;
+ unsigned short seeprom_size;
+ unsigned short seeprom_type;
+ } const aic_pdevs[] = {
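+      /*
+       * Note: the first entry (the 7810 RAID controller) is listed only so
+       * the i == 0 check in the probe loop below can recognize it and print
+       * a "not supported" message; it is never set up as a SCSI controller.
+       */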
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7810, AHC_NONE,
+ AHC_FNONE, AHC_FENONE, 1,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7850, AHC_AIC7850,
+ AHC_PAGESCBS, AHC_AIC7850_FE, 5,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7855, AHC_AIC7850,
+ AHC_PAGESCBS, AHC_AIC7850_FE, 6,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7821, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_3860, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7860, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 7,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7861, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 8,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7870, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 9,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7871, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 10,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7872, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7870_FE, 11,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7873, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7870_FE, 12,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7874, AHC_AIC7870,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 13,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7880, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 14,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7881, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 15,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7882, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7880_FE, 16,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7883, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7880_FE, 17,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7884, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7885, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7886, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7887, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7888, AHC_AIC7880,
+ AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7895, AHC_AIC7895,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7895_FE, 19,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 20,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890B, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 20,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2930U2, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 21,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2940U2, AHC_AIC7890,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7890_FE, 22,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7896, AHC_AIC7896,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7896_FE, 23,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3940U2, AHC_AIC7896,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7896_FE, 24,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3950U2D, AHC_AIC7896,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL,
+ AHC_AIC7896_FE, 25,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_1480A, AHC_AIC7860,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7860_FE, 26,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892A, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892B, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892D, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892P, AHC_AIC7892,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7892_FE, 27,
+ 32, C46 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899A, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899B, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899D, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899P, AHC_AIC7899,
+ AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED,
+ AHC_AIC7899_FE, 28,
+ 32, C56_66 },
+ };
+
+ unsigned short command;
+ unsigned int devconfig, i, oldverbose;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ struct pci_dev *pdev = NULL;
+#else
+ int index;
+ unsigned int piobase, mmapbase;
+ unsigned char pci_bus, pci_devfn, pci_irq;
+#endif
+
+ for (i = 0; i < NUMBER(aic_pdevs); i++)
+ {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pdev = NULL;
+ while ((pdev = pci_find_device(aic_pdevs[i].vendor_id,
+ aic_pdevs[i].device_id,
+ pdev)))
+#else
+ index = 0;
+ while (!(pcibios_find_device(aic_pdevs[i].vendor_id,
+ aic_pdevs[i].device_id,
+ index++, &pci_bus, &pci_devfn)) )
+#endif
+ {
+        if ( i == 0 )  /* We found one, but it's the 7810 RAID controller. */
+ {
+ if (aic7xxx_verbose & (VERBOSE_PROBE|VERBOSE_PROBE2))
+ {
+ printk(KERN_INFO "aic7xxx: The 7810 RAID controller is not "
+ "supported by\n");
+ printk(KERN_INFO " this driver, we are ignoring it.\n");
+ }
+ }
+ else if ( (temp_p = kmalloc(sizeof(struct aic7xxx_host),
+ GFP_ATOMIC)) != NULL )
+ {
+ memset(temp_p, 0, sizeof(struct aic7xxx_host));
+ temp_p->chip = aic_pdevs[i].chip | AHC_PCI;
+ temp_p->flags = aic_pdevs[i].flags;
+ temp_p->features = aic_pdevs[i].features;
+ temp_p->board_name_index = aic_pdevs[i].board_name_index;
+ temp_p->sc_size = aic_pdevs[i].seeprom_size;
+ temp_p->sc_type = aic_pdevs[i].seeprom_type;
+
+ /*
+ * Read sundry information from PCI BIOS.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ temp_p->irq = pdev->irq;
+ temp_p->pdev = pdev;
+ temp_p->pci_bus = pdev->bus->number;
+ temp_p->pci_device_fn = pdev->devfn;
+ temp_p->base = pdev->base_address[0];
+ temp_p->mbase = pdev->base_address[1];
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pdev->devfn),
+ PCI_FUNC(temp_p->pdev->devfn));
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial PCI_COMMAND value was 0x%x\n",
+ (int)command);
+ }
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_MASTER |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#else
+ command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#endif
+ if (aic7xxx_pci_parity == 0)
+ command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig);
+ }
+ devconfig |= 0x80000000;
+ if ((aic7xxx_pci_parity == 0) || (aic7xxx_pci_parity == -1))
+ {
+ devconfig &= ~(0x00000008);
+ }
+ else
+ {
+ devconfig |= 0x00000008;
+ }
+ pci_write_config_dword(pdev, DEVCONFIG, devconfig);
+#endif /* AIC7XXX_STRICT_PCI_SETUP */
+#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92) */
+ temp_p->pci_bus = pci_bus;
+ temp_p->pci_device_fn = pci_devfn;
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk("aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pci_device_fn),
+ PCI_FUNC(temp_p->pci_device_fn));
+ pcibios_read_config_byte(pci_bus, pci_devfn, PCI_INTERRUPT_LINE,
+ &pci_irq);
+ temp_p->irq = pci_irq;
+ pcibios_read_config_dword(pci_bus, pci_devfn, PCI_BASE_ADDRESS_0,
+ &piobase);
+ temp_p->base = piobase;
+ pcibios_read_config_dword(pci_bus, pci_devfn, PCI_BASE_ADDRESS_1,
+ &mmapbase);
+ temp_p->mbase = mmapbase;
+ pcibios_read_config_word(pci_bus, pci_devfn, PCI_COMMAND, &command);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial PCI_COMMAND value was 0x%x\n",
+ (int)command);
+ }
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_MASTER |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#else
+ command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+#endif
+ if (aic7xxx_pci_parity == 0)
+ command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pcibios_write_config_word(pci_bus, pci_devfn, PCI_COMMAND, command);
+#ifdef AIC7XXX_STRICT_PCI_SETUP
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG, &devconfig);
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ {
+ printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig);
+ }
+ devconfig |= 0x80000000;
+ if ((aic7xxx_pci_parity == 0) || (aic7xxx_pci_parity == -1))
+ {
+ devconfig &= ~(0x00000008);
+ }
+ else
+ {
+ devconfig |= 0x00000008;
+ }
+ pcibios_write_config_dword(pci_bus, pci_devfn, DEVCONFIG, devconfig);
+#endif /* AIC7XXX_STRICT_PCI_SETUP */
+#endif  /* LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92) */
+
+ /*
+ * The first bit (LSB) of PCI_BASE_ADDRESS_0 is always set, so
+ * we mask it off.
+ */
+ temp_p->base &= PCI_BASE_ADDRESS_IO_MASK;
+ temp_p->mbase &= PCI_BASE_ADDRESS_MEM_MASK;
+ temp_p->unpause = INTEN;
+ temp_p->pause = temp_p->unpause | PAUSE;
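+          /*
+           * Note: unlike the EISA/VLB probe above there is no IRQMS handling
+           * here, presumably because PCI interrupts are level triggered.
+           */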
+ if ( ((temp_p->base == 0) &&
+ (temp_p->mbase == 0)) ||
+ (temp_p->irq == 0) )
+ {
+ printk("aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pci_device_fn),
+ PCI_FUNC(temp_p->pci_device_fn));
+ printk("aic7xxx: Controller disabled by BIOS, ignoring.\n");
+ kfree(temp_p);
+ temp_p = NULL;
+ continue;
+ }
+
+#ifdef MMAPIO
+ {
+ unsigned long page_offset, base;
+
+ base = temp_p->mbase & PAGE_MASK;
+ page_offset = temp_p->mbase - base;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0)
+ temp_p->maddr = ioremap_nocache(base, page_offset + 256);
+#else
+ temp_p->maddr = vremap(base, page_offset + 256);
+#endif
+ if(temp_p->maddr)
+ {
+ temp_p->maddr += page_offset;
+ /*
+             * We need to check the I/O with the MMAPed address.  Some machine
+             * and controller combinations simply fail to work with MMAPed I/O.
+ */
+ if(aic_inb(temp_p, HCNTRL) == 0xff)
+ {
+ /*
+ * OK.....we failed our test....go back to programmed I/O
+ */
+ printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d\n",
+ board_names[aic_pdevs[i].board_name_index],
+ PCI_SLOT(temp_p->pci_device_fn),
+ PCI_FUNC(temp_p->pci_device_fn));
+ printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to "
+ "Programmed I/O.\n");
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0)
+ iounmap((void *) (((unsigned long) temp_p->maddr) & PAGE_MASK));
+#else
+ vfree((void *) (((unsigned long) temp_p->maddr) & PAGE_MASK));
+#endif
+ temp_p->maddr = 0;
+ }
+ }
+ }
+#endif
+
+ /*
+ * We HAVE to make sure the first pause_sequencer() and all other
+ * subsequent I/O that isn't PCI config space I/O takes place
+ * after the MMAPed I/O region is configured and tested. The
+         * problem is that the PowerPC architecture doesn't support
+         * programmed I/O at all, so the MMAPed I/O region has to be set
+         * up for this pause to work at all on those machines.
+ */
+ pause_sequencer(temp_p);
+
+ /*
+ * Clear out any pending PCI error status messages. Also set
+ * verbose to 0 so that we don't emit strange PCI error messages
+ * while cleaning out the current status bits.
+ */
+ oldverbose = aic7xxx_verbose;
+ aic7xxx_verbose = 0;
+ aic7xxx_pci_intr(temp_p);
+ aic7xxx_verbose = oldverbose;
+
+ temp_p->bios_address = 0;
+
+ /*
+ * Remember how the card was setup in case there is no seeprom.
+ */
+ if (temp_p->features & AHC_ULTRA2)
+ temp_p->scsi_id = aic_inb(temp_p, SCSIID_ULTRA2) & OID;
+ else
+ temp_p->scsi_id = aic_inb(temp_p, SCSIID) & OID;
+ /*
+ * Get current termination setting
+ */
+ sxfrctl1 = aic_inb(temp_p, SXFRCTL1) & STPWEN;
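+          /*
+           * sxfrctl1 is preserved across the chip reset below and handed to
+           * aic7xxx_load_seeprom() so the termination setting isn't lost.
+           */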
+
+ if (aic7xxx_chip_reset(temp_p) == -1)
+ {
+ kfree(temp_p);
+ temp_p = NULL;
+ continue;
+ }
+
+ /*
+ * We need to set the CHNL? assignments before loading the SEEPROM
+ * The 3940 and 3985 cards (original stuff, not any of the later
+ * stuff) are 7870 and 7880 class chips. The Ultra2 stuff falls
+ * under 7896 and 7897. The 7895 is in a class by itself :)
+ */
+ switch (temp_p->chip & AHC_CHIPID_MASK)
+ {
+            case AHC_AIC7870:        /* 3940 / 3985 */
+            case AHC_AIC7880:        /* 3940 UW / 3985 UW */
+ if(temp_p->flags & AHC_MULTI_CHANNEL)
+ {
+ switch(PCI_SLOT(temp_p->pci_device_fn))
+ {
+ case 5:
+ temp_p->flags |= AHC_CHNLB;
+ break;
+ case 8:
+ temp_p->flags |= AHC_CHNLB;
+ break;
+ case 12:
+ temp_p->flags |= AHC_CHNLC;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ case AHC_AIC7895: /* 7895 */
+ case AHC_AIC7896: /* 7896/7 */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ if (PCI_FUNC(temp_p->pdev->devfn) != 0)
+ {
+ temp_p->flags |= AHC_CHNLB;
+ }
+ /*
+ * The 7895 is the only chipset that sets the SCBSIZE32 param
+ * in the DEVCONFIG register. The Ultra2 chipsets use
+ * the DSCOMMAND0 register instead.
+ */
+ if ((temp_p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+ devconfig |= SCBSIZE32;
+ pci_write_config_dword(pdev, DEVCONFIG, devconfig);
+ }
+#else
+ if (PCI_FUNC(temp_p->pci_device_fn) != 0)
+ {
+ temp_p->flags |= AHC_CHNLB;
+ }
+ /*
+ * The 7895 is the only chipset that sets the SCBSIZE32 param
+ * in the DEVCONFIG register. The Ultra2 chipsets use
+ * the DSCOMMAND0 register instead.
+ */
+ if ((temp_p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
+ {
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ &devconfig);
+ devconfig |= SCBSIZE32;
+ pcibios_write_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ devconfig);
+ }
+#endif
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Loading of the SEEPROM needs to come after we've set the flags
+           * to indicate possible CHNLB and CHNLC assignments.  Otherwise,
+           * on 394x and 398x cards we'll end up reading the wrong settings
+           * for channels B and C.
+ */
+ switch (temp_p->chip & AHC_CHIPID_MASK)
+ {
+ case AHC_AIC7890:
+ case AHC_AIC7896:
+ aic_outb(temp_p, 0, SCAMCTL);
+ /*
+             * We used to set DPARCKEN in this register, but after talking
+             * to a tech from Adaptec, I found out they don't use that
+             * particular bit in their own register settings.  Combine that
+             * with the fact that we were seeing Data-Path Parity Errors on
+             * things we shouldn't see them on, and I think there is a bug in
+             * the silicon; the way to work around it is to disable this
+             * particular check.  Also, this bug only showed up on certain
+             * commands, so it seems to be pattern related.  The commands we
+             * would typically send as a linux TEST_UNIT_READY or INQUIRY
+             * command could trigger it, while regular commands that actually
+             * made reasonable use of the SG array capabilities seemed not to
+             * cause the problem.
+ */
+ /*
+ aic_outb(temp_p, aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | DPARCKEN | MPARCKEN |
+ USCBSIZE32 | CIOPARCKEN,
+ DSCOMMAND0);
+ */
+ aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | MPARCKEN | USCBSIZE32 |
+ CIOPARCKEN) & ~DPARCKEN, DSCOMMAND0);
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ case AHC_AIC7850:
+ case AHC_AIC7860:
+ /*
+ * Set the DSCOMMAND0 register on these cards different from
+ * on the 789x cards. Also, read the SEEPROM as well.
+ */
+ aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | MPARCKEN) & ~DPARCKEN,
+ DSCOMMAND0);
+ /* FALLTHROUGH */
+ default:
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ case AHC_AIC7880:
+ /*
+ * Check the rev of the chipset before we change DSCOMMAND0
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+#else
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ &devconfig);
+#endif
+ if ((devconfig & 0xff) >= 1)
+ {
+ aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
+ CACHETHEN | MPARCKEN) & ~DPARCKEN,
+ DSCOMMAND0);
+ }
+ aic7xxx_load_seeprom(temp_p, &sxfrctl1);
+ break;
+ }
+
+
+ /*
+ * and then we need another switch based on the type in order to
+ * make sure the channel B primary flag is set properly on 7895
+ * controllers....Arrrgggghhh!!! We also have to catch the fact
+ * that when you disable the BIOS on the 7895 on the Intel DK440LX
+ * motherboard, and possibly others, it only sets the BIOS disabled
+ * bit on the A channel...I think I'm starting to lean towards
+ * going postal....
+ */
+ switch(temp_p->chip & AHC_CHIPID_MASK)
+ {
+ case AHC_AIC7895:
+ case AHC_AIC7896:
+ current_p = list_p;
+ while(current_p != NULL)
+ {
+ if ( (current_p->pci_bus == temp_p->pci_bus) &&
+ (PCI_SLOT(current_p->pci_device_fn) ==
+ PCI_SLOT(temp_p->pci_device_fn)) )
+ {
+ if ( PCI_FUNC(current_p->pci_device_fn) == 0 )
+ {
+ temp_p->flags |=
+ (current_p->flags & AHC_CHANNEL_B_PRIMARY);
+ temp_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS);
+ temp_p->flags |=
+ (current_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS));
+ }
+ else
+ {
+ current_p->flags |=
+ (temp_p->flags & AHC_CHANNEL_B_PRIMARY);
+ current_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS);
+ current_p->flags |=
+ (temp_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS));
+ }
+ }
+ current_p = current_p->next;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * We only support external SCB RAM on the 7895/6/7 chipsets.
+           * We could support it on the 7890/1 easily enough, but I don't
+ * know of any 7890/1 based cards that have it. I do know
+ * of 7895/6/7 cards that have it and they work properly.
+ */
+ switch(temp_p->chip & AHC_CHIPID_MASK)
+ {
+ default:
+ break;
+ case AHC_AIC7895:
+ case AHC_AIC7896:
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
+#else
+ pcibios_read_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ &devconfig);
+#endif
+ if (temp_p->features & AHC_ULTRA2)
+ {
+ if (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2)
+ {
+ aic_outb(temp_p,
+ aic_inb(temp_p, DSCOMMAND0) & ~SCBRAMSEL_ULTRA2,
+ DSCOMMAND0);
+ temp_p->flags |= AHC_EXTERNAL_SRAM;
+ devconfig |= EXTSCBPEN;
+ }
+ }
+ else if (devconfig & RAMPSM)
+ {
+ devconfig &= ~SCBRAMSEL;
+ devconfig |= EXTSCBPEN;
+ temp_p->flags |= AHC_EXTERNAL_SRAM;
+ }
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_write_config_dword(pdev, DEVCONFIG, devconfig);
+#else
+ pcibios_write_config_dword(pci_bus, pci_devfn, DEVCONFIG,
+ devconfig);
+#endif
+ if ( (temp_p->flags & AHC_EXTERNAL_SRAM) &&
+ (temp_p->flags & AHC_CHNLB) )
+ aic_outb(temp_p, 1, CCSCBBADDR);
+ break;
+ }
+
+ /*
+ * Take the LED out of diagnostic mode
+ */
+ aic_outb(temp_p,
+ (aic_inb(temp_p, SBLKCTL) & ~(DIAGLEDEN | DIAGLEDON)),
+ SBLKCTL);
+
+ /*
+ * We don't know where this is set in the SEEPROM or by the
+ * BIOS, so we default to 100%. On Ultra2 controllers, use 75%
+ * instead.
+ */
+ if (temp_p->features & AHC_ULTRA2)
+ {
+ aic_outb(temp_p, RD_DFTHRSH_75 | WR_DFTHRSH_75, DFF_THRSH);
+ }
+ else
+ {
+ aic_outb(temp_p, DFTHRSH_100, DSPCISTATUS);
+ }
+
+ if ( list_p == NULL )
+ {
+ list_p = current_p = temp_p;
+ }
+ else
+ {
+ current_p = list_p;
+ while(current_p->next != NULL)
+ current_p = current_p->next;
+ current_p->next = temp_p;
+ }
+ temp_p->next = NULL;
+ found++;
+ } /* Found an Adaptec PCI device. */
+ else /* Well, we found one, but we couldn't get any memory */
+ {
+ printk("aic7xxx: Found <%s>\n",
+ board_names[aic_pdevs[i].board_name_index]);
+ printk(KERN_INFO "aic7xxx: Unable to allocate device memory, "
+ "skipping.\n");
+ }
+ } /* while(pdev=....) */
+ } /* for PCI_DEVICES */
+ } /* PCI BIOS present */
+#endif /* CONFIG_PCI */
+ /*
+ * Now, we re-order the probed devices by BIOS address and BUS class.
+ * In general, we follow this algorithm to make the adapters show up
+ * in the same order under linux that the computer finds them.
+ * 1: All VLB/EISA cards with BIOS_ENABLED first, according to BIOS
+ * address, going from lowest to highest.
+ * 2: All PCI controllers with BIOS_ENABLED next, according to BIOS
+ * address, going from lowest to highest.
+ * 3: Remaining VLB/EISA controllers going in slot order.
+ *  4: Remaining PCI controllers, going in PCI device order (reversible)
+ */
+
+ {
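+    /*
+     * sort_list[0]: VLB/EISA adapters with the BIOS enabled
+     * sort_list[1]: PCI adapters with the BIOS enabled
+     * sort_list[2]: remaining VLB/EISA adapters
+     * sort_list[3]: remaining PCI adapters
+     */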
+ struct aic7xxx_host *sort_list[4] = { NULL, NULL, NULL, NULL };
+ struct aic7xxx_host *vlb, *pci;
+ struct aic7xxx_host *prev_p;
+ struct aic7xxx_host *p;
+ unsigned char left;
+
+ prev_p = vlb = pci = NULL;
+
+ temp_p = list_p;
+ while (temp_p != NULL)
+ {
+ switch(temp_p->chip & ~AHC_CHIPID_MASK)
+ {
+ case AHC_EISA:
+ case AHC_VL:
+ {
+ p = temp_p;
+ if (p->flags & AHC_BIOS_ENABLED)
+ vlb = sort_list[0];
+ else
+ vlb = sort_list[2];
+
+ if (vlb == NULL)
+ {
+ vlb = temp_p;
+ temp_p = temp_p->next;
+ vlb->next = NULL;
+ }
+ else
+ {
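+            /*
+             * Walk the list to find the insertion point, keeping the list
+             * sorted by ascending BIOS address.
+             */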
+ current_p = vlb;
+ prev_p = NULL;
+ while ( (current_p != NULL) &&
+ (current_p->bios_address < temp_p->bios_address))
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ if (prev_p != NULL)
+ {
+ prev_p->next = temp_p;
+ temp_p = temp_p->next;
+ prev_p->next->next = current_p;
+ }
+ else
+ {
+ vlb = temp_p;
+ temp_p = temp_p->next;
+ vlb->next = current_p;
+ }
+ }
+
+ if (p->flags & AHC_BIOS_ENABLED)
+ sort_list[0] = vlb;
+ else
+ sort_list[2] = vlb;
+
+ break;
+ }
+ default: /* All PCI controllers fall through to default */
+ {
+
+ p = temp_p;
+ if (p->flags & AHC_BIOS_ENABLED)
+ pci = sort_list[1];
+ else
+ pci = sort_list[3];
+
+ if (pci == NULL)
+ {
+ pci = temp_p;
+ temp_p = temp_p->next;
+ pci->next = NULL;
+ }
+ else
+ {
+ current_p = pci;
+ prev_p = NULL;
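+              /*
+               * Walk the list to find the insertion point, ordering entries
+               * by (pci_bus << 8) | slot; the order is reversed when the
+               * reverse_scan option is set.
+               */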
+ if (!aic7xxx_reverse_scan)
+ {
+ while ( (current_p != NULL) &&
+ ( (PCI_SLOT(current_p->pci_device_fn) |
+ (current_p->pci_bus << 8)) <
+ (PCI_SLOT(temp_p->pci_device_fn) |
+ (temp_p->pci_bus << 8)) ) )
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ else
+ {
+ while ( (current_p != NULL) &&
+ ( (PCI_SLOT(current_p->pci_device_fn) |
+ (current_p->pci_bus << 8)) >
+ (PCI_SLOT(temp_p->pci_device_fn) |
+ (temp_p->pci_bus << 8)) ) )
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ /*
+             * Are we dealing with a multi-channel card (e.g. a 398x) where
+             * we need to sort the channels as well?  If so, the
+             * bios_address values should be the same.
+ */
+ if ( (current_p) && (temp_p->flags & AHC_MULTI_CHANNEL) &&
+ (temp_p->pci_bus == current_p->pci_bus) &&
+ (PCI_SLOT(temp_p->pci_device_fn) ==
+ PCI_SLOT(current_p->pci_device_fn)) )
+ {
+ if (temp_p->flags & AHC_CHNLB)
+ {
+ if ( !(temp_p->flags & AHC_CHANNEL_B_PRIMARY) )
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ else
+ {
+ if (temp_p->flags & AHC_CHANNEL_B_PRIMARY)
+ {
+ prev_p = current_p;
+ current_p = current_p->next;
+ }
+ }
+ }
+ if (prev_p != NULL)
+ {
+ prev_p->next = temp_p;
+ temp_p = temp_p->next;
+ prev_p->next->next = current_p;
+ }
+ else
+ {
+ pci = temp_p;
+ temp_p = temp_p->next;
+ pci->next = current_p;
+ }
+ }
+
+ if (p->flags & AHC_BIOS_ENABLED)
+ sort_list[1] = pci;
+ else
+ sort_list[3] = pci;
+
+ break;
+ }
+ } /* End of switch(temp_p->type) */
+ } /* End of while (temp_p != NULL) */
+ /*
+ * At this point, the cards have been broken into 4 sorted lists, now
+ * we run through the lists in order and register each controller
+ */
+ {
+ int i;
+
+ left = found;
+ for (i=0; i<NUMBER(sort_list); i++)
+ {
+ temp_p = sort_list[i];
+ while(temp_p != NULL)
+ {
+ template->name = board_names[temp_p->board_name_index];
+ p = aic7xxx_alloc(template, temp_p);
+ if (p != NULL)
+ {
+ p->instance = found - left;
+ if (aic7xxx_register(template, p, (--left)) == 0)
+ {
+ found--;
+ aic7xxx_release(p->host);
+ scsi_unregister(p->host);
+ }
+ else if (aic7xxx_dump_card)
+ {
+ pause_sequencer(p);
+ aic7xxx_print_card(p);
+ aic7xxx_print_scratch_ram(p);
+ unpause_sequencer(p, TRUE);
+ }
+ }
+ current_p = temp_p;
+ temp_p = (struct aic7xxx_host *)temp_p->next;
+ kfree(current_p);
+ }
+ }
+ }
+ }
+ return (found);
+}
+
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_negotiation_complete
+ *
+ * Description:
+ * Handle completion events for our Negotiation commands. Clear out the
+ * struct and get it ready for its next use.
+ *-F*************************************************************************/
+static void
+aic7xxx_negotiation_complete(Scsi_Cmnd *cmd)
+{
+ return;
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_build_negotiation_command
+ *
+ * Description:
+ * Build a Scsi_Cmnd structure to perform negotiation with or else send
+ * a pre-built command specifically for this purpose.
+ *-F*************************************************************************/
+static void
+aic7xxx_build_negotiation_cmnd(struct aic7xxx_host *p, Scsi_Cmnd *old_cmd,
+ int tindex)
+{
+
+ if ( (p->needwdtr & (1<<tindex)) && !(p->wdtr_pending & (1<<tindex)) )
+ {
+ if(p->dev_wdtr_cmnd[tindex] == NULL)
+ {
+ Scsi_Cmnd *cmd;
+
+ if (!(p->dev_wdtr_cmnd[tindex] = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC)) )
+ {
+ return;
+ }
+ cmd = p->dev_wdtr_cmnd[tindex];
+ memset(cmd, 0, sizeof(Scsi_Cmnd));
+ memcpy(cmd, old_cmd, sizeof(Scsi_Cmnd));
+ memset(&cmd->cmnd[0], 0, sizeof(cmd->cmnd));
+ memset(&cmd->data_cmnd[0], 0, sizeof(cmd->data_cmnd));
+ cmd->lun = 0;
+ cmd->request_bufflen = 0;
+ cmd->request_buffer = NULL;
+ cmd->use_sg = cmd->old_use_sg = cmd->sglist_len = 0;
+ cmd->bufflen = 0;
+ cmd->buffer = NULL;
+ cmd->underflow = 0;
+ cmd->cmd_len = 6;
+ }
+ /*
+     * Before sending this thing out, we also make the cmd->next pointer
+     * point to the real command so we can stuff any possible SENSE data
+     * into the real command instead of this fake command.  This has to be
+ * done each time the command is built, not just the first time, hence
+ * it's outside of the above if()...
+ */
+ p->dev_wdtr_cmnd[tindex]->next = old_cmd;
+ aic7xxx_queue(p->dev_wdtr_cmnd[tindex],
+ aic7xxx_negotiation_complete);
+ }
+ else if ( (p->needsdtr & (1<<tindex)) && !(p->sdtr_pending & (1<<tindex)) &&
+ !(p->wdtr_pending & (1<<tindex)) )
+ {
+ if(p->dev_sdtr_cmnd[tindex] == NULL)
+ {
+ Scsi_Cmnd *cmd;
+
+ if (!(p->dev_sdtr_cmnd[tindex] = kmalloc(sizeof(Scsi_Cmnd), GFP_ATOMIC)) )
+ {
+ return;
+ }
+ cmd = p->dev_sdtr_cmnd[tindex];
+ memset(cmd, 0, sizeof(Scsi_Cmnd));
+ memcpy(cmd, old_cmd, sizeof(Scsi_Cmnd));
+ memset(&cmd->cmnd[0], 0, sizeof(cmd->cmnd));
+ memset(&cmd->data_cmnd[0], 0, sizeof(cmd->data_cmnd));
+ cmd->lun = 0;
+ cmd->request_bufflen = 0;
+ cmd->request_buffer = NULL;
+ cmd->use_sg = cmd->old_use_sg = cmd->sglist_len = 0;
+ cmd->bufflen = 0;
+ cmd->buffer = NULL;
+ cmd->underflow = 0;
+ cmd->cmd_len = 6;
+ }
+ /*
+     * Before sending this thing out, we also make the cmd->next pointer
+     * point to the real command so we can stuff any possible SENSE data
+     * into the real command instead of this fake command.  This has to be
+ * done each time the command is built, not just the first time, hence
+ * it's outside of the above if()...
+ */
+ p->dev_sdtr_cmnd[tindex]->next = old_cmd;
+ aic7xxx_queue(p->dev_sdtr_cmnd[tindex],
+ aic7xxx_negotiation_complete);
+ }
+}
+
+#endif
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_print_scb
+ *
+ * Description:
+ * Dump the byte codes for an about to be sent SCB.
+ *-F*************************************************************************/
+static void
+aic7xxx_print_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
+{
+ int i;
+ unsigned char *x;
+
+ x = (unsigned char *)&scb->hscb->control;
+
+ for(i=0; i<32; i++)
+ {
+ printk("%02x ", x[i]);
+ }
+ printk("\n");
+}
+#endif
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_buildscb
+ *
+ * Description:
+ * Build a SCB.
+ *-F*************************************************************************/
+static void
+aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
+ struct aic7xxx_scb *scb)
+{
+ unsigned short mask;
+ struct aic7xxx_hwscb *hscb;
+
+ mask = (0x01 << TARGET_INDEX(cmd));
+ hscb = scb->hscb;
+
+ /*
+ * Setup the control byte if we need negotiation and have not
+ * already requested it.
+ */
+ hscb->control = 0;
+ scb->tag_action = 0;
+ if (p->discenable & mask)
+ {
+ hscb->control |= DISCENB;
+ if (p->tagenable & mask)
+ {
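+      /*
+       * Every 200th command to a tagged device goes out as an ordered tag
+       * (when orderedtag is set for the target); all others use simple tags.
+       */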
+ cmd->tag = hscb->tag;
+ p->dev_commands_sent[TARGET_INDEX(cmd)]++;
+ if (p->dev_commands_sent[TARGET_INDEX(cmd)] < 200)
+ {
+ hscb->control |= MSG_SIMPLE_Q_TAG;
+ scb->tag_action = MSG_SIMPLE_Q_TAG;
+ }
+ else
+ {
+ if (p->orderedtag & mask)
+ {
+ hscb->control |= MSG_ORDERED_Q_TAG;
+ scb->tag_action = MSG_ORDERED_Q_TAG;
+ }
+ else
+ {
+ hscb->control |= MSG_SIMPLE_Q_TAG;
+ scb->tag_action = MSG_SIMPLE_Q_TAG;
+ }
+ p->dev_commands_sent[TARGET_INDEX(cmd)] = 0;
+ }
+ }
+ }
+ if (p->dev_flags[TARGET_INDEX(cmd)] & DEVICE_SCANNED)
+ {
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+ if ( (p->needwdtr & mask) && !(p->wdtr_pending & mask) )
+ {
+ if (cmd == p->dev_wdtr_cmnd[TARGET_INDEX(cmd)])
+ {
+ p->wdtr_pending |= mask;
+ scb->flags |= SCB_MSGOUT_WDTR;
+ hscb->control &= DISCENB;
+ hscb->control |= MK_MESSAGE;
+ scb->tag_action = 0;
+ }
+ else
+ {
+ aic7xxx_build_negotiation_cmnd(p, cmd, TARGET_INDEX(cmd));
+ }
+ }
+ else if ( (p->needsdtr & mask) && !(p->sdtr_pending & mask) &&
+ !(p->wdtr_pending & mask) )
+ {
+ if (cmd == p->dev_sdtr_cmnd[TARGET_INDEX(cmd)])
+ {
+ p->sdtr_pending |= mask;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ hscb->control &= DISCENB;
+ hscb->control |= MK_MESSAGE;
+ scb->tag_action = 0;
+ }
+ else if (cmd != p->dev_wdtr_cmnd[TARGET_INDEX(cmd)])
+ {
+ aic7xxx_build_negotiation_cmnd(p, cmd, TARGET_INDEX(cmd));
+ }
+ }
+#else
+ if ( (p->needwdtr & mask) && !(p->wdtr_pending & mask) &&
+ !(p->sdtr_pending & mask) && (cmd->lun == 0) )
+ {
+ p->wdtr_pending |= mask;
+ scb->flags |= SCB_MSGOUT_WDTR;
+ hscb->control &= DISCENB;
+ hscb->control |= MK_MESSAGE;
+ scb->tag_action = 0;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Building WDTR command.\n", p->host_no,
+ CTL_OF_CMD(cmd));
+#endif
+ }
+ else if ( (p->needsdtr & mask) && !(p->wdtr_pending & mask) &&
+ !(p->sdtr_pending & mask) && (cmd->lun == 0) )
+ {
+ p->sdtr_pending |= mask;
+ scb->flags |= SCB_MSGOUT_SDTR;
+ hscb->control &= DISCENB;
+ hscb->control |= MK_MESSAGE;
+ scb->tag_action = 0;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (aic7xxx_verbose > 0xffff)
+ printk(INFO_LEAD "Building SDTR command.\n", p->host_no,
+ CTL_OF_CMD(cmd));
+#endif
+ }
+#endif
+ }
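+  /* TCL byte: target ID in bits 7:4, channel in bit 3, LUN in bits 2:0. */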
+ hscb->target_channel_lun = ((cmd->target << 4) & 0xF0) |
+ ((cmd->channel & 0x01) << 3) | (cmd->lun & 0x07);
+
+ /*
+ * The interpretation of request_buffer and request_bufflen
+ * changes depending on whether or not use_sg is zero; a
+ * non-zero use_sg indicates the number of elements in the
+ * scatter-gather array.
+ */
+
+ /*
+ * XXX - this relies on the host data being stored in a
+ * little-endian format.
+ */
+ hscb->SCSI_cmd_length = cmd->cmd_len;
+ hscb->SCSI_cmd_pointer = cpu_to_le32(VIRT_TO_BUS(cmd->cmnd));
+
+ if (cmd->use_sg)
+ {
+ struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */
+
+ /*
+ * We must build an SG list in adapter format, as the kernel's SG list
+ * cannot be used directly because of data field size (__alpha__)
+     * differences, and because the kernel SG list uses virtual addresses where
+ * we need physical addresses.
+ */
+ int i;
+
+ sg = (struct scatterlist *)cmd->request_buffer;
+ scb->sg_length = 0;
+ /*
+ * Copy the segments into the SG array. NOTE!!! - We used to
+ * have the first entry both in the data_pointer area and the first
+ * SG element. That has changed somewhat. We still have the first
+ * entry in both places, but now we download the address of
+ * scb->sg_list[1] instead of 0 to the sg pointer in the hscb.
+ */
+ for (i = 0; i < cmd->use_sg; i++)
+ {
+ scb->sg_list[i].address = cpu_to_le32(VIRT_TO_BUS(sg[i].address));
+ scb->sg_list[i].length = cpu_to_le32(sg[i].length);
+ scb->sg_length += sg[i].length;
+ }
+ /* Copy the first SG into the data pointer area. */
+ hscb->data_pointer = scb->sg_list[0].address;
+ hscb->data_count = scb->sg_list[0].length;
+ scb->sg_count = cmd->use_sg;
+ hscb->SG_segment_count = cmd->use_sg;
+ hscb->SG_list_pointer = cpu_to_le32(VIRT_TO_BUS(&scb->sg_list[1]));
+
+ }
+ else
+ {
+ if (cmd->request_bufflen)
+ {
+ scb->sg_count = 1;
+ scb->sg_list[0].address = cpu_to_le32(VIRT_TO_BUS(cmd->request_buffer));
+ scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
+ scb->sg_length = cmd->request_bufflen;
+ hscb->SG_segment_count = 1;
+ hscb->SG_list_pointer = cpu_to_le32(VIRT_TO_BUS(&scb->sg_list[0]));
+ hscb->data_count = scb->sg_list[0].length;
+ hscb->data_pointer = scb->sg_list[0].address;
+ }
+ else
+ {
+ scb->sg_count = 0;
+ scb->sg_length = 0;
+ hscb->SG_segment_count = 0;
+ hscb->SG_list_pointer = 0;
+ hscb->data_count = 0;
+ hscb->data_pointer = 0;
+ }
+ }
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if((cmd->cmnd[0] == TEST_UNIT_READY) && (aic7xxx_verbose & VERBOSE_PROBE2))
+ {
+ aic7xxx_print_scb(p, scb);
+ }
+#endif
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_queue
+ *
+ * Description:
+ * Queue a SCB to the controller.
+ *-F*************************************************************************/
+int
+aic7xxx_queue(Scsi_Cmnd *cmd, void (*fn)(Scsi_Cmnd *))
+{
+ struct aic7xxx_host *p;
+ struct aic7xxx_scb *scb;
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ int tindex = TARGET_INDEX(cmd);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags = 0;
+#endif
+
+ p = (struct aic7xxx_host *) cmd->host->hostdata;
+ /*
+ * Check to see if channel was scanned.
+ */
+
+#ifdef AIC7XXX_VERBOSE_DEBUGGING
+ if (!(p->flags & AHC_A_SCANNED) && (cmd->channel == 0))
+ {
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(INFO_LEAD "Scanning channel for devices.\n",
+ p->host_no, 0, -1, -1);
+ p->flags |= AHC_A_SCANNED;
+ }
+ else
+ {
+ if (!(p->flags & AHC_B_SCANNED) && (cmd->channel == 1))
+ {
+ if (aic7xxx_verbose & VERBOSE_PROBE2)
+ printk(INFO_LEAD "Scanning channel for devices.\n",
+ p->host_no, 1, -1, -1);
+ p->flags |= AHC_B_SCANNED;
+ }
+ }
+
+ if (p->dev_active_cmds[tindex] > (cmd->device->queue_depth + 1))
+ {
+ printk(WARN_LEAD "Commands queued exceeds queue "
+ "depth, active=%d\n",
+ p->host_no, CTL_OF_CMD(cmd),
+ p->dev_active_cmds[tindex]);
+ if ( p->dev_active_cmds[tindex] > 220 )
+ p->dev_active_cmds[tindex] = 0;
+ }
+#endif
+
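+  /*
+   * Grab a free SCB; if the free list is empty, try to allocate more SCBs
+   * and retry once before giving up.
+   */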
+ scb = scbq_remove_head(&p->scb_data->free_scbs);
+ if (scb == NULL)
+ {
+ DRIVER_LOCK
+ aic7xxx_allocate_scb(p);
+ DRIVER_UNLOCK
+ scb = scbq_remove_head(&p->scb_data->free_scbs);
+ }
+ if (scb == NULL)
+ {
+ printk(WARN_LEAD "Couldn't get a free SCB.\n", p->host_no,
+ CTL_OF_CMD(cmd));
+ cmd->result = (DID_BUS_BUSY << 16);
+ DRIVER_LOCK
+ aic7xxx_queue_cmd_complete(p, cmd);
+ DRIVER_UNLOCK
+ return 0;
+ }
+ else
+ {
+ scb->cmd = cmd;
+ aic7xxx_position(cmd) = scb->hscb->tag;
+
+ /*
+ * Construct the SCB beforehand, so the sequencer is
+ * paused a minimal amount of time.
+ */
+ aic7xxx_buildscb(p, cmd, scb);
+
+ /*
+ * Make sure the Scsi_Cmnd pointer is saved, the struct it points to
+ * is set up properly, and the parity error flag is reset, then send
+ * the SCB to the sequencer and watch the fun begin.
+ */
+ cmd->scsi_done = fn;
+ cmd->result = DID_OK;
+ memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+ aic7xxx_error(cmd) = DID_OK;
+ aic7xxx_status(cmd) = 0;
+ cmd->host_scribble = NULL;
+
+ scb->flags |= SCB_ACTIVE | SCB_WAITINGQ;
+
+ DRIVER_LOCK
+ scbq_insert_tail(&p->waiting_scbs, scb);
+ if ( (p->flags & (AHC_IN_ISR | AHC_IN_ABORT | AHC_IN_RESET)) == 0)
+ {
+ aic7xxx_run_waiting_queues(p);
+ }
+ DRIVER_UNLOCK
+ }
+ return (0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_bus_device_reset
+ *
+ * Description:
+ * Abort or reset the current SCSI command(s). If the scb has not
+ * previously been aborted, then we attempt to send a BUS_DEVICE_RESET
+ * message to the target. If the scb has previously been unsuccessfully
+ * aborted, then we will reset the channel and have all devices renegotiate.
+ * Returns an enumerated type that indicates the status of the operation.
+ *-F*************************************************************************/
+static int
+aic7xxx_bus_device_reset(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+{
+ struct aic7xxx_scb *scb;
+ struct aic7xxx_hwscb *hscb;
+ int result = -1;
+ int channel;
+ unsigned char saved_scbptr, lastphase;
+ unsigned char hscb_index;
+ int disconnected;
+
+ scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+ hscb = scb->hscb;
+
+ lastphase = aic_inb(p, LASTPHASE);
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ {
+ printk(INFO_LEAD "Bus Device reset, scb flags 0x%x, ",
+ p->host_no, CTL_OF_SCB(scb), scb->flags);
+ switch (lastphase)
+ {
+ case P_DATAOUT:
+ printk("Data-Out phase\n");
+ break;
+ case P_DATAIN:
+ printk("Data-In phase\n");
+ break;
+ case P_COMMAND:
+ printk("Command phase\n");
+ break;
+ case P_MESGOUT:
+ printk("Message-Out phase\n");
+ break;
+ case P_STATUS:
+ printk("Status phase\n");
+ break;
+ case P_MESGIN:
+ printk("Message-In phase\n");
+ break;
+ default:
+ /*
+ * We're not in a valid phase, so assume we're idle.
+ */
+ printk("while idle, LASTPHASE = 0x%x\n", lastphase);
+ break;
+ }
+ printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 "
+ "0x%x\n", p->host_no, CTL_OF_SCB(scb),
+ aic_inb(p, SCSISIGI),
+ aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
+ aic_inb(p, SSTAT0), aic_inb(p, SSTAT1));
+ }
+
+ channel = cmd->channel;
+
+ /*
+ * Send a Device Reset Message:
+ * The target that is holding up the bus may not be the same as
+ * the one that triggered this timeout (different commands have
+ * different timeout lengths). Our strategy here is to queue an
+ * abort message to the timed out target if it is disconnected.
+ * Otherwise, if we have an active target we stuff the message buffer
+ * with an abort message and assert ATN in the hopes that the target
+ * will let go of the bus and go to the mesgout phase. If this
+ * fails, we'll get another timeout a few seconds later which will
+ * attempt a bus reset.
+ */
+ saved_scbptr = aic_inb(p, SCBPTR);
+ disconnected = FALSE;
+
+ if (lastphase != P_BUSFREE)
+ {
+ if (aic_inb(p, SCB_TAG) >= p->scb_data->numscbs)
+ {
+ printk(WARN_LEAD "Invalid SCB ID %d is active, "
+ "SCB flags = 0x%x.\n", p->host_no,
+ CTL_OF_CMD(cmd), scb->hscb->tag, scb->flags);
+ return(SCSI_RESET_ERROR);
+ }
+ if (scb->hscb->tag == aic_inb(p, SCB_TAG))
+ {
+ if ( (lastphase != P_MESGOUT) && (lastphase != P_MESGIN) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Device reset message in "
+ "message buffer\n", p->host_no, CTL_OF_SCB(scb));
+ scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
+ aic7xxx_error(scb->cmd) = DID_RESET;
+ p->dev_flags[TARGET_INDEX(scb->cmd)] |=
+ BUS_DEVICE_RESET_PENDING;
+ /* Send the abort message to the active SCB. */
+ aic_outb(p, HOST_MSG, MSG_OUT);
+ aic_outb(p, lastphase | ATNO, SCSISIGO);
+ return(SCSI_RESET_PENDING);
+ }
+ else
+ {
+        /* We want to send out the message, but it could clobber a message  */
+        /* that is already in place and in use.  Instead, we return an      */
+        /* error to try and start the bus reset phase, since this command   */
+        /* is probably hung (aborts failed, and now reset is failing).  We  */
+        /* also make sure to set BUS_DEVICE_RESET_PENDING so we won't try   */
+        /* any more on this device, but instead will escalate to a bus or   */
+        /* host reset (additionally, we won't try to abort any more).       */
+ printk(WARN_LEAD "Device reset, Message buffer "
+ "in use\n", p->host_no, CTL_OF_SCB(scb));
+ scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
+ aic7xxx_error(scb->cmd) = DID_RESET;
+ p->dev_flags[TARGET_INDEX(scb->cmd)] |=
+ BUS_DEVICE_RESET_PENDING;
+ return(SCSI_RESET_ERROR);
+ }
+ }
+  } /* if (lastphase != P_BUSFREE).....indicates we are idle and can work */
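+  /*
+   * The SCB isn't the active one, so look for it in the card's SCB array.
+   * If it isn't there and isn't on the QOUTFIFO either, treat it as
+   * disconnected; if it is there, check its DISCONNECTED control bit.
+   */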
+ hscb_index = aic7xxx_find_scb(p, scb);
+ if (hscb_index == SCB_LIST_NULL)
+ {
+ disconnected = (aic7xxx_scb_on_qoutfifo(p, scb)) ? FALSE : TRUE;
+ }
+ else
+ {
+ aic_outb(p, hscb_index, SCBPTR);
+ if (aic_inb(p, SCB_CONTROL) & DISCONNECTED)
+ {
+ disconnected = TRUE;
+ }
+ }
+ if (disconnected)
+ {
+ /*
+ * Simply set the MK_MESSAGE flag and the SEQINT handler will do
+ * the rest on a reconnect.
+ */
+ scb->hscb->control |= MK_MESSAGE;
+ scb->flags |= SCB_RESET | SCB_DEVICE_RESET;
+ p->dev_flags[TARGET_INDEX(scb->cmd)] |=
+ BUS_DEVICE_RESET_PENDING;
+ if (hscb_index != SCB_LIST_NULL)
+ {
+ unsigned char scb_control;
+
+ aic_outb(p, hscb_index, SCBPTR);
+ scb_control = aic_inb(p, SCB_CONTROL);
+ aic_outb(p, scb_control | MK_MESSAGE, SCB_CONTROL);
+ }
+ /*
+ * Actually requeue this SCB in case we can select the
+ * device before it reconnects. If the transaction we
+ * want to abort is not tagged, then this will be the only
+ * outstanding command and we can simply shove it on the
+ * qoutfifo and be done. If it is tagged, then it goes right
+ * in with all the others, no problem :) We need to add it
+ * to the qinfifo and let the sequencer know it is there.
+ * Now, the only problem left to deal with is, *IF* this
+ * command completes, in spite of the MK_MESSAGE bit in the
+ * control byte, then we need to pick that up in the interrupt
+ * routine and clean things up. This *shouldn't* ever happen.
+ */
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Queueing device reset "
+ "command.\n", p->host_no, CTL_OF_SCB(scb));
+ p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
+ if (p->features & AHC_QUEUE_REGS)
+ aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+ else
+ aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+ scb->flags |= SCB_QUEUED_ABORT;
+ result = SCSI_RESET_PENDING;
+ }
+ else if (result == -1)
+ {
+ result = SCSI_RESET_ERROR;
+ }
+ aic_outb(p, saved_scbptr, SCBPTR);
+ return (result);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_panic_abort
+ *
+ * Description:
+ * Abort the current SCSI command(s).
+ *-F*************************************************************************/
+void
+aic7xxx_panic_abort(struct aic7xxx_host *p, Scsi_Cmnd *cmd)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+ int i, mask, found, need_tag;
+ struct aic7xxx_scb *scb;
+ unsigned char qinpos, hscbp;
+
+ found = FALSE;
+#endif
+
+ printk("aic7xxx driver version %s/%s\n", AIC7XXX_C_VERSION,
+ UTS_RELEASE);
+ printk("Controller type:\n %s\n", board_names[p->board_name_index]);
+ printk("p->flags=0x%x, p->chip=0x%x, p->features=0x%x, "
+ "sequencer %s paused\n",
+ p->flags, p->chip, p->features,
+ (aic_inb(p, HCNTRL) & PAUSE) ? "is" : "isn't" );
+ pause_sequencer(p);
+ disable_irq(p->irq);
+ aic7xxx_print_card(p);
+ aic7xxx_print_scratch_ram(p);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+ for(i=0; i<MAX_TARGETS; i++)
+ {
+ if(p->dev_flags[i] & DEVICE_PRESENT)
+ {
+ mask = (0x01 << i);
+ printk(INFO_LEAD "dev_flags=0x%x, WDTR:%c/%c/%c, SDTR:%c/%c/%c,"
+ " q_depth=%d:%d\n",
+ p->host_no, 0, i, 0, p->dev_flags[i],
+ (p->wdtr_pending & mask) ? 'Y' : 'N',
+ (p->needwdtr & mask) ? 'Y' : 'N',
+ (p->needwdtr_copy & mask) ? 'Y' : 'N',
+ (p->sdtr_pending & mask) ? 'Y' : 'N',
+ (p->needsdtr & mask) ? 'Y' : 'N',
+ (p->needsdtr_copy & mask) ? 'Y' : 'N',
+ p->dev_active_cmds[i],
+ p->dev_max_queue_depth[i] );
+ printk(INFO_LEAD "targ_scsirate=0x%x", p->host_no, 0, i, 0,
+ aic_inb(p, TARG_SCSIRATE + i));
+ if (p->features & AHC_ULTRA2)
+ printk(", targ_offset=%d", aic_inb(p, TARG_OFFSET + i));
+ printk("\n");
+ }
+ }
+ /*
+   * Search for this command and see if we can track it down; it's the
+   * one causing the timeout.  Print out this command first, then all other
+   * active commands afterwards.
+ */
+ need_tag = -1;
+ if ( cmd )
+ {
+ scb = p->scb_data->scb_array[aic7xxx_position(cmd)];
+ if ( (scb->flags & SCB_ACTIVE) && (scb->cmd == cmd) )
+ {
+ printk("Timed out command is scb #%d:\n", scb->hscb->tag);
+ printk("Tag%d: flags=0x%x, control=0x%x, TCL=0x%x, %s\n", scb->hscb->tag,
+ scb->flags, scb->hscb->control, scb->hscb->target_channel_lun,
+ (scb->flags & SCB_WAITINGQ) ? "WAITINGQ" : "Sent" );
+ need_tag = scb->hscb->tag;
+ if (scb->flags & SCB_WAITINGQ) found=TRUE;
+ }
+ }
+ printk("QINFIFO: (TAG) ");
+ qinpos = aic_inb(p, QINPOS);
+ while ( qinpos != p->qinfifonext )
+ {
+ if (p->qinfifo[qinpos] == need_tag)
+ found=TRUE;
+ printk("%d ", p->qinfifo[qinpos++]);
+ }
+ printk("\n");
+ printk("Current SCB: (SCBPTR/TAG/CONTROL) %d/%d/0x%x\n", aic_inb(p, SCBPTR),
+ aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL) );
+ if (aic_inb(p, SCB_TAG) == need_tag) found=TRUE;
+ printk("WAITING_SCBS: (SCBPTR/TAG/CONTROL) %d->",
+ hscbp = aic_inb(p, WAITING_SCBH));
+ while (hscbp != SCB_LIST_NULL)
+ {
+ aic_outb(p, hscbp, SCBPTR);
+ printk("%d/%d/0x%x ", hscbp, aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL));
+ hscbp = aic_inb(p, SCB_NEXT);
+ if (aic_inb(p, SCB_TAG) == need_tag) found=TRUE;
+ }
+ printk("\n");
+ printk("DISCONNECTED_SCBS: (SCBPTR/TAG/CONTROL) %d->",
+ hscbp = aic_inb(p, DISCONNECTED_SCBH));
+ while (hscbp != SCB_LIST_NULL)
+ {
+ aic_outb(p, hscbp, SCBPTR);
+ printk("%d/%d/0x%x ", hscbp, aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL));
+ hscbp = aic_inb(p, SCB_NEXT);
+ if (aic_inb(p, SCB_TAG) == need_tag) found=TRUE;
+ }
+ printk("\n");
+ printk("FREE_SCBS: (SCBPTR/TAG/CONTROL) %d->",
+ hscbp = aic_inb(p, FREE_SCBH));
+ while (hscbp != SCB_LIST_NULL)
+ {
+ aic_outb(p, hscbp, SCBPTR);
+ printk("%d/%d/0x%x ", hscbp, aic_inb(p, SCB_TAG), aic_inb(p, SCB_CONTROL));
+ hscbp = aic_inb(p, SCB_NEXT);
+ }
+ printk("\n");
+
+ if (found == FALSE)
+ {
+ /*
+ * We haven't found the offending SCB yet, and it should be around
+     * somewhere, so go look for it in the card's SCBs.
+ */
+ printk("SCBPTR CONTROL TAG PREV NEXT\n");
+ for(i=0; i<p->scb_data->maxhscbs; i++)
+ {
+ aic_outb(p, i, SCBPTR);
+ printk(" %3d %02x %02x %02x %02x\n", i,
+ aic_inb(p, SCB_CONTROL), aic_inb(p, SCB_TAG),
+ aic_inb(p, SCB_PREV), aic_inb(p, SCB_NEXT));
+ }
+ }
+
+
+ for (i=0; i < p->scb_data->numscbs; i++)
+ {
+ scb = p->scb_data->scb_array[i];
+ if ( (scb->flags & SCB_ACTIVE) && (scb->cmd != cmd) )
+ {
+ printk("Tag%d: flags=0x%x, control=0x%x, TCL=0x%x, %s\n", scb->hscb->tag,
+ scb->flags, scb->hscb->control, scb->hscb->target_channel_lun,
+ (scb->flags & SCB_WAITINGQ) ? "WAITINGQ" : "Sent" );
+ }
+ }
+#endif
+ sti();
+ for(;;) barrier();
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_abort
+ *
+ * Description:
+ * Abort the current SCSI command(s).
+ *-F*************************************************************************/
+int
+aic7xxx_abort(Scsi_Cmnd *cmd)
+{
+ struct aic7xxx_scb *scb = NULL;
+ struct aic7xxx_host *p;
+ int result, found=0;
+ unsigned char tmp_char, saved_hscbptr, next_hscbptr, prev_hscbptr;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags = 0;
+#endif
+ Scsi_Cmnd *cmd_next, *cmd_prev;
+
+ p = (struct aic7xxx_host *) cmd->host->hostdata;
+ scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+
+ /*
+ * I added a new config option to the driver: "panic_on_abort" that will
+ * cause the driver to panic and the machine to stop on the first abort
+ * or reset call into the driver. At that point, it prints out a lot of
+   * useful information for me which I can then use to try and debug the
+ * problem. Simply enable the boot time prompt in order to activate this
+ * code.
+ */
+ if (aic7xxx_panic_on_abort)
+ aic7xxx_panic_abort(p, cmd);
+
+ DRIVER_LOCK
+
+/*
+ * Run the isr to grab any commands in the QOUTFIFO and take care of any
+ * other miscellaneous tasks.  This should also set up the bh handler if
+ * there is anything to be done, but it won't run until we are done here since
+ * we are following a straight code path without entering the scheduler
+ * code.
+ */
+
+ pause_sequencer(p);
+ while ( (aic_inb(p, INTSTAT) & INT_PEND) && !(p->flags & AHC_IN_ISR))
+ {
+ aic7xxx_isr(p->irq, p, (void *)NULL);
+ pause_sequencer(p);
+ aic7xxx_done_cmds_complete(p);
+ }
+
+ if ((scb == NULL) || (cmd->serial_number != cmd->serial_number_at_timeout))
+ /* Totally bogus cmd since it points beyond our */
+  {                     /* valid SCB range or doesn't even match its own  */
+ /* timeout serial number. */
+ if (aic7xxx_verbose & VERBOSE_ABORT_MID)
+ printk(INFO_LEAD "Abort called with bogus Scsi_Cmnd "
+ "pointer.\n", p->host_no, CTL_OF_CMD(cmd));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_ABORT_NOT_RUNNING);
+ }
+ if (scb->cmd != cmd) /* Hmmm...either this SCB is currently free with a */
+ { /* NULL cmd pointer (NULLed out when freed) or it */
+ /* has already been recycled for another command */
+ /* Either way, this SCB has nothing to do with this*/
+ /* command and we need to deal with cmd without */
+ /* touching the SCB. */
+ /* The theory here is to return a value that will */
+                        /* make the queued-for-completion command actually */
+ /* finish successfully, or to indicate that we */
+ /* don't have this cmd any more and the mid level */
+ /* code needs to find it. */
+ cmd_next = p->completeq.head;
+ cmd_prev = NULL;
+ while (cmd_next != NULL)
+ {
+ if (cmd_next == cmd)
+ {
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "Abort called for command "
+ "on completeq, completing.\n", p->host_no, CTL_OF_CMD(cmd));
+ if ( cmd_prev == NULL )
+ p->completeq.head = (Scsi_Cmnd *)cmd_next->host_scribble;
+ else
+ cmd_prev->host_scribble = cmd_next->host_scribble;
+ cmd_next->scsi_done(cmd_next);
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_ABORT_NOT_RUNNING); /* It's already back as a successful
+ * completion */
+ }
+ cmd_prev = cmd_next;
+ cmd_next = (Scsi_Cmnd *)cmd_next->host_scribble;
+ }
+ if (aic7xxx_verbose & VERBOSE_ABORT_MID)
+ printk(INFO_LEAD "Abort called for already completed"
+ " command.\n", p->host_no, CTL_OF_CMD(cmd));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_ABORT_NOT_RUNNING);
+ }
+
+/* At this point we know the following:
+ * the SCB pointer is valid
+ * the command pointer passed in to us and the scb->cmd pointer match
+ * this then means that the command we need to abort is the same as the
+ * command held by the scb pointer and is a valid abort request.
+ * Now, we just have to figure out what to do from here. Current plan is:
+ * if we have already been here on this command, escalate to a reset
+ * if scb is on waiting list or QINFIFO, send it back as aborted, but
+ * we also need to be aware of the possibility that we could be using
+ * a faked negotiation command that is holding this command up, if
+ * so we need to take care of that command instead, which means we
+ * would then treat this one like it was sitting around disconnected
+ * instead.
+ * if scb is on WAITING_SCB list in sequencer, free scb and send back
+ * if scb is disconnected and not completed, abort with abort message
+ * if scb is currently running, then it may be causing the bus to hang
+ * so we want a return value that indicates a reset would be appropriate
+ * if the command does not finish shortly
+ * if scb is already complete but not on completeq, we're screwed because
+ * this can't happen (except if the command is in the QOUTFIFO, in which
+ *    case we would like it to complete successfully instead of having
+ *    to be re-done)
+ * All other scenarios already dealt with by previous code.
+ */
+
+ if ( scb->flags & (SCB_ABORT | SCB_RESET | SCB_QUEUED_ABORT) )
+ {
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "SCB aborted once already, "
+ "escalating.\n", p->host_no, CTL_OF_SCB(scb));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_ABORT_SNOOZE);
+ }
+ if ( (p->flags & (AHC_RESET_PENDING | AHC_ABORT_PENDING)) ||
+ (p->dev_flags[TARGET_INDEX(scb->cmd)] &
+ BUS_DEVICE_RESET_PENDING) )
+ {
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "Reset/Abort pending for this "
+ "device, not wasting our time.\n", p->host_no, CTL_OF_SCB(scb));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_ABORT_PENDING);
+ }
+
+ found = 0;
+ p->flags |= AHC_IN_ABORT;
+ if (aic7xxx_verbose & VERBOSE_ABORT)
+ printk(INFO_LEAD "Aborting scb %d, flags 0x%x\n",
+ p->host_no, CTL_OF_SCB(scb), scb->hscb->tag, scb->flags);
+
+/*
+ * First, let's check to see if the currently running command is our target
+ * since if it is, the return is fairly easy and quick since we don't want
+ * to touch the command in case it might complete, but we do want a timeout
+ * in case it's actually hung, so we really do nothing, but tell the mid
+ * level code to reset the timeout.
+ */
+
+ if ( scb->hscb->tag == aic_inb(p, SCB_TAG) )
+ {
+ /*
+ * Check to see if the sequencer is just sitting on this command, or
+ * if it's actively being run.
+ */
+ result = aic_inb(p, LASTPHASE);
+ switch (result)
+ {
+ case P_DATAOUT: /* For any of these cases, we can assume we are */
+      case P_DATAIN:     /* an active command and act accordingly.  For */
+ case P_COMMAND: /* anything else we are going to fall on through*/
+ case P_STATUS: /* The SCSI_ABORT_SNOOZE will give us two abort */
+ case P_MESGOUT: /* chances to finish and then escalate to a */
+ case P_MESGIN: /* reset call */
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "SCB is currently active. "
+ "Waiting on completion.\n", p->host_no, CTL_OF_SCB(scb));
+ unpause_sequencer(p, FALSE);
+ p->flags &= ~AHC_IN_ABORT;
+ scb->flags |= SCB_RECOVERY_SCB; /* Note the fact that we've been */
+ p->flags |= AHC_ABORT_PENDING; /* here so we will know not to */
+ DRIVER_UNLOCK /* muck with other SCBs if this */
+ return(SCSI_ABORT_PENDING); /* one doesn't complete and clear */
+ break; /* out. */
+ default:
+ break;
+ }
+ }
+
+ if ((found == 0) && (scb->flags & SCB_WAITINGQ))
+ {
+ int tindex = TARGET_INDEX(cmd);
+#ifdef AIC7XXX_FAKE_NEGOTIATION_CMDS
+ unsigned short mask;
+
+ mask = (1 << tindex);
+
+ if (p->wdtr_pending & mask)
+ {
+ if (p->dev_wdtr_cmnd[tindex]->next != cmd)
+ found = 1;
+ else
+ found = 0;
+ }
+ else if (p->sdtr_pending & mask)
+ {
+ if (p->dev_sdtr_cmnd[tindex]->next != cmd)
+ found = 1;
+ else
+ found = 0;
+ }
+ else
+ {
+ found = 1;
+ }
+ if (found == 0)
+ {
+ /*
+ * OK..this means the command we are currently getting an abort
+ * for has an outstanding negotiation command in front of it.
+ * We don't really have a way to tie back into the negotiation
+ * commands, so we just send this back as pending, then it
+ * will get reset in 2 seconds.
+ */
+ unpause_sequencer(p, TRUE);
+ scb->flags |= SCB_ABORT;
+ DRIVER_UNLOCK
+ return(SCSI_ABORT_PENDING);
+ }
+#endif
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "SCB found on waiting list and "
+ "aborted.\n", p->host_no, CTL_OF_SCB(scb));
+ scbq_remove(&p->waiting_scbs, scb);
+ scbq_remove(&p->delayed_scbs[tindex], scb);
+ p->dev_active_cmds[tindex]++;
+ p->activescbs++;
+ scb->flags &= ~(SCB_WAITINGQ | SCB_ACTIVE);
+ scb->flags |= SCB_ABORT | SCB_QUEUED_FOR_DONE;
+ found = 1;
+ }
+
+/*
+ * We just checked the waiting_q, now for the QINFIFO
+ */
+ if ( found == 0 )
+ {
+ if ( ((found = aic7xxx_search_qinfifo(p, cmd->target,
+ cmd->channel,
+ cmd->lun, scb->hscb->tag, SCB_ABORT | SCB_QUEUED_FOR_DONE,
+ FALSE, NULL)) != 0) &&
+ (aic7xxx_verbose & VERBOSE_ABORT_PROCESS))
+ printk(INFO_LEAD "SCB found in QINFIFO and "
+ "aborted.\n", p->host_no, CTL_OF_SCB(scb));
+ }
+
+/*
+ * QINFIFO, waitingq, completeq done. Next, check WAITING_SCB list in card
+ */
+
+ if ( found == 0 )
+ {
+ unsigned char scb_next_ptr;
+ prev_hscbptr = SCB_LIST_NULL;
+ saved_hscbptr = aic_inb(p, SCBPTR);
+ next_hscbptr = aic_inb(p, WAITING_SCBH);
+ while ( next_hscbptr != SCB_LIST_NULL )
+ {
+ aic_outb(p, next_hscbptr, SCBPTR );
+ if ( scb->hscb->tag == aic_inb(p, SCB_TAG) )
+ {
+ found = 1;
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "SCB found on hardware waiting"
+ " list and aborted.\n", p->host_no, CTL_OF_SCB(scb));
+ if ( prev_hscbptr == SCB_LIST_NULL )
+ {
+ aic_outb(p, aic_inb(p, SCB_NEXT), WAITING_SCBH);
+ /* stop the selection since we just
+ * grabbed the scb out from under the
+ * card
+ */
+ aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
+ aic_outb(p, CLRSELTIMEO, CLRSINT1);
+ }
+ else
+ {
+ scb_next_ptr = aic_inb(p, SCB_NEXT);
+ aic_outb(p, prev_hscbptr, SCBPTR);
+ aic_outb(p, scb_next_ptr, SCB_NEXT);
+ aic_outb(p, next_hscbptr, SCBPTR);
+ }
+ aic_outb(p, SCB_LIST_NULL, SCB_TAG);
+ aic_outb(p, 0, SCB_CONTROL);
+ aic7xxx_add_curscb_to_free_list(p);
+ scb->flags = SCB_ABORT | SCB_QUEUED_FOR_DONE;
+ break;
+ }
+ prev_hscbptr = next_hscbptr;
+ next_hscbptr = aic_inb(p, SCB_NEXT);
+ }
+ aic_outb(p, saved_hscbptr, SCBPTR );
+ }
+
+/*
+ * Hmmm...completeq, QOUTFIFO, QINFIFO, WAITING_SCBH, waitingq all checked.
+ * OK...the sequencer's paused, interrupts are off, and we haven't found the
+ * command anyplace where it could be easily aborted. Time for the hard
+ * work. We also know the command is valid. This essentially means the
+ * command is disconnected, or connected but not into any phases yet, which
+ * we know due to the tests we ran earlier on the current active scb phase.
+ * At this point we can queue the abort tag and go on with life.
+ */
+
+ if ( found == 0 )
+ {
+ p->flags |= AHC_ABORT_PENDING;
+ scb->flags |= SCB_QUEUED_ABORT | SCB_ABORT | SCB_RECOVERY_SCB;
+ scb->hscb->control |= MK_MESSAGE;
+ result=aic7xxx_find_scb(p, scb);
+ if ( result != SCB_LIST_NULL )
+ {
+ saved_hscbptr = aic_inb(p, SCBPTR);
+ aic_outb(p, result, SCBPTR);
+ tmp_char = aic_inb(p, SCB_CONTROL);
+ aic_outb(p, tmp_char | MK_MESSAGE, SCB_CONTROL);
+ aic_outb(p, saved_hscbptr, SCBPTR);
+ }
+ if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
+ printk(INFO_LEAD "SCB disconnected. Queueing Abort"
+ " SCB.\n", p->host_no, CTL_OF_SCB(scb));
+ p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
+ if (p->features & AHC_QUEUE_REGS)
+ aic_outb(p, p->qinfifonext, HNSCB_QOFF);
+ else
+ aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
+ }
+ if (found)
+ {
+ aic7xxx_run_done_queue(p, TRUE);
+ aic7xxx_run_waiting_queues(p);
+ }
+ p->flags &= ~AHC_IN_ABORT;
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+
+/*
+ * Regarding the return value: if we found the command and aborted it, we know
+ * it's already sent back and there is no reason for a further timeout, so
+ * we use SCSI_ABORT_SUCCESS. On the queued abort side, we aren't so certain
+ * there hasn't been a bus hang or something that might keep the abort
+ * from completing. Therefore, we use SCSI_ABORT_PENDING. The first time this
+ * is passed back, the timeout on the command gets extended, the second time
+ * we pass this back, the mid level SCSI code calls our reset function, which
+ * would shake loose a hung bus.
+ */
+ if ( found != 0 )
+ return(SCSI_ABORT_SUCCESS);
+ else
+ return(SCSI_ABORT_PENDING);
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_reset
+ *
+ * Description:
+ *   Resetting the bus always succeeds - it has to, otherwise the
+ * kernel will panic! Try a surgical technique - sending a BUS
+ * DEVICE RESET message - on the offending target before pulling
+ * the SCSI bus reset line.
+ *-F*************************************************************************/
+int
+aic7xxx_reset(Scsi_Cmnd *cmd, unsigned int flags)
+{
+ struct aic7xxx_scb *scb = NULL;
+ struct aic7xxx_host *p;
+ int tindex;
+ int result = -1;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95)
+ unsigned long cpu_flags = 0;
+#endif
+#define DEVICE_RESET 0x01
+#define BUS_RESET 0x02
+#define HOST_RESET 0x04
+#define FAIL 0x08
+#define RESET_DELAY 0x10
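+/*
+ * The flags above encode the recovery action: the checks below may escalate
+ * DEVICE_RESET to BUS_RESET to HOST_RESET (or defer with RESET_DELAY), and
+ * the switch statement at the end of the function carries the action out.
+ */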
+ int action;
+ Scsi_Cmnd *cmd_prev, *cmd_next;
+
+
+ if ( cmd == NULL )
+ {
+ printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL Scsi_Cmnd "
+ "pointer, failing.\n");
+ return(SCSI_RESET_SNOOZE);
+ }
+
+ p = (struct aic7xxx_host *) cmd->host->hostdata;
+ scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
+ tindex = TARGET_INDEX(cmd);
+
+ /*
+ * I added a new config option to the driver: "panic_on_abort" that will
+ * cause the driver to panic and the machine to stop on the first abort
+ * or reset call into the driver. At that point, it prints out a lot of
+   * useful information for me which I can then use to try and debug the
+ * problem. Simply enable the boot time prompt in order to activate this
+ * code.
+ */
+ if (aic7xxx_panic_on_abort)
+ aic7xxx_panic_abort(p, cmd);
+
+ DRIVER_LOCK
+
+ pause_sequencer(p);
+ while ( (aic_inb(p, INTSTAT) & INT_PEND) && !(p->flags & AHC_IN_ISR))
+ {
+ aic7xxx_isr(p->irq, p, (void *)NULL );
+ pause_sequencer(p);
+ aic7xxx_done_cmds_complete(p);
+ }
+
+ if (scb == NULL)
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called with bogus Scsi_Cmnd"
+ "->SCB mapping, improvising.\n", p->host_no, CTL_OF_CMD(cmd));
+ if ( flags & SCSI_RESET_SUGGEST_HOST_RESET )
+ {
+ action = HOST_RESET;
+ }
+ else
+ {
+ action = BUS_RESET;
+ }
+ }
+ else if (scb->cmd != cmd)
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called with recycled SCB "
+ "for cmd.\n", p->host_no, CTL_OF_CMD(cmd));
+ cmd_prev = NULL;
+ cmd_next = p->completeq.head;
+ while ( cmd_next != NULL )
+ {
+ if (cmd_next == cmd)
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "Reset, found cmd on completeq"
+ ", completing.\n", p->host_no, CTL_OF_CMD(cmd));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_NOT_RUNNING);
+ }
+ cmd_prev = cmd_next;
+ cmd_next = (Scsi_Cmnd *)cmd_next->host_scribble;
+ }
+ if ( !(flags & SCSI_RESET_SYNCHRONOUS) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "Reset, cmd not found,"
+ " failing.\n", p->host_no, CTL_OF_CMD(cmd));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_NOT_RUNNING);
+ }
+ else
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called, no scb, "
+ "flags 0x%x\n", p->host_no, CTL_OF_CMD(cmd), flags);
+ scb = NULL;
+ action = HOST_RESET;
+ }
+ }
+ else
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_MID)
+ printk(INFO_LEAD "Reset called, scb %d, flags "
+ "0x%x\n", p->host_no, CTL_OF_SCB(scb), scb->hscb->tag, scb->flags);
+ if ( aic7xxx_scb_on_qoutfifo(p, scb) )
+ {
+ if(aic7xxx_verbose & VERBOSE_RESET_RETURN)
+ printk(INFO_LEAD "SCB on qoutfifo, returning.\n", p->host_no,
+ CTL_OF_SCB(scb));
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_NOT_RUNNING);
+ }
+ if ( flags & SCSI_RESET_SUGGEST_HOST_RESET )
+ {
+ action = HOST_RESET;
+ }
+ else if ( flags & SCSI_RESET_SUGGEST_BUS_RESET )
+ {
+ action = BUS_RESET;
+ }
+ else
+ {
+ action = DEVICE_RESET;
+ }
+ }
+ if ( (action & DEVICE_RESET) &&
+ (p->dev_flags[tindex] & BUS_DEVICE_RESET_PENDING) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Bus device reset already sent to "
+ "device, escalating.\n", p->host_no, CTL_OF_CMD(cmd));
+ action = BUS_RESET;
+ }
+ if ( (action & DEVICE_RESET) &&
+ (scb->flags & SCB_QUEUED_ABORT) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ {
+ printk(INFO_LEAD "Have already attempted to reach "
+ "device with queued\n", p->host_no, CTL_OF_CMD(cmd));
+ printk(INFO_LEAD "message, will escalate to bus "
+ "reset.\n", p->host_no, CTL_OF_CMD(cmd));
+ }
+ action = BUS_RESET;
+ }
+ if ( (action & DEVICE_RESET) &&
+ (p->flags & (AHC_RESET_PENDING | AHC_ABORT_PENDING)) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Bus device reset stupid when "
+ "other action has failed.\n", p->host_no, CTL_OF_CMD(cmd));
+ action = BUS_RESET;
+ }
+ if ( (action & BUS_RESET) && !(p->features & AHC_TWIN) )
+ {
+ action = HOST_RESET;
+ }
+ if ( (p->dev_flags[tindex] & DEVICE_RESET_DELAY) &&
+ !(action & (HOST_RESET | BUS_RESET)))
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ {
+ printk(INFO_LEAD "Reset called too soon after last "
+ "reset without requesting\n", p->host_no, CTL_OF_CMD(cmd));
+ printk(INFO_LEAD "bus or host reset, escalating.\n", p->host_no,
+ CTL_OF_CMD(cmd));
+ }
+ action = BUS_RESET;
+ }
+ if ( (p->flags & AHC_RESET_DELAY) &&
+ (action & (HOST_RESET | BUS_RESET)) )
+ {
+ if (aic7xxx_verbose & VERBOSE_RESET_PROCESS)
+ printk(INFO_LEAD "Reset called too soon after "
+ "last bus reset, delaying.\n", p->host_no, CTL_OF_CMD(cmd));
+ action = RESET_DELAY;
+ }
+/*
+ * By this point, we want to already know what we are going to do and
+ * only have the following code implement our course of action.
+ */
+ switch (action)
+ {
+ case RESET_DELAY:
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_PENDING);
+ break;
+ case FAIL:
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(SCSI_RESET_ERROR);
+ break;
+ case DEVICE_RESET:
+ p->flags |= AHC_IN_RESET;
+ result = aic7xxx_bus_device_reset(p, cmd);
+ aic7xxx_run_done_queue(p, TRUE);
+ /* We can't rely on run_waiting_queues to unpause the sequencer for
+ * PCI based controllers since we use AAP */
+ aic7xxx_run_waiting_queues(p);
+ unpause_sequencer(p, FALSE);
+ p->flags &= ~AHC_IN_RESET;
+ DRIVER_UNLOCK
+ return(result);
+ break;
+ case BUS_RESET:
+ case HOST_RESET:
+ default:
+ p->flags |= AHC_IN_RESET | AHC_RESET_DELAY;
+ p->dev_expires[p->scsi_id] = jiffies + (3 * HZ);
+ p->dev_timer_active |= (0x01 << p->scsi_id);
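+      /*
+       * Re-arm the shared recovery timer if it is not already queued, or if
+       * it would fire later than our 3 second reset settle delay; the
+       * MAX_TARGETS bit in dev_timer_active appears to track whether the
+       * kernel timer itself is pending.
+       */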
+ if ( !(p->dev_timer_active & (0x01 << MAX_TARGETS)) ||
+ time_after_eq(p->dev_timer.expires, p->dev_expires[p->scsi_id]) )
+ {
+ del_timer(&p->dev_timer);
+ p->dev_timer.expires = p->dev_expires[p->scsi_id];
+ add_timer(&p->dev_timer);
+ p->dev_timer_active |= (0x01 << MAX_TARGETS);
+ }
+ aic7xxx_reset_channel(p, cmd->channel, TRUE);
+ if ( (p->features & AHC_TWIN) && (action & HOST_RESET) )
+ {
+ aic7xxx_reset_channel(p, cmd->channel ^ 0x01, TRUE);
+ restart_sequencer(p);
+ }
+ if (action != HOST_RESET)
+ result = SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+ else
+ {
+ result = SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET;
+ aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE),
+ SIMODE1);
+ aic7xxx_clear_intstat(p);
+ p->flags &= ~AHC_HANDLING_REQINITS;
+ p->msg_type = MSG_TYPE_NONE;
+ p->msg_index = 0;
+ p->msg_len = 0;
+ }
+ aic7xxx_run_done_queue(p, TRUE);
+ /*
+       * If this is a SCSI_RESET_SYNCHRONOUS then the command we were given is
+       * in need of being re-started, so send it on through to aic7xxx_queue
+       * and let it sit until the delay is over.  This keeps it from dying
+ * entirely and avoids getting a bogus dead command back through the
+ * mid-level code due to too many retries.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,132)
+ if ( flags & SCSI_RESET_SYNCHRONOUS )
+ {
+ cmd->result = DID_BUS_BUSY << 16;
+ cmd->done(cmd);
+ }
+#endif
+ p->flags &= ~AHC_IN_RESET;
+ /*
+ * We can't rely on run_waiting_queues to unpause the sequencer for
+ * PCI based controllers since we use AAP. NOTE: this also sets
+ * the timer for the one command we might have queued in the case
+ * of a synch reset.
+ */
+ aic7xxx_run_waiting_queues(p);
+ unpause_sequencer(p, FALSE);
+ DRIVER_UNLOCK
+ return(result);
+ break;
+ }
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_biosparam
+ *
+ * Description:
+ * Return the disk geometry for the given SCSI device.
+ *-F*************************************************************************/
+int
+aic7xxx_biosparam(Disk *disk, kdev_t dev, int geom[])
+{
+ int heads, sectors, cylinders, ret;
+ struct aic7xxx_host *p;
+ struct buffer_head *bh;
+
+ p = (struct aic7xxx_host *) disk->device->host->hostdata;
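+  /*
+   * Read the first 1K block of the whole disk (partition bits of the minor
+   * number masked off) and try to derive a geometry from the partition
+   * table before falling back to a translated default.
+   */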
+ bh = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, 1024);
+
+ if ( bh )
+ {
+ ret = scsi_partsize(bh, disk->capacity, &geom[2], &geom[0], &geom[1]);
+ brelse(bh);
+ if ( ret != -1 )
+ return(ret);
+ }
+
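+  /*
+   * No usable partition table: assume 64 heads and 32 sectors per track
+   * (1 MB cylinders), switching to 255/63 when extended translation is
+   * enabled and the disk would otherwise need more than 1024 cylinders.
+   */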
+ heads = 64;
+ sectors = 32;
+ cylinders = disk->capacity / (heads * sectors);
+
+ if ((p->flags & AHC_EXTEND_TRANS_A) && (cylinders > 1024))
+ {
+ heads = 255;
+ sectors = 63;
+ cylinders = disk->capacity / (heads * sectors);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return (0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_release
+ *
+ * Description:
+ * Free the passed in Scsi_Host memory structures prior to unloading the
+ * module.
+ *-F*************************************************************************/
+int
+aic7xxx_release(struct Scsi_Host *host)
+{
+ struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata;
+ struct aic7xxx_host *next, *prev;
+
+ if(p->irq)
+ free_irq(p->irq, p);
+ release_region(p->base, MAXREG - MINREG);
+#ifdef MMAPIO
+ if(p->maddr)
+ {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)
+ vfree((void *) (((unsigned long) p->maddr) & PAGE_MASK));
+#else
+ iounmap((void *) (((unsigned long) p->maddr) & PAGE_MASK));
+#endif
+ }
+#endif /* MMAPIO */
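+  /*
+   * Unlink this adapter from the driver's global first_aic7xxx list before
+   * freeing its private data.
+   */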
+ prev = NULL;
+ next = first_aic7xxx;
+ while(next != NULL)
+ {
+ if(next == p)
+ {
+ if(prev == NULL)
+ first_aic7xxx = next->next;
+ else
+ prev->next = next->next;
+ }
+ else
+ {
+ prev = next;
+ }
+ next = next->next;
+ }
+ aic7xxx_free(p);
+ return(0);
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_print_card
+ *
+ * Description:
+ * Print out all of the control registers on the card
+ *
+ * NOTE: This function is not yet safe for use on the VLB and EISA
+ * controllers, so it isn't used on those controllers at all.
+ *-F*************************************************************************/
+static void
+aic7xxx_print_card(struct aic7xxx_host *p)
+{
+ int i, j, k, chip;
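+  /*
+   * Each table entry below holds num_ranges pairs of inclusive
+   * (start, end) register offsets to dump for the corresponding chip type.
+   */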
+ static struct register_ranges {
+ int num_ranges;
+ int range_val[32];
+ } cards_ds[] = {
+ { 0, {0,} }, /* none */
+ {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1f, 0x1f, 0x60, 0x60, /*7771*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9b, 0x9f} },
+ { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7850*/
+ 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7860*/
+ 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1c, 0x1f, 0x60, 0x60, /*7870*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1a, 0x1c, 0x1f, 0x60, 0x60, /*7880*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} },
+ {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7890*/
+ 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f,
+ 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc,
+ 0xfe, 0xff} },
+ {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1b, 0x1f, 0x60, 0x60, /*7895*/
+ 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a,
+ 0x9f, 0x9f, 0xe0, 0xf1} },
+ {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7896*/
+ 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f,
+ 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc,
+ 0xfe, 0xff} },
+ };
+#ifdef CONFIG_PCI
+ static struct register_ranges cards_ns[] = {
+ { 0, {0,} }, /* none */
+ { 0, {0,} }, /* 7771 */
+ { 7, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x28, 0x2b, 0x30, 0x33,
+ 0x3c, 0x41, 0x43, 0x47} },
+ { 7, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x28, 0x2b, 0x30, 0x33,
+ 0x3c, 0x41, 0x43, 0x47} },
+ { 5, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x30, 0x33, 0x3c, 0x41} },
+ { 5, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x30, 0x34, 0x3c, 0x47} },
+ { 5, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3} },
+ { 6, {0x04, 0x08, 0x0c, 0x0e, 0x10, 0x17, 0x30, 0x34, 0x3c, 0x47,
+ 0xdc, 0xe3} },
+ { 6, {0x04, 0x08, 0x0c, 0x1b, 0x30, 0x34, 0x3c, 0x43, 0xdc, 0xe3,
+ 0xff, 0xff} }
+ };
+#endif
+ chip = p->chip & AHC_CHIPID_MASK;
+ /*
+ * Let's run through the PCI space first....
+ */
+ printk("%s at ",
+ board_names[p->board_name_index]);
+ switch(p->chip & ~AHC_CHIPID_MASK)
+ {
+ case AHC_VL:
+ printk("VLB Slot %d.\n", p->pci_device_fn);
+ break;
+ case AHC_EISA:
+ printk("EISA Slot %d.\n", p->pci_device_fn);
+ break;
+ case AHC_PCI:
+ default:
+ printk("PCI %d/%d.\n", PCI_SLOT(p->pci_device_fn),
+ PCI_FUNC(p->pci_device_fn));
+ break;
+ }
+
+#ifdef CONFIG_PCI
+ {
+ unsigned char temp;
+
+ printk("PCI Dump:\n");
+ k=0;
+ for(i=0; i<cards_ns[chip].num_ranges; i++)
+ {
+ for(j = cards_ns[chip].range_val[ i * 2 ];
+ j <= cards_ns[chip].range_val[ i * 2 + 1 ] ;
+ j++)
+ {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,92)
+ pci_read_config_byte(p->pdev, j, &temp);
+#else
+ pcibios_read_config_byte(p->pci_bus, p->pci_device_fn, j, &temp);
+#endif
+ printk("%02x:%02x ", j, temp);
+ if(++k == 13)
+ {
+ printk("\n");
+ k = 0;
+ }
+ }
+ }
+ }
+ if(k != 0)
+ printk("\n");
+#endif /* CONFIG_PCI */
+
+ /*
+ * Now the registers on the card....
+ */
+ printk("Card Dump:\n");
+ k = 0;
+ for(i=0; i<cards_ds[chip].num_ranges; i++)
+ {
+ for(j = cards_ds[chip].range_val[ i * 2 ];
+ j <= cards_ds[chip].range_val[ i * 2 + 1 ] ;
+ j++)
+ {
+ printk("%02x:%02x ", j, aic_inb(p, j));
+ if(++k == 13)
+ {
+ printk("\n");
+ k=0;
+ }
+ }
+ }
+ if(k != 0)
+ printk("\n");
+ if (p->flags & AHC_SEEPROM_FOUND)
+ {
+ unsigned short *sc1;
+ sc1 = (unsigned short *)&p->sc;
+
+ printk("SEEPROM dump.\n");
+ for(i=1; i<=32; i++)
+ {
+ printk("0x%04x", sc1[i-1]);
+ if ( (i % 8) == 0 )
+ printk("\n");
+ else
+ printk(" ");
+ }
+ }
+
+ /*
+   * If this is an Ultra2 controller, then we just hosed the card in terms
+   * of the QUEUE REGS.  This function is only called at init time or by
+   * the panic_abort function, so it's safe to assume a generic init time
+   * setting here.
+ */
+
+ if(p->features & AHC_QUEUE_REGS)
+ {
+ aic_outb(p, 0, SDSCB_QOFF);
+ aic_outb(p, 0, SNSCB_QOFF);
+ aic_outb(p, 0, HNSCB_QOFF);
+ }
+
+}
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_print_scratch_ram
+ *
+ * Description:
+ * Print out the scratch RAM values on the card.
+ *-F*************************************************************************/
+static void
+aic7xxx_print_scratch_ram(struct aic7xxx_host *p)
+{
+ int i, k;
+
+ k = 0;
+ printk("Scratch RAM:\n");
+ for(i = SRAM_BASE; i < SEQCTL; i++)
+ {
+ printk("%02x:%02x ", i, aic_inb(p, i));
+ if(++k == 13)
+ {
+ printk("\n");
+ k=0;
+ }
+ }
+ if (p->features & AHC_MORE_SRAM)
+ {
+ for(i = TARG_OFFSET; i < 0x80; i++)
+ {
+ printk("%02x:%02x ", i, aic_inb(p, i));
+ if(++k == 13)
+ {
+ printk("\n");
+ k=0;
+ }
+ }
+ }
+ printk("\n");
+}
+
+
+#include "aic7xxx_proc.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = AIC7XXX;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/aic7xxx.h b/linux/src/drivers/scsi/aic7xxx.h
new file mode 100644
index 0000000..8d18f3c
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx.h
@@ -0,0 +1,114 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx device driver for Linux.
+ *
+ * Copyright (c) 1994 John Aycock
+ * The University of Calgary Department of Computer Science.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: aic7xxx.h,v 1.1.4.1 2004/01/16 22:41:26 roland Exp $
+ *-M*************************************************************************/
+#ifndef _aic7xxx_h
+#define _aic7xxx_h
+
+#define AIC7XXX_H_VERSION "3.2.4"
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(x,y,z) (((x)<<16)+((y)<<8)+(z))
+#endif
+
+#if defined(__i386__)
+# define AIC7XXX_BIOSPARAM aic7xxx_biosparam
+#else
+# define AIC7XXX_BIOSPARAM NULL
+#endif
+
+/*
+ * Scsi_Host_Template (see hosts.h) for AIC-7xxx - some fields
+ * to do with card config are filled in after the card is detected.
+ */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,65)
+#define AIC7XXX { \
+ next: NULL, \
+ module: NULL, \
+ proc_dir: NULL, \
+ proc_info: aic7xxx_proc_info, \
+ name: NULL, \
+ detect: aic7xxx_detect, \
+ release: aic7xxx_release, \
+ info: aic7xxx_info, \
+ command: NULL, \
+ queuecommand: aic7xxx_queue, \
+ eh_strategy_handler: NULL, \
+ eh_abort_handler: NULL, \
+ eh_device_reset_handler: NULL, \
+ eh_bus_reset_handler: NULL, \
+ eh_host_reset_handler: NULL, \
+ abort: aic7xxx_abort, \
+ reset: aic7xxx_reset, \
+ slave_attach: NULL, \
+ bios_param: AIC7XXX_BIOSPARAM, \
+ can_queue: 255, /* max simultaneous cmds */\
+ this_id: -1, /* scsi id of host adapter */\
+ sg_tablesize: 0, /* max scatter-gather cmds */\
+ cmd_per_lun: 3, /* cmds per lun (linked cmds) */\
+ present: 0, /* number of 7xxx's present */\
+ unchecked_isa_dma: 0, /* no memory DMA restrictions */\
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 0 \
+}
+#else
+#define AIC7XXX { \
+ next: NULL, \
+ usage_count: NULL, \
+ proc_dir: NULL, \
+ proc_info: aic7xxx_proc_info, \
+ name: NULL, \
+ detect: aic7xxx_detect, \
+ release: aic7xxx_release, \
+ info: aic7xxx_info, \
+ command: NULL, \
+ queuecommand: aic7xxx_queue, \
+ abort: aic7xxx_abort, \
+ reset: aic7xxx_reset, \
+ slave_attach: NULL, \
+ bios_param: AIC7XXX_BIOSPARAM, \
+ can_queue: 255, /* max simultaneous cmds */\
+ this_id: -1, /* scsi id of host adapter */\
+ sg_tablesize: 0, /* max scatter-gather cmds */\
+ cmd_per_lun: 3, /* cmds per lun (linked cmds) */\
+ present: 0, /* number of 7xxx's present */\
+ unchecked_isa_dma: 0, /* no memory DMA restrictions */\
+ use_clustering: ENABLE_CLUSTERING \
+}
+#endif
+
+extern int aic7xxx_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
+extern int aic7xxx_biosparam(Disk *, kdev_t, int[]);
+extern int aic7xxx_detect(Scsi_Host_Template *);
+extern int aic7xxx_command(Scsi_Cmnd *);
+extern int aic7xxx_reset(Scsi_Cmnd *, unsigned int);
+extern int aic7xxx_abort(Scsi_Cmnd *);
+extern int aic7xxx_release(struct Scsi_Host *);
+
+extern const char *aic7xxx_info(struct Scsi_Host *);
+
+extern int aic7xxx_proc_info(char *, char **, off_t, int, int, int);
+
+#endif /* _aic7xxx_h */
diff --git a/linux/src/drivers/scsi/aic7xxx/scsi_message.h b/linux/src/drivers/scsi/aic7xxx/scsi_message.h
new file mode 100644
index 0000000..16c4013
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx/scsi_message.h
@@ -0,0 +1,41 @@
+/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
+#define MSG_CMDCOMPLETE 0x00 /* M/M */
+#define MSG_EXTENDED 0x01 /* O/O */
+#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
+#define MSG_RESTOREPOINTERS 0x03 /* O/O */
+#define MSG_DISCONNECT 0x04 /* O/O */
+#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */
+#define MSG_ABORT 0x06 /* O/M */
+#define MSG_MESSAGE_REJECT 0x07 /* M/M */
+#define MSG_NOOP 0x08 /* M/M */
+#define MSG_PARITY_ERROR 0x09 /* M/M */
+#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */
+#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */
+#define MSG_BUS_DEV_RESET 0x0c /* O/M */
+#define MSG_ABORT_TAG 0x0d /* O/O */
+#define MSG_CLEAR_QUEUE 0x0e /* O/O */
+#define MSG_INIT_RECOVERY 0x0f /* O/O */
+#define MSG_REL_RECOVERY 0x10 /* O/O */
+#define MSG_TERM_IO_PROC 0x11 /* O/O */
+
+/* Messages (2 byte) */
+#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
+#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */
+#define MSG_ORDERED_Q_TAG 0x22 /* O/O */
+#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
+
+/* Identify message */ /* M/M */
+#define MSG_IDENTIFYFLAG 0x80
+#define MSG_IDENTIFY_DISCFLAG 0x40
+#define MSG_IDENTIFY(lun, disc) (((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
+#define MSG_ISIDENTIFY(m) ((m) & MSG_IDENTIFYFLAG)
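+/*
+ * For example, MSG_IDENTIFY(2, 1) yields 0xc2 (identify LUN 2, disconnection
+ * permitted) and MSG_IDENTIFY(2, 0) yields 0x82.
+ */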
+
+/* Extended messages (opcode and length) */
+#define MSG_EXT_SDTR 0x01
+#define MSG_EXT_SDTR_LEN 0x03
+
+#define MSG_EXT_WDTR 0x03
+#define MSG_EXT_WDTR_LEN 0x02
+#define MSG_EXT_WDTR_BUS_8_BIT 0x00
+#define MSG_EXT_WDTR_BUS_16_BIT 0x01
+#define MSG_EXT_WDTR_BUS_32_BIT 0x02
diff --git a/linux/src/drivers/scsi/aic7xxx/sequencer.h b/linux/src/drivers/scsi/aic7xxx/sequencer.h
new file mode 100644
index 0000000..7c0121e
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx/sequencer.h
@@ -0,0 +1,135 @@
+/*
+ * Instruction formats for the sequencer program downloaded to
+ * Aic7xxx SCSI host adapters
+ *
+ * Copyright (c) 1997, 1998 Justin T. Gibbs.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Where this Software is combined with software released under the terms of
+ * the GNU Public License ("GPL") and the terms of the GPL would require the
+ * combined work to also be released under the terms of the GPL, the terms
+ * and conditions of this License will apply in addition to those of the
+ * GPL with the exception of any terms or conditions of this License that
+ * conflict with, or are expressly prohibited by, the GPL.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id: sequencer.h,v 1.1 1999/04/26 05:55:33 tb Exp $
+ */
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+struct ins_format1 {
+ unsigned int
+ immediate : 8,
+ source : 9,
+ destination : 9,
+ ret : 1,
+ opcode : 4,
+ parity : 1;
+};
+
+struct ins_format2 {
+ unsigned int
+ shift_control : 8,
+ source : 9,
+ destination : 9,
+ ret : 1,
+ opcode : 4,
+ parity : 1;
+};
+
+struct ins_format3 {
+ unsigned int
+ immediate : 8,
+ source : 9,
+ address : 10,
+ opcode : 4,
+ parity : 1;
+};
+#elif defined(__BIG_ENDIAN_BITFIELD)
+struct ins_format1 {
+ unsigned int
+ parity : 1,
+ opcode : 4,
+ ret : 1,
+ destination : 9,
+ source : 9,
+ immediate : 8;
+};
+
+struct ins_format2 {
+ unsigned int
+ parity : 1,
+ opcode : 4,
+ ret : 1,
+ destination : 9,
+ source : 9,
+ shift_control : 8;
+};
+
+struct ins_format3 {
+ unsigned int
+ parity : 1,
+ opcode : 4,
+ address : 10,
+ source : 9,
+ immediate : 8;
+};
+#endif
+
+union ins_formats {
+ struct ins_format1 format1;
+ struct ins_format2 format2;
+ struct ins_format3 format3;
+ unsigned char bytes[4];
+ unsigned int integer;
+};
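+/*
+ * Each sequencer instruction is 32 bits wide; the union above allows it to
+ * be viewed as one of the packed formats, as raw bytes, or as an integer.
+ */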
+struct instruction {
+ union ins_formats format;
+ unsigned int srcline;
+ struct symbol *patch_label;
+ struct {
+ struct instruction *stqe_next;
+ } links;
+};
+
+#define AIC_OP_OR 0x0
+#define AIC_OP_AND 0x1
+#define AIC_OP_XOR 0x2
+#define AIC_OP_ADD 0x3
+#define AIC_OP_ADC 0x4
+#define AIC_OP_ROL 0x5
+#define AIC_OP_BMOV 0x6
+
+#define AIC_OP_JMP 0x8
+#define AIC_OP_JC 0x9
+#define AIC_OP_JNC 0xa
+#define AIC_OP_CALL 0xb
+#define AIC_OP_JNE 0xc
+#define AIC_OP_JNZ 0xd
+#define AIC_OP_JE 0xe
+#define AIC_OP_JZ 0xf
+
+/* Pseudo Ops */
+#define AIC_OP_SHL 0x10
+#define AIC_OP_SHR 0x20
+#define AIC_OP_ROR 0x30
diff --git a/linux/src/drivers/scsi/aic7xxx_proc.c b/linux/src/drivers/scsi/aic7xxx_proc.c
new file mode 100644
index 0000000..87665d0
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx_proc.c
@@ -0,0 +1,384 @@
+/*+M*************************************************************************
+ * Adaptec AIC7xxx device driver proc support for Linux.
+ *
+ * Copyright (c) 1995, 1996 Dean W. Gehnert
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * ----------------------------------------------------------------
+ * o Modified from the EATA-DMA /proc support.
+ * o Additional support for device block statistics provided by
+ * Matthew Jacob.
+ * o Correction of overflow by Heinz Mauelshagen
+ * o Additional corrections by Doug Ledford
+ *
+ * Dean W. Gehnert, deang@teleport.com, 05/01/96
+ *
+ * $Id: aic7xxx_proc.c,v 1.1.4.1 2004/01/16 22:41:26 roland Exp $
+ *-M*************************************************************************/
+
+#define BLS (&aic7xxx_buffer[size])
+#define HDRB \
+" < 2K 2K+ 4K+ 8K+ 16K+ 32K+ 64K+ 128K+"
+
+#ifdef PROC_DEBUG
+extern int vsprintf(char *, const char *, va_list);
+
+static void
+proc_debug(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[256];
+
+ va_start(ap, fmt);
+ vsprintf(buf, fmt, ap);
+ printk(buf);
+ va_end(ap);
+}
+#else /* PROC_DEBUG */
+# define proc_debug(fmt, args...)
+#endif /* PROC_DEBUG */
+
+static int aic7xxx_buffer_size = 0;
+static char *aic7xxx_buffer = NULL;
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_set_info
+ *
+ * Description:
+ * Set parameters for the driver from the /proc filesystem.
+ *-F*************************************************************************/
+int
+aic7xxx_set_info(char *buffer, int length, struct Scsi_Host *HBAptr)
+{
+ proc_debug("aic7xxx_set_info(): %s\n", buffer);
+ return (-ENOSYS); /* Currently this is a no-op */
+}
+
+
+/*+F*************************************************************************
+ * Function:
+ * aic7xxx_proc_info
+ *
+ * Description:
+ * Return information to handle /proc support for the driver.
+ *-F*************************************************************************/
+int
+aic7xxx_proc_info ( char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ struct Scsi_Host *HBAptr;
+ struct aic7xxx_host *p;
+ int size = 0;
+ unsigned char i;
+ struct aic7xxx_xferstats *sp;
+ unsigned char target;
+
+ HBAptr = NULL;
+
+  for(p=first_aic7xxx; p && p->host->host_no != hostno; p=p->next)
+ ;
+
+ if (!p)
+ {
+ size += sprintf(buffer, "Can't find adapter for host number %d\n", hostno);
+ if (size > length)
+ {
+ return (size);
+ }
+ else
+ {
+ return (length);
+ }
+ }
+
+ HBAptr = p->host;
+
+ if (inout == TRUE) /* Has data been written to the file? */
+ {
+ return (aic7xxx_set_info(buffer, length, HBAptr));
+ }
+
+ p = (struct aic7xxx_host *) HBAptr->hostdata;
+
+ /*
+ * It takes roughly 1K of space to hold all relevant card info, not
+ * counting any proc stats, so we start out with a 1.5k buffer size and
+ * if proc_stats is defined, then we sweep the stats structure to see
+ * how many drives we will be printing out for and add 384 bytes per
+ * device with active stats.
+ *
+ * Hmmmm...that 1.5k seems to keep growing as items get added so they
+ * can be easily viewed for debugging purposes. So, we bumped that
+ * 1.5k to 4k so we can quit having to bump it all the time.
+ */
+
+ size = 4096;
+ for (target = 0; target < MAX_TARGETS; target++)
+ {
+ if (p->dev_flags[target] & DEVICE_PRESENT)
+#ifdef AIC7XXX_PROC_STATS
+ size += 512;
+#else
+ size += 256;
+#endif
+ }
+ if (aic7xxx_buffer_size != size)
+ {
+ if (aic7xxx_buffer != NULL)
+ {
+ kfree(aic7xxx_buffer);
+ aic7xxx_buffer_size = 0;
+ }
+ aic7xxx_buffer = kmalloc(size, GFP_KERNEL);
+ }
+ if (aic7xxx_buffer == NULL)
+ {
+ size = sprintf(buffer, "AIC7xxx - kmalloc error at line %d\n",
+ __LINE__);
+ return size;
+ }
+ aic7xxx_buffer_size = size;
+
+ size = 0;
+ size += sprintf(BLS, "Adaptec AIC7xxx driver version: ");
+ size += sprintf(BLS, "%s/", AIC7XXX_C_VERSION);
+ size += sprintf(BLS, "%s", AIC7XXX_H_VERSION);
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Compile Options:\n");
+#ifdef CONFIG_AIC7XXX_TCQ_ON_BY_DEFAULT
+ size += sprintf(BLS, " TCQ Enabled By Default : Enabled\n");
+#else
+ size += sprintf(BLS, " TCQ Enabled By Default : Disabled\n");
+#endif
+#ifdef AIC7XXX_PROC_STATS
+ size += sprintf(BLS, " AIC7XXX_PROC_STATS : Enabled\n");
+#else
+ size += sprintf(BLS, " AIC7XXX_PROC_STATS : Disabled\n");
+#endif
+ size += sprintf(BLS, " AIC7XXX_RESET_DELAY : %d\n", AIC7XXX_RESET_DELAY);
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Adapter Configuration:\n");
+ size += sprintf(BLS, " SCSI Adapter: %s\n",
+ board_names[p->board_name_index]);
+ if (p->flags & AHC_TWIN)
+ size += sprintf(BLS, " Twin Channel\n");
+ else
+ {
+ char *channel = "";
+ char *ultra = "";
+ char *wide = "Narrow ";
+ if (p->flags & AHC_MULTI_CHANNEL)
+ {
+ channel = " Channel A";
+ if (p->flags & (AHC_CHNLB|AHC_CHNLC))
+ channel = (p->flags & AHC_CHNLB) ? " Channel B" : " Channel C";
+ }
+ if (p->features & AHC_WIDE)
+ wide = "Wide ";
+ if (p->features & AHC_ULTRA2)
+ ultra = "Ultra2-LVD/SE ";
+ else if (p->features & AHC_ULTRA)
+ ultra = "Ultra ";
+ size += sprintf(BLS, " %s%sController%s\n",
+ ultra, wide, channel);
+ }
+ if( !(p->maddr) )
+ {
+ size += sprintf(BLS, " Programmed I/O Base: %lx\n", p->base);
+ }
+ else
+ {
+ size += sprintf(BLS, " PCI MMAPed I/O Base: 0x%lx\n", p->mbase);
+ }
+ if( (p->chip & (AHC_VL | AHC_EISA)) )
+ {
+ size += sprintf(BLS, " BIOS Memory Address: 0x%08x\n", p->bios_address);
+ }
+ size += sprintf(BLS, " Adapter SEEPROM Config: %s\n",
+ (p->flags & AHC_SEEPROM_FOUND) ? "SEEPROM found and used." :
+ ((p->flags & AHC_USEDEFAULTS) ? "SEEPROM not found, using defaults." :
+ "SEEPROM not found, using leftover BIOS values.") );
+ size += sprintf(BLS, " Adaptec SCSI BIOS: %s\n",
+ (p->flags & AHC_BIOS_ENABLED) ? "Enabled" : "Disabled");
+ size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq);
+ size += sprintf(BLS, " SCBs: Active %d, Max Active %d,\n",
+ p->activescbs, p->max_activescbs);
+ size += sprintf(BLS, " Allocated %d, HW %d, "
+ "Page %d\n", p->scb_data->numscbs, p->scb_data->maxhscbs,
+ p->scb_data->maxscbs);
+ if (p->flags & AHC_EXTERNAL_SRAM)
+ size += sprintf(BLS, " Using External SCB SRAM\n");
+ size += sprintf(BLS, " Interrupts: %ld", p->isr_count);
+ if (p->chip & AHC_EISA)
+ {
+ size += sprintf(BLS, " %s\n",
+ (p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)");
+ }
+ else
+ {
+ size += sprintf(BLS, "\n");
+ }
+ size += sprintf(BLS, " BIOS Control Word: 0x%04x\n",
+ p->bios_control);
+ size += sprintf(BLS, " Adapter Control Word: 0x%04x\n",
+ p->adapter_control);
+ size += sprintf(BLS, " Extended Translation: %sabled\n",
+ (p->flags & AHC_EXTEND_TRANS_A) ? "En" : "Dis");
+ size += sprintf(BLS, "Disconnect Enable Flags: 0x%04x\n", p->discenable);
+ if (p->features & (AHC_ULTRA | AHC_ULTRA2))
+ {
+ size += sprintf(BLS, " Ultra Enable Flags: 0x%04x\n", p->ultraenb);
+ }
+ size += sprintf(BLS, " Tag Queue Enable Flags: 0x%04x\n", p->tagenable);
+ size += sprintf(BLS, "Ordered Queue Tag Flags: 0x%04x\n", p->orderedtag);
+ size += sprintf(BLS, "Default Tag Queue Depth: %d\n", AIC7XXX_CMDS_PER_DEVICE);
+ size += sprintf(BLS, " Tagged Queue By Device array for aic7xxx host "
+ "instance %d:\n", p->instance);
+ size += sprintf(BLS, " {");
+ for(i=0; i < (MAX_TARGETS - 1); i++)
+ size += sprintf(BLS, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]);
+ size += sprintf(BLS, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]);
+ size += sprintf(BLS, " Actual queue depth per device for aic7xxx host "
+ "instance %d:\n", p->instance);
+ size += sprintf(BLS, " {");
+ for(i=0; i < (MAX_TARGETS - 1); i++)
+ size += sprintf(BLS, "%d,", p->dev_max_queue_depth[i]);
+ size += sprintf(BLS, "%d}\n", p->dev_max_queue_depth[i]);
+
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, "Statistics:\n\n");
+ for (target = 0; target < MAX_TARGETS; target++)
+ {
+ sp = &p->stats[target];
+ if ((p->dev_flags[target] & DEVICE_PRESENT) == 0)
+ {
+ continue;
+ }
+ if (p->features & AHC_TWIN)
+ {
+ size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
+ p->host_no, (target >> 3), (target & 0x7), 0);
+ }
+ else
+ {
+ size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n",
+ p->host_no, 0, target, 0);
+ }
+ size += sprintf(BLS, " Device using %s/%s",
+ (p->transinfo[target].cur_width == MSG_EXT_WDTR_BUS_16_BIT) ?
+ "Wide" : "Narrow",
+ (p->transinfo[target].cur_offset != 0) ?
+ "Sync transfers at " : "Async transfers.\n" );
+ if (p->transinfo[target].cur_offset != 0)
+ {
+ struct aic7xxx_syncrate *sync_rate;
+ int period = p->transinfo[target].cur_period;
+ int rate = (p->transinfo[target].cur_width ==
+ MSG_EXT_WDTR_BUS_16_BIT) ? 1 : 0;
+
+ sync_rate = aic7xxx_find_syncrate(p, &period, AHC_SYNCRATE_ULTRA2);
+ if (sync_rate != NULL)
+ {
+ size += sprintf(BLS, "%s MByte/sec, offset %d\n",
+ sync_rate->rate[rate],
+ p->transinfo[target].cur_offset );
+ }
+ else
+ {
+ size += sprintf(BLS, "3.3 MByte/sec, offset %d\n",
+ p->transinfo[target].cur_offset );
+ }
+ }
+ size += sprintf(BLS, " Transinfo settings: ");
+ size += sprintf(BLS, "current(%d/%d/%d), ",
+ p->transinfo[target].cur_period,
+ p->transinfo[target].cur_offset,
+ p->transinfo[target].cur_width);
+ size += sprintf(BLS, "goal(%d/%d/%d), ",
+ p->transinfo[target].goal_period,
+ p->transinfo[target].goal_offset,
+ p->transinfo[target].goal_width);
+ size += sprintf(BLS, "user(%d/%d/%d)\n",
+ p->transinfo[target].user_period,
+ p->transinfo[target].user_offset,
+ p->transinfo[target].user_width);
+#ifdef AIC7XXX_PROC_STATS
+ size += sprintf(BLS, " Total transfers %ld (%ld reads and %ld writes)\n",
+ sp->r_total + sp->w_total, sp->r_total, sp->w_total);
+ size += sprintf(BLS, "%s\n", HDRB);
+ size += sprintf(BLS, " Reads:");
+ for (i = 0; i < NUMBER(sp->r_bins); i++)
+ {
+ size += sprintf(BLS, " %7ld", sp->r_bins[i]);
+ }
+ size += sprintf(BLS, "\n");
+ size += sprintf(BLS, " Writes:");
+ for (i = 0; i < NUMBER(sp->w_bins); i++)
+ {
+ size += sprintf(BLS, " %7ld", sp->w_bins[i]);
+ }
+ size += sprintf(BLS, "\n");
+#else
+ size += sprintf(BLS, " Total transfers %ld (%ld reads and %ld writes)\n",
+ sp->r_total + sp->w_total, sp->r_total, sp->w_total);
+#endif /* AIC7XXX_PROC_STATS */
+ size += sprintf(BLS, "\n\n");
+ }
+
+ if (size >= aic7xxx_buffer_size)
+ {
+ printk(KERN_WARNING "aic7xxx: Overflow in aic7xxx_proc.c\n");
+ }
+
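+  /*
+   * If the reader has already consumed everything, free the buffer;
+   * otherwise hand back a pointer into it and clamp the returned length
+   * to the data that remains past the requested offset.
+   */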
+ if (offset > size - 1)
+ {
+ kfree(aic7xxx_buffer);
+ aic7xxx_buffer = NULL;
+ aic7xxx_buffer_size = length = 0;
+ *start = NULL;
+ }
+ else
+ {
+ *start = &aic7xxx_buffer[offset]; /* Start of wanted data */
+ if (size - offset < length)
+ {
+ length = size - offset;
+ }
+ }
+
+ return (length);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 2
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -2
+ * c-argdecl-indent: 2
+ * c-label-offset: -2
+ * c-continued-statement-offset: 2
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/aic7xxx_reg.h b/linux/src/drivers/scsi/aic7xxx_reg.h
new file mode 100644
index 0000000..d12d1b6
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx_reg.h
@@ -0,0 +1,587 @@
+/*
+ * DO NOT EDIT - This file is automatically generated.
+ */
+
+#define SCSISEQ 0x00
+#define TEMODE 0x80
+#define ENSELO 0x40
+#define ENSELI 0x20
+#define ENRSELI 0x10
+#define ENAUTOATNO 0x08
+#define ENAUTOATNI 0x04
+#define ENAUTOATNP 0x02
+#define SCSIRSTO 0x01
+
+#define SXFRCTL0 0x01
+#define DFON 0x80
+#define DFPEXP 0x40
+#define FAST20 0x20
+#define CLRSTCNT 0x10
+#define SPIOEN 0x08
+#define SCAMEN 0x04
+#define CLRCHN 0x02
+
+#define SXFRCTL1 0x02
+#define BITBUCKET 0x80
+#define SWRAPEN 0x40
+#define ENSPCHK 0x20
+#define STIMESEL 0x18
+#define ENSTIMER 0x04
+#define ACTNEGEN 0x02
+#define STPWEN 0x01
+
+#define SCSISIGO 0x03
+#define CDO 0x80
+#define IOO 0x40
+#define MSGO 0x20
+#define ATNO 0x10
+#define SELO 0x08
+#define BSYO 0x04
+#define REQO 0x02
+#define ACKO 0x01
+
+#define SCSISIGI 0x03
+#define ATNI 0x10
+#define SELI 0x08
+#define BSYI 0x04
+#define REQI 0x02
+#define ACKI 0x01
+
+#define SCSIRATE 0x04
+#define WIDEXFER 0x80
+#define SXFR_ULTRA2 0x7f
+#define SXFR 0x70
+#define SOFS 0x0f
+
+#define SCSIID 0x05
+#define SCSIOFFSET 0x05
+#define SOFS_ULTRA2 0x7f
+
+#define SCSIDATL 0x06
+
+#define SCSIDATH 0x07
+
+#define STCNT 0x08
+
+#define CLRSINT0 0x0b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRSWRAP 0x08
+#define CLRSPIORDY 0x02
+
+#define SSTAT0 0x0b
+#define TARGET 0x80
+#define SELDO 0x40
+#define SELDI 0x20
+#define SELINGO 0x10
+#define IOERR 0x08
+#define SWRAP 0x08
+#define SDONE 0x04
+#define SPIORDY 0x02
+#define DMADONE 0x01
+
+#define CLRSINT1 0x0c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRPHASECHG 0x02
+#define CLRREQINIT 0x01
+
+#define SSTAT1 0x0c
+#define SELTO 0x80
+#define ATNTARG 0x40
+#define SCSIRSTI 0x20
+#define PHASEMIS 0x10
+#define BUSFREE 0x08
+#define SCSIPERR 0x04
+#define PHASECHG 0x02
+#define REQINIT 0x01
+
+#define SSTAT2 0x0d
+#define OVERRUN 0x80
+#define SFCNT 0x1f
+#define EXP_ACTIVE 0x10
+
+#define SSTAT3 0x0e
+#define SCSICNT 0xf0
+#define OFFCNT 0x0f
+
+#define SCSIID_ULTRA2 0x0f
+#define OID 0x0f
+
+#define SIMODE0 0x10
+#define ENSELDO 0x40
+#define ENSELDI 0x20
+#define ENSELINGO 0x10
+#define ENIOERR 0x08
+#define ENSWRAP 0x08
+#define ENSDONE 0x04
+#define ENSPIORDY 0x02
+#define ENDMADONE 0x01
+
+#define SIMODE1 0x11
+#define ENSELTIMO 0x80
+#define ENATNTARG 0x40
+#define ENSCSIRST 0x20
+#define ENPHASEMIS 0x10
+#define ENBUSFREE 0x08
+#define ENSCSIPERR 0x04
+#define ENPHASECHG 0x02
+#define ENREQINIT 0x01
+
+#define SCSIBUSL 0x12
+
+#define SCSIBUSH 0x13
+
+#define SHADDR 0x14
+
+#define SELTIMER 0x18
+#define STAGE6 0x20
+#define STAGE5 0x10
+#define STAGE4 0x08
+#define STAGE3 0x04
+#define STAGE2 0x02
+#define STAGE1 0x01
+
+#define SELID 0x19
+#define SELID_MASK 0xf0
+#define ONEBIT 0x08
+
+#define SPIOCAP 0x1b
+#define SOFT1 0x80
+#define SOFT0 0x40
+#define SOFTCMDEN 0x20
+#define HAS_BRDCTL 0x10
+#define SEEPROM 0x08
+#define EEPROM 0x04
+#define ROM 0x02
+#define SSPIOCPS 0x01
+
+#define BRDCTL 0x1d
+#define BRDDAT7 0x80
+#define BRDDAT6 0x40
+#define BRDDAT5 0x20
+#define BRDDAT4 0x10
+#define BRDSTB 0x10
+#define BRDCS 0x08
+#define BRDDAT3 0x08
+#define BRDDAT2 0x04
+#define BRDRW 0x04
+#define BRDRW_ULTRA2 0x02
+#define BRDCTL1 0x02
+#define BRDSTB_ULTRA2 0x01
+#define BRDCTL0 0x01
+
+#define SEECTL 0x1e
+#define EXTARBACK 0x80
+#define EXTARBREQ 0x40
+#define SEEMS 0x20
+#define SEERDY 0x10
+#define SEECS 0x08
+#define SEECK 0x04
+#define SEEDO 0x02
+#define SEEDI 0x01
+
+#define SBLKCTL 0x1f
+#define DIAGLEDEN 0x80
+#define DIAGLEDON 0x40
+#define AUTOFLUSHDIS 0x20
+#define ENAB40 0x08
+#define ENAB20 0x04
+#define SELWIDE 0x02
+#define XCVR 0x01
+
+#define SRAM_BASE 0x20
+
+#define TARG_SCSIRATE 0x20
+
+#define ULTRA_ENB 0x30
+
+#define DISC_DSB 0x32
+
+#define MSG_OUT 0x34
+
+#define DMAPARAMS 0x35
+#define PRELOADEN 0x80
+#define WIDEODD 0x40
+#define SCSIEN 0x20
+#define SDMAENACK 0x10
+#define SDMAEN 0x10
+#define HDMAEN 0x08
+#define HDMAENACK 0x08
+#define DIRECTION 0x04
+#define FIFOFLUSH 0x02
+#define FIFORESET 0x01
+
+#define SEQ_FLAGS 0x36
+#define IDENTIFY_SEEN 0x80
+#define SCBPTR_VALID 0x20
+#define DPHASE 0x10
+#define AMTARGET 0x08
+#define WIDE_BUS 0x02
+#define TWIN_BUS 0x01
+
+#define SAVED_TCL 0x37
+
+#define SG_COUNT 0x38
+
+#define SG_NEXT 0x39
+
+#define LASTPHASE 0x3d
+#define P_MESGIN 0xe0
+#define PHASE_MASK 0xe0
+#define P_STATUS 0xc0
+#define P_MESGOUT 0xa0
+#define P_COMMAND 0x80
+#define CDI 0x80
+#define IOI 0x40
+#define P_DATAIN 0x40
+#define MSGI 0x20
+#define P_BUSFREE 0x01
+#define P_DATAOUT 0x00
+
+#define WAITING_SCBH 0x3e
+
+#define DISCONNECTED_SCBH 0x3f
+
+#define FREE_SCBH 0x40
+
+#define HSCB_ADDR 0x41
+
+#define SCBID_ADDR 0x45
+
+#define TMODE_CMDADDR 0x49
+
+#define KERNEL_QINPOS 0x4d
+
+#define QINPOS 0x4e
+
+#define QOUTPOS 0x4f
+
+#define TMODE_CMDADDR_NEXT 0x50
+
+#define ARG_1 0x51
+#define RETURN_1 0x51
+#define SEND_MSG 0x80
+#define SEND_SENSE 0x40
+#define SEND_REJ 0x20
+#define MSGOUT_PHASEMIS 0x10
+
+#define ARG_2 0x52
+#define RETURN_2 0x52
+
+#define LAST_MSG 0x53
+
+#define PREFETCH_CNT 0x54
+
+#define SCSICONF 0x5a
+#define TERM_ENB 0x80
+#define RESET_SCSI 0x40
+#define HWSCSIID 0x0f
+#define HSCSIID 0x07
+
+#define HOSTCONF 0x5d
+
+#define HA_274_BIOSCTRL 0x5f
+#define BIOSMODE 0x30
+#define BIOSDISABLED 0x30
+#define CHANNEL_B_PRIMARY 0x08
+
+#define SEQCTL 0x60
+#define PERRORDIS 0x80
+#define PAUSEDIS 0x40
+#define FAILDIS 0x20
+#define FASTMODE 0x10
+#define BRKADRINTEN 0x08
+#define STEP 0x04
+#define SEQRESET 0x02
+#define LOADRAM 0x01
+
+#define SEQRAM 0x61
+
+#define SEQADDR0 0x62
+
+#define SEQADDR1 0x63
+#define SEQADDR1_MASK 0x01
+
+#define ACCUM 0x64
+
+#define SINDEX 0x65
+
+#define DINDEX 0x66
+
+#define ALLONES 0x69
+
+#define ALLZEROS 0x6a
+
+#define NONE 0x6a
+
+#define FLAGS 0x6b
+#define ZERO 0x02
+#define CARRY 0x01
+
+#define SINDIR 0x6c
+
+#define DINDIR 0x6d
+
+#define FUNCTION1 0x6e
+
+#define STACK 0x6f
+
+#define TARG_OFFSET 0x70
+
+#define BCTL 0x84
+#define ACE 0x08
+#define ENABLE 0x01
+
+#define DSCOMMAND0 0x84
+#define INTSCBRAMSEL 0x08
+#define RAMPS 0x04
+#define USCBSIZE32 0x02
+#define CIOPARCKEN 0x01
+
+#define DSCOMMAND 0x84
+#define CACHETHEN 0x80
+#define DPARCKEN 0x40
+#define MPARCKEN 0x20
+#define EXTREQLCK 0x10
+
+#define BUSTIME 0x85
+#define BOFF 0xf0
+#define BON 0x0f
+
+#define BUSSPD 0x86
+#define DFTHRSH 0xc0
+#define STBOFF 0x38
+#define STBON 0x07
+
+#define DSPCISTATUS 0x86
+#define DFTHRSH_100 0xc0
+
+#define HCNTRL 0x87
+#define POWRDN 0x40
+#define SWINT 0x10
+#define IRQMS 0x08
+#define PAUSE 0x04
+#define INTEN 0x02
+#define CHIPRST 0x01
+#define CHIPRSTACK 0x01
+
+#define HADDR 0x88
+
+#define HCNT 0x8c
+
+#define SCBPTR 0x90
+
+#define INTSTAT 0x91
+#define SEQINT_MASK 0xf1
+#define DATA_OVERRUN 0xe1
+#define MSGIN_PHASEMIS 0xd1
+#define TRACEPOINT2 0xc1
+#define TRACEPOINT 0xb1
+#define AWAITING_MSG 0xa1
+#define RESIDUAL 0x81
+#define BAD_STATUS 0x71
+#define REJECT_MSG 0x61
+#define ABORT_REQUESTED 0x51
+#define EXTENDED_MSG 0x41
+#define NO_MATCH 0x31
+#define NO_IDENT 0x21
+#define SEND_REJECT 0x11
+#define INT_PEND 0x0f
+#define BRKADRINT 0x08
+#define SCSIINT 0x04
+#define CMDCMPLT 0x02
+#define BAD_PHASE 0x01
+#define SEQINT 0x01
+
+#define CLRINT 0x92
+#define CLRPARERR 0x10
+#define CLRBRKADRINT 0x08
+#define CLRSCSIINT 0x04
+#define CLRCMDINT 0x02
+#define CLRSEQINT 0x01
+
+#define ERROR 0x92
+#define CIOPARERR 0x80
+#define PCIERRSTAT 0x40
+#define MPARERR 0x20
+#define DPARERR 0x10
+#define SQPARERR 0x08
+#define ILLOPCODE 0x04
+#define ILLSADDR 0x02
+#define ILLHADDR 0x01
+
+#define DFCNTRL 0x93
+
+#define DFSTATUS 0x94
+#define PRELOAD_AVAIL 0x80
+#define DWORDEMP 0x20
+#define MREQPEND 0x10
+#define HDONE 0x08
+#define DFTHRESH 0x04
+#define FIFOFULL 0x02
+#define FIFOEMP 0x01
+
+#define DFDAT 0x99
+
+#define SCBCNT 0x9a
+#define SCBAUTO 0x80
+#define SCBCNT_MASK 0x1f
+
+#define QINFIFO 0x9b
+
+#define QINCNT 0x9c
+
+#define QOUTFIFO 0x9d
+
+#define QOUTCNT 0x9e
+
+#define SFUNCT 0x9f
+
+#define SCB_CONTROL 0xa0
+#define MK_MESSAGE 0x80
+#define DISCENB 0x40
+#define TAG_ENB 0x20
+#define DISCONNECTED 0x04
+#define SCB_TAG_TYPE 0x03
+
+#define SCB_BASE 0xa0
+
+#define SCB_TCL 0xa1
+#define TID 0xf0
+#define SELBUSB 0x08
+#define LID 0x07
+
+#define SCB_TARGET_STATUS 0xa2
+
+#define SCB_SGCOUNT 0xa3
+
+#define SCB_SGPTR 0xa4
+
+#define SCB_RESID_SGCNT 0xa8
+
+#define SCB_RESID_DCNT 0xa9
+
+#define SCB_DATAPTR 0xac
+
+#define SCB_DATACNT 0xb0
+
+#define SCB_CMDPTR 0xb4
+
+#define SCB_CMDLEN 0xb8
+
+#define SCB_TAG 0xb9
+
+#define SCB_NEXT 0xba
+
+#define SCB_PREV 0xbb
+
+#define SCB_BUSYTARGETS 0xbc
+
+#define SEECTL_2840 0xc0
+#define CS_2840 0x04
+#define CK_2840 0x02
+#define DO_2840 0x01
+
+#define STATUS_2840 0xc1
+#define EEPROM_TF 0x80
+#define BIOS_SEL 0x60
+#define ADSEL 0x1e
+#define DI_2840 0x01
+
+#define CCHADDR 0xe0
+
+#define CCHCNT 0xe8
+
+#define CCSGRAM 0xe9
+
+#define CCSGADDR 0xea
+
+#define CCSGCTL 0xeb
+#define CCSGDONE 0x80
+#define CCSGEN 0x08
+#define FLAG 0x02
+#define CCSGRESET 0x01
+
+#define CCSCBRAM 0xec
+
+#define CCSCBADDR 0xed
+
+#define CCSCBCTL 0xee
+#define CCSCBDONE 0x80
+#define ARRDONE 0x40
+#define CCARREN 0x10
+#define CCSCBEN 0x08
+#define CCSCBDIR 0x04
+#define CCSCBRESET 0x01
+
+#define CCSCBCNT 0xef
+
+#define CCSCBPTR 0xf1
+
+#define HNSCB_QOFF 0xf4
+
+#define SNSCB_QOFF 0xf6
+
+#define SDSCB_QOFF 0xf8
+
+#define QOFF_CTLSTA 0xfa
+#define SCB_AVAIL 0x40
+#define SNSCB_ROLLOVER 0x20
+#define SDSCB_ROLLOVER 0x10
+#define SCB_QSIZE 0x07
+#define SCB_QSIZE_256 0x06
+
+#define DFF_THRSH 0xfb
+#define WR_DFTHRSH 0x70
+#define WR_DFTHRSH_MAX 0x70
+#define WR_DFTHRSH_90 0x60
+#define WR_DFTHRSH_85 0x50
+#define WR_DFTHRSH_75 0x40
+#define WR_DFTHRSH_63 0x30
+#define WR_DFTHRSH_50 0x20
+#define WR_DFTHRSH_25 0x10
+#define RD_DFTHRSH_MAX 0x07
+#define RD_DFTHRSH 0x07
+#define RD_DFTHRSH_90 0x06
+#define RD_DFTHRSH_85 0x05
+#define RD_DFTHRSH_75 0x04
+#define RD_DFTHRSH_63 0x03
+#define RD_DFTHRSH_50 0x02
+#define RD_DFTHRSH_25 0x01
+#define WR_DFTHRSH_MIN 0x00
+#define RD_DFTHRSH_MIN 0x00
+
+#define SG_CACHEPTR 0xfc
+#define SG_USER_DATA 0xfc
+#define LAST_SEG 0x02
+#define LAST_SEG_DONE 0x01
+
+
+#define CMD_GROUP2_BYTE_DELTA 0xfa
+#define MAX_OFFSET_8BIT 0x0f
+#define BUS_16_BIT 0x01
+#define QINFIFO_OFFSET 0x02
+#define CMD_GROUP5_BYTE_DELTA 0x0b
+#define CMD_GROUP_CODE_SHIFT 0x05
+#define MAX_OFFSET_ULTRA2 0x7f
+#define MAX_OFFSET_16BIT 0x08
+#define BUS_8_BIT 0x00
+#define QOUTFIFO_OFFSET 0x01
+#define UNTAGGEDSCB_OFFSET 0x00
+#define CCSGRAM_MAXSEGS 0x10
+#define SCB_LIST_NULL 0xff
+#define SG_SIZEOF 0x08
+#define CMD_GROUP4_BYTE_DELTA 0x04
+#define CMD_GROUP0_BYTE_DELTA 0xfc
+#define HOST_MSG 0xff
+#define BUS_32_BIT 0x02
+#define CCSGADDR_MAX 0x80
+
+
+/* Downloaded Constant Definitions */
+#define TMODE_NUMCMDS 0x00
diff --git a/linux/src/drivers/scsi/aic7xxx_seq.c b/linux/src/drivers/scsi/aic7xxx_seq.c
new file mode 100644
index 0000000..9205cc4
--- /dev/null
+++ b/linux/src/drivers/scsi/aic7xxx_seq.c
@@ -0,0 +1,769 @@
+/*
+ * DO NOT EDIT - This file is automatically generated.
+ */
+static unsigned char seqprog[] = {
+ 0xff, 0x6a, 0x06, 0x08,
+ 0x32, 0x6a, 0x00, 0x00,
+ 0x12, 0x6a, 0x00, 0x00,
+ 0xff, 0x6a, 0xd6, 0x09,
+ 0xff, 0x6a, 0xdc, 0x09,
+ 0x00, 0x65, 0x38, 0x59,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xff, 0x4e, 0xc8, 0x08,
+ 0xbf, 0x60, 0xc0, 0x08,
+ 0x60, 0x0b, 0x7c, 0x68,
+ 0x40, 0x00, 0x0e, 0x68,
+ 0x08, 0x1f, 0x3e, 0x10,
+ 0x60, 0x0b, 0x7c, 0x68,
+ 0x40, 0x00, 0x0e, 0x68,
+ 0x08, 0x1f, 0x3e, 0x10,
+ 0xff, 0x3e, 0x3e, 0x60,
+ 0x40, 0xfa, 0x10, 0x78,
+ 0xff, 0xf6, 0xd4, 0x08,
+ 0x01, 0x4e, 0x9c, 0x18,
+ 0x40, 0x60, 0xc0, 0x00,
+ 0x00, 0x4d, 0x10, 0x70,
+ 0x01, 0x4e, 0x9c, 0x18,
+ 0xbf, 0x60, 0xc0, 0x08,
+ 0x00, 0x6a, 0x72, 0x5c,
+ 0xff, 0x4e, 0xc8, 0x18,
+ 0x02, 0x6a, 0x88, 0x5b,
+ 0xff, 0x52, 0x20, 0x09,
+ 0x0d, 0x6a, 0x6a, 0x00,
+ 0x00, 0x52, 0xfe, 0x5b,
+ 0xff, 0x3e, 0x74, 0x09,
+ 0xff, 0x90, 0x7c, 0x08,
+ 0xff, 0x3e, 0x20, 0x09,
+ 0x00, 0x65, 0x44, 0x58,
+ 0x00, 0x65, 0x0e, 0x40,
+ 0xf7, 0x1f, 0xca, 0x08,
+ 0x08, 0xa1, 0xc8, 0x08,
+ 0x00, 0x65, 0xca, 0x00,
+ 0xff, 0x65, 0x3e, 0x08,
+ 0xf0, 0xa1, 0xc8, 0x08,
+ 0x0f, 0x0f, 0x1e, 0x08,
+ 0x00, 0x0f, 0x1e, 0x00,
+ 0xf0, 0xa1, 0xc8, 0x08,
+ 0x0f, 0x05, 0x0a, 0x08,
+ 0x00, 0x05, 0x0a, 0x00,
+ 0x5a, 0x6a, 0x00, 0x04,
+ 0x12, 0x65, 0xc8, 0x00,
+ 0x00, 0x01, 0x02, 0x00,
+ 0x31, 0x6a, 0xca, 0x00,
+ 0x80, 0x37, 0x64, 0x68,
+ 0xff, 0x65, 0xca, 0x18,
+ 0xff, 0x37, 0xdc, 0x08,
+ 0xff, 0x6e, 0xc8, 0x08,
+ 0x00, 0x6c, 0x6c, 0x78,
+ 0x20, 0x01, 0x02, 0x00,
+ 0x4c, 0x37, 0xc8, 0x28,
+ 0x08, 0x1f, 0x74, 0x78,
+ 0x08, 0x37, 0x6e, 0x00,
+ 0x08, 0x64, 0xc8, 0x00,
+ 0x70, 0x64, 0xca, 0x18,
+ 0xff, 0x6c, 0x0a, 0x08,
+ 0x20, 0x64, 0xca, 0x18,
+ 0xff, 0x6c, 0x08, 0x0c,
+ 0x40, 0x0b, 0x04, 0x69,
+ 0x80, 0x0b, 0xf6, 0x78,
+ 0xa4, 0x6a, 0x06, 0x00,
+ 0x40, 0x6a, 0x16, 0x00,
+ 0x10, 0x03, 0xf2, 0x78,
+ 0xff, 0x50, 0xc8, 0x08,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x49, 0x6a, 0xee, 0x5b,
+ 0x01, 0x6a, 0x26, 0x01,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x02, 0x0b, 0x92, 0x78,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xff, 0x06, 0xcc, 0x08,
+ 0xff, 0x66, 0x32, 0x09,
+ 0x01, 0x65, 0xca, 0x18,
+ 0x80, 0x66, 0xa0, 0x78,
+ 0xff, 0x66, 0xa2, 0x08,
+ 0x10, 0x03, 0x90, 0x68,
+ 0xfc, 0x65, 0xc8, 0x18,
+ 0x00, 0x65, 0xa8, 0x48,
+ 0xff, 0x6a, 0x32, 0x01,
+ 0x01, 0x64, 0x18, 0x19,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x84, 0x6a, 0x06, 0x00,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x02, 0x0b, 0xb2, 0x78,
+ 0xff, 0x06, 0xc8, 0x08,
+ 0xff, 0x64, 0x32, 0x09,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x5b, 0x64, 0xc8, 0x28,
+ 0x00, 0x62, 0xc4, 0x18,
+ 0xfc, 0x65, 0xca, 0x18,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0xfa, 0x65, 0xca, 0x18,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x04, 0x65, 0xca, 0x18,
+ 0x0b, 0x65, 0xca, 0x18,
+ 0xff, 0x65, 0xc8, 0x08,
+ 0x00, 0x8c, 0x18, 0x19,
+ 0x02, 0x0b, 0xce, 0x78,
+ 0x01, 0x65, 0xd4, 0x60,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0xff, 0x06, 0x32, 0x09,
+ 0xff, 0x65, 0xca, 0x18,
+ 0xff, 0x65, 0xce, 0x68,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0x40, 0x51, 0xe6, 0x78,
+ 0xe4, 0x6a, 0x06, 0x00,
+ 0x08, 0x01, 0x02, 0x00,
+ 0x04, 0x6a, 0x18, 0x5b,
+ 0x01, 0x50, 0xa0, 0x18,
+ 0x00, 0x50, 0xec, 0xe0,
+ 0xff, 0x6a, 0xa0, 0x08,
+ 0xff, 0x6a, 0x3a, 0x01,
+ 0x02, 0x6a, 0x22, 0x01,
+ 0x40, 0x51, 0xf2, 0x68,
+ 0xff, 0x6a, 0x06, 0x08,
+ 0x00, 0x65, 0x0e, 0x40,
+ 0x20, 0x6a, 0x16, 0x00,
+ 0xf0, 0x19, 0x6e, 0x08,
+ 0x08, 0x6a, 0x18, 0x00,
+ 0x08, 0x11, 0x22, 0x00,
+ 0x08, 0x6a, 0x5a, 0x58,
+ 0x08, 0x6a, 0x68, 0x00,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x12, 0x6a, 0x00, 0x00,
+ 0x40, 0x6a, 0x16, 0x00,
+ 0xff, 0x3e, 0x20, 0x09,
+ 0xff, 0xba, 0x7c, 0x08,
+ 0xff, 0xa1, 0x6e, 0x08,
+ 0x08, 0x6a, 0x18, 0x00,
+ 0x08, 0x11, 0x22, 0x00,
+ 0x08, 0x6a, 0x5a, 0x58,
+ 0x80, 0x6a, 0x68, 0x00,
+ 0x80, 0x36, 0x6c, 0x00,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xff, 0x3d, 0xc8, 0x08,
+ 0xbf, 0x64, 0x48, 0x79,
+ 0x80, 0x64, 0xf0, 0x71,
+ 0xa0, 0x64, 0x0e, 0x72,
+ 0xc0, 0x64, 0x08, 0x72,
+ 0xe0, 0x64, 0x52, 0x72,
+ 0x01, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xf7, 0x11, 0x22, 0x08,
+ 0x00, 0x65, 0x38, 0x59,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0xf7, 0x01, 0x02, 0x08,
+ 0x09, 0x0c, 0x32, 0x79,
+ 0x08, 0x0c, 0x0e, 0x68,
+ 0x01, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0x26, 0x09,
+ 0xff, 0x6a, 0x08, 0x08,
+ 0xdf, 0x01, 0x02, 0x08,
+ 0x01, 0x6a, 0x7a, 0x00,
+ 0x03, 0x36, 0x6c, 0x0c,
+ 0x08, 0x6a, 0xcc, 0x00,
+ 0xa9, 0x6a, 0xe8, 0x5b,
+ 0x00, 0x65, 0x66, 0x41,
+ 0xa8, 0x6a, 0x6a, 0x00,
+ 0x79, 0x6a, 0x6a, 0x00,
+ 0x40, 0x3d, 0x50, 0x69,
+ 0x04, 0x35, 0x6a, 0x00,
+ 0x00, 0x65, 0x3a, 0x5b,
+ 0x80, 0x6a, 0xd4, 0x01,
+ 0x10, 0x36, 0x42, 0x69,
+ 0x10, 0x36, 0x6c, 0x00,
+ 0x07, 0xac, 0x10, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0xac, 0x6a, 0xe0, 0x5b,
+ 0x00, 0x65, 0xda, 0x5b,
+ 0xff, 0xa3, 0x70, 0x08,
+ 0x39, 0x6a, 0xcc, 0x00,
+ 0xa4, 0x6a, 0xe6, 0x5b,
+ 0xff, 0x38, 0x74, 0x69,
+ 0x80, 0x02, 0x04, 0x00,
+ 0xe7, 0x35, 0x6a, 0x08,
+ 0x03, 0x69, 0x18, 0x31,
+ 0xff, 0x6a, 0x10, 0x00,
+ 0xff, 0x6a, 0x12, 0x00,
+ 0xff, 0x6a, 0x14, 0x00,
+ 0x01, 0x38, 0x7a, 0x61,
+ 0x02, 0xfc, 0xf8, 0x01,
+ 0xbf, 0x35, 0x6a, 0x08,
+ 0xff, 0x69, 0xca, 0x08,
+ 0xff, 0x35, 0x26, 0x09,
+ 0x04, 0x0b, 0x7e, 0x69,
+ 0x04, 0x0b, 0x8a, 0x69,
+ 0x10, 0x0c, 0x80, 0x79,
+ 0x04, 0x0b, 0x88, 0x69,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x00, 0x35, 0x22, 0x5b,
+ 0x80, 0x02, 0xd6, 0x69,
+ 0xff, 0x65, 0xc8, 0x79,
+ 0xff, 0x38, 0x70, 0x18,
+ 0xff, 0x38, 0xc8, 0x79,
+ 0x80, 0xea, 0xaa, 0x61,
+ 0xef, 0x38, 0xc8, 0x18,
+ 0x80, 0x6a, 0xc8, 0x00,
+ 0x00, 0x65, 0x9c, 0x49,
+ 0x33, 0x38, 0xc8, 0x28,
+ 0xff, 0x64, 0xd0, 0x09,
+ 0x04, 0x39, 0xc0, 0x31,
+ 0x09, 0x6a, 0xd6, 0x01,
+ 0x80, 0xeb, 0xa2, 0x79,
+ 0xf7, 0xeb, 0xd6, 0x09,
+ 0x08, 0xeb, 0xa6, 0x69,
+ 0x01, 0x6a, 0xd6, 0x01,
+ 0x08, 0xe9, 0x10, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x39, 0x6a, 0xe6, 0x5b,
+ 0x08, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x0d, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0x88, 0x6a, 0x54, 0x5c,
+ 0x00, 0x65, 0xda, 0x5b,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x08, 0x39, 0x72, 0x18,
+ 0x00, 0x3a, 0x74, 0x20,
+ 0x10, 0x0c, 0x66, 0x79,
+ 0x80, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0xe0, 0x59,
+ 0xff, 0x08, 0x52, 0x09,
+ 0xff, 0x09, 0x54, 0x09,
+ 0xff, 0x0a, 0x56, 0x09,
+ 0xff, 0x38, 0x50, 0x09,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0xe0, 0x59,
+ 0x12, 0x01, 0x02, 0x00,
+ 0x7f, 0x02, 0x04, 0x08,
+ 0xe1, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x04, 0x93, 0xea, 0x69,
+ 0xdf, 0x93, 0x26, 0x09,
+ 0x20, 0x93, 0xe4, 0x69,
+ 0x02, 0x93, 0x26, 0x01,
+ 0x01, 0x94, 0xe6, 0x79,
+ 0xd7, 0x93, 0x26, 0x09,
+ 0x08, 0x93, 0xec, 0x69,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x00, 0x65, 0x3a, 0x5b,
+ 0x02, 0xfc, 0xf8, 0x01,
+ 0x05, 0xb4, 0x10, 0x31,
+ 0x02, 0x6a, 0x1a, 0x31,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0xb4, 0x6a, 0xe4, 0x5b,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x00, 0x65, 0xda, 0x5b,
+ 0x3d, 0x6a, 0x22, 0x5b,
+ 0xac, 0x6a, 0x22, 0x5b,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x00, 0x65, 0x3a, 0x5b,
+ 0xff, 0x06, 0x44, 0x09,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xff, 0x34, 0xca, 0x08,
+ 0x80, 0x65, 0x32, 0x62,
+ 0x0f, 0xa1, 0xca, 0x08,
+ 0x07, 0xa1, 0xca, 0x08,
+ 0x40, 0xa0, 0xc8, 0x08,
+ 0x00, 0x65, 0xca, 0x00,
+ 0x80, 0x65, 0xca, 0x00,
+ 0x80, 0xa0, 0x22, 0x7a,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x00, 0x65, 0x34, 0x42,
+ 0x20, 0xa0, 0x3a, 0x7a,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xa0, 0x3d, 0x46, 0x62,
+ 0x23, 0xa0, 0x0c, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xa0, 0x3d, 0x46, 0x62,
+ 0x00, 0xb9, 0x3a, 0x42,
+ 0xff, 0x65, 0x3a, 0x62,
+ 0xa1, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x10, 0x51, 0x46, 0x72,
+ 0x40, 0x6a, 0x18, 0x00,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xa0, 0x3d, 0x46, 0x62,
+ 0x10, 0x3d, 0x06, 0x00,
+ 0x00, 0x65, 0x0e, 0x42,
+ 0x40, 0x6a, 0x18, 0x00,
+ 0xff, 0x34, 0xa6, 0x08,
+ 0x80, 0x34, 0x4e, 0x62,
+ 0x7f, 0xa0, 0x40, 0x09,
+ 0x08, 0x6a, 0x68, 0x00,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x64, 0x6a, 0x12, 0x5b,
+ 0x80, 0x64, 0xbe, 0x6a,
+ 0x04, 0x64, 0xa4, 0x72,
+ 0x02, 0x64, 0xaa, 0x72,
+ 0x00, 0x6a, 0x6c, 0x72,
+ 0x03, 0x64, 0xba, 0x72,
+ 0x01, 0x64, 0xa0, 0x72,
+ 0x07, 0x64, 0x00, 0x73,
+ 0x08, 0x64, 0x68, 0x72,
+ 0x11, 0x6a, 0x22, 0x01,
+ 0x07, 0x6a, 0x04, 0x5b,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xff, 0xa8, 0x70, 0x6a,
+ 0xff, 0xa2, 0x88, 0x7a,
+ 0x01, 0x6a, 0x6a, 0x00,
+ 0x00, 0xb9, 0xfe, 0x5b,
+ 0xff, 0xa2, 0x88, 0x7a,
+ 0x71, 0x6a, 0x22, 0x01,
+ 0xff, 0x6a, 0xd4, 0x08,
+ 0x40, 0x51, 0x88, 0x62,
+ 0x0d, 0x6a, 0x6a, 0x00,
+ 0x00, 0xb9, 0xfe, 0x5b,
+ 0xff, 0x3e, 0x74, 0x09,
+ 0xff, 0x90, 0x7c, 0x08,
+ 0x00, 0x65, 0x44, 0x58,
+ 0x00, 0x65, 0x2a, 0x41,
+ 0x20, 0xa0, 0x90, 0x6a,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0x00, 0x6a, 0xa8, 0x5b,
+ 0xff, 0x6a, 0xbe, 0x5b,
+ 0xff, 0xf8, 0xc8, 0x08,
+ 0xff, 0x4f, 0xc8, 0x08,
+ 0x01, 0x6a, 0xa8, 0x5b,
+ 0x00, 0xb9, 0xbe, 0x5b,
+ 0x01, 0x4f, 0x9e, 0x18,
+ 0x02, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x6c, 0x5c,
+ 0x00, 0x65, 0x2a, 0x41,
+ 0x41, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0x04, 0xa0, 0x40, 0x01,
+ 0x00, 0x65, 0x84, 0x5c,
+ 0x00, 0x65, 0x2a, 0x41,
+ 0x10, 0x36, 0x68, 0x7a,
+ 0xff, 0x38, 0x46, 0x09,
+ 0xa4, 0x6a, 0xcc, 0x00,
+ 0x39, 0x6a, 0xe6, 0x5b,
+ 0xac, 0x6a, 0xcc, 0x00,
+ 0x14, 0x6a, 0xe6, 0x5b,
+ 0xa9, 0x6a, 0xe8, 0x5b,
+ 0x00, 0x65, 0x68, 0x42,
+ 0xef, 0x36, 0x6c, 0x08,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x0f, 0x64, 0xc8, 0x08,
+ 0x07, 0x64, 0xc8, 0x08,
+ 0x00, 0x37, 0x6e, 0x00,
+ 0x00, 0x65, 0x78, 0x5b,
+ 0xff, 0x51, 0xce, 0x72,
+ 0x20, 0x36, 0xde, 0x7a,
+ 0x00, 0x90, 0x5c, 0x5b,
+ 0x00, 0x65, 0xe0, 0x42,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0x00, 0x65, 0xd2, 0x5b,
+ 0xe0, 0x3d, 0xfa, 0x62,
+ 0x20, 0x12, 0xfa, 0x62,
+ 0x51, 0x6a, 0x08, 0x5b,
+ 0xff, 0x51, 0x20, 0x09,
+ 0x20, 0xa0, 0xfa, 0x7a,
+ 0x00, 0x90, 0x5c, 0x5b,
+ 0x00, 0x65, 0x56, 0x5b,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0x00, 0xa1, 0xf2, 0x62,
+ 0x04, 0xa0, 0xf2, 0x7a,
+ 0xfb, 0xa0, 0x40, 0x09,
+ 0x80, 0x36, 0x6c, 0x00,
+ 0x80, 0xa0, 0x68, 0x7a,
+ 0x7f, 0xa0, 0x40, 0x09,
+ 0xff, 0x6a, 0x04, 0x5b,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x04, 0xa0, 0xf8, 0x7a,
+ 0x00, 0x65, 0x84, 0x5c,
+ 0x00, 0x65, 0xfa, 0x42,
+ 0x00, 0x65, 0x6c, 0x5c,
+ 0x31, 0x6a, 0x22, 0x01,
+ 0x0c, 0x6a, 0x04, 0x5b,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x61, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x68, 0x42,
+ 0x10, 0x3d, 0x06, 0x00,
+ 0xff, 0x65, 0x68, 0x0c,
+ 0xff, 0x06, 0xd4, 0x08,
+ 0x01, 0x0c, 0x0a, 0x7b,
+ 0x04, 0x0c, 0x0a, 0x6b,
+ 0xe0, 0x03, 0x7a, 0x08,
+ 0xe0, 0x3d, 0x1e, 0x63,
+ 0xff, 0x65, 0xcc, 0x08,
+ 0xff, 0x12, 0xda, 0x0c,
+ 0xff, 0x06, 0xd4, 0x0c,
+ 0xff, 0x65, 0x0c, 0x08,
+ 0x02, 0x0b, 0x1a, 0x7b,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0xd1, 0x6a, 0x22, 0x01,
+ 0x00, 0x65, 0x18, 0x41,
+ 0xff, 0x65, 0x26, 0x09,
+ 0x01, 0x0b, 0x32, 0x6b,
+ 0x10, 0x0c, 0x24, 0x7b,
+ 0x04, 0x0b, 0x2c, 0x6b,
+ 0xff, 0x6a, 0xca, 0x08,
+ 0x04, 0x93, 0x30, 0x6b,
+ 0x01, 0x94, 0x2e, 0x7b,
+ 0x10, 0x94, 0x30, 0x6b,
+ 0xc7, 0x93, 0x26, 0x09,
+ 0xff, 0x99, 0xd4, 0x08,
+ 0x08, 0x93, 0x34, 0x6b,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x80, 0x36, 0x38, 0x6b,
+ 0x21, 0x6a, 0x22, 0x05,
+ 0xff, 0x65, 0x20, 0x09,
+ 0xff, 0x51, 0x46, 0x63,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0xa1, 0x6a, 0x50, 0x43,
+ 0xff, 0x51, 0xc8, 0x08,
+ 0xb9, 0x6a, 0x50, 0x43,
+ 0xff, 0xba, 0x54, 0x73,
+ 0xff, 0xba, 0x20, 0x09,
+ 0xff, 0x65, 0xca, 0x18,
+ 0x00, 0x6c, 0x4a, 0x63,
+ 0xff, 0x90, 0xca, 0x0c,
+ 0xff, 0x6a, 0xca, 0x04,
+ 0x20, 0x36, 0x72, 0x7b,
+ 0x00, 0x90, 0x3e, 0x5b,
+ 0xff, 0x65, 0x72, 0x73,
+ 0xff, 0xba, 0x66, 0x73,
+ 0xff, 0xbb, 0xcc, 0x08,
+ 0xff, 0xba, 0x20, 0x09,
+ 0xff, 0x66, 0x76, 0x09,
+ 0xff, 0x65, 0x20, 0x09,
+ 0xff, 0xbb, 0x70, 0x73,
+ 0xff, 0xba, 0xcc, 0x08,
+ 0xff, 0xbb, 0x20, 0x09,
+ 0xff, 0x66, 0x74, 0x09,
+ 0xff, 0x65, 0x20, 0x0d,
+ 0xff, 0xba, 0x7e, 0x0c,
+ 0x00, 0x6a, 0x72, 0x5c,
+ 0x0d, 0x6a, 0x6a, 0x00,
+ 0x00, 0x51, 0xfe, 0x43,
+ 0xff, 0x3f, 0xcc, 0x73,
+ 0xff, 0x6a, 0xa2, 0x00,
+ 0x00, 0x3f, 0x3e, 0x5b,
+ 0xff, 0x65, 0xcc, 0x73,
+ 0x20, 0x36, 0x6c, 0x00,
+ 0x20, 0xa0, 0x86, 0x6b,
+ 0xff, 0xb9, 0xa2, 0x0c,
+ 0xff, 0x6a, 0xa2, 0x04,
+ 0xff, 0x65, 0xa4, 0x08,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0xd0, 0x01,
+ 0x09, 0x6a, 0xd6, 0x01,
+ 0x80, 0xeb, 0x92, 0x7b,
+ 0x01, 0x6a, 0xd6, 0x01,
+ 0x01, 0xe9, 0xa4, 0x34,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x0d, 0x6a, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0xff, 0x99, 0xa4, 0x0c,
+ 0xff, 0x65, 0xa4, 0x08,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0xd0, 0x01,
+ 0x01, 0x6a, 0xdc, 0x05,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x45, 0x6a, 0xf2, 0x5b,
+ 0x01, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0x01, 0x6a, 0x26, 0x05,
+ 0x01, 0x65, 0xd8, 0x31,
+ 0x09, 0xee, 0xdc, 0x01,
+ 0x80, 0xee, 0xc2, 0x7b,
+ 0xff, 0x6a, 0xdc, 0x0d,
+ 0xff, 0x65, 0x32, 0x09,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x44,
+ 0xff, 0x37, 0xc8, 0x08,
+ 0x00, 0x6a, 0x88, 0x5b,
+ 0xff, 0x52, 0xa2, 0x0c,
+ 0x01, 0x0c, 0xd2, 0x7b,
+ 0x04, 0x0c, 0xd2, 0x6b,
+ 0xe0, 0x03, 0x7a, 0x08,
+ 0xff, 0x3d, 0x06, 0x0c,
+ 0xff, 0x8c, 0x10, 0x08,
+ 0xff, 0x8d, 0x12, 0x08,
+ 0xff, 0x8e, 0x14, 0x0c,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x08,
+ 0xff, 0x6c, 0xda, 0x0c,
+ 0x3d, 0x64, 0xa4, 0x28,
+ 0x55, 0x64, 0xc8, 0x28,
+ 0x00, 0x6c, 0xda, 0x18,
+ 0xff, 0x52, 0xc8, 0x08,
+ 0x00, 0x6c, 0xda, 0x20,
+ 0xff, 0x6a, 0xc8, 0x08,
+ 0x00, 0x6c, 0xda, 0x20,
+ 0x00, 0x6c, 0xda, 0x24,
+ 0xff, 0x65, 0xc8, 0x08,
+ 0xe0, 0x6a, 0xcc, 0x00,
+ 0x41, 0x6a, 0xee, 0x5b,
+ 0xff, 0x90, 0xe2, 0x09,
+ 0x20, 0x6a, 0xd0, 0x01,
+ 0x04, 0x35, 0x10, 0x7c,
+ 0x1d, 0x6a, 0xdc, 0x01,
+ 0xdc, 0xee, 0x0c, 0x64,
+ 0x00, 0x65, 0x1c, 0x44,
+ 0x01, 0x6a, 0xdc, 0x01,
+ 0x20, 0xa0, 0xd8, 0x31,
+ 0x09, 0xee, 0xdc, 0x01,
+ 0x80, 0xee, 0x16, 0x7c,
+ 0x19, 0x6a, 0xdc, 0x01,
+ 0xd8, 0xee, 0x1a, 0x64,
+ 0xff, 0x6a, 0xdc, 0x09,
+ 0x18, 0xee, 0x1e, 0x6c,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0x88, 0x6a, 0xcc, 0x00,
+ 0x41, 0x6a, 0xee, 0x5b,
+ 0x20, 0x6a, 0x18, 0x01,
+ 0xff, 0x6a, 0x1a, 0x09,
+ 0xff, 0x6a, 0x1c, 0x09,
+ 0xff, 0x35, 0x26, 0x09,
+ 0x04, 0x35, 0x48, 0x6c,
+ 0xa0, 0x6a, 0xca, 0x00,
+ 0x20, 0x65, 0xc8, 0x18,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0xff, 0x6c, 0x32, 0x09,
+ 0x00, 0x65, 0x34, 0x64,
+ 0x0a, 0x93, 0x26, 0x01,
+ 0x00, 0x65, 0x64, 0x5c,
+ 0x04, 0x35, 0x38, 0x7b,
+ 0xa0, 0x6a, 0x54, 0x5c,
+ 0x00, 0x65, 0x56, 0x5c,
+ 0x00, 0x65, 0x56, 0x5c,
+ 0x00, 0x65, 0x56, 0x44,
+ 0xff, 0x65, 0xcc, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x08,
+ 0xff, 0x99, 0xda, 0x0c,
+ 0x08, 0x94, 0x64, 0x7c,
+ 0xf7, 0x93, 0x26, 0x09,
+ 0x08, 0x93, 0x68, 0x6c,
+ 0xff, 0x6a, 0xd4, 0x0c,
+ 0xff, 0x40, 0x74, 0x09,
+ 0xff, 0x90, 0x80, 0x08,
+ 0xff, 0x6a, 0x72, 0x05,
+ 0xff, 0x40, 0x80, 0x64,
+ 0xff, 0x3f, 0x78, 0x64,
+ 0xff, 0x6a, 0xca, 0x04,
+ 0xff, 0x3f, 0x20, 0x09,
+ 0x01, 0x6a, 0x6a, 0x00,
+ 0x00, 0xb9, 0xfe, 0x5b,
+ 0x00, 0x90, 0x5c, 0x43,
+ 0xff, 0x40, 0x20, 0x09,
+ 0xff, 0xba, 0x80, 0x0c,
+ 0xff, 0x6a, 0x76, 0x01,
+ 0xff, 0x3f, 0x74, 0x09,
+ 0xff, 0x90, 0x7e, 0x08,
+ 0xff, 0xba, 0x38, 0x73,
+ 0xff, 0xba, 0x20, 0x09,
+ 0xff, 0x3f, 0x76, 0x09,
+ 0xff, 0x3f, 0x20, 0x0d,
+};
+
+static int aic7xxx_patch12_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch12_func(struct aic7xxx_host *p)
+{
+ return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895);
+}
+
+static int aic7xxx_patch11_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch11_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_WIDE) != 0);
+}
+
+static int aic7xxx_patch10_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch10_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_ULTRA2) == 0);
+}
+
+static int aic7xxx_patch9_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch9_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_ULTRA) != 0);
+}
+
+static int aic7xxx_patch8_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch8_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_ULTRA2) != 0);
+}
+
+static int aic7xxx_patch7_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch7_func(struct aic7xxx_host *p)
+{
+ return ((p->flags & AHC_PAGESCBS) == 0);
+}
+
+static int aic7xxx_patch6_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch6_func(struct aic7xxx_host *p)
+{
+ return ((p->flags & AHC_PAGESCBS) != 0);
+}
+
+static int aic7xxx_patch5_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch5_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_QUEUE_REGS) != 0);
+}
+
+static int aic7xxx_patch4_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch4_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_TWIN) != 0);
+}
+
+static int aic7xxx_patch3_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch3_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_QUEUE_REGS) == 0);
+}
+
+static int aic7xxx_patch2_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch2_func(struct aic7xxx_host *p)
+{
+ return ((p->features & AHC_CMD_CHAN) != 0);
+}
+
+static int aic7xxx_patch1_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch1_func(struct aic7xxx_host *p)
+{
+ return ((p->flags & AHC_TARGETMODE) != 0);
+}
+
+static int aic7xxx_patch0_func(struct aic7xxx_host *p);
+
+static int
+aic7xxx_patch0_func(struct aic7xxx_host *p)
+{
+ return (0);
+}
+
+struct sequencer_patch {
+ int (*patch_func)(struct aic7xxx_host *);
+ unsigned int begin :10,
+ skip_instr :10,
+ skip_patch :12;
+} sequencer_patches[] = {
+ { aic7xxx_patch1_func, 1, 1, 2 },
+ { aic7xxx_patch0_func, 2, 1, 1 },
+ { aic7xxx_patch2_func, 3, 2, 1 },
+ { aic7xxx_patch3_func, 7, 1, 1 },
+ { aic7xxx_patch3_func, 8, 1, 1 },
+ { aic7xxx_patch4_func, 11, 4, 1 },
+ { aic7xxx_patch5_func, 16, 3, 2 },
+ { aic7xxx_patch0_func, 19, 4, 1 },
+ { aic7xxx_patch6_func, 23, 1, 1 },
+ { aic7xxx_patch7_func, 26, 1, 1 },
+ { aic7xxx_patch4_func, 34, 4, 1 },
+ { aic7xxx_patch8_func, 38, 3, 2 },
+ { aic7xxx_patch0_func, 41, 3, 1 },
+ { aic7xxx_patch9_func, 47, 7, 1 },
+ { aic7xxx_patch4_func, 55, 3, 1 },
+ { aic7xxx_patch8_func, 58, 2, 1 },
+ { aic7xxx_patch1_func, 63, 60, 1 },
+ { aic7xxx_patch8_func, 164, 1, 2 },
+ { aic7xxx_patch0_func, 165, 1, 1 },
+ { aic7xxx_patch2_func, 169, 1, 1 },
+ { aic7xxx_patch2_func, 172, 1, 2 },
+ { aic7xxx_patch0_func, 173, 2, 1 },
+ { aic7xxx_patch10_func, 175, 1, 1 },
+ { aic7xxx_patch8_func, 182, 1, 2 },
+ { aic7xxx_patch0_func, 183, 3, 1 },
+ { aic7xxx_patch8_func, 187, 1, 2 },
+ { aic7xxx_patch0_func, 188, 1, 1 },
+ { aic7xxx_patch8_func, 189, 7, 2 },
+ { aic7xxx_patch0_func, 196, 1, 1 },
+ { aic7xxx_patch2_func, 201, 13, 2 },
+ { aic7xxx_patch0_func, 214, 8, 1 },
+ { aic7xxx_patch10_func, 222, 1, 1 },
+ { aic7xxx_patch8_func, 227, 1, 1 },
+ { aic7xxx_patch8_func, 228, 1, 1 },
+ { aic7xxx_patch8_func, 233, 1, 1 },
+ { aic7xxx_patch8_func, 235, 2, 1 },
+ { aic7xxx_patch8_func, 240, 8, 1 },
+ { aic7xxx_patch8_func, 249, 1, 1 },
+ { aic7xxx_patch2_func, 250, 2, 2 },
+ { aic7xxx_patch0_func, 252, 4, 1 },
+ { aic7xxx_patch10_func, 256, 2, 2 },
+ { aic7xxx_patch0_func, 258, 1, 1 },
+ { aic7xxx_patch11_func, 265, 1, 2 },
+ { aic7xxx_patch0_func, 266, 1, 1 },
+ { aic7xxx_patch5_func, 328, 1, 2 },
+ { aic7xxx_patch0_func, 329, 1, 1 },
+ { aic7xxx_patch3_func, 332, 1, 1 },
+ { aic7xxx_patch11_func, 351, 1, 2 },
+ { aic7xxx_patch0_func, 352, 1, 1 },
+ { aic7xxx_patch6_func, 356, 1, 1 },
+ { aic7xxx_patch7_func, 364, 3, 2 },
+ { aic7xxx_patch0_func, 367, 1, 1 },
+ { aic7xxx_patch1_func, 396, 3, 1 },
+ { aic7xxx_patch10_func, 410, 1, 1 },
+ { aic7xxx_patch2_func, 453, 7, 2 },
+ { aic7xxx_patch0_func, 460, 8, 1 },
+ { aic7xxx_patch2_func, 469, 4, 2 },
+ { aic7xxx_patch0_func, 473, 6, 1 },
+ { aic7xxx_patch2_func, 479, 4, 2 },
+ { aic7xxx_patch0_func, 483, 3, 1 },
+ { aic7xxx_patch2_func, 512, 17, 4 },
+ { aic7xxx_patch12_func, 520, 4, 2 },
+ { aic7xxx_patch0_func, 524, 2, 1 },
+ { aic7xxx_patch0_func, 529, 33, 1 },
+ { aic7xxx_patch6_func, 566, 2, 1 },
+ { aic7xxx_patch6_func, 569, 9, 1 },
+
+};
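+
+/*
+ * Editorial note (illustrative sketch, not generated output): each entry
+ * above guards a block of seqprog[] instructions.  The loader in the
+ * driver core (aic7xxx.c) is expected to walk this table while downloading
+ * the program: when the instruction index reaches `begin' and patch_func(p)
+ * returns false, `skip_instr' instructions are left out and `skip_patch'
+ * table entries are skipped.  Ignoring the case of several patches sharing
+ * one `begin', and assuming a hypothetical download_instr() helper, the
+ * idea is roughly:
+ *
+ *	cur = sequencer_patches;
+ *	for (i = 0; i < sizeof(seqprog) / 4; i++) {
+ *		if (cur->begin == i && !cur->patch_func(p)) {
+ *			i += cur->skip_instr - 1;	(resume after the block)
+ *			cur += cur->skip_patch;
+ *			continue;
+ *		}
+ *		if (cur->begin == i)
+ *			cur++;				(patch accepted)
+ *		download_instr(p, &seqprog[i * 4]);
+ *	}
+ */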
diff --git a/linux/src/drivers/scsi/constants.c b/linux/src/drivers/scsi/constants.c
new file mode 100644
index 0000000..1495a5d
--- /dev/null
+++ b/linux/src/drivers/scsi/constants.c
@@ -0,0 +1,683 @@
+/*
+ * ASCII values for a number of symbolic constants, printing functions,
+ * etc.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#define CONST_COMMAND 0x01
+#define CONST_STATUS 0x02
+#define CONST_SENSE 0x04
+#define CONST_XSENSE 0x08
+#define CONST_CMND 0x10
+#define CONST_MSG 0x20
+#define CONST_HOST 0x40
+#define CONST_DRIVER 0x80
+
+static const char unknown[] = "UNKNOWN";
+
+#ifdef CONFIG_SCSI_CONSTANTS
+#ifdef CONSTANTS
+#undef CONSTANTS
+#endif
+#define CONSTANTS (CONST_COMMAND | CONST_STATUS | CONST_SENSE | CONST_XSENSE \
+ | CONST_CMND | CONST_MSG | CONST_HOST | CONST_DRIVER)
+#endif
+
+#if (CONSTANTS & CONST_COMMAND)
+static const char * group_0_commands[] = {
+/* 00-03 */ "Test Unit Ready", "Rezero Unit", unknown, "Request Sense",
+/* 04-07 */ "Format Unit", "Read Block Limits", unknown, "Reasssign Blocks",
+/* 08-0d */ "Read (6)", unknown, "Write (6)", "Seek (6)", unknown, unknown,
+/* 0e-12 */ unknown, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
+/* 13-16 */ unknown, "Recover Buffered Data", "Mode Select", "Reserve",
+/* 17-1b */ "Release", "Copy", "Erase", "Mode Sense", "Start/Stop Unit",
+/* 1c-1d */ "Receive Diagnostic", "Send Diagnostic",
+/* 1e-1f */ "Prevent/Allow Medium Removal", unknown,
+};
+
+
+static const char *group_1_commands[] = {
+/* 20-22 */ unknown, unknown, unknown,
+/* 23-28 */ unknown, unknown, "Read Capacity", unknown, unknown, "Read (10)",
+/* 29-2d */ unknown, "Write (10)", "Seek (10)", unknown, unknown,
+/* 2e-31 */ "Write Verify","Verify", "Search High", "Search Equal",
+/* 32-34 */ "Search Low", "Set Limits", "Prefetch or Read Position",
+/* 35-37 */ "Synchronize Cache","Lock/Unlock Cache", "Read Defect Data",
+/* 38-3c */ "Medium Scan", "Compare","Copy Verify", "Write Buffer", "Read Buffer",
+/* 3d-3f */ "Update Block", "Read Long", "Write Long",
+};
+
+
+static const char *group_2_commands[] = {
+/* 40-41 */ "Change Definition", "Write Same",
+/* 42-48 */ unknown, "Read TOC", unknown, unknown, unknown, unknown, unknown,
+/* 49-4f */ unknown, unknown, unknown, "Log Select", "Log Sense", unknown, unknown,
+/* 50-55 */ unknown, unknown, unknown, unknown, unknown, "Mode Select (10)",
+/* 56-5b */ unknown, unknown, unknown, unknown, "Mode Sense (10)", unknown,
+/* 5c-5f */ unknown, unknown, unknown, unknown,
+};
+
+
+
+#define group(opcode) (((opcode) >> 5) & 7)
+
+#define RESERVED_GROUP 0
+#define VENDOR_GROUP 1
+#define NOTEXT_GROUP 2
+
+static const char **commands[] = {
+ group_0_commands, group_1_commands, group_2_commands,
+ (const char **) RESERVED_GROUP, (const char **) RESERVED_GROUP,
+ (const char **) NOTEXT_GROUP, (const char **) VENDOR_GROUP,
+ (const char **) VENDOR_GROUP
+};
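+
+/*
+ * The last five entries above are not real tables: the small integer group
+ * codes (RESERVED_GROUP, VENDOR_GROUP, NOTEXT_GROUP) are cast to
+ * (const char **) and used as sentinel values, so that print_opcode()
+ * below can switch on the table pointer itself to recognize the reserved,
+ * vendor-specific and no-text command groups.
+ */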
+
+static const char reserved[] = "RESERVED";
+static const char vendor[] = "VENDOR SPECIFIC";
+
+static void print_opcode(int opcode) {
+ const char **table = commands[ group(opcode) ];
+ switch ((unsigned long) table) {
+ case RESERVED_GROUP:
+ printk("%s(0x%02x) ", reserved, opcode);
+ break;
+ case NOTEXT_GROUP:
+ printk("%s(0x%02x) ", unknown, opcode);
+ break;
+ case VENDOR_GROUP:
+ printk("%s(0x%02x) ", vendor, opcode);
+ break;
+ default:
+ if (table[opcode & 0x1f] != unknown)
+ printk("%s ",table[opcode & 0x1f]);
+ else
+ printk("%s(0x%02x) ", unknown, opcode);
+ break;
+ }
+}
+#else /* CONST & CONST_COMMAND */
+static void print_opcode(int opcode) {
+ printk("0x%02x ", opcode);
+}
+#endif
+
+void print_command (unsigned char *command) {
+ int i,s;
+ print_opcode(command[0]);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ printk("%02x ", command[i]);
+ printk("\n");
+}
+
+#if (CONSTANTS & CONST_STATUS)
+static const char * statuses[] = {
+/* 0-4 */ "Good", "Check Condition", "Condition Good", unknown, "Busy",
+/* 5-9 */ unknown, unknown, unknown, "Intermediate Good", unknown,
+/* a-d */ "Intermediate Good", unknown, "Reservation Conflict", unknown,
+/* e-f */ unknown, unknown,
+};
+#endif
+
+void print_status (int status) {
+ status = (status >> 1) & 0xf;
+#if (CONSTANTS & CONST_STATUS)
+ printk("%s ",statuses[status]);
+#else
+ printk("0x%0x ", status);
+#endif
+}
+
+#if (CONSTANTS & CONST_XSENSE)
+#define D 0x001 /* DIRECT ACCESS DEVICE (disk) */
+#define T 0x002 /* SEQUENTIAL ACCESS DEVICE (tape) */
+#define L 0x004 /* PRINTER DEVICE */
+#define P 0x008 /* PROCESSOR DEVICE */
+#define W 0x010 /* WRITE ONCE READ MULTIPLE DEVICE */
+#define R 0x020 /* READ ONLY (CD-ROM) DEVICE */
+#define S 0x040 /* SCANNER DEVICE */
+#define O 0x080 /* OPTICAL MEMORY DEVICE */
+#define M 0x100 /* MEDIA CHANGER DEVICE */
+#define C 0x200 /* COMMUNICATION DEVICE */
+
+struct error_info{
+ unsigned char code1, code2;
+ unsigned short int devices;
+ const char * text;
+};
+
+struct error_info2{
+ unsigned char code1, code2_min, code2_max;
+ unsigned short int devices;
+ const char * text;
+};
+
+static struct error_info2 additional2[] =
+{
+ {0x40,0x00,0x7f,D,"Ram failure (%x)"},
+ {0x40,0x80,0xff,D|T|L|P|W|R|S|O|M|C,"Diagnostic failure on component (%x)"},
+ {0x41,0x00,0xff,D,"Data path failure (%x)"},
+ {0x42,0x00,0xff,D,"Power-on or self-test failure (%x)"},
+ {0, 0, 0, 0, NULL}
+};
+
+static struct error_info additional[] =
+{
+ {0x00,0x01,T,"Filemark detected"},
+ {0x00,0x02,T|S,"End-of-partition/medium detected"},
+ {0x00,0x03,T,"Setmark detected"},
+ {0x00,0x04,T|S,"Beginning-of-partition/medium detected"},
+ {0x00,0x05,T|S,"End-of-data detected"},
+ {0x00,0x06,D|T|L|P|W|R|S|O|M|C,"I/O process terminated"},
+ {0x00,0x11,R,"Audio play operation in progress"},
+ {0x00,0x12,R,"Audio play operation paused"},
+ {0x00,0x13,R,"Audio play operation successfully completed"},
+ {0x00,0x14,R,"Audio play operation stopped due to error"},
+ {0x00,0x15,R,"No current audio status to return"},
+ {0x01,0x00,D|W|O,"No index/sector signal"},
+ {0x02,0x00,D|W|R|O|M,"No seek complete"},
+ {0x03,0x00,D|T|L|W|S|O,"Peripheral device write fault"},
+ {0x03,0x01,T,"No write current"},
+ {0x03,0x02,T,"Excessive write errors"},
+ {0x04,0x00,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, cause not reportable"},
+ {0x04,0x01,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit is in process of becoming ready"},
+ {0x04,0x02,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, initializing command required"},
+ {0x04,0x03,D|T|L|P|W|R|S|O|M|C,
+ "Logical unit not ready, manual intervention required"},
+ {0x04,0x04,D|T|L|O,"Logical unit not ready, format in progress"},
+ {0x05,0x00,D|T|L|W|R|S|O|M|C,"Logical unit does not respond to selection"},
+ {0x06,0x00,D|W|R|O|M,"No reference position found"},
+ {0x07,0x00,D|T|L|W|R|S|O|M,"Multiple peripheral devices selected"},
+ {0x08,0x00,D|T|L|W|R|S|O|M|C,"Logical unit communication failure"},
+ {0x08,0x01,D|T|L|W|R|S|O|M|C,"Logical unit communication time-out"},
+ {0x08,0x02,D|T|L|W|R|S|O|M|C,"Logical unit communication parity error"},
+ {0x09,0x00,D|T|W|R|O,"Track following error"},
+ {0x09,0x01,W|R|O,"Tracking servo failure"},
+ {0x09,0x02,W|R|O,"Focus servo failure"},
+ {0x09,0x03,W|R|O,"Spindle servo failure"},
+ {0x0A,0x00,D|T|L|P|W|R|S|O|M|C,"Error log overflow"},
+ {0x0C,0x00,T|S,"Write error"},
+ {0x0C,0x01,D|W|O,"Write error recovered with auto reallocation"},
+ {0x0C,0x02,D|W|O,"Write error - auto reallocation failed"},
+ {0x10,0x00,D|W|O,"Id crc or ecc error"},
+ {0x11,0x00,D|T|W|R|S|O,"Unrecovered read error"},
+ {0x11,0x01,D|T|W|S|O,"Read retries exhausted"},
+ {0x11,0x02,D|T|W|S|O,"Error too long to correct"},
+ {0x11,0x03,D|T|W|S|O,"Multiple read errors"},
+ {0x11,0x04,D|W|O,"Unrecovered read error - auto reallocate failed"},
+ {0x11,0x05,W|R|O,"L-ec uncorrectable error"},
+ {0x11,0x06,W|R|O,"Circ unrecovered error"},
+ {0x11,0x07,W|O,"Data resynchronization error"},
+ {0x11,0x08,T,"Incomplete block read"},
+ {0x11,0x09,T,"No gap found"},
+ {0x11,0x0A,D|T|O,"Miscorrected error"},
+ {0x11,0x0B,D|W|O,"Unrecovered read error - recommend reassignment"},
+ {0x11,0x0C,D|W|O,"Unrecovered read error - recommend rewrite the data"},
+ {0x12,0x00,D|W|O,"Address mark not found for id field"},
+ {0x13,0x00,D|W|O,"Address mark not found for data field"},
+ {0x14,0x00,D|T|L|W|R|S|O,"Recorded entity not found"},
+ {0x14,0x01,D|T|W|R|O,"Record not found"},
+ {0x14,0x02,T,"Filemark or setmark not found"},
+ {0x14,0x03,T,"End-of-data not found"},
+ {0x14,0x04,T,"Block sequence error"},
+ {0x15,0x00,D|T|L|W|R|S|O|M,"Random positioning error"},
+ {0x15,0x01,D|T|L|W|R|S|O|M,"Mechanical positioning error"},
+ {0x15,0x02,D|T|W|R|O,"Positioning error detected by read of medium"},
+ {0x16,0x00,D|W|O,"Data synchronization mark error"},
+ {0x17,0x00,D|T|W|R|S|O,"Recovered data with no error correction applied"},
+ {0x17,0x01,D|T|W|R|S|O,"Recovered data with retries"},
+ {0x17,0x02,D|T|W|R|O,"Recovered data with positive head offset"},
+ {0x17,0x03,D|T|W|R|O,"Recovered data with negative head offset"},
+ {0x17,0x04,W|R|O,"Recovered data with retries and/or circ applied"},
+ {0x17,0x05,D|W|R|O,"Recovered data using previous sector id"},
+ {0x17,0x06,D|W|O,"Recovered data without ecc - data auto-reallocated"},
+ {0x17,0x07,D|W|O,"Recovered data without ecc - recommend reassignment"},
+ {0x18,0x00,D|T|W|R|O,"Recovered data with error correction applied"},
+ {0x18,0x01,D|W|R|O,"Recovered data with error correction and retries applied"},
+ {0x18,0x02,D|W|R|O,"Recovered data - data auto-reallocated"},
+ {0x18,0x03,R,"Recovered data with circ"},
+ {0x18,0x04,R,"Recovered data with lec"},
+ {0x18,0x05,D|W|R|O,"Recovered data - recommend reassignment"},
+ {0x19,0x00,D|O,"Defect list error"},
+ {0x19,0x01,D|O,"Defect list not available"},
+ {0x19,0x02,D|O,"Defect list error in primary list"},
+ {0x19,0x03,D|O,"Defect list error in grown list"},
+ {0x1A,0x00,D|T|L|P|W|R|S|O|M|C,"Parameter list length error"},
+ {0x1B,0x00,D|T|L|P|W|R|S|O|M|C,"Synchronous data transfer error"},
+ {0x1C,0x00,D|O,"Defect list not found"},
+ {0x1C,0x01,D|O,"Primary defect list not found"},
+ {0x1C,0x02,D|O,"Grown defect list not found"},
+ {0x1D,0x00,D|W|O,"Miscompare during verify operation"},
+ {0x1E,0x00,D|W|O,"Recovered id with ecc correction"},
+ {0x20,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid command operation code"},
+ {0x21,0x00,D|T|W|R|O|M,"Logical block address out of range"},
+ {0x21,0x01,M,"Invalid element address"},
+ {0x22,0x00,D,"Illegal function (should use 20 00, 24 00, or 26 00)"},
+ {0x24,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid field in cdb"},
+ {0x25,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit not supported"},
+ {0x26,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid field in parameter list"},
+ {0x26,0x01,D|T|L|P|W|R|S|O|M|C,"Parameter not supported"},
+ {0x26,0x02,D|T|L|P|W|R|S|O|M|C,"Parameter value invalid"},
+ {0x26,0x03,D|T|L|P|W|R|S|O|M|C,"Threshold parameters not supported"},
+ {0x27,0x00,D|T|W|O,"Write protected"},
+ {0x28,0x00,D|T|L|P|W|R|S|O|M|C,"Not ready to ready transition (medium may have changed)"},
+ {0x28,0x01,M,"Import or export element accessed"},
+ {0x29,0x00,D|T|L|P|W|R|S|O|M|C,"Power on, reset, or bus device reset occurred"},
+ {0x2A,0x00,D|T|L|W|R|S|O|M|C,"Parameters changed"},
+ {0x2A,0x01,D|T|L|W|R|S|O|M|C,"Mode parameters changed"},
+ {0x2A,0x02,D|T|L|W|R|S|O|M|C,"Log parameters changed"},
+ {0x2B,0x00,D|T|L|P|W|R|S|O|C,"Copy cannot execute since host cannot disconnect"},
+ {0x2C,0x00,D|T|L|P|W|R|S|O|M|C,"Command sequence error"},
+ {0x2C,0x01,S,"Too many windows specified"},
+ {0x2C,0x02,S,"Invalid combination of windows specified"},
+ {0x2D,0x00,T,"Overwrite error on update in place"},
+ {0x2F,0x00,D|T|L|P|W|R|S|O|M|C,"Commands cleared by another initiator"},
+ {0x30,0x00,D|T|W|R|O|M,"Incompatible medium installed"},
+ {0x30,0x01,D|T|W|R|O,"Cannot read medium - unknown format"},
+ {0x30,0x02,D|T|W|R|O,"Cannot read medium - incompatible format"},
+ {0x30,0x03,D|T,"Cleaning cartridge installed"},
+ {0x31,0x00,D|T|W|O,"Medium format corrupted"},
+ {0x31,0x01,D|L|O,"Format command failed"},
+ {0x32,0x00,D|W|O,"No defect spare location available"},
+ {0x32,0x01,D|W|O,"Defect list update failure"},
+ {0x33,0x00,T,"Tape length error"},
+ {0x36,0x00,L,"Ribbon, ink, or toner failure"},
+ {0x37,0x00,D|T|L|W|R|S|O|M|C,"Rounded parameter"},
+ {0x39,0x00,D|T|L|W|R|S|O|M|C,"Saving parameters not supported"},
+ {0x3A,0x00,D|T|L|W|R|S|O|M,"Medium not present"},
+ {0x3B,0x00,T|L,"Sequential positioning error"},
+ {0x3B,0x01,T,"Tape position error at beginning-of-medium"},
+ {0x3B,0x02,T,"Tape position error at end-of-medium"},
+ {0x3B,0x03,L,"Tape or electronic vertical forms unit not ready"},
+ {0x3B,0x04,L,"Slew failure"},
+ {0x3B,0x05,L,"Paper jam"},
+ {0x3B,0x06,L,"Failed to sense top-of-form"},
+ {0x3B,0x07,L,"Failed to sense bottom-of-form"},
+ {0x3B,0x08,T,"Reposition error"},
+ {0x3B,0x09,S,"Read past end of medium"},
+ {0x3B,0x0A,S,"Read past beginning of medium"},
+ {0x3B,0x0B,S,"Position past end of medium"},
+ {0x3B,0x0C,S,"Position past beginning of medium"},
+ {0x3B,0x0D,M,"Medium destination element full"},
+ {0x3B,0x0E,M,"Medium source element empty"},
+ {0x3D,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid bits in identify message"},
+ {0x3E,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit has not self-configured yet"},
+ {0x3F,0x00,D|T|L|P|W|R|S|O|M|C,"Target operating conditions have changed"},
+ {0x3F,0x01,D|T|L|P|W|R|S|O|M|C,"Microcode has been changed"},
+ {0x3F,0x02,D|T|L|P|W|R|S|O|M|C,"Changed operating definition"},
+ {0x3F,0x03,D|T|L|P|W|R|S|O|M|C,"Inquiry data has changed"},
+ {0x43,0x00,D|T|L|P|W|R|S|O|M|C,"Message error"},
+ {0x44,0x00,D|T|L|P|W|R|S|O|M|C,"Internal target failure"},
+ {0x45,0x00,D|T|L|P|W|R|S|O|M|C,"Select or reselect failure"},
+ {0x46,0x00,D|T|L|P|W|R|S|O|M|C,"Unsuccessful soft reset"},
+ {0x47,0x00,D|T|L|P|W|R|S|O|M|C,"Scsi parity error"},
+ {0x48,0x00,D|T|L|P|W|R|S|O|M|C,"Initiator detected error message received"},
+ {0x49,0x00,D|T|L|P|W|R|S|O|M|C,"Invalid message error"},
+ {0x4A,0x00,D|T|L|P|W|R|S|O|M|C,"Command phase error"},
+ {0x4B,0x00,D|T|L|P|W|R|S|O|M|C,"Data phase error"},
+ {0x4C,0x00,D|T|L|P|W|R|S|O|M|C,"Logical unit failed self-configuration"},
+ {0x4E,0x00,D|T|L|P|W|R|S|O|M|C,"Overlapped commands attempted"},
+ {0x50,0x00,T,"Write append error"},
+ {0x50,0x01,T,"Write append position error"},
+ {0x50,0x02,T,"Position error related to timing"},
+ {0x51,0x00,T|O,"Erase failure"},
+ {0x52,0x00,T,"Cartridge fault"},
+ {0x53,0x00,D|T|L|W|R|S|O|M,"Media load or eject failed"},
+ {0x53,0x01,T,"Unload tape failure"},
+ {0x53,0x02,D|T|W|R|O|M,"Medium removal prevented"},
+ {0x54,0x00,P,"Scsi to host system interface failure"},
+ {0x55,0x00,P,"System resource failure"},
+ {0x57,0x00,R,"Unable to recover table-of-contents"},
+ {0x58,0x00,O,"Generation does not exist"},
+ {0x59,0x00,O,"Updated block read"},
+ {0x5A,0x00,D|T|L|P|W|R|S|O|M,"Operator request or state change input (unspecified)"},
+ {0x5A,0x01,D|T|W|R|O|M,"Operator medium removal request"},
+ {0x5A,0x02,D|T|W|O,"Operator selected write protect"},
+ {0x5A,0x03,D|T|W|O,"Operator selected write permit"},
+ {0x5B,0x00,D|T|L|P|W|R|S|O|M,"Log exception"},
+ {0x5B,0x01,D|T|L|P|W|R|S|O|M,"Threshold condition met"},
+ {0x5B,0x02,D|T|L|P|W|R|S|O|M,"Log counter at maximum"},
+ {0x5B,0x03,D|T|L|P|W|R|S|O|M,"Log list codes exhausted"},
+ {0x5C,0x00,D|O,"Rpl status change"},
+ {0x5C,0x01,D|O,"Spindles synchronized"},
+ {0x5C,0x02,D|O,"Spindles not synchronized"},
+ {0x60,0x00,S,"Lamp failure"},
+ {0x61,0x00,S,"Video acquisition error"},
+ {0x61,0x01,S,"Unable to acquire video"},
+ {0x61,0x02,S,"Out of focus"},
+ {0x62,0x00,S,"Scan head positioning error"},
+ {0x63,0x00,R,"End of user area encountered on this track"},
+ {0x64,0x00,R,"Illegal mode for this track"},
+ {0, 0, 0, NULL}
+};
+#endif
+
+#if (CONSTANTS & CONST_SENSE)
+static const char *snstext[] = {
+ "None", /* There is no sense information */
+ "Recovered Error", /* The last command completed successfully
+ but used error correction */
+ "Not Ready", /* The addressed target is not ready */
+ "Medium Error", /* Data error detected on the medium */
+ "Hardware Error", /* Controller or device failure */
+ "Illegal Request",
+ "Unit Attention", /* Removable medium was changed, or
+ the target has been reset */
+ "Data Protect", /* Access to the data is blocked */
+ "Blank Check", /* Reached unexpected written or unwritten
+ region of the medium */
+ "Key=9", /* Vendor specific */
+ "Copy Aborted", /* COPY or COMPARE was aborted */
+ "Aborted Command", /* The target aborted the command */
+ "Equal", /* A SEARCH DATA command found data equal */
+ "Volume Overflow", /* Medium full with still data to be written */
+ "Miscompare", /* Source data and data on the medium
+ do not agree */
+ "Key=15" /* Reserved */
+};
+#endif
+
+/* Print sense information */
+void print_sense(const char * devclass, Scsi_Cmnd * SCpnt)
+{
+ int i, s;
+ int sense_class, valid, code;
+ unsigned char * sense_buffer = SCpnt->sense_buffer;
+ const char * error = NULL;
+
+ sense_class = (sense_buffer[0] >> 4) & 0x07;
+ code = sense_buffer[0] & 0xf;
+ valid = sense_buffer[0] & 0x80;
+
+ if (sense_class == 7) { /* extended sense data */
+ s = sense_buffer[7] + 8;
+ if(s > sizeof(SCpnt->sense_buffer))
+ s = sizeof(SCpnt->sense_buffer);
+
+ if (!valid)
+ printk("extra data not valid ");
+
+ if (sense_buffer[2] & 0x80)
+ printk( "FMK "); /* current command has read a filemark */
+ if (sense_buffer[2] & 0x40)
+ printk( "EOM "); /* end-of-medium condition exists */
+ if (sense_buffer[2] & 0x20)
+ printk( "ILI "); /* incorrect block length requested */
+
+ switch (code) {
+ case 0x0:
+ error = "Current"; /* error concerns current command */
+ break;
+ case 0x1:
+ error = "Deferred"; /* error concerns some earlier command */
+ /* e.g., an earlier write to disk cache succeeded, but
+ now the disk discovers that it cannot write the data */
+ break;
+ default:
+ error = "Invalid";
+ }
+
+ printk("%s error ", error);
+
+#if (CONSTANTS & CONST_SENSE)
+ printk( "%s%s: sense key %s\n", devclass,
+ kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[2] & 0x0f]);
+#else
+ printk("%s%s: sns = %2x %2x\n", devclass,
+ kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+#endif
+
+ /* Check to see if additional sense information is available */
+ if(sense_buffer[7] + 7 < 13 ||
+ (sense_buffer[12] == 0 && sense_buffer[13] == 0)) goto done;
+
+#if (CONSTANTS & CONST_XSENSE)
+ for(i=0; additional[i].text; i++)
+ if(additional[i].code1 == sense_buffer[12] &&
+ additional[i].code2 == sense_buffer[13])
+ printk("Additional sense indicates %s\n", additional[i].text);
+
+ for(i=0; additional2[i].text; i++)
+ if(additional2[i].code1 == sense_buffer[12] &&
+       additional2[i].code2_min <= sense_buffer[13]  &&
+       additional2[i].code2_max >= sense_buffer[13]) {
+ printk("Additional sense indicates ");
+ printk(additional2[i].text, sense_buffer[13]);
+ printk("\n");
+ };
+#else
+ printk("ASC=%2x ASCQ=%2x\n", sense_buffer[12], sense_buffer[13]);
+#endif
+ } else { /* non-extended sense data */
+
+ /*
+ * Standard says:
+ * sense_buffer[0] & 0200 : address valid
+ * sense_buffer[0] & 0177 : vendor-specific error code
+ * sense_buffer[1] & 0340 : vendor-specific
+ * sense_buffer[1..3] : 21-bit logical block address
+ */
+
+#if (CONSTANTS & CONST_SENSE)
+ if (sense_buffer[0] < 15)
+ printk("%s%s: old sense key %s\n", devclass,
+ kdevname(SCpnt->request.rq_dev), snstext[sense_buffer[0] & 0x0f]);
+ else
+#endif
+ printk("%s%s: sns = %2x %2x\n", devclass,
+ kdevname(SCpnt->request.rq_dev), sense_buffer[0], sense_buffer[2]);
+
+ printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
+ s = 4;
+ }
+
+ done:
+#if !(CONSTANTS & CONST_SENSE)
+ printk("Raw sense data:");
+ for (i = 0; i < s; ++i)
+ printk("0x%02x ", sense_buffer[i]);
+ printk("\n");
+#endif
+ return;
+}
+
+#if (CONSTANTS & CONST_MSG)
+static const char *one_byte_msgs[] = {
+/* 0x00 */ "Command Complete", NULL, "Save Pointers",
+/* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error",
+/* 0x06 */ "Abort", "Message Reject", "Nop", "Message Parity Error",
+/* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag",
+/* 0x0c */ "Bus device reset", "Abort Tag", "Clear Queue",
+/* 0x0f */ "Initiate Recovery", "Release Recovery"
+};
+
+#define NO_ONE_BYTE_MSGS (sizeof(one_byte_msgs) / sizeof (const char *))
+
+static const char *two_byte_msgs[] = {
+/* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag"
+/* 0x23 */ "Ignore Wide Residue"
+};
+
+#define NO_TWO_BYTE_MSGS (sizeof(two_byte_msgs) / sizeof (const char *))
+
+static const char *extended_msgs[] = {
+/* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request",
+/* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request"
+};
+
+#define NO_EXTENDED_MSGS (sizeof(extended_msgs) / sizeof (const char *))
+#endif /* (CONSTANTS & CONST_MSG) */
+
+int print_msg (const unsigned char *msg) {
+ int len = 0, i;
+ if (msg[0] == EXTENDED_MESSAGE) {
+ len = 3 + msg[1];
+#if (CONSTANTS & CONST_MSG)
+ if (msg[2] < NO_EXTENDED_MSGS)
+ printk ("%s ", extended_msgs[msg[2]]);
+ else
+ printk ("Extended Message, reserved code (0x%02x) ", (int) msg[2]);
+ switch (msg[2]) {
+ case EXTENDED_MODIFY_DATA_POINTER:
+ printk("pointer = %d", (int) (msg[3] << 24) | (msg[4] << 16) |
+ (msg[5] << 8) | msg[6]);
+ break;
+ case EXTENDED_SDTR:
+ printk("period = %d ns, offset = %d", (int) msg[3] * 4, (int)
+ msg[4]);
+ break;
+ case EXTENDED_WDTR:
+ printk("width = 2^%d bytes", msg[3]);
+ break;
+ default:
+ for (i = 2; i < len; ++i)
+ printk("%02x ", msg[i]);
+ }
+#else
+ for (i = 0; i < len; ++i)
+ printk("%02x ", msg[i]);
+#endif
+ /* Identify */
+ } else if (msg[0] & 0x80) {
+#if (CONSTANTS & CONST_MSG)
+ printk("Identify disconnect %sallowed %s %d ",
+ (msg[0] & 0x40) ? "" : "not ",
+ (msg[0] & 0x20) ? "target routine" : "lun",
+ msg[0] & 0x7);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ len = 1;
+ /* Normal One byte */
+ } else if (msg[0] < 0x1f) {
+#if (CONSTANTS & CONST_MSG)
+ if (msg[0] < NO_ONE_BYTE_MSGS)
+ printk(one_byte_msgs[msg[0]]);
+ else
+ printk("reserved (%02x) ", msg[0]);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ len = 1;
+ /* Two byte */
+ } else if (msg[0] <= 0x2f) {
+#if (CONSTANTS & CONST_MSG)
+ if ((msg[0] - 0x20) < NO_TWO_BYTE_MSGS)
+ printk("%s %02x ", two_byte_msgs[msg[0] - 0x20],
+ msg[1]);
+ else
+ printk("reserved two byte (%02x %02x) ",
+ msg[0], msg[1]);
+#else
+ printk("%02x %02x", msg[0], msg[1]);
+#endif
+ len = 2;
+ } else
+#if (CONSTANTS & CONST_MSG)
+ printk(reserved);
+#else
+ printk("%02x ", msg[0]);
+#endif
+ return len;
+}
+
+void print_Scsi_Cmnd (Scsi_Cmnd *cmd) {
+ printk("scsi%d : destination target %d, lun %d\n",
+ cmd->host->host_no,
+ cmd->target,
+ cmd->lun);
+ printk(" command = ");
+ print_command (cmd->cmnd);
+}
+
+#if (CONSTANTS & CONST_HOST)
+static const char * hostbyte_table[]={
+"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
+"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",NULL};
+
+void print_hostbyte(int scsiresult)
+{ static int maxcode=0;
+ int i;
+
+ if(!maxcode) {
+ for(i=0;hostbyte_table[i];i++) ;
+ maxcode=i-1;
+ }
+ printk("Hostbyte=0x%02x",host_byte(scsiresult));
+ if(host_byte(scsiresult)>maxcode) {
+ printk("is invalid ");
+ return;
+ }
+ printk("(%s) ",hostbyte_table[host_byte(scsiresult)]);
+}
+#else
+void print_hostbyte(int scsiresult)
+{ printk("Hostbyte=0x%02x ",host_byte(scsiresult));
+}
+#endif
+
+#if (CONSTANTS & CONST_DRIVER)
+static const char * driverbyte_table[]={
+"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
+"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD",NULL };
+
+static const char * driversuggest_table[]={"SUGGEST_OK",
+"SUGGEST_RETRY", "SUGGEST_ABORT", "SUGGEST_REMAP", "SUGGEST_DIE",
+unknown,unknown,unknown, "SUGGEST_SENSE",NULL};
+
+
+void print_driverbyte(int scsiresult)
+{ static int driver_max=0,suggest_max=0;
+ int i,dr=driver_byte(scsiresult)&DRIVER_MASK,
+ su=(driver_byte(scsiresult)&SUGGEST_MASK)>>4;
+
+ if(!driver_max) {
+ for(i=0;driverbyte_table[i];i++) ;
+ driver_max=i;
+ for(i=0;driversuggest_table[i];i++) ;
+ suggest_max=i;
+ }
+ printk("Driverbyte=0x%02x",driver_byte(scsiresult));
+ printk("(%s,%s) ",
+ dr<driver_max ? driverbyte_table[dr]:"invalid",
+ su<suggest_max ? driversuggest_table[su]:"invalid");
+}
+#else
+void print_driverbyte(int scsiresult)
+{ printk("Driverbyte=0x%02x ",driver_byte(scsiresult));
+}
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/constants.h b/linux/src/drivers/scsi/constants.h
new file mode 100644
index 0000000..e10527e
--- /dev/null
+++ b/linux/src/drivers/scsi/constants.h
@@ -0,0 +1,6 @@
+#ifndef _CONSTANTS_H
+#define _CONSTANTS_H
+extern int print_msg(const unsigned char *);
+extern void print_status(int);
+extern void print_Scsi_Cmnd (Scsi_Cmnd *);
+#endif /* def _CONSTANTS_H */
diff --git a/linux/src/drivers/scsi/dc390.h b/linux/src/drivers/scsi/dc390.h
new file mode 100644
index 0000000..18c7e03
--- /dev/null
+++ b/linux/src/drivers/scsi/dc390.h
@@ -0,0 +1,147 @@
+/***********************************************************************
+ * FILE NAME : DC390.H *
+ * BY : C.L. Huang *
+ * Description: Device Driver for Tekram DC-390(T) PCI SCSI *
+ * Bus Master Host Adapter *
+ ***********************************************************************/
+
+/* Kernel version autodetection */
+
+#include <linux/version.h>
+/* Convert Linux Version, Patch-level, Sub-level to LINUX_VERSION_CODE. */
+#define ASC_LINUX_VERSION(V, P, S) (((V) * 65536) + ((P) * 256) + (S))
+
+#if LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,50)
+#define VERSION_ELF_1_2_13
+#elif LINUX_VERSION_CODE < ASC_LINUX_VERSION(1,3,95)
+#define VERSION_1_3_85
+#else
+#define VERSION_2_0_0
+#endif
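+
+/*
+ * For example, ASC_LINUX_VERSION(2,0,0) evaluates to 2*65536 = 131072, so
+ * any 2.0.x kernel (LINUX_VERSION_CODE >= 131072) falls through to the
+ * #else branch above and selects VERSION_2_0_0.
+ */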
+
+/*
+ * AMD 53C974 driver, header file
+ */
+
+#ifndef DC390_H
+#define DC390_H
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#ifdef VERSION_2_0_0
+#include <scsi/scsicam.h>
+#else
+#include <linux/scsicam.h>
+#endif
+
+extern int DC390_detect(Scsi_Host_Template *psht);
+extern int DC390_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *));
+extern int DC390_abort(Scsi_Cmnd *cmd);
+
+#ifdef VERSION_2_0_0
+extern int DC390_reset(Scsi_Cmnd *cmd, unsigned int resetFlags);
+#else
+extern int DC390_reset(Scsi_Cmnd *cmd);
+#endif
+
+#ifdef VERSION_ELF_1_2_13
+extern int DC390_bios_param(Disk *disk, int devno, int geom[]);
+#else
+extern int DC390_bios_param(Disk *disk, kdev_t devno, int geom[]);
+#endif
+
+#ifdef MODULE
+static int DC390_release(struct Scsi_Host *);
+#else
+#define DC390_release NULL
+#endif
+
+#ifndef VERSION_ELF_1_2_13
+extern struct proc_dir_entry proc_scsi_tmscsim;
+extern int tmscsim_proc_info(char *buffer, char **start, off_t offset, int length, int hostno, int inout);
+#endif
+
+#ifdef VERSION_2_0_0
+
+#define DC390_T { \
+ NULL, /* *next */ \
+ NULL, /* *usage_count */ \
+ &proc_scsi_tmscsim, /* *proc_dir */ \
+ tmscsim_proc_info, /* (*proc_info)() */ \
+ "Tekram DC390(T) V1.11 Feb-05-1997", /* *name */ \
+ DC390_detect, \
+ DC390_release, /* (*release)() */ \
+ NULL, /* *(*info)() */ \
+ NULL, /* (*command)() */ \
+ DC390_queue_command, \
+ DC390_abort, \
+ DC390_reset, \
+ NULL, /* slave attach */\
+ DC390_bios_param, \
+ 10,/* can queue(-1) */ \
+ 7, /* id(-1) */ \
+ SG_ALL, \
+ 2, /* cmd per lun(2) */ \
+ 0, /* present */ \
+ 0, /* unchecked isa dma */ \
+ DISABLE_CLUSTERING \
+ }
+#endif
+
+
+#ifdef VERSION_1_3_85
+
+#define DC390_T { \
+ NULL, /* *next */ \
+ NULL, /* *usage_count */ \
+ &proc_scsi_tmscsim, /* *proc_dir */ \
+ tmscsim_proc_info, /* (*proc_info)() */ \
+ "Tekram DC390(T) V1.11 Feb-05-1997", /* *name */ \
+ DC390_detect, \
+ DC390_release, /* (*release)() */ \
+ NULL, /* *(*info)() */ \
+ NULL, /* (*command)() */ \
+ DC390_queue_command, \
+ DC390_abort, \
+ DC390_reset, \
+ NULL, /* slave attach */\
+ DC390_bios_param, \
+ 10,/* can queue(-1) */ \
+ 7, /* id(-1) */ \
+ SG_ALL, \
+ 2, /* cmd per lun(2) */ \
+ 0, /* present */ \
+ 0, /* unchecked isa dma */ \
+ DISABLE_CLUSTERING \
+ }
+#endif
+
+
+#ifdef VERSION_ELF_1_2_13
+
+#define DC390_T { \
+ NULL, \
+ NULL, \
+ "Tekram DC390(T) V1.11 Feb-05-1997",\
+ DC390_detect, \
+ DC390_release, \
+ NULL, /* info */ \
+ NULL, /* command, deprecated */ \
+ DC390_queue_command, \
+ DC390_abort, \
+ DC390_reset, \
+ NULL, /* slave attach */\
+ DC390_bios_param, \
+ 10,/* can queue(-1) */ \
+ 7, /* id(-1) */ \
+ 16,/* old (SG_ALL) */ \
+ 2, /* cmd per lun(2) */ \
+ 0, /* present */ \
+ 0, /* unchecked isa dma */ \
+ DISABLE_CLUSTERING \
+ }
+#endif
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#endif /* DC390_H */
diff --git a/linux/src/drivers/scsi/dtc.c b/linux/src/drivers/scsi/dtc.c
new file mode 100644
index 0000000..94c3e33
--- /dev/null
+++ b/linux/src/drivers/scsi/dtc.c
@@ -0,0 +1,400 @@
+
+#define AUTOSENSE
+#define PSEUDO_DMA
+#define DONT_USE_INTR
+#define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */
+#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\
+ NDEBUG_SELECTION+NDEBUG_ARBITRATION)
+#define DMA_WORKS_RIGHT
+
+
+/*
+ * DTC 3180/3280 driver, by
+ * Ray Van Tassle rayvt@comm.mot.com
+ *
+ * taken from ...
+ * Trantor T128/T128F/T228 driver by...
+ *
+ * Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+*/
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers.
+ * You probably want this.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - since the board is memory mapped,
+ * a BIOS signature is scanned for to locate the registers.
+ * An interrupt is triggered to autoprobe for the interrupt
+ * line.
+ *
+ * 2. With command line overrides - dtc=address,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+*/
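+
+/*
+ * Illustrative example (the address and IRQ values here are made up, not a
+ * recommendation): a board jumpered at memory address 0xc8000 using IRQ 10
+ * could be forced with a boot line such as
+ *
+ *	append = "dtc=0xc8000,10"
+ *
+ * in /etc/lilo.conf; dtc_setup() below parses the two integers into the
+ * overrides[] array and disables autoprobing of that base address.
+ */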
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful for finding
+   where something crashed or got stuck) */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+#if 0
+#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/blk.h>
+#include <asm/io.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "dtc.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include<linux/stat.h>
+#include<linux/string.h>
+
+struct proc_dir_entry proc_scsi_dtc = {
+ PROC_SCSI_T128, 7, "dtc3x80",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+ };
+
+
+static struct override {
+ __u32 address;
+ int irq;
+} overrides
+#ifdef OVERRIDE
+[] = OVERRIDE;
+#else
+[4] = {{0, IRQ_AUTO}, {0, IRQ_AUTO}, {0, IRQ_AUTO},
+ {0, IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ __u32 address;
+ int noauto;
+} bases[] = {{0xcc000, 0}, {0xc8000, 0},
+{0xdc000, 0}, {0xd8000, 0}};
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+static const struct signature {
+ const char *string;
+ int offset;
+} signatures[] = { {"DATA TECHNOLOGY CORPORATION BIOS", 0x25}, };
+
+#define NO_SIGNATURES (sizeof (signatures) / sizeof (struct signature))
+
+/*
+ * Function : dtc_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+*/
+
+void dtc_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("dtc_setup: usage dtc=address,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].address = ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].address == ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : int dtc_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes DTC 3180/3280 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+*/
+
+
+int dtc_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0, current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned char *base;
+ int sig, count;
+
+ tpnt->proc_dir = &proc_scsi_dtc;
+ tpnt->proc_info = &dtc_proc_info;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ base = NULL;
+
+ if (overrides[current_override].address)
+ base = (unsigned char *)overrides[current_override].address;
+ else
+ for (; !base && (current_base < NO_BASES); ++current_base) {
+#if (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi : probing address %08x\n", (unsigned int) bases[current_base].address);
+#endif
+ for (sig = 0; sig < NO_SIGNATURES; ++sig)
+ if (!bases[current_base].noauto && !memcmp
+ ((unsigned char *)(bases[current_base].address + signatures[sig].offset),
+ signatures[sig].string, strlen(signatures[sig].string))) {
+ base = (unsigned char *)bases[current_base].address;
+#if (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi-dtc : detected board.\n");
+#endif
+ break;
+ }
+ }
+
+#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi-dtc : base = %08x\n", (unsigned int) base);
+#endif
+
+ if (!base)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->base = base;
+
+ NCR5380_init(instance, 0);
+
+ NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); /* Enable int's */
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
+
+#ifndef DONT_USE_INTR
+/* With interrupts enabled, it will sometimes hang when doing heavy
+ * reads. So better not enable them until I figure it out. */
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, dtc_intr, SA_INTERRUPT, "dtc")) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+#else
+ if (instance->irq != IRQ_NONE)
+ printk("scsi%d : interrupts not used. Might as well not jumper it.\n",
+ instance->host_no);
+ instance->irq = IRQ_NONE;
+#endif
+#if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%05X", instance->host_no, (int)instance->base);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, DTC_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int dtc_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+*/
+
+/*
+ * XXX Most SCSI boards use this mapping; I could be incorrect. Someone
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+*/
+
+int dtc_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
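+
+/*
+ * Worked example (illustrative): a disk reporting 2097152 sectors (1 GiB of
+ * 512-byte sectors) gets 64 heads, 32 sectors/track and
+ * 2097152 >> 11 = 1024 cylinders, i.e. 64 * 32 * 1024 sectors in total.
+ */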
+
+/****************************************************************
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, reads len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+*/
+
+static int dtc_maxi = 0;
+static int dtc_wmaxi = 0;
+
+static inline int NCR5380_pread (struct Scsi_Host *instance,
+ unsigned char *dst, int len)
+ {
+ unsigned char *d = dst;
+ int i; /* For counting time spent in the poll-loop */
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ i = 0;
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
+ if (instance->irq == IRQ_NONE)
+ NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
+ else
+ NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
+ NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
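+  /* The board moves data through a 128-byte host buffer, so the block
+     count above and the copy loop below assume len is a multiple of 128. */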
+ rtrc(1);
+ while (len > 0) {
+ rtrc(2);
+ while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
+ ++i;
+ rtrc(3);
+ memcpy(d, (char *)(base + DTC_DATA_BUF), 128);
+ d += 128;
+ len -= 128;
+ rtrc(7); /*** with int's on, it sometimes hangs after here.
+ * Looks like something makes HBNR go away. */
+ }
+ rtrc(4);
+ while ( !(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
+ ++i;
+ NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
+ rtrc(0);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ if (i > dtc_maxi)
+ dtc_maxi = i;
+ return(0);
+}
+
+/****************************************************************
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+*/
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance,
+ unsigned char *src, int len) {
+ int i;
+ NCR5380_local_declare();
+ NCR5380_setup(instance);
+
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE);
+ /* set direction (write) */
+ if (instance->irq == IRQ_NONE)
+ NCR5380_write(DTC_CONTROL_REG, 0);
+ else
+ NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
+ NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
+ for (i = 0; len > 0; ++i) {
+ rtrc(5);
+ /* Poll until the host buffer can accept data. */
+ while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
+ ++i;
+ rtrc(3);
+ memcpy((char *)(base + DTC_DATA_BUF), src, 128);
+ src += 128;
+ len -= 128;
+ }
+ rtrc(4);
+ while ( !(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
+ ++i;
+ rtrc(6);
+ /* Wait until the last byte has been sent to the disk */
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ++i;
+ rtrc(7);
+ /* Check for parity error here. fixme. */
+ NCR5380_write(MODE_REG, 0); /* Clear the operating mode */
+ rtrc(0);
+ if (i > dtc_wmaxi)
+ dtc_wmaxi = i;
+ return (0);
+}
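+
+/*
+ * Editorial note (not part of the original driver): both pseudo-DMA
+ * loops above move data in 128-byte chunks and program DTC_BLK_CNT
+ * with "len >> 7", so they implicitly assume len is a multiple of
+ * 128 bytes; the generic NCR5380 core presumably only calls these
+ * helpers for suitably sized transfers and falls back to programmed
+ * I/O otherwise.
+ */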
+
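+/*
+ * Editorial note (not part of the original driver): textually
+ * including NCR5380.c below is the usual 5380-family glue pattern.
+ * dtc.h first redefines NCR5380_read, NCR5380_write, NCR5380_intr
+ * and friends, so the generic core is compiled in this translation
+ * unit with this board's memory-mapped register accessors.
+ */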
+#include "NCR5380.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = DTC3x80;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/dtc.h b/linux/src/drivers/scsi/dtc.h
new file mode 100644
index 0000000..4c41237
--- /dev/null
+++ b/linux/src/drivers/scsi/dtc.h
@@ -0,0 +1,169 @@
+/*
+ * DTC controller, taken from T128 driver by...
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ *
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef DTC3280_H
+#define DTC3280_H
+
+#define DTC_PUBLIC_RELEASE 1
+
+/*#define DTCDEBUG 0x1*/
+#define DTCDEBUG_INIT 0x1
+#define DTCDEBUG_TRANSFER 0x2
+
+/*
+ * The DTC3180 & 3280 boards are memory mapped.
+ *
+ */
+
+/*
+ */
+/* Offset from DTC_5380_OFFSET */
+#define DTC_CONTROL_REG 0x100 /* rw */
+#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
+#define CSR_DIR_READ 0x40 /* rw direction, 1 = read 0 = write */
+
+#define CSR_RESET 0x80 /* wo Resets 53c400 */
+#define CSR_5380_REG 0x80 /* ro 5380 registers can be accessed */
+#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
+#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
+#define CSR_5380_INTR 0x10 /* rw Enable 5380 interrupts */
+#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
+#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Host buffer not ready */
+#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer ready */
+#define CSR_GATED_5380_IRQ 0x01 /* ro Last block xferred */
+#define CSR_INT_BASE (CSR_SCSI_BUFF_INTR | CSR_5380_INTR)
+
+
+#define DTC_BLK_CNT 0x101 /* rw
+ * # of 128-byte blocks to transfer */
+
+
+#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
+
+#define DTC_SWITCH_REG 0x3982 /* ro - DIP switches */
+#define DTC_RESUME_XFER 0x3982 /* wo - resume data xfer
+ * after disconnect/reconnect*/
+
+#define DTC_5380_OFFSET 0x3880 /* 8 registers here, see NCR5380.h */
+
+/*!!!! for dtc, it's a 128 byte buffer at 3900 !!! */
+#define DTC_DATA_BUF 0x3900 /* rw 128 bytes long */
+
+
+#ifndef ASM
+int dtc_abort(Scsi_Cmnd *);
+int dtc_biosparam(Disk *, kdev_t, int*);
+int dtc_detect(Scsi_Host_Template *);
+int dtc_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int dtc_reset(Scsi_Cmnd *, unsigned int reset_flags);
+int dtc_proc_info (char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define DTC3x80 {NULL, NULL, NULL, NULL, \
+ "DTC 3180/3280 ", dtc_detect, NULL, \
+ NULL, \
+ NULL, dtc_queue_command, dtc_abort, dtc_reset, NULL, \
+ dtc_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned char *base
+
+#define NCR5380_local_declare() \
+ volatile unsigned char *base
+
+#define NCR5380_setup(instance) \
+ base = (volatile unsigned char *) (instance)->base
+
+#define DTC_address(reg) (base + DTC_5380_OFFSET + reg)
+
+#define dbNCR5380_read(reg) \
+ (rval=*(DTC_address(reg)), \
+ (((unsigned char) printk("DTC : read register %d at addr %08x is: %02x\n"\
+ , (reg), (int)DTC_address(reg), rval)), rval ) )
+
+#define dbNCR5380_write(reg, value) do { \
+ printk("DTC : write %02x to register %d at address %08x\n", \
+ (value), (reg), (int)DTC_address(reg)); \
+ *(DTC_address(reg)) = (value);} while(0)
+
+
+#if !(DTCDEBUG & DTCDEBUG_TRANSFER)
+#define NCR5380_read(reg) (*(DTC_address(reg)))
+#define NCR5380_write(reg, value) (*(DTC_address(reg)) = (value))
+#else
+#define NCR5380_read(reg) (*(DTC_address(reg)))
+#define xNCR5380_read(reg) \
+ (((unsigned char) printk("DTC : read register %d at address %08x\n"\
+ , (reg), DTC_address(reg))), *(DTC_address(reg)))
+
+#define NCR5380_write(reg, value) do { \
+ printk("DTC : write %02x to register %d at address %08x\n", \
+ (value), (reg), (int)DTC_address(reg)); \
+ *(DTC_address(reg)) = (value); } while(0)
+#endif
+
+#define NCR5380_intr dtc_intr
+#define NCR5380_queue_command dtc_queue_command
+#define NCR5380_abort dtc_abort
+#define NCR5380_reset dtc_reset
+#define NCR5380_proc_info dtc_proc_info
+
+/* 15 12 11 10
+ 1001 1100 0000 0000 */
+
+#define DTC_IRQS 0x9c00
+
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* DTC3280_H */
diff --git a/linux/src/drivers/scsi/eata.c b/linux/src/drivers/scsi/eata.c
new file mode 100644
index 0000000..49f0827
--- /dev/null
+++ b/linux/src/drivers/scsi/eata.c
@@ -0,0 +1,2331 @@
+/*
+ * eata.c - Low-level driver for EATA/DMA SCSI host adapters.
+ *
+ * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
+ * + Added command line option (rs:[y|n]) to reverse the scan order
+ * of PCI boards. The default is rs:y, which reverses the BIOS order
+ * while registering PCI boards. The default value rs:y generates
+ * the same order as all previous revisions of this driver.
+ * Please note that "BIOS order" might have been reversed itself
+ * after the 2.1.9x PCI modifications in the linux kernel.
+ * The rs value is ignored when the explicit list of addresses
+ * is used by the "eata=port0,port1,..." command line option.
+ * + Added command line option (et:[y|n]) to force use of extended
+ * translation (255 heads, 63 sectors) as disk geometry.
+ * The default is et:n, which uses the disk geometry returned
+ * by scsicam_bios_param. The default value et:n is compatible with
+ * all previous revisions of this driver.
+ *
+ * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
+ * Increased busy timeout from 10 msec. to 200 msec. while
+ * processing interrupts.
+ *
+ * 16 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
+ * Improved abort handling during the eh recovery process.
+ *
+ * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
+ * The driver is now fully SMP safe, including the
+ * abort and reset routines.
+ * Added command line options (eh:[y|n]) to choose between
+ * new_eh_code and the old scsi code.
+ * If linux version >= 2.1.101 the default is eh:y, while the eh
+ * option is ignored for previous releases and the old scsi code
+ * is used.
+ *
+ * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
+ * Reworked interrupt handler.
+ *
+ * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
+ * Major reliability improvement: when a batch with overlapping
+ * requests is detected, requests are queued one at a time
+ * eliminating any possible board or drive reordering.
+ *
+ * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
+ * Improved SMP support (if linux version >= 2.1.95).
+ *
+ * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
+ * Added support for new PCI code and IO-APIC remapping of irqs.
+ * Performance improvement: when sequential i/o is detected,
+ * always use direct sort instead of reverse sort.
+ *
+ * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
+ * io_port is now unsigned long.
+ *
+ * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
+ * Use new scsi error handling code (if linux version >= 2.1.88).
+ * Use new interrupt code.
+ *
+ * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
+ * Use of udelay inside the wait loops to avoid timeout
+ * problems with fast cpus.
+ * Removed check about useless calls to the interrupt service
+ * routine (reported on SMP systems only).
+ * At initialization time "sorted/unsorted" is displayed instead
+ * of "linked/unlinked" to reinforce the fact that "linking" is
+ * nothing but "elevator sorting" in the actual implementation.
+ *
+ * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
+ * Use of serial_number_at_timeout in abort and reset processing.
+ * Use of the __initfunc and __initdata macro in setup code.
+ * Minor cleanups in the list_statistics code.
+ * Increased controller busy timeout in order to better support
+ * slow SCSI devices.
+ *
+ * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
+ * When loading as a module, parameter passing is now supported
+ * both in 2.0 and in 2.1 style.
+ * Fixed data transfer direction for some SCSI opcodes.
+ * Immediate acknowledge to request sense commands.
+ * Linked commands to each disk device are now reordered by elevator
+ * sorting. Rare cases in which reordering of write requests could
+ * cause wrong results are managed.
+ * Fixed spurious timeouts caused by long simple queue tag sequences.
+ * New command line option (tm:[0-3]) to choose the type of tags:
+ * 0 -> mixed (default); 1 -> simple; 2 -> head; 3 -> ordered.
+ *
+ * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
+ * Added command line options to enable/disable linked commands
+ * (lc:[y|n]), tagged commands (tc:[y|n]) and to set the max queue
+ * depth (mq:xx). Default is "eata=lc:n,tc:n,mq:16".
+ * Improved command linking.
+ * Documented how to setup RAID-0 with DPT SmartRAID boards.
+ *
+ * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
+ * Added linked command support.
+ * Improved detection of PCI boards using ISA base addresses.
+ *
+ * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
+ * Added support for tagged commands and queue depth adjustment.
+ *
+ * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
+ * When CONFIG_PCI is defined, BIOS32 is used to include in the
+ * list of i/o ports to be probed all the PCI SCSI controllers.
+ * The list of i/o ports to be probed can be overwritten by the
+ * "eata=port0,port1,...." boot command line option.
+ * Scatter/gather lists are now allocated by a number of kmalloc
+ * calls, in order to avoid the previous size limit of 64Kb.
+ *
+ * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
+ * Added support for EATA 2.0C, PCI, multichannel and wide SCSI.
+ *
+ * 27 Sep 1996 rev. 2.12 for linux 2.1.0
+ * Portability cleanups (virtual/bus addressing, little/big endian
+ * support).
+ *
+ * 09 Jul 1996 rev. 2.11 for linux 2.0.4
+ * Number of internal retries is now limited.
+ *
+ * 16 Apr 1996 rev. 2.10 for linux 1.3.90
+ * New argument "reset_flags" to the reset routine.
+ *
+ * 6 Jul 1995 rev. 2.01 for linux 1.3.7
+ * Update required by the new /proc/scsi support.
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * Now DEBUG_RESET is disabled by default.
+ * Register a board even if it does not assert DMA protocol support
+ * (DPT SK2011B does not report correctly the dmasup bit).
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ * New list of Data Out SCSI commands.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ * All external symbols renamed to avoid possible name conflicts.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ *
+ * 24 Jan 1995 rev. 1.13 for linux 1.1.85
+ * Use optimized board configuration, with a measured performance
+ * increase in the range 10%-20% on i/o throughput.
+ *
+ * 16 Jan 1995 rev. 1.12 for linux 1.1.81
+ * Fix mscp structure comments (no functional change).
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 17 Dec 1994 rev. 1.11 for linux 1.1.74
+ * Use the scsicam_bios_param routine. This allows an easy
+ * migration path from disk partition tables created using
+ * different SCSI drivers and non-optimal disk geometry.
+ *
+ * 15 Dec 1994 rev. 1.10 for linux 1.1.74
+ * Added support for ISA EATA boards (DPT PM2011, DPT PM2021).
+ * The host->block flag is set for all the detected ISA boards.
+ * The detect routine no longer enforces LEVEL triggering
+ * for EISA boards, it just prints a warning message.
+ *
+ * 30 Nov 1994 rev. 1.09 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 18 Nov 1994 rev. 1.08 for linux 1.1.64
+ * Forces sg_tablesize = 64 and can_queue = 64 if these
+ * values are not correctly detected (DPT PM2012).
+ *
+ * 14 Nov 1994 rev. 1.07 for linux 1.1.63 Final BETA release.
+ * 04 Aug 1994 rev. 1.00 for linux 1.1.39 First BETA release.
+ *
+ *
+ * This driver is based on the CAM (Common Access Method Committee)
+ * EATA (Enhanced AT Bus Attachment) rev. 2.0A, using DMA protocol.
+ *
+ * Copyright (C) 1994-1998 Dario Ballabio (dario@milano.europe.dg.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ */
+
+/*
+ *
+ * Here is a brief description of the DPT SCSI host adapters.
+ * All these boards provide an EATA/DMA compatible programming interface
+ * and are fully supported by this driver in any configuration, including
+ * multiple SCSI channels:
+ *
+ * PM2011B/9X - Entry Level ISA
+ * PM2021A/9X - High Performance ISA
+ * PM2012A Old EISA
+ * PM2012B Old EISA
+ * PM2022A/9X - Entry Level EISA
+ * PM2122A/9X - High Performance EISA
+ * PM2322A/9X - Extra High Performance EISA
+ * PM3021 - SmartRAID Adapter for ISA
+ * PM3222 - SmartRAID Adapter for EISA (PM3222W is 16-bit wide SCSI)
+ * PM3224 - SmartRAID Adapter for PCI (PM3224W is 16-bit wide SCSI)
+ *
+ * The above list is just an indication: as a matter of fact all DPT
+ * boards using the EATA/DMA protocol are supported by this driver,
+ * since they use exactly the same programming interface.
+ *
+ * The DPT PM2001 provides only the EATA/PIO interface and hence is not
+ * supported by this driver.
+ *
+ * This code has been tested with up to 3 Distributed Processing Technology
+ * PM2122A/9X (DPT SCSI BIOS v002.D1, firmware v05E.0) EISA controllers,
+ * in any combination of private and shared IRQ.
+ * PCI support has been tested using up to 2 DPT PM3224W (DPT SCSI BIOS
+ * v003.D0, firmware v07G.0).
+ *
+ * DPT SmartRAID boards support "Hardware Array" - a group of disk drives
+ * which are all members of the same RAID-0, RAID-1 or RAID-5 array implemented
+ * in host adapter hardware. Hardware Arrays are fully compatible with this
+ * driver, since they look to it as a single disk drive.
+ *
+ * WARNING: to create a RAID-0 "Hardware Array" you must select "Other Unix"
+ * as the current OS in the DPTMGR "Initial System Installation" menu.
+ * Otherwise RAID-0 is generated as an "Array Group" (i.e. software RAID-0),
+ * which is not supported by the current SCSI subsystem.
+ * To get the "Array Group" functionality, the Linux MD driver must be used
+ * instead of the DPT "Array Group" feature.
+ *
+ * Multiple ISA, EISA and PCI boards can be configured in the same system.
+ * It is suggested to put all the EISA boards on the same IRQ level, all
+ * the PCI boards on another IRQ level, while ISA boards cannot share
+ * interrupts.
+ *
+ * If you configure multiple boards on the same IRQ, the interrupt must
+ * be _level_ triggered (not _edge_ triggered).
+ *
+ * This driver detects EATA boards by probes at fixed port addresses,
+ * so no BIOS32 or PCI BIOS support is required.
+ * The suggested way to detect a generic EATA PCI board is to force on it
+ * any unused EISA address, even if there are other controllers on the EISA
+ * bus, or even if your system has no EISA bus at all.
+ * Do not force any ISA address on EATA PCI boards.
+ *
+ * If PCI bios support is configured into the kernel, BIOS32 is used to
+ * include in the list of i/o ports to be probed all the PCI SCSI controllers.
+ *
+ * Due to a DPT BIOS "feature", it might not be possible to force an EISA
+ * address on more than a single DPT PCI board, so in this case you have to
+ * let the PCI BIOS assign the addresses.
+ *
+ * The sequence of detection probes is:
+ *
+ * - ISA 0x1F0;
+ * - PCI SCSI controllers (only if BIOS32 is available);
+ * - EISA/PCI 0x1C88 through 0xFC88 (corresponding to EISA slots 1 to 15);
+ * - ISA 0x170, 0x230, 0x330.
+ *
+ * The above list of detection probes can be totally replaced by the
+ * boot command line option: "eata=port0,port1,port2,...", where the
+ * port0, port1... arguments are ISA/EISA/PCI addresses to be probed.
+ * For example using "eata=0x7410,0x7450,0x230", the driver probes
+ * only the two PCI addresses 0x7410 and 0x7450 and the ISA address 0x230,
+ * in this order; "eata=0" totally disables this driver.
+ *
+ * After the optional list of detection probes, other possible command line
+ * options are:
+ *
+ * eh:y use new scsi code (linux 2.2 only);
+ * eh:n use old scsi code;
+ * et:y force use of extended translation (255 heads, 63 sectors);
+ * et:n use disk geometry detected by scsicam_bios_param;
+ * rs:y reverse scan order while detecting PCI boards;
+ * rs:n use BIOS order while detecting PCI boards;
+ * lc:y enables linked commands;
+ * lc:n disables linked commands;
+ * tc:y enables tagged commands;
+ * tc:n disables tagged commands;
+ * tm:0 use head/simple/ordered queue tag sequences;
+ * tm:1 use only simple queue tags;
+ * tm:2 use only head of queue tags;
+ * tm:3 use only ordered queue tags;
+ * mq:xx set the max queue depth to the value xx (2 <= xx <= 32).
+ *
+ * The default value is: "eata=lc:n,tc:n,mq:16,tm:0,et:n,rs:y".
+ * An example using the list of detection probes could be:
+ * "eata=0x7410,0x230,lc:y,tc:n,mq:4,eh:n,et:n".
+ *
+ * When loading as a module, parameters can be specified as well.
+ * The above example would be (use 1 in place of y and 0 in place of n):
+ *
+ * modprobe eata io_port=0x7410,0x230 linked_comm=1 tagged_comm=0 \
+ * max_queue_depth=4 tag_mode=0 use_new_eh_code=0 \
+ * ext_tran=0 rev_scan=1
+ *
+ * ----------------------------------------------------------------------------
+ * In this implementation, linked commands are designed to work with any DISK
+ * or CD-ROM, since this linking has only the intent of clustering (time-wise)
+ * and reordering by elevator sorting commands directed to each device,
+ * without any relation with the actual SCSI protocol between the controller
+ * and the device.
+ * If Q is the queue depth reported at boot time for each device (also named
+ * cmds/lun) and Q > 2, whenever there is already an active command to the
+ * device all other commands to the same device (up to Q-1) are kept waiting
+ * in the elevator sorting queue. When the active command completes, the
+ * commands in this queue are sorted by sector address. The sort is chosen
+ * between increasing or decreasing by minimizing the seek distance between
+ * the sector of the commands just completed and the sector of the first
+ * command in the list to be sorted.
+ * Trivial math assures that the unsorted average seek distance when doing
+ * random seeks over S sectors is S/3.
+ * When (Q-1) requests are uniformly distributed over S sectors, the average
+ * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
+ * average seek distance for (Q-1) random requests over S sectors is S/Q.
+ * The elevator sorting hence divides the seek distance by a factor Q/3.
+ * The above pure geometric remarks are valid in all cases and the
+ * driver effectively reduces the seek distance by the predicted factor
+ * when there are Q concurrent read i/o operations on the device, but this
+ * does not necessarily result in a noticeable performance improvement:
+ * your mileage may vary....
+ *
+ * Note: command reordering inside a batch of queued commands could cause
+ * wrong results only if there is at least one write request and the
+ * intersection (sector-wise) of all requests is not empty.
+ * When the driver detects a batch including overlapping requests
+ * (a really rare event) strict serial (pid) order is enforced.
+ * ----------------------------------------------------------------------------
+ * The extended translation option (et:y) is useful when using large physical
+ * disks/arrays. It could also be useful when switching between Adaptec boards
+ * and DPT boards without reformatting the disk.
+ * When a boot disk is partitioned with extended translation, in order to
+ * be able to boot it with a DPT board it could be necessary to add to
+ * lilo.conf additional commands as in the following example:
+ *
+ * fix-table
+ * disk=/dev/sda bios=0x80 sectors=63 heads=128 cylinders=546
+ *
+ * where the above geometry should be replaced with the one reported at
+ * power up by the DPT controller.
+ * ----------------------------------------------------------------------------
+ *
+ * The boards are named EATA0, EATA1,... according to the detection order.
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = TRUE for all ISA boards.
+ */
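+
+/*
+ * Editorial illustration (not part of the original driver) of the
+ * elevator-sorting estimate in the header comment above: with a
+ * queue depth Q = 16 the predicted reduction of the average seek
+ * distance is Q/3, i.e. from S/3 (unsorted random order) down to
+ * S/16 (sorted), roughly a factor of 5.3 - valid only under the
+ * stated uniform-distribution assumptions.
+ */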
+
+#include <linux/version.h>
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+#define MAX_INT_PARAM 10
+
+#if defined(MODULE)
+#include <linux/module.h>
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,26)
+MODULE_PARM(io_port, "1-" __MODULE_STRING(MAX_INT_PARAM) "i");
+MODULE_PARM(linked_comm, "i");
+MODULE_PARM(tagged_comm, "i");
+MODULE_PARM(link_statistics, "i");
+MODULE_PARM(max_queue_depth, "i");
+MODULE_PARM(tag_mode, "i");
+MODULE_PARM(use_new_eh_code, "i");
+MODULE_PARM(ext_tran, "i");
+MODULE_PARM(rev_scan, "i");
+MODULE_AUTHOR("Dario Ballabio");
+#endif
+
+#endif
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include "eata.h"
+#include <linux/stat.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,93)
+#include <linux/bios32.h>
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,36)
+#include <linux/init.h>
+#else
+#define __initfunc(A) A
+#define __initdata
+#define __init
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+#include <asm/spinlock.h>
+#define IRQ_FLAGS
+#define IRQ_LOCK
+#define IRQ_LOCK_SAVE
+#define IRQ_UNLOCK
+#define IRQ_UNLOCK_RESTORE
+#define SPIN_FLAGS unsigned long spin_flags;
+#define SPIN_LOCK spin_lock_irq(&io_request_lock);
+#define SPIN_LOCK_SAVE spin_lock_irqsave(&io_request_lock, spin_flags);
+#define SPIN_UNLOCK spin_unlock_irq(&io_request_lock);
+#define SPIN_UNLOCK_RESTORE \
+ spin_unlock_irqrestore(&io_request_lock, spin_flags);
+static int use_new_eh_code = TRUE;
+#else
+#define IRQ_FLAGS unsigned long irq_flags;
+#define IRQ_LOCK cli();
+#define IRQ_LOCK_SAVE do {save_flags(irq_flags); cli();} while (0);
+#define IRQ_UNLOCK sti();
+#define IRQ_UNLOCK_RESTORE do {restore_flags(irq_flags);} while (0);
+#define SPIN_FLAGS
+#define SPIN_LOCK
+#define SPIN_LOCK_SAVE
+#define SPIN_UNLOCK
+#define SPIN_UNLOCK_RESTORE
+static int use_new_eh_code = FALSE;
+#endif
+
+struct proc_dir_entry proc_scsi_eata2x = {
+ PROC_SCSI_EATA2X, 6, "eata2x",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#undef FORCE_CONFIG
+
+#undef DEBUG_LINKED_COMMANDS
+#undef DEBUG_DETECT
+#undef DEBUG_PCI_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_RESET
+#undef DEBUG_GENERATE_ERRORS
+#undef DEBUG_GENERATE_ABORTS
+#undef DEBUG_GEOMETRY
+
+#define MAX_ISA 4
+#define MAX_VESA 0
+#define MAX_EISA 15
+#define MAX_PCI 16
+#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
+#define MAX_CHANNEL 4
+#define MAX_LUN 32
+#define MAX_TARGET 32
+#define MAX_MAILBOXES 64
+#define MAX_SGLIST 64
+#define MAX_LARGE_SGLIST 122
+#define MAX_INTERNAL_RETRIES 64
+#define MAX_CMD_PER_LUN 2
+#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
+
+#define SKIP ULONG_MAX
+#define FALSE 0
+#define TRUE 1
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define READY 5
+#define ABORTING 6
+#define NO_DMA 0xff
+#define MAXLOOP 10000
+#define TAG_MIXED 0
+#define TAG_SIMPLE 1
+#define TAG_HEAD 2
+#define TAG_ORDERED 3
+
+#define REG_CMD 7
+#define REG_STATUS 7
+#define REG_AUX_STATUS 8
+#define REG_DATA 0
+#define REG_DATA2 1
+#define REG_SEE 6
+#define REG_LOW 2
+#define REG_LM 3
+#define REG_MID 4
+#define REG_MSB 5
+#define REGION_SIZE 9
+#define MAX_ISA_ADDR 0x03ff
+#define MIN_EISA_ADDR 0x1c88
+#define MAX_EISA_ADDR 0xfc88
+#define BSY_ASSERTED 0x80
+#define DRQ_ASSERTED 0x08
+#define ABSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x02
+#define READ_CONFIG_PIO 0xf0
+#define SET_CONFIG_PIO 0xf1
+#define SEND_CP_PIO 0xf2
+#define RECEIVE_SP_PIO 0xf3
+#define TRUNCATE_XFR_PIO 0xf4
+#define RESET_PIO 0xf9
+#define READ_CONFIG_DMA 0xfd
+#define SET_CONFIG_DMA 0xfe
+#define SEND_CP_DMA 0xff
+#define ASOK 0x00
+#define ASST 0x01
+
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+#define YESNO(a) ((a) ? 'y' : 'n')
+#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
+
+/* "EATA", in Big Endian format */
+#define EATA_SIGNATURE 0x41544145
+
+/* Number of valid bytes in the board config structure for EATA 2.0x */
+#define EATA_2_0A_SIZE 28
+#define EATA_2_0B_SIZE 30
+#define EATA_2_0C_SIZE 34
+
+/* Board info structure */
+struct eata_info {
+ ulong data_len; /* Number of valid bytes after this field */
+ ulong sign; /* ASCII "EATA" signature */
+ unchar :4, /* unused low nibble */
+ version:4; /* EATA version, should be 0x1 */
+ unchar ocsena:1, /* Overlap Command Support Enabled */
+ tarsup:1, /* Target Mode Supported */
+ trnxfr:1, /* Truncate Transfer Cmd NOT Necessary */
+ morsup:1, /* More Supported */
+ dmasup:1, /* DMA Supported */
+ drqvld:1, /* DRQ Index (DRQX) is valid */
+ ata:1, /* This is an ATA device */
+ haaval:1; /* Host Adapter Address Valid */
+ ushort cp_pad_len; /* Number of pad bytes after cp_len */
+ unchar host_addr[4]; /* Host Adapter SCSI ID for channels 3, 2, 1, 0 */
+ ulong cp_len; /* Number of valid bytes in cp */
+ ulong sp_len; /* Number of valid bytes in sp */
+ ushort queue_size; /* Max number of cp that can be queued */
+ ushort unused;
+ ushort scatt_size; /* Max number of entries in scatter/gather table */
+ unchar irq:4, /* Interrupt Request assigned to this controller */
+ irq_tr:1, /* 0 for edge triggered, 1 for level triggered */
+ second:1, /* 1 if this is a secondary (not primary) controller */
+ drqx:2; /* DRQ Index (0=DMA0, 1=DMA7, 2=DMA6, 3=DMA5) */
+ unchar sync; /* 1 if scsi target id 7...0 is running sync scsi */
+
+ /* Structure extension defined in EATA 2.0B */
+ unchar isaena:1, /* ISA i/o addressing is disabled/enabled */
+ forcaddr:1, /* Port address has been forced */
+ large_sg:1, /* 1 if large SG lists are supported */
+ res1:1,
+ :4;
+ unchar max_id:5, /* Max SCSI target ID number */
+ max_chan:3; /* Max SCSI channel number on this board */
+
+ /* Structure extension defined in EATA 2.0C */
+ unchar max_lun; /* Max SCSI LUN number */
+ unchar :4,
+ m1:1, /* This is a PCI with an M1 chip installed */
+ idquest:1, /* RAIDNUM returned is questionable */
+ pci:1, /* This board is PCI */
+ eisa:1; /* This board is EISA */
+ unchar raidnum; /* Uniquely identifies this HBA in a system */
+ unchar notused;
+
+ ushort ipad[247];
+ };
+
+/* Board config structure */
+struct eata_config {
+ ushort len; /* Number of bytes following this field */
+ unchar edis:1, /* Disable EATA interface after config command */
+ ocena:1, /* Overlapped Commands Enabled */
+ mdpena:1, /* Transfer all Modified Data Pointer Messages */
+ tarena:1, /* Target Mode Enabled for this controller */
+ :4;
+ unchar cpad[511];
+ };
+
+/* Returned status packet structure */
+struct mssp {
+ unchar adapter_status:7, /* State related to current command */
+ eoc:1; /* End Of Command (1 = command completed) */
+ unchar target_status; /* SCSI status received after data transfer */
+ unchar unused[2];
+ ulong inv_res_len; /* Number of bytes not transferred */
+ struct mscp *cpp; /* Address set in cp */
+ char mess[12];
+ };
+
+struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+ };
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+ unchar sreset:1, /* SCSI Bus Reset Signal should be asserted */
+ init:1, /* Re-initialize controller and self test */
+ reqsen:1, /* Transfer Request Sense Data to addr using DMA */
+ sg:1, /* Use Scatter/Gather */
+ :1,
+ interp:1, /* The controller interprets cp, not the target */
+ dout:1, /* Direction of Transfer is Out (Host to Target) */
+ din:1; /* Direction of Transfer is In (Target to Host) */
+ unchar sense_len; /* Request Sense Length */
+ unchar unused[3];
+ unchar fwnest:1, /* Send command to a component of an Array Group */
+ :7;
+ unchar phsunit:1, /* Send to Target Physical Unit (bypass RAID) */
+ iat:1, /* Inhibit Address Translation */
+ hbaci:1, /* Inhibit HBA Caching for this command */
+ :5;
+ unchar target:5, /* SCSI target ID */
+ channel:3; /* SCSI channel number */
+ unchar lun:5, /* SCSI logical unit number */
+ luntar:1, /* This cp is for Target (not LUN) */
+ dispri:1, /* Disconnect Privilege granted */
+ one:1; /* 1 */
+   unchar mess[3];              /* Message to/from Target */
+ unchar cdb[12]; /* Command Descriptor Block */
+ ulong data_len; /* If sg=0 Data Length, if sg=1 sglist length */
+ struct mscp *cpp; /* Address to be returned in sp */
+ ulong data_address; /* If sg=0 Data Address, if sg=1 sglist address */
+ ulong sp_addr; /* Address where sp is DMA'ed when cp completes */
+ ulong sense_addr; /* Address where Sense Data is DMA'ed on error */
+ Scsi_Cmnd *SCpnt;
+ unsigned int index; /* cp index */
+ struct sg_list *sglist;
+ };
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ char board_id[256]; /* data from INQUIRY on this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
+ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
+ unsigned int retries; /* Number of internal retries */
+ unsigned long last_retried_pid; /* Pid of last retried command */
+ unsigned char subversion; /* Bus type, either ISA or EISA/PCI */
+ unsigned char protocol_rev; /* EATA 2.0 rev., 'A' or 'B' or 'C' */
+ struct mssp sp[2]; /* Returned status for this board */
+ };
+
+static struct Scsi_Host *sh[MAX_BOARDS + 1];
+static const char *driver_name = "EATA";
+static char sha[MAX_BOARDS];
+
+/* Initialize num_boards so that ihdlr can work while detect is in progress */
+static unsigned int num_boards = MAX_BOARDS;
+
+static unsigned long io_port[] __initdata = {
+
+ /* Space for MAX_INT_PARAM ports usable while loading as a module */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP,
+
+ /* First ISA */
+ 0x1f0,
+
+ /* Space for MAX_PCI ports possibly reported by PCI_BIOS */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+
+ /* MAX_EISA ports */
+ 0x1c88, 0x2c88, 0x3c88, 0x4c88, 0x5c88, 0x6c88, 0x7c88, 0x8c88,
+ 0x9c88, 0xac88, 0xbc88, 0xcc88, 0xdc88, 0xec88, 0xfc88,
+
+ /* Other (MAX_ISA - 1) ports */
+ 0x170, 0x230, 0x330,
+
+ /* End of list */
+ 0x0
+ };
+
+#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
+#define BN(board) (HD(board)->board_name)
+
+#define H2DEV(x) htonl(x)
+#define DEV2H(x) H2DEV(x)
+#define V2DEV(addr) ((addr) ? H2DEV(virt_to_bus((void *)addr)) : 0)
+#define DEV2V(addr) ((addr) ? DEV2H(bus_to_virt((unsigned long)addr)) : 0)
+
+static void do_interrupt_handler(int, void *, struct pt_regs *);
+static void flush_dev(Scsi_Device *, unsigned long, unsigned int, unsigned int);
+static int do_trace = FALSE;
+static int setup_done = FALSE;
+static int link_statistics = 0;
+static int tag_mode = TAG_MIXED;
+static int ext_tran = FALSE;
+static int rev_scan = TRUE;
+
+#if defined(CONFIG_SCSI_EATA_TAGGED_QUEUE)
+static int tagged_comm = TRUE;
+#else
+static int tagged_comm = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_EATA_LINKED_COMMANDS)
+static int linked_comm = TRUE;
+#else
+static int linked_comm = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_EATA_MAX_TAGS)
+static int max_queue_depth = CONFIG_SCSI_EATA_MAX_TAGS;
+#else
+static int max_queue_depth = MAX_CMD_PER_LUN;
+#endif
+
+static void select_queue_depths(struct Scsi_Host *host, Scsi_Device *devlist) {
+ Scsi_Device *dev;
+ int j, ntag = 0, nuntag = 0, tqd, utqd;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ j = ((struct hostdata *) host->hostdata)->board_number;
+
+ for(dev = devlist; dev; dev = dev->next) {
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ ntag++;
+ else
+ nuntag++;
+ }
+
+ utqd = MAX_CMD_PER_LUN;
+
+ tqd = (host->can_queue - utqd * nuntag) / (ntag ? ntag : 1);
+
+ if (tqd > max_queue_depth) tqd = max_queue_depth;
+
+ if (tqd < MAX_CMD_PER_LUN) tqd = MAX_CMD_PER_LUN;
+
+ for(dev = devlist; dev; dev = dev->next) {
+ char *tag_suffix = "", *link_suffix = "";
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ dev->queue_depth = tqd;
+ else
+ dev->queue_depth = utqd;
+
+ if (TLDEV(dev->type)) {
+ if (linked_comm && dev->queue_depth > 2)
+ link_suffix = ", sorted";
+ else
+ link_suffix = ", unsorted";
+ }
+
+ if (tagged_comm && dev->tagged_supported && TLDEV(dev->type)) {
+ dev->tagged_queue = 1;
+ dev->current_tag = 1;
+ }
+
+ if (dev->tagged_supported && TLDEV(dev->type) && dev->tagged_queue)
+ tag_suffix = ", tagged";
+ else if (dev->tagged_supported && TLDEV(dev->type))
+ tag_suffix = ", untagged";
+
+ printk("%s: scsi%d, channel %d, id %d, lun %d, cmds/lun %d%s%s.\n",
+ BN(j), host->host_no, dev->channel, dev->id, dev->lun,
+ dev->queue_depth, link_suffix, tag_suffix);
+ }
+
+ IRQ_UNLOCK_RESTORE
+ return;
+}
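+
+/*
+ * Editorial illustration (not part of the original driver): with
+ * can_queue = 64, one untagged device and three tagged devices,
+ * tqd = (64 - 2 * 1) / 3 = 20, which is then clamped to
+ * max_queue_depth (e.g. 16 with the documented mq:16 setting), so
+ * each tagged device gets that many commands per LUN while the
+ * untagged one keeps MAX_CMD_PER_LUN = 2.
+ */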
+
+static inline int wait_on_busy(unsigned long iobase, unsigned int loop) {
+
+ while (inb(iobase + REG_AUX_STATUS) & ABSY_ASSERTED) {
+ udelay(1L);
+ if (--loop == 0) return TRUE;
+ }
+
+ return FALSE;
+}
+
+static inline int do_dma(unsigned long iobase, unsigned int addr, unchar cmd) {
+
+ if (wait_on_busy(iobase, (addr ? MAXLOOP * 100 : MAXLOOP))) return TRUE;
+
+ if ((addr = V2DEV(addr))) {
+ outb((char) (addr >> 24), iobase + REG_LOW);
+ outb((char) (addr >> 16), iobase + REG_LM);
+ outb((char) (addr >> 8), iobase + REG_MID);
+ outb((char) addr, iobase + REG_MSB);
+ }
+
+ outb(cmd, iobase + REG_CMD);
+ return FALSE;
+}
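+
+/*
+ * Editorial note (not part of the original driver): addr has already
+ * been byte-swapped by V2DEV (htonl of the bus address), so on the
+ * little-endian i386 targets of this driver the shifts above write
+ * the least-significant address byte to REG_LOW and the most-
+ * significant one to REG_MSB, which appears to be the layout the
+ * EATA address registers expect.
+ */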
+
+static inline int read_pio(unsigned long iobase, ushort *start, ushort *end) {
+ unsigned int loop = MAXLOOP;
+ ushort *p;
+
+ for (p = start; p <= end; p++) {
+
+ while (!(inb(iobase + REG_STATUS) & DRQ_ASSERTED)) {
+ udelay(1L);
+ if (--loop == 0) return TRUE;
+ }
+
+ loop = MAXLOOP;
+ *p = inw(iobase);
+ }
+
+ return FALSE;
+}
+
+__initfunc (static inline int
+ get_pci_irq(unsigned long port_base, unsigned char *apic_irq)) {
+
+#if defined(CONFIG_PCI)
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+
+ unsigned int addr;
+ struct pci_dev *dev = NULL;
+
+ if (!pci_present()) return FALSE;
+
+ while((dev = pci_find_class(PCI_CLASS_STORAGE_SCSI << 8, dev))) {
+
+ if (pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &addr)) continue;
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: get_pci_irq, bus %d, devfn 0x%x, addr 0x%x, apic_irq %u.\n",
+ driver_name, dev->bus->number, dev->devfn, addr, dev->irq);
+#endif
+
+ if ((addr & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_IO)
+ continue;
+
+ if ((addr & PCI_BASE_ADDRESS_IO_MASK) + PCI_BASE_ADDRESS_0 == port_base) {
+ *apic_irq = dev->irq;
+ return TRUE;
+ }
+
+ }
+
+#endif /* end new style PCI code */
+
+#endif /* end CONFIG_PCI */
+
+ return FALSE;
+}
+
+__initfunc (static inline int port_detect \
+ (unsigned long port_base, unsigned int j, Scsi_Host_Template *tpnt)) {
+ unsigned char irq, dma_channel, subversion, i;
+ unsigned char protocol_rev, apic_irq;
+ struct eata_info info;
+ char *bus_type, dma_name[16], tag_type;
+
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ printk("\rprobing eata on %lx", port_base);
+
+ if(check_region(port_base, REGION_SIZE)) {
+ printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
+ return FALSE;
+ }
+
+ if (do_dma(port_base, 0, READ_CONFIG_PIO)) return FALSE;
+
+ /* Read the info structure */
+ if (read_pio(port_base, (ushort *)&info, (ushort *)&info.ipad[0]))
+ return FALSE;
+
+ /* Check the controller "EATA" signature */
+ if (info.sign != EATA_SIGNATURE) return FALSE;
+
+ if (DEV2H(info.data_len) < EATA_2_0A_SIZE) {
+ printk("%s: config structure size (%ld bytes) too short, detaching.\n",
+ name, DEV2H(info.data_len));
+ return FALSE;
+ }
+ else if (DEV2H(info.data_len) == EATA_2_0A_SIZE)
+ protocol_rev = 'A';
+ else if (DEV2H(info.data_len) == EATA_2_0B_SIZE)
+ protocol_rev = 'B';
+ else
+ protocol_rev = 'C';
+
+ if (!setup_done && j > 0 && j <= MAX_PCI) {
+ bus_type = "PCI";
+ subversion = ESA;
+ }
+ else if (port_base > MAX_EISA_ADDR || (protocol_rev == 'C' && info.pci)) {
+ bus_type = "PCI";
+ subversion = ESA;
+ }
+ else if (port_base >= MIN_EISA_ADDR || (protocol_rev == 'C' && info.eisa)) {
+ bus_type = "EISA";
+ subversion = ESA;
+ }
+ else if (protocol_rev == 'C' && !info.eisa && !info.pci) {
+ bus_type = "ISA";
+ subversion = ISA;
+ }
+ else if (port_base > MAX_ISA_ADDR) {
+ bus_type = "PCI";
+ subversion = ESA;
+ }
+ else {
+ bus_type = "ISA";
+ subversion = ISA;
+ }
+
+ if (!info.haaval || info.ata) {
+ printk("%s: address 0x%03lx, unusable %s board (%d%d), detaching.\n",
+ name, port_base, bus_type, info.haaval, info.ata);
+ return FALSE;
+ }
+
+ if (info.drqvld) {
+
+ if (subversion == ESA)
+ printk("%s: warning, weird %s board using DMA.\n", name, bus_type);
+
+ subversion = ISA;
+ dma_channel = dma_channel_table[3 - info.drqx];
+ }
+ else {
+
+ if (subversion == ISA)
+ printk("%s: warning, weird %s board not using DMA.\n", name, bus_type);
+
+ subversion = ESA;
+ dma_channel = NO_DMA;
+ }
+
+ if (!info.dmasup)
+ printk("%s: warning, DMA protocol support not asserted.\n", name);
+
+ irq = info.irq;
+
+ if (subversion == ESA && !info.irq_tr)
+ printk("%s: warning, LEVEL triggering is suggested for IRQ %u.\n",
+ name, irq);
+
+ if (get_pci_irq(port_base, &apic_irq) && (irq != apic_irq)) {
+ printk("%s: IRQ %u mapped to IO-APIC IRQ %u.\n", name, irq, apic_irq);
+ irq = apic_irq;
+ }
+
+ /* Board detected, allocate its IRQ */
+ if (request_irq(irq, do_interrupt_handler,
+ SA_INTERRUPT | ((subversion == ESA) ? SA_SHIRQ : 0),
+ driver_name, (void *) &sha[j])) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
+ return FALSE;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ free_irq(irq, &sha[j]);
+ return FALSE;
+ }
+
+#if defined(FORCE_CONFIG)
+ {
+ struct eata_config config;
+
+ /* Set board configuration */
+ memset((char *)&config, 0, sizeof(struct eata_config));
+ config.len = (ushort) htons((ushort)510);
+ config.ocena = TRUE;
+
+ if (do_dma(port_base, (unsigned int)&config, SET_CONFIG_DMA)) {
+ printk("%s: busy timeout sending configuration, detaching.\n", name);
+ return FALSE;
+ }
+ }
+#endif
+
+ sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
+
+ if (sh[j] == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+
+ free_irq(irq, &sha[j]);
+
+ if (subversion == ISA) free_dma(dma_channel);
+
+ return FALSE;
+ }
+
+ sh[j]->io_port = port_base;
+ sh[j]->unique_id = port_base;
+ sh[j]->n_io_port = REGION_SIZE;
+ sh[j]->dma_channel = dma_channel;
+ sh[j]->irq = irq;
+ sh[j]->sg_tablesize = (ushort) ntohs(info.scatt_size);
+ sh[j]->this_id = (ushort) info.host_addr[3];
+ sh[j]->can_queue = (ushort) ntohs(info.queue_size);
+ sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
+ sh[j]->select_queue_depths = select_queue_depths;
+
+ /* Register the I/O space that we use */
+ request_region(sh[j]->io_port, sh[j]->n_io_port, driver_name);
+
+ memset(HD(j), 0, sizeof(struct hostdata));
+ HD(j)->subversion = subversion;
+ HD(j)->protocol_rev = protocol_rev;
+ HD(j)->board_number = j;
+
+ if (HD(j)->subversion == ESA)
+ sh[j]->unchecked_isa_dma = FALSE;
+ else {
+ sh[j]->wish_block = TRUE;
+ sh[j]->unchecked_isa_dma = TRUE;
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ strcpy(BN(j), name);
+
+   /* DPT PM2012 does not allow sg_tablesize to be detected correctly */
+ if (sh[j]->sg_tablesize > MAX_SGLIST || sh[j]->sg_tablesize < 2) {
+ printk("%s: detect, wrong n. of SG lists %d, fixed.\n",
+ BN(j), sh[j]->sg_tablesize);
+ sh[j]->sg_tablesize = MAX_SGLIST;
+ }
+
+   /* DPT PM2012 does not allow can_queue to be detected correctly */
+ if (sh[j]->can_queue > MAX_MAILBOXES || sh[j]->can_queue < 2) {
+ printk("%s: detect, wrong n. of mbox %d, fixed.\n",
+ BN(j), sh[j]->can_queue);
+ sh[j]->can_queue = MAX_MAILBOXES;
+ }
+
+ if (protocol_rev != 'A') {
+
+ if (info.max_chan > 0 && info.max_chan < MAX_CHANNEL)
+ sh[j]->max_channel = info.max_chan;
+
+ if (info.max_id > 7 && info.max_id < MAX_TARGET)
+ sh[j]->max_id = info.max_id + 1;
+
+ if (info.large_sg && sh[j]->sg_tablesize == MAX_SGLIST)
+ sh[j]->sg_tablesize = MAX_LARGE_SGLIST;
+ }
+
+ if (protocol_rev == 'C') {
+
+ if (info.max_lun > 7 && info.max_lun < MAX_LUN)
+ sh[j]->max_lun = info.max_lun + 1;
+ }
+
+ if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
+ else sprintf(dma_name, "DMA %u", dma_channel);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if (! ((&HD(j)->cp[i])->sglist = kmalloc(
+ sh[j]->sg_tablesize * sizeof(struct sg_list),
+ (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
+ printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
+ eata2x_release(sh[j]);
+ return FALSE;
+ }
+
+ if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
+ max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
+
+ if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;
+
+ if (tagged_comm) {
+ if (tag_mode == TAG_SIMPLE) tag_type = '1';
+ else if (tag_mode == TAG_HEAD) tag_type = '2';
+ else if (tag_mode == TAG_ORDERED) tag_type = '3';
+ else tag_type = 'y';
+ }
+ else tag_type = 'n';
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+ sh[j]->hostt->use_new_eh_code = use_new_eh_code;
+#else
+ use_new_eh_code = FALSE;
+#endif
+
+ if (j == 0) {
+ printk("EATA/DMA 2.0x: Copyright (C) 1994-1998 Dario Ballabio.\n");
+ printk("%s config options -> tc:%c, lc:%c, mq:%d, eh:%c, rs:%c, et:%c.\n",
+ driver_name, tag_type, YESNO(linked_comm), max_queue_depth,
+ YESNO(use_new_eh_code), YESNO(rev_scan), YESNO(ext_tran));
+ }
+
+ printk("%s: 2.0%c, %s 0x%03lx, IRQ %u, %s, SG %d, MB %d.\n",
+ BN(j), HD(j)->protocol_rev, bus_type, (unsigned long)sh[j]->io_port,
+ sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
+
+ if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
+ printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
+ BN(j), sh[j]->max_id, sh[j]->max_lun);
+
+ for (i = 0; i <= sh[j]->max_channel; i++)
+ printk("%s: SCSI channel %u enabled, host target ID %d.\n",
+ BN(j), i, info.host_addr[3 - i]);
+
+#if defined(DEBUG_DETECT)
+ printk("%s: Vers. 0x%x, ocs %u, tar %u, trnxfr %u, more %u, SYNC 0x%x, "\
+ "sec. %u, infol %ld, cpl %ld spl %ld.\n", name, info.version,
+ info.ocsena, info.tarsup, info.trnxfr, info.morsup, info.sync,
+ info.second, DEV2H(info.data_len), DEV2H(info.cp_len),
+ DEV2H(info.sp_len));
+
+ if (protocol_rev == 'B' || protocol_rev == 'C')
+ printk("%s: isaena %u, forcaddr %u, max_id %u, max_chan %u, "\
+ "large_sg %u, res1 %u.\n", name, info.isaena, info.forcaddr,
+ info.max_id, info.max_chan, info.large_sg, info.res1);
+
+ if (protocol_rev == 'C')
+ printk("%s: max_lun %u, m1 %u, idquest %u, pci %u, eisa %u, "\
+ "raidnum %u.\n", name, info.max_lun, info.m1, info.idquest,
+ info.pci, info.eisa, info.raidnum);
+#endif
+
+ return TRUE;
+}
+
+__initfunc (void eata2x_setup(char *str, int *ints)) {
+ int i, argc = ints[0];
+ char *cur = str, *pc;
+
+ if (argc > 0) {
+
+ if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;
+
+ for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];
+
+ io_port[i] = 0;
+ setup_done = TRUE;
+ }
+
+ while (cur && (pc = strchr(cur, ':'))) {
+ int val = 0, c = *++pc;
+
+ if (c == 'n' || c == 'N') val = FALSE;
+ else if (c == 'y' || c == 'Y') val = TRUE;
+ else val = (int) simple_strtoul(pc, NULL, 0);
+
+ if (!strncmp(cur, "lc:", 3)) linked_comm = val;
+ else if (!strncmp(cur, "tc:", 3)) tagged_comm = val;
+ else if (!strncmp(cur, "tm:", 3)) tag_mode = val;
+ else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
+ else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
+ else if (!strncmp(cur, "eh:", 3)) use_new_eh_code = val;
+ else if (!strncmp(cur, "et:", 3)) ext_tran = val;
+ else if (!strncmp(cur, "rs:", 3)) rev_scan = val;
+
+ if ((cur = strchr(cur, ','))) ++cur;
+ }
+
+ return;
+}
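+
+/*
+ * Editorial illustration (not part of the original driver): assuming
+ * the kernel boot parser delivers the leading numeric arguments in
+ * ints[] and the option string in str, a boot line such as
+ * "eata=0x7410,0x230,lc:y,mq:4" would arrive as ints[] = {2, 0x7410,
+ * 0x230}, replacing the probe list with {0x7410, 0x230, 0}, while
+ * the trailing "xx:" options are picked out of str by the
+ * strchr/strncmp loop above (linked_comm = TRUE, max_queue_depth = 4).
+ */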
+
+__initfunc (static void add_pci_ports(void)) {
+
+#if defined(CONFIG_PCI)
+
+ unsigned int addr, k;
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+
+ struct pci_dev *dev = NULL;
+
+ if (!pci_present()) return;
+
+ for (k = 0; k < MAX_PCI; k++) {
+
+ if (!(dev = pci_find_class(PCI_CLASS_STORAGE_SCSI << 8, dev))) break;
+
+ if (pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &addr)) continue;
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: detect, seq. %d, bus %d, devfn 0x%x, addr 0x%x.\n",
+ driver_name, k, dev->bus->number, dev->devfn, addr);
+#endif
+
+ if ((addr & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_IO)
+ continue;
+
+ /* Order addresses according to rev_scan value */
+ io_port[MAX_INT_PARAM + (rev_scan ? (MAX_PCI - k) : (1 + k))] =
+ (addr & PCI_BASE_ADDRESS_IO_MASK) + PCI_BASE_ADDRESS_0;
+ }
+
+#else /* else old style PCI code */
+
+ unsigned short i = 0;
+ unsigned char bus, devfn;
+
+ if (!pcibios_present()) return;
+
+ for (k = 0; k < MAX_PCI; k++) {
+
+ if (pcibios_find_class(PCI_CLASS_STORAGE_SCSI << 8, i++, &bus, &devfn)
+ != PCIBIOS_SUCCESSFUL) break;
+
+ if (pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &addr)
+ != PCIBIOS_SUCCESSFUL) continue;
+
+#if defined(DEBUG_PCI_DETECT)
+ printk("%s: detect, seq. %d, bus %d, devfn 0x%x, addr 0x%x.\n",
+ driver_name, k, bus, devfn, addr);
+#endif
+
+ if ((addr & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_IO)
+ continue;
+
+ /* Order addresses according to rev_scan value */
+ io_port[MAX_INT_PARAM + (rev_scan ? (MAX_PCI - k) : (1 + k))] =
+ (addr & PCI_BASE_ADDRESS_IO_MASK) + PCI_BASE_ADDRESS_0;
+ }
+
+#endif /* end old style PCI code */
+
+#endif /* end CONFIG_PCI */
+
+ return;
+}
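+
+/*
+ * Editorial note (not part of the original driver): the k-th PCI
+ * controller found above is stored at io_port[MAX_INT_PARAM + MAX_PCI - k]
+ * when rev_scan is set (the default), or at io_port[MAX_INT_PARAM + 1 + k]
+ * otherwise, which is what reverses or preserves the BIOS scan order
+ * described in the header comment.
+ */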
+
+__initfunc (int eata2x_detect(Scsi_Host_Template *tpnt)) {
+ unsigned int j = 0, k;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ tpnt->proc_dir = &proc_scsi_eata2x;
+
+#if defined(MODULE)
+ /* io_port could have been modified when loading as a module */
+ if(io_port[0] != SKIP) {
+ setup_done = TRUE;
+ io_port[MAX_INT_PARAM] = 0;
+ }
+#endif
+
+ for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
+
+ if (!setup_done) add_pci_ports();
+
+ for (k = 0; io_port[k]; k++) {
+
+ if (io_port[k] == SKIP) continue;
+
+ if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
+ }
+
+ num_boards = j;
+ IRQ_UNLOCK_RESTORE
+ return j;
+}
+
+static inline void build_sg_list(struct mscp *cpp, Scsi_Cmnd *SCpnt) {
+ unsigned int k;
+ struct scatterlist *sgpnt;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ for (k = 0; k < SCpnt->use_sg; k++) {
+ cpp->sglist[k].address = V2DEV(sgpnt[k].address);
+ cpp->sglist[k].num_bytes = H2DEV(sgpnt[k].length);
+ }
+
+ cpp->data_address = V2DEV(cpp->sglist);
+ cpp->data_len = H2DEV((SCpnt->use_sg * sizeof(struct sg_list)));
+}
+
+static inline int do_qcomm(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ unsigned int i, j, k;
+ struct mscp *cpp;
+ struct mssp *spp;
+
+ static const unsigned char data_out_cmds[] = {
+ 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
+ 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
+ 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b
+ };
+
+ static const unsigned char data_none_cmds[] = {
+ 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
+ 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
+ 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5
+ };
+
+ /* j is the board number */
+ j = ((struct hostdata *) SCpnt->host->hostdata)->board_number;
+
+ if (SCpnt->host_scribble)
+ panic("%s: qcomm, pid %ld, SCpnt %p already active.\n",
+ BN(j), SCpnt->pid, SCpnt);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = HD(j)->last_cp_used + 1;
+
+ for (k = 0; k < sh[j]->can_queue; k++, i++) {
+
+ if (i >= sh[j]->can_queue) i = 0;
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ HD(j)->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == sh[j]->can_queue) {
+ printk("%s: qcomm, no free mailbox.\n", BN(j));
+ return 1;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &HD(j)->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp) - sizeof(struct sg_list *));
+
+ /* Set pointer to status packet structure */
+ spp = &HD(j)->sp[0];
+
+ /* The EATA protocol uses Big Endian format */
+ cpp->sp_addr = V2DEV(spp);
+
+ cpp->cpp = cpp;
+ SCpnt->scsi_done = done;
+ cpp->index = i;
+ SCpnt->host_scribble = (unsigned char *) &cpp->index;
+
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid);
+
+ for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
+ if (SCpnt->cmnd[0] == data_out_cmds[k]) {
+ cpp->dout = TRUE;
+ break;
+ }
+
+ if ((cpp->din = !cpp->dout))
+ for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
+ if (SCpnt->cmnd[0] == data_none_cmds[k]) {
+ cpp->din = FALSE;
+ break;
+ }
+
+ cpp->reqsen = TRUE;
+ cpp->dispri = TRUE;
+#if 0
+ if (SCpnt->device->type == TYPE_TAPE) cpp->hbaci = TRUE;
+#endif
+ cpp->one = TRUE;
+ cpp->channel = SCpnt->channel;
+ cpp->target = SCpnt->target;
+ cpp->lun = SCpnt->lun;
+ cpp->SCpnt = SCpnt;
+ cpp->sense_addr = V2DEV(SCpnt->sense_buffer);
+ cpp->sense_len = sizeof SCpnt->sense_buffer;
+
+ if (SCpnt->device->tagged_queue) {
+
+ if (HD(j)->target_redo[SCpnt->target][SCpnt->channel] ||
+ HD(j)->target_to[SCpnt->target][SCpnt->channel])
+ cpp->mess[0] = ORDERED_QUEUE_TAG;
+ else if (tag_mode == TAG_SIMPLE) cpp->mess[0] = SIMPLE_QUEUE_TAG;
+ else if (tag_mode == TAG_HEAD) cpp->mess[0] = HEAD_OF_QUEUE_TAG;
+ else if (tag_mode == TAG_ORDERED) cpp->mess[0] = ORDERED_QUEUE_TAG;
+ else if (SCpnt->device->current_tag == 0)
+ cpp->mess[0] = ORDERED_QUEUE_TAG;
+ else if (SCpnt->device->current_tag == 1)
+ cpp->mess[0] = HEAD_OF_QUEUE_TAG;
+ else
+ cpp->mess[0] = SIMPLE_QUEUE_TAG;
+
+ cpp->mess[1] = SCpnt->device->current_tag++;
+ }
+
+ if (SCpnt->use_sg) {
+ cpp->sg = TRUE;
+ build_sg_list(cpp, SCpnt);
+ }
+ else {
+ cpp->data_address = V2DEV(SCpnt->request_buffer);
+ cpp->data_len = H2DEV(SCpnt->request_bufflen);
+ }
+
+ memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type)) {
+ HD(j)->cp_stat[i] = READY;
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, FALSE);
+ return 0;
+ }
+
+ /* Send control packet to the board */
+ if (do_dma(sh[j]->io_port, (unsigned int) cpp, SEND_CP_DMA)) {
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, target %d.%d:%d, pid %ld, adapter busy.\n",
+ BN(j), SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid);
+ return 1;
+ }
+
+ HD(j)->cp_stat[i] = IN_USE;
+ return 0;
+}
+
+int eata2x_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_qcomm(SCpnt, done);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+static inline int do_old_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL ||
+ (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout))) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int eata2x_old_abort(Scsi_Cmnd *SCarg) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_abort(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+static inline int do_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SUCCESS;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ if (SCarg->eh_state == SCSI_STATE_TIMEOUT) {
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d, eh_state timeout, pid %ld.\n",
+ BN(j), i, SCarg->pid);
+ return SUCCESS;
+ }
+
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int eata2x_abort(Scsi_Cmnd *SCarg) {
+
+ return do_abort(SCarg);
+}
+
+#endif /* new_eh_code */
+
+static inline int do_old_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout)) {
+ printk("%s: reset, pid %ld, reset not running.\n", BN(j), SCarg->pid);
+ return SCSI_RESET_NOT_RUNNING;
+ }
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ HD(j)->retries = 0;
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (do_dma(sh[j]->io_port, 0, RESET_PIO)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) {
+ printk("%s: reset, exit, success.\n", BN(j));
+ return SCSI_RESET_SUCCESS;
+ }
+ else {
+ printk("%s: reset, exit, wakeup.\n", BN(j));
+ return SCSI_RESET_PUNT;
+ }
+}
+
+int eata2x_old_reset(Scsi_Cmnd *SCarg, unsigned int reset_flags) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_reset(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+static inline int do_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return FAILED;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ HD(j)->retries = 0;
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (do_dma(sh[j]->io_port, 0, RESET_PIO)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->pid);
+ else printk("%s: reset, exit.\n", BN(j));
+
+ return SUCCESS;
+}
+
+int eata2x_reset(Scsi_Cmnd *SCarg) {
+
+ return do_reset(SCarg);
+}
+
+#endif /* new_eh_code */
+
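+/* BIOS geometry mapping: use the translation reported by scsicam_bios_param()
+ * when it is available; otherwise, or when extended translation (ext_tran) is
+ * forced, fall back to 255 heads and 63 sectors per track and derive the
+ * cylinder count from the reported capacity (dkinfo[0]=heads, [1]=sectors,
+ * [2]=cylinders). */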
+int eata2x_biosparam(Disk *disk, kdev_t dev, int *dkinfo) {
+ int size = disk->capacity;
+
+ if (ext_tran || (scsicam_bios_param(disk, dev, dkinfo) < 0)) {
+ dkinfo[0] = 255;
+ dkinfo[1] = 63;
+ dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
+ }
+
+#if defined (DEBUG_GEOMETRY)
+ printk ("%s: biosparam, head=%d, sec=%d, cyl=%d.\n", driver_name,
+ dkinfo[0], dkinfo[1], dkinfo[2]);
+#endif
+
+ return FALSE;
+}
+
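+/* Simple selection sort: orders the keys in sk[] (ascending, or descending
+ * when rev is set) while keeping the parallel payload array da[] in step. */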
+static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
+ unsigned int rev) {
+ unsigned int i, j, k, y;
+ unsigned long x;
+
+ for (i = 0; i < n - 1; i++) {
+ k = i;
+
+ for (j = k + 1; j < n; j++)
+ if (rev) {
+ if (sk[j] > sk[k]) k = j;
+ }
+ else {
+ if (sk[j] < sk[k]) k = j;
+ }
+
+ if (k != i) {
+ x = sk[k]; sk[k] = sk[i]; sk[i] = x;
+ y = da[k]; da[k] = da[i]; da[i] = y;
+ }
+ }
+
+}
+
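+/* Elevator-style reordering of the READY commands listed in il[]: pick the
+ * scan direction from the current head position (cursec) relative to the span
+ * of pending requests, sort the mailboxes by start sector, and fall back to
+ * submission (pid) order whenever two requests overlap.  Returns TRUE on
+ * overlap, which makes the caller queue only the first command. */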
+static inline int reorder(unsigned int j, unsigned long cursec,
+ unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n;
+ unsigned int rev = FALSE, s = TRUE, r = TRUE;
+ unsigned int input_only = TRUE, overlap = FALSE;
+ unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
+ unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
+ unsigned long ioseek = 0;
+
+ static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
+ static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
+ static unsigned int readysorted = 0, revcount = 0;
+ static unsigned long seeksorted = 0, seeknosort = 0;
+
+ if (link_statistics && !(++flushcount % link_statistics))
+ printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
+ " av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
+ ovlcount, readycount, readysorted, sortcount, revcount,
+ seeknosort / (readycount + 1),
+ seeksorted / (readycount + 1));
+
+ if (n_ready <= 1) return FALSE;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (!cpp->din) input_only = FALSE;
+
+ if (SCpnt->request.sector < minsec) minsec = SCpnt->request.sector;
+ if (SCpnt->request.sector > maxsec) maxsec = SCpnt->request.sector;
+
+ sl[n] = SCpnt->request.sector;
+ ioseek += SCpnt->request.nr_sectors;
+
+ if (!n) continue;
+
+ if (sl[n] < sl[n - 1]) s = FALSE;
+ if (sl[n] > sl[n - 1]) r = FALSE;
+
+ if (link_statistics) {
+ if (sl[n] > sl[n - 1])
+ seek += sl[n] - sl[n - 1];
+ else
+ seek += sl[n - 1] - sl[n];
+ }
+
+ }
+
+ if (link_statistics) {
+ if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
+ }
+
+ if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;
+
+ if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;
+
+ if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);
+
+ if (!input_only) for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ ll[n] = SCpnt->request.nr_sectors; pl[n] = SCpnt->pid;
+
+ if (!n) continue;
+
+ if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
+ || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
+ }
+
+ if (overlap) sort(pl, il, n_ready, FALSE);
+
+ if (link_statistics) {
+ if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
+ batchcount++; readycount += n_ready, seeknosort += seek / 1024;
+ if (input_only) inputcount++;
+ if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
+ else seeksorted += (iseek + maxsec - minsec) / 1024;
+ if (rev && !r) { revcount++; readysorted += n_ready; }
+ if (!rev && !s) { sortcount++; readysorted += n_ready; }
+ }
+
+#if defined(DEBUG_LINKED_COMMANDS)
+ if (link_statistics && (overlap || !(flushcount % link_statistics)))
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
+ (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid, k, flushcount, n_ready,
+ SCpnt->request.sector, SCpnt->request.nr_sectors, cursec,
+ YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ YESNO(overlap), cpp->din);
+ }
+#endif
+ return overlap;
+}
+
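+/* Queue to the adapter every command held in the READY state for this device,
+ * after giving reorder() a chance to sort them; bail out early if one of the
+ * device's commands is already IN_USE on the board. */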
+static void flush_dev(Scsi_Device *dev, unsigned long cursec, unsigned int j,
+ unsigned int ihdlr) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];
+
+ for (k = 0; k < sh[j]->can_queue; k++) {
+
+ if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;
+
+ cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (SCpnt->device != dev) continue;
+
+ if (HD(j)->cp_stat[k] == IN_USE) return;
+
+ il[n_ready++] = k;
+ }
+
+ if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (do_dma(sh[j]->io_port, (unsigned int) cpp, SEND_CP_DMA)) {
+ printk("%s: %s, target %d.%d:%d, pid %ld, mbox %d, adapter"\
+ " busy, will abort.\n", BN(j), (ihdlr ? "ihdlr" : "qcomm"),
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid, k);
+ HD(j)->cp_stat[k] = ABORTING;
+ continue;
+ }
+
+ HD(j)->cp_stat[k] = IN_USE;
+ }
+
+}
+
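+/* Interrupt service for one board: verify the board is actually asserting an
+ * interrupt, snapshot and clear the status packet, acknowledge by reading
+ * REG_STATUS, locate the mailbox from the returned cp pointer, validate its
+ * cp_stat, translate adapter/target status into a SCSI result and complete
+ * the command via scsi_done(). */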
+static inline void ihdlr(int irq, unsigned int j) {
+ Scsi_Cmnd *SCpnt;
+ unsigned int i, k, c, status, tstatus, reg;
+ struct mssp *dspp, *spp;
+ struct mscp *cpp;
+
+ if (sh[j]->irq != irq)
+ panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+
+   /* Check if this board needs to be serviced */
+ if (!(inb(sh[j]->io_port + REG_AUX_STATUS) & IRQ_ASSERTED)) return;
+
+ HD(j)->iocount++;
+
+ if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ /* Check if this board is still busy */
+ if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
+ reg = inb(sh[j]->io_port + REG_STATUS);
+ printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ return;
+ }
+
+ dspp = &HD(j)->sp[0];
+ spp = &HD(j)->sp[1];
+
+ /* Make a local copy just before clearing the interrupt indication */
+ memcpy(spp, dspp, sizeof(struct mssp));
+
+ /* Clear the completion flag and cp pointer on the dynamic copy of sp */
+ memset(dspp, 0, sizeof(struct mssp));
+
+ /* Read the status register to clear the interrupt indication */
+ reg = inb(sh[j]->io_port + REG_STATUS);
+
+   /* Reject any sp with suspect data */
+ if (spp->eoc == FALSE)
+ printk("%s: ihdlr, spp->eoc == FALSE, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ if (spp->cpp == NULL)
+ printk("%s: ihdlr, spp->cpp == NULL, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ if (spp->eoc == FALSE || spp->cpp == NULL) return;
+
+ cpp = spp->cpp;
+
+#if defined(DEBUG_GENERATE_ABORTS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) return;
+#endif
+
+ /* Find the mailbox to be serviced on this board */
+ i = cpp - HD(j)->cp;
+
+ if (cpp < HD(j)->cp || cpp >= HD(j)->cp + sh[j]->can_queue
+ || i >= sh[j]->can_queue)
+ panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
+ cpp, HD(j)->cp);
+
+ if (HD(j)->cp_stat[i] == IGNORE) {
+ HD(j)->cp_stat[i] = FREE;
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
+ else if (HD(j)->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
+ BN(j), i, HD(j)->cp_stat[i]);
+
+ HD(j)->cp_stat[i] = FREE;
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i,
+ SCpnt->pid, SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n",
+ BN(j), i, SCpnt->pid, *(unsigned int *)SCpnt->host_scribble);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type))
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, TRUE);
+
+ tstatus = status_byte(spp->target_status);
+
+#if defined(DEBUG_GENERATE_ERRORS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
+ spp->adapter_status = 0x01;
+#endif
+
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
+ && HD(j)->target_redo[SCpnt->target][SCpnt->channel])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ HD(j)->target_redo[SCpnt->target][SCpnt->channel] = FALSE;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK)
+ printk("%s: ihdlr, target %d.%d:%d, pid %ld, "\
+ "target_status 0x%x, sense key 0x%x.\n", BN(j),
+ SCpnt->channel, SCpnt->target, SCpnt->lun,
+ SCpnt->pid, spp->target_status,
+ SCpnt->sense_buffer[2]);
+
+ HD(j)->target_to[SCpnt->target][SCpnt->channel] = 0;
+
+ if (HD(j)->last_retried_pid == SCpnt->pid) HD(j)->retries = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+ case 0x02: /* Command Time Out */
+
+ if (HD(j)->target_to[SCpnt->target][SCpnt->channel] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ HD(j)->target_to[SCpnt->target][SCpnt->channel]++;
+ }
+
+ break;
+
+ /* Perform a limited number of internal retries */
+ case 0x03: /* SCSI Bus Reset Received */
+ case 0x04: /* Initial Controller Power-up */
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++)
+ HD(j)->target_redo[k][c] = TRUE;
+
+ if (SCpnt->device->type != TYPE_TAPE
+ && HD(j)->retries < MAX_INTERNAL_RETRIES) {
+
+#if defined(DID_SOFT_ERROR)
+ status = DID_SOFT_ERROR << 16;
+#else
+ status = DID_BUS_BUSY << 16;
+#endif
+ HD(j)->retries++;
+ HD(j)->last_retried_pid = SCpnt->pid;
+ }
+ else
+ status = DID_ERROR << 16;
+
+ break;
+ case 0x05: /* Unexpected Bus Phase */
+ case 0x06: /* Unexpected Bus Free */
+ case 0x07: /* Bus Parity Error */
+ case 0x08: /* SCSI Hung */
+ case 0x09: /* Unexpected Message Reject */
+ case 0x0a: /* SCSI Bus Reset Stuck */
+ case 0x0b: /* Auto Request-Sense Failed */
+ case 0x0c: /* Controller Ram Parity Error */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+
+#if defined(DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
+ do_trace || msg_byte(spp->target_status))
+#endif
+ printk("%s: ihdlr, mbox %2d, err 0x%x:%x,"\
+ " target %d.%d:%d, pid %ld, reg 0x%x, count %d.\n",
+ BN(j), i, spp->adapter_status, spp->target_status,
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid,
+ reg, HD(j)->iocount);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ SCpnt->scsi_done(SCpnt);
+
+ if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ return;
+}
+
+static void do_interrupt_handler(int irq, void *shap, struct pt_regs *regs) {
+ unsigned int j;
+ IRQ_FLAGS
+ SPIN_FLAGS
+
+ /* Check if the interrupt must be processed by this handler */
+ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
+
+ SPIN_LOCK_SAVE
+ IRQ_LOCK_SAVE
+ ihdlr(irq, j);
+ IRQ_UNLOCK_RESTORE
+ SPIN_UNLOCK_RESTORE
+}
+
+int eata2x_release(struct Scsi_Host *shpnt) {
+ unsigned int i, j;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+
+ for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);
+
+ if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n",
+ driver_name);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if ((&HD(j)->cp[i])->sglist) kfree((&HD(j)->cp[i])->sglist);
+
+ free_irq(sh[j]->irq, &sha[j]);
+
+ if (sh[j]->dma_channel != NO_DMA) free_dma(sh[j]->dma_channel);
+
+ release_region(sh[j]->io_port, sh[j]->n_io_port);
+ scsi_unregister(sh[j]);
+ IRQ_UNLOCK_RESTORE
+ return FALSE;
+}
+
+#if defined(MODULE)
+Scsi_Host_Template driver_template = EATA;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/eata.h b/linux/src/drivers/scsi/eata.h
new file mode 100644
index 0000000..f1641f4
--- /dev/null
+++ b/linux/src/drivers/scsi/eata.h
@@ -0,0 +1,60 @@
+/*
+ * eata.h - used by the low-level driver for EATA/DMA SCSI host adapters.
+ */
+#ifndef _EATA_H
+#define _EATA_H
+
+#include <scsi/scsicam.h>
+#include <linux/version.h>
+
+int eata2x_detect(Scsi_Host_Template *);
+int eata2x_release(struct Scsi_Host *);
+int eata2x_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int eata2x_abort(Scsi_Cmnd *);
+int eata2x_old_abort(Scsi_Cmnd *);
+int eata2x_reset(Scsi_Cmnd *);
+int eata2x_old_reset(Scsi_Cmnd *, unsigned int);
+int eata2x_biosparam(Disk *, kdev_t, int *);
+
+#define EATA_VERSION "4.33.00"
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
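+/* e.g. LinuxVersionCode(2,1,101) packs to 0x020165, the same packing used by
+ * LINUX_VERSION_CODE. */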
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+#define EATA { \
+ name: "EATA/DMA 2.0x rev. " EATA_VERSION " ", \
+ detect: eata2x_detect, \
+ release: eata2x_release, \
+ queuecommand: eata2x_queuecommand, \
+ abort: eata2x_old_abort, \
+ reset: eata2x_old_reset, \
+ eh_abort_handler: eata2x_abort, \
+ eh_device_reset_handler: NULL, \
+ eh_bus_reset_handler: NULL, \
+ eh_host_reset_handler: eata2x_reset, \
+ bios_param: eata2x_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 1 /* Enable new error code */ \
+ }
+
+#else /* Use old scsi code */
+
+#define EATA { \
+ name: "EATA/DMA 2.0x rev. " EATA_VERSION " ", \
+ detect: eata2x_detect, \
+ release: eata2x_release, \
+ queuecommand: eata2x_queuecommand, \
+ abort: eata2x_old_abort, \
+ reset: eata2x_old_reset, \
+ bios_param: eata2x_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING \
+ }
+
+#endif
+
+#endif
diff --git a/linux/src/drivers/scsi/eata_dma.c b/linux/src/drivers/scsi/eata_dma.c
new file mode 100644
index 0000000..c019813
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma.c
@@ -0,0 +1,1603 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all ISA based EATA-DMA boards *
+ * like PM2011, PM2021, PM2041, PM3021 *
+ * -supports all EISA based EATA-DMA boards *
+ * like PM2012B, PM2022, PM2122, PM2322, PM2042, *
+ * PM3122, PM3222, PM3332 *
+ * -supports all PCI based EATA-DMA boards *
+ * like PM2024, PM2124, PM2044, PM2144, PM3224, *
+ * PM3334 *
+ * -supports the Wide, Ultra Wide and Differential *
+ * versions of the boards *
+ * -supports multiple HBAs with & without IRQ sharing *
+ * -supports all SCSI channels on multi channel boards *
+ * -supports ix86 and MIPS, untested on ALPHA *
+ * -needs identical IDs on all channels of a HBA *
+ * -can be loaded as module *
+ * -displays statistical and hardware information *
+ * in /proc/scsi/eata_dma *
+ * -provides rudimentary latency measurement *
+ * possibilities via /proc/scsi/eata_dma/<hostnum> *
+ * *
+ * (c)1993-96 Michael Neuffer *
+ * mike@i-Connect.Net *
+ * neuffer@mail.uni-mainz.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ * I have to thank DPT for their excellent support. I took *
+ * me almost a year and a stopover at their HQ, on my first *
+ * trip to the USA, to get it, but since then they've been *
+ * very helpful and tried to give me all the infos and *
+ * support I need. *
+ * *
+ * Thanks also to Simon Shapiro, Greg Hosler and Mike *
+ * Jagdis who did a lot of testing and found quite a number *
+ * of bugs during the development. *
+ ************************************************************
+ * last change: 96/10/21 OS: Linux 2.0.23 *
+ ************************************************************/
+
+/* Look in eata_dma.h for configuration and revision information */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#ifdef __mips__
+#include <asm/cachectl.h>
+#endif
+#include <linux/blk.h>
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+#include "eata_dma.h"
+#include "eata_dma_proc.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_eata_dma = {
+ PROC_SCSI_EATA, 8, "eata_dma",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static u32 ISAbases[] =
+{0x1F0, 0x170, 0x330, 0x230};
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static struct eata_sp *status = 0;   /* Status packet array */
+static void *dma_scratch = 0;
+
+static struct eata_register *fake_int_base;
+static int fake_int_result;
+static int fake_int_happened;
+
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
+void eata_scsi_done (Scsi_Cmnd * scmd)
+{
+ scmd->request.rq_status = RQ_SCSI_DONE;
+
+ if (scmd->request.sem != NULL)
+ up(scmd->request.sem);
+
+ return;
+}
+
+void eata_fake_int_handler(s32 irq, void *dev_id, struct pt_regs * regs)
+{
+ fake_int_result = inb((ulong)fake_int_base + HA_RSTATUS);
+ fake_int_happened = TRUE;
+ DBG(DBG_INTR3, printk("eata_fake_int_handler called irq%d base %p"
+ " res %#x\n", irq, fake_int_base, fake_int_result));
+ return;
+}
+
+#include "eata_dma_proc.c"
+
+#ifdef MODULE
+int eata_release(struct Scsi_Host *sh)
+{
+ uint i;
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL);
+ else reg_IRQ[sh->irq]--;
+
+ scsi_init_free((void *)status, 512);
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+ for (i = 0; i < sh->can_queue; i++){ /* Free all SG arrays */
+ if(SD(sh)->ccb[i].sg_list != NULL)
+ scsi_init_free((void *) SD(sh)->ccb[i].sg_list,
+ sh->sg_tablesize * sizeof(struct eata_sg_list));
+ }
+
+ if (SD(sh)->channel == 0) {
+ if (sh->dma_channel != BUSMASTER) free_dma(sh->dma_channel);
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
+
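+/* Latency bookkeeping: each *_lat[] row holds [0] sample count, [1] minimum,
+ * [2] maximum and [3] accumulated time in jiffies; reads and writes are
+ * additionally bucketed by transfer size via cp->sizeindex. */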
+inline void eata_latency_in(struct eata_ccb *cp, hostdata *hd)
+{
+ uint time;
+ time = jiffies - cp->timestamp;
+ if(hd->all_lat[1] > time)
+ hd->all_lat[1] = time;
+ if(hd->all_lat[2] < time)
+ hd->all_lat[2] = time;
+ hd->all_lat[3] += time;
+ hd->all_lat[0]++;
+ if((cp->rw_latency) == WRITE) { /* was WRITE */
+ if(hd->writes_lat[cp->sizeindex][1] > time)
+ hd->writes_lat[cp->sizeindex][1] = time;
+ if(hd->writes_lat[cp->sizeindex][2] < time)
+ hd->writes_lat[cp->sizeindex][2] = time;
+ hd->writes_lat[cp->sizeindex][3] += time;
+ hd->writes_lat[cp->sizeindex][0]++;
+ } else if((cp->rw_latency) == READ) {
+ if(hd->reads_lat[cp->sizeindex][1] > time)
+ hd->reads_lat[cp->sizeindex][1] = time;
+ if(hd->reads_lat[cp->sizeindex][2] < time)
+ hd->reads_lat[cp->sizeindex][2] = time;
+ hd->reads_lat[cp->sizeindex][3] += time;
+ hd->reads_lat[cp->sizeindex][0]++;
+ }
+}
+
+inline void eata_latency_out(struct eata_ccb *cp, Scsi_Cmnd *cmd)
+{
+ int x, z;
+ short *sho;
+ long *lon;
+ x = 0; /* just to keep GCC quiet */
+ cp->timestamp = jiffies; /* For latency measurements */
+ switch(cmd->cmnd[0]) {
+ case WRITE_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_6:
+ x = cmd->cmnd[4]/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_10:
+ sho = (short *) &cmd->cmnd[7];
+ x = ntohs(*sho)/2;
+ cp->rw_latency = READ;
+ break;
+ case WRITE_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = WRITE;
+ break;
+ case READ_12:
+ lon = (long *) &cmd->cmnd[6];
+ x = ntohl(*lon)/2;
+ cp->rw_latency = READ;
+ break;
+ default:
+ cp->rw_latency = OTHER;
+ break;
+ }
+ if (cmd->cmnd[0] == WRITE_6 || cmd->cmnd[0] == WRITE_10 ||
+ cmd->cmnd[0] == WRITE_12 || cmd->cmnd[0] == READ_6 ||
+ cmd->cmnd[0] == READ_10 || cmd->cmnd[0] == READ_12) {
+ for(z = 0; (x > (1 << z)) && (z <= 11); z++)
+ /* nothing */;
+ cp->sizeindex = z;
+ }
+}
+
+
+void eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ uint i, result = 0;
+ uint hba_stat, scsi_stat, eata_stat;
+ Scsi_Cmnd *cmd;
+ struct eata_ccb *ccb;
+ struct eata_sp *sp;
+ uint base;
+ uint x;
+ struct Scsi_Host *sh;
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if (sh->irq != irq)
+ continue;
+
+ while(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+
+ int_counter++;
+
+ sp = &SD(sh)->sp;
+#ifdef __mips__
+ sys_cacheflush(sp, sizeof(struct eata_sp), 2);
+#endif
+ ccb = sp->ccb;
+
+ if(ccb == NULL) {
+ eata_stat = inb((uint)sh->base + HA_RSTATUS);
+ printk("eata_dma: int_handler, Spurious IRQ %d "
+ "received. CCB pointer not set.\n", irq);
+ break;
+ }
+
+ cmd = ccb->cmd;
+ base = (uint) cmd->host->base;
+ hba_stat = sp->hba_stat;
+
+ scsi_stat = (sp->scsi_stat >> 1) & 0x1f;
+
+ if (sp->EOC == FALSE) {
+ eata_stat = inb(base + HA_RSTATUS);
+ printk(KERN_WARNING "eata_dma: int_handler, board: %x cmd %lx "
+ "returned unfinished.\n"
+ "EATA: %x HBA: %x SCSI: %x spadr %lx spadrirq %lx, "
+ "irq%d\n", base, (long)ccb, eata_stat, hba_stat,
+ scsi_stat,(long)&status, (long)&status[irq], irq);
+ cmd->result = DID_ERROR << 16;
+ ccb->status = FREE;
+ cmd->scsi_done(cmd);
+ break;
+ }
+
+ sp->EOC = FALSE; /* Clean out this flag */
+
+ if (ccb->status == LOCKED || ccb->status == RESET) {
+		printk("eata_dma: int_handler, reset command pid %ld returned"
+ "\n", cmd->pid);
+ DBG(DBG_INTR && DBG_DELAY, DELAY(1));
+ }
+
+ eata_stat = inb(base + HA_RSTATUS);
+ DBG(DBG_INTR, printk("IRQ %d received, base %#.4x, pid %ld, "
+ "target: %x, lun: %x, ea_s: %#.2x, hba_s: "
+ "%#.2x \n", irq, base, cmd->pid, cmd->target,
+ cmd->lun, eata_stat, hba_stat));
+
+ switch (hba_stat) {
+ case HA_NO_ERROR: /* NO Error */
+ if(HD(cmd)->do_latency == TRUE && ccb->timestamp)
+ eata_latency_in(ccb, HD(cmd));
+ result = DID_OK << 16;
+ break;
+ case HA_ERR_SEL_TO: /* Selection Timeout */
+ case HA_ERR_CMD_TO: /* Command Timeout */
+ result = DID_TIME_OUT << 16;
+ break;
+ case HA_BUS_RESET: /* SCSI Bus Reset Received */
+ result = DID_RESET << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: BUS RESET "
+ "received on cmd %ld\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_INIT_POWERUP: /* Initial Controller Power-up */
+ if (cmd->device->type != TYPE_TAPE)
+ result = DID_BUS_BUSY << 16;
+ else
+ result = DID_ERROR << 16;
+
+ for (i = 0; i < MAXTARGET; i++)
+ DBG(DBG_STATUS, printk(KERN_DEBUG "scsi%d: cmd pid %ld "
+ "returned with INIT_POWERUP\n",
+ HD(cmd)->HBA_number, cmd->pid));
+ break;
+ case HA_CP_ABORT_NA:
+ case HA_CP_ABORTED:
+ result = DID_ABORT << 16;
+ DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: aborted cmd "
+ "returned\n", HD(cmd)->HBA_number));
+ break;
+ case HA_CP_RESET_NA:
+ case HA_CP_RESET:
+ HD(cmd)->resetlevel[cmd->channel] = 0;
+ result = DID_RESET << 16;
+		DBG(DBG_STATUS, printk(KERN_WARNING "scsi%d: reset cmd "
+				       "pid %ld returned\n",
+				       HD(cmd)->HBA_number, cmd->pid));
+		break;
+ case HA_SCSI_HUNG: /* SCSI Hung */
+ printk(KERN_ERR "scsi%d: SCSI hung\n", HD(cmd)->HBA_number);
+ result = DID_ERROR << 16;
+ break;
+ case HA_RSENSE_FAIL: /* Auto Request-Sense Failed */
+ DBG(DBG_STATUS, printk(KERN_ERR "scsi%d: Auto Request Sense "
+ "Failed\n", HD(cmd)->HBA_number));
+ result = DID_ERROR << 16;
+ break;
+ case HA_UNX_BUSPHASE: /* Unexpected Bus Phase */
+ case HA_UNX_BUS_FREE: /* Unexpected Bus Free */
+ case HA_BUS_PARITY: /* Bus Parity Error */
+ case HA_UNX_MSGRJCT: /* Unexpected Message Reject */
+ case HA_RESET_STUCK: /* SCSI Bus Reset Stuck */
+ case HA_PARITY_ERR: /* Controller Ram Parity */
+ default:
+ result = DID_ERROR << 16;
+ break;
+ }
+ cmd->result = result | (scsi_stat << 1);
+
+#if DBG_INTR2
+ if (scsi_stat || result || hba_stat || eata_stat != 0x50
+ || cmd->scsi_done == NULL || cmd->device->id == 7)
+ printk("HBA: %d, channel %d, id: %d, lun %d, pid %ld:\n"
+ "eata_stat %#x, hba_stat %#.2x, scsi_stat %#.2x, "
+ "sense_key: %#x, result: %#.8x\n", x,
+ cmd->device->channel, cmd->device->id, cmd->device->lun,
+ cmd->pid, eata_stat, hba_stat, scsi_stat,
+ cmd->sense_buffer[2] & 0xf, cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+#endif
+
+ ccb->status = FREE; /* now we can release the slot */
+ cmd->scsi_done(cmd);
+ }
+ }
+
+ return;
+}
+
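+/* Hand a command packet to the HBA: spin until the adapter's busy bit in
+ * HA_RAUXSTAT clears, convert the packet address to a bus address, write it
+ * one byte at a time into the HA_WDMAADDR window and finally write the
+ * command code to HA_WCOMMAND. */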
+inline int eata_send_command(u32 addr, u32 base, u8 command)
+{
+ long loop = R_LIMIT;
+
+ while (inb(base + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0)
+ return(FALSE);
+
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ /* And now the address in nice little byte chunks */
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ outb(command, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
+inline int eata_send_immediate(u32 base, u32 addr, u8 ifc, u8 code, u8 code2)
+{
+ if(addr != (u32) NULL)
+ addr = virt_to_bus((void *)addr);
+
+ /*
+ * This is overkill.....but the MIPSen seem to need this
+ * and it will be optimized away for i86 and ALPHA machines.
+ */
+ flush_cache_all();
+
+ outb(0x0, base + HA_WDMAADDR - 1);
+ if(addr){
+#ifdef __LITTLE_ENDIAN
+ outb(addr, base + HA_WDMAADDR);
+ outb(addr >> 8, base + HA_WDMAADDR + 1);
+ outb(addr >> 16, base + HA_WDMAADDR + 2);
+ outb(addr >> 24, base + HA_WDMAADDR + 3);
+#else
+ outb(addr >> 24, base + HA_WDMAADDR);
+ outb(addr >> 16, base + HA_WDMAADDR + 1);
+ outb(addr >> 8, base + HA_WDMAADDR + 2);
+ outb(addr, base + HA_WDMAADDR + 3);
+#endif
+ } else {
+ outb(0x0, base + HA_WDMAADDR);
+ outb(0x0, base + HA_WDMAADDR + 1);
+ outb(code2, base + HA_WCODE2);
+ outb(code, base + HA_WCODE);
+ }
+
+ outb(ifc, base + HA_WIFC);
+ outb(EATA_CMD_IMMEDIATE, base + HA_WCOMMAND);
+ return(TRUE);
+}
+
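+/* Queueing path: find a FREE ccb slot (scanning from just past last_ccb),
+ * claim it, build the EATA command packet -- data direction, scatter/gather
+ * list or single buffer, auto request sense, target/channel/lun -- and hand
+ * it to the adapter with eata_send_command().  A full queue or a busy
+ * adapter completes the command immediately with DID_BUS_BUSY. */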
+int eata_queue(Scsi_Cmnd * cmd, void (* done) (Scsi_Cmnd *))
+{
+ unsigned int i, x, y;
+ ulong flags;
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *ccb;
+ struct scatterlist *sl;
+
+
+ save_flags(flags);
+ cli();
+
+#if 0
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_queue.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+#endif
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->sense_buffer[0] != 0) {
+ DBG(DBG_REQSENSE, printk(KERN_DEBUG "Tried to REQUEST SENSE\n"));
+ cmd->result = DID_OK << 16;
+ done(cmd);
+
+ return(0);
+ }
+
+ /* check for free slot */
+ for (y = hd->last_ccb + 1, x = 0; x < sh->can_queue; x++, y++) {
+ if (y >= sh->can_queue)
+ y = 0;
+ if (hd->ccb[y].status == FREE)
+ break;
+ }
+
+ hd->last_ccb = y;
+
+ if (x >= sh->can_queue) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk(KERN_CRIT "eata_queue pid %ld, HBA QUEUE FULL..., "
+ "returning DID_BUS_BUSY\n", cmd->pid));
+ done(cmd);
+ restore_flags(flags);
+ return(0);
+ }
+ ccb = &hd->ccb[y];
+
+ memset(ccb, 0, sizeof(struct eata_ccb) - sizeof(struct eata_sg_list *));
+
+ ccb->status = USED; /* claim free slot */
+
+ restore_flags(flags);
+
+ DBG(DBG_QUEUE, printk("eata_queue pid %ld, target: %x, lun: %x, y %d\n",
+ cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ if(hd->do_latency == TRUE)
+ eata_latency_out(ccb, cmd);
+
+ cmd->scsi_done = (void *)done;
+
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ ccb->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ ccb->DataIn = TRUE; /* Input mode */
+ }
+
+ /* FIXME: This will have to be changed once the midlevel driver
+ * allows different HBA IDs on every channel.
+ */
+ if (cmd->target == sh->this_id)
+ ccb->Interpret = TRUE; /* Interpret command */
+
+ if (cmd->use_sg) {
+ ccb->scatter = TRUE; /* SG mode */
+ if (ccb->sg_list == NULL) {
+ ccb->sg_list = kmalloc(sh->sg_tablesize * sizeof(struct eata_sg_list),
+ GFP_ATOMIC | GFP_DMA);
+ }
+ if (ccb->sg_list == NULL)
+	    panic("eata_dma: Ran out of DMA memory for SG lists!\n");
+ ccb->cp_dataDMA = htonl(virt_to_bus(ccb->sg_list));
+
+ ccb->cp_datalen = htonl(cmd->use_sg * sizeof(struct eata_sg_list));
+ sl=(struct scatterlist *)cmd->request_buffer;
+ for(i = 0; i < cmd->use_sg; i++, sl++){
+ ccb->sg_list[i].data = htonl(virt_to_bus(sl->address));
+ ccb->sg_list[i].len = htonl((u32) sl->length);
+ }
+ } else {
+ ccb->scatter = FALSE;
+ ccb->cp_datalen = htonl(cmd->request_bufflen);
+ ccb->cp_dataDMA = htonl(virt_to_bus(cmd->request_buffer));
+ }
+
+ ccb->Auto_Req_Sen = TRUE;
+ ccb->cp_reqDMA = htonl(virt_to_bus(cmd->sense_buffer));
+ ccb->reqlen = sizeof(cmd->sense_buffer);
+
+ ccb->cp_id = cmd->target;
+ ccb->cp_channel = cmd->channel;
+ ccb->cp_lun = cmd->lun;
+ ccb->cp_dispri = TRUE;
+ ccb->cp_identify = TRUE;
+ memcpy(ccb->cp_cdb, cmd->cmnd, cmd->cmd_len);
+
+ ccb->cp_statDMA = htonl(virt_to_bus(&(hd->sp)));
+
+ ccb->cp_viraddr = ccb; /* This will be passed thru, so we don't need to
+ * convert it */
+ ccb->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ if(eata_send_command((u32) ccb, (u32) sh->base, EATA_CMD_DMA_SEND_CP) == FALSE) {
+ cmd->result = DID_BUS_BUSY << 16;
+ DBG(DBG_QUEUE && DBG_ABNORM,
+ printk("eata_queue target %d, pid %ld, HBA busy, "
+ "returning DID_BUS_BUSY\n",cmd->target, cmd->pid));
+ ccb->status = FREE;
+ done(cmd);
+ return(0);
+ }
+ DBG(DBG_QUEUE, printk("Queued base %#.4x pid: %ld target: %x lun: %x "
+ "slot %d irq %d\n", (s32)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ return(0);
+}
+
+
+int eata_abort(Scsi_Cmnd * cmd)
+{
+ ulong loop = HZ / 2;
+ ulong flags;
+ int x;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_abort called pid: %ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+  /* Some interrupt controllers seem to lose interrupts */
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_abort.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY) {
+ if (--loop == 0) {
+ printk("eata_dma: abort, timeout error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ }
+ if (CD(cmd)->status == RESET) {
+ printk("eata_dma: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ DBG(DBG_ABNORM, printk("eata_dma: abort, queue slot locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk("Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_dma: abort: invalid slot status\n");
+}
+
+int eata_reset(Scsi_Cmnd * cmd, unsigned int resetflags)
+{
+ uint x;
+ ulong loop = loops_per_sec / 3;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+ struct Scsi_Host *sh;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk("eata_reset called pid:%ld target: %x lun: %x"
+ " reason %x\n", cmd->pid, cmd->target, cmd->lun,
+ cmd->abort_reason));
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->next) {
+ if(inb((uint)sh->base + HA_RAUXSTAT) & HA_AIRQ) {
+ printk("eata_dma: scsi%d interrupt pending in eata_reset.\n"
+ " Calling interrupt handler.\n", sh->host_no);
+ eata_int_handler(sh->irq, 0, 0);
+ }
+ }
+
+ if (HD(cmd)->state == RESET) {
+ printk("eata_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ while (inb((u32)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk("eata_reset: exit, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ if (HD(cmd)->ccb[x].status == LOCKED) {
+ HD(cmd)->ccb[x].status = FREE;
+ printk("eata_reset: locked slot %d forced free.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ continue;
+ }
+
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+
+ if (sp == NULL)
+ panic("eata_reset: slot %d, sp==NULL.\n", x);
+
+ printk("eata_reset: slot %d in reset, pid %ld.\n", x, sp->pid);
+
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ if (sp == cmd)
+ success = TRUE;
+ }
+
+ /* hard reset the HBA */
+ inb((u32) (cmd->host->base) + HA_RSTATUS); /* This might cause trouble */
+ eata_send_command(0, (u32) cmd->host->base, EATA_CMD_RESET);
+
+ HD(cmd)->state = RESET;
+
+ DBG(DBG_ABNORM, printk("eata_reset: board reset done, enabling "
+ "interrupts.\n"));
+
+  DELAY(2); /* In theory we should get interrupts that free all
+	     * used queue slots */
+
+ DBG(DBG_ABNORM, printk("eata_reset: interrupts disabled again.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt and those that
+ * are still LOCKED from the last reset */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(cmd)->ccb[x].status = LOCKED;
+
+ printk("eata_reset: slot %d locked, DID_RESET, pid %ld done.\n",
+ x, sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ sp->scsi_done(sp);
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, pending.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PENDING);
+ } else {
+ DBG(DBG_ABNORM, printk("eata_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
+/* Here we try to determine the optimum queue depth for
+ * each attached device.
+ *
+ * At the moment the algorithm is rather simple
+ */
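+/* In outline: each device contributes a per-type weight (TYPE_DISK_QUEUE,
+ * TYPE_TAPE_QUEUE, TYPE_ROM_QUEUE or TYPE_OTHER_QUEUE); factor is the total
+ * queue size * 10 divided by the total weight, and every device then gets
+ * weight * factor / 10 slots, clamped to UPPER_DEVICE_QUEUE_LIMIT and to at
+ * least 1.  ISA boards are pinned to a depth of 2 because of the
+ * bounce-buffer overhead. */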
+static void eata_select_queue_depths(struct Scsi_Host *host,
+ Scsi_Device *devicelist)
+{
+ Scsi_Device *device;
+ int devcount = 0;
+ int factor = 0;
+
+#if CRIPPLE_QUEUE
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host)
+ device->queue_depth = 2;
+ }
+#else
+    /* First we do a sample run to find out what we have */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if (device->host == host) {
+ devcount++;
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ factor += TYPE_DISK_QUEUE;
+ break;
+ case TYPE_TAPE:
+ factor += TYPE_TAPE_QUEUE;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ factor += TYPE_ROM_QUEUE;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ factor += TYPE_OTHER_QUEUE;
+ break;
+ }
+ }
+ }
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: needed queueslots %d\n",
+ host->host_no, factor));
+
+ if(factor == 0) /* We don't want to get a DIV BY ZERO error */
+ factor = 1;
+
+ factor = (SD(host)->queuesize * 10) / factor;
+
+ DBG(DBG_REGISTER, printk(KERN_DEBUG "scsi%d: using factor %dE-1\n",
+ host->host_no, factor));
+
+    /* Now that we have the factor we can set the individual queue sizes */
+ for(device = devicelist; device != NULL; device = device->next) {
+ if(device->host == host) {
+ if(SD(device->host)->bustype != IS_ISA){
+ switch(device->type) {
+ case TYPE_DISK:
+ case TYPE_MOD:
+ device->queue_depth = (TYPE_DISK_QUEUE * factor) / 10;
+ break;
+ case TYPE_TAPE:
+ device->queue_depth = (TYPE_TAPE_QUEUE * factor) / 10;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ device->queue_depth = (TYPE_ROM_QUEUE * factor) / 10;
+ break;
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ default:
+ device->queue_depth = (TYPE_OTHER_QUEUE * factor) / 10;
+ break;
+ }
+ } else /* ISA forces us to limit the queue depth because of the
+ * bounce buffer memory overhead. I know this is cruel */
+ device->queue_depth = 2;
+
+ /*
+	     * Experience showed that we need to set an upper limit on the
+	     * number of commands we allow to queue for a single device on the bus.
+ * If we get above that limit, the broken midlevel SCSI code
+ * will produce bogus timeouts and aborts en masse. :-(
+ */
+ if(device->queue_depth > UPPER_DEVICE_QUEUE_LIMIT)
+ device->queue_depth = UPPER_DEVICE_QUEUE_LIMIT;
+ if(device->queue_depth == 0)
+ device->queue_depth = 1;
+
+ printk(KERN_INFO "scsi%d: queue depth for target %d on channel %d "
+ "set to %d\n", host->host_no, device->id, device->channel,
+ device->queue_depth);
+ }
+ }
+#endif
+}
+
+#if CHECK_BLINK
+int check_blink_state(long base)
+{
+ ushort loops = 10;
+ u32 blinkindicator;
+ u32 state = 0x12345678;
+ u32 oldstate = 0;
+
+ blinkindicator = htonl(0x54504442);
+ while ((loops--) && (state != oldstate)) {
+ oldstate = state;
+ state = inl((uint) base + 1);
+ }
+
+ DBG(DBG_BLINK, printk("Did Blink check. Status: %d\n",
+ (state == oldstate) && (state == blinkindicator)));
+
+ if ((state == oldstate) && (state == blinkindicator))
+ return(TRUE);
+ else
+ return (FALSE);
+}
+#endif
+
+char * get_board_data(u32 base, u32 irq, u32 id)
+{
+ struct eata_ccb *cp;
+ struct eata_sp *sp;
+ static char *buff;
+ ulong i;
+
+ cp = (struct eata_ccb *) scsi_init_malloc(sizeof(struct eata_ccb),
+ GFP_ATOMIC | GFP_DMA);
+ sp = (struct eata_sp *) scsi_init_malloc(sizeof(struct eata_sp),
+ GFP_ATOMIC | GFP_DMA);
+
+ buff = dma_scratch;
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(sp, 0, sizeof(struct eata_sp));
+ memset(buff, 0, 256);
+
+ cp->DataIn = TRUE;
+ cp->Interpret = TRUE; /* Interpret command */
+ cp->cp_dispri = TRUE;
+ cp->cp_identify = TRUE;
+
+ cp->cp_datalen = htonl(56);
+ cp->cp_dataDMA = htonl(virt_to_bus(buff));
+ cp->cp_statDMA = htonl(virt_to_bus(sp));
+ cp->cp_viraddr = cp;
+
+ cp->cp_id = id;
+ cp->cp_lun = 0;
+
+ cp->cp_cdb[0] = INQUIRY;
+ cp->cp_cdb[1] = 0;
+ cp->cp_cdb[2] = 0;
+ cp->cp_cdb[3] = 0;
+ cp->cp_cdb[4] = 56;
+ cp->cp_cdb[5] = 0;
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ eata_send_command((u32) cp, (u32) base, EATA_CMD_DMA_SEND_CP);
+
+ i = jiffies + (3 * HZ);
+ while (fake_int_happened == FALSE && jiffies <= i)
+ barrier();
+
+ DBG(DBG_INTR3, printk(KERN_DEBUG "fake_int_result: %#x hbastat %#x "
+ "scsistat %#x, buff %p sp %p\n",
+ fake_int_result, (u32) (sp->hba_stat /*& 0x7f*/),
+ (u32) sp->scsi_stat, buff, sp));
+
+ scsi_init_free((void *)cp, sizeof(struct eata_ccb));
+ scsi_init_free((void *)sp, sizeof(struct eata_sp));
+
+ if ((fake_int_result & HA_SERROR) || jiffies > i){
+ printk(KERN_WARNING "eata_dma: trying to reset HBA at %x to clear "
+ "possible blink state\n", base);
+ /* hard reset the HBA */
+ inb((u32) (base) + HA_RSTATUS);
+ eata_send_command(0, base, EATA_CMD_RESET);
+ DELAY(1);
+ return (NULL);
+ } else
+ return (buff);
+}
+
+
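+/* Read the HBA configuration in PIO mode: issue EATA_CMD_PIO_READ_CONFIG,
+ * pull the get_conf structure out of HA_RDATA one 16-bit word at a time
+ * whenever HA_SDRQ is set, and accept it only if no error is flagged and the
+ * EATA signature matches; any remaining DRQ data is drained afterwards. */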
+int get_conf_PIO(u32 base, struct get_conf *buf)
+{
+ ulong loop = R_LIMIT;
+ u16 *p;
+
+ if(check_region(base, 9))
+ return (FALSE);
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return (FALSE);
+
+ fake_int_base = (struct eata_register *) base;
+ fake_int_result = FALSE;
+ fake_int_happened = FALSE;
+
+ DBG(DBG_PIO && DBG_PROBE,
+ printk("Issuing PIO READ CONFIG to HBA at %#x\n", base));
+ eata_send_command(0, base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = R_LIMIT;
+ for (p = (u16 *) buf;
+ (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ return (FALSE);
+
+ loop = R_LIMIT;
+ *p = inw(base + HA_RDATA);
+ }
+
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? */
+ if (htonl(EATA_SIGNATURE) == buf->signature) {
+ DBG(DBG_PIO&&DBG_PROBE, printk("EATA Controller found at %x "
+ "EATA Level: %x\n", (uint) base,
+ (uint) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ return (TRUE);
+ }
+ } else {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+ "for HBA at %lx\n", (long)base));
+ }
+ return (FALSE);
+}
+
+
+void print_config(struct get_conf *gc)
+{
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d DMAS:%d\n",
+ (u32) ntohl(gc->len), gc->version,
+ gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support,
+ gc->DMA_support);
+ printk("DMAV:%d HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n",
+ gc->DMA_valid, gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+ gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d DMAC:%d FORCADR:%d SG_64K:%d SG_UAE:%d MID:%d "
+ "MCH:%d MLUN:%d\n",
+ gc->IRQ, gc->IRQ_TR, (8 - gc->DMA_channel) & 7, gc->FORCADR,
+ gc->SG_64K, gc->SG_UAE, gc->MAX_ID, gc->MAX_CHAN, gc->MAX_LUN);
+ printk("RIDQ:%d PCI:%d EISA:%d\n",
+ gc->ID_qest, gc->is_PCI, gc->is_EISA);
+ DBG(DPT_DEBUG, DELAY(14));
+}
+
+short register_HBA(u32 base, struct get_conf *gc, Scsi_Host_Template * tpnt,
+ u8 bustype)
+{
+ ulong size = 0;
+ unchar dma_channel = 0;
+ char *buff = 0;
+ unchar bugs = 0;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+ int x;
+
+
+ DBG(DBG_REGISTER, print_config(gc));
+
+ if (gc->DMA_support == FALSE) {
+ printk("The EATA HBA at %#.4x does not support DMA.\n"
+ "Please use the EATA-PIO driver.\n", base);
+ return (FALSE);
+ }
+ if(gc->HAA_valid == FALSE || ntohl(gc->len) < 0x22)
+ gc->MAX_CHAN = 0;
+
+ if (reg_IRQ[gc->IRQ] == FALSE) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, (void *) eata_fake_int_handler, SA_INTERRUPT,
+ "eata_dma", NULL)){
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.", gc->IRQ);
+ return (FALSE);
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ] == TRUE) {
+ printk("Can't support more than one HBA on this IRQ,\n"
+ " if the IRQ is edge triggered. Sorry.\n");
+ return (FALSE);
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+
+    /* If DMA is supported and a channel number is given, but DMA_valid is
+     * not set to confirm it, we must have pre-2.0 firmware (1.7?), which
+     * leaves us guessing, since the "newer ones" also don't set the
+     * DMA_valid bit.
+ */
+ if (gc->DMA_support && !gc->DMA_valid && gc->DMA_channel) {
+ printk(KERN_WARNING "eata_dma: If you are using a pre 2.0 firmware "
+ "please update it !\n"
+ " You can get new firmware releases from ftp.dpt.com\n");
+ gc->DMA_channel = (base == 0x1f0 ? 3 /* DMA=5 */ : 2 /* DMA=6 */);
+ gc->DMA_valid = TRUE;
+ }
+
+    /* If gc->DMA_valid is set, it must be an ISA HBA and we have to register it */
+ dma_channel = BUSMASTER;
+ if (gc->DMA_valid) {
+ if (request_dma(dma_channel = (8 - gc->DMA_channel) & 7, "eata_dma")) {
+ printk(KERN_WARNING "Unable to allocate DMA channel %d for ISA HBA"
+ " at %#.4x.\n", dma_channel, base);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (dma_channel != BUSMASTER) {
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ }
+
+ if (bustype != IS_EISA && bustype != IS_ISA)
+ buff = get_board_data(base, gc->IRQ, gc->scsi_id[3]);
+
+ if (buff == NULL) {
+ if (bustype == IS_EISA || bustype == IS_ISA) {
+ bugs = bugs || BROKEN_INQUIRY;
+ } else {
+ if (gc->DMA_support == FALSE)
+ printk(KERN_WARNING "HBA at %#.4x doesn't support DMA. "
+ "Sorry\n", base);
+ else
+ printk(KERN_WARNING "HBA at %#.4x does not react on INQUIRY. "
+ "Sorry.\n", base);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+ }
+
+ if (gc->DMA_support == FALSE && buff != NULL)
+ printk(KERN_WARNING "HBA %.12sat %#.4x doesn't set the DMA_support "
+ "flag correctly.\n", &buff[16], base);
+
+ request_region(base, 9, "eata_dma"); /* We already checked the
+ * availability, so this
+ * should not fail.
+ */
+
+ if(ntohs(gc->queuesiz) == 0) {
+ gc->queuesiz = ntohs(64);
+ printk(KERN_WARNING "Warning: Queue size has to be corrected. Assuming"
+ " 64 queueslots\n"
+ " This might be a PM2012B with a defective Firmware\n"
+ " Contact DPT support@dpt.com for an upgrade\n");
+ }
+
+ size = sizeof(hostdata) + ((sizeof(struct eata_ccb) + sizeof(long))
+ * ntohs(gc->queuesiz));
+
+ DBG(DBG_REGISTER, printk("scsi_register size: %ld\n", size));
+
+ sh = scsi_register(tpnt, size);
+
+ if(sh != NULL) {
+
+ hd = SD(sh);
+
+ memset(hd->reads, 0, sizeof(u32) * 26);
+
+ sh->select_queue_depths = eata_select_queue_depths;
+
+ hd->bustype = bustype;
+
+ /*
+ * If we are using an ISA board, we can't use extended SG,
+ * because we would need excessive amounts of memory for
+ * bounce buffers.
+ */
+ if (gc->SG_64K==TRUE && ntohs(gc->SGsiz)==64 && hd->bustype!=IS_ISA){
+ sh->sg_tablesize = SG_SIZE_BIG;
+ } else {
+ sh->sg_tablesize = ntohs(gc->SGsiz);
+ if (sh->sg_tablesize > SG_SIZE || sh->sg_tablesize == 0) {
+ if (sh->sg_tablesize == 0)
+ printk(KERN_WARNING "Warning: SG size had to be fixed.\n"
+ "This might be a PM2012 with a defective Firmware"
+ "\nContact DPT support@dpt.com for an upgrade\n");
+ sh->sg_tablesize = SG_SIZE;
+ }
+ }
+ hd->sgsize = sh->sg_tablesize;
+ }
+
+ if(sh != NULL) {
+ sh->can_queue = hd->queuesize = ntohs(gc->queuesiz);
+ sh->cmd_per_lun = 0;
+ }
+
+ if(sh == NULL) {
+ DBG(DBG_REGISTER, printk(KERN_NOTICE "eata_dma: couldn't register HBA"
+ " at%x \n", base));
+ scsi_unregister(sh);
+ if (gc->DMA_valid)
+ free_dma(dma_channel);
+
+ reg_IRQ[gc->IRQ]--;
+ if (reg_IRQ[gc->IRQ] == 0)
+ free_irq(gc->IRQ, NULL);
+ if (gc->IRQ_TR == FALSE)
+ reg_IRQL[gc->IRQ] = FALSE;
+ return (FALSE);
+ }
+
+
+ hd->broken_INQUIRY = (bugs & BROKEN_INQUIRY);
+
+ if(hd->broken_INQUIRY == TRUE) {
+ strcpy(hd->vendor, "DPT");
+ strcpy(hd->name, "??????????");
+ strcpy(hd->revision, "???.?");
+ hd->firmware_revision = 0;
+ } else {
+ strncpy(hd->vendor, &buff[8], 8);
+ hd->vendor[8] = 0;
+ strncpy(hd->name, &buff[16], 17);
+ hd->name[17] = 0;
+ hd->revision[0] = buff[32];
+ hd->revision[1] = buff[33];
+ hd->revision[2] = buff[34];
+ hd->revision[3] = '.';
+ hd->revision[4] = buff[35];
+ hd->revision[5] = 0;
+ hd->firmware_revision = (buff[32] << 24) + (buff[33] << 16)
+ + (buff[34] << 8) + buff[35];
+ }
+
+ if (hd->firmware_revision >= (('0'<<24) + ('7'<<16) + ('G'<< 8) + '0'))
+ hd->immediate_support = 1;
+ else
+ hd->immediate_support = 0;
+
+ switch (ntohl(gc->len)) {
+ case 0x1c:
+ hd->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ hd->EATA_revision = 'b';
+ break;
+ case 0x22:
+ hd->EATA_revision = 'c';
+ break;
+ case 0x24:
+ hd->EATA_revision = 'z';
+ break;
+ default:
+ hd->EATA_revision = '?';
+ }
+
+
+ if(ntohl(gc->len) >= 0x22) {
+ sh->max_id = gc->MAX_ID + 1;
+ sh->max_lun = gc->MAX_LUN + 1;
+ } else {
+ sh->max_id = 8;
+ sh->max_lun = 8;
+ }
+
+ hd->HBA_number = sh->host_no;
+ hd->channel = gc->MAX_CHAN;
+ sh->max_channel = gc->MAX_CHAN;
+ sh->unique_id = base;
+ sh->base = (char *) base;
+ sh->io_port = base;
+ sh->n_io_port = 9;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = dma_channel;
+
+ /* FIXME:
+ * SCSI midlevel code should support different HBA ids on every channel
+ */
+ sh->this_id = gc->scsi_id[3];
+
+ if (gc->SECOND)
+ hd->primary = FALSE;
+ else
+ hd->primary = TRUE;
+
+ sh->wish_block = FALSE;
+
+ if (hd->bustype != IS_ISA) {
+ sh->unchecked_isa_dma = FALSE;
+ } else {
+ sh->unchecked_isa_dma = TRUE; /* We're doing ISA DMA */
+ }
+
+ for(x = 0; x <= 11; x++){ /* Initialize min. latency */
+ hd->writes_lat[x][1] = 0xffffffff;
+ hd->reads_lat[x][1] = 0xffffffff;
+ }
+ hd->all_lat[1] = 0xffffffff;
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if(hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+
+ return (TRUE);
+}
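+
+/*
+ * Illustrative sketch (disabled; not part of the driver proper): the
+ * immediate_support test in register_HBA() packs the four ASCII revision
+ * characters from the INQUIRY data into one u32, so revisions compare as
+ * plain integers.  pack_fw_revision() is a hypothetical helper name, not
+ * a driver symbol.
+ */
+#if 0
+static u32 pack_fw_revision(const u8 *rev)
+{
+    /* rev[0..3] are ASCII digits/letters, e.g. "07G0" -> 0x30374730 */
+    return ((u32)rev[0] << 24) | ((u32)rev[1] << 16)
+         | ((u32)rev[2] << 8)  |  (u32)rev[3];
+}
+/* A board reporting exactly "07G0" yields the same value as
+ * ('0'<<24)+('7'<<16)+('G'<<8)+'0', so it gets immediate_support = 1. */
+#endif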
+
+
+
+void find_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ u32 base;
+ int i;
+
+#if CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+ pal1 = inb((u16)base - 8);
+ pal2 = inb((u16)base - 7);
+ pal3 = inb((u16)base - 6);
+
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) && (pal3 == NEC_ID3))||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) && (pal3 == ATT_ID3))){
+ DBG(DBG_PROBE, printk("EISA EATA id tags found: %x %x %x \n",
+ (int)pal1, (int)pal2, (int)pal3));
+#endif
+ if (get_conf_PIO(base, buf) == TRUE) {
+ if (buf->IRQ) {
+ DBG(DBG_EISA, printk("Registering EISA HBA\n"));
+ register_HBA(base, buf, tpnt, IS_EISA);
+ } else
+ printk("eata_dma: No valid IRQ. HBA removed from list\n");
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(base))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ /* Nothing was found here, so we take this base off the list */
+ EISAbases[i] = 0;
+#if CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
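+
+/*
+ * Illustrative sketch (disabled; not part of the driver proper): EISA
+ * boards sit in slot-relative I/O space, so find_EISA() above only has to
+ * walk the fixed bases 0x1c88, 0x2c88, ...  eisa_slot_base() is a
+ * hypothetical helper used for illustration only.
+ */
+#if 0
+static u32 eisa_slot_base(int i)     /* i = 0 .. MAXEISA-1 */
+{
+    return 0x1c88 + (i * 0x1000);    /* EATA register base of that slot */
+}
+/* With CHECKPAL enabled, the board ID bytes are probed just below this
+ * base, at base-8 .. base-6. */
+#endif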
+
+void find_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (ISAbases[i]) {
+ if (get_conf_PIO(ISAbases[i],buf) == TRUE){
+ DBG(DBG_ISA, printk("Registering ISA HBA\n"));
+ register_HBA(ISAbases[i], buf, tpnt, IS_ISA);
+ }
+#if CHECK_BLINK
+ else {
+ if (check_blink_state(ISAbases[i]))
+ printk("HBA is in BLINK state. Consult your HBAs "
+ "Manual to correct this.\n");
+ }
+#endif
+ ISAbases[i] = 0;
+ }
+ }
+ return;
+}
+
+void find_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+ printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
+#else
+
+ u8 pci_bus, pci_device_fn;
+ static s16 pci_index = 0; /* Device index to PCI BIOS calls */
+ u32 base = 0;
+ u16 com_adr;
+ u16 rev_device;
+ u32 error, i, x;
+ u8 pal1, pal2, pal3;
+
+ if (pcibios_present()) {
+ for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+ if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+ pci_index, &pci_bus, &pci_device_fn))
+ break;
+ DBG(DBG_PROBE && DBG_PCI,
+ printk("eata_dma: find_PCI, HBA at bus %d, device %d,"
+ " function %d, index %d\n", (s32)pci_bus,
+ (s32)((pci_device_fn & 0xf8) >> 3),
+ (s32)(pci_device_fn & 7), pci_index));
+
+ if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_CLASS_DEVICE, &rev_device))) {
+ if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+ if (!(error = pcibios_read_config_word(pci_bus,
+ pci_device_fn, PCI_COMMAND,
+ (u16 *) & com_adr))) {
+ if (!((com_adr & PCI_COMMAND_IO) &&
+ (com_adr & PCI_COMMAND_MASTER))) {
+ printk("eata_dma: find_PCI, HBA has IO or"
+ " BUSMASTER mode disabled\n");
+ continue;
+ }
+ } else
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_COMMAND\n", error);
+ } else
+ printk("eata_dma: find_PCI, DEVICECLASSID %x didn't match\n",
+ rev_device);
+ } else {
+ printk("eata_dma: find_PCI, error %x while reading "
+ "PCI_CLASS_BASE\n",
+ error);
+ continue;
+ }
+
+ if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, (int *) &base))){
+
+ /* Check if the address is valid */
+ if (base & 0x01) {
+ base &= 0xfffffffe;
+ /* EISA tag there ? */
+ pal1 = inb(base);
+ pal2 = inb(base + 1);
+ pal3 = inb(base + 2);
+ if (((pal1 == DPT_ID1) && (pal2 == DPT_ID2)) ||
+ ((pal1 == NEC_ID1) && (pal2 == NEC_ID2) &&
+ (pal3 == NEC_ID3)) ||
+ ((pal1 == ATT_ID1) && (pal2 == ATT_ID2) &&
+ (pal3 == ATT_ID3)))
+ base += 0x08;
+ else
+ base += 0x10; /* Now, THIS is the real address */
+
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_conf_PIO(base, buf) == TRUE) {
+
+ /* OK. We made it this far, so we can go ahead
+ * and register it. We only have to check whether it
+ * also appears in the EISA or ISA list and, if so,
+ * remove it there.
+ */
+ DBG(DBG_PCI, printk("Registering PCI HBA\n"));
+ register_HBA(base, buf, tpnt, IS_PCI);
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88)
+ EISAbases[(base >> 12) & 0x0f] = 0;
+ continue; /* break; */
+ }
+#if CHECK_BLINK
+ else if (check_blink_state(base) == TRUE) {
+ printk("eata_dma: HBA is in BLINK state.\n"
+ "Consult your HBAs manual to correct this.\n");
+ }
+#endif
+ }
+ }
+ } else {
+ printk("eata_dma: error %x while reading "
+ "PCI_BASE_ADDRESS_0\n", error);
+ }
+ }
+ } else {
+ printk("eata_dma: No BIOS32 extensions present. This driver release "
+ "still depends on it.\n"
+ " Skipping scan for PCI HBAs. \n");
+ }
+#endif /* #ifndef CONFIG_PCI */
+ return;
+}
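+
+/*
+ * Illustrative sketch (disabled; not part of the driver proper): how the
+ * loop in find_PCI() turns a raw PCI_BASE_ADDRESS_0 value into the EATA
+ * register base.  decode_dpt_bar() is a hypothetical name; the real code
+ * also accepts the NEC and ATT ID byte combinations.
+ */
+#if 0
+static u32 decode_dpt_bar(u32 bar)
+{
+    u32 base;
+
+    if (!(bar & 0x01))              /* must be an I/O space BAR */
+        return 0;
+    base = bar & 0xfffffffe;        /* strip the I/O indicator bit */
+    /* Boards that also answer with an EISA ID carry those ID bytes first,
+     * so the EATA registers start 8 bytes in; otherwise at offset 0x10. */
+    if (inb(base) == DPT_ID1 && inb(base + 1) == DPT_ID2)
+        return base + 0x08;
+    return base + 0x10;
+}
+#endif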
+
+int eata_detect(Scsi_Host_Template * tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+ printk("Using lots of delays to let you read the debugging output\n"));
+
+ tpnt->proc_dir = &proc_scsi_eata_dma;
+
+ status = scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA);
+ dma_scratch = scsi_init_malloc(1024, GFP_ATOMIC | GFP_DMA);
+
+ if(status == NULL || dma_scratch == NULL) {
+ printk("eata_dma: can't allocate enough memory to probe for hosts !\n");
+ return(0);
+ }
+
+ dma_scratch += 4;
+
+ find_PCI(&gc, tpnt);
+
+ find_EISA(&gc, tpnt);
+
+ find_ISA(&gc, tpnt);
+
+ for (i = 0; i < MAXIRQ; i++) { /* Now that we know what we have, we */
+ if (reg_IRQ[i] >= 1){ /* exchange the interrupt handler which */
+ free_irq(i, NULL); /* we used for probing with the real one */
+ request_irq(i, (void *)(eata_int_handler), SA_INTERRUPT|SA_SHIRQ,
+ "eata_dma", NULL);
+ }
+ }
+
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) driver version: %d.%d%s"
+ "\ndeveloped in co-operation with DPT\n"
+ "(c) 1993-96 Michael Neuffer, mike@i-Connect.Net\n",
+ VER_MAJOR, VER_MINOR, VER_SUB);
+ printk("Registered HBAs:");
+ printk("\nHBA no. Boardtype Revis EATA Bus BaseIO IRQ"
+ " DMA Ch ID Pr QS S/G IS\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.12s v%s 2.0%c %s %#.4x %2d",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+ (u32) HBA_ptr->base, HBA_ptr->irq);
+ if(HBA_ptr->dma_channel != BUSMASTER)
+ printk(" %2x ", HBA_ptr->dma_channel);
+ else
+ printk(" %s", "BMST");
+ printk(" %d %d %c %3d %3d %c\n",
+ SD(HBA_ptr)->channel+1, HBA_ptr->this_id,
+ (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+ HBA_ptr->can_queue, HBA_ptr->sg_tablesize,
+ (SD(HBA_ptr)->immediate_support == TRUE)?'Y':'N');
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ } else {
+ scsi_init_free((void *)status, 512);
+ }
+
+ scsi_init_free((void *)dma_scratch - 4, 1024);
+
+ DBG(DPT_DEBUG, DELAY(12));
+
+ return(registered_HBAs);
+}
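+
+/*
+ * Illustrative sketch (disabled; not part of the driver proper): reg_IRQ[]
+ * acts as a per-IRQ reference count.  register_HBA() installs a dummy
+ * probe handler once per IRQ; after the scan, eata_detect() swaps it for
+ * the real, shareable handler, as restated by this hypothetical helper.
+ */
+#if 0
+static void eata_install_real_handlers(void)
+{
+    int i;
+
+    for (i = 0; i < MAXIRQ; i++) {
+        if (reg_IRQ[i] == 0)        /* this IRQ was never claimed */
+            continue;
+        free_irq(i, NULL);          /* drop the probe-time dummy handler */
+        request_irq(i, (void *) eata_int_handler,
+                    SA_INTERRUPT | SA_SHIRQ, "eata_dma", NULL);
+    }
+}
+#endif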
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = EATA_DMA;
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_dma.h b/linux/src/drivers/scsi/eata_dma.h
new file mode 100644
index 0000000..a23931b
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma.h
@@ -0,0 +1,128 @@
+/********************************************************
+* Header file for eata_dma.c Linux EATA-DMA SCSI driver *
+* (c) 1993-96 Michael Neuffer *
+* mike@i-Connect.Net *
+* neuffer@mail.uni-mainz.de *
+*********************************************************
+* last change: 96/10/14 *
+********************************************************/
+
+#ifndef _EATA_DMA_H
+#define _EATA_DMA_H
+
+#ifndef HOSTS_C
+
+#include "eata_generic.h"
+
+
+#define VER_MAJOR 2
+#define VER_MINOR 5
+#define VER_SUB "9b"
+
+
+/************************************************************************
+ * Here you can switch parts of the code on and off *
+ ************************************************************************/
+
+#define CHECKPAL 0 /* EISA pal checking on/off */
+#define CHECK_BLINK 1 /* Blink state check; switching it off *
+ * might be necessary on some MIPS machines */
+#define CRIPPLE_QUEUE 0 /* Only enable this if the interrupt
+ * controller on your motherboard is
+ * broken and you are experiencing
+ * massive interrupt losses */
+
+/************************************************************************
+ * Debug options. *
+ * Enable DEBUG_EATA and whichever options you require. *
+ ************************************************************************/
+#define DEBUG_EATA 1 /* Enable debug code. */
+#define DPT_DEBUG 0 /* Bobs special */
+#define DBG_DELAY 0 /* Build in delays so debug messages can
+ * be read before they vanish off the top of
+ * the screen! */
+#define DBG_PROBE 0 /* Debug probe routines. */
+#define DBG_PCI 0 /* Trace PCI routines */
+#define DBG_EISA 0 /* Trace EISA routines */
+#define DBG_ISA 0 /* Trace ISA routines */
+#define DBG_BLINK 0 /* Trace Blink check */
+#define DBG_PIO 0 /* Trace get_config_PIO */
+#define DBG_COM 0 /* Trace command call */
+#define DBG_QUEUE 0 /* Trace command queueing. */
+#define DBG_QUEUE2 0 /* Trace command queueing SG. */
+#define DBG_INTR 0 /* Trace interrupt service routine. */
+#define DBG_INTR2 0 /* Trace interrupt service routine. */
+#define DBG_INTR3 0 /* Trace get_board_data interrupts. */
+#define DBG_REQSENSE 0 /* Trace request sense commands */
+#define DBG_RESET 0 /* Trace reset calls */
+#define DBG_STATUS 0 /* Trace status generation */
+#define DBG_PROC 0 /* Debug proc-fs related statistics */
+#define DBG_PROC_WRITE 0
+#define DBG_REGISTER 0 /* Trace HBA registration */
+#define DBG_ABNORM 1 /* Debug abnormal actions (reset, abort)*/
+
+#if DEBUG_EATA
+#define DBG(x, y) if ((x)) {y;}
+#else
+#define DBG(x, y)
+#endif
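+
+/*
+ * Usage sketch (disabled; not part of the header proper): DBG() guards a
+ * statement with one of the option flags above, so debug printks compile
+ * away completely when DEBUG_EATA is 0.  `base' is assumed to be in scope.
+ */
+#if 0
+DBG(DBG_PROBE, printk("probing HBA at %#.4x\n", base));
+DBG(DBG_PROBE && DBG_DELAY, DELAY(1));   /* options can be combined */
+#endif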
+
+#endif /* !HOSTS_C */
+
+int eata_detect(Scsi_Host_Template *);
+const char *eata_info(struct Scsi_Host *);
+int eata_command(Scsi_Cmnd *);
+int eata_queue(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int eata_abort(Scsi_Cmnd *);
+int eata_reset(Scsi_Cmnd *, unsigned int);
+int eata_proc_info(char *, char **, off_t, int, int, int);
+#ifdef MODULE
+int eata_release(struct Scsi_Host *);
+#else
+#define eata_release NULL
+#endif
+
+#include <scsi/scsicam.h>
+
+#define EATA_DMA { \
+ NULL, NULL, \
+ NULL, /* proc_dir_entry */ \
+ eata_proc_info, /* procinfo */ \
+ "EATA (Extended Attachment) HBA driver", \
+ eata_detect, \
+ eata_release, \
+ NULL, NULL, \
+ eata_queue, \
+ eata_abort, \
+ eata_reset, \
+ NULL, /* Slave attach */ \
+ scsicam_bios_param, \
+ 0, /* Canqueue */ \
+ 0, /* this_id */ \
+ 0, /* sg_tablesize */ \
+ 0, /* cmd_per_lun */ \
+ 0, /* present */ \
+ 1, /* True if ISA */ \
+ ENABLE_CLUSTERING }
+
+
+#endif /* _EATA_DMA_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_dma_proc.c b/linux/src/drivers/scsi/eata_dma_proc.c
new file mode 100644
index 0000000..14a4c96
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma_proc.c
@@ -0,0 +1,493 @@
+
+void swap_statistics(u8 *p)
+{
+ u32 y;
+ u32 *lp, h_lp;
+ u16 *sp, h_sp;
+ u8 *bp;
+
+ lp = (u32 *)p;
+ sp = ((short *)lp) + 1; /* Convert Header */
+ h_sp = *sp = ntohs(*sp);
+ lp++;
+
+ do {
+ sp = (u16 *)lp; /* Convert SubHeader */
+ *sp = ntohs(*sp);
+ bp = (u8 *) lp;
+ y = *(bp + 3);
+ lp++;
+ for (h_lp = (u32)lp; (u32)lp < h_lp + ((u32)*(bp + 3)); lp++)
+ *lp = ntohl(*lp);
+ }while ((u32)lp < ((u32)p) + 4 + h_sp);
+
+}
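+
+/*
+ * Illustrative sketch (disabled; not part of the driver proper): the log
+ * sense pages arrive big-endian from the HBA, so swap_statistics() walks
+ * the header, every sub-header and every 32-bit counter and converts each
+ * in place with ntohs()/ntohl().
+ */
+#if 0
+/* A 16-bit length stored by the HBA as the bytes 0x01 0x24 reads as
+ * 0x2401 on little-endian i386; ntohs() restores the intended 0x0124. */
+u16 raw = 0x2401;          /* as read from the buffer on i386 */
+u16 len = ntohs(raw);      /* == 0x0124 */
+#endif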
+
+/*
+ * eata_set_info
+ * buffer : pointer to the data that has been written to the hostfile
+ * length : number of bytes written to the hostfile
+ * HBA_ptr: pointer to the Scsi_Host struct
+ */
+int eata_set_info(char *buffer, int length, struct Scsi_Host *HBA_ptr)
+{
+ int orig_length = length;
+
+ if (length >= 8 && strncmp(buffer, "eata_dma", 8) == 0) {
+ buffer += 9;
+ length -= 9;
+ if(length >= 8 && strncmp(buffer, "latency", 7) == 0) {
+ SD(HBA_ptr)->do_latency = TRUE;
+ return(orig_length);
+ }
+
+ if(length >=10 && strncmp(buffer, "nolatency", 9) == 0) {
+ SD(HBA_ptr)->do_latency = FALSE;
+ return(orig_length);
+ }
+
+ printk("Unknown command:%s length: %d\n", buffer, length);
+ } else
+ printk("Wrong Signature:%10s\n", buffer);
+
+ return(-EINVAL);
+}
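+
+/*
+ * Usage sketch (not part of the driver proper): latency measurement is
+ * toggled by writing to the HBA's proc file, e.g. from a shell (the path
+ * assumes host number 0):
+ *
+ *     echo "eata_dma latency"   > /proc/scsi/eata_dma/0
+ *     echo "eata_dma nolatency" > /proc/scsi/eata_dma/0
+ *
+ * Anything else is rejected with -EINVAL by eata_set_info() above.
+ */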
+
+/*
+ * eata_proc_info
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+int eata_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+
+ Scsi_Device *scd, SDev;
+ struct Scsi_Host *HBA_ptr;
+ Scsi_Cmnd scmd;
+ char cmnd[12];
+ static u8 buff[512];
+ static u8 buff2[512];
+ hst_cmd_stat *rhcs, *whcs;
+ coco *cc;
+ scsitrans *st;
+ scsimod *sm;
+ hobu *hb;
+ scbu *sb;
+ boty *bt;
+ memco *mc;
+ firm *fm;
+ subinf *si;
+ pcinf *pi;
+ arrlim *al;
+ int i, x;
+ int size, len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ scd = NULL;
+
+ HBA_ptr = first_HBA;
+ for (i = 1; i <= registered_HBAs; i++) {
+ if (HBA_ptr->host_no == hostno)
+ break;
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+
+ if(inout == TRUE) /* Has data been written to the file ? */
+ return(eata_set_info(buffer, length, HBA_ptr));
+
+ if (offset == 0)
+ memset(buff, 0, sizeof(buff));
+
+ cc = (coco *) (buff + 0x148);
+ st = (scsitrans *)(buff + 0x164);
+ sm = (scsimod *) (buff + 0x16c);
+ hb = (hobu *) (buff + 0x172);
+ sb = (scbu *) (buff + 0x178);
+ bt = (boty *) (buff + 0x17e);
+ mc = (memco *) (buff + 0x186);
+ fm = (firm *) (buff + 0x18e);
+ si = (subinf *) (buff + 0x196);
+ pi = (pcinf *) (buff + 0x19c);
+ al = (arrlim *) (buff + 0x1a2);
+
+ size = sprintf(buffer+len, "EATA (Extended Attachment) driver version: "
+ "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
+ len += size; pos = begin + len;
+ size = sprintf(buffer + len, "queued commands: %10ld\n"
+ "processed interrupts:%10ld\n", queue_counter, int_counter);
+ len += size; pos = begin + len;
+
+ size = sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Firmware revision: v%s\n",
+ SD(HBA_ptr)->revision);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Hardware Configuration:\n");
+ len += size;
+ pos = begin + len;
+
+ if(SD(HBA_ptr)->broken_INQUIRY == TRUE) {
+ if (HBA_ptr->dma_channel == BUSMASTER)
+ size = sprintf(buffer + len, "DMA: BUSMASTER\n");
+ else
+ size = sprintf(buffer + len, "DMA: %d\n", HBA_ptr->dma_channel);
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, "Host Bus: EISA\n");
+ len += size;
+ pos = begin + len;
+
+ } else {
+ memset(&SDev, 0, sizeof(Scsi_Device));
+ memset(&scmd, 0, sizeof(Scsi_Cmnd));
+
+ SDev.host = HBA_ptr;
+ SDev.id = HBA_ptr->this_id;
+ SDev.lun = 0;
+ SDev.channel = 0;
+
+ cmnd[0] = LOG_SENSE;
+ cmnd[1] = 0;
+ cmnd[2] = 0x33 + (3<<6);
+ cmnd[3] = 0;
+ cmnd[4] = 0;
+ cmnd[5] = 0;
+ cmnd[6] = 0;
+ cmnd[7] = 0x00;
+ cmnd[8] = 0x66;
+ cmnd[9] = 0;
+
+ scmd.cmd_len = 10;
+
+ scmd.host = HBA_ptr;
+ scmd.device = &SDev;
+ scmd.target = HBA_ptr->this_id;
+ scmd.lun = 0;
+ scmd.channel = 0;
+ scmd.use_sg = 0;
+
+ /*
+ * Do the command and wait for it to finish.
+ */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scmd.request.rq_status = RQ_SCSI_BUSY;
+ scmd.request.sem = &sem;
+ scsi_do_cmd (&scmd, cmnd, buff + 0x144, 0x66,
+ eata_scsi_done, 1 * HZ, 1);
+ down(&sem);
+ }
+
+ size = sprintf(buffer + len, "IRQ: %2d, %s triggered\n", cc->interrupt,
+ (cc->intt == TRUE)?"level":"edge");
+ len += size;
+ pos = begin + len;
+ if (HBA_ptr->dma_channel == 0xff)
+ size = sprintf(buffer + len, "DMA: BUSMASTER\n");
+ else
+ size = sprintf(buffer + len, "DMA: %d\n", HBA_ptr->dma_channel);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "CPU: MC680%02d %dMHz\n", bt->cpu_type,
+ bt->cpu_speed);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Host Bus: %s\n",
+ (SD(HBA_ptr)->bustype == IS_PCI)?"PCI ":
+ (SD(HBA_ptr)->bustype == IS_EISA)?"EISA":"ISA ");
+
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SCSI Bus:%s%s Speed: %sMB/sec. %s\n",
+ (sb->wide == TRUE)?" WIDE":"",
+ (sb->dif == TRUE)?" DIFFERENTIAL":"",
+ (sb->speed == 0)?"5":(sb->speed == 1)?"10":"20",
+ (sb->ext == TRUE)?"With external cable detection":"");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SCSI channel expansion Module: %s present\n",
+ (bt->sx1 == TRUE)?"SX1 (one channel)":
+ ((bt->sx2 == TRUE)?"SX2 (two channels)":"not"));
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SmartRAID hardware: %spresent.\n",
+ (cc->srs == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Type: %s\n",
+ ((cc->key == TRUE)?((bt->dmi == TRUE)?"integrated"
+ :((bt->dm4 == TRUE)?"DM401X"
+ :(bt->dm4k == TRUE)?"DM4000"
+ :"-"))
+ :"-"));
+ len += size;
+ pos = begin + len;
+
+ size = sprintf(buffer + len, " Max array groups: %d\n",
+ (al->code == 0x0e)?al->max_groups:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Max drives per RAID 0 array: %d\n",
+ (al->code == 0x0e)?al->raid0_drv:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Max drives per RAID 3/5 array: %d\n",
+ (al->code == 0x0e)?al->raid35_drv:7);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Cache Module: %spresent.\n",
+ (cc->csh)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, " Type: %s\n",
+ ((cc->csh == TRUE)?((bt->cmi == TRUE)?"integrated"
+ :((bt->cm4 == TRUE)?"CM401X"
+ :((bt->cm4k == TRUE)?"CM4000"
+ :"-")))
+ :"-"));
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 3; x++) {
+ size = sprintf(buffer + len, " Bank%d: %dMB with%s ECC\n",x,
+ mc->banksize[x] & 0x7f,
+ (mc->banksize[x] & 0x80)?"":"out");
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer + len, "Timer Mod.: %spresent\n",
+ (cc->tmr == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "NVRAM : %spresent\n",
+ (cc->nvr == TRUE)?"":"not ");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "SmartROM : %sabled\n",
+ (bt->srom == TRUE)?"dis":"en");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Alarm : %s\n",
+ (bt->alrm == TRUE)?"on":"off");
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ if(SD(HBA_ptr)->do_latency == FALSE) {
+
+ cmnd[0] = LOG_SENSE;
+ cmnd[1] = 0;
+ cmnd[2] = 0x32 + (3<<6);
+ cmnd[3] = 0;
+ cmnd[4] = 0;
+ cmnd[5] = 0;
+ cmnd[6] = 0;
+ cmnd[7] = 0x01;
+ cmnd[8] = 0x44;
+ cmnd[9] = 0;
+
+ scmd.cmd_len = 10;
+
+ /*
+ * Do the command and wait for it to finish.
+ */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scmd.request.rq_status = RQ_SCSI_BUSY;
+ scmd.request.sem = &sem;
+ scsi_do_cmd (&scmd, cmnd, buff2, 0x144,
+ eata_scsi_done, 1 * HZ, 1);
+ down(&sem);
+ }
+
+ swap_statistics(buff2);
+ rhcs = (hst_cmd_stat *)(buff2 + 0x2c);
+ whcs = (hst_cmd_stat *)(buff2 + 0x8c);
+
+ for (x = 0; x <= 11; x++) {
+ SD(HBA_ptr)->reads[x] += rhcs->sizes[x];
+ SD(HBA_ptr)->writes[x] += whcs->sizes[x];
+ SD(HBA_ptr)->reads[12] += rhcs->sizes[x];
+ SD(HBA_ptr)->writes[12] += whcs->sizes[x];
+ }
+ size = sprintf(buffer + len, "Host<->Disk command statistics:\n"
+ " Reads: Writes:\n");
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u\n", 1 << x,
+ SD(HBA_ptr)->reads[x],
+ SD(HBA_ptr)->writes[x]);
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u\n",
+ SD(HBA_ptr)->reads[11],
+ SD(HBA_ptr)->writes[11]);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer+len,"Sum :%12u %12u\n",
+ SD(HBA_ptr)->reads[12],
+ SD(HBA_ptr)->writes[12]);
+ len += size;
+ pos = begin + len;
+ }
+ }
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ if(SD(HBA_ptr)->do_latency == TRUE) {
+ int factor = 1024/HZ;
+ size = sprintf(buffer + len, "Host Latency Command Statistics:\n"
+ "Current timer resolution: %2dms\n"
+ " Reads: Min:(ms) Max:(ms) Ave:(ms)\n",
+ factor);
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u %12u %12u\n",
+ 1 << x,
+ SD(HBA_ptr)->reads_lat[x][0],
+ (SD(HBA_ptr)->reads_lat[x][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->reads_lat[x][1] * factor),
+ SD(HBA_ptr)->reads_lat[x][2] * factor,
+ SD(HBA_ptr)->reads_lat[x][3] * factor /
+ ((SD(HBA_ptr)->reads_lat[x][0])
+ ? SD(HBA_ptr)->reads_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u %12u %12u\n",
+ SD(HBA_ptr)->reads_lat[11][0],
+ (SD(HBA_ptr)->reads_lat[11][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->reads_lat[11][1] * factor),
+ SD(HBA_ptr)->reads_lat[11][2] * factor,
+ SD(HBA_ptr)->reads_lat[11][3] * factor /
+ ((SD(HBA_ptr)->reads_lat[x][0])
+ ? SD(HBA_ptr)->reads_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ size = sprintf(buffer + len,
+ " Writes: Min:(ms) Max:(ms) Ave:(ms)\n");
+ len += size;
+ pos = begin + len;
+ for (x = 0; x <= 10; x++) {
+ size = sprintf(buffer+len,"%5dk:%12u %12u %12u %12u\n",
+ 1 << x,
+ SD(HBA_ptr)->writes_lat[x][0],
+ (SD(HBA_ptr)->writes_lat[x][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->writes_lat[x][1] * factor),
+ SD(HBA_ptr)->writes_lat[x][2] * factor,
+ SD(HBA_ptr)->writes_lat[x][3] * factor /
+ ((SD(HBA_ptr)->writes_lat[x][0])
+ ? SD(HBA_ptr)->writes_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+ }
+ size = sprintf(buffer+len,">1024k:%12u %12u %12u %12u\n",
+ SD(HBA_ptr)->writes_lat[11][0],
+ (SD(HBA_ptr)->writes_lat[11][1] == 0xffffffff)
+ ? 0:(SD(HBA_ptr)->writes_lat[x][1] * factor),
+ SD(HBA_ptr)->writes_lat[11][2] * factor,
+ SD(HBA_ptr)->writes_lat[11][3] * factor /
+ ((SD(HBA_ptr)->writes_lat[x][0])
+ ? SD(HBA_ptr)->writes_lat[x][0]:1));
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+
+#if 0
+ scd = scsi_devices;
+
+ size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+ len += size;
+ pos = begin + len;
+
+ while (scd) {
+ if (scd->host == HBA_ptr) {
+ proc_print_scsidevice(scd, buffer, &size, len);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+ scd = scd->next;
+ }
+#endif
+
+ stop_output:
+ DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len));
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len = length; /* Ending slop */
+ DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len));
+
+ return (len);
+}
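+
+/*
+ * Illustrative sketch (disabled; not part of the driver proper): the
+ * begin/pos/len bookkeeping in eata_proc_info() is the usual 2.0 proc_info
+ * pattern.  Output produced entirely before the requested offset is
+ * logically discarded, and generation stops once the window is full.
+ */
+#if 0
+size = sprintf(buffer + len, "...\n");   /* append one line of output */
+len += size;
+pos  = begin + len;
+if (pos < offset) {          /* everything so far is before the window */
+    len   = 0;
+    begin = pos;
+}
+if (pos > offset + length)   /* the reader's window is full */
+    goto stop_output;
+#endif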
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_dma_proc.h b/linux/src/drivers/scsi/eata_dma_proc.h
new file mode 100644
index 0000000..d49f348
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_dma_proc.h
@@ -0,0 +1,260 @@
+
+struct lun_map {
+ __u8 id:5,
+ chan:3;
+ __u8 lun;
+};
+
+typedef struct emul_pp {
+ __u8 p_code:6,
+ null:1,
+ p_save:1;
+ __u8 p_length;
+ __u16 cylinder;
+ __u8 heads;
+ __u8 sectors;
+ __u8 null2;
+ __u8 s_lunmap:4,
+ ems:1;
+ __u16 drive_type; /* In Little Endian ! */
+ struct lun_map lunmap[4];
+}emulpp;
+
+
+/* Log Sense pages */
+
+typedef struct log_sheader {
+ __u8 page_code,
+ reserved;
+ __u16 length;
+}logsh;
+
+
+/* Log Sense Statistics */
+
+typedef struct read_command_statistics {
+ __u16 code; /* 0x01 */
+ __u8 flags;
+ __u8 length; /* 0x24 */
+ __u32 h_commands,
+ uncached,
+ la_cmds,
+ la_blks,
+ la_hits,
+ missed,
+ hits,
+ seq_la_blks,
+ seq_la_hits;
+}r_cmd_stat;
+
+typedef struct write_command_statistics {
+ __u16 code; /* 0x03 */
+ __u8 flags;
+ __u8 length; /* 0x28 */
+ __u32 h_commands,
+ uncached,
+ thru,
+ bypass,
+ soft_err,
+ hits,
+ b_idle,
+ b_activ,
+ b_blks,
+ b_blks_clean;
+}w_cmd_stat;
+
+typedef struct host_command_statistics {
+ __u16 code; /* 0x02, 0x04 */
+ __u8 flags;
+ __u8 length; /* 0x30 */
+ __u32 sizes[12];
+}hst_cmd_stat;
+
+typedef struct physical_command_statistics {
+ __u16 code; /* 0x06, 0x07 */
+ __u8 flags;
+ __u8 length; /* 0x34 */
+ __u32 sizes[13];
+}phy_cmd_stat;
+
+typedef struct misc_device_statistics {
+ __u16 code; /* 0x05 */
+ __u8 flags;
+ __u8 length; /* 0x10 */
+ __u32 disconnect,
+ pass_thru,
+ sg_commands,
+ stripe_boundary_crosses;
+}msc_stats;
+
+/* Configuration Pages */
+
+typedef struct controller_configuration {
+ __u16 code; /* 0x01 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 intt:1,
+ sec:1,
+ csh:1,
+ key:1,
+ tmr:1,
+ srs:1,
+ nvr:1;
+ __u8 interrupt;
+}coco;
+
+typedef struct controller_hardware_errors {
+ __u16 code; /* 0x02 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 unused:1,
+ per:1;
+ __u8 interrupt;
+}coher;
+
+typedef struct memory_map {
+ __u16 code; /* 0x03, 0x04 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u32 memory_map;
+}mema;
+
+typedef struct scsi_transfer {
+ __u16 code; /* 0x05 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 offset,
+ period;
+ __u16 speed;
+}scsitrans;
+
+typedef struct scsi_modes {
+ __u16 code; /* 0x06 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 que:1,
+ cdis:1,
+ wtru:1,
+ dasd:1,
+ ncr:1,
+ awre:1;
+ __u8 reserved;
+}scsimod;
+
+typedef struct host_bus {
+ __u16 code; /* 0x07 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 speed:6,
+ pci:1,
+ eisa:1;
+ __u8 reserved;
+}hobu;
+
+typedef struct scsi_bus {
+ __u16 code; /* 0x08 */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 speed:4,
+ res:1,
+ ext:1,
+ wide:1,
+ dif:1;
+ __u8 busnum;
+}scbu;
+
+typedef struct board_type {
+ __u16 code; /* 0x09 */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 unused:1,
+ cmi:1,
+ dmi:1,
+ cm4k:1,
+ cm4:1,
+ dm4k:1,
+ dm4:1,
+ hba:1;
+ __u8 cpu_type,
+ cpu_speed;
+ __u8 sx1:1,
+ sx2:1,
+ unused2:4,
+ alrm:1,
+ srom:1;
+}boty;
+
+typedef struct memory_config {
+ __u16 code; /* 0x0a */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 banksize[4];
+}memco;
+
+typedef struct firmware_info {
+ __u16 code; /* 0x0b */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 dnld:1,
+ bs528:1,
+ fmt:1,
+ fw528:1;
+ __u8 unused1,
+ fw_type,
+ unused;
+}firm;
+
+typedef struct subsystem_info {
+ __u16 code; /* 0x0c */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 shlf:1,
+ swap:1,
+ noss:1;
+ __u8 reserved;
+}subinf;
+
+typedef struct per_channel_info {
+ __u16 code; /* 0x0d */
+ __u8 flags;
+ __u8 length; /* 0x02 */
+ __u8 channel;
+ __u8 shlf:1,
+ swap:1,
+ noss:1,
+ srs:1,
+ que:1,
+ ext:1,
+ wide:1,
+ diff:1;
+}pcinf;
+
+typedef struct array_limits {
+ __u16 code; /* 0x0e */
+ __u8 flags;
+ __u8 length; /* 0x04 */
+ __u8 max_groups,
+ raid0_drv,
+ raid35_drv,
+ unused;
+}arrlim;
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/eata_generic.h b/linux/src/drivers/scsi/eata_generic.h
new file mode 100644
index 0000000..c884def
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_generic.h
@@ -0,0 +1,414 @@
+/********************************************************
+* Header file for eata_dma.c and eata_pio.c *
+* Linux EATA SCSI drivers *
+* (c) 1993-96 Michael Neuffer *
+* mike@i-Connect.Net *
+* neuffer@mail.uni-mainz.de *
+*********************************************************
+* last change: 96/08/14 *
+********************************************************/
+
+
+#ifndef _EATA_GENERIC_H
+#define _EATA_GENERIC_H
+
+
+
+/*********************************************
+ * Misc. definitions *
+ *********************************************/
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define min(a,b) ((a<b)?(a):(b))
+
+#define R_LIMIT 0x20000
+
+#define MAXISA 4
+#define MAXEISA 16
+#define MAXPCI 16
+#define MAXIRQ 16
+#define MAXTARGET 16
+#define MAXCHANNEL 3
+
+#define IS_ISA 'I'
+#define IS_EISA 'E'
+#define IS_PCI 'P'
+
+#define BROKEN_INQUIRY 1
+
+#define BUSMASTER 0xff
+#define PIO 0xfe
+
+#define EATA_SIGNATURE 0x45415441 /* BIG ENDIAN coded "EATA" sig. */
+
+#define DPT_ID1 0x12
+#define DPT_ID2 0x14
+
+#define ATT_ID1 0x06
+#define ATT_ID2 0x94
+#define ATT_ID3 0x0
+
+#define NEC_ID1 0x38
+#define NEC_ID2 0xa3
+#define NEC_ID3 0x82
+
+
+#define EATA_CP_SIZE 44
+
+#define MAX_PCI_DEVICES 32 /* Maximum # Of Devices Per Bus */
+#define MAX_METHOD_2 16 /* Max Devices For Method 2 */
+#define MAX_PCI_BUS 16 /* Maximum # Of Busses Allowed */
+
+#define SG_SIZE 64
+#define SG_SIZE_BIG 252 /* max. 8096 elements, 64k */
+
+#define UPPER_DEVICE_QUEUE_LIMIT 64 /* The limit we have to set for the
+ * device queue to keep the broken
+ * midlevel SCSI code from producing
+ * bogus timeouts
+ */
+
+#define TYPE_DISK_QUEUE 16
+#define TYPE_TAPE_QUEUE 4
+#define TYPE_ROM_QUEUE 4
+#define TYPE_OTHER_QUEUE 2
+
+#define FREE 0
+#define OK 0
+#define NO_TIMEOUT 0
+#define USED 1
+#define TIMEOUT 2
+#define RESET 4
+#define LOCKED 8
+#define ABORTED 16
+
+#define READ 0
+#define WRITE 1
+#define OTHER 2
+
+#define HD(cmd) ((hostdata *)&(cmd->host->hostdata))
+#define CD(cmd) ((struct eata_ccb *)(cmd->host_scribble))
+#define SD(host) ((hostdata *)&(host->hostdata))
+
+#define DELAY(x) { ulong flags, i; \
+ save_flags(flags); sti(); \
+ i = jiffies + (x * HZ); \
+ while (jiffies < i); \
+ restore_flags(flags); }
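+
+/*
+ * Usage sketch (disabled; not part of the header proper): DELAY(x)
+ * busy-waits roughly x seconds with interrupts enabled, so jiffies keeps
+ * advancing; it exists only to keep debug output on screen.
+ */
+#if 0
+DELAY(2);    /* spin for about two seconds, then restore the saved flags */
+#endif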
+
+/***********************************************
+ * EATA Command & Register definitions *
+ ***********************************************/
+#define PCI_REG_DPTconfig 0x40
+#define PCI_REG_PumpModeAddress 0x44
+#define PCI_REG_PumpModeData 0x48
+#define PCI_REG_ConfigParam1 0x50
+#define PCI_REG_ConfigParam2 0x54
+
+
+#define EATA_CMD_PIO_SETUPTEST 0xc6
+#define EATA_CMD_PIO_READ_CONFIG 0xf0
+#define EATA_CMD_PIO_SET_CONFIG 0xf1
+#define EATA_CMD_PIO_SEND_CP 0xf2
+#define EATA_CMD_PIO_RECEIVE_SP 0xf3
+#define EATA_CMD_PIO_TRUNC 0xf4
+
+#define EATA_CMD_RESET 0xf9
+#define EATA_CMD_IMMEDIATE 0xfa
+
+#define EATA_CMD_DMA_READ_CONFIG 0xfd
+#define EATA_CMD_DMA_SET_CONFIG 0xfe
+#define EATA_CMD_DMA_SEND_CP 0xff
+
+#define ECS_EMULATE_SENSE 0xd4
+
+#define EATA_GENERIC_ABORT 0x00
+#define EATA_SPECIFIC_RESET 0x01
+#define EATA_BUS_RESET 0x02
+#define EATA_SPECIFIC_ABORT 0x03
+#define EATA_QUIET_INTR 0x04
+#define EATA_COLD_BOOT_HBA 0x06 /* Only as a last resort */
+#define EATA_FORCE_IO 0x07
+
+#define HA_CTRLREG 0x206 /* control register for HBA */
+#define HA_CTRL_DISINT 0x02 /* CTRLREG: disable interrupts */
+#define HA_CTRL_RESCPU 0x04 /* CTRLREG: reset processor */
+#define HA_CTRL_8HEADS 0x08 /* CTRLREG: set for drives with*
+ * >=8 heads (WD1003 rudimentary :-) */
+
+#define HA_WCOMMAND 0x07 /* command register offset */
+#define HA_WIFC 0x06 /* immediate command offset */
+#define HA_WCODE 0x05
+#define HA_WCODE2 0x04
+#define HA_WDMAADDR 0x02 /* DMA address LSB offset */
+#define HA_RAUXSTAT 0x08 /* aux status register offset*/
+#define HA_RSTATUS 0x07 /* status register offset */
+#define HA_RDATA 0x00 /* data register (16bit) */
+#define HA_WDATA 0x00 /* data register (16bit) */
+
+#define HA_ABUSY 0x01 /* aux busy bit */
+#define HA_AIRQ 0x02 /* aux IRQ pending bit */
+#define HA_SERROR 0x01 /* pr. command ended in error*/
+#define HA_SMORE 0x02 /* more data soon to come */
+#define HA_SCORR 0x04 /* data corrected */
+#define HA_SDRQ 0x08 /* data request active */
+#define HA_SSC 0x10 /* seek complete */
+#define HA_SFAULT 0x20 /* write fault */
+#define HA_SREADY 0x40 /* drive ready */
+#define HA_SBUSY 0x80 /* drive busy */
+#define HA_SDRDY HA_SSC+HA_SREADY+HA_SDRQ
+
+/**********************************************
+ * Message definitions *
+ **********************************************/
+
+#define HA_NO_ERROR 0x00 /* No Error */
+#define HA_ERR_SEL_TO 0x01 /* Selection Timeout */
+#define HA_ERR_CMD_TO 0x02 /* Command Timeout */
+#define HA_BUS_RESET 0x03 /* SCSI Bus Reset Received */
+#define HA_INIT_POWERUP 0x04 /* Initial Controller Power-up */
+#define HA_UNX_BUSPHASE 0x05 /* Unexpected Bus Phase */
+#define HA_UNX_BUS_FREE 0x06 /* Unexpected Bus Free */
+#define HA_BUS_PARITY 0x07 /* Bus Parity Error */
+#define HA_SCSI_HUNG 0x08 /* SCSI Hung */
+#define HA_UNX_MSGRJCT 0x09 /* Unexpected Message Rejected */
+#define HA_RESET_STUCK 0x0a /* SCSI Bus Reset Stuck */
+#define HA_RSENSE_FAIL 0x0b /* Auto Request-Sense Failed */
+#define HA_PARITY_ERR 0x0c /* Controller Ram Parity Error */
+#define HA_CP_ABORT_NA 0x0d /* Abort Message sent to non-active cmd */
+#define HA_CP_ABORTED 0x0e /* Abort Message sent to active cmd */
+#define HA_CP_RESET_NA 0x0f /* Reset Message sent to non-active cmd */
+#define HA_CP_RESET 0x10 /* Reset Message sent to active cmd */
+#define HA_ECC_ERR 0x11 /* Controller Ram ECC Error */
+#define HA_PCI_PARITY 0x12 /* PCI Parity Error */
+#define HA_PCI_MABORT 0x13 /* PCI Master Abort */
+#define HA_PCI_TABORT 0x14 /* PCI Target Abort */
+#define HA_PCI_STABORT 0x15 /* PCI Signaled Target Abort */
+
+/**********************************************
+ * Other definitions *
+ **********************************************/
+
+struct reg_bit { /* reading this one will clear the interrupt */
+ __u8 error:1; /* previous command ended in an error */
+ __u8 more:1; /* more DATA coming soon, poll BSY & DRQ (PIO) */
+ __u8 corr:1; /* data read was successfully corrected with ECC*/
+ __u8 drq:1; /* data request active */
+ __u8 sc:1; /* seek complete */
+ __u8 fault:1; /* write fault */
+ __u8 ready:1; /* drive ready */
+ __u8 busy:1; /* controller busy */
+};
+
+struct reg_abit { /* reading this won't clear the interrupt */
+ __u8 abusy:1; /* auxiliary busy */
+ __u8 irq:1; /* set when drive interrupt is asserted */
+ __u8 dummy:6;
+};
+
+struct eata_register { /* EATA register set */
+ __u8 data_reg[2]; /* R, couldn't figure this one out */
+ __u8 cp_addr[4]; /* W, CP address register */
+ union {
+ __u8 command; /* W, command code: [read|set] conf, send CP*/
+ struct reg_bit status; /* R, see register_bit1 */
+ __u8 statusbyte;
+ } ovr;
+ struct reg_abit aux_stat; /* R, see register_bit2 */
+};
+
+struct get_conf { /* Read Configuration Array */
+ __u32 len; /* Should return 0x22, 0x24, etc */
+ __u32 signature; /* Signature MUST be "EATA" */
+ __u8 version2:4,
+ version:4; /* EATA Version level */
+ __u8 OCS_enabled:1, /* Overlap Command Support enabled */
+ TAR_support:1, /* SCSI Target Mode supported */
+ TRNXFR:1, /* Truncate Transfer Cmd not necessary *
+ * Only used in PIO Mode */
+ MORE_support:1, /* MORE supported (only PIO Mode) */
+ DMA_support:1, /* DMA supported; this driver uses *
+ * only this mode */
+ DMA_valid:1, /* DRQ value in Byte 30 is valid */
+ ATA:1, /* ATA device connected (not supported) */
+ HAA_valid:1; /* Host adapter address is valid */
+
+ __u16 cppadlen; /* Number of pad bytes sent after CP data; *
+ * set to zero for DMA commands */
+ __u8 scsi_id[4]; /* SCSI ID of controller 2-0 Byte 0 res. *
+ * if not, zero is returned */
+ __u32 cplen; /* CP length: number of valid cp bytes */
+ __u32 splen; /* Number of bytes returned after *
+ * Receive SP command */
+ __u16 queuesiz; /* max number of queueable CPs */
+ __u16 dummy;
+ __u16 SGsiz; /* max number of SG table entries */
+ __u8 IRQ:4, /* IRQ used by this HA */
+ IRQ_TR:1, /* IRQ Trigger: 0=edge, 1=level */
+ SECOND:1, /* This is a secondary controller */
+ DMA_channel:2; /* DRQ index, DRQ is 2comp of DRQX */
+ __u8 sync; /* device at ID 7 through 0 is running in *
+ * synchronous mode, this will disappear */
+ __u8 DSBLE:1, /* ISA i/o addressing is disabled */
+ FORCADR:1, /* i/o address has been forced */
+ SG_64K:1,
+ SG_UAE:1,
+ :4;
+ __u8 MAX_ID:5, /* Max number of SCSI target IDs */
+ MAX_CHAN:3; /* Number of SCSI busses on HBA */
+ __u8 MAX_LUN; /* Max number of LUNs */
+ __u8 :3,
+ AUTOTRM:1,
+ M1_inst:1,
+ ID_qest:1, /* Raidnum ID is questionable */
+ is_PCI:1, /* HBA is PCI */
+ is_EISA:1; /* HBA is EISA */
+ __u8 RAIDNUM; /* unique HBA identifier */
+ __u8 unused[474];
+};
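+
+/*
+ * Illustrative sketch (disabled; not part of the header proper): the first
+ * two fields of struct get_conf are enough to validate a READ CONFIG
+ * reply.  Multi-byte fields arrive big-endian, so len and signature must
+ * go through ntohl() before use, as the callers in eata_dma.c do.
+ * valid_eata_config() is a hypothetical name.
+ */
+#if 0
+static int valid_eata_config(struct get_conf *gc)
+{
+    return ntohl(gc->signature) == EATA_SIGNATURE  /* "EATA" */
+        && ntohl(gc->len) >= 0x1c;                 /* at least EATA rev. a */
+}
+#endif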
+
+struct eata_sg_list
+{
+ __u32 data;
+ __u32 len;
+};
+
+struct eata_ccb { /* Send Command Packet structure */
+
+ __u8 SCSI_Reset:1, /* Cause a SCSI Bus reset on the cmd */
+ HBA_Init:1, /* Cause Controller to reinitialize */
+ Auto_Req_Sen:1, /* Do Auto Request Sense on errors */
+ scatter:1, /* Data Ptr points to a SG Packet */
+ Resrvd:1, /* RFU */
+ Interpret:1, /* Interpret the SCSI cdb of own use */
+ DataOut:1, /* Data Out phase with command */
+ DataIn:1; /* Data In phase with command */
+ __u8 reqlen; /* Request Sense Length *
+ * Valid if Auto_Req_Sen=1 */
+ __u8 unused[3];
+ __u8 FWNEST:1, /* send cmd to phys RAID component */
+ unused2:7;
+ __u8 Phsunit:1, /* physical unit on mirrored pair */
+ I_AT:1, /* inhibit address translation */
+ I_HBA_C:1, /* HBA inhibit caching */
+ unused3:5;
+
+ __u8 cp_id:5, /* SCSI Device ID of target */
+ cp_channel:3; /* SCSI Channel # of HBA */
+ __u8 cp_lun:3,
+ :2,
+ cp_luntar:1, /* CP is for target ROUTINE */
+ cp_dispri:1, /* Grant disconnect privilege */
+ cp_identify:1; /* Always TRUE */
+ __u8 cp_msg1; /* Message bytes 0-3 */
+ __u8 cp_msg2;
+ __u8 cp_msg3;
+ __u8 cp_cdb[12]; /* Command Descriptor Block */
+ __u32 cp_datalen; /* Data Transfer Length *
+ * If scatter=1 len of sg package */
+ void *cp_viraddr; /* address of this ccb */
+ __u32 cp_dataDMA; /* Data Address, if scatter=1 *
+ * address of scatter packet */
+ __u32 cp_statDMA; /* address for Status Packet */
+ __u32 cp_reqDMA; /* Request Sense Address, used if *
+ * CP command ends with error */
+ /* Additional CP info begins here */
+ __u32 timestamp; /* Needed to measure command latency */
+ __u32 timeout;
+ __u8 sizeindex;
+ __u8 rw_latency;
+ __u8 retries;
+ __u8 status; /* status of this queueslot */
+ Scsi_Cmnd *cmd; /* address of cmd */
+ struct eata_sg_list *sg_list;
+};
+
+
+struct eata_sp {
+ __u8 hba_stat:7, /* HBA status */
+ EOC:1; /* True if command finished */
+ __u8 scsi_stat; /* Target SCSI status */
+ __u8 reserved[2];
+ __u32 residue_len; /* Number of bytes not transferred */
+ struct eata_ccb *ccb; /* Address set in COMMAND PACKET */
+ __u8 msg[12];
+};
+
+typedef struct hstd {
+ __u8 vendor[9];
+ __u8 name[18];
+ __u8 revision[6];
+ __u8 EATA_revision;
+ __u32 firmware_revision;
+ __u8 HBA_number;
+ __u8 bustype; /* bustype of HBA */
+ __u8 channel; /* # of avail. scsi channels */
+ __u8 state; /* state of HBA */
+ __u8 primary; /* true if primary */
+ __u8 more_support:1, /* HBA supports MORE flag */
+ immediate_support:1, /* HBA supports IMMEDIATE CMDs*/
+ broken_INQUIRY:1; /* This is an EISA HBA with *
+ * broken INQUIRY */
+ __u8 do_latency; /* Latency measurement flag */
+ __u32 reads[13];
+ __u32 writes[13];
+ __u32 reads_lat[12][4];
+ __u32 writes_lat[12][4];
+ __u32 all_lat[4];
+ __u8 resetlevel[MAXCHANNEL];
+ __u32 last_ccb; /* Last used ccb */
+ __u32 cplen; /* size of CP in words */
+ __u16 cppadlen; /* pad length of cp in words */
+ __u16 queuesize;
+ __u16 sgsize; /* # of entries in the SG list*/
+ __u16 devflags; /* bits set for detected devices */
+ __u8 hostid; /* SCSI ID of HBA */
+ __u8 moresupport; /* HBA supports MORE flag */
+ struct Scsi_Host *next;
+ struct Scsi_Host *prev;
+ struct eata_sp sp; /* status packet */
+ struct eata_ccb ccb[0]; /* ccb array begins here */
+}hostdata;
+
+/* structure for max. 2 emulated drives */
+struct drive_geom_emul {
+ __u8 trans; /* translation flag 1=transl */
+ __u8 channel; /* SCSI channel number */
+ __u8 HBA; /* HBA number (prim/sec) */
+ __u8 id; /* drive id */
+ __u8 lun; /* drive lun */
+ __u32 heads; /* number of heads */
+ __u32 sectors; /* number of sectors */
+ __u32 cylinder; /* number of cylinders */
+};
+
+struct geom_emul {
+ __u8 bios_drives; /* number of emulated drives */
+ struct drive_geom_emul drv[2]; /* drive structures */
+};
+
+#endif /* _EATA_GENERIC_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_pio.c b/linux/src/drivers/scsi/eata_pio.c
new file mode 100644
index 0000000..469b720
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_pio.c
@@ -0,0 +1,1042 @@
+/************************************************************
+ * *
+ * Linux EATA SCSI PIO driver *
+ * *
+ * based on the CAM document CAM/89-004 rev. 2.0c, *
+ * DPT's driver kit, some internal documents and source, *
+ * and several other Linux scsi drivers and kernel docs. *
+ * *
+ * The driver currently: *
+ * -supports all EATA-PIO boards *
+ * -only supports DASD devices *
+ * *
+ * (c)1993-96 Michael Neuffer, Alfred Arnold *
+ * neuffer@goofy.zdv.uni-mainz.de *
+ * a.arnold@kfa-juelich.de *
+ * *
+ * This program is free software; you can redistribute it *
+ * and/or modify it under the terms of the GNU General *
+ * Public License as published by the Free Software *
+ * Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the *
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A *
+ * PARTICULAR PURPOSE. See the GNU General Public License *
+ * for more details. *
+ * *
+ * You should have received a copy of the GNU General *
+ * Public License along with this kernel; if not, write to *
+ * the Free Software Foundation, Inc., 675 Mass Ave, *
+ * Cambridge, MA 02139, USA. *
+ * *
+ ************************************************************
+ * last change: 96/07/16 OS: Linux 2.0.8 *
+ ************************************************************/
+
+/* Look in eata_pio.h for configuration information */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/malloc.h>
+#include <linux/in.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include "eata_pio.h"
+#include "eata_dma_proc.h"
+#include "scsi.h"
+#include "sd.h"
+
+#include <linux/stat.h>
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_eata_pio = {
+ PROC_SCSI_EATA_PIO, 9, "eata_pio",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+static uint ISAbases[MAXISA] =
+{0x1F0, 0x170, 0x330, 0x230};
+static uint ISAirqs[MAXISA] =
+{14,12,15,11};
+static unchar EISAbases[] =
+{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static uint registered_HBAs = 0;
+static struct Scsi_Host *last_HBA = NULL;
+static struct Scsi_Host *first_HBA = NULL;
+static unchar reg_IRQ[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static unchar reg_IRQL[] =
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+static ulong int_counter = 0;
+static ulong queue_counter = 0;
+
+#include "eata_pio_proc.c"
+
+#ifdef MODULE
+int eata_pio_release(struct Scsi_Host *sh)
+{
+ if (sh->irq && reg_IRQ[sh->irq] == 1) free_irq(sh->irq, NULL);
+ else reg_IRQ[sh->irq]--;
+ if (SD(sh)->channel == 0) {
+ if (sh->io_port && sh->n_io_port)
+ release_region(sh->io_port, sh->n_io_port);
+ }
+ return(TRUE);
+}
+#endif
+
+void IncStat(Scsi_Pointer *SCp, uint Increment)
+{
+ SCp->ptr+=Increment;
+ if ((SCp->this_residual-=Increment)==0)
+ {
+ if ((--SCp->buffers_residual)==0) SCp->Status=FALSE;
+ else
+ {
+ SCp->buffer++;
+ SCp->ptr=SCp->buffer->address;
+ SCp->this_residual=SCp->buffer->length;
+ }
+ }
+}
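+
+/*
+ * Illustrative sketch (disabled; not part of the driver proper): IncStat()
+ * advances the mid-level's Scsi_Pointer across the scatter/gather list.
+ * When the current segment is used up it steps to the next one, and
+ * SCp->Status drops to FALSE only after the last segment is consumed,
+ * which is what the PIO transfer loops below test to know when to stop.
+ */
+#if 0
+while (cmd->SCp.Status) {                       /* bytes left to move */
+    uint chunk = min(16, cmd->SCp.this_residual);
+    /* ... copy `chunk' bytes to or from cmd->SCp.ptr here ... */
+    IncStat(&cmd->SCp, chunk);                  /* may switch segments */
+}
+#endif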
+
+void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs * regs)
+{
+ uint eata_stat = 0xfffff;
+ Scsi_Cmnd *cmd;
+ hostdata *hd;
+ struct eata_ccb *cp;
+ uint base;
+ ulong flags;
+ uint x,z;
+ struct Scsi_Host *sh;
+ ushort zwickel=0;
+ unchar stat,odd;
+
+ save_flags(flags);
+ cli();
+
+ for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) {
+ if (sh->irq != irq)
+ continue;
+ if (inb((uint)sh->base + HA_RSTATUS) & HA_SBUSY)
+ continue;
+
+ int_counter++;
+
+ hd=SD(sh);
+
+ cp = &hd->ccb[0];
+ cmd = cp->cmd;
+ base = (uint) cmd->host->base;
+
+ do
+ {
+ stat=inb(base+HA_RSTATUS);
+ if (stat&HA_SDRQ)
+ if (cp->DataIn)
+ {
+ z=256; odd=FALSE;
+ while ((cmd->SCp.Status)&&((z>0)||(odd)))
+ {
+ if (odd)
+ {
+ *(cmd->SCp.ptr)=zwickel>>8;
+ IncStat(&cmd->SCp,1);
+ odd=FALSE;
+ }
+ x=min(z,cmd->SCp.this_residual/2);
+ insw(base+HA_RDATA,cmd->SCp.ptr,x);
+ z-=x;
+ IncStat(&cmd->SCp,2*x);
+ if ((z>0)&&(cmd->SCp.this_residual==1))
+ {
+ zwickel=inw(base+HA_RDATA);
+ *(cmd->SCp.ptr)=zwickel&0xff;
+ IncStat(&cmd->SCp,1); z--;
+ odd=TRUE;
+ }
+ }
+ while (z>0) {
+ zwickel=inw(base+HA_RDATA);
+ z--;
+ }
+ }
+ else /* cp->DataOut */
+ {
+ odd=FALSE; z=256;
+ while ((cmd->SCp.Status)&&((z>0)||(odd)))
+ {
+ if (odd)
+ {
+ zwickel+=*(cmd->SCp.ptr)<<8;
+ IncStat(&cmd->SCp,1);
+ outw(zwickel,base+HA_RDATA);
+ z--;
+ odd=FALSE;
+ }
+ x=min(z,cmd->SCp.this_residual/2);
+ outsw(base+HA_RDATA,cmd->SCp.ptr,x);
+ z-=x;
+ IncStat(&cmd->SCp,2*x);
+ if ((z>0)&&(cmd->SCp.this_residual==1))
+ {
+ zwickel=*(cmd->SCp.ptr);
+ zwickel&=0xff;
+ IncStat(&cmd->SCp,1);
+ odd=TRUE;
+ }
+ }
+ while (z>0||odd) {
+ outw(zwickel,base+HA_RDATA);
+ z--;
+ odd=FALSE;
+ }
+ }
+ }
+ while ((stat&HA_SDRQ)||((stat&HA_SMORE)&&hd->moresupport));
+
+ /* terminate handler if HBA goes busy again, i.e. transfers
+ * more data */
+
+ if (stat&HA_SBUSY) break;
+
+ /* OK, this is quite stupid, but I haven't found any correct
+ * way to get HBA&SCSI status so far */
+
+ if (!(inb(base+HA_RSTATUS)&HA_SERROR))
+ {
+ cmd->result=(DID_OK<<16);
+ hd->devflags|=(1<<cp->cp_id);
+ }
+ else if (hd->devflags&1<<cp->cp_id)
+ cmd->result=(DID_OK<<16)+0x02;
+ else cmd->result=(DID_NO_CONNECT<<16);
+
+ if (cp->status == LOCKED) {
+ cp->status = FREE;
+ eata_stat = inb(base + HA_RSTATUS);
+ printk(KERN_NOTICE "eata_pio: int_handler, freeing locked "
+ "queueslot\n");
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+ restore_flags(flags);
+ return;
+ }
+
+#if DBG_INTR2
+ if (stat != 0x50)
+ printk(KERN_DEBUG "stat: %#.2x, result: %#.8x\n", stat,
+ cmd->result);
+ DBG(DBG_INTR&&DBG_DELAY,DELAY(1));
+#endif
+
+ cp->status = FREE; /* now we can release the slot */
+
+ restore_flags(flags);
+ cmd->scsi_done(cmd);
+ save_flags(flags);
+ cli();
+ }
+ restore_flags(flags);
+
+ return;
+}
+
+inline uint eata_pio_send_command(uint base, unchar command)
+{
+ uint loop = HZ/2;
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return(TRUE);
+
+ /* Enable interrupts for the HBA. This is not the best place to do it,
+ * but I hope that this way it doesn't interfere with the IDE driver
+ * initialization */
+
+ outb(HA_CTRL_8HEADS,base+HA_CTRLREG);
+
+ outb(command, base + HA_WCOMMAND);
+ return(FALSE);
+}
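+
+/*
+ * Usage sketch (disabled; not part of the driver proper):
+ * eata_pio_send_command() polls the status register up to HZ/2 times and
+ * returns TRUE on timeout, FALSE on success, so callers treat a non-zero
+ * return as "HBA busy", as eata_pio_queue() does below.
+ */
+#if 0
+if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP)) {
+    /* controller never went non-busy: fail the command as BUS_BUSY */
+}
+#endif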
+
+int eata_pio_queue(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ uint x, y;
+ long flags;
+ uint base;
+
+ hostdata *hd;
+ struct Scsi_Host *sh;
+ struct eata_ccb *cp;
+
+ save_flags(flags);
+ cli();
+
+ queue_counter++;
+
+ hd = HD(cmd);
+ sh = cmd->host;
+ base = (uint) sh->base;
+
+ /* use only slot 0, as 2001 can handle only one cmd at a time */
+
+ y = x = 0;
+
+ if (hd->ccb[y].status!=FREE) {
+
+ DBG(DBG_QUEUE, printk(KERN_EMERG "can_queue %d, x %d, y %d\n",
+ sh->can_queue,x,y));
+#if DEBUG_EATA
+ panic(KERN_EMERG "eata_pio: run out of queue slots cmdno:%ld "
+ "intrno: %ld\n", queue_counter, int_counter);
+#else
+ panic(KERN_EMERG "eata_pio: run out of queue slots....\n");
+#endif
+ }
+
+ cp = &hd->ccb[y];
+
+ memset(cp, 0, sizeof(struct eata_ccb));
+ memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+ cp->status = USED; /* claim free slot */
+
+ DBG(DBG_QUEUE, printk(KERN_DEBUG "eata_pio_queue pid %ld, target: %x, lun:"
+ " %x, y %d\n", cmd->pid, cmd->target, cmd->lun, y));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ cmd->scsi_done = (void *)done;
+
+ switch (cmd->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
+ case SEARCH_HIGH_12: case SEARCH_EQUAL_12: case SEARCH_LOW_12:
+ case WRITE_12: case WRITE_VERIFY_12: case SET_WINDOW:
+ case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea: /* alternate number for WRITE LONG */
+ cp->DataOut = TRUE; /* Output mode */
+ break;
+ case TEST_UNIT_READY:
+ default:
+ cp->DataIn = TRUE; /* Input mode */
+ }
+
+ cp->Interpret = (cmd->target == hd->hostid);
+ cp->cp_datalen = htonl((ulong)cmd->request_bufflen);
+ cp->Auto_Req_Sen = FALSE;
+ cp->cp_reqDMA = htonl(0);
+ cp->reqlen = 0;
+
+ cp->cp_id = cmd->target;
+ cp->cp_lun = cmd->lun;
+ cp->cp_dispri = FALSE;
+ cp->cp_identify = TRUE;
+ memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
+
+ cp->cp_statDMA = htonl(0);
+
+ cp->cp_viraddr = cp;
+ cp->cmd = cmd;
+ cmd->host_scribble = (char *)&hd->ccb[y];
+
+ if (cmd->use_sg == 0)
+ {
+ cmd->SCp.buffers_residual=1;
+ cmd->SCp.ptr = cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.buffer = NULL;
+ } else {
+ cmd->SCp.buffer = cmd->request_buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ }
+ cmd->SCp.Status = (cmd->SCp.this_residual != 0); /* TRUE as long as bytes
+ * remain to be transferred */
+
+ if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP))
+ {
+ cmd->result = DID_BUS_BUSY << 16;
+ printk(KERN_NOTICE "eata_pio_queue target %d, pid %ld, HBA busy, "
+ "returning DID_BUS_BUSY, done.\n", cmd->target, cmd->pid);
+ done(cmd);
+ cp->status = FREE;
+ restore_flags(flags);
+ return (0);
+ }
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
+ outsw(base + HA_RDATA, cp, hd->cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (x = 0; x < hd->cppadlen; x++) outw(0, base + HA_RDATA);
+
+ DBG(DBG_QUEUE,printk(KERN_DEBUG "Queued base %#.4lx pid: %ld target: %x "
+ "lun: %x slot %d irq %d\n", (long)sh->base, cmd->pid,
+ cmd->target, cmd->lun, y, sh->irq));
+ DBG(DBG_QUEUE && DBG_DELAY, DELAY(1));
+
+ restore_flags(flags);
+ return (0);
+}
+
+int eata_pio_abort(Scsi_Cmnd * cmd)
+{
+ ulong flags;
+ uint loop = HZ;
+
+ save_flags(flags);
+ cli();
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_abort called pid: %ld "
+ "target: %x lun: %x reason %x\n", cmd->pid,
+ cmd->target, cmd->lun, cmd->abort_reason));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+
+ while (inb((uint)(cmd->host->base) + HA_RAUXSTAT) & HA_ABUSY)
+ if (--loop == 0) {
+ printk(KERN_WARNING "eata_pio: abort, timeout error.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == FREE) {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_NOT_RUNNING\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ if (CD(cmd)->status == USED) {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "Returning: SCSI_ABORT_BUSY\n"));
+ restore_flags(flags);
+ return (SCSI_ABORT_BUSY); /* SNOOZE */
+ }
+ if (CD(cmd)->status == RESET) {
+ restore_flags(flags);
+ printk(KERN_WARNING "eata_pio: abort, command reset error.\n");
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_ABORT_ERROR);
+ }
+ if (CD(cmd)->status == LOCKED) {
+ restore_flags(flags);
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio: abort, queue slot "
+ "locked.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_ABORT_NOT_RUNNING);
+ }
+ restore_flags(flags);
+ panic("eata_pio: abort: invalid slot status\n");
+}
+
+int eata_pio_reset(Scsi_Cmnd * cmd, unsigned int dummy)
+{
+ uint x, time, limit = 0;
+ ulong flags;
+ unchar success = FALSE;
+ Scsi_Cmnd *sp;
+
+ save_flags(flags);
+ cli();
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset called pid:%ld target:"
+ " %x lun: %x reason %x\n", cmd->pid, cmd->target,
+ cmd->lun, cmd->abort_reason));
+
+ if (HD(cmd)->state == RESET) {
+ printk(KERN_WARNING "eata_pio_reset: exit, already in reset.\n");
+ restore_flags(flags);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_ERROR);
+ }
+
+ /* force all slots to be free */
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ if (HD(cmd)->ccb[x].status == FREE)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ HD(cmd)->ccb[x].status = RESET;
+ printk(KERN_WARNING "eata_pio_reset: slot %d in reset, pid %ld.\n", x,
+ sp->pid);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ if (sp == NULL)
+ panic("eata_pio_reset: slot %d, sp==NULL.\n", x);
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ }
+
+ /* hard reset the HBA */
+ outb(EATA_CMD_RESET, (uint) cmd->host->base+HA_WCOMMAND);
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n"));
+ HD(cmd)->state = RESET;
+
+ time = jiffies;
+ while (jiffies < (time + (3 * HZ)) && limit++ < 10000000);
+
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: interrupts disabled, "
+ "loops %d.\n", limit));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+
+ for (x = 0; x < cmd->host->can_queue; x++) {
+
+ /* Skip slots already set free by interrupt */
+ if (HD(cmd)->ccb[x].status != RESET)
+ continue;
+
+ sp = HD(cmd)->ccb[x].cmd;
+ sp->result = DID_RESET << 16;
+
+ /* This mailbox is terminated */
+ printk(KERN_WARNING "eata_pio_reset: reset ccb %d.\n",x);
+ HD(cmd)->ccb[x].status = FREE;
+
+ restore_flags(flags);
+ sp->scsi_done(sp);
+ cli();
+ }
+
+ HD(cmd)->state = FALSE;
+ restore_flags(flags);
+
+ if (success) { /* hmmm... */
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, success.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_SUCCESS);
+ } else {
+ DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: exit, wakeup.\n"));
+ DBG(DBG_ABNORM && DBG_DELAY, DELAY(1));
+ return (SCSI_RESET_PUNT);
+ }
+}
+
+char * get_pio_board_data(ulong base, uint irq, uint id, ulong cplen, ushort cppadlen)
+{
+ struct eata_ccb cp;
+ static char buff[256];
+ int z;
+
+ memset(&cp, 0, sizeof(struct eata_ccb));
+ memset(buff, 0, sizeof(buff));
+
+ cp.DataIn = TRUE;
+ cp.Interpret = TRUE; /* Interpret command */
+
+ cp.cp_datalen = htonl(254);
+ cp.cp_dataDMA = htonl(0);
+
+ cp.cp_id = id;
+ cp.cp_lun = 0;
+
+ cp.cp_cdb[0] = INQUIRY;
+ cp.cp_cdb[1] = 0;
+ cp.cp_cdb[2] = 0;
+ cp.cp_cdb[3] = 0;
+ cp.cp_cdb[4] = 254;
+ cp.cp_cdb[5] = 0;
+
+ if (eata_pio_send_command((uint) base, EATA_CMD_PIO_SEND_CP))
+ return (NULL);
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
+ outsw(base + HA_RDATA, &cp, cplen);
+ outb(EATA_CMD_PIO_TRUNC, base + HA_WCOMMAND);
+ for (z = 0; z < cppadlen; z++) outw(0, base + HA_RDATA);
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY);
+ if (inb(base + HA_RSTATUS) & HA_SERROR)
+ return (NULL);
+ else if (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ return (NULL);
+ else
+ {
+ insw(base+HA_RDATA, &buff, 127);
+ while (inb(base + HA_RSTATUS)&HA_SDRQ) inw(base + HA_RDATA);
+ return (buff);
+ }
+}
+
+int get_pio_conf_PIO(u32 base, struct get_conf *buf)
+{
+ ulong loop = HZ/2;
+ int z;
+ ushort *p;
+
+ if(check_region(base, 9))
+ return (FALSE);
+
+ memset(buf, 0, sizeof(struct get_conf));
+
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ if (--loop == 0)
+ return (FALSE);
+
+ DBG(DBG_PIO && DBG_PROBE,
+ printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#x\n", base));
+ eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG);
+
+ loop = HZ/2;
+ for (p = (ushort *) buf;
+ (long)p <= ((long)buf + (sizeof(struct get_conf) / 2)); p++) {
+ while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
+ if (--loop == 0)
+ return (FALSE);
+
+ loop = HZ/2;
+ *p = inw(base + HA_RDATA);
+ }
+ if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { /* Error ? */
+ if (htonl(EATA_SIGNATURE) == buf->signature) {
+ DBG(DBG_PIO&&DBG_PROBE, printk(KERN_NOTICE "EATA Controller found "
+ "at %#4x EATA Level: %x\n", base,
+ (uint) (buf->version)));
+
+ while (inb(base + HA_RSTATUS) & HA_SDRQ)
+ inw(base + HA_RDATA);
+ if(ALLOW_DMA_BOARDS == FALSE) {
+ for (z = 0; z < MAXISA; z++)
+ if (base == ISAbases[z]) {
+ buf->IRQ = ISAirqs[z];
+ break;
+ }
+ }
+ return (TRUE);
+ }
+ } else {
+ DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during transfer "
+ "for HBA at %x\n", base));
+ }
+ return (FALSE);
+}
+
+void print_pio_config(struct get_conf *gc)
+{
+ printk("Please check values: (read config data)\n");
+ printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n",
+ (uint) ntohl(gc->len), gc->version,
+ gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support);
+ printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n",
+ gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2],
+ gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND);
+ printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n",
+ gc->IRQ, gc->IRQ_TR, gc->FORCADR,
+ gc->MAX_CHAN, gc->ID_qest);
+ DBG(DPT_DEBUG, DELAY(14));
+}
+
+static uint print_selftest(uint base)
+{
+ unchar buffer[512];
+#ifdef VERBOSE_SETUP
+ int z;
+#endif
+
+ printk("eata_pio: executing controller self test & setup...\n");
+ while (inb(base + HA_RSTATUS) & HA_SBUSY);
+ outb(EATA_CMD_PIO_SETUPTEST, base + HA_WCOMMAND);
+ do {
+ while (inb(base + HA_RSTATUS) & HA_SBUSY)
+ /* nothing */ ;
+ if (inb(base + HA_RSTATUS) & HA_SDRQ)
+ {
+ insw(base + HA_RDATA, &buffer, 256);
+#ifdef VERBOSE_SETUP
+ /* no beeps please... */
+ for (z = 0; z < 511 && buffer[z]; z++)
+ if (buffer[z] != 7) printk("%c", buffer[z]);
+#endif
+ }
+ } while (inb(base+HA_RSTATUS) & (HA_SBUSY|HA_SDRQ));
+
+ return (!(inb(base+HA_RSTATUS) & HA_SERROR));
+}
+
+int register_pio_HBA(long base, struct get_conf *gc, Scsi_Host_Template * tpnt)
+{
+ ulong size = 0;
+ char *buff;
+ ulong cplen;
+ ushort cppadlen;
+ struct Scsi_Host *sh;
+ hostdata *hd;
+
+ DBG(DBG_REGISTER, print_pio_config(gc));
+
+ if (gc->DMA_support == TRUE) {
+ printk("HBA at %#.4lx supports DMA. Please use EATA-DMA driver.\n",base);
+ if(ALLOW_DMA_BOARDS == FALSE)
+ return (FALSE);
+ }
+
+ if ((buff = get_pio_board_data((uint)base, gc->IRQ, gc->scsi_id[3],
+ cplen =(htonl(gc->cplen )+1)/2,
+ cppadlen=(htons(gc->cppadlen)+1)/2)) == NULL)
+ {
+ printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", (ulong) base);
+ return (FALSE);
+ }
+
+ if (print_selftest(base) == FALSE && ALLOW_DMA_BOARDS == FALSE)
+ {
+ printk("HBA at %#lx failed while performing self test & setup.\n",
+ (ulong) base);
+ return (FALSE);
+ }
+
+ if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
+ if (!request_irq(gc->IRQ, eata_pio_int_handler, SA_INTERRUPT,
+ "EATA-PIO", NULL)){
+ reg_IRQ[gc->IRQ]++;
+ if (!gc->IRQ_TR)
+ reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
+ } else {
+ printk("Couldn't allocate IRQ %d, Sorry.\n", gc->IRQ);
+ return (FALSE);
+ }
+ } else { /* More than one HBA on this IRQ */
+ if (reg_IRQL[gc->IRQ] == TRUE) {
+ printk("Can't support more than one HBA on this IRQ,\n"
+ " if the IRQ is edge triggered. Sorry.\n");
+ return (FALSE);
+ } else
+ reg_IRQ[gc->IRQ]++;
+ }
+
+ request_region(base, 8, "eata_pio");
+
+ size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
+
+ sh = scsi_register(tpnt, size);
+ hd = SD(sh);
+
+ memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)));
+ memset(hd->reads, 0, sizeof(ulong) * 26);
+
+ strncpy(SD(sh)->vendor, &buff[8], 8);
+ SD(sh)->vendor[8] = 0;
+ strncpy(SD(sh)->name, &buff[16], 17);
+ SD(sh)->name[17] = 0;
+ SD(sh)->revision[0] = buff[32];
+ SD(sh)->revision[1] = buff[33];
+ SD(sh)->revision[2] = buff[34];
+ SD(sh)->revision[3] = '.';
+ SD(sh)->revision[4] = buff[35];
+ SD(sh)->revision[5] = 0;
+
+ switch (ntohl(gc->len)) {
+ case 0x1c:
+ SD(sh)->EATA_revision = 'a';
+ break;
+ case 0x1e:
+ SD(sh)->EATA_revision = 'b';
+ break;
+ case 0x22:
+ SD(sh)->EATA_revision = 'c';
+ break;
+ case 0x24:
+ SD(sh)->EATA_revision = 'z';
+ break;
+ default:
+ SD(sh)->EATA_revision = '?';
+ }
+
+ if(ntohl(gc->len) >= 0x22) {
+ if (gc->is_PCI == TRUE)
+ hd->bustype = IS_PCI;
+ else if (gc->is_EISA == TRUE)
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ } else {
+ if (buff[21] == '4')
+ hd->bustype = IS_PCI;
+ else if (buff[21] == '2')
+ hd->bustype = IS_EISA;
+ else
+ hd->bustype = IS_ISA;
+ }
+
+ SD(sh)->cplen=cplen;
+ SD(sh)->cppadlen=cppadlen;
+ SD(sh)->hostid=gc->scsi_id[3];
+ SD(sh)->devflags=1<<gc->scsi_id[3];
+ SD(sh)->moresupport=gc->MORE_support;
+ sh->unique_id = base;
+ sh->base = (char *) base;
+ sh->io_port = base;
+ sh->n_io_port = 8;
+ sh->irq = gc->IRQ;
+ sh->dma_channel = PIO;
+ sh->this_id = gc->scsi_id[3];
+ sh->can_queue = 1;
+ sh->cmd_per_lun = 1;
+ sh->sg_tablesize = SG_ALL;
+
+ hd->channel = 0;
+
+ sh->max_id = 8;
+ sh->max_lun = 8;
+
+ if (gc->SECOND)
+ hd->primary = FALSE;
+ else
+ hd->primary = TRUE;
+
+ sh->unchecked_isa_dma = FALSE; /* We can only do PIO */
+
+ hd->next = NULL; /* build a linked list of all HBAs */
+ hd->prev = last_HBA;
+ if(hd->prev != NULL)
+ SD(hd->prev)->next = sh;
+ last_HBA = sh;
+ if (first_HBA == NULL)
+ first_HBA = sh;
+ registered_HBAs++;
+ return (1);
+}
+
+void find_pio_ISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ int i;
+
+ for (i = 0; i < MAXISA; i++) {
+ if (ISAbases[i]) {
+ if (get_pio_conf_PIO(ISAbases[i], buf) == TRUE){
+ register_pio_HBA(ISAbases[i], buf, tpnt);
+ }
+ ISAbases[i] = 0;
+ }
+ }
+ return;
+}
+
+void find_pio_EISA(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+ u32 base;
+ int i;
+
+#if CHECKPAL
+ u8 pal1, pal2, pal3;
+#endif
+
+ for (i = 0; i < MAXEISA; i++) {
+ if (EISAbases[i] == TRUE) { /* Still a possibility ? */
+
+ base = 0x1c88 + (i * 0x1000);
+#if CHECKPAL
+ pal1 = inb((u16)base - 8);
+ pal2 = inb((u16)base - 7);
+ pal3 = inb((u16)base - 6);
+
+ if (((pal1 == 0x12) && (pal2 == 0x14)) ||
+ ((pal1 == 0x38) && (pal2 == 0xa3) && (pal3 == 0x82)) ||
+ ((pal1 == 0x06) && (pal2 == 0x94) && (pal3 == 0x24))) {
+ DBG(DBG_PROBE, printk(KERN_NOTICE "EISA EATA id tags found: "
+ "%x %x %x \n",
+ (int)pal1, (int)pal2, (int)pal3));
+#endif
+ if (get_pio_conf_PIO(base, buf) == TRUE) {
+ DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf));
+ if (buf->IRQ) {
+ register_pio_HBA(base, buf, tpnt);
+ } else
+ printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA "
+ "removed from list\n");
+ }
+ /* Nothing found here so we take it from the list */
+ EISAbases[i] = 0;
+#if CHECKPAL
+ }
+#endif
+ }
+ }
+ return;
+}
+
+void find_pio_PCI(struct get_conf *buf, Scsi_Host_Template * tpnt)
+{
+
+#ifndef CONFIG_PCI
+ printk(KERN_ERR "eata_pio: kernel PCI support not enabled. Skipping scan "
+ "for PCI HBAs.\n");
+#else
+
+ u8 pci_bus, pci_device_fn;
+ static s16 pci_index = 0; /* Device index to PCI BIOS calls */
+ u32 base = 0;
+ u16 com_adr;
+ u16 rev_device;
+ u32 error, i, x;
+
+ if (pcibios_present()) {
+ for (i = 0; i <= MAXPCI; ++i, ++pci_index) {
+ if (pcibios_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT,
+ pci_index, &pci_bus, &pci_device_fn))
+ break;
+ DBG(DBG_PROBE && DBG_PCI,
+ printk("eata_pio: HBA at bus %d, device %d,"
+ " function %d, index %d\n", (s32)pci_bus,
+ (s32)((pci_device_fn & 0xf8) >> 3),
+ (s32)(pci_device_fn & 7), pci_index));
+
+ if (!(error = pcibios_read_config_word(pci_bus, pci_device_fn,
+ PCI_CLASS_DEVICE, &rev_device))) {
+ if (rev_device == PCI_CLASS_STORAGE_SCSI) {
+ if (!(error = pcibios_read_config_word(pci_bus,
+ pci_device_fn, PCI_COMMAND,
+ (u16 *) & com_adr))) {
+ if (!((com_adr & PCI_COMMAND_IO) &&
+ (com_adr & PCI_COMMAND_MASTER))) {
+ printk("HBA has IO or BUSMASTER mode disabled\n");
+ continue;
+ }
+ } else
+ printk("eata_pio: error %x while reading "
+ "PCI_COMMAND\n", error);
+ } else
+ printk("DEVICECLASSID %x didn't match\n", rev_device);
+ } else {
+ printk("eata_pio: error %x while reading PCI_CLASS_BASE\n",
+ error);
+ continue;
+ }
+
+ if (!(error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, (int *) &base))){
+
+ /* Check if the address is valid */
+ if (base & 0x01) {
+ base &= 0xfffffffe;
+ /* EISA tag there ? */
+ if ((inb(base) == 0x12) && (inb(base + 1) == 0x14))
+ continue; /* Yep, it's forced, so move on */
+ base += 0x10; /* Now, THIS is the real address */
+ if (base != 0x1f8) {
+ /* We didn't find it in the primary search */
+ if (get_pio_conf_PIO(base, buf) == TRUE) {
+ if (buf->FORCADR) /* If the address is forced */
+ continue; /* we'll find it later */
+
+ /* OK. We made it this far, so we can go ahead
+ * and register it. We only have to check and,
+ * if necessary, remove it from the EISA and ISA lists
+ */
+
+ register_pio_HBA(base, buf, tpnt);
+
+ if (base < 0x1000) {
+ for (x = 0; x < MAXISA; ++x) {
+ if (ISAbases[x] == base) {
+ ISAbases[x] = 0;
+ break;
+ }
+ }
+ } else if ((base & 0x0fff) == 0x0c88) {
+ x = (base >> 12) & 0x0f;
+ EISAbases[x] = 0;
+ }
+ continue; /* break; */
+ }
+ }
+ }
+ } else
+ printk("eata_pio: error %x while reading "
+ "PCI_BASE_ADDRESS_0\n", error);
+ }
+ } else
+ printk("eata_pio: No BIOS32 extensions present. This driver release "
+ "still depends on it.\n"
+ " Skipping scan for PCI HBAs.\n");
+#endif /* #ifndef CONFIG_PCI */
+ return;
+}
+
+
+int eata_pio_detect(Scsi_Host_Template * tpnt)
+{
+ struct Scsi_Host *HBA_ptr;
+ struct get_conf gc;
+ int i;
+
+ DBG((DBG_PROBE && DBG_DELAY) || DPT_DEBUG,
+ printk("Using lots of delays to let you read the debugging output\n"));
+
+ tpnt->proc_dir = &proc_scsi_eata_pio;
+
+ find_pio_PCI(&gc, tpnt);
+
+ find_pio_EISA(&gc, tpnt);
+
+ find_pio_ISA(&gc, tpnt);
+
+ for (i = 0; i < MAXIRQ; i++)
+ if (reg_IRQ[i])
+ request_irq(i, eata_pio_int_handler, SA_INTERRUPT, "EATA-PIO", NULL);
+
+ HBA_ptr = first_HBA;
+
+ if (registered_HBAs != 0) {
+ printk("EATA (Extended Attachment) PIO driver version: %d.%d%s\n"
+ "(c) 1993-95 Michael Neuffer, neuffer@goofy.zdv.uni-mainz.de\n"
+ " Alfred Arnold, a.arnold@kfa-juelich.de\n"
+ "This release only supports DASD devices (harddisks)\n",
+ VER_MAJOR, VER_MINOR, VER_SUB);
+
+ printk("Registered HBAs:\n");
+ printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:"
+ " QS: SG: CPL:\n");
+ for (i = 1; i <= registered_HBAs; i++) {
+ printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4x %2d %d %d %c"
+ " %2d %2d %2d\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
+ SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P')?
+ "PCI ":(SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ",
+ (uint) HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel,
+ HBA_ptr->this_id, (SD(HBA_ptr)->primary == TRUE)?'Y':'N',
+ HBA_ptr->can_queue, HBA_ptr->sg_tablesize,
+ HBA_ptr->cmd_per_lun);
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+ }
+ DBG(DPT_DEBUG,DELAY(12));
+
+ return (registered_HBAs);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will happen later */
+Scsi_Host_Template driver_template = EATA_PIO;
+
+#include "scsi_module.c"
+#endif
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_pio.h b/linux/src/drivers/scsi/eata_pio.h
new file mode 100644
index 0000000..333f7c4
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_pio.h
@@ -0,0 +1,116 @@
+/********************************************************
+* Header file for eata_pio.c Linux EATA-PIO SCSI driver *
+* (c) 1993-96 Michael Neuffer *
+*********************************************************
+* last change: 96/05/05 *
+********************************************************/
+
+
+#ifndef _EATA_PIO_H
+#define _EATA_PIO_H
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include <scsi/scsicam.h>
+
+#ifndef HOSTS_C
+#include "eata_generic.h"
+
+#define VER_MAJOR 0
+#define VER_MINOR 0
+#define VER_SUB "1b"
+
+/************************************************************************
+ * Here you can switch parts of the code on and off *
+ ************************************************************************/
+
+#define VERBOSE_SETUP /* show startup screen of 2001 */
+#define ALLOW_DMA_BOARDS 1
+
+/************************************************************************
+ * Debug options. *
+ * Enable DEBUG and whichever options you require. *
+ ************************************************************************/
+#define DEBUG_EATA 1 /* Enable debug code. */
+#define DPT_DEBUG 0 /* Bob's special */
+#define DBG_DELAY 0 /* Build in delays so debug messages can
+ * be read before they vanish off the top of
+ * the screen!
+ */
+#define DBG_PROBE 0 /* Debug probe routines. */
+#define DBG_ISA 0 /* Trace ISA routines */
+#define DBG_EISA 0 /* Trace EISA routines */
+#define DBG_PCI 0 /* Trace PCI routines */
+#define DBG_PIO 0 /* Trace get_config_PIO */
+#define DBG_COM 0 /* Trace command call */
+#define DBG_QUEUE 0 /* Trace command queueing. */
+#define DBG_INTR 0 /* Trace interrupt service routine. */
+#define DBG_INTR2 0 /* Trace interrupt service routine. */
+#define DBG_PROC 0 /* Debug proc-fs related statistics */
+#define DBG_PROC_WRITE 0
+#define DBG_REGISTER 0 /* */
+#define DBG_ABNORM 1 /* Debug abnormal actions (reset, abort) */
+
+#if DEBUG_EATA
+#define DBG(x, y) if ((x)) {y;}
+#else
+#define DBG(x, y)
+#endif
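+
+/* Illustrative use, mirroring how eata_pio.c calls this macro: a call such
+ * as DBG(DBG_QUEUE, printk(KERN_DEBUG "queued\n")) expands to the printk
+ * only when DEBUG_EATA is set and the chosen DBG_* flag is non-zero. */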
+
+#endif /* !HOSTS_C */
+
+int eata_pio_detect(Scsi_Host_Template *);
+const char *eata_pio_info(struct Scsi_Host *);
+int eata_pio_command(Scsi_Cmnd *);
+int eata_pio_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int eata_pio_abort(Scsi_Cmnd *);
+int eata_pio_reset(Scsi_Cmnd *, unsigned int);
+int eata_pio_proc_info(char *, char **, off_t, int, int, int);
+#ifdef MODULE
+int eata_pio_release(struct Scsi_Host *);
+#else
+#define eata_pio_release NULL
+#endif
+
+
+#define EATA_PIO { \
+ NULL, NULL, \
+ NULL, /* proc_dir_entry */ \
+ eata_pio_proc_info, /* procinfo */ \
+ "EATA (Extended Attachment) PIO driver", \
+ eata_pio_detect, \
+ eata_pio_release, \
+ NULL, NULL, \
+ eata_pio_queue, \
+ eata_pio_abort, \
+ eata_pio_reset, \
+ NULL, /* Slave attach */ \
+ scsicam_bios_param, \
+ 0, /* Canqueue */ \
+ 0, /* this_id */ \
+ 0, /* sg_tablesize */ \
+ 0, /* cmd_per_lun */ \
+ 0, /* present */ \
+ 1, /* True if ISA */ \
+ ENABLE_CLUSTERING }
+
+#endif /* _EATA_PIO_H */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/eata_pio_proc.c b/linux/src/drivers/scsi/eata_pio_proc.c
new file mode 100644
index 0000000..54783f2
--- /dev/null
+++ b/linux/src/drivers/scsi/eata_pio_proc.c
@@ -0,0 +1,135 @@
+
+/*
+ * eata_set_info
+ * buffer : pointer to the data that has been written to the hostfile
+ * length : number of bytes written to the hostfile
+ * HBA_ptr: pointer to the Scsi_Host struct
+ */
+int eata_pio_set_info(char *buffer, int length, struct Scsi_Host *HBA_ptr)
+{
+ DBG(DBG_PROC_WRITE, printk("%s\n", buffer));
+ return(-ENOSYS); /* Currently this is a no-op */
+}
+
+/*
+ * eata_proc_info
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
+int eata_pio_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+
+ Scsi_Device *scd;
+ struct Scsi_Host *HBA_ptr;
+ static u8 buff[512];
+ int i;
+ int size, len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+
+ HBA_ptr = first_HBA;
+ for (i = 1; i <= registered_HBAs; i++) {
+ if (HBA_ptr->host_no == hostno)
+ break;
+ HBA_ptr = SD(HBA_ptr)->next;
+ }
+
+ if(inout == TRUE) /* Has data been written to the file ? */
+ return(eata_pio_set_info(buffer, length, HBA_ptr));
+
+ if (offset == 0)
+ memset(buff, 0, sizeof(buff));
+
+ size = sprintf(buffer+len, "EATA (Extended Attachment) PIO driver version: "
+ "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB);
+ len += size; pos = begin + len;
+ size = sprintf(buffer + len, "queued commands: %10ld\n"
+ "processed interrupts:%10ld\n", queue_counter, int_counter);
+ len += size; pos = begin + len;
+
+ size = sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n",
+ HBA_ptr->host_no, SD(HBA_ptr)->name);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Firmware revision: v%s\n",
+ SD(HBA_ptr)->revision);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "IO: PIO\n");
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Base IO : %#.4x\n", (u32) HBA_ptr->base);
+ len += size;
+ pos = begin + len;
+ size = sprintf(buffer + len, "Host Bus: %s\n",
+ (SD(HBA_ptr)->bustype == 'P')?"PCI ":
+ (SD(HBA_ptr)->bustype == 'E')?"EISA":"ISA ");
+
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ scd = scsi_devices;
+
+ size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+ len += size;
+ pos = begin + len;
+
+ while (scd) {
+ if (scd->host == HBA_ptr) {
+ proc_print_scsidevice(scd, buffer, &size, len);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+ scd = scd->next;
+ }
+
+ stop_output:
+ DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len));
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len = length; /* Ending slop */
+ DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len));
+
+ return (len);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/fdomain.c b/linux/src/drivers/scsi/fdomain.c
new file mode 100644
index 0000000..df71f5c
--- /dev/null
+++ b/linux/src/drivers/scsi/fdomain.c
@@ -0,0 +1,2082 @@
+/* fdomain.c -- Future Domain TMC-16x0 SCSI driver
+ * Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
+ * Revised: Sat Nov 2 09:27:47 1996 by root@cs.unc.edu
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992, 1993, 1994, 1995, 1996 Rickard E. Faith
+ *
+ * $Id: fdomain.c,v 1.1 1999/04/26 05:54:32 tb Exp $
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ **************************************************************************
+
+ SUMMARY:
+
+ Future Domain BIOS versions supported for autodetect:
+ 2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61
+ Chips supported:
+ TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70
+ Boards supported:
+ Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX
+ Future Domain TMC-3260 (PCI)
+ Quantum ISA-200S, ISA-250MG
+ Adaptec AHA-2920 (PCI)
+ IBM ?
+ LILO command-line options:
+ fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]
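+ (For example, a boot line of fdomain=0x140,11 would select port base
+ 0x140 and IRQ 11; these particular values are only illustrative.)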
+
+
+
+ DESCRIPTION:
+
+ This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680
+ TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a
+ 25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin
+ high-density external connector. The 1670 and 1680 have floppy disk
+ controllers built in. The TMC-3260 is a PCI bus card.
+
+ Future Domain's older boards are based on the TMC-1800 chip, and this
+ driver was originally written for a TMC-1680 board with the TMC-1800 chip.
+ More recently, boards are being produced with the TMC-18C50 and TMC-18C30
+ chips. The latest and greatest board may not work with this driver. If
+ you have to patch this driver so that it will recognize your board's BIOS
+ signature, then the driver may fail to function after the board is
+ detected.
+
+ Please note that the drive ordering that Future Domain implemented in BIOS
+ versions 3.4 and 3.5 is the opposite of the order (currently) used by the
+ rest of the SCSI industry. If you have BIOS version 3.4 or 3.5, and have
+ more than one drive, then the drive ordering will be the reverse of that
+ which you see under DOS. For example, under DOS SCSI ID 0 will be D: and
+ SCSI ID 1 will be C: (the boot device). Under Linux, SCSI ID 0 will be
+ /dev/sda and SCSI ID 1 will be /dev/sdb. The Linux ordering is consistent
+ with that provided by all the other SCSI drivers for Linux. If you want
+ this changed, you will probably have to patch the higher level SCSI code.
+ If you do so, please send me patches that are protected by #ifdefs.
+
+ If you have a TMC-8xx or TMC-9xx board, then this is not the driver for
+ your board. Please refer to the Seagate driver for more information and
+ possible support.
+
+
+
+ HISTORY:
+
+ Linux Driver Driver
+ Version Version Date Support/Notes
+
+ 0.0 3 May 1992 V2.0 BIOS; 1800 chip
+ 0.97 1.9 28 Jul 1992
+ 0.98.6 3.1 27 Nov 1992
+ 0.99 3.2 9 Dec 1992
+
+ 0.99.3 3.3 10 Jan 1993 V3.0 BIOS
+ 0.99.5 3.5 18 Feb 1993
+ 0.99.10 3.6 15 May 1993 V3.2 BIOS; 18C50 chip
+ 0.99.11 3.17 3 Jul 1993 (now under RCS)
+ 0.99.12 3.18 13 Aug 1993
+ 0.99.14 5.6 31 Oct 1993 (reselection code removed)
+
+ 0.99.15 5.9 23 Jan 1994 V3.4 BIOS (preliminary)
+ 1.0.8/1.1.1 5.15 1 Apr 1994 V3.4 BIOS; 18C30 chip (preliminary)
+ 1.0.9/1.1.3 5.16 7 Apr 1994 V3.4 BIOS; 18C30 chip
+ 1.1.38 5.18 30 Jul 1994 36C70 chip (PCI version of 18C30)
+ 1.1.62 5.20 2 Nov 1994 V3.5 BIOS
+ 1.1.73 5.22 7 Dec 1994 Quantum ISA-200S board; V2.0 BIOS
+
+ 1.1.82 5.26 14 Jan 1995 V3.5 BIOS; TMC-1610M/MER/MEX board
+ 1.2.10 5.28 5 Jun 1995 Quantum ISA-250MG board; V2.0, V2.01 BIOS
+ 1.3.4 5.31 23 Jun 1995 PCI BIOS-32 detection (preliminary)
+ 1.3.7 5.33 4 Jul 1995 PCI BIOS-32 detection
+ 1.3.28 5.36 17 Sep 1995 V3.61 BIOS; LILO command-line support
+ 1.3.34 5.39 12 Oct 1995 V3.60 BIOS; /proc
+ 1.3.72 5.39 8 Feb 1996 Adaptec AHA-2920 board
+ 1.3.85 5.41 4 Apr 1996
+ 2.0.12 5.44 8 Aug 1996 Use ID 7 for all PCI cards
+
+
+
+ REFERENCES USED:
+
+ "TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation,
+ 1990.
+
+ "Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain
+ Corporation, January 1992.
+
+ "LXT SCSI Products: Specifications and OEM Technical Manual (Revision
+ B/September 1991)", Maxtor Corporation, 1991.
+
+ "7213S product Manual (Revision P3)", Maxtor Corporation, 1992.
+
+ "Draft Proposed American National Standard: Small Computer System
+ Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109,
+ revision 10h, October 17, 1991)
+
+ Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric
+ Youngdale (ericy@cais.com), 1992.
+
+ Private communication, Tuong Le (Future Domain Engineering department),
+ 1994. (Disk geometry computations for Future Domain BIOS version 3.4, and
+ TMC-18C30 detection.)
+
+ Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page
+ 60 (2.39: Disk Partition Table Layout).
+
+ "18C30 Technical Reference Manual", Future Domain Corporation, 1993, page
+ 6-1.
+
+
+
+ NOTES ON REFERENCES:
+
+ The Maxtor manuals were free. Maxtor telephone technical support is
+ great!
+
+ The Future Domain manuals were $25 and $35. They document the chip, not
+ the TMC-16x0 boards, so some information I had to guess at. In 1992,
+ Future Domain sold DOS BIOS source for $250 and the UN*X driver source was
+ $750, but these required a non-disclosure agreement, so even if I could
+ have afforded them, they would *not* have been useful for writing this
+ publicly distributable driver.  Future Domain technical support has
+ provided some information on the phone and have sent a few useful FAXs.
+ They have been much more helpful since they started to recognize that the
+ word "Linux" refers to an operating system :-).
+
+
+
+ ALPHA TESTERS:
+
+ There are many other alpha testers that come and go as the driver
+ develops. The people listed here were most helpful in times of greatest
+ need (mostly early on -- I've probably left out a few worthy people in
+ more recent times):
+
+ Todd Carrico (todd@wutc.wustl.edu), Dan Poirier (poirier@cs.unc.edu ), Ken
+ Corey (kenc@sol.acs.unt.edu), C. de Bruin (bruin@bruin@sterbbs.nl), Sakari
+ Aaltonen (sakaria@vipunen.hit.fi), John Rice (rice@xanth.cs.odu.edu), Brad
+ Yearwood (brad@optilink.com), and Ray Toy (toy@soho.crd.ge.com).
+
+ Special thanks to Tien-Wan Yang (twyang@cs.uh.edu), who graciously lent me
+ his 18C50-based card for debugging. He is the sole reason that this
+ driver works with the 18C50 chip.
+
+ Thanks to Dave Newman (dnewman@crl.com) for providing initial patches for
+ the version 3.4 BIOS.
+
+ Thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for providing
+ patches that support the TMC-3260, a PCI bus card with the 36C70 chip.
+ The 36C70 chip appears to be "completely compatible" with the 18C30 chip.
+
+ Thanks to Eric Kasten (tigger@petroglyph.cl.msu.edu) for providing the
+ patch for the version 3.5 BIOS.
+
+ Thanks to Stephen Henson (shenson@nyx10.cs.du.edu) for providing the
+ patch for the Quantum ISA-200S SCSI adapter.
+
+ Thanks to Adam Bowen for the signature to the 1610M/MER/MEX scsi cards, to
+ Martin Andrews (andrewm@ccfadm.eeg.ccf.org) for the signature to some
+ random TMC-1680 repackaged by IBM; and to Mintak Ng (mintak@panix.com) for
+ the version 3.61 BIOS signature.
+
+ Thanks to Mark Singer (elf@netcom.com) and Richard Simpson
+ (rsimpson@ewrcsdra.demon.co.uk) for more Quantum signatures and detective
+ work on the Quantum RAM layout.
+
+ Special thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for
+ providing patches for proper PCI BIOS32-mediated detection of the TMC-3260
+ card (a PCI bus card with the 36C70 chip). Please send James PCI-related
+ bug reports.
+
+ Thanks to Tom Cavin (tec@usa1.com) for preliminary command-line option
+ patches.
+
+ All of the alpha testers deserve much thanks.
+
+
+
+ NOTES ON USER DEFINABLE OPTIONS:
+
+ DEBUG: This turns on the printing of various debug information.
+
+ ENABLE_PARITY: This turns on SCSI parity checking. With the current
+ driver, all attached devices must support SCSI parity. If none of your
+ devices support parity, then you can probably get the driver to work by
+ turning this option off. I have no way of testing this, however, and it
+ would appear that no one ever uses this option.
+
+ FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
+ 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
+ the SCSI device, an interrupt will be raised. Therefore, this could be as
+ low as 0, or as high as 16. Note, however, that values which are too high
+ or too low seem to prevent any interrupts from occurring, and thereby lock
+ up the machine. I have found that 2 is a good number, but throughput may
+ be increased by changing this value to values which are close to 2.
+ Please let me know if you try any different values.
+
+ DO_DETECT: This activates some old scan code which was needed before the
+ high level drivers got fixed. If you are having trouble with the driver,
+ turning this on should not hurt, and might help. Please let me know if
+ this is the case, since this code will be removed from future drivers.
+
+ RESELECTION: This is no longer an option, since I gave up trying to
+ implement it in version 4.x of this driver. It did not improve
+ performance at all and made the driver unstable (because I never found one
+ of the two race conditions which were introduced by the multiple
+ outstanding command code). The instability seems a very high price to pay
+ just so that you don't have to wait for the tape to rewind. If you want
+ this feature implemented, send me patches. I'll be happy to send a copy
+ of my (broken) driver to anyone who would like to see a copy.
+
+ **************************************************************************/
+
+#ifdef PCMCIA
+#define MODULE
+#endif
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "fdomain.h"
+#include <asm/system.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+
+#include <linux/config.h> /* for CONFIG_PCI */
+
+struct proc_dir_entry proc_scsi_fdomain = {
+ PROC_SCSI_FDOMAIN, 7, "fdomain",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define VERSION "$Revision: 1.1 $"
+
+/* START OF USER DEFINABLE OPTIONS */
+
+#define DEBUG 1 /* Enable debugging output */
+#define ENABLE_PARITY 1 /* Enable SCSI Parity */
+#define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */
+#define DO_DETECT 0 /* Do device detection here (see scsi.c) */
+
+/* END OF USER DEFINABLE OPTIONS */
+
+#if DEBUG
+#define EVERY_ACCESS 0 /* Write a line on every scsi access */
+#define ERRORS_ONLY 1 /* Only write a line if there is an error */
+#define DEBUG_DETECT 0 /* Debug fdomain_16x0_detect() */
+#define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
+#define DEBUG_ABORT 1 /* Debug abort() routine */
+#define DEBUG_RESET 1 /* Debug reset() routine */
+#define DEBUG_RACE 1 /* Debug interrupt-driven race condition */
+#else
+#define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
+#define ERRORS_ONLY 0
+#define DEBUG_DETECT 0
+#define DEBUG_MESSAGES 0
+#define DEBUG_ABORT 0
+#define DEBUG_RESET 0
+#define DEBUG_RACE 0
+#endif
+
+/* Errors are reported on the line, so we don't need to report them again */
+#if EVERY_ACCESS
+#undef ERRORS_ONLY
+#define ERRORS_ONLY 0
+#endif
+
+#if ENABLE_PARITY
+#define PARITY_MASK 0x08
+#else
+#define PARITY_MASK 0x00
+#endif
+
+enum chip_type {
+ unknown = 0x00,
+ tmc1800 = 0x01,
+ tmc18c50 = 0x02,
+ tmc18c30 = 0x03,
+};
+
+enum {
+ in_arbitration = 0x02,
+ in_selection = 0x04,
+ in_other = 0x08,
+ disconnect = 0x10,
+ aborted = 0x20,
+ sent_ident = 0x40,
+};
+
+enum in_port_type {
+ Read_SCSI_Data = 0,
+ SCSI_Status = 1,
+ TMC_Status = 2,
+ FIFO_Status = 3, /* tmc18c50/tmc18c30 only */
+ Interrupt_Cond = 4, /* tmc18c50/tmc18c30 only */
+ LSB_ID_Code = 5,
+ MSB_ID_Code = 6,
+ Read_Loopback = 7,
+ SCSI_Data_NoACK = 8,
+ Interrupt_Status = 9,
+ Configuration1 = 10,
+ Configuration2 = 11, /* tmc18c50/tmc18c30 only */
+ Read_FIFO = 12,
+ FIFO_Data_Count = 14
+};
+
+enum out_port_type {
+ Write_SCSI_Data = 0,
+ SCSI_Cntl = 1,
+ Interrupt_Cntl = 2,
+ SCSI_Mode_Cntl = 3,
+ TMC_Cntl = 4,
+ Memory_Cntl = 5, /* tmc18c50/tmc18c30 only */
+ Write_Loopback = 7,
+ IO_Control = 11, /* tmc18c30 only */
+ Write_FIFO = 12
+};
+
+static int port_base = 0;
+static void *bios_base = NULL;
+static int bios_major = 0;
+static int bios_minor = 0;
+static int PCI_bus = 0;
+static int Quantum = 0; /* Quantum board variant */
+static int interrupt_level = 0;
+static volatile int in_command = 0;
+static Scsi_Cmnd *current_SC = NULL;
+static enum chip_type chip = unknown;
+static int adapter_mask = 0;
+static int this_id = 0;
+static int setup_called = 0;
+
+#if DEBUG_RACE
+static volatile int in_interrupt_flag = 0;
+#endif
+
+static int SCSI_Mode_Cntl_port;
+static int FIFO_Data_Count_port;
+static int Interrupt_Cntl_port;
+static int Interrupt_Status_port;
+static int Read_FIFO_port;
+static int Read_SCSI_Data_port;
+static int SCSI_Cntl_port;
+static int SCSI_Data_NoACK_port;
+static int SCSI_Status_port;
+static int TMC_Cntl_port;
+static int TMC_Status_port;
+static int Write_FIFO_port;
+static int Write_SCSI_Data_port;
+
+static int FIFO_Size = 0x2000; /* 8k FIFO for
+ pre-tmc18c30 chips */
+
+extern void fdomain_16x0_intr( int irq, void *dev_id, struct pt_regs * regs );
+
+static void *addresses[] = {
+ (void *)0xc8000,
+ (void *)0xca000,
+ (void *)0xce000,
+ (void *)0xde000,
+ (void *)0xcc000, /* Extra addresses for PCI boards */
+ (void *)0xd0000,
+ (void *)0xe0000,
+};
+#define ADDRESS_COUNT (sizeof( addresses ) / sizeof( unsigned ))
+
+static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
+#define PORT_COUNT (sizeof( ports ) / sizeof( unsigned short ))
+
+static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
+
+/*
+
+ READ THIS BEFORE YOU ADD A SIGNATURE!
+
+ READING THIS SHORT NOTE CAN SAVE YOU LOTS OF TIME!
+
+ READ EVERY WORD, ESPECIALLY THE WORD *NOT*
+
+ This driver works *ONLY* for Future Domain cards using the TMC-1800,
+ TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670,
+ and 1680.
+
+ The following BIOS signatures are for boards which do *NOT*
+ work with this driver (these TMC-8xx and TMC-9xx boards may work with the
+ Seagate driver):
+
+ FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88
+ FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89
+ FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90
+ FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90
+ FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92
+
+*/
+
+struct signature {
+ const char *signature;
+ int sig_offset;
+ int sig_length;
+ int major_bios_version;
+ int minor_bios_version;
+ int flag; /* 1 == PCI_bus, 2 == ISA_200S, 3 == ISA_250MG, 4 == ISA_200S */
+} signatures[] = {
+ /* 1 2 3 4 5 6 */
+ /* 123456789012345678901234567890123456789012345678901234567890 */
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 2, 0, 0 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 2, 0, 2 },
+ { "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 2, 0, 3 },
+ { "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 2, 0, 4 },
+ { "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 3, 0, 0 },
+ { "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 3, 2, 0 },
+ { "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 3, -1, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 5, 33, 3, 4, 0 },
+ { "Future Domain Corp. V1.0008/18/93", 26, 33, 3, 4, 1 },
+ { "Adaptec AHA-2920 PCI-SCSI Card", 42, 31, 3, -1, 1 },
+ { "IBM F1 P264/32", 5, 14, 3, -1, 1 },
+ /* This next signature may not be a 3.5 bios */
+ { "Future Domain Corp. V2.0108/18/93", 5, 33, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 3, 5, 0 },
+ { "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 3, 5, 0 },
+ { "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN CORP. V3.6108/18/93", 5, 34, 3, 6, 0 },
+ { "FUTURE DOMAIN TMC-18XX", 5, 22, -1, -1, 0 },
+
+ /* READ NOTICE ABOVE *BEFORE* YOU WASTE YOUR TIME ADDING A SIGNATURE
+ Also, fix the disk geometry code for your signature and send your
+ changes to faith@cs.unc.edu.  Above all, do *NOT* change any old
+ signatures!
+
+ Note that the last line will match a "generic" 18XX bios. Because
+ Future Domain has changed the host SCSI ID and/or the location of the
+ geometry information in the on-board RAM area for each of the first
+ three BIOS's, it is still important to enter a fully qualified
+ signature in the table for any new BIOS's (after the host SCSI ID and
+ geometry location are verified). */
+};
+
+#define SIGNATURE_COUNT (sizeof( signatures ) / sizeof( struct signature ))
+
+static void print_banner( struct Scsi_Host *shpnt )
+{
+ if (!shpnt) return; /* This won't ever happen */
+
+ if (bios_major < 0 && bios_minor < 0) {
+ printk( "scsi%d <fdomain>: No BIOS; using scsi id %d\n",
+ shpnt->host_no, shpnt->this_id );
+ } else {
+ printk( "scsi%d <fdomain>: BIOS version ", shpnt->host_no );
+
+ if (bios_major >= 0) printk( "%d.", bios_major );
+ else printk( "?." );
+
+ if (bios_minor >= 0) printk( "%d", bios_minor );
+ else printk( "?." );
+
+ printk( " at 0x%x using scsi id %d\n",
+ (unsigned)bios_base, shpnt->this_id );
+ }
+
+ /* If this driver works for later FD PCI
+ boards, we will have to modify banner
+ for additional PCI cards, but for now if
+ it's PCI it's a TMC-3260 - JTM */
+ printk( "scsi%d <fdomain>: %s chip at 0x%x irq ",
+ shpnt->host_no,
+ chip == tmc1800 ? "TMC-1800"
+ : (chip == tmc18c50 ? "TMC-18C50"
+ : (chip == tmc18c30 ?
+ (PCI_bus ? "TMC-36C70 (PCI bus)" : "TMC-18C30")
+ : "Unknown")),
+ port_base );
+
+ if (interrupt_level) printk( "%d", interrupt_level );
+ else printk( "<none>" );
+
+ printk( "\n" );
+}
+
+void fdomain_setup( char *str, int *ints )
+{
+ if (setup_called++ || ints[0] < 2 || ints[0] > 3) {
+ printk( "fdomain: usage: fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]\n" );
+ printk( "fdomain: bad LILO parameters?\n" );
+ }
+
+ port_base = ints[0] >= 1 ? ints[1] : 0;
+ interrupt_level = ints[0] >= 2 ? ints[2] : 0;
+ this_id = ints[0] >= 3 ? ints[3] : 0;
+
+ bios_major = bios_minor = -1; /* Use geometry for BIOS version >= 3.4 */
+}
+
+
+static void do_pause( unsigned amount ) /* Pause for amount*10 milliseconds */
+{
+ unsigned long the_time = jiffies + amount; /* 0.01 seconds per jiffy */
+
+ while (jiffies < the_time);
+}
+
+inline static void fdomain_make_bus_idle( void )
+{
+ outb( 0, SCSI_Cntl_port );
+ outb( 0, SCSI_Mode_Cntl_port );
+ if (chip == tmc18c50 || chip == tmc18c30)
+ outb( 0x21 | PARITY_MASK, TMC_Cntl_port ); /* Clear forced intr. */
+ else
+ outb( 0x01 | PARITY_MASK, TMC_Cntl_port );
+}
+
+static int fdomain_is_valid_port( int port )
+{
+#if DEBUG_DETECT
+ printk( " (%x%x),",
+ inb( port + MSB_ID_Code ), inb( port + LSB_ID_Code ) );
+#endif
+
+ /* The MCA ID is a unique id for each MCA compatible board. We
+ are using ISA boards, but Future Domain provides the MCA ID
+ anyway. We can use this ID to ensure that this is a Future
+ Domain TMC-1660/TMC-1680.
+ */
+
+ if (inb( port + LSB_ID_Code ) != 0xe9) { /* test for 0x6127 id */
+ if (inb( port + LSB_ID_Code ) != 0x27) return 0;
+ if (inb( port + MSB_ID_Code ) != 0x61) return 0;
+ chip = tmc1800;
+ } else { /* test for 0xe960 id */
+ if (inb( port + MSB_ID_Code ) != 0x60) return 0;
+ chip = tmc18c50;
+
+#if 0
+
+ /* Try to toggle 32-bit mode. This only
+ works on an 18c30 chip. (User reports
+ say this works, so we should switch to
+ it in the near future.) */
+
+ outb( 0x80, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x80) {
+ outb( 0x00, port + IO_Control );
+ if ((inb( port + Configuration2 ) & 0x80) == 0x00) {
+ chip = tmc18c30;
+ FIFO_Size = 0x800; /* 2k FIFO */
+ }
+ }
+#else
+
+ /* That should have worked, but appears to
+ have problems. Let's assume it is an
+ 18c30 if the RAM is disabled. */
+
+ if (inb( port + Configuration2 ) & 0x02) {
+ chip = tmc18c30;
+ FIFO_Size = 0x800; /* 2k FIFO */
+ }
+#endif
+ /* If that failed, we are an 18c50. */
+ }
+
+ return 1;
+}
+
+static int fdomain_test_loopback( void )
+{
+ int i;
+ int result;
+
+ for (i = 0; i < 255; i++) {
+ outb( i, port_base + Write_Loopback );
+ result = inb( port_base + Read_Loopback );
+ if (i != result)
+ return 1;
+ }
+ return 0;
+}
+
+/* fdomain_get_irq assumes that we have a valid MCA ID for a
+ TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
+ bios_base matches these ports. If someone was unlucky enough to have
+ purchased more than one Future Domain board, then they will have to
+ modify this code, as we only detect one board here. [The one with the
+ lowest bios_base.]
+
+ Note that this routine is only used for systems without a PCI BIOS32
+ (e.g., ISA bus). For PCI bus systems, this routine will likely fail
+ unless one of the IRQs listed in the ints array is used by the board.
+ Sometimes it is possible to use the computer's BIOS setup screen to
+ configure a PCI system so that one of these IRQs will be used by the
+ Future Domain card. */
+
+static int fdomain_get_irq( int base )
+{
+ int options = inb( base + Configuration1 );
+
+#if DEBUG_DETECT
+ printk( " Options = %x\n", options );
+#endif
+
+ /* Check for board with lowest bios_base --
+ this isn't valid for the 18c30 or for
+ boards on the PCI bus, so just assume we
+ have the right board. */
+
+ if (chip != tmc18c30
+ && !PCI_bus
+ && addresses[ (options & 0xc0) >> 6 ] != bios_base) return 0;
+
+ return ints[ (options & 0x0e) >> 1 ];
+}
+
+static int fdomain_isa_detect( int *irq, int *iobase )
+{
+ int i;
+ int base;
+ int flag = 0;
+
+ if (bios_major == 2) {
+ /* The TMC-1660/TMC-1680 has a RAM area just after the BIOS ROM.
+ Assuming the ROM is enabled (otherwise we wouldn't have been
+ able to read the ROM signature :-), then the ROM sets up the
+ RAM area with some magic numbers, such as a list of port
+ base addresses and a list of the disk "geometry" reported to
+ DOS (this geometry has nothing to do with physical geometry).
+ */
+
+ switch (Quantum) {
+ case 2: /* ISA_200S */
+ case 3: /* ISA_250MG */
+ base = *((char *)bios_base + 0x1fa2)
+ + (*((char *)bios_base + 0x1fa3) << 8);
+ break;
+ case 4: /* ISA_200S (another one) */
+ base = *((char *)bios_base + 0x1fa3)
+ + (*((char *)bios_base + 0x1fa4) << 8);
+ break;
+ default:
+ base = *((char *)bios_base + 0x1fcc)
+ + (*((char *)bios_base + 0x1fcd) << 8);
+ break;
+ }
+
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+
+ for (flag = 0, i = 0; !flag && i < PORT_COUNT; i++) {
+ if (base == ports[i])
+ ++flag;
+ }
+
+ if (flag && fdomain_is_valid_port( base )) {
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+ return 1;
+ }
+
+ /* This is a bad sign. It usually means that someone patched the
+ BIOS signature list (the signatures variable) to contain a BIOS
+ signature for a board *OTHER THAN* the TMC-1660/TMC-1680. */
+
+#if DEBUG_DETECT
+ printk( " RAM FAILED, " );
+#endif
+ }
+
+ /* Anyway, the alternative to finding the address in the RAM is to just
+ search through every possible port address for one that is attached
+ to the Future Domain card. Don't panic, though, about reading all
+ these random port addresses -- there are rumors that the Future
+ Domain BIOS does something very similar.
+
+ Do not, however, check ports which the kernel knows are being used by
+ another driver. */
+
+ for (i = 0; i < PORT_COUNT; i++) {
+ base = ports[i];
+ if (check_region( base, 0x10 )) {
+#if DEBUG_DETECT
+ printk( " (%x inuse),", base );
+#endif
+ continue;
+ }
+#if DEBUG_DETECT
+ printk( " %x,", base );
+#endif
+ if ((flag = fdomain_is_valid_port( base ))) break;
+ }
+
+ if (!flag) return 0; /* iobase not found */
+
+ *irq = fdomain_get_irq( base );
+ *iobase = base;
+
+ return 1; /* success */
+}
+
+static int fdomain_pci_nobios_detect( int *irq, int *iobase )
+{
+ int i;
+ int flag = 0;
+
+ /* The proper way of doing this is to ask the PCI bus for the device
+ IRQ and interrupt level. But we can't do that if PCI BIOS32 support
+ isn't compiled into the kernel, or if a PCI BIOS32 isn't present.
+
+ Instead, we scan down a bunch of addresses (Future Domain tech
+ support says we will probably find the address before we get to
+ 0xf800). This works fine on some systems -- other systems may have
+ to scan more addresses. If you have to modify this section for your
+ installation, please send mail to faith@cs.unc.edu. */
+
+ for (i = 0xfff8; i > 0xe000; i -= 8) {
+ if (check_region( i, 0x10 )) {
+#if DEBUG_DETECT
+ printk( " (%x inuse)," , i );
+#endif
+ continue;
+ }
+ if ((flag = fdomain_is_valid_port( i ))) break;
+ }
+
+ if (!flag) return 0; /* iobase not found */
+
+ *irq = fdomain_get_irq( i );
+ *iobase = i;
+
+ return 1; /* success */
+}
+
+/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
+ iobase) This function gets the Interrupt Level and I/O base address from
+ the PCI configuration registers. The I/O base address is masked with
+ 0xfff8 since on my card the address read from the PCI config registers
+ is off by one from the actual I/O base address necessary for accessing
+ the status and control registers on the card (PCI config register gives
+ 0xf801, actual address is 0xf800). This is likely a bug in the FD
+ config code that writes to the PCI registers, however using a mask
+ should be safe since I think the scan done by the card to determine the
+ I/O base is done in increments of 8 (i.e., 0xf800, 0xf808, ...), at
+ least the old scan code we used to use to get the I/O base did... Also,
+ the device ID from the PCI config registers is 0x0 and should be 0x60e9
+ as it is in the status registers (offset 5 from I/O base). If this is
+ changed in future hardware/BIOS changes it will need to be fixed in this
+ detection function. Comments, bug reports, etc... on this function
+ should be sent to mckinley@msupa.pa.msu.edu - James T. McKinley. */
+
+#ifdef CONFIG_PCI
+static int fdomain_pci_bios_detect( int *irq, int *iobase )
+{
+ int error;
+ unsigned char pci_bus, pci_dev_fn; /* PCI bus & device function */
+ unsigned char pci_irq; /* PCI interrupt line */
+ unsigned int pci_base; /* PCI I/O base address */
+ unsigned short pci_vendor, pci_device; /* PCI vendor & device IDs */
+
+ /* If the PCI BIOS doesn't exist, use the old-style detection routines.
+ Otherwise, get the I/O base address and interrupt from the PCI config
+ registers. */
+
+ if (!pcibios_present()) return fdomain_pci_nobios_detect( irq, iobase );
+
+#if DEBUG_DETECT
+ /* Tell how to print a list of the known PCI devices from bios32 and
+ list vendor and device IDs being used if in debug mode. */
+
+ printk( "\nINFO: cat /proc/pci to see list of PCI devices from bios32\n" );
+ printk( "\nTMC-3260 detect:"
+ " Using PCI Vendor ID: 0x%x, PCI Device ID: 0x%x\n",
+ PCI_VENDOR_ID_FD,
+ PCI_DEVICE_ID_FD_36C70 );
+#endif
+
+ /* We will have to change this if more than 1 PCI bus is present and the
+ FD scsi host is not on the first bus (i.e., a PCI to PCI bridge,
+ which is not supported by bios32 right now anyway). This should
+ probably be done by a call to pcibios_find_device but I can't get it
+ to work... Also the device ID reported from the PCI config registers
+ does not match the device ID quoted in the tech manual or available
+ from offset 5 from the I/O base address. It should be 0x60E9, but it
+ is 0x0 if read from the PCI config registers. I guess the FD folks
+ neglected to write it to the PCI registers... This loop is necessary
+ to get the device function (at least until someone can get
+ pcibios_find_device to work; I cannot, but 53c7,8xx.c uses it...). */
+
+ pci_bus = 0;
+
+ for (pci_dev_fn = 0x0; pci_dev_fn < 0xff; pci_dev_fn++) {
+ pcibios_read_config_word( pci_bus,
+ pci_dev_fn,
+ PCI_VENDOR_ID,
+ &pci_vendor );
+
+ if (pci_vendor == PCI_VENDOR_ID_FD) {
+ pcibios_read_config_word( pci_bus,
+ pci_dev_fn,
+ PCI_DEVICE_ID,
+ &pci_device );
+
+ if (pci_device == PCI_DEVICE_ID_FD_36C70) {
+ /* Break out once we have the correct device. If other FD
+ PCI devices are added to this driver we will need to add
+ an or of the other PCI_DEVICE_ID_FD_XXXXX's here. */
+ break;
+ } else {
+ /* If we can't find an FD scsi card we give up. */
+ return 0;
+ }
+ }
+ }
+
+#if DEBUG_DETECT
+ printk( "Future Domain 36C70 : at PCI bus %u, device %u, function %u\n",
+ pci_bus,
+ (pci_dev_fn & 0xf8) >> 3,
+ pci_dev_fn & 7 );
+#endif
+
+ /* We now have the appropriate device function for the FD board so we
+ just read the PCI config info from the registers. */
+
+ if ((error = pcibios_read_config_dword( pci_bus,
+ pci_dev_fn,
+ PCI_BASE_ADDRESS_0,
+ &pci_base ))
+ || (error = pcibios_read_config_byte( pci_bus,
+ pci_dev_fn,
+ PCI_INTERRUPT_LINE,
+ &pci_irq ))) {
+ printk ( "PCI ERROR: Future Domain 36C70 not initializing"
+ " due to error reading configuration space\n" );
+ return 0;
+ } else {
+#if DEBUG_DETECT
+ printk( "TMC-3260 PCI: IRQ = %u, I/O base = 0x%lx\n",
+ pci_irq, pci_base );
+#endif
+
+ /* Now we have the I/O base address and interrupt from the PCI
+ configuration registers. Unfortunately it seems that the I/O base
+ address is off by one on my card so I mask it with 0xfff8. This
+ must be some kind of goof in the FD code that does the autoconfig
+ and writes to the PCI registers (or maybe I just don't understand
+ something). If they fix it in later versions of the card or BIOS
+ we may have to adjust the address based on the signature or
+ something... */
+
+ *irq = pci_irq;
+ *iobase = (pci_base & 0xfff8);
+
+#if DEBUG_DETECT
+ printk( "TMC-3260 fix: Masking I/O base address with 0xff00.\n" );
+ printk( "TMC-3260: IRQ = %d, I/O base = 0x%x\n", *irq, *iobase );
+#endif
+
+ if (!fdomain_is_valid_port( *iobase )) return 0;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+int fdomain_16x0_detect( Scsi_Host_Template *tpnt )
+{
+ int i, j;
+ int retcode;
+ struct Scsi_Host *shpnt;
+#if DO_DETECT
+ const int buflen = 255;
+ Scsi_Cmnd SCinit;
+ unsigned char do_inquiry[] = { INQUIRY, 0, 0, 0, buflen, 0 };
+ unsigned char do_request_sense[] = { REQUEST_SENSE, 0, 0, 0, buflen, 0 };
+ unsigned char do_read_capacity[] = { READ_CAPACITY,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ unsigned char buf[buflen];
+#endif
+
+#if DEBUG_DETECT
+ printk( "fdomain_16x0_detect()," );
+#endif
+ tpnt->proc_dir = &proc_scsi_fdomain;
+
+ if (setup_called) {
+#if DEBUG_DETECT
+ printk( "no BIOS, using port_base = 0x%x, irq = %d\n",
+ port_base, interrupt_level );
+#endif
+ if (!fdomain_is_valid_port( port_base )) {
+ printk( "fdomain: cannot locate chip at port base 0x%x\n",
+ port_base );
+ printk( "fdomain: bad LILO parameters?\n" );
+ return 0;
+ }
+ } else {
+ int flag = 0;
+
+ for (i = 0; !bios_base && i < ADDRESS_COUNT; i++) {
+#if DEBUG_DETECT
+ printk( " %x(%x),", (unsigned)addresses[i], (unsigned)bios_base );
+#endif
+ for (j = 0; !bios_base && j < SIGNATURE_COUNT; j++) {
+ if (!memcmp( ((char *)addresses[i] + signatures[j].sig_offset),
+ signatures[j].signature, signatures[j].sig_length )) {
+ bios_major = signatures[j].major_bios_version;
+ bios_minor = signatures[j].minor_bios_version;
+ PCI_bus = (signatures[j].flag == 1);
+ Quantum = (signatures[j].flag > 1) ? signatures[j].flag : 0;
+ bios_base = addresses[i];
+ }
+ }
+ }
+
+ if (!bios_base) {
+#if DEBUG_DETECT
+ printk( " FAILED: NO BIOS\n" );
+#endif
+ return 0;
+ }
+
+ if (!PCI_bus) {
+ flag = fdomain_isa_detect( &interrupt_level, &port_base );
+ } else {
+#ifdef CONFIG_PCI
+ flag = fdomain_pci_bios_detect( &interrupt_level, &port_base );
+#else
+ flag = fdomain_pci_nobios_detect( &interrupt_level, &port_base );
+#endif
+ }
+
+ if (!flag) {
+#if DEBUG_DETECT
+ printk( " FAILED: NO PORT\n" );
+#endif
+#ifdef CONFIG_PCI
+ printk( "\nTMC-3260 36C70 PCI scsi chip detection failed.\n" );
+ printk( "Send mail to mckinley@msupa.pa.msu.edu.\n" );
+#endif
+ return 0; /* Cannot find valid set of ports */
+ }
+ }
+
+ SCSI_Mode_Cntl_port = port_base + SCSI_Mode_Cntl;
+ FIFO_Data_Count_port = port_base + FIFO_Data_Count;
+ Interrupt_Cntl_port = port_base + Interrupt_Cntl;
+ Interrupt_Status_port = port_base + Interrupt_Status;
+ Read_FIFO_port = port_base + Read_FIFO;
+ Read_SCSI_Data_port = port_base + Read_SCSI_Data;
+ SCSI_Cntl_port = port_base + SCSI_Cntl;
+ SCSI_Data_NoACK_port = port_base + SCSI_Data_NoACK;
+ SCSI_Status_port = port_base + SCSI_Status;
+ TMC_Cntl_port = port_base + TMC_Cntl;
+ TMC_Status_port = port_base + TMC_Status;
+ Write_FIFO_port = port_base + Write_FIFO;
+ Write_SCSI_Data_port = port_base + Write_SCSI_Data;
+
+ fdomain_16x0_reset( NULL, 0 );
+
+ if (fdomain_test_loopback()) {
+#if DEBUG_DETECT
+ printk( "fdomain: LOOPBACK TEST FAILED, FAILING DETECT!\n" );
+#endif
+ if (setup_called) {
+ printk( "fdomain: loopback test failed at port base 0x%x\n",
+ port_base );
+ printk( "fdomain: bad LILO parameters?\n" );
+ }
+ return 0;
+ }
+
+ if (this_id) {
+ tpnt->this_id = (this_id & 0x07);
+ adapter_mask = (1 << tpnt->this_id);
+ } else {
+ if (PCI_bus || (bios_major == 3 && bios_minor >= 2) || bios_major < 0) {
+ tpnt->this_id = 7;
+ adapter_mask = 0x80;
+ } else {
+ tpnt->this_id = 6;
+ adapter_mask = 0x40;
+ }
+ }
+
+ /* Print out a banner here in case we can't
+ get resources. */
+
+ shpnt = scsi_register( tpnt, 0 );
+ shpnt->irq = interrupt_level;
+ shpnt->io_port = port_base;
+ shpnt->n_io_port = 0x10;
+ print_banner( shpnt );
+
+ /* Log IRQ with kernel */
+ if (!interrupt_level) {
+ panic( "fdomain: *NO* interrupt level selected!\n" );
+ } else {
+ /* Register the IRQ with the kernel */
+
+ retcode = request_irq( interrupt_level,
+ fdomain_16x0_intr, SA_INTERRUPT, "fdomain", NULL);
+
+ if (retcode < 0) {
+ if (retcode == -EINVAL) {
+ printk( "fdomain: IRQ %d is bad!\n", interrupt_level );
+ printk( " This shouldn't happen!\n" );
+ printk( " Send mail to faith@cs.unc.edu\n" );
+ } else if (retcode == -EBUSY) {
+ printk( "fdomain: IRQ %d is already in use!\n", interrupt_level );
+ printk( " Please use another IRQ!\n" );
+ } else {
+ printk( "fdomain: Error getting IRQ %d\n", interrupt_level );
+ printk( " This shouldn't happen!\n" );
+ printk( " Send mail to faith@cs.unc.edu\n" );
+ }
+      panic( "fdomain: Driver requires interrupts\n" );
+ }
+ }
+
+ /* Log I/O ports with kernel */
+ request_region( port_base, 0x10, "fdomain" );
+
+#if DO_DETECT
+
+ /* These routines are here because of the way the SCSI bus behaves after
+ a reset. This appropriate behavior was not handled correctly by the
+ higher level SCSI routines when I first wrote this driver. Now,
+ however, correct scan routines are part of scsi.c and these routines
+ are no longer needed. However, this code is still good for
+ debugging. */
+
+ SCinit.request_buffer = SCinit.buffer = buf;
+ SCinit.request_bufflen = SCinit.bufflen = sizeof(buf)-1;
+ SCinit.use_sg = 0;
+ SCinit.lun = 0;
+
+ printk( "fdomain: detection routine scanning for devices:\n" );
+ for (i = 0; i < 8; i++) {
+ SCinit.target = i;
+ if (i == tpnt->this_id) /* Skip host adapter */
+ continue;
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ memcpy(SCinit.cmnd, do_inquiry, sizeof(do_inquiry));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ printk( " SCSI ID %d: ", i );
+ for (j = 8; j < (buf[4] < 32 ? buf[4] : 32); j++)
+	 printk( "%c", buf[j] >= 0x20 ? buf[j] : ' ' );
+ memcpy(SCinit.cmnd, do_read_capacity, sizeof(do_read_capacity));
+ retcode = fdomain_16x0_command(&SCinit);
+ if (!retcode) {
+ unsigned long blocks, size, capacity;
+
+ blocks = (buf[0] << 24) | (buf[1] << 16)
+ | (buf[2] << 8) | buf[3];
+ size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
+	      capacity = ((blocks / 1024L) * (size * 10L)) / 1024L;
+
+ printk( "%lu MB (%lu byte blocks)",
+ ((capacity + 5L) / 10L), size );
+ } else {
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ }
+ printk ("\n" );
+ } else {
+ memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
+ retcode = fdomain_16x0_command(&SCinit);
+ }
+ }
+ }
+#endif
+
+ return 1; /* Maximum of one adapter will be detected. */
+}
+
+const char *fdomain_16x0_info( struct Scsi_Host *ignore )
+{
+ static char buffer[80];
+ char *pt;
+
+ strcpy( buffer, "Future Domain TMC-16x0 SCSI driver, version" );
+ if (strchr( VERSION, ':')) { /* Assume VERSION is an RCS Revision string */
+ strcat( buffer, strchr( VERSION, ':' ) + 1 );
+ pt = strrchr( buffer, '$') - 1;
+ if (!pt) /* Stripped RCS Revision string? */
+ pt = buffer + strlen( buffer ) - 1;
+ if (*pt != ' ')
+ ++pt;
+ *pt = '\0';
+ } else { /* Assume VERSION is a number */
+ strcat( buffer, " " VERSION );
+ }
+
+ return buffer;
+}
+
+ /* First pass at /proc information routine. */
+/*
+ * inout : decides on the direction of the dataflow and the meaning of the
+ * variables
+ * buffer: If inout==FALSE data is being written to it else read from it
+ * *start: If inout==FALSE start of the valid data in the buffer
+ * offset: If inout==FALSE offset from the beginning of the imaginary file
+ * from which we start writing into the buffer
+ * length: If inout==FALSE max number of bytes to be written into the buffer
+ * else number of bytes in the buffer
+ */
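+/* A hedged usage sketch of the read side described above (illustrative
+   names only, not the actual SCSI midlevel code): the caller passes a
+   page-sized buffer and gets back the byte count for the requested window.
+
+       char page[1024], *start = page;
+       int  n = fdomain_16x0_proc_info( page, &start, offset, length,
+					 hostno, 0 );
+       (n bytes beginning at start correspond to file offset 'offset')
+*/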
+int fdomain_16x0_proc_info( char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout )
+{
+ const char *info = fdomain_16x0_info( NULL );
+ int len;
+ int pos;
+ int begin;
+
+ if (inout) return(-ENOSYS);
+
+ begin = 0;
+ strcpy( buffer, info );
+ strcat( buffer, "\n" );
+
+ pos = len = strlen( buffer );
+
+ if(pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin);
+ if(len > length) len = length;
+
+ return(len);
+}
+
+#if 0
+static int fdomain_arbitrate( void )
+{
+ int status = 0;
+ unsigned long timeout;
+
+#if EVERY_ACCESS
+ printk( "fdomain_arbitrate()\n" );
+#endif
+
+ outb( 0x00, SCSI_Cntl_port ); /* Disable data drivers */
+ outb( adapter_mask, port_base + SCSI_Data_NoACK ); /* Set our id bit */
+ outb( 0x04 | PARITY_MASK, TMC_Cntl_port ); /* Start arbitration */
+
+ timeout = jiffies + 50; /* 500 mS */
+ while (jiffies < timeout) {
+ status = inb( TMC_Status_port ); /* Read adapter status */
+ if (status & 0x02) /* Arbitration complete */
+ return 0;
+ }
+
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+
+#if EVERY_ACCESS
+ printk( "Arbitration failed, status = %x\n", status );
+#endif
+#if ERRORS_ONLY
+ printk( "fdomain: Arbitration failed, status = %x\n", status );
+#endif
+ return 1;
+}
+#endif
+
+static int fdomain_select( int target )
+{
+ int status;
+ unsigned long timeout;
+ static int flag = 0;
+
+
+ outb( 0x82, SCSI_Cntl_port ); /* Bus Enable + Select */
+ outb( adapter_mask | (1 << target), SCSI_Data_NoACK_port );
+
+ /* Stop arbitration and enable parity */
+ outb( PARITY_MASK, TMC_Cntl_port );
+
+ timeout = jiffies + 35; /* 350mS -- because of timeouts
+ (was 250mS) */
+
+ while (jiffies < timeout) {
+ status = inb( SCSI_Status_port ); /* Read adapter status */
+ if (status & 1) { /* Busy asserted */
+ /* Enable SCSI Bus (on error, should make bus idle with 0) */
+ outb( 0x80, SCSI_Cntl_port );
+ return 0;
+ }
+ }
+ /* Make bus idle */
+ fdomain_make_bus_idle();
+#if EVERY_ACCESS
+ if (!target) printk( "Selection failed\n" );
+#endif
+#if ERRORS_ONLY
+ if (!target) {
+ if (!flag) /* Skip first failure for all chips. */
+ ++flag;
+ else
+ printk( "fdomain: Selection failed\n" );
+ }
+#endif
+ return 1;
+}
+
+void my_done( int error )
+{
+ if (in_command) {
+ in_command = 0;
+ outb( 0x00, Interrupt_Cntl_port );
+ fdomain_make_bus_idle();
+ current_SC->result = error;
+ if (current_SC->scsi_done)
+ current_SC->scsi_done( current_SC );
+ else panic( "fdomain: current_SC->scsi_done() == NULL" );
+ } else {
+ panic( "fdomain: my_done() called outside of command\n" );
+ }
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+}
+
+void fdomain_16x0_intr( int irq, void *dev_id, struct pt_regs * regs )
+{
+ int status;
+ int done = 0;
+ unsigned data_count;
+
+ /* The fdomain_16x0_intr is only called via
+ the interrupt handler. The goal of the
+ sti() here is to allow other
+     interrupts to be serviced while this
+     routine is running. */
+
+ sti(); /* Yes, we really want sti() here */
+
+ outb( 0x00, Interrupt_Cntl_port );
+
+ /* We usually have one spurious interrupt after each command. Ignore it. */
+ if (!in_command || !current_SC) { /* Spurious interrupt */
+#if EVERY_ACCESS
+ printk( "Spurious interrupt, in_command = %d, current_SC = %x\n",
+ in_command, current_SC );
+#endif
+ return;
+ }
+
+ /* Abort calls my_done, so we do nothing here. */
+ if (current_SC->SCp.phase & aborted) {
+#if DEBUG_ABORT
+ printk( "Interrupt after abort, ignoring\n" );
+#endif
+ /*
+ return; */
+ }
+
+#if DEBUG_RACE
+ ++in_interrupt_flag;
+#endif
+
+ if (current_SC->SCp.phase & in_arbitration) {
+ status = inb( TMC_Status_port ); /* Read adapter status */
+ if (!(status & 0x02)) {
+#if EVERY_ACCESS
+ printk( " AFAIL " );
+#endif
+ my_done( DID_BUS_BUSY << 16 );
+ return;
+ }
+ current_SC->SCp.phase = in_selection;
+
+ outb( 0x40 | FIFO_COUNT, Interrupt_Cntl_port );
+
+ outb( 0x82, SCSI_Cntl_port ); /* Bus Enable + Select */
+ outb( adapter_mask | (1 << current_SC->target), SCSI_Data_NoACK_port );
+
+ /* Stop arbitration and enable parity */
+ outb( 0x10 | PARITY_MASK, TMC_Cntl_port );
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return;
+ } else if (current_SC->SCp.phase & in_selection) {
+ status = inb( SCSI_Status_port );
+ if (!(status & 0x01)) {
+ /* Try again, for slow devices */
+ if (fdomain_select( current_SC->target )) {
+#if EVERY_ACCESS
+ printk( " SFAIL " );
+#endif
+ my_done( DID_NO_CONNECT << 16 );
+ return;
+ } else {
+#if EVERY_ACCESS
+ printk( " AltSel " );
+#endif
+ /* Stop arbitration and enable parity */
+ outb( 0x10 | PARITY_MASK, TMC_Cntl_port );
+ }
+ }
+ current_SC->SCp.phase = in_other;
+ outb( 0x90 | FIFO_COUNT, Interrupt_Cntl_port );
+ outb( 0x80, SCSI_Cntl_port );
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return;
+ }
+
+ /* current_SC->SCp.phase == in_other: this is the body of the routine */
+
+ status = inb( SCSI_Status_port );
+
+ if (status & 0x10) { /* REQ */
+
+ switch (status & 0x0e) {
+
+ case 0x08: /* COMMAND OUT */
+ outb( current_SC->cmnd[current_SC->SCp.sent_command++],
+ Write_SCSI_Data_port );
+#if EVERY_ACCESS
+ printk( "CMD = %x,",
+ current_SC->cmnd[ current_SC->SCp.sent_command - 1] );
+#endif
+ break;
+ case 0x00: /* DATA OUT -- tmc18c50/tmc18c30 only */
+ if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
+ current_SC->SCp.have_data_in = -1;
+ outb( 0xd0 | PARITY_MASK, TMC_Cntl_port );
+ }
+ break;
+ case 0x04: /* DATA IN -- tmc18c50/tmc18c30 only */
+ if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
+ current_SC->SCp.have_data_in = 1;
+ outb( 0x90 | PARITY_MASK, TMC_Cntl_port );
+ }
+ break;
+ case 0x0c: /* STATUS IN */
+ current_SC->SCp.Status = inb( Read_SCSI_Data_port );
+#if EVERY_ACCESS
+ printk( "Status = %x, ", current_SC->SCp.Status );
+#endif
+#if ERRORS_ONLY
+ if (current_SC->SCp.Status
+ && current_SC->SCp.Status != 2
+ && current_SC->SCp.Status != 8) {
+ printk( "fdomain: target = %d, command = %x, status = %x\n",
+ current_SC->target,
+ current_SC->cmnd[0],
+ current_SC->SCp.Status );
+ }
+#endif
+ break;
+ case 0x0a: /* MESSAGE OUT */
+ outb( MESSAGE_REJECT, Write_SCSI_Data_port ); /* Reject */
+ break;
+ case 0x0e: /* MESSAGE IN */
+ current_SC->SCp.Message = inb( Read_SCSI_Data_port );
+#if EVERY_ACCESS
+ printk( "Message = %x, ", current_SC->SCp.Message );
+#endif
+ if (!current_SC->SCp.Message) ++done;
+#if DEBUG_MESSAGES || EVERY_ACCESS
+ if (current_SC->SCp.Message) {
+ printk( "fdomain: message = %x\n", current_SC->SCp.Message );
+ }
+#endif
+ break;
+ }
+ }
+
+ if (chip == tmc1800
+ && !current_SC->SCp.have_data_in
+ && (current_SC->SCp.sent_command
+ >= current_SC->cmd_len)) {
+ /* We have to get the FIFO direction
+ correct, so I've made a table based
+ on the SCSI Standard of which commands
+ appear to require a DATA OUT phase.
+ */
+ /*
+ p. 94: Command for all device types
+ CHANGE DEFINITION 40 DATA OUT
+ COMPARE 39 DATA OUT
+ COPY 18 DATA OUT
+ COPY AND VERIFY 3a DATA OUT
+ INQUIRY 12
+ LOG SELECT 4c DATA OUT
+ LOG SENSE 4d
+ MODE SELECT (6) 15 DATA OUT
+ MODE SELECT (10) 55 DATA OUT
+ MODE SENSE (6) 1a
+ MODE SENSE (10) 5a
+ READ BUFFER 3c
+ RECEIVE DIAGNOSTIC RESULTS 1c
+ REQUEST SENSE 03
+ SEND DIAGNOSTIC 1d DATA OUT
+ TEST UNIT READY 00
+ WRITE BUFFER 3b DATA OUT
+
+ p.178: Commands for direct-access devices (not listed on p. 94)
+ FORMAT UNIT 04 DATA OUT
+ LOCK-UNLOCK CACHE 36
+ PRE-FETCH 34
+ PREVENT-ALLOW MEDIUM REMOVAL 1e
+ READ (6)/RECEIVE 08
+	 READ (10)                    28
+ READ CAPACITY 25
+ READ DEFECT DATA (10) 37
+ READ LONG 3e
+ REASSIGN BLOCKS 07 DATA OUT
+ RELEASE 17
+ RESERVE 16 DATA OUT
+ REZERO UNIT/REWIND 01
+ SEARCH DATA EQUAL (10) 31 DATA OUT
+ SEARCH DATA HIGH (10) 30 DATA OUT
+ SEARCH DATA LOW (10) 32 DATA OUT
+ SEEK (6) 0b
+ SEEK (10) 2b
+ SET LIMITS (10) 33
+ START STOP UNIT 1b
+ SYNCHRONIZE CACHE 35
+ VERIFY (10) 2f
+ WRITE (6)/PRINT/SEND 0a DATA OUT
+ WRITE (10)/SEND 2a DATA OUT
+ WRITE AND VERIFY (10) 2e DATA OUT
+ WRITE LONG 3f DATA OUT
+ WRITE SAME 41 DATA OUT ?
+
+ p. 261: Commands for sequential-access devices (not previously listed)
+ ERASE 19
+ LOAD UNLOAD 1b
+ LOCATE 2b
+ READ BLOCK LIMITS 05
+ READ POSITION 34
+ READ REVERSE 0f
+ RECOVER BUFFERED DATA 14
+ SPACE 11
+ WRITE FILEMARKS 10 ?
+
+ p. 298: Commands for printer devices (not previously listed)
+ ****** NOT SUPPORTED BY THIS DRIVER, since 0b is SEEK (6) *****
+ SLEW AND PRINT 0b DATA OUT -- same as seek
+ STOP PRINT 1b
+ SYNCHRONIZE BUFFER 10
+
+ p. 315: Commands for processor devices (not previously listed)
+
+ p. 321: Commands for write-once devices (not previously listed)
+ MEDIUM SCAN 38
+ READ (12) a8
+ SEARCH DATA EQUAL (12) b1 DATA OUT
+ SEARCH DATA HIGH (12) b0 DATA OUT
+ SEARCH DATA LOW (12) b2 DATA OUT
+ SET LIMITS (12) b3
+ VERIFY (12) af
+ WRITE (12) aa DATA OUT
+ WRITE AND VERIFY (12) ae DATA OUT
+
+ p. 332: Commands for CD-ROM devices (not previously listed)
+ PAUSE/RESUME 4b
+ PLAY AUDIO (10) 45
+ PLAY AUDIO (12) a5
+ PLAY AUDIO MSF 47
+ PLAY TRACK RELATIVE (10) 49
+ PLAY TRACK RELATIVE (12) a9
+ READ HEADER 44
+ READ SUB-CHANNEL 42
+ READ TOC 43
+
+ p. 370: Commands for scanner devices (not previously listed)
+ GET DATA BUFFER STATUS 34
+ GET WINDOW 25
+ OBJECT POSITION 31
+ SCAN 1b
+ SET WINDOW 24 DATA OUT
+
+ p. 391: Commands for optical memory devices (not listed)
+ ERASE (10) 2c
+ ERASE (12) ac
+ MEDIUM SCAN 38 DATA OUT
+ READ DEFECT DATA (12) b7
+ READ GENERATION 29
+ READ UPDATED BLOCK 2d
+ UPDATE BLOCK 3d DATA OUT
+
+ p. 419: Commands for medium changer devices (not listed)
+ EXCHANGE MEDIUM 46
+ INITIALIZE ELEMENT STATUS 07
+ MOVE MEDIUM a5
+ POSITION TO ELEMENT 2b
+ READ ELEMENT STATUS b8
+ REQUEST VOL. ELEMENT ADDRESS b5
+ SEND VOLUME TAG b6 DATA OUT
+
+ p. 454: Commands for communications devices (not listed previously)
+ GET MESSAGE (6) 08
+ GET MESSAGE (10) 28
+ GET MESSAGE (12) a8
+ */
+
+ switch (current_SC->cmnd[0]) {
+ case CHANGE_DEFINITION: case COMPARE: case COPY:
+ case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
+ case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
+
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case WRITE_6: case WRITE_10: case WRITE_VERIFY:
+ case 0x3f: case 0x41:
+
+ case 0xb1: case 0xb0: case 0xb2:
+ case 0xaa: case 0xae:
+
+ case 0x24:
+
+ case 0x38: case 0x3d:
+
+ case 0xb6:
+
+ case 0xea: /* alternate number for WRITE LONG */
+
+ current_SC->SCp.have_data_in = -1;
+ outb( 0xd0 | PARITY_MASK, TMC_Cntl_port );
+ break;
+
+ case 0x00:
+ default:
+
+ current_SC->SCp.have_data_in = 1;
+ outb( 0x90 | PARITY_MASK, TMC_Cntl_port );
+ break;
+ }
+ }
+
+ if (current_SC->SCp.have_data_in == -1) { /* DATA OUT */
+ while ( (data_count = FIFO_Size - inw( FIFO_Data_Count_port )) > 512 ) {
+#if EVERY_ACCESS
+ printk( "DC=%d, ", data_count ) ;
+#endif
+ if (data_count > current_SC->SCp.this_residual)
+ data_count = current_SC->SCp.this_residual;
+ if (data_count > 0) {
+#if EVERY_ACCESS
+ printk( "%d OUT, ", data_count );
+#endif
+ if (data_count == 1) {
+ outb( *current_SC->SCp.ptr++, Write_FIFO_port );
+ --current_SC->SCp.this_residual;
+ } else {
+ data_count >>= 1;
+ outsw( Write_FIFO_port, current_SC->SCp.ptr, data_count );
+ current_SC->SCp.ptr += 2 * data_count;
+ current_SC->SCp.this_residual -= 2 * data_count;
+ }
+ }
+ if (!current_SC->SCp.this_residual) {
+ if (current_SC->SCp.buffers_residual) {
+ --current_SC->SCp.buffers_residual;
+ ++current_SC->SCp.buffer;
+ current_SC->SCp.ptr = current_SC->SCp.buffer->address;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ } else
+ break;
+ }
+ }
+ }
+
+ if (current_SC->SCp.have_data_in == 1) { /* DATA IN */
+ while ((data_count = inw( FIFO_Data_Count_port )) > 0) {
+#if EVERY_ACCESS
+ printk( "DC=%d, ", data_count );
+#endif
+ if (data_count > current_SC->SCp.this_residual)
+ data_count = current_SC->SCp.this_residual;
+ if (data_count) {
+#if EVERY_ACCESS
+ printk( "%d IN, ", data_count );
+#endif
+ if (data_count == 1) {
+ *current_SC->SCp.ptr++ = inb( Read_FIFO_port );
+ --current_SC->SCp.this_residual;
+ } else {
+ data_count >>= 1; /* Number of words */
+ insw( Read_FIFO_port, current_SC->SCp.ptr, data_count );
+ current_SC->SCp.ptr += 2 * data_count;
+ current_SC->SCp.this_residual -= 2 * data_count;
+ }
+ }
+ if (!current_SC->SCp.this_residual
+ && current_SC->SCp.buffers_residual) {
+ --current_SC->SCp.buffers_residual;
+ ++current_SC->SCp.buffer;
+ current_SC->SCp.ptr = current_SC->SCp.buffer->address;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ }
+ }
+ }
+
+ if (done) {
+#if EVERY_ACCESS
+ printk( " ** IN DONE %d ** ", current_SC->SCp.have_data_in );
+#endif
+
+#if ERRORS_ONLY
+ if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
+ if ((unsigned char)(*((char *)current_SC->request_buffer+2)) & 0x0f) {
+ unsigned char key;
+ unsigned char code;
+ unsigned char qualifier;
+
+ key = (unsigned char)(*((char *)current_SC->request_buffer + 2))
+ & 0x0f;
+ code = (unsigned char)(*((char *)current_SC->request_buffer + 12));
+ qualifier = (unsigned char)(*((char *)current_SC->request_buffer
+ + 13));
+
+ if (key != UNIT_ATTENTION
+ && !(key == NOT_READY
+ && code == 0x04
+ && (!qualifier || qualifier == 0x02 || qualifier == 0x01))
+ && !(key == ILLEGAL_REQUEST && (code == 0x25
+ || code == 0x24
+ || !code)))
+
+ printk( "fdomain: REQUEST SENSE "
+ "Key = %x, Code = %x, Qualifier = %x\n",
+ key, code, qualifier );
+ }
+ }
+#endif
+#if EVERY_ACCESS
+ printk( "BEFORE MY_DONE. . ." );
+#endif
+ my_done( (current_SC->SCp.Status & 0xff)
+ | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
+#if EVERY_ACCESS
+ printk( "RETURNING.\n" );
+#endif
+
+ } else {
+ if (current_SC->SCp.phase & disconnect) {
+ outb( 0xd0 | FIFO_COUNT, Interrupt_Cntl_port );
+ outb( 0x00, SCSI_Cntl_port );
+ } else {
+ outb( 0x90 | FIFO_COUNT, Interrupt_Cntl_port );
+ }
+ }
+#if DEBUG_RACE
+ in_interrupt_flag = 0;
+#endif
+ return;
+}
+
+int fdomain_16x0_queue( Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ if (in_command) {
+ panic( "fdomain: fdomain_16x0_queue() NOT REENTRANT!\n" );
+ }
+#if EVERY_ACCESS
+ printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->target,
+ *(unsigned char *)SCpnt->cmnd,
+ SCpnt->use_sg,
+ SCpnt->request_bufflen );
+#endif
+
+ fdomain_make_bus_idle();
+
+ current_SC = SCpnt; /* Save this for the done function */
+ current_SC->scsi_done = done;
+
+ /* Initialize static data */
+
+ if (current_SC->use_sg) {
+ current_SC->SCp.buffer =
+ (struct scatterlist *)current_SC->request_buffer;
+ current_SC->SCp.ptr = current_SC->SCp.buffer->address;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
+ } else {
+ current_SC->SCp.ptr = (char *)current_SC->request_buffer;
+ current_SC->SCp.this_residual = current_SC->request_bufflen;
+ current_SC->SCp.buffer = NULL;
+ current_SC->SCp.buffers_residual = 0;
+ }
+
+
+ current_SC->SCp.Status = 0;
+ current_SC->SCp.Message = 0;
+ current_SC->SCp.have_data_in = 0;
+ current_SC->SCp.sent_command = 0;
+ current_SC->SCp.phase = in_arbitration;
+
+ /* Start arbitration */
+ outb( 0x00, Interrupt_Cntl_port );
+ outb( 0x00, SCSI_Cntl_port ); /* Disable data drivers */
+ outb( adapter_mask, SCSI_Data_NoACK_port ); /* Set our id bit */
+ ++in_command;
+ outb( 0x20, Interrupt_Cntl_port );
+ outb( 0x14 | PARITY_MASK, TMC_Cntl_port ); /* Start arbitration */
+
+ return 0;
+}
+
+/* The following code, which simulates the old-style command function, was
+ taken from Tommy Thorn's aha1542.c file. This code is Copyright (C)
+ 1992 Tommy Thorn. */
+
+static volatile int internal_done_flag = 0;
+static volatile int internal_done_errcode = 0;
+
+static void internal_done( Scsi_Cmnd *SCpnt )
+{
+ internal_done_errcode = SCpnt->result;
+ ++internal_done_flag;
+}
+
+int fdomain_16x0_command( Scsi_Cmnd *SCpnt )
+{
+ fdomain_16x0_queue( SCpnt, internal_done );
+
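+
+  /* Busy-wait for completion; internal_done() is invoked from the
+     interrupt path (via my_done()) and sets the flag. */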
+ while (!internal_done_flag)
+ ;
+ internal_done_flag = 0;
+ return internal_done_errcode;
+}
+
+/* End of code derived from Tommy Thorn's work. */
+
+void print_info( Scsi_Cmnd *SCpnt )
+{
+ unsigned int imr;
+ unsigned int irr;
+ unsigned int isr;
+
+   if (!SCpnt || !SCpnt->host) {
+      printk( "fdomain: cannot provide detailed information\n" );
+      return;
+   }
+
+ printk( "%s\n", fdomain_16x0_info( SCpnt->host ) );
+ print_banner( SCpnt->host );
+ switch (SCpnt->SCp.phase) {
+ case in_arbitration: printk( "arbitration " ); break;
+ case in_selection: printk( "selection " ); break;
+ case in_other: printk( "other " ); break;
+ default: printk( "unknown " ); break;
+ }
+
+ printk( "(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
+ SCpnt->SCp.phase,
+ SCpnt->target,
+ *(unsigned char *)SCpnt->cmnd,
+ SCpnt->use_sg,
+ SCpnt->request_bufflen );
+ printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
+ SCpnt->SCp.sent_command,
+ SCpnt->SCp.have_data_in,
+ SCpnt->timeout );
+#if DEBUG_RACE
+ printk( "in_interrupt_flag = %d\n", in_interrupt_flag );
+#endif
+
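+  /* Peek at the 8259 interrupt controllers: the mask (IMR) is read directly
+     from ports 0x21/0xa1, while OCW3 commands 0x0a/0x0b select the request
+     (IRR) and in-service (ISR) registers for reading at 0x20/0xa0. */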
+ imr = (inb( 0x0a1 ) << 8) + inb( 0x21 );
+ outb( 0x0a, 0xa0 );
+ irr = inb( 0xa0 ) << 8;
+ outb( 0x0a, 0x20 );
+ irr += inb( 0x20 );
+ outb( 0x0b, 0xa0 );
+ isr = inb( 0xa0 ) << 8;
+ outb( 0x0b, 0x20 );
+ isr += inb( 0x20 );
+
+ /* Print out interesting information */
+ printk( "IMR = 0x%04x", imr );
+ if (imr & (1 << interrupt_level))
+ printk( " (masked)" );
+ printk( ", IRR = 0x%04x, ISR = 0x%04x\n", irr, isr );
+
+ printk( "SCSI Status = 0x%02x\n", inb( SCSI_Status_port ) );
+ printk( "TMC Status = 0x%02x", inb( TMC_Status_port ) );
+  if (inb( TMC_Status_port ) & 1)
+ printk( " (interrupt)" );
+ printk( "\n" );
+ printk( "Interrupt Status = 0x%02x", inb( Interrupt_Status_port ) );
+ if (inb( Interrupt_Status_port ) & 0x08)
+ printk( " (enabled)" );
+ printk( "\n" );
+ if (chip == tmc18c50 || chip == tmc18c30) {
+ printk( "FIFO Status = 0x%02x\n", inb( port_base + FIFO_Status ) );
+ printk( "Int. Condition = 0x%02x\n",
+ inb( port_base + Interrupt_Cond ) );
+ }
+ printk( "Configuration 1 = 0x%02x\n", inb( port_base + Configuration1 ) );
+ if (chip == tmc18c50 || chip == tmc18c30)
+ printk( "Configuration 2 = 0x%02x\n",
+ inb( port_base + Configuration2 ) );
+}
+
+int fdomain_16x0_abort( Scsi_Cmnd *SCpnt)
+{
+ unsigned long flags;
+#if EVERY_ACCESS || ERRORS_ONLY || DEBUG_ABORT
+ printk( "fdomain: abort " );
+#endif
+
+ save_flags( flags );
+ cli();
+ if (!in_command) {
+#if EVERY_ACCESS || ERRORS_ONLY
+ printk( " (not in command)\n" );
+#endif
+ restore_flags( flags );
+ return SCSI_ABORT_NOT_RUNNING;
+ } else printk( "\n" );
+
+#if DEBUG_ABORT
+ print_info( SCpnt );
+#endif
+
+ fdomain_make_bus_idle();
+
+ current_SC->SCp.phase |= aborted;
+
+ current_SC->result = DID_ABORT << 16;
+
+ restore_flags( flags );
+
+ /* Aborts are not done well. . . */
+ my_done( DID_ABORT << 16 );
+
+ return SCSI_ABORT_SUCCESS;
+}
+
+int fdomain_16x0_reset( Scsi_Cmnd *SCpnt, unsigned int flags )
+{
+#if DEBUG_RESET
+ static int called_once = 0;
+#endif
+
+#if ERRORS_ONLY
+ if (SCpnt) printk( "fdomain: SCSI Bus Reset\n" );
+#endif
+
+#if DEBUG_RESET
+ if (called_once) print_info( current_SC );
+ called_once = 1;
+#endif
+
+ outb( 1, SCSI_Cntl_port );
+ do_pause( 2 );
+ outb( 0, SCSI_Cntl_port );
+ do_pause( 115 );
+ outb( 0, SCSI_Mode_Cntl_port );
+ outb( PARITY_MASK, TMC_Cntl_port );
+
+   /* Unless this is the very first call (i.e., SCpnt == NULL), everything
+ is probably hosed at this point. We will, however, try to keep
+ things going by informing the high-level code that we need help. */
+
+ return SCSI_RESET_WAKEUP;
+}
+
+#include "sd.h"
+#include <scsi/scsi_ioctl.h>
+
+int fdomain_16x0_biosparam( Scsi_Disk *disk, kdev_t dev, int *info_array )
+{
+ int drive;
+ unsigned char buf[512 + sizeof( int ) * 2];
+ int size = disk->capacity;
+ int *sizes = (int *)buf;
+ unsigned char *data = (unsigned char *)(sizes + 2);
+ unsigned char do_read[] = { READ_6, 0, 0, 0, 1, 0 };
+ int retcode;
+ struct drive_info {
+ unsigned short cylinders;
+ unsigned char heads;
+ unsigned char sectors;
+ } *i;
+
+ /* NOTES:
+ The RAM area starts at 0x1f00 from the bios_base address.
+
+ For BIOS Version 2.0:
+
+ The drive parameter table seems to start at 0x1f30.
+ The first byte's purpose is not known.
+ Next is the cylinder, head, and sector information.
+ The last 4 bytes appear to be the drive's size in sectors.
+ The other bytes in the drive parameter table are unknown.
+ If anyone figures them out, please send me mail, and I will
+ update these notes.
+
+ Tape drives do not get placed in this table.
+
+ There is another table at 0x1fea:
+ If the byte is 0x01, then the SCSI ID is not in use.
+ If the byte is 0x18 or 0x48, then the SCSI ID is in use,
+ although tapes don't seem to be in this table. I haven't
+ seen any other numbers (in a limited sample).
+
+ 0x1f2d is a drive count (i.e., not including tapes)
+
+ The table at 0x1fcc are I/O ports addresses for the various
+ operations. I calculate these by hand in this driver code.
+
+
+
+ For the ISA-200S version of BIOS Version 2.0:
+
+ The drive parameter table starts at 0x1f33.
+
+ WARNING: Assume that the table entry is 25 bytes long. Someone needs
+ to check this for the Quantum ISA-200S card.
+
+
+
+ For BIOS Version 3.2:
+
+ The drive parameter table starts at 0x1f70. Each entry is
+ 0x0a bytes long. Heads are one less than we need to report.
+ */
+
+ drive = MINOR(dev) / 16;
+
+ if (bios_major == 2) {
+ switch (Quantum) {
+ case 2: /* ISA_200S */
+ /* The value of 25 has never been verified.
+ It should probably be 15. */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f33 + drive * 25 );
+ break;
+ case 3: /* ISA_250MG */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f36 + drive * 15 );
+ break;
+ case 4: /* ISA_200S (another one) */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f34 + drive * 15 );
+ break;
+ default:
+ i = (struct drive_info *)( (char *)bios_base + 0x1f31 + drive * 25 );
+ break;
+ }
+ info_array[0] = i->heads;
+ info_array[1] = i->sectors;
+ info_array[2] = i->cylinders;
+ } else if (bios_major == 3
+ && bios_minor >= 0
+ && bios_minor < 4) { /* 3.0 and 3.2 BIOS */
+ i = (struct drive_info *)( (char *)bios_base + 0x1f71 + drive * 10 );
+ info_array[0] = i->heads + 1;
+ info_array[1] = i->sectors;
+ info_array[2] = i->cylinders;
+ } else { /* 3.4 BIOS (and up?) */
+ /* This algorithm was provided by Future Domain (much thanks!). */
+
+ sizes[0] = 0; /* zero bytes out */
+ sizes[1] = 512; /* one sector in */
+ memcpy( data, do_read, sizeof( do_read ) );
+ retcode = kernel_scsi_ioctl( disk->device,
+ SCSI_IOCTL_SEND_COMMAND,
+ (void *)buf );
+ if (!retcode /* SCSI command ok */
+ && data[511] == 0xaa && data[510] == 0x55 /* Partition table valid */
+ && data[0x1c2]) { /* Partition type */
+
+ /* The partition table layout is as follows:
+
+	    Start: 0x1be
+ Offset: 0 = partition status
+ 1 = starting head
+ 2 = starting sector and cylinder (word, encoded)
+ 4 = partition type
+ 5 = ending head
+ 6 = ending sector and cylinder (word, encoded)
+ 8 = starting absolute sector (double word)
+ c = number of sectors (double word)
+ Signature: 0x1fe = 0x55aa
+
+ So, this algorithm assumes:
+ 1) the first partition table is in use,
+ 2) the data in the first entry is correct, and
+ 3) partitions never divide cylinders
+
+ Note that (1) may be FALSE for NetBSD (and other BSD flavors),
+ as well as for Linux. Note also, that Linux doesn't pay any
+ attention to the fields that are used by this algorithm -- it
+ only uses the absolute sector data. Recent versions of Linux's
+ fdisk(1) will fill this data in correctly, and forthcoming
+ versions will check for consistency.
+
+ Checking for a non-zero partition type is not part of the
+ Future Domain algorithm, but it seemed to be a reasonable thing
+ to do, especially in the Linux and BSD worlds. */
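+
+	 /* For completeness, the usual decoding of the "encoded" sector and
+	    cylinder word mentioned above (a sketch; only the ending head and
+	    the low six sector bits are actually used below):
+
+		sect = data[0x1c4] & 0x3f;
+		cyl  = ((data[0x1c4] & 0xc0) << 2) | data[0x1c5];
+	 */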
+
+ info_array[0] = data[0x1c3] + 1; /* heads */
+ info_array[1] = data[0x1c4] & 0x3f; /* sectors */
+ } else {
+
+ /* Note that this new method guarantees that there will always be
+ less than 1024 cylinders on a platter. This is good for drives
+ up to approximately 7.85GB (where 1GB = 1024 * 1024 kB). */
+
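+      /* Where those thresholds come from (sketch of the arithmetic):
+	 0x200000 sectors =  64 heads * 32 sectors * 1024 cylinders and
+	 0x7e0000 sectors = 128 heads * 63 sectors * 1024 cylinders, so each
+	 geometry is bumped just before it would require 1024 cylinders.
+	 The top geometry caps out near 255 * 63 * 1023 * 512 bytes,
+	 i.e. roughly 7.85 GB with 1 GB = 2^30 bytes. */
+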
+ if ((unsigned int)size >= 0x7e0000U) {
+ info_array[0] = 0xff; /* heads = 255 */
+ info_array[1] = 0x3f; /* sectors = 63 */
+ } else if ((unsigned int)size >= 0x200000U) {
+ info_array[0] = 0x80; /* heads = 128 */
+ info_array[1] = 0x3f; /* sectors = 63 */
+ } else {
+ info_array[0] = 0x40; /* heads = 64 */
+ info_array[1] = 0x20; /* sectors = 32 */
+ }
+ }
+ /* For both methods, compute the cylinders */
+ info_array[2] = (unsigned int)size / (info_array[0] * info_array[1] );
+ }
+
+ return 0;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = FDOMAIN_16X0;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/fdomain.h b/linux/src/drivers/scsi/fdomain.h
new file mode 100644
index 0000000..bea9998
--- /dev/null
+++ b/linux/src/drivers/scsi/fdomain.h
@@ -0,0 +1,61 @@
+/* fdomain.h -- Header for Future Domain TMC-16x0 driver
+ * Created: Sun May 3 18:47:33 1992 by faith@cs.unc.edu
+ * Revised: Thu Oct 12 13:21:35 1995 by r.faith@ieee.org
+ * Author: Rickard E. Faith, faith@cs.unc.edu
+ * Copyright 1992, 1993, 1994, 1995 Rickard E. Faith
+ *
+ * $Id: fdomain.h,v 1.1 1999/04/26 05:54:33 tb Exp $
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+#ifndef _FDOMAIN_H
+#define _FDOMAIN_H
+
+int fdomain_16x0_detect( Scsi_Host_Template * );
+int fdomain_16x0_command( Scsi_Cmnd * );
+int fdomain_16x0_abort( Scsi_Cmnd * );
+const char *fdomain_16x0_info( struct Scsi_Host * );
+int fdomain_16x0_reset( Scsi_Cmnd *, unsigned int );
+int fdomain_16x0_queue( Scsi_Cmnd *, void (*done)(Scsi_Cmnd *) );
+int fdomain_16x0_biosparam( Disk *, kdev_t, int * );
+int fdomain_16x0_proc_info( char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout );
+
+extern struct proc_dir_entry proc_scsi_fdomain;
+
+#define FDOMAIN_16X0 { NULL, \
+ NULL, \
+ NULL, \
+ fdomain_16x0_proc_info, \
+ NULL, \
+ fdomain_16x0_detect, \
+ NULL, \
+ fdomain_16x0_info, \
+ fdomain_16x0_command, \
+ fdomain_16x0_queue, \
+ fdomain_16x0_abort, \
+ fdomain_16x0_reset, \
+ NULL, \
+ fdomain_16x0_biosparam, \
+ 1, \
+ 6, \
+ 64, \
+ 1, \
+ 0, \
+ 0, \
+ DISABLE_CLUSTERING }
+#endif
diff --git a/linux/src/drivers/scsi/g_NCR5380.c b/linux/src/drivers/scsi/g_NCR5380.c
new file mode 100644
index 0000000..a141b93
--- /dev/null
+++ b/linux/src/drivers/scsi/g_NCR5380.c
@@ -0,0 +1,729 @@
+/*
+ * Generic Generic NCR5380 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * TODO : flesh out DMA support, find someone actually using this (I have
+ * a memory mapped Trantor board that works fine)
+ */
+
+/*
+ * Options :
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. With command line overrides - NCR5380=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is
+ * specified as an array of address, irq, dma, board tuples. Ie, for
+ * one board at 0x350, IRQ5, no dma, I could say
+ *      -DGENERIC_NCR5380_OVERRIDE={{0x350, 5, DMA_NONE, BOARD_NCR5380}}
+ *
+ * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ * 3. When included as a module, with arguments passed on the command line:
+ * ncr_irq=xx the interrupt
+ * ncr_addr=xx the port or base address (for port or memory
+ * mapped, resp.)
+ * ncr_dma=xx the DMA
+ * ncr_5380=1 to set up for a NCR5380 board
+ * ncr_53c400=1 to set up for a NCR53C400 board
+ * e.g.
+ * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+ * for a port mapped NCR5380 board or
+ * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+ * for a memory mapped NCR53C400 board with interrupts disabled.
+ *
+ * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an
+ * IRQ line if overridden on the command line.
+ *
+ */
+
+#define AUTOPROBE_IRQ
+#define AUTOSENSE
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SCSI_GENERIC_NCR53C400
+#define NCR53C400_PSEUDO_DMA 1
+#define PSEUDO_DMA
+#define NCR53C400
+#define NCR5380_STATS
+#undef NCR5380_STAT_LIMIT
+#endif
+#if defined(CONFIG_SCSI_G_NCR5380_PORT) && defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You can not configure the Generic NCR 5380 SCSI Driver for memory mapped I/O and port mapped I/O at the same time (yet)
+#endif
+#if !defined(CONFIG_SCSI_G_NCR5380_PORT) && !defined(CONFIG_SCSI_G_NCR5380_MEM)
+#error You must configure the Generic NCR 5380 SCSI Driver for one of memory mapped I/O and port mapped I/O.
+#endif
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "g_NCR5380.h"
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_g_ncr5380 = {
+ PROC_SCSI_GENERIC_NCR5380, 9, "g_NCR5380",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define NCR_NOT_SET 0
+static int ncr_irq=NCR_NOT_SET;
+static int ncr_dma=NCR_NOT_SET;
+static int ncr_addr=NCR_NOT_SET;
+static int ncr_5380=NCR_NOT_SET;
+static int ncr_53c400=NCR_NOT_SET;
+
+static struct override {
+ NCR5380_implementation_fields;
+ int irq;
+ int dma;
+ int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
+} overrides
+#ifdef GENERIC_NCR5380_OVERRIDE
+ [] = GENERIC_NCR5380_OVERRIDE
+#else
+ [1] = {{0,},};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+/*
+ * Function : static internal_setup(int board, char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : board - either BOARD_NCR5380 for a normal NCR5380 board,
+ * or BOARD_NCR53C400 for a NCR53C400 board. str - unused, ints -
+ * array of integer parameters with ints[0] equal to the number of ints.
+ *
+ */
+
+static void internal_setup(int board, char *str, int *ints) {
+ static int commandline_current = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ if (ints[0] != 2 && ints[0] != 3) {
+ printk("generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
+ return;
+	}
+	break;
+ case BOARD_NCR53C400:
+ if (ints[0] != 2) {
+ printk("generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
+ return;
+ }
+ }
+
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type)ints[1];
+ overrides[commandline_current].irq = ints[2];
+ if (ints[0] == 3)
+ overrides[commandline_current].dma = ints[3];
+ else
+ overrides[commandline_current].dma = DMA_NONE;
+ overrides[commandline_current].board = board;
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : generic_NCR5380_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR5380_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR5380, str, ints);
+}
+
+/*
+ * Function : generic_NCR53C400_setup (char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ */
+
+void generic_NCR53C400_setup (char *str, int *ints) {
+ internal_setup (BOARD_NCR53C400, str, ints);
+}
+
+/*
+ * Function : int generic_NCR5380_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : initializes generic NCR5380 driver based on the
+ * command line / compile time port and irq definitions.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int generic_NCR5380_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0;
+ int count;
+ int flags = 0;
+ struct Scsi_Host *instance;
+
+ if (ncr_irq != NCR_NOT_SET)
+ overrides[0].irq=ncr_irq;
+ if (ncr_dma != NCR_NOT_SET)
+ overrides[0].dma=ncr_dma;
+ if (ncr_addr != NCR_NOT_SET)
+ overrides[0].NCR5380_map_name=(NCR5380_map_type)ncr_addr;
+ if (ncr_5380 != NCR_NOT_SET)
+ overrides[0].board=BOARD_NCR5380;
+ else if (ncr_53c400 != NCR_NOT_SET)
+ overrides[0].board=BOARD_NCR53C400;
+
+ tpnt->proc_dir = &proc_scsi_g_ncr5380;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ if (!(overrides[current_override].NCR5380_map_name))
+ continue;
+
+ switch (overrides[current_override].board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA;
+ break;
+ case BOARD_NCR53C400:
+ flags = FLAG_NCR53C400;
+ break;
+ }
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->NCR5380_instance_name = overrides[current_override].NCR5380_map_name;
+
+ NCR5380_init(instance, flags);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, generic_NCR5380_intr, SA_INTERRUPT, "NCR5380", NULL)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+	    printk("scsi%d : interrupts not enabled. For better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+ printk("scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int)instance->NCR5380_instance_name);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+const char * generic_NCR5380_info (struct Scsi_Host* host) {
+ static const char string[]="Generic NCR5380/53C400 Driver";
+ return string;
+}
+
+int generic_NCR5380_release_resources(struct Scsi_Host * instance)
+{
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+ if (instance->irq != IRQ_NONE)
+ free_irq(instance->irq, NULL);
+
+ return 0;
+}
+
+#ifdef BIOSPARAM
+/*
+ * Function : int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, but I could be incorrect.  Someone
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
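+
+/* Worked example of the mapping below: a disk of 2097152 512-byte sectors
+   (1 GB) reports heads = 64, sectors = 32, cylinders = 2097152 >> 11 = 1024. */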
+
+int generic_NCR5380_biosparam(Disk * disk, kdev_t dev, int *ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+#endif
+
+#if NCR53C400_PSEUDO_DMA
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: About to read %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE | CSR_TRANS_DIR);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
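+
+    /* The 53C400 pseudo-DMA read: the transfer direction and block count are
+       programmed above, then each 128-byte block is pulled out of the host
+       buffer as soon as the chip marks it ready. */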
+ while (1) {
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: %d blocks left\n", blocks);
+#endif
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ if (blocks)
+ printk("53C400r: blocks still == %d\n", blocks);
+ else
+ printk("53C400r: Exiting loop\n");
+#endif
+ break;
+ }
+
+#if 1
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Waiting for buffer, bl=%d\n", bl);
+#endif
+
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring 128 bytes\n");
+#endif
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: EXTRA: Waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ dst[start+i] = NCR5380_read(C400_HOST_BUFFER);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(dst+start,NCR53C400_host_buffer+NCR5380_map_name,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ printk("53C400r: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+ if (!(NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ printk("53C400r: no 53C80 gated irq after transfer");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: Got 53C80 interrupt and tried to clear it\n");
+#endif
+
+/* DON'T DO THIS - THEY NEVER ARRIVE!
+ printk("53C400r: Waiting for 53C80 registers\n");
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+*/
+
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER))
+ printk("53C400r: no end dma signal\n");
+#if (NDEBUG & NDEBUG_C400_PREAD)
+ else
+ printk("53C400r: end dma as expected\n");
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ return 0;
+}
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len)
+{
+ int blocks = len / 128;
+ int start = 0;
+ int i;
+ int bl;
+ NCR5380_local_declare();
+
+ NCR5380_setup(instance);
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: About to write %d blocks for %d bytes\n", blocks, len);
+#endif
+
+ NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
+ NCR5380_write(C400_BLOCK_COUNTER_REG, blocks);
+ while (1) {
+ if (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ) {
+ printk("53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks);
+ return -1;
+ }
+
+ if ((bl=NCR5380_read(C400_BLOCK_COUNTER_REG)) == 0) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ if (blocks)
+ printk("53C400w: exiting loop, blocks still == %d\n", blocks);
+ else
+ printk("53C400w: exiting loop\n");
+#endif
+ break;
+ }
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: %d blocks left\n", blocks);
+
+ printk("53C400w: waiting for buffer, bl=%d\n", bl);
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+ if (blocks) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: EXTRA waiting for buffer\n");
+#endif
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_HOST_BUF_NOT_RDY)
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: transferring EXTRA 128 bytes\n");
+#endif
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+ for (i=0; i<128; i++)
+ NCR5380_write(C400_HOST_BUFFER, src[start+i]);
+#else
+ /* implies CONFIG_SCSI_G_NCR5380_MEM */
+ memmove(NCR53C400_host_buffer+NCR5380_map_name,src+start,128);
+#endif
+ start+=128;
+ blocks--;
+ }
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: No EXTRA required\n");
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: Final values: blocks=%d start=%d\n", blocks, start);
+#endif
+
+#if 0
+ printk("53C400w: waiting for registers to be available\n");
+ THEY NEVER DO!
+ while (NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_53C80_REG)
+ ;
+ printk("53C400w: Got em\n");
+#endif
+
+ /* Let's wait for this instead - could be ugly */
+ /* All documentation says to check for this. Maybe my hardware is too
+ * fast. Waiting for it seems to work fine! KLL
+ */
+ while (!(i = NCR5380_read(C400_CONTROL_STATUS_REG) & CSR_GATED_53C80_IRQ))
+ ;
+
+ /*
+ * I know. i is certainly != 0 here but the loop is new. See previous
+ * comment.
+ */
+ if (i) {
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got 53C80 gated irq (last block)\n");
+#endif
+ if (!((i=NCR5380_read(BUS_AND_STATUS_REG)) & BASR_END_DMA_TRANSFER))
+ printk("53C400w: No END OF DMA bit - WHOOPS! BASR=%0x\n",i);
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ else
+ printk("53C400w: Got END OF DMA\n");
+#endif
+ }
+ else
+ printk("53C400w: no 53C80 gated irq after transfer (last block)\n");
+
+#if 0
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_END_DMA_TRANSFER)) {
+ printk("53C400w: no end dma signal\n");
+ }
+#endif
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: waiting for last byte...\n");
+#endif
+ while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
+ ;
+
+#if (NDEBUG & NDEBUG_C400_PWRITE)
+ printk("53C400w: got last byte.\n");
+ printk("53C400w: pwrite exiting with status 0, whoopee!\n");
+#endif
+ return 0;
+}
+#endif /* NCR53C400_PSEUDO_DMA */
+
+#include "NCR5380.c"
+
+#define PRINTP(x) len += sprintf(buffer+len, x)
+#define ANDP ,
+
+static int sprint_opcode(char* buffer, int len, int opcode) {
+ int start = len;
+ PRINTP("0x%02x " ANDP opcode);
+ return len-start;
+}
+
+static int sprint_command (char* buffer, int len, unsigned char *command) {
+ int i,s,start=len;
+ len += sprint_opcode(buffer, len, command[0]);
+ for ( i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
+ PRINTP("%02x " ANDP command[i]);
+ PRINTP("\n");
+ return len-start;
+}
+
+static int sprint_Scsi_Cmnd (char* buffer, int len, Scsi_Cmnd *cmd) {
+ int start = len;
+ PRINTP("host number %d destination target %d, lun %d\n" ANDP
+ cmd->host->host_no ANDP
+ cmd->target ANDP
+ cmd->lun);
+ PRINTP(" command = ");
+ len += sprint_command (buffer, len, cmd->cmnd);
+ return len-start;
+}
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout)
+{
+ int len = 0;
+ NCR5380_local_declare();
+ unsigned char status;
+ int i;
+ struct Scsi_Host *scsi_ptr;
+ Scsi_Cmnd *ptr;
+ Scsi_Device *dev;
+ struct NCR5380_hostdata *hostdata;
+
+ cli();
+
+ for (scsi_ptr = first_instance; scsi_ptr; scsi_ptr=scsi_ptr->next)
+ if (scsi_ptr->host_no == hostno)
+ break;
+ NCR5380_setup(scsi_ptr);
+ hostdata = (struct NCR5380_hostdata *)scsi_ptr->hostdata;
+
+ PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name);
+ PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE);
+ PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE);
+#ifdef NCR53C400
+ PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE);
+ PRINTP("NCR53C400 card%s detected\n" ANDP (((struct NCR5380_hostdata *)scsi_ptr->hostdata)->flags & FLAG_NCR53C400)?"":" not");
+# if NCR53C400_PSEUDO_DMA
+ PRINTP("NCR53C400 pseudo DMA used\n");
+# endif
+#else
+ PRINTP("NO NCR53C400 driver extensions\n");
+#endif
+ PRINTP("Using %s mapping at %s 0x%x, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name);
+ if (scsi_ptr->irq == IRQ_NONE)
+ PRINTP("no interrupt\n");
+ else
+ PRINTP("on interrupt %d\n" ANDP scsi_ptr->irq);
+
+#ifdef NCR5380_STATS
+ if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue)
+ PRINTP("There are commands pending, transfer rates may be crud\n");
+ if (hostdata->pendingr)
+ PRINTP(" %d pending reads" ANDP hostdata->pendingr);
+ if (hostdata->pendingw)
+ PRINTP(" %d pending writes" ANDP hostdata->pendingw);
+ if (hostdata->pendingr || hostdata->pendingw)
+ PRINTP("\n");
+ for (dev = scsi_devices; dev; dev=dev->next) {
+ if (dev->host == scsi_ptr) {
+ unsigned long br = hostdata->bytes_read[dev->id];
+ unsigned long bw = hostdata->bytes_write[dev->id];
+ long tr = hostdata->time_read[dev->id] / HZ;
+ long tw = hostdata->time_write[dev->id] / HZ;
+
+ PRINTP(" T:%d %s " ANDP dev->id ANDP (dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int)dev->type] : "Unknown");
+ for (i=0; i<8; i++)
+ if (dev->vendor[i] >= 0x20)
+ *(buffer+(len++)) = dev->vendor[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<16; i++)
+ if (dev->model[i] >= 0x20)
+ *(buffer+(len++)) = dev->model[i];
+ *(buffer+(len++)) = ' ';
+ for (i=0; i<4; i++)
+ if (dev->rev[i] >= 0x20)
+ *(buffer+(len++)) = dev->rev[i];
+ *(buffer+(len++)) = ' ';
+
+ PRINTP("\n%10ld kb read in %5ld secs" ANDP br/1024 ANDP tr);
+ if (tr)
+ PRINTP(" @ %5ld bps" ANDP br / tr);
+
+ PRINTP("\n%10ld kb written in %5ld secs" ANDP bw/1024 ANDP tw);
+ if (tw)
+ PRINTP(" @ %5ld bps" ANDP bw / tw);
+ PRINTP("\n");
+ }
+ }
+#endif
+
+ status = NCR5380_read(STATUS_REG);
+ if (!(status & SR_REQ))
+ PRINTP("REQ not asserted, phase unknown.\n");
+ else {
+ for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
+ (phases[i].value != (status & PHASE_MASK)); ++i)
+ ;
+ PRINTP("Phase %s\n" ANDP phases[i].name);
+ }
+
+ if (!hostdata->connected) {
+ PRINTP("No currently connected command\n");
+ } else {
+ len += sprint_Scsi_Cmnd (buffer, len, (Scsi_Cmnd *) hostdata->connected);
+ }
+
+ PRINTP("issue_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ PRINTP("disconnected_queue\n");
+
+ for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr;
+ ptr = (Scsi_Cmnd *) ptr->host_scribble)
+ len += sprint_Scsi_Cmnd (buffer, len, ptr);
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ sti();
+ return len;
+}
+
+#undef PRINTP
+#undef ANDP
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = GENERIC_NCR5380;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/g_NCR5380.h b/linux/src/drivers/scsi/g_NCR5380.h
new file mode 100644
index 0000000..a30e133
--- /dev/null
+++ b/linux/src/drivers/scsi/g_NCR5380.h
@@ -0,0 +1,162 @@
+/*
+ * Generic Generic NCR5380 driver defines
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
+ * K.Lentin@cs.monash.edu.au
+ *
+ * ALPHA RELEASE 1.
+ *
+ * For more information, please consult
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef GENERIC_NCR5380_H
+#define GENERIC_NCR5380_H
+
+#include <linux/config.h>
+
+#define GENERIC_NCR5380_PUBLIC_RELEASE 1
+
+#ifdef NCR53C400
+#define BIOSPARAM
+#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
+#else
+#define NCR5380_BIOSPARAM NULL
+#endif
+
+#ifndef ASM
+int generic_NCR5380_abort(Scsi_Cmnd *);
+int generic_NCR5380_detect(Scsi_Host_Template *);
+int generic_NCR5380_release_resources(struct Scsi_Host *);
+int generic_NCR5380_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int generic_NCR5380_reset(Scsi_Cmnd *, unsigned int);
+int notyet_generic_proc_info (char *buffer ,char **start, off_t offset,
+ int length, int hostno, int inout);
+const char* generic_NCR5380_info(struct Scsi_Host *);
+#ifdef BIOSPARAM
+int generic_NCR5380_biosparam(Disk *, kdev_t, int *);
+#endif
+
+int generic_NCR5380_proc_info(char* buffer, char** start, off_t offset, int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 16
+#endif
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define GENERIC_NCR5380 {NULL, NULL, NULL, \
+ generic_NCR5380_proc_info, \
+ "Generic NCR5380/NCR53C400 Scsi Driver", \
+ generic_NCR5380_detect, generic_NCR5380_release_resources, \
+ (void *)generic_NCR5380_info, NULL, \
+ generic_NCR5380_queue_command, generic_NCR5380_abort, \
+ generic_NCR5380_reset, NULL, \
+ NCR5380_BIOSPARAM, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define __STRVAL(x) #x
+#define STRVAL(x) __STRVAL(x)
+
+#ifdef CONFIG_SCSI_G_NCR5380_PORT
+
+#define NCR5380_map_config port
+
+#define NCR5380_map_type int
+
+#define NCR5380_map_name port
+
+#define NCR5380_instance_name io_port
+
+#define NCR53C400_register_offset 0
+
+#define NCR53C400_address_adjust 8
+
+#ifdef NCR53C400
+#define NCR5380_region_size 16
+#else
+#define NCR5380_region_size 8
+#endif
+
+#define NCR5380_read(reg) (inb(NCR5380_map_name + (reg)))
+#define NCR5380_write(reg, value) (outb((value), (NCR5380_map_name + (reg))))
+
+#else
+/* therefore CONFIG_SCSI_G_NCR5380_MEM */
+
+#define NCR5380_map_config memory
+
+#define NCR5380_map_type volatile unsigned char*
+
+#define NCR5380_map_name base
+
+#define NCR5380_instance_name base
+
+#define NCR53C400_register_offset 0x108
+
+#define NCR53C400_address_adjust 0
+
+#define NCR53C400_mem_base 0x3880
+
+#define NCR53C400_host_buffer 0x3900
+
+#define NCR5380_region_size 0x3a00
+
+
+#define NCR5380_read(reg) (*(NCR5380_map_name + NCR53C400_mem_base + (reg)))
+#define NCR5380_write(reg, value) (*(NCR5380_map_name + NCR53C400_mem_base + (reg)) = value)
+
+#endif
+
+#define NCR5380_implementation_fields \
+ NCR5380_map_type NCR5380_map_name
+
+#define NCR5380_local_declare() \
+ register NCR5380_implementation_fields
+
+#define NCR5380_setup(instance) \
+ NCR5380_map_name = (NCR5380_map_type)((instance)->NCR5380_instance_name)
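+
+/* Typical access pattern in a board routine (illustrative sketch only):
+ *
+ *   NCR5380_local_declare();
+ *   NCR5380_setup(instance);
+ *   status = NCR5380_read(STATUS_REG);
+ *
+ * NCR5380_read()/NCR5380_write() expand to inb()/outb() or to direct
+ * memory accesses, depending on CONFIG_SCSI_G_NCR5380_PORT / _MEM above.
+ */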
+
+#define NCR5380_intr generic_NCR5380_intr
+#define NCR5380_queue_command generic_NCR5380_queue_command
+#define NCR5380_abort generic_NCR5380_abort
+#define NCR5380_reset generic_NCR5380_reset
+#define NCR5380_pread generic_NCR5380_pread
+#define NCR5380_pwrite generic_NCR5380_pwrite
+#define NCR5380_proc_info notyet_generic_proc_info
+
+#define BOARD_NCR5380 0
+#define BOARD_NCR53C400 1
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* GENERIC_NCR5380_H */
+
diff --git a/linux/src/drivers/scsi/gdth.c b/linux/src/drivers/scsi/gdth.c
new file mode 100644
index 0000000..0a4bef8
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth.c
@@ -0,0 +1,3598 @@
+/************************************************************************
+ * GDT ISA/EISA/PCI Disk Array Controller driver for Linux *
+ * *
+ * gdth.c *
+ * Copyright (C) 1995-98 ICP vortex Computersysteme GmbH, Achim Leubner *
+ * *
+ * <achim@vortex.de> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published *
+ * by the Free Software Foundation; either version 2 of the License, *
+ * or (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this kernel; if not, write to the Free Software *
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
+ * *
+ * Tested with Linux 1.2.13, ..., 2.1.103 *
+ * *
+ * Revision 1.16 1998/09/28 16:08:46 achim
+ * GDT_PCIMPR: DPMEM remapping, if required
+ * mdelay() added
+ *
+ * Revision 1.15 1998/06/03 14:54:06 achim
+ * gdth_delay(), gdth_flush() implemented
+ * Bugfix: gdth_release() changed
+ *
+ * Revision 1.14 1998/05/22 10:01:17 achim
+ * mj: pcibios_strerror() removed
+ * Improved SMP support (if version >= 2.1.95)
+ * gdth_halt(): halt_called flag added (if version < 2.1)
+ *
+ * Revision 1.13 1998/04/16 09:14:57 achim
+ * Reserve drives (for raw service) implemented
+ * New error handling code enabled
+ * Get controller name from board_info() IOCTL
+ * Final round of PCI device driver patches by Martin Mares
+ *
+ * Revision 1.12 1998/03/03 09:32:37 achim
+ * Fibre channel controller support added
+ *
+ * Revision 1.11 1998/01/27 16:19:14 achim
+ * SA_SHIRQ added
+ * add_timer()/del_timer() instead of GDTH_TIMER
+ * scsi_add_timer()/scsi_del_timer() instead of SCSI_TIMER
+ * New error handling included
+ *
+ * Revision 1.10 1997/10/31 12:29:57 achim
+ * Read heads/sectors from host drive
+ *
+ * Revision 1.9 1997/09/04 10:07:25 achim
+ * IO-mapping with virt_to_bus(), readb(), writeb(), ...
+ * register_reboot_notifier() to get a notify on shutdown used
+ *
+ * Revision 1.8 1997/04/02 12:14:30 achim
+ * Version 1.00 (see gdth.h), tested with kernel 2.0.29
+ *
+ * Revision 1.7 1997/03/12 13:33:37 achim
+ * gdth_reset() changed, new async. events
+ *
+ * Revision 1.6 1997/03/04 14:01:11 achim
+ * Shutdown routine gdth_halt() implemented
+ *
+ * Revision 1.5 1997/02/21 09:08:36 achim
+ * New controller included (RP, RP1, RP2 series)
+ * IOCTL interface implemented
+ *
+ * Revision 1.4 1996/07/05 12:48:55 achim
+ * Function gdth_bios_param() implemented
+ * New constant GDTH_MAXC_P_L inserted
+ * GDT_WRITE_THR, GDT_EXT_INFO implemented
+ * Function gdth_reset() changed
+ *
+ * Revision 1.3 1996/05/10 09:04:41 achim
+ * Small changes for Linux 1.2.13
+ *
+ * Revision 1.2 1996/05/09 12:45:27 achim
+ * Loadable module support implemented
+ * /proc support corrections made
+ *
+ * Revision 1.1 1996/04/11 07:35:57 achim
+ * Initial revision
+ *
+ *
+ * $Id: gdth.c,v 1.1.4.1 2007/03/27 21:04:30 tschwinge Exp $
+ ************************************************************************/
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/proc_fs.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#if LINUX_VERSION_CODE >= 0x020100
+#include <linux/reboot.h>
+#else
+#include <linux/bios32.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#if LINUX_VERSION_CODE >= 0x02015F
+#include <asm/spinlock.h>
+#endif
+
+#if LINUX_VERSION_CODE >= 0x010300
+#include <linux/blk.h>
+#else
+#include "../block/blk.h"
+#endif
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+
+#include "gdth.h"
+
+/****************************************************************************/
+
+/* LILO params: gdth=<IRQ>
+ *
+ * Where: <IRQ> is any of the valid IRQs for EISA controllers (10,11,12,14)
+ * Sets the IRQ of the GDT3000/3020 EISA controller to this value,
+ * if the IRQ cannot be detected automatically (controller BIOS disabled)
+ * See gdth_init_eisa()
+ *
+ * You can use the command line gdth=0 to disable the driver
+ */
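+
+/* Illustrative boot-line examples (assumed LILO append syntax, not part of
+ * the driver itself):
+ *   append="gdth=10"     set the GDT3000/3020 EISA controller IRQ to 10
+ *   append="gdth=0"      disable the gdth driver
+ */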
+static unchar irqs[MAXHA] = {0xff};
+static unchar disable_gdth_scan = FALSE;
+
+/* Reserve drives for raw service: Fill the following structure with the
+ * appropriate values: Controller number, Channel, Target ID
+ */
+static gdth_reserve_str reserve_list[] = {
+ /* { 0, 1, 4 }, Example: Controller 0, Channel B, ID 4 */
+ { 0xff, 0xff, 0xff } /* end of list */
+};
+
+/****************************************************************************/
+
+#if LINUX_VERSION_CODE >= 0x02015F
+static void gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs);
+static void do_gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs);
+#elif LINUX_VERSION_CODE >= 0x010346
+static void gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs);
+#else
+static void gdth_interrupt(int irq,struct pt_regs *regs);
+#endif
+static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp);
+static int gdth_async_event(int hanum,int service);
+
+static void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority);
+static void gdth_next(int hanum);
+static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b);
+static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp,unchar b);
+static gdth_evt_str *gdth_store_event(ushort source, ushort idx,
+ gdth_evt_data *evt);
+static int gdth_read_event(int handle, gdth_evt_str *estr);
+static void gdth_readapp_event(unchar application, gdth_evt_str *estr);
+static void gdth_clear_events(void);
+
+static void gdth_copy_internal_data(Scsi_Cmnd *scp,char *buffer,ushort count);
+static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp,
+ unchar b,ulong *flags);
+static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive);
+
+static int gdth_search_eisa(ushort eisa_adr);
+static int gdth_search_isa(ulong bios_adr);
+static int gdth_search_pci(ushort device_id,ushort index,gdth_pci_str *pcistr);
+static int gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha);
+static int gdth_init_isa(ulong bios_adr,gdth_ha_str *ha);
+static int gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha);
+
+static void gdth_enable_int(int hanum);
+static int gdth_get_status(unchar *pIStatus,int irq);
+static int gdth_test_busy(int hanum);
+static int gdth_get_cmd_index(int hanum);
+static void gdth_release_event(int hanum);
+static int gdth_wait(int hanum,int index,ulong time);
+static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong p1,
+ ulong p2,ulong p3);
+static int gdth_search_drives(int hanum);
+
+static void *gdth_mmap(ulong paddr, ulong size);
+static void gdth_munmap(void *addr);
+
+static const char *gdth_ctr_name(int hanum);
+
+static void gdth_flush(int hanum);
+#if LINUX_VERSION_CODE >= 0x020100
+static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
+#else
+static int halt_called = FALSE;
+void gdth_halt(void);
+#endif
+
+#ifdef DEBUG_GDTH
+static unchar DebugState = DEBUG_GDTH;
+extern int sys_syslog(int,char*,int);
+#define LOGEN sys_syslog(7,NULL,0)
+
+#ifdef __SERIAL__
+#define MAX_SERBUF 160
+static void ser_init(void);
+static void ser_puts(char *str);
+static void ser_putc(char c);
+static int ser_printk(const char *fmt, ...);
+static char strbuf[MAX_SERBUF+1];
+#ifdef __COM2__
+#define COM_BASE 0x2f8
+#else
+#define COM_BASE 0x3f8
+#endif
+static void ser_init()
+{
+ unsigned port=COM_BASE;
+
+ outb(0x80,port+3);
+ outb(0,port+1);
+ /* 19200 Baud, if 9600: outb(12,port) */
+ outb(6, port);
+ outb(3,port+3);
+ outb(0,port+1);
+ /*
+ ser_putc('I');
+ ser_putc(' ');
+ */
+}
+
+static void ser_puts(char *str)
+{
+ char *ptr;
+
+ ser_init();
+ for (ptr=str;*ptr;++ptr)
+ ser_putc(*ptr);
+}
+
+static void ser_putc(char c)
+{
+ unsigned port=COM_BASE;
+
+ while ((inb(port+5) & 0x20)==0);
+ outb(c,port);
+ if (c==0x0a)
+ {
+ while ((inb(port+5) & 0x20)==0);
+ outb(0x0d,port);
+ }
+}
+
+static int ser_printk(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args,fmt);
+ i = vsprintf(strbuf,fmt,args);
+ ser_puts(strbuf);
+ va_end(args);
+ return i;
+}
+
+#define TRACE(a) {if (DebugState==1) {ser_printk a;}}
+#define TRACE2(a) {if (DebugState==1 || DebugState==2) {ser_printk a;}}
+#define TRACE3(a) {if (DebugState!=0) {ser_printk a;}}
+
+#else /* !__SERIAL__ */
+#define TRACE(a) {if (DebugState==1) {LOGEN;printk a;}}
+#define TRACE2(a) {if (DebugState==1 || DebugState==2) {LOGEN;printk a;}}
+#define TRACE3(a) {if (DebugState!=0) {LOGEN;printk a;}}
+#endif
+
+#else /* !DEBUG */
+#define TRACE(a)
+#define TRACE2(a)
+#define TRACE3(a)
+#endif
+
+#ifdef GDTH_STATISTICS
+static ulong max_rq=0, max_index=0, max_sg=0;
+static ulong act_ints=0, act_ios=0, act_stats=0, act_rq=0;
+static struct timer_list gdth_timer;
+#endif
+
+#define PTR2USHORT(a) (ushort)(ulong)(a)
+#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
+#define INDEX_OK(i,t) ((i)<sizeof(t)/sizeof((t)[0]))
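+/* e.g. GDTOFFSOF(gdth_iochan_str, list[0]) yields the byte offset of the
+   list[] member within gdth_iochan_str (an offsetof() equivalent), and
+   INDEX_OK(i,t) checks that i is a valid index into the array t. */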
+
+#define NUMDATA(a) ( (gdth_num_str *)((a)->hostdata))
+#define HADATA(a) (&((gdth_ext_str *)((a)->hostdata))->haext)
+#define CMDDATA(a) (&((gdth_ext_str *)((a)->hostdata))->cmdext)
+#define DMADATA(a) (&((gdth_ext_str *)((a)->hostdata))->dmaext)
+
+
+#if LINUX_VERSION_CODE < 0x010300
+static void *gdth_mmap(ulong paddr, ulong size)
+{
+ if (paddr >= high_memory)
+ return NULL;
+ else
+ return (void *)paddr;
+}
+static void gdth_munmap(void *addr)
+{
+}
+inline ulong virt_to_phys(volatile void *addr)
+{
+ return (ulong)addr;
+}
+inline void *phys_to_virt(ulong addr)
+{
+ return (void *)addr;
+}
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#define readb(addr) (*(volatile unchar *)(addr))
+#define readw(addr) (*(volatile ushort *)(addr))
+#define readl(addr) (*(volatile ulong *)(addr))
+#define writeb(b,addr) (*(volatile unchar *)(addr) = (b))
+#define writew(b,addr) (*(volatile ushort *)(addr) = (b))
+#define writel(b,addr) (*(volatile ulong *)(addr) = (b))
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+
+#elif LINUX_VERSION_CODE < 0x020100
+static int remapped = FALSE;
+static void *gdth_mmap(ulong paddr, ulong size)
+{
+ if ( paddr >= high_memory) {
+ remapped = TRUE;
+ return vremap(paddr, size);
+ } else {
+ return (void *)paddr;
+ }
+}
+static void gdth_munmap(void *addr)
+{
+ if (remapped)
+ vfree(addr);
+ remapped = FALSE;
+}
+#else
+static void *gdth_mmap(ulong paddr, ulong size)
+{
+ return ioremap(paddr, size);
+}
+static void gdth_munmap(void *addr)
+{
+ return iounmap(addr);
+}
+#endif
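+
+/* However the mapping is obtained, the probe code uses the pair like this
+ * (illustrative sketch, mirroring gdth_search_isa() below):
+ *
+ *   addr = gdth_mmap(bios_adr + BIOS_ID_OFFS, sizeof(ulong));
+ *   if (addr != NULL) { id = readl(addr); gdth_munmap(addr); }
+ */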
+
+
+static unchar gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
+static unchar gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
+static unchar gdth_polling; /* polling if TRUE */
+static unchar gdth_from_wait = FALSE; /* gdth_wait() */
+static int wait_index,wait_hanum; /* gdth_wait() */
+static int gdth_ctr_count = 0; /* controller count */
+static int gdth_ctr_vcount = 0; /* virt. ctr. count */
+static int gdth_ctr_released = 0; /* gdth_release() */
+static struct Scsi_Host *gdth_ctr_tab[MAXHA]; /* controller table */
+static struct Scsi_Host *gdth_ctr_vtab[MAXHA*MAXBUS]; /* virt. ctr. table */
+static unchar gdth_write_through = FALSE; /* write through */
+static char *gdth_ioctl_tab[4][MAXHA]; /* ioctl buffer */
+static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
+static int elastidx;
+static int eoldidx;
+
+static struct {
+ Scsi_Cmnd *cmnd; /* pending request */
+ ushort service; /* service */
+} gdth_cmd_tab[GDTH_MAXCMDS][MAXHA]; /* table of pend. requests */
+
+#define DIN 1 /* IN data direction */
+#define DOU 2 /* OUT data direction */
+#define DNO DIN /* no data transfer */
+#define DUN DIN /* unknown data direction */
+static unchar gdth_direction_tab[0x100] = {
+ DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
+ DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
+ DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DIN,DUN,DUN,DIN,DIN,DIN,
+ DIN,DIN,DIN,DNO,DIN,DNO,DNO,DIN,DIN,DIN,DIN,DIN,DIN,DIN,DIN,DIN,
+ DIN,DIN,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DIN,DIN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DIN,DUN,DNO,DUN,DIN,DIN,
+ DIN,DIN,DIN,DNO,DUN,DIN,DIN,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
+};
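+
+/* The table is indexed by SCSI opcode, e.g. gdth_direction_tab[0x08]
+   (READ_6) yields DIN and gdth_direction_tab[0x0a] (WRITE_6) yields DOU. */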
+
+/* __initfunc, __initdata macros */
+#if LINUX_VERSION_CODE >= 0x020126
+#include <linux/init.h>
+#else
+#define __initfunc(A) A
+#define __initdata
+#define __init
+#endif
+
+/* /proc support */
+#if LINUX_VERSION_CODE >= 0x010300
+#include <linux/stat.h>
+struct proc_dir_entry proc_scsi_gdth = {
+ PROC_SCSI_GDTH, 4, "gdth",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#include "gdth_proc.h"
+#include "gdth_proc.c"
+#endif
+
+#if LINUX_VERSION_CODE >= 0x020100
+/* notifier block to get notified on system shutdown/halt/reboot */
+static struct notifier_block gdth_notifier = {
+ gdth_halt, NULL, 0
+};
+#endif
+
+static void gdth_delay(int milliseconds)
+{
+ if (milliseconds == 0) {
+ udelay(1);
+ } else {
+#if LINUX_VERSION_CODE >= 0x020168
+ mdelay(milliseconds);
+#else
+ int i;
+ for (i = 0; i < milliseconds; ++i)
+ udelay(1000);
+#endif
+ }
+}
+
+/* controller search and initialization functions */
+
+__initfunc (static int gdth_search_eisa(ushort eisa_adr))
+{
+ ulong id;
+
+ TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
+ id = inl(eisa_adr+ID0REG);
+ if (id == GDT3A_ID || id == GDT3B_ID) { /* GDT3000A or GDT3000B */
+ if ((inb(eisa_adr+EISAREG) & 8) == 0)
+ return 0; /* not EISA configured */
+ return 1;
+ }
+ if (id == GDT3_ID) /* GDT3000 */
+ return 1;
+
+ return 0;
+}
+
+
+__initfunc (static int gdth_search_isa(ulong bios_adr))
+{
+ void *addr;
+ ulong id;
+
+ TRACE(("gdth_search_isa() bios adr. %lx\n",bios_adr));
+ if ((addr = gdth_mmap(bios_adr+BIOS_ID_OFFS, sizeof(ulong))) != NULL) {
+ id = readl(addr);
+ gdth_munmap(addr);
+ if (id == GDT2_ID) /* GDT2000 */
+ return 1;
+ }
+ return 0;
+}
+
+
+__initfunc (static int gdth_search_pci(ushort device_id,ushort index,gdth_pci_str *pcistr))
+{
+ int error;
+ ulong base0,base1,base2;
+
+ TRACE(("gdth_search_pci() device_id %d, index %d\n",
+ device_id,index));
+
+#if LINUX_VERSION_CODE >= 0x20155
+ if (!pci_present())
+ return 0;
+#else
+ if (!pcibios_present())
+ return 0;
+#endif
+
+ if (pcibios_find_device(PCI_VENDOR_ID_VORTEX,device_id,index,
+ &pcistr->bus,&pcistr->device_fn))
+ return 0;
+
+ /* GDT PCI controller found, now read resources from config space */
+#if LINUX_VERSION_CODE >= 0x20155
+ {
+ struct pci_dev *pdev = pci_find_slot(pcistr->bus, pcistr->device_fn);
+ base0 = pdev->base_address[0];
+ base1 = pdev->base_address[1];
+ base2 = pdev->base_address[2];
+ if ((error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_ROM_ADDRESS,
+ (int *) &pcistr->bios))) {
+ printk("GDT-PCI: error %d reading configuration space", error);
+ return -1;
+ }
+ pcistr->irq = pdev->irq;
+ }
+#else
+#if LINUX_VERSION_CODE >= 0x010300
+#define GDTH_BASEP (int *)
+#else
+#define GDTH_BASEP
+#endif
+ if ((error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_BASE_ADDRESS_0,
+ GDTH_BASEP&base0)) ||
+ (error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_BASE_ADDRESS_1,
+ GDTH_BASEP&base1)) ||
+ (error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_BASE_ADDRESS_2,
+ GDTH_BASEP&base2)) ||
+ (error = pcibios_read_config_dword(pcistr->bus,pcistr->device_fn,
+ PCI_ROM_ADDRESS,
+ GDTH_BASEP&pcistr->bios)) ||
+ (error = pcibios_read_config_byte(pcistr->bus,pcistr->device_fn,
+ PCI_INTERRUPT_LINE,&pcistr->irq))) {
+ printk("GDT-PCI: error %d reading configuration space", error);
+ return -1;
+ }
+#endif
+
+ pcistr->device_id = device_id;
+ if (device_id <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000 or GDT6000B */
+ device_id >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
+ if ((base0 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_MEMORY)
+ return -1;
+ pcistr->dpmem = base0 & PCI_BASE_ADDRESS_MEM_MASK;
+ } else { /* GDT6110, GDT6120, .. */
+ if ((base0 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_MEMORY ||
+ (base2 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_MEMORY ||
+ (base1 & PCI_BASE_ADDRESS_SPACE)!=PCI_BASE_ADDRESS_SPACE_IO)
+ return -1;
+ pcistr->dpmem = base2 & PCI_BASE_ADDRESS_MEM_MASK;
+ pcistr->io_mm = base0 & PCI_BASE_ADDRESS_MEM_MASK;
+ pcistr->io = base1 & PCI_BASE_ADDRESS_IO_MASK;
+ }
+ return 1;
+}
+
+
+__initfunc (static int gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha))
+{
+ ulong retries,id;
+ unchar prot_ver,eisacf,i,irq_found;
+
+ TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
+
+ /* disable board interrupts, deinitialize services */
+ outb(0xff,eisa_adr+EDOORREG);
+ outb(0x00,eisa_adr+EDENABREG);
+ outb(0x00,eisa_adr+EINTENABREG);
+
+ outb(0xff,eisa_adr+LDOORREG);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (inb(eisa_adr+EDOORREG) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-EISA: Initialization error (DEINIT failed)\n");
+ return 0;
+ }
+ gdth_delay(1);
+ TRACE2(("wait for DEINIT: retries=%ld\n",retries));
+ }
+ prot_ver = inb(eisa_adr+MAILBOXREG);
+ outb(0xff,eisa_adr+EDOORREG);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-EISA: Illegal protocol version\n");
+ return 0;
+ }
+ ha->bmic = eisa_adr;
+ ha->brd_phys = (ulong)eisa_adr >> 12;
+
+ outl(0,eisa_adr+MAILBOXREG);
+ outl(0,eisa_adr+MAILBOXREG+4);
+ outl(0,eisa_adr+MAILBOXREG+8);
+ outl(0,eisa_adr+MAILBOXREG+12);
+
+ /* detect IRQ */
+ if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) {
+ ha->type = GDT_EISA;
+ ha->stype = id;
+ outl(1,eisa_adr+MAILBOXREG+8);
+ outb(0xfe,eisa_adr+LDOORREG);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (inb(eisa_adr+EDOORREG) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-EISA: Initialization error (get IRQ failed)\n");
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ ha->irq = inb(eisa_adr+MAILBOXREG);
+ outb(0xff,eisa_adr+EDOORREG);
+ TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq));
+ /* check the result */
+ if (ha->irq == 0) {
+ TRACE2(("Unknown IRQ, check IRQ table from cmd line !\n"));
+ for (i=0,irq_found=FALSE; i<MAXHA && irqs[i]!=0xff; ++i) {
+ if (irqs[i]!=0) {
+ irq_found=TRUE;
+ break;
+ }
+ }
+ if (irq_found) {
+ ha->irq = irqs[i];
+ irqs[i] = 0;
+ printk("GDT-EISA: Can not detect controller IRQ,\n");
+ printk("Use IRQ setting from command line (IRQ = %d)\n",
+ ha->irq);
+ } else {
+ printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n");
+ printk("the controller BIOS or use command line parameters\n");
+ return 0;
+ }
+ }
+ } else {
+ eisacf = inb(eisa_adr+EISAREG) & 7;
+ if (eisacf > 4) /* level triggered */
+ eisacf -= 4;
+ ha->irq = gdth_irq_tab[eisacf];
+ ha->type = GDT_EISA;
+ ha->stype= id;
+ }
+ return 1;
+}
+
+
+__initfunc (static int gdth_init_isa(ulong bios_adr,gdth_ha_str *ha))
+{
+ register gdt2_dpram_str *dp2_ptr;
+ int i;
+ unchar irq_drq,prot_ver;
+ ulong retries;
+
+ TRACE(("gdth_init_isa() bios adr. %lx\n",bios_adr));
+
+ ha->brd = gdth_mmap(bios_adr, sizeof(gdt2_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-ISA: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp2_ptr = (gdt2_dpram_str *)ha->brd;
+ writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */
+ /* reset interface area */
+ memset_io((char *)&dp2_ptr->u,0,sizeof(dp2_ptr->u));
+ if (readl(&dp2_ptr->u) != 0) {
+        printk("GDT-ISA: Initialization error (DPMEM write error)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, read DRQ and IRQ */
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ writeb(0x00, &dp2_ptr->io.irqen);
+ writeb(0x00, &dp2_ptr->u.ic.S_Status);
+ writeb(0x00, &dp2_ptr->u.ic.Cmd_Index);
+
+ irq_drq = readb(&dp2_ptr->io.rq);
+ for (i=0; i<3; ++i) {
+ if ((irq_drq & 1)==0)
+ break;
+ irq_drq >>= 1;
+ }
+ ha->drq = gdth_drq_tab[i];
+
+ irq_drq = readb(&dp2_ptr->io.rq) >> 3;
+ for (i=1; i<5; ++i) {
+ if ((irq_drq & 1)==0)
+ break;
+ irq_drq >>= 1;
+ }
+ ha->irq = gdth_irq_tab[i];
+
+ /* deinitialize services */
+ writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp2_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp2_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-ISA: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp2_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp2_ptr->u.ic.Status);
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-ISA: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_ISA;
+ ha->ic_all_size = sizeof(dp2_ptr->u);
+ ha->stype= GDT2_ID;
+ ha->brd_phys = bios_adr >> 4;
+
+ /* special request to controller BIOS */
+ writel(0x00, &dp2_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp2_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp2_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp2_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp2_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp2_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-ISA: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp2_ptr->u.ic.Status);
+ writeb(0xff, &dp2_ptr->io.irqdel);
+ return 1;
+}
+
+
+__initfunc (static int gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha))
+{
+ register gdt6_dpram_str *dp6_ptr;
+ register gdt6c_dpram_str *dp6c_ptr;
+ register gdt6m_dpram_str *dp6m_ptr;
+ ulong retries;
+ unchar prot_ver;
+ int i, found = FALSE;
+
+ TRACE(("gdth_init_pci()\n"));
+
+ ha->brd_phys = (pcistr->bus << 8) | (pcistr->device_fn & 0xf8);
+ ha->stype = (ulong)pcistr->device_id;
+ ha->irq = pcistr->irq;
+
+ if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000 or GDT6000B */
+ TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
+ ha->brd = gdth_mmap(pcistr->dpmem, sizeof(gdt6_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6_ptr = (gdt6_dpram_str *)ha->brd;
+ /* reset interface area */
+ memset_io((char *)&dp6_ptr->u,0,sizeof(dp6_ptr->u));
+ if (readl(&dp6_ptr->u) != 0) {
+ printk("GDT-PCI: Initialization error (DPMEM write error)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, deinit services */
+ writeb(0xff, &dp6_ptr->io.irqdel);
+        writeb(0x00, &dp6_ptr->io.irqen);
+ writeb(0x00, &dp6_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp6_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp6_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6_ptr->u.ic.S_Status);
+ writeb(0xff, &dp6_ptr->io.irqdel);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCI;
+ ha->ic_all_size = sizeof(dp6_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp6_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
+ writeb(0, &dp6_ptr->io.event);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6_ptr->u.ic.S_Status);
+ writeb(0xff, &dp6_ptr->io.irqdel);
+
+ } else if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, GDT6120, .. */
+ ha->plx = (gdt6c_plx_regs *)pcistr->io;
+ TRACE2(("init_pci_new() dpmem %lx io %lx irq %d\n",
+ pcistr->dpmem,(ulong)ha->plx,ha->irq));
+ ha->brd = gdth_mmap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ dp6c_ptr = (gdt6c_dpram_str *)ha->brd;
+ /* reset interface area */
+ memset_io((char *)&dp6c_ptr->u,0,sizeof(dp6c_ptr->u));
+ if (readl(&dp6c_ptr->u) != 0) {
+ printk("GDT-PCI: Initialization error (DPMEM write error)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ /* disable board interrupts, deinit services */
+ outb(0x00,PTR2USHORT(&ha->plx->control1));
+ outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
+
+ writeb(0x00, &dp6c_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
+
+ outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
+
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp6c_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6c_ptr->u.ic.Status);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCINEW;
+ ha->ic_all_size = sizeof(dp6c_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp6c_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
+
+ outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
+
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6c_ptr->u.ic.S_Status);
+
+ } else { /* MPR */
+ TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
+ ha->brd = gdth_mmap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+
+ /* check and reset interface area */
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writel(DPMEM_MAGIC, &dp6m_ptr->u);
+ if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
+ printk("GDT-PCI: Cannot access DPMEM at 0x%x (shadowed?)\n",
+ (int)ha->brd);
+ found = FALSE;
+ for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
+ pcibios_write_config_dword( pcistr->bus, pcistr->device_fn,
+ PCI_BASE_ADDRESS_0, i );
+ gdth_munmap( ha->brd );
+ ha->brd = gdth_mmap(i, sizeof(gdt6m_dpram_str));
+ if (ha->brd == NULL) {
+ printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
+ return 0;
+ }
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writel(DPMEM_MAGIC, &dp6m_ptr->u);
+ if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
+ printk("GDT-PCI: Use free address at 0x%x\n",
+ (int)ha->brd);
+ found = TRUE;
+ break;
+ }
+ }
+ if (!found) {
+ printk("GDT-PCI: No free address found!\n");
+ gdth_munmap( ha->brd );
+ return 0;
+ }
+ }
+ memset_io((char *)&dp6m_ptr->u,0,sizeof(dp6m_ptr->u));
+
+ /* disable board interrupts, deinit services */
+ writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
+ &dp6m_ptr->i960r.edoor_en_reg);
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(0x00, &dp6m_ptr->u.ic.S_Status);
+ writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);
+
+ writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
+ writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
+ writeb(1, &dp6m_ptr->i960r.ldoor_reg);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error (DEINIT failed)\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ prot_ver = (unchar)readl(&dp6m_ptr->u.ic.S_Info[0]);
+ writeb(0, &dp6m_ptr->u.ic.S_Status);
+ if (prot_ver != PROTOCOL_VERSION) {
+ printk("GDT-PCI: Illegal protocol version\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+
+ ha->type = GDT_PCIMPR;
+ ha->ic_all_size = sizeof(dp6m_ptr->u);
+
+ /* special command to controller BIOS */
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
+ writel(0x01, &dp6m_ptr->u.ic.S_Info[2]);
+ writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
+ writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
+ writeb(1, &dp6m_ptr->i960r.ldoor_reg);
+ retries = INIT_RETRIES;
+ gdth_delay(20);
+ while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
+ if (--retries == 0) {
+ printk("GDT-PCI: Initialization error\n");
+ gdth_munmap(ha->brd);
+ return 0;
+ }
+ gdth_delay(1);
+ }
+ writeb(0, &dp6m_ptr->u.ic.S_Status);
+ }
+
+ return 1;
+}
+
+
+/* controller protocol functions */
+
+__initfunc (static void gdth_enable_int(int hanum))
+{
+ gdth_ha_str *ha;
+ ulong flags;
+ gdt2_dpram_str *dp2_ptr;
+ gdt6_dpram_str *dp6_ptr;
+ gdt6m_dpram_str *dp6m_ptr;
+
+ TRACE(("gdth_enable_int() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ save_flags(flags);
+ cli();
+
+ if (ha->type == GDT_EISA) {
+ outb(0xff, ha->bmic + EDOORREG);
+ outb(0xff, ha->bmic + EDENABREG);
+ outb(0x01, ha->bmic + EINTENABREG);
+ } else if (ha->type == GDT_ISA) {
+ dp2_ptr = (gdt2_dpram_str *)ha->brd;
+ writeb(1, &dp2_ptr->io.irqdel);
+ writeb(0, &dp2_ptr->u.ic.Cmd_Index);
+ writeb(1, &dp2_ptr->io.irqen);
+ } else if (ha->type == GDT_PCI) {
+ dp6_ptr = (gdt6_dpram_str *)ha->brd;
+ writeb(1, &dp6_ptr->io.irqdel);
+ writeb(0, &dp6_ptr->u.ic.Cmd_Index);
+ writeb(1, &dp6_ptr->io.irqen);
+ } else if (ha->type == GDT_PCINEW) {
+ outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
+ outb(0x03, PTR2USHORT(&ha->plx->control1));
+ } else if (ha->type == GDT_PCIMPR) {
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
+ &dp6m_ptr->i960r.edoor_en_reg);
+ }
+ restore_flags(flags);
+}
+
+
+static int gdth_get_status(unchar *pIStatus,int irq)
+{
+ register gdth_ha_str *ha;
+ int i;
+
+ TRACE(("gdth_get_status() irq %d ctr_count %d\n",
+ irq,gdth_ctr_count));
+
+ *pIStatus = 0;
+ for (i=0; i<gdth_ctr_count; ++i) {
+ ha = HADATA(gdth_ctr_tab[i]);
+ if (ha->irq != (unchar)irq) /* check IRQ */
+ continue;
+ if (ha->type == GDT_EISA)
+ *pIStatus = inb((ushort)ha->bmic + EDOORREG);
+ else if (ha->type == GDT_ISA)
+ *pIStatus = readb(&((gdt2_dpram_str *)ha->brd)->u.ic.Cmd_Index);
+ else if (ha->type == GDT_PCI)
+ *pIStatus = readb(&((gdt6_dpram_str *)ha->brd)->u.ic.Cmd_Index);
+ else if (ha->type == GDT_PCINEW)
+ *pIStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
+ else if (ha->type == GDT_PCIMPR)
+ *pIStatus = readb(&((gdt6m_dpram_str *)ha->brd)->i960r.edoor_reg);
+
+ if (*pIStatus)
+ return i; /* board found */
+ }
+ return -1;
+}
+
+
+static int gdth_test_busy(int hanum)
+{
+ register gdth_ha_str *ha;
+ register int gdtsema0 = 0;
+
+ TRACE(("gdth_test_busy() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ if (ha->type == GDT_EISA)
+ gdtsema0 = (int)inb(ha->bmic + SEMA0REG);
+ else if (ha->type == GDT_ISA)
+ gdtsema0 = (int)readb(&((gdt2_dpram_str *)ha->brd)->u.ic.Sema0);
+ else if (ha->type == GDT_PCI)
+ gdtsema0 = (int)readb(&((gdt6_dpram_str *)ha->brd)->u.ic.Sema0);
+ else if (ha->type == GDT_PCINEW)
+ gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
+ else if (ha->type == GDT_PCIMPR)
+ gdtsema0 = (int)readb(&((gdt6m_dpram_str *)ha->brd)->i960r.sema0_reg);
+
+ return (gdtsema0 & 1);
+}
+
+
+static int gdth_get_cmd_index(int hanum)
+{
+ register gdth_ha_str *ha;
+ int i;
+
+ TRACE(("gdth_get_cmd_index() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ for (i=0; i<GDTH_MAXCMDS; ++i) {
+ if (gdth_cmd_tab[i][hanum].cmnd == UNUSED_CMND) {
+ gdth_cmd_tab[i][hanum].cmnd = ha->pccb->RequestBuffer;
+ gdth_cmd_tab[i][hanum].service = ha->pccb->Service;
+ ha->pccb->CommandIndex = (ulong)i+2;
+ return (i+2);
+ }
+ }
+ return 0;
+}
+
+
+static void gdth_set_sema0(int hanum)
+{
+ register gdth_ha_str *ha;
+
+ TRACE(("gdth_set_sema0() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ if (ha->type == GDT_EISA)
+ outb(1, ha->bmic + SEMA0REG);
+ else if (ha->type == GDT_ISA)
+ writeb(1, &((gdt2_dpram_str *)ha->brd)->u.ic.Sema0);
+ else if (ha->type == GDT_PCI)
+ writeb(1, &((gdt6_dpram_str *)ha->brd)->u.ic.Sema0);
+ else if (ha->type == GDT_PCINEW)
+ outb(1, PTR2USHORT(&ha->plx->sema0_reg));
+ else if (ha->type == GDT_PCIMPR)
+ writeb(1, &((gdt6m_dpram_str *)ha->brd)->i960r.sema0_reg);
+
+}
+
+
+static void gdth_copy_command(int hanum)
+{
+ register gdth_ha_str *ha;
+ register gdth_cmd_str *cmd_ptr;
+ register gdt6m_dpram_str *dp6m_ptr;
+ register gdt6c_dpram_str *dp6c_ptr;
+ gdt6_dpram_str *dp6_ptr;
+ gdt2_dpram_str *dp2_ptr;
+ ushort cp_count,dp_offset,cmd_no;
+
+ TRACE(("gdth_copy_command() hanum %d\n",hanum));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cp_count = ha->cmd_len;
+ dp_offset= ha->cmd_offs_dpmem;
+ cmd_no = ha->cmd_cnt;
+ cmd_ptr = ha->pccb;
+
+ ++ha->cmd_cnt;
+ if (ha->type == GDT_EISA)
+ return; /* no DPMEM, no copy */
+
+ /* set cpcount dword aligned */
+ if (cp_count & 3)
+ cp_count += (4 - (cp_count & 3));
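+    /* e.g. a 13-byte command length is padded to 16 bytes
+       (13 & 3 == 1, so 4 - 1 == 3 padding bytes are added) */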
+
+ ha->cmd_offs_dpmem += cp_count;
+
+ /* set offset and service, copy command to DPMEM */
+ if (ha->type == GDT_ISA) {
+ dp2_ptr = (gdt2_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp2_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCI) {
+ dp6_ptr = (gdt6_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCINEW) {
+ dp6c_ptr = (gdt6c_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ } else if (ha->type == GDT_PCIMPR) {
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ writew(dp_offset + DPMEM_COMMAND_OFFSET,
+ &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
+ writew((ushort)cmd_ptr->Service,
+ &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
+ memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
+ }
+}
+
+
+static void gdth_release_event(int hanum)
+{
+ register gdth_ha_str *ha;
+
+#ifdef GDTH_STATISTICS
+ ulong i,j;
+ for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
+ if (gdth_cmd_tab[j][hanum].cmnd != UNUSED_CMND)
+ ++i;
+ }
+ if (max_index < i) {
+ max_index = i;
+ TRACE3(("GDT: max_index = %d\n",(ushort)i));
+ }
+#endif
+
+ TRACE(("gdth_release_event() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ if (ha->pccb->OpCode == GDT_INIT)
+ ha->pccb->Service |= 0x80;
+
+ if (ha->type == GDT_EISA) {
+ outb(ha->pccb->Service, ha->bmic + LDOORREG);
+ if (ha->pccb->OpCode == GDT_INIT) /* store DMA buffer */
+ outl((ulong)ha->pccb, ha->bmic + MAILBOXREG);
+ } else if (ha->type == GDT_ISA)
+ writeb(0, &((gdt2_dpram_str *)ha->brd)->io.event);
+ else if (ha->type == GDT_PCI)
+ writeb(0, &((gdt6_dpram_str *)ha->brd)->io.event);
+ else if (ha->type == GDT_PCINEW)
+ outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
+ else if (ha->type == GDT_PCIMPR)
+ writeb(1, &((gdt6m_dpram_str *)ha->brd)->i960r.ldoor_reg);
+}
+
+
+static int gdth_wait(int hanum,int index,ulong time)
+{
+ gdth_ha_str *ha;
+ int answer_found = FALSE;
+
+ TRACE(("gdth_wait() hanum %d index %d time %ld\n",hanum,index,time));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ if (index == 0)
+ return 1; /* no wait required */
+
+ gdth_from_wait = TRUE;
+ do {
+#if LINUX_VERSION_CODE >= 0x010346
+ gdth_interrupt((int)ha->irq,NULL,NULL);
+#else
+ gdth_interrupt((int)ha->irq,NULL);
+#endif
+ if (wait_hanum==hanum && wait_index==index) {
+ answer_found = TRUE;
+ break;
+ }
+ gdth_delay(1);
+ } while (--time);
+ gdth_from_wait = FALSE;
+
+ while (gdth_test_busy(hanum))
+ gdth_delay(0);
+
+ return (answer_found);
+}
+
+
+static int gdth_internal_cmd(int hanum,unchar service,ushort opcode,ulong p1,
+ ulong p2,ulong p3)
+{
+ register gdth_ha_str *ha;
+ register gdth_cmd_str *cmd_ptr;
+ int retries,index;
+
+ TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cmd_ptr = ha->pccb;
+ memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));
+
+ /* make command */
+ for (retries = INIT_RETRIES;;) {
+ cmd_ptr->Service = service;
+ cmd_ptr->RequestBuffer = INTERNAL_CMND;
+ if (!(index=gdth_get_cmd_index(hanum))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+ gdth_set_sema0(hanum);
+ cmd_ptr->OpCode = opcode;
+ cmd_ptr->BoardNode = LOCALBOARD;
+ if (service == CACHESERVICE) {
+ if (opcode == GDT_IOCTL) {
+ cmd_ptr->u.ioctl.subfunc = p1;
+ cmd_ptr->u.ioctl.channel = p2;
+ cmd_ptr->u.ioctl.param_size = (ushort)p3;
+ cmd_ptr->u.ioctl.p_param = virt_to_bus(ha->pscratch);
+ } else {
+ cmd_ptr->u.cache.DeviceNo = (ushort)p1;
+ cmd_ptr->u.cache.BlockNo = p2;
+ }
+ } else if (service == SCSIRAWSERVICE) {
+ cmd_ptr->u.raw.direction = p1;
+ cmd_ptr->u.raw.bus = (unchar)p2;
+ cmd_ptr->u.raw.target = (unchar)p3;
+ cmd_ptr->u.raw.lun = 0;
+ }
+ ha->cmd_len = sizeof(gdth_cmd_str);
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_cnt = 0;
+ gdth_copy_command(hanum);
+ gdth_release_event(hanum);
+ gdth_delay(20);
+ if (!gdth_wait(hanum,index,INIT_TIMEOUT)) {
+ printk("GDT: Initialization error (timeout service %d)\n",service);
+ return 0;
+ }
+ if (ha->status != S_BSY || --retries == 0)
+ break;
+ gdth_delay(1);
+ }
+
+ return (ha->status != S_OK ? 0:1);
+}
+
+
+/* search for devices */
+
+__initfunc (static int gdth_search_drives(int hanum))
+{
+ register gdth_ha_str *ha;
+ ushort cdev_cnt,i;
+ unchar b,t,pos_found;
+ ulong drv_cyls, drv_hds, drv_secs;
+ ulong bus_no;
+ gdth_getch_str *chn;
+ gdth_iochan_str *ioc;
+
+ TRACE(("gdth_search_drives() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ /* initialize controller services, at first: screen service */
+ if (!gdth_internal_cmd(hanum,SCREENSERVICE,GDT_INIT,0,0,0)) {
+ printk("GDT: Initialization error screen service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
+
+ /* initialize cache service */
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_INIT,LINUX_OS,0,0)) {
+ printk("GDT: Initialization error cache service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
+ cdev_cnt = (ushort)ha->info;
+
+ /* mount all cache devices */
+ gdth_internal_cmd(hanum,CACHESERVICE,GDT_MOUNT,0xffff,1,0);
+ TRACE2(("gdth_search_drives(): mountall CACHESERVICE OK\n"));
+
+ /* initialize cache service after mountall */
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_INIT,LINUX_OS,0,0)) {
+ printk("GDT: Initialization error cache service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives() CACHES. init. after mountall\n"));
+ cdev_cnt = (ushort)ha->info;
+
+ /* detect number of SCSI buses - try new IOCTL */
+ ioc = (gdth_iochan_str *)DMADATA(gdth_ctr_tab[hanum]);
+ ioc->version = -1UL;
+ ioc->list_entries = MAXBUS;
+ ioc->first_chan = 0;
+ ioc->last_chan = MAXBUS-1;
+ ioc->list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,GET_IOCHAN_DESC,
+ INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
+ TRACE2(("GET_IOCHAN_DESC supported!\n"));
+ ha->bus_cnt = ioc->chan_count;
+ for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no)
+ if (ioc->list[bus_no].proc_id < MAXID)
+ ha->id[bus_no][ioc->list[bus_no].proc_id].type = SIOP_DTYP;
+ } else {
+ /* old method */
+ chn = (gdth_getch_str *)DMADATA(gdth_ctr_tab[hanum]);
+ for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
+ chn->channel_no = bus_no;
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,
+ SCSI_CHAN_CNT | L_CTRL_PATTERN,
+ IO_CHANNEL | INVALID_CHANNEL,
+ sizeof(gdth_getch_str))) {
+ if (bus_no == 0) {
+ printk("GDT: Error detecting SCSI channel count (0x%x)\n",
+ ha->status);
+ return 0;
+ }
+ break;
+ }
+ if (chn->siop_id < MAXID)
+ ha->id[bus_no][chn->siop_id].type = SIOP_DTYP;
+ }
+ ha->bus_cnt = (unchar)bus_no;
+ }
+ TRACE2(("gdth_search_drives() %d SCSI channels\n",ha->bus_cnt));
+
+ /* read cache configuration */
+ if (!gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,CACHE_INFO,
+ INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
+ printk("GDT: Initialization error cache service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ ha->cpar = ((gdth_cinfo_str *)DMADATA(gdth_ctr_tab[hanum]))->cpar;
+ TRACE2(("gdth_search_drives() cinfo: vs %lx sta %d str %d dw %d b %d\n",
+ ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
+ ha->cpar.write_back,ha->cpar.block_size));
+
+ /* read board info, fill ctr_name[] */
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_IOCTL,BOARD_INFO,
+ INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
+ TRACE2(("BOARD_INFO supported!\n"));
+ strcpy(ha->ctr_name, ((gdth_binfo_str *)DMADATA(gdth_ctr_tab[hanum]))->type_string);
+ } else {
+ strcpy(ha->ctr_name, gdth_ctr_name(hanum));
+ }
+ TRACE2(("Controller name: %s\n",ha->ctr_name));
+
+ /* initialize raw service */
+ if (!gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_INIT,0,0,0)) {
+ printk("GDT: Initialization error raw service (code %d)\n",
+ ha->status);
+ return 0;
+ }
+ TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
+
+ /* set/get features raw service (scatter/gather) */
+ ha->raw_feat = 0;
+ if (gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_SET_FEAT,SCATTER_GATHER,
+ 0,0)) {
+ TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
+ if (gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_GET_FEAT,0,0,0)) {
+ TRACE2(("gdth_search_dr(): get feat RAWSERVICE %ld\n",
+ ha->info));
+ ha->raw_feat = (ushort)ha->info;
+ }
+ }
+
+ /* set/get features cache service (equal to raw service) */
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_SET_FEAT,0,
+ SCATTER_GATHER,0)) {
+ TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_GET_FEAT,0,0,0)) {
+ TRACE2(("gdth_search_dr(): get feat CACHESERV. %ld\n",
+ ha->info));
+ ha->cache_feat = (ushort)ha->info;
+ }
+ }
+
+ /* reserve drives for raw service */
+ for (i = 0; reserve_list[i].hanum != 0xff; ++i) {
+ if (reserve_list[i].hanum < MAXHA && reserve_list[i].hanum == hanum &&
+ reserve_list[i].bus < MAXBUS && reserve_list[i].id < MAXID) {
+ TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d\n",
+ reserve_list[i].hanum, reserve_list[i].bus,
+ reserve_list[i].id));
+ if (!gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_RESERVE,0,
+ reserve_list[i].bus, reserve_list[i].id)) {
+ printk("GDT: Error raw service (RESERVE, code %d)\n",
+ ha->status);
+ }
+ }
+ }
+
+ /* scanning for raw devices */
+ for (b=0; b<ha->bus_cnt; ++b) {
+ for (t=0; t<MAXID; ++t) {
+ TRACE(("gdth_search_drives() rawd. bus %d id %d\n",b,t));
+ if (ha->id[b][t].type != SIOP_DTYP &&
+ gdth_internal_cmd(hanum,SCSIRAWSERVICE,GDT_INFO,0,b,t)) {
+ ha->id[b][t].type = RAW_DTYP;
+ }
+ }
+ }
+
+ /* scanning for cache devices */
+ for (i=0; i<cdev_cnt && i<MAX_HDRIVES; ++i) {
+ TRACE(("gdth_search_drives() cachedev. %d\n",i));
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_INFO,i,0,0)) {
+ /* dynamic relation between host drive number and Bus/ID */
+ /* search free position */
+ pos_found = FALSE;
+ for (b=0,t=0; b<ha->bus_cnt; ++b) {
+ for (t=0; t<MAXID; ++t) {
+ if (ha->id[b][t].type == EMPTY_DTYP) {
+ pos_found = TRUE;
+ break;
+ }
+ }
+ if (pos_found)
+ break;
+ }
+ TRACE(("gdth_search_dr() drive %d free pos at bus/id %d/%d\n",
+ i,b,t));
+
+ ha->id[b][t].type = CACHE_DTYP;
+ ha->id[b][t].devtype = 0;
+ ha->id[b][t].size = ha->info;
+ ha->id[b][t].hostdrive = i;
+
+ /* evaluate mapping (sectors per head, heads per cylinder) */
+ ha->id[b][t].size &= ~SECS32;
+ if (ha->info2 == 0) {
+ drv_cyls = ha->id[b][t].size /HEADS/SECS;
+ if (drv_cyls <= MAXCYLS) {
+ drv_hds = HEADS;
+ drv_secs= SECS;
+ } else { /* too high for 64*32 */
+ drv_cyls = ha->id[b][t].size /MEDHEADS/MEDSECS;
+ if (drv_cyls <= MAXCYLS) {
+ drv_hds = MEDHEADS;
+ drv_secs= MEDSECS;
+ } else { /* too high for 127*63 */
+ drv_cyls = ha->id[b][t].size /BIGHEADS/BIGSECS;
+ drv_hds = BIGHEADS;
+ drv_secs= BIGSECS;
+ }
+ }
+ } else {
+ drv_hds = ha->info2 & 0xff;
+ drv_secs = (ha->info2 >> 8) & 0xff;
+ drv_cyls = ha->id[b][t].size /drv_hds/drv_secs;
+ }
+ ha->id[b][t].heads = (unchar)drv_hds;
+ ha->id[b][t].secs = (unchar)drv_secs;
+ /* round size */
+ ha->id[b][t].size = drv_cyls * drv_hds * drv_secs;
+ TRACE2(("gdth_search_dr() cdr. %d size %ld hds %ld scs %ld\n",
+ i,ha->id[b][t].size,drv_hds,drv_secs));
+
+            /* get information about the device */
+ if (gdth_internal_cmd(hanum,CACHESERVICE,GDT_DEVTYPE,i,
+ 0,0)) {
+ TRACE(("gdth_search_dr() cache drive %d devtype %ld\n",
+ i,ha->info));
+ ha->id[b][t].devtype = (ushort)ha->info;
+ }
+ }
+ }
+
+ TRACE(("gdth_search_drives() OK\n"));
+ return 1;
+}
+
+
+/* command queueing/sending functions */
+
+static void gdth_putq(int hanum,Scsi_Cmnd *scp,unchar priority)
+{
+ register gdth_ha_str *ha;
+ register Scsi_Cmnd *pscp;
+ register Scsi_Cmnd *nscp;
+ ulong flags;
+ unchar b, t;
+
+ TRACE(("gdth_putq() priority %d\n",priority));
+ save_flags(flags);
+ cli();
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ scp->SCp.this_residual = (int)priority;
+ gdth_update_timeout(hanum, scp, scp->timeout_per_command * 6);
+#if LINUX_VERSION_CODE >= 0x020000
+ b = scp->channel;
+#else
+ b = NUMDATA(nscp->host)->busnum;
+#endif
+ t = scp->target;
+#if LINUX_VERSION_CODE >= 0x010300
+ if (priority >= DEFAULT_PRI && ha->id[b][t].lock) {
+ TRACE2(("gdth_putq(): locked IO -> update_timeout()\n"));
+ scp->SCp.buffers_residual = gdth_update_timeout(hanum, scp, 0);
+ }
+#endif
+
+ if (ha->req_first==NULL) {
+ ha->req_first = scp; /* queue was empty */
+ scp->SCp.ptr = NULL;
+ } else { /* queue not empty */
+ pscp = ha->req_first;
+ nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ /* priority: 0-highest,..,0xff-lowest */
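+        /* e.g. with priorities 0,2,5 already queued, a new command of
+           priority 3 gets linked in between the 2 and the 5 entry */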
+ while (nscp && (unchar)nscp->SCp.this_residual <= priority) {
+ pscp = nscp;
+ nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+ }
+ pscp->SCp.ptr = (char *)scp;
+ scp->SCp.ptr = (char *)nscp;
+ }
+ restore_flags(flags);
+
+#ifdef GDTH_STATISTICS
+ flags = 0;
+ for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ ++flags;
+ if (max_rq < flags) {
+ max_rq = flags;
+ TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq));
+ }
+#endif
+}
+
+static void gdth_next(int hanum)
+{
+ register gdth_ha_str *ha;
+ register Scsi_Cmnd *pscp;
+ register Scsi_Cmnd *nscp;
+ unchar b, t, next_cmd, firsttime;
+ ushort hdrive;
+ ulong flags;
+ int cmd_index;
+
+ TRACE(("gdth_next() hanum %d\n",hanum));
+ save_flags(flags);
+ cli();
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
+ next_cmd = firsttime = TRUE;
+ cmd_index = 0;
+
+ for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
+ if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
+ pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
+#if LINUX_VERSION_CODE >= 0x020000
+ b = nscp->channel;
+#else
+ b = NUMDATA(nscp->host)->busnum;
+#endif
+ t = nscp->target;
+ if (nscp->SCp.this_residual < DEFAULT_PRI || !ha->id[b][t].lock) {
+
+ if (firsttime) {
+ if (gdth_test_busy(hanum)) { /* controller busy ? */
+ TRACE(("gdth_next() controller %d busy !\n",hanum));
+ if (!gdth_polling) {
+ restore_flags(flags);
+ return;
+ }
+ while (gdth_test_busy(hanum))
+ gdth_delay(1);
+ }
+ firsttime = FALSE;
+ }
+
+#if LINUX_VERSION_CODE >= 0x010300
+ if (nscp->done == gdth_scsi_done) {
+ if (!(cmd_index=gdth_special_cmd(hanum,nscp,b)))
+ next_cmd = FALSE;
+ } else
+#endif
+ if (ha->id[b][t].type != CACHE_DTYP) {
+ if (!(cmd_index=gdth_fill_raw_cmd(hanum,nscp,b)))
+ next_cmd = FALSE;
+ } else {
+ hdrive = ha->id[b][t].hostdrive;
+ switch (nscp->cmnd[0]) {
+ case TEST_UNIT_READY:
+ case INQUIRY:
+ case REQUEST_SENSE:
+ case READ_CAPACITY:
+ case VERIFY:
+ case START_STOP:
+ case MODE_SENSE:
+ TRACE2(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+ nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+ nscp->cmnd[4],nscp->cmnd[5]));
+ gdth_internal_cache_cmd(hanum,nscp,b,&flags);
+ break;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ TRACE2(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+ nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+ nscp->cmnd[4],nscp->cmnd[5]));
+ if ( (nscp->cmnd[4]&1) && !(ha->id[b][t].devtype&1) ) {
+ TRACE2(("Prevent r. nonremov. drive->do nothing\n"));
+ nscp->result = DID_OK << 16;
+ restore_flags( flags );
+ if (!nscp->SCp.have_data_in)
+ nscp->SCp.have_data_in++;
+ else
+ nscp->scsi_done(nscp);
+ save_flags( flags );
+ cli();
+ } else {
+ nscp->cmnd[3] = (ha->id[b][t].devtype&1) ? 1:0;
+ TRACE2(("Prevent/allow r. %d rem. drive %d\n",
+ nscp->cmnd[4],nscp->cmnd[3]));
+ if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,hdrive)))
+ next_cmd = FALSE;
+ }
+ break;
+
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ if (!(cmd_index=gdth_fill_cache_cmd(hanum,nscp,hdrive)))
+ next_cmd = FALSE;
+ break;
+
+ default:
+ TRACE2(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
+ nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
+ nscp->cmnd[4],nscp->cmnd[5]));
+ printk("GDT: Unknown SCSI command 0x%x to cache service !\n",
+ nscp->cmnd[0]);
+ nscp->result = DID_ABORT << 16;
+ restore_flags( flags );
+ if (!nscp->SCp.have_data_in)
+ nscp->SCp.have_data_in++;
+ else
+ nscp->scsi_done(nscp);
+ save_flags( flags );
+ cli();
+ break;
+ }
+ }
+
+ if (!next_cmd)
+ break;
+ if (nscp == ha->req_first)
+ ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
+ else
+ pscp->SCp.ptr = nscp->SCp.ptr;
+ if (gdth_polling)
+ break;
+ }
+ }
+
+ if (ha->cmd_cnt > 0) {
+ gdth_release_event(hanum);
+ }
+
+ restore_flags(flags);
+
+ if (gdth_polling && ha->cmd_cnt > 0) {
+ if (!gdth_wait(hanum,cmd_index,POLL_TIMEOUT))
+ printk("GDT: Controller %d: Command %d timed out !\n",
+ hanum,cmd_index);
+ }
+}
+
+static void gdth_copy_internal_data(Scsi_Cmnd *scp,char *buffer,ushort count)
+{
+ ushort cpcount,i;
+ ushort cpsum,cpnow;
+ struct scatterlist *sl;
+
+ cpcount = count<=(ushort)scp->bufflen ? count:(ushort)scp->bufflen;
+ if (scp->use_sg) {
+ sl = (struct scatterlist *)scp->request_buffer;
+ for (i=0,cpsum=0; i<scp->use_sg; ++i,++sl) {
+ cpnow = (ushort)sl->length;
+ TRACE(("copy_internal() now %d sum %d count %d %d\n",
+ cpnow,cpsum,cpcount,(ushort)scp->bufflen));
+ if (cpsum+cpnow > cpcount)
+ cpnow = cpcount - cpsum;
+ cpsum += cpnow;
+ memcpy((char*)sl->address,buffer,cpnow);
+ if (cpsum == cpcount)
+ break;
+ buffer += cpnow;
+ }
+ } else {
+ TRACE(("copy_internal() count %d\n",cpcount));
+ memcpy((char*)scp->request_buffer,buffer,cpcount);
+ }
+}
+
+static int gdth_internal_cache_cmd(int hanum,Scsi_Cmnd *scp,
+ unchar b,ulong *flags)
+{
+ register gdth_ha_str *ha;
+ ushort hdrive;
+ unchar t;
+ gdth_inq_data inq;
+ gdth_rdcap_data rdc;
+ gdth_sense_data sd;
+ gdth_modep_data mpd;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ t = scp->target;
+ hdrive = ha->id[b][t].hostdrive;
+ TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
+ scp->cmnd[0],hdrive));
+
+ if (scp->lun !=0)
+ scp->result = DID_BAD_TARGET << 16;
+ else {
+ switch (scp->cmnd[0]) {
+ case TEST_UNIT_READY:
+ case VERIFY:
+ case START_STOP:
+ TRACE2(("Test/Verify/Start hdrive %d\n",hdrive));
+ break;
+
+ case INQUIRY:
+ TRACE2(("Inquiry hdrive %d devtype %d\n",
+ hdrive,ha->id[b][t].devtype));
+ inq.type_qual = (ha->id[b][t].devtype&4) ? TYPE_ROM:TYPE_DISK;
+            /* you can set all disks to removable here, if you want to do
+               a flush using the ALLOW_MEDIUM_REMOVAL command */
+ inq.modif_rmb = ha->id[b][t].devtype&1 ? 0x80:0x00;
+ inq.version = 2;
+ inq.resp_aenc = 2;
+ inq.add_length= 32;
+ strcpy(inq.vendor,"ICP ");
+ sprintf(inq.product,"Host Drive #%02d",hdrive);
+ strcpy(inq.revision," ");
+ gdth_copy_internal_data(scp,(char*)&inq,sizeof(gdth_inq_data));
+ break;
+
+ case REQUEST_SENSE:
+ TRACE2(("Request sense hdrive %d\n",hdrive));
+ sd.errorcode = 0x70;
+ sd.segno = 0x00;
+ sd.key = NO_SENSE;
+ sd.info = 0;
+ sd.add_length= 0;
+ gdth_copy_internal_data(scp,(char*)&sd,sizeof(gdth_sense_data));
+ break;
+
+ case MODE_SENSE:
+ TRACE2(("Mode sense hdrive %d\n",hdrive));
+ memset((char*)&mpd,0,sizeof(gdth_modep_data));
+ mpd.hd.data_length = sizeof(gdth_modep_data);
+ mpd.hd.dev_par = (ha->id[b][t].devtype&2) ? 0x80:0;
+ mpd.hd.bd_length = sizeof(mpd.bd);
+ mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
+ mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
+ mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
+ gdth_copy_internal_data(scp,(char*)&mpd,sizeof(gdth_modep_data));
+ break;
+
+ case READ_CAPACITY:
+ TRACE2(("Read capacity hdrive %d\n",hdrive));
+ rdc.last_block_no = ntohl(ha->id[b][t].size-1);
+ rdc.block_length = ntohl(SECTOR_SIZE);
+ gdth_copy_internal_data(scp,(char*)&rdc,sizeof(gdth_rdcap_data));
+ break;
+
+ default:
+ TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
+ break;
+ }
+ scp->result = DID_OK << 16;
+ }
+
+ restore_flags(*flags);
+ if (!scp->SCp.have_data_in)
+ scp->SCp.have_data_in++;
+ else
+ scp->scsi_done(scp);
+ save_flags(*flags);
+ cli();
+ return 1;
+}
+
+static int gdth_fill_cache_cmd(int hanum,Scsi_Cmnd *scp,ushort hdrive)
+{
+ register gdth_ha_str *ha;
+ register gdth_cmd_str *cmdp;
+ struct scatterlist *sl;
+ ushort i;
+ int cmd_index;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cmdp = ha->pccb;
+ TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
+ scp->cmnd[0],scp->cmd_len,hdrive));
+
+ if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+ return 0;
+
+ cmdp->Service = CACHESERVICE;
+ cmdp->RequestBuffer = scp;
+ /* search free command index */
+ if (!(cmd_index=gdth_get_cmd_index(hanum))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+ /* if it's the first command, set command semaphore */
+ if (ha->cmd_cnt == 0)
+ gdth_set_sema0(hanum);
+
+ /* fill command */
+ if (scp->cmnd[0]==ALLOW_MEDIUM_REMOVAL) {
+ if (scp->cmnd[4] & 1) /* prevent ? */
+ cmdp->OpCode = GDT_MOUNT;
+ else if (scp->cmnd[3] & 1) /* removable drive ? */
+ cmdp->OpCode = GDT_UNMOUNT;
+ else
+ cmdp->OpCode = GDT_FLUSH;
+ } else {
+ if (scp->cmnd[0]==WRITE_6 || scp->cmnd[0]==WRITE_10) {
+ if (gdth_write_through)
+ cmdp->OpCode = GDT_WRITE_THR;
+ else
+ cmdp->OpCode = GDT_WRITE;
+ } else {
+ cmdp->OpCode = GDT_READ;
+ }
+ }
+
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.cache.DeviceNo = hdrive;
+
+ if (scp->cmnd[0]==ALLOW_MEDIUM_REMOVAL) {
+ cmdp->u.cache.BlockNo = 1;
+ cmdp->u.cache.sg_canz = 0;
+ } else {
+ if (scp->cmd_len != 6) {
+ cmdp->u.cache.BlockNo = ntohl(*(ulong*)&scp->cmnd[2]);
+ cmdp->u.cache.BlockCnt= (ulong)ntohs(*(ushort*)&scp->cmnd[7]);
+ } else {
+ cmdp->u.cache.BlockNo = ntohl(*(ulong*)&scp->cmnd[0]) & 0x001fffffUL;
+ cmdp->u.cache.BlockCnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
+ }
+
+ if (scp->use_sg) {
+ cmdp->u.cache.DestAddr= -1UL;
+ sl = (struct scatterlist *)scp->request_buffer;
+ for (i=0; i<scp->use_sg; ++i,++sl) {
+ cmdp->u.cache.sg_lst[i].sg_ptr = virt_to_bus(sl->address);
+ cmdp->u.cache.sg_lst[i].sg_len = (ulong)sl->length;
+ }
+ cmdp->u.cache.sg_canz = (ulong)i;
+
+#ifdef GDTH_STATISTICS
+ if (max_sg < (ulong)i) {
+ max_sg = (ulong)i;
+ TRACE3(("GDT: max_sg = %d\n",i));
+ }
+#endif
+ if (i<GDTH_MAXSG)
+ cmdp->u.cache.sg_lst[i].sg_len = 0;
+ } else {
+ if (ha->cache_feat & SCATTER_GATHER) {
+ cmdp->u.cache.DestAddr = -1UL;
+ cmdp->u.cache.sg_canz = 1;
+ cmdp->u.cache.sg_lst[0].sg_ptr = virt_to_bus(scp->request_buffer);
+ cmdp->u.cache.sg_lst[0].sg_len = scp->request_bufflen;
+ cmdp->u.cache.sg_lst[1].sg_len = 0;
+ } else {
+ cmdp->u.cache.DestAddr = virt_to_bus(scp->request_buffer);
+ cmdp->u.cache.sg_canz= 0;
+ }
+ }
+ }
+ TRACE(("cache cmd: addr. %lx sganz %lx sgptr0 %lx sglen0 %lx\n",
+ cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
+ cmdp->u.cache.sg_lst[0].sg_ptr,
+ cmdp->u.cache.sg_lst[0].sg_len));
+ TRACE(("cache cmd: cmd %d blockno. %ld, blockcnt %ld\n",
+ cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
+
+ /* evaluate command size, check space */
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
+ (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
+ if (ha->cmd_len & 3)
+ ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+ if (ha->cmd_cnt > 0) {
+ if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+ ha->ic_all_size) {
+ TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
+ gdth_cmd_tab[cmd_index-2][hanum].cmnd = UNUSED_CMND;
+ return 0;
+ }
+ }
+
+ /* copy command */
+ gdth_copy_command(hanum);
+ return cmd_index;
+}
+
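+/* gdth_fill_raw_cmd() passes a SCSI CDB through the raw service to a
+   physical device on bus b.  The CDB is copied verbatim (up to 12 bytes),
+   the transfer direction is looked up in gdth_direction_tab, and sense data
+   is returned directly into scp->sense_buffer.  Scatter/gather is handled
+   the same way as in the cache service path above. */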
+static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b)
+{
+ register gdth_ha_str *ha;
+ register gdth_cmd_str *cmdp;
+ struct scatterlist *sl;
+ ushort i;
+ int cmd_index;
+ unchar t,l;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ t = scp->target;
+ l = scp->lun;
+ cmdp = ha->pccb;
+ TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
+ scp->cmnd[0],b,t,l));
+
+ if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+ return 0;
+
+ cmdp->Service = SCSIRAWSERVICE;
+ cmdp->RequestBuffer = scp;
+ /* search free command index */
+ if (!(cmd_index=gdth_get_cmd_index(hanum))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+ /* if it's the first command, set command semaphore */
+ if (ha->cmd_cnt == 0)
+ gdth_set_sema0(hanum);
+
+ /* fill command */
+ cmdp->OpCode = GDT_WRITE; /* always */
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.raw.reserved = 0;
+ cmdp->u.raw.mdisc_time = 0;
+ cmdp->u.raw.mcon_time = 0;
+ cmdp->u.raw.clen = scp->cmd_len;
+ cmdp->u.raw.target = t;
+ cmdp->u.raw.lun = l;
+ cmdp->u.raw.bus = b;
+ cmdp->u.raw.priority = 0;
+ cmdp->u.raw.link_p = NULL;
+ cmdp->u.raw.sdlen = scp->request_bufflen;
+ cmdp->u.raw.sense_len = 16;
+ cmdp->u.raw.sense_data = virt_to_bus(scp->sense_buffer);
+ cmdp->u.raw.direction =
+ gdth_direction_tab[scp->cmnd[0]]==DOU ? DATA_OUT : DATA_IN;
+ memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
+
+ if (scp->use_sg) {
+ cmdp->u.raw.sdata = -1UL;
+ sl = (struct scatterlist *)scp->request_buffer;
+ for (i=0; i<scp->use_sg; ++i,++sl) {
+ cmdp->u.raw.sg_lst[i].sg_ptr = virt_to_bus(sl->address);
+ cmdp->u.raw.sg_lst[i].sg_len = (ulong)sl->length;
+ }
+ cmdp->u.raw.sg_ranz = (ulong)i;
+
+#ifdef GDTH_STATISTICS
+ if (max_sg < (ulong)i) {
+ max_sg = (ulong)i;
+ TRACE3(("GDT: max_sg = %d\n",i));
+ }
+#endif
+ if (i<GDTH_MAXSG)
+ cmdp->u.raw.sg_lst[i].sg_len = 0;
+ } else {
+ if (ha->raw_feat & SCATTER_GATHER) {
+ cmdp->u.raw.sdata = -1UL;
+ cmdp->u.raw.sg_ranz= 1;
+ cmdp->u.raw.sg_lst[0].sg_ptr = virt_to_bus(scp->request_buffer);
+ cmdp->u.raw.sg_lst[0].sg_len = scp->request_bufflen;
+ cmdp->u.raw.sg_lst[1].sg_len = 0;
+ } else {
+ cmdp->u.raw.sdata = virt_to_bus(scp->request_buffer);
+ cmdp->u.raw.sg_ranz= 0;
+ }
+ }
+ TRACE(("raw cmd: addr. %lx sganz %lx sgptr0 %lx sglen0 %lx\n",
+ cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
+ cmdp->u.raw.sg_lst[0].sg_ptr,
+ cmdp->u.raw.sg_lst[0].sg_len));
+
+ /* evaluate command size, check space */
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
+ (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
+ if (ha->cmd_len & 3)
+ ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+ if (ha->cmd_cnt > 0) {
+ if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+ ha->ic_all_size) {
+ TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
+ gdth_cmd_tab[cmd_index-2][hanum].cmnd = UNUSED_CMND;
+ return 0;
+ }
+ }
+
+ /* copy command */
+ gdth_copy_command(hanum);
+ return cmd_index;
+}
+
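+/* gdth_special_cmd() handles internally generated requests whose
+   request_buffer already holds a complete gdth_cmd_str (the driver's own
+   flush and reset commands appear to take this path): the structure is
+   copied into the controller command area as-is and only the command
+   length is recomputed per service. */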
+static int gdth_special_cmd(int hanum,Scsi_Cmnd *scp,unchar b)
+{
+ register gdth_ha_str *ha;
+ register gdth_cmd_str *cmdp;
+ int cmd_index;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cmdp= ha->pccb;
+ TRACE2(("gdth_special_cmd(): "));
+
+ if (ha->type==GDT_EISA && ha->cmd_cnt>0)
+ return 0;
+
+ memcpy( cmdp, scp->request_buffer, sizeof(gdth_cmd_str));
+ cmdp->RequestBuffer = scp;
+
+ /* search free command index */
+ if (!(cmd_index=gdth_get_cmd_index(hanum))) {
+ TRACE(("GDT: No free command index found\n"));
+ return 0;
+ }
+
+ /* if it's the first command, set command semaphore */
+ if (ha->cmd_cnt == 0)
+ gdth_set_sema0(hanum);
+
+ /* evaluate command size, check space */
+ if (cmdp->OpCode == GDT_IOCTL) {
+ TRACE2(("IOCTL\n"));
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong);
+ } else if (cmdp->Service == CACHESERVICE) {
+ TRACE2(("cache command %d\n",cmdp->OpCode));
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
+ } else if (cmdp->Service == SCSIRAWSERVICE) {
+ TRACE2(("raw command %d/%d\n",cmdp->OpCode,cmdp->u.raw.cmd[0]));
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
+ }
+
+ if (ha->cmd_len & 3)
+ ha->cmd_len += (4 - (ha->cmd_len & 3));
+
+ if (ha->cmd_cnt > 0) {
+ if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
+ ha->ic_all_size) {
+ TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
+ gdth_cmd_tab[cmd_index-2][hanum].cmnd = UNUSED_CMND;
+ return 0;
+ }
+ }
+
+ /* copy command */
+ gdth_copy_command(hanum);
+ return cmd_index;
+}
+
+
+/* Controller event handling functions */
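+/* Events are kept in a fixed-size ring buffer (ebuffer[MAX_EVENTS]) indexed
+   by eoldidx (oldest entry) and elastidx (newest entry).  gdth_store_event()
+   coalesces an event identical to the newest entry by bumping its same_count
+   and timestamp; otherwise it advances elastidx, overwriting the oldest
+   entry when the ring is full. */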
+static gdth_evt_str *gdth_store_event(ushort source, ushort idx,
+ gdth_evt_data *evt)
+{
+ gdth_evt_str *e;
+ ulong flags;
+ struct timeval tv;
+
+ TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
+ if (source == 0) /* no source -> no event */
+ return 0;
+
+ save_flags(flags);
+ cli();
+ if (ebuffer[elastidx].event_source == source &&
+ ebuffer[elastidx].event_idx == idx &&
+ !memcmp((char *)&ebuffer[elastidx].event_data.eu,
+ (char *)&evt->eu, evt->size)) {
+ e = &ebuffer[elastidx];
+ do_gettimeofday(&tv);
+ e->last_stamp = tv.tv_sec;
+ ++e->same_count;
+ } else {
+ if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
+ ++elastidx;
+ if (elastidx == MAX_EVENTS)
+ elastidx = 0;
+ if (elastidx == eoldidx) { /* reached mark ? */
+ ++eoldidx;
+ if (eoldidx == MAX_EVENTS)
+ eoldidx = 0;
+ }
+ }
+ e = &ebuffer[elastidx];
+ e->event_source = source;
+ e->event_idx = idx;
+ do_gettimeofday(&tv);
+ e->first_stamp = e->last_stamp = tv.tv_sec;
+ e->same_count = 1;
+ e->event_data = *evt;
+ }
+ restore_flags(flags);
+ return e;
+}
+
+static int gdth_read_event(int handle, gdth_evt_str *estr)
+{
+ gdth_evt_str *e;
+ int eindex;
+ ulong flags;
+
+ TRACE2(("gdth_read_event() handle %d\n", handle));
+ save_flags(flags);
+ cli();
+ if (handle == -1)
+ eindex = eoldidx;
+ else
+ eindex = handle;
+ estr->event_source = 0;
+
+ if (eindex >= MAX_EVENTS) {
+ restore_flags(flags);
+ return eindex;
+ }
+ e = &ebuffer[eindex];
+ if (e->event_source != 0) {
+ if (eindex != elastidx) {
+ if (++eindex == MAX_EVENTS)
+ eindex = 0;
+ } else {
+ eindex = -1;
+ }
+ memcpy(estr, e, sizeof(gdth_evt_str));
+ }
+ restore_flags(flags);
+ return eindex;
+}
+
+static void gdth_readapp_event(unchar application, gdth_evt_str *estr)
+{
+ gdth_evt_str *e;
+ int eindex;
+ ulong flags;
+ unchar found = FALSE;
+
+ TRACE2(("gdth_readapp_event() app. %d\n", application));
+ save_flags(flags);
+ cli();
+ eindex = eoldidx;
+ for (;;) {
+ e = &ebuffer[eindex];
+ if (e->event_source == 0)
+ break;
+ if ((e->application & application) == 0) {
+ e->application |= application;
+ found = TRUE;
+ break;
+ }
+ if (eindex == elastidx)
+ break;
+ if (++eindex == MAX_EVENTS)
+ eindex = 0;
+ }
+ if (found)
+ memcpy(estr, e, sizeof(gdth_evt_str));
+ else
+ estr->event_source = 0;
+ restore_flags(flags);
+}
+
+static void gdth_clear_events()
+{
+ ulong flags;
+
+ TRACE(("gdth_clear_events()"));
+ save_flags(flags);
+ cli();
+
+ eoldidx = elastidx = 0;
+ ebuffer[0].event_source = 0;
+ restore_flags(flags);
+}
+
+
+/* SCSI interface functions */
+
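+/* On kernels 2.1.95 (0x02015F) and newer the interrupt handler is entered
+   through do_gdth_interrupt(), which takes io_request_lock before calling
+   the real handler; older kernels register gdth_interrupt() directly. */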
+#if LINUX_VERSION_CODE >= 0x02015F
+static void do_gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs)
+{
+ ulong flags;
+
+ spin_lock_irqsave(&io_request_lock, flags);
+ gdth_interrupt(irq, dev_id, regs);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+#endif
+
+#if LINUX_VERSION_CODE >= 0x010346
+static void gdth_interrupt(int irq,void *dev_id,struct pt_regs *regs)
+#else
+static void gdth_interrupt(int irq,struct pt_regs *regs)
+#endif
+{
+ register gdth_ha_str *ha;
+ gdt6m_dpram_str *dp6m_ptr;
+ gdt6_dpram_str *dp6_ptr;
+ gdt2_dpram_str *dp2_ptr;
+ Scsi_Cmnd *scp;
+ int hanum;
+ unchar IStatus;
+ ushort CmdStatus, Service = 0;
+ ulong InfoBytes, InfoBytes2 = 0;
+ gdth_evt_data dvr;
+
+ TRACE(("gdth_interrupt() IRQ %d\n",irq));
+
+ /* if polling and not from gdth_wait() -> return */
+ if (gdth_polling) {
+ if (!gdth_from_wait) {
+ return;
+ }
+ }
+
+ wait_index = 0;
+
+ /* search controller */
+ if ((hanum = gdth_get_status(&IStatus,irq)) == -1) {
+ /*
+ TRACE2(("gdth_interrupt(): Spurious interrupt received\n"));
+ */
+ return;
+ }
+
+#ifdef GDTH_STATISTICS
+ ++act_ints;
+#endif
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ if (ha->type == GDT_EISA) {
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ CmdStatus = inw(ha->bmic + MAILBOXREG+8);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+ if (IStatus == ASYNCINDEX) { /* async. event ? */
+ Service = inw(ha->bmic + MAILBOXREG+10);
+ InfoBytes2 = inl(ha->bmic + MAILBOXREG+4);
+ }
+ } else /* no error */
+ CmdStatus = S_OK;
+ InfoBytes = inl(ha->bmic + MAILBOXREG+12);
+ if (gdth_polling) /* init. -> more info */
+ InfoBytes2 = inl(ha->bmic + MAILBOXREG+4);
+ outb(0xff, ha->bmic + EDOORREG); /* acknowledge interrupt */
+ outb(0x00, ha->bmic + SEMA1REG); /* reset status semaphore */
+ } else if (ha->type == GDT_ISA) {
+ dp2_ptr = (gdt2_dpram_str *)ha->brd;
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ CmdStatus = readw(&dp2_ptr->u.ic.Status);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+ if (IStatus == ASYNCINDEX) { /* async. event ? */
+ Service = readw(&dp2_ptr->u.ic.Service);
+ InfoBytes2 = readl(&dp2_ptr->u.ic.Info[1]);
+ }
+ } else /* no error */
+ CmdStatus = S_OK;
+ InfoBytes = readl(&dp2_ptr->u.ic.Info[0]);
+ if (gdth_polling) /* init. -> more info */
+ InfoBytes2 = readl(&dp2_ptr->u.ic.Info[1]);
+ writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */
+ writeb(0, &dp2_ptr->u.ic.Cmd_Index); /* reset command index */
+ writeb(0, &dp2_ptr->io.Sema1); /* reset status semaphore */
+ } else if (ha->type == GDT_PCI) {
+ dp6_ptr = (gdt6_dpram_str *)ha->brd;
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ CmdStatus = readw(&dp6_ptr->u.ic.Status);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+ if (IStatus == ASYNCINDEX) { /* async. event ? */
+ Service = readw(&dp6_ptr->u.ic.Service);
+ InfoBytes2 = readl(&dp6_ptr->u.ic.Info[1]);
+ }
+ } else /* no error */
+ CmdStatus = S_OK;
+ InfoBytes = readl(&dp6_ptr->u.ic.Info[0]);
+ if (gdth_polling) /* init. -> more info */
+ InfoBytes2 = readl(&dp6_ptr->u.ic.Info[1]);
+ writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
+ writeb(0, &dp6_ptr->u.ic.Cmd_Index); /* reset command index */
+ writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */
+ } else if (ha->type == GDT_PCINEW) {
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ CmdStatus = inw(PTR2USHORT(&ha->plx->status));
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+ if (IStatus == ASYNCINDEX) { /* async. event ? */
+ Service = inw(PTR2USHORT(&ha->plx->service));
+ InfoBytes2 = inl(PTR2USHORT(&ha->plx->info[1]));
+ }
+ } else
+ CmdStatus = S_OK;
+
+ InfoBytes = inl(PTR2USHORT(&ha->plx->info[0]));
+ if (gdth_polling) /* init. -> more info */
+ InfoBytes2 = inl(PTR2USHORT(&ha->plx->info[1]));
+ outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
+ outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
+ } else if (ha->type == GDT_PCIMPR) {
+ dp6m_ptr = (gdt6m_dpram_str *)ha->brd;
+ if (IStatus & 0x80) { /* error flag */
+ IStatus &= ~0x80;
+ CmdStatus = readw(&dp6m_ptr->i960r.status);
+ TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,CmdStatus));
+ if (IStatus == ASYNCINDEX) { /* async. event ? */
+ Service = readw(&dp6m_ptr->i960r.service);
+ InfoBytes2 = readl(&dp6m_ptr->i960r.info[1]);
+ }
+ } else /* no error */
+ CmdStatus = S_OK;
+ InfoBytes = readl(&dp6m_ptr->i960r.info[0]);
+ if (gdth_polling) /* init. -> more info */
+ InfoBytes2 = readl(&dp6m_ptr->i960r.info[1]);
+ writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
+ writeb(0, &dp6m_ptr->i960r.sema1_reg);
+ } else {
+ TRACE2(("gdth_interrupt() unknown controller type\n"));
+ return;
+ }
+
+ TRACE(("gdth_interrupt() index %d stat %d info %ld\n",
+ IStatus,CmdStatus,InfoBytes));
+ ha->status = CmdStatus;
+ ha->info = InfoBytes;
+ ha->info2 = InfoBytes2;
+
+ if (gdth_from_wait) {
+ wait_hanum = hanum;
+ wait_index = (int)IStatus;
+ }
+
+ if (IStatus == ASYNCINDEX) {
+ TRACE2(("gdth_interrupt() async. event\n"));
+ gdth_async_event(hanum,Service);
+ } else {
+ if (IStatus == SPEZINDEX) {
+ TRACE2(("Service unknown or not initialized !\n"));
+ dvr.size = sizeof(dvr.eu.driver);
+ dvr.eu.driver.ionode = hanum;
+ gdth_store_event(ES_DRIVER, 4, &dvr);
+ return;
+ }
+ scp = gdth_cmd_tab[IStatus-2][hanum].cmnd;
+ Service = gdth_cmd_tab[IStatus-2][hanum].service;
+ gdth_cmd_tab[IStatus-2][hanum].cmnd = UNUSED_CMND;
+ if (scp == UNUSED_CMND) {
+ TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
+ dvr.size = sizeof(dvr.eu.driver);
+ dvr.eu.driver.ionode = hanum;
+ dvr.eu.driver.index = IStatus;
+ gdth_store_event(ES_DRIVER, 1, &dvr);
+ return;
+ }
+ if (scp == INTERNAL_CMND) {
+ TRACE(("gdth_interrupt() answer to internal command\n"));
+ return;
+ }
+ TRACE(("gdth_interrupt() sync. status\n"));
+ gdth_sync_event(hanum,Service,IStatus,scp);
+ }
+ gdth_next(hanum);
+}
+
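+/* gdth_sync_event() completes a single command.  For the screen service it
+   prints the message text delivered by the controller and, if more data or
+   an answer is expected, immediately queues a follow-up screen command.
+   For the cache and raw services it translates the controller status into a
+   SCSI result (S_OK -> DID_OK, S_BSY -> requeue via gdth_putq(), otherwise
+   CHECK CONDITION for the cache service or DID_BAD_TARGET / target status
+   for the raw service) before completing the command. */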
+static int gdth_sync_event(int hanum,int service,unchar index,Scsi_Cmnd *scp)
+{
+ register gdth_ha_str *ha;
+ gdth_msg_str *msg;
+ gdth_cmd_str *cmdp;
+ char c='\r';
+ ushort i;
+ gdth_evt_data dvr;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cmdp = ha->pccb;
+ TRACE(("gdth_sync_event() scp %lx serv %d status %d\n",
+ (ulong)scp,service,ha->status));
+
+ if (service == SCREENSERVICE) {
+ msg = (gdth_msg_str *)ha->pscratch;
+ TRACE(("len: %ld, answer: %d, ext: %d, alen: %ld\n",
+ msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
+ if (msg->msg_len)
+ if (!(msg->msg_answer && msg->msg_ext)) {
+ msg->msg_text[msg->msg_len] = '\0';
+ printk("%s",msg->msg_text);
+ }
+
+ if (msg->msg_ext && !msg->msg_answer) {
+ while (gdth_test_busy(hanum))
+ gdth_delay(0);
+ cmdp->Service = SCREENSERVICE;
+ cmdp->RequestBuffer = SCREEN_CMND;
+ gdth_get_cmd_index(hanum);
+ gdth_set_sema0(hanum);
+ cmdp->OpCode = GDT_READ;
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.screen.reserved = 0;
+ cmdp->u.screen.msg_handle= msg->msg_handle;
+ cmdp->u.screen.msg_addr = (ulong)msg;
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.msg_addr)
+ + sizeof(ulong);
+ ha->cmd_cnt = 0;
+ gdth_copy_command(hanum);
+ gdth_release_event(hanum);
+ return 1;
+ }
+
+ if (msg->msg_answer && msg->msg_alen) {
+ for (i=0; i<msg->msg_alen && i<MSGLEN; ++i) {
+ /* getchar() ?? */
+ /* .. */
+ if (c == '\r')
+ break;
+ msg->msg_text[i] = c;
+ }
+ msg->msg_alen -= i;
+ if (c!='\r' && msg->msg_alen!=0) {
+ msg->msg_answer = 1;
+ msg->msg_ext = 1;
+ } else {
+ msg->msg_ext = 0;
+ msg->msg_answer = 0;
+ }
+ msg->msg_len = i;
+ while (gdth_test_busy(hanum))
+ gdth_delay(0);
+ cmdp->Service = SCREENSERVICE;
+ cmdp->RequestBuffer = SCREEN_CMND;
+ gdth_get_cmd_index(hanum);
+ gdth_set_sema0(hanum);
+ cmdp->OpCode = GDT_WRITE;
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.screen.reserved = 0;
+ cmdp->u.screen.msg_handle= msg->msg_handle;
+ cmdp->u.screen.msg_addr = (ulong)msg;
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.msg_addr)
+ + sizeof(ulong);
+ ha->cmd_cnt = 0;
+ gdth_copy_command(hanum);
+ gdth_release_event(hanum);
+ return 1;
+ }
+ printk("\n");
+
+ } else {
+ scp->SCp.Message = (int)ha->status;
+ /* cache or raw service */
+ if (ha->status == S_OK) {
+ scp->result = DID_OK << 16;
+ } else if (ha->status == S_BSY) {
+ TRACE2(("Controller busy -> retry !\n"));
+ gdth_putq(hanum,scp,scp->SCp.this_residual);
+ return 1;
+ } else {
+ if (service == CACHESERVICE) {
+ memset((char*)scp->sense_buffer,0,16);
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = NOT_READY;
+ scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+
+ if (scp->done != gdth_scsi_done) {
+ dvr.size = sizeof(dvr.eu.sync);
+ dvr.eu.sync.ionode = hanum;
+ dvr.eu.sync.service = service;
+ dvr.eu.sync.status = ha->status;
+ dvr.eu.sync.info = ha->info;
+ dvr.eu.sync.hostdrive =
+#if LINUX_VERSION_CODE >= 0x020000
+ ha->id[scp->channel][scp->target].hostdrive;
+#else
+ ha->id[NUMDATA(scp->host)->busnum][scp->target].hostdrive;
+#endif
+ if (ha->status >= 0x8000)
+ gdth_store_event(ES_SYNC, 0, &dvr);
+ else
+ gdth_store_event(ES_SYNC, service, &dvr);
+ }
+ } else {
+ if (ha->status!=S_RAW_SCSI || ha->status==S_RAW_ILL || ha->info>=0x100) {
+ scp->result = DID_BAD_TARGET << 16;
+ } else {
+ scp->result = (DID_OK << 16) | ha->info;
+ }
+ }
+ }
+ if (!scp->SCp.have_data_in)
+ scp->SCp.have_data_in++;
+ else
+ scp->scsi_done(scp);
+ }
+
+ return 1;
+}
+
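+/* async_cache_tab[] has one entry per asynchronous cache service event.
+   Each string starts with a small descriptor: the first byte is the total
+   descriptor length, followed by (offset, size) byte pairs that select
+   fields from the raw event data (dvr.eu.stream); the printk format string
+   follows immediately after.  gdth_async_event() walks the descriptor,
+   pushes the selected values into a gdth_stackframe and hands that to
+   printk() together with the format string. */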
+static char *async_cache_tab[] = {
+/* 0*/ "\011\000\002\002\002\004\002\006\004"
+ "GDT HA %u, service %u, async. status %u/%lu unknown",
+/* 1*/ "\011\000\002\002\002\004\002\006\004"
+ "GDT HA %u, service %u, async. status %u/%lu unknown",
+/* 2*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu not ready",
+/* 3*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
+/* 4*/ "\005\000\002\006\004"
+ "GDT HA %u, mirror update on Host Drive %lu failed",
+/* 5*/ "\005\000\002\006\004"
+ "GDT HA %u, Mirror Drive %lu failed",
+/* 6*/ "\005\000\002\006\004"
+ "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
+/* 7*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu write protected",
+/* 8*/ "\005\000\002\006\004"
+ "GDT HA %u, media changed in Host Drive %lu",
+/* 9*/ "\005\000\002\006\004"
+ "GDT HA %u, Host Drive %lu is offline",
+/*10*/ "\005\000\002\006\004"
+ "GDT HA %u, media change of Mirror Drive %lu",
+/*11*/ "\005\000\002\006\004"
+ "GDT HA %u, Mirror Drive %lu is write protected",
+/*12*/ "\005\000\002\006\004"
+ "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
+/*13*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Array Drive %u: Cache Drive %u failed",
+/*14*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: FAIL state entered",
+/*15*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: error",
+/*16*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
+/*17*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: parity build failed",
+/*18*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive rebuild failed",
+/*19*/ "\007\000\002\010\002"
+ "GDT HA %u, Test of Hot Fix %u failed",
+/*20*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive build finished successfully",
+/*21*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
+/*22*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Array Drive %u: Hot Fix %u activated",
+/*23*/ "\005\000\002\006\002"
+ "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
+/*24*/ "\005\000\002\010\002"
+ "GDT HA %u, mirror update on Cache Drive %u completed",
+/*25*/ "\005\000\002\010\002"
+ "GDT HA %u, mirror update on Cache Drive %lu failed",
+/*26*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive rebuild started",
+/*27*/ "\005\000\002\012\001"
+ "GDT HA %u, Fault bus %u: SHELF OK detected",
+/*28*/ "\005\000\002\012\001"
+ "GDT HA %u, Fault bus %u: SHELF not OK detected",
+/*29*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
+/*30*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: new disk detected",
+/*31*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: old disk detected",
+/*32*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is illegal",
+/*33*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: illegal device detected",
+/*34*/ "\011\000\002\012\001\013\001\006\004"
+ "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
+/*35*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: disk write protected",
+/*36*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: disk not available",
+/*37*/ "\007\000\002\012\001\006\004"
+ "GDT HA %u, Fault bus %u: swap detected (%lu)",
+/*38*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
+/*39*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
+/*40*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
+/*41*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
+/*42*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: drive build started",
+/*43*/ "\003\000\002"
+ "GDT HA %u, DRAM parity error detected",
+/*44*/ "\005\000\002\006\002"
+ "GDT HA %u, Mirror Drive %u: update started",
+/*45*/ "\007\000\002\006\002\010\002"
+ "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
+/*46*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
+/*47*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
+/*48*/ "\005\000\002\006\002"
+ "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
+/*49*/ "\005\000\002\006\002"
+ "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
+/*50*/ "\007\000\002\012\001\013\001"
+ "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
+/*51*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand started",
+/*52*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand finished successfully",
+/*53*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand failed",
+/*54*/ "\003\000\002"
+ "GDT HA %u, CPU temperature critical",
+/*55*/ "\003\000\002"
+ "GDT HA %u, CPU temperature OK",
+/*56*/ "\005\000\002\006\004"
+ "GDT HA %u, Host drive %lu created",
+/*57*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand restarted",
+/*58*/ "\005\000\002\006\002"
+ "GDT HA %u, Array Drive %u: expand stopped",
+};
+
+
+static int gdth_async_event(int hanum,int service)
+{
+ gdth_stackframe stack;
+ gdth_evt_data dvr;
+ char *f = NULL;
+ int i,j;
+ gdth_ha_str *ha;
+ gdth_msg_str *msg;
+ gdth_cmd_str *cmdp;
+ int cmd_index;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ cmdp= ha->pccb;
+ msg = (gdth_msg_str *)ha->pscratch;
+ TRACE2(("gdth_async_event() ha %d serv %d\n",
+ hanum,service));
+
+ if (service == SCREENSERVICE) {
+ if (ha->status == MSG_REQUEST) {
+ while (gdth_test_busy(hanum))
+ gdth_delay(0);
+ cmdp->Service = SCREENSERVICE;
+ cmdp->RequestBuffer = SCREEN_CMND;
+ cmd_index = gdth_get_cmd_index(hanum);
+ gdth_set_sema0(hanum);
+ cmdp->OpCode = GDT_READ;
+ cmdp->BoardNode = LOCALBOARD;
+ cmdp->u.screen.reserved = 0;
+ cmdp->u.screen.msg_handle= MSG_INV_HANDLE;
+ cmdp->u.screen.msg_addr = (ulong)msg;
+ ha->cmd_offs_dpmem = 0;
+ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.msg_addr)
+ + sizeof(ulong);
+ ha->cmd_cnt = 0;
+ gdth_copy_command(hanum);
+ if (ha->type == GDT_EISA)
+ printk("[EISA slot %d] ",(ushort)ha->brd_phys);
+ else if (ha->type == GDT_ISA)
+ printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys);
+ else
+ printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8),
+ (ushort)((ha->brd_phys>>3)&0x1f));
+ gdth_release_event(hanum);
+ }
+
+ } else {
+ dvr.size = sizeof(dvr.eu.async);
+ dvr.eu.async.ionode = hanum;
+ dvr.eu.async.service = service;
+ dvr.eu.async.status = ha->status;
+ dvr.eu.async.info = ha->info;
+ *(ulong *)dvr.eu.async.scsi_coord = ha->info2;
+ gdth_store_event(ES_ASYNC, service, &dvr);
+
+ if (service==CACHESERVICE && INDEX_OK(ha->status,async_cache_tab)) {
+ TRACE2(("GDT: Async. event cache service, event no.: %d\n",
+ ha->status));
+
+ f = async_cache_tab[ha->status];
+
+ /* i: parameter to push, j: stack element to fill */
+ for (j=0,i=1; i < f[0]; i+=2) {
+ switch (f[i+1]) {
+ case 4:
+ stack.b[j++] = *(ulong*)&dvr.eu.stream[(int)f[i]];
+ break;
+ case 2:
+ stack.b[j++] = *(ushort*)&dvr.eu.stream[(int)f[i]];
+ break;
+ case 1:
+ stack.b[j++] = *(unchar*)&dvr.eu.stream[(int)f[i]];
+ break;
+ default:
+ break;
+ }
+ }
+
+ printk(&f[f[0]],stack); printk("\n");
+
+ } else {
+ printk("GDT: Unknown async. event service %d event no. %d\n",
+ service,ha->status);
+ }
+ }
+ return 1;
+}
+
+
+#ifdef GDTH_STATISTICS
+void gdth_timeout(ulong data)
+{
+ ulong flags,i;
+ Scsi_Cmnd *nscp;
+ gdth_ha_str *ha;
+ int hanum = 0;
+
+ save_flags(flags);
+ cli();
+
+ for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
+ if (gdth_cmd_tab[i][hanum].cmnd != UNUSED_CMND)
+ ++act_stats;
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
+ ++act_rq;
+
+ TRACE2(("gdth_to(): ints %ld, ios %ld, act_stats %ld, act_rq %ld\n",
+ act_ints, act_ios, act_stats, act_rq));
+ act_ints = act_ios = 0;
+
+ gdth_timer.expires = jiffies + 30 * HZ;
+ add_timer(&gdth_timer);
+ restore_flags(flags);
+}
+#endif
+
+
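+/* gdth_detect() probes for controllers in a fixed order: ISA boards by
+   scanning BIOS addresses 0xc8000..0xd8000, then EISA slots 1..8, then PCI
+   device IDs up to PCI_DEVICE_ID_VORTEX_GDTMAXRP.  Each controller found
+   gets a Scsi_Host, an IRQ (and, for ISA, a cascaded DMA channel) and an
+   initial drive scan; on pre-2.0 kernels every additional SCSI bus is
+   registered as its own virtual controller. */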
+__initfunc (int gdth_detect(Scsi_Host_Template *shtp))
+{
+ struct Scsi_Host *shp;
+ gdth_ha_str *ha;
+ unsigned long flags;
+ ulong isa_bios;
+ ushort eisa_slot,device_id,index;
+ gdth_pci_str pcistr;
+ int i,j,hanum;
+#if LINUX_VERSION_CODE < 0x020000
+ unchar b;
+#endif
+
+#ifdef DEBUG_GDTH
+ printk("GDT: This driver contains debugging information !! Trace level = %d\n",
+ DebugState);
+ printk(" Destination of debugging information: ");
+#ifdef __SERIAL__
+#ifdef __COM2__
+ printk("Serial port COM2\n");
+#else
+ printk("Serial port COM1\n");
+#endif
+#else
+ printk("Console\n");
+#endif
+ gdth_delay(3000);
+#endif
+
+ TRACE(("gdth_detect()\n"));
+
+ if (disable_gdth_scan) {
+ printk("GDT: Controller driver disabled from command line !\n");
+ return 0;
+ }
+
+ /* initializations */
+ gdth_polling = TRUE;
+ for (i=0; i<GDTH_MAXCMDS; ++i)
+ for (j=0; j<MAXHA; ++j)
+ gdth_cmd_tab[i][j].cmnd = UNUSED_CMND;
+ for (i=0; i<4; ++i)
+ for (j=0; j<MAXHA; ++j)
+ gdth_ioctl_tab[i][j] = NULL;
+ gdth_clear_events();
+
+ /* scanning for controllers, at first: ISA controller */
+ for (isa_bios=0xc8000UL; isa_bios<=0xd8000UL; isa_bios+=0x8000UL) {
+ if (gdth_search_isa(isa_bios)) { /* controller found */
+ shp = scsi_register(shtp,sizeof(gdth_ext_str));
+ ha = HADATA(shp);
+ if (!gdth_init_isa(isa_bios,ha)) {
+ scsi_unregister(shp);
+ continue;
+ }
+ /* controller found and initialized */
+ printk("Configuring GDT-ISA HA at BIOS 0x%05lX IRQ %u DRQ %u\n",
+ isa_bios,ha->irq,ha->drq);
+
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x02015F
+ if (request_irq(ha->irq,do_gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#elif LINUX_VERSION_CODE >= 0x010346
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#else
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth"))
+#endif
+ {
+ printk("GDT-ISA: Unable to allocate IRQ\n");
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ if (request_dma(ha->drq,"gdth")) {
+ printk("GDT-ISA: Unable to allocate DMA channel\n");
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ set_dma_mode(ha->drq,DMA_MODE_CASCADE);
+ enable_dma(ha->drq);
+ shp->unchecked_isa_dma = 1;
+ shp->irq = ha->irq;
+ shp->dma_channel = ha->drq;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[0][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ hanum = gdth_ctr_count;
+ gdth_ctr_tab[gdth_ctr_count++] = shp;
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum= 0;
+
+ ha->pccb = CMDDATA(shp);
+ ha->pscratch = DMADATA(shp);
+ ha->req_first = NULL;
+ for (i=0; i<MAXBUS; ++i) {
+ for (j=0; j<MAXID; ++j) {
+ ha->id[i][j].type = EMPTY_DTYP;
+ ha->id[i][j].lock = 0;
+ ha->id[i][j].heads = 0;
+ }
+ }
+ restore_flags(flags);
+
+ if (!gdth_search_drives(hanum)) {
+ printk("GDT-ISA: Error during device scan\n");
+ --gdth_ctr_count;
+ --gdth_ctr_vcount;
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020000
+ shp->max_id = 8;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt - 1;
+#else
+ /* register addit. SCSI channels as virtual controllers */
+ for (b=1; b<ha->bus_cnt; ++b) {
+ shp = scsi_register(shtp,sizeof(gdth_num_str));
+ shp->unchecked_isa_dma = 1;
+ shp->irq = ha->irq;
+ shp->dma_channel = ha->drq;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[b][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum = b;
+ }
+#endif
+
+ gdth_enable_int(hanum);
+ }
+ }
+
+ /* scanning for EISA controllers */
+ for (eisa_slot=0x1000; eisa_slot<=0x8000; eisa_slot+=0x1000) {
+ if (gdth_search_eisa(eisa_slot)) { /* controller found */
+ shp = scsi_register(shtp,sizeof(gdth_ext_str));
+ ha = HADATA(shp);
+ if (!gdth_init_eisa(eisa_slot,ha)) {
+ scsi_unregister(shp);
+ continue;
+ }
+ /* controller found and initialized */
+ printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
+ eisa_slot>>12,ha->irq);
+
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x02015F
+ if (request_irq(ha->irq,do_gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#elif LINUX_VERSION_CODE >= 0x010346
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth",NULL))
+#else
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT,"gdth"))
+#endif
+ {
+ printk("GDT-EISA: Unable to allocate IRQ\n");
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[0][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ hanum = gdth_ctr_count;
+ gdth_ctr_tab[gdth_ctr_count++] = shp;
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum= 0;
+ TRACE2(("EISA detect Bus 0: shp %lx hanum %d\n",
+ (ulong)shp,NUMDATA(shp)->hanum));
+
+ ha->pccb = CMDDATA(shp);
+ ha->pscratch = DMADATA(shp);
+ ha->req_first = NULL;
+ for (i=0; i<MAXBUS; ++i) {
+ for (j=0; j<MAXID; ++j) {
+ ha->id[i][j].type = EMPTY_DTYP;
+ ha->id[i][j].lock = 0;
+ ha->id[i][j].heads = 0;
+ }
+ }
+ restore_flags(flags);
+
+ if (!gdth_search_drives(hanum)) {
+ printk("GDT-EISA: Error during device scan\n");
+ --gdth_ctr_count;
+ --gdth_ctr_vcount;
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020000
+ shp->max_id = 8;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt - 1;
+#else
+ /* register addit. SCSI channels as virtual controllers */
+ for (b=1; b<ha->bus_cnt; ++b) {
+ shp = scsi_register(shtp,sizeof(gdth_num_str));
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[b][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum = b;
+ TRACE2(("EISA detect Bus %d: shp %lx hanum %d\n",
+ NUMDATA(shp)->busnum,(ulong)shp,
+ NUMDATA(shp)->hanum));
+ }
+#endif
+
+ gdth_enable_int(hanum);
+ }
+ }
+
+ /* scanning for PCI controllers */
+ for (device_id = 0; device_id <= PCI_DEVICE_ID_VORTEX_GDTMAXRP; ++device_id) {
+ if (device_id > PCI_DEVICE_ID_VORTEX_GDT6555 &&
+ device_id < PCI_DEVICE_ID_VORTEX_GDT6x17RP)
+ continue;
+ for (index = 0; ; ++index) {
+ if (!gdth_search_pci(device_id,index,&pcistr))
+ break; /* next device_id */
+ shp = scsi_register(shtp,sizeof(gdth_ext_str));
+ ha = HADATA(shp);
+ if (!gdth_init_pci(&pcistr,ha)) {
+ scsi_unregister(shp);
+ continue;
+ }
+ /* controller found and initialized */
+ printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
+ pcistr.bus,pcistr.device_fn>>3,ha->irq);
+
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x02015F
+ if (request_irq(ha->irq,do_gdth_interrupt,SA_INTERRUPT|SA_SHIRQ,"gdth",NULL))
+#elif LINUX_VERSION_CODE >= 0x010346
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT|SA_SHIRQ,"gdth",NULL))
+#else
+ if (request_irq(ha->irq,gdth_interrupt,SA_INTERRUPT|SA_SHIRQ,"gdth"))
+#endif
+ {
+ printk("GDT-PCI: Unable to allocate IRQ\n");
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[0][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ hanum = gdth_ctr_count;
+ gdth_ctr_tab[gdth_ctr_count++] = shp;
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum= 0;
+
+ ha->pccb = CMDDATA(shp);
+ ha->pscratch = DMADATA(shp);
+ ha->req_first = NULL;
+ for (i=0; i<MAXBUS; ++i) {
+ for (j=0; j<MAXID; ++j) {
+ ha->id[i][j].type = EMPTY_DTYP;
+ ha->id[i][j].lock = 0;
+ ha->id[i][j].heads = 0;
+ }
+ }
+ restore_flags(flags);
+
+ if (!gdth_search_drives(hanum)) {
+ printk("GDT-PCI: Error during device scan\n");
+ --gdth_ctr_count;
+ --gdth_ctr_vcount;
+ save_flags(flags);
+ cli();
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(ha->irq,NULL);
+#else
+ free_irq(ha->irq);
+#endif
+ restore_flags(flags);
+ scsi_unregister(shp);
+ continue;
+ }
+
+#if LINUX_VERSION_CODE >= 0x020000
+ shp->max_id = MAXID;
+ shp->max_lun = MAXLUN;
+ shp->max_channel = ha->bus_cnt - 1;
+#else
+ /* register addit. SCSI channels as virtual controllers */
+ for (b=1; b<ha->bus_cnt; ++b) {
+ shp = scsi_register(shtp,sizeof(gdth_num_str));
+ shp->unchecked_isa_dma = 0;
+ shp->irq = ha->irq;
+ shp->dma_channel = 0xff;
+ for (i=0; i<MAXID; ++i) {
+ if (ha->id[b][i].type==SIOP_DTYP) {
+ shp->this_id = i;
+ break;
+ }
+ }
+ gdth_ctr_vtab[gdth_ctr_vcount++] = shp;
+ NUMDATA(shp)->hanum = (ushort)hanum;
+ NUMDATA(shp)->busnum = b;
+ }
+#endif
+
+ gdth_enable_int(hanum);
+ }
+ }
+
+ TRACE2(("gdth_detect() %d controller detected\n",gdth_ctr_count));
+ if (gdth_ctr_count > 0) {
+#ifdef GDTH_STATISTICS
+ TRACE2(("gdth_detect(): Initializing timer !\n"));
+ init_timer(&gdth_timer);
+ gdth_timer.expires = jiffies + HZ;
+ gdth_timer.data = 0L;
+ gdth_timer.function = gdth_timeout;
+ add_timer(&gdth_timer);
+#endif
+#if LINUX_VERSION_CODE >= 0x020100
+ register_reboot_notifier(&gdth_notifier);
+#endif
+ }
+ gdth_polling = FALSE;
+ return gdth_ctr_vcount;
+}
+
+
+int gdth_release(struct Scsi_Host *shp)
+{
+ unsigned long flags;
+
+ TRACE2(("gdth_release()\n"));
+
+ if (NUMDATA(shp)->busnum == 0) {
+ gdth_flush(NUMDATA(shp)->hanum);
+
+ save_flags(flags);
+ cli();
+ if (shp->irq) {
+#if LINUX_VERSION_CODE >= 0x010346
+ free_irq(shp->irq,NULL);
+#else
+ free_irq(shp->irq);
+#endif
+ }
+ if (shp->dma_channel != 0xff) {
+ free_dma(shp->dma_channel);
+ }
+ restore_flags(flags);
+ gdth_ctr_released++;
+ TRACE2(("gdth_release(): HA %d of %d\n",
+ gdth_ctr_released, gdth_ctr_count));
+
+ if (gdth_ctr_released == gdth_ctr_count) {
+#ifdef GDTH_STATISTICS
+ del_timer(&gdth_timer);
+#endif
+#if LINUX_VERSION_CODE >= 0x020100
+ unregister_reboot_notifier(&gdth_notifier);
+#endif
+ }
+ }
+
+ scsi_unregister(shp);
+ return 0;
+}
+
+
+static const char *gdth_ctr_name(int hanum)
+{
+ gdth_ha_str *ha;
+
+ TRACE2(("gdth_ctr_name()\n"));
+
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ if (ha->type == GDT_EISA) {
+ switch (ha->stype) {
+ case GDT3_ID:
+ return("GDT3000/3020");
+ case GDT3A_ID:
+ return("GDT3000A/3020A/3050A");
+ case GDT3B_ID:
+ return("GDT3000B/3010A");
+ }
+ } else if (ha->type == GDT_ISA) {
+ return("GDT2000/2020");
+ } else if (ha->type == GDT_PCI) {
+ switch (ha->stype) {
+ case PCI_DEVICE_ID_VORTEX_GDT60x0:
+ return("GDT6000/6020/6050");
+ case PCI_DEVICE_ID_VORTEX_GDT6000B:
+ return("GDT6000B/6010");
+ }
+ }
+ /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
+
+ return("");
+}
+
+const char *gdth_info(struct Scsi_Host *shp)
+{
+ int hanum;
+ gdth_ha_str *ha;
+
+ TRACE2(("gdth_info()\n"));
+ hanum = NUMDATA(shp)->hanum;
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ return ((const char *)ha->ctr_name);
+}
+
+/* old error handling */
+int gdth_abort(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_abort() reason %d\n",scp->abort_reason));
+ return SCSI_ABORT_SNOOZE;
+}
+
+#if LINUX_VERSION_CODE >= 0x010346
+int gdth_reset(Scsi_Cmnd *scp, unsigned int reset_flags)
+#else
+int gdth_reset(Scsi_Cmnd *scp)
+#endif
+{
+ TRACE2(("gdth_reset()\n"));
+ return SCSI_RESET_PUNT;
+}
+
+#if LINUX_VERSION_CODE >= 0x02015F
+/* new error handling */
+int gdth_eh_abort(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_abort()\n"));
+ return FAILED;
+}
+
+int gdth_eh_device_reset(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_device_reset()\n"));
+ return FAILED;
+}
+
+int gdth_eh_bus_reset(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_bus_reset()\n"));
+ return FAILED;
+}
+
+int gdth_eh_host_reset(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_eh_host_reset()\n"));
+ return FAILED;
+}
+#endif
+
+#if LINUX_VERSION_CODE >= 0x010300
+int gdth_bios_param(Disk *disk,kdev_t dev,int *ip)
+#else
+int gdth_bios_param(Disk *disk,int dev,int *ip)
+#endif
+{
+ unchar b, t;
+ int hanum;
+ gdth_ha_str *ha;
+ int drv_hds, drv_secs;
+
+ hanum = NUMDATA(disk->device->host)->hanum;
+ b = disk->device->channel;
+ t = disk->device->id;
+ TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", hanum, b, t));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ if (ha->id[b][t].heads == 0) {
+ /* raw device: evaluate mapping (sectors per head, heads per cylinder) */
+ if (disk->capacity /HEADS/SECS <= MAXCYLS) {
+ drv_hds = HEADS;
+ drv_secs= SECS;
+ } else if (disk->capacity /MEDHEADS/MEDSECS <= MAXCYLS) {
+ drv_hds = MEDHEADS;
+ drv_secs= MEDSECS;
+ } else {
+ drv_hds = BIGHEADS;
+ drv_secs= BIGSECS;
+ }
+ ha->id[b][t].heads = drv_hds;
+ ha->id[b][t].secs = drv_secs;
+ TRACE2(("gdth_bios_param(): raw device -> params evaluated\n"));
+ }
+
+ ip[0] = ha->id[b][t].heads;
+ ip[1] = ha->id[b][t].secs;
+ ip[2] = disk->capacity / ip[0] / ip[1];
+
+ TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
+ ip[0],ip[1],ip[2]));
+ return 0;
+}
+
+
+static void internal_done(Scsi_Cmnd *scp)
+{
+ scp->SCp.sent_command++;
+}
+
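+/* gdth_command() is the polled entry point: it queues the request with
+   internal_done() as completion callback and busy-waits on
+   SCp.sent_command until the interrupt path has finished the command. */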
+int gdth_command(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_command()\n"));
+
+ scp->SCp.sent_command = 0;
+ gdth_queuecommand(scp,internal_done);
+
+ while (!scp->SCp.sent_command)
+ barrier();
+ return scp->result;
+}
+
+
+int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *))
+{
+ int hanum;
+ int priority;
+
+ TRACE(("gdth_queuecommand() cmd 0x%x id %d lun %d\n",
+ scp->cmnd[0],scp->target,scp->lun));
+
+ scp->scsi_done = (void *)done;
+ scp->SCp.have_data_in = 1;
+ hanum = NUMDATA(scp->host)->hanum;
+#ifdef GDTH_STATISTICS
+ ++act_ios;
+#endif
+
+ priority = DEFAULT_PRI;
+#if LINUX_VERSION_CODE >= 0x010300
+ if (scp->done == gdth_scsi_done)
+ priority = scp->SCp.this_residual;
+#endif
+ gdth_putq( hanum, scp, priority );
+ gdth_next( hanum );
+ return 0;
+}
+
+/* flush routine */
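+/* gdth_flush() builds a GDT_FLUSH cache command for every configured cache
+   drive of the controller and issues it synchronously through scsi_do_cmd()
+   with a locked semaphore, so dirty controller cache buffers are written
+   back before the controller is released or the system halts. */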
+static void gdth_flush(int hanum)
+{
+ int i, j;
+ gdth_ha_str *ha;
+ Scsi_Cmnd scp;
+ Scsi_Device sdev;
+ gdth_cmd_str gdtcmd;
+ char cmnd[12];
+
+ TRACE2(("gdth_flush() hanum %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ memset(&sdev,0,sizeof(Scsi_Device));
+ memset(&scp, 0,sizeof(Scsi_Cmnd));
+ sdev.host = gdth_ctr_tab[hanum];
+ sdev.id = sdev.host->this_id;
+ scp.cmd_len = 12;
+ scp.host = gdth_ctr_tab[hanum];
+ scp.target = sdev.host->this_id;
+ scp.device = &sdev;
+ scp.use_sg = 0;
+
+ for (i = 0; i < MAXBUS; ++i) {
+ for (j = 0; j < MAXID; ++j) {
+ if (ha->id[i][j].type == CACHE_DTYP) {
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_FLUSH;
+ gdtcmd.u.cache.DeviceNo = ha->id[i][j].hostdrive;
+ gdtcmd.u.cache.BlockNo = 1;
+ gdtcmd.u.cache.sg_canz = 0;
+ TRACE2(("gdth_flush(): flush ha %d drive %d\n",
+ hanum, ha->id[i][j].hostdrive));
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ 30*HZ, 1);
+ down(&sem);
+ }
+ }
+ }
+ }
+}
+
+/* shutdown routine */
+#if LINUX_VERSION_CODE >= 0x020100
+static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
+#else
+void gdth_halt(void)
+#endif
+{
+ int hanum;
+ Scsi_Cmnd scp;
+ Scsi_Device sdev;
+ gdth_cmd_str gdtcmd;
+ char cmnd[12];
+
+#if LINUX_VERSION_CODE >= 0x020100
+ TRACE2(("gdth_halt() event %d\n",event));
+ if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
+ return NOTIFY_DONE;
+#else
+ TRACE2(("gdth_halt()\n"));
+ if (halt_called) {
+ TRACE2(("already called\n"));
+ return;
+ }
+ halt_called = TRUE;
+#endif
+ printk("GDT: Flushing all host drives .. ");
+ for (hanum = 0; hanum < gdth_ctr_count; ++hanum) {
+ gdth_flush(hanum);
+
+ /* controller reset */
+ memset(&sdev,0,sizeof(Scsi_Device));
+ memset(&scp, 0,sizeof(Scsi_Cmnd));
+ sdev.host = gdth_ctr_tab[hanum];
+ sdev.id = sdev.host->this_id;
+ scp.cmd_len = 12;
+ scp.host = gdth_ctr_tab[hanum];
+ scp.target = sdev.host->this_id;
+ scp.device = &sdev;
+ scp.use_sg = 0;
+
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_RESET;
+ TRACE2(("gdth_halt(): reset controller %d\n", hanum));
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ 10*HZ, 1);
+ down(&sem);
+ }
+ }
+ printk("Done.\n");
+
+#ifdef GDTH_STATISTICS
+ del_timer(&gdth_timer);
+#endif
+#if LINUX_VERSION_CODE >= 0x020100
+ unregister_reboot_notifier(&gdth_notifier);
+ return NOTIFY_OK;
+#endif
+}
+
+
+/* called from init/main.c */
+__initfunc (void gdth_setup(char *str,int *ints))
+{
+ static size_t setup_idx = 0;
+
+ TRACE2(("gdth_setup() str %s ints[0] %d ints[1] %d\n",
+ str ? str:"NULL", ints[0],
+ ints[0] ? ints[1]:0));
+
+ if (setup_idx >= MAXHA) {
+ printk("GDT: gdth_setup() called too many times. Bad LILO params ?\n");
+ return;
+ }
+ if (ints[0] != 1) {
+ printk("GDT: Illegal command line !\n");
+ printk("Usage: gdth=<IRQ>\n");
+ printk("Where: <IRQ>: valid EISA controller IRQ (10,11,12,14)\n");
+ printk(" or 0 to disable controller driver\n");
+ return;
+ }
+ if (ints[1] == 10 || ints[1] == 11 || ints[1] == 12 || ints[1] == 14) {
+ irqs[setup_idx++] = ints[1];
+ irqs[setup_idx] = 0xff;
+ return;
+ }
+ if (ints[1] == 0) {
+ disable_gdth_scan = TRUE;
+ return;
+ }
+ printk("GDT: Invalid IRQ (%d) specified\n",ints[1]);
+}
+
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = GDTH;
+#include "scsi_module.c"
+#endif
+
diff --git a/linux/src/drivers/scsi/gdth.h b/linux/src/drivers/scsi/gdth.h
new file mode 100644
index 0000000..6eafd1f
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth.h
@@ -0,0 +1,819 @@
+#ifndef _GDTH_H
+#define _GDTH_H
+
+/*
+ * Header file for the GDT ISA/EISA/PCI Disk Array Controller driver for Linux
+ *
+ * gdth.h Copyright (C) 1995-98 ICP vortex Computersysteme GmbH, Achim Leubner
+ * See gdth.c for further information and
+ * below for supported controller types
+ *
+ * <achim@vortex.de>
+ *
+ * $Id: gdth.h,v 1.1 1999/04/26 05:54:37 tb Exp $
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+
+#ifndef NULL
+#define NULL 0
+#endif
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* defines, macros */
+
+/* driver version */
+#define GDTH_VERSION_STR "1.07"
+#define GDTH_VERSION 1
+#define GDTH_SUBVERSION 7
+
+/* protocol version */
+#define PROTOCOL_VERSION 1
+
+/* controller classes */
+#define GDT_ISA 0x01 /* ISA controller */
+#define GDT_EISA 0x02 /* EISA controller */
+#define GDT_PCI 0x03 /* PCI controller */
+#define GDT_PCINEW 0x04 /* new PCI controller */
+#define GDT_PCIMPR 0x05 /* PCI MPR controller */
+/* GDT_EISA, controller subtypes EISA */
+#define GDT3_ID 0x0130941c /* GDT3000/3020 */
+#define GDT3A_ID 0x0230941c /* GDT3000A/3020A/3050A */
+#define GDT3B_ID 0x0330941c /* GDT3000B/3010A */
+/* GDT_ISA */
+#define GDT2_ID 0x0120941c /* GDT2000/2020 */
+/* vendor ID, device IDs (PCI) */
+/* these defines should already exist in <linux/pci.h> */
+#ifndef PCI_VENDOR_ID_VORTEX
+#define PCI_VENDOR_ID_VORTEX 0x1119 /* PCI controller vendor ID */
+#endif
+#ifndef PCI_DEVICE_ID_VORTEX_GDT60x0
+/* GDT_PCI */
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0 /* GDT6000/6020/6050 */
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 1 /* GDT6000B/6010 */
+/* GDT_PCINEW */
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 2 /* GDT6110/6510 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 3 /* GDT6120/6520 */
+#define PCI_DEVICE_ID_VORTEX_GDT6530 4 /* GDT6530 */
+#define PCI_DEVICE_ID_VORTEX_GDT6550 5 /* GDT6550 */
+/* GDT_PCINEW, wide/ultra SCSI controllers */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 6 /* GDT6117/6517 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 7 /* GDT6127/6527 */
+#define PCI_DEVICE_ID_VORTEX_GDT6537 8 /* GDT6537 */
+#define PCI_DEVICE_ID_VORTEX_GDT6557 9 /* GDT6557/6557-ECC */
+/* GDT_PCINEW, wide SCSI controllers */
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 10 /* GDT6115/6515 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 11 /* GDT6125/6525 */
+#define PCI_DEVICE_ID_VORTEX_GDT6535 12 /* GDT6535 */
+#define PCI_DEVICE_ID_VORTEX_GDT6555 13 /* GDT6555/6555-ECC */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RP
+/* GDT_MPR, RP series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x100 /* GDT6117RP/GDT6517RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x101 /* GDT6127RP/GDT6527RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x102 /* GDT6537RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x103 /* GDT6557RP */
+/* GDT_MPR, RP series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x104 /* GDT6111RP/GDT6511RP */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x105 /* GDT6121RP/GDT6521RP */
+/* GDT_MPR, RP1 series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x110 /* GDT6117RP1/GDT6517RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x111 /* GDT6127RP1/GDT6527RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x112 /* GDT6537RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x113 /* GDT6557RP1 */
+/* GDT_MPR, RP1 series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x114 /* GDT6111RP1/GDT6511RP1 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x115 /* GDT6121RP1/GDT6521RP1 */
+/* GDT_MPR, RP2 series, wide/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x120 /* GDT6117RP2/GDT6517RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x121 /* GDT6127RP2/GDT6527RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x122 /* GDT6537RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x123 /* GDT6557RP2 */
+/* GDT_MPR, RP2 series, narrow/ultra SCSI */
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x124 /* GDT6111RP2/GDT6511RP2 */
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x125 /* GDT6121RP2/GDT6521RP2 */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDT6519RD
+/* GDT_MPR, Fibre Channel */
+#define PCI_DEVICE_ID_VORTEX_GDT6519RD 0x210 /* GDT6519RD */
+#define PCI_DEVICE_ID_VORTEX_GDT6529RD 0x211 /* GDT6529RD */
+#endif
+
+#ifndef PCI_DEVICE_ID_VORTEX_GDTMAXRP
+/* GDT_MPR, last device ID */
+#define PCI_DEVICE_ID_VORTEX_GDTMAXRP 0x2ff
+#endif
+
+/* limits */
+#define GDTH_SCRATCH 4096 /* 4KB scratch buffer */
+#define GDTH_MAXCMDS 124
+#define GDTH_MAXC_P_L 16 /* max. cmds per lun */
+#define MAXOFFSETS 128
+#define MAXHA 8
+#define MAXID 16
+#define MAXLUN 8
+#define MAXBUS 6
+#define MAX_HDRIVES 35 /* max. host drive count */
+#define MAX_EVENTS 100 /* event buffer count */
+#define MAXCYLS 1024
+#define HEADS 64
+#define SECS 32 /* mapping 64*32 */
+#define MEDHEADS 127
+#define MEDSECS 63 /* mapping 127*63 */
+#define BIGHEADS 255
+#define BIGSECS 63 /* mapping 255*63 */
+
+/* special command ptr. */
+#define UNUSED_CMND ((Scsi_Cmnd *)-1)
+#define INTERNAL_CMND ((Scsi_Cmnd *)-2)
+#define SCREEN_CMND ((Scsi_Cmnd *)-3)
+#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
+
+/* device types */
+#define EMPTY_DTYP 0
+#define CACHE_DTYP 1
+#define RAW_DTYP 2
+#define SIOP_DTYP 3 /* the SCSI processor */
+
+/* controller services */
+#define SCSIRAWSERVICE 3
+#define CACHESERVICE 9
+#define SCREENSERVICE 11
+
+/* screenservice defines */
+#define MSG_INV_HANDLE -1 /* special message handle */
+#define MSGLEN 16 /* size of message text */
+#define MSG_SIZE 34 /* size of message structure */
+#define MSG_REQUEST 0 /* async. event: message */
+
+/* cacheservice defines */
+#define SECTOR_SIZE 0x200 /* always 512 bytes per sector */
+
+/* DPMEM constants */
+#define DPMEM_MAGIC 0xC0FFEE11
+#define IC_HEADER_BYTES 48
+#define IC_QUEUE_BYTES 4
+#define DPMEM_COMMAND_OFFSET IC_HEADER_BYTES+IC_QUEUE_BYTES*MAXOFFSETS
+
+/* service commands */
+#define GDT_INIT 0 /* service initialization */
+#define GDT_READ 1 /* read command */
+#define GDT_WRITE 2 /* write command */
+#define GDT_INFO 3 /* information about devices */
+#define GDT_FLUSH 4 /* flush dirty cache buffers */
+#define GDT_IOCTL 5 /* ioctl command */
+#define GDT_DEVTYPE 9 /* additional information */
+#define GDT_MOUNT 10 /* mount cache device */
+#define GDT_UNMOUNT 11 /* unmount cache device */
+#define GDT_SET_FEAT 12 /* set feat. (scatter/gather) */
+#define GDT_GET_FEAT 13 /* get features */
+#define GDT_RESERVE 14 /* reserve dev. to raw service */
+#define GDT_WRITE_THR 16 /* write through */
+#define GDT_EXT_INFO 18 /* extended info */
+#define GDT_RESET 19 /* controller reset */
+
+/* IOCTL command defines */
+#define SCSI_CHAN_CNT 5 /* subfunctions */
+#define GET_IOCHAN_DESC 0x5e
+#define L_CTRL_PATTERN 0x20000000L
+#define CACHE_INFO 4
+#define CACHE_CONFIG 5
+#define BOARD_INFO 0x28
+#define IO_CHANNEL 0x00020000L /* channels */
+#define INVALID_CHANNEL 0x0000ffffL
+
+/* IOCTLs */
+#define GDTIOCTL_MASK ('J'<<8)
+#define GDTIOCTL_GENERAL (GDTIOCTL_MASK | 0) /* general IOCTL */
+#define GDTIOCTL_DRVERS (GDTIOCTL_MASK | 1) /* get driver version */
+#define GDTIOCTL_CTRTYPE (GDTIOCTL_MASK | 2) /* get controller type */
+#define GDTIOCTL_CTRCNT (GDTIOCTL_MASK | 5) /* get controller count */
+#define GDTIOCTL_LOCKDRV (GDTIOCTL_MASK | 6) /* lock host drive */
+#define GDTIOCTL_LOCKCHN (GDTIOCTL_MASK | 7) /* lock channel */
+#define GDTIOCTL_EVENT (GDTIOCTL_MASK | 8) /* read controller events */
+
+/* service errors */
+#define S_OK 1 /* no error */
+#define S_BSY 7 /* controller busy */
+#define S_RAW_SCSI 12 /* raw serv.: target error */
+#define S_RAW_ILL 0xff /* raw serv.: illegal */
+
+/* timeout values */
+#define INIT_RETRIES 10000 /* 10000 * 1ms = 10s */
+#define INIT_TIMEOUT 100000 /* 100000 * 1ms = 100s */
+#define POLL_TIMEOUT 10000 /* 10000 * 1ms = 10s */
+
+/* priorities */
+#define DEFAULT_PRI 0x20
+#define IOCTL_PRI 0x10
+
+/* data directions */
+#define DATA_IN 0x01000000L /* data from target */
+#define DATA_OUT 0x00000000L /* data to target */
+
+/* BMIC registers (EISA controllers) */
+#define ID0REG 0x0c80 /* board ID */
+#define EINTENABREG 0x0c89 /* interrupt enable */
+#define SEMA0REG 0x0c8a /* command semaphore */
+#define SEMA1REG 0x0c8b /* status semaphore */
+#define LDOORREG 0x0c8d /* local doorbell */
+#define EDENABREG 0x0c8e /* EISA system doorbell enable */
+#define EDOORREG 0x0c8f /* EISA system doorbell */
+#define MAILBOXREG 0x0c90 /* mailbox reg. (16 bytes) */
+#define EISAREG 0x0cc0 /* EISA configuration */
+
+/* other defines */
+#define LINUX_OS 8 /* used for cache optim. */
+#define SCATTER_GATHER 1 /* s/g feature */
+#define GDTH_MAXSG 32 /* max. s/g elements */
+#define SECS32 0x1f /* round capacity */
+#define BIOS_ID_OFFS 0x10 /* offset contr. ID in ISABIOS */
+#define LOCALBOARD 0 /* board node always 0 */
+#define ASYNCINDEX 0 /* cmd index async. event */
+#define SPEZINDEX 1 /* cmd index unknown service */
+#define GDT_WR_THROUGH 0x100 /* WRITE_THROUGH supported */
+
+/* typedefs */
+
+#pragma pack(1)
+
+typedef struct {
+ char buffer[GDTH_SCRATCH]; /* scratch buffer */
+} gdth_scratch_str;
+
+/* screenservice message */
+typedef struct {
+ ulong msg_handle; /* message handle */
+ ulong msg_len; /* size of message */
+ ulong msg_alen; /* answer length */
+ unchar msg_answer; /* answer flag */
+ unchar msg_ext; /* more messages */
+ unchar msg_reserved[2];
+ char msg_text[MSGLEN+2]; /* the message text */
+} gdth_msg_str;
+
+/* get channel count IOCTL */
+typedef struct {
+ ulong channel_no; /* number of channel */
+ ulong drive_cnt; /* number of drives */
+ unchar siop_id; /* SCSI processor ID */
+ unchar siop_state; /* SCSI processor state */
+} gdth_getch_str;
+
+/* get raw channel count IOCTL (NEW!) */
+typedef struct {
+ ulong version; /* version of information (-1UL: newest) */
+ unchar list_entries; /* list entry count */
+ unchar first_chan; /* first channel number */
+ unchar last_chan; /* last channel number */
+ unchar chan_count; /* (R) channel count */
+ ulong list_offset; /* offset of list[0] */
+ struct {
+ unchar proc_id; /* processor id */
+ unchar proc_defect; /* defect ? */
+ unchar reserved[2];
+ } list[MAXBUS];
+} gdth_iochan_str;
+
+/* cache info/config IOCTL */
+typedef struct {
+ ulong version; /* firmware version */
+ ushort state; /* cache state (on/off) */
+ ushort strategy; /* cache strategy */
+ ushort write_back; /* write back state (on/off) */
+ ushort block_size; /* cache block size */
+} gdth_cpar_str;
+
+typedef struct {
+ ulong csize; /* cache size */
+ ulong read_cnt; /* read/write counter */
+ ulong write_cnt;
+ ulong tr_hits; /* hits */
+ ulong sec_hits;
+ ulong sec_miss; /* misses */
+} gdth_cstat_str;
+
+typedef struct {
+ gdth_cpar_str cpar;
+ gdth_cstat_str cstat;
+} gdth_cinfo_str;
+
+/* board info IOCTL */
+typedef struct {
+ ulong ser_no; /* serial no. */
+ unchar oem_id[2]; /* OEM ID */
+ ushort ep_flags; /* eprom flags */
+ ulong proc_id; /* processor ID */
+ ulong memsize; /* memory size (bytes) */
+ unchar mem_banks; /* memory banks */
+ unchar chan_type; /* channel type */
+ unchar chan_count; /* channel count */
+ unchar rdongle_pres; /* dongle present? */
+ ulong epr_fw_ver; /* (eprom) firmware version */
+ ulong upd_fw_ver; /* (update) firmware version */
+ ulong upd_revision; /* update revision */
+ char type_string[16]; /* controller name */
+ char raid_string[16]; /* RAID firmware name */
+ unchar update_pres; /* update present? */
+ unchar xor_pres; /* XOR engine present? */
+ unchar prom_type; /* ROM type (eprom/flash eprom) */
+ unchar prom_count; /* number of ROM devices */
+ ulong dup_pres; /* duplexing module present? */
+ ulong chan_pres; /* number of expansion channels */
+ ulong mem_pres; /* memory expansion installed? */
+ unchar ft_bus_system; /* fault bus supported? */
+ unchar subtype_valid; /* board_subtype valid? */
+ unchar board_subtype; /* controller subtype/hardware level */
+ unchar ramparity_pres; /* RAM parity check hardware present? */
+} gdth_binfo_str;
+
+/* scatter/gather element */
+typedef struct {
+ ulong sg_ptr; /* address */
+ ulong sg_len; /* length */
+} gdth_sg_str;
+
+/* command structure */
+typedef struct {
+ ulong BoardNode; /* board node (always 0) */
+ ulong CommandIndex; /* command number */
+ ushort OpCode; /* the command (READ,..) */
+ union {
+ struct {
+ ushort DeviceNo; /* number of cache drive */
+ ulong BlockNo; /* block number */
+ ulong BlockCnt; /* block count */
+ ulong DestAddr; /* dest. addr. (if s/g: -1) */
+ ulong sg_canz; /* s/g element count */
+ gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } cache; /* cache service cmd. str. */
+ struct {
+ ushort param_size; /* size of p_param buffer */
+ ulong subfunc; /* IOCTL function */
+ ulong channel; /* device */
+ ulong p_param; /* buffer */
+ } ioctl; /* IOCTL command structure */
+ struct {
+ ushort reserved;
+ ulong msg_handle; /* message handle */
+ ulong msg_addr; /* message buffer address */
+ } screen; /* screen service cmd. str. */
+ struct {
+ ushort reserved;
+ ulong direction; /* data direction */
+ ulong mdisc_time; /* disc. time (0: no timeout)*/
+ ulong mcon_time; /* connect time(0: no to.) */
+ ulong sdata; /* dest. addr. (if s/g: -1) */
+ ulong sdlen; /* data length (bytes) */
+ ulong clen; /* SCSI cmd. length(6,10,12) */
+ unchar cmd[12]; /* SCSI command */
+ unchar target; /* target ID */
+ unchar lun; /* LUN */
+ unchar bus; /* SCSI bus number */
+ unchar priority; /* only 0 used */
+ ulong sense_len; /* sense data length */
+ ulong sense_data; /* sense data addr. */
+ struct raw *link_p; /* linked cmds (not supp.) */
+ ulong sg_ranz; /* s/g element count */
+ gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
+ } raw; /* raw service cmd. struct. */
+ } u;
+ /* additional variables */
+ unchar Service; /* controller service */
+ ushort Status; /* command result */
+ ulong Info; /* additional information */
+ Scsi_Cmnd *RequestBuffer; /* request buffer */
+} gdth_cmd_str;
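gdth_cmd_str is one union-based request block shared by every controller service: the cache branch addresses logical host drives by block number, the raw branch carries a pass-through SCSI CDB, and the ioctl branch points the firmware at a parameter buffer. When the controller reports scatter/gather support, DestAddr (or sdata) is set to -1 and the sg_lst[] entries describe the physical fragments instead. A minimal sketch of filling a cache-service request with a single s/g fragment; the service and opcode values are assumed to be the constants defined elsewhere in gdth.h and are passed in by the caller:

    /* Illustrative only: package a cache-service request that moves
     * 'cnt' blocks through one physical buffer fragment.  'service'
     * and 'opcode' stand for constants such as CACHESERVICE and the
     * read/write opcodes defined earlier in this header. */
    static void sketch_fill_cache_cmd(gdth_cmd_str *cmd,
                                      unchar service, ushort opcode,
                                      ushort drive, ulong blockno,
                                      ulong cnt, ulong buf_phys)
    {
        cmd->BoardNode = LOCALBOARD;                /* board node is always 0 */
        cmd->Service   = service;
        cmd->OpCode    = opcode;
        cmd->u.cache.DeviceNo = drive;              /* host drive number */
        cmd->u.cache.BlockNo  = blockno;
        cmd->u.cache.BlockCnt = cnt;
        cmd->u.cache.DestAddr = (ulong)-1;          /* -1 selects the s/g list */
        cmd->u.cache.sg_canz  = 1;                  /* one s/g element */
        cmd->u.cache.sg_lst[0].sg_ptr = buf_phys;   /* physical buffer address */
        cmd->u.cache.sg_lst[0].sg_len = cnt * SECTOR_SIZE;
    }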
+
+/* controller event structure */
+#define ES_ASYNC 1
+#define ES_DRIVER 2
+#define ES_TEST 3
+#define ES_SYNC 4
+typedef struct {
+ ushort size; /* size of structure */
+ union {
+ char stream[16];
+ struct {
+ ushort ionode;
+ ushort service;
+ ulong index;
+ } driver;
+ struct {
+ ushort ionode;
+ ushort service;
+ ushort status;
+ ulong info;
+ unchar scsi_coord[3];
+ } async;
+ struct {
+ ushort ionode;
+ ushort service;
+ ushort status;
+ ulong info;
+ ushort hostdrive;
+ unchar scsi_coord[3];
+ unchar sense_key;
+ } sync;
+ struct {
+ ulong l1, l2, l3, l4;
+ } test;
+ } eu;
+} gdth_evt_data;
+
+typedef struct {
+ ulong first_stamp;
+ ulong last_stamp;
+ ushort same_count;
+ ushort event_source;
+ ushort event_idx;
+ unchar application;
+ unchar reserved;
+ gdth_evt_data event_data;
+} gdth_evt_str;
+
+
+/* DPRAM structures */
+
+/* interface area ISA/PCI */
+typedef struct {
+ unchar S_Cmd_Indx; /* special command */
+ unchar volatile S_Status; /* status special command */
+ ushort reserved1;
+ ulong S_Info[4]; /* add. info special command */
+ unchar volatile Sema0; /* command semaphore */
+ unchar reserved2[3];
+ unchar Cmd_Index; /* command number */
+ unchar reserved3[3];
+ ushort volatile Status; /* command status */
+ ushort Service; /* service(for async.events) */
+ ulong Info[2]; /* additional info */
+ struct {
+ ushort offset; /* command offs. in the DPRAM*/
+ ushort serv_id; /* service */
+ } comm_queue[MAXOFFSETS]; /* command queue */
+ ulong bios_reserved[2];
+ unchar gdt_dpr_cmd[1]; /* commands */
+} gdt_dpr_if;
+
+/* SRAM structure PCI controllers */
+typedef struct {
+ ulong magic; /* controller ID from BIOS */
+ ushort need_deinit; /* switch betw. BIOS/driver */
+ unchar switch_support; /* see need_deinit */
+ unchar padding[9];
+ unchar os_used[16]; /* OS code per service */
+ unchar unused[28];
+ unchar fw_magic; /* contr. ID from firmware */
+} gdt_pci_sram;
+
+/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
+typedef struct {
+ unchar os_used[16]; /* OS code per service */
+ ushort need_deinit; /* switch betw. BIOS/driver */
+ unchar switch_support; /* see need_deinit */
+ unchar padding;
+} gdt_eisa_sram;
+
+
+/* DPRAM ISA controllers */
+typedef struct {
+ union {
+ struct {
+ unchar bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
+ ulong magic; /* controller (EISA) ID */
+ ushort need_deinit; /* switch betw. BIOS/driver */
+ unchar switch_support; /* see need_deinit */
+ unchar padding[9];
+ unchar os_used[16]; /* OS code per service */
+ } dp_sram;
+ unchar bios_area[0x4000]; /* 16KB reserved for BIOS */
+ } bu;
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0x3000]; /* 12KB for interface */
+ } u;
+ struct {
+ unchar memlock; /* write protection DPRAM */
+ unchar event; /* release event */
+ unchar irqen; /* board interrupts enable */
+ unchar irqdel; /* acknowledge board int. */
+ unchar volatile Sema1; /* status semaphore */
+ unchar rq; /* IRQ/DRQ configuration */
+ } io;
+} gdt2_dpram_str;
+
+/* DPRAM PCI controllers */
+typedef struct {
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0xff0-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+ struct {
+ unchar unused0[1];
+ unchar volatile Sema1; /* command semaphore */
+ unchar unused1[3];
+ unchar irqen; /* board interrupts enable */
+ unchar unused2[2];
+ unchar event; /* release event */
+ unchar unused3[3];
+ unchar irqdel; /* acknowledge board int. */
+ unchar unused4[3];
+ } io;
+} gdt6_dpram_str;
+
+/* PLX register structure (new PCI controllers) */
+typedef struct {
+ unchar cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
+ unchar unused1[0x3f];
+ unchar volatile sema0_reg; /* command semaphore */
+ unchar volatile sema1_reg; /* status semaphore */
+ unchar unused2[2];
+ ushort volatile status; /* command status */
+ ushort service; /* service */
+ ulong info[2]; /* additional info */
+ unchar unused3[0x10];
+ unchar ldoor_reg; /* PCI to local doorbell */
+ unchar unused4[3];
+ unchar volatile edoor_reg; /* local to PCI doorbell */
+ unchar unused5[3];
+ unchar control0; /* control0 register(unused) */
+ unchar control1; /* board interrupts enable */
+ unchar unused6[0x16];
+} gdt6c_plx_regs;
+
+/* DPRAM new PCI controllers */
+typedef struct {
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0x4000-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+} gdt6c_dpram_str;
+
+/* i960 register structure (PCI MPR controllers) */
+typedef struct {
+ unchar unused1[16];
+ unchar volatile sema0_reg; /* command semaphore */
+ unchar unused2;
+ unchar volatile sema1_reg; /* status semaphore */
+ unchar unused3;
+ ushort volatile status; /* command status */
+ ushort service; /* service */
+ ulong info[2]; /* additional info */
+ unchar ldoor_reg; /* PCI to local doorbell */
+ unchar unused4[11];
+ unchar volatile edoor_reg; /* local to PCI doorbell */
+ unchar unused5[7];
+ unchar edoor_en_reg; /* board interrupts enable */
+ unchar unused6[27];
+ ulong unused7[1004]; /* size: 4 KB */
+} gdt6m_i960_regs;
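In the register blocks above only the fields the controller changes on its own (semaphores, status, doorbells) are declared volatile, which forces the compiler to re-read them from the device on every access; the unused/padding arrays keep the C structure congruent with the hardware register map. A minimal sketch of the polling idiom such a layout supports (the wait loop itself is hypothetical, not taken from this driver):

    /* Busy-wait until the controller releases the command semaphore.
     * 'regs' points at the memory-mapped register block; because
     * sema0_reg is volatile, each pass reads the device again. */
    static int sketch_wait_sema0(gdt6m_i960_regs *regs, unsigned long loops)
    {
        while (loops--) {
            if (regs->sema0_reg == 0)
                return 1;               /* semaphore free, command slot available */
        }
        return 0;                       /* timed out */
    }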
+
+/* DPRAM PCI MPR controllers */
+typedef struct {
+ gdt6m_i960_regs i960r; /* 4KB i960 registers */
+ union {
+ gdt_dpr_if ic; /* interface area */
+ unchar if_area[0x3000-sizeof(gdt_pci_sram)];
+ } u;
+ gdt_pci_sram gdt6sr; /* SRAM structure */
+} gdt6m_dpram_str;
+
+
+/* PCI resources */
+typedef struct {
+ ushort device_id; /* device ID (0,..,9) */
+ unchar bus; /* PCI bus */
+ unchar device_fn; /* PCI device/function no. */
+ ulong dpmem; /* DPRAM address */
+ ulong io; /* IO address */
+ ulong io_mm; /* IO address mem. mapped */
+ ulong bios; /* BIOS address */
+ unchar irq; /* IRQ */
+} gdth_pci_str;
+
+
+/* controller information structure */
+typedef struct {
+ unchar bus_cnt; /* SCSI bus count */
+ unchar type; /* controller class */
+ ushort raw_feat; /* feat. raw service (s/g,..) */
+ ulong stype; /* controller subtype */
+ ushort cache_feat; /* feat. cache serv. (s/g,..) */
+ ushort bmic; /* BMIC address (EISA) */
+ void *brd; /* DPRAM address */
+ ulong brd_phys; /* slot number/BIOS address */
+ gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
+ gdth_cmd_str *pccb; /* address command structure */
+ gdth_scratch_str *pscratch;
+ unchar irq; /* IRQ */
+ unchar drq; /* DRQ (ISA controllers) */
+ ushort status; /* command status */
+ ulong info;
+ ulong info2; /* additional info */
+ Scsi_Cmnd *req_first; /* top of request queue */
+ struct {
+ unchar type; /* device type */
+ unchar heads; /* mapping */
+ unchar secs;
+ unchar lock; /* drive locked ? (hot plug) */
+ ushort hostdrive; /* host drive number */
+ ushort devtype; /* further information */
+ ulong size; /* capacity */
+ } id[MAXBUS][MAXID];
+ ushort cmd_cnt; /* command count in DPRAM */
+ ushort cmd_len; /* length of actual command */
+ ushort cmd_offs_dpmem; /* actual offset in DPRAM */
+ ushort ic_all_size; /* sizeof DPRAM interf. area */
+ unchar reserved;
+ unchar mode; /* information from /proc */
+ ushort param_size;
+ gdth_cpar_str cpar; /* controller cache par. */
+ char ctr_name[16]; /* controller name */
+} gdth_ha_str;
+
+/* structure for scsi_register(), SCSI bus != 0 */
+typedef struct {
+ ushort hanum;
+ ushort busnum;
+} gdth_num_str;
+
+/* structure for scsi_register() */
+typedef struct {
+ gdth_num_str numext; /* must be the first element */
+ gdth_ha_str haext;
+ gdth_cmd_str cmdext;
+ gdth_scratch_str dmaext;
+} gdth_ext_str;
+
+
+/* INQUIRY data format */
+typedef struct {
+ unchar type_qual;
+ unchar modif_rmb;
+ unchar version;
+ unchar resp_aenc;
+ unchar add_length;
+ unchar reserved1;
+ unchar reserved2;
+ unchar misc;
+ unchar vendor[8];
+ unchar product[16];
+ unchar revision[4];
+} gdth_inq_data;
+
+/* READ_CAPACITY data format */
+typedef struct {
+ ulong last_block_no;
+ ulong block_length;
+} gdth_rdcap_data;
+
+/* REQUEST_SENSE data format */
+typedef struct {
+ unchar errorcode;
+ unchar segno;
+ unchar key;
+ ulong info;
+ unchar add_length;
+ ulong cmd_info;
+ unchar adsc;
+ unchar adsq;
+ unchar fruc;
+ unchar key_spec[3];
+} gdth_sense_data;
+
+/* MODE_SENSE data format */
+typedef struct {
+ struct {
+ unchar data_length;
+ unchar med_type;
+ unchar dev_par;
+ unchar bd_length;
+ } hd;
+ struct {
+ unchar dens_code;
+ unchar block_count[3];
+ unchar reserved;
+ unchar block_length[3];
+ } bd;
+} gdth_modep_data;
+
+/* stack frame */
+typedef struct {
+ ulong b[10]; /* 32 bit compiler ! */
+} gdth_stackframe;
+
+#pragma pack()
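The #pragma pack(1)/#pragma pack() pair brackets every structure the firmware and BIOS interpret directly, so the compiler may not insert alignment padding between members; the closing directive restores default alignment for the driver-internal structures that follow. A small stand-alone illustration of the difference, using a hypothetical structure rather than one from this header:

    #include <stdio.h>

    /* default alignment: the compiler pads 'a' so that 'b' is 4-byte aligned */
    struct padded   { unsigned char a; unsigned int b; };

    #pragma pack(1)
    /* packed: members follow each other byte for byte, as the DPRAM and
     * command layouts above require */
    struct packed_s { unsigned char a; unsigned int b; };
    #pragma pack()

    int main(void)
    {
        printf("padded=%lu packed=%lu\n",
               (unsigned long)sizeof(struct padded),
               (unsigned long)sizeof(struct packed_s));
        /* typically prints: padded=8 packed=5 */
        return 0;
    }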
+
+
+/* data structure for reserve drives */
+typedef struct {
+ unchar hanum;
+ unchar bus;
+ unchar id;
+} gdth_reserve_str;
+
+
+/* function prototyping */
+
+int gdth_detect(Scsi_Host_Template *);
+int gdth_release(struct Scsi_Host *);
+int gdth_command(Scsi_Cmnd *);
+int gdth_queuecommand(Scsi_Cmnd *,void (*done)(Scsi_Cmnd *));
+int gdth_abort(Scsi_Cmnd *);
+#if LINUX_VERSION_CODE >= 0x010346
+int gdth_reset(Scsi_Cmnd *, unsigned int reset_flags);
+#else
+int gdth_reset(Scsi_Cmnd *);
+#endif
+const char *gdth_info(struct Scsi_Host *);
+
+#if LINUX_VERSION_CODE >= 0x02015F
+int gdth_bios_param(Disk *,kdev_t,int *);
+extern struct proc_dir_entry proc_scsi_gdth;
+int gdth_proc_info(char *,char **,off_t,int,int,int);
+int gdth_eh_abort(Scsi_Cmnd *scp);
+int gdth_eh_device_reset(Scsi_Cmnd *scp);
+int gdth_eh_bus_reset(Scsi_Cmnd *scp);
+int gdth_eh_host_reset(Scsi_Cmnd *scp);
+#define GDTH { proc_dir: &proc_scsi_gdth, \
+ proc_info: gdth_proc_info, \
+ name: "GDT SCSI Disk Array Controller",\
+ detect: gdth_detect, \
+ release: gdth_release, \
+ info: gdth_info, \
+ command: gdth_command, \
+ queuecommand: gdth_queuecommand, \
+ eh_abort_handler: gdth_eh_abort, \
+ eh_device_reset_handler: gdth_eh_device_reset, \
+ eh_bus_reset_handler: gdth_eh_bus_reset, \
+ eh_host_reset_handler: gdth_eh_host_reset, \
+ abort: gdth_abort, \
+ reset: gdth_reset, \
+ bios_param: gdth_bios_param, \
+ can_queue: GDTH_MAXCMDS, \
+ this_id: -1, \
+ sg_tablesize: GDTH_MAXSG, \
+ cmd_per_lun: GDTH_MAXC_P_L, \
+ present: 0, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 1 /* use new error code */ }
+#elif LINUX_VERSION_CODE >= 0x010300
+int gdth_bios_param(Disk *,kdev_t,int *);
+extern struct proc_dir_entry proc_scsi_gdth;
+int gdth_proc_info(char *,char **,off_t,int,int,int);
+#define GDTH { NULL, NULL, \
+ &proc_scsi_gdth, \
+ gdth_proc_info, \
+ "GDT SCSI Disk Array Controller", \
+ gdth_detect, \
+ gdth_release, \
+ gdth_info, \
+ gdth_command, \
+ gdth_queuecommand, \
+ gdth_abort, \
+ gdth_reset, \
+ NULL, \
+ gdth_bios_param, \
+ GDTH_MAXCMDS, \
+ -1, \
+ GDTH_MAXSG, \
+ GDTH_MAXC_P_L, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING}
+#else
+int gdth_bios_param(Disk *,int,int *);
+#define GDTH { NULL, NULL, \
+ "GDT SCSI Disk Array Controller", \
+ gdth_detect, \
+ gdth_release, \
+ gdth_info, \
+ gdth_command, \
+ gdth_queuecommand, \
+ gdth_abort, \
+ gdth_reset, \
+ NULL, \
+ gdth_bios_param, \
+ GDTH_MAXCMDS, \
+ -1, \
+ GDTH_MAXSG, \
+ GDTH_MAXC_P_L, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING}
+#endif
+
+#endif
+
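The preprocessor guards above compare LINUX_VERSION_CODE, which packs a kernel release as (major << 16) | (minor << 8) | patch; 0x010346 is 1.3.70, the cut-off this driver uses for the two-argument reset entry point, and 0x02015F is 2.1.95, the cut-off for the template with the new error-handling hooks. A quick sketch of the encoding, equivalent to what the later KERNEL_VERSION() macro computes:

    #include <stdio.h>

    static unsigned version_code(unsigned major, unsigned minor, unsigned patch)
    {
        return (major << 16) | (minor << 8) | patch;
    }

    int main(void)
    {
        printf("1.3.70 -> 0x%06X\n", version_code(1, 3, 70));   /* 0x010346 */
        printf("2.1.95 -> 0x%06X\n", version_code(2, 1, 95));   /* 0x02015F */
        printf("2.1.75 -> 0x%06X\n", version_code(2, 1, 75));   /* 0x02014B */
        return 0;
    }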
diff --git a/linux/src/drivers/scsi/gdth_ioctl.h b/linux/src/drivers/scsi/gdth_ioctl.h
new file mode 100644
index 0000000..bf15554
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth_ioctl.h
@@ -0,0 +1,86 @@
+#ifndef _GDTH_IOCTL_H
+#define _GDTH_IOCTL_H
+
+/* gdth_ioctl.h
+ * $Id: gdth_ioctl.h,v 1.1 1999/04/26 05:54:37 tb Exp $
+ */
+
+/* IOCTLs */
+#define GDTIOCTL_MASK ('J'<<8)
+#define GDTIOCTL_GENERAL (GDTIOCTL_MASK | 0) /* general IOCTL */
+#define GDTIOCTL_DRVERS (GDTIOCTL_MASK | 1) /* get driver version */
+#define GDTIOCTL_CTRTYPE (GDTIOCTL_MASK | 2) /* get controller type */
+#define GDTIOCTL_OSVERS (GDTIOCTL_MASK | 3) /* get OS version */
+#define GDTIOCTL_CTRCNT (GDTIOCTL_MASK | 5) /* get controller count */
+#define GDTIOCTL_LOCKDRV (GDTIOCTL_MASK | 6) /* lock host drive */
+#define GDTIOCTL_LOCKCHN (GDTIOCTL_MASK | 7) /* lock channel */
+#define GDTIOCTL_EVENT (GDTIOCTL_MASK | 8) /* read controller events */
+
+#define GDTIOCTL_MAGIC 0x06030f07UL
+
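These private command numbers put a one-byte magic ('J') in the high byte and the sub-function in the low byte, so the whole set stays distinct from other drivers' private ioctls while remaining an ordinary 16-bit constant. A trivial sketch of how one of them decomposes:

    #include <stdio.h>

    int main(void)
    {
        unsigned drvers = ('J' << 8) | 1;                   /* GDTIOCTL_DRVERS */
        printf("magic byte  : 0x%02X ('%c')\n", drvers >> 8, drvers >> 8);
        printf("sub-function: %u\n", drvers & 0xff);        /* prints 1 */
        return 0;
    }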
+
+/* IOCTL structure (write) */
+typedef struct {
+ ulong magic; /* IOCTL magic */
+ ushort ioctl; /* IOCTL */
+ ushort ionode; /* controller number */
+ ushort service; /* controller service */
+ ushort timeout; /* timeout */
+ union {
+ struct {
+ unchar command[512]; /* controller command */
+ unchar data[1]; /* add. data */
+ } general;
+ struct {
+ unchar lock; /* lock/unlock */
+ unchar drive_cnt; /* drive count */
+ ushort drives[35]; /* drives */
+ } lockdrv;
+ struct {
+ unchar lock; /* lock/unlock */
+ unchar channel; /* channel */
+ } lockchn;
+ struct {
+ int erase; /* erase event ? */
+ int handle;
+ } event;
+ } iu;
+} gdth_iowr_str;
+
+/* IOCTL structure (read) */
+typedef struct {
+ ulong size; /* buffer size */
+ ulong status; /* IOCTL error code */
+ union {
+ struct {
+ unchar data[1]; /* data */
+ } general;
+ struct {
+ ushort version; /* driver version */
+ } drvers;
+ struct {
+ unchar type; /* controller type */
+ ushort info; /* slot etc. */
+ ushort oem_id; /* OEM ID */
+ ushort bios_ver; /* not used */
+ ushort access; /* not used */
+ ushort ext_type; /* extended type */
+ } ctrtype;
+ struct {
+ unchar version; /* OS version */
+ unchar subversion; /* OS subversion */
+ ushort revision; /* revision */
+ } osvers;
+ struct {
+ ushort count; /* controller count */
+ } ctrcnt;
+ struct {
+ int handle;
+ unchar evt[32]; /* event structure */
+ } event;
+ } iu;
+} gdth_iord_str;
+
+
+#endif
+
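Together the two structures form a small binary request/reply protocol over the driver's /proc interface: a tool writes a gdth_iowr_str (magic, sub-function, controller number and a per-function payload) and reads the matching gdth_iord_str back; gdth_proc.c below implements the kernel side. A sketch of preparing a driver-version request in the form gdth_set_bin_info() expects to parse, with the file name and read convention left to the tool:

    #include <string.h>

    /* Illustrative only: fill a GDTIOCTL_DRVERS request for one
     * controller.  The ulong/ushort typedefs are the same ones the
     * driver headers rely on. */
    static void sketch_build_drvers(gdth_iowr_str *iowr, ushort controller)
    {
        memset(iowr, 0, sizeof(*iowr));
        iowr->magic   = GDTIOCTL_MAGIC;    /* checked by gdth_set_info() */
        iowr->ioctl   = GDTIOCTL_DRVERS;   /* sub-function: driver version */
        iowr->ionode  = controller;        /* controller number */
        iowr->timeout = 30;                /* only used by GENERAL commands */
    }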
diff --git a/linux/src/drivers/scsi/gdth_proc.c b/linux/src/drivers/scsi/gdth_proc.c
new file mode 100644
index 0000000..8764d55
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth_proc.c
@@ -0,0 +1,656 @@
+/* gdth_proc.c
+ * $Id: gdth_proc.c,v 1.1 1999/04/26 05:54:38 tb Exp $
+ */
+
+#include "gdth_ioctl.h"
+
+int gdth_proc_info(char *buffer,char **start,off_t offset,int length,
+ int hostno,int inout)
+{
+ int hanum,busnum,i;
+
+ TRACE2(("gdth_proc_info() length %d ha %d offs %d inout %d\n",
+ length,hostno,(int)offset,inout));
+
+ for (i=0; i<gdth_ctr_vcount; ++i) {
+ if (gdth_ctr_vtab[i]->host_no == hostno)
+ break;
+ }
+ if (i==gdth_ctr_vcount)
+ return(-EINVAL);
+
+ hanum = NUMDATA(gdth_ctr_vtab[i])->hanum;
+ busnum= NUMDATA(gdth_ctr_vtab[i])->busnum;
+
+ if (inout)
+ return(gdth_set_info(buffer,length,i,hanum,busnum));
+ else
+ return(gdth_get_info(buffer,start,offset,length,i,hanum,busnum));
+}
+
+static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum)
+{
+ int ret_val;
+ Scsi_Cmnd scp;
+ Scsi_Device sdev;
+ gdth_iowr_str *piowr;
+
+ TRACE2(("gdth_set_info() ha %d bus %d\n",hanum,busnum));
+ piowr = (gdth_iowr_str *)buffer;
+
+ memset(&sdev,0,sizeof(Scsi_Device));
+ memset(&scp, 0,sizeof(Scsi_Cmnd));
+ sdev.host = gdth_ctr_vtab[vh];
+ sdev.id = sdev.host->this_id;
+ scp.cmd_len = 12;
+ scp.host = gdth_ctr_vtab[vh];
+ scp.target = sdev.host->this_id;
+ scp.device = &sdev;
+ scp.use_sg = 0;
+
+ if (length >= 4) {
+ if (strncmp(buffer,"gdth",4) == 0) {
+ buffer += 5;
+ length -= 5;
+ ret_val = gdth_set_asc_info( buffer, length, hanum, scp );
+ } else if (piowr->magic == GDTIOCTL_MAGIC) {
+ ret_val = gdth_set_bin_info( buffer, length, hanum, scp );
+ } else {
+ printk("GDT: Wrong signature: %.6s\n",buffer);
+ ret_val = -EINVAL;
+ }
+ } else {
+ ret_val = -EINVAL;
+ }
+ return ret_val;
+}
+
+static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp)
+{
+ int orig_length, drive, wb_mode;
+ char cmnd[12];
+ int i, j, found;
+ gdth_ha_str *ha;
+ gdth_cmd_str gdtcmd;
+ gdth_cpar_str *pcpar;
+
+ TRACE2(("gdth_set_asc_info() ha %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ memset(cmnd, 0,10);
+ orig_length = length + 5;
+ drive = -1;
+ wb_mode = 0;
+ found = FALSE;
+
+ if (length >= 5 && strncmp(buffer,"flush",5)==0) {
+ buffer += 6;
+ length -= 6;
+ if (length && *buffer>='0' && *buffer<='9') {
+ drive = (int)(*buffer-'0');
+ ++buffer; --length;
+ if (length && *buffer>='0' && *buffer<='9') {
+ drive = drive*10 + (int)(*buffer-'0');
+ ++buffer; --length;
+ }
+ printk("GDT: Flushing host drive %d .. ",drive);
+ } else {
+ printk("GDT: Flushing all host drives .. ");
+ }
+ for (i = 0; i < MAXBUS; ++i) {
+ for (j = 0; j < MAXID; ++j) {
+ if (ha->id[i][j].type == CACHE_DTYP) {
+ if (drive != -1 &&
+ ha->id[i][j].hostdrive != (ushort)drive)
+ continue;
+ found = TRUE;
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_FLUSH;
+ gdtcmd.u.cache.DeviceNo = ha->id[i][j].hostdrive;
+ gdtcmd.u.cache.BlockNo = 1;
+ gdtcmd.u.cache.sg_canz = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ 30*HZ, 1);
+ down(&sem);
+ }
+ }
+ }
+ }
+ if (!found)
+ printk("\nNo host drive found !\n");
+ else
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ if (length >= 7 && strncmp(buffer,"wbp_off",7)==0) {
+ buffer += 8;
+ length -= 8;
+ printk("GDT: Disabling write back permanently .. ");
+ wb_mode = 1;
+ } else if (length >= 6 && strncmp(buffer,"wbp_on",6)==0) {
+ buffer += 7;
+ length -= 7;
+ printk("GDT: Enabling write back permanently .. ");
+ wb_mode = 2;
+ } else if (length >= 6 && strncmp(buffer,"wb_off",6)==0) {
+ buffer += 7;
+ length -= 7;
+ printk("GDT: Disabling write back commands .. ");
+ if (ha->cache_feat & GDT_WR_THROUGH) {
+ gdth_write_through = TRUE;
+ printk("Done.\n");
+ } else {
+ printk("Not supported !\n");
+ }
+ return(orig_length);
+ } else if (length >= 5 && strncmp(buffer,"wb_on",5)==0) {
+ buffer += 6;
+ length -= 6;
+ printk("GDT: Enabling write back commands .. ");
+ gdth_write_through = FALSE;
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ if (wb_mode) {
+ pcpar = (gdth_cpar_str *)kmalloc( sizeof(gdth_cpar_str),
+ GFP_ATOMIC | GFP_DMA );
+ if (pcpar == NULL) {
+ TRACE2(("gdth_set_info(): Unable to allocate memory.\n"));
+ printk("Unable to allocate memory.\n");
+ return(-EINVAL);
+ }
+ memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
+ gdtcmd.BoardNode = LOCALBOARD;
+ gdtcmd.Service = CACHESERVICE;
+ gdtcmd.OpCode = GDT_IOCTL;
+ gdtcmd.u.ioctl.p_param = virt_to_bus(pcpar);
+ gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
+ gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
+ gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
+ pcpar->write_back = wb_mode==1 ? 0:1;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, &gdtcmd, sizeof(gdth_cmd_str),
+ gdth_scsi_done, 30*HZ, 1);
+ down(&sem);
+ }
+ kfree( pcpar );
+ printk("Done.\n");
+ return(orig_length);
+ }
+
+ printk("GDT: Unknown command: %s Length: %d\n",buffer,length);
+ return(-EINVAL);
+}
+
+static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd scp)
+{
+ char cmnd[12];
+ int id;
+ unchar i, j, k, found;
+ gdth_ha_str *ha;
+ gdth_iowr_str *piowr;
+ gdth_iord_str *piord;
+ gdth_cmd_str *pcmd;
+ ulong *ppadd;
+ ulong add_size, flags;
+
+
+ TRACE2(("gdth_set_bin_info() ha %d\n",hanum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ memset(cmnd, 0,10);
+ piowr = (gdth_iowr_str *)buffer;
+ piord = NULL;
+ pcmd = NULL;
+
+ if (length < GDTOFFSOF(gdth_iowr_str,iu))
+ return(-EINVAL);
+
+ switch (piowr->ioctl) {
+ case GDTIOCTL_GENERAL:
+ if (length < GDTOFFSOF(gdth_iowr_str,iu.general.data[0]))
+ return(-EINVAL);
+ pcmd = (gdth_cmd_str *)piowr->iu.general.command;
+ pcmd->Service = piowr->service;
+ if (pcmd->OpCode == GDT_IOCTL) {
+ ppadd = &pcmd->u.ioctl.p_param;
+ add_size = pcmd->u.ioctl.param_size;
+ } else if (piowr->service == CACHESERVICE) {
+ add_size = pcmd->u.cache.BlockCnt * SECTOR_SIZE;
+ if (ha->cache_feat & SCATTER_GATHER) {
+ ppadd = &pcmd->u.cache.sg_lst[0].sg_ptr;
+ pcmd->u.cache.DestAddr = -1UL;
+ pcmd->u.cache.sg_lst[0].sg_len = add_size;
+ pcmd->u.cache.sg_canz = 1;
+ } else {
+ ppadd = &pcmd->u.cache.DestAddr;
+ pcmd->u.cache.sg_canz = 0;
+ }
+ } else if (piowr->service == SCSIRAWSERVICE) {
+ add_size = pcmd->u.raw.sdlen;
+ if (ha->raw_feat & SCATTER_GATHER) {
+ ppadd = &pcmd->u.raw.sg_lst[0].sg_ptr;
+ pcmd->u.raw.sdata = -1UL;
+ pcmd->u.raw.sg_lst[0].sg_len = add_size;
+ pcmd->u.raw.sg_ranz = 1;
+ } else {
+ ppadd = &pcmd->u.raw.sdata;
+ pcmd->u.raw.sg_ranz = 0;
+ }
+ } else {
+ return(-EINVAL);
+ }
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) + add_size );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+
+ piord->size = sizeof(gdth_iord_str) + add_size;
+ if (add_size > 0) {
+ memcpy(piord->iu.general.data, piowr->iu.general.data, add_size);
+ *ppadd = virt_to_bus(piord->iu.general.data);
+ }
+ /* do IOCTL */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ scp.request.rq_status = RQ_SCSI_BUSY;
+ scp.request.sem = &sem;
+ scp.SCp.this_residual = IOCTL_PRI;
+ scsi_do_cmd(&scp, cmnd, pcmd,
+ sizeof(gdth_cmd_str), gdth_scsi_done,
+ piowr->timeout*HZ, 1);
+ down(&sem);
+ piord->status = (ulong)scp.SCp.Message;
+ }
+ break;
+
+ case GDTIOCTL_DRVERS:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ piord->iu.drvers.version = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
+ break;
+
+ case GDTIOCTL_CTRTYPE:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
+ piord->iu.ctrtype.type = (unchar)((ha->stype>>20) - 0x10);
+ } else if (ha->type != GDT_PCIMPR) {
+ piord->iu.ctrtype.type = (unchar)((ha->stype<<8) + 6);
+ } else {
+ piord->iu.ctrtype.type = 0xfe;
+ piord->iu.ctrtype.ext_type = 0x6000 | ha->stype;
+ }
+ piord->iu.ctrtype.info = ha->brd_phys;
+ piord->iu.ctrtype.oem_id = (ushort)GDT3_ID;
+ break;
+
+ case GDTIOCTL_CTRCNT:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ piord->iu.ctrcnt.count = (ushort)gdth_ctr_count;
+ break;
+
+ case GDTIOCTL_OSVERS:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ piord->iu.osvers.version = (unchar)(LINUX_VERSION_CODE >> 16);
+ piord->iu.osvers.subversion = (unchar)(LINUX_VERSION_CODE >> 8);
+ piord->iu.osvers.revision = (ushort)(LINUX_VERSION_CODE & 0xff);
+ break;
+
+ case GDTIOCTL_LOCKDRV:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ for (i = k = 0; i < piowr->iu.lockdrv.drive_cnt; ++i) {
+ found = FALSE;
+ for (j = 0; j < ha->bus_cnt; ++j) {
+ for (k = 0; k < MAXID; ++k) {
+ if (ha->id[j][k].type == CACHE_DTYP &&
+ ha->id[j][k].hostdrive == piowr->iu.lockdrv.drives[i]) {
+ found = TRUE;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+ if (!found)
+ continue;
+
+ if (piowr->iu.lockdrv.lock) {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 1;
+ restore_flags( flags );
+ gdth_wait_completion( hanum, j, k );
+ gdth_stop_timeout( hanum, j, k );
+ } else {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 0;
+ restore_flags( flags );
+ gdth_start_timeout( hanum, j, k );
+ gdth_next( hanum );
+ }
+ }
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ break;
+
+ case GDTIOCTL_LOCKCHN:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ for (k = 0, j = piowr->iu.lockchn.channel; k < MAXID; ++k) {
+ if (ha->id[j][k].type != RAW_DTYP)
+ continue;
+
+ if (piowr->iu.lockchn.lock) {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 1;
+ restore_flags( flags );
+ gdth_wait_completion( hanum, j, k );
+ gdth_stop_timeout( hanum, j, k );
+ } else {
+ save_flags( flags );
+ cli();
+ ha->id[j][k].lock = 0;
+ restore_flags( flags );
+ gdth_start_timeout( hanum, j, k );
+ gdth_next( hanum );
+ }
+ }
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ break;
+
+ case GDTIOCTL_EVENT:
+ id = gdth_ioctl_alloc( hanum, sizeof(gdth_iord_str) );
+ if (id == -1)
+ return(-EBUSY);
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ if (piowr->iu.event.erase == 0) {
+ piord->iu.event.handle = gdth_read_event( piowr->iu.event.handle,
+ (gdth_evt_str *)piord->iu.event.evt );
+ } else {
+ piord->iu.event.handle = piowr->iu.event.handle;
+ gdth_readapp_event( (unchar)piowr->iu.event.erase,
+ (gdth_evt_str *)piord->iu.event.evt );
+ }
+ piord->size = sizeof(gdth_iord_str);
+ piord->status = S_OK;
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+ /* we return a buffer ID to detect the right buffer during READ-IOCTL */
+ return id;
+}
+
+static int gdth_get_info(char *buffer,char **start,off_t offset,
+ int length,int vh,int hanum,int busnum)
+{
+ int size = 0,len = 0;
+ off_t begin = 0,pos = 0;
+ gdth_ha_str *ha;
+ gdth_iord_str *piord;
+ int id;
+
+ TRACE2(("gdth_get_info() ha %d bus %d\n",hanum,busnum));
+ ha = HADATA(gdth_ctr_tab[hanum]);
+ id = length;
+
+ /* look for buffer ID in length */
+ if (id > 4) {
+#if LINUX_VERSION_CODE >= 0x020000
+ size = sprintf(buffer+len,
+ "%s SCSI Disk Array Controller\n",
+ ha->ctr_name);
+#else
+ size = sprintf(buffer+len,
+ "%s SCSI Disk Array Controller (SCSI Bus %d)\n",
+ ha->ctr_name,busnum);
+#endif
+ len += size; pos = begin + len;
+ size = sprintf(buffer+len,
+ "Firmware Version: %d.%2d\tDriver Version: %s\n",
+ (unchar)(ha->cpar.version>>8),
+ (unchar)(ha->cpar.version),GDTH_VERSION_STR);
+ len += size; pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+
+ } else {
+ piord = (gdth_iord_str *)gdth_ioctl_tab[id-1][hanum];
+ if (piord == NULL)
+ goto stop_output;
+ length = piord->size;
+ memcpy(buffer+len, (char *)piord, length);
+ gdth_ioctl_free(hanum, id);
+ len += length; pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+
+stop_output:
+ *start = buffer +(offset-begin);
+ len -= (offset-begin);
+ if (len > length)
+ len = length;
+ TRACE2(("get_info() len %d pos %d begin %d offset %d length %d size %d\n",
+ len,(int)pos,(int)begin,(int)offset,length,size));
+ return(len);
+}
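gdth_get_info() follows the classic proc_info read contract: format output from the beginning, keep a running position in pos/begin, then hand back *start and a trimmed length so the caller only sees the window [offset, offset+length). Here the contract is overloaded: a length of four or less is interpreted as a buffer ID and returns a pending IOCTL reply instead of text. A condensed, generic sketch of just the windowing bookkeeping (it relies on the kernel's sprintf and off_t; the text produced stands in for the controller-specific output above):

    static int sketch_proc_read(char *buffer, char **start,
                                off_t offset, int length)
    {
        int len = 0;
        off_t begin = 0, pos = 0;

        len += sprintf(buffer + len, "first line of output\n");
        pos = begin + len;
        if (pos < offset) {            /* output so far lies before the window */
            len = 0;
            begin = pos;
        }
        if (pos > offset + length)     /* window already full, stop producing */
            goto stop;

        len += sprintf(buffer + len, "second line of output\n");
        pos = begin + len;
        /* ... one such block per chunk of output ... */

    stop:
        *start = buffer + (offset - begin);   /* start of the requested window */
        len -= (offset - begin);
        if (len > length)
            len = length;
        return len < 0 ? 0 : len;
    }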
+
+
+void gdth_scsi_done(Scsi_Cmnd *scp)
+{
+ TRACE2(("gdth_scsi_done()\n"));
+
+ scp->request.rq_status = RQ_SCSI_DONE;
+
+ if (scp->request.sem != NULL)
+ up(scp->request.sem);
+}
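gdth_scsi_done() is the completion half of the synchronous pattern used for every internal command in this file: the issuing path marks the request busy, hangs a locked semaphore on it, queues it with scsi_do_cmd(), and sleeps in down(); the up() here releases that sleeper once the controller has answered. The whole handshake, condensed (scp, cmnd and gdtcmd are prepared by the callers above):

    {
        struct semaphore sem = MUTEX_LOCKED;     /* created already locked */

        scp.request.rq_status = RQ_SCSI_BUSY;    /* mark the request in flight */
        scp.request.sem = &sem;                  /* gdth_scsi_done() will up() this */
        scp.SCp.this_residual = IOCTL_PRI;       /* internal-command priority */
        scsi_do_cmd(&scp, cmnd, &gdtcmd, sizeof(gdth_cmd_str),
                    gdth_scsi_done, 30*HZ, 1);   /* queue and return immediately */
        down(&sem);                              /* sleep until completion */
    }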
+
+static int gdth_ioctl_alloc(int hanum, ushort size)
+{
+ ulong flags;
+ int i;
+
+ if (size == 0)
+ return -1;
+
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < 4; ++i) {
+ if (gdth_ioctl_tab[i][hanum] == NULL) {
+ gdth_ioctl_tab[i][hanum] = kmalloc( size, GFP_ATOMIC | GFP_DMA );
+ break;
+ }
+ }
+
+ restore_flags(flags);
+ if (i == 4 || gdth_ioctl_tab[i][hanum] == NULL)
+ return -1;
+ return (i+1);
+}
+
+static void gdth_ioctl_free(int hanum, int idx)
+{
+ ulong flags;
+
+ save_flags(flags);
+ cli();
+
+ kfree( gdth_ioctl_tab[idx-1][hanum] );
+ gdth_ioctl_tab[idx-1][hanum] = NULL;
+
+ restore_flags(flags);
+}
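gdth_ioctl_alloc() and gdth_ioctl_free() manage at most four DMA-capable reply buffers per controller; the value handed back is the slot index plus one, so 0 is never a valid ID and gdth_get_info() can tell a buffer ID (1..4) apart from a normal read length. A stand-alone model of the same slot scheme, dropping the per-controller dimension and the interrupt locking of the real functions:

    #include <stdlib.h>

    #define SLOTS 4

    static void *slot_tab[SLOTS];

    static int model_alloc(size_t size)
    {
        int i;

        if (size == 0)
            return -1;
        for (i = 0; i < SLOTS; ++i) {
            if (slot_tab[i] == NULL) {
                slot_tab[i] = malloc(size);   /* kmalloc(GFP_ATOMIC|GFP_DMA) in the driver */
                return slot_tab[i] ? i + 1 : -1;
            }
        }
        return -1;                            /* all four slots busy */
    }

    static void model_free(int id)
    {
        free(slot_tab[id - 1]);               /* id is index + 1 */
        slot_tab[id - 1] = NULL;
    }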
+
+static void gdth_wait_completion(int hanum, int busnum, int id)
+{
+ ulong flags;
+ int i;
+ Scsi_Cmnd *scp;
+
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < GDTH_MAXCMDS; ++i) {
+ scp = gdth_cmd_tab[i][hanum].cmnd;
+#if LINUX_VERSION_CODE >= 0x020000
+ if (!SPECIAL_SCP(scp) && scp->target == (unchar)id &&
+ scp->channel == (unchar)busnum)
+#else
+ if (!SPECIAL_SCP(scp) && scp->target == (unchar)id &&
+ NUMDATA(scp->host)->busnum == (unchar)busnum)
+#endif
+ {
+ scp->SCp.have_data_in = 0;
+ restore_flags(flags);
+ while (!scp->SCp.have_data_in)
+ barrier();
+ scp->scsi_done(scp);
+ save_flags(flags);
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+static void gdth_stop_timeout(int hanum, int busnum, int id)
+{
+ ulong flags;
+ Scsi_Cmnd *scp;
+ gdth_ha_str *ha;
+
+ save_flags(flags);
+ cli();
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
+#if LINUX_VERSION_CODE >= 0x020000
+ if (scp->target == (unchar)id &&
+ scp->channel == (unchar)busnum)
+#else
+ if (scp->target == (unchar)id &&
+ NUMDATA(scp->host)->busnum == (unchar)busnum)
+#endif
+ {
+ TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
+ scp->SCp.buffers_residual = gdth_update_timeout(hanum, scp, 0);
+ }
+ }
+ restore_flags(flags);
+}
+
+static void gdth_start_timeout(int hanum, int busnum, int id)
+{
+ ulong flags;
+ Scsi_Cmnd *scp;
+ gdth_ha_str *ha;
+
+ save_flags(flags);
+ cli();
+ ha = HADATA(gdth_ctr_tab[hanum]);
+
+ for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
+#if LINUX_VERSION_CODE >= 0x020000
+ if (scp->target == (unchar)id &&
+ scp->channel == (unchar)busnum)
+#else
+ if (scp->target == (unchar)id &&
+ NUMDATA(scp->host)->busnum == (unchar)busnum)
+#endif
+ {
+ TRACE2(("gdth_start_timeout(): update_timeout()\n"));
+ gdth_update_timeout(hanum, scp, scp->SCp.buffers_residual);
+ }
+ }
+ restore_flags(flags);
+}
+
+static int gdth_update_timeout(int hanum, Scsi_Cmnd *scp, int timeout)
+{
+ ulong flags;
+ int oldto;
+
+ save_flags(flags);
+ cli();
+ oldto = scp->timeout_per_command;
+ scp->timeout_per_command = timeout;
+
+#if LINUX_VERSION_CODE >= 0x02014B
+ if (timeout == 0) {
+ del_timer(&scp->eh_timeout);
+ scp->eh_timeout.data = (unsigned long) NULL;
+ scp->eh_timeout.expires = 0;
+ } else {
+ if (scp->eh_timeout.data != (unsigned long) NULL)
+ del_timer(&scp->eh_timeout);
+ scp->eh_timeout.data = (unsigned long) scp;
+ scp->eh_timeout.expires = jiffies + timeout;
+ add_timer(&scp->eh_timeout);
+ }
+#else
+ if (timeout > 0) {
+ if (timer_table[SCSI_TIMER].expires == 0) {
+ timer_table[SCSI_TIMER].expires = jiffies + timeout;
+ timer_active |= 1 << SCSI_TIMER;
+ } else {
+ if (jiffies + timeout < timer_table[SCSI_TIMER].expires)
+ timer_table[SCSI_TIMER].expires = jiffies + timeout;
+ }
+ }
+#endif
+
+ restore_flags(flags);
+ return oldto;
+}
+
diff --git a/linux/src/drivers/scsi/gdth_proc.h b/linux/src/drivers/scsi/gdth_proc.h
new file mode 100644
index 0000000..708b077
--- /dev/null
+++ b/linux/src/drivers/scsi/gdth_proc.h
@@ -0,0 +1,24 @@
+#ifndef _GDTH_PROC_H
+#define _GDTH_PROC_H
+
+/* gdth_proc.h
+ * $Id: gdth_proc.h,v 1.1 1999/04/26 05:54:39 tb Exp $
+ */
+
+static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum);
+static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
+static int gdth_set_bin_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
+static int gdth_get_info(char *buffer,char **start,off_t offset,
+ int length,int vh,int hanum,int busnum);
+
+static int gdth_ioctl_alloc(int hanum, ushort size);
+static void gdth_ioctl_free(int hanum, int id);
+static void gdth_wait_completion(int hanum, int busnum, int id);
+static void gdth_stop_timeout(int hanum, int busnum, int id);
+static void gdth_start_timeout(int hanum, int busnum, int id);
+static int gdth_update_timeout(int hanum, Scsi_Cmnd *scp, int timeout);
+
+void gdth_scsi_done(Scsi_Cmnd *scp);
+
+#endif
+
diff --git a/linux/src/drivers/scsi/hosts.c b/linux/src/drivers/scsi/hosts.c
new file mode 100644
index 0000000..0f1bedd
--- /dev/null
+++ b/linux/src/drivers/scsi/hosts.c
@@ -0,0 +1,554 @@
+/*
+ * hosts.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * mid to lowlevel SCSI driver interface
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ */
+
+
+/*
+ * This file contains the medium level SCSI
+ * host interface initialization, as well as the scsi_hosts array of SCSI
+ * hosts currently present in the system.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/config.h>
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+
+#include "scsi.h"
+
+#ifndef NULL
+#define NULL 0L
+#endif
+
+#define HOSTS_C
+
+#include "hosts.h"
+
+#ifdef CONFIG_A3000_SCSI
+#include "a3000.h"
+#endif
+
+#ifdef CONFIG_A2091_SCSI
+#include "a2091.h"
+#endif
+
+#ifdef CONFIG_GVP11_SCSI
+#include "gvp11.h"
+#endif
+
+#ifdef CONFIG_ATARI_SCSI
+#include "atari_scsi.h"
+#endif
+
+#ifdef CONFIG_SCSI_ADVANSYS
+#include "advansys.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA152X
+#include "aha152x.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA1542
+#include "aha1542.h"
+#endif
+
+#ifdef CONFIG_SCSI_AHA1740
+#include "aha1740.h"
+#endif
+
+#ifdef CONFIG_SCSI_AIC7XXX
+#include "aic7xxx.h"
+#endif
+
+#ifdef CONFIG_SCSI_BUSLOGIC
+#include "BusLogic.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA_DMA
+#include "eata_dma.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA_PIO
+#include "eata_pio.h"
+#endif
+
+#ifdef CONFIG_SCSI_U14_34F
+#include "u14-34f.h"
+#endif
+
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+#include "fdomain.h"
+#endif
+
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+#include "g_NCR5380.h"
+#endif
+
+#ifdef CONFIG_SCSI_IN2000
+#include "in2000.h"
+#endif
+
+#ifdef CONFIG_SCSI_PAS16
+#include "pas16.h"
+#endif
+
+#ifdef CONFIG_SCSI_QLOGIC_FAS
+#include "qlogicfas.h"
+#endif
+
+#ifdef CONFIG_SCSI_QLOGIC_ISP
+#include "qlogicisp.h"
+#endif
+
+#ifdef CONFIG_SCSI_SEAGATE
+#include "seagate.h"
+#endif
+
+#ifdef CONFIG_SCSI_T128
+#include "t128.h"
+#endif
+
+#ifdef CONFIG_SCSI_DTC3280
+#include "dtc.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C7xx
+#include "53c7,8xx.h"
+#endif
+
+#ifdef CONFIG_SCSI_SYM53C8XX
+#include "sym53c8xx.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C8XX
+#include "ncr53c8xx.h"
+#endif
+
+#ifdef CONFIG_SCSI_ULTRASTOR
+#include "ultrastor.h"
+#endif
+
+#ifdef CONFIG_SCSI_7000FASST
+#include "wd7000.h"
+#endif
+
+#ifdef CONFIG_SCSI_EATA
+#include "eata.h"
+#endif
+
+#ifdef CONFIG_SCSI_NCR53C406A
+#include "NCR53c406a.h"
+#endif
+
+#ifdef CONFIG_SCSI_DC390T
+#include "dc390.h"
+#endif
+
+#ifdef CONFIG_SCSI_AM53C974
+#include "AM53C974.h"
+#endif
+
+#ifdef CONFIG_SCSI_MEGARAID
+#include "megaraid.h"
+#endif
+
+#ifdef CONFIG_SCSI_PPA
+#include "ppa.h"
+#endif
+
+#ifdef CONFIG_SCSI_SUNESP
+#include "esp.h"
+#endif
+
+#ifdef CONFIG_BLK_DEV_IDESCSI
+#include "ide-scsi.h"
+#endif
+
+#ifdef CONFIG_SCSI_GDTH
+#include "gdth.h"
+#endif
+
+#ifdef CONFIG_SCSI_DEBUG
+#include "scsi_debug.h"
+#endif
+
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/hosts.c,v 1.1 1999/04/26 05:54:40 tb Exp $";
+*/
+
+/*
+ * The scsi host entries should be in the order you wish the
+ * cards to be detected. A driver may appear more than once IFF
+ * it can deal with being detected (and therefore initialized)
+ * with more than one simultaneous host number, can handle being
+ * reentrant, etc.
+ *
+ * They may appear in any order, as each SCSI host is told which host
+ * number it is during detection.
+ */
+
+/* This is a placeholder for controllers that are not configured into
+ * the system - we do this to ensure that the controller numbering is
+ * always consistent, no matter how the kernel is configured. */
+
+#define NO_CONTROLLER {NULL, NULL, NULL, NULL, NULL, NULL, NULL, \
+ NULL, NULL, 0, 0, 0, 0, 0, 0}
+
+/*
+ * When configure is run, we don't want to link to any object code. Since
+ * the macro for each host will contain function pointers, we cannot
+ * use it and instead must use a "blank" that does no such
+ * idiocy.
+ */
+
+Scsi_Host_Template * scsi_hosts = NULL;
+
+static Scsi_Host_Template builtin_scsi_hosts[] =
+{
+#ifdef CONFIG_AMIGA
+#ifdef CONFIG_A3000_SCSI
+ A3000_SCSI,
+#endif
+#ifdef CONFIG_A2091_SCSI
+ A2091_SCSI,
+#endif
+#ifdef CONFIG_GVP11_SCSI
+ GVP11_SCSI,
+#endif
+#endif
+
+#ifdef CONFIG_ATARI
+#ifdef CONFIG_ATARI_SCSI
+ ATARI_SCSI,
+#endif
+#endif
+
+#ifdef CONFIG_SCSI_ADVANSYS
+ ADVANSYS,
+#endif
+/* BusLogic must come before aha1542.c */
+#ifdef CONFIG_SCSI_BUSLOGIC
+ BUSLOGIC,
+#endif
+#ifdef CONFIG_SCSI_U14_34F
+ ULTRASTOR_14_34F,
+#endif
+#ifdef CONFIG_SCSI_ULTRASTOR
+ ULTRASTOR_14F,
+#endif
+#ifdef CONFIG_SCSI_AHA152X
+ AHA152X,
+#endif
+#ifdef CONFIG_SCSI_AHA1542
+ AHA1542,
+#endif
+#ifdef CONFIG_SCSI_AHA1740
+ AHA1740,
+#endif
+#ifdef CONFIG_SCSI_AIC7XXX
+ AIC7XXX,
+#endif
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+ FDOMAIN_16X0,
+#endif
+#ifdef CONFIG_SCSI_IN2000
+ IN2000,
+#endif
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+ GENERIC_NCR5380,
+#endif
+#ifdef CONFIG_SCSI_NCR53C406A /* 53C406A should come before QLOGIC */
+ NCR53c406a,
+#endif
+#ifdef CONFIG_SCSI_QLOGIC_FAS
+ QLOGICFAS,
+#endif
+#ifdef CONFIG_SCSI_QLOGIC_ISP
+ QLOGICISP,
+#endif
+#ifdef CONFIG_SCSI_PAS16
+ MV_PAS16,
+#endif
+#ifdef CONFIG_SCSI_SEAGATE
+ SEAGATE_ST0X,
+#endif
+#ifdef CONFIG_SCSI_T128
+ TRANTOR_T128,
+#endif
+#ifdef CONFIG_SCSI_DTC3280
+ DTC3x80,
+#endif
+#ifdef CONFIG_SCSI_DC390T
+ DC390_T,
+#endif
+#ifdef CONFIG_SCSI_NCR53C7xx
+ NCR53c7xx,
+#endif
+#ifdef CONFIG_SCSI_SYM53C8XX
+ SYM53C8XX,
+#endif
+#ifdef CONFIG_SCSI_NCR53C8XX
+ NCR53C8XX,
+#endif
+#ifdef CONFIG_SCSI_EATA_DMA
+ EATA_DMA,
+#endif
+#ifdef CONFIG_SCSI_EATA_PIO
+ EATA_PIO,
+#endif
+#ifdef CONFIG_SCSI_7000FASST
+ WD7000,
+#endif
+#ifdef CONFIG_SCSI_EATA
+ EATA,
+#endif
+#ifdef CONFIG_SCSI_AM53C974
+ AM53C974,
+#endif
+#ifdef CONFIG_SCSI_MEGARAID
+ MEGARAID,
+#endif
+#ifdef CONFIG_SCSI_PPA
+ PPA,
+#endif
+#ifdef CONFIG_SCSI_SUNESP
+ SCSI_SPARC_ESP,
+#endif
+#ifdef CONFIG_SCSI_GDTH
+ GDTH,
+#endif
+#ifdef CONFIG_BLK_DEV_IDESCSI
+ IDESCSI,
+#endif
+#ifdef CONFIG_SCSI_DEBUG
+ SCSI_DEBUG,
+#endif
+};
+
+#define MAX_SCSI_HOSTS (sizeof(builtin_scsi_hosts) / sizeof(Scsi_Host_Template))
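MAX_SCSI_HOSTS is not a tunable: it is the number of entries the CONFIG_* selection above left in builtin_scsi_hosts[], computed with the usual sizeof-array over sizeof-element idiom. A stand-alone illustration:

    #include <stdio.h>

    static const int builtin[] = { 10, 20, 30 };
    #define NELEM(a) (sizeof(a) / sizeof((a)[0]))   /* compile-time element count */

    int main(void)
    {
        printf("%lu entries\n", (unsigned long)NELEM(builtin));   /* prints: 3 entries */
        return 0;
    }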
+
+
+/*
+ * Our semaphores and timeout counters, where size depends on
+ * MAX_SCSI_HOSTS here.
+ */
+
+struct Scsi_Host * scsi_hostlist = NULL;
+struct Scsi_Device_Template * scsi_devicelist = NULL;
+
+int max_scsi_hosts = 0;
+int next_scsi_host = 0;
+
+void
+scsi_unregister(struct Scsi_Host * sh){
+ struct Scsi_Host * shpnt;
+
+ if(scsi_hostlist == sh)
+ scsi_hostlist = sh->next;
+ else {
+ shpnt = scsi_hostlist;
+ while(shpnt->next != sh) shpnt = shpnt->next;
+ shpnt->next = shpnt->next->next;
+ }
+
+ /* If we are removing the last host registered, it is safe to reuse
+ * its host number (this avoids "holes" at boot time) (DB)
+ * It is also safe to reuse the numbers directly below it that were
+ * released earlier (again, to avoid holes in the numbering).
+ */
+ if(sh->host_no == max_scsi_hosts - 1) {
+ while(--max_scsi_hosts >= next_scsi_host) {
+ shpnt = scsi_hostlist;
+ while(shpnt && shpnt->host_no != max_scsi_hosts - 1)
+ shpnt = shpnt->next;
+ if(shpnt)
+ break;
+ }
+ }
+ next_scsi_host--;
+ scsi_init_free((char *) sh, sizeof(struct Scsi_Host) + sh->extra_bytes);
+}
+
+/* We call this when we come across a new host adapter. We only do this
+ * once we are 100% sure that we want to use this host adapter - it is a
+ * pain to reverse this, so we try to avoid it
+ */
+
+struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
+ struct Scsi_Host * retval, *shpnt;
+ retval = (struct Scsi_Host *)scsi_init_malloc(sizeof(struct Scsi_Host) + j,
+ (tpnt->unchecked_isa_dma && j ? GFP_DMA : 0) | GFP_ATOMIC);
+ retval->host_busy = 0;
+ retval->block = NULL;
+ retval->wish_block = 0;
+ if(j > 0xffff) panic("Too many extra bytes requested\n");
+ retval->extra_bytes = j;
+ retval->loaded_as_module = scsi_loadable_module_flag;
+ retval->host_no = max_scsi_hosts++; /* never reuse host_no (DB) */
+ next_scsi_host++;
+ retval->host_queue = NULL;
+ retval->host_wait = NULL;
+ retval->last_reset = 0;
+ retval->irq = 0;
+ retval->dma_channel = 0xff;
+
+ /* These three are default values which can be overridden */
+ retval->max_channel = 0;
+ retval->max_id = 8;
+ retval->max_lun = 8;
+
+ retval->unique_id = 0;
+ retval->io_port = 0;
+ retval->hostt = tpnt;
+ retval->next = NULL;
+#ifdef DEBUG
+ printk("Register %x %x: %d\n", (int)retval, (int)retval->hostt, j);
+#endif
+
+ /* The next six are the default values which can be overridden
+ * if need be */
+ retval->this_id = tpnt->this_id;
+ retval->can_queue = tpnt->can_queue;
+ retval->sg_tablesize = tpnt->sg_tablesize;
+ retval->cmd_per_lun = tpnt->cmd_per_lun;
+ retval->unchecked_isa_dma = tpnt->unchecked_isa_dma;
+ retval->use_clustering = tpnt->use_clustering;
+
+ retval->select_queue_depths = NULL;
+
+ if(!scsi_hostlist)
+ scsi_hostlist = retval;
+ else
+ {
+ shpnt = scsi_hostlist;
+ while(shpnt->next) shpnt = shpnt->next;
+ shpnt->next = retval;
+ }
+
+ return retval;
+}
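scsi_register() allocates the Scsi_Host header and the caller's j extra bytes as one block; low-level drivers overlay their private per-controller state on the trailing hostdata[] area (the HADATA()/NUMDATA() casts used by gdth_proc.c above are this pattern). A sketch with a hypothetical private structure:

    /* 'my_private' and MY_DATA() are illustrative, not from any driver here. */
    struct my_private { int irq; int channels; };

    #define MY_DATA(host) ((struct my_private *)((host)->hostdata))

    static struct Scsi_Host *sketch_attach(Scsi_Host_Template *tpnt)
    {
        struct Scsi_Host *sh = scsi_register(tpnt, sizeof(struct my_private));

        MY_DATA(sh)->irq      = 11;   /* private state lives after the header */
        MY_DATA(sh)->channels = 1;
        return sh;
    }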
+
+int
+scsi_register_device(struct Scsi_Device_Template * sdpnt)
+{
+ if(sdpnt->next) panic("Device already registered");
+ sdpnt->next = scsi_devicelist;
+ scsi_devicelist = sdpnt;
+ return 0;
+}
+
+unsigned int scsi_init()
+{
+ static int called = 0;
+ int i, pcount;
+ Scsi_Host_Template * tpnt;
+ struct Scsi_Host * shpnt;
+ const char * name;
+
+ if(called) return 0;
+
+ called = 1;
+ for (tpnt = &builtin_scsi_hosts[0], i = 0; i < MAX_SCSI_HOSTS; ++i, tpnt++)
+ {
+ /*
+ * Initialize our semaphores. -1 is interpreted to mean
+ * "inactive" - whereas 0 will indicate a time out condition.
+ */
+ printk("\rprobing scsi %d/%d: %s \e[K", tpnt-builtin_scsi_hosts, MAX_SCSI_HOSTS, tpnt->name);
+
+ pcount = next_scsi_host;
+ if ((tpnt->detect) &&
+ (tpnt->present =
+ tpnt->detect(tpnt)))
+ {
+ /* The only time this should come up is when people use
+ * some kind of patched driver of some kind or another. */
+ if(pcount == next_scsi_host) {
+ if(tpnt->present > 1)
+ panic("Failure to register low-level scsi driver");
+ /* The low-level driver failed to register a driver. We
+ * can do this now. */
+ scsi_register(tpnt,0);
+ }
+ tpnt->next = scsi_hosts;
+ scsi_hosts = tpnt;
+
+ /* Add the driver to /proc/scsi */
+#if CONFIG_PROC_FS
+ build_proc_dir_entries(tpnt);
+#endif
+ }
+ }
+ printk("\ndone\n");
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ {
+ if(shpnt->hostt->info)
+ name = shpnt->hostt->info(shpnt);
+ else
+ name = shpnt->hostt->name;
+ printk ("scsi%d : %s\n", /* And print a little message */
+ shpnt->host_no, name);
+ }
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+ scsi_make_blocked_list();
+
+ /* Now attach the high level drivers */
+#ifdef CONFIG_BLK_DEV_SD
+ scsi_register_device(&sd_template);
+#endif
+#ifdef CONFIG_BLK_DEV_SR
+ scsi_register_device(&sr_template);
+#endif
+#ifdef CONFIG_CHR_DEV_ST
+ scsi_register_device(&st_template);
+#endif
+#ifdef CONFIG_CHR_DEV_SG
+ scsi_register_device(&sg_template);
+#endif
+
+#if 0
+ max_scsi_hosts = next_scsi_host;
+#endif
+ return 0;
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/hosts.h b/linux/src/drivers/scsi/hosts.h
new file mode 100644
index 0000000..8f3f4e0
--- /dev/null
+++ b/linux/src/drivers/scsi/hosts.h
@@ -0,0 +1,405 @@
+/*
+ * hosts.h Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * mid to low-level SCSI driver interface header
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Further modified by Eric Youngdale to support multiple host adapters
+ * of the same type.
+ */
+
+#ifndef _HOSTS_H
+#define _HOSTS_H
+
+/*
+ $Header: cvs/gnumach/linux/src/drivers/scsi/Attic/hosts.h,v 1.1 1999/04/26 05:54:41 tb Exp $
+*/
+
+#include <linux/proc_fs.h>
+
+/* It is senseless to set SG_ALL any higher than this - the performance
+ * does not get any better, and it wastes memory
+ */
+#define SG_NONE 0
+#define SG_ALL 0xff
+
+#define DISABLE_CLUSTERING 0
+#define ENABLE_CLUSTERING 1
+
+/* The various choices mean:
+ * NONE: Self evident. Host adapter is not capable of scatter-gather.
+ * ALL: Means that the host adapter module can do scatter-gather,
+ * and that there is no limit to the size of the table to which
+ * we scatter/gather data.
+ * Anything else: Indicates the maximum number of chains that can be
+ * used in one scatter-gather request.
+ */
+
+/*
+ * The Scsi_Host_Template type has all that is needed to interface with a SCSI
+ * host in a device independent manner. There is one entry for each different
+ * type of host adapter that is supported on the system.
+ */
+
+typedef struct scsi_disk Disk;
+
+typedef struct SHT
+{
+
+ /* Used with loadable modules so we can construct a linked list. */
+ struct SHT * next;
+
+ /* Used with loadable modules so that we know when it is safe to unload */
+ long * usage_count;
+
+ /* The pointer to the /proc/scsi directory entry */
+ struct proc_dir_entry *proc_dir;
+
+ /* proc-fs info function.
+ * Can be used to export driver statistics and other infos to the world
+ * outside the kernel ie. userspace and it also provides an interface
+ * to feed the driver with information. Check eata_dma_proc.c for reference
+ */
+ int (*proc_info)(char *, char **, off_t, int, int, int);
+
+ /*
+ * The name pointer is a pointer to the name of the SCSI
+ * device detected.
+ */
+ const char *name;
+
+ /*
+ * The detect function shall return non zero on detection,
+ * indicating the number of host adapters of this particular
+ * type were found. It should also
+ * initialize all data necessary for this particular
+ * SCSI driver. It is passed the host number, so this host
+ * knows where the first entry is in the scsi_hosts[] array.
+ *
+ * Note that the detect routine MUST not call any of the mid level
+ * functions to queue commands because things are not guaranteed
+ * to be set up yet. The detect routine can send commands to
+ * the host adapter as long as the program control will not be
+ * passed to scsi.c in the processing of the command. Note
+ * especially that scsi_malloc/scsi_free must not be called.
+ */
+ int (* detect)(struct SHT *);
+
+ /* Used with loadable modules to unload the host structures. Note:
+ * there is a default action built into the modules code which may
+ * be sufficient for most host adapters. Thus you may not have to supply
+ * this at all.
+ */
+ int (*release)(struct Scsi_Host *);
+
+ /*
+ * The info function will return whatever useful
+ * information the developer sees fit. If not provided, then
+ * the name field will be used instead.
+ */
+ const char *(* info)(struct Scsi_Host *);
+
+ /*
+ * The command function takes a target, a command (this is a SCSI
+ * command formatted as per the SCSI spec, nothing strange), a
+ * data buffer pointer, and data buffer length pointer. The return
+ * is a status int, bit fielded as follows :
+ * Byte What
+ * 0 SCSI status code
+ * 1 SCSI 1 byte message
+ * 2 host error return.
+ * 3 mid level error return
+ */
+ int (* command)(Scsi_Cmnd *);
+
+ /*
+ * The QueueCommand function works in a similar manner
+ * to the command function. It takes an additional parameter,
+ * void (* done)(int host, int code) which is passed the host
+ * # and exit result when the command is complete.
+ * Host number is the POSITION IN THE hosts array of THIS
+ * host adapter.
+ */
+ int (* queuecommand)(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+ /*
+ * Since the mid level driver handles time outs, etc, we want to
+ * be able to abort the current command. Abort returns 0 if the
+ * abortion was successful. The field SCpnt->abort reason
+ * can be filled in with the appropriate reason why we wanted
+ * the abort in the first place, and this will be used
+ * in the mid-level code instead of the host_byte().
+ * If non-zero, the code passed to it
+ * will be used as the return code, otherwise
+ * DID_ABORT should be returned.
+ *
+ * Note that the scsi driver should "clean up" after itself,
+ * resetting the bus, etc. if necessary.
+ */
+ int (* abort)(Scsi_Cmnd *);
+
+ /*
+ * The reset function will reset the SCSI bus. Any executing
+ * commands should fail with a DID_RESET in the host byte.
+ * The Scsi_Cmnd is passed so that the reset routine can figure
+ * out which host adapter should be reset, and also which command
+ * within the command block was responsible for the reset in
+ * the first place. Some hosts do not implement a reset function,
+ * and these hosts must call scsi_request_sense(SCpnt) to keep
+ * the command alive.
+ */
+ int (* reset)(Scsi_Cmnd *, unsigned int);
+
+ /*
+ * This function is used to select synchronous communications,
+ * which will result in a higher data throughput. Not implemented
+ * yet.
+ */
+ int (* slave_attach)(int, int);
+
+ /*
+ * This function determines the bios parameters for a given
+ * harddisk. These tend to be numbers that are made up by
+ * the host adapter. Parameters:
+ * size, device number, list (heads, sectors, cylinders)
+ */
+ int (* bios_param)(Disk *, kdev_t, int []);
+
+ /*
+ * This determines if we will use a non-interrupt driven
+ * or an interrupt driven scheme. It is set to the maximum number
+ * of simultaneous commands a given host adapter will accept.
+ */
+ int can_queue;
+
+ /*
+ * In many instances, especially where disconnect / reconnect are
+ * supported, our host also has an ID on the SCSI bus. If this is
+ * the case, then it must be reserved. Please set this_id to -1 if
+ * your setup is in single initiator mode, and the host lacks an
+ * ID.
+ */
+ int this_id;
+
+ /*
+ * This determines the degree to which the host adapter is capable
+ * of scatter-gather.
+ */
+ short unsigned int sg_tablesize;
+
+ /*
+ * True if this host adapter can make good use of linked commands.
+ * This will allow more than one command to be queued to a given
+ * unit on a given host. Set this to the maximum number of command
+ * blocks to be provided for each device. Set this to 1 for one
+ * command block per lun, 2 for two, etc. Do not set this to 0.
+ * You should make sure that the host adapter will do the right thing
+ * before you try setting this above 1.
+ */
+ short cmd_per_lun;
+
+ /*
+ * present contains a counter indicating how many boards of this
+ * type were found when we did the scan.
+ */
+ unsigned char present;
+
+ /*
+ * true if this host adapter uses unchecked DMA onto an ISA bus.
+ */
+ unsigned unchecked_isa_dma:1;
+
+ /*
+ * true if this host adapter can make good use of clustering.
+ * I originally thought that if the tablesize was large that it
+ * was a waste of CPU cycles to prepare a cluster list, but
+ * it works out that the Buslogic is faster if you use a smaller
+ * number of segments (i.e. use clustering). I guess it is
+ * inefficient.
+ */
+ unsigned use_clustering:1;
+
+} Scsi_Host_Template;
+
+/*
+ * The scsi_hosts array is the array containing the data for all
+ * possible <supported> scsi hosts. This is similar to the
+ * Scsi_Host_Template, except that we have one entry for each
+ * actual physical host adapter on the system, stored as a linked
+ * list. Note that if there are 2 aha1542 boards, then there will
+ * be two Scsi_Host entries, but only 1 Scsi_Host_Template entry.
+ */
+
+struct Scsi_Host
+{
+ struct Scsi_Host * next;
+ unsigned short extra_bytes;
+ volatile unsigned char host_busy;
+ char host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
+ unsigned long last_reset;
+ struct wait_queue *host_wait;
+ Scsi_Cmnd *host_queue;
+ Scsi_Host_Template * hostt;
+
+ /*
+ * These three parameters can be used to allow for wide scsi,
+ * and for host adapters that support multiple busses
+ * The first two should be set to 1 more than the actual max id
+ * or lun (i.e. 8 for normal systems).
+ */
+ unsigned int max_id;
+ unsigned int max_lun;
+ unsigned int max_channel;
+
+ /*
+ * Pointer to a circularly linked list - this indicates the hosts
+ * that should be locked out of performing I/O while we have an active
+ * command on this host.
+ */
+ struct Scsi_Host * block;
+ unsigned wish_block:1;
+
+ /* These parameters should be set by the detect routine */
+ unsigned char *base;
+ unsigned int io_port;
+ unsigned char n_io_port;
+ unsigned char irq;
+ unsigned char dma_channel;
+
+ /*
+ * This is a unique identifier that must be assigned so that we
+ * have some way of identifying each detected host adapter properly
+ * and uniquely. For hosts that do not support more than one card
+ * in the system at one time, this does not need to be set. It is
+ * initialized to 0 in scsi_register.
+ */
+ unsigned int unique_id;
+
+ /*
+ * The rest can be copied from the template, or specifically
+ * initialized, as required.
+ */
+
+ int this_id;
+ int can_queue;
+ short cmd_per_lun;
+ short unsigned int sg_tablesize;
+ unsigned unchecked_isa_dma:1;
+ unsigned use_clustering:1;
+ /*
+ * True if this host was loaded as a loadable module
+ */
+ unsigned loaded_as_module:1;
+
+ void (*select_queue_depths)(struct Scsi_Host *, Scsi_Device *);
+
+ unsigned long hostdata[0]; /* Used for storage of host specific stuff */
+};
+
+extern struct Scsi_Host * scsi_hostlist;
+extern struct Scsi_Device_Template * scsi_devicelist;
+
+extern Scsi_Host_Template * scsi_hosts;
+
+extern void build_proc_dir_entries(Scsi_Host_Template *);
+
+
+/*
+ * scsi_init initializes the scsi hosts.
+ */
+
+/*
+ * We use these goofy things because the MM is not set up when we init
+ * the scsi subsystem. By using these functions we can write code that
+ * looks normal. Also, it makes it possible to use the same code for a
+ * loadable module.
+ */
+
+extern void * scsi_init_malloc(unsigned int size, int priority);
+extern void scsi_init_free(char * ptr, unsigned int size);
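+
+/*
+ * Illustrative sketch (added comment, not part of the original header):
+ * because these wrappers work both at boot time and from a loadable
+ * module, a probe routine can use them for a temporary buffer.  The
+ * function name, the 512-byte size and the GFP_ATOMIC priority are
+ * assumptions made only for this example, hence the "#if 0" guard.
+ */
+#if 0
+static int example_probe(void)
+{
+    char *scratch = (char *) scsi_init_malloc(512, GFP_ATOMIC);
+
+    if (!scratch)
+        return 0;
+    /* ... inspect the hardware using the scratch buffer ... */
+    scsi_init_free(scratch, 512);
+    return 1;
+}
+#endif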
+
+extern int next_scsi_host;
+
+extern int scsi_loadable_module_flag;
+unsigned int scsi_init(void);
+extern struct Scsi_Host * scsi_register(Scsi_Host_Template *, int j);
+extern void scsi_unregister(struct Scsi_Host * i);
+
+#define BLANK_HOST {"", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+struct Scsi_Device_Template
+{
+ struct Scsi_Device_Template * next;
+ const char * name;
+ const char * tag;
+ long * usage_count; /* Used for loadable modules */
+ unsigned char scsi_type;
+ unsigned char major;
+ unsigned char nr_dev; /* Number currently attached */
+ unsigned char dev_noticed; /* Number of devices detected. */
+ unsigned char dev_max; /* Current size of arrays */
+ unsigned blk:1; /* 0 if character device */
+ int (*detect)(Scsi_Device *); /* Returns 1 if we can attach this device */
+ int (*init)(void); /* Sizes arrays based upon number of devices
+ * detected */
+ void (*finish)(void); /* Perform initialization after attachment */
+ int (*attach)(Scsi_Device *); /* Attach devices to arrays */
+ void (*detach)(Scsi_Device *);
+};
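+
+/*
+ * Added note (not in the original header): reading the field comments
+ * above, the upper-level driver hooks are used roughly in this order -
+ * detect() is called for each device found during the bus scan, init()
+ * then sizes the arrays from the number of devices noticed, attach()
+ * is called once per device to slot it into those arrays, and finish()
+ * does any remaining setup after everything has been attached.
+ */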
+
+extern struct Scsi_Device_Template sd_template;
+extern struct Scsi_Device_Template st_template;
+extern struct Scsi_Device_Template sr_template;
+extern struct Scsi_Device_Template sg_template;
+
+int scsi_register_device(struct Scsi_Device_Template * sdpnt);
+
+/* These are used by loadable modules */
+extern int scsi_register_module(int, void *);
+extern void scsi_unregister_module(int, void *);
+
+/* The different types of modules that we can load and unload */
+#define MODULE_SCSI_HA 1
+#define MODULE_SCSI_CONST 2
+#define MODULE_SCSI_IOCTL 3
+#define MODULE_SCSI_DEV 4
+
+
+/*
+ * This is an ugly hack. If we expect to be able to load devices at run time,
+ * we need to leave extra room in some of the data structures. Doing a
+ * realloc to enlarge the structures would be riddled with race conditions,
+ * so until a better solution is discovered, we use this crude approach.
+ */
+#define SD_EXTRA_DEVS 2
+#define ST_EXTRA_DEVS 2
+#define SR_EXTRA_DEVS 2
+#define SG_EXTRA_DEVS (SD_EXTRA_DEVS + SR_EXTRA_DEVS + ST_EXTRA_DEVS)
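+
+/*
+ * Worked example (added note, not in the original header): with
+ * SD_EXTRA_DEVS at 2, a system that finds three disks at boot sizes
+ * the sd arrays for roughly five, leaving room for two more disks to
+ * show up later when a host adapter module is loaded.  Raise these
+ * defines if you expect more run-time additions than that.
+ */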
+
+#endif
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/in2000.c b/linux/src/drivers/scsi/in2000.c
new file mode 100644
index 0000000..aaa260c
--- /dev/null
+++ b/linux/src/drivers/scsi/in2000.c
@@ -0,0 +1,2379 @@
+/*
+ * in2000.c - Linux device driver for the
+ * Always IN2000 ISA SCSI card.
+ *
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Drew Eckhardt's excellent 'Generic NCR5380' sources provided
+ * much of the inspiration and some of the code for this driver.
+ * The Linux IN2000 driver distributed in the Linux kernels through
+ * version 1.2.13 was an extremely valuable reference on the arcane
+ * (and still mysterious) workings of the IN2000's fifo. It also
+ * is where I lifted in2000_biosparam(), the gist of the card
+ * detection scheme, and other bits of code. Many thanks to the
+ * talented and courageous people who wrote, contributed to, and
+ * maintained that driver (including Brad McLean, Shaun Savage,
+ * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
+ * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
+ * Youngdale). I should also mention the driver written by
+ * Hamish Macdonald for the (GASP!) Amiga A2091 card, included
+ * in the Linux-m68k distribution; it gave me a good initial
+ * understanding of the proper way to run a WD33c93 chip, and I
+ * ended up stealing lots of code from it.
+ *
+ * _This_ driver is (I feel) an improvement over the old one in
+ * several respects:
+ * - All problems relating to the data size of a SCSI request are
+ * gone (as far as I know). The old driver couldn't handle
+ * swapping to partitions because that involved 4k blocks, nor
+ * could it deal with the st.c tape driver unmodified, because
+ * that usually involved 4k - 32k blocks. The old driver never
+ * quite got away from a morbid dependence on 2k block sizes -
+ * which of course is the size of the card's fifo.
+ *
+ * - Target Disconnection/Reconnection is now supported. Any
+ * system with more than one device active on the SCSI bus
+ * will benefit from this. The driver defaults to what I'm
+ * calling 'adaptive disconnect' - meaning that each command
+ * is evaluated individually as to whether or not it should
+ * be run with the option to disconnect/reselect (if the
+ * device chooses), or as a "SCSI-bus-hog".
+ *
+ * - Synchronous data transfers are now supported. Because there
+ * are a few devices (and many improperly terminated systems)
+ * that choke when doing sync, the default is sync DISABLED
+ * for all devices. This faster protocol can (and should!)
+ * be enabled on selected devices via the command-line.
+ *
+ * - Runtime operating parameters can now be specified through
+ * either the LILO or the 'insmod' command line. For LILO do:
+ * "in2000=blah,blah,blah"
+ * and with insmod go like:
+ * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
+ * The defaults should be good for most people. See the comment
+ * for 'setup_strings' below for more details.
+ *
+ * - The old driver relied exclusively on what the Western Digital
+ * docs call "Combination Level 2 Commands", which are a great
+ * idea in that the CPU is relieved of a lot of interrupt
+ * overhead. However, by accepting a certain (user-settable)
+ * amount of additional interrupts, this driver achieves
+ * better control over the SCSI bus, and data transfers are
+ * almost as fast while being much easier to define, track,
+ * and debug.
+ *
+ * - You can force detection of a card whose BIOS has been disabled.
+ *
+ * - Multiple IN2000 cards might almost be supported. I've tried to
+ * keep it in mind, but have no way to test...
+ *
+ *
+ * TODO:
+ * tagged queuing. multiple cards.
+ *
+ *
+ * NOTE:
+ * When using this or any other SCSI driver as a module, you'll
+ * find that with the stock kernel, at most _two_ SCSI hard
+ * drives will be linked into the device list (ie, usable).
+ * If your IN2000 card has more than 2 disks on its bus, you
+ * might want to change the define of 'SD_EXTRA_DEVS' in the
+ * 'hosts.h' file from 2 to whatever is appropriate. It took
+ * me a while to track down this surprisingly obscure and
+ * undocumented little "feature".
+ *
+ *
+ * People with bug reports, wish-lists, complaints, comments,
+ * or improvements are asked to pah-leeez email me (John Shifflett)
+ * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
+ * this thing into as good a shape as possible, and I'm positive
+ * there are lots of lurking bugs and "Stupid Places".
+ *
+ */
+
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+
+#include <linux/blk.h>
+#include <linux/stat.h>
+
+#include "scsi.h"
+#include "sd.h"
+#include "hosts.h"
+
+#define IN2000_VERSION "1.33"
+#define IN2000_DATE "26/August/1998"
+
+#include "in2000.h"
+
+
+/*
+ * 'setup_strings' is a single string used to pass operating parameters and
+ * settings from the kernel/module command-line to the driver. 'setup_args[]'
+ * is an array of strings that define the compile-time default values for
+ * these settings. If Linux boots with a LILO or insmod command-line, those
+ * settings are combined with 'setup_args[]'. Note that LILO command-lines
+ * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
+ * The driver recognizes the following keywords (lower case required) and
+ * arguments:
+ *
+ * - ioport:addr -Where addr is IO address of a (usually ROM-less) card.
+ * - noreset -No optional args. Prevents SCSI bus reset at boot time.
+ * - nosync:x -x is a bitmask where the 1st 7 bits correspond with
+ * the 7 possible SCSI devices (bit 0 for device #0, etc).
+ * Set a bit to PREVENT sync negotiation on that device.
+ * The driver default is sync DISABLED on all devices.
+ * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
+ * period. Default is 500; acceptable values are 250 - 1000.
+ * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
+ * x = 1 does 'adaptive' disconnects, which is the default
+ * and generally the best choice.
+ * - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
+ * various types of debug output to be printed - see the DB_xxx
+ * defines in in2000.h
+ * - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that
+ * determines how the /proc interface works and what it
+ * does - see the PR_xxx defines in in2000.h
+ *
+ * Syntax Notes:
+ * - Numeric arguments can be decimal or the '0x' form of hex notation. There
+ * _must_ be a colon between a keyword and its numeric argument, with no
+ * spaces.
+ * - Keywords are separated by commas, no spaces, in the standard kernel
+ * command-line manner.
+ * - A keyword in the 'nth' comma-separated command-line member will overwrite
+ * the 'nth' element of setup_args[]. A blank command-line member (in
+ * other words, a comma with no preceding keyword) will _not_ overwrite
+ * the corresponding setup_args[] element.
+ *
+ * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
+ * - in2000=ioport:0x220,noreset
+ * - in2000=period:250,disconnect:2,nosync:0x03
+ * - in2000=debug:0x1e
+ * - in2000=proc:3
+ */
+
+/* Normally, no defaults are specified... */
+static char *setup_args[] =
+ {"","","","","","","","",""};
+
+/* filled in by 'insmod' */
+static char *setup_strings = 0;
+
+#ifdef MODULE_PARM
+MODULE_PARM(setup_strings, "s");
+#endif
+
+
+static struct Scsi_Host *instance_list = 0;
+
+
+
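+/*
+ * Added note (not in the original source): the WD33c93 registers are
+ * reached indirectly - write the register number to the address port
+ * (IO_WD_ADDR), then read or write the data port (IO_WD_DATA).  The
+ * helpers below wrap that two-step sequence; the address register
+ * auto-increments, which write_3393_count() and the CDB copy in
+ * in2000_execute() rely on.
+ */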
+static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
+{
+ write1_io(reg_num,IO_WD_ADDR);
+ return read1_io(IO_WD_DATA);
+}
+
+
+#define READ_AUX_STAT() read1_io(IO_WD_ASR)
+
+
+static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
+{
+ write1_io(reg_num,IO_WD_ADDR);
+ write1_io(value,IO_WD_DATA);
+}
+
+
+static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
+{
+/* while (READ_AUX_STAT() & ASR_CIP)
+ printk("|");*/
+ write1_io(WD_COMMAND,IO_WD_ADDR);
+ write1_io(cmd,IO_WD_DATA);
+}
+
+
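+/*
+ * Added note (not in the original source): read_1_byte() pulls a single
+ * byte (typically status or a message byte) from the target by running
+ * a polled TRANSFER_INFO with the single-byte modifier (the 0x80 bit),
+ * spinning on the auxiliary status register until the chip raises its
+ * interrupt.  The caller is expected to clear that interrupt.
+ */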
+static uchar read_1_byte(struct IN2000_hostdata *hostdata)
+{
+uchar asr, x = 0;
+
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO|0x80);
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ x = read_3393(hostdata,WD_DATA);
+ } while (!(asr & ASR_INT));
+ return x;
+}
+
+
+static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
+{
+ write1_io(WD_TRANSFER_COUNT_MSB,IO_WD_ADDR);
+ write1_io((value >> 16),IO_WD_DATA);
+ write1_io((value >> 8),IO_WD_DATA);
+ write1_io(value,IO_WD_DATA);
+}
+
+
+static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
+{
+unsigned long value;
+
+ write1_io(WD_TRANSFER_COUNT_MSB,IO_WD_ADDR);
+ value = read1_io(IO_WD_DATA) << 16;
+ value |= read1_io(IO_WD_DATA) << 8;
+ value |= read1_io(IO_WD_DATA);
+ return value;
+}
+
+
+/* The 33c93 needs to be told which direction a command transfers its
+ * data; we use this function to figure it out. Returns true if there
+ * will be a DATA_OUT phase with this command, false otherwise.
+ * (Thanks to Joerg Dorchain for the research and suggestion.)
+ */
+static int is_dir_out(Scsi_Cmnd *cmd)
+{
+ switch (cmd->cmnd[0]) {
+ case WRITE_6: case WRITE_10: case WRITE_12:
+ case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER:
+ case WRITE_VERIFY: case WRITE_VERIFY_12:
+ case COMPARE: case COPY: case COPY_VERIFY:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT:
+ case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK:
+ case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+
+static struct sx_period sx_table[] = {
+ { 1, 0x20},
+ {252, 0x20},
+ {376, 0x30},
+ {500, 0x40},
+ {624, 0x50},
+ {752, 0x60},
+ {876, 0x70},
+ {1000,0x00},
+ {0, 0} };
+
+static int round_period(unsigned int period)
+{
+int x;
+
+ for (x=1; sx_table[x].period_ns; x++) {
+ if ((period <= sx_table[x-0].period_ns) &&
+ (period > sx_table[x-1].period_ns)) {
+ return x;
+ }
+ }
+ return 7;
+}
+
+static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
+{
+uchar result;
+
+ period *= 4; /* convert SDTR code to ns */
+ result = sx_table[round_period(period)].reg_value;
+ result |= (offset < OPTIMUM_SX_OFF)?offset:OPTIMUM_SX_OFF;
+ return result;
+}
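+
+/*
+ * Worked example (added note, not in the original driver): an SDTR
+ * period code of 63 means 63 * 4 = 252 ns.  round_period(252) picks
+ * table index 1 (the first entry whose period_ns is >= 252), whose
+ * reg_value is 0x20, so calc_sync_xfer(63, offset) returns 0x20 OR'ed
+ * with the offset, clipped to OPTIMUM_SX_OFF.
+ */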
+
+
+
+static void in2000_execute(struct Scsi_Host *instance);
+
+int in2000_queuecommand (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
+{
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *tmp;
+unsigned long flags;
+
+ hostdata = (struct IN2000_hostdata *)cmd->host->hostdata;
+
+DB(DB_QUEUE_COMMAND,printk("Q-%d-%02x-%ld(",cmd->target,cmd->cmnd[0],cmd->pid))
+
+/* Set up a few fields in the Scsi_Cmnd structure for our own use:
+ * - host_scribble is the pointer to the next cmd in the input queue
+ * - scsi_done points to the routine we call when a cmd is finished
+ * - result is what you'd expect
+ */
+
+ cmd->host_scribble = NULL;
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+/* We use the Scsi_Pointer structure that's included with each command
+ * as a scratchpad (as it's intended to be used!). The handy thing about
+ * the SCp.xxx fields is that they're always associated with a given
+ * cmd, and are preserved across disconnect-reselect. This means we
+ * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
+ * if we keep all the critical pointers and counters in SCp:
+ * - SCp.ptr is the pointer into the RAM buffer
+ * - SCp.this_residual is the size of that buffer
+ * - SCp.buffer points to the current scatter-gather buffer
+ * - SCp.buffers_residual tells us how many S.G. buffers there are
+ * - SCp.have_data_in helps keep track of >2048 byte transfers
+ * - SCp.sent_command is not used
+ * - SCp.phase records this command's SRCID_ER bit setting
+ */
+
+ if (cmd->use_sg) {
+ cmd->SCp.buffer = (struct scatterlist *)cmd->buffer;
+ cmd->SCp.buffers_residual = cmd->use_sg - 1;
+ cmd->SCp.ptr = (char *)cmd->SCp.buffer->address;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ }
+ else {
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.buffers_residual = 0;
+ cmd->SCp.ptr = (char *)cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ }
+ cmd->SCp.have_data_in = 0;
+
+/* We don't set SCp.phase here - that's done in in2000_execute() */
+
+/* WD docs state that at the conclusion of a "LEVEL2" command, the
+ * status byte can be retrieved from the LUN register. Apparently,
+ * this is the case only for *uninterrupted* LEVEL2 commands! If
+ * there are any unexpected phases entered, even if they are 100%
+ * legal (different devices may choose to do things differently),
+ * the LEVEL2 command sequence is exited. This often occurs prior
+ * to receiving the status byte, in which case the driver does a
+ * status phase interrupt and gets the status byte on its own.
+ * While such a command can then be "resumed" (ie restarted to
+ * finish up as a LEVEL2 command), the LUN register will NOT be
+ * a valid status byte at the command's conclusion, and we must
+ * use the byte obtained during the earlier interrupt. Here, we
+ * preset SCp.Status to an illegal value (0xff) so that when
+ * this command finally completes, we can tell where the actual
+ * status byte is stored.
+ */
+
+ cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
+
+/* We need to disable interrupts before messing with the input
+ * queue and calling in2000_execute().
+ */
+
+ save_flags(flags);
+ cli();
+
+ /*
+ * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
+ * commands are added to the head of the queue so that the desired
+ * sense data is not lost before REQUEST_SENSE executes.
+ */
+
+ if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
+ cmd->host_scribble = (uchar *)hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ else { /* find the end of the queue */
+ for (tmp=(Scsi_Cmnd *)hostdata->input_Q; tmp->host_scribble;
+ tmp=(Scsi_Cmnd *)tmp->host_scribble)
+ ;
+ tmp->host_scribble = (uchar *)cmd;
+ }
+
+/* We know that there's at least one command in 'input_Q' now.
+ * Go see if any of them are runnable!
+ */
+
+ in2000_execute(cmd->host);
+
+DB(DB_QUEUE_COMMAND,printk(")Q-%ld ",cmd->pid))
+
+ restore_flags(flags);
+ return 0;
+}
+
+
+
+/*
+ * This routine attempts to start a scsi command. If the host_card is
+ * already connected, we give up immediately. Otherwise, look through
+ * the input_Q, using the first command we find that's intended
+ * for a currently non-busy target/lun.
+ * Note that this function is always called with interrupts already
+ * disabled (either from in2000_queuecommand() or in2000_intr()).
+ */
+static void in2000_execute (struct Scsi_Host *instance)
+{
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *cmd, *prev;
+int i;
+unsigned short *sp;
+unsigned short f;
+unsigned short flushbuf[16];
+
+
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+DB(DB_EXECUTE,printk("EX("))
+
+ if (hostdata->selecting || hostdata->connected) {
+
+DB(DB_EXECUTE,printk(")EX-0 "))
+
+ return;
+ }
+
+ /*
+ * Search through the input_Q for a command destined
+ * for an idle target/lun.
+ */
+
+ cmd = (Scsi_Cmnd *)hostdata->input_Q;
+ prev = 0;
+ while (cmd) {
+ if (!(hostdata->busy[cmd->target] & (1 << cmd->lun)))
+ break;
+ prev = cmd;
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+
+ /* quit if queue empty or all possible targets are busy */
+
+ if (!cmd) {
+
+DB(DB_EXECUTE,printk(")EX-1 "))
+
+ return;
+ }
+
+ /* remove command from queue */
+
+ if (prev)
+ prev->host_scribble = cmd->host_scribble;
+ else
+ hostdata->input_Q = (Scsi_Cmnd *)cmd->host_scribble;
+
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[cmd->target]++;
+#endif
+
+/*
+ * Start the selection process
+ */
+
+ if (is_dir_out(cmd))
+ write_3393(hostdata,WD_DESTINATION_ID, cmd->target);
+ else
+ write_3393(hostdata,WD_DESTINATION_ID, cmd->target | DSTID_DPD);
+
+/* Now we need to figure out whether or not this command is a good
+ * candidate for disconnect/reselect. We guess to the best of our
+ * ability, based on a set of hierarchical rules. When several
+ * devices are operating simultaneously, disconnects are usually
+ * an advantage. In a single device system, or if only 1 device
+ * is being accessed, transfers usually go faster if disconnects
+ * are not allowed:
+ *
+ * + Commands should NEVER disconnect if hostdata->disconnect =
+ * DIS_NEVER (this holds for tape drives also), and ALWAYS
+ * disconnect if hostdata->disconnect = DIS_ALWAYS.
+ * + Tape drive commands should always be allowed to disconnect.
+ * + Disconnect should be allowed if disconnected_Q isn't empty.
+ * + Commands should NOT disconnect if input_Q is empty.
+ * + Disconnect should be allowed if there are commands in input_Q
+ * for a different target/lun. In this case, the other commands
+ * should be made disconnect-able, if not already.
+ *
+ * I know, I know - this code would flunk me out of any
+ * "C Programming 101" class ever offered. But it's easy
+ * to change around and experiment with for now.
+ */
+
+ cmd->SCp.phase = 0; /* assume no disconnect */
+ if (hostdata->disconnect == DIS_NEVER)
+ goto no;
+ if (hostdata->disconnect == DIS_ALWAYS)
+ goto yes;
+ if (cmd->device->type == 1) /* tape drive? */
+ goto yes;
+ if (hostdata->disconnected_Q) /* other commands disconnected? */
+ goto yes;
+ if (!(hostdata->input_Q)) /* input_Q empty? */
+ goto no;
+ for (prev=(Scsi_Cmnd *)hostdata->input_Q; prev;
+ prev=(Scsi_Cmnd *)prev->host_scribble) {
+ if ((prev->target != cmd->target) || (prev->lun != cmd->lun)) {
+ for (prev=(Scsi_Cmnd *)hostdata->input_Q; prev;
+ prev=(Scsi_Cmnd *)prev->host_scribble)
+ prev->SCp.phase = 1;
+ goto yes;
+ }
+ }
+ goto no;
+
+yes:
+ cmd->SCp.phase = 1;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_allowed_cnt[cmd->target]++;
+#endif
+
+no:
+ write_3393(hostdata,WD_SOURCE_ID,((cmd->SCp.phase)?SRCID_ER:0));
+
+ write_3393(hostdata,WD_TARGET_LUN, cmd->lun);
+ write_3393(hostdata,WD_SYNCHRONOUS_TRANSFER,hostdata->sync_xfer[cmd->target]);
+ hostdata->busy[cmd->target] |= (1 << cmd->lun);
+
+ if ((hostdata->level2 <= L2_NONE) ||
+ (hostdata->sync_stat[cmd->target] == SS_UNSET)) {
+
+ /*
+ * Do a 'Select-With-ATN' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * CSR_SELECT: success - proceed.
+ */
+
+ hostdata->selecting = cmd;
+
+/* Every target has its own synchronous transfer setting, kept in
+ * the sync_xfer array, and a corresponding status byte in sync_stat[].
+ * Each target's sync_stat[] entry is initialized to SS_UNSET, and its
+ * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
+ * means that the parameters are undetermined as yet, and that we
+ * need to send an SDTR message to this device after selection is
+ * complete. We set SS_FIRST to tell the interrupt routine to do so,
+ * unless we don't want to even _try_ synchronous transfers: In this
+ * case we set SS_SET to make the defaults final.
+ */
+ if (hostdata->sync_stat[cmd->target] == SS_UNSET) {
+ if (hostdata->sync_off & (1 << cmd->target))
+ hostdata->sync_stat[cmd->target] = SS_SET;
+ else
+ hostdata->sync_stat[cmd->target] = SS_FIRST;
+ }
+ hostdata->state = S_SELECTING;
+ write_3393_count(hostdata,0); /* this guarantees a DATA_PHASE interrupt */
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN);
+ }
+
+ else {
+
+ /*
+ * Do a 'Select-With-ATN-Xfer' command. This will end with
+ * one of the following interrupts:
+ * CSR_RESEL_AM: failure - can try again later.
+ * CSR_TIMEOUT: failure - give up.
+ * anything else: success - proceed.
+ */
+
+ hostdata->connected = cmd;
+ write_3393(hostdata,WD_COMMAND_PHASE, 0);
+
+ /* copy command_descriptor_block into WD chip
+ * (take advantage of auto-incrementing)
+ */
+
+ write1_io(WD_CDB_1, IO_WD_ADDR);
+ for (i=0; i<cmd->cmd_len; i++)
+ write1_io(cmd->cmnd[i], IO_WD_DATA);
+
+ /* The wd33c93 only knows about Group 0, 1, and 5 commands when
+ * it's doing a 'select-and-transfer'. To be safe, we write the
+ * size of the CDB into the OWN_ID register for every case. This
+ * way there won't be problems with vendor-unique, audio, etc.
+ */
+
+ write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);
+
+ /* When doing a non-disconnect command, we can save ourselves a DATA
+ * phase interrupt later by setting everything up now. With writes we
+ * need to pre-fill the fifo; if there's room for the 32 flush bytes,
+ * put them in there too - that'll avoid a fifo interrupt. Reads are
+ * somewhat simpler.
+ * KLUDGE NOTE: It seems that you can't completely fill the fifo here:
+ * This results in the IO_FIFO_COUNT register rolling over to zero,
+ * and apparently the gate array logic sees this as empty, not full,
+ * so the 3393 chip is never signalled to start reading from the
+ * fifo. Or maybe it's seen as a permanent fifo interrupt condition.
+ * Regardless, we fix this by temporarily pretending that the fifo
+ * is 16 bytes smaller. (I see now that the old driver has a comment
+ * about "don't fill completely" in an analogous place - must be the
+ * same deal.) This results in CDROM, swap partitions, and tape drives
+ * needing an extra interrupt per write command - I think we can live
+ * with that!
+ */
+
+ if (!(cmd->SCp.phase)) {
+ write_3393_count(hostdata, cmd->SCp.this_residual);
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
+ write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */
+
+ if (is_dir_out(cmd)) {
+ hostdata->fifo = FI_FIFO_WRITING;
+ if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16) )
+ i = IN2000_FIFO_SIZE - 16;
+ cmd->SCp.have_data_in = i; /* this much data in fifo */
+ i >>= 1; /* Gulp. Assuming modulo 2. */
+ sp = (unsigned short *)cmd->SCp.ptr;
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(*sp++,IO_FIFO);
+
+#endif
+
+ /* Is there room for the flush bytes? */
+
+ if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
+ sp = flushbuf;
+ i = 16;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(0,IO_FIFO);
+
+#endif
+
+ }
+ }
+
+ else {
+ write1_io(0, IO_FIFO_READ); /* put fifo in read mode */
+ hostdata->fifo = FI_FIFO_READING;
+ cmd->SCp.have_data_in = 0; /* nothing transferred yet */
+ }
+
+ }
+ else {
+ write_3393_count(hostdata,0); /* this guarantees a DATA_PHASE interrupt */
+ }
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ }
+
+ /*
+ * Since the SCSI bus can handle only 1 connection at a time,
+ * we get out of here now. If the selection fails, or when
+ * the command disconnects, we'll come back to this routine
+ * to search the input_Q again...
+ */
+
+DB(DB_EXECUTE,printk("%s%ld)EX-2 ",(cmd->SCp.phase)?"d:":"",cmd->pid))
+
+}
+
+
+
+static void transfer_pio(uchar *buf, int cnt,
+ int data_in_dir, struct IN2000_hostdata *hostdata)
+{
+uchar asr;
+
+DB(DB_TRANSFER,printk("(%p,%d,%s)",buf,cnt,data_in_dir?"in":"out"))
+
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_count(hostdata,cnt);
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO);
+ if (data_in_dir) {
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ *buf++ = read_3393(hostdata,WD_DATA);
+ } while (!(asr & ASR_INT));
+ }
+ else {
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ write_3393(hostdata,WD_DATA, *buf++);
+ } while (!(asr & ASR_INT));
+ }
+
+ /* Note: we are returning with the interrupt UN-cleared.
+ * Since (presumably) an entire I/O operation has
+ * completed, the bus phase is probably different, and
+ * the interrupt routine will discover this when it
+ * responds to the uncleared int.
+ */
+
+}
+
+
+
+static void transfer_bytes(Scsi_Cmnd *cmd, int data_in_dir)
+{
+struct IN2000_hostdata *hostdata;
+unsigned short *sp;
+unsigned short f;
+int i;
+
+ hostdata = (struct IN2000_hostdata *)cmd->host->hostdata;
+
+/* Normally, you'd expect 'this_residual' to be non-zero here.
+ * In a series of scatter-gather transfers, however, this
+ * routine will usually be called with 'this_residual' equal
+ * to 0 and 'buffers_residual' non-zero. This means that a
+ * previous transfer completed, clearing 'this_residual', and
+ * now we need to setup the next scatter-gather buffer as the
+ * source or destination for THIS transfer.
+ */
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ ++cmd->SCp.buffer;
+ --cmd->SCp.buffers_residual;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ }
+
+/* Set up hardware registers */
+
+ write_3393(hostdata,WD_SYNCHRONOUS_TRANSFER,hostdata->sync_xfer[cmd->target]);
+ write_3393_count(hostdata,cmd->SCp.this_residual);
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
+ write1_io(0,IO_FIFO_WRITE); /* zero counter, assume write */
+
+/* Reading is easy. Just issue the command and return - we'll
+ * get an interrupt later when we have actual data to worry about.
+ */
+
+ if (data_in_dir) {
+ write1_io(0,IO_FIFO_READ);
+ if ((hostdata->level2 >= L2_DATA) ||
+ (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ write_3393(hostdata,WD_COMMAND_PHASE,0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO);
+ hostdata->fifo = FI_FIFO_READING;
+ cmd->SCp.have_data_in = 0;
+ return;
+ }
+
+/* Writing is more involved - we'll start the WD chip and write as
+ * much data to the fifo as we can right now. Later interrupts will
+ * write any bytes that don't make it at this stage.
+ */
+
+ if ((hostdata->level2 >= L2_DATA) ||
+ (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ write_3393(hostdata,WD_COMMAND_PHASE,0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else
+ write_3393_cmd(hostdata,WD_CMD_TRANS_INFO);
+ hostdata->fifo = FI_FIFO_WRITING;
+ sp = (unsigned short *)cmd->SCp.ptr;
+
+ if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
+ i = IN2000_FIFO_SIZE;
+ cmd->SCp.have_data_in = i;
+ i >>= 1; /* Gulp. We assume this_residual is modulo 2 */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_WRITE_IO
+
+ FAST_WRITE2_IO();
+#else
+ while (i--)
+ write2_io(*sp++,IO_FIFO);
+
+#endif
+
+}
+
+
+/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
+ * function in order to work in an SMP environment. (I'd be surprised
+ * if the driver is ever used by anyone on a real multi-CPU motherboard,
+ * but it _does_ need to be able to compile and run in an SMP kernel.)
+ */
+
+static void in2000_intr (int irqnum, void * dev_id, struct pt_regs *ptregs)
+{
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *patch, *cmd;
+uchar asr, sr, phs, id, lun, *ucp, msg;
+int i,j;
+unsigned long length;
+unsigned short *sp;
+unsigned short f;
+unsigned long flags;
+
+ for (instance = instance_list; instance; instance = instance->next) {
+ if (instance->irq == irqnum)
+ break;
+ }
+ if (!instance) {
+ printk("*** Hmm... interrupts are screwed up! ***\n");
+ return;
+ }
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+/* Get the spin_lock and disable further ints, for SMP */
+
+ CLISPIN_LOCK(flags);
+
+#ifdef PROC_STATISTICS
+ hostdata->int_cnt++;
+#endif
+
+/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
+ * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
+ * with a big logic array, so it's a little different than what you might
+ * expect). As far as I know, there's no reason that BOTH can't be active
+ * at the same time, but there's a problem: while we can read the 3393
+ * to tell if _it_ wants an interrupt, I don't know of a way to ask the
+ * fifo the same question. The best we can do is check the 3393 and if
+ * it _isn't_ the source of the interrupt, then we can be pretty sure
+ * that the fifo is the culprit.
+ * UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
+ * IO_FIFO_COUNT register mirrors the fifo interrupt state. I
+ * assume that bit clear means interrupt active. As it turns
+ * out, the driver really doesn't need to check for this after
+ * all, so my remarks above about a 'problem' can safely be
+ * ignored. The way the logic is set up, there's no advantage
+ * (that I can see) to worrying about it.
+ *
+ * It seems that the fifo interrupt signal is negated when we extract
+ * bytes from it during a read, or feed bytes into it during a write.
+ * - fifo will interrupt when data is moving from it to the 3393, and
+ * there are 31 (or less?) bytes left to go. This is sort of short-
+ * sighted: what if you don't WANT to do more? In any case, our
+ * response is to push more into the fifo - either actual data or
+ * dummy bytes if need be. Note that we apparently have to write at
+ * least 32 additional bytes to the fifo after an interrupt in order
+ * to get it to release the ones it was holding on to - writing fewer
+ * than 32 will result in another fifo int.
+ * UPDATE: Again, info from Bill Earnest makes this more understandable:
+ * 32 bytes = two counts of the fifo counter register. He tells
+ * me that the fifo interrupt is a non-latching signal derived
+ * from a straightforward boolean interpretation of the 7
+ * highest bits of the fifo counter and the fifo-read/fifo-write
+ * state. Who'd a thought?
+ */
+
+ write1_io(0, IO_LED_ON);
+ asr = READ_AUX_STAT();
+ if (!(asr & ASR_INT)) { /* no WD33c93 interrupt? */
+
+/* Ok. This is definitely a FIFO-only interrupt.
+ *
+ * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
+ * maybe more to come from the SCSI bus. Read as many as we can out of the
+ * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
+ * update have_data_in afterwards.
+ *
+ * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
+ * into the WD3393 chip (I think the interrupt happens when there are 31
+ * bytes left, but it may be fewer...). The 3393 is still waiting, so we
+ * shove some more into the fifo, which gets things moving again. If the
+ * original SCSI command specified more than 2048 bytes, there may still
+ * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
+ * Don't forget to update have_data_in. If we've already written out the
+ * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
+ * push out the remaining real data.
+ * (Big thanks to Bill Earnest for getting me out of the mud in here.)
+ */
+
+ cmd = (Scsi_Cmnd *)hostdata->connected; /* assume we're connected */
+CHECK_NULL(cmd,"fifo_int")
+
+ if (hostdata->fifo == FI_FIFO_READING) {
+
+DB(DB_FIFO,printk("{R:%02x} ",read1_io(IO_FIFO_COUNT)))
+
+ sp = (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i = read1_io(IO_FIFO_COUNT) & 0xfe;
+ i <<= 2; /* # of words waiting in the fifo */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_READ_IO
+
+ FAST_READ2_IO();
+#else
+ while (i--)
+ *sp++ = read2_io(IO_FIFO);
+
+#endif
+
+ i = sp - (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i <<= 1;
+ cmd->SCp.have_data_in += i;
+ }
+
+ else if (hostdata->fifo == FI_FIFO_WRITING) {
+
+DB(DB_FIFO,printk("{W:%02x} ",read1_io(IO_FIFO_COUNT)))
+
+/* If all bytes have been written to the fifo, flush out the stragglers.
+ * Note that while writing 16 dummy words seems arbitrary, we don't
+ * have another choice that I can see. What we really want is to read
+ * the 3393 transfer count register (that would tell us how many bytes
+ * needed flushing), but the TRANSFER_INFO command hasn't completed
+ * yet (not enough bytes!) and that register won't be accessible. So,
+ * we use 16 words - a number obtained through trial and error.
+ * UPDATE: Bill says this is exactly what Always does, so there.
+ * More thanks due him for help in this section.
+ */
+
+ if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
+ i = 16;
+ while (i--) /* write 32 dummy bytes */
+ write2_io(0,IO_FIFO);
+ }
+
+/* If there are still bytes left in the SCSI buffer, write as many as we
+ * can out to the fifo.
+ */
+
+ else {
+ sp = (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i = cmd->SCp.this_residual - cmd->SCp.have_data_in; /* bytes yet to go */
+ j = read1_io(IO_FIFO_COUNT) & 0xfe;
+ j <<= 2; /* how many words the fifo has room for */
+ if ((j << 1) > i)
+ j = (i >> 1);
+ while (j--)
+ write2_io(*sp++,IO_FIFO);
+
+ i = sp - (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+ i <<= 1;
+ cmd->SCp.have_data_in += i;
+ }
+ }
+
+ else {
+ printk("*** Spurious FIFO interrupt ***");
+ }
+
+ write1_io(0, IO_LED_OFF);
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+ return;
+ }
+
+/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
+ * may also be asserted, but we don't bother to check it: we get more
+ * detailed info from FIFO_READING and FIFO_WRITING (see below).
+ */
+
+ cmd = (Scsi_Cmnd *)hostdata->connected; /* assume we're connected */
+ sr = read_3393(hostdata,WD_SCSI_STATUS); /* clear the interrupt */
+ phs = read_3393(hostdata,WD_COMMAND_PHASE);
+
+ if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
+ printk("\nNR:wd-intr-1\n");
+ write1_io(0, IO_LED_OFF);
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+ return;
+ }
+
+DB(DB_INTR,printk("{%02x:%02x-",asr,sr))
+
+/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
+ * guaranteed to be in response to the completion of the transfer.
+ * If we were reading, there's probably data in the fifo that needs
+ * to be copied into RAM - do that here. Also, we have to update
+ * 'this_residual' and 'ptr' based on the contents of the
+ * TRANSFER_COUNT register, in case the device decided to do an
+ * intermediate disconnect (a device may do this if it has to
+ * do a seek, or just to be nice and let other devices have
+ * some bus time during long transfers).
+ * After doing whatever is necessary with the fifo, we go on and
+ * service the WD3393 interrupt normally.
+ */
+
+ if (hostdata->fifo == FI_FIFO_READING) {
+
+/* buffer index = start-of-buffer + #-of-bytes-already-read */
+
+ sp = (unsigned short *)(cmd->SCp.ptr + cmd->SCp.have_data_in);
+
+/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */
+
+ i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
+ i >>= 1; /* Gulp. We assume this will always be modulo 2 */
+ f = hostdata->io_base + IO_FIFO;
+
+#ifdef FAST_READ_IO
+
+ FAST_READ2_IO();
+#else
+ while (i--)
+ *sp++ = read2_io(IO_FIFO);
+
+#endif
+
+ hostdata->fifo = FI_FIFO_UNUSED;
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_3393_count(hostdata);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+
+DB(DB_TRANSFER,printk("(%p,%d)",cmd->SCp.ptr,cmd->SCp.this_residual))
+
+ }
+
+ else if (hostdata->fifo == FI_FIFO_WRITING) {
+ hostdata->fifo = FI_FIFO_UNUSED;
+ length = cmd->SCp.this_residual;
+ cmd->SCp.this_residual = read_3393_count(hostdata);
+ cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+
+DB(DB_TRANSFER,printk("(%p,%d)",cmd->SCp.ptr,cmd->SCp.this_residual))
+
+ }
+
+/* Respond to the specific WD3393 interrupt - there are quite a few! */
+
+ switch (sr) {
+
+ case CSR_TIMEOUT:
+DB(DB_INTR,printk("TIMEOUT"))
+
+ if (hostdata->state == S_RUNNING_LEVEL2)
+ hostdata->connected = NULL;
+ else {
+ cmd = (Scsi_Cmnd *)hostdata->selecting; /* get a valid cmd */
+CHECK_NULL(cmd,"csr_timeout")
+ hostdata->selecting = NULL;
+ }
+
+ cmd->result = DID_NO_CONNECT << 16;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+ cmd->scsi_done(cmd);
+
+/* We are not connected to a target - check to see if there
+ * are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_SELECT:
+DB(DB_INTR,printk("SELECT"))
+ hostdata->connected = cmd = (Scsi_Cmnd *)hostdata->selecting;
+CHECK_NULL(cmd,"csr_select")
+ hostdata->selecting = NULL;
+
+ /* construct an IDENTIFY message with correct disconnect bit */
+
+ hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->lun);
+ if (cmd->SCp.phase)
+ hostdata->outgoing_msg[0] |= 0x40;
+
+ if (hostdata->sync_stat[cmd->target] == SS_FIRST) {
+#ifdef SYNC_DEBUG
+printk(" sending SDTR ");
+#endif
+
+ hostdata->sync_stat[cmd->target] = SS_WAITING;
+
+ /* tack on a 2nd message to ask about synchronous transfers */
+
+ hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[2] = 3;
+ hostdata->outgoing_msg[3] = EXTENDED_SDTR;
+ hostdata->outgoing_msg[4] = OPTIMUM_SX_PER/4;
+ hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
+ hostdata->outgoing_len = 6;
+ }
+ else
+ hostdata->outgoing_len = 1;
+
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE|PHS_DATA_IN:
+ case CSR_UNEXP |PHS_DATA_IN:
+ case CSR_SRV_REQ |PHS_DATA_IN:
+DB(DB_INTR,printk("IN-%d.%d",cmd->SCp.this_residual,cmd->SCp.buffers_residual))
+ transfer_bytes(cmd, DATA_IN_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE|PHS_DATA_OUT:
+ case CSR_UNEXP |PHS_DATA_OUT:
+ case CSR_SRV_REQ |PHS_DATA_OUT:
+DB(DB_INTR,printk("OUT-%d.%d",cmd->SCp.this_residual,cmd->SCp.buffers_residual))
+ transfer_bytes(cmd, DATA_OUT_DIR);
+ if (hostdata->state != S_RUNNING_LEVEL2)
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+/* Note: this interrupt should not occur in a LEVEL2 command */
+
+ case CSR_XFER_DONE|PHS_COMMAND:
+ case CSR_UNEXP |PHS_COMMAND:
+ case CSR_SRV_REQ |PHS_COMMAND:
+DB(DB_INTR,printk("CMND-%02x,%ld",cmd->cmnd[0],cmd->pid))
+ transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_XFER_DONE|PHS_STATUS:
+ case CSR_UNEXP |PHS_STATUS:
+ case CSR_SRV_REQ |PHS_STATUS:
+DB(DB_INTR,printk("STATUS="))
+
+ cmd->SCp.Status = read_1_byte(hostdata);
+DB(DB_INTR,printk("%02x",cmd->SCp.Status))
+ if (hostdata->level2 >= L2_BASIC) {
+ sr = read_3393(hostdata,WD_SCSI_STATUS); /* clear interrupt */
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x50);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ }
+ else {
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+
+ case CSR_XFER_DONE|PHS_MESS_IN:
+ case CSR_UNEXP |PHS_MESS_IN:
+ case CSR_SRV_REQ |PHS_MESS_IN:
+DB(DB_INTR,printk("MSG_IN="))
+
+ msg = read_1_byte(hostdata);
+ sr = read_3393(hostdata,WD_SCSI_STATUS); /* clear interrupt */
+
+ hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
+ if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
+ msg = EXTENDED_MESSAGE;
+ else
+ hostdata->incoming_ptr = 0;
+
+ cmd->SCp.Message = msg;
+ switch (msg) {
+
+ case COMMAND_COMPLETE:
+DB(DB_INTR,printk("CCMP-%ld",cmd->pid))
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_CMP_DISC;
+ break;
+
+ case SAVE_POINTERS:
+DB(DB_INTR,printk("SDP"))
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case RESTORE_POINTERS:
+DB(DB_INTR,printk("RDP"))
+ if (hostdata->level2 >= L2_BASIC) {
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else {
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ case DISCONNECT:
+DB(DB_INTR,printk("DIS"))
+ cmd->device->disconnect = 1;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_PRE_TMP_DISC;
+ break;
+
+ case MESSAGE_REJECT:
+DB(DB_INTR,printk("REJ"))
+#ifdef SYNC_DEBUG
+printk("-REJ-");
+#endif
+ if (hostdata->sync_stat[cmd->target] == SS_WAITING)
+ hostdata->sync_stat[cmd->target] = SS_SET;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+
+ case EXTENDED_MESSAGE:
+DB(DB_INTR,printk("EXT"))
+
+ ucp = hostdata->incoming_msg;
+
+#ifdef SYNC_DEBUG
+printk("%02x",ucp[hostdata->incoming_ptr]);
+#endif
+ /* Is this the last byte of the extended message? */
+
+ if ((hostdata->incoming_ptr >= 2) &&
+ (hostdata->incoming_ptr == (ucp[1] + 1))) {
+
+ switch (ucp[2]) { /* what's the EXTENDED code? */
+ case EXTENDED_SDTR:
+ id = calc_sync_xfer(ucp[3],ucp[4]);
+ if (hostdata->sync_stat[cmd->target] != SS_WAITING) {
+
+/* A device has sent an unsolicited SDTR message; rather than go
+ * through the effort of decoding it and then figuring out what
+ * our reply should be, we're just gonna say that we have a
+ * synchronous fifo depth of 0. This will result in asynchronous
+ * transfers - not ideal but so much easier.
+ * Actually, this is OK because it assures us that if we don't
+ * specifically ask for sync transfers, we won't do any.
+ */
+
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 3;
+ hostdata->outgoing_msg[2] = EXTENDED_SDTR;
+ hostdata->outgoing_msg[3] = hostdata->default_sx_per/4;
+ hostdata->outgoing_msg[4] = 0;
+ hostdata->outgoing_len = 5;
+ hostdata->sync_xfer[cmd->target] =
+ calc_sync_xfer(hostdata->default_sx_per/4,0);
+ }
+ else {
+ hostdata->sync_xfer[cmd->target] = id;
+ }
+#ifdef SYNC_DEBUG
+printk("sync_xfer=%02x",hostdata->sync_xfer[cmd->target]);
+#endif
+ hostdata->sync_stat[cmd->target] = SS_SET;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ case EXTENDED_WDTR:
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk("sending WDTR ");
+ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
+ hostdata->outgoing_msg[1] = 2;
+ hostdata->outgoing_msg[2] = EXTENDED_WDTR;
+ hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */
+ hostdata->outgoing_len = 4;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ default:
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ printk("Rejecting Unknown Extended Message(%02x). ",ucp[2]);
+ hostdata->outgoing_msg[0] = MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ break;
+ }
+ hostdata->incoming_ptr = 0;
+ }
+
+ /* We need to read more MESS_IN bytes for the extended message */
+
+ else {
+ hostdata->incoming_ptr++;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+ default:
+ printk("Rejecting Unknown Message(%02x) ",msg);
+ write_3393_cmd(hostdata,WD_CMD_ASSERT_ATN); /* want MESS_OUT */
+ hostdata->outgoing_msg[0] = MESSAGE_REJECT;
+ hostdata->outgoing_len = 1;
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ hostdata->state = S_CONNECTED;
+ }
+ break;
+
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SEL_XFER_DONE:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata,WD_SOURCE_ID, SRCID_ER);
+ if (phs == 0x60) {
+DB(DB_INTR,printk("SX-DONE-%ld",cmd->pid))
+ cmd->SCp.Message = COMMAND_COMPLETE;
+ lun = read_3393(hostdata,WD_TARGET_LUN);
+DB(DB_INTR,printk(":%d.%d",cmd->SCp.Status,lun))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
+ cmd->SCp.Status = lun;
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ }
+ else {
+ printk("%02x:%02x:%02x-%ld: Unknown SEL_XFER_DONE phase!!---",asr,sr,phs,cmd->pid);
+ }
+ break;
+
+
+/* Note: this interrupt will occur only after a LEVEL2 command */
+
+ case CSR_SDP:
+DB(DB_INTR,printk("SDP"))
+ hostdata->state = S_RUNNING_LEVEL2;
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x41);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ break;
+
+
+ case CSR_XFER_DONE|PHS_MESS_OUT:
+ case CSR_UNEXP |PHS_MESS_OUT:
+ case CSR_SRV_REQ |PHS_MESS_OUT:
+DB(DB_INTR,printk("MSG_OUT="))
+
+/* To get here, we've probably requested MESSAGE_OUT and have
+ * already put the correct bytes in outgoing_msg[] and filled
+ * in outgoing_len. We simply send them out to the SCSI bus.
+ * Sometimes we get MESSAGE_OUT phase when we're not expecting
+ * it - like when our SDTR message is rejected by a target. Some
+ * targets send the REJECT before receiving all of the extended
+ * message, and then seem to go back to MESSAGE_OUT for a byte
+ * or two. Not sure why, or if I'm doing something wrong to
+ * cause this to happen. Regardless, it seems that sending
+ * NOP messages in these situations results in no harm and
+ * makes everyone happy.
+ */
+
+ if (hostdata->outgoing_len == 0) {
+ hostdata->outgoing_len = 1;
+ hostdata->outgoing_msg[0] = NOP;
+ }
+ transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len,
+ DATA_OUT_DIR, hostdata);
+DB(DB_INTR,printk("%02x",hostdata->outgoing_msg[0]))
+ hostdata->outgoing_len = 0;
+ hostdata->state = S_CONNECTED;
+ break;
+
+
+ case CSR_UNEXP_DISC:
+
+/* I think I've seen this after a request-sense that was in response
+ * to an error condition, but not sure. We certainly need to do
+ * something when we get this interrupt - the question is 'what?'.
+ * Let's think positively, and assume some command has finished
+ * in a legal manner (like a command that provokes a request-sense),
+ * so we treat it as a normal command-complete-disconnect.
+ */
+
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata,WD_SOURCE_ID, SRCID_ER);
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+ return;
+ }
+DB(DB_INTR,printk("UNEXP_DISC-%ld",cmd->pid))
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+ case CSR_DISC:
+
+/* Make sure that reselection is enabled at this point - it may
+ * have been turned off for the command that just completed.
+ */
+
+ write_3393(hostdata,WD_SOURCE_ID, SRCID_ER);
+DB(DB_INTR,printk("DISC-%ld",cmd->pid))
+ if (cmd == NULL) {
+ printk(" - Already disconnected! ");
+ hostdata->state = S_UNCONNECTED;
+ }
+ switch (hostdata->state) {
+ case S_PRE_CMP_DISC:
+ hostdata->connected = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->state = S_UNCONNECTED;
+DB(DB_INTR,printk(":%d",cmd->SCp.Status))
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
+ else
+ cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ cmd->scsi_done(cmd);
+ break;
+ case S_PRE_TMP_DISC:
+ case S_RUNNING_LEVEL2:
+ cmd->host_scribble = (uchar *)hostdata->disconnected_Q;
+ hostdata->disconnected_Q = cmd;
+ hostdata->connected = NULL;
+ hostdata->state = S_UNCONNECTED;
+
+#ifdef PROC_STATISTICS
+ hostdata->disc_done_cnt[cmd->target]++;
+#endif
+
+ break;
+ default:
+ printk("*** Unexpected DISCONNECT interrupt! ***");
+ hostdata->state = S_UNCONNECTED;
+ }
+
+/* We are no longer connected to a target - check to see if
+ * there are commands waiting to be executed.
+ */
+
+ in2000_execute(instance);
+ break;
+
+
+ case CSR_RESEL_AM:
+DB(DB_INTR,printk("RESEL"))
+
+ /* First we have to make sure this reselection didn't */
+ /* happen during Arbitration/Selection of some other device. */
+ /* If yes, put losing command back on top of input_Q. */
+
+ if (hostdata->level2 <= L2_NONE) {
+
+ if (hostdata->selecting) {
+ cmd = (Scsi_Cmnd *)hostdata->selecting;
+ hostdata->selecting = NULL;
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ cmd->host_scribble = (uchar *)hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ }
+
+ else {
+
+ if (cmd) {
+ if (phs == 0x00) {
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ cmd->host_scribble = (uchar *)hostdata->input_Q;
+ hostdata->input_Q = cmd;
+ }
+ else {
+ printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---",asr,sr,phs);
+ while (1)
+ printk("\r");
+ }
+ }
+
+ }
+
+ /* OK - find out which device reselected us. */
+
+ id = read_3393(hostdata,WD_SOURCE_ID);
+ id &= SRCID_MASK;
+
+ /* and extract the lun from the ID message. (Note that we don't
+ * bother to check for a valid message here - I guess this is
+ * not the right way to go, but....)
+ */
+
+ lun = read_3393(hostdata,WD_DATA);
+ if (hostdata->level2 < L2_RESELECT)
+ write_3393_cmd(hostdata,WD_CMD_NEGATE_ACK);
+ lun &= 7;
+
+ /* Now we look for the command that's reconnecting. */
+
+ cmd = (Scsi_Cmnd *)hostdata->disconnected_Q;
+ patch = NULL;
+ while (cmd) {
+ if (id == cmd->target && lun == cmd->lun)
+ break;
+ patch = cmd;
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+
+ /* Hmm. Couldn't find a valid command.... What to do? */
+
+ if (!cmd) {
+ printk("---TROUBLE: target %d.%d not in disconnect queue---",id,lun);
+ break;
+ }
+
+ /* Ok, found the command - now start it up again. */
+
+ if (patch)
+ patch->host_scribble = cmd->host_scribble;
+ else
+ hostdata->disconnected_Q = (Scsi_Cmnd *)cmd->host_scribble;
+ hostdata->connected = cmd;
+
+ /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
+ * because these things are preserved over a disconnect.
+ * But we DO need to fix the DPD bit so it's correct for this command.
+ */
+
+ if (is_dir_out(cmd))
+ write_3393(hostdata,WD_DESTINATION_ID,cmd->target);
+ else
+ write_3393(hostdata,WD_DESTINATION_ID,cmd->target | DSTID_DPD);
+ if (hostdata->level2 >= L2_RESELECT) {
+ write_3393_count(hostdata,0); /* we want a DATA_PHASE interrupt */
+ write_3393(hostdata,WD_COMMAND_PHASE, 0x45);
+ write_3393_cmd(hostdata,WD_CMD_SEL_ATN_XFER);
+ hostdata->state = S_RUNNING_LEVEL2;
+ }
+ else
+ hostdata->state = S_CONNECTED;
+
+DB(DB_INTR,printk("-%ld",cmd->pid))
+ break;
+
+ default:
+ printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--",asr,sr,phs);
+ }
+
+ write1_io(0, IO_LED_OFF);
+
+DB(DB_INTR,printk("} "))
+
+/* release the SMP spin_lock and restore irq state */
+ CLISPIN_UNLOCK(flags);
+
+}
+
+
+
+#define RESET_CARD 0
+#define RESET_CARD_AND_BUS 1
+#define B_FLAG 0x80
+
+static int reset_hardware(struct Scsi_Host *instance, int type)
+{
+struct IN2000_hostdata *hostdata;
+int qt,x;
+unsigned long flags;
+
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+ write1_io(0, IO_LED_ON);
+ if (type == RESET_CARD_AND_BUS) {
+ write1_io(0,IO_CARD_RESET);
+ x = read1_io(IO_HARDWARE);
+ }
+ x = read_3393(hostdata,WD_SCSI_STATUS); /* clear any WD intrpt */
+ write_3393(hostdata,WD_OWN_ID, instance->this_id |
+ OWNID_EAF | OWNID_RAF | OWNID_FS_8);
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393(hostdata,WD_SYNCHRONOUS_TRANSFER,
+ calc_sync_xfer(hostdata->default_sx_per/4,DEFAULT_SX_OFF));
+ save_flags(flags);
+ cli();
+ write1_io(0,IO_FIFO_WRITE); /* clear fifo counter */
+ write1_io(0,IO_FIFO_READ); /* start fifo out in read mode */
+ write_3393(hostdata,WD_COMMAND, WD_CMD_RESET);
+ while (!(READ_AUX_STAT() & ASR_INT))
+ ; /* wait for RESET to complete */
+
+ x = read_3393(hostdata,WD_SCSI_STATUS); /* clear interrupt */
+ restore_flags(flags);
+ write_3393(hostdata,WD_QUEUE_TAG,0xa5); /* any random number */
+ qt = read_3393(hostdata,WD_QUEUE_TAG);
+ if (qt == 0xa5) {
+ x |= B_FLAG;
+ write_3393(hostdata,WD_QUEUE_TAG,0);
+ }
+ write_3393(hostdata,WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
+ write_3393(hostdata,WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write1_io(0, IO_LED_OFF);
+ return x;
+}
+
+
+
+int in2000_reset(Scsi_Cmnd *cmd, unsigned int reset_flags)
+{
+unsigned long flags;
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+int x;
+
+ instance = cmd->host;
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+ printk("scsi%d: Reset. ", instance->host_no);
+ save_flags(flags);
+ cli();
+
+ /* do scsi-reset here */
+
+ reset_hardware(instance, RESET_CARD_AND_BUS);
+ for (x = 0; x < 8; x++) {
+ hostdata->busy[x] = 0;
+ hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER/4,DEFAULT_SX_OFF);
+ hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
+ }
+ hostdata->input_Q = NULL;
+ hostdata->selecting = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_Q = NULL;
+ hostdata->state = S_UNCONNECTED;
+ hostdata->fifo = FI_FIFO_UNUSED;
+ hostdata->incoming_ptr = 0;
+ hostdata->outgoing_len = 0;
+
+ cmd->result = DID_RESET << 16;
+ restore_flags(flags);
+ return 0;
+}
+
+
+
+int in2000_abort (Scsi_Cmnd *cmd)
+{
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+Scsi_Cmnd *tmp, *prev;
+unsigned long flags;
+uchar sr, asr;
+unsigned long timeout;
+
+ save_flags (flags);
+ cli();
+
+ instance = cmd->host;
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+
+ printk ("scsi%d: Abort-", instance->host_no);
+ printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ",
+ READ_AUX_STAT(),read_3393_count(hostdata),cmd->SCp.this_residual,cmd->SCp.buffers_residual,
+ cmd->SCp.have_data_in,read1_io(IO_FIFO_COUNT));
+
+/*
+ * Case 1 : If the command hasn't been issued yet, we simply remove it
+ * from the input_Q.
+ */
+
+ tmp = (Scsi_Cmnd *)hostdata->input_Q;
+ prev = 0;
+ while (tmp) {
+ if (tmp == cmd) {
+ if (prev)
+ prev->host_scribble = cmd->host_scribble;
+ else
+ hostdata->input_Q = (Scsi_Cmnd *)cmd->host_scribble;
+ cmd->host_scribble = NULL;
+ cmd->result = DID_ABORT << 16;
+ printk("scsi%d: Abort - removing command %ld from input_Q. ",
+ instance->host_no, cmd->pid);
+ cmd->scsi_done(cmd);
+ restore_flags(flags);
+ return SCSI_ABORT_SUCCESS;
+ }
+ prev = tmp;
+ tmp = (Scsi_Cmnd *)tmp->host_scribble;
+ }
+
+/*
+ * Case 2 : If the command is connected, we're going to fail the abort
+ * and let the high level SCSI driver retry at a later time or
+ * issue a reset.
+ *
+ * Timeouts, and therefore aborted commands, will be highly unlikely
+ * and handling them cleanly in this situation would make the common
+ * case (no resets) less efficient, and would pollute our code. So,
+ * we fail.
+ */
+
+ if (hostdata->connected == cmd) {
+
+ printk("scsi%d: Aborting connected command %ld - ",
+ instance->host_no, cmd->pid);
+
+ printk("sending wd33c93 ABORT command - ");
+ write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
+ write_3393_cmd(hostdata, WD_CMD_ABORT);
+
+/* Now we have to attempt to flush out the FIFO... */
+
+ printk("flushing fifo - ");
+ timeout = 1000000;
+ do {
+ asr = READ_AUX_STAT();
+ if (asr & ASR_DBR)
+ read_3393(hostdata, WD_DATA);
+ } while (!(asr & ASR_INT) && timeout-- > 0);
+ sr = read_3393(hostdata, WD_SCSI_STATUS);
+ printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ",
+ asr, sr, read_3393_count(hostdata), timeout);
+
+ /*
+ * Abort command processed.
+ * Still connected.
+ * We must disconnect.
+ */
+
+ printk("sending wd33c93 DISCONNECT command - ");
+ write_3393_cmd(hostdata, WD_CMD_DISCONNECT);
+
+ timeout = 1000000;
+ asr = READ_AUX_STAT();
+ while ((asr & ASR_CIP) && timeout-- > 0)
+ asr = READ_AUX_STAT();
+ sr = read_3393(hostdata, WD_SCSI_STATUS);
+ printk("asr=%02x, sr=%02x.",asr,sr);
+
+ hostdata->busy[cmd->target] &= ~(1 << cmd->lun);
+ hostdata->connected = NULL;
+ hostdata->state = S_UNCONNECTED;
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+
+ in2000_execute (instance);
+
+ restore_flags(flags);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * Case 3: If the command is currently disconnected from the bus,
+ * we're not going to expend much effort here: Let's just return
+ * an ABORT_SNOOZE and hope for the best...
+ */
+
+ for (tmp=(Scsi_Cmnd *)hostdata->disconnected_Q; tmp;
+ tmp=(Scsi_Cmnd *)tmp->host_scribble)
+ if (cmd == tmp) {
+ restore_flags(flags);
+ printk("Sending ABORT_SNOOZE. ");
+ return SCSI_ABORT_SNOOZE;
+ }
+
+/*
+ * Case 4 : If we reached this point, the command was not found in any of
+ * the queues.
+ *
+ * We probably reached this point because of an unlikely race condition
+ * between the command completing successfully and the abort code,
+ * so we won't panic, but we will notify the user in case something really
+ * broke.
+ */
+
+ in2000_execute (instance);
+
+ restore_flags(flags);
+ printk("scsi%d: warning : SCSI command probably completed successfully"
+ " before abortion. ", instance->host_no);
+ return SCSI_ABORT_NOT_RUNNING;
+}
+
+
+
+#define MAX_IN2000_HOSTS 3
+#define MAX_SETUP_ARGS (sizeof(setup_args) / sizeof(char *))
+#define SETUP_BUFFER_SIZE 200
+static char setup_buffer[SETUP_BUFFER_SIZE];
+static char setup_used[MAX_SETUP_ARGS];
+static int done_setup = 0;
+
+in2000__INITFUNC( void in2000_setup (char *str, int *ints) )
+{
+int i;
+char *p1,*p2;
+
+ strncpy(setup_buffer,str,SETUP_BUFFER_SIZE);
+ setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
+ p1 = setup_buffer;
+ i = 0;
+ while (*p1 && (i < MAX_SETUP_ARGS)) {
+ p2 = strchr(p1, ',');
+ if (p2) {
+ *p2 = '\0';
+ if (p1 != p2)
+ setup_args[i] = p1;
+ p1 = p2 + 1;
+ i++;
+ }
+ else {
+ setup_args[i] = p1;
+ break;
+ }
+ }
+ for (i=0; i<MAX_SETUP_ARGS; i++)
+ setup_used[i] = 0;
+ done_setup = 1;
+}
+
+
+/* check_setup_args() returns (index + 1) if the key is found, 0 if not
+ */
+
+in2000__INITFUNC( static int check_setup_args(char *key, int *flags, int *val, char *buf) )
+{
+int x;
+char *cp;
+
+ for (x=0; x<MAX_SETUP_ARGS; x++) {
+ if (setup_used[x])
+ continue;
+ if (!strncmp(setup_args[x], key, strlen(key)))
+ break;
+ }
+ if (x == MAX_SETUP_ARGS)
+ return 0;
+ setup_used[x] = 1;
+ cp = setup_args[x] + strlen(key);
+ *val = -1;
+ if (*cp != ':')
+ return ++x;
+ cp++;
+ if ((*cp >= '0') && (*cp <= '9')) {
+ *val = simple_strtoul(cp,NULL,0);
+ }
+ return ++x;
+}
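+/* Illustrative example (not part of the original source): a setup string
+ * such as "ioport:0x220,noreset,period:250" is split by in2000_setup()
+ * into setup_args[] = { "ioport:0x220", "noreset", "period:250" };
+ * check_setup_args("ioport",...) then returns a non-zero value and stores
+ * 0x220 in *val (simple_strtoul() with base 0 accepts hex as well as
+ * decimal).
+ */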
+
+
+
+/* The "correct" (ie portable) way to access memory-mapped hardware
+ * such as the IN2000 EPROM and dip switch is through the use of
+ * special macros declared in 'asm/io.h'. We use readb() and readl()
+ * when reading from the card's BIOS area in in2000_detect().
+ */
+static const unsigned int *bios_tab[] in2000__INITDATA = {
+ (unsigned int *)0xc8000,
+ (unsigned int *)0xd0000,
+ (unsigned int *)0xd8000,
+ 0
+ };
+
+static const unsigned short base_tab[] in2000__INITDATA = {
+ 0x220,
+ 0x200,
+ 0x110,
+ 0x100,
+ };
+
+static const int int_tab[] in2000__INITDATA = {
+ 15,
+ 14,
+ 11,
+ 10
+ };
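+/* These three tables are indexed from the dip-switch byte in in2000_detect():
+ * bios_tab[] lists the EPROM locations probed for a BIOS signature,
+ * base_tab[] is indexed by the two SW_ADDR bits to find the card's IO base,
+ * and int_tab[] is indexed by the two SW_INT bits (right-justified via
+ * SW_INT_SHIFT) to find its IRQ.
+ */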
+
+
+in2000__INITFUNC( int in2000_detect(Scsi_Host_Template * tpnt) )
+{
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hostdata;
+int detect_count;
+int bios;
+int x;
+unsigned short base;
+uchar switches;
+uchar hrev;
+int flags;
+int val;
+char buf[32];
+
+/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
+ * pretty straightforward and fool-proof operation. There are 3
+ * possible locations for the IN2000 EPROM in memory space - if we
+ * find a BIOS signature, we can read the dip switch settings from
+ * the byte at BIOS+32 (shadowed in by logic on the card). From 2
+ * of the switch bits we get the card's address in IO space. There's
+ * an image of the dip switch there, also, so we have a way to back-
+ * check that this really is an IN2000 card. Very nifty. Use the
+ * 'ioport:xx' command-line parameter if your BIOS EPROM is absent
+ * or disabled.
+ */
+
+ if (!done_setup && setup_strings)
+ in2000_setup(setup_strings,0);
+
+ detect_count = 0;
+ for (bios = 0; bios_tab[bios]; bios++) {
+ if (check_setup_args("ioport",&flags,&val,buf)) {
+ base = val;
+ switches = ~inb(base + IO_SWITCHES) & 0xff;
+ printk("Forcing IN2000 detection at IOport 0x%x ",base);
+ bios = 2;
+ }
+/*
+ * There have been a couple of BIOS versions with different layouts
+ * for the obvious ID strings. We look for the 2 most common ones and
+ * hope that they cover all the cases...
+ */
+ else if (readl(bios_tab[bios]+0x04) == 0x41564f4e ||
+ readl(bios_tab[bios]+0x0c) == 0x61776c41) {
+ printk("Found IN2000 BIOS at 0x%x ",(unsigned int)bios_tab[bios]);
+
+/* Read the switch image that's mapped into EPROM space */
+
+ switches = ~((readb(bios_tab[bios]+0x08) & 0xff));
+
+/* Find out where the IO space is */
+
+ x = switches & (SW_ADDR0 | SW_ADDR1);
+ base = base_tab[x];
+
+/* Check for the IN2000 signature in IO space. */
+
+ x = ~inb(base + IO_SWITCHES) & 0xff;
+ if (x != switches) {
+ printk("Bad IO signature: %02x vs %02x.\n",x,switches);
+ continue;
+ }
+ }
+ else
+ continue;
+
+/* OK. We have a base address for the IO ports - run a few safety checks */
+
+ if (!(switches & SW_BIT7)) { /* I _think_ all cards do this */
+ printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n",base);
+ continue;
+ }
+
+/* Let's assume any hardware version will work, although the driver
+ * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
+ * print out the rev number for reference later, but accept them all.
+ */
+
+ hrev = inb(base + IO_HARDWARE);
+
+ /* Bit 2 tells us if interrupts are disabled */
+ if (switches & SW_DISINT) {
+ printk("The IN-2000 SCSI card at IOport 0x%03x ",base);
+ printk("is not configured for interrupt operation!\n");
+ printk("This driver requires an interrupt: cancelling detection.\n");
+ continue;
+ }
+
+/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
+ * initialize it.
+ */
+
+ tpnt->proc_dir = &proc_scsi_in2000; /* done more than once? harmless. */
+ detect_count++;
+ instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
+ if (!instance_list)
+ instance_list = instance;
+ hostdata = (struct IN2000_hostdata *)instance->hostdata;
+ instance->io_port = hostdata->io_base = base;
+ hostdata->dip_switch = switches;
+ hostdata->hrev = hrev;
+
+ write1_io(0,IO_FIFO_WRITE); /* clear fifo counter */
+ write1_io(0,IO_FIFO_READ); /* start fifo out in read mode */
+ write1_io(0,IO_INTR_MASK); /* allow all ints */
+ x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
+ if (request_irq(x, in2000_intr, SA_INTERRUPT, "in2000", NULL)) {
+ printk("in2000_detect: Unable to allocate IRQ.\n");
+ detect_count--;
+ continue;
+ }
+ instance->irq = x;
+ instance->n_io_port = 13;
+ request_region(base, 13, "in2000"); /* lock in this IO space for our use */
+
+ for (x = 0; x < 8; x++) {
+ hostdata->busy[x] = 0;
+ hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER/4,DEFAULT_SX_OFF);
+ hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */
+#ifdef PROC_STATISTICS
+ hostdata->cmd_cnt[x] = 0;
+ hostdata->disc_allowed_cnt[x] = 0;
+ hostdata->disc_done_cnt[x] = 0;
+#endif
+ }
+ hostdata->input_Q = NULL;
+ hostdata->selecting = NULL;
+ hostdata->connected = NULL;
+ hostdata->disconnected_Q = NULL;
+ hostdata->state = S_UNCONNECTED;
+ hostdata->fifo = FI_FIFO_UNUSED;
+ hostdata->level2 = L2_BASIC;
+ hostdata->disconnect = DIS_ADAPTIVE;
+ hostdata->args = DEBUG_DEFAULTS;
+ hostdata->incoming_ptr = 0;
+ hostdata->outgoing_len = 0;
+ hostdata->default_sx_per = DEFAULT_SX_PER;
+
+/* Older BIOS's had a 'sync on/off' switch - use its setting */
+
+ if (readl(bios_tab[bios]+0x04) == 0x41564f4e && (switches & SW_SYNC_DOS5))
+ hostdata->sync_off = 0x00; /* sync defaults to on */
+ else
+ hostdata->sync_off = 0xff; /* sync defaults to off */
+
+#ifdef PROC_INTERFACE
+ hostdata->proc = PR_VERSION|PR_INFO|PR_STATISTICS|
+ PR_CONNECTED|PR_INPUTQ|PR_DISCQ|
+ PR_STOP;
+#ifdef PROC_STATISTICS
+ hostdata->int_cnt = 0;
+#endif
+#endif
+
+ if (check_setup_args("nosync",&flags,&val,buf))
+ hostdata->sync_off = val;
+
+ if (check_setup_args("period",&flags,&val,buf))
+ hostdata->default_sx_per = sx_table[round_period((unsigned int)val)].period_ns;
+
+ if (check_setup_args("disconnect",&flags,&val,buf)) {
+ if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
+ hostdata->disconnect = val;
+ else
+ hostdata->disconnect = DIS_ADAPTIVE;
+ }
+
+ if (check_setup_args("noreset",&flags,&val,buf))
+ hostdata->args ^= A_NO_SCSI_RESET;
+
+ if (check_setup_args("level2",&flags,&val,buf))
+ hostdata->level2 = val;
+
+ if (check_setup_args("debug",&flags,&val,buf))
+ hostdata->args = (val & DB_MASK);
+
+#ifdef PROC_INTERFACE
+ if (check_setup_args("proc",&flags,&val,buf))
+ hostdata->proc = val;
+#endif
+
+
+ x = reset_hardware(instance,(hostdata->args & A_NO_SCSI_RESET)?RESET_CARD:RESET_CARD_AND_BUS);
+
+ hostdata->microcode = read_3393(hostdata,WD_CDB_1);
+ if (x & 0x01) {
+ if (x & B_FLAG)
+ hostdata->chip = C_WD33C93B;
+ else
+ hostdata->chip = C_WD33C93A;
+ }
+ else
+ hostdata->chip = C_WD33C93;
+
+ printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ",
+ (switches & 0x7f),
+ instance->irq,hostdata->io_base,
+ (switches & SW_FLOPPY)?"Yes":"No",
+ (switches & SW_SYNC_DOS5)?"Yes":"No");
+ printk("hardware_ver=%02x chip=%s microcode=%02x\n",
+ hrev,
+ (hostdata->chip==C_WD33C93)?"WD33c93":
+ (hostdata->chip==C_WD33C93A)?"WD33c93A":
+ (hostdata->chip==C_WD33C93B)?"WD33c93B":"unknown",
+ hostdata->microcode);
+#ifdef DEBUGGING_ON
+ printk("setup_args = ");
+ for (x=0; x<MAX_SETUP_ARGS; x++)
+ printk("%s,",setup_args[x]);
+ printk("\n");
+#endif
+ if (hostdata->sync_off == 0xff)
+ printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
+ printk("IN2000 driver version %s - %s\n",IN2000_VERSION,IN2000_DATE);
+ }
+
+ return detect_count;
+}
+
+
+/* NOTE: I lifted this function straight out of the old driver,
+ * and have not tested it. Presumably it does what it's
+ * supposed to do...
+ */
+
+int in2000_biosparam(Disk *disk, kdev_t dev, int *iinfo)
+{
+int size;
+
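+/* iinfo[] returns the BIOS geometry as heads, sectors, cylinders. With the
+ * default 64 heads x 32 sectors mapping, cylinders = capacity >> 11; bigger
+ * drives fall through to the 63-sector translations below.
+ */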
+ size = disk->capacity;
+ iinfo[0] = 64;
+ iinfo[1] = 32;
+ iinfo[2] = size >> 11;
+
+/* This should approximate the large drive handling that the DOS ASPI manager
+ uses. Drives very near the boundaries may not be handled correctly (i.e.
+ near 2.0 Gb and 4.0 Gb) */
+
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 64;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 128;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ if (iinfo[2] > 1024) {
+ iinfo[0] = 255;
+ iinfo[1] = 63;
+ iinfo[2] = disk->capacity / (iinfo[0] * iinfo[1]);
+ }
+ return 0;
+}
+
+
+
+struct proc_dir_entry proc_scsi_in2000 = {
+ PROC_SCSI_IN2000, 6, "in2000",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+ };
+
+
+int in2000_proc_info(char *buf, char **start, off_t off, int len, int hn, int in)
+{
+
+#ifdef PROC_INTERFACE
+
+char *bp;
+char tbuf[128];
+unsigned long flags;
+struct Scsi_Host *instance;
+struct IN2000_hostdata *hd;
+Scsi_Cmnd *cmd;
+int x,i;
+static int stop = 0;
+
+ for (instance=instance_list; instance; instance=instance->next) {
+ if (instance->host_no == hn)
+ break;
+ }
+ if (!instance) {
+ printk("*** Hmm... Can't find host #%d!\n",hn);
+ return (-ESRCH);
+ }
+ hd = (struct IN2000_hostdata *)instance->hostdata;
+
+/* If 'in' is TRUE, the user has written to the proc file and we need to
+ * _read_ that data. We accept the following keywords (same format as the
+ * command-line, but only ONE per write):
+ * debug
+ * disconnect
+ * period
+ * resync
+ * proc
+ * level2
+ */
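+/* For example (illustrative only - the host number depends on your setup):
+ *
+ * echo "disconnect:2" > /proc/scsi/in2000/0
+ *
+ * sets the disconnect/reselect policy of host 0 to DIS_ALWAYS.
+ */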
+
+ if (in) {
+ buf[len] = '\0';
+ bp = buf;
+ if (!strncmp(bp,"debug:",6)) {
+ bp += 6;
+ hd->args = simple_strtoul(bp,NULL,0) & DB_MASK;
+ }
+ else if (!strncmp(bp,"disconnect:",11)) {
+ bp += 11;
+ x = simple_strtoul(bp,NULL,0);
+ if (x < DIS_NEVER || x > DIS_ALWAYS)
+ x = DIS_ADAPTIVE;
+ hd->disconnect = x;
+ }
+ else if (!strncmp(bp,"period:",7)) {
+ bp += 7;
+ x = simple_strtoul(bp,NULL,0);
+ hd->default_sx_per = sx_table[round_period((unsigned int)x)].period_ns;
+ }
+ else if (!strncmp(bp,"resync:",7)) {
+ bp += 7;
+ x = simple_strtoul(bp,NULL,0);
+ for (i=0; i<7; i++)
+ if (x & (1<<i))
+ hd->sync_stat[i] = SS_UNSET;
+ }
+ else if (!strncmp(bp,"proc:",5)) {
+ bp += 5;
+ hd->proc = simple_strtoul(bp,NULL,0);
+ }
+ else if (!strncmp(bp,"level2:",7)) {
+ bp += 7;
+ hd->level2 = simple_strtoul(bp,NULL,0);
+ }
+ return len;
+ }
+
+ save_flags(flags);
+ cli();
+ bp = buf;
+ *bp = '\0';
+ if (hd->proc & PR_VERSION) {
+ /* Don't create varied object files each time this file is compiled. */
+ /* sprintf(tbuf,"\nVersion %s - %s. Compiled %s %s",
+ IN2000_VERSION,IN2000_DATE,__DATE__,__TIME__); */
+ sprintf(tbuf,"\nVersion %s - %s.",
+ IN2000_VERSION,IN2000_DATE);
+ strcat(bp,tbuf);
+ }
+ if (hd->proc & PR_INFO) {
+ sprintf(tbuf,"\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s",
+ (hd->dip_switch & 0x7f), instance->irq, hd->io_base,
+ (hd->dip_switch & 0x40)?"Yes":"No",
+ (hd->dip_switch & 0x20)?"Yes":"No");
+ strcat(bp,tbuf);
+ strcat(bp,"\nsync_xfer[] = ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%02x",hd->sync_xfer[x]);
+ strcat(bp,tbuf);
+ }
+ strcat(bp,"\nsync_stat[] = ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%02x",hd->sync_stat[x]);
+ strcat(bp,tbuf);
+ }
+ }
+#ifdef PROC_STATISTICS
+ if (hd->proc & PR_STATISTICS) {
+ strcat(bp,"\ncommands issued: ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%ld",hd->cmd_cnt[x]);
+ strcat(bp,tbuf);
+ }
+ strcat(bp,"\ndisconnects allowed:");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%ld",hd->disc_allowed_cnt[x]);
+ strcat(bp,tbuf);
+ }
+ strcat(bp,"\ndisconnects done: ");
+ for (x=0; x<7; x++) {
+ sprintf(tbuf,"\t%ld",hd->disc_done_cnt[x]);
+ strcat(bp,tbuf);
+ }
+ sprintf(tbuf,"\ninterrupts: \t%ld",hd->int_cnt);
+ strcat(bp,tbuf);
+ }
+#endif
+ if (hd->proc & PR_CONNECTED) {
+ strcat(bp,"\nconnected: ");
+ if (hd->connected) {
+ cmd = (Scsi_Cmnd *)hd->connected;
+ sprintf(tbuf," %ld-%d:%d(%02x)",
+ cmd->pid, cmd->target, cmd->lun, cmd->cmnd[0]);
+ strcat(bp,tbuf);
+ }
+ }
+ if (hd->proc & PR_INPUTQ) {
+ strcat(bp,"\ninput_Q: ");
+ cmd = (Scsi_Cmnd *)hd->input_Q;
+ while (cmd) {
+ sprintf(tbuf," %ld-%d:%d(%02x)",
+ cmd->pid, cmd->target, cmd->lun, cmd->cmnd[0]);
+ strcat(bp,tbuf);
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+ }
+ if (hd->proc & PR_DISCQ) {
+ strcat(bp,"\ndisconnected_Q:");
+ cmd = (Scsi_Cmnd *)hd->disconnected_Q;
+ while (cmd) {
+ sprintf(tbuf," %ld-%d:%d(%02x)",
+ cmd->pid, cmd->target, cmd->lun, cmd->cmnd[0]);
+ strcat(bp,tbuf);
+ cmd = (Scsi_Cmnd *)cmd->host_scribble;
+ }
+ }
+ if (hd->proc & PR_TEST) {
+ ; /* insert your own custom function here */
+ }
+ strcat(bp,"\n");
+ restore_flags(flags);
+ *start = buf;
+ if (stop) {
+ stop = 0;
+ return 0; /* return 0 to signal end-of-file */
+ }
+ if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */
+ stop = 1;
+ if (hd->proc & PR_STOP) /* stop every other time */
+ stop = 1;
+ return strlen(bp);
+
+#else /* PROC_INTERFACE */
+
+ return 0;
+
+#endif /* PROC_INTERFACE */
+
+}
+
+
+#ifdef MODULE
+
+Scsi_Host_Template driver_template = IN2000;
+
+#include "scsi_module.c"
+
+#endif
+
diff --git a/linux/src/drivers/scsi/in2000.h b/linux/src/drivers/scsi/in2000.h
new file mode 100644
index 0000000..732bab8
--- /dev/null
+++ b/linux/src/drivers/scsi/in2000.h
@@ -0,0 +1,465 @@
+/*
+ * in2000.h - Linux device driver definitions for the
+ * Always IN2000 ISA SCSI card.
+ *
+ * IMPORTANT: This file is for version 1.33 - 26/Aug/1998
+ *
+ * Copyright (c) 1996 John Shifflett, GeoLog Consulting
+ * john@geolog.com
+ * jshiffle@netcom.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef IN2000_H
+#define IN2000_H
+
+#include <asm/io.h>
+
+#define PROC_INTERFACE /* add code for /proc/scsi/in2000/xxx interface */
+#ifdef PROC_INTERFACE
+#define PROC_STATISTICS /* add code for keeping various real time stats */
+#endif
+
+#define SYNC_DEBUG /* extra info on sync negotiation printed */
+#define DEBUGGING_ON /* enable command-line debugging bitmask */
+#define DEBUG_DEFAULTS 0 /* default bitmask - change from command-line */
+
+#define FAST_READ_IO /* No problems with these on my machine */
+#define FAST_WRITE_IO
+
+#ifdef DEBUGGING_ON
+#define DB(f,a) if (hostdata->args & (f)) a;
+#define CHECK_NULL(p,s) /* if (!(p)) {printk("\n"); while (1) printk("NP:%s\r",(s));} */
+#else
+#define DB(f,a)
+#define CHECK_NULL(p,s)
+#endif
+
+#define uchar unsigned char
+
+#define read1_io(a) (inb(hostdata->io_base+(a)))
+#define read2_io(a) (inw(hostdata->io_base+(a)))
+#define write1_io(b,a) (outb((b),hostdata->io_base+(a)))
+#define write2_io(w,a) (outw((w),hostdata->io_base+(a)))
+
+/* These inline assembly defines are derived from a patch
+ * sent to me by Bill Earnest. He's done a lot of very
+ * valuable thinking, testing, and coding during his effort
+ * to squeeze more speed out of this driver. I really think
+ * that we are doing IO at close to the maximum now with
+ * the fifo. (And yes, insw uses 'edi' while outsw uses
+ * 'esi'. Thanks Bill!)
+ */
+
+#define FAST_READ2_IO() \
+({ \
+int __dummy_1,__dummy_2; \
+ __asm__ __volatile__ ("\n \
+ cld \n \
+ orl %%ecx, %%ecx \n \
+ jz 1f \n \
+ rep \n \
+ insw (%%dx),%%es:(%%edi) \n \
+1: " \
+ : "=D" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2) /* output */ \
+ : "2" (f), "0" (sp), "1" (i) /* input */ \
+ ); /* trashed */ \
+})
+
+#define FAST_WRITE2_IO() \
+({ \
+int __dummy_1,__dummy_2; \
+ __asm__ __volatile__ ("\n \
+ cld \n \
+ orl %%ecx, %%ecx \n \
+ jz 1f \n \
+ rep \n \
+ outsw %%ds:(%%esi),(%%dx) \n \
+1: " \
+ : "=S" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)/* output */ \
+ : "2" (f), "0" (sp), "1" (i) /* input */ \
+ ); /* trashed */ \
+})
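+/* In both macros above, the caller is expected to provide 'f' (the fifo IO
+ * port), 'i' (the 16-bit word count) and 'sp' (the buffer pointer) as local
+ * variables; gcc loads them into %dx, %ecx and %edi/%esi respectively, and
+ * the orl/jz pair skips the 'rep insw'/'rep outsw' when the count is zero.
+ */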
+
+/* IN2000 io_port offsets */
+#define IO_WD_ASR 0x00 /* R - 3393 auxstat reg */
+#define ASR_INT 0x80
+#define ASR_LCI 0x40
+#define ASR_BSY 0x20
+#define ASR_CIP 0x10
+#define ASR_PE 0x02
+#define ASR_DBR 0x01
+#define IO_WD_ADDR 0x00 /* W - 3393 address reg */
+#define IO_WD_DATA 0x01 /* R/W - rest of 3393 regs */
+#define IO_FIFO 0x02 /* R/W - in2000 dual-port fifo (16 bits) */
+#define IN2000_FIFO_SIZE 2048 /* fifo capacity in bytes */
+#define IO_CARD_RESET 0x03 /* W - in2000 start master reset */
+#define IO_FIFO_COUNT 0x04 /* R - in2000 fifo counter */
+#define IO_FIFO_WRITE 0x05 /* W - clear fifo counter, start write */
+#define IO_FIFO_READ 0x07 /* W - start fifo read */
+#define IO_LED_OFF 0x08 /* W - turn off in2000 activity LED */
+#define IO_SWITCHES 0x08 /* R - read in2000 dip switch */
+#define SW_ADDR0 0x01 /* bit 0 = bit 0 of index to io addr */
+#define SW_ADDR1 0x02 /* bit 1 = bit 1 of index to io addr */
+#define SW_DISINT 0x04 /* bit 2 true if ints disabled */
+#define SW_INT0 0x08 /* bit 3 = bit 0 of index to interrupt */
+#define SW_INT1 0x10 /* bit 4 = bit 1 of index to interrupt */
+#define SW_INT_SHIFT 3 /* shift right this amount to right justify int bits */
+#define SW_SYNC_DOS5 0x20 /* bit 5 used by Always BIOS */
+#define SW_FLOPPY 0x40 /* bit 6 true if floppy enabled */
+#define SW_BIT7 0x80 /* bit 7 hardwired true (ground) */
+#define IO_LED_ON 0x09 /* W - turn on in2000 activity LED */
+#define IO_HARDWARE 0x0a /* R - read in2000 hardware rev, stop reset */
+#define IO_INTR_MASK 0x0c /* W - in2000 interrupt mask reg */
+#define IMASK_WD 0x01 /* WD33c93 interrupt mask */
+#define IMASK_FIFO 0x02 /* FIFO interrupt mask */
+
+/* wd register names */
+#define WD_OWN_ID 0x00
+#define WD_CONTROL 0x01
+#define WD_TIMEOUT_PERIOD 0x02
+#define WD_CDB_1 0x03
+#define WD_CDB_2 0x04
+#define WD_CDB_3 0x05
+#define WD_CDB_4 0x06
+#define WD_CDB_5 0x07
+#define WD_CDB_6 0x08
+#define WD_CDB_7 0x09
+#define WD_CDB_8 0x0a
+#define WD_CDB_9 0x0b
+#define WD_CDB_10 0x0c
+#define WD_CDB_11 0x0d
+#define WD_CDB_12 0x0e
+#define WD_TARGET_LUN 0x0f
+#define WD_COMMAND_PHASE 0x10
+#define WD_SYNCHRONOUS_TRANSFER 0x11
+#define WD_TRANSFER_COUNT_MSB 0x12
+#define WD_TRANSFER_COUNT 0x13
+#define WD_TRANSFER_COUNT_LSB 0x14
+#define WD_DESTINATION_ID 0x15
+#define WD_SOURCE_ID 0x16
+#define WD_SCSI_STATUS 0x17
+#define WD_COMMAND 0x18
+#define WD_DATA 0x19
+#define WD_QUEUE_TAG 0x1a
+#define WD_AUXILIARY_STATUS 0x1f
+
+/* WD commands */
+#define WD_CMD_RESET 0x00
+#define WD_CMD_ABORT 0x01
+#define WD_CMD_ASSERT_ATN 0x02
+#define WD_CMD_NEGATE_ACK 0x03
+#define WD_CMD_DISCONNECT 0x04
+#define WD_CMD_RESELECT 0x05
+#define WD_CMD_SEL_ATN 0x06
+#define WD_CMD_SEL 0x07
+#define WD_CMD_SEL_ATN_XFER 0x08
+#define WD_CMD_SEL_XFER 0x09
+#define WD_CMD_RESEL_RECEIVE 0x0a
+#define WD_CMD_RESEL_SEND 0x0b
+#define WD_CMD_WAIT_SEL_RECEIVE 0x0c
+#define WD_CMD_TRANS_ADDR 0x18
+#define WD_CMD_TRANS_INFO 0x20
+#define WD_CMD_TRANSFER_PAD 0x21
+#define WD_CMD_SBT_MODE 0x80
+
+/* SCSI Bus Phases */
+#define PHS_DATA_OUT 0x00
+#define PHS_DATA_IN 0x01
+#define PHS_COMMAND 0x02
+#define PHS_STATUS 0x03
+#define PHS_MESS_OUT 0x06
+#define PHS_MESS_IN 0x07
+
+/* Command Status Register definitions */
+
+ /* reset state interrupts */
+#define CSR_RESET 0x00
+#define CSR_RESET_AF 0x01
+
+ /* successful completion interrupts */
+#define CSR_RESELECT 0x10
+#define CSR_SELECT 0x11
+#define CSR_SEL_XFER_DONE 0x16
+#define CSR_XFER_DONE 0x18
+
+ /* paused or aborted interrupts */
+#define CSR_MSGIN 0x20
+#define CSR_SDP 0x21
+#define CSR_SEL_ABORT 0x22
+#define CSR_RESEL_ABORT 0x25
+#define CSR_RESEL_ABORT_AM 0x27
+#define CSR_ABORT 0x28
+
+ /* terminated interrupts */
+#define CSR_INVALID 0x40
+#define CSR_UNEXP_DISC 0x41
+#define CSR_TIMEOUT 0x42
+#define CSR_PARITY 0x43
+#define CSR_PARITY_ATN 0x44
+#define CSR_BAD_STATUS 0x45
+#define CSR_UNEXP 0x48
+
+ /* service required interrupts */
+#define CSR_RESEL 0x80
+#define CSR_RESEL_AM 0x81
+#define CSR_DISC 0x85
+#define CSR_SRV_REQ 0x88
+
+ /* Own ID/CDB Size register */
+#define OWNID_EAF 0x08
+#define OWNID_EHP 0x10
+#define OWNID_RAF 0x20
+#define OWNID_FS_8 0x00
+#define OWNID_FS_12 0x40
+#define OWNID_FS_16 0x80
+
+ /* Control register */
+#define CTRL_HSP 0x01
+#define CTRL_HA 0x02
+#define CTRL_IDI 0x04
+#define CTRL_EDI 0x08
+#define CTRL_HHP 0x10
+#define CTRL_POLLED 0x00
+#define CTRL_BURST 0x20
+#define CTRL_BUS 0x40
+#define CTRL_DMA 0x80
+
+ /* Timeout Period register */
+#define TIMEOUT_PERIOD_VALUE 20 /* results in 200 ms. */
+
+ /* Synchronous Transfer Register */
+#define STR_FSS 0x80
+
+ /* Destination ID register */
+#define DSTID_DPD 0x40
+#define DATA_OUT_DIR 0
+#define DATA_IN_DIR 1
+#define DSTID_SCC 0x80
+
+ /* Source ID register */
+#define SRCID_MASK 0x07
+#define SRCID_SIV 0x08
+#define SRCID_DSP 0x20
+#define SRCID_ES 0x40
+#define SRCID_ER 0x80
+
+
+
+#define ILLEGAL_STATUS_BYTE 0xff
+
+
+#define DEFAULT_SX_PER 500 /* (ns) fairly safe */
+#define DEFAULT_SX_OFF 0 /* aka async */
+
+#define OPTIMUM_SX_PER 252 /* (ns) best we can do (mult-of-4) */
+#define OPTIMUM_SX_OFF 12 /* size of in2000 fifo */
+
+struct sx_period {
+ unsigned int period_ns;
+ uchar reg_value;
+ };
+
+
+struct IN2000_hostdata {
+ struct Scsi_Host *next;
+ uchar chip; /* what kind of wd33c93 chip? */
+ uchar microcode; /* microcode rev if 'B' */
+ unsigned short io_base; /* IO port base */
+ unsigned int dip_switch; /* dip switch settings */
+ unsigned int hrev; /* hardware revision of card */
+ volatile uchar busy[8]; /* index = target, bit = lun */
+ volatile Scsi_Cmnd *input_Q; /* commands waiting to be started */
+ volatile Scsi_Cmnd *selecting; /* trying to select this command */
+ volatile Scsi_Cmnd *connected; /* currently connected command */
+ volatile Scsi_Cmnd *disconnected_Q;/* commands waiting for reconnect */
+ uchar state; /* what we are currently doing */
+ uchar fifo; /* what the FIFO is up to */
+ uchar level2; /* extent to which Level-2 commands are used */
+ uchar disconnect; /* disconnect/reselect policy */
+ unsigned int args; /* set from command-line argument */
+ uchar incoming_msg[8]; /* filled during message_in phase */
+ int incoming_ptr; /* mainly used with EXTENDED messages */
+ uchar outgoing_msg[8]; /* send this during next message_out */
+ int outgoing_len; /* length of outgoing message */
+ unsigned int default_sx_per; /* default transfer period for SCSI bus */
+ uchar sync_xfer[8]; /* sync_xfer reg settings per target */
+ uchar sync_stat[8]; /* status of sync negotiation per target */
+ uchar sync_off; /* bit mask: don't use sync with these targets */
+#ifdef PROC_INTERFACE
+ uchar proc; /* bit mask: what's in proc output */
+#ifdef PROC_STATISTICS
+ unsigned long cmd_cnt[8]; /* # of commands issued per target */
+ unsigned long int_cnt; /* # of interrupts serviced */
+ unsigned long disc_allowed_cnt[8]; /* # of disconnects allowed per target */
+ unsigned long disc_done_cnt[8]; /* # of disconnects done per target*/
+#endif
+#endif
+ };
+
+
+/* defines for hostdata->chip */
+
+#define C_WD33C93 0
+#define C_WD33C93A 1
+#define C_WD33C93B 2
+#define C_UNKNOWN_CHIP 100
+
+/* defines for hostdata->state */
+
+#define S_UNCONNECTED 0
+#define S_SELECTING 1
+#define S_RUNNING_LEVEL2 2
+#define S_CONNECTED 3
+#define S_PRE_TMP_DISC 4
+#define S_PRE_CMP_DISC 5
+
+/* defines for hostdata->fifo */
+
+#define FI_FIFO_UNUSED 0
+#define FI_FIFO_READING 1
+#define FI_FIFO_WRITING 2
+
+/* defines for hostdata->level2 */
+/* NOTE: only the first 3 are trustworthy at this point -
+ * having trouble when more than 1 device is reading/writing
+ * at the same time...
+ */
+
+#define L2_NONE 0 /* no combination commands - we get lots of ints */
+#define L2_SELECT 1 /* start with SEL_ATN_XFER, but never resume it */
+#define L2_BASIC 2 /* resume after STATUS ints & RDP messages */
+#define L2_DATA 3 /* resume after DATA_IN/OUT ints */
+#define L2_MOST 4 /* resume after anything except a RESELECT int */
+#define L2_RESELECT 5 /* resume after everything, including RESELECT ints */
+#define L2_ALL 6 /* always resume */
+
+/* defines for hostdata->disconnect */
+
+#define DIS_NEVER 0
+#define DIS_ADAPTIVE 1
+#define DIS_ALWAYS 2
+
+/* defines for hostdata->args */
+
+#define DB_TEST 1<<0
+#define DB_FIFO 1<<1
+#define DB_QUEUE_COMMAND 1<<2
+#define DB_EXECUTE 1<<3
+#define DB_INTR 1<<4
+#define DB_TRANSFER 1<<5
+#define DB_MASK 0x3f
+
+#define A_NO_SCSI_RESET 1<<15
+
+
+/* defines for hostdata->sync_stat[] */
+
+#define SS_UNSET 0
+#define SS_FIRST 1
+#define SS_WAITING 2
+#define SS_SET 3
+
+/* defines for hostdata->proc */
+
+#define PR_VERSION 1<<0
+#define PR_INFO 1<<1
+#define PR_STATISTICS 1<<2
+#define PR_CONNECTED 1<<3
+#define PR_INPUTQ 1<<4
+#define PR_DISCQ 1<<5
+#define PR_TEST 1<<6
+#define PR_STOP 1<<7
+
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < 0x020100 /* 2.0.xx */
+# define in2000__INITFUNC(function) function
+# define in2000__INIT
+# define in2000__INITDATA
+# define CLISPIN_LOCK(flags) do { save_flags(flags); cli(); } while(0)
+# define CLISPIN_UNLOCK(flags) restore_flags(flags)
+#else /* 2.1.xxx */
+# include <linux/init.h>
+# include <asm/spinlock.h>
+# define in2000__INITFUNC(function) __initfunc(function)
+# define in2000__INIT __init
+# define in2000__INITDATA __initdata
+# define CLISPIN_LOCK(flags) spin_lock_irqsave(&io_request_lock, flags)
+# define CLISPIN_UNLOCK(flags) spin_unlock_irqrestore(&io_request_lock, flags)
+#endif
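+/* CLISPIN_LOCK()/CLISPIN_UNLOCK() let code such as the interrupt handler use
+ * one locking idiom on both kernel generations: save_flags()/cli() and
+ * restore_flags() on 2.0.xx, spin_lock_irqsave()/spin_unlock_irqrestore()
+ * on the global io_request_lock on 2.1.xx.
+ */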
+
+
+int in2000_detect(Scsi_Host_Template *) in2000__INIT;
+int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int in2000_abort(Scsi_Cmnd *);
+void in2000_setup(char *, int *) in2000__INIT;
+int in2000_proc_info(char *, char **, off_t, int, int, int);
+extern struct proc_dir_entry proc_scsi_in2000;
+int in2000_biosparam(struct scsi_disk *, kdev_t, int *);
+int in2000_reset(Scsi_Cmnd *, unsigned int);
+
+
+#define IN2000_CAN_Q 16
+#define IN2000_SG SG_ALL
+#define IN2000_CPL 2
+#define IN2000_HOST_ID 7
+
+#if LINUX_VERSION_CODE < 0x020100 /* 2.0.xx */
+
+#define IN2000 { NULL, /* link pointer for modules */ \
+ NULL, /* usage_count for modules */ \
+ &proc_scsi_in2000, /* pointer to /proc/scsi directory entry */ \
+ in2000_proc_info, /* pointer to proc info function */ \
+ "Always IN2000", /* device name */ \
+ in2000_detect, /* returns number of in2000's found */ \
+ NULL, /* optional unload function for modules */ \
+ NULL, /* optional misc info function */ \
+ NULL, /* send scsi command, wait for completion */ \
+ in2000_queuecommand, /* queue scsi command, don't wait */ \
+ in2000_abort, /* abort current command */ \
+ in2000_reset, /* reset scsi bus */ \
+ NULL, /* slave_attach - unused */ \
+ in2000_biosparam, /* figures out BIOS parameters for lilo, etc */ \
+ IN2000_CAN_Q, /* max commands we can queue up */ \
+ IN2000_HOST_ID, /* host-adapter scsi id */ \
+ IN2000_SG, /* scatter-gather table size */ \
+ IN2000_CPL, /* commands per lun */ \
+ 0, /* board counter */ \
+ 0, /* unchecked dma */ \
+ DISABLE_CLUSTERING \
+ }
+
+#else /* 2.1.xxx */
+
+#define IN2000 { proc_dir: &proc_scsi_in2000, /* pointer to /proc/scsi directory entry */ \
+ proc_info: in2000_proc_info, /* pointer to proc info function */ \
+ name: "Always IN2000", /* device name */ \
+ detect: in2000_detect, /* returns number of in2000's found */ \
+ queuecommand: in2000_queuecommand, /* queue scsi command, don't wait */ \
+ abort: in2000_abort, /* abort current command */ \
+ reset: in2000_reset, /* reset scsi bus */ \
+ bios_param: in2000_biosparam, /* figures out BIOS parameters for lilo, etc */ \
+ can_queue: IN2000_CAN_Q, /* max commands we can queue up */ \
+ this_id: IN2000_HOST_ID, /* host-adapter scsi id */ \
+ sg_tablesize: IN2000_SG, /* scatter-gather table size */ \
+ cmd_per_lun: IN2000_CPL, /* commands per lun */ \
+ use_clustering: DISABLE_CLUSTERING, /* ENABLE_CLUSTERING may speed things up */ \
+ use_new_eh_code: 0 /* new error code - not using it yet */ \
+ }
+
+#endif
+
+
+#endif /* IN2000_H */
diff --git a/linux/src/drivers/scsi/ncr53c8xx.c b/linux/src/drivers/scsi/ncr53c8xx.c
new file mode 100644
index 0000000..0a58429
--- /dev/null
+++ b/linux/src/drivers/scsi/ncr53c8xx.c
@@ -0,0 +1,10795 @@
+/******************************************************************************
+** Device driver for the PCI-SCSI NCR538XX controller family.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
+** and is currently maintained by
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+** And has been ported to NetBSD by
+** Charles M. Hannum <mycroft@gnu.ai.mit.edu>
+**
+**-----------------------------------------------------------------------------
+**
+** Brief history
+**
+** December 10 1995 by Gerard Roudier:
+** Initial port to Linux.
+**
+** June 23 1996 by Gerard Roudier:
+** Support for 64 bits architectures (Alpha).
+**
+** November 30 1996 by Gerard Roudier:
+** Support for Fast-20 scsi.
+** Support for large DMA fifo and 128 dwords bursting.
+**
+** February 27 1997 by Gerard Roudier:
+** Support for Fast-40 scsi.
+** Support for on-Board RAM.
+**
+** May 3 1997 by Gerard Roudier:
+** Full support for scsi scripts instructions pre-fetching.
+**
+** May 19 1997 by Richard Waltham <dormouse@farsrobt.demon.co.uk>:
+** Support for NvRAM detection and reading.
+**
+** August 18 1997 by Cort <cort@cs.nmt.edu>:
+** Support for Power/PC (Big Endian).
+**
+*******************************************************************************
+*/
+
+/*
+** 30 January 1998, version 2.5f.1
+**
+** Supported SCSI-II features:
+** Synchronous negotiation
+** Wide negotiation (depends on the NCR Chip)
+** Enable disconnection
+** Tagged command queuing
+** Parity checking
+** Etc...
+**
+** Supported NCR chips:
+** 53C810 (8 bits, Fast SCSI-2, no rom BIOS)
+** 53C815 (8 bits, Fast SCSI-2, on board rom BIOS)
+** 53C820 (Wide, Fast SCSI-2, no rom BIOS)
+** 53C825 (Wide, Fast SCSI-2, on board rom BIOS)
+** 53C860 (8 bits, Fast 20, no rom BIOS)
+** 53C875 (Wide, Fast 20, on board rom BIOS)
+** 53C895 (Wide, Fast 40, on board rom BIOS)
+**
+** Other features:
+** Memory mapped IO (linux-1.3.X and above only)
+** Module
+** Shared IRQ (since linux-1.3.72)
+*/
+
+#define SCSI_NCR_DEBUG_FLAGS (0)
+
+#define NCR_GETCC_WITHMSG
+
+/*==========================================================
+**
+** Include files
+**
+**==========================================================
+*/
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
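+/* e.g. LinuxVersionCode(2,1,35) == 0x020123, directly comparable with
+   LINUX_VERSION_CODE. */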
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/stat.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+#include <linux/blk.h>
+#else
+#include "../block/blk.h"
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,35)
+#include <linux/init.h>
+#else
+#ifndef __initdata
+#define __initdata
+#endif
+#ifndef __initfunc
+#define __initfunc(__arginit) __arginit
+#endif
+#endif
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include "sd.h"
+
+#include <linux/types.h>
+
+/*
+** Define the BSD style u_int32 type
+*/
+typedef u32 u_int32;
+
+#include "ncr53c8xx.h"
+
+/*==========================================================
+**
+** Configuration and Debugging
+**
+**==========================================================
+*/
+
+/*
+** SCSI address of this device.
+** The boot routines should have set it.
+** If not, use this.
+*/
+
+#ifndef SCSI_NCR_MYADDR
+#define SCSI_NCR_MYADDR (7)
+#endif
+
+/*
+** The maximum number of tags per logical unit.
+** Used only for disk devices that support tags.
+*/
+
+#ifndef SCSI_NCR_MAX_TAGS
+#define SCSI_NCR_MAX_TAGS (4)
+#endif
+
+/*
+** Number of targets supported by the driver.
+** n permits target numbers 0..n-1.
+** Default is 7, meaning targets #0..#6.
+** #7 .. is myself.
+*/
+
+#ifdef SCSI_NCR_MAX_TARGET
+#define MAX_TARGET (SCSI_NCR_MAX_TARGET)
+#else
+#define MAX_TARGET (16)
+#endif
+
+/*
+** Number of logic units supported by the driver.
+** n enables logic unit numbers 0..n-1.
+** The common SCSI devices require only
+** one lun, so take 1 as the default.
+*/
+
+#ifdef SCSI_NCR_MAX_LUN
+#define MAX_LUN SCSI_NCR_MAX_LUN
+#else
+#define MAX_LUN (1)
+#endif
+
+/*
+** Asynchronous pre-scaler (ns). Shall be 40
+*/
+
+#ifndef SCSI_NCR_MIN_ASYNC
+#define SCSI_NCR_MIN_ASYNC (40)
+#endif
+
+/*
+** The maximum number of jobs scheduled for starting.
+** There should be one slot per target, and one slot
+** for each tag of each target in use.
+** The calculation below is actually quite silly ...
+*/
+
+#ifdef SCSI_NCR_CAN_QUEUE
+#define MAX_START (SCSI_NCR_CAN_QUEUE + 4)
+#else
+#define MAX_START (MAX_TARGET + 7 * SCSI_NCR_MAX_TAGS)
+#endif
+
+/*
+** The maximum number of segments a transfer is split into.
+*/
+
+#define MAX_SCATTER (SCSI_NCR_MAX_SCATTER)
+
+/*
+** Io mapped or memory mapped.
+*/
+
+#if defined(SCSI_NCR_IOMAPPED)
+#define NCR_IOMAPPED
+#endif
+
+/*
+** other
+*/
+
+#define NCR_SNOOP_TIMEOUT (1000000)
+
+/*==========================================================
+**
+** Defines for Linux.
+**
+** Linux and Bsd kernel functions are quite different.
+** These defines allow a minimum change of the original
+** code.
+**
+**==========================================================
+*/
+
+ /*
+ ** Obvious definitions
+ */
+
+#define printf printk
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef MACH
+typedef u_long vm_offset_t;
+typedef int vm_size_t;
+#endif
+
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#define bzero(d, n) memset((d), 0, (n))
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*
+** Address translation
+**
+** On Linux 1.3.X, virt_to_bus() must be used to translate
+** virtual memory addresses of the kernel data segment into
+** IO bus addresses.
+** On the i386 architecture, IO bus addresses match the physical
+** addresses, but on other architectures they can be different.
+** In the original Bsd driver, vtophys() is called to translate
+** data addresses to IO bus addresses. In order to minimize
+** change, I decided to define vtophys() as virt_to_bus().
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+#define vtophys(p) virt_to_bus(p)
+
+/*
+** Memory mapped IO
+**
+** Since linux-2.1, we must use ioremap() to map the io memory space and
+** iounmap() to unmap it. That allows portability.
+** Linux 1.3.X and 2.0.X allow remapping of physical page addresses greater
+** than the highest physical memory address to kernel virtual pages with
+** vremap() / vfree(). That was not portable but worked on the i386
+** architecture.
+*/
+
+#ifndef NCR_IOMAPPED
+__initfunc(
+static vm_offset_t remap_pci_mem(u_long base, u_long size)
+)
+{
+ u_long page_base = ((u_long) base) & PAGE_MASK;
+ u_long page_offs = ((u_long) base) - page_base;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+ u_long page_remapped = (u_long) ioremap(page_base, page_offs+size);
+#else
+ u_long page_remapped = (u_long) vremap(page_base, page_offs+size);
+#endif
+
+ return (vm_offset_t) (page_remapped ? (page_remapped + page_offs) : 0UL);
+}
+
+__initfunc(
+static void unmap_pci_mem(vm_offset_t vaddr, u_long size)
+)
+{
+ if (vaddr)
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+ iounmap((void *) (vaddr & PAGE_MASK));
+#else
+ vfree((void *) (vaddr & PAGE_MASK));
+#endif
+}
+#endif /* !NCR_IOMAPPED */
+
+#else /* linux-1.2.13 */
+
+/*
+** Linux 1.2.X assumes that addresses (virtual, physical, bus)
+** are the same.
+**
+** I have not found a way to do MMIO. It seems that only processes can
+** map high physical pages to virtual addresses (X servers can do MMIO).
+*/
+
+#define vtophys(p) ((u_long) (p))
+#endif
+
+/*
+** Insert a delay in micro-seconds.
+*/
+
+static void DELAY(long us)
+{
+ for (;us>1000;us-=1000) udelay(1000);
+ if (us) udelay(us);
+}
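+/*
+** DELAY() busy-waits in 1 ms slices, presumably because udelay() is only
+** intended for short delays; the remainder (< 1 ms) is done in one call.
+*/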
+
+/*
+** Internal data structure allocation.
+**
+** The Linux scsi memory pool is poor and adjusted for the needs of the
+** middle-level scsi driver.
+** We allocate our control blocks in the kernel memory pool
+** to avoid scsi pool shortage.
+** I have noticed that kmalloc() may return NULL during host attach under
+** Linux 1.2.13, but this ncr driver is reliable enough to
+** accommodate that.
+**
+** kmalloc() only ensures 8-byte boundary alignment.
+** The NCR needs better alignment for cache line bursting.
+** The global header is moved between the NCB and CCBs and needs
+** origin and destination addresses to have the same lower four bits.
+**
+** We use 32 boundary alignment for NCB and CCBs and offset multiple
+** of 32 for global header fields. That's too much but at least enough.
+*/
+
+#define ALIGN_SIZE(shift) (1UL << shift)
+#define ALIGN_MASK(shift) (~(ALIGN_SIZE(shift)-1))
+
+#define NCB_ALIGN_SHIFT 5
+#define CCB_ALIGN_SHIFT 5
+#define LCB_ALIGN_SHIFT 5
+#define SCR_ALIGN_SHIFT 5
+
+#define NCB_ALIGN_SIZE ALIGN_SIZE(NCB_ALIGN_SHIFT)
+#define NCB_ALIGN_MASK ALIGN_MASK(NCB_ALIGN_SHIFT)
+#define CCB_ALIGN_SIZE ALIGN_SIZE(CCB_ALIGN_SHIFT)
+#define CCB_ALIGN_MASK ALIGN_MASK(CCB_ALIGN_SHIFT)
+#define SCR_ALIGN_SIZE ALIGN_SIZE(SCR_ALIGN_SHIFT)
+#define SCR_ALIGN_MASK ALIGN_MASK(SCR_ALIGN_SHIFT)
+
+static void *m_alloc(int size, int a_shift)
+{
+ u_long addr;
+ void *ptr;
+ u_long a_size, a_mask;
+
+ if (a_shift < 3)
+ a_shift = 3;
+
+ a_size = ALIGN_SIZE(a_shift);
+ a_mask = ALIGN_MASK(a_shift);
+
+ ptr = (void *) kmalloc(size + a_size, GFP_ATOMIC);
+ if (ptr) {
+ addr = (((u_long) ptr) + a_size) & a_mask;
+ *((void **) (addr - sizeof(void *))) = ptr;
+ ptr = (void *) addr;
+ }
+
+ return ptr;
+}
+
+#ifdef MODULE
+static void m_free(void *ptr, int size)
+{
+ u_long addr;
+
+ if (ptr) {
+ addr = (u_long) ptr;
+ ptr = *((void **) (addr - sizeof(void *)));
+
+ kfree(ptr);
+ }
+}
+#endif
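+/*
+** m_alloc() over-allocates by the alignment size, rounds the kmalloc()
+** pointer up to the requested boundary and stores the original pointer in
+** the word just below the address it returns; m_free() fetches that pointer
+** back before calling kfree().
+*/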
+
+/*
+** Transfer direction
+**
+** Low-level scsi drivers under Linux do not receive the expected
+** data transfer direction from upper scsi drivers.
+** The driver will only check actual data direction for common
+** scsi opcodes. Other ones may cause problem, since they may
+** depend on device type or be vendor specific.
+** I would prefer to never trust the device for data direction,
+** but that is not possible.
+**
+** The original driver requires the expected direction to be known.
+** The Linux version of the driver has been enhanced in order to
+** be able to transfer data in the direction chosen by the target.
+*/
+
+#define XferNone 0
+#define XferIn 1
+#define XferOut 2
+#define XferBoth 3
+static int guess_xfer_direction(int opcode);
+
+/*
+** Head of list of NCR boards
+**
+** For kernel version < 1.3.70, host is retrieved by its irq level.
+** For later kernels, the internal host control block address
+** (struct ncb) is used as device id parameter of the irq stuff.
+*/
+
+static struct Scsi_Host *first_host = NULL;
+static Scsi_Host_Template *the_template = NULL;
+
+
+/*
+** /proc directory entry and proc_info function
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+struct proc_dir_entry proc_scsi_ncr53c8xx = {
+ PROC_SCSI_NCR53C8XX, 9, "ncr53c8xx",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+# ifdef SCSI_NCR_PROC_INFO_SUPPORT
+int ncr53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func);
+# endif
+#endif
+
+/*
+** Table of target capabilities.
+**
+** This bitmap is ANDed with byte 7 of the inquiry data on completion of an
+** INQUIRY command.
+** The driver never sees zeroed bits and will ignore the corresponding
+** capabilities of the target.
+*/
+
+static struct {
+ unsigned char and_map[MAX_TARGET];
+} target_capabilities[SCSI_NCR_MAX_HOST] = { NCR53C8XX_TARGET_CAPABILITIES };
+
+/*
+** Driver setup.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+struct ncr_driver_setup {
+ unsigned master_parity : 1;
+ unsigned scsi_parity : 1;
+ unsigned disconnection : 1;
+ unsigned special_features : 2;
+ unsigned ultra_scsi : 2;
+ unsigned force_sync_nego: 1;
+ unsigned reverse_probe: 1;
+ unsigned pci_fix_up: 4;
+ u_char use_nvram;
+ u_char verbose;
+ u_char default_tags;
+ u_short default_sync;
+ u_short debug;
+ u_char burst_max;
+ u_char led_pin;
+ u_char max_wide;
+ u_char settle_delay;
+ u_char diff_support;
+ u_char irqm;
+ u_char bus_check;
+};
+
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+#ifdef MODULE
+char *ncr53c8xx = 0; /* command line passed by insmod */
+#endif
+#endif
+
+/*
+** Other Linux definitions
+*/
+
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) + ((scsi_code) & 0x7f))
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+static void ncr53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist);
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+static void ncr53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs);
+#else
+static void ncr53c8xx_intr(int irq, struct pt_regs * regs);
+#endif
+
+static void ncr53c8xx_timeout(unsigned long np);
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+** Symbios NvRAM data format
+*/
+#define SYMBIOS_NVRAM_SIZE 368
+#define SYMBIOS_NVRAM_ADDRESS 0x100
+
+struct Symbios_nvram {
+/* Header 6 bytes */
+ u_short start_marker; /* 0x0000 */
+ u_short byte_count; /* excluding header/trailer */
+ u_short checksum;
+
+/* Controller set up 20 bytes */
+ u_short word0; /* 0x3000 */
+ u_short word2; /* 0x0000 */
+ u_short word4; /* 0x0000 */
+ u_short flags;
+#define SYMBIOS_SCAM_ENABLE (1)
+#define SYMBIOS_PARITY_ENABLE (1<<1)
+#define SYMBIOS_VERBOSE_MSGS (1<<2)
+ u_short flags1;
+#define SYMBIOS_SCAN_HI_LO (1)
+ u_short word10; /* 0x00 */
+ u_short flags3; /* 0x00 */
+#define SYMBIOS_REMOVABLE_FLAGS (3) /* 0=none, 1=bootable, 2=all */
+ u_char host_id;
+ u_char byte15; /* 0x04 */
+ u_short word16; /* 0x0410 */
+ u_short word18; /* 0x0000 */
+
+/* Boot order 14 bytes * 4 */
+ struct Symbios_host{
+ u_char word0; /* 0x0004:ok / 0x0000:nok */
+ u_short device_id; /* PCI device id */
+ u_short vendor_id; /* PCI vendor id */
+ u_char byte6; /* 0x00 */
+ u_char device_fn; /* PCI device/function number << 3*/
+ u_short word8;
+ u_short flags;
+#define SYMBIOS_INIT_SCAN_AT_BOOT (1)
+ u_short io_port; /* PCI io_port address */
+ } host[4];
+
+/* Targets 8 bytes * 16 */
+ struct Symbios_target {
+ u_short flags;
+#define SYMBIOS_DISCONNECT_ENABLE (1)
+#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1)
+#define SYMBIOS_SCAN_LUNS (1<<2)
+#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3)
+ u_char bus_width; /* 0x08/0x10 */
+ u_char sync_offset;
+ u_char sync_period; /* 4*period factor */
+ u_char byte6; /* 0x00 */
+ u_short timeout;
+ } target[16];
+ u_char spare_devices[19*8];
+ u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
+};
+typedef struct Symbios_nvram Symbios_nvram;
+typedef struct Symbios_host Symbios_host;
+typedef struct Symbios_target Symbios_target;
+
+/*
+** Tekram NvRAM data format.
+*/
+#define TEKRAM_NVRAM_SIZE 64
+#define TEKRAM_NVRAM_ADDRESS 0
+
+struct Tekram_nvram {
+ struct Tekram_target {
+ u_char flags;
+#define TEKRAM_PARITY_CHECK (1)
+#define TEKRAM_SYNC_NEGO (1<<1)
+#define TEKRAM_DISCONNECT_ENABLE (1<<2)
+#define TEKRAM_START_CMD (1<<3)
+#define TEKRAM_TAGGED_COMMANDS (1<<4)
+#define TEKRAM_WIDE_NEGO (1<<5)
+ u_char sync_index;
+ u_short word2;
+ } target[16];
+ u_char host_id;
+ u_char flags;
+#define TEKRAM_MORE_THAN_2_DRIVES (1)
+#define TEKRAM_DRIVES_SUP_1GB (1<<1)
+#define TEKRAM_RESET_ON_POWER_ON (1<<2)
+#define TEKRAM_ACTIVE_NEGATION (1<<3)
+#define TEKRAM_IMMEDIATE_SEEK (1<<4)
+#define TEKRAM_SCAN_LUNS (1<<5)
+#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; 1: boot device; 2:all */
+ u_char boot_delay_index;
+ u_char max_tags_index;
+ u_short flags1;
+#define TEKRAM_F2_F6_ENABLED (1)
+ u_short spare[29];
+};
+typedef struct Tekram_nvram Tekram_nvram;
+typedef struct Tekram_target Tekram_target;
+
+static u_char Tekram_sync[12] __initdata = {25,31,37,43,50,62,75,125,12,15,18,21};
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Structures used by ncr53c8xx_detect/ncr53c8xx_pci_init to
+** transmit device configuration to the ncr_attach() function.
+*/
+typedef struct {
+ int bus;
+ u_char device_fn;
+ u_int base;
+ u_int base_2;
+ u_int io_port;
+ int irq;
+/* port and reg fields to use INB, OUTB macros */
+ u_int port;
+ volatile struct ncr_reg *reg;
+} ncr_slot;
+
+typedef struct {
+ int type;
+#define SCSI_NCR_SYMBIOS_NVRAM (1)
+#define SCSI_NCR_TEKRAM_NVRAM (2)
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ } data;
+#endif
+} ncr_nvram;
+
+/*
+** Structure used by ncr53c8xx_detect/ncr53c8xx_pci_init
+** to save data on each detected board for ncr_attach().
+*/
+typedef struct {
+ ncr_slot slot;
+ ncr_chip chip;
+ ncr_nvram *nvram;
+ int attach_done;
+} ncr_device;
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_POLL (0x0004)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_SCATTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_FREEZE (0x0800)
+#define DEBUG_RESTART (0x1000)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)printf(\
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
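+/*
+** Note that this assert() only logs the failed expression; it does not
+** halt the machine.
+*/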
+
+/*==========================================================
+**
+** Big/Little endian support.
+**
+**==========================================================
+*/
+
+/*
+** If the NCR uses big endian addressing mode over the
+** PCI, actual io register addresses for byte and word
+** accesses must be changed according to lane routing.
+** Btw, the ncr_offb() and ncr_offw() macros only apply to
+** constants and so do not generate bloated code.
+*/
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+
+#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
+#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
+
+#else
+
+#define ncr_offb(o) (o)
+#define ncr_offw(o) (o)
+
+#endif
+
+/*
+** If the CPU and the NCR use the same endian-ness addressing,
+** no byte reordering is needed for script patching.
+** Macro cpu_to_scr() is to be used for script patching.
+** Macro scr_to_cpu() is to be used for getting a DWORD
+** from the script.
+*/
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_be32(dw)
+#define scr_to_cpu(dw) be32_to_cpu(dw)
+
+#else
+
+#define cpu_to_scr(dw) (dw)
+#define scr_to_cpu(dw) (dw)
+
+#endif
+
+/*==========================================================
+**
+** Access to the controller chip.
+**
+** If NCR_IOMAPPED is defined, only IO are used by the driver.
+**
+**==========================================================
+*/
+
+/*
+** If the CPU and the NCR use the same endian-ness addressing,
+** no byte reordering is needed for accessing chip io
+** registers. Functions suffixed by '_raw' are assumed
+** to access the chip over the PCI without doing byte
+** reordering. Functions suffixed by '_l2b' are
+** assumed to perform little-endian to big-endian byte
+** reordering, those suffixed by '_b2l' blah, blah,
+** blah, ...
+*/
+
+#if defined(NCR_IOMAPPED)
+
+/*
+** IO mapped only input / output
+*/
+
+#define INB_OFF(o) inb (np->port + ncr_offb(o))
+#define OUTB_OFF(o, val) outb ((val), np->port + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_l2b (np->port + ncr_offw(o))
+#define INL_OFF(o) inl_l2b (np->port + (o))
+
+#define OUTW_OFF(o, val) outw_b2l ((val), np->port + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_b2l ((val), np->port + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_b2l (np->port + ncr_offw(o))
+#define INL_OFF(o) inl_b2l (np->port + (o))
+
+#define OUTW_OFF(o, val) outw_l2b ((val), np->port + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_l2b ((val), np->port + (o))
+
+#else
+
+#define INW_OFF(o) inw_raw (np->port + ncr_offw(o))
+#define INL_OFF(o) inl_raw (np->port + (o))
+
+#define OUTW_OFF(o, val) outw_raw ((val), np->port + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_raw ((val), np->port + (o))
+
+#endif /* ENDIANs */
+
+#else /* defined NCR_IOMAPPED */
+
+/*
+** MEMORY mapped IO input / output
+*/
+
+#define INB_OFF(o) readb((char *)np->reg + ncr_offb(o))
+#define OUTB_OFF(o, val) writeb((val), (char *)np->reg + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
+
+#else
+
+#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_raw((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
+
+#endif
+
+#endif /* defined NCR_IOMAPPED */
+
+#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
+#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
+#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
+
+#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
+
+/*
+** Set bit field ON, OFF
+*/
+
+#define OUTONB(r, m) OUTB(r, INB(r) | (m))
+#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
+#define OUTONW(r, m) OUTW(r, INW(r) | (m))
+#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
+#define OUTONL(r, m) OUTL(r, INL(r) | (m))
+#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
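+
+/*
+** For illustration only: the INB(r) / OUTB(r, val) macros above take a
+** field name of struct ncr_reg and expand through the *_OFF accessors,
+** so the same source works for IO mapped and MEMORY mapped access.
+** Assuming the nc_scr1 register field, for instance:
+**
+**	INB (nc_scr1)		expands to INB_OFF (offsetof (struct ncr_reg, nc_scr1))
+**	OUTONB (nc_scr1, 0x01)	is a read-modify-write that sets bit 0
+*/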
+
+
+/*==========================================================
+**
+** Command control block states.
+**
+**==========================================================
+*/
+
+#define HS_IDLE (0)
+#define HS_BUSY (1)
+#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
+#define HS_DISCONNECT (3) /* Disconnected by target */
+
+#define HS_COMPLETE (4)
+#define HS_SEL_TIMEOUT (5) /* Selection timeout */
+#define HS_RESET (6) /* SCSI reset */
+#define HS_ABORTED (7) /* Transfer aborted */
+#define HS_TIMEOUT (8) /* Software timeout */
+#define HS_FAIL (9) /* SCSI or PCI bus errors */
+#define HS_UNEXPECTED (10) /* Unexpected disconnect */
+
+#define HS_DONEMASK (0xfc)
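+
+/*
+** With the values above, a job is considered finished as soon as
+** (host_status & HS_DONEMASK) != 0, i.e. host_status >= HS_COMPLETE;
+** the scripts test for this with MASK (0, HS_DONEMASK).
+*/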
+
+/*==========================================================
+**
+** Software Interrupt Codes
+**
+**==========================================================
+*/
+
+#define SIR_SENSE_RESTART (1)
+#define SIR_SENSE_FAILED (2)
+#define SIR_STALL_RESTART (3)
+#define SIR_STALL_QUEUE (4)
+#define SIR_NEGO_SYNC (5)
+#define SIR_NEGO_WIDE (6)
+#define SIR_NEGO_FAILED (7)
+#define SIR_NEGO_PROTO (8)
+#define SIR_REJECT_RECEIVED (9)
+#define SIR_REJECT_SENT (10)
+#define SIR_IGN_RESIDUE (11)
+#define SIR_MISSING_SAVE (12)
+#define SIR_DATA_IO_IS_OUT (13)
+#define SIR_DATA_IO_IS_IN (14)
+#define SIR_MAX (14)
+
+/*==========================================================
+**
+** Extended error codes.
+** xerr_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define XE_OK (0)
+#define XE_EXTRA_DATA (1) /* unexpected data phase */
+#define XE_BAD_PHASE (2) /* illegal phase (4/5) */
+
+/*==========================================================
+**
+** Negotiation status.
+** nego_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define NS_SYNC (1)
+#define NS_WIDE (2)
+
+/*==========================================================
+**
+** "Special features" of targets.
+** quirks field of struct tcb.
+** actualquirks field of struct ccb.
+**
+**==========================================================
+*/
+
+#define QUIRK_AUTOSAVE (0x01)
+#define QUIRK_NOMSG (0x02)
+#define QUIRK_NOSYNC (0x10)
+#define QUIRK_NOWIDE16 (0x20)
+#define QUIRK_UPDATE (0x80)
+
+/*==========================================================
+**
+** Capability bits in INQUIRY response byte 7.
+**
+**==========================================================
+*/
+
+#define INQ7_QUEUE (0x02)
+#define INQ7_SYNC (0x10)
+#define INQ7_WIDE16 (0x20)
+
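+/*
+** For illustration: these bits are checked against byte 7 of the
+** INQUIRY data kept in the per-target inqdata[] buffer, e.g.
+** (tp->inqdata[7] & INQ7_WIDE16) tells whether the target claims
+** 16-bit wide transfer support.  The exact test shown is only an
+** example, not a quote of the code below.
+*/
+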
+/*==========================================================
+**
+** Misc.
+**
+**==========================================================
+*/
+
+#define CCB_MAGIC (0xf2691ad2)
+
+/*==========================================================
+**
+** Declaration of structs.
+**
+**==========================================================
+*/
+
+struct tcb;
+struct lcb;
+struct ccb;
+struct ncb;
+struct script;
+
+typedef struct ncb * ncb_p;
+typedef struct tcb * tcb_p;
+typedef struct lcb * lcb_p;
+typedef struct ccb * ccb_p;
+
+struct link {
+ ncrcmd l_cmd;
+ ncrcmd l_paddr;
+};
+
+struct usrcmd {
+ u_long target;
+ u_long lun;
+ u_long data;
+ u_long cmd;
+};
+
+#define UC_SETSYNC 10
+#define UC_SETTAGS 11
+#define UC_SETDEBUG 12
+#define UC_SETORDER 13
+#define UC_SETWIDE 14
+#define UC_SETFLAG 15
+#define UC_CLEARPROF 16
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+#define UC_DEBUG_ERROR_RECOVERY 17
+#endif
+
+#define UF_TRACE (0x01)
+#define UF_NODISC (0x02)
+#define UF_NOSCAN (0x04)
+
+/*---------------------------------------
+**
+** Timestamps for profiling
+**
+**---------------------------------------
+*/
+
+struct tstamp {
+ u_long start;
+ u_long end;
+ u_long select;
+ u_long command;
+ u_long status;
+ u_long disconnect;
+ u_long reselect;
+};
+
+/*
+** profiling data (per device)
+*/
+
+struct profile {
+ u_long num_trans;
+ u_long num_kbytes;
+ u_long rest_bytes;
+ u_long num_disc;
+ u_long num_break;
+ u_long num_int;
+ u_long num_fly;
+ u_long ms_setup;
+ u_long ms_data;
+ u_long ms_disc;
+ u_long ms_post;
+};
+
+/*==========================================================
+**
+** Declaration of structs: target control block
+**
+**==========================================================
+*/
+
+struct tcb {
+ /*
+ ** during reselection the ncr jumps to this point
+ ** with SFBR set to the encoded target number
+ ** with bit 7 set.
+ ** if it's not this target, jump to the next.
+ **
+ ** JUMP IF (SFBR != #target#)
+ ** @(next tcb)
+ */
+
+ struct link jump_tcb;
+
+ /*
+ ** load the actual values for the sxfer and the scntl3
+ ** register (sync/wide mode).
+ **
+ ** SCR_COPY (1);
+ ** @(sval field of this tcb)
+ ** @(sxfer register)
+ ** SCR_COPY (1);
+ ** @(wval field of this tcb)
+ ** @(scntl3 register)
+ */
+
+ ncrcmd getscr[6];
+
+ /*
+ ** if next message is "identify"
+ ** then load the message to SFBR,
+ ** else load 0 to SFBR.
+ **
+ ** CALL
+ ** <RESEL_LUN>
+ */
+
+ struct link call_lun;
+
+ /*
+ ** now look for the right lun.
+ **
+ ** JUMP
+ ** @(first ccb of this lun)
+ */
+
+ struct link jump_lcb;
+
+ /*
+ ** pointer to interrupted getcc ccb
+ */
+
+ ccb_p hold_cp;
+
+ /*
+ ** pointer to ccb used for negotiating.
+	** Avoids starting a negotiation for all queued commands
+ ** when tagged command queuing is enabled.
+ */
+
+ ccb_p nego_cp;
+
+ /*
+ ** statistical data
+ */
+
+ u_long transfers;
+ u_long bytes;
+
+ /*
+ ** user settable limits for sync transfer
+ ** and tagged commands.
+ ** These limits are read from the NVRAM if present.
+ */
+
+ u_char usrsync;
+ u_char usrwide;
+ u_char usrtags;
+ u_char usrflag;
+
+ u_char numtags;
+ u_char maxtags;
+ u_short num_good;
+
+ /*
+ ** negotiation of wide and synch transfer.
+ ** device quirks.
+ */
+
+/*0*/ u_char minsync;
+/*1*/ u_char sval;
+/*2*/ u_short period;
+/*0*/ u_char maxoffs;
+
+/*1*/ u_char quirks;
+
+/*2*/ u_char widedone;
+/*3*/ u_char wval;
+ /*
+	** inquiry data
+ */
+#define MAX_INQUIRE 36
+ u_char inqdata[MAX_INQUIRE];
+
+ /*
+ ** the lcb's of this tcb
+ */
+
+ lcb_p lp[MAX_LUN];
+};
+
+/*==========================================================
+**
+** Declaration of structs: lun control block
+**
+**==========================================================
+*/
+
+struct lcb {
+ /*
+ ** during reselection the ncr jumps to this point
+ ** with SFBR set to the "Identify" message.
+ ** if it's not this lun, jump to the next.
+ **
+ ** JUMP IF (SFBR != #lun#)
+ ** @(next lcb of this target)
+ */
+
+ struct link jump_lcb;
+
+ /*
+ ** if next message is "simple tag",
+ ** then load the tag to SFBR,
+ ** else load 0 to SFBR.
+ **
+ ** CALL
+ ** <RESEL_TAG>
+ */
+
+ struct link call_tag;
+
+ /*
+ ** now look for the right ccb.
+ **
+ ** JUMP
+ ** @(first ccb of this lun)
+ */
+
+ struct link jump_ccb;
+
+ /*
+ ** start of the ccb chain
+ */
+
+ ccb_p next_ccb;
+
+ /*
+ ** Control of tagged queueing
+ */
+
+ u_char reqccbs;
+ u_char actccbs;
+ u_char reqlink;
+ u_char actlink;
+ u_char usetags;
+ u_char lasttag;
+
+ /*
+ ** Linux specific fields:
+ ** Number of active commands and current credit.
+ ** Should be managed by the generic scsi driver
+ */
+
+ u_char active;
+ u_char opennings;
+
+ /*-----------------------------------------------
+ ** Flag to force M_ORDERED_TAG on next command
+ ** in order to avoid spurious timeout when
+ ** M_SIMPLE_TAG is used for all operations.
+ **-----------------------------------------------
+ */
+ u_char force_ordered_tag;
+#define NCR_TIMEOUT_INCREASE (5*HZ)
+};
+
+/*==========================================================
+**
+** Declaration of structs: COMMAND control block
+**
+**==========================================================
+**
+** This substructure is copied from the ccb to a
+** global address after selection (or reselection)
+** and copied back before disconnect.
+**
+** These fields are accessible to the script processor.
+**
+**----------------------------------------------------------
+*/
+
+struct head {
+ /*
+ ** Execution of a ccb starts at this point.
+ ** It's a jump to the "SELECT" label
+ ** of the script.
+ **
+ ** After successful selection the script
+ ** processor overwrites it with a jump to
+ ** the IDLE label of the script.
+ */
+
+ struct link launch;
+
+ /*
+ ** Saved data pointer.
+ ** Points to the position in the script
+ ** responsible for the actual transfer
+ ** of data.
+ ** It's written after reception of a
+ ** "SAVE_DATA_POINTER" message.
+	** The goal pointer points just past
+ ** the last transfer command.
+ */
+
+ u_int32 savep;
+ u_int32 lastp;
+ u_int32 goalp;
+
+ /*
+ ** The virtual address of the ccb
+ ** containing this header.
+ */
+
+ ccb_p cp;
+
+ /*
+ ** space for some timestamps to gather
+ ** profiling data about devices and this driver.
+ */
+
+ struct tstamp stamp;
+
+ /*
+ ** status fields.
+ */
+
+ u_char scr_st[4]; /* script status */
+ u_char status[4]; /* host status. Must be the last */
+ /* DWORD of the CCB header */
+};
+
+/*
+** The status bytes are used by the host and the script processor.
+**
+** The byte corresponding to the host_status must be stored in the
+** last DWORD of the CCB header since it is used for command
+** completion (ncr_wakeup()). Doing so, we are sure that the header
+** has been entirely copied back to the CCB when the host_status is
+** seen complete by the CPU.
+**
+** The last four bytes (status[4]) are copied to the scratchb register
+** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect,
+** and copied back just after disconnecting.
+** Inside the script the XX_REG are used.
+**
+** The first four bytes (scr_st[4]) are used inside the script by
+** "COPY" commands.
+** Because source and destination must have the same alignment
+** in a DWORD, the fields HAVE to be at the chosen offsets.
+** xerr_st 0 (0x34) scratcha
+** sync_st 1 (0x05) sxfer
+** wide_st 3 (0x03) scntl3
+*/
+
+/*
+** Last four bytes (script)
+*/
+#define QU_REG scr0
+#define HS_REG scr1
+#define HS_PRT nc_scr1
+#define SS_REG scr2
+#define PS_REG scr3
+
+/*
+** Last four bytes (host)
+*/
+#define actualquirks phys.header.status[0]
+#define host_status phys.header.status[1]
+#define scsi_status phys.header.status[2]
+#define parity_status phys.header.status[3]
+
+/*
+** First four bytes (script)
+*/
+#define xerr_st header.scr_st[0]
+#define sync_st header.scr_st[1]
+#define nego_st header.scr_st[2]
+#define wide_st header.scr_st[3]
+
+/*
+** First four bytes (host)
+*/
+#define xerr_status phys.xerr_st
+#define sync_status phys.sync_st
+#define nego_status phys.nego_st
+#define wide_status phys.wide_st
+
+/*==========================================================
+**
+** Declaration of structs: Data structure block
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+** the script-processor-changeable data and
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+struct dsb {
+
+ /*
+ ** Header.
+ ** Has to be the first entry,
+ ** because it's jumped to by the
+ ** script processor
+ */
+
+ struct head header;
+
+ /*
+ ** Table data for Script
+ */
+
+ struct scr_tblsel select;
+ struct scr_tblmove smsg ;
+ struct scr_tblmove smsg2 ;
+ struct scr_tblmove cmd ;
+ struct scr_tblmove scmd ;
+ struct scr_tblmove sense ;
+ struct scr_tblmove data [MAX_SCATTER];
+};
+
+/*==========================================================
+**
+** Declaration of structs: Command control block.
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+** the script-processor-changeable data and then
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+
+struct ccb {
+ /*
+	** This field forces 32-byte alignment of phys.header,
+ ** in order to use cache line bursting when copying it
+ ** to the ncb.
+ */
+
+ struct link filler[2];
+
+ /*
+ ** during reselection the ncr jumps to this point.
+ ** If a "SIMPLE_TAG" message was received,
+ ** then SFBR is set to the tag.
+	** else SFBR is set to 0.
+ ** If looking for another tag, jump to the next ccb.
+ **
+ ** JUMP IF (SFBR != #TAG#)
+ ** @(next ccb of this lun)
+ */
+
+ struct link jump_ccb;
+
+ /*
+ ** After execution of this call, the return address
+ ** (in the TEMP register) points to the following
+ ** data structure block.
+ ** So copy it to the DSA register, and start
+ ** processing of this data structure.
+ **
+ ** CALL
+ ** <RESEL_TMP>
+ */
+
+ struct link call_tmp;
+
+ /*
+ ** This is the data structure which is
+ ** to be executed by the script processor.
+ */
+
+ struct dsb phys;
+
+ /*
+ ** If a data transfer phase is terminated too early
+	** (after reception of a message (e.g. DISCONNECT)),
+ ** we have to prepare a mini script to transfer
+ ** the rest of the data.
+ */
+
+ ncrcmd patch[8];
+
+ /*
+ ** The general SCSI driver provides a
+ ** pointer to a control block.
+ */
+
+ Scsi_Cmnd *cmd;
+ int data_len;
+
+ /*
+ ** We prepare a message to be sent after selection,
+ ** and a second one to be sent after getcc selection.
+ ** Contents are IDENTIFY and SIMPLE_TAG.
+ ** While negotiating sync or wide transfer,
+	** an SDTR or WDTR message is appended.
+ */
+
+ u_char scsi_smsg [8];
+ u_char scsi_smsg2[8];
+
+ /*
+ ** Lock this ccb.
+ ** Flag is used while looking for a free ccb.
+ */
+
+ u_long magic;
+
+ /*
+ ** Physical address of this instance of ccb
+ */
+
+ u_long p_ccb;
+
+ /*
+	** Completion timeout for this job.
+ ** It's set to time of start + allowed number of seconds.
+ */
+
+ u_long tlimit;
+
+ /*
+ ** All ccbs of one hostadapter are chained.
+ */
+
+ ccb_p link_ccb;
+
+ /*
+ ** All ccbs of one target/lun are chained.
+ */
+
+ ccb_p next_ccb;
+
+ /*
+ ** Sense command
+ */
+
+ u_char sensecmd[6];
+
+ /*
+ ** Tag for this transfer.
+ ** It's patched into jump_ccb.
+ ** If it's not zero, a SIMPLE_TAG
+ ** message is included in smsg.
+ */
+
+ u_char tag;
+
+ /*
+ ** Number of segments of the scatter list.
+ ** Used for recalculation of savep/goalp/lastp on
+ ** SIR_DATA_IO_IS_OUT interrupt.
+ */
+
+ u_char segments;
+};
+
+#define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
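+
+/*
+** For illustration: CCB_PHYS (cp, phys.header.launch) yields the bus
+** address of the launch field inside this ccb; values of this kind
+** are what the host patches into the scripts (after cpu_to_scr()).
+** The particular field used here is only an example.
+*/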
+
+/*==========================================================
+**
+** Declaration of structs: NCR device descriptor
+**
+**==========================================================
+*/
+
+struct ncb {
+ /*
+ ** The global header.
+ ** Accessible to both the host and the
+ ** script-processor.
+	** It is 32-byte aligned (since the ncb itself is), in order to
+ ** allow cache line bursting when copying it from or
+ ** to ccbs.
+ */
+ struct head header;
+
+ /*-----------------------------------------------
+ ** Specific Linux fields
+ **-----------------------------------------------
+ */
+ int unit; /* Unit number */
+ char chip_name[8]; /* Chip name */
+ char inst_name[16]; /* Instance name */
+ struct timer_list timer; /* Timer link header */
+ int ncr_cache; /* Cache test variable */
+ Scsi_Cmnd *waiting_list; /* Waiting list header for commands */
+ /* that we can't put into the squeue */
+	u_long		settle_time;	/* Reset in progress		*/
+ u_char release_stage; /* Synchronisation stage on release */
+ u_char verbose; /* Boot verbosity for this controller*/
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ u_char debug_error_recovery;
+ u_char stalling;
+ u_char assert_atn;
+#endif
+
+ /*-----------------------------------------------
+ ** Added field to support differences
+ ** between ncr chips.
+	** sv_xxx are the io register bit values at start-up, and are
+	** assumed to have been set by the SDMS BIOS.
+	** rv_xxx are the io register bit fields that will keep
+ ** the features used by the driver.
+ **-----------------------------------------------
+ */
+ u_short device_id;
+ u_char revision_id;
+
+ u_char sv_scntl0;
+ u_char sv_scntl3;
+ u_char sv_dmode;
+ u_char sv_dcntl;
+ u_char sv_ctest3;
+ u_char sv_ctest4;
+ u_char sv_ctest5;
+ u_char sv_gpcntl;
+ u_char sv_stest2;
+ u_char sv_stest4;
+
+ u_char rv_scntl0;
+ u_char rv_scntl3;
+ u_char rv_dmode;
+ u_char rv_dcntl;
+ u_char rv_ctest3;
+ u_char rv_ctest4;
+ u_char rv_ctest5;
+ u_char rv_stest2;
+
+ u_char scsi_mode;
+
+ /*-----------------------------------------------
+ ** Scripts ..
+ **-----------------------------------------------
+ **
+ ** During reselection the ncr jumps to this point.
+ ** The SFBR register is loaded with the encoded target id.
+ **
+ ** Jump to the first target.
+ **
+ ** JUMP
+ ** @(next tcb)
+ */
+ struct link jump_tcb;
+
+ /*-----------------------------------------------
+ ** Configuration ..
+ **-----------------------------------------------
+ **
+ ** virtual and physical addresses
+ ** of the 53c810 chip.
+ */
+ vm_offset_t vaddr;
+ vm_offset_t paddr;
+
+ vm_offset_t vaddr2;
+ vm_offset_t paddr2;
+
+ /*
+ ** pointer to the chip's registers.
+ */
+ volatile
+ struct ncr_reg* reg;
+
+ /*
+ ** A copy of the scripts, relocated for this ncb.
+ */
+ struct script *script0;
+ struct scripth *scripth0;
+
+ /*
+ ** Scripts instance virtual address.
+ */
+ struct script *script;
+ struct scripth *scripth;
+
+ /*
+ ** Scripts instance physical address.
+ */
+ u_long p_script;
+ u_long p_scripth;
+
+ /*
+ ** The SCSI address of the host adapter.
+ */
+ u_char myaddr;
+
+ /*
+ ** Max dwords burst supported by the adapter.
+ */
+ u_char maxburst; /* log base 2 of dwords burst */
+
+ /*
+ ** timing parameters
+ */
+ u_char minsync; /* Minimum sync period factor */
+ u_char maxsync; /* Maximum sync period factor */
+ u_char maxoffs; /* Max scsi offset */
+ u_char multiplier; /* Clock multiplier (1,2,4) */
+ u_char clock_divn; /* Number of clock divisors */
+ u_long clock_khz; /* SCSI clock frequency in KHz */
+ u_int features; /* Chip features map */
+
+
+ /*-----------------------------------------------
+ ** Link to the generic SCSI driver
+ **-----------------------------------------------
+ */
+
+ /* struct scsi_link sc_link; */
+
+ /*-----------------------------------------------
+ ** Job control
+ **-----------------------------------------------
+ **
+ ** Commands from user
+ */
+ struct usrcmd user;
+ u_char order;
+
+ /*
+ ** Target data
+ */
+ struct tcb target[MAX_TARGET];
+
+ /*
+ ** Start queue.
+ */
+ u_int32 squeue [MAX_START];
+ u_short squeueput;
+ u_short actccbs;
+
+ /*
+ ** Timeout handler
+ */
+#if 0
+ u_long heartbeat;
+ u_short ticks;
+ u_short latetime;
+#endif
+ u_long lasttime;
+
+ /*-----------------------------------------------
+ ** Debug and profiling
+ **-----------------------------------------------
+ **
+ ** register dump
+ */
+ struct ncr_reg regdump;
+ u_long regtime;
+
+ /*
+ ** Profiling data
+ */
+ struct profile profile;
+ u_long disc_phys;
+ u_long disc_ref;
+
+ /*
+ ** The global control block.
+ ** It's used only during the configuration phase.
+ ** A target control block will be created
+ ** after the first successful transfer.
+ */
+ struct ccb *ccb;
+
+ /*
+ ** message buffers.
+ ** Should be longword aligned,
+ ** because they're written with a
+ ** COPY script command.
+ */
+ u_char msgout[8];
+ u_char msgin [8];
+ u_int32 lastmsg;
+
+ /*
+ ** Buffer for STATUS_IN phase.
+ */
+ u_char scratch;
+
+ /*
+ ** controller chip dependent maximal transfer width.
+ */
+ u_char maxwide;
+
+ /*
+ ** option for M_IDENTIFY message: enables disconnecting
+ */
+ u_char disc;
+
+ /*
+ ** address of the ncr control registers in io space
+ */
+ u_int port;
+
+ /*
+ ** irq level
+ */
+ u_short irq;
+};
+
+#define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl))
+#define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth, lbl))
+
+/*==========================================================
+**
+**
+** Script for NCR-Processor.
+**
+** Use ncr_script_fill() to create the variable parts.
+** Use ncr_script_copy_and_bind() to make a copy and
+** bind to physical addresses.
+**
+**
+**==========================================================
+**
+** We have to know the offsets of all labels before
+** we reach them (for forward jumps).
+** Therefore we declare a struct here.
+** If you make changes inside the script,
+** DON'T FORGET TO CHANGE THE LENGTHS HERE!
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Script fragments which are loaded into the on-board RAM
+** of 825A, 875 and 895 chips.
+*/
+struct script {
+ ncrcmd start [ 4];
+ ncrcmd start0 [ 2];
+ ncrcmd start1 [ 3];
+ ncrcmd startpos [ 1];
+ ncrcmd trysel [ 8];
+ ncrcmd skip [ 8];
+ ncrcmd skip2 [ 3];
+ ncrcmd idle [ 2];
+ ncrcmd select [ 22];
+ ncrcmd prepare [ 4];
+ ncrcmd loadpos [ 14];
+ ncrcmd prepare2 [ 24];
+ ncrcmd setmsg [ 5];
+ ncrcmd clrack [ 2];
+ ncrcmd dispatch [ 38];
+ ncrcmd no_data [ 17];
+ ncrcmd checkatn [ 10];
+ ncrcmd command [ 15];
+ ncrcmd status [ 27];
+ ncrcmd msg_in [ 26];
+ ncrcmd msg_bad [ 6];
+ ncrcmd complete [ 13];
+ ncrcmd cleanup [ 12];
+ ncrcmd cleanup0 [ 11];
+ ncrcmd signal [ 10];
+ ncrcmd save_dp [ 5];
+ ncrcmd restore_dp [ 5];
+ ncrcmd disconnect [ 12];
+ ncrcmd disconnect0 [ 5];
+ ncrcmd disconnect1 [ 23];
+ ncrcmd msg_out [ 9];
+ ncrcmd msg_out_done [ 7];
+ ncrcmd badgetcc [ 6];
+ ncrcmd reselect [ 8];
+ ncrcmd reselect1 [ 8];
+ ncrcmd reselect2 [ 8];
+ ncrcmd resel_tmp [ 5];
+ ncrcmd resel_lun [ 18];
+ ncrcmd resel_tag [ 24];
+ ncrcmd data_io [ 6];
+ ncrcmd data_in [MAX_SCATTER * 4 + 4];
+};
+
+/*
+** Script fragments which stay in main memory for all chips.
+*/
+struct scripth {
+ ncrcmd tryloop [MAX_START*5+2];
+ ncrcmd msg_parity [ 6];
+ ncrcmd msg_reject [ 8];
+ ncrcmd msg_ign_residue [ 32];
+ ncrcmd msg_extended [ 18];
+ ncrcmd msg_ext_2 [ 18];
+ ncrcmd msg_wdtr [ 27];
+ ncrcmd msg_ext_3 [ 18];
+ ncrcmd msg_sdtr [ 27];
+ ncrcmd msg_out_abort [ 10];
+ ncrcmd getcc [ 4];
+ ncrcmd getcc1 [ 5];
+#ifdef NCR_GETCC_WITHMSG
+ ncrcmd getcc2 [ 33];
+#else
+ ncrcmd getcc2 [ 14];
+#endif
+ ncrcmd getcc3 [ 10];
+ ncrcmd data_out [MAX_SCATTER * 4 + 4];
+ ncrcmd aborttag [ 4];
+ ncrcmd abort [ 22];
+ ncrcmd snooptest [ 9];
+ ncrcmd snoopend [ 2];
+};
+
+/*==========================================================
+**
+**
+** Function headers.
+**
+**
+**==========================================================
+*/
+
+static void ncr_alloc_ccb (ncb_p np, u_long t, u_long l);
+static void ncr_complete (ncb_p np, ccb_p cp);
+static void ncr_exception (ncb_p np);
+static void ncr_free_ccb (ncb_p np, ccb_p cp, u_long t, u_long l);
+static void ncr_getclock (ncb_p np, int mult);
+static void ncr_selectclock (ncb_p np, u_char scntl3);
+static ccb_p ncr_get_ccb (ncb_p np, u_long t,u_long l);
+static void ncr_init (ncb_p np, int reset, char * msg, u_long code);
+static int ncr_int_sbmc (ncb_p np);
+static int ncr_int_par (ncb_p np);
+static void ncr_int_ma (ncb_p np);
+static void ncr_int_sir (ncb_p np);
+static void ncr_int_sto (ncb_p np);
+static u_long ncr_lookup (char* id);
+static void ncr_negotiate (struct ncb* np, struct tcb* tp);
+static void ncr_opennings (ncb_p np, lcb_p lp, Scsi_Cmnd * xp);
+
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+static void ncb_profile (ncb_p np, ccb_p cp);
+#endif
+
+static void ncr_script_copy_and_bind
+ (ncb_p np, ncrcmd *src, ncrcmd *dst, int len);
+static void ncr_script_fill (struct script * scr, struct scripth * scripth);
+static int ncr_scatter (ccb_p cp, Scsi_Cmnd *cmd);
+static void ncr_setmaxtags (ncb_p np, tcb_p tp, u_long numtags);
+static void ncr_getsync (ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p);
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer);
+static void ncr_settags (tcb_p tp, lcb_p lp);
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack);
+static int ncr_show_msg (u_char * msg);
+static int ncr_snooptest (ncb_p np);
+static void ncr_timeout (ncb_p np);
+static void ncr_wakeup (ncb_p np, u_long code);
+static void ncr_start_reset (ncb_p np, int settle_delay);
+static int ncr_reset_scsi_bus (ncb_p np, int enab_int, int settle_delay);
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+static void ncr_usercmd (ncb_p np);
+#endif
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd);
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd);
+static void process_waiting_list(ncb_p np, int sts);
+
+#define remove_from_waiting_list(np, cmd) \
+ retrieve_from_waiting_list(1, (np), (cmd))
+#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
+#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static int ncr_get_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram);
+static int ncr_get_Tekram_nvram (ncr_slot *np, Tekram_nvram *nvram);
+#endif
+
+/*==========================================================
+**
+**
+** Global static data.
+**
+**
+**==========================================================
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+#endif
+
+static inline char *ncr_name (ncb_p np)
+{
+ return np->inst_name;
+}
+
+
+/*==========================================================
+**
+**
+** Scripts for NCR-Processor.
+**
+** Use ncr_script_bind for binding to physical addresses.
+**
+**
+**==========================================================
+**
+** NADDR generates a reference to a field of the controller data.
+** PADDR generates a reference to another part of the script.
+** RADDR generates a reference to a script processor register.
+** FADDR generates a reference to a script processor register
+** with offset.
+**
+**----------------------------------------------------------
+*/
+
+#define RELOC_SOFTC 0x40000000
+#define RELOC_LABEL 0x50000000
+#define RELOC_REGISTER 0x60000000
+#define RELOC_KVAR 0x70000000
+#define RELOC_LABELH 0x80000000
+#define RELOC_MASK 0xf0000000
+
+#define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label))
+#define PADDR(label) (RELOC_LABEL | offsetof(struct script, label))
+#define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label))
+#define RADDR(label) (RELOC_REGISTER | REG(label))
+#define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
+#define KVAR(which) (RELOC_KVAR | (which))
+
+#define SCRIPT_KVAR_JIFFIES (0)
+
+#define SCRIPT_KVAR_FIRST SCRIPT_KVAR_JIFFIES
+#define SCRIPT_KVAR_LAST SCRIPT_KVAR_JIFFIES
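+
+/*
+** For illustration: NADDR (msgout) expands to
+** RELOC_SOFTC | offsetof (struct ncb, msgout), and PADDR (dispatch) to
+** RELOC_LABEL | offsetof (struct script, dispatch).  These tokens are
+** placeholders only; ncr_script_copy_and_bind() is expected to replace
+** them with real bus addresses when the script is copied for an ncb.
+*/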
+
+/*
+ * Kernel variables referenced in the scripts.
+ * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
+ */
+static void *script_kvars[] __initdata =
+ { (void *)&jiffies };
+
+static struct script script0 __initdata = {
+/*--------------------------< START >-----------------------*/ {
+#if 0
+ /*
+ ** Claim to be still alive ...
+ */
+ SCR_COPY (sizeof (((struct ncb *)0)->heartbeat)),
+ KVAR(SCRIPT_KVAR_JIFFIES),
+ NADDR (heartbeat),
+#endif
+ /*
+ ** Make data structure address invalid.
+ ** clear SIGP.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_FROM_REG (ctest2),
+ 0,
+}/*-------------------------< START0 >----------------------*/,{
+ /*
+ ** Hook for interrupted GetConditionCode.
+ ** Will be patched to ... IFTRUE by
+ ** the interrupt handler.
+ */
+ SCR_INT ^ IFFALSE (0),
+ SIR_SENSE_RESTART,
+
+}/*-------------------------< START1 >----------------------*/,{
+ /*
+ ** Hook for stalled start queue.
+ ** Will be patched to IFTRUE by the interrupt handler.
+ */
+ SCR_INT ^ IFFALSE (0),
+ SIR_STALL_RESTART,
+ /*
+ ** Then jump to a certain point in tryloop.
+ ** Due to the lack of indirect addressing the code
+ ** is self modifying here.
+	** is self-modifying here.
+ SCR_JUMP,
+}/*-------------------------< STARTPOS >--------------------*/,{
+ PADDRH(tryloop),
+}/*-------------------------< TRYSEL >----------------------*/,{
+ /*
+ ** Now:
+ ** DSA: Address of a Data Structure
+ ** or Address of the IDLE-Label.
+ **
+ ** TEMP: Address of a script, which tries to
+ ** start the NEXT entry.
+ **
+ ** Save the TEMP register into the SCRATCHA register.
+ ** Then copy the DSA to TEMP and RETURN.
+ ** This is kind of an indirect jump.
+ ** (The script processor has NO stack, so the
+ ** CALL is actually a jump and link, and the
+ ** RETURN is an indirect jump.)
+ **
+ ** If the slot was empty, DSA contains the address
+ ** of the IDLE part of this script. The processor
+ ** jumps to IDLE and waits for a reselect.
+ ** It will wake up and try the same slot again
+ ** after the SIGP bit becomes set by the host.
+ **
+ ** If the slot was not empty, DSA contains
+ ** the address of the phys-part of a ccb.
+ ** The processor jumps to this address.
+ ** phys starts with head,
+ ** head starts with launch,
+ ** so actually the processor jumps to
+	** the launch part.
+ ** If the entry is scheduled for execution,
+ ** then launch contains a jump to SELECT.
+ ** If it's not scheduled, it contains a jump to IDLE.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ RADDR (scratcha),
+ SCR_COPY (4),
+ RADDR (dsa),
+ RADDR (temp),
+ SCR_RETURN,
+ 0
+
+}/*-------------------------< SKIP >------------------------*/,{
+ /*
+ ** This entry has been canceled.
+ ** Next time use the next slot.
+ */
+ SCR_COPY (4),
+ RADDR (scratcha),
+ PADDR (startpos),
+ /*
+ ** patch the launch field.
+ ** should look like an idle process.
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDR (skip2),
+ SCR_COPY (8),
+ PADDR (idle),
+}/*-------------------------< SKIP2 >-----------------------*/,{
+ 0,
+ SCR_JUMP,
+ PADDR(start),
+}/*-------------------------< IDLE >------------------------*/,{
+ /*
+ ** Nothing to do?
+ ** Wait for reselect.
+ */
+ SCR_JUMP,
+ PADDR(reselect),
+
+}/*-------------------------< SELECT >----------------------*/,{
+ /*
+ ** DSA contains the address of a scheduled
+ ** data structure.
+ **
+ ** SCRATCHA contains the address of the script,
+ ** which starts the next entry.
+ **
+ ** Set Initiator mode.
+ **
+ ** (Target mode is left as an exercise for the reader)
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ SCR_LOAD_REG (HS_REG, 0xff),
+ 0,
+
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+ PADDR (reselect),
+
+ /*
+ ** Now there are 4 possibilities:
+ **
+	** (1) The ncr loses arbitration.
+ ** This is ok, because it will try again,
+ ** when the bus becomes idle.
+ ** (But beware of the timeout function!)
+ **
+ ** (2) The ncr is reselected.
+ ** Then the script processor takes the jump
+ ** to the RESELECT label.
+ **
+ ** (3) The ncr completes the selection.
+ ** Then it will execute the next statement.
+ **
+ ** (4) There is a selection timeout.
+ ** Then the ncr should interrupt the host and stop.
+ ** Unfortunately, it seems to continue execution
+ ** of the script. But it will fail with an
+ ** IID-interrupt on the next WHEN.
+ */
+
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ 0,
+
+ /*
+ ** Save target id to ctest0 register
+ */
+
+ SCR_FROM_REG (sdid),
+ 0,
+ SCR_TO_REG (ctest0),
+ 0,
+ /*
+ ** Send the IDENTIFY and SIMPLE_TAG messages
+ ** (and the M_X_SYNC_REQ message)
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct dsb, smsg),
+#ifdef undef /* XXX better fail than try to deal with this ... */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ -16,
+#endif
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_COPY (1),
+ RADDR (sfbr),
+ NADDR (lastmsg),
+ /*
+ ** Selection complete.
+ ** Next time use the next slot.
+ */
+ SCR_COPY (4),
+ RADDR (scratcha),
+ PADDR (startpos),
+}/*-------------------------< PREPARE >----------------------*/,{
+ /*
+ ** The ncr doesn't have an indirect load
+ ** or store command. So we have to
+ ** copy part of the control block to a
+ ** fixed place, where we can access it.
+ **
+ ** We patch the address part of a
+ ** COPY command with the DSA-register.
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDR (loadpos),
+ /*
+ ** then we do the actual copy.
+ */
+ SCR_COPY (sizeof (struct head)),
+ /*
+ ** continued after the next label ...
+ */
+
+}/*-------------------------< LOADPOS >---------------------*/,{
+ 0,
+ NADDR (header),
+ /*
+ ** Mark this ccb as not scheduled.
+ */
+ SCR_COPY (8),
+ PADDR (idle),
+ NADDR (header.launch),
+ /*
+ ** Set a time stamp for this selection
+ */
+ SCR_COPY (sizeof (u_long)),
+ KVAR(SCRIPT_KVAR_JIFFIES),
+ NADDR (header.stamp.select),
+ /*
+ ** load the savep (saved pointer) into
+ ** the TEMP register (actual pointer)
+ */
+ SCR_COPY (4),
+ NADDR (header.savep),
+ RADDR (temp),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_COPY (4),
+ NADDR (header.status),
+ RADDR (scr0),
+
+}/*-------------------------< PREPARE2 >---------------------*/,{
+ /*
+ ** Load the synchronous mode register
+ */
+ SCR_COPY (1),
+ NADDR (sync_st),
+ RADDR (sxfer),
+ /*
+ ** Load the wide mode and timing register
+ */
+ SCR_COPY (1),
+ NADDR (wide_st),
+ RADDR (scntl3),
+ /*
+ ** Initialize the msgout buffer with a NOOP message.
+ */
+ SCR_LOAD_REG (scratcha, M_NOOP),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgout),
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgin),
+ /*
+ ** Message in phase ?
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** Extended or reject message ?
+ */
+ SCR_FROM_REG (sbdl),
+ 0,
+ SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+ PADDR (msg_in),
+ SCR_JUMP ^ IFTRUE (DATA (M_REJECT)),
+ PADDRH (msg_reject),
+ /*
+ ** normal processing
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< SETMSG >----------------------*/,{
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgout),
+ SCR_SET (SCR_ATN),
+ 0,
+}/*-------------------------< CLRACK >----------------------*/,{
+ /*
+ ** Terminate possible pending message phase.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+
+}/*-----------------------< DISPATCH >----------------------*/,{
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ /*
+ ** remove bogus output signals
+ */
+ SCR_REG_REG (socl, SCR_AND, CACK|CATN),
+ 0,
+ SCR_RETURN ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ 0,
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 4.
+ ** Possible data corruption during Memory Write and Invalidate.
+ ** This work-around resets the addressing logic prior to the
+ ** start of the first MOVE of a DATA IN phase.
+ ** (See README.ncr53c8xx for more information)
+ */
+ SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
+ 20,
+ SCR_COPY (4),
+ RADDR (scratcha),
+ RADDR (scratcha),
+ SCR_RETURN,
+ 0,
+
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+ PADDR (msg_out),
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_IN)),
+ PADDR (msg_in),
+ SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+ PADDR (command),
+ SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+ PADDR (status),
+ /*
+ ** Discard one illegal phase byte, if required.
+ */
+ SCR_LOAD_REG (scratcha, XE_BAD_PHASE),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (xerr_st),
+ SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< NO_DATA >--------------------*/,{
+ /*
+	** The target wants to transfer too much data
+ ** or in the wrong direction.
+ ** Remember that in extended error.
+ */
+ SCR_LOAD_REG (scratcha, XE_EXTRA_DATA),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (xerr_st),
+ /*
+ ** Discard one data byte, if required.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_DATA_OUT,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFFALSE (IF (SCR_DATA_IN)),
+ 8,
+ SCR_MOVE_ABS (1) ^ SCR_DATA_IN,
+ NADDR (scratch),
+ /*
+ ** .. and repeat as required.
+ */
+ SCR_CALL,
+ PADDR (dispatch),
+ SCR_JUMP,
+ PADDR (no_data),
+}/*-------------------------< CHECKATN >--------------------*/,{
+ /*
+ ** If AAP (bit 1 of scntl0 register) is set
+ ** and a parity error is detected,
+ ** the script processor asserts ATN.
+ **
+ ** The target should switch to a MSG_OUT phase
+ ** to get the message.
+ */
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (CATN, CATN)),
+ PADDR (dispatch),
+ /*
+ ** count it
+ */
+ SCR_REG_REG (PS_REG, SCR_ADD, 1),
+ 0,
+ /*
+ ** Prepare a M_ID_ERROR message
+ ** (initiator detected error).
+ ** The target should retry the transfer.
+ */
+ SCR_LOAD_REG (scratcha, M_ID_ERROR),
+ 0,
+ SCR_JUMP,
+ PADDR (setmsg),
+
+}/*-------------------------< COMMAND >--------------------*/,{
+ /*
+ ** If this is not a GETCC transfer ...
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (S_CHECK_COND)),
+ 28,
+ /*
+ ** ... set a timestamp ...
+ */
+ SCR_COPY (sizeof (u_long)),
+ KVAR(SCRIPT_KVAR_JIFFIES),
+ NADDR (header.stamp.command),
+ /*
+ ** ... and send the command
+ */
+ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct dsb, cmd),
+ SCR_JUMP,
+ PADDR (dispatch),
+ /*
+ ** Send the GETCC command
+ */
+/*>>>*/ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct dsb, scmd),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< STATUS >--------------------*/,{
+ /*
+ ** set the timestamp.
+ */
+ SCR_COPY (sizeof (u_long)),
+ KVAR(SCRIPT_KVAR_JIFFIES),
+ NADDR (header.stamp.status),
+ /*
+ ** If this is a GETCC transfer,
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (S_CHECK_COND)),
+ 40,
+ /*
+ ** get the status
+ */
+ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ NADDR (scratch),
+ /*
+ ** Save status to scsi_status.
+ ** Mark as complete.
+ ** And wait for disconnect.
+ */
+ SCR_TO_REG (SS_REG),
+ 0,
+ SCR_REG_REG (SS_REG, SCR_OR, S_SENSE),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ SCR_JUMP,
+ PADDR (checkatn),
+ /*
+	** If it was not a GETCC transfer,
+ ** save the status to scsi_status.
+ */
+/*>>>*/ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ NADDR (scratch),
+ SCR_TO_REG (SS_REG),
+ 0,
+ /*
+	** if it was not a check condition ...
+ */
+ SCR_JUMP ^ IFTRUE (DATA (S_CHECK_COND)),
+ PADDR (checkatn),
+ /*
+ ** ... mark as complete.
+ */
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ SCR_JUMP,
+ PADDR (checkatn),
+
+}/*-------------------------< MSG_IN >--------------------*/,{
+ /*
+ ** Get the first byte of the message
+ ** and save it to SCRATCHA.
+ **
+ ** The script processor doesn't negate the
+ ** ACK signal after this transfer.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+ /*
+ ** Check for message parity error.
+ */
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+ PADDRH (msg_parity),
+ SCR_FROM_REG (scratcha),
+ 0,
+ /*
+ ** Parity was ok, handle this message.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+ PADDR (complete),
+ SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+ PADDR (save_dp),
+ SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+ PADDR (restore_dp),
+ SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+ PADDR (disconnect),
+ SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+ PADDRH (msg_extended),
+ SCR_JUMP ^ IFTRUE (DATA (M_NOOP)),
+ PADDR (clrack),
+ SCR_JUMP ^ IFTRUE (DATA (M_REJECT)),
+ PADDRH (msg_reject),
+ SCR_JUMP ^ IFTRUE (DATA (M_IGN_RESIDUE)),
+ PADDRH (msg_ign_residue),
+ /*
+ ** Rest of the messages left as
+ ** an exercise ...
+ **
+ ** Unimplemented messages:
+ ** fall through to MSG_BAD.
+ */
+}/*-------------------------< MSG_BAD >------------------*/,{
+ /*
+ ** unimplemented message - reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_SENT,
+ SCR_LOAD_REG (scratcha, M_REJECT),
+ 0,
+ SCR_JUMP,
+ PADDR (setmsg),
+
+}/*-------------------------< COMPLETE >-----------------*/,{
+ /*
+ ** Complete message.
+ **
+ ** If it's not the get condition code,
+ ** copy TEMP register to LASTP in header.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFTRUE (MASK (S_SENSE, S_SENSE)),
+ 12,
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR (header.lastp),
+/*>>>*/ /*
+ ** When we terminate the cycle by clearing ACK,
+ ** the target may disconnect immediately.
+ **
+ ** We don't want to be told of an
+ ** "unexpected disconnect",
+ ** so we disable this feature.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ /*
+ ** Terminate cycle ...
+ */
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** ... and wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+}/*-------------------------< CLEANUP >-------------------*/,{
+ /*
+ ** dsa: Pointer to ccb
+ ** or xxxxxxFF (no ccb)
+ **
+ ** HS_REG: Host-Status (<>0!)
+ */
+ SCR_FROM_REG (dsa),
+ 0,
+ SCR_JUMP ^ IFTRUE (DATA (0xff)),
+ PADDR (signal),
+ /*
+ ** dsa is valid.
+ ** save the status registers
+ */
+ SCR_COPY (4),
+ RADDR (scr0),
+ NADDR (header.status),
+ /*
+ ** and copy back the header to the ccb.
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDR (cleanup0),
+ SCR_COPY (sizeof (struct head)),
+ NADDR (header),
+}/*-------------------------< CLEANUP0 >--------------------*/,{
+ 0,
+
+ /*
+ ** If command resulted in "check condition"
+ ** status and is not yet completed,
+ ** try to get the condition code.
+ */
+ SCR_FROM_REG (HS_REG),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (0, HS_DONEMASK)),
+ 16,
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_JUMP ^ IFTRUE (DATA (S_CHECK_COND)),
+ PADDRH(getcc2),
+ /*
+ ** And make the DSA register invalid.
+ */
+/*>>>*/ SCR_LOAD_REG (dsa, 0xff), /* invalid */
+ 0,
+}/*-------------------------< SIGNAL >----------------------*/,{
+ /*
+ ** if status = queue full,
+ ** reinsert in startqueue and stall queue.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (S_QUEUE_FULL)),
+ SIR_STALL_QUEUE,
+ /*
+ ** if job completed ...
+ */
+ SCR_FROM_REG (HS_REG),
+ 0,
+ /*
+ ** ... signal completion to the host
+ */
+ SCR_INT_FLY ^ IFFALSE (MASK (0, HS_DONEMASK)),
+ 0,
+ /*
+	** On to new misdeeds!
+ */
+ SCR_JUMP,
+ PADDR(start),
+
+}/*-------------------------< SAVE_DP >------------------*/,{
+ /*
+ ** SAVE_DP message:
+ ** Copy TEMP register to SAVEP in header.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR (header.savep),
+ SCR_JUMP,
+ PADDR (clrack),
+}/*-------------------------< RESTORE_DP >---------------*/,{
+ /*
+ ** RESTORE_DP message:
+ ** Copy SAVEP in header to TEMP register.
+ */
+ SCR_COPY (4),
+ NADDR (header.savep),
+ RADDR (temp),
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DISCONNECT >---------------*/,{
+ /*
+ ** If QUIRK_AUTOSAVE is set,
+	** do a "save pointer" operation.
+ */
+ SCR_FROM_REG (QU_REG),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)),
+ 12,
+ /*
+ ** like SAVE_DP message:
+ ** Copy TEMP register to SAVEP in header.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR (header.savep),
+/*>>>*/ /*
+ ** Check if temp==savep or temp==goalp:
+ ** if not, log a missing save pointer message.
+ ** In fact, it's a comparison mod 256.
+ **
+ ** Hmmm, I hadn't thought that I would be urged to
+	** write this kind of ugly self-modifying code.
+ **
+ ** It's unbelievable, but the ncr53c8xx isn't able
+ ** to subtract one register from another.
+ */
+ SCR_FROM_REG (temp),
+ 0,
+ /*
+ ** You are not expected to understand this ..
+ **
+ ** CAUTION: only little endian architectures supported! XXX
+ */
+ SCR_COPY_F (1),
+ NADDR (header.savep),
+ PADDR (disconnect0),
+}/*-------------------------< DISCONNECT0 >--------------*/,{
+/*<<<*/ SCR_JUMPR ^ IFTRUE (DATA (1)),
+ 20,
+ /*
+ ** neither this
+ */
+ SCR_COPY_F (1),
+ NADDR (header.goalp),
+ PADDR (disconnect1),
+}/*-------------------------< DISCONNECT1 >--------------*/,{
+ SCR_INT ^ IFFALSE (DATA (1)),
+ SIR_MISSING_SAVE,
+/*>>>*/
+
+ /*
+ ** DISCONNECTing ...
+ **
+ ** disable the "unexpected disconnect" feature,
+ ** and remove the ACK signal.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** Wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Profiling:
+ ** Set a time stamp,
+ ** and count the disconnects.
+ */
+ SCR_COPY (sizeof (u_long)),
+ KVAR(SCRIPT_KVAR_JIFFIES),
+ NADDR (header.stamp.disconnect),
+ SCR_COPY (4),
+ NADDR (disc_phys),
+ RADDR (temp),
+ SCR_REG_REG (temp, SCR_ADD, 0x01),
+ 0,
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR (disc_phys),
+ /*
+ ** Status is: DISCONNECTED.
+ */
+ SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+ 0,
+ SCR_JUMP,
+ PADDR (cleanup),
+
+}/*-------------------------< MSG_OUT >-------------------*/,{
+ /*
+ ** The target requests a message.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_COPY (1),
+ RADDR (sfbr),
+ NADDR (lastmsg),
+ /*
+	** If it was not an ABORT message ...
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_ABORT)),
+ PADDRH (msg_out_abort),
+ /*
+ ** ... wait for the next phase
+ ** if it's a message out, send it again, ...
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDR (msg_out),
+}/*-------------------------< MSG_OUT_DONE >--------------*/,{
+ /*
+ ** ... else clear the message ...
+ */
+ SCR_LOAD_REG (scratcha, M_NOOP),
+ 0,
+ SCR_COPY (4),
+ RADDR (scratcha),
+ NADDR (msgout),
+ /*
+ ** ... and process the next phase
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*------------------------< BADGETCC >---------------------*/,{
+ /*
+ ** If SIGP was set, clear it and try again.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)),
+ PADDRH (getcc2),
+ SCR_INT,
+ SIR_SENSE_FAILED,
+}/*-------------------------< RESELECT >--------------------*/,{
+ /*
+ ** This NOP will be patched with LED OFF
+ ** SCR_REG_REG (gpreg, SCR_OR, 0x01)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** make the DSA invalid.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** Sleep waiting for a reselection.
+ ** If SIGP is set, special treatment.
+ **
+	** Ready for anything ..
+ */
+ SCR_WAIT_RESEL,
+ PADDR(reselect2),
+}/*-------------------------< RESELECT1 >--------------------*/,{
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+	** ... but good for nothing?
+ **
+ ** load the target id into the SFBR
+ ** and jump to the control block.
+ **
+ ** Look at the declarations of
+ ** - struct ncb
+ ** - struct tcb
+ ** - struct lcb
+ ** - struct ccb
+ ** to understand what's going on.
+ */
+ SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+ 0,
+ SCR_TO_REG (ctest0),
+ 0,
+ SCR_JUMP,
+ NADDR (jump_tcb),
+}/*-------------------------< RESELECT2 >-------------------*/,{
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** If it's not connected :(
+ ** -> interrupted by SIGP bit.
+ ** Jump to start.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CSIGP,CSIGP)),
+ PADDR (start),
+ SCR_JUMP,
+ PADDR (reselect),
+
+}/*-------------------------< RESEL_TMP >-------------------*/,{
+ /*
+ ** The return address in TEMP
+ ** is in fact the data structure address,
+ ** so copy it to the DSA register.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ RADDR (dsa),
+ SCR_JUMP,
+ PADDR (prepare),
+
+}/*-------------------------< RESEL_LUN >-------------------*/,{
+ /*
+ ** come back to this point
+ ** to get an IDENTIFY message
+ ** Wait for a msg_in phase.
+ */
+/*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ 48,
+ /*
+ ** message phase
+	** It's a trick:
+	** read the data without acknowledging it.
+ */
+ SCR_FROM_REG (sbdl),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (M_IDENTIFY, 0x98)),
+ 32,
+ /*
+ ** It WAS an Identify message.
+ ** get it and ack it!
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** Mask out the lun.
+ */
+ SCR_REG_REG (sfbr, SCR_AND, 0x07),
+ 0,
+ SCR_RETURN,
+ 0,
+ /*
+ ** No message phase or no IDENTIFY message:
+ ** return 0.
+ */
+/*>>>*/ SCR_LOAD_SFBR (0),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< RESEL_TAG >-------------------*/,{
+ /*
+ ** come back to this point
+ ** to get a SIMPLE_TAG message
+ ** Wait for a MSG_IN phase.
+ */
+/*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ 64,
+ /*
+ ** message phase
+ ** It's a trick - read the data
+ ** without acknowledging it.
+ */
+ SCR_FROM_REG (sbdl),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (M_SIMPLE_TAG)),
+ 48,
+ /*
+ ** It WAS a SIMPLE_TAG message.
+ ** get it and ack it!
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** Wait for the second byte (the tag)
+ */
+/*<<<*/ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ 24,
+ /*
+ ** Get it and ack it!
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ SCR_CLR (SCR_ACK|SCR_CARRY),
+ 0,
+ SCR_RETURN,
+ 0,
+ /*
+ ** No message phase or no SIMPLE_TAG message
+ ** or no second byte: return 0.
+ */
+/*>>>*/ SCR_LOAD_SFBR (0),
+ 0,
+ SCR_SET (SCR_CARRY),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< DATA_IO >--------------------*/,{
+/*
+** Because Linux does not provide xfer data direction
+** to low-level scsi drivers, we must trust the target
+** for actual data direction when we cannot guess it.
+** The programmed interrupt patches savep, lastp, goalp,
+** etc.., and restarts the scsi script at data_out/in.
+*/
+ SCR_INT ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ SIR_DATA_IO_IS_OUT,
+ SCR_INT ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ SIR_DATA_IO_IS_IN,
+ SCR_JUMP,
+ PADDR (no_data),
+
+}/*-------------------------< DATA_IN >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN)),
+** || PADDR (checkatn),
+** || SCR_MOVE_TBL ^ SCR_DATA_IN,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+** SCR_CALL,
+** PADDR (checkatn),
+** SCR_JUMP,
+** PADDR (no_data),
+*/
+0
+}/*--------------------------------------------------------*/
+};
+
+static struct scripth scripth0 __initdata = {
+/*-------------------------< TRYLOOP >---------------------*/{
+/*
+** Load an entry of the start queue into dsa
+** and try to start it by jumping to TRYSEL.
+**
+** Because the size depends on the
+** #define MAX_START parameter, it is filled
+** in at runtime.
+**
+**-----------------------------------------------------------
+**
+** ##===========< I=0; i<MAX_START >===========
+** || SCR_COPY (4),
+** || NADDR (squeue[i]),
+** || RADDR (dsa),
+** || SCR_CALL,
+** || PADDR (trysel),
+** ##==========================================
+**
+** SCR_JUMP,
+** PADDRH(tryloop),
+**
+**-----------------------------------------------------------
+*/
+0
+},/*-------------------------< MSG_PARITY >---------------*/{
+ /*
+ ** count it
+ */
+ SCR_REG_REG (PS_REG, SCR_ADD, 0x01),
+ 0,
+ /*
+ ** send a "message parity error" message.
+ */
+ SCR_LOAD_REG (scratcha, M_PARITY),
+ 0,
+ SCR_JUMP,
+ PADDR (setmsg),
+}/*-------------------------< MSG_REJECT >---------------*/,{
+ /*
+ ** If a negotiation was in progress,
+ ** negotiation failed.
+ */
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ /*
+ ** else make host log this message
+ */
+ SCR_INT ^ IFFALSE (DATA (HS_NEGOTIATE)),
+ SIR_REJECT_RECEIVED,
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< MSG_IGN_RESIDUE >----------*/,{
+ /*
+ ** Terminate cycle
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get residue size.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ /*
+ ** Check for message parity error.
+ */
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+ PADDRH (msg_parity),
+ SCR_FROM_REG (scratcha),
+ 0,
+ /*
+ ** Size is 0 .. ignore message.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (0)),
+ PADDR (clrack),
+ /*
+ ** Size is not 1 .. have to interrupt.
+ */
+/*<<<*/ SCR_JUMPR ^ IFFALSE (DATA (1)),
+ 40,
+ /*
+ ** Check for residue byte in swide register
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+/*<<<*/ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+ 16,
+ /*
+ ** There IS data in the swide register.
+ ** Discard it.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ SCR_JUMP,
+ PADDR (clrack),
+ /*
+	** Load the size into the sfbr register again.
+ */
+/*>>>*/ SCR_FROM_REG (scratcha),
+ 0,
+/*>>>*/ SCR_INT,
+ SIR_IGN_RESIDUE,
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< MSG_EXTENDED >-------------*/,{
+ /*
+ ** Terminate cycle
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get length.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ /*
+ ** Check for message parity error.
+ */
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+ PADDRH (msg_parity),
+ SCR_FROM_REG (scratcha),
+ 0,
+ /*
+ */
+ SCR_JUMP ^ IFTRUE (DATA (3)),
+ PADDRH (msg_ext_3),
+ SCR_JUMP ^ IFFALSE (DATA (2)),
+ PADDR (msg_bad),
+}/*-------------------------< MSG_EXT_2 >----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get extended message code.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[2]),
+ /*
+ ** Check for message parity error.
+ */
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+ PADDRH (msg_parity),
+ SCR_FROM_REG (scratcha),
+ 0,
+ SCR_JUMP ^ IFTRUE (DATA (M_X_WIDE_REQ)),
+ PADDRH (msg_wdtr),
+ /*
+ ** unknown extended message
+ */
+ SCR_JUMP,
+ PADDR (msg_bad)
+}/*-------------------------< MSG_WDTR >-----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get data bus width
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[3]),
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+ PADDRH (msg_parity),
+ /*
+ ** let the host do the real work.
+ */
+ SCR_INT,
+ SIR_NEGO_WIDE,
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ SIR_NEGO_PROTO,
+ /*
+ ** Send the M_X_WIDE_REQ
+ */
+ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_COPY (1),
+ RADDR (sfbr),
+ NADDR (lastmsg),
+ SCR_JUMP,
+ PADDR (msg_out_done),
+
+}/*-------------------------< MSG_EXT_3 >----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get extended message code.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[2]),
+ /*
+ ** Check for message parity error.
+ */
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+ PADDRH (msg_parity),
+ SCR_FROM_REG (scratcha),
+ 0,
+ SCR_JUMP ^ IFTRUE (DATA (M_X_SYNC_REQ)),
+ PADDRH (msg_sdtr),
+ /*
+ ** unknown extended message
+ */
+ SCR_JUMP,
+ PADDR (msg_bad)
+
+}/*-------------------------< MSG_SDTR >-----------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ /*
+ ** get period and offset
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (msgin[3]),
+ SCR_FROM_REG (socl),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (CATN, CATN)),
+ PADDRH (msg_parity),
+ /*
+ ** let the host do the real work.
+ */
+ SCR_INT,
+ SIR_NEGO_SYNC,
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ SIR_NEGO_PROTO,
+ /*
+ ** Send the M_X_SYNC_REQ
+ */
+ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_COPY (1),
+ RADDR (sfbr),
+ NADDR (lastmsg),
+ SCR_JUMP,
+ PADDR (msg_out_done),
+
+}/*-------------------------< MSG_OUT_ABORT >-------------*/,{
+ /*
+ ** After ABORT message,
+ **
+ ** expect an immediate disconnect, ...
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** ... and set the status to "ABORTED"
+ */
+ SCR_LOAD_REG (HS_REG, HS_ABORTED),
+ 0,
+ SCR_JUMP,
+ PADDR (cleanup),
+
+}/*-------------------------< GETCC >-----------------------*/,{
+ /*
+ ** The ncr doesn't have an indirect load
+ ** or store command. So we have to
+ ** copy part of the control block to a
+ ** fixed place, where we can modify it.
+ **
+ ** We patch the address part of a COPY command
+ ** with the address of the dsa register ...
+ */
+ SCR_COPY_F (4),
+ RADDR (dsa),
+ PADDRH (getcc1),
+ /*
+ ** ... then we do the actual copy.
+ */
+ SCR_COPY (sizeof (struct head)),
+}/*-------------------------< GETCC1 >----------------------*/,{
+ 0,
+ NADDR (header),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_COPY (4),
+ NADDR (header.status),
+ RADDR (scr0),
+}/*-------------------------< GETCC2 >----------------------*/,{
+ /*
+ ** Get the condition code from a target.
+ **
+ ** DSA points to a data structure.
+ ** Set TEMP to the script location
+ ** that receives the condition code.
+ **
+ ** Because there is no script command
+ ** to load a longword into a register,
+ ** we use a CALL command.
+ */
+/*<<<*/ SCR_CALLR,
+ 24,
+ /*
+ ** Get the condition code.
+ */
+ SCR_MOVE_TBL ^ SCR_DATA_IN,
+ offsetof (struct dsb, sense),
+ /*
+ ** No data phase may follow!
+ */
+ SCR_CALL,
+ PADDR (checkatn),
+ SCR_JUMP,
+ PADDR (no_data),
+/*>>>*/
+
+ /*
+ ** The CALL jumps to this point.
+ ** Prepare for a RESTORE_POINTER message.
+ ** Save the TEMP register into the saved pointer.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR (header.savep),
+ /*
+ ** Load scratcha, because in case of a selection timeout,
+ ** the host will expect a new value for startpos in
+ ** the scratcha register.
+ */
+ SCR_COPY (4),
+ PADDR (startpos),
+ RADDR (scratcha),
+#ifdef NCR_GETCC_WITHMSG
+ /*
+	** If QUIRK_NOMSG is set, select without ATN
+	** and don't send a message.
+ */
+ SCR_FROM_REG (QU_REG),
+ 0,
+ SCR_JUMP ^ IFTRUE (MASK (QUIRK_NOMSG, QUIRK_NOMSG)),
+ PADDRH(getcc3),
+ /*
+ ** Then try to connect to the target.
+ ** If we are reselected, special treatment
+ ** of the current job is required before
+ ** accepting the reselection.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+ PADDR(badgetcc),
+ /*
+ ** save target id.
+ */
+ SCR_FROM_REG (sdid),
+ 0,
+ SCR_TO_REG (ctest0),
+ 0,
+ /*
+ ** Send the IDENTIFY message.
+ ** In case of short transfer, remove ATN.
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct dsb, smsg2),
+ SCR_CLR (SCR_ATN),
+ 0,
+ /*
+ ** save the first byte of the message.
+ */
+ SCR_COPY (1),
+ RADDR (sfbr),
+ NADDR (lastmsg),
+ SCR_JUMP,
+ PADDR (prepare2),
+
+#endif
+}/*-------------------------< GETCC3 >----------------------*/,{
+ /*
+ ** Try to connect to the target.
+ ** If we are reselected, special treatment
+ ** of the current job is required before
+ ** accepting the reselection.
+ **
+ ** Silly target won't accept a message.
+ ** Select without ATN.
+ */
+ SCR_SEL_TBL ^ offsetof (struct dsb, select),
+ PADDR(badgetcc),
+ /*
+ ** save target id.
+ */
+ SCR_FROM_REG (sdid),
+ 0,
+ SCR_TO_REG (ctest0),
+ 0,
+ /*
+ ** Force error if selection timeout
+ */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ 0,
+ /*
+ ** don't negotiate.
+ */
+ SCR_JUMP,
+ PADDR (prepare2),
+
+}/*-------------------------< DATA_OUT >-------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+** || PADDR (dispatch),
+** || SCR_MOVE_TBL ^ SCR_DATA_OUT,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+** SCR_CALL,
+** PADDR (dispatch),
+** SCR_JUMP,
+** PADDR (no_data),
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< ABORTTAG >-------------------*/,{
+ /*
+ ** Abort a bad reselection.
+ ** Set the message to ABORT vs. ABORT_TAG
+ */
+ SCR_LOAD_REG (scratcha, M_ABORT_TAG),
+ 0,
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+}/*-------------------------< ABORT >----------------------*/,{
+ SCR_LOAD_REG (scratcha, M_ABORT),
+ 0,
+ SCR_COPY (1),
+ RADDR (scratcha),
+ NADDR (msgout),
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** and send it.
+ ** we expect an immediate disconnect
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_COPY (1),
+ RADDR (sfbr),
+ NADDR (lastmsg),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< SNOOPTEST >-------------------*/,{
+ /*
+ ** Read the variable.
+ */
+ SCR_COPY (4),
+ NADDR(ncr_cache),
+ RADDR (scratcha),
+ /*
+ ** Write the variable.
+ */
+ SCR_COPY (4),
+ RADDR (temp),
+ NADDR(ncr_cache),
+ /*
+ ** Read back the variable.
+ */
+ SCR_COPY (4),
+ NADDR(ncr_cache),
+ RADDR (temp),
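+	/*
+	** This fragment is driven by ncr_snooptest(), called from
+	** ncr_attach(); presumably the values read back by the host
+	** and by the chip are compared, and a mismatch aborts the
+	** attach with "CACHE INCORRECTLY CONFIGURED".
+	*/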
+}/*-------------------------< SNOOPEND >-------------------*/,{
+ /*
+ ** And stop.
+ */
+ SCR_INT,
+ 99,
+}/*--------------------------------------------------------*/
+};
+
+/*==========================================================
+**
+**
+** Fill in #define dependent parts of the script
+**
+**
+**==========================================================
+*/
+
+__initfunc(
+void ncr_script_fill (struct script * scr, struct scripth * scrh)
+)
+{
+ int i;
+ ncrcmd *p;
+
+ p = scrh->tryloop;
+ for (i=0; i<MAX_START; i++) {
+ *p++ =SCR_COPY (4);
+ *p++ =NADDR (squeue[i]);
+ *p++ =RADDR (dsa);
+ *p++ =SCR_CALL;
+ *p++ =PADDR (trysel);
+ };
+ *p++ =SCR_JUMP;
+ *p++ =PADDRH(tryloop);
+
+ assert ((u_long)p == (u_long)&scrh->tryloop + sizeof (scrh->tryloop));
+
+ p = scr->data_in;
+
+ for (i=0; i<MAX_SCATTER; i++) {
+ *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_IN));
+ *p++ =PADDR (checkatn);
+ *p++ =SCR_MOVE_TBL ^ SCR_DATA_IN;
+ *p++ =offsetof (struct dsb, data[i]);
+ };
+
+ *p++ =SCR_CALL;
+ *p++ =PADDR (checkatn);
+ *p++ =SCR_JUMP;
+ *p++ =PADDR (no_data);
+
+ assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in));
+
+ p = scrh->data_out;
+
+ for (i=0; i<MAX_SCATTER; i++) {
+ *p++ =SCR_CALL ^ IFFALSE (WHEN (SCR_DATA_OUT));
+ *p++ =PADDR (dispatch);
+ *p++ =SCR_MOVE_TBL ^ SCR_DATA_OUT;
+ *p++ =offsetof (struct dsb, data[i]);
+ };
+
+ *p++ =SCR_CALL;
+ *p++ =PADDR (dispatch);
+ *p++ =SCR_JUMP;
+ *p++ =PADDR (no_data);
+
+ assert ((u_long)p == (u_long)&scrh->data_out + sizeof (scrh->data_out));
+}
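+
+/*
+** Each scatter segment thus contributes exactly 4 ncrcmds (16 bytes)
+** to the data_in and data_out tables; ncr_queue_command() relies on
+** this when it derives savep and goalp from offsets of segments*16
+** at the end of these tables.
+*/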
+
+/*==========================================================
+**
+**
+** Copy and rebind a script.
+**
+**
+**==========================================================
+*/
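+
+/*
+** Addresses inside the static script template carry a relocation tag
+** in their high bits (RELOC_REGISTER, RELOC_LABEL, RELOC_LABELH,
+** RELOC_SOFTC, RELOC_KVAR).  The copy pass below replaces each tagged
+** value with the chip register base, the physical script or scripth
+** address, the physical ncb address, or a kernel variable address,
+** so that the SCRIPTS processor can execute the copy directly.
+*/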
+
+__initfunc(
+static void ncr_script_copy_and_bind (ncb_p np, ncrcmd *src, ncrcmd *dst, int len)
+)
+{
+ ncrcmd opcode, new, old, tmp1, tmp2;
+ ncrcmd *start, *end;
+ int relocs;
+ int opchanged = 0;
+
+ start = src;
+ end = src + len/4;
+
+ while (src < end) {
+
+ opcode = *src++;
+ *dst++ = cpu_to_scr(opcode);
+
+ /*
+ ** If we forget to change the length
+ ** in struct script, a field will be
+ ** padded with 0. This is an illegal
+ ** command.
+ */
+
+ if (opcode == 0) {
+ printf ("%s: ERROR0 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ DELAY (1000000);
+ };
+
+ if (DEBUG_FLAGS & DEBUG_SCRIPT)
+ printf ("%p: <%x>\n",
+ (src-1), (unsigned)opcode);
+
+ /*
+ ** We don't have to decode ALL commands
+ */
+ switch (opcode >> 28) {
+
+ case 0xc:
+ /*
+ ** COPY has TWO arguments.
+ */
+ relocs = 2;
+ tmp1 = src[0];
+ if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
+ tmp1 = 0;
+ tmp2 = src[1];
+ if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
+ tmp2 = 0;
+ if ((tmp1 ^ tmp2) & 3) {
+ printf ("%s: ERROR1 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ DELAY (1000000);
+ }
+ /*
+ ** If PREFETCH feature not enabled, remove
+ ** the NO FLUSH bit if present.
+ */
+ if ((opcode & SCR_NO_FLUSH) && !(np->features & FE_PFEN)) {
+ dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
+ ++opchanged;
+ }
+ break;
+
+ case 0x0:
+ /*
+ ** MOVE (absolute address)
+ */
+ relocs = 1;
+ break;
+
+ case 0x8:
+ /*
+ ** JUMP / CALL
+ ** don't relocate if relative :-)
+ */
+ if (opcode & 0x00800000)
+ relocs = 0;
+ else
+ relocs = 1;
+ break;
+
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ relocs = 1;
+ break;
+
+ default:
+ relocs = 0;
+ break;
+ };
+
+ if (relocs) {
+ while (relocs--) {
+ old = *src++;
+
+ switch (old & RELOC_MASK) {
+ case RELOC_REGISTER:
+ new = (old & ~RELOC_MASK) + np->paddr;
+ break;
+ case RELOC_LABEL:
+ new = (old & ~RELOC_MASK) + np->p_script;
+ break;
+ case RELOC_LABELH:
+ new = (old & ~RELOC_MASK) + np->p_scripth;
+ break;
+ case RELOC_SOFTC:
+ new = (old & ~RELOC_MASK) + vtophys(np);
+ break;
+ case RELOC_KVAR:
+ if (((old & ~RELOC_MASK) <
+ SCRIPT_KVAR_FIRST) ||
+ ((old & ~RELOC_MASK) >
+ SCRIPT_KVAR_LAST))
+ panic("ncr KVAR out of range");
+ new = vtophys(script_kvars[old &
+ ~RELOC_MASK]);
+ break;
+ case 0:
+ /* Don't relocate a 0 address. */
+ if (old == 0) {
+ new = old;
+ break;
+ }
+ /* fall through */
+ default:
+ panic("ncr_script_copy_and_bind: weird relocation %x\n", old);
+ break;
+ }
+
+ *dst++ = cpu_to_scr(new);
+ }
+ } else
+ *dst++ = cpu_to_scr(*src++);
+
+ };
+ if (bootverbose > 1 && opchanged)
+ printf("%s: NO FLUSH bit removed from %d script instructions\n",
+ ncr_name(np), opchanged);
+}
+
+/*==========================================================
+**
+**
+** Auto configuration: attach and init a host adapter.
+**
+**
+**==========================================================
+*/
+
+/*
+** Linux host data structure
+**
+** The script area is allocated in the host data structure
+** because kmalloc() returns NULL during scsi initialisations
+** with Linux 1.2.X
+*/
+
+struct host_data {
+ struct ncb *ncb;
+
+ char ncb_align[NCB_ALIGN_SIZE-1]; /* Filler for alignment */
+ struct ncb _ncb_data;
+
+ char ccb_align[CCB_ALIGN_SIZE-1]; /* Filler for alignment */
+ struct ccb _ccb_data;
+
+ char scr_align[SCR_ALIGN_SIZE-1]; /* Filler for alignment */
+ struct script script_data;
+
+ struct scripth scripth_data;
+};
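+
+/*
+** The xxx_align[SIZE-1] fillers above support the alignment trick in
+** ncr_attach(): each aligned pointer is obtained by masking the
+** address of the following member down with the matching ALIGN_MASK,
+** e.g. (sketch, assuming NCB_ALIGN_MASK == ~(NCB_ALIGN_SIZE-1)):
+**
+**	np = (ncb_p) (((u_long) &host_data->_ncb_data) & NCB_ALIGN_MASK);
+**
+** which moves the pointer back by at most SIZE-1 bytes, i.e. never
+** in front of the filler.
+*/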
+
+/*
+** Print something which allows retrieving the controller type, unit,
+** target and lun concerned by a kernel message.
+*/
+
+#define PRINT_LUN(np, target, lun) \
+printf(KERN_INFO "%s-<%d,%d>: ", ncr_name(np), (int) (target), (int) (lun))
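+
+/*
+** Example: for a hypothetical board whose inst_name is "ncr53c875-0"
+** (see the sprintf() in ncr_attach()), target 2 and lun 0 produce the
+** kernel message prefix "ncr53c875-0-<2,0>: ".
+*/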
+
+static void PRINT_ADDR(Scsi_Cmnd *cmd)
+{
+ struct host_data *host_data = (struct host_data *) cmd->host->hostdata;
+ ncb_p np = host_data->ncb;
+ if (np) PRINT_LUN(np, cmd->target, cmd->lun);
+}
+
+/*==========================================================
+**
+** NCR chip clock divisor table.
+** Divisors are multiplied by 10,000,000 in order to make
+** calculations simpler.
+**
+**==========================================================
+*/
+
+#define _5M 5000000
+static u_long div_10M[] =
+ {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
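+
+/*
+** The table above therefore holds 10, 15, 20, 30, 40, 60 and 80
+** million, i.e. the clock divisors 1, 1.5, 2, 3, 4, 6 and 8 scaled
+** by 10,000,000.
+*/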
+
+
+/*===============================================================
+**
+** Prepare io register values used by ncr_init() according
+** to selected and supported features.
+**
+** NCR chips allow burst lengths of 2, 4, 8, 16, 32, 64, 128
+** transfers. 32,64,128 are only supported by 875 and 895 chips.
+** We use log base 2 (burst length) as internal code, with
+** value 0 meaning "burst disabled".
+**
+**===============================================================
+*/
+
+/*
+ * Burst length from burst code.
+ */
+#define burst_length(bc) (!(bc))? 0 : 1 << (bc)
+
+/*
+ * Burst code from io register bits.
+ */
+#define burst_code(dmode, ctest4, ctest5) \
+ (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
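+
+/*
+ * Example: burst_code(0x40, 0x00, 0x04)
+ *	= ((0x40 & 0xc0) >> 6) + (0x04 & 0x04) + 1 = 1 + 4 + 1 = 6,
+ * and burst_length(6) = 1 << 6 = 64 transfers per burst; a CTEST4
+ * value with bit 7 set yields burst code 0, i.e. bursts disabled.
+ */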
+
+/*
+ * Set initial io register bits from burst code.
+ */
+static inline void ncr_init_burst(ncb_p np, u_char bc)
+{
+ np->rv_ctest4 &= ~0x80;
+ np->rv_dmode &= ~(0x3 << 6);
+ np->rv_ctest5 &= ~0x4;
+
+ if (!bc) {
+ np->rv_ctest4 |= 0x80;
+ }
+ else {
+ --bc;
+ np->rv_dmode |= ((bc & 0x3) << 6);
+ np->rv_ctest5 |= (bc & 0x4);
+ }
+}
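+
+/*
+ * ncr_init_burst() is the inverse mapping: for bc = 6 the decremented
+ * value 5 puts (5 & 0x3) << 6 = 0x40 into DMODE and 5 & 0x4 = 0x04
+ * into CTEST5, while bc = 0 simply sets bit 7 of CTEST4 to disable
+ * bursting.
+ */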
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+** Get target set-up from Symbios format NVRAM.
+*/
+
+__initfunc(
+static void
+ ncr_Symbios_setup_target(ncb_p np, int target, Symbios_nvram *nvram)
+)
+{
+ tcb_p tp = &np->target[target];
+ Symbios_target *tn = &nvram->target[target];
+
+ tp->usrsync = tn->sync_period ? (tn->sync_period + 3) / 4 : 255;
+ tp->usrwide = tn->bus_width == 0x10 ? 1 : 0;
+ tp->usrtags =
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SCSI_NCR_MAX_TAGS : 0;
+
+ if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
+ tp->usrflag |= UF_NODISC;
+ if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
+ tp->usrflag |= UF_NOSCAN;
+}
+
+/*
+** Get target set-up from Tekram format NVRAM.
+*/
+
+__initfunc(
+static void
+ ncr_Tekram_setup_target(ncb_p np, int target, Tekram_nvram *nvram)
+)
+{
+ tcb_p tp = &np->target[target];
+ struct Tekram_target *tn = &nvram->target[target];
+ int i;
+
+ if (tn->flags & TEKRAM_SYNC_NEGO) {
+ i = tn->sync_index & 0xf;
+ tp->usrsync = i < 12 ? Tekram_sync[i] : 255;
+ }
+
+ tp->usrwide = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0;
+
+ if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
+ tp->usrtags = 2 << nvram->max_tags_index;
+ if (tp->usrtags > SCSI_NCR_MAX_TAGS)
+ tp->usrtags = SCSI_NCR_MAX_TAGS;
+ }
+
+ if (!(tn->flags & TEKRAM_DISCONNECT_ENABLE))
+ tp->usrflag = UF_NODISC;
+
+ /* If any device does not support parity, we will not use this option */
+ if (!(tn->flags & TEKRAM_PARITY_CHECK))
+ np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+__initfunc(
+static int ncr_prepare_setting(ncb_p np, ncr_nvram *nvram)
+)
+{
+ u_char burst_max;
+ u_long period;
+ int i;
+
+ /*
+ ** Save assumed BIOS setting
+ */
+
+ np->sv_scntl0 = INB(nc_scntl0) & 0x0a;
+ np->sv_scntl3 = INB(nc_scntl3) & 0x07;
+ np->sv_dmode = INB(nc_dmode) & 0xce;
+ np->sv_dcntl = INB(nc_dcntl) & 0xa8;
+ np->sv_ctest3 = INB(nc_ctest3) & 0x01;
+ np->sv_ctest4 = INB(nc_ctest4) & 0x80;
+ np->sv_ctest5 = INB(nc_ctest5) & 0x24;
+ np->sv_gpcntl = INB(nc_gpcntl);
+ np->sv_stest2 = INB(nc_stest2) & 0x20;
+ np->sv_stest4 = INB(nc_stest4);
+
+ /*
+ ** Wide ?
+ */
+
+ np->maxwide = (np->features & FE_WIDE)? 1 : 0;
+
+ /*
+ ** Get the frequency of the chip's clock.
+ ** Find the right value for scntl3.
+ */
+
+ if (np->features & FE_QUAD)
+ np->multiplier = 4;
+ else if (np->features & FE_DBLR)
+ np->multiplier = 2;
+ else
+ np->multiplier = 1;
+
+ np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
+ np->clock_khz *= np->multiplier;
+
+ if (np->clock_khz != 40000)
+ ncr_getclock(np, np->multiplier);
+
+ /*
+ * Divisor to be used for async (timer pre-scaler).
+ */
+ i = np->clock_divn - 1;
+ while (i >= 0) {
+ --i;
+ if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz > div_10M[i]) {
+ ++i;
+ break;
+ }
+ }
+ np->rv_scntl3 = i+1;
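+
+	/*
+	 * The loop above keeps the smallest divisor for which
+	 * div_10M[i] is still at least 10 * SCSI_NCR_MIN_ASYNC *
+	 * clock_khz, i.e. whose divided clock still meets the minimum
+	 * asynchronous period; i + 1 is the value prepared here for
+	 * the SCNTL3 register.
+	 */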
+
+ /*
+ * Minimum synchronous period factor supported by the chip.
+ * Btw, 'period' is in tenths of nanoseconds.
+ */
+
+ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
+ if (period <= 250) np->minsync = 10;
+ else if (period <= 303) np->minsync = 11;
+ else if (period <= 500) np->minsync = 12;
+ else np->minsync = (period + 40 - 1) / 40;
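+
+	/*
+	 * Example: with the 40000 kHz base clock this gives
+	 * period = (4 * 10000000 + 39999) / 40000 = 1000 (100 ns),
+	 * hence minsync = 25 (Fast-10); the doubled and quadrupled
+	 * clocks (80000 and 160000 kHz) give minsync 12 and 10,
+	 * matching the Fast-20/Fast-40 figures announced below
+	 * (before the feature checks that follow).
+	 */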
+
+ /*
+ * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
+ */
+
+ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2)))
+ np->minsync = 25;
+ else if (np->minsync < 12 && !(np->features & FE_ULTRA2))
+ np->minsync = 12;
+
+ /*
+ * Maximum synchronous period factor supported by the chip.
+ */
+
+ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+ np->maxsync = period > 2540 ? 254 : period / 10;
+
+ /*
+ ** Prepare initial value of other IO registers
+ */
+#if defined SCSI_NCR_TRUST_BIOS_SETTING
+ np->rv_scntl0 = np->sv_scntl0;
+ np->rv_dmode = np->sv_dmode;
+ np->rv_dcntl = np->sv_dcntl;
+ np->rv_ctest3 = np->sv_ctest3;
+ np->rv_ctest4 = np->sv_ctest4;
+ np->rv_ctest5 = np->sv_ctest5;
+ burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+#else
+
+ /*
+ ** Select burst length (dwords)
+ */
+ burst_max = driver_setup.burst_max;
+ if (burst_max == 255)
+ burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+ if (burst_max > 7)
+ burst_max = 7;
+ if (burst_max > np->maxburst)
+ burst_max = np->maxburst;
+
+ /*
+ ** Select all supported special features
+ */
+ if (np->features & FE_ERL)
+ np->rv_dmode |= ERL; /* Enable Read Line */
+ if (np->features & FE_BOF)
+ np->rv_dmode |= BOF; /* Burst Opcode Fetch */
+ if (np->features & FE_ERMP)
+ np->rv_dmode |= ERMP; /* Enable Read Multiple */
+ if (np->features & FE_PFEN)
+ np->rv_dcntl |= PFEN; /* Prefetch Enable */
+ if (np->features & FE_CLSE)
+ np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
+ if (np->features & FE_WRIE)
+ np->rv_ctest3 |= WRIE; /* Write and Invalidate */
+ if (np->features & FE_DFS)
+ np->rv_ctest5 |= DFS; /* Dma Fifo Size */
+
+ /*
+ ** Select some other
+ */
+ if (driver_setup.master_parity)
+ np->rv_ctest4 |= MPEE; /* Master parity checking */
+ if (driver_setup.scsi_parity)
+ np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ /*
+ ** Get parity checking, host ID and verbose mode from NVRAM
+ **/
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_TEKRAM_NVRAM:
+ np->myaddr = nvram->data.Tekram.host_id & 0x0f;
+ break;
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
+ np->rv_scntl0 &= ~0x0a;
+ np->myaddr = nvram->data.Symbios.host_id & 0x0f;
+ if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
+ np->verbose += 1;
+ break;
+ }
+ }
+#endif
+ /*
+ ** Get SCSI addr of host adapter (set by bios?).
+ */
+ if (!np->myaddr) np->myaddr = INB(nc_scid) & 0x07;
+ if (!np->myaddr) np->myaddr = SCSI_NCR_MYADDR;
+
+
+#endif /* SCSI_NCR_TRUST_BIOS_SETTING */
+
+ /*
+ * Prepare initial io register bits for burst length
+ */
+ ncr_init_burst(np, burst_max);
+
+ /*
+ ** Set differential mode and LED support.
+ ** Ignore these features for boards known to use a
+ ** specific GPIO wiring (Tekram only for now).
+ ** Probe initial setting of GPREG and GPCNTL for
+ ** other ones.
+ */
+ if (!nvram || nvram->type != SCSI_NCR_TEKRAM_NVRAM) {
+ switch(driver_setup.diff_support) {
+ case 3:
+ if (INB(nc_gpreg) & 0x08)
+ break;
+ case 2:
+ np->rv_stest2 |= 0x20;
+ break;
+ case 1:
+ np->rv_stest2 |= (np->sv_stest2 & 0x20);
+ break;
+ default:
+ break;
+ }
+ }
+ if ((driver_setup.led_pin ||
+ (nvram && nvram->type == SCSI_NCR_SYMBIOS_NVRAM)) &&
+ !(np->sv_gpcntl & 0x01))
+ np->features |= FE_LED0;
+
+ /*
+ ** Set irq mode.
+ */
+ switch(driver_setup.irqm) {
+ case 2:
+ np->rv_dcntl |= IRQM;
+ break;
+ case 1:
+ np->rv_dcntl |= (np->sv_dcntl & IRQM);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ ** Configure targets according to driver setup.
+ ** If NVRAM present get targets setup from NVRAM.
+	** Allow overriding sync, wide and NOSCAN from
+	** the boot command line.
+ */
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ tcb_p tp = &np->target[i];
+
+ tp->usrsync = 255;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_TEKRAM_NVRAM:
+ ncr_Tekram_setup_target(np, i, &nvram->data.Tekram);
+ break;
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ ncr_Symbios_setup_target(np, i, &nvram->data.Symbios);
+ break;
+ }
+ if (driver_setup.use_nvram & 0x2)
+ tp->usrsync = driver_setup.default_sync;
+ if (driver_setup.use_nvram & 0x4)
+ tp->usrwide = driver_setup.max_wide;
+ if (driver_setup.use_nvram & 0x8)
+ tp->usrflag &= ~UF_NOSCAN;
+ }
+ else {
+#else
+ if (1) {
+#endif
+ tp->usrsync = driver_setup.default_sync;
+ tp->usrwide = driver_setup.max_wide;
+ tp->usrtags = driver_setup.default_tags;
+ if (!driver_setup.disconnection)
+ np->target[i].usrflag = UF_NODISC;
+ }
+ }
+
+ /*
+	** Announce all that stuff to the user.
+ */
+
+ i = nvram ? nvram->type : 0;
+ printf(KERN_INFO "%s: %sID %d, Fast-%d%s%s\n", ncr_name(np),
+ i == SCSI_NCR_SYMBIOS_NVRAM ? "Symbios format NVRAM, " :
+ (i == SCSI_NCR_TEKRAM_NVRAM ? "Tekram format NVRAM, " : ""),
+ np->myaddr,
+ np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10),
+ (np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity",
+ (np->rv_stest2 & 0x20) ? ", Differential" : "");
+
+ if (bootverbose > 1) {
+ printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
+ np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+
+ printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+ np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+ }
+
+ if (bootverbose && np->paddr2)
+ printf (KERN_INFO "%s: on-board RAM at 0x%lx\n",
+ ncr_name(np), np->paddr2);
+
+ return 0;
+}
+
+
+#ifdef SCSI_NCR_DEBUG_NVRAM
+
+__initfunc(
+void ncr_display_Symbios_nvram(ncb_p np, Symbios_nvram *nvram)
+)
+{
+ int i;
+
+ /* display Symbios nvram host data */
+ printf("%s: HOST ID=%d%s%s%s%s\n",
+ ncr_name(np), nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+	(nvram->flags & SYMBIOS_VERBOSE_MSGS)	? " VERBOSE"	:"",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+ /* display Symbios nvram drive data */
+ for (i = 0 ; i < 15 ; i++) {
+ struct Symbios_target *tn = &nvram->target[i];
+ printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+ ncr_name(np), i,
+ (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+ (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+ tn->bus_width,
+ tn->sync_period / 4,
+ tn->timeout);
+ }
+}
+
+static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
+
+__initfunc(
+void ncr_display_Tekram_nvram(ncb_p np, Tekram_nvram *nvram)
+)
+{
+ int i, tags, boot_delay;
+ char *rem;
+
+ /* display Tekram nvram host data */
+ tags = 2 << nvram->max_tags_index;
+ boot_delay = 0;
+ if (nvram->boot_delay_index < 6)
+ boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+ switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+ default:
+ case 0: rem = ""; break;
+ case 1: rem = " REMOVABLE=boot device"; break;
+ case 2: rem = " REMOVABLE=all"; break;
+ }
+
+ printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ ncr_name(np), nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+ rem, boot_delay, tags);
+
+ /* display Tekram nvram drive data */
+ for (i = 0; i <= 15; i++) {
+ int sync, j;
+ struct Tekram_target *tn = &nvram->target[i];
+ j = tn->sync_index & 0xf;
+ sync = j < 12 ? Tekram_sync[j] : 255;
+ printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
+ ncr_name(np), i,
+ (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+ (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+ (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & TEKRAM_START_CMD) ? " START" : "",
+ (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+ (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+ sync);
+ }
+}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+
+/*
+** Host attach and initialisations.
+**
+** Allocate host data and ncb structure.
+** Request IO region and remap MMIO region.
+** Do chip initialization.
+** If all is OK, install interrupt handling and
+** start the timer daemon.
+*/
+
+__initfunc(
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device)
+)
+{
+ struct host_data *host_data;
+ ncb_p np;
+ struct Scsi_Host *instance = 0;
+ u_long flags = 0;
+ ncr_nvram *nvram = device->nvram;
+
+printf(KERN_INFO "ncr53c%s-%d: rev=0x%02x, base=0x%x, io_port=0x%x, irq=%d\n",
+ device->chip.name, unit, device->chip.revision_id, device->slot.base,
+ device->slot.io_port, device->slot.irq);
+
+ /*
+ ** Allocate host_data structure
+ */
+ if (!(instance = scsi_register(tpnt, sizeof(*host_data))))
+ goto attach_error;
+
+ /*
+ ** Initialize structure.
+ */
+ host_data = (struct host_data *) instance->hostdata;
+
+ /*
+	** Align np and the first ccb to a 32-byte boundary for cache-line
+	** bursting when copying the global header.
+ */
+ np = (ncb_p) (((u_long) &host_data->_ncb_data) & NCB_ALIGN_MASK);
+ host_data->ncb = np;
+ bzero (np, sizeof (*np));
+
+ np->ccb = (ccb_p) (((u_long) &host_data->_ccb_data) & CCB_ALIGN_MASK);
+ bzero (np->ccb, sizeof (*np->ccb));
+
+ /*
+	** Store input information in the host data structure.
+ */
+ strncpy(np->chip_name, device->chip.name, sizeof(np->chip_name) - 1);
+ np->unit = unit;
+ np->verbose = driver_setup.verbose;
+ sprintf(np->inst_name, "ncr53c%s-%d", np->chip_name, np->unit);
+ np->device_id = device->chip.device_id;
+ np->revision_id = device->chip.revision_id;
+ np->features = device->chip.features;
+ np->clock_divn = device->chip.nr_divisor;
+ np->maxoffs = device->chip.offset_max;
+ np->maxburst = device->chip.burst_max;
+
+ np->script0 =
+ (struct script *) (((u_long) &host_data->script_data) & SCR_ALIGN_MASK);
+ np->scripth0 = &host_data->scripth_data;
+
+ /*
+ ** Initialize timer structure
+ **
+ */
+ init_timer(&np->timer);
+ np->timer.data = (unsigned long) np;
+ np->timer.function = ncr53c8xx_timeout;
+
+ /*
+ ** Try to map the controller chip to
+ ** virtual and physical memory.
+ */
+
+ np->paddr = device->slot.base;
+ np->paddr2 = (np->features & FE_RAM)? device->slot.base_2 : 0;
+
+#ifndef NCR_IOMAPPED
+ np->vaddr = remap_pci_mem((u_long) np->paddr, (u_long) 128);
+ if (!np->vaddr) {
+ printf("%s: can't map memory mapped IO region\n", ncr_name(np));
+ goto attach_error;
+ }
+ else
+ if (bootverbose > 1)
+ printf("%s: using memory mapped IO at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr);
+
+ /*
+ ** Make the controller's registers available.
+ ** Now the INB INW INL OUTB OUTW OUTL macros
+ ** can be used safely.
+ */
+
+ np->reg = (struct ncr_reg*) np->vaddr;
+
+#endif /* !defined NCR_IOMAPPED */
+
+ /*
+ ** Try to map the controller chip into iospace.
+ */
+
+ request_region(device->slot.io_port, 128, "ncr53c8xx");
+ np->port = device->slot.io_port;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_SYMBIOS_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Symbios_nvram(np, &nvram->data.Symbios);
+#endif
+ break;
+ case SCSI_NCR_TEKRAM_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Tekram_nvram(np, &nvram->data.Tekram);
+#endif
+ break;
+ default:
+ nvram = 0;
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ printf("%s: NVRAM: None or invalid data.\n", ncr_name(np));
+#endif
+ }
+ }
+#endif
+
+ /*
+ ** Do chip dependent initialization.
+ */
+ (void)ncr_prepare_setting(np, nvram);
+
+#ifndef NCR_IOMAPPED
+ if (np->paddr2 && sizeof(struct script) <= 4096) {
+ np->vaddr2 = remap_pci_mem((u_long) np->paddr2, (u_long) 4096);
+ if (!np->vaddr2) {
+ printf("%s: can't map memory mapped IO region\n", ncr_name(np));
+ goto attach_error;
+ }
+ else
+ if (bootverbose > 1)
+ printf("%s: on-board ram mapped at virtual address 0x%lx\n", ncr_name(np), (u_long) np->vaddr2);
+ }
+#endif /* !defined NCR_IOMAPPED */
+
+ /*
+ ** Fill Linux host instance structure
+ */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+ instance->max_channel = 0;
+ instance->max_id = np->maxwide ? 16 : 8;
+ instance->max_lun = SCSI_NCR_MAX_LUN;
+#endif
+#ifndef NCR_IOMAPPED
+ instance->base = (char *) np->reg;
+#endif
+ instance->irq = device->slot.irq;
+ instance->io_port = device->slot.io_port;
+ instance->n_io_port = 128;
+ instance->dma_channel = 0;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+ instance->select_queue_depths = ncr53c8xx_select_queue_depths;
+#endif
+
+ /*
+ ** Patch script to physical addresses
+ */
+ ncr_script_fill (&script0, &scripth0);
+
+ np->scripth = np->scripth0;
+ np->p_scripth = vtophys(np->scripth);
+
+ np->script = (np->vaddr2) ? (struct script *) np->vaddr2 : np->script0;
+ np->p_script = (np->vaddr2) ? np->paddr2 : vtophys(np->script0);
+
+ ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
+ ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));
+ np->ccb->p_ccb = vtophys (np->ccb);
+
+ /*
+ ** Patch the script for LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ np->script0->reselect[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01));
+ np->script0->reselect1[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ np->script0->reselect2[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ }
+
+ /*
+ ** init data structure
+ */
+
+ np->jump_tcb.l_cmd = cpu_to_scr(SCR_JUMP);
+ np->jump_tcb.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
+
+ /*
+ ** Reset chip.
+ */
+
+ OUTB (nc_istat, SRST);
+ DELAY (1000);
+ OUTB (nc_istat, 0 );
+
+ /*
+ ** Now check the cache handling of the pci chipset.
+ */
+
+ if (ncr_snooptest (np)) {
+ printf ("CACHE INCORRECTLY CONFIGURED.\n");
+ goto attach_error;
+ };
+
+ /*
+ ** Install the interrupt handler.
+ */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+#ifdef SCSI_NCR_SHARE_IRQ
+ if (bootverbose > 1)
+ printf("%s: requesting shared irq %d (dev_id=0x%lx)\n",
+ ncr_name(np), device->slot.irq, (u_long) np);
+ if (request_irq(device->slot.irq, ncr53c8xx_intr,
+ SA_INTERRUPT|SA_SHIRQ, "ncr53c8xx", np)) {
+#else
+ if (request_irq(device->slot.irq, ncr53c8xx_intr,
+ SA_INTERRUPT, "ncr53c8xx", np)) {
+#endif
+#else
+ if (request_irq(device->slot.irq, ncr53c8xx_intr,
+ SA_INTERRUPT, "ncr53c8xx")) {
+#endif
+ printf("%s: request irq %d failure\n", ncr_name(np), device->slot.irq);
+ goto attach_error;
+ }
+ np->irq = device->slot.irq;
+
+ /*
+ ** After SCSI devices have been opened, we cannot
+ ** reset the bus safely, so we do it here.
+ ** Interrupt handler does the real work.
+ ** Process the reset exception,
+ ** if interrupts are not enabled yet.
+ ** Then enable disconnects.
+ */
+ save_flags(flags); cli();
+ if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) {
+ printf("%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np));
+ restore_flags(flags);
+ goto attach_error;
+ }
+ ncr_exception (np);
+ restore_flags(flags);
+
+ np->disc = 1;
+
+ /*
+ ** The middle-level SCSI driver does not
+	** wait for devices to settle.
+ ** Wait synchronously if more than 2 seconds.
+ */
+ if (driver_setup.settle_delay > 2) {
+ printf("%s: waiting %d seconds for scsi devices to settle...\n",
+ ncr_name(np), driver_setup.settle_delay);
+ DELAY(1000000UL * driver_setup.settle_delay);
+ }
+
+ /*
+ ** Now let the generic SCSI driver
+ ** look for the SCSI devices on the bus ..
+ */
+
+ /*
+ ** start the timeout daemon
+ */
+ np->lasttime=0;
+ ncr_timeout (np);
+
+ /*
+ ** use SIMPLE TAG messages by default
+ */
+#ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG
+ np->order = M_SIMPLE_TAG;
+#endif
+
+ /*
+ ** Done.
+ */
+ if (!the_template) {
+ the_template = instance->hostt;
+ first_host = instance;
+ }
+
+ return 0;
+
+attach_error:
+ if (!instance) return -1;
+ printf("%s: detaching...\n", ncr_name(np));
+#ifndef NCR_IOMAPPED
+ if (np->vaddr) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr, 128);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr, (u_long) 128);
+ }
+ if (np->vaddr2) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr2, 4096);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr2, (u_long) 4096);
+ }
+#endif
+ if (np->port) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing IO region %x[%d]\n", ncr_name(np), np->port, 128);
+#endif
+ release_region(np->port, 128);
+ }
+ if (np->irq) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing irq %d\n", ncr_name(np), np->irq);
+#endif
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+ free_irq(np->irq, np);
+#else
+ free_irq(np->irq);
+#endif
+ }
+ scsi_unregister(instance);
+
+ return -1;
+}
+
+/*==========================================================
+**
+**
+** Start execution of a SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+int ncr_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+ struct Scsi_Host *host = cmd->host;
+/* Scsi_Device *device = cmd->device; */
+ struct host_data *host_data = (struct host_data *) host->hostdata;
+ ncb_p np = host_data->ncb;
+ tcb_p tp = &np->target[cmd->target];
+
+ ccb_p cp;
+ lcb_p lp;
+
+ int segments;
+ u_char qidx, nego, idmsg, *msgptr;
+ u_int msglen, msglen2;
+ u_long flags;
+ int xfer_direction;
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.buffer = NULL;
+
+ /*---------------------------------------------
+ **
+ ** Some shortcuts ...
+ **
+ **---------------------------------------------
+ */
+ if ((cmd->target == np->myaddr ) ||
+ (cmd->target >= MAX_TARGET) ||
+ (cmd->lun >= MAX_LUN )) {
+ return(DID_BAD_TARGET);
+ }
+
+ /*---------------------------------------------
+ **
+ ** Complete the 1st TEST UNIT READY command
+ ** with error condition if the device is
+ ** flagged NOSCAN, in order to speed up
+ ** the boot.
+ **
+ **---------------------------------------------
+ */
+ if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {
+ tp->usrflag &= ~UF_NOSCAN;
+ return DID_BAD_TARGET;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_TINY) {
+ PRINT_ADDR(cmd);
+ printf ("CMD=%x ", cmd->cmnd[0]);
+ }
+
+ /*---------------------------------------------------
+ **
+ ** Assign a ccb / bind cmd.
+ ** If resetting, shorten settle_time if necessary
+ ** in order to avoid spurious timeouts.
+ ** If resetting or no free ccb,
+ ** insert cmd into the waiting list.
+ **
+ **----------------------------------------------------
+ */
+ save_flags(flags); cli();
+
+ if (np->settle_time && cmd->timeout_per_command >= HZ &&
+ np->settle_time > jiffies + cmd->timeout_per_command - HZ) {
+ np->settle_time = jiffies + cmd->timeout_per_command - HZ;
+ }
+
+ if (np->settle_time || !(cp=ncr_get_ccb (np, cmd->target, cmd->lun))) {
+ insert_into_waiting_list(np, cmd);
+ restore_flags(flags);
+ return(DID_OK);
+ }
+ cp->cmd = cmd;
+
+ /*---------------------------------------------------
+ **
+ ** Enable tagged queue if asked by scsi ioctl
+ **
+ **----------------------------------------------------
+ */
+ if (!tp->usrtags && cmd->device && cmd->device->tagged_queue) {
+ tp->usrtags = SCSI_NCR_MAX_TAGS;
+ ncr_setmaxtags (np, tp, SCSI_NCR_MAX_TAGS);
+ }
+
+ /*---------------------------------------------------
+ **
+ ** timestamp
+ **
+ **----------------------------------------------------
+ */
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+ bzero (&cp->phys.header.stamp, sizeof (struct tstamp));
+ cp->phys.header.stamp.start = jiffies;
+#endif
+
+ /*----------------------------------------------------
+ **
+ ** Get device quirks from a speciality table.
+ **
+ ** @GENSCSI@
+ ** This should be a part of the device table
+ ** in "scsi_conf.c".
+ **
+ **----------------------------------------------------
+ */
+ if (tp->quirks & QUIRK_UPDATE) {
+ tp->quirks = ncr_lookup ((char*) &tp->inqdata[0]);
+#ifndef NCR_GETCC_WITHMSG
+ if (tp->quirks) {
+ PRINT_ADDR(cmd);
+ printf ("quirks=%x.\n", tp->quirks);
+ }
+#endif
+ }
+
+ /*---------------------------------------------------
+ **
+ ** negotiation required?
+ **
+ ** Only SCSI-II devices.
+	** Negotiating with SCSI-I devices is dangerous, since
+	** the Synchronous Negotiation protocol is optional, and
+	** INQUIRY data do not contain capabilities in byte 7.
+ **----------------------------------------------------
+ */
+
+ nego = 0;
+
+ if (cmd->lun == 0 && !tp->nego_cp &&
+ (tp->inqdata[2] & 0x7) >= 2 && tp->inqdata[7]) {
+ /*
+ ** negotiate wide transfers ?
+ */
+
+ if (!tp->widedone) {
+ if (tp->inqdata[7] & INQ7_WIDE16) {
+ nego = NS_WIDE;
+ } else
+ tp->widedone=1;
+ };
+
+ /*
+ ** negotiate synchronous transfers?
+ */
+
+ if (!nego && !tp->period) {
+ if ( 1
+#if defined (CDROM_ASYNC)
+ && ((tp->inqdata[0] & 0x1f) != 5)
+#endif
+ && (tp->inqdata[7] & INQ7_SYNC)) {
+ nego = NS_SYNC;
+ } else {
+ tp->period =0xffff;
+ tp->sval = 0xe0;
+ PRINT_ADDR(cmd);
+ printf ("asynchronous.\n");
+ };
+ };
+
+ /*
+ ** remember nego is pending for the target.
+	** Avoid starting a nego for all queued commands
+ ** when tagged command queuing is enabled.
+ */
+
+ if (nego)
+ tp->nego_cp = cp;
+ };
+
+ /*---------------------------------------------------
+ **
+ ** choose a new tag ...
+ **
+ **----------------------------------------------------
+ */
+
+ if ((lp = tp->lp[cmd->lun]) && (lp->usetags)) {
+ /*
+ ** assign a tag to this ccb!
+ */
+ while (!cp->tag) {
+ ccb_p cp2 = lp->next_ccb;
+ lp->lasttag = lp->lasttag % 255 + 1;
+ while (cp2 && cp2->tag != lp->lasttag)
+ cp2 = cp2->next_ccb;
+ if (cp2) continue;
+ cp->tag=lp->lasttag;
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_ADDR(cmd);
+ printf ("using tag #%d.\n", cp->tag);
+ }
+ }
+ } else {
+ cp->tag=0;
+ }
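+
+	/*
+	** The loop above cycles lp->lasttag through 1..255 and rescans
+	** the lun's ccb list until it finds a value that no queued ccb
+	** is already using; tag 0 is reserved for untagged commands.
+	*/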
+
+ /*----------------------------------------------------
+ **
+ ** Build the identify / tag / sdtr message
+ **
+ **----------------------------------------------------
+ */
+
+ idmsg = M_IDENTIFY | cmd->lun;
+
+ if (cp != np->ccb && ((np->disc && !(tp->usrflag & UF_NODISC)) || cp->tag))
+ idmsg |= 0x40;
+
+ msgptr = cp->scsi_smsg;
+ msglen = 0;
+ msgptr[msglen++] = idmsg;
+
+ if (cp->tag) {
+ char tag;
+
+ tag = np->order;
+ if (tag == 0) {
+ /*
+ ** Ordered write ops, unordered read ops.
+ */
+ switch (cmd->cmnd[0]) {
+ case 0x08: /* READ_SMALL (6) */
+ case 0x28: /* READ_BIG (10) */
+ case 0xa8: /* READ_HUGE (12) */
+ tag = M_SIMPLE_TAG;
+ break;
+ default:
+ tag = M_ORDERED_TAG;
+ }
+ }
+ /*
+ ** Have to force ordered tag to avoid timeouts
+ */
+ if ((lp = tp->lp[cmd->lun]) && (lp->force_ordered_tag)) {
+ tag = M_ORDERED_TAG;
+ lp->force_ordered_tag = 0;
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_ADDR(cmd);
+ printf ("Ordered Queue Tag forced\n");
+ }
+ }
+ msgptr[msglen++] = tag;
+ msgptr[msglen++] = cp -> tag;
+ }
+
+ switch (nego) {
+ case NS_SYNC:
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 3;
+ msgptr[msglen++] = M_X_SYNC_REQ;
+ msgptr[msglen++] = tp->maxoffs ? tp->minsync : 0;
+ msgptr[msglen++] = tp->maxoffs;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync msgout: ");
+ ncr_show_msg (&cp->scsi_smsg [msglen-5]);
+ printf (".\n");
+ };
+ break;
+ case NS_WIDE:
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 2;
+ msgptr[msglen++] = M_X_WIDE_REQ;
+ msgptr[msglen++] = tp->usrwide;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide msgout: ");
+ ncr_show_msg (&cp->scsi_smsg [msglen-4]);
+ printf (".\n");
+ };
+ break;
+ };
+
+ /*----------------------------------------------------
+ **
+ ** Build the identify message for getcc.
+ **
+ **----------------------------------------------------
+ */
+
+ cp -> scsi_smsg2 [0] = idmsg;
+ msglen2 = 1;
+
+ /*----------------------------------------------------
+ **
+ ** Build the data descriptors
+ **
+ **----------------------------------------------------
+ */
+
+ segments = ncr_scatter (cp, cp->cmd);
+
+ if (segments < 0) {
+ ncr_free_ccb(np, cp, cmd->target, cmd->lun);
+ restore_flags(flags);
+ return(DID_ERROR);
+ }
+
+ /*----------------------------------------------------
+ **
+ ** Guess xfer direction.
+	** Spare some CPU by testing frequent opcodes here.
+ **
+ **----------------------------------------------------
+ */
+ switch((int) cmd->cmnd[0]) {
+ case 0x08: /* READ(6) 08 */
+ case 0x28: /* READ(10) 28 */
+ case 0xA8: /* READ(12) A8 */
+ xfer_direction = XferIn;
+ break;
+ case 0x0A: /* WRITE(6) 0A */
+ case 0x2A: /* WRITE(10) 2A */
+ case 0xAA: /* WRITE(12) AA */
+ xfer_direction = XferOut;
+ break;
+ default:
+ xfer_direction = guess_xfer_direction((int) cmd->cmnd[0]);
+ break;
+ }
+
+ /*----------------------------------------------------
+ **
+ ** Set the SAVED_POINTER.
+ **
+ **----------------------------------------------------
+ */
+
+ cp->segments = segments;
+ if (!cp->data_len)
+ xfer_direction = XferNone;
+
+ switch (xfer_direction) {
+ u_long endp;
+ default:
+ case XferBoth:
+ cp->phys.header.savep =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, data_io));
+ cp->phys.header.goalp = cp->phys.header.savep;
+ break;
+ case XferIn:
+ endp = NCB_SCRIPT_PHYS (np, data_in) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep = cpu_to_scr(endp - segments*16);
+ break;
+ case XferOut:
+ endp = NCB_SCRIPTH_PHYS (np, data_out) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep = cpu_to_scr(endp - segments*16);
+ break;
+ case XferNone:
+ cp->phys.header.savep =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, no_data));
+ cp->phys.header.goalp = cp->phys.header.savep;
+ break;
+ }
+
+ cp->phys.header.lastp = cp->phys.header.savep;
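+
+	/*
+	** Each scatter entry occupies 16 bytes (4 script words) in the
+	** data_in/data_out tables built by ncr_script_fill(), so savep
+	** starts the transfer at the last 'segments' entries of the
+	** table and goalp points just past the closing CALL, at the
+	** final JUMP to no_data.
+	*/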
+
+ /*----------------------------------------------------
+ **
+ ** fill in ccb
+ **
+ **----------------------------------------------------
+ **
+ **
+ ** physical -> virtual backlink
+ ** Generic SCSI command
+ */
+ cp->phys.header.cp = cp;
+ /*
+ ** Startqueue
+ */
+ cp->phys.header.launch.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+ cp->phys.header.launch.l_cmd = cpu_to_scr(SCR_JUMP);
+ /*
+ ** select
+ */
+ cp->phys.select.sel_id = cmd->target;
+ cp->phys.select.sel_scntl3 = tp->wval;
+ cp->phys.select.sel_sxfer = tp->sval;
+ /*
+ ** message
+ */
+ cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
+ cp->phys.smsg.size = cpu_to_scr(msglen);
+
+ cp->phys.smsg2.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2));
+ cp->phys.smsg2.size = cpu_to_scr(msglen2);
+ /*
+ ** command
+ */
+ cp->phys.cmd.addr = cpu_to_scr(vtophys (&cmd->cmnd[0]));
+ cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
+ /*
+ ** sense command
+ */
+ cp->phys.scmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd));
+ cp->phys.scmd.size = cpu_to_scr(6);
+ /*
+ ** patch requested size into sense command
+ */
+ cp->sensecmd[0] = 0x03;
+ cp->sensecmd[1] = cmd->lun << 5;
+ cp->sensecmd[4] = sizeof(cmd->sense_buffer);
+ /*
+ ** sense data
+ */
+ cp->phys.sense.addr =
+ cpu_to_scr(vtophys (&cmd->sense_buffer[0]));
+ cp->phys.sense.size = cpu_to_scr(sizeof(cmd->sense_buffer));
+ /*
+ ** status
+ */
+ cp->actualquirks = tp->quirks;
+ cp->host_status = nego ? HS_NEGOTIATE : HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+ cp->parity_status = 0;
+
+ cp->xerr_status = XE_OK;
+ cp->sync_status = tp->sval;
+ cp->nego_status = nego;
+ cp->wide_status = tp->wval;
+
+ /*----------------------------------------------------
+ **
+ ** Critical region: start this job.
+ **
+ **----------------------------------------------------
+ */
+
+ /*
+ ** reselect pattern and activate this job.
+ */
+
+ cp->jump_ccb.l_cmd =
+ cpu_to_scr((SCR_JUMP ^ IFFALSE (DATA (cp->tag))));
+
+ /* Compute a time limit greater than the middle-level driver one */
+ if (cmd->timeout_per_command > 0)
+ cp->tlimit = jiffies + cmd->timeout_per_command + NCR_TIMEOUT_INCREASE;
+ else
+ cp->tlimit = jiffies + 3600 * HZ; /* No timeout=one hour */
+ cp->magic = CCB_MAGIC;
+
+ /*
+ ** insert into start queue.
+ */
+
+ qidx = np->squeueput + 1;
+ if (qidx >= MAX_START) qidx=0;
+ np->squeue [qidx ] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->squeue [np->squeueput] = cpu_to_scr(CCB_PHYS (cp, phys));
+ np->squeueput = qidx;
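+
+	/*
+	** The start queue is circular with MAX_START slots: the slot
+	** following the new entry is pre-loaded with a jump to the
+	** idle label so the SCRIPTS processor stops there, the current
+	** slot receives the ccb's physical address, and the put index
+	** advances, wrapping to 0.
+	*/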
+
+ if(DEBUG_FLAGS & DEBUG_QUEUE)
+ printf ("%s: queuepos=%d tryoffset=%d.\n", ncr_name (np),
+ np->squeueput,
+ (unsigned)(scr_to_cpu(np->script->startpos[0]) -
+ (NCB_SCRIPTH_PHYS (np, tryloop))));
+
+ /*
+ ** Script processor may be waiting for reselect.
+ ** Wake it up.
+ */
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (!np->stalling)
+#endif
+ OUTB (nc_istat, SIGP);
+
+ /*
+ ** and reenable interrupts
+ */
+ restore_flags(flags);
+
+ /*
+ ** Command is successfully queued.
+ */
+
+ return(DID_OK);
+}
+
+/*==========================================================
+**
+**
+** Start reset process.
+** If reset in progress do nothing.
+** The interrupt handler will reinitialize the chip.
+** The timeout handler will wait for settle_time before
+** clearing it and so resuming command processing.
+**
+**
+**==========================================================
+*/
+static void ncr_start_reset(ncb_p np, int settle_delay)
+{
+ u_long flags;
+
+ save_flags(flags); cli();
+
+ if (!np->settle_time) {
+ (void) ncr_reset_scsi_bus(np, 1, settle_delay);
+ }
+ restore_flags(flags);
+}
+
+static int ncr_reset_scsi_bus(ncb_p np, int enab_int, int settle_delay)
+{
+ u_int32 term;
+ int retv = 0;
+
+ np->settle_time = jiffies + settle_delay * HZ;
+
+ if (bootverbose > 1)
+ printf("%s: resetting, "
+ "command processing suspended for %d seconds\n",
+ ncr_name(np), settle_delay);
+
+ OUTB (nc_istat, SRST);
+ DELAY (1000);
+ OUTB (nc_istat, 0);
+ if (enab_int)
+ OUTW (nc_sien, RST);
+ /*
+ ** Enable Tolerant, reset IRQD if present and
+ ** properly set IRQ mode, prior to resetting the bus.
+ */
+ OUTB (nc_stest3, TE);
+ OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
+ OUTB (nc_scntl1, CRST);
+ DELAY (100);
+
+ if (!driver_setup.bus_check)
+ goto out;
+ /*
+ ** Check for no terminators or SCSI bus shorts to ground.
+ ** Read SCSI data bus, data parity bits and control signals.
+ ** We are expecting RESET to be TRUE and other signals to be
+ ** FALSE.
+ */
+ term = INB(nc_sstat0); /* rst, sdp0 */
+ term = ((term & 2) << 7) + ((term & 1) << 16);
+ term |= ((INB(nc_sstat2) & 0x01) << 25) | /* sdp1 */
+ (INW(nc_sbdl) << 9) | /* d15-0 */
+ INB(nc_sbcl); /* req, ack, bsy, sel, atn, msg, cd, io */
+
+ if (!(np->features & FE_WIDE))
+ term &= 0x3ffff;
+
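+	/*
+	** The expected value (2<<7) corresponds to the RST signal alone
+	** being asserted; any other bit set in 'term' means a data or
+	** control line is seen asserted, typically because terminators
+	** are missing or the bus is shorted to ground.
+	*/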
+ if (term != (2<<7)) {
+ printf("%s: suspicious SCSI data while resetting the BUS.\n",
+ ncr_name(np));
+ printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+ "0x%lx, expecting 0x%lx\n",
+ ncr_name(np),
+ (np->features & FE_WIDE) ? "dp1,d15-8," : "",
+ (u_long)term, (u_long)(2<<7));
+ if (driver_setup.bus_check == 1)
+ retv = 1;
+ }
+out:
+ OUTB (nc_scntl1, 0);
+ return retv;
+}
+
+/*==========================================================
+**
+**
+** Reset the SCSI BUS.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+int ncr_reset_bus (Scsi_Cmnd *cmd, int sync_reset)
+{
+ struct Scsi_Host *host = cmd->host;
+/* Scsi_Device *device = cmd->device; */
+ struct host_data *host_data = (struct host_data *) host->hostdata;
+ ncb_p np = host_data->ncb;
+ ccb_p cp;
+ u_long flags;
+ int found;
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (np->stalling)
+ np->stalling = 0;
+#endif
+
+ save_flags(flags); cli();
+/*
+ * Return immediately if reset is in progress.
+ */
+ if (np->settle_time) {
+ restore_flags(flags);
+ return SCSI_RESET_PUNT;
+ }
+/*
+ * Start the reset process.
+ * The script processor is then assumed to be stopped.
+ * Commands will now be queued in the waiting list until a settle
+ * delay of 2 seconds will be completed.
+ */
+ ncr_start_reset(np, driver_setup.settle_delay);
+/*
+ * First, look in the wakeup list
+ */
+ for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd) {
+ found = 1;
+ break;
+ }
+ }
+/*
+ * Then, look in the waiting list
+ */
+ if (!found && retrieve_from_waiting_list(0, np, cmd))
+ found = 1;
+/*
+ * Wake-up all awaiting commands with DID_RESET.
+ */
+ reset_waiting_list(np);
+/*
+ * Wake-up all pending commands with HS_RESET -> DID_RESET.
+ */
+ ncr_wakeup(np, HS_RESET);
+/*
+ * If the involved command was not in a driver queue, and the
+ * scsi driver told us reset is synchronous, and the command is not
+ * currently in the waiting list, complete it with DID_RESET status,
+ * in order to keep it alive.
+ */
+ if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
+ cmd->result = ScsiResult(DID_RESET, 0);
+ cmd->scsi_done(cmd);
+ }
+
+ restore_flags(flags);
+
+ return SCSI_RESET_SUCCESS;
+}
+
+/*==========================================================
+**
+**
+** Abort an SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_abort_command (Scsi_Cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->host;
+/* Scsi_Device *device = cmd->device; */
+ struct host_data *host_data = (struct host_data *) host->hostdata;
+ ncb_p np = host_data->ncb;
+ ccb_p cp;
+ u_long flags;
+ int found;
+ int retv;
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (np->stalling == 2)
+ np->stalling = 0;
+#endif
+
+ save_flags(flags); cli();
+/*
+ * First, look for the scsi command in the waiting list
+ */
+ if (remove_from_waiting_list(np, cmd)) {
+ cmd->result = ScsiResult(DID_ABORT, 0);
+ cmd->scsi_done(cmd);
+ restore_flags(flags);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * Then, look in the wakeup list
+ */
+ for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ restore_flags(flags);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (np->settle_time) {
+ restore_flags(flags);
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ /*
+ ** Disable reselect.
+ ** Remove it from startqueue.
+ ** Set cp->tlimit to 0. The ncr_timeout() handler will use
+ ** this condition in order to complete the canceled command
+ ** after the script skipped the ccb, if necessary.
+ */
+ cp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+ if (cp->phys.header.launch.l_paddr ==
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select))) {
+ printf ("%s: abort ccb=%p (skip)\n", ncr_name (np), cp);
+ cp->phys.header.launch.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, skip));
+ }
+
+ cp->tlimit = 0;
+ retv = SCSI_ABORT_PENDING;
+
+ /*
+ ** If there are no requests, the script
+ ** processor will sleep on SEL_WAIT_RESEL.
+ ** Let's wake it up, since it may have to work.
+ */
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (!np->stalling)
+#endif
+ OUTB (nc_istat, SIGP);
+
+ restore_flags(flags);
+
+ return retv;
+}
+
+/*==========================================================
+**
+** Linux release module stuff.
+**
+** Called before unloading the module
+** Detach the host.
+** We have to free resources and halt the NCR chip
+**
+**==========================================================
+*/
+
+#ifdef MODULE
+static int ncr_detach(ncb_p np)
+{
+ ccb_p cp;
+ tcb_p tp;
+ lcb_p lp;
+ int target, lun;
+ int i;
+
+ printf("%s: releasing host resources\n", ncr_name(np));
+
+/*
+** Stop the ncr_timeout process
+** Set release_stage to 1 and wait until ncr_timeout() sets it to 2.
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: stopping the timer\n", ncr_name(np));
+#endif
+ np->release_stage = 1;
+ for (i = 50 ; i && np->release_stage != 2 ; i--) DELAY(100000);
+ if (np->release_stage != 2)
+ printf("%s: the timer seems to be already stopped\n", ncr_name(np));
+ else np->release_stage = 2;
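+
+/*
+** With DELAY() taking microseconds (as in the settle-delay wait in
+** ncr_attach()), the loop above polls for up to 50 * 100 ms = 5
+** seconds for ncr_timeout() to acknowledge the shutdown by moving
+** release_stage from 1 to 2.
+*/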
+
+/*
+** Disable chip interrupts
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: disabling chip interrupts\n", ncr_name(np));
+#endif
+ OUTW (nc_sien , 0);
+ OUTB (nc_dien , 0);
+
+/*
+** Free irq
+*/
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing irq %d\n", ncr_name(np), np->irq);
+#endif
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+ free_irq(np->irq, np);
+#else
+ free_irq(np->irq);
+#endif
+
+ /*
+ ** Reset NCR chip
+ ** Restore bios setting for automatic clock detection.
+ */
+
+ printf("%s: resetting chip\n", ncr_name(np));
+ OUTB (nc_istat, SRST);
+ DELAY (1000);
+ OUTB (nc_istat, 0 );
+
+ OUTB(nc_dmode, np->sv_dmode);
+ OUTB(nc_dcntl, np->sv_dcntl);
+ OUTB(nc_ctest3, np->sv_ctest3);
+ OUTB(nc_ctest4, np->sv_ctest4);
+ OUTB(nc_ctest5, np->sv_ctest5);
+ OUTB(nc_gpcntl, np->sv_gpcntl);
+ OUTB(nc_stest2, np->sv_stest2);
+
+ ncr_selectclock(np, np->sv_scntl3);
+
+ /*
+ ** Release Memory mapped IO region and IO mapped region
+ */
+
+#ifndef NCR_IOMAPPED
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr, 128);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr, (u_long) 128);
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr2, 4096);
+#endif
+ unmap_pci_mem((vm_offset_t) np->vaddr2, (u_long) 4096);
+#endif
+
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: releasing IO region %x[%d]\n", ncr_name(np), np->port, 128);
+#endif
+ release_region(np->port, 128);
+
+ /*
+ ** Free allocated ccb(s)
+ */
+
+ while ((cp=np->ccb->link_ccb) != NULL) {
+ np->ccb->link_ccb = cp->link_ccb;
+ if (cp->host_status) {
+ printf("%s: shall free an active ccb (host_status=%d)\n",
+ ncr_name(np), cp->host_status);
+ }
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing ccb (%lx)\n", ncr_name(np), (u_long) cp);
+#endif
+ m_free(cp, sizeof(*cp));
+ }
+
+ /*
+ ** Free allocated tp(s)
+ */
+
+ for (target = 0; target < MAX_TARGET ; target++) {
+ tp=&np->target[target];
+ for (lun = 0 ; lun < MAX_LUN ; lun++) {
+ lp = tp->lp[lun];
+ if (lp) {
+#ifdef DEBUG_NCR53C8XX
+ printf("%s: freeing lp (%lx)\n", ncr_name(np), (u_long) lp);
+#endif
+ m_free(lp, sizeof(*lp));
+ }
+ }
+ }
+
+ printf("%s: host resources successfully released\n", ncr_name(np));
+
+ return 1;
+}
+#endif
+
+/*==========================================================
+**
+**
+** Complete execution of a SCSI command.
+** Signal completion to the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+
+void ncr_complete (ncb_p np, ccb_p cp)
+{
+ Scsi_Cmnd *cmd;
+ tcb_p tp;
+ lcb_p lp;
+
+ /*
+ ** Sanity check
+ */
+
+ if (!cp || (cp->magic!=CCB_MAGIC) || !cp->cmd) return;
+ cp->magic = 1;
+ cp->tlimit= 0;
+ cmd = cp->cmd;
+
+ /*
+ ** No Reselect anymore.
+ */
+ cp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+
+ /*
+ ** No starting.
+ */
+ cp->phys.header.launch.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+
+ /*
+ ** timestamp
+ ** Optional, spare some CPU time
+ */
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+ ncb_profile (np, cp);
+#endif
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printf ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp & 0xfff,
+ cp->host_status,cp->scsi_status);
+
+ cmd = cp->cmd;
+ cp->cmd = NULL;
+ tp = &np->target[cmd->target];
+ lp = tp->lp[cmd->lun];
+
+ /*
+	** We do not queue more than 1 ccb per target
+ ** with negotiation at any time. If this ccb was
+ ** used for negotiation, clear this info in the tcb.
+ */
+
+ if (cp == tp->nego_cp)
+ tp->nego_cp = 0;
+
+ /*
+ ** Check for parity errors.
+ */
+
+ if (cp->parity_status) {
+ PRINT_ADDR(cmd);
+ printf ("%d parity error(s), fallback.\n", cp->parity_status);
+ /*
+ ** fallback to asynch transfer.
+ */
+ tp->usrsync=255;
+ tp->period = 0;
+ }
+
+ /*
+ ** Check for extended errors.
+ */
+
+ if (cp->xerr_status != XE_OK) {
+ PRINT_ADDR(cmd);
+ switch (cp->xerr_status) {
+ case XE_EXTRA_DATA:
+ printf ("extraneous data discarded.\n");
+ break;
+ case XE_BAD_PHASE:
+ printf ("illegal scsi phase (4/5).\n");
+ break;
+ default:
+ printf ("extended error %d.\n", cp->xerr_status);
+ break;
+ }
+ if (cp->host_status==HS_COMPLETE)
+ cp->host_status = HS_FAIL;
+ }
+
+ /*
+ ** Check the status.
+ */
+ if ( (cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_GOOD ||
+ cp->scsi_status == S_COND_MET)) {
+ /*
+ ** All went well (GOOD status).
+ ** CONDITION MET status is returned on
+ ** `Pre-Fetch' or `Search data' success.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ /*
+ ** if (cp->phys.header.lastp != cp->phys.header.goalp)...
+ **
+ ** @RESID@
+ ** Could dig out the correct value for resid,
+ ** but it would be quite complicated.
+ **
+ ** The ah1542.c driver sets it to 0 too ...
+ */
+
+ /*
+ ** Try to assign a ccb to this nexus
+ */
+ ncr_alloc_ccb (np, cmd->target, cmd->lun);
+
+ /*
+	** On INQUIRY command (0x12) save some data.
+	** Clear questionable capabilities.
+ */
+ if (cmd->lun == 0 && cmd->cmnd[0] == 0x12) {
+ if (np->unit < SCSI_NCR_MAX_HOST) {
+ if (driver_setup.force_sync_nego)
+ ((char *) cmd->request_buffer)[7] |= INQ7_SYNC;
+ else
+ ((char *) cmd->request_buffer)[7] &=
+ (target_capabilities[np->unit].and_map[cmd->target]);
+ }
+ bcopy ( cmd->request_buffer,
+ &tp->inqdata,
+ sizeof (tp->inqdata));
+
+ /*
+ ** set number of tags
+ */
+ ncr_setmaxtags (np, tp, driver_setup.default_tags);
+ /*
+ ** prepare negotiation of synch and wide.
+ */
+ ncr_negotiate (np, tp);
+
+ /*
+ ** force quirks update before next command start
+ */
+ tp->quirks |= QUIRK_UPDATE;
+ }
+
+ /*
+ ** Announce changes to the generic driver.
+ */
+ if (lp) {
+ ncr_settags (tp, lp);
+ if (lp->reqlink != lp->actlink)
+ ncr_opennings (np, lp, cmd);
+ };
+
+ tp->bytes += cp->data_len;
+ tp->transfers ++;
+
+ /*
+ ** If tags was reduced due to queue full,
+ ** increase tags if 100 good status received.
+ */
+ if (tp->numtags < tp->maxtags) {
+ ++tp->num_good;
+ if (tp->num_good >= 100) {
+ tp->num_good = 0;
+ ++tp->numtags;
+ if (tp->numtags == 1) {
+ PRINT_ADDR(cmd);
+ printf("tagged command queueing resumed\n");
+ }
+ }
+ }
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == (S_SENSE|S_GOOD) ||
+ cp->scsi_status == (S_SENSE|S_CHECK_COND))) {
+
+ /*
+ ** Check condition code
+ */
+ cmd->result = ScsiResult(DID_OK, S_CHECK_COND);
+
+ if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+ u_char * p = (u_char*) & cmd->sense_buffer;
+ int i;
+ printf ("\n%s: sense data:", ncr_name (np));
+ for (i=0; i<14; i++) printf (" %x", *p++);
+ printf (".\n");
+ }
+
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_BUSY ||
+ cp->scsi_status == S_CONFLICT)) {
+
+ /*
+ ** Target is busy.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_QUEUE_FULL)) {
+
+ /*
+ ** Target is stuffed.
+ */
+ cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+
+ /*
+ ** Suspend tagged queuing and start good status counter.
+ ** Announce changes to the generic driver.
+ */
+ if (tp->numtags) {
+ PRINT_ADDR(cmd);
+ printf("QUEUE FULL! suspending tagged command queueing\n");
+ tp->numtags = 0;
+ tp->num_good = 0;
+ if (lp) {
+ ncr_settags (tp, lp);
+ if (lp->reqlink != lp->actlink)
+ ncr_opennings (np, lp, cmd);
+ };
+ }
+ } else if ((cp->host_status == HS_SEL_TIMEOUT)
+ || (cp->host_status == HS_TIMEOUT)) {
+
+ /*
+ ** No response
+ */
+ cmd->result = ScsiResult(DID_TIME_OUT, cp->scsi_status);
+
+ } else if (cp->host_status == HS_RESET) {
+
+ /*
+ ** SCSI bus reset
+ */
+ cmd->result = ScsiResult(DID_RESET, cp->scsi_status);
+
+ } else if (cp->host_status == HS_ABORTED) {
+
+ /*
+ ** Transfer aborted
+ */
+ cmd->result = ScsiResult(DID_ABORT, cp->scsi_status);
+
+ } else {
+
+ /*
+ ** Other protocol messes
+ */
+ PRINT_ADDR(cmd);
+ printf ("COMMAND FAILED (%x %x) @%p.\n",
+ cp->host_status, cp->scsi_status, cp);
+
+ cmd->result = ScsiResult(DID_ERROR, cp->scsi_status);
+ }
+
+ /*
+ ** trace output
+ */
+
+ if (tp->usrflag & UF_TRACE) {
+ u_char * p;
+ int i;
+ PRINT_ADDR(cmd);
+ printf (" CMD:");
+ p = (u_char*) &cmd->cmnd[0];
+ for (i=0; i<cmd->cmd_len; i++) printf (" %x", *p++);
+
+ if (cp->host_status==HS_COMPLETE) {
+ switch (cp->scsi_status) {
+ case S_GOOD:
+ printf (" GOOD");
+ break;
+ case S_CHECK_COND:
+ printf (" SENSE:");
+ p = (u_char*) &cmd->sense_buffer;
+ for (i=0; i<14; i++)
+ printf (" %x", *p++);
+ break;
+ default:
+ printf (" STAT: %x\n", cp->scsi_status);
+ break;
+ }
+ } else printf (" HOSTERROR: %x", cp->host_status);
+ printf ("\n");
+ }
+
+ /*
+ ** Free this ccb
+ */
+ ncr_free_ccb (np, cp, cmd->target, cmd->lun);
+
+ /*
+ ** requeue awaiting scsi commands
+ */
+ if (np->waiting_list) requeue_waiting_list(np);
+
+ /*
+ ** signal completion to generic driver.
+ */
+ cmd->scsi_done (cmd);
+}
+
+/*==========================================================
+**
+**
+** Signal all (or one) control block done.
+**
+**
+**==========================================================
+*/
+
+void ncr_wakeup (ncb_p np, u_long code)
+{
+ /*
+ ** Starting at the default ccb and following
+ ** the links, complete all jobs with a
+ ** host_status greater than "disconnect".
+ **
+ ** If the "code" parameter is not zero,
+ ** complete all jobs that are not IDLE.
+ */
+
+ ccb_p cp = np->ccb;
+ while (cp) {
+ switch (cp->host_status) {
+
+ case HS_IDLE:
+ break;
+
+ case HS_DISCONNECT:
+ if(DEBUG_FLAGS & DEBUG_TINY) printf ("D");
+ /* fall through */
+
+ case HS_BUSY:
+ case HS_NEGOTIATE:
+ if (!code) break;
+ cp->host_status = code;
+
+ /* fall through */
+
+ default:
+ ncr_complete (np, cp);
+ break;
+ };
+ cp = cp -> link_ccb;
+ };
+}
+
+/*==========================================================
+**
+**
+** Start NCR chip.
+**
+**
+**==========================================================
+*/
+
+void ncr_init (ncb_p np, int reset, char * msg, u_long code)
+{
+ int i;
+
+ /*
+ ** Reset chip if asked, otherwise just clear fifos.
+ */
+ if (reset) {
+ OUTB (nc_istat, SRST);
+ DELAY (10000);
+ }
+ else {
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+ }
+
+ /*
+ ** Message.
+ */
+
+ if (msg) printf (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg);
+
+ /*
+ ** Clear Start Queue
+ */
+ for (i=0;i<MAX_START;i++)
+ np -> squeue [i] = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+
+ /*
+ ** Start at first entry.
+ */
+ np->squeueput = 0;
+ np->script0->startpos[0] = cpu_to_scr(NCB_SCRIPTH_PHYS (np, tryloop));
+ np->script0->start0 [0] = cpu_to_scr(SCR_INT ^ IFFALSE (0));
+
+ /*
+ ** Wakeup all pending jobs.
+ */
+ ncr_wakeup (np, code);
+
+ /*
+ ** Init chip.
+ */
+
+ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */
+ OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
+ /* full arb., ena parity, par->ATN */
+ OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
+
+ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
+
+ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
+ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
+ OUTB (nc_istat , SIGP ); /* Signal Process */
+ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
+ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
+
+ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
+ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
+ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
+
+ OUTB (nc_stest2, EXT|np->rv_stest2); /* Extended Sreq/Sack filtering */
+ OUTB (nc_stest3, TE); /* TolerANT enable */
+ OUTB (nc_stime0, 0x0d ); /* HTH disabled STO 0.4 sec. */
+
+ /*
+ ** Disable disconnects.
+ */
+
+ np->disc = 0;
+
+ /*
+ ** Enable GPIO0 pin for writing if LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ OUTOFFB (nc_gpcntl, 0x01);
+ }
+
+ /*
+ ** Upload the script into on-board RAM
+ */
+ if (np->vaddr2) {
+ if (bootverbose)
+ printf ("%s: copying script fragments into the on-board RAM ...\n", ncr_name(np));
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+ memcpy_toio(np->script, np->script0, sizeof(struct script));
+#else
+ memcpy(np->script, np->script0, sizeof(struct script));
+#endif
+ }
+
+ /*
+ ** enable ints
+ */
+
+ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST);
+ OUTB (nc_dien , MDPE|BF|ABRT|SSI|SIR|IID);
+
+ /*
+ ** For 895/6 enable SBMC interrupt and save current SCSI bus mode.
+ */
+ if (np->features & FE_ULTRA2) {
+ OUTONW (nc_sien, SBMC);
+ np->scsi_mode = INB (nc_stest4) & SMODE;
+ }
+
+ /*
+ ** Fill in target structure.
+ ** Reinitialize usrsync.
+ ** Reinitialize usrwide.
+ ** Prepare sync negotiation according to actual SCSI bus mode.
+ */
+
+ for (i=0;i<MAX_TARGET;i++) {
+ tcb_p tp = &np->target[i];
+
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+
+ if (tp->usrsync != 255) {
+ if (tp->usrsync <= np->maxsync) {
+ if (tp->usrsync < np->minsync) {
+ tp->usrsync = np->minsync;
+ }
+ }
+ else
+ tp->usrsync = 255;
+ };
+
+ if (tp->usrwide > np->maxwide)
+ tp->usrwide = np->maxwide;
+
+ ncr_negotiate (np, tp);
+ }
+
+ /*
+ ** Start script processor.
+ */
+
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+}
+
+/*==========================================================
+**
+** Prepare the negotiation values for wide and
+** synchronous transfers.
+**
+**==========================================================
+*/
+
+static void ncr_negotiate (struct ncb* np, struct tcb* tp)
+{
+ /*
+ ** minsync unit is 4ns !
+ */
+
+ u_long minsync = tp->usrsync;
+
+ /*
+ ** SCSI bus mode limit
+ */
+
+ if (np->scsi_mode && np->scsi_mode == SMODE_SE) {
+ if (minsync < 12) minsync = 12;
+ }
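+	/*
+	**	The sync factor follows the SCSI period-factor convention
+	**	(see ncr_getsync() below: e.g. factor 12 ~ 50 ns), so the
+	**	floor of 12 keeps single-ended buses at FAST-20 or slower.
+	*/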
+
+ /*
+ ** if not scsi 2
+ ** don't believe FAST!
+ */
+
+ if ((minsync < 50) && (tp->inqdata[2] & 0x0f) < 2)
+ minsync=50;
+
+ /*
+ ** our limit ..
+ */
+
+ if (minsync < np->minsync)
+ minsync = np->minsync;
+
+ /*
+ ** divider limit
+ */
+
+ if (minsync > np->maxsync)
+ minsync = 255;
+
+ tp->minsync = minsync;
+ tp->maxoffs = (minsync<255 ? np->maxoffs : 0);
+
+ /*
+ ** period=0: has to negotiate sync transfer
+ */
+
+ tp->period=0;
+
+ /*
+ ** widedone=0: has to negotiate wide transfer
+ */
+ tp->widedone=0;
+}
+
+/*==========================================================
+**
+** Get clock factor and sync divisor for a given
+** synchronous factor period.
+** Returns the clock factor (in sxfer) and scntl3
+** synchronous divisor field.
+**
+**==========================================================
+*/
+
+static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p)
+{
+ u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */
+ int div = np->clock_divn; /* Number of divisors supported */
+ u_long fak; /* Sync factor in sxfer */
+ u_long per; /* Period in tenths of ns */
+ u_long kpc; /* (per * clk) */
+
+ /*
+ ** Compute the synchronous period in tenths of nano-seconds
+ */
+ if (sfac <= 10) per = 250;
+ else if (sfac == 11) per = 303;
+ else if (sfac == 12) per = 500;
+ else per = 40 * sfac;
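+	/*
+	**	E.g. sfac 10 -> per 250 (25 ns), sfac 12 -> per 500
+	**	(50 ns, FAST-20), sfac 25 -> per 1000 (100 ns, FAST-10).
+	*/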
+
+ /*
+ ** Look for the greatest clock divisor that allows an
+ ** input speed faster than the period.
+ */
+ kpc = per * clk;
+ while (--div >= 0)
+ if (kpc >= (div_10M[div] << 2)) break;
+
+ /*
+ ** Calculate the lowest clock factor that allows an output
+ ** speed not faster than the period.
+ */
+ fak = (kpc - 1) / div_10M[div] + 1;
+
+#if 0 /* This optimization does not seem very useful */
+
+ per = (fak * div_10M[div]) / clk;
+
+ /*
+ ** Why not to try the immediate lower divisor and to choose
+ ** the one that allows the fastest output speed ?
+ ** We don't want input speed too much greater than output speed.
+ */
+ if (div >= 1 && fak < 8) {
+ u_long fak2, per2;
+ fak2 = (kpc - 1) / div_10M[div-1] + 1;
+ per2 = (fak2 * div_10M[div-1]) / clk;
+ if (per2 < per && fak2 <= 8) {
+ fak = fak2;
+ per = per2;
+ --div;
+ }
+ }
+#endif
+
+ if (fak < 4) fak = 4; /* Should never happen, too bad ... */
+
+ /*
+ ** Compute and return sync parameters for the ncr
+ */
+ *fakp = fak - 4;
+ *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0);
+}
+
+
+/*==========================================================
+**
+** Set actual values, sync status and patch all ccbs of
+** a target according to new sync/wide agreement.
+**
+**==========================================================
+*/
+
+static void ncr_set_sync_wide_status (ncb_p np, u_char target)
+{
+ ccb_p cp;
+ tcb_p tp = &np->target[target];
+
+ /*
+ ** set actual value and sync_status
+ */
+ OUTB (nc_sxfer, tp->sval);
+ np->sync_st = tp->sval;
+ OUTB (nc_scntl3, tp->wval);
+ np->wide_st = tp->wval;
+
+ /*
+ ** patch ALL ccbs of this target.
+ */
+ for (cp = np->ccb; cp; cp = cp->link_ccb) {
+ if (!cp->cmd) continue;
+ if (cp->cmd->target != target) continue;
+ cp->sync_status = tp->sval;
+ cp->wide_status = tp->wval;
+ };
+}
+
+/*==========================================================
+**
+**	Switch sync mode for current job and its target
+**
+**==========================================================
+*/
+
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer)
+{
+ Scsi_Cmnd *cmd;
+ tcb_p tp;
+ u_char target = INB (nc_ctest0) & 0x0f;
+ u_char idiv;
+
+ assert (cp);
+ if (!cp) return;
+
+ cmd = cp->cmd;
+ assert (cmd);
+ if (!cmd) return;
+ assert (target == (cmd->target & 0xf));
+
+ tp = &np->target[target];
+
+ if (!scntl3 || !(sxfer & 0x1f))
+ scntl3 = np->rv_scntl3;
+ scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) | (np->rv_scntl3 & 0x07);
+
+ /*
+ ** Deduce the value of controller sync period from scntl3.
+ ** period is in tenths of nano-seconds.
+ */
+
+ idiv = ((scntl3 >> 4) & 0x7);
+ if ((sxfer & 0x1f) && idiv)
+ tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+ else
+ tp->period = 0xffff;
+
+ /*
+ ** Stop there if sync parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ PRINT_ADDR(cmd);
+ if (sxfer & 0x01f) {
+ unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+ unsigned mb10 = (f10 + tp->period/2) / tp->period;
+ char *scsi;
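+		/*
+		**	Worked example: with period == 500 (50 ns), a narrow
+		**	transfer gives mb10 == (100000 + 250) / 500 == 200,
+		**	printed below as "20.0 MB/s"; a wide (16 bit) transfer
+		**	doubles f10 and thus reports "40.0 MB/s".
+		*/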
+
+ /*
+ ** Disable extended Sreq/Sack filtering
+ */
+ if (tp->period <= 2000) OUTOFFB (nc_stest2, EXT);
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (tp->period < 500) scsi = "FAST-40";
+ else if (tp->period < 1000) scsi = "FAST-20";
+ else if (tp->period < 2000) scsi = "FAST-10";
+ else scsi = "FAST-5";
+
+ printf ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+ tp->widedone > 1 ? "WIDE " : "",
+ mb10 / 10, mb10 % 10, tp->period / 10, sxfer & 0x1f);
+ } else
+ printf ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+/*==========================================================
+**
+**	Switch wide mode for current job and its target
+** SCSI specs say: a SCSI device that accepts a WDTR
+** message shall reset the synchronous agreement to
+** asynchronous mode.
+**
+**==========================================================
+*/
+
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack)
+{
+ Scsi_Cmnd *cmd;
+ u_short target = INB (nc_ctest0) & 0x0f;
+ tcb_p tp;
+ u_char scntl3;
+ u_char sxfer;
+
+ assert (cp);
+ if (!cp) return;
+
+ cmd = cp->cmd;
+ assert (cmd);
+ if (!cmd) return;
+ assert (target == (cmd->target & 0xf));
+
+ tp = &np->target[target];
+ tp->widedone = wide+1;
+ scntl3 = (tp->wval & (~EWS)) | (wide ? EWS : 0);
+
+ sxfer = ack ? 0 : tp->sval;
+
+ /*
+ ** Stop there if sync/wide parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (bootverbose >= 2) {
+ PRINT_ADDR(cmd);
+ if (scntl3 & EWS)
+ printf ("WIDE SCSI (16 bit) enabled.\n");
+ else
+ printf ("WIDE SCSI disabled.\n");
+ }
+
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+/*==========================================================
+**
+** Switch tagged mode for a target.
+**
+**==========================================================
+*/
+
+static void ncr_setmaxtags (ncb_p np, tcb_p tp, u_long numtags)
+{
+ int l;
+ if (numtags > tp->usrtags)
+ numtags = tp->usrtags;
+ tp->numtags = numtags;
+ tp->maxtags = numtags;
+
+ for (l=0; l<MAX_LUN; l++) {
+ lcb_p lp;
+ u_char wastags;
+
+ if (!tp) break;
+ lp=tp->lp[l];
+ if (!lp) continue;
+
+ wastags = lp->usetags;
+ ncr_settags (tp, lp);
+
+ if (numtags > 1 && lp->reqccbs > 1) {
+ PRINT_LUN(np, tp - np->target, l);
+ printf("using tagged command queueing, up to %ld cmds/lun\n", numtags);
+ }
+ else if (numtags <= 1 && wastags) {
+ PRINT_LUN(np, tp - np->target, l);
+ printf("disabling tagged command queueing\n");
+ }
+ };
+}
+
+static void ncr_settags (tcb_p tp, lcb_p lp)
+{
+ u_char reqtags, tmp;
+
+ if ((!tp) || (!lp)) return;
+
+ /*
+ ** only devices conformant to ANSI Version >= 2
+	**	only devices capable of tagged commands
+ ** only disk devices
+ ** only if enabled by user ..
+ */
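+	/*
+	**	For reference, the INQUIRY fields tested here:
+	**	inqdata[2] bits 0-2 = ANSI-approved version,
+	**	inqdata[7] & INQ7_QUEUE = CmdQue bit,
+	**	inqdata[0] bits 0-4 = peripheral device type
+	**	(0x00 = direct-access, i.e. disk).
+	*/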
+ if (( tp->inqdata[2] & 0x7) >= 2 &&
+ ( tp->inqdata[7] & INQ7_QUEUE) && ((tp->inqdata[0] & 0x1f)==0x00)
+ && tp->numtags > 1) {
+ reqtags = tp->numtags;
+ if (lp->actlink <= 1)
+ lp->usetags=reqtags;
+ } else {
+ reqtags = 1;
+ if (lp->actlink <= 1)
+ lp->usetags=0;
+ };
+
+ /*
+ ** don't announce more than available.
+ */
+ tmp = lp->actccbs;
+ if (tmp > reqtags) tmp = reqtags;
+ lp->reqlink = tmp;
+
+ /*
+ ** don't discard if announced.
+ */
+ tmp = lp->actlink;
+ if (tmp < reqtags) tmp = reqtags;
+ lp->reqccbs = tmp;
+}
+
+/*----------------------------------------------------
+**
+** handle user commands
+**
+**----------------------------------------------------
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+static void ncr_usercmd (ncb_p np)
+{
+ u_char t;
+ tcb_p tp;
+
+ switch (np->user.cmd) {
+
+ case 0: return;
+
+ case UC_SETSYNC:
+ for (t=0; t<MAX_TARGET; t++) {
+ if (!((np->user.target>>t)&1)) continue;
+ tp = &np->target[t];
+ tp->usrsync = np->user.data;
+ ncr_negotiate (np, tp);
+ };
+ break;
+
+ case UC_SETTAGS:
+ if (np->user.data > SCSI_NCR_MAX_TAGS)
+ np->user.data = SCSI_NCR_MAX_TAGS;
+ for (t=0; t<MAX_TARGET; t++) {
+ if (!((np->user.target>>t)&1)) continue;
+ np->target[t].usrtags = np->user.data;
+ ncr_setmaxtags (np, &np->target[t], np->user.data);
+ };
+ break;
+
+ case UC_SETDEBUG:
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ ncr_debug = np->user.data;
+#endif
+ break;
+
+ case UC_SETORDER:
+ np->order = np->user.data;
+ break;
+
+ case UC_SETWIDE:
+ for (t=0; t<MAX_TARGET; t++) {
+ u_long size;
+ if (!((np->user.target>>t)&1)) continue;
+ tp = &np->target[t];
+ size = np->user.data;
+ if (size > np->maxwide) size=np->maxwide;
+ tp->usrwide = size;
+ ncr_negotiate (np, tp);
+ };
+ break;
+
+ case UC_SETFLAG:
+ for (t=0; t<MAX_TARGET; t++) {
+ if (!((np->user.target>>t)&1)) continue;
+ tp = &np->target[t];
+ tp->usrflag = np->user.data;
+ };
+ break;
+
+ case UC_CLEARPROF:
+ bzero(&np->profile, sizeof(np->profile));
+ break;
+#ifdef UC_DEBUG_ERROR_RECOVERY
+ case UC_DEBUG_ERROR_RECOVERY:
+ np->debug_error_recovery = np->user.data;
+ break;
+#endif
+ }
+ np->user.cmd=0;
+}
+#endif
+
+
+/*=====================================================================
+**
+** Embedded error recovery debugging code.
+**
+**=====================================================================
+**
+**	This code is conditioned by SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT.
+**	It can only be enabled after boot-up with a control command.
+**
+** Every 30 seconds the timer handler of the driver decides to
+** change the behaviour of the driver in order to trigger errors.
+**
+** If last command was "debug_error_recovery sge", the driver
+** sets sync offset of all targets that use sync transfers to 2,
+**	and so hopes to provoke a SCSI gross error at the next read operation.
+**
+** If last command was "debug_error_recovery abort", the driver
+** does not signal new scsi commands to the script processor, until
+** it is asked to abort or reset a command by the mid-level driver.
+**
+** If last command was "debug_error_recovery reset", the driver
+** does not signal new scsi commands to the script processor, until
+** it is asked to reset a command by the mid-level driver.
+**
+** If last command was "debug_error_recovery parity", the driver
+** will assert ATN on the next DATA IN phase mismatch, and so will
+** behave as if a parity error had been detected.
+**
+** The command "debug_error_recovery none" makes the driver behave
+**	normally.
+**
+**=====================================================================
+*/
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+static void ncr_trigger_errors (ncb_p np)
+{
+ /*
+ ** If np->debug_error_recovery is not zero, we want to
+ ** simulate common errors in order to test error recovery.
+ */
+ do {
+ static u_long last = 0l;
+
+ if (!np->debug_error_recovery)
+ break;
+ if (!last)
+ last = jiffies;
+ else if (jiffies < last + 30*HZ)
+ break;
+ last = jiffies;
+ /*
+ * This one triggers SCSI gross errors.
+ */
+ if (np->debug_error_recovery == 1) {
+ int i;
+ printf("%s: testing error recovery from SCSI gross error...\n", ncr_name(np));
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ if (np->target[i].sval & 0x1f) {
+ np->target[i].sval &= ~0x1f;
+ np->target[i].sval += 2;
+ }
+ }
+ }
+ /*
+ * This one triggers abort from the mid-level driver.
+ */
+ else if (np->debug_error_recovery == 2) {
+ printf("%s: testing error recovery from mid-level driver abort()...\n", ncr_name(np));
+ np->stalling = 2;
+ }
+ /*
+ * This one triggers reset from the mid-level driver.
+ */
+ else if (np->debug_error_recovery == 3) {
+ printf("%s: testing error recovery from mid-level driver reset()...\n", ncr_name(np));
+ np->stalling = 3;
+ }
+ /*
+	 * This one sets ATN on a phase mismatch in DATA IN phase, and so
+	 * behaves as if a SCSI parity error had been detected.
+ */
+ else if (np->debug_error_recovery == 4) {
+ printf("%s: testing data in parity error...\n", ncr_name(np));
+ np->assert_atn = 1;
+ }
+ } while (0);
+}
+#endif
+
+/*==========================================================
+**
+**
+** ncr timeout handler.
+**
+**
+**==========================================================
+**
+** Misused to keep the driver running when
+** interrupts are not configured correctly.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_timeout (ncb_p np)
+{
+ u_long thistime = jiffies;
+ u_long count = 0;
+ ccb_p cp;
+ u_long flags;
+
+ /*
+ ** If release process in progress, let's go
+ ** Set the release stage from 1 to 2 to synchronize
+ ** with the release process.
+ */
+
+ if (np->release_stage) {
+ if (np->release_stage == 1) np->release_stage = 2;
+ return;
+ }
+
+ np->timer.expires =
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+ jiffies +
+#endif
+ SCSI_NCR_TIMER_INTERVAL;
+
+ add_timer(&np->timer);
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ ncr_trigger_errors (np);
+#endif
+
+ /*
+ ** If we are resetting the ncr, wait for settle_time before
+ ** clearing it. Then command processing will be resumed.
+ */
+ if (np->settle_time) {
+ if (np->settle_time <= thistime) {
+ if (bootverbose > 1)
+ printf("%s: command processing resumed\n", ncr_name(np));
+ save_flags(flags); cli();
+ np->settle_time = 0;
+ np->disc = 1;
+ requeue_waiting_list(np);
+ restore_flags(flags);
+ }
+ return;
+ }
+
+ /*
+ ** Since the generic scsi driver only allows us 0.5 second
+ ** to perform abort of a command, we must look at ccbs about
+ ** every 0.25 second.
+ */
+ if (np->lasttime + (HZ>>2) <= thistime) {
+ /*
+ ** block ncr interrupts
+ */
+ save_flags(flags); cli();
+
+ np->lasttime = thistime;
+
+ /*
+ ** Reset profile data to avoid ugly overflow
+ ** (Limited to 1024 GB for 32 bit architecture)
+ */
+ if (np->profile.num_kbytes > (~0UL >> 2))
+ bzero(&np->profile, sizeof(np->profile));
+
+ /*----------------------------------------------------
+ **
+ ** handle ncr chip timeouts
+ **
+ ** Assumption:
+ ** We have a chance to arbitrate for the
+ ** SCSI bus at least every 10 seconds.
+ **
+ **----------------------------------------------------
+ */
+#if 0
+ if (thistime < np->heartbeat + HZ + HZ)
+ np->latetime = 0;
+ else
+ np->latetime++;
+#endif
+
+ /*----------------------------------------------------
+ **
+ ** handle ccb timeouts
+ **
+ **----------------------------------------------------
+ */
+
+ for (cp=np->ccb; cp; cp=cp->link_ccb) {
+ /*
+ ** look for timed out ccbs.
+ */
+ if (!cp->host_status) continue;
+ count++;
+ /*
+ ** Have to force ordered tag to avoid timeouts
+ */
+ if (cp->cmd && cp->tlimit && cp->tlimit <=
+ thistime + NCR_TIMEOUT_INCREASE + SCSI_NCR_TIMEOUT_ALERT) {
+ lcb_p lp;
+ lp = np->target[cp->cmd->target].lp[cp->cmd->lun];
+ if (lp && !lp->force_ordered_tag) {
+ lp->force_ordered_tag = 1;
+ }
+ }
+ /*
+ ** ncr_abort_command() cannot complete canceled
+ ** commands immediately. It sets tlimit to zero
+		**	and asks the script to skip the scsi process if
+ ** necessary. We have to complete this work here.
+ */
+
+ if (cp->tlimit) continue;
+
+ switch (cp->host_status) {
+
+ case HS_BUSY:
+ case HS_NEGOTIATE:
+ /*
+ ** still in start queue ?
+ */
+ if (cp->phys.header.launch.l_paddr ==
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, skip)))
+ continue;
+
+ /* fall through */
+ case HS_DISCONNECT:
+ cp->host_status=HS_ABORTED;
+ };
+ cp->tag = 0;
+
+ /*
+ ** wakeup this ccb.
+ */
+ ncr_complete (np, cp);
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if (!np->stalling)
+#endif
+ OUTB (nc_istat, SIGP);
+ }
+ restore_flags(flags);
+ }
+
+#ifdef SCSI_NCR_BROKEN_INTR
+ if (INB(nc_istat) & (INTF|SIP|DIP)) {
+
+ /*
+ ** Process pending interrupts.
+ */
+ save_flags(flags); cli();
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("{");
+ ncr_exception (np);
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("}");
+ restore_flags(flags);
+ }
+#endif /* SCSI_NCR_BROKEN_INTR */
+}
+
+/*==========================================================
+**
+** log message for real hard errors
+**
+** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)."
+** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf."
+**
+** exception register:
+** ds: dstat
+** si: sist
+**
+** SCSI bus lines:
+**	so:	control lines as driven by the NCR.
+** si: control lines as seen by NCR.
+** sd: scsi data lines as seen by NCR.
+**
+** wide/fastmode:
+** sxfer: (see the manual)
+** scntl3: (see the manual)
+**
+** current script command:
+**	dsp:	script address (relative to start of script).
+** dbc: first word of script command.
+**
+**	First 16 registers of the chip:
+** r0..rf
+**
+**==========================================================
+*/
+
+static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat)
+{
+ u_int32 dsp;
+ int script_ofs;
+ int script_size;
+ char *script_name;
+ u_char *script_base;
+ int i;
+
+ dsp = INL (nc_dsp);
+
+ if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+ script_ofs = dsp - np->p_script;
+ script_size = sizeof(struct script);
+ script_base = (u_char *) np->script;
+ script_name = "script";
+ }
+ else if (np->p_scripth < dsp &&
+ dsp <= np->p_scripth + sizeof(struct scripth)) {
+ script_ofs = dsp - np->p_scripth;
+ script_size = sizeof(struct scripth);
+ script_base = (u_char *) np->scripth;
+ script_name = "scripth";
+ } else {
+ script_ofs = dsp;
+ script_size = 0;
+ script_base = 0;
+ script_name = "mem";
+ }
+
+ printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
+ ncr_name (np), (unsigned)INB (nc_ctest0)&0x0f, dstat, sist,
+ (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl),
+ (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs,
+ (unsigned)INL (nc_dbc));
+
+ if (((script_ofs & 3) == 0) &&
+ (unsigned)script_ofs < script_size) {
+ printf ("%s: script cmd = %08x\n", ncr_name(np),
+ (int) *(ncrcmd *)(script_base + script_ofs));
+ }
+
+ printf ("%s: regdump:", ncr_name(np));
+ for (i=0; i<16;i++)
+ printf (" %02x", (unsigned)INB_OFF(i));
+ printf (".\n");
+}
+
+/*============================================================
+**
+** ncr chip exception handler.
+**
+**============================================================
+**
+** In normal cases, interrupt conditions occur one at a
+** time. The ncr is able to stack in some extra registers
+**	other interrupts that will occur after the first one.
+**	But several interrupts may occur at the same time.
+**
+** We probably should only try to deal with the normal
+** case, but it seems that multiple interrupts occur in
+** some cases that are not abnormal at all.
+**
+** The most frequent interrupt condition is Phase Mismatch.
+**	We want to service this interrupt quickly.
+** A SCSI parity error may be delivered at the same time.
+** The SIR interrupt is not very frequent in this driver,
+** since the INTFLY is likely used for command completion
+** signaling.
+** The Selection Timeout interrupt may be triggered with
+** IID and/or UDC.
+** The SBMC interrupt (SCSI Bus Mode Change) may probably
+** occur at any time.
+**
+**	This handler tries to deal as cleverly as possible with all
+** the above.
+**
+**============================================================
+*/
+
+void ncr_exception (ncb_p np)
+{
+ u_char istat, dstat;
+ u_short sist;
+ int i;
+
+ /*
+ ** interrupt on the fly ?
+ ** Since the global header may be copied back to a CCB
+ ** using a posted PCI memory write, the last operation on
+ ** the istat register is a READ in order to flush posted
+ ** PCI commands (Btw, the 'do' loop is probably useless).
+ */
+ istat = INB (nc_istat);
+ if (istat & INTF) {
+ do {
+ OUTB (nc_istat, (istat & SIGP) | INTF);
+ istat = INB (nc_istat);
+ } while (istat & INTF);
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
+ np->profile.num_fly++;
+ ncr_wakeup (np, 0);
+ };
+
+ if (!(istat & (SIP|DIP)))
+ return;
+
+ np->profile.num_int++;
+
+ if (istat & CABRT)
+ OUTB (nc_istat, CABRT);
+
+ /*
+ ** Steinbach's Guideline for Systems Programming:
+ ** Never test for an error condition you don't know how to handle.
+ */
+
+ sist = (istat & SIP) ? INW (nc_sist) : 0;
+ dstat = (istat & DIP) ? INB (nc_dstat) : 0;
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printf ("<%d|%x:%x|%x:%x>",
+ (int)INB(nc_scr0),
+ dstat,sist,
+ (unsigned)INL(nc_dsp),
+ (unsigned)INL(nc_dbc));
+
+ /*========================================================
+ ** First, interrupts we want to service cleanly.
+ **
+ ** Phase mismatch is the most frequent interrupt, and
+ ** so we have to service it as quickly and as cleanly
+ ** as possible.
+ ** Programmed interrupts are rarely used in this driver,
+ ** but we must handle them cleanly anyway.
+ ** We try to deal with PAR and SBMC combined with
+ ** some other interrupt(s).
+ **=========================================================
+ */
+
+ if (!(sist & (STO|GEN|HTH|SGE|UDC|RST)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if ((sist & SBMC) && ncr_int_sbmc (np))
+ return;
+ if ((sist & PAR) && ncr_int_par (np))
+ return;
+ if (sist & MA) {
+ ncr_int_ma (np);
+ return;
+ }
+ if (dstat & SIR) {
+ ncr_int_sir (np);
+ return;
+ }
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 2.
+ */
+ if (!(sist & (SBMC|PAR)) && !(dstat & SSI)) {
+ printf( "%s: unknown interrupt(s) ignored, "
+ "ISTAT=%x DSTAT=%x SIST=%x\n",
+ ncr_name(np), istat, dstat, sist);
+ return;
+ }
+
+ OUTONB (nc_dcntl, (STD|NOCOM));
+ return;
+ };
+
+ /*========================================================
+ ** Now, interrupts that need some fixing up.
+	**	Ordering and multiple simultaneous interrupts matter less here.
+ **
+ ** If SRST has been asserted, we just reset the chip.
+ **
+	**	Selection is entirely handled by the chip. If the
+	**	chip says STO, we trust it. It seems that some other
+ ** interrupts may occur at the same time (UDC, IID), so
+ ** we ignore them. In any case we do enough fix-up
+ ** in the service routine.
+ ** We just exclude some fatal dma errors.
+ **=========================================================
+ */
+
+ if (sist & RST) {
+ ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET);
+ return;
+ };
+
+ if ((sist & STO) &&
+ !(dstat & (MDPE|BF|ABRT))) {
+ /*
+ ** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 1.
+ */
+ OUTONB (nc_ctest3, CLF);
+
+ ncr_int_sto (np);
+ return;
+ };
+
+ /*=========================================================
+ ** Now, interrupts we are not able to recover cleanly.
+ ** (At least for the moment).
+ **
+ ** Do the register dump.
+ ** Log message for real hard errors.
+ ** Clear all fifos.
+ ** For MDPE, BF, ABORT, IID, SGE and HTH we reset the
+ ** BUS and the chip.
+ ** We are more soft for UDC.
+ **=========================================================
+ */
+ if (jiffies - np->regtime > 10*HZ) {
+ np->regtime = jiffies;
+ for (i = 0; i<sizeof(np->regdump); i++)
+ ((char*)&np->regdump)[i] = INB_OFF(i);
+ np->regdump.nc_dstat = dstat;
+ np->regdump.nc_sist = sist;
+ };
+
+ ncr_log_hard_error(np, sist, dstat);
+
+ printf ("%s: have to clear fifos.\n", ncr_name (np));
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+
+ if ((sist & (SGE)) ||
+ (dstat & (MDPE|BF|ABORT|IID))) {
+ ncr_start_reset(np, driver_setup.settle_delay);
+ return;
+ };
+
+ if (sist & HTH) {
+ printf ("%s: handshake timeout\n", ncr_name(np));
+ ncr_start_reset(np, driver_setup.settle_delay);
+ return;
+ };
+
+ if (sist & UDC) {
+ printf ("%s: unexpected disconnect\n", ncr_name(np));
+ if (INB (nc_scr1) != 0xff) {
+ OUTB (nc_scr1, HS_UNEXPECTED);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup));
+ };
+ ncr_start_reset(np, driver_setup.settle_delay);
+ return;
+ };
+
+ /*=========================================================
+ ** We just miss the cause of the interrupt. :(
+ ** Print a message. The timeout will do the real work.
+ **=========================================================
+ */
+ printf ("%s: unknown interrupt\n", ncr_name(np));
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for selection timeout
+**
+**==========================================================
+**
+** There seems to be a bug in the 53c810.
+** Although a STO-Interrupt is pending,
+** it continues executing script commands.
+** But it will fail and interrupt (IID) on
+** the next instruction where it's looking
+** for a valid phase.
+**
+**----------------------------------------------------------
+*/
+
+void ncr_int_sto (ncb_p np)
+{
+ u_long dsa, scratcha, diff;
+ ccb_p cp;
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
+
+ /*
+ ** look for ccb and set the status.
+ */
+
+ dsa = INL (nc_dsa);
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ if (cp) {
+ cp-> host_status = HS_SEL_TIMEOUT;
+ ncr_complete (np, cp);
+ };
+
+ /*
+ ** repair start queue
+ */
+
+ scratcha = INL (nc_scratcha);
+ diff = scratcha - NCB_SCRIPTH_PHYS (np, tryloop);
+
+/* assert ((diff <= MAX_START * 20) && !(diff % 20));*/
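+	/*
+	**	scratcha should point into the tryloop; each of its
+	**	MAX_START entries is 20 bytes long, hence the alignment
+	**	and range check below.
+	*/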
+
+ if ((diff <= MAX_START * 20) && !(diff % 20)) {
+ np->script->startpos[0] = cpu_to_scr(scratcha);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+ return;
+ };
+ ncr_init (np, 1, "selection timeout", HS_FAIL);
+ np->disc = 1;
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI bus mode change
+**
+**==========================================================
+**
+** spi2-r12 11.2.3 says a transceiver mode change must
+** generate a reset event and a device that detects a reset
+**	event shall initiate a hard reset. It also says that a
+** device that detects a mode change shall set data transfer
+** mode to eight bit asynchronous, etc...
+** So, just resetting should be enough.
+**
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_int_sbmc (ncb_p np)
+{
+ u_char scsi_mode = INB (nc_stest4) & SMODE;
+
+ printf("%s: SCSI bus mode change from %x to %x.\n",
+ ncr_name(np), np->scsi_mode, scsi_mode);
+
+ np->scsi_mode = scsi_mode;
+
+ /*
+ ** Suspend command processing for 1 second and
+ ** reinitialize all except the chip.
+ */
+ np->settle_time = jiffies + HZ;
+ ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET);
+
+ return 1;
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI parity error.
+**
+**==========================================================
+**
+** SCSI parity errors are handled by the SCSI script.
+** So, we just print some message.
+**
+**----------------------------------------------------------
+*/
+
+static int ncr_int_par (ncb_p np)
+{
+ printf("%s: SCSI parity error detected\n", ncr_name(np));
+ return 0;
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for phase errors.
+**
+**
+**==========================================================
+**
+** We have to construct a new transfer descriptor,
+** to transfer the rest of the current block.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_int_ma (ncb_p np)
+{
+ u_int32 dbc;
+ u_int32 rest;
+ u_int32 dsp;
+ u_int32 dsa;
+ u_int32 nxtdsp;
+ u_int32 *vdsp;
+ u_int32 oadr, olen;
+ u_int32 *tblp;
+ ncrcmd *newcmd;
+ u_char cmd, sbcl;
+ ccb_p cp;
+
+ dsp = INL (nc_dsp);
+ dbc = INL (nc_dbc);
+ sbcl = INB (nc_sbcl);
+
+ cmd = dbc >> 24;
+ rest = dbc & 0xffffff;
+
+ /*
+ ** Take into account dma fifo and various buffers and latches,
+ ** only if the interrupted phase is an OUTPUT phase.
+ */
+
+ if ((cmd & 1) == 0) {
+ u_char ctest5, ss0, ss2;
+ u_short delta;
+
+ ctest5 = (np->rv_ctest5 & DFS) ? INB (nc_ctest5) : 0;
+ if (ctest5 & DFS)
+ delta=(((ctest5 << 8) | (INB (nc_dfifo) & 0xff)) - rest) & 0x3ff;
+ else
+ delta=(INB (nc_dfifo) - rest) & 0x7f;
+
+ /*
+		**	The data in the dma fifo has not been transferred to
+ ** the target -> add the amount to the rest
+ ** and clear the data.
+ ** Check the sstat2 register in case of wide transfer.
+ */
+
+ rest += delta;
+ ss0 = INB (nc_sstat0);
+ if (ss0 & OLF) rest++;
+ if (ss0 & ORF) rest++;
+ if (INB(nc_scntl3) & EWS) {
+ ss2 = INB (nc_sstat2);
+ if (ss2 & OLF1) rest++;
+ if (ss2 & ORF1) rest++;
+ };
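+		/*
+		**	Illustrative example: if the script reports rest == 512
+		**	bytes still to move, the dma fifo holds delta == 64 bytes
+		**	and both output latches (OLF/ORF) are full, then
+		**	512 + 64 + 2 == 578 bytes have not actually reached the
+		**	target yet.
+		*/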
+
+ OUTONB (nc_ctest3, CLF ); /* clear dma fifo */
+ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
+
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printf ("P%x%x RL=%d D=%d SS0=%x ", cmd&7, sbcl&7,
+ (unsigned) rest, (unsigned) delta, ss0);
+
+ } else {
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printf ("P%x%x RL=%d ", cmd&7, sbcl&7, rest);
+ if ((cmd & 7) != 1) {
+ OUTONB (nc_ctest3, CLF );
+ OUTB (nc_stest3, TE|CSF);
+ }
+ }
+
+ /*
+ ** locate matching cp
+ */
+ dsa = INL (nc_dsa);
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ if (!cp) {
+ printf ("%s: SCSI phase error fixup: CCB already dequeued (0x%08lx)\n",
+ ncr_name (np), (u_long) np->header.cp);
+ return;
+ }
+ if (cp != np->header.cp) {
+ printf ("%s: SCSI phase error fixup: CCB address mismatch (0x%08lx != 0x%08lx)\n",
+ ncr_name (np), (u_long) cp, (u_long) np->header.cp);
+/* return;*/
+ }
+
+ /*
+ ** find the interrupted script command,
+ ** and the address at which to continue.
+ */
+
+ if (dsp == vtophys (&cp->patch[2])) {
+ vdsp = &cp->patch[0];
+ nxtdsp = vdsp[3];
+ } else if (dsp == vtophys (&cp->patch[6])) {
+ vdsp = &cp->patch[4];
+ nxtdsp = vdsp[3];
+ } else if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+ vdsp = (u_int32 *) ((char*)np->script - np->p_script + dsp -8);
+ nxtdsp = dsp;
+ } else {
+ vdsp = (u_int32 *) ((char*)np->scripth - np->p_scripth + dsp -8);
+ nxtdsp = dsp;
+ };
+
+ /*
+ ** log the information
+ */
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printf ("\nCP=%p CP2=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+ cp, np->header.cp,
+ (unsigned)dsp,
+ (unsigned)nxtdsp, vdsp, cmd);
+ };
+
+ /*
+ ** get old startaddress and old length.
+ */
+
+ oadr = scr_to_cpu(vdsp[1]);
+
+ if (cmd & 0x10) { /* Table indirect */
+ tblp = (u_int32 *) ((char*) &cp->phys + oadr);
+ olen = scr_to_cpu(tblp[0]);
+ oadr = scr_to_cpu(tblp[1]);
+ } else {
+ tblp = (u_int32 *) 0;
+ olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+ };
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+ (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+ tblp,
+ (unsigned) olen,
+ (unsigned) oadr);
+ };
+
+ /*
+ ** check cmd against assumed interrupted script command.
+ */
+
+ if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) {
+ PRINT_ADDR(cp->cmd);
+ printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
+ (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
+
+ return;
+ }
+
+#ifdef SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT
+ if ((cmd & 7) == 1 && np->assert_atn) {
+ np->assert_atn = 0;
+ OUTONB(nc_socl, CATN);
+ }
+#endif
+
+ /*
+ ** if old phase not dataphase, leave here.
+ */
+
+ if (cmd & 0x06) {
+ PRINT_ADDR(cp->cmd);
+ printf ("phase change %x-%x %d@%08x resid=%d.\n",
+ cmd&7, sbcl&7, (unsigned)olen,
+ (unsigned)oadr, (unsigned)rest);
+
+ OUTONB (nc_dcntl, (STD|NOCOM));
+ return;
+ };
+
+ /*
+ ** choose the correct patch area.
+ ** if savep points to one, choose the other.
+ */
+
+ newcmd = cp->patch;
+ if (cp->phys.header.savep == cpu_to_scr(vtophys (newcmd))) newcmd+=4;
+
+ /*
+ ** fillin the commands
+ */
+
+ newcmd[0] = cpu_to_scr(((cmd & 0x0f) << 24) | rest);
+ newcmd[1] = cpu_to_scr(oadr + olen - rest);
+ newcmd[2] = cpu_to_scr(SCR_JUMP);
+ newcmd[3] = cpu_to_scr(nxtdsp);
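+	/*
+	**	The synthesized patch is thus a move of the remaining "rest"
+	**	bytes starting at oadr + olen - rest (i.e. where the
+	**	interrupted transfer stopped), followed by a JUMP back to
+	**	nxtdsp so the original script resumes.
+	*/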
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ PRINT_ADDR(cp->cmd);
+ printf ("newcmd[%d] %x %x %x %x.\n",
+ (int) (newcmd - cp->patch),
+ (unsigned)scr_to_cpu(newcmd[0]),
+ (unsigned)scr_to_cpu(newcmd[1]),
+ (unsigned)scr_to_cpu(newcmd[2]),
+ (unsigned)scr_to_cpu(newcmd[3]));
+ }
+ /*
+ ** fake the return address (to the patch).
+ ** and restart script processor at dispatcher.
+ */
+ np->profile.num_break++;
+ OUTL (nc_temp, vtophys (newcmd));
+ if ((cmd & 7) == 0)
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch));
+ else
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, checkatn));
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for programmed interrupts.
+**
+**
+**==========================================================
+*/
+
+static int ncr_show_msg (u_char * msg)
+{
+ u_char i;
+ printf ("%x",*msg);
+ if (*msg==M_EXTENDED) {
+ for (i=1;i<8;i++) {
+ if (i-1>msg[1]) break;
+ printf ("-%x",msg[i]);
+ };
+ return (i+1);
+ } else if ((*msg & 0xf0) == 0x20) {
+ printf ("-%x",msg[1]);
+ return (2);
+ };
+ return (1);
+}
+
+void ncr_int_sir (ncb_p np)
+{
+ u_char scntl3;
+ u_char chg, ofs, per, fak, wide;
+ u_char num = INB (nc_dsps);
+ ccb_p cp=0;
+ u_long dsa;
+ u_char target = INB (nc_ctest0) & 0x0f;
+ tcb_p tp = &np->target[target];
+ int i;
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);
+
+ switch (num) {
+ case SIR_SENSE_RESTART:
+ case SIR_STALL_RESTART:
+ break;
+ case SIR_STALL_QUEUE: /* Ignore, just restart the script */
+ goto out;
+
+ default:
+ /*
+ ** lookup the ccb
+ */
+ dsa = INL (nc_dsa);
+ cp = np->ccb;
+ while (cp && (CCB_PHYS (cp, phys) != dsa))
+ cp = cp->link_ccb;
+
+ assert (cp);
+ if (!cp)
+ goto out;
+ assert (cp == np->header.cp);
+ if (cp != np->header.cp)
+ goto out;
+ }
+
+ switch (num) {
+ u_long endp;
+ case SIR_DATA_IO_IS_OUT:
+ case SIR_DATA_IO_IS_IN:
+/*
+** We did not guess the direction of transfer. We have to wait for
+** actual data direction driven by the target before setting
+** pointers. We must patch the global header too.
+*/
+ if (num == SIR_DATA_IO_IS_OUT) {
+ endp = NCB_SCRIPTH_PHYS (np, data_out) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep =
+ cpu_to_scr(endp - cp->segments*16);
+ } else {
+ endp = NCB_SCRIPT_PHYS (np, data_in) + MAX_SCATTER*16;
+ cp->phys.header.goalp = cpu_to_scr(endp + 8);
+ cp->phys.header.savep =
+ cpu_to_scr(endp - cp->segments*16);
+ }
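+		/*
+		**	i.e. savep is placed cp->segments 16-byte scatter
+		**	entries before the end of the chosen data script, so
+		**	exactly the needed number of scatter moves remain
+		**	between savep and goalp.
+		*/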
+
+ cp->phys.header.lastp = cp->phys.header.savep;
+ np->header.savep = cp->phys.header.savep;
+ np->header.goalp = cp->phys.header.goalp;
+ np->header.lastp = cp->phys.header.lastp;
+
+ OUTL (nc_temp, scr_to_cpu(np->header.savep));
+ OUTL (nc_dsp, scr_to_cpu(np->header.savep));
+ return;
+ /* break; */
+
+/*--------------------------------------------------------------------
+**
+** Processing of interrupted getcc selects
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_SENSE_RESTART:
+ /*------------------------------------------
+ ** Script processor is idle.
+ ** Look for interrupted "check cond"
+ **------------------------------------------
+ */
+
+ if (DEBUG_FLAGS & DEBUG_RESTART)
+ printf ("%s: int#%d",ncr_name (np),num);
+ cp = (ccb_p) 0;
+ for (i=0; i<MAX_TARGET; i++) {
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" t%d", i);
+ tp = &np->target[i];
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+");
+ cp = tp->hold_cp;
+ if (!cp) continue;
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf ("+");
+ if ((cp->host_status==HS_BUSY) &&
+ (cp->scsi_status==S_CHECK_COND))
+ break;
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf ("- (remove)");
+ tp->hold_cp = cp = (ccb_p) 0;
+ };
+
+ if (cp) {
+ if (DEBUG_FLAGS & DEBUG_RESTART)
+ printf ("+ restart job ..\n");
+ OUTL (nc_dsa, CCB_PHYS (cp, phys));
+ OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, getcc));
+ return;
+ };
+
+ /*
+ ** no job, resume normal processing
+ */
+ if (DEBUG_FLAGS & DEBUG_RESTART) printf (" -- remove trap\n");
+ np->script->start0[0] = cpu_to_scr(SCR_INT ^ IFFALSE (0));
+ break;
+
+ case SIR_SENSE_FAILED:
+ /*-------------------------------------------
+ ** While trying to select for
+ ** getting the condition code,
+ ** a target reselected us.
+ **-------------------------------------------
+ */
+ if (DEBUG_FLAGS & DEBUG_RESTART) {
+ PRINT_ADDR(cp->cmd);
+ printf ("in getcc reselect by t%d.\n",
+ (int)INB(nc_ssid) & 0x0f);
+ }
+
+ /*
+ ** Mark this job
+ */
+ cp->host_status = HS_BUSY;
+ cp->scsi_status = S_CHECK_COND;
+ np->target[cp->cmd->target].hold_cp = cp;
+
+ /*
+ ** And patch code to restart it.
+ */
+ np->script->start0[0] = cpu_to_scr(SCR_INT);
+ break;
+
+/*-----------------------------------------------------------------------------
+**
+**	Everything you always wanted to know about transfer mode negotiation ...
+**
+** We try to negotiate sync and wide transfer only after
+**	a successful INQUIRY command. We look at byte 7 of the
+**	INQUIRY data to determine the capabilities of the target.
+**
+** When we try to negotiate, we append the negotiation message
+** to the identify and (maybe) simple tag message.
+** The host status field is set to HS_NEGOTIATE to mark this
+** situation.
+**
+**	If the target doesn't answer this message immediately
+** (as required by the standard), the SIR_NEGO_FAIL interrupt
+** will be raised eventually.
+** The handler removes the HS_NEGOTIATE status, and sets the
+** negotiated value to the default (async / nowide).
+**
+** If we receive a matching answer immediately, we check it
+** for validity, and set the values.
+**
+** If we receive a Reject message immediately, we assume the
+** negotiation has failed, and fall back to standard values.
+**
+** If we receive a negotiation message while not in HS_NEGOTIATE
+**	state, it's a target-initiated negotiation. We prepare a
+
+** (hopefully) valid answer, set our parameters, and send back
+** this answer to the target.
+**
+** If the target doesn't fetch the answer (no message out phase),
+** we assume the negotiation has failed, and fall back to default
+** settings.
+**
+** When we set the values, we adjust them in all ccbs belonging
+** to this target, in the controller's register, and in the "phys"
+** field of the controller's struct ncb.
+**
+** Possible cases: hs sir msg_in value send goto
+** We try to negotiate:
+**	-> target doesn't msgin	NEG FAIL	noop	defa.	-	dispatch
+** -> target rejected our msg NEG FAIL reject defa. - dispatch
+** -> target answered (ok) NEG SYNC sdtr set - clrack
+** -> target answered (!ok) NEG SYNC sdtr defa. REJ--->msg_bad
+** -> target answered (ok) NEG WIDE wdtr set - clrack
+** -> target answered (!ok) NEG WIDE wdtr defa. REJ--->msg_bad
+** -> any other msgin NEG FAIL noop defa. - dispatch
+**
+** Target tries to negotiate:
+** -> incoming message --- SYNC sdtr set SDTR -
+** -> incoming message --- WIDE wdtr set WDTR -
+** We sent our answer:
+** -> target doesn't msgout --- PROTO ? defa. - dispatch
+**
+**-----------------------------------------------------------------------------
+*/
+
+ case SIR_NEGO_FAILED:
+ /*-------------------------------------------------------
+ **
+ ** Negotiation failed.
+ ** Target doesn't send an answer message,
+ ** or target rejected our message.
+ **
+ ** Remove negotiation request.
+ **
+ **-------------------------------------------------------
+ */
+ OUTB (HS_PRT, HS_BUSY);
+
+ /* fall through */
+
+ case SIR_NEGO_PROTO:
+ /*-------------------------------------------------------
+ **
+ ** Negotiation failed.
+ ** Target doesn't fetch the answer message.
+ **
+ **-------------------------------------------------------
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("negotiation failed sir=%x status=%x.\n",
+ num, cp->nego_status);
+ };
+
+ /*
+ ** any error in negotiation:
+ ** fall back to default mode.
+ */
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0);
+ break;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+
+ };
+ np->msgin [0] = M_NOOP;
+ np->msgout[0] = M_NOOP;
+ cp->nego_status = 0;
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch));
+ break;
+
+ case SIR_NEGO_SYNC:
+ /*
+ ** Synchronous request message received.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync msgin: ");
+ (void) ncr_show_msg (np->msgin);
+ printf (".\n");
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[4];
+ if (ofs==0) per=255;
+
+ /*
+ ** if target sends SDTR message,
+ ** it CAN transfer synch.
+ */
+
+ if (ofs)
+ tp->inqdata[7] |= INQ7_SYNC;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ if (per < tp->minsync)
+ {chg = 1; per = tp->minsync;}
+ if (ofs > tp->maxoffs)
+ {chg = 1; ofs = tp->maxoffs;}
+
+ /*
+ ** Check against controller limits.
+ */
+ fak = 7;
+ scntl3 = 0;
+ if (ofs != 0) {
+ ncr_getsync(np, per, &fak, &scntl3);
+ if (fak > 7) {
+ chg = 1;
+ ofs = 0;
+ }
+ }
+ if (ofs == 0) {
+ fak = 7;
+ per = 0;
+ scntl3 = 0;
+ tp->minsync = 0;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync: per=%d scntl3=0x%x ofs=%d fak=%d chg=%d.\n",
+ per, scntl3, ofs, fak, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setsync (np, cp, 0, 0xe0);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request.
+ ** Check against the table of target capabilities.
+ ** If target not capable force M_REJECT and asynchronous.
+ */
+ if (np->unit < SCSI_NCR_MAX_HOST) {
+ tp->inqdata[7] &=
+ (target_capabilities[np->unit].and_map[target]);
+ if (!(tp->inqdata[7] & INQ7_SYNC)) {
+ ofs = 0;
+ fak = 7;
+ }
+ }
+
+ /*
+ ** It was a request. Set value and
+ ** prepare an answer message
+ */
+
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 3;
+ np->msgout[2] = M_X_SYNC_REQ;
+ np->msgout[3] = per;
+ np->msgout[4] = ofs;
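+		/*
+		**	i.e. a five-byte extended SDTR message:
+		**	{ EXTENDED MESSAGE, length 3, SDTR code,
+		**	  transfer period factor, REQ/ACK offset }.
+		*/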
+
+ cp->nego_status = NS_SYNC;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("sync msgout: ");
+ (void) ncr_show_msg (np->msgout);
+ printf (".\n");
+ }
+
+ if (!ofs) {
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ return;
+ }
+ np->msgin [0] = M_NOOP;
+
+ break;
+
+ case SIR_NEGO_WIDE:
+ /*
+ ** Wide request message received.
+ */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide msgin: ");
+ (void) ncr_show_msg (np->msgin);
+ printf (".\n");
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ wide = np->msgin[3];
+
+ /*
+ ** if target sends WDTR message,
+ ** it CAN transfer wide.
+ */
+
+ if (wide)
+ tp->inqdata[7] |= INQ7_WIDE16;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (wide > tp->usrwide)
+ {chg = 1; wide = tp->usrwide;}
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide: wide=%d chg=%d.\n", wide, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+
+ case NS_WIDE:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setwide (np, cp, 0, 1);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ ncr_setwide (np, cp, wide, 1);
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request, set value and
+ ** prepare an answer message
+ */
+
+ ncr_setwide (np, cp, wide, 1);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 2;
+ np->msgout[2] = M_X_WIDE_REQ;
+ np->msgout[3] = wide;
+
+ np->msgin [0] = M_NOOP;
+
+ cp->nego_status = NS_WIDE;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printf ("wide msgout: ");
+			(void) ncr_show_msg (np->msgout);
+ printf (".\n");
+ }
+ break;
+
+/*--------------------------------------------------------------------
+**
+** Processing of special messages
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_REJECT_RECEIVED:
+ /*-----------------------------------------------
+ **
+ ** We received a M_REJECT message.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_REJECT received (%x:%x).\n",
+ (unsigned)scr_to_cpu(np->lastmsg), np->msgout[0]);
+ break;
+
+ case SIR_REJECT_SENT:
+ /*-----------------------------------------------
+ **
+ ** We received an unknown message
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_REJECT sent for ");
+ (void) ncr_show_msg (np->msgin);
+ printf (".\n");
+ break;
+
+/*--------------------------------------------------------------------
+**
+** Processing of special messages
+**
+**--------------------------------------------------------------------
+*/
+
+ case SIR_IGN_RESIDUE:
+ /*-----------------------------------------------
+ **
+ ** We received an IGNORE RESIDUE message,
+ ** which couldn't be handled by the script.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_IGN_RESIDUE received, but not yet implemented.\n");
+ break;
+
+ case SIR_MISSING_SAVE:
+ /*-----------------------------------------------
+ **
+	**	We received a DISCONNECT message,
+ ** but the datapointer wasn't saved before.
+ **
+ **-----------------------------------------------
+ */
+
+ PRINT_ADDR(cp->cmd);
+ printf ("M_DISCONNECT received, but datapointer not saved: "
+ "data=%x save=%x goal=%x.\n",
+ (unsigned) INL (nc_temp),
+ (unsigned) scr_to_cpu(np->header.savep),
+ (unsigned) scr_to_cpu(np->header.goalp));
+ break;
+
+#if 0 /* This stuff does not work */
+/*--------------------------------------------------------------------
+**
+** Processing of a "S_QUEUE_FULL" status.
+**
+** The current command has been rejected,
+** because there are too many in the command queue.
+** We have started too many commands for that target.
+**
+** If possible, reinsert at head of queue.
+** Stall queue until there are no disconnected jobs
+** (ncr is REALLY idle). Then restart processing.
+**
+** We should restart the current job after the controller
+** has become idle. But this is not yet implemented.
+**
+**--------------------------------------------------------------------
+*/
+ case SIR_STALL_QUEUE:
+ /*-----------------------------------------------
+ **
+ ** Stall the start queue.
+ **
+ **-----------------------------------------------
+ */
+ PRINT_ADDR(cp->cmd);
+ printf ("queue full.\n");
+
+ np->script->start1[0] = cpu_to_scr(SCR_INT);
+
+ /*
+ ** Try to disable tagged transfers.
+ */
+ ncr_setmaxtags (np, &np->target[target], 0);
+
+ /*
+ ** @QUEUE@
+ **
+ ** Should update the launch field of the
+ ** current job to be able to restart it.
+ ** Then prepend it to the start queue.
+ */
+
+ /* fall through */
+
+ case SIR_STALL_RESTART:
+ /*-----------------------------------------------
+ **
+ ** Enable selecting again,
+ ** if NO disconnected jobs.
+ **
+ **-----------------------------------------------
+ */
+ /*
+ ** Look for a disconnected job.
+ */
+ cp = np->ccb;
+ while (cp && cp->host_status != HS_DISCONNECT)
+ cp = cp->link_ccb;
+
+ /*
+ ** if there is one, ...
+ */
+ if (cp) {
+ /*
+ ** wait for reselection
+ */
+ OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect));
+ return;
+ };
+
+ /*
+ ** else remove the interrupt.
+ */
+
+ printf ("%s: queue empty.\n", ncr_name (np));
+ np->script->start1[0] = cpu_to_scr(SCR_INT ^ IFFALSE (0));
+ break;
+#endif /* This stuff does not work */
+ };
+
+out:
+ OUTONB (nc_dcntl, (STD|NOCOM));
+}
+
+/*==========================================================
+**
+**
+**	Acquire a control block
+**
+**
+**==========================================================
+*/
+
+static ccb_p ncr_get_ccb
+ (ncb_p np, u_long target, u_long lun)
+{
+ lcb_p lp;
+ ccb_p cp = (ccb_p) 0;
+
+ /*
+ ** Lun structure available ?
+ */
+
+ lp = np->target[target].lp[lun];
+
+ if (lp && lp->opennings && (!lp->active || lp->active < lp->reqlink)) {
+
+ cp = lp->next_ccb;
+
+ /*
+ ** Look for free CCB
+ */
+
+ while (cp && cp->magic) cp = cp->next_ccb;
+
+ /*
+ ** Increment active commands and decrement credit.
+ */
+
+ if (cp) {
+ ++lp->active;
+ --lp->opennings;
+ }
+ }
+
+ /*
+ ** if nothing available, take the default.
+ ** DANGEROUS, because this ccb is not suitable for
+ ** reselection.
+ ** If lp->actccbs > 0 wait for a suitable ccb to be free.
+ */
+ if ((!cp) && lp && lp->actccbs > 0)
+ return ((ccb_p) 0);
+
+ if (!cp) cp = np->ccb;
+
+ /*
+ ** Wait until available.
+ */
+#if 0
+ while (cp->magic) {
+ if (flags & SCSI_NOSLEEP) break;
+ if (tsleep ((caddr_t)cp, PRIBIO|PCATCH, "ncr", 0))
+ break;
+ };
+#endif
+
+ if (cp->magic)
+ return ((ccb_p) 0);
+
+ cp->magic = 1;
+ return (cp);
+}
+
+/*==========================================================
+**
+**
+** Release one control block
+**
+**
+**==========================================================
+*/
+
+void ncr_free_ccb (ncb_p np, ccb_p cp, u_long target, u_long lun)
+{
+ lcb_p lp;
+
+ /*
+ ** sanity
+ */
+
+ assert (cp != NULL);
+
+ /*
+ ** Decrement active commands and increment credit.
+ */
+
+ lp = np->target[target].lp[lun];
+ if (lp) {
+ --lp->active;
+ ++lp->opennings;
+ }
+
+ cp -> host_status = HS_IDLE;
+ cp -> magic = 0;
+#if 0
+ if (cp == np->ccb)
+ wakeup ((caddr_t) cp);
+#endif
+}
+
+/*==========================================================
+**
+**
+** Allocation of resources for Targets/Luns/Tags.
+**
+**
+**==========================================================
+*/
+
+static void ncr_alloc_ccb (ncb_p np, u_long target, u_long lun)
+{
+ tcb_p tp;
+ lcb_p lp;
+ ccb_p cp;
+
+ assert (np != NULL);
+
+ if (target>=MAX_TARGET) return;
+ if (lun >=MAX_LUN ) return;
+
+ tp=&np->target[target];
+
+ if (!tp->jump_tcb.l_cmd) {
+ /*
+ ** initialize it.
+ */
+ tp->jump_tcb.l_cmd =
+ cpu_to_scr((SCR_JUMP^IFFALSE (DATA (0x80 + target))));
+ tp->jump_tcb.l_paddr = np->jump_tcb.l_paddr;
+
+ tp->getscr[0] = (np->features & FE_PFEN) ?
+ cpu_to_scr(SCR_COPY(1)):cpu_to_scr(SCR_COPY_F(1));
+ tp->getscr[1] = cpu_to_scr(vtophys (&tp->sval));
+ tp->getscr[2] =
+ cpu_to_scr(np->paddr + offsetof (struct ncr_reg, nc_sxfer));
+
+ tp->getscr[3] = (np->features & FE_PFEN) ?
+ cpu_to_scr(SCR_COPY(1)):cpu_to_scr(SCR_COPY_F(1));
+ tp->getscr[4] = cpu_to_scr(vtophys (&tp->wval));
+ tp->getscr[5] =
+ cpu_to_scr(np->paddr + offsetof (struct ncr_reg, nc_scntl3));
+
+ assert (( (offsetof(struct ncr_reg, nc_sxfer) ^
+ offsetof(struct tcb , sval )) &3) == 0);
+ assert (( (offsetof(struct ncr_reg, nc_scntl3) ^
+ offsetof(struct tcb , wval )) &3) == 0);
+
+ tp->call_lun.l_cmd = cpu_to_scr(SCR_CALL);
+ tp->call_lun.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_lun));
+
+ tp->jump_lcb.l_cmd = cpu_to_scr(SCR_JUMP);
+ tp->jump_lcb.l_paddr = cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
+ np->jump_tcb.l_paddr = cpu_to_scr(vtophys (&tp->jump_tcb));
+ }
+
+ /*
+	**	Logical unit control block
+ */
+ lp = tp->lp[lun];
+ if (!lp) {
+ /*
+ ** Allocate a lcb
+ */
+ lp = (lcb_p) m_alloc (sizeof (struct lcb), LCB_ALIGN_SHIFT);
+ if (!lp) return;
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC) {
+ PRINT_LUN(np, target, lun);
+ printf ("new lcb @%p.\n", lp);
+ }
+
+ /*
+ ** Initialize it
+ */
+ bzero (lp, sizeof (*lp));
+ lp->jump_lcb.l_cmd =
+ cpu_to_scr(SCR_JUMP ^ IFFALSE (DATA (lun)));
+ lp->jump_lcb.l_paddr = tp->jump_lcb.l_paddr;
+
+ lp->call_tag.l_cmd = cpu_to_scr(SCR_CALL);
+ lp->call_tag.l_paddr =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_tag));
+
+ lp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+ lp->jump_ccb.l_paddr =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, aborttag));
+
+ lp->actlink = 1;
+
+ lp->active = 1;
+
+ /*
+ ** Chain into LUN list
+ */
+ tp->jump_lcb.l_paddr = cpu_to_scr(vtophys (&lp->jump_lcb));
+ tp->lp[lun] = lp;
+
+ ncr_setmaxtags (np, tp, driver_setup.default_tags);
+ }
+
+ /*
+ ** Allocate ccbs up to lp->reqccbs.
+ */
+
+ /*
+ ** Limit possible number of ccbs.
+ **
+ ** If tagged command queueing is enabled,
+	**	we can use more than one ccb.
+ */
+ if (np->actccbs >= MAX_START-2) return;
+ if (lp->actccbs && (lp->actccbs >= lp->reqccbs))
+ return;
+
+ /*
+ ** Allocate a ccb
+ */
+ cp = (ccb_p) m_alloc (sizeof (struct ccb), CCB_ALIGN_SHIFT);
+ if (!cp)
+ return;
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC) {
+ PRINT_LUN(np, target, lun);
+ printf ("new ccb @%p.\n", cp);
+ }
+
+ /*
+ ** Count it
+ */
+ lp->actccbs++;
+ np->actccbs++;
+
+ /*
+ ** Initialize it
+ */
+ bzero (cp, sizeof (*cp));
+
+ /*
+ ** Fill in physical addresses
+ */
+
+ cp->p_ccb = vtophys (cp);
+
+ /*
+ ** Chain into reselect list
+ */
+ cp->jump_ccb.l_cmd = cpu_to_scr(SCR_JUMP);
+ cp->jump_ccb.l_paddr = lp->jump_ccb.l_paddr;
+ lp->jump_ccb.l_paddr = cpu_to_scr(CCB_PHYS (cp, jump_ccb));
+ cp->call_tmp.l_cmd = cpu_to_scr(SCR_CALL);
+ cp->call_tmp.l_paddr = cpu_to_scr(NCB_SCRIPT_PHYS (np, resel_tmp));
+
+ /*
+ ** Chain into wakeup list
+ */
+ cp->link_ccb = np->ccb->link_ccb;
+ np->ccb->link_ccb = cp;
+
+ /*
+ ** Chain into CCB list
+ */
+ cp->next_ccb = lp->next_ccb;
+ lp->next_ccb = cp;
+}
+
+/*==========================================================
+**
+**
+** Announce the number of ccbs/tags to the scsi driver.
+**
+**
+**==========================================================
+*/
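+
+/*
+**	Note on the accounting below: lp->opennings is the credit that
+**	ncr_get_ccb() consumes and ncr_free_ccb() returns, while
+**	lp->actlink / lp->reqlink track the command count currently
+**	announced to the scsi driver versus the count we want to
+**	announce.  This function only moves the announced count toward
+**	the requested count, adjusting the credit accordingly.
+*/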
+
+static void ncr_opennings (ncb_p np, lcb_p lp, Scsi_Cmnd * cmd)
+{
+ /*
+ ** want to reduce the number ...
+ */
+ if (lp->actlink > lp->reqlink) {
+
+ /*
+ ** Try to reduce the count.
+		**	We assume we are running at splbio.
+ */
+ u_char diff = lp->actlink - lp->reqlink;
+
+ if (!diff) return;
+
+ if (diff > lp->opennings)
+ diff = lp->opennings;
+
+ lp->opennings -= diff;
+
+ lp->actlink -= diff;
+ if (DEBUG_FLAGS & DEBUG_TAGS)
+ printf ("%s: actlink: diff=%d, new=%d, req=%d\n",
+ ncr_name(np), diff, lp->actlink, lp->reqlink);
+ return;
+ };
+
+ /*
+ ** want to increase the number ?
+ */
+ if (lp->reqlink > lp->actlink) {
+ u_char diff = lp->reqlink - lp->actlink;
+
+ lp->opennings += diff;
+
+ lp->actlink += diff;
+#if 0
+ wakeup ((caddr_t) xp->sc_link);
+#endif
+ if (DEBUG_FLAGS & DEBUG_TAGS)
+ printf ("%s: actlink: diff=%d, new=%d, req=%d\n",
+ ncr_name(np), diff, lp->actlink, lp->reqlink);
+ };
+}
+
+/*==========================================================
+**
+**
+** Build Scatter Gather Block
+**
+**
+**==========================================================
+**
+** The transfer area may be scattered among
+** several non adjacent physical pages.
+**
+** We may use MAX_SCATTER blocks.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** We try to reduce the number of interrupts caused
+** by unexpected phase changes due to disconnects.
+**	A typical hard disk may disconnect before ANY block.
+**	If we wanted to avoid unexpected phase changes at all,
+**	we would have to use a break point every 512 bytes.
+**	Of course the number of scatter/gather blocks is
+**	limited.
+**	Under Linux, the scatter/gather blocks are provided by
+** the generic driver. We just have to copy addresses and
+** sizes to the data segment array.
+*/
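+
+/*
+**	Layout used below: the entries are packed at the END of the
+**	phys.data[] array, so a request with use_sg segments occupies
+**	data[MAX_SCATTER - use_sg] .. data[MAX_SCATTER - 1], and a
+**	plain (non scatter/gather) buffer uses only the last slot.
+**	The returned segment count tells the caller how many entries
+**	are in use (presumably so the data transfer script can be
+**	started at the first used entry).
+*/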
+
+static int ncr_scatter(ccb_p cp, Scsi_Cmnd *cmd)
+{
+ struct scr_tblmove *data;
+ int segment = 0;
+ int use_sg = (int) cmd->use_sg;
+
+#if 0
+ bzero (cp->phys.data, sizeof (cp->phys.data));
+#endif
+ data = cp->phys.data;
+ cp->data_len = 0;
+
+ if (!use_sg) {
+ if (cmd->request_bufflen) {
+ data = &data[MAX_SCATTER - 1];
+ data[0].addr = cpu_to_scr(vtophys(cmd->request_buffer));
+ data[0].size = cpu_to_scr(cmd->request_bufflen);
+ cp->data_len = cmd->request_bufflen;
+ segment = 1;
+ }
+ }
+ else if (use_sg <= MAX_SCATTER) {
+ struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+
+ data = &data[MAX_SCATTER - use_sg];
+ while (segment < use_sg) {
+ data[segment].addr =
+ cpu_to_scr(vtophys(scatter[segment].address));
+ data[segment].size =
+ cpu_to_scr(scatter[segment].length);
+ cp->data_len += scatter[segment].length;
+ ++segment;
+ }
+ }
+ else {
+ return -1;
+ }
+
+ return segment;
+}
+
+/*==========================================================
+**
+**
+**	Test the PCI bus snoop logic :-(
+**
+** Has to be called with interrupts disabled.
+**
+**
+**==========================================================
+*/
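+
+/*
+**	How the snoop test works: the host writes 1 into np->ncr_cache
+**	and the chip's TEMP register is loaded with 2.  A small script
+**	then exchanges the two values.  If PCI cache snooping works,
+**	the host reads back 2 from memory and the chip reads back 1
+**	(into SCRATCHA); mismatches are reported with error bits 1/2/4.
+*/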
+
+#ifndef NCR_IOMAPPED
+__initfunc(
+static int ncr_regtest (struct ncb* np)
+)
+{
+ register volatile u_long data;
+ /*
+ ** ncr registers may NOT be cached.
+ ** write 0xffffffff to a read only register area,
+ ** and try to read it back.
+ */
+ data = 0xffffffff;
+ OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
+ data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
+#if 1
+ if (data == 0xffffffff) {
+#else
+ if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+ printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+ (unsigned) data);
+ return (0x10);
+ };
+ return (0);
+}
+#endif
+
+__initfunc(
+static int ncr_snooptest (struct ncb* np)
+)
+{
+ u_long ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc, err=0;
+ int i;
+#ifndef NCR_IOMAPPED
+ if (np->reg) {
+ err |= ncr_regtest (np);
+ if (err) return (err);
+ }
+#endif
+ /*
+ ** init
+ */
+ pc = NCB_SCRIPTH_PHYS (np, snooptest);
+ host_wr = 1;
+ ncr_wr = 2;
+ /*
+ ** Set memory and register.
+ */
+ np->ncr_cache = cpu_to_scr(host_wr);
+ OUTL (nc_temp, ncr_wr);
+ /*
+ ** Start script (exchange values)
+ */
+ OUTL (nc_dsp, pc);
+ /*
+ ** Wait 'til done (with timeout)
+ */
+ for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
+ if (INB(nc_istat) & (INTF|SIP|DIP))
+ break;
+ /*
+ ** Save termination position.
+ */
+ pc = INL (nc_dsp);
+ /*
+ ** Read memory and register.
+ */
+ host_rd = scr_to_cpu(np->ncr_cache);
+ ncr_rd = INL (nc_scratcha);
+ ncr_bk = INL (nc_temp);
+ /*
+ ** Reset ncr chip
+ */
+ OUTB (nc_istat, SRST);
+ DELAY (1000);
+ OUTB (nc_istat, 0 );
+ /*
+ ** check for timeout
+ */
+ if (i>=NCR_SNOOP_TIMEOUT) {
+ printf ("CACHE TEST FAILED: timeout.\n");
+ return (0x20);
+ };
+ /*
+ ** Check termination position.
+ */
+ if (pc != NCB_SCRIPTH_PHYS (np, snoopend)+8) {
+ printf ("CACHE TEST FAILED: script execution failed.\n");
+ printf ("start=%08lx, pc=%08lx, end=%08lx\n",
+ (u_long) NCB_SCRIPTH_PHYS (np, snooptest), pc,
+ (u_long) NCB_SCRIPTH_PHYS (np, snoopend) +8);
+ return (0x40);
+ };
+ /*
+ ** Show results.
+ */
+ if (host_wr != ncr_rd) {
+ printf ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
+ (int) host_wr, (int) ncr_rd);
+ err |= 1;
+ };
+ if (host_rd != ncr_wr) {
+ printf ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
+ (int) ncr_wr, (int) host_rd);
+ err |= 2;
+ };
+ if (ncr_bk != ncr_wr) {
+ printf ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
+ (int) ncr_wr, (int) ncr_bk);
+ err |= 4;
+ };
+ return (err);
+}
+
+/*==========================================================
+**
+**
+**	Profiling the driver's and the targets' performance.
+**
+**
+**==========================================================
+*/
+
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+
+/*
+** Compute the difference in jiffies ticks.
+*/
+
+#define ncr_delta(from, to) \
+ ( ((to) && (from))? (to) - (from) : -1 )
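+/*
+**	E.g. ncr_delta(10, 25) == 15; if either stamp is still 0
+**	(i.e. that point was never reached) the macro yields -1.
+*/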
+
+#define PROFILE cp->phys.header.stamp
+static void ncb_profile (ncb_p np, ccb_p cp)
+{
+ int co, st, en, di, se, post,work,disc;
+ u_long diff;
+
+ PROFILE.end = jiffies;
+
+ st = ncr_delta (PROFILE.start,PROFILE.status);
+ if (st<0) return; /* status not reached */
+
+ co = ncr_delta (PROFILE.start,PROFILE.command);
+ if (co<0) return; /* command not executed */
+
+ en = ncr_delta (PROFILE.start,PROFILE.end),
+ di = ncr_delta (PROFILE.start,PROFILE.disconnect),
+ se = ncr_delta (PROFILE.start,PROFILE.select);
+ post = en - st;
+
+ /*
+ ** @PROFILE@ Disconnect time invalid if multiple disconnects
+ */
+
+ if (di>=0) disc = se-di; else disc = 0;
+
+ work = (st - co) - disc;
+
+ diff = (np->disc_phys - np->disc_ref) & 0xff;
+ np->disc_ref += diff;
+
+ np->profile.num_trans += 1;
+ if (cp->cmd) {
+ np->profile.num_kbytes += (cp->cmd->request_bufflen >> 10);
+ np->profile.rest_bytes += (cp->cmd->request_bufflen & (0x400-1));
+ if (np->profile.rest_bytes >= 0x400) {
+ ++np->profile.num_kbytes;
+ np->profile.rest_bytes -= 0x400;
+ }
+ }
+ np->profile.num_disc += diff;
+ np->profile.ms_setup += co;
+ np->profile.ms_data += work;
+ np->profile.ms_disc += disc;
+ np->profile.ms_post += post;
+}
+#undef PROFILE
+
+#endif /* SCSI_NCR_PROFILE_SUPPORT */
+
+/*==========================================================
+**
+**
+** Device lookup.
+**
+**	@GENSCSI@ should be integrated into scsiconf.c
+**
+**
+**==========================================================
+*/
+
+struct table_entry {
+ char * manufacturer;
+ char * model;
+ char * version;
+ u_long info;
+};
+
+static struct table_entry device_tab[] =
+{
+#ifdef NCR_GETCC_WITHMSG
+ {"", "", "", QUIRK_NOMSG},
+ {"SONY", "SDT-5000", "3.17", QUIRK_NOMSG},
+ {"WangDAT", "Model 2600", "01.7", QUIRK_NOMSG},
+ {"WangDAT", "Model 3200", "02.2", QUIRK_NOMSG},
+ {"WangDAT", "Model 1300", "02.4", QUIRK_NOMSG},
+#endif
+ {"", "", "", 0} /* catch all: must be last entry. */
+};
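+
+/*
+**	Matching rule used below: each field of a table entry is
+**	compared character by character against the INQUIRY data
+**	(vendor id at offset 8, product id at offset 16, revision at
+**	offset 32).  An empty table string matches anything, so the
+**	final catch-all entry always matches and the loop terminates.
+*/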
+
+static u_long ncr_lookup(char * id)
+{
+ struct table_entry * p = device_tab;
+ char *d, *r, c;
+
+ for (;;p++) {
+
+ d = id+8;
+ r = p->manufacturer;
+ while ((c=*r++)) if (c!=*d++) break;
+ if (c) continue;
+
+ d = id+16;
+ r = p->model;
+ while ((c=*r++)) if (c!=*d++) break;
+ if (c) continue;
+
+ d = id+32;
+ r = p->version;
+ while ((c=*r++)) if (c!=*d++) break;
+ if (c) continue;
+
+ return (p->info);
+ }
+}
+
+/*==========================================================
+**
+** Determine the ncr's clock frequency.
+** This is essential for the negotiation
+** of the synchronous transfer rate.
+**
+**==========================================================
+**
+**	Note: we have to return the correct value.
+**	THERE IS NO SAFE DEFAULT VALUE.
+**
+**	Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
+**	53C860 and 53C875 rev. 1 support fast20 transfers but
+**	do not have a clock doubler and so are provided with an
+**	80 MHz clock. All other fast20 boards incorporate a doubler
+**	and so should be delivered with a 40 MHz clock.
+**	The future fast40 chips (895/895) use a 40 MHz base clock
+**	and provide a clock quadrupler (160 MHz). The code below
+** tries to deal as cleverly as possible with all this stuff.
+**
+**----------------------------------------------------------
+*/
+
+/*
+ * Select NCR SCSI clock frequency
+ */
+static void ncr_selectclock(ncb_p np, u_char scntl3)
+{
+ if (np->multiplier < 2) {
+ OUTB(nc_scntl3, scntl3);
+ return;
+ }
+
+ if (bootverbose >= 2)
+ printf ("%s: enabling clock multiplier\n", ncr_name(np));
+
+ OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */
+ if (np->multiplier > 2) { /* Poll bit 5 of stest4 for quadrupler */
+ int i = 20;
+ while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
+ DELAY(20);
+ if (!i)
+ printf("%s: the chip cannot lock the frequency\n", ncr_name(np));
+ } else /* Wait 20 micro-seconds for doubler */
+ DELAY(20);
+ OUTB(nc_stest3, HSC); /* Halt the scsi clock */
+ OUTB(nc_scntl3, scntl3);
+ OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
+ OUTB(nc_stest3, 0x00|TE); /* Restart scsi clock */
+}
+
+
+/*
+ * calculate NCR SCSI clock frequency (in KHz)
+ */
+__initfunc(
+static unsigned ncrgetfreq (ncb_p np, int gen)
+)
+{
+ unsigned ms = 0;
+
+ /*
+ * Measure GEN timer delay in order
+ * to calculate SCSI clock frequency
+ *
+ * This code will never execute too
+ * many loop iterations (if DELAY is
+ * reasonably correct). It could get
+ * too low a delay (too high a freq.)
+ * if the CPU is slow executing the
+	 * loop for some reason (an NMI, for
+	 * example). For this reason, when
+	 * multiple measurements are performed,
+	 * we trust the higher delay (i.e. the
+	 * lower frequency returned).
+ */
+ OUTB (nc_stest1, 0); /* make sure clock doubler is OFF */
+ OUTW (nc_sien , 0); /* mask all scsi interrupts */
+ (void) INW (nc_sist); /* clear pending scsi interrupt */
+ OUTB (nc_dien , 0); /* mask all dma interrupts */
+ (void) INW (nc_sist); /* another one, just to be sure :) */
+ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
+ while (!(INW(nc_sist) & GEN) && ms++ < 100000)
+ DELAY(1000); /* count ms */
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ /*
+ * set prescaler to divide by whatever 0 means
+ * 0 ought to choose divide by 2, but appears
+ * to set divide by 3.5 mode in my 53c810 ...
+ */
+ OUTB (nc_scntl3, 0);
+
+ if (bootverbose >= 2)
+ printf ("%s: Delay (GEN=%d): %u msec\n", ncr_name(np), gen, ms);
+ /*
+ * adjust for prescaler, and convert into KHz
+ */
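+	/*
+	 * Sanity check: with gen=11 a 40000 KHz clock measures a delay
+	 * of about (1<<11)*4340/40000 ~= 222 ms, and 222 ms maps back
+	 * through this formula to roughly 40000 KHz.
+	 */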
+ return ms ? ((1 << gen) * 4340) / ms : 0;
+}
+
+/*
+ * Get/probe NCR SCSI clock frequency
+ */
+__initfunc(
+static void ncr_getclock (ncb_p np, int mult)
+)
+{
+ unsigned char scntl3 = INB(nc_scntl3);
+ unsigned char stest1 = INB(nc_stest1);
+ unsigned f1;
+
+ np->multiplier = 1;
+ f1 = 40000;
+
+ /*
+ ** True with 875 or 895 with clock multiplier selected
+ */
+ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+ if (bootverbose >= 2)
+ printf ("%s: clock multiplier found\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+
+ /*
+ ** If multiplier not found or scntl3 not 7,5,3,
+ ** reset chip and get frequency from general purpose timer.
+ ** Otherwise trust scntl3 BIOS setting.
+ */
+ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+ unsigned f2;
+
+ OUTB(nc_istat, SRST); DELAY(5); OUTB(nc_istat, 0);
+
+ (void) ncrgetfreq (np, 11); /* throw away first result */
+ f1 = ncrgetfreq (np, 11);
+ f2 = ncrgetfreq (np, 11);
+
+ if (bootverbose)
+ printf ("%s: NCR clock is %uKHz, %uKHz\n", ncr_name(np), f1, f2);
+
+ if (f1 > f2) f1 = f2; /* trust lower result */
+
+ if (f1 < 45000) f1 = 40000;
+ else if (f1 < 55000) f1 = 50000;
+ else f1 = 80000;
+
+ if (f1 < 80000 && mult > 1) {
+ if (bootverbose >= 2)
+ printf ("%s: clock multiplier assumed\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+ } else {
+ if ((scntl3 & 7) == 3) f1 = 40000;
+ else if ((scntl3 & 7) == 5) f1 = 80000;
+ else f1 = 160000;
+
+ f1 /= np->multiplier;
+ }
+
+ /*
+ ** Compute controller synchronous parameters.
+ */
+ f1 *= np->multiplier;
+ np->clock_khz = f1;
+}
+
+/*===================== LINUX ENTRY POINTS SECTION ==========================*/
+
+#ifndef uchar
+#define uchar unsigned char
+#endif
+
+#ifndef ushort
+#define ushort unsigned short
+#endif
+
+#ifndef ulong
+#define ulong unsigned long
+#endif
+
+/* ---------------------------------------------------------------------
+**
+** Driver setup from the boot command line
+**
+** ---------------------------------------------------------------------
+*/
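+
+/*
+**	Illustrative example (the option keyword is registered elsewhere,
+**	typically as "ncr53c8xx="): a boot line such as
+**
+**		ncr53c8xx=disc:y,tags:4,sync:10,verb:1
+**
+**	is parsed below into driver_setup.disconnection = 1,
+**	driver_setup.default_tags = 4, driver_setup.default_sync = 10
+**	and driver_setup.verbose = 1.  Options are separated by ','
+**	(by ' ' when built as a module) and each value is either y/n
+**	or a number.
+*/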
+
+__initfunc(
+void ncr53c8xx_setup(char *str, int *ints)
+)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+ char *cur = str;
+ char *pc, *pv;
+ int val;
+ int base;
+ int c;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ val = 0;
+ pv = pc;
+ c = *++pv;
+ if (c == 'n')
+ val = 0;
+ else if (c == 'y')
+ val = 1;
+ else {
+ base = 0;
+#if 0
+ if (c == '0') {
+ c = *pv++;
+ base = 8;
+ }
+ if (c == 'x') {
+ ++pv;
+ base = 16;
+ }
+ else if (c >= '0' && c <= '9')
+ base = 10;
+ else
+ break;
+#endif
+ val = (int) simple_strtoul(pv, NULL, base);
+ }
+
+ if (!strncmp(cur, "mpar:", 5))
+ driver_setup.master_parity = val;
+ else if (!strncmp(cur, "spar:", 5))
+ driver_setup.scsi_parity = val;
+ else if (!strncmp(cur, "disc:", 5))
+ driver_setup.disconnection = val;
+ else if (!strncmp(cur, "specf:", 6))
+ driver_setup.special_features = val;
+ else if (!strncmp(cur, "ultra:", 6))
+ driver_setup.ultra_scsi = val;
+ else if (!strncmp(cur, "fsn:", 4))
+ driver_setup.force_sync_nego = val;
+ else if (!strncmp(cur, "revprob:", 8))
+ driver_setup.reverse_probe = val;
+ else if (!strncmp(cur, "tags:", 5)) {
+ if (val > SCSI_NCR_MAX_TAGS)
+ val = SCSI_NCR_MAX_TAGS;
+ driver_setup.default_tags = val;
+ }
+ else if (!strncmp(cur, "sync:", 5))
+ driver_setup.default_sync = val;
+ else if (!strncmp(cur, "verb:", 5))
+ driver_setup.verbose = val;
+ else if (!strncmp(cur, "debug:", 6))
+ driver_setup.debug = val;
+ else if (!strncmp(cur, "burst:", 6))
+ driver_setup.burst_max = val;
+ else if (!strncmp(cur, "led:", 4))
+ driver_setup.led_pin = val;
+ else if (!strncmp(cur, "wide:", 5))
+ driver_setup.max_wide = val? 1:0;
+ else if (!strncmp(cur, "settle:", 7))
+ driver_setup.settle_delay= val;
+ else if (!strncmp(cur, "diff:", 5))
+ driver_setup.diff_support= val;
+ else if (!strncmp(cur, "irqm:", 5))
+ driver_setup.irqm = val;
+ else if (!strncmp(cur, "pcifix:", 7))
+ driver_setup.pci_fix_up = val;
+ else if (!strncmp(cur, "buschk:", 7))
+ driver_setup.bus_check = val;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ else if (!strncmp(cur, "nvram:", 6))
+ driver_setup.use_nvram = val;
+#endif
+
+ else if (!strncmp(cur, "safe:", 5) && val)
+ memcpy(&driver_setup, &driver_safe_setup, sizeof(driver_setup));
+ else
+ printf("ncr53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+
+#ifdef MODULE
+ if ((cur = strchr(cur, ' ')) != NULL)
+#else
+ if ((cur = strchr(cur, ',')) != NULL)
+#endif
+ ++cur;
+ }
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+}
+
+static int ncr53c8xx_pci_init(Scsi_Host_Template *tpnt,
+ uchar bus, uchar device_fn, ncr_device *device);
+
+/*
+**   Linux entry point for the NCR53C8XX device detection routine.
+**
+** Called by the middle-level scsi drivers at initialization time,
+** or at module installation.
+**
+** Read the PCI configuration and try to attach each
+** detected NCR board.
+**
+** If NVRAM is present, try to attach boards according to
+**   the user-defined boot order.
+**
+** Returns the number of boards successfully attached.
+*/
+
+__initfunc(
+static void ncr_print_driver_setup(void)
+)
+{
+#define YesNo(y) y ? 'y' : 'n'
+ printk("ncr53c8xx: setup=disc:%c,specf:%d,ultra:%c,tags:%d,sync:%d,burst:%d,wide:%c,diff:%d\n",
+ YesNo(driver_setup.disconnection),
+ driver_setup.special_features,
+ YesNo(driver_setup.ultra_scsi),
+ driver_setup.default_tags,
+ driver_setup.default_sync,
+ driver_setup.burst_max,
+ YesNo(driver_setup.max_wide),
+ driver_setup.diff_support);
+ printk("ncr53c8xx: setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,led:%c,settle:%d,irqm:%d\n",
+ YesNo(driver_setup.master_parity),
+ YesNo(driver_setup.scsi_parity),
+ YesNo(driver_setup.force_sync_nego),
+ driver_setup.verbose,
+ driver_setup.debug,
+ YesNo(driver_setup.led_pin),
+ driver_setup.settle_delay,
+ driver_setup.irqm);
+#undef YesNo
+}
+
+/*
+** NCR53C8XX devices description table and chip ids list.
+*/
+
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+static ushort ncr_chip_ids[] __initdata = SCSI_NCR_CHIP_IDS;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+__initfunc(
+static int
+ncr_attach_using_nvram(Scsi_Host_Template *tpnt, int nvram_index, int count, ncr_device device[])
+)
+{
+ int i, j;
+ int attach_count = 0;
+ ncr_nvram *nvram;
+ ncr_device *devp;
+
+ if (!nvram_index)
+ return 0;
+
+ /* find first Symbios NVRAM if there is one as we need to check it for host boot order */
+ for (i = 0, nvram_index = -1; i < count; i++) {
+ devp = &device[i];
+ nvram = devp->nvram;
+ if (!nvram)
+ continue;
+ if (nvram->type == SCSI_NCR_SYMBIOS_NVRAM) {
+ if (nvram_index == -1)
+ nvram_index = i;
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ printf("ncr53c8xx: NVRAM: Symbios format Boot Block, 53c%s, PCI bus %d, device %d, function %d\n",
+ devp->chip.name, devp->slot.bus,
+ (int) (devp->slot.device_fn & 0xf8) >> 3,
+ (int) devp->slot.device_fn & 7);
+ for (j = 0 ; j < 4 ; j++) {
+ Symbios_host *h = &nvram->data.Symbios.host[j];
+ printf("ncr53c8xx: BOOT[%d] device_id=%04x vendor_id=%04x device_fn=%02x io_port=%04x %s\n",
+ j, h->device_id, h->vendor_id,
+ h->device_fn, h->io_port,
+ (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) ? "SCAN AT BOOT" : "");
+ }
+ }
+ else if (nvram->type == SCSI_NCR_TEKRAM_NVRAM) {
+ /* display Tekram nvram data */
+ printf("ncr53c8xx: NVRAM: Tekram format data, 53c%s, PCI bus %d, device %d, function %d\n",
+ devp->chip.name, devp->slot.bus,
+ (int) (devp->slot.device_fn & 0xf8) >> 3,
+ (int) devp->slot.device_fn & 7);
+#endif
+ }
+ }
+
+ if (nvram_index >= 0 && nvram_index < count)
+ nvram = device[nvram_index].nvram;
+ else
+ nvram = 0;
+
+ if (!nvram)
+ goto out;
+
+ /*
+	** Check devices in the boot record against devices detected.
+	** Attach devices if we find a match. Boot table records that
+	** do not match any detected device are ignored.
+	** Devices that do not match any boot table record are not
+	** attached here; they will be attached during the device
+	** table rescan.
+ */
+ for (i = 0; i < 4; i++) {
+ Symbios_host *h = &nvram->data.Symbios.host[i];
+ for (j = 0 ; j < count ; j++) {
+ devp = &device[j];
+ if (h->device_fn == devp->slot.device_fn &&
+#if 0 /* bus number location in nvram ? */
+ h->bus == devp->slot.bus &&
+#endif
+ h->device_id == devp->chip.device_id)
+ break;
+ }
+ if (j < count && !devp->attach_done) {
+ if (!ncr_attach (tpnt, attach_count, devp))
+ attach_count++;
+ devp->attach_done = 1;
+ }
+ }
+
+out:
+ return attach_count;
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+__initfunc(
+int ncr53c8xx_detect(Scsi_Host_Template *tpnt)
+)
+{
+ int i, j;
+ int chips;
+ int count = 0;
+ uchar bus, device_fn;
+ short index;
+ int attach_count = 0;
+ ncr_device device[8];
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_nvram nvram[4];
+ int k, nvrams;
+#endif
+ int hosts;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ int nvram_index = 0;
+#endif
+ if (initverbose >= 2)
+ ncr_print_driver_setup();
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ ncr_debug = driver_setup.debug;
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+ tpnt->proc_dir = &proc_scsi_ncr53c8xx;
+# ifdef SCSI_NCR_PROC_INFO_SUPPORT
+ tpnt->proc_info = ncr53c8xx_proc_info;
+# endif
+#endif
+
+#if defined(SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT) && defined(MODULE)
+if (ncr53c8xx)
+ ncr53c8xx_setup(ncr53c8xx, (int *) 0);
+#endif
+
+	/*
+	**    Detect all 53c8xx hosts and then attach them.
+	**
+	**    If we are using NVRAM, once all hosts are detected we check
+	**    the NVRAM for the boot order, in case the detection order and
+	**    the boot order differ, and attach the boards using the order
+	**    found in the NVRAM.
+	**
+	**    If no NVRAM is found or the data appears invalid, attach the
+	**    boards in the order they are detected.
+	*/
+
+ if (!pcibios_present())
+ return 0;
+
+ chips = sizeof(ncr_chip_ids) / sizeof(ncr_chip_ids[0]);
+ hosts = sizeof(device) / sizeof(device[0]);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ k = 0;
+ if (driver_setup.use_nvram & 0x1)
+ nvrams = sizeof(nvram) / sizeof(nvram[0]);
+ else
+ nvrams = 0;
+#endif
+
+ for (j = 0; j < chips ; ++j) {
+ i = driver_setup.reverse_probe ? chips-1 - j : j;
+ for (index = 0; ; index++) {
+ char *msg = "";
+ if ((pcibios_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+ index, &bus, &device_fn)) ||
+ (count == hosts))
+ break;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ device[count].nvram = k < nvrams ? &nvram[k] : 0;
+#else
+ device[count].nvram = 0;
+#endif
+ if (ncr53c8xx_pci_init(tpnt, bus, device_fn, &device[count])) {
+ device[count].nvram = 0;
+ continue;
+ }
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (device[count].nvram) {
+ ++k;
+ nvram_index |= device[count].nvram->type;
+ switch (device[count].nvram->type) {
+ case SCSI_NCR_TEKRAM_NVRAM:
+ msg = "with Tekram NVRAM";
+ break;
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ msg = "with Symbios NVRAM";
+ break;
+ default:
+ msg = "";
+ device[count].nvram = 0;
+ --k;
+ }
+ }
+#endif
+ printf(KERN_INFO "ncr53c8xx: 53c%s detected %s\n",
+ device[count].chip.name, msg);
+ ++count;
+ }
+ }
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ attach_count = ncr_attach_using_nvram(tpnt, nvram_index, count, device);
+#endif
+ /*
+ ** rescan device list to make sure all boards attached.
+ ** devices without boot records will not be attached yet
+ ** so try to attach them here.
+ */
+ for (i= 0; i < count; i++) {
+ if (!device[i].attach_done &&
+ !ncr_attach (tpnt, attach_count, &device[i])) {
+ attach_count++;
+ }
+ }
+
+ return attach_count;
+}
+
+/*
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+*/
+
+__initfunc(
+static int ncr53c8xx_pci_init(Scsi_Host_Template *tpnt,
+ uchar bus, uchar device_fn, ncr_device *device)
+)
+{
+ ushort vendor_id, device_id, command;
+ uchar cache_line_size, latency_timer;
+ uchar irq, revision;
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+ uint base, base_2, io_port;
+#else
+ ulong base, base_2;
+#endif
+ int i;
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_nvram *nvram = device->nvram;
+#endif
+ ncr_chip *chip;
+
+ printk(KERN_INFO "ncr53c8xx: at PCI bus %d, device %d, function %d\n",
+ bus, (int) (device_fn & 0xf8) >> 3, (int) device_fn & 7);
+ /*
+ * Read info from the PCI config space.
+ * pcibios_read_config_xxx() functions are assumed to be used for
+ * successfully detected PCI devices.
+ * Expecting error conditions from them is just paranoia,
+ * thus void cast.
+ */
+ (void) pcibios_read_config_word(bus, device_fn,
+ PCI_VENDOR_ID, &vendor_id);
+ (void) pcibios_read_config_word(bus, device_fn,
+ PCI_DEVICE_ID, &device_id);
+ (void) pcibios_read_config_word(bus, device_fn,
+ PCI_COMMAND, &command);
+ (void) pcibios_read_config_dword(bus, device_fn,
+ PCI_BASE_ADDRESS_0, &io_port);
+ (void) pcibios_read_config_dword(bus, device_fn,
+ PCI_BASE_ADDRESS_1, &base);
+ (void) pcibios_read_config_dword(bus, device_fn,
+ PCI_BASE_ADDRESS_2, &base_2);
+ (void) pcibios_read_config_byte(bus, device_fn,
+ PCI_CLASS_REVISION,&revision);
+ (void) pcibios_read_config_byte(bus, device_fn,
+ PCI_INTERRUPT_LINE, &irq);
+ (void) pcibios_read_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, &cache_line_size);
+ (void) pcibios_read_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, &latency_timer);
+
+ /*
+ * Check if the chip is supported
+ */
+ chip = 0;
+ for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+ if (device_id != ncr_chip_table[i].device_id)
+ continue;
+ if (revision > ncr_chip_table[i].revision_id)
+ continue;
+ chip = &device->chip;
+ memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+ chip->revision_id = revision;
+ break;
+ }
+ if (!chip) {
+ printk("ncr53c8xx: not initializing, device not supported\n");
+ return -1;
+ }
+
+#ifdef __powerpc__
+ /*
+	 * Several fix-ups for PowerPC.
+	 * These should not be performed by the driver.
+ */
+ if ((command &
+ (PCI_COMMAND_MASTER|PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) !=
+ (PCI_COMMAND_MASTER|PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
+ printk("ncr53c8xx : setting PCI master/io/command bit\n");
+ command |= PCI_COMMAND_MASTER|PCI_COMMAND_IO|PCI_COMMAND_MEMORY;
+ pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+ }
+ if (io_port >= 0x10000000) {
+ io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+ pcibios_write_config_dword(bus, device_fn, PCI_BASE_ADDRESS_0, io_port);
+ }
+ if (base >= 0x10000000) {
+ base = (base & 0x00FFFFFF) | 0x01000000;
+ pcibios_write_config_dword(bus, device_fn, PCI_BASE_ADDRESS_1, base);
+ }
+#endif
+
+ /*
+ * Check availability of IO space, memory space and master capability.
+ */
+ if (command & PCI_COMMAND_IO) {
+ if ((io_port & 3) != 1) {
+ printk("ncr53c8xx: disabling I/O mapping since base address 0 (0x%x)\n"
+ " bits 0..1 indicate a non-IO mapping\n", (int) io_port);
+ io_port = 0;
+ }
+ else
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ }
+ else
+ io_port = 0;
+
+ if (command & PCI_COMMAND_MEMORY) {
+ if ((base & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ printk("ncr53c8xx: disabling memory mapping since base address 1\n"
+ " contains a non-memory mapping\n");
+ base = 0;
+ }
+ else
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ }
+ else
+ base = 0;
+
+ if (!io_port && !base) {
+ printk("ncr53c8xx: not initializing, both I/O and memory mappings disabled\n");
+ return -1;
+ }
+
+ base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ if (io_port && check_region (io_port, 128)) {
+ printk("ncr53c8xx: IO region 0x%x to 0x%x is in use\n",
+ (int) io_port, (int) (io_port + 127));
+ return -1;
+ }
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ printk("ncr53c8xx: not initializing, BUS MASTERING was disabled\n");
+ return -1;
+ }
+
+ /*
+ * Fix some features according to driver setup.
+ */
+ if (!(driver_setup.special_features & 1))
+ chip->features &= ~FE_SPECIAL_SET;
+ else {
+ if (driver_setup.special_features & 2)
+ chip->features &= ~FE_WRIE;
+ }
+ if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+ chip->features |= FE_ULTRA;
+ chip->features &= ~FE_ULTRA2;
+ }
+ if (driver_setup.ultra_scsi < 1)
+ chip->features &= ~FE_ULTRA;
+ if (!driver_setup.max_wide)
+ chip->features &= ~FE_WIDE;
+
+
+#ifdef SCSI_NCR_PCI_FIX_UP_SUPPORT
+
+ /*
+ * Try to fix up PCI config according to wished features.
+ */
+#if defined(__i386) && !defined(MODULE)
+ if ((driver_setup.pci_fix_up & 1) &&
+ (chip->features & FE_CLSE) && cache_line_size == 0) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+ extern char x86;
+ switch(x86) {
+#else
+ switch(boot_cpu_data.x86) {
+#endif
+ case 4: cache_line_size = 4; break;
+ case 5: cache_line_size = 8; break;
+ }
+ if (cache_line_size)
+ (void) pcibios_write_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ if (initverbose)
+ printk("ncr53c8xx: setting PCI_CACHE_LINE_SIZE to %d (fix-up).\n", cache_line_size);
+ }
+
+ if ((driver_setup.pci_fix_up & 2) && cache_line_size &&
+ (chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ command |= PCI_COMMAND_INVALIDATE;
+ (void) pcibios_write_config_word(bus, device_fn,
+ PCI_COMMAND, command);
+ if (initverbose)
+ printk("ncr53c8xx: setting PCI_COMMAND_INVALIDATE bit (fix-up).\n");
+ }
+#endif
+ /*
+ * Fix up for old chips that support READ LINE but not CACHE LINE SIZE.
+ * - If CACHE LINE SIZE is unknown, set burst max to 32 bytes = 8 dwords
+	 *    and do not enable READ LINE.
+ * - Otherwise set it to the CACHE LINE SIZE (power of 2 assumed).
+ */
+
+ if (!(chip->features & FE_CLSE)) {
+ int burst_max = chip->burst_max;
+ if (cache_line_size == 0) {
+ chip->features &= ~FE_ERL;
+ if (burst_max > 3)
+ burst_max = 3;
+ }
+ else {
+ while (cache_line_size < (1 << burst_max))
+ --burst_max;
+ }
+ chip->burst_max = burst_max;
+ }
+
+ /*
+ * Tune PCI LATENCY TIMER according to burst max length transfer.
+ * (latency timer >= burst length + 6, we add 10 to be quite sure)
+ * If current value is zero, the device has probably been configured
+ * for no bursting due to some broken hardware.
+ */
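+	/*
+	 * For example, with burst_max = 4 (16-dword bursts) the minimum
+	 * acceptable latency timer computed below is (1<<4) + 6 + 10 = 32
+	 * bus clocks.
+	 */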
+
+ if (latency_timer == 0 && chip->burst_max)
+		printk("ncr53c8xx: PCI_LATENCY_TIMER=0, bursting shouldn't be allowed.\n");
+
+ if ((driver_setup.pci_fix_up & 4) && chip->burst_max) {
+ uchar lt = (1 << chip->burst_max) + 6 + 10;
+ if (latency_timer < lt) {
+ latency_timer = lt;
+ if (initverbose)
+ printk("ncr53c8xx: setting PCI_LATENCY_TIMER to %d bus clocks (fix-up).\n", latency_timer);
+ (void) pcibios_write_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, latency_timer);
+ }
+ }
+
+ /*
+ * Fix up for recent chips that support CACHE LINE SIZE.
+ * If PCI config space is not OK, remove features that shall not be
+ * used by the chip. No need to trigger possible chip bugs.
+ */
+
+ if ((chip->features & FE_CLSE) && cache_line_size == 0) {
+ chip->features &= ~FE_CACHE_SET;
+ printk("ncr53c8xx: PCI_CACHE_LINE_SIZE not set, features based on CACHE LINE SIZE not used.\n");
+ }
+
+ if ((chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ chip->features &= ~FE_WRIE;
+ printk("ncr53c8xx: PCI_COMMAND_INVALIDATE not set, WRITE AND INVALIDATE not used\n");
+ }
+
+#endif /* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+ /* initialise ncr_device structure with items required by ncr_attach */
+ device->slot.bus = bus;
+ device->slot.device_fn = device_fn;
+ device->slot.base = base;
+ device->slot.base_2 = base_2;
+ device->slot.io_port = io_port;
+ device->slot.irq = irq;
+ device->attach_done = 0;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (!nvram)
+ goto out;
+
+ /*
+ ** Get access to chip IO registers
+ */
+#ifdef NCR_IOMAPPED
+ request_region(io_port, 128, "ncr53c8xx");
+ device->slot.port = io_port;
+#else
+ device->slot.reg = (struct ncr_reg *) remap_pci_mem((ulong) base, 128);
+ if (!device->slot.reg)
+ goto out;
+#endif
+
+ /*
+ ** Try to read SYMBIOS nvram.
+ ** Data can be used to order booting of boards.
+ **
+ ** Data is saved in ncr_device structure if NVRAM found. This
+ ** is then used to find drive boot order for ncr_attach().
+ **
+ ** NVRAM data is passed to Scsi_Host_Template later during ncr_attach()
+ ** for any device set up.
+ **
+ ** Try to read TEKRAM nvram if Symbios nvram not found.
+ */
+
+ if (!ncr_get_Symbios_nvram(&device->slot, &nvram->data.Symbios))
+ nvram->type = SCSI_NCR_SYMBIOS_NVRAM;
+ else if (!ncr_get_Tekram_nvram(&device->slot, &nvram->data.Tekram))
+ nvram->type = SCSI_NCR_TEKRAM_NVRAM;
+ else
+ nvram->type = 0;
+out:
+ /*
+ ** Release access to chip IO registers
+ */
+#ifdef NCR_IOMAPPED
+ release_region(device->slot.port, 128);
+#else
+ unmap_pci_mem((vm_offset_t) device->slot.reg, (u_long) 128);
+#endif
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+ return 0;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,0,0)
+/*
+** Linux select queue depths function
+*/
+static void ncr53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist)
+{
+ struct scsi_device *device;
+
+ for (device = devlist; device; device = device->next) {
+ if (device->host == host) {
+#if SCSI_NCR_MAX_TAGS > 1
+ if (device->tagged_supported) {
+ device->queue_depth = SCSI_NCR_MAX_TAGS;
+ }
+ else {
+ device->queue_depth = 2;
+ }
+#else
+ device->queue_depth = 1;
+#endif
+
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx_select_queue_depth: id=%d, lun=%d, queue_depth=%d\n",
+ device->id, device->lun, device->queue_depth);
+#endif
+ }
+ }
+}
+#endif
+
+/*
+** Linux entry point of queuecommand() function
+*/
+
+int ncr53c8xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+ int sts;
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx_queue_command\n");
+#endif
+
+ if ((sts = ncr_queue_command(cmd, done)) != DID_OK) {
+ cmd->result = ScsiResult(sts, 0);
+ done(cmd);
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx : command not queued - result=%d\n", sts);
+#endif
+ return sts;
+ }
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx : command successfully queued\n");
+#endif
+ return sts;
+}
+
+/*
+** Linux entry point of the interrupt handler.
+** Fort linux versions > 1.3.70, we trust the kernel for
+**   For Linux versions > 1.3.70, we trust the kernel to pass
+**   the internal host descriptor as 'dev_id'.
+** routine for each host that uses this IRQ.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,70)
+static void ncr53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs)
+{
+#ifdef DEBUG_NCR53C8XX
+ printk("ncr53c8xx : interrupt received\n");
+#endif
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
+ ncr_exception((ncb_p) dev_id);
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n");
+}
+
+#else
+static void ncr53c8xx_intr(int irq, struct pt_regs * regs)
+{
+ struct Scsi_Host *host;
+ struct host_data *host_data;
+
+ for (host = first_host; host; host = host->next) {
+ if (host->hostt == the_template && host->irq == irq) {
+ host_data = (struct host_data *) host->hostdata;
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
+ ncr_exception(host_data->ncb);
+ if (DEBUG_FLAGS & DEBUG_TINY) printf ("]\n");
+ }
+ }
+}
+#endif
+
+/*
+** Linux entry point of the timer handler
+*/
+
+static void ncr53c8xx_timeout(unsigned long np)
+{
+ ncr_timeout((ncb_p) np);
+}
+
+/*
+** Linux entry point of reset() function
+*/
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+
+int ncr53c8xx_reset(Scsi_Cmnd *cmd, unsigned int reset_flags)
+{
+ int sts;
+ unsigned long flags;
+
+ printk("ncr53c8xx_reset: pid=%lu reset_flags=%x serial_number=%ld serial_number_at_timeout=%ld\n",
+ cmd->pid, reset_flags, cmd->serial_number, cmd->serial_number_at_timeout);
+
+ save_flags(flags); cli();
+
+ /*
+ * We have to just ignore reset requests in some situations.
+ */
+#if defined SCSI_RESET_NOT_RUNNING
+ if (cmd->serial_number != cmd->serial_number_at_timeout) {
+ sts = SCSI_RESET_NOT_RUNNING;
+ goto out;
+ }
+#endif
+ /*
+ * If the mid-level driver told us reset is synchronous, it seems
+ * that we must call the done() callback for the involved command,
+ * even if this command was not queued to the low-level driver,
+ * before returning SCSI_RESET_SUCCESS.
+ */
+
+ sts = ncr_reset_bus(cmd,
+ (reset_flags & (SCSI_RESET_SYNCHRONOUS | SCSI_RESET_ASYNCHRONOUS)) == SCSI_RESET_SYNCHRONOUS);
+ /*
+ * Since we always reset the controller, when we return success,
+ * we add this information to the return code.
+ */
+#if defined SCSI_RESET_HOST_RESET
+ if (sts == SCSI_RESET_SUCCESS)
+ sts |= SCSI_RESET_HOST_RESET;
+#endif
+
+out:
+ restore_flags(flags);
+ return sts;
+}
+#else
+int ncr53c8xx_reset(Scsi_Cmnd *cmd)
+{
+ printk("ncr53c8xx_reset: command pid %lu\n", cmd->pid);
+ return ncr_reset_bus(cmd, 1);
+}
+#endif
+
+/*
+** Linux entry point of abort() function
+*/
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+
+int ncr53c8xx_abort(Scsi_Cmnd *cmd)
+{
+ int sts;
+ unsigned long flags;
+
+ printk("ncr53c8xx_abort: pid=%lu serial_number=%ld serial_number_at_timeout=%ld\n",
+ cmd->pid, cmd->serial_number, cmd->serial_number_at_timeout);
+
+ save_flags(flags); cli();
+
+ /*
+ * We have to just ignore abort requests in some situations.
+ */
+ if (cmd->serial_number != cmd->serial_number_at_timeout) {
+ sts = SCSI_ABORT_NOT_RUNNING;
+ goto out;
+ }
+
+ sts = ncr_abort_command(cmd);
+out:
+ restore_flags(flags);
+ return sts;
+}
+#else
+int ncr53c8xx_abort(Scsi_Cmnd *cmd)
+{
+ printk("ncr53c8xx_abort: command pid %lu\n", cmd->pid);
+ return ncr_abort_command(cmd);
+}
+#endif
+
+#ifdef MODULE
+int ncr53c8xx_release(struct Scsi_Host *host)
+{
+#ifdef DEBUG_NCR53C8XX
+printk("ncr53c8xx : release\n");
+#endif
+ ncr_detach(((struct host_data *) host->hostdata)->ncb);
+
+ return 1;
+}
+#endif
+
+
+/*
+** Scsi command waiting list management.
+**
+**	It may happen that we cannot insert a scsi command into the start
+**	queue, for instance because of:
+**	too few preallocated ccb(s),
+**	maxtags < cmd_per_lun of the Linux host control block,
+**	etc...
+**	Such scsi commands are inserted into a waiting list.
+**	When a scsi command completes, we try to requeue the commands of
+**	the waiting list.
+*/
+
+#define next_wcmd host_scribble
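+
+/*
+**	The waiting list is a singly linked list threaded through the
+**	generic host_scribble field of Scsi_Cmnd (aliased to next_wcmd
+**	above).  insert_into_waiting_list() appends at the tail,
+**	retrieve_from_waiting_list() optionally unlinks a given command,
+**	and process_waiting_list() detaches the whole list and either
+**	requeues each command or completes it with the forced status.
+*/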
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd *wcmd;
+
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ cmd->next_wcmd = 0;
+ if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
+ else {
+ while ((wcmd->next_wcmd) != 0)
+ wcmd = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = (char *) cmd;
+ }
+}
+
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd *wcmd;
+
+ if (!(wcmd = np->waiting_list)) return 0;
+ while (wcmd->next_wcmd) {
+ if (cmd == (Scsi_Cmnd *) wcmd->next_wcmd) {
+ if (to_remove) {
+ wcmd->next_wcmd = cmd->next_wcmd;
+ cmd->next_wcmd = 0;
+ }
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ return cmd;
+		}
+		/* advance, otherwise a miss would loop forever */
+		wcmd = (Scsi_Cmnd *) wcmd->next_wcmd;
+	}
+ return 0;
+}
+
+static void process_waiting_list(ncb_p np, int sts)
+{
+ Scsi_Cmnd *waiting_list, *wcmd;
+
+ waiting_list = np->waiting_list;
+ np->waiting_list = 0;
+
+#ifdef DEBUG_WAITING_LIST
+ if (waiting_list) printf("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
+#endif
+ while ((wcmd = waiting_list) != 0) {
+ waiting_list = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = 0;
+ if (sts == DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
+#endif
+ sts = ncr_queue_command(wcmd, wcmd->scsi_done);
+ }
+ if (sts != DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printf("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
+#endif
+ wcmd->result = ScsiResult(sts, 0);
+ wcmd->scsi_done(wcmd);
+ }
+ }
+}
+
+#undef next_wcmd
+
+/*
+** Returns data transfer direction for common op-codes.
+*/
+
+static int guess_xfer_direction(int opcode)
+{
+ int d;
+
+ switch(opcode) {
+ case 0x12: /* INQUIRY 12 */
+ case 0x4D: /* LOG SENSE 4D */
+ case 0x5A: /* MODE SENSE(10) 5A */
+ case 0x1A: /* MODE SENSE(6) 1A */
+ case 0x3C: /* READ BUFFER 3C */
+ case 0x1C: /* RECEIVE DIAGNOSTIC RESULTS 1C */
+ case 0x03: /* REQUEST SENSE 03 */
+ d = XferIn;
+ break;
+ case 0x39: /* COMPARE 39 */
+ case 0x3A: /* COPY AND VERIFY 3A */
+ case 0x18: /* COPY 18 */
+ case 0x4C: /* LOG SELECT 4C */
+ case 0x55: /* MODE SELECT(10) 55 */
+ case 0x3B: /* WRITE BUFFER 3B */
+ case 0x1D: /* SEND DIAGNOSTIC 1D */
+ case 0x40: /* CHANGE DEFINITION 40 */
+ case 0x15: /* MODE SELECT(6) 15 */
+ d = XferOut;
+ break;
+ case 0x00: /* TEST UNIT READY 00 */
+ d = XferNone;
+ break;
+ default:
+ d = XferBoth;
+ break;
+ }
+
+ return d;
+}
+
+
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+
+/*=========================================================================
+** Proc file system stuff
+**
+** A read operation returns profile information.
+** A write operation is a control command.
+** The string is parsed in the driver code and the command is passed
+** to the ncr_usercmd() function.
+**=========================================================================
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+#define digit_to_bin(c) ((c) - '0')
+#define is_space(c) ((c) == ' ' || (c) == '\t')
+
+static int skip_spaces(char *ptr, int len)
+{
+ int cnt, c;
+
+ for (cnt = len; cnt > 0 && (c = *ptr++) && is_space(c); cnt--);
+
+ return (len - cnt);
+}
+
+static int get_int_arg(char *ptr, int len, u_long *pv)
+{
+ int cnt, c;
+ u_long v;
+
+ for (v = 0, cnt = len; cnt > 0 && (c = *ptr++) && is_digit(c); cnt--) {
+ v = (v * 10) + digit_to_bin(c);
+ }
+
+ if (pv)
+ *pv = v;
+
+ return (len - cnt);
+}
+
+static int is_keyword(char *ptr, int len, char *verb)
+{
+ int verb_len = strlen(verb);
+
+ if (len >= strlen(verb) && !memcmp(verb, ptr, verb_len))
+ return verb_len;
+ else
+ return 0;
+
+}
+
+#define SKIP_SPACES(min_spaces) \
+ if ((arg_len = skip_spaces(ptr, len)) < (min_spaces)) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+#define GET_INT_ARG(v) \
+ if (!(arg_len = get_int_arg(ptr, len, &(v)))) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+
+/*
+** Parse a control command
+*/
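+
+/*
+**	For instance, writing the string "settags 2 8" results in
+**	uc->cmd = UC_SETTAGS, uc->target = (1<<2) and uc->data = 8,
+**	i.e. request 8 tagged commands for target 2; "settags all 4"
+**	applies to every target.
+*/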
+
+static int ncr_user_command(ncb_p np, char *buffer, int length)
+{
+ char *ptr = buffer;
+ int len = length;
+ struct usrcmd *uc = &np->user;
+ int arg_len;
+ u_long target;
+
+ bzero(uc, sizeof(*uc));
+
+ if (len > 0 && ptr[len-1] == '\n')
+ --len;
+
+ if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
+ uc->cmd = UC_SETSYNC;
+ else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
+ uc->cmd = UC_SETTAGS;
+ else if ((arg_len = is_keyword(ptr, len, "setorder")) != 0)
+ uc->cmd = UC_SETORDER;
+ else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
+ uc->cmd = UC_SETWIDE;
+ else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
+ uc->cmd = UC_SETDEBUG;
+ else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
+ uc->cmd = UC_SETFLAG;
+ else if ((arg_len = is_keyword(ptr, len, "clearprof")) != 0)
+ uc->cmd = UC_CLEARPROF;
+#ifdef UC_DEBUG_ERROR_RECOVERY
+ else if ((arg_len = is_keyword(ptr, len, "debug_error_recovery")) != 0)
+ uc->cmd = UC_DEBUG_ERROR_RECOVERY;
+#endif
+ else
+ arg_len = 0;
+
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
+#endif
+
+ if (!arg_len)
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+
+ switch(uc->cmd) {
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ case UC_SETFLAG:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
+ ptr += arg_len; len -= arg_len;
+ uc->target = ~0;
+ } else {
+ GET_INT_ARG(target);
+ uc->target = (1<<target);
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: target=%ld\n", target);
+#endif
+ }
+ break;
+ }
+
+ switch(uc->cmd) {
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ SKIP_SPACES(1);
+ GET_INT_ARG(uc->data);
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETORDER:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "simple")))
+ uc->data = M_SIMPLE_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "ordered")))
+ uc->data = M_ORDERED_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "default")))
+ uc->data = 0;
+ else
+ return -EINVAL;
+ break;
+ case UC_SETDEBUG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "alloc")))
+ uc->data |= DEBUG_ALLOC;
+ else if ((arg_len = is_keyword(ptr, len, "phase")))
+ uc->data |= DEBUG_PHASE;
+ else if ((arg_len = is_keyword(ptr, len, "poll")))
+ uc->data |= DEBUG_POLL;
+ else if ((arg_len = is_keyword(ptr, len, "queue")))
+ uc->data |= DEBUG_QUEUE;
+ else if ((arg_len = is_keyword(ptr, len, "result")))
+ uc->data |= DEBUG_RESULT;
+ else if ((arg_len = is_keyword(ptr, len, "scatter")))
+ uc->data |= DEBUG_SCATTER;
+ else if ((arg_len = is_keyword(ptr, len, "script")))
+ uc->data |= DEBUG_SCRIPT;
+ else if ((arg_len = is_keyword(ptr, len, "tiny")))
+ uc->data |= DEBUG_TINY;
+ else if ((arg_len = is_keyword(ptr, len, "timing")))
+ uc->data |= DEBUG_TIMING;
+ else if ((arg_len = is_keyword(ptr, len, "nego")))
+ uc->data |= DEBUG_NEGO;
+ else if ((arg_len = is_keyword(ptr, len, "tags")))
+ uc->data |= DEBUG_TAGS;
+ else if ((arg_len = is_keyword(ptr, len, "freeze")))
+ uc->data |= DEBUG_FREEZE;
+ else if ((arg_len = is_keyword(ptr, len, "restart")))
+ uc->data |= DEBUG_RESTART;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+#ifdef DEBUG_PROC_INFO
+printf("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETFLAG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "trace")))
+ uc->data |= UF_TRACE;
+ else if ((arg_len = is_keyword(ptr, len, "no_disc")))
+ uc->data |= UF_NODISC;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+ break;
+#ifdef UC_DEBUG_ERROR_RECOVERY
+ case UC_DEBUG_ERROR_RECOVERY:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "sge")))
+ uc->data = 1;
+ else if ((arg_len = is_keyword(ptr, len, "abort")))
+ uc->data = 2;
+ else if ((arg_len = is_keyword(ptr, len, "reset")))
+ uc->data = 3;
+ else if ((arg_len = is_keyword(ptr, len, "parity")))
+ uc->data = 4;
+ else if ((arg_len = is_keyword(ptr, len, "none")))
+ uc->data = 0;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ if (len)
+ return -EINVAL;
+ else {
+ long flags;
+
+ save_flags(flags); cli();
+ ncr_usercmd (np);
+ restore_flags(flags);
+ }
+ return length;
+}
+
+#endif /* SCSI_NCR_USER_COMMAND_SUPPORT */
+
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+struct info_str
+{
+ char *buffer;
+ int length;
+ int offset;
+ int pos;
+};
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+ if (info->pos + len > info->length)
+ len = info->length - info->pos;
+
+ if (info->pos + len < info->offset) {
+ info->pos += len;
+ return;
+ }
+ if (info->pos < info->offset) {
+ data += (info->offset - info->pos);
+ len -= (info->offset - info->pos);
+ }
+
+ if (len > 0) {
+ memcpy(info->buffer + info->pos, data, len);
+ info->pos += len;
+ }
+}
+
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+ va_list args;
+ char buf[81];
+ int len;
+
+ va_start(args, fmt);
+ len = vsprintf(buf, fmt, args);
+ va_end(args);
+
+ copy_mem_info(info, buf, len);
+ return len;
+}
+
+/*
+** Copy formatted profile information into the input buffer.
+*/
+
+#define to_ms(t) ((t) * 1000 / HZ)
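+
+/*
+**	The profile counters are kept in jiffies; with the usual HZ=100
+**	on i386 this converts e.g. 250 ticks into to_ms(250) = 2500 ms.
+*/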
+
+static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len)
+{
+ struct info_str info;
+
+ info.buffer = ptr;
+ info.length = len;
+ info.offset = offset;
+ info.pos = 0;
+
+ copy_info(&info, "General information:\n");
+ copy_info(&info, " Chip NCR53C%s, ", np->chip_name);
+ copy_info(&info, "device id 0x%x, ", np->device_id);
+ copy_info(&info, "revision id 0x%x\n", np->revision_id);
+
+ copy_info(&info, " IO port address 0x%lx, ", (u_long) np->port);
+ copy_info(&info, "IRQ number %d\n", (int) np->irq);
+
+#ifndef NCR_IOMAPPED
+ if (np->reg)
+ copy_info(&info, " Using memory mapped IO at virtual address 0x%lx\n",
+ (u_long) np->reg);
+#endif
+ copy_info(&info, " Synchronous period factor %d, ", (int) np->minsync);
+ copy_info(&info, "max commands per lun %d\n", SCSI_NCR_MAX_TAGS);
+
+ if (driver_setup.debug || driver_setup.verbose > 1) {
+ copy_info(&info, " Debug flags 0x%x, ", driver_setup.debug);
+ copy_info(&info, "verbosity level %d\n", driver_setup.verbose);
+ }
+
+#ifdef SCSI_NCR_PROFILE_SUPPORT
+ copy_info(&info, "Profiling information:\n");
+ copy_info(&info, " %-12s = %lu\n", "num_trans",np->profile.num_trans);
+ copy_info(&info, " %-12s = %lu\n", "num_kbytes",np->profile.num_kbytes);
+ copy_info(&info, " %-12s = %lu\n", "num_disc", np->profile.num_disc);
+ copy_info(&info, " %-12s = %lu\n", "num_break",np->profile.num_break);
+ copy_info(&info, " %-12s = %lu\n", "num_int", np->profile.num_int);
+ copy_info(&info, " %-12s = %lu\n", "num_fly", np->profile.num_fly);
+ copy_info(&info, " %-12s = %lu\n", "ms_setup", to_ms(np->profile.ms_setup));
+ copy_info(&info, " %-12s = %lu\n", "ms_data", to_ms(np->profile.ms_data));
+ copy_info(&info, " %-12s = %lu\n", "ms_disc", to_ms(np->profile.ms_disc));
+ copy_info(&info, " %-12s = %lu\n", "ms_post", to_ms(np->profile.ms_post));
+#endif
+
+ return info.pos > info.offset? info.pos - info.offset : 0;
+}
+
+#endif /* SCSI_NCR_USER_INFO_SUPPORT */
+
+/*
+** Entry point of the scsi proc fs of the driver.
+** - func = 0 means read (returns profile data)
+** - func = 1 means write (parse user control command)
+*/
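+
+/*
+**	From user space this typically appears as
+**	/proc/scsi/ncr53c8xx/<host_no>: reading the file dumps the
+**	information built by ncr_host_info(), and writing a line such
+**	as "setdebug tiny queue" is handed to ncr_user_command().
+*/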
+
+int ncr53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func)
+{
+ struct Scsi_Host *host;
+ struct host_data *host_data;
+ ncb_p ncb = 0;
+ int retv;
+
+#ifdef DEBUG_PROC_INFO
+printf("ncr53c8xx_proc_info: hostno=%d, func=%d\n", hostno, func);
+#endif
+
+ for (host = first_host; host; host = host->next) {
+ if (host->hostt == the_template && host->host_no == hostno) {
+ host_data = (struct host_data *) host->hostdata;
+ ncb = host_data->ncb;
+ break;
+ }
+ }
+
+ if (!ncb)
+ return -EINVAL;
+
+ if (func) {
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+ retv = ncr_user_command(ncb, buffer, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+ else {
+ if (start)
+ *start = buffer;
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+ retv = ncr_host_info(ncb, buffer, offset, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+
+ return retv;
+}
+
+
+/*=========================================================================
+** End of proc file system stuff
+**=========================================================================
+*/
+#endif
+
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/* ---------------------------------------------------------------------
+**
+** Try reading Symbios format nvram
+**
+** ---------------------------------------------------------------------
+**
+**	GPIO0 - data in/data out
+** GPIO1 - clock
+**
+** return 0 if NVRAM data OK, 1 if NVRAM data not OK
+** ---------------------------------------------------------------------
+*/
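+
+/*
+**	The routines below bit-bang what is essentially the standard
+**	two-wire (I2C style) serial EEPROM protocol over the chip's
+**	general purpose pins: START condition, device code 0xa0 plus
+**	address, an ACK bit after every byte, then STOP.  GPIO0 carries
+**	the data and GPIO1 the clock, as noted above.
+*/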
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+static u_short nvram_read_data(ncr_slot *np, u_char *data, int len, u_char *gpreg, u_char *gpcntl);
+static void nvram_start(ncr_slot *np, u_char *gpreg);
+static void nvram_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl);
+static void nvram_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl);
+static void nvram_readAck(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl);
+static void nvram_writeAck(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl);
+static void nvram_doBit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg);
+static void nvram_stop(ncr_slot *np, u_char *gpreg);
+static void nvram_setBit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode);
+
+__initfunc(
+static int ncr_get_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+)
+{
+ static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_short csum;
+ u_char ack_data;
+ int retv = 1;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+ gpcntl = old_gpcntl & 0xfc;
+
+ /* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
+ OUTB (nc_gpreg, old_gpreg);
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ nvram_setBit(np, 0, &gpreg, CLR_CLK);
+ nvram_setBit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ nvram_stop(np, &gpreg);
+
+ /* activate NVRAM */
+ nvram_start(np, &gpreg);
+
+ /* write device code and random address MSB */
+ nvram_write_byte(np, &ack_data,
+ 0xa0 | ((SYMBIOS_NVRAM_ADDRESS >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* write random address LSB */
+ nvram_write_byte(np, &ack_data,
+ (SYMBIOS_NVRAM_ADDRESS & 0x7f) << 1, &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* regenerate START state to set up for reading */
+ nvram_start(np, &gpreg);
+
+ /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+ nvram_write_byte(np, &ack_data,
+ 0xa1 | ((SYMBIOS_NVRAM_ADDRESS >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* now set up GPIO0 for inputting data */
+ gpcntl |= 0x01;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all active data - only part of total NVRAM */
+ csum = nvram_read_data(np,
+ (u_char *) nvram, sizeof(*nvram), &gpreg, &gpcntl);
+
+ /* finally put NVRAM back in inactive mode */
+ gpcntl &= 0xfe;
+ OUTB (nc_gpcntl, gpcntl);
+ nvram_stop(np, &gpreg);
+
+#ifdef SCSI_NCR_DEBUG_NVRAM
+printf("ncr53c8xx: NvRAM marker=%x trailer=%x %x %x %x %x %x byte_count=%d/%d checksum=%x/%x\n",
+ nvram->start_marker,
+ nvram->trailer[0], nvram->trailer[1], nvram->trailer[2],
+ nvram->trailer[3], nvram->trailer[4], nvram->trailer[5],
+ nvram->byte_count, sizeof(*nvram) - 12,
+ nvram->checksum, csum);
+#endif
+
+ /* check valid NVRAM signature, verify byte count and checksum */
+ if (nvram->start_marker == 0 &&
+ !memcmp(nvram->trailer, Symbios_trailer, 6) &&
+ nvram->byte_count == sizeof(*nvram) - 12 &&
+ csum == nvram->checksum)
+ retv = 0;
+out:
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+/*
+ * Read Symbios NvRAM data and compute checksum.
+ */
+__initfunc(
+static u_short nvram_read_data(ncr_slot *np, u_char *data, int len, u_char *gpreg, u_char *gpcntl)
+)
+{
+ int x;
+ u_short csum;
+
+ for (x = 0; x < len; x++)
+ nvram_read_byte(np, &data[x], (x == (len - 1)), gpreg, gpcntl);
+
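+	/* the checksum skips the first and last 6 bytes -- presumably the
+	 * header and trailer that are checked against byte_count above */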
+ for (x = 6, csum = 0; x < len - 6; x++)
+ csum += data[x];
+
+ return csum;
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+__initfunc(
+static void nvram_start(ncr_slot *np, u_char *gpreg)
+)
+{
+ nvram_setBit(np, 1, gpreg, SET_BIT);
+ nvram_setBit(np, 0, gpreg, SET_CLK);
+ nvram_setBit(np, 0, gpreg, CLR_BIT);
+ nvram_setBit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+__initfunc(
+static void nvram_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data, u_char *gpreg, u_char *gpcntl)
+)
+{
+ int x;
+
+ for (x = 0; x < 8; x++)
+ nvram_doBit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ nvram_readAck(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+__initfunc(
+static void nvram_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data, u_char *gpreg, u_char *gpcntl)
+)
+{
+ int x;
+ u_char read_bit;
+
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ nvram_doBit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ nvram_writeAck(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+__initfunc(
+static void nvram_writeAck(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+)
+{
+ OUTB (nc_gpcntl, *gpcntl & 0xfe);
+ nvram_doBit(np, 0, write_bit, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+__initfunc(
+static void nvram_readAck(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+)
+{
+ OUTB (nc_gpcntl, *gpcntl | 0x01);
+ nvram_doBit(np, read_bit, 1, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+__initfunc(
+static void nvram_doBit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+)
+{
+ nvram_setBit(np, write_bit, gpreg, SET_BIT);
+ nvram_setBit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB (nc_gpreg);
+ nvram_setBit(np, 0, gpreg, CLR_CLK);
+ nvram_setBit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+__initfunc(
+static void nvram_stop(ncr_slot *np, u_char *gpreg)
+)
+{
+ nvram_setBit(np, 0, gpreg, SET_CLK);
+ nvram_setBit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Set/clear the data (GPIO0) or clock (GPIO1) bit in GPREG
+ */
+__initfunc(
+static void nvram_setBit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+)
+{
+ DELAY(5);
+ switch (bit_mode){
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB (nc_gpreg, *gpreg);
+ DELAY(5);
+}
+
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+
+/* ---------------------------------------------------------------------
+**
+** Try reading Tekram format nvram
+**
+** ---------------------------------------------------------------------
+**
+** GPIO0 - data in
+** GPIO1 - data out
+** GPIO2 - clock
+** GPIO4 - chip select
+**
+** return 0 if NVRAM data OK, 1 if NVRAM data not OK
+** ---------------------------------------------------------------------
+*/
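+
+/*
+** Each 16-bit word is fetched with a 9-bit serial command, sent MSB
+** first: a start bit (1), the read opcode (10) and a 6-bit word address,
+** which is why Tnvram_read_data() below sends (0x180 | address).  This
+** appears to be the classic 93C46/Microwire-style access pattern.
+*/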
+
+static u_short Tnvram_read_data(ncr_slot *np, u_short *data, int len, u_char *gpreg);
+static void Tnvram_Send_Command(ncr_slot *np, u_short write_data, u_char *read_bit, u_char *gpreg);
+static void Tnvram_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg);
+static void Tnvram_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg);
+static void Tnvram_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg);
+static void Tnvram_Stop(ncr_slot *np, u_char *gpreg);
+static void Tnvram_Clk(ncr_slot *np, u_char *gpreg);
+
+__initfunc(
+static int ncr_get_Tekram_nvram (ncr_slot *np, Tekram_nvram *nvram)
+)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_short csum;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+
+	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 into a known state: 0 in,
+	   1/2/4 out */
+ gpreg = old_gpreg & 0xe9;
+ OUTB (nc_gpreg, gpreg);
+ gpcntl = (old_gpcntl & 0xe9) | 0x09;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all of NVRAM, 64 words */
+ csum = Tnvram_read_data(np, (u_short *) nvram,
+ sizeof(*nvram) / sizeof(short), &gpreg);
+
+ /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ /* check data valid */
+ if (csum != 0x1234)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Read Tekram NvRAM data and compute checksum.
+ */
+__initfunc(
+static u_short Tnvram_read_data(ncr_slot *np, u_short *data, int len, u_char *gpreg)
+)
+{
+ u_char read_bit;
+ u_short csum;
+ int x;
+
+ for (x = 0, csum = 0; x < len; x++) {
+
+ /* output read command and address */
+ Tnvram_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 0; /* Force bad checksum */
+
+ Tnvram_Read_Word(np, &data[x], gpreg);
+ csum += data[x];
+
+ Tnvram_Stop(np, gpreg);
+ }
+
+ return csum;
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+__initfunc(
+static void Tnvram_Send_Command(ncr_slot *np, u_short write_data, u_char *read_bit, u_char *gpreg)
+)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ Tnvram_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Read a 16-bit word from the NVRAM
+ */
+__initfunc(
+static void Tnvram_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+)
+{
+ int x;
+ u_char read_bit;
+
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ Tnvram_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read bit from NVRAM
+ */
+__initfunc(
+static void Tnvram_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+)
+{
+ DELAY(2);
+ Tnvram_Clk(np, gpreg);
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Write bit to GPIO1 (data out), asserting chip select on GPIO4
+ */
+__initfunc(
+static void Tnvram_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+)
+{
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ *gpreg |= 0x10;
+
+ OUTB (nc_gpreg, *gpreg);
+ DELAY(2);
+
+ Tnvram_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+__initfunc(
+static void Tnvram_Stop(ncr_slot *np, u_char *gpreg)
+)
+{
+ *gpreg &= 0xef;
+ OUTB (nc_gpreg, *gpreg);
+ DELAY(2);
+
+ Tnvram_Clk(np, gpreg);
+}
+
+/*
+ * Pulse the clock bit (GPIO2)
+ */
+__initfunc(
+static void Tnvram_Clk(ncr_slot *np, u_char *gpreg)
+)
+{
+ OUTB (nc_gpreg, *gpreg | 0x04);
+ DELAY(2);
+ OUTB (nc_gpreg, *gpreg);
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Module stuff
+*/
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = NCR53C8XX;
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/ncr53c8xx.h b/linux/src/drivers/scsi/ncr53c8xx.h
new file mode 100644
index 0000000..0342438
--- /dev/null
+++ b/linux/src/drivers/scsi/ncr53c8xx.h
@@ -0,0 +1,1220 @@
+/******************************************************************************
+** Device driver for the PCI-SCSI NCR538XX controller family.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
+** and is currently maintained by
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+** And has been ported to NetBSD by
+** Charles M. Hannum <mycroft@gnu.ai.mit.edu>
+**
+*******************************************************************************
+*/
+
+#ifndef NCR53C8XX_H
+#define NCR53C8XX_H
+
+/*
+** Name and revision of the driver
+*/
+#define SCSI_NCR_DRIVER_NAME "ncr53c8xx - revision 2.5f.1"
+
+/*
+** Check supported Linux versions
+*/
+
+#if !defined(LINUX_VERSION_CODE)
+#include <linux/version.h>
+#endif
+#include <linux/config.h>
+
+/*
+** During "make dep" of linux-1.2.13, LINUX_VERSION_CODE is undefined.
+** Under linux-1.3.X, all seems to be OK.
+** So we only need to define it for 1.2.13.
+*/
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#if !defined(LINUX_VERSION_CODE)
+#define LINUX_VERSION_CODE LinuxVersionCode(1,2,13)
+#endif
+
+/*
+** Normal IO or memory mapped IO.
+**
+** Memory mapped IO only works with linux-1.3.X
+** If your motherboard does not work with memory mapped IO,
+** define SCSI_NCR_IOMAPPED for PATCHLEVEL 3 too.
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(1,3,0)
+# define SCSI_NCR_IOMAPPED
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+//# define SCSI_NCR_PROC_INFO_SUPPORT
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,72)
+# define SCSI_NCR_SHARE_IRQ
+#endif
+
+/*
+** If you want a driver as small as possible, do not define the
+** following options.
+*/
+
+#define SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+#define SCSI_NCR_DEBUG_INFO_SUPPORT
+#define SCSI_NCR_PCI_FIX_UP_SUPPORT
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+# define SCSI_NCR_PROFILE_SUPPORT
+# define SCSI_NCR_USER_COMMAND_SUPPORT
+# define SCSI_NCR_USER_INFO_SUPPORT
+/* # define SCSI_NCR_DEBUG_ERROR_RECOVERY_SUPPORT */
+#endif
+
+/*==========================================================
+**
+** nvram settings - #define SCSI_NCR_NVRAM_SUPPORT to enable
+**
+**==========================================================
+*/
+
+#ifdef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define SCSI_NCR_NVRAM_SUPPORT
+/* #define SCSI_NCR_DEBUG_NVRAM */
+#endif
+
+/* ---------------------------------------------------------------------
+** Take into account kernel configured parameters.
+** Most of these options can be overridden at startup by a command line.
+** ---------------------------------------------------------------------
+*/
+
+/*
+ * For the Ultra2 SCSI support option, use special features and allow
+ * 40 MHz synchronous data transfers.
+ */
+#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3)
+#define SCSI_NCR_SETUP_ULTRA_SCSI (2)
+#define SCSI_NCR_MAX_SYNC (40)
+
+/*
+ * Allow tags from 2 to 12, default 4
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#if CONFIG_SCSI_NCR53C8XX_MAX_TAGS < 2
+#define SCSI_NCR_MAX_TAGS (2)
+#elif CONFIG_SCSI_NCR53C8XX_MAX_TAGS > 12
+#define SCSI_NCR_MAX_TAGS (12)
+#else
+#define SCSI_NCR_MAX_TAGS CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#endif
+#else
+#define SCSI_NCR_MAX_TAGS (4)
+#endif
+
+/*
+ * Allow tagged command queuing support if configured with default number
+ * of tags set to max (see above).
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_TAGGED_QUEUE
+#define SCSI_NCR_SETUP_DEFAULT_TAGS SCSI_NCR_MAX_TAGS
+#else
+#define SCSI_NCR_SETUP_DEFAULT_TAGS (0)
+#endif
+
+/*
+ * Use normal IO if configured. Forced for alpha and ppc.
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IOMAPPED)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__alpha__) || defined(__powerpc__)
+#define SCSI_NCR_IOMAPPED
+#endif
+
+/*
+ * Sync transfer frequency at startup.
+ * Allow from 5 MHz to 40 MHz; defaults to 5 MHz here when not configured.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC (5)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC > SCSI_NCR_MAX_SYNC
+#undef  CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC SCSI_NCR_MAX_SYNC
+#endif
+
+#if CONFIG_SCSI_NCR53C8XX_SYNC == 0
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (255)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 5
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (50)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 20
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (250/(CONFIG_SCSI_NCR53C8XX_SYNC))
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 33
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (11)
+#else
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (10)
+#endif
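+
+/*
+** Quick sanity check of the arithmetic above (assuming the value is the
+** SCSI synchronous period factor, in units of 4 ns over the 5-20 MHz
+** range): a 10 MHz request gives 250/10 = 25, i.e. a 100 ns period;
+** 5 MHz gives 50 (200 ns); 255 means asynchronous only; 11 and 10 are
+** the special factors used for the 33 MHz and 40 MHz cases.
+*/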
+
+/*
+ * Disallow disconnections at boot-up
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT
+#define SCSI_NCR_SETUP_DISCONNECTION (0)
+#else
+#define SCSI_NCR_SETUP_DISCONNECTION (1)
+#endif
+
+/*
+ * Force synchronous negotiation for all targets
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (1)
+#else
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (0)
+#endif
+
+/*
+ * Disable master parity checking (flawed hardware needs this)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_MPARITY_CHECK
+#define SCSI_NCR_SETUP_MASTER_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_MASTER_PARITY (1)
+#endif
+
+/*
+ * Disable scsi parity checking (flawed devices may need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_PARITY_CHECK
+#define SCSI_NCR_SETUP_SCSI_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_SCSI_PARITY (1)
+#endif
+
+/*
+ * Vendor specific stuff
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT
+#define SCSI_NCR_SETUP_LED_PIN (1)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (3)
+#else
+#define SCSI_NCR_SETUP_LED_PIN (0)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (0)
+#endif
+
+/*
+ * Settle time after reset at boot-up
+ */
+#define SCSI_NCR_SETUP_SETTLE_TIME (2)
+
+/*
+** Other parameters not configurable with "make config"
+** Avoid to change these constants, unless you know what you are doing.
+*/
+
+#define SCSI_NCR_ALWAYS_SIMPLE_TAG
+#define SCSI_NCR_MAX_SCATTER (127)
+#define SCSI_NCR_MAX_TARGET (16)
+#define SCSI_NCR_MAX_HOST (2)
+#define SCSI_NCR_TIMEOUT_ALERT (3*HZ)
+
+#define SCSI_NCR_CAN_QUEUE (7*SCSI_NCR_MAX_TAGS)
+#define SCSI_NCR_CMD_PER_LUN (SCSI_NCR_MAX_TAGS)
+#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER)
+
+#define SCSI_NCR_TIMER_INTERVAL ((HZ+5-1)/5)
+
+#if 1 /* defined CONFIG_SCSI_MULTI_LUN */
+#define SCSI_NCR_MAX_LUN (8)
+#else
+#define SCSI_NCR_MAX_LUN (1)
+#endif
+
+/*
+** Define Scsi_Host_Template parameters
+**
+** Used by hosts.c and ncr53c8xx.c with module configuration.
+*/
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,98)
+#include <scsi/scsicam.h>
+#else
+#include <linux/scsicam.h>
+#endif
+
+int ncr53c8xx_abort(Scsi_Cmnd *);
+int ncr53c8xx_detect(Scsi_Host_Template *tpnt);
+int ncr53c8xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,98)
+int ncr53c8xx_reset(Scsi_Cmnd *, unsigned int);
+#else
+int ncr53c8xx_reset(Scsi_Cmnd *);
+#endif
+
+#ifdef MODULE
+int ncr53c8xx_release(struct Scsi_Host *);
+#else
+#define ncr53c8xx_release NULL
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,75)
+
+#define NCR53C8XX { name: SCSI_NCR_DRIVER_NAME, \
+ detect: ncr53c8xx_detect, \
+ release: ncr53c8xx_release, \
+ queuecommand: ncr53c8xx_queue_command,\
+ abort: ncr53c8xx_abort, \
+ reset: ncr53c8xx_reset, \
+ bios_param: scsicam_bios_param, \
+ can_queue: SCSI_NCR_CAN_QUEUE, \
+ this_id: 7, \
+ sg_tablesize: SCSI_NCR_SG_TABLESIZE, \
+ cmd_per_lun: SCSI_NCR_CMD_PER_LUN, \
+ use_clustering: DISABLE_CLUSTERING}
+
+#elif LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+
+#define NCR53C8XX { NULL, NULL, NULL, NULL, \
+ SCSI_NCR_DRIVER_NAME, ncr53c8xx_detect, \
+ ncr53c8xx_release, NULL, NULL, \
+ ncr53c8xx_queue_command,ncr53c8xx_abort, \
+ ncr53c8xx_reset, NULL, scsicam_bios_param, \
+ SCSI_NCR_CAN_QUEUE, 7, \
+ SCSI_NCR_SG_TABLESIZE, SCSI_NCR_CMD_PER_LUN, \
+ 0, 0, DISABLE_CLUSTERING}
+
+#else
+
+#define NCR53C8XX { NULL, NULL, \
+ SCSI_NCR_DRIVER_NAME, ncr53c8xx_detect, \
+ ncr53c8xx_release, NULL, NULL, \
+ ncr53c8xx_queue_command,ncr53c8xx_abort, \
+ ncr53c8xx_reset, NULL, scsicam_bios_param, \
+ SCSI_NCR_CAN_QUEUE, 7, \
+ SCSI_NCR_SG_TABLESIZE, SCSI_NCR_CMD_PER_LUN, \
+ 0, 0, DISABLE_CLUSTERING}
+
+#endif /* LINUX_VERSION_CODE */
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+
+#ifndef HOSTS_C
+
+/*
+** IO functions definition for big/little endian support.
+** For now, the NCR is only supported in little endian addressing mode,
+** and big endian byte ordering is only supported for the PPC.
+** MMIO is not used on PPC.
+*/
+
+#ifdef __BIG_ENDIAN
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#error "BIG ENDIAN byte ordering needs kernel version >= 2.1.0"
+#endif
+
+#ifdef __powerpc__
+#define inw_l2b inw
+#define inl_l2b inl
+#define outw_b2l outw
+#define outl_b2l outl
+#else
+#error "Support for BIG ENDIAN is only available for the PowerPC"
+#endif
+
+#else /* Assumed x86 or alpha */
+
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readw_raw readw
+#define readl_raw readl
+#define writew_raw writew
+#define writel_raw writel
+
+#endif
+
+#ifdef SCSI_NCR_BIG_ENDIAN
+#error "The NCR in BIG ENDIAN adressing mode is not (yet) supported"
+#endif
+
+/*
+** NCR53C8XX Device Ids
+*/
+
+#ifndef PCI_DEVICE_ID_NCR_53C810
+#define PCI_DEVICE_ID_NCR_53C810 1
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C810AP
+#define PCI_DEVICE_ID_NCR_53C810AP 5
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C815
+#define PCI_DEVICE_ID_NCR_53C815 4
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C820
+#define PCI_DEVICE_ID_NCR_53C820 2
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C825
+#define PCI_DEVICE_ID_NCR_53C825 3
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C860
+#define PCI_DEVICE_ID_NCR_53C860 6
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875
+#define PCI_DEVICE_ID_NCR_53C875 0xf
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875J
+#define PCI_DEVICE_ID_NCR_53C875J 0x8f
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C885
+#define PCI_DEVICE_ID_NCR_53C885 0xd
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895
+#define PCI_DEVICE_ID_NCR_53C895 0xc
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C896
+#define PCI_DEVICE_ID_NCR_53C896 0xb
+#endif
+
+/*
+** NCR53C8XX devices features table.
+*/
+typedef struct {
+ unsigned short device_id;
+ unsigned short revision_id;
+ char *name;
+ unsigned char burst_max;
+ unsigned char offset_max;
+ unsigned char nr_divisor;
+ unsigned int features;
+#define FE_LED0 (1<<0)
+#define FE_WIDE (1<<1)
+#define FE_ULTRA (1<<2)
+#define FE_ULTRA2 (1<<3)
+#define FE_DBLR (1<<4)
+#define FE_QUAD (1<<5)
+#define FE_ERL (1<<6)
+#define FE_CLSE (1<<7)
+#define FE_WRIE (1<<8)
+#define FE_ERMP (1<<9)
+#define FE_BOF (1<<10)
+#define FE_DFS (1<<11)
+#define FE_PFEN (1<<12)
+#define FE_LDSTR (1<<13)
+#define FE_RAM (1<<14)
+#define FE_CLK80 (1<<15)
+#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_SCSI_SET (FE_WIDE|FE_ULTRA|FE_ULTRA2|FE_DBLR|FE_QUAD|FE_CLK80)
+#define FE_SPECIAL_SET (FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM)
+} ncr_chip;
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 3.
+** Memory Read transaction terminated by a retry followed by
+** Memory Read Line command.
+*/
+#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL)
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 5.
+** On paper, this errata is harmless. But it is a good reason for
+** using a shorter programmed burst length (64 DWORDS instead of 128).
+*/
+
+#define SCSI_NCR_CHIP_TABLE \
+{ \
+ {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, \
+ FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, \
+ FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, \
+ FE_ERL|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C820, 0xff, "820", 4, 8, 4, \
+ FE_WIDE|FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 4, 8, 4, \
+ FE_WIDE|FE_ERL|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, \
+ FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, \
+ FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 7, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+ , \
+ {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 7, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM}\
+}
+
+/*
+ * List of supported NCR chip ids
+ */
+#define SCSI_NCR_CHIP_IDS \
+{ \
+ PCI_DEVICE_ID_NCR_53C810, \
+ PCI_DEVICE_ID_NCR_53C815, \
+ PCI_DEVICE_ID_NCR_53C820, \
+ PCI_DEVICE_ID_NCR_53C825, \
+ PCI_DEVICE_ID_NCR_53C860, \
+ PCI_DEVICE_ID_NCR_53C875, \
+ PCI_DEVICE_ID_NCR_53C875J, \
+ PCI_DEVICE_ID_NCR_53C885, \
+ PCI_DEVICE_ID_NCR_53C895, \
+ PCI_DEVICE_ID_NCR_53C896 \
+}
+
+/*
+** Initial setup.
+** Can be overridden at startup by a command line.
+*/
+#define SCSI_NCR_DRIVER_SETUP \
+{ \
+ SCSI_NCR_SETUP_MASTER_PARITY, \
+ SCSI_NCR_SETUP_SCSI_PARITY, \
+ SCSI_NCR_SETUP_DISCONNECTION, \
+ SCSI_NCR_SETUP_SPECIAL_FEATURES, \
+ SCSI_NCR_SETUP_ULTRA_SCSI, \
+ SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \
+ 0, \
+ 0, \
+ 1, \
+ 1, \
+ SCSI_NCR_SETUP_DEFAULT_TAGS, \
+ SCSI_NCR_SETUP_DEFAULT_SYNC, \
+ 0x00, \
+ 7, \
+ SCSI_NCR_SETUP_LED_PIN, \
+ 1, \
+ SCSI_NCR_SETUP_SETTLE_TIME, \
+ SCSI_NCR_SETUP_DIFF_SUPPORT, \
+ 0, \
+ 1 \
+}
+
+/*
+** Boot fail safe setup.
+** Override initial setup from boot command line:
+** ncr53c8xx=safe:y
+*/
+#define SCSI_NCR_DRIVER_SAFE_SETUP \
+{ \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 1, \
+ 2, \
+ 0, \
+ 255, \
+ 0x00, \
+ 255, \
+ 0, \
+ 0, \
+ 10, \
+ 1, \
+ 1, \
+ 1 \
+}
+
+/*
+** Define the table of target capabilities by host and target
+**
+** If you have problems with a scsi device, note the host unit and the
+** corresponding target number.
+**
+** Edit the corresponding entry of the table below and try successively:
+** INQ7_Questionnable
+** INQ7_IdeLike
+** (see the example after the table below)
+**
+** This bitmap is ANDed with byte 7 of the inquiry data on completion of
+** the INQUIRY command.
+** The driver never sees the zeroed bits and will ignore the corresponding
+** capabilities of the target.
+*/
+
+#define INQ7_SftRe 1
+#define INQ7_CmdQueue (1<<1) /* Tagged Command */
+#define INQ7_Reserved (1<<2)
+#define INQ7_Linked (1<<3)
+#define INQ7_Sync (1<<4) /* Synchronous Negotiation */
+#define INQ7_WBus16 (1<<5)
+#define INQ7_WBus32 (1<<6)
+#define INQ7_RelAdr (1<<7)
+
+#define INQ7_IdeLike 0
+#define INQ7_Scsi1Like INQ7_IdeLike
+#define INQ7_Perfect 0xff
+#define INQ7_Questionnable ~(INQ7_CmdQueue|INQ7_Sync)
+#define INQ7_VeryQuestionnable \
+ ~(INQ7_CmdQueue|INQ7_Sync|INQ7_WBus16|INQ7_WBus32)
+
+#define INQ7_Default INQ7_Perfect
+
+#define NCR53C8XX_TARGET_CAPABILITIES \
+/* Host 0 */ \
+{ \
+ { \
+ /* Target 0 */ INQ7_Default, \
+ /* Target 1 */ INQ7_Default, \
+ /* Target 2 */ INQ7_Default, \
+ /* Target 3 */ INQ7_Default, \
+ /* Target 4 */ INQ7_Default, \
+ /* Target 5 */ INQ7_Default, \
+ /* Target 6 */ INQ7_Default, \
+ /* Target 7 */ INQ7_Default, \
+ /* Target 8 */ INQ7_Default, \
+ /* Target 9 */ INQ7_Default, \
+ /* Target 10 */ INQ7_Default, \
+ /* Target 11 */ INQ7_Default, \
+ /* Target 12 */ INQ7_Default, \
+ /* Target 13 */ INQ7_Default, \
+ /* Target 14 */ INQ7_Default, \
+ /* Target 15 */ INQ7_Default, \
+ } \
+}, \
+/* Host 1 */ \
+{ \
+ { \
+ /* Target 0 */ INQ7_Default, \
+ /* Target 1 */ INQ7_Default, \
+ /* Target 2 */ INQ7_Default, \
+ /* Target 3 */ INQ7_Default, \
+ /* Target 4 */ INQ7_Default, \
+ /* Target 5 */ INQ7_Default, \
+ /* Target 6 */ INQ7_Default, \
+ /* Target 7 */ INQ7_Default, \
+ /* Target 8 */ INQ7_Default, \
+ /* Target 9 */ INQ7_Default, \
+ /* Target 10 */ INQ7_Default, \
+ /* Target 11 */ INQ7_Default, \
+ /* Target 12 */ INQ7_Default, \
+ /* Target 13 */ INQ7_Default, \
+ /* Target 14 */ INQ7_Default, \
+ /* Target 15 */ INQ7_Default, \
+ } \
+}
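+
+/*
+** Example (hypothetical edit, not a recommendation): to keep the driver
+** from using tagged queuing and synchronous negotiation with target 3 of
+** host 0, change the "Target 3" entry of host 0 above from INQ7_Default
+** to INQ7_Questionnable; INQ7_IdeLike masks every capability bit.
+*/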
+
+/*
+** Replace the proc_dir_entry of the standard ncr driver.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(1,3,0)
+#if defined(CONFIG_SCSI_NCR53C7xx) || !defined(CONFIG_SCSI_NCR53C8XX)
+#define PROC_SCSI_NCR53C8XX PROC_SCSI_NCR53C7xx
+#endif
+#endif
+
+/**************** ORIGINAL CONTENT of ncrreg.h from FreeBSD ******************/
+
+/*-----------------------------------------------------------------
+**
+** The ncr 53c810 register structure.
+**
+**-----------------------------------------------------------------
+*/
+
+struct ncr_reg {
+/*00*/ u_char nc_scntl0; /* full arb., ena parity, par->ATN */
+
+/*01*/ u_char nc_scntl1; /* no reset */
+ #define ISCON 0x10 /* connected to scsi */
+ #define CRST 0x08 /* force reset */
+
+/*02*/ u_char nc_scntl2; /* no disconnect expected */
+ #define SDU 0x80 /* cmd: disconnect will raise error */
+ #define CHM 0x40 /* sta: chained mode */
+ #define WSS 0x08 /* sta: wide scsi send [W]*/
+ #define WSR 0x01 /* sta: wide scsi received [W]*/
+
+/*03*/ u_char nc_scntl3; /* cnf system clock dependent */
+ #define EWS 0x08 /* cmd: enable wide scsi [W]*/
+ #define ULTRA 0x80 /* cmd: ULTRA enable */
+
+/*04*/ u_char nc_scid; /* cnf host adapter scsi address */
+ #define RRE 0x40 /* r/w:e enable response to resel. */
+ #define SRE 0x20 /* r/w:e enable response to select */
+
+/*05*/ u_char nc_sxfer; /* ### Sync speed and count */
+
+/*06*/ u_char nc_sdid; /* ### Destination-ID */
+
+/*07*/ u_char nc_gpreg; /* ??? IO-Pins */
+
+/*08*/ u_char nc_sfbr; /* ### First byte in phase */
+
+/*09*/ u_char nc_socl;
+ #define CREQ 0x80 /* r/w: SCSI-REQ */
+ #define CACK 0x40 /* r/w: SCSI-ACK */
+ #define CBSY 0x20 /* r/w: SCSI-BSY */
+ #define CSEL 0x10 /* r/w: SCSI-SEL */
+ #define CATN 0x08 /* r/w: SCSI-ATN */
+ #define CMSG 0x04 /* r/w: SCSI-MSG */
+ #define CC_D 0x02 /* r/w: SCSI-C_D */
+ #define CI_O 0x01 /* r/w: SCSI-I_O */
+
+/*0a*/ u_char nc_ssid;
+
+/*0b*/ u_char nc_sbcl;
+
+/*0c*/ u_char nc_dstat;
+ #define DFE 0x80 /* sta: dma fifo empty */
+ #define MDPE 0x40 /* int: master data parity error */
+ #define BF 0x20 /* int: script: bus fault */
+ #define ABRT 0x10 /* int: script: command aborted */
+ #define SSI 0x08 /* int: script: single step */
+ #define SIR 0x04 /* int: script: interrupt instruct. */
+ #define IID 0x01 /* int: script: illegal instruct. */
+
+/*0d*/ u_char nc_sstat0;
+ #define ILF 0x80 /* sta: data in SIDL register lsb */
+ #define ORF 0x40 /* sta: data in SODR register lsb */
+ #define OLF 0x20 /* sta: data in SODL register lsb */
+ #define AIP 0x10 /* sta: arbitration in progress */
+ #define LOA 0x08 /* sta: arbitration lost */
+ #define WOA 0x04 /* sta: arbitration won */
+ #define IRST 0x02 /* sta: scsi reset signal */
+ #define SDP 0x01 /* sta: scsi parity signal */
+
+/*0e*/ u_char nc_sstat1;
+ #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
+
+/*0f*/ u_char nc_sstat2;
+ #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
+ #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
+ #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
+ #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
+ #define LDSC 0x02 /* sta: disconnect & reconnect */
+
+/*10*/ u_int32 nc_dsa; /* --> Base page */
+
+/*14*/ u_char nc_istat; /* --> Main Command and status */
+ #define CABRT 0x80 /* cmd: abort current operation */
+ #define SRST 0x40 /* mod: reset chip */
+ #define SIGP 0x20 /* r/w: message from host to ncr */
+ #define SEM 0x10 /* r/w: message between host + ncr */
+ #define CON 0x08 /* sta: connected to scsi */
+ #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
+ #define SIP 0x02 /* sta: scsi-interrupt */
+ #define DIP 0x01 /* sta: host/script interrupt */
+
+/*15*/ u_char nc_15_;
+/*16*/ u_char nc_16_;
+/*17*/ u_char nc_17_;
+
+/*18*/ u_char nc_ctest0;
+/*19*/ u_char nc_ctest1;
+
+/*1a*/ u_char nc_ctest2;
+ #define CSIGP 0x40
+
+/*1b*/ u_char nc_ctest3;
+ #define FLF 0x08 /* cmd: flush dma fifo */
+ #define CLF 0x04 /* cmd: clear dma fifo */
+ #define FM 0x02 /* mod: fetch pin mode */
+ #define WRIE 0x01 /* mod: write and invalidate enable */
+
+/*1c*/ u_int32 nc_temp; /* ### Temporary stack */
+
+/*20*/ u_char nc_dfifo;
+/*21*/ u_char nc_ctest4;
+ #define BDIS 0x80 /* mod: burst disable */
+ #define MPEE 0x08 /* mod: master parity error enable */
+
+/*22*/ u_char nc_ctest5;
+ #define DFS 0x20 /* mod: dma fifo size */
+/*23*/ u_char nc_ctest6;
+
+/*24*/ u_int32 nc_dbc; /* ### Byte count and command */
+/*28*/ u_int32 nc_dnad; /* ### Next command register */
+/*2c*/ u_int32 nc_dsp; /* --> Script Pointer */
+/*30*/ u_int32 nc_dsps; /* --> Script pointer save/opcode#2 */
+/*34*/ u_int32 nc_scratcha; /* ??? Temporary register a */
+
+/*38*/ u_char nc_dmode;
+ #define BL_2 0x80 /* mod: burst length shift value +2 */
+ #define BL_1 0x40 /* mod: burst length shift value +1 */
+ #define ERL 0x08 /* mod: enable read line */
+ #define ERMP 0x04 /* mod: enable read multiple */
+ #define BOF 0x02 /* mod: burst op code fetch */
+
+/*39*/ u_char nc_dien;
+/*3a*/ u_char nc_dwt;
+
+/*3b*/ u_char nc_dcntl; /* --> Script execution control */
+
+ #define CLSE 0x80 /* mod: cache line size enable */
+ #define PFF 0x40 /* cmd: pre-fetch flush */
+ #define PFEN 0x20 /* mod: pre-fetch enable */
+ #define SSM 0x10 /* mod: single step mode */
+ #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
+ #define STD 0x04 /* cmd: start dma mode */
+ #define IRQD 0x02 /* mod: irq disable */
+ #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
+
+/*3c*/ u_int32 nc_adder;
+
+/*40*/ u_short nc_sien; /* -->: interrupt enable */
+/*42*/ u_short nc_sist; /* <--: interrupt status */
+ #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+ #define STO 0x0400/* sta: timeout (select) */
+ #define GEN 0x0200/* sta: timeout (general) */
+ #define HTH 0x0100/* sta: timeout (handshake) */
+ #define MA 0x80 /* sta: phase mismatch */
+ #define CMP 0x40 /* sta: arbitration complete */
+ #define SEL 0x20 /* sta: selected by another device */
+ #define RSL 0x10 /* sta: reselected by another device*/
+ #define SGE 0x08 /* sta: gross error (over/underflow)*/
+ #define UDC 0x04 /* sta: unexpected disconnect */
+ #define RST 0x02 /* sta: scsi bus reset detected */
+ #define PAR 0x01 /* sta: scsi parity error */
+
+/*44*/ u_char nc_slpar;
+/*45*/ u_char nc_swide;
+/*46*/ u_char nc_macntl;
+/*47*/ u_char nc_gpcntl;
+/*48*/ u_char nc_stime0; /* cmd: timeout for select&handshake*/
+/*49*/ u_char nc_stime1; /* cmd: timeout user defined */
+/*4a*/ u_short nc_respid; /* sta: Reselect-IDs */
+
+/*4c*/ u_char nc_stest0;
+
+/*4d*/ u_char nc_stest1;
+ #define DBLEN 0x08 /* clock doubler running */
+ #define DBLSEL 0x04 /* clock doubler selected */
+
+
+/*4e*/ u_char nc_stest2;
+ #define ROF 0x40 /* reset scsi offset (after gross error!) */
+ #define EXT 0x02 /* extended filtering */
+
+/*4f*/ u_char nc_stest3;
+ #define TE 0x80 /* c: tolerAnt enable */
+ #define HSC 0x20 /* c: Halt SCSI Clock */
+ #define CSF 0x02 /* c: clear scsi fifo */
+
+/*50*/ u_short nc_sidl; /* Lowlevel: latched from scsi data */
+/*52*/ u_char nc_stest4;
+ #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
+ #define SMODE_HVD 0x40 /* High Voltage Differential */
+ #define SMODE_SE 0x80 /* Single Ended */
+ #define SMODE_LVD 0xc0 /* Low Voltage Differential */
+ #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
+
+/*53*/ u_char nc_53_;
+/*54*/ u_short nc_sodl; /* Lowlevel: data out to scsi data */
+/*56*/ u_short nc_56_;
+/*58*/ u_short nc_sbdl; /* Lowlevel: data from scsi data */
+/*5a*/ u_short nc_5a_;
+/*5c*/ u_char nc_scr0; /* Working register B */
+/*5d*/ u_char nc_scr1; /* */
+/*5e*/ u_char nc_scr2; /* */
+/*5f*/ u_char nc_scr3; /* */
+/*60*/
+};
+
+/*-----------------------------------------------------------
+**
+** Utility macros for the script.
+**
+**-----------------------------------------------------------
+*/
+
+#define REGJ(p,r) (offsetof(struct ncr_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
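+
+/*
+** For instance, REG(gpreg) expands (via token pasting) to
+** offsetof(struct ncr_reg, nc_gpreg); the register move macros further
+** down encode that offset into the SCRIPTS instruction word.
+*/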
+
+#ifndef TARGET_MODE
+#define TARGET_MODE 0
+#endif
+
+typedef u_int32 ncrcmd;
+
+/*-----------------------------------------------------------
+**
+** SCSI phases
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_DATA_OUT 0x00000000
+#define SCR_DATA_IN 0x01000000
+#define SCR_COMMAND 0x02000000
+#define SCR_STATUS 0x03000000
+#define SCR_ILG_OUT 0x04000000
+#define SCR_ILG_IN 0x05000000
+#define SCR_MSG_OUT 0x06000000
+#define SCR_MSG_IN 0x07000000
+
+/*-----------------------------------------------------------
+**
+** Data transfer via SCSI.
+**
+**-----------------------------------------------------------
+**
+** MOVE_ABS (LEN)
+** <<start address>>
+**
+** MOVE_IND (LEN)
+** <<dnad_offset>>
+**
+** MOVE_TBL
+** <<dnad_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_MOVE_ABS(l) ((0x08000000 ^ (TARGET_MODE << 1ul)) | (l))
+#define SCR_MOVE_IND(l) ((0x28000000 ^ (TARGET_MODE << 1ul)) | (l))
+#define SCR_MOVE_TBL (0x18000000 ^ (TARGET_MODE << 1ul))
+
+struct scr_tblmove {
+ u_int32 size;
+ u_int32 addr;
+};
+
+/*-----------------------------------------------------------
+**
+** Selection
+**
+**-----------------------------------------------------------
+**
+** SEL_ABS | SCR_ID (0..7) [ | REL_JMP]
+** <<alternate_address>>
+**
+** SEL_TBL | << dnad_offset>> [ | REL_JMP]
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SEL_ABS 0x40000000
+#define SCR_SEL_ABS_ATN 0x41000000
+#define SCR_SEL_TBL 0x42000000
+#define SCR_SEL_TBL_ATN 0x43000000
+
+struct scr_tblsel {
+ u_char sel_0;
+ u_char sel_sxfer;
+ u_char sel_id;
+ u_char sel_scntl3;
+};
+
+#define SCR_JMP_REL 0x04000000
+#define SCR_ID(id) (((u_int32)(id)) << 16)
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** WAIT_DISC
+** dummy: <<alternate_address>>
+**
+** WAIT_RESEL
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_WAIT_DISC 0x48000000
+#define SCR_WAIT_RESEL 0x50000000
+
+/*-----------------------------------------------------------
+**
+** Bit Set / Reset
+**
+**-----------------------------------------------------------
+**
+** SET (flags {|.. })
+**
+** CLR (flags {|.. })
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SET(f) (0x58000000 | (f))
+#define SCR_CLR(f) (0x60000000 | (f))
+
+#define SCR_CARRY 0x00000400
+#define SCR_TRG 0x00000200
+#define SCR_ACK 0x00000040
+#define SCR_ATN 0x00000008
+
+
+
+
+/*-----------------------------------------------------------
+**
+** Memory to memory move
+**
+**-----------------------------------------------------------
+**
+** COPY (bytecount)
+** << source_address >>
+** << destination_address >>
+**
+** SCR_COPY sets the NO FLUSH option by default.
+** SCR_COPY_F does not set this option.
+**
+** For chips which do not support this option,
+** ncr_copy_and_bind() will remove this bit.
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+**
+** Register move and binary operations
+**
+**-----------------------------------------------------------
+**
+** SFBR_REG (reg, op, data) reg = SFBR op data
+** << 0 >>
+**
+** REG_SFBR (reg, op, data) SFBR = reg op data
+** << 0 >>
+**
+** REG_REG (reg, op, data) reg = reg op data
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS(ofs) ((ofs) << 16ul)
+
+#define SCR_SFBR_REG(reg,op,data) \
+ (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | ((data)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+ (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | ((data)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+ (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | ((data)<<8ul))
+
+
+#define SCR_LOAD 0x00000000
+#define SCR_SHL 0x01000000
+#define SCR_OR 0x02000000
+#define SCR_XOR 0x03000000
+#define SCR_AND 0x04000000
+#define SCR_SHR 0x05000000
+#define SCR_ADD 0x06000000
+#define SCR_ADDC 0x07000000
+
+/*-----------------------------------------------------------
+**
+** FROM_REG (reg) reg = SFBR
+** << 0 >>
+**
+** TO_REG (reg) SFBR = reg
+** << 0 >>
+**
+** LOAD_REG (reg, data) reg = <data>
+** << 0 >>
+**
+** LOAD_SFBR(data) SFBR = <data>
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_FROM_REG(reg) \
+ SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define SCR_TO_REG(reg) \
+ SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define SCR_LOAD_REG(reg,data) \
+ SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+ (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+**
+**	Jumps, calls, returns and interrupts
+**
+**-----------------------------------------------------------
+**
+** JUMP [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** CALL [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** CALLR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** RETURN [ | IFTRUE/IFFALSE ( ... ) ]
+** <<dummy>>
+**
+** INT [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** Conditions:
+** WHEN (phase)
+** IF (phase)
+** CARRY
+** DATA (data, mask)
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_OP 0x80000000
+#define SCR_JUMP 0x80080000
+#define SCR_JUMPR 0x80880000
+#define SCR_CALL 0x88080000
+#define SCR_CALLR 0x88880000
+#define SCR_RETURN 0x90080000
+#define SCR_INT 0x98080000
+#define SCR_INT_FLY 0x98180000
+
+#define IFFALSE(arg) (0x00080000 | (arg))
+#define IFTRUE(arg) (0x00000000 | (arg))
+
+#define WHEN(phase) (0x00030000 | (phase))
+#define IF(phase) (0x00020000 | (phase))
+
+#define DATA(D) (0x00040000 | ((D) & 0xff))
+#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET (0x00200000)
+
+/*-----------------------------------------------------------
+**
+** SCSI constants.
+**
+**-----------------------------------------------------------
+*/
+
+/*
+** Messages
+*/
+
+#define M_COMPLETE (0x00)
+#define M_EXTENDED (0x01)
+#define M_SAVE_DP (0x02)
+#define M_RESTORE_DP (0x03)
+#define M_DISCONNECT (0x04)
+#define M_ID_ERROR (0x05)
+#define M_ABORT (0x06)
+#define M_REJECT (0x07)
+#define M_NOOP (0x08)
+#define M_PARITY (0x09)
+#define M_LCOMPLETE (0x0a)
+#define M_FCOMPLETE (0x0b)
+#define M_RESET (0x0c)
+#define M_ABORT_TAG (0x0d)
+#define M_CLEAR_QUEUE (0x0e)
+#define M_INIT_REC (0x0f)
+#define M_REL_REC (0x10)
+#define M_TERMINATE (0x11)
+#define M_SIMPLE_TAG (0x20)
+#define M_HEAD_TAG (0x21)
+#define M_ORDERED_TAG (0x22)
+#define M_IGN_RESIDUE (0x23)
+#define M_IDENTIFY (0x80)
+
+#define M_X_MODIFY_DP (0x00)
+#define M_X_SYNC_REQ (0x01)
+#define M_X_WIDE_REQ (0x03)
+
+/*
+** Status
+*/
+
+#define S_GOOD (0x00)
+#define S_CHECK_COND (0x02)
+#define S_COND_MET (0x04)
+#define S_BUSY (0x08)
+#define S_INT (0x10)
+#define S_INT_COND_MET (0x14)
+#define S_CONFLICT (0x18)
+#define S_TERMINATED (0x20)
+#define S_QUEUE_FULL (0x28)
+#define S_ILLEGAL (0xff)
+#define S_SENSE (0x80)
+
+/*
+ * End of ncrreg from FreeBSD
+ */
+
+#endif /* !defined HOSTS_C */
+
+#endif /* defined NCR53C8XX_H */
diff --git a/linux/src/drivers/scsi/pas16.c b/linux/src/drivers/scsi/pas16.c
new file mode 100644
index 0000000..bd96420
--- /dev/null
+++ b/linux/src/drivers/scsi/pas16.c
@@ -0,0 +1,576 @@
+#define AUTOSENSE
+#define PSEUDO_DMA
+#define FOO
+#define UNSAFE /* Not unsafe for PAS16 -- use it */
+
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
+ * bytes at a time. Since interrupts are disabled by default during
+ * these transfers, we might need this to give reasonable interrupt
+ * service time if the transfer size gets too large.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This
+ * parameter comes from the NCR5380 code. It is NOT unsafe with
+ * the PAS16 and you should use it. If you don't you will have
+ * a problem with dropped characters during high speed
+ * communications during SCSI transfers. If you really don't
+ * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or
+ * twiddle with the transfer size in the high level code.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - There are many different models of
+ * the Pro Audio Spectrum/Studio 16, and I only have one of
+ * them, so this may require a little tweaking. An interrupt
+ * is triggered to autoprobe for the interrupt line. Note:
+ * with the newer model boards, the interrupt is set via
+ * software after reset using the default_irq for the
+ * current board number.
+ *
+ *
+ * 2. With command line overrides - pas16=port,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the PAS16_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0x388 address, IRQ10, I could say
+ * -DPAS16_OVERRIDE={{0x388, 10}}
+ * NOTE: Untested.
+ *
+ * Note that if the override methods are used, place holders must
+ * be specified for other boards in the system.
+ *
+ *
+ * Configuration notes :
+ * The current driver does not support interrupt sharing with the
+ * sound portion of the card. If you use the same irq for the
+ * scsi port and sound you will have problems. Either use
+ * a different irq for the scsi port or don't use interrupts
+ * for the scsi port.
+ *
+ * If you have problems with your card not being recognized, use
+ * the LILO command line override. Try to get it recognized without
+ * interrupts. Ie, for a board at the default 0x388 base port,
+ * boot: linux pas16=0x388,255
+ *
+ * (255 is the IRQ_NONE constant in NCR5380.h)
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "pas16.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_pas16 = {
+ PROC_SCSI_PAS16, 5, "pas16",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+static int pas_maxi = 0;
+static int pas_wmaxi = 0;
+
+
+int scsi_irq_translate[] =
+ { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 };
+
+/* The default_irqs array contains values used to set the irq into the
+ * board via software (as must be done on newer model boards without
+ * irq jumpers on the board). The first value in the array will be
+ * assigned to logical board 0, the next to board 1, etc.
+ */
+int default_irqs[] = { PAS16_DEFAULT_BOARD_1_IRQ,
+ PAS16_DEFAULT_BOARD_2_IRQ,
+ PAS16_DEFAULT_BOARD_3_IRQ,
+ PAS16_DEFAULT_BOARD_4_IRQ
+ };
+
+static struct override {
+ unsigned short io_port;
+ int irq;
+} overrides
+#ifdef PAS16_OVERRIDE
+ [] = PAS16_OVERRIDE;
+#else
+ [4] = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO},
+ {0,IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ unsigned short io_port;
+ int noauto;
+} bases[] = { {PAS16_DEFAULT_BASE_1, 0},
+ {PAS16_DEFAULT_BASE_2, 0},
+ {PAS16_DEFAULT_BASE_3, 0},
+ {PAS16_DEFAULT_BASE_4, 0}
+ };
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+unsigned short pas16_offset[ 8 ] =
+ {
+ 0x1c00, /* OUTPUT_DATA_REG */
+ 0x1c01, /* INITIATOR_COMMAND_REG */
+ 0x1c02, /* MODE_REG */
+ 0x1c03, /* TARGET_COMMAND_REG */
+ 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */
+ 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */
+ 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?)
+ * START_DMA_TARGET_RECEIVE_REG wo
+ */
+ 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro,
+ * START_DMA_INITIATOR_RECEIVE_REG wo
+ */
+ };
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+   where something crashed or gets stuck at) */
+/* 1 = blue
+ 2 = green
+ 3 = cyan
+ 4 = red
+ 5 = magenta
+ 6 = yellow
+ 7 = white
+*/
+#if 1
+#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
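+
+/* A note on the sequence above: reading 0x3da resets the VGA attribute
+ * controller flip-flop, writing 0x31 to 0x3c0 selects the overscan
+ * (border color) register with the palette left enabled, and the second
+ * write to 0x3c0 stores the color itself.
+ */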
+
+
+/*
+ * Function : enable_board( int board_num, unsigned short port )
+ *
+ * Purpose : set address in new model board
+ *
+ * Inputs : board_num - logical board number 0-3, port - base address
+ *
+ */
+
+void enable_board( int board_num, unsigned short port )
+{
+ outb( 0xbc + board_num, MASTER_ADDRESS_PTR );
+ outb( port >> 2, MASTER_ADDRESS_PTR );
+}
+
+
+
+/*
+ * Function : init_board( unsigned short io_port, int irq, int force_irq )
+ *
+ * Purpose : Set the board up to handle the SCSI interface
+ *
+ * Inputs : port - base address of the board,
+ * irq - irq to assign to the SCSI port
+ * force_irq - set it even if it conflicts with sound driver
+ *
+ */
+
+void init_board( unsigned short io_port, int irq, int force_irq )
+{
+ unsigned int tmp;
+ unsigned int pas_irq_code;
+
+ /* Initialize the SCSI part of the board */
+
+ outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */
+ outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+
+ NCR5380_read( RESET_PARITY_INTERRUPT_REG );
+
+ /* Set the SCSI interrupt pointer without mucking up the sound
+ * interrupt pointer in the same byte.
+ */
+ pas_irq_code = ( irq < 16 ) ? scsi_irq_translate[irq] : 0;
+ tmp = inb( io_port + IO_CONFIG_3 );
+
+ if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0
+ && !force_irq )
+ {
+ printk( "pas16: WARNING: Can't use same irq as sound "
+ "driver -- interrupts disabled\n" );
+ /* Set up the drive parameters, disable 5380 interrupts */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ }
+ else
+ {
+ tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 );
+ outb( tmp, io_port + IO_CONFIG_3 );
+
+ /* Set up the drive parameters and enable 5380 interrupts */
+ outb( 0x6d, io_port + SYS_CONFIG_4 );
+ }
+}
+
+
+/*
+ * Function : pas16_hw_detect( unsigned short board_num )
+ *
+ * Purpose : determine if a pas16 board is present
+ *
+ * Inputs : board_num - logical board number ( 0 - 3 )
+ *
+ * Returns : 0 if board not found, 1 if found.
+ */
+
+int pas16_hw_detect( unsigned short board_num )
+{
+ unsigned char board_rev, tmp;
+ unsigned short io_port = bases[ board_num ].io_port;
+
+ /* See if we can find a PAS16 board at the address associated
+ * with this logical board number.
+ */
+
+ /* First, attempt to take a newer model board out of reset and
+ * give it a base address. This shouldn't affect older boards.
+ */
+ enable_board( board_num, io_port );
+
+ /* Now see if it looks like a PAS16 board */
+ board_rev = inb( io_port + PCB_CONFIG );
+
+ if( board_rev == 0xff )
+ return 0;
+
+ tmp = board_rev ^ 0xe0;
+
+ outb( tmp, io_port + PCB_CONFIG );
+ tmp = inb( io_port + PCB_CONFIG );
+ outb( board_rev, io_port + PCB_CONFIG );
+
+ if( board_rev != tmp ) /* Not a PAS-16 */
+ return 0;
+
+ if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 )
+ return 0; /* return if no SCSI interface found */
+
+ /* Mediavision has some new model boards that return ID bits
+ * that indicate a SCSI interface, but they're not (LMS). We'll
+ * put in an additional test to try to weed them out.
+ */
+
+ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
+ NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */
+ if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */
+ return 0; /* and try to read */
+ NCR5380_write( MODE_REG, 0x00 ); /* it back. */
+ if( NCR5380_read( MODE_REG ) != 0x00 )
+ return 0;
+
+ return 1;
+}
+
+
+/*
+ * Function : pas16_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array,
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+void pas16_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("pas16_setup : usage pas16=io_port,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].io_port = (unsigned short) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].io_port == (unsigned short) ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : int pas16_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes PAS16 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int pas16_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0;
+ static unsigned short current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned short io_port;
+ int count;
+
+ tpnt->proc_dir = &proc_scsi_pas16;
+ tpnt->proc_info = &pas16_proc_info;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ io_port = 0;
+
+ if (overrides[current_override].io_port)
+ {
+ io_port = overrides[current_override].io_port;
+ enable_board( current_override, io_port );
+ init_board( io_port, overrides[current_override].irq, 1 );
+ }
+ else
+ for (; !io_port && (current_base < NO_BASES); ++current_base) {
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port);
+#endif
+ if ( !bases[current_base].noauto &&
+ pas16_hw_detect( current_base ) ){
+ io_port = bases[current_base].io_port;
+ init_board( io_port, default_irqs[ current_base ], 0 );
+#if (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : detected board.\n");
+#endif
+ }
+ }
+
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port);
+#endif
+
+ if (!io_port)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->io_port = io_port;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, pas16_intr, SA_INTERRUPT, "pas16", NULL)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ /* Disable 5380 interrupts, leave drive params the same */
+ outb( 0x4d, io_port + SYS_CONFIG_4 );
+ outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 );
+ }
+
+#if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%04x", instance->host_no, (int)
+ instance->io_port);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int pas16_biosparam(Disk *disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect. Someone
+ * using hard disks on a trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the linux fdisk program
+ * and matching the H_C_S coordinates to what DOS uses.
+ */
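+
+/*
+ * Worked example of the arithmetic below: a disk of 2097152 sectors
+ * (1 GB) maps to 64 heads x 32 sectors with 2097152 >> 11 = 1024
+ * cylinders; a disk needing more than 1024 cylinders is recomputed as
+ * 255 heads x 63 sectors and, if still too big, clamped to 1023
+ * cylinders.
+ */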
+
+int pas16_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11; /* I think I have it as /(32*64) */
+ if( ip[2] > 1024 ) { /* yes, >, not >= */
+ ip[0]=255;
+ ip[1]=63;
+ ip[2]=size/(63*255);
+ if( ip[2] > 1023 ) /* yes >1023... */
+ ip[2] = 1023;
+ }
+
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ register unsigned char *d = dst;
+ register unsigned short reg = (unsigned short) (instance->io_port +
+ P_DATA_REG_OFFSET);
+ register int i = len;
+ int ii = 0;
+
+ while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
+ ++ii;
+
+ insb( reg, d, i );
+
+ if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+ outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ }
+ if (ii > pas_maxi)
+ pas_maxi = ii;
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+ int len) {
+ register unsigned char *s = src;
+ register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
+ register int i = len;
+ int ii = 0;
+
+ while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
+ ++ii;
+
+ outsb( reg, s, i );
+
+ if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
+ outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
+ printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+ instance->host_no);
+ return -1;
+ }
+ if (ii > pas_wmaxi)
+ pas_wmaxi = ii;
+ return 0;
+}
+
+#include "NCR5380.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = MV_PAS16;
+
+#include <linux/module.h>
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/pas16.h b/linux/src/drivers/scsi/pas16.h
new file mode 100644
index 0000000..a1bda1f
--- /dev/null
+++ b/linux/src/drivers/scsi/pas16.h
@@ -0,0 +1,196 @@
+/*
+ * This driver adapted from Drew Eckhardt's Trantor T128 driver
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 666-5836
+ *
+ * ( Based on T128 - DISTRIBUTION RELEASE 3. )
+ *
+ * Modified to work with the Pro Audio Spectrum/Studio 16
+ * by John Weidman.
+ *
+ *
+ * For more information, please consult
+ *
+ * Media Vision
+ * (510) 770-8600
+ * (800) 348-7116
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+
+#ifndef PAS16_H
+#define PAS16_H
+
+#define PAS16_PUBLIC_RELEASE 3
+
+#define PDEBUG_INIT 0x1
+#define PDEBUG_TRANSFER 0x2
+
+#define PAS16_DEFAULT_BASE_1 0x388
+#define PAS16_DEFAULT_BASE_2 0x384
+#define PAS16_DEFAULT_BASE_3 0x38c
+#define PAS16_DEFAULT_BASE_4 0x288
+
+#define PAS16_DEFAULT_BOARD_1_IRQ 10
+#define PAS16_DEFAULT_BOARD_2_IRQ 12
+#define PAS16_DEFAULT_BOARD_3_IRQ 14
+#define PAS16_DEFAULT_BOARD_4_IRQ 15
+
+
+/*
+ * The Pro Audio Spectrum boards are I/O mapped. They use a Zilog 5380
+ * SCSI controller, which is the equivalent of NCR's 5380. "Pseudo-DMA"
+ * architecture is used, where a PAL drives the DMA signals on the 5380
+ * allowing fast, blind transfers with proper handshaking.
+ */
+
+
+/* The Time-out Counter register is used to safe-guard against a stuck
+ * bus (in the case of RDY driven handshake) or a stuck byte (if 16-Bit
+ * DMA conversion is used). The counter uses a 28.224MHz clock
+ * divided by 14 as its clock source. In the case of a stuck byte in
+ * the holding register, an interrupt is generated (and mixed with the
+ * one from the drive) using the CD-ROM interrupt pointer.
+ */
+
+#define P_TIMEOUT_COUNTER_REG 0x4000
+#define P_TC_DISABLE 0x80 /* Set to 0 to enable timeout int. */
+ /* Bits D6-D0 contain timeout count */
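+
+/* Rough numbers for the counter above, assuming the 28.224 MHz / 14 clock
+ * mentioned earlier: one tick is 14 / 28.224 MHz, about 0.5 us, so the
+ * 7-bit count field (0-127) covers time-outs of up to roughly 63 us.
+ */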
+
+
+#define P_TIMEOUT_STATUS_REG_OFFSET 0x4001
+#define P_TS_TIM 0x80 /* check timeout status */
+ /* Bits D6-D4 N/U */
+#define P_TS_ARM_DRQ_INT 0x08 /* Arm DRQ Int. When set high,
+ * the next rising edge will
+ * cause a CD-ROM interrupt.
+ * When set low, the interrupt
+ * will be cleared. There is
+ * no status available for
+ * this interrupt.
+ */
+#define P_TS_ENABLE_TO_ERR_INTERRUPT /* Enable timeout error int. */
+#define P_TS_ENABLE_WAIT /* Enable Wait */
+
+#define P_TS_CT 0x01 /* clear timeout. Note: writing
+ * to this register clears the
+ * timeout error int. or status
+ */
+
+
+/*
+ * The data register reads/writes to/from the 5380 in pseudo-DMA mode
+ */
+
+#define P_DATA_REG_OFFSET 0x5c00 /* rw */
+
+#define P_STATUS_REG_OFFSET 0x5c01 /* ro */
+#define P_ST_RDY 0x80 /* 5380 DDRQ Status */
+
+#define P_IRQ_STATUS 0x5c03
+#define P_IS_IRQ 0x80 /* DIRQ status */
+
+#define PCB_CONFIG 0x803
+#define MASTER_ADDRESS_PTR 0x9a01 /* Fixed position - no relo */
+#define SYS_CONFIG_4 0x8003
+#define WAIT_STATE 0xbc00
+#define OPERATION_MODE_1 0xec03
+#define IO_CONFIG_3 0xf002
+
+
+#ifndef ASM
+int pas16_abort(Scsi_Cmnd *);
+int pas16_biosparam(Disk *, kdev_t, int*);
+int pas16_detect(Scsi_Host_Template *);
+int pas16_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int pas16_reset(Scsi_Cmnd *, unsigned int);
+int pas16_proc_info (char *buffer ,char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define MV_PAS16 {NULL, NULL, NULL, NULL, \
+ "Pro Audio Spectrum-16 SCSI", \
+ pas16_detect, NULL, NULL, \
+ NULL, pas16_queue_command, pas16_abort, pas16_reset, NULL, \
+ pas16_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned short io_port
+
+#define NCR5380_local_declare() \
+ volatile unsigned short io_port
+
+#define NCR5380_setup(instance) \
+ io_port = (instance)->io_port
+
+#define PAS16_io_port(reg) ( io_port + pas16_offset[(reg)] )
+
+#if !(PDEBUG & PDEBUG_TRANSFER)
+#define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) )
+#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
+#else
+#define NCR5380_read(reg) \
+ (((unsigned char) printk("scsi%d : read register %d at io_port %04x\n"\
+ , instance->host_no, (reg), PAS16_io_port(reg))), inb( PAS16_io_port(reg)) )
+
+#define NCR5380_write(reg, value) \
+ (printk("scsi%d : write %02x to register %d at io_port %04x\n", \
+ instance->host_no, (value), (reg), PAS16_io_port(reg)), \
+ outb( (value),PAS16_io_port(reg) ) )
+
+#endif
+
+
+#define NCR5380_intr pas16_intr
+#define NCR5380_queue_command pas16_queue_command
+#define NCR5380_abort pas16_abort
+#define NCR5380_reset pas16_reset
+#define NCR5380_proc_info pas16_proc_info
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define PAS16_IRQS 0xd4a8
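+/* Quick check of the mask above: the listed IRQs correspond to bits
+ * 0x8000 + 0x4000 + 0x1000 + 0x0400 + 0x0080 + 0x0020 + 0x0008 = 0xd4a8,
+ * which matches PAS16_IRQS.
+ */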
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* PAS16_H */
diff --git a/linux/src/drivers/scsi/ppa.c b/linux/src/drivers/scsi/ppa.c
new file mode 100644
index 0000000..fd224f9
--- /dev/null
+++ b/linux/src/drivers/scsi/ppa.c
@@ -0,0 +1,1464 @@
+/* ppa.c -- low level driver for the IOMEGA PPA3
+ * parallel port SCSI host adapter.
+ *
+ * (The PPA3 is the embedded controller in the ZIP drive.)
+ *
+ * (c) 1995,1996 Grant R. Guenther, grant@torque.net,
+ * under the terms of the GNU Public License.
+ *
+ * Current Maintainer: David Campbell (Perth, Western Australia)
+ * campbell@gear.torque.net
+ * dcampbel@p01.as17.honeywell.com.au
+ *
+ * My unofficial company acronym list is 21 pages long:
+ * FLA: Four letter acronym with built in facility for
+ * future expansion to five letters.
+ */
+
+#include <linux/config.h>
+
+/* The following #define is to avoid a clash with hosts.c */
+#define PPA_CODE 1
+#ifndef HAVE_PC87332
+#define HAVE_PC87332 0
+#endif
+#define PPA_PROBE_SPP 0x0001
+#define PPA_PROBE_PS2 0x0002
+#define PPA_PROBE_ECR 0x0010
+#define PPA_PROBE_EPP17 0x0100
+#define PPA_PROBE_EPP19 0x0200
+int port_probe(unsigned short);
+
+#include <linux/blk.h>
+#include "sd.h"
+#include "hosts.h"
+typedef struct {
+ int base; /* Actual port address */
+ int mode; /* Transfer mode */
+ int host; /* Host number (for proc) */
+ Scsi_Cmnd *cur_cmd; /* Current queued command */
+ struct tq_struct ppa_tq; /* Polling interrupt stuff */
+ unsigned long jstart; /* Jiffies at start */
+ unsigned failed:1; /* Failure flag */
+} ppa_struct;
+
+#define PPA_EMPTY \
+{-1, /* base */ \
+PPA_AUTODETECT, /* mode */ \
+-1, /* host */ \
+NULL, /* cur_cmd */ \
+{0, 0, ppa_interrupt, NULL}, \
+0, /* jstart */ \
+0 /* failed */ \
+}
+
+#include "ppa.h"
+#undef CONFIG_PARPORT
+#define NO_HOSTS 4
+static ppa_struct ppa_hosts[NO_HOSTS] =
+{PPA_EMPTY, PPA_EMPTY, PPA_EMPTY, PPA_EMPTY};
+
+#define PPA_BASE(x) ppa_hosts[(x)].base
+
+int base[NO_HOSTS] =
+{0x03bc, 0x0378, 0x0278, 0x0000};
+#define parbus_base base
+#define parbus_no NO_HOSTS
+
+static inline int ppa_pb_claim(int host_no)
+{
+ if (ppa_hosts[host_no].cur_cmd)
+ ppa_hosts[host_no].cur_cmd->SCp.phase++;
+ return 0;
+}
+
+/***************************************************************************
+ * Parallel port probing routines *
+ ***************************************************************************/
+
+#ifndef MODULE
+/*
+ * Command line parameters (for built-in driver):
+ *
+ * Syntax: ppa=base[,mode[,use_sg]]
+ *
+ * For example: ppa=0x378 or ppa=0x378,0,3
+ *
+ */
+
+void ppa_setup(char *str, int *ints)
+{
+ static int x = 0;
+
+ if (x == 0) { /* Disable ALL known ports */
+ int i;
+
+ for (i = 0; i < NO_HOSTS; i++)
+ parbus_base[i] = 0x0000;
+ }
+ switch (ints[0]) {
+ case 3:
+ ppa_sg = ints[3];
+ case 2:
+ ppa_hosts[x].mode = ints[2];
+ parbus_base[x] = ints[1];
+ break;
+ default:
+ printk("PPA: I only use between 2 to 3 parameters.\n");
+ break;
+ }
+ x++;
+}
+#else
+Scsi_Host_Template driver_template = PPA;
+#include "scsi_module.c"
+#endif
+
+/*
+ * Start of Chipset kludges
+ */
+
+#if HAVE_PC87332 > 0
+#warning PC87332 Kludge code included
+static inline int pc87332_port(int host_no)
+{
+ /* A routine to detect and kludge pc87332 chipsets into the
+ * "optimum" mode for parallel port data transfer.
+ * This assumes EPP is better than ECP...
+ * (Which it is for disk drives but not printers and scanners)
+ */
+ int base = ppa_hosts[host_no].base;
+
+ /* This is where a pc87332 can hide */
+ unsigned short index_addr[4] =
+ {
+ 0x0398, 0x026e, 0x015c, 0x002e
+ };
+
+ /* Bits 0&1 of FAR (Function Address Register) which specify where
+ * the LPT port will show up at.
+ */
+ unsigned short port_ref[4] =
+ {
+ 0x378, 0x3bc, 0x278, 0xffff
+ };
+
+ unsigned char a;
+ int loop;
+
+ for (loop = 0; loop < 4; loop++) {
+ /* Clear the "wax" out of the pc87332, only needed after hard
+ * reset.
+ */
+ inb(index_addr[loop]);
+ inb(index_addr[loop]);
+ inb(index_addr[loop]);
+ inb(index_addr[loop]);
+
+ /* Anyone home ?? */
+ outb(0xff, index_addr[loop]);
+ a = inb(index_addr[loop]);
+ switch (a) {
+ case (0x0f): /* PC87732 */
+ break;
+ case (0x1f): /* PC87306 */
+ break;
+ case (0x7f): /* PC87??? */
+ break;
+ default:
+ continue;
+ } /* Is this pc87332 on the desired port */
+ outb(0x01, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ if (port_ref[a & 0x03] != base)
+ continue;
+
+ /* Found a pc87332 */
+ printk("NatSemi PC87332 (or variant) at 0x%04x\n", base);
+
+ /* Try to enable EPP modes
+ * with hardware data direction
+ */
+ if (base != 0x3bc) {
+ /* EPP 1.9 */
+ outb(0x04, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ printk("Old reg1 = %02x\n", a);
+ /* 0x01 for EPP 1.7, 0x03 for EPP 1.9, 0x0c for ECP */
+ a = (a & 0xf0) | 0x03;
+ outb(a, index_addr[loop] + 1);
+ outb(a, index_addr[loop] + 1);
+
+ /* Software data direction selection */
+ outb(0x02, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ printk("Old reg2 = %02x\n", a);
+ /* 0x80 for software, 0x00 for hardware */
+ a = (a & 0x7f) | 0x80;
+ outb(a, index_addr[loop] + 1);
+ outb(a, index_addr[loop] + 1);
+ ppa_hosts[host_no].mode = PPA_EPP_32;
+ } else {
+ /* There is not enough address space for the 0x3bc port
+ * to have EPP registers so we will kludge it into an
+ * ECP
+ * port to allow bi-directional byte mode...
+ */
+ /* ECP */
+ outb(0x04, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ a = (a & 0xfb) | 0x06;
+ outb(a, index_addr[loop] + 1);
+ outb(a, index_addr[loop] + 1);
+ ppa_hosts[host_no].mode = PPA_PS2;
+ }
+
+ outb(0x04, index_addr[loop]);
+ a = inb(index_addr[loop] + 1);
+ return ppa_hosts[host_no].mode;
+ }
+ return 0;
+}
+#else
+#define pc87332_port(x)
+#endif /* HAVE_PC87332 */
+
+static inline int generic_port(int host_no)
+{
+ /* Generic parallel port detection
+ * This will try to discover if the port is
+ * EPP, ECP, PS/2 or NIBBLE (In that order, approx....)
+ */
+ unsigned int save_ctr, save_ecr, r;
+ int ppb = PPA_BASE(host_no);
+
+ save_ctr = r_ctr(ppb);
+ save_ecr = r_ecr(ppb);
+ r = port_probe(ppb);
+ w_ecr(ppb, save_ecr);
+ w_ctr(ppb, save_ctr);
+
+ if (r & PPA_PROBE_SPP)
+ ppa_hosts[host_no].mode = PPA_NIBBLE;
+
+ if (r & PPA_PROBE_PS2) {
+ ppa_hosts[host_no].mode = PPA_PS2;
+ if (r & PPA_PROBE_ECR)
+ w_ecr(ppb, 0x20);
+ }
+ if ((r & PPA_PROBE_EPP17) || (r & PPA_PROBE_EPP19)) {
+ /* ppa_hosts[host_no].mode = PPA_EPP_32; */
+ if (r & PPA_PROBE_ECR)
+ w_ecr(ppb, 0x80);
+ }
+ return ppa_hosts[host_no].mode;
+}
+
+int ppa_detect(Scsi_Host_Template * host)
+{
+ struct Scsi_Host *hreg;
+ int ports;
+ int i, nhosts;
+
+ printk("ppa: Version %s\n", PPA_VERSION);
+ nhosts = 0;
+
+ for (i = 0; i < parbus_no; i++) {
+ if (parbus_base[i] == 0x0000)
+ continue;
+ ppa_hosts[i].base = parbus_base[i];
+
+ /* sanity checks */
+ if (check_region(parbus_base[i],
+ (parbus_base[i] == 0x03bc) ? 3 : 8))
+ continue;
+
+ pc87332_port(i);
+ if (!generic_port(i))
+ continue;
+
+ if (ppa_init(i))
+ continue;
+
+ /* now the glue ... */
+ switch (ppa_hosts[i].mode) {
+ case PPA_NIBBLE:
+ case PPA_PS2:
+ ports = 3;
+ break;
+ case PPA_EPP_8:
+ case PPA_EPP_16:
+ case PPA_EPP_32:
+ ports = 8;
+ break;
+ default: /* Never gets here */
+ continue;
+ }
+ request_region(ppa_hosts[i].base, ports, "ppa");
+ host->can_queue = PPA_CAN_QUEUE;
+ host->sg_tablesize = ppa_sg;
+ hreg = scsi_register(host, 0);
+ hreg->io_port = ppa_hosts[i].base;
+ hreg->n_io_port = ports;
+ hreg->dma_channel = -1;
+ hreg->unique_id = i;
+ ppa_hosts[i].host = hreg->host_no;
+ nhosts++;
+ }
+ if (nhosts == 0)
+ return 0;
+ else
+ return 1; /* at least one host was detected */
+}
+
+/* This is to give the ppa driver a way to modify the timings (and other
+ * parameters) by writing to the /proc/scsi/ppa/0 file.
+ * Very simple method really... (Too simple, no error checking :( )
+ * Reason: Kernel hackers HATE having to unload and reload modules for
+ * testing...
+ * Also gives a method to use a script to obtain optimum timings (TODO)
+ */
+
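+/*
+ * Example of the write interface (hypothetical session; per ppa.h,
+ * mode 3 is PPA_EPP_8):
+ *
+ *     echo "mode=3" > /proc/scsi/ppa/0
+ *
+ * switches host 0 to 8-bit EPP transfers without reloading the module.
+ */
+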
+static inline int ppa_strncmp(const char *a, const char *b, int len)
+{
+ int loop;
+ for (loop = 0; loop < len; loop++)
+ if (a[loop] != b[loop])
+ return 1;
+
+ return 0;
+}
+static inline int ppa_proc_write(int hostno, char *buffer, int length)
+{
+ unsigned long x;
+
+ if ((length > 5) && (ppa_strncmp(buffer, "mode=", 5) == 0)) {
+ x = simple_strtoul(buffer + 5, NULL, 0);
+ ppa_hosts[hostno].mode = x;
+ return length;
+ }
+ printk("ppa /proc: invalid variable\n");
+ return (-EINVAL);
+}
+
+int ppa_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout)
+{
+ int i;
+ int len = 0;
+
+ for (i = 0; i < 4; i++)
+ if (ppa_hosts[i].host == hostno)
+ break;
+
+ if (inout)
+ return ppa_proc_write(i, buffer, length);
+
+ len += sprintf(buffer + len, "Version : %s\n", PPA_VERSION);
+ len += sprintf(buffer + len, "Port : 0x%04x\n", ppa_hosts[i].base);
+ len += sprintf(buffer + len, "Mode : %s\n", PPA_MODE_STRING[ppa_hosts[i].mode]);
+
+ /* Request for beyond end of buffer */
+ if (offset > len)
+ return 0;
+
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ return len;
+} /* end of ppa_proc_info */
+static int device_check(int host_no);
+
+#if PPA_DEBUG > 0
+#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
+ y, __FUNCTION__, __LINE__); ppa_fail_func(x,y);
+static inline void ppa_fail_func(int host_no, int error_code)
+#else
+static inline void ppa_fail(int host_no, int error_code)
+#endif
+{
+ /* If we fail a device then we trash status / message bytes */
+ if (ppa_hosts[host_no].cur_cmd) {
+ ppa_hosts[host_no].cur_cmd->result = error_code << 16;
+ ppa_hosts[host_no].failed = 1;
+ }
+}
+
+/*
+ * Wait for the high bit to be set.
+ *
+ * In principle, this could be tied to an interrupt, but the adapter
+ * doesn't appear to be designed to support interrupts. We spin on
+ * the 0x80 ready bit.
+ */
+static unsigned char ppa_wait(int host_no)
+{
+ int k;
+ unsigned short ppb = PPA_BASE(host_no);
+ unsigned char r;
+
+ k = PPA_SPIN_TMO;
+ do {
+ r = r_str(ppb);
+ k--;
+ udelay(1);
+ }
+ while (!(r & 0x80) && (k));
+
+ /*
+ * return some status information.
+ * Semantics: 0xc0 = ZIP wants more data
+ * 0xd0 = ZIP wants to send more data
+ * 0xe0 = ZIP is expecting SCSI command data
+ * 0xf0 = end of transfer, ZIP is sending status
+ */
+ if (k)
+ return (r & 0xf0);
+
+ /* Counter expired - Time out occurred */
+ ppa_fail(host_no, DID_TIME_OUT);
+ printk("ppa timeout in ppa_wait\n");
+ return 0; /* command timed out */
+}
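+
+/* With PPA_SPIN_TMO at 50000 and a udelay(1) per pass, the loop above
+ * gives up after roughly 50 ms (plus per-iteration port I/O overhead),
+ * which is when the DID_TIME_OUT path is taken.
+ */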
+
+/*
+ * output a string, in whatever mode is available, according to the
+ * PPA protocol.
+ */
+static inline void epp_reset(unsigned short ppb)
+{
+ int i;
+
+ i = r_str(ppb);
+ w_str(ppb, i);
+ w_str(ppb, i & 0xfe);
+}
+
+static inline void ecp_sync(unsigned short ppb)
+{
+ int i;
+
+ if ((r_ecr(ppb) & 0xe0) != 0x80)
+ return;
+
+ for (i = 0; i < 100; i++) {
+ if (r_ecr(ppb) & 0x01)
+ return;
+ udelay(5);
+ }
+ printk("ppa: ECP sync failed as data still present in FIFO.\n");
+}
+
+/*
+ * Here is the asm code for the SPP/PS2 protocols for the i386.
+ * This has been optimised for speed on 386/486 machines. There will
+ * be very little improvement on the current 586+ machines as it is the
+ * IO statements which will limit throughput.
+ */
+#ifdef __i386__
+#define BYTE_OUT(reg) \
+ " movb " #reg ",%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " addl $2,%%edx\n" \
+ " movb $0x0e,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " movb $0x0c,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " subl $2,%%edx\n"
+
+static inline int ppa_byte_out(unsigned short base, char *buffer, unsigned int len)
+{
+ int i;
+
+ for (i = len; i; i--) {
+ w_dtr(base, *buffer++);
+ w_ctr(base, 0xe);
+ w_ctr(base, 0xc);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+#define BYTE_IN(reg) \
+ " inb (%%dx),%%al\n" \
+ " movb %%al," #reg "\n" \
+ " addl $2,%%edx\n" \
+ " movb $0x27,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " movb $0x25,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " subl $2,%%edx\n"
+
+static inline int ppa_byte_in(unsigned short base, char *buffer, int len)
+{
+ int i;
+
+ for (i = len; i; i--) {
+ *buffer++ = r_dtr(base);
+ w_ctr(base, 0x27);
+ w_ctr(base, 0x25);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+#define NIBBLE_IN(reg) \
+ " incl %%edx\n" \
+ " movb $0x04,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " decl %%edx\n" \
+ " inb (%%dx),%%al\n" \
+ " andb $0xf0,%%al\n" \
+ " movb %%al," #reg "\n" \
+ " incl %%edx\n" \
+ " movb $0x06,%%al\n" \
+ " outb %%al,(%%dx)\n" \
+ " decl %%edx\n" \
+ " inb (%%dx),%%al\n" \
+ " shrb $4,%%al\n" \
+ " orb %%al," #reg "\n"
+
+static inline int ppa_nibble_in(unsigned short base, char *buffer, int len)
+{
+ for (; len; len--) {
+ unsigned char h;
+
+ w_ctr(base, 0x4);
+ h = r_str(base) & 0xf0;
+ w_ctr(base, 0x6);
+ *buffer++ = h | ((r_str(base) & 0xf0) >> 4);
+ }
+ return 1; /* All went well - we hope! */
+}
+#else /* Old style C routines */
+
+static inline int ppa_byte_out(unsigned short base, const char *buffer, int len)
+{
+ unsigned short ctr_p = base + 2;
+ int i;
+
+ for (i = len; i; i--) {
+ outb(*buffer++, base);
+ outb(0xe, ctr_p);
+ outb(0xc, ctr_p);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+static inline int ppa_byte_in(unsigned short base, char *buffer, int len)
+{
+ unsigned short ctr_p = base + 2;
+ int i;
+
+ for (i = len; i; i--) {
+ *buffer++ = inb(base);
+ outb(0x27, ctr_p);
+ outb(0x25, ctr_p);
+ }
+ return 1; /* All went well - we hope! */
+}
+
+static inline int ppa_nibble_in(unsigned short str_p, char *buffer, int len)
+{
+ unsigned short ctr_p = str_p + 1;
+ unsigned char h, l;
+ int i;
+
+ for (i = len; i; i--) {
+ outb(0x4, ctr_p);
+ h = inb(str_p);
+ outb(0x6, ctr_p);
+ l = inb(str_p);
+ *buffer++ = (h & 0xf0) | ((l & 0xf0) >> 4);
+ }
+ return 1; /* All went well - we hope! */
+}
+#endif
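+
+/* Example of the nibble-mode reassembly above (made-up register values):
+ * if the first status read returns 0xA7 and the second returns 0x5B, the
+ * delivered byte is (0xA7 & 0xf0) | ((0x5B & 0xf0) >> 4) = 0xA5.
+ */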
+
+static inline int ppa_epp_out(unsigned short epp_p, unsigned short str_p, const char *buffer, int len)
+{
+ int i;
+ for (i = len; i; i--) {
+ outb(*buffer++, epp_p);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ if (inb(str_p) & 0x01)
+ return 0;
+#endif
+ }
+ return 1;
+}
+
+static int ppa_out(int host_no, char *buffer, int len)
+{
+ int r;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ r = ppa_wait(host_no);
+
+ if ((r & 0x50) != 0x40) {
+ ppa_fail(host_no, DID_ERROR);
+ return 0;
+ }
+ switch (ppa_hosts[host_no].mode) {
+ case PPA_NIBBLE:
+ case PPA_PS2:
+ /* 8 bit output, with a loop */
+ r = ppa_byte_out(ppb, buffer, len);
+ break;
+
+ case PPA_EPP_32:
+ case PPA_EPP_16:
+ case PPA_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x4);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ r = ppa_epp_out(ppb + 4, ppb + 1, buffer, len);
+#else
+ if (!(((long) buffer | len) & 0x03))
+ outsl(ppb + 4, buffer, len >> 2);
+ else
+ outsb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0xc);
+ r = !(r_str(ppb) & 0x01);
+#endif
+ w_ctr(ppb, 0xc);
+ ecp_sync(ppb);
+ break;
+
+ default:
+ printk("PPA: bug in ppa_out()\n");
+ r = 0;
+ }
+ return r;
+}
+
+static inline int ppa_epp_in(int epp_p, int str_p, char *buffer, int len)
+{
+ int i;
+ for (i = len; i; i--) {
+ *buffer++ = inb(epp_p);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ if (inb(str_p) & 0x01)
+ return 0;
+#endif
+ }
+ return 1;
+}
+
+static int ppa_in(int host_no, char *buffer, int len)
+{
+ int r;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ r = ppa_wait(host_no);
+
+ if ((r & 0x50) != 0x50) {
+ ppa_fail(host_no, DID_ERROR);
+ return 0;
+ }
+ switch (ppa_hosts[host_no].mode) {
+ case PPA_NIBBLE:
+ /* 4 bit input, with a loop */
+ r = ppa_nibble_in(ppb + 1, buffer, len);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case PPA_PS2:
+ /* 8 bit input, with a loop */
+ w_ctr(ppb, 0x25);
+ r = ppa_byte_in(ppb, buffer, len);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+ break;
+
+ case PPA_EPP_32:
+ case PPA_EPP_16:
+ case PPA_EPP_8:
+ epp_reset(ppb);
+ w_ctr(ppb, 0x24);
+#ifdef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+ r = ppa_epp_in(ppb + 4, ppb + 1, buffer, len);
+#else
+ if (!(((long) buffer | len) & 0x03))
+ insl(ppb + 4, buffer, len >> 2);
+ else
+ insb(ppb + 4, buffer, len);
+ w_ctr(ppb, 0x2c);
+ r = !(r_str(ppb) & 0x01);
+#endif
+ w_ctr(ppb, 0x2c);
+ ecp_sync(ppb);
+ break;
+
+ default:
+ printk("PPA: bug in ppa_ins()\n");
+ r = 0;
+ break;
+ }
+ return r;
+}
+
+/* end of ppa_io.h */
+static inline void ppa_d_pulse(unsigned short ppb, unsigned char b)
+{
+ w_dtr(ppb, b);
+ w_ctr(ppb, 0xc);
+ w_ctr(ppb, 0xe);
+ w_ctr(ppb, 0xc);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+}
+
+static void ppa_disconnect(int host_no)
+{
+ unsigned short ppb = PPA_BASE(host_no);
+
+ ppa_d_pulse(ppb, 0);
+ ppa_d_pulse(ppb, 0x3c);
+ ppa_d_pulse(ppb, 0x20);
+ ppa_d_pulse(ppb, 0xf);
+}
+
+static inline void ppa_c_pulse(unsigned short ppb, unsigned char b)
+{
+ w_dtr(ppb, b);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0x6);
+ w_ctr(ppb, 0x4);
+ w_ctr(ppb, 0xc);
+}
+
+static inline void ppa_connect(int host_no, int flag)
+{
+ unsigned short ppb = PPA_BASE(host_no);
+
+ ppa_c_pulse(ppb, 0);
+ ppa_c_pulse(ppb, 0x3c);
+ ppa_c_pulse(ppb, 0x20);
+ if ((flag == CONNECT_EPP_MAYBE) &&
+ IN_EPP_MODE(ppa_hosts[host_no].mode))
+ ppa_c_pulse(ppb, 0xcf);
+ else
+ ppa_c_pulse(ppb, 0x8f);
+}
+
+static int ppa_select(int host_no, int target)
+{
+ int k;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ /*
+ * Bit 6 (0x40) is the device selected bit.
+ * First we must wait till the current device goes off line...
+ */
+ k = PPA_SELECT_TMO;
+ do {
+ k--;
+ } while ((r_str(ppb) & 0x40) && (k));
+ if (!k)
+ return 0;
+
+ w_dtr(ppb, (1 << target));
+ w_ctr(ppb, 0xe);
+ w_ctr(ppb, 0xc);
+ w_dtr(ppb, 0x80); /* This is NOT the initiator */
+ w_ctr(ppb, 0x8);
+
+ k = PPA_SELECT_TMO;
+ do {
+ k--;
+ }
+ while (!(r_str(ppb) & 0x40) && (k));
+ if (!k)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * This is based on a trace of what the Iomega DOS 'guest' driver does.
+ * I've tried several different kinds of parallel ports with guest and
+ * coded this to react in the same ways that it does.
+ *
+ * The return value from this function is just a hint about where the
+ * handshaking failed.
+ *
+ */
+static int ppa_init(int host_no)
+{
+ int retv;
+ unsigned short ppb = PPA_BASE(host_no);
+
+ ppa_disconnect(host_no);
+ ppa_connect(host_no, CONNECT_NORMAL);
+
+ retv = 2; /* Failed */
+
+ w_ctr(ppb, 0xe);
+ if ((r_str(ppb) & 0x08) == 0x08)
+ retv--;
+
+ w_ctr(ppb, 0xc);
+ if ((r_str(ppb) & 0x08) == 0x00)
+ retv--;
+
+ /* This is a SCSI BUS reset signal */
+ if (!retv) {
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x08);
+ udelay(30);
+ w_ctr(ppb, 0x0c);
+ udelay(1000); /* Allow devices to settle down */
+ }
+ ppa_disconnect(host_no);
+ udelay(1000); /* Another delay to allow devices to settle */
+
+ if (!retv)
+ retv = device_check(host_no);
+
+ return retv;
+}
+
+static inline int ppa_send_command(Scsi_Cmnd * cmd)
+{
+ int host_no = cmd->host->unique_id;
+ int k;
+
+ w_ctr(PPA_BASE(host_no), 0x0c);
+
+ for (k = 0; k < cmd->cmd_len; k++)
+ if (!ppa_out(host_no, &cmd->cmnd[k], 1))
+ return 0;
+ return 1;
+}
+
+/*
+ * The bulk flag enables some optimisations in the data transfer loops;
+ * it should be true for any command that transfers data in integral
+ * numbers of sectors.
+ *
+ * The driver appears to remain stable if we speed up the parallel port
+ * i/o in this function, but not elsewhere.
+ */
+static int ppa_completion(Scsi_Cmnd * cmd)
+{
+ /* Return codes:
+ * -1 Error
+ * 0 Told to schedule
+ * 1 Finished data transfer
+ */
+ int host_no = cmd->host->unique_id;
+ unsigned short ppb = PPA_BASE(host_no);
+ unsigned long start_jiffies = jiffies;
+
+ unsigned char r, v;
+ int fast, bulk, status;
+
+ v = cmd->cmnd[0];
+ bulk = ((v == READ_6) ||
+ (v == READ_10) ||
+ (v == WRITE_6) ||
+ (v == WRITE_10));
+
+ /*
+ * We only get here if the drive is ready to communicate,
+ * hence no need for a full ppa_wait.
+ */
+ r = (r_str(ppb) & 0xf0);
+
+ while (r != (unsigned char) 0xf0) {
+ /*
+ * If we have been running for more than a full timer tick
+ * then take a rest.
+ */
+ if (jiffies > start_jiffies + 1)
+ return 0;
+
+ if (((r & 0xc0) != 0xc0) || (cmd->SCp.this_residual <= 0)) {
+ ppa_fail(host_no, DID_ERROR);
+ return -1; /* ERROR_RETURN */
+ }
+ /* determine if we should use burst I/O */
+ fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
+ ? PPA_BURST_SIZE : 1;
+
+ if (r == (unsigned char) 0xc0)
+ status = ppa_out(host_no, cmd->SCp.ptr, fast);
+ else
+ status = ppa_in(host_no, cmd->SCp.ptr, fast);
+
+ cmd->SCp.ptr += fast;
+ cmd->SCp.this_residual -= fast;
+
+ if (!status) {
+ ppa_fail(host_no, DID_BUS_BUSY);
+ return -1; /* ERROR_RETURN */
+ }
+ if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ /* if scatter/gather, advance to the next segment */
+ if (cmd->SCp.buffers_residual--) {
+ cmd->SCp.buffer++;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ }
+ }
+ /* Now check to see if the drive is ready to communicate */
+ r = (r_str(ppb) & 0xf0);
+ /* If not, drop back down to the scheduler and wait a timer tick */
+ if (!(r & 0x80))
+ return 0;
+ }
+ return 1; /* FINISH_RETURN */
+}
+
+/*
+ * Since the PPA itself doesn't generate interrupts, we use
+ * the scheduler's task queue to generate a stream of call-backs and
+ * complete the request when the drive is ready.
+ */
+static void ppa_interrupt(void *data)
+{
+ ppa_struct *tmp = (ppa_struct *) data;
+ Scsi_Cmnd *cmd = tmp->cur_cmd;
+
+ if (!cmd) {
+ printk("PPA: bug in ppa_interrupt\n");
+ return;
+ }
+ if (ppa_engine(tmp, cmd)) {
+ tmp->ppa_tq.data = (void *) tmp;
+ tmp->ppa_tq.sync = 0;
+ queue_task(&tmp->ppa_tq, &tq_timer);
+ return;
+ }
+ /* Command must have completed, hence it is safe to let go... */
+#if PPA_DEBUG > 0
+ switch ((cmd->result >> 16) & 0xff) {
+ case DID_OK:
+ break;
+ case DID_NO_CONNECT:
+ printk("ppa: no device at SCSI ID %i\n", cmd->target);
+ break;
+ case DID_BUS_BUSY:
+ printk("ppa: BUS BUSY - EPP timeout detected\n");
+ break;
+ case DID_TIME_OUT:
+ printk("ppa: unknown timeout\n");
+ break;
+ case DID_ABORT:
+ printk("ppa: told to abort\n");
+ break;
+ case DID_PARITY:
+ printk("ppa: parity error (???)\n");
+ break;
+ case DID_ERROR:
+ printk("ppa: internal driver error\n");
+ break;
+ case DID_RESET:
+ printk("ppa: told to reset device\n");
+ break;
+ case DID_BAD_INTR:
+ printk("ppa: bad interrupt (???)\n");
+ break;
+ default:
+ printk("ppa: bad return code (%02x)\n", (cmd->result >> 16) & 0xff);
+ }
+#endif
+
+ if (cmd->SCp.phase > 1)
+ ppa_disconnect(cmd->host->unique_id);
+
+ tmp->cur_cmd = 0;
+ cmd->scsi_done(cmd);
+ return;
+}
+
+static int ppa_engine(ppa_struct * tmp, Scsi_Cmnd * cmd)
+{
+ int host_no = cmd->host->unique_id;
+ unsigned short ppb = PPA_BASE(host_no);
+ unsigned char l = 0, h = 0;
+ int retv;
+
+ /* First check for any errors that may have occurred
+ * Here we check for internal errors
+ */
+ if (tmp->failed)
+ return 0;
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Phase 0 - Waiting for parport */
+ if ((jiffies - tmp->jstart) > HZ) {
+ /*
+ * We waited more than a second
+ * for parport to call us
+ */
+ ppa_fail(host_no, DID_BUS_BUSY);
+ return 0;
+ }
+ return 1; /* wait until ppa_wakeup claims parport */
+ case 1: /* Phase 1 - Connected */
+ { /* Perform a sanity check for cable unplugged */
+ int retv = 2; /* Failed */
+
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+
+ w_ctr(ppb, 0xe);
+ if ((r_str(ppb) & 0x08) == 0x08)
+ retv--;
+
+ w_ctr(ppb, 0xc);
+ if ((r_str(ppb) & 0x08) == 0x00)
+ retv--;
+
+ if (retv)
+ if ((jiffies - tmp->jstart) > (1 * HZ)) {
+ printk("ppa: Parallel port cable is unplugged!!\n");
+ ppa_fail(host_no, DID_BUS_BUSY);
+ return 0;
+ } else {
+ ppa_disconnect(host_no);
+ return 1; /* Try again in a jiffy */
+ }
+ cmd->SCp.phase++;
+ }
+
+ case 2: /* Phase 2 - We are now talking to the scsi bus */
+ if (!ppa_select(host_no, cmd->target)) {
+ ppa_fail(host_no, DID_NO_CONNECT);
+ return 0;
+ }
+ cmd->SCp.phase++;
+
+ case 3: /* Phase 3 - Ready to accept a command */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ if (!ppa_send_command(cmd))
+ return 0;
+ cmd->SCp.phase++;
+
+ case 4: /* Phase 4 - Setup scatter/gather buffers */
+ if (cmd->use_sg) {
+ /* if many buffers are available, start filling the first */
+ cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
+ cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ cmd->SCp.ptr = cmd->SCp.buffer->address;
+ } else {
+ /* else fill the only available buffer */
+ cmd->SCp.buffer = NULL;
+ cmd->SCp.this_residual = cmd->request_bufflen;
+ cmd->SCp.ptr = cmd->request_buffer;
+ }
+ cmd->SCp.buffers_residual = cmd->use_sg;
+ cmd->SCp.phase++;
+
+ case 5: /* Phase 5 - Data transfer stage */
+ w_ctr(ppb, 0x0c);
+ if (!(r_str(ppb) & 0x80))
+ return 1;
+
+ retv = ppa_completion(cmd);
+ if (retv == -1)
+ return 0;
+ if (retv == 0)
+ return 1;
+ cmd->SCp.phase++;
+
+ case 6: /* Phase 6 - Read status/message */
+ cmd->result = DID_OK << 16;
+ /* Check for data overrun */
+ if (ppa_wait(host_no) != (unsigned char) 0xf0) {
+ ppa_fail(host_no, DID_ERROR);
+ return 0;
+ }
+ if (ppa_in(host_no, &l, 1)) { /* read status byte */
+ /* Check for optional message byte */
+ if (ppa_wait(host_no) == (unsigned char) 0xf0)
+ ppa_in(host_no, &h, 1);
+ cmd->result = (DID_OK << 16) + (h << 8) + (l & STATUS_MASK);
+ }
+ return 0; /* Finished */
+ break;
+
+ default:
+ printk("ppa: Invalid scsi phase\n");
+ }
+ return 0;
+}
+
+int ppa_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ int host_no = cmd->host->unique_id;
+
+ if (ppa_hosts[host_no].cur_cmd) {
+ printk("PPA: bug in ppa_queuecommand\n");
+ return 0;
+ }
+ ppa_hosts[host_no].failed = 0;
+ ppa_hosts[host_no].jstart = jiffies;
+ ppa_hosts[host_no].cur_cmd = cmd;
+ cmd->scsi_done = done;
+ cmd->result = DID_ERROR << 16; /* default return code */
+ cmd->SCp.phase = 0; /* bus free */
+
+ ppa_pb_claim(host_no);
+
+ ppa_hosts[host_no].ppa_tq.data = ppa_hosts + host_no;
+ ppa_hosts[host_no].ppa_tq.sync = 0;
+ queue_task(&ppa_hosts[host_no].ppa_tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+
+ return 0;
+}
+
+/*
+ * Apparently the disk->capacity attribute is off by 1 sector
+ * for all disk drives. We add the one here, but it should really
+ * be done in sd.c. Even if it gets fixed there, this will still
+ * work.
+ */
+int ppa_biosparam(Disk * disk, kdev_t dev, int ip[])
+{
+ ip[0] = 0x40;
+ ip[1] = 0x20;
+ ip[2] = (disk->capacity + 1) / (ip[0] * ip[1]);
+ if (ip[2] > 1024) {
+ ip[0] = 0xff;
+ ip[1] = 0x3f;
+ ip[2] = (disk->capacity + 1) / (ip[0] * ip[1]);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+ return 0;
+}
+
+int ppa_abort(Scsi_Cmnd * cmd)
+{
+ /*
+ * There is no method for aborting commands since Iomega
+ * have tied the SCSI_MESSAGE line high in the interface
+ */
+
+ switch (cmd->SCp.phase) {
+ case 0: /* Do not have access to parport */
+ case 1: /* Have not connected to interface */
+ cmd->result = DID_ABORT;
+ cmd->done(cmd);
+ return SCSI_ABORT_SUCCESS;
+ break;
+ default: /* SCSI command sent, can not abort */
+ return SCSI_ABORT_BUSY;
+ break;
+ }
+}
+
+int ppa_reset(Scsi_Cmnd * cmd, unsigned int x)
+{
+ int host_no = cmd->host->unique_id;
+ int ppb = PPA_BASE(host_no);
+
+ /*
+ * PHASE1:
+ * Bring the interface crashing down on whatever is running;
+ * hopefully this will kill the request.
+ * Bring the interface back up and reset the drive (and anything
+ * attached, for that matter).
+ */
+ if (cmd)
+ if (cmd->SCp.phase)
+ ppa_disconnect(cmd->host->unique_id);
+
+ ppa_connect(host_no, CONNECT_NORMAL);
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x8);
+ udelay(30);
+ w_ctr(ppb, 0xc);
+ udelay(1000); /* delay for devices to settle down */
+ ppa_disconnect(host_no);
+ udelay(1000); /* Additional delay to allow devices to settle down */
+
+ /*
+ * PHASE2:
+ * Sanity check for the sake of mid-level driver
+ */
+ if (!cmd) {
+ printk("ppa bus reset called for invalid command.\n");
+ return SCSI_RESET_NOT_RUNNING;
+ }
+ /*
+ * PHASE3:
+ * Flag the current command as having died due to reset
+ */
+ ppa_connect(host_no, CONNECT_NORMAL);
+ ppa_fail(host_no, DID_RESET);
+
+ /* Since the command was already on the timer queue ppa_interrupt
+ * will be called shortly.
+ */
+ return SCSI_RESET_PENDING;
+}
+
+static int device_check(int host_no)
+{
+ /* This routine looks for a device and then attempts to use EPP
+ to send a command. If all goes as planned then EPP is available. */
+
+ static char cmd[6] =
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int loop, old_mode, status, k, ppb = PPA_BASE(host_no);
+ unsigned char l;
+
+ old_mode = ppa_hosts[host_no].mode;
+ for (loop = 0; loop < 8; loop++) {
+ /* Attempt to use EPP for Test Unit Ready */
+ if ((ppb & 0x0007) == 0x0000)
+ ppa_hosts[host_no].mode = PPA_EPP_32;
+
+ second_pass:
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+ /* Select SCSI device */
+ if (!ppa_select(host_no, loop)) {
+ ppa_disconnect(host_no);
+ continue;
+ }
+ printk("ppa: Found device at ID %i, Attempting to use %s\n", loop,
+ PPA_MODE_STRING[ppa_hosts[host_no].mode]);
+
+ /* Send SCSI command */
+ status = 1;
+ w_ctr(ppb, 0x0c);
+ for (l = 0; (l < 6) && (status); l++)
+ status = ppa_out(host_no, cmd, 1);
+
+ if (!status) {
+ ppa_disconnect(host_no);
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x08);
+ udelay(30);
+ w_ctr(ppb, 0x0c);
+ udelay(1000);
+ ppa_disconnect(host_no);
+ udelay(1000);
+ if (ppa_hosts[host_no].mode == PPA_EPP_32) {
+ ppa_hosts[host_no].mode = old_mode;
+ goto second_pass;
+ }
+ printk("ppa: Unable to establish communication, aborting driver load.\n");
+ return 1;
+ }
+ w_ctr(ppb, 0x0c);
+ k = 1000000; /* 1 Second */
+ do {
+ l = r_str(ppb);
+ k--;
+ udelay(1);
+ } while (!(l & 0x80) && (k));
+
+ l &= 0xf0;
+
+ if (l != 0xf0) {
+ ppa_disconnect(host_no);
+ ppa_connect(host_no, CONNECT_EPP_MAYBE);
+ w_dtr(ppb, 0x40);
+ w_ctr(ppb, 0x08);
+ udelay(30);
+ w_ctr(ppb, 0x0c);
+ udelay(1000);
+ ppa_disconnect(host_no);
+ udelay(1000);
+ if (ppa_hosts[host_no].mode == PPA_EPP_32) {
+ ppa_hosts[host_no].mode = old_mode;
+ goto second_pass;
+ }
+ printk("ppa: Unable to establish communication, aborting driver load.\n");
+ return 1;
+ }
+ ppa_disconnect(host_no);
+ printk("ppa: Communication established with ID %i using %s\n", loop,
+ PPA_MODE_STRING[ppa_hosts[host_no].mode]);
+ return 0;
+ }
+ printk("ppa: No devices found, aborting driver load.\n");
+ return 1;
+}
+
+#define PPA_ID "ppa: "
+
+int port_probe(unsigned short port)
+{
+ int retv = 0;
+ unsigned char a, b, c;
+ unsigned int i, j;
+
+
+ printk(PPA_ID "Probing port %04x\n", port);
+
+/* ##### ###### ######
+ * # # # # # #
+ * # # # # #
+ * ##### ###### ######
+ * # # #
+ * # # # #
+ * ##### # #
+ */
+
+ outb(0x0c, port + 0x402);
+ outb(0x0c, port + 0x002);
+ outb(0x55, port);
+ a = inb(port);
+ if (a != 0x55)
+ return retv;
+ printk(PPA_ID " SPP port present\n");
+
+ retv += PPA_PROBE_SPP;
+
+/* ####### ##### ######
+ * # # # # #
+ * # # # #
+ * ##### # ######
+ * # # #
+ * # # # #
+ * ####### ##### #
+ */
+
+ for (i = 1024; i > 0; i--) { /* clear at most 1k of data from FIFO */
+ a = inb(port + 0x402);
+ if ((a & 0x03) == 0x03)
+ goto no_ecp;
+ if (a & 0x01)
+ break;
+ inb(port + 0x400); /* Remove byte from FIFO */
+ }
+
+ if (i <= 0)
+ goto no_ecp;
+
+ b = a ^ 3;
+ outb(b, port + 0x402);
+ c = inb(port + 0x402);
+
+ if (a == c) {
+ outb(0xc0, port + 0x402); /* FIFO test */
+ j = 0;
+ while (!(inb(port + 0x402) & 0x01) && (j < 1024)) {
+ inb(port + 0x400);
+ j++;
+ }
+ if (j >= 1024)
+ goto no_ecp;
+ i = 0;
+ j = 0;
+ while (!(inb(port + 0x402) & 0x02) && (j < 1024)) {
+ outb(0x00, port + 0x400);
+ i++;
+ j++;
+ }
+ if (j >= 1024)
+ goto no_ecp;
+ j = 0;
+ while (!(inb(port + 0x402) & 0x01) && (j < 1024)) {
+ inb(port + 0x400);
+ j++;
+ }
+ if (j >= 1024)
+ goto no_ecp;
+ printk(PPA_ID " ECP with a %i byte FIFO present\n", i);
+
+ retv += PPA_PROBE_ECR;
+ }
+/* ###### ##### #####
+ * # # # # # #
+ * # # # #
+ * ###### ##### #####
+ * # # #
+ * # # # #
+ * # ##### #######
+ */
+
+ no_ecp:
+ if (retv & PPA_PROBE_ECR)
+ outb(0x20, port + 0x402);
+
+ outb(0x55, port);
+ outb(0x0c, port + 2);
+ a = inb(port);
+ outb(0x55, port);
+ outb(0x2c, port + 2);
+ b = inb(port);
+ if (a != b) {
+ printk(PPA_ID " PS/2 bidirectional port present\n");
+ retv += PPA_PROBE_PS2;
+ }
+/* ####### ###### ######
+ * # # # # #
+ * # # # # #
+ * ##### ###### ######
+ * # # #
+ * # # #
+ * ####### # #
+ */
+
+ if (port & 0x007) {
+ printk(PPA_ID " EPP not supported at this address\n");
+ return retv;
+ }
+ if (retv & PPA_PROBE_ECR) {
+ for (i = 0x00; i < 0x80; i += 0x20) {
+ outb(i, port + 0x402);
+
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+ a = inb(port + 1);
+ if (!(a & 0x01)) {
+ printk(PPA_ID " Failed Intel bug check. (Phony EPP in ECP)\n");
+ return retv;
+ }
+ }
+ printk(PPA_ID " Passed Intel bug check.\n");
+ outb(0x80, port + 0x402);
+ }
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+ a = inb(port + 1);
+
+ if (a & 0x01) {
+ outb(0x0c, port + 0x402);
+ outb(0x0c, port + 0x002);
+ return retv;
+ }
+
+ outb(0x04, port + 2);
+ inb(port + 4);
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+
+ if (a & 0x01) {
+ printk(PPA_ID " EPP 1.9 with hardware direction protocol\n");
+ retv += PPA_PROBE_EPP19;
+ } else {
+ /* The EPP timeout bit was not set; this could be either:
+ * EPP 1.7
+ * EPP 1.9 with software direction
+ */
+ outb(0x24, port + 2);
+ inb(port + 4);
+ a = inb(port + 1);
+ outb(a, port + 1);
+ outb(a & 0xfe, port + 1);
+ if (a & 0x01) {
+ printk(PPA_ID " EPP 1.9 with software direction protocol\n");
+ retv += PPA_PROBE_EPP19;
+ } else {
+ printk(PPA_ID " EPP 1.7\n");
+ retv += PPA_PROBE_EPP17;
+ }
+ }
+
+ outb(0x0c, port + 0x402);
+ outb(0x0c, port + 0x002);
+ return retv;
+}
diff --git a/linux/src/drivers/scsi/ppa.h b/linux/src/drivers/scsi/ppa.h
new file mode 100644
index 0000000..1497c20
--- /dev/null
+++ b/linux/src/drivers/scsi/ppa.h
@@ -0,0 +1,176 @@
+/* Driver for the PPA3 parallel port SCSI HBA embedded in
+ * the Iomega ZIP drive
+ *
+ * (c) 1996 Grant R. Guenther grant@torque.net
+ * David Campbell campbell@torque.net
+ *
+ * All comments to David.
+ */
+
+#include <linux/config.h> /* CONFIG_SCSI_PPA_HAVE_PEDANTIC */
+#ifndef _PPA_H
+#define _PPA_H
+
+#define PPA_VERSION "1.42"
+
+#if 0
+/* Use the following to enable certain chipset support
+ * Default is PEDANTIC = 3
+ */
+#ifndef CONFIG_SCSI_PPA_HAVE_PEDANTIC
+#define CONFIG_SCSI_PPA_HAVE_PEDANTIC 3
+#endif
+#endif
+
+/*
+ * this driver has been hacked by Matteo Frigo (athena@theory.lcs.mit.edu)
+ * to support EPP and scatter-gather. [0.26-athena]
+ *
+ * additional hacks by David Campbell
+ * in response to this driver "mis-behaving" on his machine.
+ * Fixed EPP to handle "software" changing of EPP port data direction.
+ * Chased down EPP timeouts
+ * Made this driver "kernel version friendly" [0.28-athena]
+ *
+ * [ Stuff removed ]
+ *
+ * Compiled against 2.1.53.
+ * Rebuilt ppa_abort() function, should handle unplugged cable.
+ * [1.35s]
+ *
+ * PPA now auto-probes for EPP on base addresses which are aligned on
+ * 8 byte boundaries (0x278 & 0x378) using the attached devices.
+ * This hopefully avoids the nasty problem of trying to detect EPP.
+ * Tested on 2.1.53 [1.36]
+ *
+ * The id_probe utility no longer performs read/write tests.
+ * Additional code included for checking the Intel ECP bug
+ * (Bit 0 of STR stuck low which fools the EPP detection routine)
+ * [1.37]
+ *
+ * Oops! Got the bit sign mixed up for the Intel bug check.
+ * Found that an additional delay is required during SCSI resets
+ * to allow devices to settle down.
+ * [1.38]
+ *
+ * Fixed all problems in the parport sharing scheme. Now ppa can safely be
+ * used with lp or other parport devices on the same parallel port.
+ * 1997 by Andrea Arcangeli
+ * [1.39]
+ *
+ * Little fix in the ppa engine to ensure that ppa doesn't release parport
+ * or disconnect in the wrong cases.
+ * 1997 by Andrea Arcangeli
+ * [1.40]
+ *
+ * Corrected ppa.h for 2.1.x kernels (>=2.1.85)
+ * Modified "Nat Semi Kludge" for extended chipsets
+ * [1.41]
+ *
+ * Fixed id_probe for EPP 1.9 chipsets (misdetected as EPP 1.7)
+ * [1.42]
+ */
+/* ------ END OF USER CONFIGURABLE PARAMETERS ----- */
+
+#ifdef PPA_CODE
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/tqueue.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include "sd.h"
+#include "hosts.h"
+/* batteries not included :-) */
+
+/*
+ * modes in which the driver can operate
+ */
+#define PPA_AUTODETECT 0 /* Autodetect mode */
+#define PPA_NIBBLE 1 /* work in standard 4 bit mode */
+#define PPA_PS2 2 /* PS/2 byte mode */
+#define PPA_EPP_8 3 /* EPP mode, 8 bit */
+#define PPA_EPP_16 4 /* EPP mode, 16 bit */
+#define PPA_EPP_32 5 /* EPP mode, 32 bit */
+#define PPA_UNKNOWN 6 /* Just in case... */
+
+static char *PPA_MODE_STRING[] =
+{
+ "Autodetect",
+ "SPP",
+ "PS/2",
+ "EPP 8 bit",
+ "EPP 16 bit",
+ "EPP 32 bit",
+ "Unknown"};
+
+/* This is a global option */
+int ppa_sg = SG_ALL; /* enable/disable scatter-gather. */
+
+/* other options */
+#define PPA_CAN_QUEUE 1 /* use "queueing" interface */
+#define PPA_BURST_SIZE 512 /* data burst size */
+#define PPA_SELECT_TMO 5000 /* how long to wait for target ? */
+#define PPA_SPIN_TMO 50000 /* ppa_wait loop limiter */
+#define PPA_DEBUG 0 /* debugging option */
+#define IN_EPP_MODE(x) (x == PPA_EPP_8 || x == PPA_EPP_16 || x == PPA_EPP_32)
+
+/* args to ppa_connect */
+#define CONNECT_EPP_MAYBE 1
+#define CONNECT_NORMAL 0
+
+#define r_dtr(x) (unsigned char)inb((x))
+#define r_str(x) (unsigned char)inb((x)+1)
+#define r_ctr(x) (unsigned char)inb((x)+2)
+#define r_epp(x) (unsigned char)inb((x)+4)
+#define r_fifo(x) (unsigned char)inb((x)+0x400)
+#define r_ecr(x) (unsigned char)inb((x)+0x402)
+
+#define w_dtr(x,y) outb(y, (x))
+#define w_str(x,y) outb(y, (x)+1)
+#define w_ctr(x,y) outb(y, (x)+2)
+#define w_epp(x,y) outb(y, (x)+4)
+#define w_fifo(x,y) outb(y, (x)+0x400)
+#define w_ecr(x,y) outb(y, (x)+0x402)
+
+static int ppa_engine(ppa_struct *, Scsi_Cmnd *);
+static int ppa_in(int, char *, int);
+static int ppa_init(int);
+static void ppa_interrupt(void *);
+static int ppa_out(int, char *, int);
+
+struct proc_dir_entry proc_scsi_ppa =
+{PROC_SCSI_PPA, 3, "ppa", S_IFDIR | S_IRUGO | S_IXUGO, 2};
+#else
+extern struct proc_dir_entry proc_scsi_ppa;
+#endif
+
+int ppa_detect(Scsi_Host_Template *);
+const char *ppa_info(struct Scsi_Host *);
+int ppa_queuecommand(Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
+int ppa_abort(Scsi_Cmnd *);
+int ppa_reset(Scsi_Cmnd *, unsigned int);
+int ppa_proc_info(char *, char **, off_t, int, int, int);
+int ppa_biosparam(Disk *, kdev_t, int *);
+
+#define PPA { proc_dir: &proc_scsi_ppa, \
+ proc_info: ppa_proc_info, \
+ name: "Iomega parport ZIP drive", \
+ detect: ppa_detect, \
+ queuecommand: ppa_queuecommand, \
+ abort: ppa_abort, \
+ reset: ppa_reset, \
+ bios_param: ppa_biosparam, \
+ this_id: -1, \
+ sg_tablesize: SG_ALL, \
+ cmd_per_lun: 1, \
+ use_clustering: ENABLE_CLUSTERING \
+}
+#endif /* _PPA_H */
diff --git a/linux/src/drivers/scsi/qlogicfas.c b/linux/src/drivers/scsi/qlogicfas.c
new file mode 100644
index 0000000..b5cb9dd
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicfas.c
@@ -0,0 +1,679 @@
+/*----------------------------------------------------------------*/
+/*
+ Qlogic linux driver - work in progress. No Warranty express or implied.
+ Use at your own risk. Support Tort Reform so you won't have to read all
+ these silly disclaimers.
+
+ Copyright 1994, Tom Zerucha.
+ zerucha@shell.portal.com
+
+ Additional Code, and much appreciated help by
+ Michael A. Griffith
+ grif@cs.ucr.edu
+
+ Thanks to Eric Youngdale and Dave Hinds for loadable module and PCMCIA
+ help respectively, and for suffering through my foolishness during the
+ debugging process.
+
+ Reference Qlogic FAS408 Technical Manual, 53408-510-00A, May 10, 1994
+ (you can reference it, but it is incomplete and inaccurate in places)
+
+ Version 0.45 6/9/96 - kernel 1.2.0+
+
+ Functions as standalone, loadable, and PCMCIA driver, the latter from
+ Dave Hind's PCMCIA package.
+
+ Redistributable under terms of the GNU Public License
+
+*/
+/*----------------------------------------------------------------*/
+/* Configuration */
+
+/* Set the following to 2 to use normal interrupt (active high/totempole-
+ tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open
+ drain */
+#define QL_INT_ACTIVE_HIGH 2
+
+/* Set the following to 1 to enable the use of interrupts. Note that 0 tends
+ to be more stable, but slower (or ties up the system more) */
+#define QL_USE_IRQ 1
+
+/* Set the following to max out the speed of the PIO PseudoDMA transfers,
+ again, 0 tends to be slower, but more stable. */
+#define QL_TURBO_PDMA 1
+
+/* This should be 1 to enable parity detection */
+#define QL_ENABLE_PARITY 1
+
+/* This will reset all devices when the driver is initialized (during bootup).
+ The other linux drivers don't do this, but the DOS drivers do, and after
+ using DOS or some kind of crash or lockup this will bring things back
+ without requiring a cold boot. It does take some time to recover from a
+ reset, so it is slower, and I have seen timeouts so that devices weren't
+ recognized when this was set. */
+#define QL_RESET_AT_START 0
+
+/* crystal frequency in megahertz (for offset 5 and 9)
+ Please set this for your card. Most Qlogic cards are 40 Mhz. The
+ Control Concepts ISA (not VLB) is 24 Mhz */
+#define XTALFREQ 40
+
+/**********/
+/* DANGER! modify these at your own risk */
+/* SLOWCABLE can usually be reset to zero if you have a clean setup and
+ proper termination. The rest are for synchronous transfers and other
+ advanced features if your device can transfer faster than 5Mb/sec.
+ If you are really curious, email me for a quick howto until I have
+ something official */
+/**********/
+
+/*****/
+/* config register 1 (offset 8) options */
+/* This needs to be set to 1 if your cabling is long or noisy */
+#define SLOWCABLE 1
+
+/*****/
+/* offset 0xc */
+/* This will set fast (10Mhz) synchronous timing when set to 1
+ For this to have an effect, FASTCLK must also be 1 */
+#define FASTSCSI 0
+
+/* This when set to 1 will set a faster sync transfer rate */
+#define FASTCLK 0
+/*(XTALFREQ>25?1:0)*/
+
+/*****/
+/* offset 6 */
+/* This is the sync transfer divisor, XTALFREQ/X will be the maximum
+ achievable data rate (assuming the rest of the system is capable
+ and set properly) */
+#define SYNCXFRPD 5
+/*(XTALFREQ/5)*/
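+/* Example with the values above: XTALFREQ 40 and SYNCXFRPD 5 give a
+ * theoretical ceiling of 40 / 5 = 8 MB/s, while the 24 MHz Control
+ * Concepts ISA board mentioned earlier would top out at 24 / 5 = 4.8 MB/s.
+ */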
+
+/*****/
+/* offset 7 */
+/* This is the count of how many synchronous transfers can take place
+ i.e. how many reqs can occur before an ack is given.
+ The maximum value for this is 15, the upper bits can modify
+ REQ/ACK assertion and deassertion during synchronous transfers
+ If this is 0, the bus will only transfer asynchronously */
+#define SYNCOFFST 0
+/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles
+ of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will
+ cause the deassertion to be early by 1/2 clock. Bits 5&4 control
+ the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). */
+
+/*----------------------------------------------------------------*/
+#ifdef PCMCIA
+#undef QL_INT_ACTIVE_HIGH
+#define QL_INT_ACTIVE_HIGH 0
+#define MODULE
+#endif
+
+#include <linux/module.h>
+
+#ifdef PCMCIA
+#undef MODULE
+#endif
+
+#include <linux/blk.h> /* to get disk capacity */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/unistd.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include "sd.h"
+#include "hosts.h"
+#include "qlogicfas.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_qlogicfas = {
+ PROC_SCSI_QLOGICFAS, 6, "qlogicfas",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/*----------------------------------------------------------------*/
+/* driver state info, local to driver */
+static int qbase = 0; /* Port */
+static int qinitid; /* initiator ID */
+static int qabort; /* Flag to cause an abort */
+static int qlirq = -1; /* IRQ being used */
+static char qinfo[80]; /* description */
+static Scsi_Cmnd *qlcmd; /* current command being processed */
+
+static int qlcfg5 = ( XTALFREQ << 5 ); /* 15625/512 */
+static int qlcfg6 = SYNCXFRPD;
+static int qlcfg7 = SYNCOFFST;
+static int qlcfg8 = ( SLOWCABLE << 7 ) | ( QL_ENABLE_PARITY << 4 );
+static int qlcfg9 = ( ( XTALFREQ + 4 ) / 5 );
+static int qlcfgc = ( FASTCLK << 3 ) | ( FASTSCSI << 4 );
+
+/*----------------------------------------------------------------*/
+/* The qlogic card uses two register maps - These macros select which one */
+#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd ))
+#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | QL_INT_ACTIVE_HIGH , qbase + 0xd ))
+
+/* following is watchdog timeout in microseconds */
+#define WATCHDOG 5000000
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+ where something crashed or gets stuck at and as a simple profiler) */
+
+#if 0
+#define rtrc(i) {inb(0x3da);outb(0x31,0x3c0);outb((i),0x3c0);}
+#else
+#define rtrc(i) {}
+#endif
+
+/*----------------------------------------------------------------*/
+/* local functions */
+/*----------------------------------------------------------------*/
+static void ql_zap(void);
+/* error recovery - reset everything */
+void ql_zap()
+{
+int x;
+unsigned long flags;
+ save_flags( flags );
+ cli();
+ x = inb(qbase + 0xd);
+ REG0;
+ outb(3, qbase + 3); /* reset SCSI */
+ outb(2, qbase + 3); /* reset chip */
+ if (x & 0x80)
+ REG1;
+ restore_flags( flags );
+}
+
+/*----------------------------------------------------------------*/
+/* do pseudo-dma */
+static int ql_pdma(int phase, char *request, int reqlen)
+{
+int j;
+ j = 0;
+ if (phase & 1) { /* in */
+#if QL_TURBO_PDMA
+rtrc(4)
+ /* empty fifo in large chunks */
+ if( reqlen >= 128 && (inb( qbase + 8 ) & 2) ) { /* full */
+ insl( qbase + 4, request, 32 );
+ reqlen -= 128;
+ request += 128;
+ }
+ while( reqlen >= 84 && !( j & 0xc0 ) ) /* 2/3 */
+ if( (j=inb( qbase + 8 )) & 4 ) {
+ insl( qbase + 4, request, 21 );
+ reqlen -= 84;
+ request += 84;
+ }
+ if( reqlen >= 44 && (inb( qbase + 8 ) & 8) ) { /* 1/3 */
+ insl( qbase + 4, request, 11 );
+ reqlen -= 44;
+ request += 44;
+ }
+#endif
+ /* until both empty and int (or until reqlen is 0) */
+rtrc(7)
+ j = 0;
+ while( reqlen && !( (j & 0x10) && (j & 0xc0) ) ) {
+ /* while bytes to receive and not empty */
+ j &= 0xc0;
+ while ( reqlen && !( (j=inb(qbase + 8)) & 0x10 ) ) {
+ *request++ = inb(qbase + 4);
+ reqlen--;
+ }
+ if( j & 0x10 )
+ j = inb(qbase+8);
+
+ }
+ }
+ else { /* out */
+#if QL_TURBO_PDMA
+rtrc(4)
+ if( reqlen >= 128 && inb( qbase + 8 ) & 0x10 ) { /* empty */
+ outsl(qbase + 4, request, 32 );
+ reqlen -= 128;
+ request += 128;
+ }
+ while( reqlen >= 84 && !( j & 0xc0 ) ) /* 1/3 */
+ if( !((j=inb( qbase + 8 )) & 8) ) {
+ outsl( qbase + 4, request, 21 );
+ reqlen -= 84;
+ request += 84;
+ }
+ if( reqlen >= 40 && !(inb( qbase + 8 ) & 4 ) ) { /* 2/3 */
+ outsl( qbase + 4, request, 10 );
+ reqlen -= 40;
+ request += 40;
+ }
+#endif
+ /* until full and int (or until reqlen is 0) */
+rtrc(7)
+ j = 0;
+ while( reqlen && !( (j & 2) && (j & 0xc0) ) ) {
+ /* while bytes to send and not full */
+ while ( reqlen && !( (j=inb(qbase + 8)) & 2 ) ) {
+ outb(*request++, qbase + 4);
+ reqlen--;
+ }
+ if( j & 2 )
+ j = inb(qbase+8);
+ }
+ }
+/* maybe return reqlen */
+ return inb( qbase + 8 ) & 0xc0;
+}
+
+/*----------------------------------------------------------------*/
+/* wait for interrupt flag (polled - not real hardware interrupt) */
+static int ql_wai(void)
+{
+int i,k;
+ k = 0;
+ i = jiffies + WATCHDOG;
+ while ( i > jiffies && !qabort && !((k = inb(qbase + 4)) & 0xe0))
+ barrier();
+ if (i <= jiffies)
+ return (DID_TIME_OUT);
+ if (qabort)
+ return (qabort == 1 ? DID_ABORT : DID_RESET);
+ if (k & 0x60)
+ ql_zap();
+ if (k & 0x20)
+ return (DID_PARITY);
+ if (k & 0x40)
+ return (DID_ERROR);
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* initiate scsi command - queueing handler */
+static void ql_icmd(Scsi_Cmnd * cmd)
+{
+unsigned int i;
+unsigned long flags;
+
+ qabort = 0;
+
+ save_flags( flags );
+ cli();
+ REG0;
+/* clearing of interrupts and the fifo is needed */
+ inb(qbase + 5); /* clear interrupts */
+ if (inb(qbase + 5)) /* if still interrupting */
+ outb(2, qbase + 3); /* reset chip */
+ else if (inb(qbase + 7) & 0x1f)
+ outb(1, qbase + 3); /* clear fifo */
+ while (inb(qbase + 5)); /* clear ints */
+ REG1;
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ outb(0, qbase + 0xb); /* disable ints */
+ inb(qbase + 8); /* clear int bits */
+ REG0;
+ outb(0x40, qbase + 0xb); /* enable features */
+
+/* configurables */
+ outb( qlcfgc , qbase + 0xc);
+/* config: no reset interrupt, (initiator) bus id */
+ outb( 0x40 | qlcfg8 | qinitid, qbase + 8);
+ outb( qlcfg7 , qbase + 7 );
+ outb( qlcfg6 , qbase + 6 );
+/**/
+ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9 & 7, qbase + 9); /* prescaler */
+/* outb(0x99, qbase + 5); */
+ outb(cmd->target, qbase + 4);
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ outb(cmd->cmnd[i], qbase + 2);
+ qlcmd = cmd;
+ outb(0x41, qbase + 3); /* select and send command */
+ restore_flags( flags );
+}
+/*----------------------------------------------------------------*/
+/* process scsi command - usually after interrupt */
+static unsigned int ql_pcmd(Scsi_Cmnd * cmd)
+{
+unsigned int i, j, k;
+unsigned int result; /* ultimate return result */
+unsigned int status; /* scsi returned status */
+unsigned int message; /* scsi returned message */
+unsigned int phase; /* recorded scsi phase */
+unsigned int reqlen; /* total length of transfer */
+struct scatterlist *sglist; /* scatter-gather list pointer */
+unsigned int sgcount; /* sg counter */
+
+rtrc(1)
+ j = inb(qbase + 6);
+ i = inb(qbase + 5);
+ if (i == 0x20) {
+ return (DID_NO_CONNECT << 16);
+ }
+ i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
+ if (i != 0x18) {
+ printk("Ql:Bad Interrupt status:%02x\n", i);
+ ql_zap();
+ return (DID_BAD_INTR << 16);
+ }
+ j &= 7; /* j = inb( qbase + 7 ) >> 5; */
+/* correct status is supposed to be step 4 */
+/* it sometimes returns step 3 but with 0 bytes left to send */
+/* We can try stuffing the FIFO with the max each time, but we will get a
+ sequence of 3 if any bytes are left (but we do flush the FIFO anyway) */
+ if(j != 3 && j != 4) {
+ printk("Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n", j, i, inb( qbase+7 ) & 0x1f );
+ ql_zap();
+ return (DID_ERROR << 16);
+ }
+ result = DID_OK;
+ if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
+ outb(1, qbase + 3); /* clear fifo */
+/* note that request_bufflen is the total xfer size when sg is used */
+ reqlen = cmd->request_bufflen;
+/* note that it won't work if transfers > 16M are requested */
+ if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
+rtrc(2)
+ outb(reqlen, qbase); /* low-mid xfer cnt */
+ outb(reqlen >> 8, qbase+1); /* low-mid xfer cnt */
+ outb(reqlen >> 16, qbase + 0xe); /* high xfer cnt */
+ outb(0x90, qbase + 3); /* command do xfer */
+/* PIO pseudo DMA to buffer or sglist */
+ REG1;
+ if (!cmd->use_sg)
+ ql_pdma(phase, cmd->request_buffer, cmd->request_bufflen);
+ else {
+ sgcount = cmd->use_sg;
+ sglist = cmd->request_buffer;
+ while (sgcount--) {
+ if (qabort) {
+ REG0;
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ }
+ if (ql_pdma(phase, sglist->address, sglist->length))
+ break;
+ sglist++;
+ }
+ }
+ REG0;
+rtrc(2)
+/* wait for irq (split into second state of irq handler if this can take time) */
+ if ((k = ql_wai()))
+ return (k << 16);
+ k = inb(qbase + 5); /* should be 0x10, bus service */
+ }
+/*** Enter Status (and Message In) Phase ***/
+ k = jiffies + WATCHDOG;
+ while ( k > jiffies && !qabort && !(inb(qbase + 4) & 6)); /* wait for status phase */
+ if ( k <= jiffies ) {
+ ql_zap();
+ return (DID_TIME_OUT << 16);
+ }
+ while (inb(qbase + 5)); /* clear pending ints */
+ if (qabort)
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ outb(0x11, qbase + 3); /* get status and message */
+ if ((k = ql_wai()))
+ return (k << 16);
+ i = inb(qbase + 5); /* get chip irq stat */
+ j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
+ status = inb(qbase + 2);
+ message = inb(qbase + 2);
+/* should get function complete int if Status and message, else bus serv if only status */
+ if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
+ printk("Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
+ result = DID_ERROR;
+ }
+ outb(0x12, qbase + 3); /* done, disconnect */
+rtrc(1)
+ if ((k = ql_wai()))
+ return (k << 16);
+/* should get bus service interrupt and disconnect interrupt */
+ i = inb(qbase + 5); /* should be bus service */
+ while (!qabort && ((i & 0x20) != 0x20)) {
+ barrier();
+ i |= inb(qbase + 5);
+ }
+rtrc(0)
+ if (qabort)
+ return ((qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ return (result << 16) | (message << 8) | (status & STATUS_MASK);
+}
+
+#if QL_USE_IRQ
+/*----------------------------------------------------------------*/
+/* interrupt handler */
+static void ql_ihandl(int irq, void *dev_id, struct pt_regs * regs)
+{
+Scsi_Cmnd *icmd;
+ REG0;
+ if (!(inb(qbase + 4) & 0x80)) /* false alarm? */
+ return;
+ if (qlcmd == NULL) { /* no command to process? */
+ int i;
+ i = 16;
+ while (i-- && inb(qbase + 5)); /* maybe also ql_zap() */
+ return;
+ }
+ icmd = qlcmd;
+ icmd->result = ql_pcmd(icmd);
+ qlcmd = NULL;
+/* if result is CHECK CONDITION done calls qcommand to request sense */
+ (icmd->scsi_done) (icmd);
+}
+#endif
+
+/*----------------------------------------------------------------*/
+/* global functions */
+/*----------------------------------------------------------------*/
+/* non queued command */
+#if QL_USE_IRQ
+static void qlidone(Scsi_Cmnd * cmd) {}; /* null function */
+#endif
+
+/* command process */
+int qlogicfas_command(Scsi_Cmnd * cmd)
+{
+int k;
+#if QL_USE_IRQ
+ if (qlirq >= 0) {
+ qlogicfas_queuecommand(cmd, qlidone);
+ while (qlcmd != NULL);
+ return cmd->result;
+ }
+#endif
+/* non-irq version */
+ if (cmd->target == qinitid)
+ return (DID_BAD_TARGET << 16);
+ ql_icmd(cmd);
+ if ((k = ql_wai()))
+ return (k << 16);
+ return ql_pcmd(cmd);
+
+}
+
+#if QL_USE_IRQ
+/*----------------------------------------------------------------*/
+/* queued command */
+int qlogicfas_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ if(cmd->target == qinitid) {
+ cmd->result = DID_BAD_TARGET << 16;
+ done(cmd);
+ return 0;
+ }
+
+ cmd->scsi_done = done;
+/* wait for the last command's interrupt to finish */
+ while (qlcmd != NULL)
+ barrier();
+ ql_icmd(cmd);
+ return 0;
+}
+#else
+int qlogicfas_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
+{
+ return 1;
+}
+#endif
+
+#ifdef PCMCIA
+/*----------------------------------------------------------------*/
+/* allow PCMCIA code to preset the port */
+/* pass port as 0 and irq as -1 to request autoprobing */
+void qlogicfas_preset(int port, int irq)
+{
+ qbase=port;
+ qlirq=irq;
+}
+#endif
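As a usage sketch only (the caller below is hypothetical, not part of this patch): the PCMCIA attach code is expected to call qlogicfas_preset() with the resources assigned by Card Services before qlogicfas_detect() runs, or hand over 0 and -1 to fall back to the probe in detect.

        /* hypothetical PCMCIA attach path -- illustration only */
        extern void qlogicfas_preset(int port, int irq);

        static void example_attach(int assigned_port, int assigned_irq)
        {
                /* pin the driver to the socket's resources ... */
                qlogicfas_preset(assigned_port, assigned_irq);

                /* ... or ask for full autoprobing instead:     */
                /* qlogicfas_preset(0, -1);                     */
        }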
+
+/*----------------------------------------------------------------*/
+/* look for qlogic card and init if found */
+int qlogicfas_detect(Scsi_Host_Template * host)
+{
+int i, j; /* these are only used by IRQ detect */
+int qltyp; /* type of chip */
+struct Scsi_Host *hreg; /* registered host structure */
+unsigned long flags;
+
+host->proc_dir = &proc_scsi_qlogicfas;
+
+/* Qlogic Cards only exist at 0x230 or 0x330 (the chip itself decodes the
+ address) - I check 0x230 first since MIDI cards are typically at 0x330
+
+ Theoretically, two Qlogic cards can coexist in the same system. This
+ should work by simply using this as a loadable module for the second
+ card, but I haven't tested this.
+*/
+
+ if( !qbase ) {
+ for (qbase = 0x230; qbase < 0x430; qbase += 0x100) {
+ if( check_region( qbase , 0x10 ) )
+ continue;
+ REG1;
+ if ( ( (inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7 )
+ && ( (inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7 ) )
+ break;
+ }
+ if (qbase == 0x430)
+ return 0;
+ }
+ else
+ printk( "Ql: Using preset base address of %03x\n", qbase );
+
+ qltyp = inb(qbase + 0xe) & 0xf8;
+ qinitid = host->this_id;
+ if (qinitid < 0)
+ qinitid = 7; /* if no ID, use 7 */
+ outb(1, qbase + 8); /* set for PIO pseudo DMA */
+ REG0;
+ outb(0x40 | qlcfg8 | qinitid, qbase + 8); /* (ini) bus id, disable scsi rst */
+ outb(qlcfg5, qbase + 5); /* select timer */
+ outb(qlcfg9, qbase + 9); /* prescaler */
+#if QL_RESET_AT_START
+ outb( 3 , qbase + 3 );
+ REG1;
+ while( inb( qbase + 0xf ) & 4 );
+ REG0;
+#endif
+#if QL_USE_IRQ
+/* IRQ probe - toggle pin and check request pending */
+
+ if( qlirq == -1 ) {
+ save_flags( flags );
+ cli();
+ i = 0xffff;
+ j = 3;
+ outb(0x90, qbase + 3); /* illegal command - cause interrupt */
+ REG1;
+ outb(10, 0x20); /* access pending interrupt map */
+ outb(10, 0xa0);
+ while (j--) {
+ outb(0xb0 | QL_INT_ACTIVE_HIGH , qbase + 0xd); /* int pin off */
+ i &= ~(inb(0x20) | (inb(0xa0) << 8)); /* find IRQ off */
+ outb(0xb4 | QL_INT_ACTIVE_HIGH , qbase + 0xd); /* int pin on */
+ i &= inb(0x20) | (inb(0xa0) << 8); /* find IRQ on */
+ }
+ REG0;
+ while (inb(qbase + 5)); /* purge int */
+ j = -1;
+ while (i) /* find on bit */
+ i >>= 1, j++; /* should check for exactly 1 on */
+ qlirq = j;
+ restore_flags( flags );
+ }
+ else
+ printk( "Ql: Using preset IRQ %d\n", qlirq );
+
+ if (qlirq >= 0 && !request_irq(qlirq, ql_ihandl, 0, "qlogicfas", NULL))
+ host->can_queue = 1;
+#endif
+ request_region( qbase , 0x10 ,"qlogicfas");
+ hreg = scsi_register( host , 0 ); /* no host data */
+ hreg->io_port = qbase;
+ hreg->n_io_port = 16;
+ hreg->dma_channel = -1;
+ if( qlirq != -1 )
+ hreg->irq = qlirq;
+
+ sprintf(qinfo, "Qlogicfas Driver version 0.45, chip %02X at %03X, IRQ %d, TPdma:%d",
+ qltyp, qbase, qlirq, QL_TURBO_PDMA );
+ host->name = qinfo;
+
+ return 1;
+}
+
+/*----------------------------------------------------------------*/
+/* return bios parameters */
+int qlogicfas_biosparam(Disk * disk, kdev_t dev, int ip[])
+{
+/* This should mimic the DOS Qlogic driver's behavior exactly */
+ ip[0] = 0x40;
+ ip[1] = 0x20;
+ ip[2] = disk->capacity / (ip[0] * ip[1]);
+ if (ip[2] > 1024) {
+ ip[0] = 0xff;
+ ip[1] = 0x3f;
+ ip[2] = disk->capacity / (ip[0] * ip[1]);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+ return 0;
+}
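To make the translation above concrete, here is a small standalone sketch of the same calculation (example_cylinders and its capacity argument in 512-byte sectors are illustrative assumptions, not driver code):

        /* standalone sketch of the geometry fallback used above */
        static unsigned long example_cylinders(unsigned long capacity)
        {
                unsigned long heads = 0x40, sectors = 0x20;  /* 64 heads, 32 sectors/track */
                unsigned long cyls = capacity / (heads * sectors);

                if (cyls > 1024) {                           /* beyond the BIOS cylinder limit */
                        heads = 0xff;                        /* switch to 255 heads, 63 sectors */
                        sectors = 0x3f;
                        cyls = capacity / (heads * sectors);
                        if (cyls > 1023)
                                cyls = 1023;
                }
                /* e.g. a 2 GB disk (4194304 sectors): 4194304 / 2048 = 2048 > 1024,
                 * so the 255 x 63 translation applies and yields 261 cylinders.     */
                return cyls;
        }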
+
+/*----------------------------------------------------------------*/
+/* abort command in progress */
+int qlogicfas_abort(Scsi_Cmnd * cmd)
+{
+ qabort = 1;
+ ql_zap();
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+/* reset SCSI bus */
+int qlogicfas_reset(Scsi_Cmnd * cmd, unsigned int flags)
+{
+ qabort = 2;
+ ql_zap();
+ return 1;
+}
+
+/*----------------------------------------------------------------*/
+/* return info string */
+const char *qlogicfas_info(struct Scsi_Host * host)
+{
+ return qinfo;
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = QLOGICFAS;
+
+#include "scsi_module.c"
+#endif
+
diff --git a/linux/src/drivers/scsi/qlogicfas.h b/linux/src/drivers/scsi/qlogicfas.h
new file mode 100644
index 0000000..5a1dfdb
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicfas.h
@@ -0,0 +1,43 @@
+#ifndef _QLOGICFAS_H
+#define _QLOGICFAS_H
+
+int qlogicfas_detect(Scsi_Host_Template * );
+const char * qlogicfas_info(struct Scsi_Host *);
+int qlogicfas_command(Scsi_Cmnd *);
+int qlogicfas_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int qlogicfas_abort(Scsi_Cmnd *);
+int qlogicfas_reset(Scsi_Cmnd *, unsigned int flags);
+int qlogicfas_biosparam(Disk *, kdev_t, int[]);
+
+#ifndef NULL
+#define NULL (0)
+#endif
+
+#define QLOGICFAS { \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ qlogicfas_detect, \
+ NULL, \
+ qlogicfas_info, \
+ qlogicfas_command, \
+ qlogicfas_queuecommand, \
+ qlogicfas_abort, \
+ qlogicfas_reset, \
+ NULL, \
+ qlogicfas_biosparam, \
+ 0, \
+ -1, \
+ SG_ALL, \
+ 1, \
+ 0, \
+ 0, \
+ DISABLE_CLUSTERING \
+}
+
+#endif /* _QLOGICFAS_H */
+
+
+
diff --git a/linux/src/drivers/scsi/qlogicisp.c b/linux/src/drivers/scsi/qlogicisp.c
new file mode 100644
index 0000000..ebee05d
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicisp.c
@@ -0,0 +1,1767 @@
+/*
+ * QLogic ISP1020 Intelligent SCSI Processor Driver (PCI)
+ * Written by Erik H. Moe, ehm@cris.com
+ * Copyright 1995, Erik H. Moe
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/* Renamed and updated to 1.3.x by Michael Griffith <grif@cs.ucr.edu> */
+
+/*
+ * $Date: 2007/03/27 21:04:30 $
+ * $Revision: 1.1.4.2 $
+ *
+ * Revision 0.5 1995/09/22 02:23:15 root
+ * do auto request sense
+ *
+ * Revision 0.4 1995/08/07 04:44:33 root
+ * supply firmware with driver.
+ * numerous bug fixes/general cleanup of code.
+ *
+ * Revision 0.3 1995/07/16 16:15:39 root
+ * added reset/abort code.
+ *
+ * Revision 0.2 1995/06/29 03:14:19 root
+ * fixed biosparam.
+ * added queue protocol.
+ *
+ * Revision 0.1 1995/06/25 01:55:45 root
+ * Initial release.
+ *
+ */
+
+#include <linux/blk.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "sd.h"
+#include "hosts.h"
+#include "qlogicisp.h"
+
+/* Configuration section *****************************************************/
+
+/* Set the following macro to 1 to reload the ISP1020's firmware. This is
+ the latest firmware provided by QLogic. This may be an earlier/later
+ revision than supplied by your board. */
+
+#define RELOAD_FIRMWARE 0
+
+/* Set the following macro to 1 to reload the ISP1020's defaults from nvram.
+ If you are not sure of your settings, leave this alone; the driver will
+ use a set of 'safe' defaults */
+
+#define USE_NVRAM_DEFAULTS 0
+
+/* Macros used for debugging */
+
+#define DEBUG_ISP1020 0
+#define DEBUG_ISP1020_INTR 0
+#define DEBUG_ISP1020_SETUP 0
+#define TRACE_ISP 0
+
+#define DEFAULT_LOOP_COUNT 1000000
+
+/* End Configuration section *************************************************/
+
+#include <linux/module.h>
+
+#if TRACE_ISP
+
+# define TRACE_BUF_LEN (32*1024)
+
+struct {
+ u_long next;
+ struct {
+ u_long time;
+ u_int index;
+ u_int addr;
+ u_char * name;
+ } buf[TRACE_BUF_LEN];
+} trace;
+
+#define TRACE(w, i, a) \
+{ \
+ unsigned long flags; \
+ \
+ save_flags(flags); \
+ cli(); \
+ trace.buf[trace.next].name = (w); \
+ trace.buf[trace.next].time = jiffies; \
+ trace.buf[trace.next].index = (i); \
+ trace.buf[trace.next].addr = (long) (a); \
+ trace.next = (trace.next + 1) & (TRACE_BUF_LEN - 1); \
+ restore_flags(flags); \
+}
+
+#else
+# define TRACE(w, i, a)
+#endif
+
+#if DEBUG_ISP1020
+#define ENTER(x) printk("isp1020 : entering %s()\n", x);
+#define LEAVE(x) printk("isp1020 : leaving %s()\n", x);
+#define DEBUG(x) x
+#else
+#define ENTER(x)
+#define LEAVE(x)
+#define DEBUG(x)
+#endif /* DEBUG_ISP1020 */
+
+#if DEBUG_ISP1020_INTR
+#define ENTER_INTR(x) printk("isp1020 : entering %s()\n", x);
+#define LEAVE_INTR(x) printk("isp1020 : leaving %s()\n", x);
+#define DEBUG_INTR(x) x
+#else
+#define ENTER_INTR(x)
+#define LEAVE_INTR(x)
+#define DEBUG_INTR(x)
+#endif /* DEBUG_ISP1020_INTR */
+
+#define ISP1020_REV_ID 1
+
+#define MAX_TARGETS 16
+#define MAX_LUNS 8
+
+/* host configuration and control registers */
+#define HOST_HCCR 0xc0 /* host command and control */
+
+/* pci bus interface registers */
+#define PCI_ID_LOW 0x00 /* vendor id */
+#define PCI_ID_HIGH 0x02 /* device id */
+#define ISP_CFG0 0x04 /* configuration register #0 */
+#define ISP_CFG1 0x06 /* configuration register #1 */
+#define PCI_INTF_CTL 0x08 /* pci interface control */
+#define PCI_INTF_STS 0x0a /* pci interface status */
+#define PCI_SEMAPHORE 0x0c /* pci semaphore */
+#define PCI_NVRAM 0x0e /* pci nvram interface */
+
+/* mailbox registers */
+#define MBOX0 0x70 /* mailbox 0 */
+#define MBOX1 0x72 /* mailbox 1 */
+#define MBOX2 0x74 /* mailbox 2 */
+#define MBOX3 0x76 /* mailbox 3 */
+#define MBOX4 0x78 /* mailbox 4 */
+#define MBOX5 0x7a /* mailbox 5 */
+
+/* mailbox command complete status codes */
+#define MBOX_COMMAND_COMPLETE 0x4000
+#define INVALID_COMMAND 0x4001
+#define HOST_INTERFACE_ERROR 0x4002
+#define TEST_FAILED 0x4003
+#define COMMAND_ERROR 0x4005
+#define COMMAND_PARAM_ERROR 0x4006
+
+/* async event status codes */
+#define ASYNC_SCSI_BUS_RESET 0x8001
+#define SYSTEM_ERROR 0x8002
+#define REQUEST_TRANSFER_ERROR 0x8003
+#define RESPONSE_TRANSFER_ERROR 0x8004
+#define REQUEST_QUEUE_WAKEUP 0x8005
+#define EXECUTION_TIMEOUT_RESET 0x8006
+
+struct Entry_header {
+ u_char entry_type;
+ u_char entry_cnt;
+ u_char sys_def_1;
+ u_char flags;
+};
+
+/* entry header type commands */
+#define ENTRY_COMMAND 1
+#define ENTRY_CONTINUATION 2
+#define ENTRY_STATUS 3
+#define ENTRY_MARKER 4
+#define ENTRY_EXTENDED_COMMAND 5
+
+/* entry header flag definitions */
+#define EFLAG_CONTINUATION 1
+#define EFLAG_BUSY 2
+#define EFLAG_BAD_HEADER 4
+#define EFLAG_BAD_PAYLOAD 8
+
+struct dataseg {
+ u_int d_base;
+ u_int d_count;
+};
+
+struct Command_Entry {
+ struct Entry_header hdr;
+ u_int handle;
+ u_char target_lun;
+ u_char target_id;
+ u_short cdb_length;
+ u_short control_flags;
+ u_short rsvd;
+ u_short time_out;
+ u_short segment_cnt;
+ u_char cdb[12];
+ struct dataseg dataseg[4];
+};
+
+/* command entry control flag definitions */
+#define CFLAG_NODISC 0x01
+#define CFLAG_HEAD_TAG 0x02
+#define CFLAG_ORDERED_TAG 0x04
+#define CFLAG_SIMPLE_TAG 0x08
+#define CFLAG_TAR_RTN 0x10
+#define CFLAG_READ 0x20
+#define CFLAG_WRITE 0x40
+
+struct Ext_Command_Entry {
+ struct Entry_header hdr;
+ u_int handle;
+ u_char target_lun;
+ u_char target_id;
+ u_short cdb_length;
+ u_short control_flags;
+ u_short rsvd;
+ u_short time_out;
+ u_short segment_cnt;
+ u_char cdb[44];
+};
+
+struct Continuation_Entry {
+ struct Entry_header hdr;
+ u_int reserved;
+ struct dataseg dataseg[7];
+};
+
+struct Marker_Entry {
+ struct Entry_header hdr;
+ u_int reserved;
+ u_char target_lun;
+ u_char target_id;
+ u_char modifier;
+ u_char rsvd;
+ u_char rsvds[52];
+};
+
+/* marker entry modifier definitions */
+#define SYNC_DEVICE 0
+#define SYNC_TARGET 1
+#define SYNC_ALL 2
+
+struct Status_Entry {
+ struct Entry_header hdr;
+ u_int handle;
+ u_short scsi_status;
+ u_short completion_status;
+ u_short state_flags;
+ u_short status_flags;
+ u_short time;
+ u_short req_sense_len;
+ u_int residual;
+ u_char rsvd[8];
+ u_char req_sense_data[32];
+};
+
+/* status entry completion status definitions */
+#define CS_COMPLETE 0x0000
+#define CS_INCOMPLETE 0x0001
+#define CS_DMA_ERROR 0x0002
+#define CS_TRANSPORT_ERROR 0x0003
+#define CS_RESET_OCCURRED 0x0004
+#define CS_ABORTED 0x0005
+#define CS_TIMEOUT 0x0006
+#define CS_DATA_OVERRUN 0x0007
+#define CS_COMMAND_OVERRUN 0x0008
+#define CS_STATUS_OVERRUN 0x0009
+#define CS_BAD_MESSAGE 0x000a
+#define CS_NO_MESSAGE_OUT 0x000b
+#define CS_EXT_ID_FAILED 0x000c
+#define CS_IDE_MSG_FAILED 0x000d
+#define CS_ABORT_MSG_FAILED 0x000e
+#define CS_REJECT_MSG_FAILED 0x000f
+#define CS_NOP_MSG_FAILED 0x0010
+#define CS_PARITY_ERROR_MSG_FAILED 0x0011
+#define CS_DEVICE_RESET_MSG_FAILED 0x0012
+#define CS_ID_MSG_FAILED 0x0013
+#define CS_UNEXP_BUS_FREE 0x0014
+/* as per app note #83120-514-06a: */
+#define CS_DATA_UNDERRUN 0x0015
+#define CS_INVALID_ENTRY_TYPE 0x001b
+#define CS_DEVICE_QUEUE_FULL 0x001c
+#define CS_SCSI_PHASE_SKIPPED 0x001d
+#define CS_ARS_FAILED 0x001e /* auto Req. Sense failed */
+
+/* status entry state flag definitions */
+#define SF_GOT_BUS 0x0100
+#define SF_GOT_TARGET 0x0200
+#define SF_SENT_CDB 0x0400
+#define SF_TRANSFERRED_DATA 0x0800
+#define SF_GOT_STATUS 0x1000
+#define SF_GOT_SENSE 0x2000
+
+/* status entry status flag definitions */
+#define STF_DISCONNECT 0x0001
+#define STF_SYNCHRONOUS 0x0002
+#define STF_PARITY_ERROR 0x0004
+#define STF_BUS_RESET 0x0008
+#define STF_DEVICE_RESET 0x0010
+#define STF_ABORTED 0x0020
+#define STF_TIMEOUT 0x0040
+#define STF_NEGOTIATION 0x0080
+
+/* interface control commands */
+#define ISP_RESET 0x0001
+#define ISP_EN_INT 0x0002
+#define ISP_EN_RISC 0x0004
+
+/* host control commands */
+#define HCCR_NOP 0x0000
+#define HCCR_RESET 0x1000
+#define HCCR_PAUSE 0x2000
+#define HCCR_RELEASE 0x3000
+#define HCCR_SINGLE_STEP 0x4000
+#define HCCR_SET_HOST_INTR 0x5000
+#define HCCR_CLEAR_HOST_INTR 0x6000
+#define HCCR_CLEAR_RISC_INTR 0x7000
+#define HCCR_BP_ENABLE 0x8000
+#define HCCR_BIOS_DISABLE 0x9000
+#define HCCR_TEST_MODE 0xf000
+
+#define RISC_BUSY 0x0004
+
+/* mailbox commands */
+#define MBOX_NO_OP 0x0000
+#define MBOX_LOAD_RAM 0x0001
+#define MBOX_EXEC_FIRMWARE 0x0002
+#define MBOX_DUMP_RAM 0x0003
+#define MBOX_WRITE_RAM_WORD 0x0004
+#define MBOX_READ_RAM_WORD 0x0005
+#define MBOX_MAILBOX_REG_TEST 0x0006
+#define MBOX_VERIFY_CHECKSUM 0x0007
+#define MBOX_ABOUT_FIRMWARE 0x0008
+#define MBOX_CHECK_FIRMWARE 0x000e
+#define MBOX_INIT_REQ_QUEUE 0x0010
+#define MBOX_INIT_RES_QUEUE 0x0011
+#define MBOX_EXECUTE_IOCB 0x0012
+#define MBOX_WAKE_UP 0x0013
+#define MBOX_STOP_FIRMWARE 0x0014
+#define MBOX_ABORT 0x0015
+#define MBOX_ABORT_DEVICE 0x0016
+#define MBOX_ABORT_TARGET 0x0017
+#define MBOX_BUS_RESET 0x0018
+#define MBOX_STOP_QUEUE 0x0019
+#define MBOX_START_QUEUE 0x001a
+#define MBOX_SINGLE_STEP_QUEUE 0x001b
+#define MBOX_ABORT_QUEUE 0x001c
+#define MBOX_GET_DEV_QUEUE_STATUS 0x001d
+#define MBOX_GET_FIRMWARE_STATUS 0x001f
+#define MBOX_GET_INIT_SCSI_ID 0x0020
+#define MBOX_GET_SELECT_TIMEOUT 0x0021
+#define MBOX_GET_RETRY_COUNT 0x0022
+#define MBOX_GET_TAG_AGE_LIMIT 0x0023
+#define MBOX_GET_CLOCK_RATE 0x0024
+#define MBOX_GET_ACT_NEG_STATE 0x0025
+#define MBOX_GET_ASYNC_DATA_SETUP_TIME 0x0026
+#define MBOX_GET_PCI_PARAMS 0x0027
+#define MBOX_GET_TARGET_PARAMS 0x0028
+#define MBOX_GET_DEV_QUEUE_PARAMS 0x0029
+#define MBOX_SET_INIT_SCSI_ID 0x0030
+#define MBOX_SET_SELECT_TIMEOUT 0x0031
+#define MBOX_SET_RETRY_COUNT 0x0032
+#define MBOX_SET_TAG_AGE_LIMIT 0x0033
+#define MBOX_SET_CLOCK_RATE 0x0034
+#define MBOX_SET_ACTIVE_NEG_STATE 0x0035
+#define MBOX_SET_ASYNC_DATA_SETUP_TIME 0x0036
+#define MBOX_SET_PCI_CONTROL_PARAMS 0x0037
+#define MBOX_SET_TARGET_PARAMS 0x0038
+#define MBOX_SET_DEV_QUEUE_PARAMS 0x0039
+#define MBOX_RETURN_BIOS_BLOCK_ADDR 0x0040
+#define MBOX_WRITE_FOUR_RAM_WORDS 0x0041
+#define MBOX_EXEC_BIOS_IOCB 0x0042
+
+unsigned short risc_code_addr01 = 0x1000 ;
+
+#define PACKB(a, b) (((a)<<4)|(b))
+
+const u_char mbox_param[] = {
+ PACKB(1, 1), /* MBOX_NO_OP */
+ PACKB(5, 5), /* MBOX_LOAD_RAM */
+ PACKB(2, 0), /* MBOX_EXEC_FIRMWARE */
+ PACKB(5, 5), /* MBOX_DUMP_RAM */
+ PACKB(3, 3), /* MBOX_WRITE_RAM_WORD */
+ PACKB(2, 3), /* MBOX_READ_RAM_WORD */
+ PACKB(6, 6), /* MBOX_MAILBOX_REG_TEST */
+ PACKB(2, 3), /* MBOX_VERIFY_CHECKSUM */
+ PACKB(1, 3), /* MBOX_ABOUT_FIRMWARE */
+ PACKB(0, 0), /* 0x0009 */
+ PACKB(0, 0), /* 0x000a */
+ PACKB(0, 0), /* 0x000b */
+ PACKB(0, 0), /* 0x000c */
+ PACKB(0, 0), /* 0x000d */
+ PACKB(1, 2), /* MBOX_CHECK_FIRMWARE */
+ PACKB(0, 0), /* 0x000f */
+ PACKB(5, 5), /* MBOX_INIT_REQ_QUEUE */
+ PACKB(6, 6), /* MBOX_INIT_RES_QUEUE */
+ PACKB(4, 4), /* MBOX_EXECUTE_IOCB */
+ PACKB(2, 2), /* MBOX_WAKE_UP */
+ PACKB(1, 6), /* MBOX_STOP_FIRMWARE */
+ PACKB(4, 4), /* MBOX_ABORT */
+ PACKB(2, 2), /* MBOX_ABORT_DEVICE */
+ PACKB(3, 3), /* MBOX_ABORT_TARGET */
+ PACKB(2, 2), /* MBOX_BUS_RESET */
+ PACKB(2, 3), /* MBOX_STOP_QUEUE */
+ PACKB(2, 3), /* MBOX_START_QUEUE */
+ PACKB(2, 3), /* MBOX_SINGLE_STEP_QUEUE */
+ PACKB(2, 3), /* MBOX_ABORT_QUEUE */
+ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_STATUS */
+ PACKB(0, 0), /* 0x001e */
+ PACKB(1, 3), /* MBOX_GET_FIRMWARE_STATUS */
+ PACKB(1, 2), /* MBOX_GET_INIT_SCSI_ID */
+ PACKB(1, 2), /* MBOX_GET_SELECT_TIMEOUT */
+ PACKB(1, 3), /* MBOX_GET_RETRY_COUNT */
+ PACKB(1, 2), /* MBOX_GET_TAG_AGE_LIMIT */
+ PACKB(1, 2), /* MBOX_GET_CLOCK_RATE */
+ PACKB(1, 2), /* MBOX_GET_ACT_NEG_STATE */
+ PACKB(1, 2), /* MBOX_GET_ASYNC_DATA_SETUP_TIME */
+ PACKB(1, 3), /* MBOX_GET_PCI_PARAMS */
+ PACKB(2, 4), /* MBOX_GET_TARGET_PARAMS */
+ PACKB(2, 4), /* MBOX_GET_DEV_QUEUE_PARAMS */
+ PACKB(0, 0), /* 0x002a */
+ PACKB(0, 0), /* 0x002b */
+ PACKB(0, 0), /* 0x002c */
+ PACKB(0, 0), /* 0x002d */
+ PACKB(0, 0), /* 0x002e */
+ PACKB(0, 0), /* 0x002f */
+ PACKB(2, 2), /* MBOX_SET_INIT_SCSI_ID */
+ PACKB(2, 2), /* MBOX_SET_SELECT_TIMEOUT */
+ PACKB(3, 3), /* MBOX_SET_RETRY_COUNT */
+ PACKB(2, 2), /* MBOX_SET_TAG_AGE_LIMIT */
+ PACKB(2, 2), /* MBOX_SET_CLOCK_RATE */
+ PACKB(2, 2), /* MBOX_SET_ACTIVE_NEG_STATE */
+ PACKB(2, 2), /* MBOX_SET_ASYNC_DATA_SETUP_TIME */
+ PACKB(3, 3), /* MBOX_SET_PCI_CONTROL_PARAMS */
+ PACKB(4, 4), /* MBOX_SET_TARGET_PARAMS */
+ PACKB(4, 4), /* MBOX_SET_DEV_QUEUE_PARAMS */
+ PACKB(0, 0), /* 0x003a */
+ PACKB(0, 0), /* 0x003b */
+ PACKB(0, 0), /* 0x003c */
+ PACKB(0, 0), /* 0x003d */
+ PACKB(0, 0), /* 0x003e */
+ PACKB(0, 0), /* 0x003f */
+ PACKB(1, 2), /* MBOX_RETURN_BIOS_BLOCK_ADDR */
+ PACKB(6, 1), /* MBOX_WRITE_FOUR_RAM_WORDS */
+ PACKB(2, 3) /* MBOX_EXEC_BIOS_IOCB */
+};
+
+#define MAX_MBOX_COMMAND (sizeof(mbox_param)/sizeof(u_short))
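The nibble packing above is what isp1020_mbox_command() unpacks later on: the high nibble says how many mailbox registers must be loaded for the command, the low nibble how many carry the reply. A minimal sketch of that decoding (the function and variable names are illustrative only):

        /* illustrative decode of a PACKB() table entry */
        static void example_decode_mbox_param(unsigned char packed)
        {
                unsigned out_regs = packed >> 4;     /* registers the host loads (MBOX0..)   */
                unsigned in_regs  = packed & 0x0f;   /* registers read back after completion */

                /* e.g. mbox_param[MBOX_ABOUT_FIRMWARE] == PACKB(1, 3): load MBOX0 only,
                 * then read MBOX0..MBOX2 (status word, firmware major and minor revision). */
                (void) out_regs;
                (void) in_regs;
        }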
+
+struct host_param {
+ u_short fifo_threshold;
+ u_short host_adapter_enable;
+ u_short initiator_scsi_id;
+ u_short bus_reset_delay;
+ u_short retry_count;
+ u_short retry_delay;
+ u_short async_data_setup_time;
+ u_short req_ack_active_negation;
+ u_short data_line_active_negation;
+ u_short data_dma_burst_enable;
+ u_short command_dma_burst_enable;
+ u_short tag_aging;
+ u_short selection_timeout;
+ u_short max_queue_depth;
+};
+
+/*
+ * Device Flags:
+ *
+ * Bit Name
+ * ---------
+ * 7 Disconnect Privilege
+ * 6 Parity Checking
+ * 5 Wide Data Transfers
+ * 4 Synchronous Data Transfers
+ * 3 Tagged Queuing
+ * 2 Automatic Request Sense
+ * 1 Stop Queue on Check Condition
+ * 0 Renegotiate on Error
+ */
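A sketch of how a flags word maps onto that layout; the DF_* names are illustrative and not part of the driver, but the combined value matches the 0xfd default that isp1020_set_defaults() writes further down (everything enabled except "stop queue on check condition").

        /* illustrative names for the device_flags bits listed above */
        #define DF_RENEGOTIATE_ON_ERROR  0x01
        #define DF_STOP_QUEUE_ON_CHECK   0x02
        #define DF_AUTO_REQUEST_SENSE    0x04
        #define DF_TAGGED_QUEUING        0x08
        #define DF_SYNC_TRANSFERS        0x10
        #define DF_WIDE_TRANSFERS        0x20
        #define DF_PARITY_CHECKING       0x40
        #define DF_DISCONNECT_PRIVILEGE  0x80

        /* (DF_DISCONNECT_PRIVILEGE | DF_PARITY_CHECKING | DF_WIDE_TRANSFERS |
         *  DF_SYNC_TRANSFERS | DF_TAGGED_QUEUING | DF_AUTO_REQUEST_SENSE |
         *  DF_RENEGOTIATE_ON_ERROR) == 0xfd, the isp1020_set_defaults() value */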
+
+struct dev_param {
+ u_short device_flags;
+ u_short execution_throttle;
+ u_short synchronous_period;
+ u_short synchronous_offset;
+ u_short device_enable;
+ u_short reserved; /* pad */
+};
+
+/*
+ * The result queue can be quite a bit smaller since continuation entries
+ * do not show up there:
+ */
+#define RES_QUEUE_LEN ((QLOGICISP_REQ_QUEUE_LEN + 1) / 8 - 1)
+#define QUEUE_ENTRY_LEN 64
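Worked out for QLOGICISP_REQ_QUEUE_LEN == 63, the value defined in qlogicisp.h, this sizing gives:

        /* RES_QUEUE_LEN   = (63 + 1) / 8 - 1 = 7  ->  8 result slots        */
        /* request queue   = 63 + 1            = 64 slots                    */
        /* QUEUE_ENTRY_LEN = 64 bytes per slot, so struct isp1020_hostdata   */
        /* below carries 8*64 = 512 bytes of result ring and 64*64 = 4096    */
        /* bytes of request ring.                                            */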
+
+struct isp1020_hostdata {
+ u_char bus;
+ u_char revision;
+ u_char device_fn;
+ struct host_param host_param;
+ struct dev_param dev_param[MAX_TARGETS];
+
+ /* result and request queues (shared with isp1020): */
+ u_int req_in_ptr; /* index of next request slot */
+ u_int res_out_ptr; /* index of next result slot */
+
+ /* this is here so the queues are nicely aligned */
+ long send_marker; /* do we need to send a marker? */
+
+ char res[RES_QUEUE_LEN+1][QUEUE_ENTRY_LEN];
+ char req[QLOGICISP_REQ_QUEUE_LEN+1][QUEUE_ENTRY_LEN];
+};
+
+/* queue lengths _must_ be a power of two: */
+#define QUEUE_DEPTH(in, out, ql) ((in - out) & (ql))
+#define REQ_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, \
+ QLOGICISP_REQ_QUEUE_LEN)
+#define RES_QUEUE_DEPTH(in, out) QUEUE_DEPTH(in, out, RES_QUEUE_LEN)
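Because the lengths are one less than a power of two, the depth macros can mask instead of dividing, and the result stays correct after the in-pointer wraps. A quick worked case for the request queue (length 63):

        /* in = 2, out = 60 (the in-pointer has wrapped past slot 63):       */
        /*   REQ_QUEUE_DEPTH(2, 60) = (2 - 60) & 63 = 6                      */
        /* i.e. slots 60, 61, 62, 63, 0 and 1 are still outstanding and      */
        /* slot 2 is the next one the driver will fill.                      */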
+
+struct Scsi_Host *irq2host[NR_IRQS];
+
+static void isp1020_enable_irqs(struct Scsi_Host *);
+static void isp1020_disable_irqs(struct Scsi_Host *);
+static int isp1020_init(struct Scsi_Host *);
+static int isp1020_reset_hardware(struct Scsi_Host *);
+static int isp1020_set_defaults(struct Scsi_Host *);
+static int isp1020_load_parameters(struct Scsi_Host *);
+static int isp1020_mbox_command(struct Scsi_Host *, u_short []);
+static int isp1020_return_status(struct Status_Entry *);
+static void isp1020_intr_handler(int, void *, struct pt_regs *);
+
+#if USE_NVRAM_DEFAULTS
+static int isp1020_get_defaults(struct Scsi_Host *);
+static int isp1020_verify_nvram(struct Scsi_Host *);
+static u_short isp1020_read_nvram_word(struct Scsi_Host *, u_short);
+#endif
+
+#if DEBUG_ISP1020
+static void isp1020_print_scsi_cmd(Scsi_Cmnd *);
+#endif
+#if DEBUG_ISP1020_INTR
+static void isp1020_print_status_entry(struct Status_Entry *);
+#endif
+
+static struct proc_dir_entry proc_scsi_isp1020 = {
+ PROC_SCSI_QLOGICISP, 7, "isp1020",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+static inline void isp1020_enable_irqs(struct Scsi_Host *host)
+{
+ outw(ISP_EN_INT|ISP_EN_RISC, host->io_port + PCI_INTF_CTL);
+}
+
+
+static inline void isp1020_disable_irqs(struct Scsi_Host *host)
+{
+ outw(0x0, host->io_port + PCI_INTF_CTL);
+}
+
+
+int isp1020_detect(Scsi_Host_Template *tmpt)
+{
+ int hosts = 0;
+ u_short index;
+ u_char bus, device_fn;
+ struct Scsi_Host *host;
+ struct isp1020_hostdata *hostdata;
+
+ ENTER("isp1020_detect");
+
+ tmpt->proc_dir = &proc_scsi_isp1020;
+
+ if (pcibios_present() == 0) {
+ printk("qlogicisp : PCI bios not present\n");
+ return 0;
+ }
+
+ memset(irq2host, 0, sizeof(irq2host));
+
+ for (index = 0; pcibios_find_device(PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_ISP1020,
+ index, &bus, &device_fn) == 0;
+ index++)
+ {
+ host = scsi_register(tmpt, sizeof(struct isp1020_hostdata));
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ memset(hostdata, 0, sizeof(struct isp1020_hostdata));
+ hostdata->bus = bus;
+ hostdata->device_fn = device_fn;
+
+ if (isp1020_init(host) || isp1020_reset_hardware(host)
+#if USE_NVRAM_DEFAULTS
+ || isp1020_get_defaults(host)
+#else
+ || isp1020_set_defaults(host)
+#endif /* USE_NVRAM_DEFAULTS */
+ || isp1020_load_parameters(host)) {
+ scsi_unregister(host);
+ continue;
+ }
+
+ host->this_id = hostdata->host_param.initiator_scsi_id;
+
+ if (request_irq(host->irq, isp1020_intr_handler, SA_INTERRUPT,
+ "qlogicisp", NULL))
+ {
+ printk("qlogicisp : interrupt %d already in use\n",
+ host->irq);
+ scsi_unregister(host);
+ continue;
+ }
+
+ if (check_region(host->io_port, 0xff)) {
+ printk("qlogicisp : i/o region 0x%04x-0x%04x already "
+ "in use\n",
+ host->io_port, host->io_port + 0xff);
+ free_irq(host->irq, NULL);
+ scsi_unregister(host);
+ continue;
+ }
+
+ request_region(host->io_port, 0xff, "qlogicisp");
+ irq2host[host->irq] = host;
+
+ outw(0x0, host->io_port + PCI_SEMAPHORE);
+ outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
+ isp1020_enable_irqs(host);
+
+ hosts++;
+ }
+
+ LEAVE("isp1020_detect");
+
+ return hosts;
+}
+
+
+int isp1020_release(struct Scsi_Host *host)
+{
+ struct isp1020_hostdata *hostdata;
+
+ ENTER("isp1020_release");
+
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ outw(0x0, host->io_port + PCI_INTF_CTL);
+ free_irq(host->irq, NULL);
+
+ release_region(host->io_port, 0xff);
+
+ LEAVE("isp1020_release");
+
+ return 0;
+}
+
+
+const char *isp1020_info(struct Scsi_Host *host)
+{
+ static char buf[80];
+ struct isp1020_hostdata *hostdata;
+
+ ENTER("isp1020_info");
+
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+ sprintf(buf,
+ "QLogic ISP1020 SCSI on PCI bus %d device %d irq %d base 0x%x",
+ hostdata->bus, (hostdata->device_fn & 0xf8) >> 3, host->irq,
+ host->io_port);
+
+ LEAVE("isp1020_info");
+
+ return buf;
+}
+
+
+/*
+ * The middle SCSI layer ensures that queuecommand never gets invoked
+ * concurrently with itself or the interrupt handler (though the
+ * interrupt handler may call this routine as part of
+ * request-completion handling).
+ */
+int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
+{
+ int i, sg_count, n, num_free;
+ u_int in_ptr, out_ptr;
+ struct dataseg * ds;
+ struct scatterlist *sg;
+ struct Command_Entry *cmd;
+ struct Continuation_Entry *cont;
+ struct Scsi_Host *host;
+ struct isp1020_hostdata *hostdata;
+
+ ENTER("isp1020_queuecommand");
+
+ host = Cmnd->host;
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+ Cmnd->scsi_done = done;
+
+ DEBUG(isp1020_print_scsi_cmd(Cmnd));
+
+ out_ptr = inw(host->io_port + MBOX4);
+ in_ptr = hostdata->req_in_ptr;
+
+ DEBUG(printk("qlogicisp : request queue depth %d\n",
+ REQ_QUEUE_DEPTH(in_ptr, out_ptr)));
+
+ cmd = (struct Command_Entry *) &hostdata->req[in_ptr][0];
+ in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
+ if (in_ptr == out_ptr) {
+ printk("qlogicisp : request queue overflow\n");
+ return 1;
+ }
+
+ if (hostdata->send_marker) {
+ struct Marker_Entry *marker;
+
+ TRACE("queue marker", in_ptr, 0);
+
+ DEBUG(printk("qlogicisp : adding marker entry\n"));
+ marker = (struct Marker_Entry *) cmd;
+ memset(marker, 0, sizeof(struct Marker_Entry));
+
+ marker->hdr.entry_type = ENTRY_MARKER;
+ marker->hdr.entry_cnt = 1;
+ marker->modifier = SYNC_ALL;
+
+ hostdata->send_marker = 0;
+
+ if (((in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN) == out_ptr) {
+ outw(in_ptr, host->io_port + MBOX4);
+ hostdata->req_in_ptr = in_ptr;
+ printk("qlogicisp : request queue overflow\n");
+ return 1;
+ }
+ cmd = (struct Command_Entry *) &hostdata->req[in_ptr][0];
+ in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
+ }
+
+ TRACE("queue command", in_ptr, Cmnd);
+
+ memset(cmd, 0, sizeof(struct Command_Entry));
+
+ cmd->hdr.entry_type = ENTRY_COMMAND;
+ cmd->hdr.entry_cnt = 1;
+
+ cmd->handle = (u_int) virt_to_bus(Cmnd);
+ cmd->target_lun = Cmnd->lun;
+ cmd->target_id = Cmnd->target;
+ cmd->cdb_length = Cmnd->cmd_len;
+ cmd->control_flags = CFLAG_READ | CFLAG_WRITE;
+ cmd->time_out = 30;
+
+ memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
+
+ if (Cmnd->use_sg) {
+ cmd->segment_cnt = sg_count = Cmnd->use_sg;
+ sg = (struct scatterlist *) Cmnd->request_buffer;
+ ds = cmd->dataseg;
+
+ /* fill in first four sg entries: */
+ n = sg_count;
+ if (n > 4)
+ n = 4;
+ for (i = 0; i < n; i++) {
+ ds[i].d_base = (u_int) virt_to_bus(sg->address);
+ ds[i].d_count = sg->length;
+ ++sg;
+ }
+ sg_count -= 4;
+
+ while (sg_count > 0) {
+ ++cmd->hdr.entry_cnt;
+ cont = (struct Continuation_Entry *)
+ &hostdata->req[in_ptr][0];
+ in_ptr = (in_ptr + 1) & QLOGICISP_REQ_QUEUE_LEN;
+ if (in_ptr == out_ptr) {
+ printk("isp1020: unexpected request queue "
+ "overflow\n");
+ return 1;
+ }
+ TRACE("queue continuation", in_ptr, 0);
+ cont->hdr.entry_type = ENTRY_CONTINUATION;
+ cont->hdr.entry_cnt = 0;
+ cont->hdr.sys_def_1 = 0;
+ cont->hdr.flags = 0;
+ cont->reserved = 0;
+ ds = cont->dataseg;
+ n = sg_count;
+ if (n > 7)
+ n = 7;
+ for (i = 0; i < n; ++i) {
+ ds[i].d_base = (u_int)virt_to_bus(sg->address);
+ ds[i].d_count = sg->length;
+ ++sg;
+ }
+ sg_count -= n;
+ }
+ } else {
+ cmd->dataseg[0].d_base =
+ (u_int) virt_to_bus(Cmnd->request_buffer);
+ cmd->dataseg[0].d_count =
+ (u_int) Cmnd->request_bufflen;
+ cmd->segment_cnt = 1;
+ }
+
+ outw(in_ptr, host->io_port + MBOX4);
+ hostdata->req_in_ptr = in_ptr;
+
+ num_free = QLOGICISP_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr);
+ host->can_queue = host->host_busy + num_free;
+ host->sg_tablesize = QLOGICISP_MAX_SG(num_free);
+
+ LEAVE("isp1020_queuecommand");
+
+ return 0;
+}
+
+
+#define ASYNC_EVENT_INTERRUPT 0x01
+
+void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ Scsi_Cmnd *Cmnd;
+ struct Status_Entry *sts;
+ struct Scsi_Host *host;
+ struct isp1020_hostdata *hostdata;
+ u_int in_ptr, out_ptr;
+ u_short status;
+
+ ENTER_INTR("isp1020_intr_handler");
+
+ host = irq2host[irq];
+ if (!host) {
+ printk("qlogicisp : unexpected interrupt on line %d\n", irq);
+ return;
+ }
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ DEBUG_INTR(printk("qlogicisp : interrupt on line %d\n", irq));
+
+ if (!(inw(host->io_port + PCI_INTF_STS) & 0x04)) {
+ /* spurious interrupts can happen legally */
+ DEBUG_INTR(printk("qlogicisp: got spurious interrupt\n"));
+ return;
+ }
+ in_ptr = inw(host->io_port + MBOX5);
+ outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
+
+ if ((inw(host->io_port + PCI_SEMAPHORE) & ASYNC_EVENT_INTERRUPT)) {
+ status = inw(host->io_port + MBOX0);
+
+ DEBUG_INTR(printk("qlogicisp : mbox completion status: %x\n",
+ status));
+
+ switch (status) {
+ case ASYNC_SCSI_BUS_RESET:
+ case EXECUTION_TIMEOUT_RESET:
+ hostdata->send_marker = 1;
+ break;
+ case INVALID_COMMAND:
+ case HOST_INTERFACE_ERROR:
+ case COMMAND_ERROR:
+ case COMMAND_PARAM_ERROR:
+ printk("qlogicisp : bad mailbox return status\n");
+ break;
+ }
+ outw(0x0, host->io_port + PCI_SEMAPHORE);
+ }
+ out_ptr = hostdata->res_out_ptr;
+
+ DEBUG_INTR(printk("qlogicisp : response queue update\n"));
+ DEBUG_INTR(printk("qlogicisp : response queue depth %d\n",
+ QUEUE_DEPTH(in_ptr, out_ptr)));
+
+ while (out_ptr != in_ptr) {
+ sts = (struct Status_Entry *) &hostdata->res[out_ptr][0];
+ out_ptr = (out_ptr + 1) & RES_QUEUE_LEN;
+
+ Cmnd = (Scsi_Cmnd *) bus_to_virt(sts->handle);
+
+ TRACE("done", out_ptr, Cmnd);
+
+ if (sts->completion_status == CS_RESET_OCCURRED
+ || sts->completion_status == CS_ABORTED
+ || (sts->status_flags & STF_BUS_RESET))
+ hostdata->send_marker = 1;
+
+ if (sts->state_flags & SF_GOT_SENSE)
+ memcpy(Cmnd->sense_buffer, sts->req_sense_data,
+ sizeof(Cmnd->sense_buffer));
+
+ DEBUG_INTR(isp1020_print_status_entry(sts));
+
+ if (sts->hdr.entry_type == ENTRY_STATUS)
+ Cmnd->result = isp1020_return_status(sts);
+ else
+ Cmnd->result = DID_ERROR << 16;
+
+ outw(out_ptr, host->io_port + MBOX5);
+ (*Cmnd->scsi_done)(Cmnd);
+ }
+ hostdata->res_out_ptr = out_ptr;
+
+ LEAVE_INTR("isp1020_intr_handler");
+}
+
+
+static int isp1020_return_status(struct Status_Entry *sts)
+{
+ int host_status = DID_ERROR;
+#if DEBUG_ISP1020_INTR
+ static char *reason[] = {
+ "DID_OK",
+ "DID_NO_CONNECT",
+ "DID_BUS_BUSY",
+ "DID_TIME_OUT",
+ "DID_BAD_TARGET",
+ "DID_ABORT",
+ "DID_PARITY",
+ "DID_ERROR",
+ "DID_RESET",
+ "DID_BAD_INTR"
+ };
+#endif /* DEBUG_ISP1020_INTR */
+
+ ENTER("isp1020_return_status");
+
+ DEBUG(printk("qlogicisp : completion status = 0x%04x\n",
+ sts->completion_status));
+
+ switch(sts->completion_status) {
+ case CS_COMPLETE:
+ host_status = DID_OK;
+ break;
+ case CS_INCOMPLETE:
+ if (!(sts->state_flags & SF_GOT_BUS))
+ host_status = DID_NO_CONNECT;
+ else if (!(sts->state_flags & SF_GOT_TARGET))
+ host_status = DID_BAD_TARGET;
+ else if (!(sts->state_flags & SF_SENT_CDB))
+ host_status = DID_ERROR;
+ else if (!(sts->state_flags & SF_TRANSFERRED_DATA))
+ host_status = DID_ERROR;
+ else if (!(sts->state_flags & SF_GOT_STATUS))
+ host_status = DID_ERROR;
+ else if (!(sts->state_flags & SF_GOT_SENSE))
+ host_status = DID_ERROR;
+ break;
+ case CS_DMA_ERROR:
+ case CS_TRANSPORT_ERROR:
+ host_status = DID_ERROR;
+ break;
+ case CS_RESET_OCCURRED:
+ host_status = DID_RESET;
+ break;
+ case CS_ABORTED:
+ host_status = DID_ABORT;
+ break;
+ case CS_TIMEOUT:
+ host_status = DID_TIME_OUT;
+ break;
+ case CS_DATA_OVERRUN:
+ case CS_COMMAND_OVERRUN:
+ case CS_STATUS_OVERRUN:
+ case CS_BAD_MESSAGE:
+ case CS_NO_MESSAGE_OUT:
+ case CS_EXT_ID_FAILED:
+ case CS_IDE_MSG_FAILED:
+ case CS_ABORT_MSG_FAILED:
+ case CS_NOP_MSG_FAILED:
+ case CS_PARITY_ERROR_MSG_FAILED:
+ case CS_DEVICE_RESET_MSG_FAILED:
+ case CS_ID_MSG_FAILED:
+ case CS_UNEXP_BUS_FREE:
+ case CS_INVALID_ENTRY_TYPE:
+ case CS_DEVICE_QUEUE_FULL:
+ case CS_SCSI_PHASE_SKIPPED:
+ case CS_ARS_FAILED:
+ host_status = DID_ERROR;
+ break;
+ case CS_DATA_UNDERRUN:
+ host_status = DID_OK;
+ break;
+ default:
+ printk("qlogicisp : unknown completion status 0x%04x\n",
+ sts->completion_status);
+ host_status = DID_ERROR;
+ break;
+ }
+
+ DEBUG_INTR(printk("qlogicisp : host status (%s) scsi status %x\n",
+ reason[host_status], sts->scsi_status));
+
+ LEAVE("isp1020_return_status");
+
+ return (sts->scsi_status & STATUS_MASK) | (host_status << 16);
+}
+
+
+int isp1020_abort(Scsi_Cmnd *Cmnd)
+{
+ u_short param[6];
+ struct Scsi_Host *host;
+ struct isp1020_hostdata *hostdata;
+ int return_status = SCSI_ABORT_SUCCESS;
+ u_int cmdaddr = virt_to_bus(Cmnd);
+
+ ENTER("isp1020_abort");
+
+ host = Cmnd->host;
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ isp1020_disable_irqs(host);
+
+ param[0] = MBOX_ABORT;
+ param[1] = (((u_short) Cmnd->target) << 8) | Cmnd->lun;
+ param[2] = cmdaddr >> 16;
+ param[3] = cmdaddr & 0xffff;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicisp : scsi abort failure: %x\n", param[0]);
+ return_status = SCSI_ABORT_ERROR;
+ }
+
+ isp1020_enable_irqs(host);
+
+ LEAVE("isp1020_abort");
+
+ return return_status;
+}
+
+
+int isp1020_reset(Scsi_Cmnd *Cmnd, unsigned int reset_flags)
+{
+ u_short param[6];
+ struct Scsi_Host *host;
+ struct isp1020_hostdata *hostdata;
+ int return_status = SCSI_RESET_SUCCESS;
+
+ ENTER("isp1020_reset");
+
+ host = Cmnd->host;
+ hostdata = (struct isp1020_hostdata *) host->hostdata;
+
+ param[0] = MBOX_BUS_RESET;
+ param[1] = hostdata->host_param.bus_reset_delay;
+
+ isp1020_disable_irqs(host);
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicisp : scsi bus reset failure: %x\n", param[0]);
+ return_status = SCSI_RESET_ERROR;
+ }
+
+ isp1020_enable_irqs(host);
+
+ LEAVE("isp1020_reset");
+
+	return return_status;
+}
+
+
+int isp1020_biosparam(Disk *disk, kdev_t n, int ip[])
+{
+ int size = disk->capacity;
+
+ ENTER("isp1020_biosparam");
+
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ if (ip[2] > 1024) {
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = size / (ip[0] * ip[1]);
+ if (ip[2] > 1023)
+ ip[2] = 1023;
+ }
+
+ LEAVE("isp1020_biosparam");
+
+ return 0;
+}
+
+
+static int isp1020_reset_hardware(struct Scsi_Host *host)
+{
+ u_short param[6];
+ int loop_count;
+
+ ENTER("isp1020_reset_hardware");
+
+ outw(ISP_RESET, host->io_port + PCI_INTF_CTL);
+ outw(HCCR_RESET, host->io_port + HOST_HCCR);
+ outw(HCCR_RELEASE, host->io_port + HOST_HCCR);
+ outw(HCCR_BIOS_DISABLE, host->io_port + HOST_HCCR);
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && inw(host->io_port + HOST_HCCR) == RISC_BUSY)
+ barrier();
+ if (!loop_count)
+ printk("qlogicisp: reset_hardware loop timeout\n");
+
+ outw(0, host->io_port + ISP_CFG1);
+
+#if DEBUG_ISP1020
+ printk("qlogicisp : mbox 0 0x%04x \n", inw(host->io_port + MBOX0));
+ printk("qlogicisp : mbox 1 0x%04x \n", inw(host->io_port + MBOX1));
+ printk("qlogicisp : mbox 2 0x%04x \n", inw(host->io_port + MBOX2));
+ printk("qlogicisp : mbox 3 0x%04x \n", inw(host->io_port + MBOX3));
+ printk("qlogicisp : mbox 4 0x%04x \n", inw(host->io_port + MBOX4));
+ printk("qlogicisp : mbox 5 0x%04x \n", inw(host->io_port + MBOX5));
+#endif /* DEBUG_ISP1020 */
+
+ DEBUG(printk("qlogicisp : loading risc ram\n"));
+
+#if RELOAD_FIRMWARE
+ {
+ int i;
+ for (i = 0; i < risc_code_length01; i++) {
+ param[0] = MBOX_WRITE_RAM_WORD;
+ param[1] = risc_code_addr01 + i;
+ param[2] = risc_code01[i];
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicisp : firmware load failure\n");
+ return 1;
+ }
+ }
+ }
+#endif /* RELOAD_FIRMWARE */
+
+ DEBUG(printk("qlogicisp : verifying checksum\n"));
+
+ param[0] = MBOX_VERIFY_CHECKSUM;
+ param[1] = risc_code_addr01;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicisp : ram checksum failure\n");
+ return 1;
+ }
+
+ DEBUG(printk("qlogicisp : executing firmware\n"));
+
+ param[0] = MBOX_EXEC_FIRMWARE;
+ param[1] = risc_code_addr01;
+
+ isp1020_mbox_command(host, param);
+
+ param[0] = MBOX_ABOUT_FIRMWARE;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ printk("qlogicisp : about firmware failure\n");
+ return 1;
+ }
+
+ DEBUG(printk("qlogicisp : firmware major revision %d\n", param[1]));
+ DEBUG(printk("qlogicisp : firmware minor revision %d\n", param[2]));
+
+ LEAVE("isp1020_reset_hardware");
+
+ return 0;
+}
+
+
+static int isp1020_init(struct Scsi_Host *sh)
+{
+ u_int io_base;
+ struct isp1020_hostdata *hostdata;
+ u_char bus, device_fn, revision, irq;
+ u_short vendor_id, device_id, command;
+
+ ENTER("isp1020_init");
+
+ hostdata = (struct isp1020_hostdata *) sh->hostdata;
+ bus = hostdata->bus;
+ device_fn = hostdata->device_fn;
+
+ if (pcibios_read_config_word(bus, device_fn, PCI_VENDOR_ID, &vendor_id)
+ || pcibios_read_config_word(bus, device_fn,
+ PCI_DEVICE_ID, &device_id)
+ || pcibios_read_config_word(bus, device_fn,
+ PCI_COMMAND, &command)
+ || pcibios_read_config_dword(bus, device_fn,
+ PCI_BASE_ADDRESS_0, &io_base)
+ || pcibios_read_config_byte(bus, device_fn,
+ PCI_CLASS_REVISION, &revision)
+ || pcibios_read_config_byte(bus, device_fn,
+ PCI_INTERRUPT_LINE, &irq))
+ {
+ printk("qlogicisp : error reading PCI configuration\n");
+ return 1;
+ }
+
+ if (vendor_id != PCI_VENDOR_ID_QLOGIC) {
+ printk("qlogicisp : 0x%04x is not QLogic vendor ID\n",
+ vendor_id);
+ return 1;
+ }
+
+ if (device_id != PCI_DEVICE_ID_QLOGIC_ISP1020) {
+ printk("qlogicisp : 0x%04x does not match ISP1020 device id\n",
+ device_id);
+ return 1;
+ }
+
+ if (command & PCI_COMMAND_IO && (io_base & 3) == 1)
+ io_base &= PCI_BASE_ADDRESS_IO_MASK;
+ else {
+ printk("qlogicisp : i/o mapping is disabled\n");
+ return 1;
+ }
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ printk("qlogicisp : bus mastering is disabled\n");
+ return 1;
+ }
+
+ if (revision != ISP1020_REV_ID)
+ printk("qlogicisp : new isp1020 revision ID (%d)\n", revision);
+
+ if (inw(io_base + PCI_ID_LOW) != PCI_VENDOR_ID_QLOGIC
+ || inw(io_base + PCI_ID_HIGH) != PCI_DEVICE_ID_QLOGIC_ISP1020)
+ {
+ printk("qlogicisp : can't decode i/o address space at 0x%x\n",
+ io_base);
+ return 1;
+ }
+
+ hostdata->revision = revision;
+
+ sh->irq = irq;
+ sh->io_port = io_base;
+
+ LEAVE("isp1020_init");
+
+ return 0;
+}
+
+
+#if USE_NVRAM_DEFAULTS
+
+static int isp1020_get_defaults(struct Scsi_Host *host)
+{
+ int i;
+ u_short value;
+ struct isp1020_hostdata *hostdata =
+ (struct isp1020_hostdata *) host->hostdata;
+
+ ENTER("isp1020_get_defaults");
+
+ if (!isp1020_verify_nvram(host)) {
+ printk("qlogicisp : nvram checksum failure\n");
+ printk("qlogicisp : attempting to use default parameters\n");
+ return isp1020_set_defaults(host);
+ }
+
+ value = isp1020_read_nvram_word(host, 2);
+ hostdata->host_param.fifo_threshold = (value >> 8) & 0x03;
+ hostdata->host_param.host_adapter_enable = (value >> 11) & 0x01;
+ hostdata->host_param.initiator_scsi_id = (value >> 12) & 0x0f;
+
+ value = isp1020_read_nvram_word(host, 3);
+ hostdata->host_param.bus_reset_delay = value & 0xff;
+ hostdata->host_param.retry_count = value >> 8;
+
+ value = isp1020_read_nvram_word(host, 4);
+ hostdata->host_param.retry_delay = value & 0xff;
+ hostdata->host_param.async_data_setup_time = (value >> 8) & 0x0f;
+ hostdata->host_param.req_ack_active_negation = (value >> 12) & 0x01;
+ hostdata->host_param.data_line_active_negation = (value >> 13) & 0x01;
+ hostdata->host_param.data_dma_burst_enable = (value >> 14) & 0x01;
+ hostdata->host_param.command_dma_burst_enable = (value >> 15);
+
+ value = isp1020_read_nvram_word(host, 5);
+ hostdata->host_param.tag_aging = value & 0xff;
+
+ value = isp1020_read_nvram_word(host, 6);
+ hostdata->host_param.selection_timeout = value & 0xffff;
+
+ value = isp1020_read_nvram_word(host, 7);
+ hostdata->host_param.max_queue_depth = value & 0xffff;
+
+#if DEBUG_ISP1020_SETUP
+ printk("qlogicisp : fifo threshold=%d\n",
+ hostdata->host_param.fifo_threshold);
+ printk("qlogicisp : initiator scsi id=%d\n",
+ hostdata->host_param.initiator_scsi_id);
+ printk("qlogicisp : bus reset delay=%d\n",
+ hostdata->host_param.bus_reset_delay);
+ printk("qlogicisp : retry count=%d\n",
+ hostdata->host_param.retry_count);
+ printk("qlogicisp : retry delay=%d\n",
+ hostdata->host_param.retry_delay);
+ printk("qlogicisp : async data setup time=%d\n",
+ hostdata->host_param.async_data_setup_time);
+ printk("qlogicisp : req/ack active negation=%d\n",
+ hostdata->host_param.req_ack_active_negation);
+ printk("qlogicisp : data line active negation=%d\n",
+ hostdata->host_param.data_line_active_negation);
+ printk("qlogicisp : data DMA burst enable=%d\n",
+ hostdata->host_param.data_dma_burst_enable);
+ printk("qlogicisp : command DMA burst enable=%d\n",
+ hostdata->host_param.command_dma_burst_enable);
+ printk("qlogicisp : tag age limit=%d\n",
+ hostdata->host_param.tag_aging);
+ printk("qlogicisp : selection timeout limit=%d\n",
+ hostdata->host_param.selection_timeout);
+ printk("qlogicisp : max queue depth=%d\n",
+ hostdata->host_param.max_queue_depth);
+#endif /* DEBUG_ISP1020_SETUP */
+
+ for (i = 0; i < MAX_TARGETS; i++) {
+
+ value = isp1020_read_nvram_word(host, 14 + i * 3);
+ hostdata->dev_param[i].device_flags = value & 0xff;
+ hostdata->dev_param[i].execution_throttle = value >> 8;
+
+ value = isp1020_read_nvram_word(host, 15 + i * 3);
+ hostdata->dev_param[i].synchronous_period = value & 0xff;
+ hostdata->dev_param[i].synchronous_offset = (value >> 8) & 0x0f;
+ hostdata->dev_param[i].device_enable = (value >> 12) & 0x01;
+
+#if DEBUG_ISP1020_SETUP
+ printk("qlogicisp : target 0x%02x\n", i);
+ printk("qlogicisp : device flags=0x%02x\n",
+ hostdata->dev_param[i].device_flags);
+ printk("qlogicisp : execution throttle=%d\n",
+ hostdata->dev_param[i].execution_throttle);
+ printk("qlogicisp : synchronous period=%d\n",
+ hostdata->dev_param[i].synchronous_period);
+ printk("qlogicisp : synchronous offset=%d\n",
+ hostdata->dev_param[i].synchronous_offset);
+ printk("qlogicisp : device enable=%d\n",
+ hostdata->dev_param[i].device_enable);
+#endif /* DEBUG_ISP1020_SETUP */
+ }
+
+ LEAVE("isp1020_get_defaults");
+
+ return 0;
+}
+
+
+#define ISP1020_NVRAM_LEN 0x40
+#define ISP1020_NVRAM_SIG1 0x5349
+#define ISP1020_NVRAM_SIG2 0x2050
+
+static int isp1020_verify_nvram(struct Scsi_Host *host)
+{
+ int i;
+ u_short value;
+ u_char checksum = 0;
+
+ for (i = 0; i < ISP1020_NVRAM_LEN; i++) {
+ value = isp1020_read_nvram_word(host, i);
+
+ switch (i) {
+ case 0:
+ if (value != ISP1020_NVRAM_SIG1) return 0;
+ break;
+ case 1:
+ if (value != ISP1020_NVRAM_SIG2) return 0;
+ break;
+ case 2:
+ if ((value & 0xff) != 0x02) return 0;
+ break;
+ }
+ checksum += value & 0xff;
+ checksum += value >> 8;
+ }
+
+ return (checksum == 0);
+}
+
+#define NVRAM_DELAY() udelay(2) /* 2 microsecond delay */
+
+
+u_short isp1020_read_nvram_word(struct Scsi_Host *host, u_short byte)
+{
+ int i;
+ u_short value, output, input;
+
+ byte &= 0x3f; byte |= 0x0180;
+
+ for (i = 8; i >= 0; i--) {
+ output = ((byte >> i) & 0x1) ? 0x4 : 0x0;
+ outw(output | 0x2, host->io_port + PCI_NVRAM); NVRAM_DELAY();
+ outw(output | 0x3, host->io_port + PCI_NVRAM); NVRAM_DELAY();
+ outw(output | 0x2, host->io_port + PCI_NVRAM); NVRAM_DELAY();
+ }
+
+ for (i = 0xf, value = 0; i >= 0; i--) {
+ value <<= 1;
+ outw(0x3, host->io_port + PCI_NVRAM); NVRAM_DELAY();
+ input = inw(host->io_port + PCI_NVRAM); NVRAM_DELAY();
+ outw(0x2, host->io_port + PCI_NVRAM); NVRAM_DELAY();
+ if (input & 0x8) value |= 1;
+ }
+
+ outw(0x0, host->io_port + PCI_NVRAM); NVRAM_DELAY();
+
+ return value;
+}
+
+#endif /* USE_NVRAM_DEFAULTS */
+
+
+static int isp1020_set_defaults(struct Scsi_Host *host)
+{
+ struct isp1020_hostdata *hostdata =
+ (struct isp1020_hostdata *) host->hostdata;
+ int i;
+
+ ENTER("isp1020_set_defaults");
+
+ hostdata->host_param.fifo_threshold = 2;
+ hostdata->host_param.host_adapter_enable = 1;
+ hostdata->host_param.initiator_scsi_id = 7;
+ hostdata->host_param.bus_reset_delay = 3;
+ hostdata->host_param.retry_count = 0;
+ hostdata->host_param.retry_delay = 1;
+ hostdata->host_param.async_data_setup_time = 6;
+ hostdata->host_param.req_ack_active_negation = 1;
+ hostdata->host_param.data_line_active_negation = 1;
+ hostdata->host_param.data_dma_burst_enable = 1;
+ hostdata->host_param.command_dma_burst_enable = 1;
+ hostdata->host_param.tag_aging = 8;
+ hostdata->host_param.selection_timeout = 250;
+ hostdata->host_param.max_queue_depth = 256;
+
+ for (i = 0; i < MAX_TARGETS; i++) {
+ hostdata->dev_param[i].device_flags = 0xfd;
+ hostdata->dev_param[i].execution_throttle = 16;
+ hostdata->dev_param[i].synchronous_period = 25;
+ hostdata->dev_param[i].synchronous_offset = 12;
+ hostdata->dev_param[i].device_enable = 1;
+ }
+
+ LEAVE("isp1020_set_defaults");
+
+ return 0;
+}
+
+
+static int isp1020_load_parameters(struct Scsi_Host *host)
+{
+ int i, k;
+ u_int queue_addr;
+ u_short param[6];
+ u_short isp_cfg1;
+ unsigned long flags;
+ struct isp1020_hostdata *hostdata =
+ (struct isp1020_hostdata *) host->hostdata;
+
+ ENTER("isp1020_load_parameters");
+
+ save_flags(flags);
+ cli();
+
+ outw(hostdata->host_param.fifo_threshold, host->io_port + ISP_CFG1);
+
+ param[0] = MBOX_SET_INIT_SCSI_ID;
+ param[1] = hostdata->host_param.initiator_scsi_id;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set initiator id failure\n");
+ return 1;
+ }
+
+ param[0] = MBOX_SET_RETRY_COUNT;
+ param[1] = hostdata->host_param.retry_count;
+ param[2] = hostdata->host_param.retry_delay;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set retry count failure\n");
+ return 1;
+ }
+
+ param[0] = MBOX_SET_ASYNC_DATA_SETUP_TIME;
+ param[1] = hostdata->host_param.async_data_setup_time;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : async data setup time failure\n");
+ return 1;
+ }
+
+ param[0] = MBOX_SET_ACTIVE_NEG_STATE;
+ param[1] = (hostdata->host_param.req_ack_active_negation << 4)
+ | (hostdata->host_param.data_line_active_negation << 5);
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set active negation state failure\n");
+ return 1;
+ }
+
+ param[0] = MBOX_SET_PCI_CONTROL_PARAMS;
+ param[1] = hostdata->host_param.data_dma_burst_enable << 1;
+ param[2] = hostdata->host_param.command_dma_burst_enable << 1;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set pci control parameter failure\n");
+ return 1;
+ }
+
+ isp_cfg1 = inw(host->io_port + ISP_CFG1);
+
+ if (hostdata->host_param.data_dma_burst_enable
+ || hostdata->host_param.command_dma_burst_enable)
+ isp_cfg1 |= 0x0004;
+ else
+ isp_cfg1 &= 0xfffb;
+
+ outw(isp_cfg1, host->io_port + ISP_CFG1);
+
+ param[0] = MBOX_SET_TAG_AGE_LIMIT;
+ param[1] = hostdata->host_param.tag_aging;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set tag age limit failure\n");
+ return 1;
+ }
+
+ param[0] = MBOX_SET_SELECT_TIMEOUT;
+ param[1] = hostdata->host_param.selection_timeout;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set selection timeout failure\n");
+ return 1;
+ }
+
+ for (i = 0; i < MAX_TARGETS; i++) {
+
+ if (!hostdata->dev_param[i].device_enable)
+ continue;
+
+ param[0] = MBOX_SET_TARGET_PARAMS;
+ param[1] = i << 8;
+ param[2] = hostdata->dev_param[i].device_flags << 8;
+ param[3] = (hostdata->dev_param[i].synchronous_offset << 8)
+ | hostdata->dev_param[i].synchronous_period;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set target parameter failure\n");
+ return 1;
+ }
+
+ for (k = 0; k < MAX_LUNS; k++) {
+
+ param[0] = MBOX_SET_DEV_QUEUE_PARAMS;
+ param[1] = (i << 8) | k;
+ param[2] = hostdata->host_param.max_queue_depth;
+ param[3] = hostdata->dev_param[i].execution_throttle;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set device queue "
+ "parameter failure\n");
+ return 1;
+ }
+ }
+ }
+
+ queue_addr = (u_int) virt_to_bus(&hostdata->res[0][0]);
+
+ param[0] = MBOX_INIT_RES_QUEUE;
+ param[1] = RES_QUEUE_LEN + 1;
+ param[2] = (u_short) (queue_addr >> 16);
+ param[3] = (u_short) (queue_addr & 0xffff);
+ param[4] = 0;
+ param[5] = 0;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set response queue failure\n");
+ return 1;
+ }
+
+ queue_addr = (u_int) virt_to_bus(&hostdata->req[0][0]);
+
+ param[0] = MBOX_INIT_REQ_QUEUE;
+ param[1] = QLOGICISP_REQ_QUEUE_LEN + 1;
+ param[2] = (u_short) (queue_addr >> 16);
+ param[3] = (u_short) (queue_addr & 0xffff);
+ param[4] = 0;
+
+ isp1020_mbox_command(host, param);
+
+ if (param[0] != MBOX_COMMAND_COMPLETE) {
+ restore_flags(flags);
+ printk("qlogicisp : set request queue failure\n");
+ return 1;
+ }
+
+ restore_flags(flags);
+
+ LEAVE("isp1020_load_parameters");
+
+ return 0;
+}
+
+
+/*
+ * currently, this is only called during initialization or abort/reset,
+ * at which times interrupts are disabled, so polling is OK, I guess...
+ */
+static int isp1020_mbox_command(struct Scsi_Host *host, u_short param[])
+{
+ int loop_count;
+
+ if (mbox_param[param[0]] == 0)
+ return 1;
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && inw(host->io_port + HOST_HCCR) & 0x0080)
+ barrier();
+ if (!loop_count)
+ printk("qlogicisp: mbox_command loop timeout #1\n");
+
+ switch(mbox_param[param[0]] >> 4) {
+ case 6: outw(param[5], host->io_port + MBOX5);
+ case 5: outw(param[4], host->io_port + MBOX4);
+ case 4: outw(param[3], host->io_port + MBOX3);
+ case 3: outw(param[2], host->io_port + MBOX2);
+ case 2: outw(param[1], host->io_port + MBOX1);
+ case 1: outw(param[0], host->io_port + MBOX0);
+ }
+
+ outw(0x0, host->io_port + PCI_SEMAPHORE);
+ outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
+ outw(HCCR_SET_HOST_INTR, host->io_port + HOST_HCCR);
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && !(inw(host->io_port + PCI_INTF_STS) & 0x04))
+ barrier();
+ if (!loop_count)
+ printk("qlogicisp: mbox_command loop timeout #2\n");
+
+ loop_count = DEFAULT_LOOP_COUNT;
+ while (--loop_count && inw(host->io_port + MBOX0) == 0x04)
+ barrier();
+ if (!loop_count)
+ printk("qlogicisp: mbox_command loop timeout #3\n");
+
+ switch(mbox_param[param[0]] & 0xf) {
+ case 6: param[5] = inw(host->io_port + MBOX5);
+ case 5: param[4] = inw(host->io_port + MBOX4);
+ case 4: param[3] = inw(host->io_port + MBOX3);
+ case 3: param[2] = inw(host->io_port + MBOX2);
+ case 2: param[1] = inw(host->io_port + MBOX1);
+ case 1: param[0] = inw(host->io_port + MBOX0);
+ }
+
+ outw(0x0, host->io_port + PCI_SEMAPHORE);
+ outw(HCCR_CLEAR_RISC_INTR, host->io_port + HOST_HCCR);
+
+ return 0;
+}
+
+
+#if DEBUG_ISP1020_INTR
+
+void isp1020_print_status_entry(struct Status_Entry *status)
+{
+ int i;
+
+ printk("qlogicisp : entry count = 0x%02x, type = 0x%02x, flags = 0x%02x\n",
+ status->hdr.entry_cnt, status->hdr.entry_type, status->hdr.flags);
+ printk("qlogicisp : scsi status = 0x%04x, completion status = 0x%04x\n",
+ status->scsi_status, status->completion_status);
+ printk("qlogicisp : state flags = 0x%04x, status flags = 0x%04x\n",
+ status->state_flags, status->status_flags);
+ printk("qlogicisp : time = 0x%04x, request sense length = 0x%04x\n",
+ status->time, status->req_sense_len);
+ printk("qlogicisp : residual transfer length = 0x%08x\n", status->residual);
+
+ for (i = 0; i < status->req_sense_len; i++)
+ printk("qlogicisp : sense data = 0x%02x\n", status->req_sense_data[i]);
+}
+
+#endif /* DEBUG_ISP1020_INTR */
+
+
+#if DEBUG_ISP1020
+
+void isp1020_print_scsi_cmd(Scsi_Cmnd *cmd)
+{
+ int i;
+
+ printk("qlogicisp : target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
+ cmd->target, cmd->lun, cmd->cmd_len);
+ printk("qlogicisp : command = ");
+ for (i = 0; i < cmd->cmd_len; i++)
+ printk("0x%02x ", cmd->cmnd[i]);
+ printk("\n");
+}
+
+#endif /* DEBUG_ISP1020 */
+
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = QLOGICISP;
+
+#include "scsi_module.c"
+#endif /* MODULE */
diff --git a/linux/src/drivers/scsi/qlogicisp.h b/linux/src/drivers/scsi/qlogicisp.h
new file mode 100644
index 0000000..b3e052c
--- /dev/null
+++ b/linux/src/drivers/scsi/qlogicisp.h
@@ -0,0 +1,98 @@
+/*
+ * QLogic ISP1020 Intelligent SCSI Processor Driver (PCI)
+ * Written by Erik H. Moe, ehm@cris.com
+ * Copyright 1995, Erik H. Moe
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/* Renamed and updated to 1.3.x by Michael Griffith <grif@cs.ucr.edu> */
+
+/*
+ * $Date: 2007/03/27 21:04:30 $
+ * $Revision: 1.1.4.2 $
+ *
+ * Revision 0.5 1995/09/22 02:32:56 root
+ * do auto request sense
+ *
+ * Revision 0.4 1995/08/07 04:48:28 root
+ * supply firmware with driver.
+ * numerous bug fixes/general cleanup of code.
+ *
+ * Revision 0.3 1995/07/16 16:17:16 root
+ * added reset/abort code.
+ *
+ * Revision 0.2 1995/06/29 03:19:43 root
+ * fixed biosparam.
+ * added queue protocol.
+ *
+ * Revision 0.1 1995/06/25 01:56:13 root
+ * Initial release.
+ *
+ */
+
+#ifndef _QLOGICISP_H
+#define _QLOGICISP_H
+
+/*
+ * With the qlogic interface, every queue slot can hold a SCSI
+ * command with up to 4 scatter/gather entries. If we need more
+ * than 4 entries, continuation entries can be used that hold
+ * another 7 entries each. Unlike for other drivers, this means
+ * that the maximum number of scatter/gather entries we can
+ * support at any given time is a function of the number of queue
+ * slots available. That is, host->can_queue and host->sg_tablesize
+ * are dynamic and _not_ independent. This all works fine because
+ * requests are queued serially and the scatter/gather limit is
+ * determined for each queue request anew.
+ */
+#define QLOGICISP_REQ_QUEUE_LEN 63 /* must be power of two - 1 */
+#define QLOGICISP_MAX_SG(ql) (4 + (((ql) > 0) ? 7*((ql) - 1) : 0))
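To make the comment above concrete: when all 63 request-queue slots are free, one slot carries the command itself (4 segments) and each of the other 62 can carry a continuation entry (7 segments each), so the advertised sg_tablesize comes out at 438. A standalone arithmetic check of the same formula (ordinary user-space C, not driver code):

#include <stdio.h>

/* Same formula as QLOGICISP_MAX_SG above, spelled out for clarity. */
static int max_sg(int free_slots)
{
    return free_slots > 0 ? 4 + 7 * (free_slots - 1) : 0;
}

int main(void)
{
    printf("max sg with 63 free slots: %d\n", max_sg(63)); /* 438 */
    printf("max sg with 1 free slot:   %d\n", max_sg(1));  /* 4   */
    return 0;
}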
+
+int isp1020_detect(Scsi_Host_Template *);
+int isp1020_release(struct Scsi_Host *);
+const char * isp1020_info(struct Scsi_Host *);
+int isp1020_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
+int isp1020_abort(Scsi_Cmnd *);
+int isp1020_reset(Scsi_Cmnd *, unsigned int);
+int isp1020_biosparam(Disk *, kdev_t, int[]);
+
+#ifndef NULL
+#define NULL (0)
+#endif
+
+static struct proc_dir_entry proc_scsi_isp1020;
+
+#define QLOGICISP { \
+ /* next */ NULL, \
+ /* usage_count */ NULL, \
+ /* proc dir */ NULL, \
+ /* procfs info */ NULL, \
+ /* name */ NULL, \
+ /* detect */ isp1020_detect, \
+ /* release */ isp1020_release, \
+ /* info */ isp1020_info, \
+ /* command */ NULL, \
+ /* queuecommand */ isp1020_queuecommand, \
+ /* abort */ isp1020_abort, \
+ /* reset */ isp1020_reset, \
+ /* slave_attach */ NULL, \
+ /* bios_param */ isp1020_biosparam, \
+ /* can_queue */ QLOGICISP_REQ_QUEUE_LEN, \
+ /* this_id */ -1, \
+ /* sg_tablesize */ QLOGICISP_MAX_SG(QLOGICISP_REQ_QUEUE_LEN), \
+ /* cmd_per_lun */ 1, \
+ /* present */ 0, \
+ /* unchecked_isa_dma */ 0, \
+ /* use_clustering */ DISABLE_CLUSTERING \
+}
+
+#endif /* _QLOGICISP_H */
diff --git a/linux/src/drivers/scsi/scripts.h b/linux/src/drivers/scsi/scripts.h
new file mode 100644
index 0000000..482b0c2
--- /dev/null
+++ b/linux/src/drivers/scsi/scripts.h
@@ -0,0 +1,1357 @@
+/***********************************************************************
+;* File Name : SCRIPTS.H *
+;* Description:SCRIPT language for NCR53c825A,875 SCRIPT processor*
+;* *
+;***********************************************************************
+
+;==========================================================
+; NCR 53C810,53C815,53C820,53C825,53C825A,53C860,53C875
+; Script language definition for assembly programming
+;==========================================================
+
+;==========================================================
+; DMA Command
+;==========================================================
+*/
+#define DCMD_BLOCK_MOVE 0
+#define DCMD_IO 0x040000000 /*;BIT30 */
+#define DCMD_RD_WRT 0x040000000 /*;BIT30 */
+#define DCMD_XFER_CTRL 0x080000000 /*;BIT31 */
+#define DCMD_MEM_MOVE 0x0C0000000 /*;(BIT31+BIT30) */
+#define DCMD_LOAD_STORE 0x0E0000000 /*;(BIT31+BIT30+BIT29) */
+/*;==========================================================*/
+#define INDIRECT_ADDR 0x20000000 /*;BIT29 */
+#define TABLE_INDIRECT 0x10000000 /*;BIT28 */
+#define BLOCK_MOVE 0x08000000 /*;BIT27 */
+#define CHAIN_MOVE 0
+/*; SCSI phase definition */
+#define DATA_OUT_ 0x00000000 /*;data out phase */
+#define DATA_IN_ 0x01000000 /*;BIT24 ; data in phase */
+#define COMMAND_ 0x02000000 /*;BIT25 ; command phase */
+#define STATUS_ 0x03000000 /*;(BIT25+BIT24) ; status phase */
+#define RESERVED_OUT 0x04000000 /*;BIT26 */
+#define RESERVED_IN 0x05000000 /*;(BIT26+BIT24) */
+#define MSG_OUT_ 0x06000000 /*;(BIT26+BIT25) ; message out phase */
+#define MSG_IN_ 0x07000000 /*;(BIT26+BIT25+BIT24);message in phase */
+/*;----------------------------------------------------------*/
+#define DCMD_SELECT 0x40000000 /*;DCMD_IO+0 */
+#define DCMD_SELECT_ATN 0x41000000 /*;(DCMD_IO+BIT24) */
+#define DCMD_WAIT_DISC 0x48000000 /*;(DCMD_IO+BIT27) */
+#define DCMD_WAIT_RESEL 0x50000000 /*;(DCMD_IO+BIT28) */
+#define DCMD_SET_CARRY 0x58000400 /*;(DCMD_IO+BIT28+BIT27+BIT10) */
+#define DCMD_SET_ACK 0x58000040 /*;(DCMD_IO+BIT28+BIT27+BIT6) */
+#define DCMD_SET_ATN 0x58000008 /*;(DCMD_IO+BIT28+BIT27+BIT3) */
+#define DCMD_CLR_CARRY 0x60000400 /*;(DCMD_IO+BIT29+BIT10) */
+#define DCMD_CLR_ACK 0x60000040 /*;(DCMD_IO+BIT29+BIT6) */
+#define DCMD_CLR_ATN 0x60000008 /*;(DCMD_IO+BIT29+BIT3) */
+#define RELATIVE_ADDR 0x04000000 /*;BIT26 */
+#define IO_TABLE_INDIR 0x02000000 /*;BIT25 */
+/*;----------------------------------------------------------*/
+#define MOVE_FROM_SFBR 0x68000000 /*;(DCMD_RD_WRT+BIT29+BIT27) */
+#define MOVE_TO_SFBR 0x70000000 /*;(DCMD_RD_WRT+BIT29+BIT28) */
+#define RD_MODIFY_WRT 0x78000000 /*;(DCMD_RD_WRT+BIT29+BIT28+BIT27) */
+#define OP_MOVE_DATA 0
+#define OP_SHIFT_LEFT_C 0x01000000 /*;BIT24 */
+#define OP_OR 0x02000000 /*;BIT25 */
+#define OP_XOR 0x03000000 /*;(BIT25+BIT24) */
+#define OP_AND 0x04000000 /*;BIT26 */
+#define OP_SHIFT_RIGHT_C 0x05000000 /*;(BIT26+BIT24) */
+#define OP_ADD_DATA 0x06000000 /*;(BIT26+BIT25) */
+#define OP_ADD_DATA_C 0x07000000 /*;(BIT26+BIT25+BIT24) */
+#define USE_SFBR 0x00800000 /*;BIT23 */
+/*;----------------------------------------------------------*/
+#define DCMD_JUMP 0x80000000 /*;DCMD_XFER_CTRL+0 */
+#define DCMD_CALL 0x88000000 /*;(DCMD_XFER_CTRL+BIT27) */
+#define DCMD_RETURN 0x90000000 /*;(DCMD_XFER_CTRL+BIT28) */
+#define DCMD_INT 0x98000000 /*;(DCMD_XFER_CTRL+BIT28+BIT27) */
+#define RELATIVE_ 0x00800000 /*;BIT23 */
+#define IF_CARRY 0x00200000 /*;BIT21 */
+#define INT_ON_FLY_ 0x00100000 /*;BIT20 */
+#define IF_TRUE 0x00080000 /*;BIT19 */
+#define IF_NOT 0
+#define DATA_CMP 0x00040000 /*;BIT18 */
+#define PHASE_CMP 0x00020000 /*;BIT17 */
+#define WAIT_PHASE_VALID 0x00010000 /*;BIT16 */
+/*;----------------------------------------------------------*/
+#define DSA_RELATIVE 0x10000000 /*;BIT28 */
+#define FLUSH_PREFETCH 0x02000000 /*;BIT25 */
+#define DCMD_LOAD 0x0E1000000 /*;(DCMD_LOAD_STORE+BIT24) */
+#define DCMD_STORE 0x0E0000000 /*;DCMD_LOAD_STORE */
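Each SCRIPTS instruction in the tables below is two 32-bit words, and the first word is built simply by adding the DCMD_*, condition and phase bits defined above. A standalone check of one such encoding (the constants are repeated here so the snippet compiles on its own):

#include <stdio.h>

/* Values copied from the definitions above so this compiles alone. */
#define DCMD_JUMP        0x80000000UL
#define DATA_IN_         0x01000000UL
#define IF_TRUE          0x00080000UL
#define PHASE_CMP        0x00020000UL
#define WAIT_PHASE_VALID 0x00010000UL

int main(void)
{
    /* "Jump if the current phase is DATA IN" - the opcode the jump
     * tables below are built from; prints 0x810b0000. */
    unsigned long op = DCMD_JUMP + WAIT_PHASE_VALID + IF_TRUE
                     + PHASE_CMP + DATA_IN_;
    printf("jump-on-data-in opcode: 0x%08lx\n", op);
    return 0;
}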
+/*
+;==========================================================
+; SCSI message EQUATES
+;==========================================================
+*/
+#define CMD_COMPLETE 0
+#define EXT_MSG 1
+#define SAVE_PTR 2
+#define RESTORE_PTR 3
+#define DISCONNECTMSG 4
+#define INITIATOR_ERR 5
+#define ABORTMSG 6
+#define MSG_REJECT 7
+#define NOPMSG 8
+#define MSG_PARITY 9
+#define LINK_CMD_CPL 0x0a
+#define LINK_CMD_FLAG 0x0b
+#define RESET_DEVICE 0x0c
+#define IDENTIFYMSG 0x80
+#define SIMPLE_TAG 0x20
+#define IGNORE_WIDE_RES 0x23
+/*
+;==========================================================
+; Operation assumption
+; 1. If phase mismatch during Xfer PAD ==> do nothing
+; Else compute FIXUP needed
+; 2. After phase mismatch ==> Set to Xfer PAD
+; 3. At disconnection ==> Modify return address
+; 4. 1st restore ptr after reselection is ignored
+; 5. If Xfer PAD is done ==> Error
+;==========================================================
+*/
+/* static start_script
+ static reselected
+ static reselecttag
+ static select0
+ static select1
+ static check_phase
+ static status1_phase
+ static command_phase
+ static jump_table0
+ static jump_tableB
+ static din_phase
+ static din_phaseB
+ static din_pad_0
+ static din_pad_addrB
+ static dout_phase
+ static dout_phaseB
+ static dout_pad_0
+ static dout_pad_addrB
+ static jump_tablew
+ static jump_tableW
+ static din_phase1
+ static din_phaseW
+ static din_pad_1
+ static din_pad_addrW
+ static dout_phase1
+ static dout_phaseW
+ static dout_pad_1
+ static dout_pad_addrW
+ static mout_phase
+ static status_phase
+ static min_phase
+ static set_atn
+ static clr_atn
+ static end_script
+ static start_mov
+ static SrcPhysAddr
+ static DesPhysAddr
+*/
+ULONG start_script[]={
+/*
+;==========================================================
+; Wait for reselection
+;==========================================================
+*/
+ DCMD_WAIT_RESEL
+ };
+ULONG jmp_select0[]={
+ 0 /* offset select0 */
+ };
+ULONG reselected[]={
+ RD_MODIFY_WRT+OP_OR+0x200+0x340000, /* (2 shl 8) or (__scratcha shl 16) */
+ 0,
+
+ DCMD_INT+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+MSG_IN_,
+ __RESELECTED,
+
+ BLOCK_MOVE+MSG_IN_+1 /* ;move in ID byte */
+ };
+ULONG ACB_msgin123_1[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __RESELECTED1
+ };
+ULONG reselecttag[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ BLOCK_MOVE+MSG_IN_+2 /* ;move 2 msg bytes */
+ };
+ULONG ACB_msgin123_2[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __RESELECTEDT
+ };
+/*
+;==========================================================
+; Select
+; Case 1 : Only identify message is to be sent
+; Case 2 : Synchronous negotiation is requested
+;==========================================================
+*/
+ULONG select0[]={
+ DCMD_INT+IF_TRUE,
+ __SIGNAL
+ };
+ULONG select1[]={ /* ; Select with ATN */
+
+ DCMD_SELECT_ATN+IO_TABLE_INDIR /* +offset SRB.__select ;4200h or 0100H */
+ };
+ULONG jmp_reselected[]={
+ 0, /* offset reselected, */
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+MSG_OUT_
+ };
+ULONG jmp_check_phase[]={
+ 0, /* offset check_phase, */
+
+ TABLE_INDIRECT+BLOCK_MOVE+MSG_OUT_
+ };
+ULONG SRB_msgout0[]={
+ 0 /* offset SRB.__msgout0 */
+ };
+ULONG check_phase[]={
+ DCMD_RETURN+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0,
+
+ DCMD_RETURN+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0
+ };
+ULONG status1_phase[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+STATUS_
+ };
+ULONG jmp_status_phase[]={
+ 0, /* offset status_phase,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+COMMAND_
+ };
+ULONG jmp_command_phase[]={
+ 0, /* offset command_phase, */
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+MSG_IN_
+ };
+ULONG jmp_min_phase[]={
+ 0, /* offset min_phase,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+MSG_OUT_
+ };
+ULONG jmp_mout_phase[]={
+ 0, /* offset mout_phase,*/
+
+ DCMD_INT+IF_TRUE,
+ __FATALERROR
+ };
+/*
+;==========================================================
+; Command phase
+;==========================================================
+*/
+ULONG command_phase[]={
+ DCMD_CLR_ATN,
+ 0,
+ TABLE_INDIRECT+BLOCK_MOVE+COMMAND_
+ };
+ULONG SRB_command[]={
+ 0, /* offset SRB.__command,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase1[]={
+ 0 /* offset check_phase */
+ };
+/*
+;==========================================================
+; Data phase jump table for 8 bit operation
+;==========================================================
+*/
+ULONG jmp_dio_phaseB[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseB+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseB+ 128,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0 /* offset dout_phaseB+ 128 */
+ };
+ULONG jump_table0[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_din_pad_0[]={
+ 0, /* offset din_pad_0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_dout_pad_0[]={
+ 0 /* offset dout_pad_0 */
+ };
+
+#define jump_tableB jump_table0
+/*
+;==========================================================
+; Data in phase
+;==========================================================
+*/
+ULONG din_phaseB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x100+0x340000, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_status1_phase[]={
+ 0 /* offset status1_phase */
+ };
+
+#define din_phase din_phaseB
+
+ULONG din_pad_0[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG din_pad_addrB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_IN_
+ };
+ULONG SRB_SegmentPad[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_din_pad_addrB[]={
+ 0 /* offset din_pad_addrB */
+ };
+/*
+;==========================================================
+; Data out phase
+;==========================================================
+*/
+ULONG dout_phaseB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x100+0x340000, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_status1_phase1[]={
+ 0 /* offset status1_phase */
+ };
+
+#define dout_phase dout_phaseB
+
+ULONG dout_pad_0[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG dout_pad_addrB[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_
+ };
+ULONG SRB_SegmentPad1[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_dout_pad_addrB[]={
+ 0 /* offset dout_pad_addrB */
+ };
+/*
+;==========================================================
+; Data phase jump table for WIDE SCSI operation
+;==========================================================
+*/
+ULONG jmp_dio_phaseW[]={
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 0,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 8,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 16,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 24,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 32,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 40,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 48,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 56,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 64,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 72,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 80,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 88,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 96,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 104,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 112,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0, /* offset dout_phaseW+ 120,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_,
+ 0, /* offset din_phaseW+ 128,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_,
+ 0 /* offset dout_phaseW+ 128 */
+ };
+ULONG jump_tablew[]={
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_din_pad_1[]={
+ 0, /* offset din_pad_1,*/
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_TRUE+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_dout_pad_1[]={
+ 0 /* offset dout_pad_1 */
+ };
+
+#define jump_tableW jump_tablew
+/*
+;==========================================================
+; Data in phase
+;==========================================================
+*/
+ULONG din_phaseW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x340000+0x100, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_IN_
+ };
+ULONG jmp_status1_phase2[]={
+ 0 /* offset status1_phase */
+ };
+
+#define din_phase1 din_phaseW
+
+ULONG din_pad_1[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG din_pad_addrW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_IN_
+ };
+ULONG SRB_SegmentPad2[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_din_pad_addrW[]={
+ 0 /* offset din_pad_addrW */
+ };
+/*
+;==========================================================
+; Data out phase
+;==========================================================
+*/
+ULONG dout_phaseW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment0,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment1,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment2,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment3,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment4,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment5,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment6,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment7,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment8,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment9,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment10,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment11,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment12,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment13,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment14,*/
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+CHAIN_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment15,*/
+/*; 18000000h or DATA_OUT_ */
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_,
+ 0, /* offset SRB.Segment16,*/
+
+ RD_MODIFY_WRT+OP_OR+0x340000+0x100, /*;(1 shl 8) or (__scratcha shl 16)*/
+ 0,
+
+ DCMD_JUMP+WAIT_PHASE_VALID+IF_NOT+PHASE_CMP+DATA_OUT_
+ };
+ULONG jmp_status1_phase3[]={
+ 0 /* offset status1_phase */
+ };
+
+#define dout_phase1 dout_phaseW
+
+ULONG dout_pad_1[]={
+ RD_MODIFY_WRT+OP_OR+0x340000+0x400, /*;(4 shl 8) or (__scratcha shl 16)*/
+ 0
+ };
+ULONG dout_pad_addrW[]={
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+DATA_OUT_
+ };
+ULONG SRB_SegmentPad3[]={
+ 0, /* offset SRB.SegmentPad,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_dout_pad_addrW[]={
+ 0 /* offset dout_pad_addrW */
+ };
+/*
+;==========================================================
+; message out phase
+;==========================================================
+*/
+ULONG mout_phase[]={
+ DCMD_SET_ATN,
+ 0,
+
+ DCMD_BLOCK_MOVE+TABLE_INDIRECT+BLOCK_MOVE+MSG_OUT_
+ };
+ULONG SRB_msgout01[]={
+ 0, /* offset SRB.__msgout0,*/
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase2[]={
+ 0 /* offset check_phase */
+ };
+/*
+;==========================================================
+; Status phase process
+;==========================================================
+*/
+ULONG status_phase[]={
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+STATUS_+1
+ };
+ULONG ACB_status[]={
+ 0 /* offset ACB.status */
+ };
+/*
+;==========================================================
+; message in phase
+;==========================================================
+*/
+ULONG min_phase[]={
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+1
+ };
+ULONG ACB_msgin123_3[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_JUMP+IF_NOT+DATA_CMP+CMD_COMPLETE
+ };
+ULONG jmp_jump_msgok[]={
+ 0 /* offset jump_msgok */
+ };
+/*
+;==========================================================
+; command complete message
+;==========================================================
+*/
+ULONG msg__0[]={
+ RD_MODIFY_WRT+OP_AND+0x20000+0x7F00, /*;(7FH shl 8) or (__scntl2 shl 16)*/
+ 0,
+
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_WAIT_DISC,
+ 0,
+
+ DCMD_INT+IF_TRUE,
+ __COMPLETE
+ };
+/*
+;==========================================================
+; Other message
+;==========================================================
+*/
+ULONG jump_msgok[]={
+ DCMD_JUMP+IF_TRUE+DATA_CMP+SAVE_PTR
+ };
+ULONG jmp_msg__a[]={
+ 0, /* offset msg__a,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+RESTORE_PTR
+ };
+ULONG jmp_msg__3[]={
+ 0, /* offset msg__3,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+DISCONNECTMSG
+ };
+ULONG jmp_msg__4[]={
+ 0, /* offset msg__4,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+EXT_MSG
+ };
+ULONG jmp_msg__1[]={
+ 0, /* offset msg__1,*/
+
+ DCMD_INT+IF_TRUE+DATA_CMP+MSG_REJECT,
+ __MSGREJECT,
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+LINK_CMD_CPL
+ };
+ULONG jmp_msg__a1[]={
+ 0, /* offset msg__a,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+LINK_CMD_FLAG
+ };
+ULONG jmp_msg__a2[]={
+ 0, /* offset msg__a,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+IGNORE_WIDE_RES
+ };
+ULONG jmp_msg__23[]={
+ 0, /* offset msg__23,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGUNKNOWN
+ };
+/*
+;==========================================================
+; Extended message
+;==========================================================
+*/
+ULONG msg__1[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+1 /* ;ext msg len */
+ };
+ULONG ACB_msgin123_4[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+3
+ };
+ULONG jmp_msg___3[]={
+ 0, /* offset msg___3,*/
+
+ DCMD_JUMP+IF_TRUE+DATA_CMP+2
+ };
+ULONG jmp_msg___2[]={
+ 0, /* offset msg___2,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGEXTEND
+ };
+
+ULONG msg___3[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+3
+ };
+ULONG ACB_msgin123_5[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGSYNC
+ };
+
+ULONG msg___2[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+2
+ };
+ULONG ACB_msgin123_6[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_INT+IF_TRUE,
+ __MSGWIDE
+ };
+/*
+;############################################################
+; for synchronous negotiation
+; 1. Active ==> INT3, restart at data__1_2
+; 2. Passive ==> INT3, prepare message out, restart at data__1_1
+; 3. Disable ==> INT3, prepare message out, restart at data__1_1
+;############################################################
+*/
+ULONG set_atn[]={
+ DCMD_SET_ATN,
+ 0
+ };
+ULONG msg__a[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase3[]={
+ 0 /* offset check_phase */
+ };
+
+ULONG msg__23[]={ /* ; ignore wide residue */
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_BLOCK_MOVE+BLOCK_MOVE+MSG_IN_+1
+ };
+ULONG ACB_msgin123_7[]={
+ 0, /* offset ACB.msgin123,*/
+
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase4[]={
+ 0 /* offset check_phase */
+ };
+
+ULONG msg__3[]={
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase5[]={
+ 0 /* offset check_phase */
+ };
+
+ULONG msg__4[]={ /* ; disconnect */
+ RD_MODIFY_WRT+OP_AND+0x20000+0x7F00, /*;(7FH shl 8) or (__scntl2 shl 16)*/
+ 0,
+
+ DCMD_CLR_ACK,
+ 0,
+
+ DCMD_WAIT_DISC,
+ 0,
+
+ DCMD_INT+IF_TRUE,
+ __DISCONNECTED
+ };
+
+ULONG clr_atn[]={
+ DCMD_CLR_ATN,
+ 0,
+
+ DCMD_JUMP+IF_TRUE
+ };
+ULONG jmp_check_phase6[]={
+ 0 /* offset check_phase */
+ };
+/*
+;==========================================================
+; Used for script operation
+;==========================================================
+*/
+ULONG start_mov[]={
+/*; DCMD_MEM_MOVE+(OFFSET DGROUP:end_script - OFFSET DGROUP:start_script) ;Memory move SCRIPTS instruction*/
+ DCMD_MEM_MOVE+0x1000 /*;Memory move SCRIPTS instruction ( 4K )*/
+ };
+ULONG SrcPhysAddr[]={
+ 0 /*; source */
+ };
+ULONG DesPhysAddr[]={
+ 0, /*; destination */
+
+ DCMD_INT+IF_TRUE, /*; script interrupt, */
+ 0,
+
+ DCMD_INT+IF_NOT, /*; script interrupt */
+ 0
+ };
+ULONG end_script[]={0};
+/***********************************************************************/
+
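Every zero-valued slot above that carries an "offset ..." comment is a placeholder: before the script is copied to the chip and started, the controller driver that includes this header patches those slots with the bus addresses of the labels they name. A minimal sketch of that fix-up, assuming the era's virt_to_bus() and the arrays declared above (illustrative only; the real patching code is in the driver, not this header):

/* Sketch only: fill a few of the placeholder slots with bus addresses
 * the SCRIPTS processor can jump to. */
static void example_patch_script(void)
{
    jmp_select0[0]     = (ULONG) virt_to_bus(select0);
    jmp_reselected[0]  = (ULONG) virt_to_bus(reselected);
    jmp_check_phase[0] = (ULONG) virt_to_bus(check_phase);
    /* ...and so on for every "offset" slot in the tables above. */
}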
diff --git a/linux/src/drivers/scsi/scsi.c b/linux/src/drivers/scsi/scsi.c
new file mode 100644
index 0000000..62c4b10
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi.c
@@ -0,0 +1,3585 @@
+/*
+ * scsi.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * generic mid-level SCSI driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Bug correction thanks go to :
+ * Rik Faith <faith@cs.unc.edu>
+ * Tommy Thorn <tthorn>
+ * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Native multichannel, wide scsi, /proc/scsi and hot plugging
+ * support added by Michael Neuffer <mike@i-connect.net>
+ *
+ * Added request_module("scsi_hostadapter") for kerneld:
+ * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/conf.modules)
+ * Bjorn Ekwall <bj0rn@blox.se>
+ *
+ * Major improvements to the timeout, abort, and reset processing,
+ * as well as performance modifications for large queue depths by
+ * Leonard N. Zubkoff <lnz@dandelion.com>
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+
+#undef USE_STATIC_SCSI_MEMORY
+
+/*
+static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/scsi.c,v 1.1 1999/04/26 05:54:57 tb Exp $";
+*/
+
+
+/* Command groups 3 and 4 are reserved and should never be used. */
+const unsigned char scsi_command_size[8] = { 6, 10, 10, 12, 12, 12, 10, 10 };
+
+#define INTERNAL_ERROR (panic ("Internal error in file %s, line %d.\n", __FILE__, __LINE__))
+
+/*
+ * PAGE_SIZE must be a multiple of the sector size (512). True
+ * for all reasonably recent architectures (even the VAX...).
+ */
+#define SECTOR_SIZE 512
+#define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
+
+#if SECTORS_PER_PAGE <= 8
+ typedef unsigned char FreeSectorBitmap;
+#elif SECTORS_PER_PAGE <= 32
+ typedef unsigned int FreeSectorBitmap;
+#else
+# error You lose.
+#endif
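On i386, where PAGE_SIZE is 4096, this works out to 8 sectors per page, so the one-byte bitmap is chosen; a quick standalone check of the arithmetic (not kernel code):

#include <stdio.h>

int main(void)
{
    unsigned long page_size   = 4096;  /* i386 PAGE_SIZE */
    unsigned long sector_size = 512;

    /* 8 sectors per page: an unsigned char (8 bits) can track which
     * 512-byte sectors of a DMA page are currently free. */
    printf("sectors per page: %lu\n", page_size / sector_size);
    return 0;
}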
+
+static void scsi_done (Scsi_Cmnd *SCpnt);
+static int update_timeout (Scsi_Cmnd *, int);
+static void print_inquiry(unsigned char *data);
+static void scsi_times_out (Scsi_Cmnd * SCpnt);
+static int scan_scsis_single (int channel,int dev,int lun,int * max_scsi_dev ,
+ int * sparse_lun, Scsi_Device ** SDpnt, Scsi_Cmnd * SCpnt,
+ struct Scsi_Host *shpnt, char * scsi_result);
+void scsi_build_commandblocks(Scsi_Device * SDpnt);
+
+#ifdef CONFIG_MODULES
+extern struct symbol_table scsi_symbol_table;
+#endif
+
+static FreeSectorBitmap * dma_malloc_freelist = NULL;
+static int scsi_need_isa_bounce_buffers;
+static unsigned int dma_sectors = 0;
+unsigned int dma_free_sectors = 0;
+unsigned int need_isa_buffer = 0;
+static unsigned char ** dma_malloc_pages = NULL;
+
+static int time_start;
+static int time_elapsed;
+static volatile struct Scsi_Host * host_active = NULL;
+#define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \
+ || (HOST->can_queue && HOST->host_busy >= HOST->can_queue))
+
+const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
+{
+ "Direct-Access ",
+ "Sequential-Access",
+ "Printer ",
+ "Processor ",
+ "WORM ",
+ "CD-ROM ",
+ "Scanner ",
+ "Optical Device ",
+ "Medium Changer ",
+ "Communications "
+};
+
+
+/*
+ * global variables :
+ * scsi_devices an array of these specifying the address for each
+ * (host, id, LUN)
+ */
+
+Scsi_Device * scsi_devices = NULL;
+
+/* Process ID of SCSI commands */
+unsigned long scsi_pid = 0;
+
+static unsigned long serial_number = 0;
+
+static unsigned char generic_sense[6] = {REQUEST_SENSE, 0,0,0, 255, 0};
+static void resize_dma_pool(void);
+
+/* This variable is merely a hook so that we can debug the kernel with gdb. */
+Scsi_Cmnd * last_cmnd = NULL;
+
+/* This is the pointer to the /proc/scsi code.
+ * It is only initialized to !=0 if the scsi code is present
+ */
+#if CONFIG_PROC_FS
+extern int (* dispatch_scsi_info_ptr)(int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+extern int dispatch_scsi_info(int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+struct proc_dir_entry proc_scsi_scsi = {
+ PROC_SCSI_SCSI, 4, "scsi",
+ S_IFREG | S_IRUGO | S_IWUSR, 1, 0, 0, 0,
+ NULL,
+ NULL, NULL,
+ NULL, NULL, NULL
+};
+#endif
+
+/*
+ * This is the number of clock ticks we should wait before we time out
+ * and abort the command. This is for where the scsi.c module generates
+ * the command, not where it originates from a higher level, in which
+ * case the timeout is specified there.
+ *
+ * ABORT_TIMEOUT and RESET_TIMEOUT are the timeouts for RESET and ABORT
+ * respectively.
+ */
+
+#ifdef DEBUG_TIMEOUT
+static void scsi_dump_status(void);
+#endif
+
+
+#ifdef DEBUG
+ #define SCSI_TIMEOUT (5*HZ)
+#else
+ #define SCSI_TIMEOUT (2*HZ)
+#endif
+
+#ifdef DEBUG
+ #define SENSE_TIMEOUT SCSI_TIMEOUT
+ #define ABORT_TIMEOUT SCSI_TIMEOUT
+ #define RESET_TIMEOUT SCSI_TIMEOUT
+#else
+ #define SENSE_TIMEOUT (5*HZ/10)
+ #define RESET_TIMEOUT (5*HZ/10)
+ #define ABORT_TIMEOUT (5*HZ/10)
+#endif
+
+#define MIN_RESET_DELAY (2*HZ)
+
+/* Do not call reset on error if we just did a reset within 15 sec. */
+#define MIN_RESET_PERIOD (15*HZ)
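All of these are expressed in clock ticks, so their wall-clock values depend on HZ; with the classic i386 value HZ = 100 they come out to roughly 2 s for SCSI_TIMEOUT, 0.5 s for the sense/abort/reset timeouts, and 15 s for MIN_RESET_PERIOD. A quick standalone check (not kernel code):

#include <stdio.h>

int main(void)
{
    int hz = 100;  /* classic i386 tick rate; other ports may differ */

    printf("SCSI_TIMEOUT     = %d ticks (2.0 s)\n", 2 * hz);
    printf("SENSE_TIMEOUT    = %d ticks (0.5 s)\n", 5 * hz / 10);
    printf("MIN_RESET_PERIOD = %d ticks (15.0 s)\n", 15 * hz);
    return 0;
}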
+
+/* The following devices are known not to tolerate a lun != 0 scan for
+ * one reason or another. Some will respond to all luns, others will
+ * lock up.
+ */
+
+#define BLIST_NOLUN 0x01
+#define BLIST_FORCELUN 0x02
+#define BLIST_BORKEN 0x04
+#define BLIST_KEY 0x08
+#define BLIST_SINGLELUN 0x10
+#define BLIST_NOTQ 0x20
+#define BLIST_SPARSELUN 0x40
+#define BLIST_MAX5LUN 0x80
+
+struct dev_info{
+ const char * vendor;
+ const char * model;
+ const char * revision; /* Latest revision known to be bad. Not used yet */
+ unsigned flags;
+};
+
+/*
+ * This is what was previously known as the blacklist. The concept
+ * has been expanded so that we can specify other types of things we
+ * need to be aware of.
+ */
+static struct dev_info device_list[] =
+{
+{"TEAC","CD-R55S","1.0H", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"CHINON","CD-ROM CDS-431","H42", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"CHINON","CD-ROM CDS-535","Q14", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"DENON","DRD-25X","V", BLIST_NOLUN}, /* Locks up if probed for lun != 0 */
+{"HITACHI","DK312C","CM81", BLIST_NOLUN}, /* Responds to all lun - dtg */
+{"HITACHI","DK314C","CR21" , BLIST_NOLUN}, /* responds to all lun */
+{"IMS", "CDD521/10","2.06", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","XT-3280","PR02", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","XT-4380S","B3C", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"MAXTOR","MXT-1240S","I1.2", BLIST_NOLUN}, /* Locks up when LUN>0 polled */
+{"MAXTOR","XT-4170S","B5A", BLIST_NOLUN}, /* Locks-up sometimes when LUN>0 polled. */
+{"MAXTOR","XT-8760S","B7B", BLIST_NOLUN}, /* guess what? */
+{"MEDIAVIS","RENO CD-ROMX2A","2.03",BLIST_NOLUN},/*Responds to all lun */
+{"MICROP", "4110", "*", BLIST_NOTQ}, /* Buggy Tagged Queuing */
+{"NEC","CD-ROM DRIVE:841","1.0", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
+{"RODIME","RO3000S","2.33", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"SANYO", "CRD-250S", "1.20", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for aha152x controller, which causes
+ * SCSI code to reset bus.*/
+{"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for aha152x controller, which causes
+ * SCSI code to reset bus.*/
+{"SEAGATE", "ST296","921", BLIST_NOLUN}, /* Responds to all lun */
+{"SEAGATE","ST1581","6538",BLIST_NOLUN}, /* Responds to all lun */
+{"SONY","CD-ROM CDU-541","4.3d", BLIST_NOLUN},
+{"SONY","CD-ROM CDU-55S","1.0i", BLIST_NOLUN},
+{"SONY","CD-ROM CDU-561","1.7x", BLIST_NOLUN},
+{"TANDBERG","TDC 3600","U07", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"TEAC","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for seagate controller, which causes
+ * SCSI code to reset bus.*/
+{"TEXEL","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
+ * for seagate controller, which causes
+ * SCSI code to reset bus.*/
+{"QUANTUM","LPS525S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
+{"QUANTUM","PD1225S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
+{"MEDIAVIS","CDR-H93MV","1.31", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"SANKYO", "CP525","6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
+{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
+{"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
+{"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+
+/*
+ * Other types of devices that have special flags.
+ */
+{"SONY","CD-ROM CDU-8001","*", BLIST_BORKEN},
+{"TEXEL","CD-ROM","1.06", BLIST_BORKEN},
+{"IOMEGA","Io20S *F","*", BLIST_KEY},
+{"INSITE","Floptical F*8I","*", BLIST_KEY},
+{"INSITE","I325VM","*", BLIST_KEY},
+{"NRC","MBR-7","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"NRC","MBR-7.4","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"REGAL","CDC-4X","*", BLIST_MAX5LUN | BLIST_SINGLELUN},
+{"NAKAMICH","MJ-4.8S","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"NAKAMICH","MJ-5.16S","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"PIONEER","CD-ROM DRM-600","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"PIONEER","CD-ROM DRM-602X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"PIONEER","CD-ROM DRM-604X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"EMULEX","MD21/S2 ESDI","*", BLIST_SINGLELUN},
+{"CANON","IPUBJD","*", BLIST_SPARSELUN},
+{"MATSHITA","PD","*", BLIST_FORCELUN | BLIST_SINGLELUN},
+{"YAMAHA","CDR100","1.00", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"YAMAHA","CDR102","1.00", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
+{"nCipher","Fastness Crypto","*", BLIST_FORCELUN},
+/*
+ * Must be at end of list...
+ */
+{NULL, NULL, NULL}
+};
+
+static int get_device_flags(unsigned char * response_data){
+ int i = 0;
+ unsigned char * pnt;
+ for(i=0; 1; i++){
+ if(device_list[i].vendor == NULL) return 0;
+ pnt = &response_data[8];
+ while(*pnt && *pnt == ' ') pnt++;
+ if(memcmp(device_list[i].vendor, pnt,
+ strlen(device_list[i].vendor))) continue;
+ pnt = &response_data[16];
+ while(*pnt && *pnt == ' ') pnt++;
+ if(memcmp(device_list[i].model, pnt,
+ strlen(device_list[i].model))) continue;
+ return device_list[i].flags;
+ }
+ return 0;
+}
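get_device_flags() is handed the raw INQUIRY response, whose vendor and model strings start at bytes 8 and 16; a minimal sketch of how scan code would consult the list, assuming a filled-in scsi_result buffer as in scan_scsis_single() below (illustrative only):

/* Sketch only: check whether the device whose INQUIRY data is in
 * scsi_result[] is safe to probe at LUNs other than 0. */
static int example_may_scan_higher_luns(unsigned char *scsi_result)
{
    int flags = get_device_flags(scsi_result);

    if (flags & BLIST_NOLUN)
        return 0;   /* listed device locks up or misbehaves on lun != 0 */
    return 1;
}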
+
+void scsi_make_blocked_list(void) {
+ int block_count = 0, index;
+ unsigned long flags;
+ struct Scsi_Host * sh[128], * shpnt;
+
+ /*
+ * Create a circular linked list from the scsi hosts which have
+ * the "wish_block" field in the Scsi_Host structure set.
+ * The blocked list should include all the scsi hosts using ISA DMA.
+ * In some systems, using two dma channels simultaneously causes
+ * unpredictable results.
+ * Among the scsi hosts in the blocked list, only one host at a time
+ * is allowed to have active commands queued. The transition from
+ * one active host to the next one is allowed only when host_busy == 0
+ * for the active host (which implies host_busy == 0 for all the hosts
+ * in the list). Moreover for block devices the transition to a new
+ * active host is allowed only when a request is completed, since a
+ * block device request can be divided into multiple scsi commands
+ * (when there are few sg lists or clustering is disabled).
+ *
+ * (DB, 4 Feb 1995)
+ */
+
+ save_flags(flags);
+ cli();
+ host_active = NULL;
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) {
+
+#if 0
+ /*
+       * Is this a candidate for the blocked list?
+ * Useful to put into the blocked list all the hosts whose driver
+ * does not know about the host->block feature.
+ */
+ if (shpnt->unchecked_isa_dma) shpnt->wish_block = 1;
+#endif
+
+ if (shpnt->wish_block) sh[block_count++] = shpnt;
+ }
+
+ if (block_count == 1) sh[0]->block = NULL;
+
+ else if (block_count > 1) {
+
+ for(index = 0; index < block_count - 1; index++) {
+ sh[index]->block = sh[index + 1];
+ printk("scsi%d : added to blocked host list.\n",
+ sh[index]->host_no);
+ }
+
+ sh[block_count - 1]->block = sh[0];
+ printk("scsi%d : added to blocked host list.\n",
+ sh[index]->host_no);
+ }
+
+ restore_flags(flags);
+}
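+
+/*
+ * Illustrative sketch (added comment): with three hosts that have wish_block
+ * set, the loop above links them into a ring
+ *
+ *   sh[0]->block = sh[1];
+ *   sh[1]->block = sh[2];
+ *   sh[2]->block = sh[0];
+ *
+ * whereas a single wish_block host simply gets sh[0]->block = NULL, i.e. no
+ * blocking ring at all.
+ */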
+
+static void scan_scsis_done (Scsi_Cmnd * SCpnt)
+{
+
+#ifdef DEBUG
+ printk ("scan_scsis_done(%p, %06x)\n", SCpnt->host, SCpnt->result);
+#endif
+ SCpnt->request.rq_status = RQ_SCSI_DONE;
+
+ if (SCpnt->request.sem != NULL)
+ up(SCpnt->request.sem);
+}
+
+#ifdef CONFIG_SCSI_MULTI_LUN
+static int max_scsi_luns = 8;
+#else
+static int max_scsi_luns = 1;
+#endif
+
+void scsi_luns_setup(char *str, int *ints) {
+ if (ints[0] != 1)
+ printk("scsi_luns_setup : usage max_scsi_luns=n (n should be between 1 and 8)\n");
+ else
+ max_scsi_luns = ints[1];
+}
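+
+/*
+ * Usage note (added comment): as the message above suggests, this handler is
+ * wired to a "max_scsi_luns=n" boot argument; a command line containing
+ * max_scsi_luns=4 arrives here as ints[0] == 1, ints[1] == 4, and limits the
+ * per-target scan in scan_scsis() below to LUNs 0-3 (unless a device flag
+ * such as BLIST_FORCELUN overrides it).
+ */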
+
+/*
+ * Detecting SCSI devices :
+ * We scan the buses of every present host adapter, from ID 0 to ID (max_id).
+ * We use the INQUIRY command, determine device type, and pass the ID /
+ * lun address of all sequential devices to the tape driver, all random
+ * devices to the disk driver.
+ */
+static void scan_scsis (struct Scsi_Host *shpnt, unchar hardcoded,
+ unchar hchannel, unchar hid, unchar hlun)
+{
+ int dev, lun, channel;
+ unsigned char scsi_result0[256];
+ unsigned char *scsi_result;
+ Scsi_Device *SDpnt;
+ int max_dev_lun, sparse_lun;
+ Scsi_Cmnd *SCpnt;
+
+ SCpnt = (Scsi_Cmnd *) scsi_init_malloc (sizeof (Scsi_Cmnd), GFP_ATOMIC | GFP_DMA);
+ SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
+ memset (SCpnt, 0, sizeof (Scsi_Cmnd));
+
+
+ /* Make sure we have something that is valid for DMA purposes */
+ scsi_result = ( ( !shpnt->unchecked_isa_dma )
+ ? &scsi_result0[0] : scsi_init_malloc (512, GFP_DMA));
+
+ if (scsi_result == NULL) {
+ printk ("Unable to obtain scsi_result buffer\n");
+ goto leave;
+ }
+
+  /* We must chain ourselves into the host_queue, so commands can time out */
+ if(shpnt->host_queue)
+ shpnt->host_queue->prev = SCpnt;
+ SCpnt->next = shpnt->host_queue;
+ SCpnt->prev = NULL;
+ shpnt->host_queue = SCpnt;
+
+
+ if (hardcoded == 1) {
+ Scsi_Device *oldSDpnt=SDpnt;
+ struct Scsi_Device_Template * sdtpnt;
+ channel = hchannel;
+ if(channel > shpnt->max_channel) goto leave;
+ dev = hid;
+ if(dev >= shpnt->max_id) goto leave;
+ lun = hlun;
+ if(lun >= shpnt->max_lun) goto leave;
+ scan_scsis_single (channel, dev, lun, &max_dev_lun, &sparse_lun,
+ &SDpnt, SCpnt, shpnt, scsi_result);
+ if(SDpnt!=oldSDpnt) {
+
+      /* it could happen that the block device hasn't been initialized yet */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ oldSDpnt->scsi_request_fn = NULL;
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) {
+ (*sdtpnt->attach)(oldSDpnt);
+ if(oldSDpnt->attached) scsi_build_commandblocks(oldSDpnt);}
+ resize_dma_pool();
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ {(*sdtpnt->finish)();}
+ }
+ }
+
+ }
+ else {
+ for (channel = 0; channel <= shpnt->max_channel; channel++) {
+ for (dev = 0; dev < shpnt->max_id; ++dev) {
+ if (shpnt->this_id != dev) {
+
+ /*
+	   * We need the for loop so that continue, etc. work correctly. We put this in
+ * a variable so that we can override it during the scan if we
+ * detect a device *KNOWN* to have multiple logical units.
+ */
+ max_dev_lun = (max_scsi_luns < shpnt->max_lun ?
+ max_scsi_luns : shpnt->max_lun);
+ sparse_lun = 0;
+ for (lun = 0; lun < max_dev_lun; ++lun) {
+ if (!scan_scsis_single (channel, dev, lun, &max_dev_lun,
+ &sparse_lun, &SDpnt, SCpnt, shpnt,
+ scsi_result)
+ && !sparse_lun)
+ break; /* break means don't probe further for luns!=0 */
+ } /* for lun ends */
+ } /* if this_id != id ends */
+ } /* for dev ends */
+ } /* for channel ends */
+ } /* if/else hardcoded */
+
+ leave:
+
+ {/* Unchain SCpnt from host_queue */
+ Scsi_Cmnd *prev, *next, *hqptr;
+    for(hqptr = shpnt->host_queue; hqptr && hqptr != SCpnt; hqptr = hqptr->next) ;
+ if(hqptr) {
+ prev = hqptr->prev;
+ next = hqptr->next;
+ if(prev)
+ prev->next = next;
+ else
+ shpnt->host_queue = next;
+ if(next) next->prev = prev;
+ }
+ }
+
+ /* Last device block does not exist. Free memory. */
+ if (SDpnt != NULL)
+ scsi_init_free ((char *) SDpnt, sizeof (Scsi_Device));
+
+ if (SCpnt != NULL)
+ scsi_init_free ((char *) SCpnt, sizeof (Scsi_Cmnd));
+
+ /* If we allocated a buffer so we could do DMA, free it now */
+ if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
+ scsi_init_free (scsi_result, 512);
+
+}
+
+/*
+ * The worker for scan_scsis.
+ * Returns 0 to mean "don't probe further LUNs (lun != 0) on this target"; 1 means go on.
+ * Global variables used : scsi_devices(linked list)
+ */
+int scan_scsis_single (int channel, int dev, int lun, int *max_dev_lun,
+ int *sparse_lun, Scsi_Device **SDpnt2, Scsi_Cmnd * SCpnt,
+ struct Scsi_Host * shpnt, char *scsi_result)
+{
+ unsigned char scsi_cmd[12];
+ struct Scsi_Device_Template *sdtpnt;
+ Scsi_Device * SDtail, *SDpnt=*SDpnt2;
+ int bflags, type=-1;
+
+ SDtail = scsi_devices;
+ if (scsi_devices)
+ while (SDtail->next)
+ SDtail = SDtail->next;
+
+ memset (SDpnt, 0, sizeof (Scsi_Device));
+ SDpnt->host = shpnt;
+ SDpnt->id = dev;
+ SDpnt->lun = lun;
+ SDpnt->channel = channel;
+
+ /* Some low level driver could use device->type (DB) */
+ SDpnt->type = -1;
+
+ /*
+ * Assume that the device will have handshaking problems, and then fix this
+ * field later if it turns out it doesn't
+ */
+ SDpnt->borken = 1;
+ SDpnt->was_reset = 0;
+ SDpnt->expecting_cc_ua = 0;
+
+ scsi_cmd[0] = TEST_UNIT_READY;
+ scsi_cmd[1] = lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[4] = scsi_cmd[5] = 0;
+
+ SCpnt->host = SDpnt->host;
+ SCpnt->device = SDpnt;
+ SCpnt->target = SDpnt->id;
+ SCpnt->lun = SDpnt->lun;
+ SCpnt->channel = SDpnt->channel;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result,
+ 256, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
+ down (&sem);
+ }
+
+#if defined(DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: scan_scsis_single id %d lun %d. Return code 0x%08x\n",
+ dev, lun, SCpnt->result);
+ print_driverbyte(SCpnt->result); print_hostbyte(SCpnt->result);
+ printk("\n");
+#endif
+
+ if (SCpnt->result) {
+ if (((driver_byte (SCpnt->result) & DRIVER_SENSE) ||
+ (status_byte (SCpnt->result) & CHECK_CONDITION)) &&
+ ((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) {
+ if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) &&
+ ((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) &&
+ ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0))
+ return 1;
+ }
+ else
+ return 0;
+ }
+
+#if defined (DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: performing INQUIRY\n");
+#endif
+ /*
+ * Build an INQUIRY command block.
+ */
+ scsi_cmd[0] = INQUIRY;
+ scsi_cmd[1] = (lun << 5) & 0xe0;
+ scsi_cmd[2] = 0;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = 255;
+ scsi_cmd[5] = 0;
+ SCpnt->cmd_len = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result,
+ 256, scan_scsis_done, SCSI_TIMEOUT, 3);
+ down (&sem);
+ }
+
+#if defined(DEBUG) || defined(DEBUG_INIT)
+ printk ("scsi: INQUIRY %s with code 0x%x\n",
+ SCpnt->result ? "failed" : "successful", SCpnt->result);
+#endif
+
+ if (SCpnt->result)
+ return 0; /* assume no peripheral if any sort of error */
+
+ /*
+ * Check the peripheral qualifier field - this tells us whether LUNS
+ * are supported here or not.
+ */
+ if( (scsi_result[0] >> 5) == 3 )
+ {
+ return 0; /* assume no peripheral if any sort of error */
+ }
+
+ /*
+   * It would seem that some TOSHIBA CD-ROM drives get things wrong
+ */
+ if (!strncmp (scsi_result + 8, "TOSHIBA", 7) &&
+ !strncmp (scsi_result + 16, "CD-ROM", 6) &&
+ scsi_result[0] == TYPE_DISK) {
+ scsi_result[0] = TYPE_ROM;
+ scsi_result[1] |= 0x80; /* removable */
+ }
+
+ if (!strncmp (scsi_result + 8, "NEC", 3)) {
+ if (!strncmp (scsi_result + 16, "CD-ROM DRIVE:84 ", 16) ||
+ !strncmp (scsi_result + 16, "CD-ROM DRIVE:25", 15))
+ SDpnt->manufacturer = SCSI_MAN_NEC_OLDCDR;
+ else
+ SDpnt->manufacturer = SCSI_MAN_NEC;
+ }
+ else if (!strncmp (scsi_result + 8, "TOSHIBA", 7))
+ SDpnt->manufacturer = SCSI_MAN_TOSHIBA;
+ else if (!strncmp (scsi_result + 8, "SONY", 4))
+ SDpnt->manufacturer = SCSI_MAN_SONY;
+ else if (!strncmp (scsi_result + 8, "PIONEER", 7))
+ SDpnt->manufacturer = SCSI_MAN_PIONEER;
+ else
+ SDpnt->manufacturer = SCSI_MAN_UNKNOWN;
+
+ memcpy (SDpnt->vendor, scsi_result + 8, 8);
+ memcpy (SDpnt->model, scsi_result + 16, 16);
+ memcpy (SDpnt->rev, scsi_result + 32, 4);
+
+ SDpnt->removable = (0x80 & scsi_result[1]) >> 7;
+ SDpnt->lockable = SDpnt->removable;
+ SDpnt->changed = 0;
+ SDpnt->access_count = 0;
+ SDpnt->busy = 0;
+ SDpnt->has_cmdblocks = 0;
+ /*
+   * Currently, all sequential devices are assumed to be tapes and all
+   * random-access devices disks, with the appropriate read-only flag set
+   * for ROM / WORM devices.
+ */
+ switch (type = (scsi_result[0] & 0x1f)) {
+ case TYPE_TAPE:
+ case TYPE_DISK:
+ case TYPE_MOD:
+ case TYPE_PROCESSOR:
+ case TYPE_SCANNER:
+ case TYPE_MEDIUM_CHANGER:
+ SDpnt->writeable = 1;
+ break;
+ case TYPE_WORM:
+ case TYPE_ROM:
+ SDpnt->writeable = 0;
+ break;
+ default:
+ printk ("scsi: unknown type %d\n", type);
+ }
+
+ SDpnt->single_lun = 0;
+ SDpnt->soft_reset =
+ (scsi_result[7] & 1) && ((scsi_result[3] & 7) == 2);
+ SDpnt->random = (type == TYPE_TAPE) ? 0 : 1;
+ SDpnt->type = (type & 0x1f);
+
+ print_inquiry (scsi_result);
+
+ for (sdtpnt = scsi_devicelist; sdtpnt;
+ sdtpnt = sdtpnt->next)
+ if (sdtpnt->detect)
+ SDpnt->attached +=
+ (*sdtpnt->detect) (SDpnt);
+
+ SDpnt->scsi_level = scsi_result[2] & 0x07;
+ if (SDpnt->scsi_level >= 2 ||
+ (SDpnt->scsi_level == 1 &&
+ (scsi_result[3] & 0x0f) == 1))
+ SDpnt->scsi_level++;
+
+ /*
+ * Accommodate drivers that want to sleep when they should be in a polling
+ * loop.
+ */
+ SDpnt->disconnect = 0;
+
+ /*
+ * Get any flags for this device.
+ */
+ bflags = get_device_flags (scsi_result);
+
+ /*
+ * Set the tagged_queue flag for SCSI-II devices that purport to support
+ * tagged queuing in the INQUIRY data.
+ */
+ SDpnt->tagged_queue = 0;
+ if ((SDpnt->scsi_level >= SCSI_2) &&
+ (scsi_result[7] & 2) &&
+ !(bflags & BLIST_NOTQ)) {
+ SDpnt->tagged_supported = 1;
+ SDpnt->current_tag = 0;
+ }
+
+ /*
+ * Some revisions of the Texel CD ROM drives have handshaking problems when
+ * used with the Seagate controllers. Before we know what type of device
+ * we're talking to, we assume it's borken and then change it here if it
+ * turns out that it isn't a TEXEL drive.
+ */
+ if ((bflags & BLIST_BORKEN) == 0)
+ SDpnt->borken = 0;
+
+ /*
+ * If we want to only allow I/O to one of the luns attached to this device
+ * at a time, then we set this flag.
+ */
+ if (bflags & BLIST_SINGLELUN)
+ SDpnt->single_lun = 1;
+
+ /*
+   * These devices need this "key" to unlock them before we can use them
+ */
+ if ((bflags & BLIST_KEY) != 0) {
+ printk ("Unlocked floptical drive.\n");
+ SDpnt->lockable = 0;
+ scsi_cmd[0] = MODE_SENSE;
+ scsi_cmd[1] = (lun << 5) & 0xe0;
+ scsi_cmd[2] = 0x2e;
+ scsi_cmd[3] = 0;
+ scsi_cmd[4] = 0x2a;
+ scsi_cmd[5] = 0;
+ SCpnt->cmd_len = 0;
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt, (void *) scsi_cmd,
+ (void *) scsi_result, 0x2a,
+ scan_scsis_done, SCSI_TIMEOUT, 3);
+ down (&sem);
+ }
+ }
+ /* Add this device to the linked list at the end */
+ if (SDtail)
+ SDtail->next = SDpnt;
+ else
+ scsi_devices = SDpnt;
+ SDtail = SDpnt;
+
+ SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
+ *SDpnt2=SDpnt;
+ if (!SDpnt)
+ printk ("scsi: scan_scsis_single: Cannot malloc\n");
+
+
+ /*
+ * Some scsi devices cannot be polled for lun != 0 due to firmware bugs
+ */
+ if (bflags & BLIST_NOLUN)
+ return 0; /* break; */
+
+ /*
+ * If this device is known to support sparse multiple units, override the
+ * other settings, and scan all of them.
+ */
+ if (bflags & BLIST_SPARSELUN) {
+ *max_dev_lun = 8;
+ *sparse_lun = 1;
+ return 1;
+ }
+
+ /*
+ * If this device is known to support multiple units, override the other
+ * settings, and scan all of them.
+ */
+ if (bflags & BLIST_FORCELUN) {
+ *max_dev_lun = 8;
+ return 1;
+ }
+
+ /*
+ * REGAL CDC-4X: avoid hang after LUN 4
+ */
+ if (bflags & BLIST_MAX5LUN) {
+ *max_dev_lun = 5;
+ return 1;
+ }
+
+ /*
+   * We assume the device can't handle lun != 0 if:
+   *  - it reports SCSI-0 (ANSI SCSI Revision 0), e.g. old drives like the
+   *    MAXTOR XT-3280, or
+   *  - it reports SCSI-1 (ANSI SCSI Revision 1) and Response Data Format 0.
+ */
+ if (((scsi_result[2] & 0x07) == 0)
+ ||
+ ((scsi_result[2] & 0x07) == 1 &&
+ (scsi_result[3] & 0x0f) == 0))
+ return 0;
+ return 1;
+}
+
+/*
+ * Flag bits for the internal_timeout array
+ */
+#define NORMAL_TIMEOUT 0
+#define IN_ABORT 1
+#define IN_RESET 2
+#define IN_RESET2 4
+#define IN_RESET3 8
+
+/*
+ * This is our time out function, called when the timer expires for a
+ * given host adapter. It will attempt to abort the currently executing
+ * command and, failing that, escalate through bus and host adapter resets.
+ */
+
+static void scsi_times_out (Scsi_Cmnd * SCpnt)
+{
+
+ switch (SCpnt->internal_timeout & (IN_ABORT | IN_RESET | IN_RESET2 | IN_RESET3))
+ {
+ case NORMAL_TIMEOUT:
+ {
+#ifdef DEBUG_TIMEOUT
+ scsi_dump_status();
+#endif
+ }
+
+ if (!scsi_abort (SCpnt, DID_TIME_OUT))
+ return;
+ case IN_ABORT:
+ printk("SCSI host %d abort (pid %ld) timed out - resetting\n",
+ SCpnt->host->host_no, SCpnt->pid);
+ if (!scsi_reset (SCpnt, SCSI_RESET_ASYNCHRONOUS))
+ return;
+ case IN_RESET:
+ case (IN_ABORT | IN_RESET):
+ /* This might be controversial, but if there is a bus hang,
+ * you might conceivably want the machine up and running
+ * esp if you have an ide disk.
+ */
+ printk("SCSI host %d channel %d reset (pid %ld) timed out - "
+ "trying harder\n",
+ SCpnt->host->host_no, SCpnt->channel, SCpnt->pid);
+ SCpnt->internal_timeout &= ~IN_RESET;
+ SCpnt->internal_timeout |= IN_RESET2;
+ scsi_reset (SCpnt,
+ SCSI_RESET_ASYNCHRONOUS | SCSI_RESET_SUGGEST_BUS_RESET);
+ return;
+ case IN_RESET2:
+ case (IN_ABORT | IN_RESET2):
+ /* Obviously the bus reset didn't work.
+ * Let's try even harder and call for an HBA reset.
+ * Maybe the HBA itself crashed and this will shake it loose.
+ */
+ printk("SCSI host %d reset (pid %ld) timed out - trying to shake it loose\n",
+ SCpnt->host->host_no, SCpnt->pid);
+ SCpnt->internal_timeout &= ~(IN_RESET | IN_RESET2);
+ SCpnt->internal_timeout |= IN_RESET3;
+ scsi_reset (SCpnt,
+ SCSI_RESET_ASYNCHRONOUS | SCSI_RESET_SUGGEST_HOST_RESET);
+ return;
+
+ default:
+ printk("SCSI host %d reset (pid %ld) timed out again -\n",
+ SCpnt->host->host_no, SCpnt->pid);
+ printk("probably an unrecoverable SCSI bus or device hang.\n");
+ return;
+
+ }
+
+}
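+
+/*
+ * Summary (added comment) of the escalation implemented above: the first
+ * timeout tries scsi_abort(); if the abort itself times out (or cannot be
+ * handled) we issue an asynchronous reset; if that times out we reset again
+ * suggesting a bus reset (IN_RESET2) and then a host adapter reset
+ * (IN_RESET3); after that we only report that the bus or device appears to
+ * be hung.
+ */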
+
+
+/* This function takes a quick look at a request, and decides if it
+ * can be queued now, or if there would be a stall while waiting for
+ * something else to finish. This routine assumes that interrupts are
+ * turned off when entering the routine. It is the responsibility
+ * of the calling code to ensure that this is the case.
+ */
+
+Scsi_Cmnd * request_queueable (struct request * req, Scsi_Device * device)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ int tablesize;
+ Scsi_Cmnd * found = NULL;
+ struct buffer_head * bh, *bhp;
+
+ if (!device)
+ panic ("No device passed to request_queueable().\n");
+
+ if (req && req->rq_status == RQ_INACTIVE)
+ panic("Inactive in request_queueable");
+
+ /*
+ * Look for a free command block. If we have been instructed not to queue
+ * multiple commands to multi-lun devices, then check to see what else is
+   * going on for this device first.
+ */
+
+ if (!device->single_lun) {
+ SCpnt = device->device_queue;
+ while(SCpnt){
+ if(SCpnt->request.rq_status == RQ_INACTIVE) break;
+ SCpnt = SCpnt->device_next;
+ }
+ } else {
+ SCpnt = device->host->host_queue;
+ while(SCpnt){
+ if(SCpnt->channel == device->channel
+ && SCpnt->target == device->id) {
+ if (SCpnt->lun == device->lun) {
+ if(found == NULL
+ && SCpnt->request.rq_status == RQ_INACTIVE)
+ {
+ found=SCpnt;
+ }
+ }
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ /*
+ * I think that we should really limit things to one
+ * outstanding command per device - this is what tends
+ * to trip up buggy firmware.
+ */
+ return NULL;
+ }
+ }
+ SCpnt = SCpnt->next;
+ }
+ SCpnt = found;
+ }
+
+ if (!SCpnt) return NULL;
+
+ if (SCSI_BLOCK(device->host)) return NULL;
+
+ if (req) {
+ memcpy(&SCpnt->request, req, sizeof(struct request));
+ tablesize = device->host->sg_tablesize;
+ bhp = bh = req->bh;
+ if(!tablesize) bh = NULL;
+ /* Take a quick look through the table to see how big it is.
+ * We already have our copy of req, so we can mess with that
+ * if we want to.
+ */
+ while(req->nr_sectors && bh){
+ bhp = bhp->b_reqnext;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ if(!tablesize) break;
+ bh = bhp;
+ }
+ if(req->nr_sectors && bh && bh->b_reqnext){ /* Any leftovers? */
+ SCpnt->request.bhtail = bh;
+ req->bh = bh->b_reqnext; /* Divide request */
+ bh->b_reqnext = NULL;
+ bh = req->bh;
+
+ /* Now reset things so that req looks OK */
+ SCpnt->request.nr_sectors -= req->nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ SCpnt->request.sem = NULL; /* Wait until whole thing done */
+ } else {
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+ }
+ } else {
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Busy, but no request */
+ SCpnt->request.sem = NULL; /* And no one is waiting for the device
+ * either */
+ }
+
+ SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
+ SCpnt->old_use_sg = 0;
+ SCpnt->transfersize = 0;
+ SCpnt->underflow = 0;
+ SCpnt->cmd_len = 0;
+
+/* Since not everyone seems to set the device info correctly
+ * before Scsi_Cmnd gets sent out to scsi_do_command, we do it here.
+ */
+ SCpnt->channel = device->channel;
+ SCpnt->lun = device->lun;
+ SCpnt->target = device->id;
+
+ return SCpnt;
+}
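+
+/*
+ * Note (added comment): the buffer-head walk above sizes the command against
+ * the host's sg_tablesize. If the whole request fits, req is marked
+ * RQ_INACTIVE and sleepers on wait_for_request are woken; otherwise the
+ * request is divided: SCpnt->request keeps the leading buffer heads (bhtail
+ * and nr_sectors adjusted accordingly) and req is trimmed to start at the
+ * first leftover buffer head so that a later command can pick it up.
+ */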
+
+/* This function returns a structure pointer that will be valid for
+ * the device. The wait parameter tells us whether we should wait for
+ * the unit to become free or not. We are also able to tell this routine
+ * not to return a descriptor if the host is unable to accept any more
+ * commands for the time being. Keep in mind that there is no
+ * guarantee that the host will stay free, and that the
+ * request_queueable function also relies on the internal allocation
+ * scheme of the packets for each device.
+ */
+
+Scsi_Cmnd * allocate_device (struct request ** reqp, Scsi_Device * device,
+ int wait)
+{
+ kdev_t dev;
+ struct request * req = NULL;
+ int tablesize;
+ unsigned long flags;
+ struct buffer_head * bh, *bhp;
+ struct Scsi_Host * host;
+ Scsi_Cmnd * SCpnt = NULL;
+ Scsi_Cmnd * SCwait = NULL;
+ Scsi_Cmnd * found = NULL;
+
+ if (!device)
+ panic ("No device passed to allocate_device().\n");
+
+ if (reqp) req = *reqp;
+
+ /* See if this request has already been queued by an interrupt routine */
+ if (req) {
+ if(req->rq_status == RQ_INACTIVE) return NULL;
+ dev = req->rq_dev;
+ } else
+ dev = 0; /* unused */
+
+ host = device->host;
+
+ if (intr_count && SCSI_BLOCK(host)) return NULL;
+
+ while (1==1){
+ if (!device->single_lun) {
+ SCpnt = device->device_queue;
+ while(SCpnt){
+ SCwait = SCpnt;
+ if(SCpnt->request.rq_status == RQ_INACTIVE) break;
+ SCpnt = SCpnt->device_next;
+ }
+ } else {
+ SCpnt = device->host->host_queue;
+ while(SCpnt){
+ if(SCpnt->channel == device->channel
+ && SCpnt->target == device->id) {
+ if (SCpnt->lun == device->lun) {
+ SCwait = SCpnt;
+ if(found == NULL
+ && SCpnt->request.rq_status == RQ_INACTIVE)
+ {
+ found=SCpnt;
+ }
+ }
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ /*
+ * I think that we should really limit things to one
+ * outstanding command per device - this is what tends
+ * to trip up buggy firmware.
+ */
+ found = NULL;
+ break;
+ }
+ }
+ SCpnt = SCpnt->next;
+ }
+ SCpnt = found;
+ }
+
+ save_flags(flags);
+ cli();
+ /* See if this request has already been queued by an interrupt routine
+ */
+ if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
+ restore_flags(flags);
+ return NULL;
+ }
+ if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) /* Might have changed */
+ {
+#if 1 /* NEW CODE */
+ if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE){
+ sleep_on(&device->device_wait);
+ restore_flags(flags);
+ } else {
+ restore_flags(flags);
+ if (!wait) return NULL;
+ if (!SCwait) {
+ printk("Attempt to allocate device channel %d,"
+ " target %d, lun %d\n", device->channel,
+ device->id, device->lun);
+ panic("No device found in allocate_device\n");
+ }
+ }
+#else /* ORIGINAL CODE */
+ restore_flags(flags);
+ if(!wait) return NULL;
+ if (!SCwait) {
+ printk("Attempt to allocate device channel %d, target"
+ " %d, lun %d\n", device->channel, device->id,
+ device->lun);
+ panic("No device found in allocate_device\n");
+ }
+ SCSI_SLEEP(&device->device_wait,
+ (SCwait->request.rq_status != RQ_INACTIVE));
+#endif
+ } else {
+ if (req) {
+ memcpy(&SCpnt->request, req, sizeof(struct request));
+ tablesize = device->host->sg_tablesize;
+ bhp = bh = req->bh;
+ if(!tablesize) bh = NULL;
+ /* Take a quick look through the table to see how big it is.
+ * We already have our copy of req, so we can mess with that
+ * if we want to.
+ */
+ while(req->nr_sectors && bh){
+ bhp = bhp->b_reqnext;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ if(!tablesize) break;
+ bh = bhp;
+ }
+ if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */
+ SCpnt->request.bhtail = bh;
+ req->bh = bh->b_reqnext; /* Divide request */
+ bh->b_reqnext = NULL;
+ bh = req->bh;
+ /* Now reset things so that req looks OK */
+ SCpnt->request.nr_sectors -= req->nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ SCpnt->request.sem = NULL; /* Wait until whole thing done*/
+ }
+ else
+ {
+ req->rq_status = RQ_INACTIVE;
+ *reqp = req->next;
+ wake_up(&wait_for_request);
+ }
+ } else {
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = NULL; /* And no one is waiting for this
+ * to complete */
+ }
+ restore_flags(flags);
+ break;
+ }
+ }
+
+ SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
+ SCpnt->old_use_sg = 0;
+ SCpnt->transfersize = 0; /* No default transfer size */
+ SCpnt->cmd_len = 0;
+
+ SCpnt->underflow = 0; /* Do not flag underflow conditions */
+
+ /* Since not everyone seems to set the device info correctly
+   * before Scsi_Cmnd gets sent out to scsi_do_command, we do it here.
+ */
+ SCpnt->channel = device->channel;
+ SCpnt->lun = device->lun;
+ SCpnt->target = device->id;
+
+ return SCpnt;
+}
+
+/*
+ * This is inline because we have stack problems if we recurse too deeply.
+ */
+
+inline void internal_cmnd (Scsi_Cmnd * SCpnt)
+{
+ unsigned long flags, timeout;
+ struct Scsi_Host * host;
+#ifdef DEBUG_DELAY
+ unsigned long clock;
+#endif
+
+#if DEBUG
+ unsigned long *ret = 0;
+#ifdef __mips__
+ __asm__ __volatile__ ("move\t%0,$31":"=r"(ret));
+#else
+ ret = __builtin_return_address(0);
+#endif
+#endif
+
+ host = SCpnt->host;
+
+ save_flags(flags);
+ cli();
+ /* Assign a unique nonzero serial_number. */
+ if (++serial_number == 0) serial_number = 1;
+ SCpnt->serial_number = serial_number;
+
+ /*
+ * We will wait MIN_RESET_DELAY clock ticks after the last reset so
+   * that the drive has time to become ready.
+ */
+ timeout = host->last_reset + MIN_RESET_DELAY;
+ if (jiffies < timeout) {
+ int ticks_remaining = timeout - jiffies;
+ /*
+ * NOTE: This may be executed from within an interrupt
+ * handler! This is bad, but for now, it'll do. The irq
+ * level of the interrupt handler has been masked out by the
+ * platform dependent interrupt handling code already, so the
+ * sti() here will not cause another call to the SCSI host's
+ * interrupt handler (assuming there is one irq-level per
+ * host).
+ */
+ sti();
+ while (--ticks_remaining >= 0) udelay(1000000/HZ);
+ host->last_reset = jiffies - MIN_RESET_DELAY;
+ }
+ restore_flags(flags);
+
+ update_timeout(SCpnt, SCpnt->timeout_per_command);
+
+ /*
+ * We will use a queued command if possible, otherwise we will emulate the
+ * queuing and calling of completion function ourselves.
+ */
+#ifdef DEBUG
+ printk("internal_cmnd (host = %d, channel = %d, target = %d, "
+ "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
+ SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
+ SCpnt->buffer, SCpnt->bufflen, SCpnt->done);
+#endif
+
+ if (host->can_queue)
+ {
+#ifdef DEBUG
+ printk("queuecommand : routine at %p\n",
+ host->hostt->queuecommand);
+#endif
+ /* This locking tries to prevent all sorts of races between
+ * queuecommand and the interrupt code. In effect,
+ * we are only allowed to be in queuecommand once at
+ * any given time, and we can only be in the interrupt
+ * handler and the queuecommand function at the same time
+ * when queuecommand is called while servicing the
+ * interrupt.
+ */
+
+ if(!intr_count && SCpnt->host->irq)
+ disable_irq(SCpnt->host->irq);
+
+ host->hostt->queuecommand (SCpnt, scsi_done);
+
+ if(!intr_count && SCpnt->host->irq)
+ enable_irq(SCpnt->host->irq);
+ }
+ else
+ {
+ int temp;
+
+#ifdef DEBUG
+ printk("command() : routine at %p\n", host->hostt->command);
+#endif
+ temp = host->hostt->command (SCpnt);
+ SCpnt->result = temp;
+#ifdef DEBUG_DELAY
+ clock = jiffies + 4 * HZ;
+ while (jiffies < clock) barrier();
+ printk("done(host = %d, result = %04x) : routine at %p\n",
+ host->host_no, temp, host->hostt->command);
+#endif
+ scsi_done(SCpnt);
+ }
+#ifdef DEBUG
+ printk("leaving internal_cmnd()\n");
+#endif
+}
+
+static void scsi_request_sense (Scsi_Cmnd * SCpnt)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ SCpnt->flags |= WAS_SENSE | ASKED_FOR_SENSE;
+ update_timeout(SCpnt, SENSE_TIMEOUT);
+ restore_flags(flags);
+
+
+ memcpy ((void *) SCpnt->cmnd , (void *) generic_sense,
+ sizeof(generic_sense));
+
+ SCpnt->cmnd[1] = SCpnt->lun << 5;
+ SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer);
+
+ SCpnt->request_buffer = &SCpnt->sense_buffer;
+ SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
+ SCpnt->use_sg = 0;
+ SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+ internal_cmnd (SCpnt);
+}
+
+
+
+/*
+ * scsi_do_cmd sends all the commands out to the low-level driver. It
+ * handles the specifics required for each low-level driver - i.e. queued
+ * or non-queued. It also prevents conflicts when different high-level
+ * drivers go for the same host at the same time.
+ */
+
+void scsi_do_cmd (Scsi_Cmnd * SCpnt, const void *cmnd ,
+ void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
+ int timeout, int retries)
+{
+ unsigned long flags;
+ struct Scsi_Host * host = SCpnt->host;
+
+#ifdef DEBUG
+ {
+ int i;
+ int target = SCpnt->target;
+ printk ("scsi_do_cmd (host = %d, channel = %d target = %d, "
+ "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
+ "retries = %d)\n"
+ "command : " , host->host_no, SCpnt->channel, target, buffer,
+ bufflen, done, timeout, retries);
+ for (i = 0; i < 10; ++i)
+ printk ("%02x ", ((unsigned char *) cmnd)[i]);
+ printk("\n");
+ }
+#endif
+
+ if (!host)
+ {
+ panic ("Invalid or not present host.\n");
+ }
+
+
+ /*
+   * We must prevent reentrancy into the low-level host driver: we loop
+   * until the host we want to talk to is not busy.
+ * Race conditions are prevented, as interrupts are disabled in between the
+ * time we check for the host being not busy, and the time we mark it busy
+ * ourselves.
+ */
+
+ save_flags(flags);
+ cli();
+ SCpnt->pid = scsi_pid++;
+
+ while (SCSI_BLOCK(host)) {
+ restore_flags(flags);
+ SCSI_SLEEP(&host->host_wait, SCSI_BLOCK(host));
+ cli();
+ }
+
+ if (host->block) host_active = host;
+
+ host->host_busy++;
+ restore_flags(flags);
+
+ /*
+   * Our own function scsi_done (which marks the host as not busy, disables
+   * the timeout counter, etc) will be called by us or by the
+   * scsi_hosts[host].queuecommand() function; it in turn calls
+   * the completion function for the high level driver.
+ */
+
+ memcpy ((void *) SCpnt->data_cmnd , (const void *) cmnd, 12);
+#if 0
+ SCpnt->host = host;
+ SCpnt->channel = channel;
+ SCpnt->target = target;
+ SCpnt->lun = (SCpnt->data_cmnd[1] >> 5);
+#endif
+ SCpnt->reset_chain = NULL;
+ SCpnt->serial_number = 0;
+ SCpnt->bufflen = bufflen;
+ SCpnt->buffer = buffer;
+ SCpnt->flags = 0;
+ SCpnt->retries = 0;
+ SCpnt->allowed = retries;
+ SCpnt->done = done;
+ SCpnt->timeout_per_command = timeout;
+
+ memcpy ((void *) SCpnt->cmnd , (const void *) cmnd, 12);
+ /* Zero the sense buffer. Some host adapters automatically request
+ * sense on error. 0 is not a valid sense code.
+ */
+ memset ((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
+ SCpnt->request_buffer = buffer;
+ SCpnt->request_bufflen = bufflen;
+ SCpnt->old_use_sg = SCpnt->use_sg;
+ if (SCpnt->cmd_len == 0)
+ SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
+ SCpnt->old_cmd_len = SCpnt->cmd_len;
+
+ /* Start the timer ticking. */
+
+ SCpnt->internal_timeout = NORMAL_TIMEOUT;
+ SCpnt->abort_reason = 0;
+ internal_cmnd (SCpnt);
+
+#ifdef DEBUG
+ printk ("Leaving scsi_do_cmd()\n");
+#endif
+}
+
+static int check_sense (Scsi_Cmnd * SCpnt)
+{
+ /* If there is no sense information, request it. If we have already
+ * requested it, there is no point in asking again - the firmware must
+ * be confused.
+ */
+ if (((SCpnt->sense_buffer[0] & 0x70) >> 4) != 7) {
+ if(!(SCpnt->flags & ASKED_FOR_SENSE))
+ return SUGGEST_SENSE;
+ else
+ return SUGGEST_RETRY;
+ }
+
+ SCpnt->flags &= ~ASKED_FOR_SENSE;
+
+#ifdef DEBUG_INIT
+ printk("scsi%d, channel%d : ", SCpnt->host->host_no, SCpnt->channel);
+ print_sense("", SCpnt);
+ printk("\n");
+#endif
+ if (SCpnt->sense_buffer[2] & 0xe0)
+ return SUGGEST_ABORT;
+
+ switch (SCpnt->sense_buffer[2] & 0xf)
+ {
+ case NO_SENSE:
+ return 0;
+ case RECOVERED_ERROR:
+ return SUGGEST_IS_OK;
+
+ case ABORTED_COMMAND:
+ return SUGGEST_RETRY;
+ case NOT_READY:
+ case UNIT_ATTENTION:
+ /*
+ * If we are expecting a CC/UA because of a bus reset that we
+ * performed, treat this just as a retry. Otherwise this is
+ * information that we should pass up to the upper-level driver
+ * so that we can deal with it there.
+ */
+ if( SCpnt->device->expecting_cc_ua )
+ {
+ SCpnt->device->expecting_cc_ua = 0;
+ return SUGGEST_RETRY;
+ }
+ return SUGGEST_ABORT;
+
+ /* these three are not supported */
+ case COPY_ABORTED:
+ case VOLUME_OVERFLOW:
+ case MISCOMPARE:
+
+ case MEDIUM_ERROR:
+ return SUGGEST_REMAP;
+ case BLANK_CHECK:
+ case DATA_PROTECT:
+ case HARDWARE_ERROR:
+ case ILLEGAL_REQUEST:
+ default:
+ return SUGGEST_ABORT;
+ }
+}
+
+/* This function is the mid-level interrupt routine, which decides how
+ * to handle error conditions. Each invocation of this function must
+ * do one and *only* one of the following:
+ *
+ * (1) Call last_cmnd[host].done. This is done for fatal errors and
+ * normal completion, and indicates that the handling for this
+ * request is complete.
+ * (2) Call internal_cmnd to requeue the command. This will result in
+ * scsi_done being called again when the retry is complete.
+ * (3) Call scsi_request_sense. This asks the host adapter/drive for
+ * more information about the error condition. When the information
+ * is available, scsi_done will be called again.
+ * (4) Call reset(). This is sort of a last resort, and the idea is that
+ * this may kick things loose and get the drive working again. reset()
+ * automatically calls scsi_request_sense, and thus scsi_done will be
+ * called again once the reset is complete.
+ *
+ * If none of the above actions are taken, the drive in question
+ * will hang. If more than one of the above actions are taken by
+ * scsi_done, then unpredictable behavior will result.
+ */
+static void scsi_done (Scsi_Cmnd * SCpnt)
+{
+ int status=0;
+ int exit=0;
+ int checked;
+ int oldto;
+ struct Scsi_Host * host = SCpnt->host;
+ int result = SCpnt->result;
+ SCpnt->serial_number = 0;
+ oldto = update_timeout(SCpnt, 0);
+
+#ifdef DEBUG_TIMEOUT
+ if(result) printk("Non-zero result in scsi_done %x %d:%d\n",
+ result, SCpnt->target, SCpnt->lun);
+#endif
+
+ /* If we requested an abort, (and we got it) then fix up the return
+ * status to say why
+ */
+ if(host_byte(result) == DID_ABORT && SCpnt->abort_reason)
+ SCpnt->result = result = (result & 0xff00ffff) |
+ (SCpnt->abort_reason << 16);
+
+
+#define FINISHED 0
+#define MAYREDO 1
+#define REDO 3
+#define PENDING 4
+
+#ifdef DEBUG
+ printk("In scsi_done(host = %d, result = %06x)\n", host->host_no, result);
+#endif
+
+ if(SCpnt->flags & WAS_SENSE)
+ {
+ SCpnt->use_sg = SCpnt->old_use_sg;
+ SCpnt->cmd_len = SCpnt->old_cmd_len;
+ }
+
+ switch (host_byte(result))
+ {
+ case DID_OK:
+ if (status_byte(result) && (SCpnt->flags & WAS_SENSE))
+ /* Failed to obtain sense information */
+ {
+ SCpnt->flags &= ~WAS_SENSE;
+#if 0 /* This cannot possibly be correct. */
+ SCpnt->internal_timeout &= ~SENSE_TIMEOUT;
+#endif
+
+ if (!(SCpnt->flags & WAS_RESET))
+ {
+ printk("scsi%d : channel %d target %d lun %d request sense"
+ " failed, performing reset.\n",
+ SCpnt->host->host_no, SCpnt->channel, SCpnt->target,
+ SCpnt->lun);
+ scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS);
+ return;
+ }
+ else
+ {
+ exit = (DRIVER_HARD | SUGGEST_ABORT);
+ status = FINISHED;
+ }
+ }
+ else switch(msg_byte(result))
+ {
+ case COMMAND_COMPLETE:
+ switch (status_byte(result))
+ {
+ case GOOD:
+ if (SCpnt->flags & WAS_SENSE)
+ {
+#ifdef DEBUG
+ printk ("In scsi_done, GOOD status, COMMAND COMPLETE, "
+ "parsing sense information.\n");
+#endif
+ SCpnt->flags &= ~WAS_SENSE;
+#if 0 /* This cannot possibly be correct. */
+ SCpnt->internal_timeout &= ~SENSE_TIMEOUT;
+#endif
+
+ switch (checked = check_sense(SCpnt))
+ {
+ case SUGGEST_SENSE:
+ case 0:
+#ifdef DEBUG
+ printk("NO SENSE. status = REDO\n");
+#endif
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+ case SUGGEST_IS_OK:
+ break;
+ case SUGGEST_REMAP:
+#ifdef DEBUG
+ printk("SENSE SUGGEST REMAP - status = FINISHED\n");
+#endif
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ case SUGGEST_RETRY:
+#ifdef DEBUG
+ printk("SENSE SUGGEST RETRY - status = MAYREDO\n");
+#endif
+ status = MAYREDO;
+ exit = DRIVER_SENSE | SUGGEST_RETRY;
+ break;
+ case SUGGEST_ABORT:
+#ifdef DEBUG
+ printk("SENSE SUGGEST ABORT - status = FINISHED");
+#endif
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ default:
+ printk ("Internal error %s %d \n", __FILE__,
+ __LINE__);
+ }
+ } /* end WAS_SENSE */
+ else
+ {
+#ifdef DEBUG
+ printk("COMMAND COMPLETE message returned, "
+ "status = FINISHED. \n");
+#endif
+ exit = DRIVER_OK;
+ status = FINISHED;
+ }
+ break;
+
+ case CHECK_CONDITION:
+ case COMMAND_TERMINATED:
+ switch (check_sense(SCpnt))
+ {
+ case 0:
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+ case SUGGEST_REMAP:
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ case SUGGEST_RETRY:
+ status = MAYREDO;
+ exit = DRIVER_SENSE | SUGGEST_RETRY;
+ break;
+ case SUGGEST_ABORT:
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ case SUGGEST_SENSE:
+ scsi_request_sense (SCpnt);
+ status = PENDING;
+ break;
+ }
+ break;
+
+ case CONDITION_GOOD:
+ case INTERMEDIATE_GOOD:
+ case INTERMEDIATE_C_GOOD:
+ break;
+
+ case BUSY:
+ case QUEUE_FULL:
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+
+ case RESERVATION_CONFLICT:
+ printk("scsi%d, channel %d : RESERVATION CONFLICT performing"
+ " reset.\n", SCpnt->host->host_no, SCpnt->channel);
+ scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS);
+ return;
+#if 0
+ exit = DRIVER_SOFT | SUGGEST_ABORT;
+ status = MAYREDO;
+ break;
+#endif
+ default:
+ printk ("Internal error %s %d \n"
+ "status byte = %d \n", __FILE__,
+ __LINE__, status_byte(result));
+
+ }
+ break;
+ default:
+ panic("scsi: unsupported message byte %d received\n",
+ msg_byte(result));
+ }
+ break;
+ case DID_TIME_OUT:
+#ifdef DEBUG
+ printk("Host returned DID_TIME_OUT - ");
+#endif
+
+ if (SCpnt->flags & WAS_TIMEDOUT)
+ {
+#ifdef DEBUG
+ printk("Aborting\n");
+#endif
+ /*
+ Allow TEST_UNIT_READY and INQUIRY commands to timeout early
+ without causing resets. All other commands should be retried.
+ */
+ if (SCpnt->cmnd[0] != TEST_UNIT_READY &&
+ SCpnt->cmnd[0] != INQUIRY)
+ status = MAYREDO;
+ exit = (DRIVER_TIMEOUT | SUGGEST_ABORT);
+ }
+ else
+ {
+#ifdef DEBUG
+ printk ("Retrying.\n");
+#endif
+ SCpnt->flags |= WAS_TIMEDOUT;
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ status = REDO;
+ }
+ break;
+ case DID_BUS_BUSY:
+ case DID_PARITY:
+ status = REDO;
+ break;
+ case DID_NO_CONNECT:
+#ifdef DEBUG
+ printk("Couldn't connect.\n");
+#endif
+ exit = (DRIVER_HARD | SUGGEST_ABORT);
+ break;
+ case DID_ERROR:
+ status = MAYREDO;
+ exit = (DRIVER_HARD | SUGGEST_ABORT);
+ break;
+ case DID_BAD_TARGET:
+ case DID_ABORT:
+ exit = (DRIVER_INVALID | SUGGEST_ABORT);
+ break;
+ case DID_RESET:
+ if (SCpnt->flags & IS_RESETTING)
+ {
+ SCpnt->flags &= ~IS_RESETTING;
+ status = REDO;
+ break;
+ }
+
+ if(msg_byte(result) == GOOD &&
+ status_byte(result) == CHECK_CONDITION) {
+ switch (check_sense(SCpnt)) {
+ case 0:
+ update_timeout(SCpnt, oldto);
+ status = REDO;
+ break;
+ case SUGGEST_REMAP:
+ case SUGGEST_RETRY:
+ status = MAYREDO;
+ exit = DRIVER_SENSE | SUGGEST_RETRY;
+ break;
+ case SUGGEST_ABORT:
+ status = FINISHED;
+ exit = DRIVER_SENSE | SUGGEST_ABORT;
+ break;
+ case SUGGEST_SENSE:
+ scsi_request_sense (SCpnt);
+ status = PENDING;
+ break;
+ }
+ } else {
+ status=REDO;
+ exit = SUGGEST_RETRY;
+ }
+ break;
+ default :
+ exit = (DRIVER_ERROR | SUGGEST_DIE);
+ }
+
+ switch (status)
+ {
+ case FINISHED:
+ case PENDING:
+ break;
+ case MAYREDO:
+#ifdef DEBUG
+ printk("In MAYREDO, allowing %d retries, have %d\n",
+ SCpnt->allowed, SCpnt->retries);
+#endif
+ if ((++SCpnt->retries) < SCpnt->allowed)
+ {
+ if ((SCpnt->retries >= (SCpnt->allowed >> 1))
+ && !(SCpnt->host->last_reset > 0 &&
+ jiffies < SCpnt->host->last_reset + MIN_RESET_PERIOD)
+ && !(SCpnt->flags & WAS_RESET))
+ {
+ printk("scsi%d channel %d : resetting for second half of retries.\n",
+ SCpnt->host->host_no, SCpnt->channel);
+ scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS);
+ break;
+ }
+
+ }
+ else
+ {
+ status = FINISHED;
+ break;
+ }
+ /* fall through to REDO */
+
+ case REDO:
+
+ if (SCpnt->flags & WAS_SENSE)
+ scsi_request_sense(SCpnt);
+ else
+ {
+ memcpy ((void *) SCpnt->cmnd,
+ (void*) SCpnt->data_cmnd,
+ sizeof(SCpnt->data_cmnd));
+ SCpnt->request_buffer = SCpnt->buffer;
+ SCpnt->request_bufflen = SCpnt->bufflen;
+ SCpnt->use_sg = SCpnt->old_use_sg;
+ SCpnt->cmd_len = SCpnt->old_cmd_len;
+ internal_cmnd (SCpnt);
+ }
+ break;
+ default:
+ INTERNAL_ERROR;
+ }
+
+ if (status == FINISHED) {
+#ifdef DEBUG
+ printk("Calling done function - at address %p\n", SCpnt->done);
+#endif
+ host->host_busy--; /* Indicate that we are free */
+
+ if (host->block && host->host_busy == 0) {
+ host_active = NULL;
+
+ /* For block devices "wake_up" is done in end_scsi_request */
+ if (MAJOR(SCpnt->request.rq_dev) != SCSI_DISK_MAJOR &&
+ MAJOR(SCpnt->request.rq_dev) != SCSI_CDROM_MAJOR) {
+ struct Scsi_Host * next;
+
+ for (next = host->block; next != host; next = next->block)
+ wake_up(&next->host_wait);
+ }
+
+ }
+
+ wake_up(&host->host_wait);
+ SCpnt->result = result | ((exit & 0xff) << 24);
+ SCpnt->use_sg = SCpnt->old_use_sg;
+ SCpnt->cmd_len = SCpnt->old_cmd_len;
+ SCpnt->done (SCpnt);
+ }
+
+#undef FINISHED
+#undef REDO
+#undef MAYREDO
+#undef PENDING
+}
+
+/*
+ * The scsi_abort function interfaces with the abort() function of the host
+ * we are aborting, and causes the current command to not complete. The
+ * caller should deal with any error messages or status returned on the
+ * next call.
+ *
+ * This will not be called reentrantly for a given host.
+ */
+
+/*
+ * We have specified that abort() and reset() need not be reentrant;
+ * the internal_timeout flags are used to enforce this serialization.
+ */
+
+
+int scsi_abort (Scsi_Cmnd * SCpnt, int why)
+{
+ int oldto;
+ unsigned long flags;
+ struct Scsi_Host * host = SCpnt->host;
+
+ while(1)
+ {
+ save_flags(flags);
+ cli();
+
+ /*
+ * Protect against races here. If the command is done, or we are
+ * on a different command forget it.
+ */
+ if (SCpnt->serial_number != SCpnt->serial_number_at_timeout) {
+ restore_flags(flags);
+ return 0;
+ }
+
+ if (SCpnt->internal_timeout & IN_ABORT)
+ {
+ restore_flags(flags);
+ while (SCpnt->internal_timeout & IN_ABORT)
+ barrier();
+ }
+ else
+ {
+ SCpnt->internal_timeout |= IN_ABORT;
+ oldto = update_timeout(SCpnt, ABORT_TIMEOUT);
+
+ if ((SCpnt->flags & IS_RESETTING) && SCpnt->device->soft_reset) {
+ /* OK, this command must have died when we did the
+ * reset. The device itself must have lied.
+ */
+ printk("Stale command on %d %d:%d appears to have died when"
+ " the bus was reset\n",
+ SCpnt->channel, SCpnt->target, SCpnt->lun);
+ }
+
+ restore_flags(flags);
+ if (!host->host_busy) {
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ update_timeout(SCpnt, oldto);
+ return 0;
+ }
+ printk("scsi : aborting command due to timeout : pid %lu, scsi%d,"
+ " channel %d, id %d, lun %d ",
+ SCpnt->pid, SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command (SCpnt->cmnd);
+ if (SCpnt->serial_number != SCpnt->serial_number_at_timeout)
+ return 0;
+ SCpnt->abort_reason = why;
+ switch(host->hostt->abort(SCpnt)) {
+ /* We do not know how to abort. Try waiting another
+ * time increment and see if this helps. Set the
+	     * WAS_TIMEDOUT flag so we do not try this twice
+ */
+ case SCSI_ABORT_BUSY: /* Tough call - returning 1 from
+ * this is too severe
+ */
+ case SCSI_ABORT_SNOOZE:
+ if(why == DID_TIME_OUT) {
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ if(SCpnt->flags & WAS_TIMEDOUT) {
+ restore_flags(flags);
+ return 1; /* Indicate we cannot handle this.
+ * We drop down into the reset handler
+ * and try again
+ */
+ } else {
+ SCpnt->flags |= WAS_TIMEDOUT;
+ oldto = SCpnt->timeout_per_command;
+ update_timeout(SCpnt, oldto);
+ }
+ restore_flags(flags);
+ }
+ return 0;
+ case SCSI_ABORT_PENDING:
+ if(why != DID_TIME_OUT) {
+ save_flags(flags);
+ cli();
+ update_timeout(SCpnt, oldto);
+ restore_flags(flags);
+ }
+ return 0;
+ case SCSI_ABORT_SUCCESS:
+ /* We should have already aborted this one. No
+ * need to adjust timeout
+ */
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ return 0;
+ case SCSI_ABORT_NOT_RUNNING:
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ update_timeout(SCpnt, 0);
+ return 0;
+ case SCSI_ABORT_ERROR:
+ default:
+ SCpnt->internal_timeout &= ~IN_ABORT;
+ return 1;
+ }
+ }
+ }
+}
+
+
+/* Mark a single SCSI Device as having been reset. */
+
+static inline void scsi_mark_device_reset(Scsi_Device *Device)
+{
+ Device->was_reset = 1;
+ Device->expecting_cc_ua = 1;
+}
+
+
+/* Mark all SCSI Devices on a specific Host as having been reset. */
+
+void scsi_mark_host_reset(struct Scsi_Host *Host)
+{
+ Scsi_Cmnd *SCpnt;
+ for (SCpnt = Host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ scsi_mark_device_reset(SCpnt->device);
+}
+
+
+/* Mark all SCSI Devices on a specific Host Bus as having been reset. */
+
+void scsi_mark_bus_reset(struct Scsi_Host *Host, int channel)
+{
+ Scsi_Cmnd *SCpnt;
+ for (SCpnt = Host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if (SCpnt->channel == channel)
+ scsi_mark_device_reset(SCpnt->device);
+}
+
+
+int scsi_reset (Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ int temp;
+ unsigned long flags;
+ Scsi_Cmnd * SCpnt1;
+ struct Scsi_Host * host = SCpnt->host;
+
+ printk("SCSI bus is being reset for host %d channel %d.\n",
+ host->host_no, SCpnt->channel);
+
+#if 0
+ /*
+ * First of all, we need to make a recommendation to the low-level
+ * driver as to whether a BUS_DEVICE_RESET should be performed,
+ * or whether we should do a full BUS_RESET. There is no simple
+ * algorithm here - we basically use a series of heuristics
+ * to determine what we should do.
+ */
+ SCpnt->host->suggest_bus_reset = FALSE;
+
+ /*
+ * First see if all of the active devices on the bus have
+ * been jammed up so that we are attempting resets. If so,
+ * then suggest a bus reset. Forcing a bus reset could
+ * result in some race conditions, but no more than
+ * you would usually get with timeouts. We will cross
+ * that bridge when we come to it.
+ *
+ * This is actually a pretty bad idea, since a sequence of
+ * commands will often timeout together and this will cause a
+ * Bus Device Reset followed immediately by a SCSI Bus Reset.
+ * If all of the active devices really are jammed up, the
+ * Bus Device Reset will quickly timeout and scsi_times_out
+ * will follow up with a SCSI Bus Reset anyway.
+ */
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if( SCpnt1->request.rq_status != RQ_INACTIVE
+ && (SCpnt1->flags & (WAS_RESET | IS_RESETTING)) == 0 )
+ break;
+ SCpnt1 = SCpnt1->next;
+ }
+ if( SCpnt1 == NULL ) {
+ reset_flags |= SCSI_RESET_SUGGEST_BUS_RESET;
+ }
+
+ /*
+ * If the code that called us is suggesting a hard reset, then
+ * definitely request it. This usually occurs because a
+ * BUS_DEVICE_RESET times out.
+ *
+ * Passing reset_flags along takes care of this automatically.
+ */
+ if( reset_flags & SCSI_RESET_SUGGEST_BUS_RESET ) {
+ SCpnt->host->suggest_bus_reset = TRUE;
+ }
+#endif
+
+ while (1) {
+ save_flags(flags);
+ cli();
+
+ /*
+ * Protect against races here. If the command is done, or we are
+ * on a different command forget it.
+ */
+ if (reset_flags & SCSI_RESET_ASYNCHRONOUS)
+ if (SCpnt->serial_number != SCpnt->serial_number_at_timeout) {
+ restore_flags(flags);
+ return 0;
+ }
+
+ if (SCpnt->internal_timeout & IN_RESET)
+ {
+ restore_flags(flags);
+ while (SCpnt->internal_timeout & IN_RESET)
+ barrier();
+ }
+ else
+ {
+ SCpnt->internal_timeout |= IN_RESET;
+ update_timeout(SCpnt, RESET_TIMEOUT);
+
+ if (host->host_busy)
+ {
+ restore_flags(flags);
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if (SCpnt1->request.rq_status != RQ_INACTIVE) {
+#if 0
+ if (!(SCpnt1->flags & IS_RESETTING) &&
+ !(SCpnt1->internal_timeout & IN_ABORT))
+ scsi_abort(SCpnt1, DID_RESET);
+#endif
+ SCpnt1->flags |= (WAS_RESET | IS_RESETTING);
+ }
+ SCpnt1 = SCpnt1->next;
+ }
+
+ host->last_reset = jiffies;
+ temp = host->hostt->reset(SCpnt, reset_flags);
+ /*
+ This test allows the driver to introduce an additional bus
+ settle time delay by setting last_reset up to 20 seconds in
+ the future. In the normal case where the driver does not
+ modify last_reset, it must be assumed that the actual bus
+ reset occurred immediately prior to the return to this code,
+ and so last_reset must be updated to the current time, so
+ that the delay in internal_cmnd will guarantee at least a
+ MIN_RESET_DELAY bus settle time.
+ */
+ if ((host->last_reset < jiffies) ||
+ (host->last_reset > (jiffies + 20 * HZ)))
+ host->last_reset = jiffies;
+ }
+ else
+ {
+ if (!host->block) host->host_busy++;
+ restore_flags(flags);
+ host->last_reset = jiffies;
+ SCpnt->flags |= (WAS_RESET | IS_RESETTING);
+ temp = host->hostt->reset(SCpnt, reset_flags);
+ if ((host->last_reset < jiffies) ||
+ (host->last_reset > (jiffies + 20 * HZ)))
+ host->last_reset = jiffies;
+ if (!host->block) host->host_busy--;
+ }
+
+#ifdef DEBUG
+ printk("scsi reset function returned %d\n", temp);
+#endif
+
+ /*
+ * Now figure out what we need to do, based upon
+ * what the low level driver said that it did.
+ * If the result is SCSI_RESET_SUCCESS, SCSI_RESET_PENDING,
+ * or SCSI_RESET_WAKEUP, then the low level driver did a
+ * bus device reset or bus reset, so we should go through
+ * and mark one or all of the devices on that bus
+ * as having been reset.
+ */
+ switch(temp & SCSI_RESET_ACTION) {
+ case SCSI_RESET_SUCCESS:
+ if (temp & SCSI_RESET_HOST_RESET)
+ scsi_mark_host_reset(host);
+ else if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_bus_reset(host, SCpnt->channel);
+ else scsi_mark_device_reset(SCpnt->device);
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ restore_flags(flags);
+ return 0;
+ case SCSI_RESET_PENDING:
+ if (temp & SCSI_RESET_HOST_RESET)
+ scsi_mark_host_reset(host);
+ else if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_bus_reset(host, SCpnt->channel);
+ else scsi_mark_device_reset(SCpnt->device);
+ case SCSI_RESET_NOT_RUNNING:
+ return 0;
+ case SCSI_RESET_PUNT:
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ scsi_request_sense (SCpnt);
+ return 0;
+ case SCSI_RESET_WAKEUP:
+ if (temp & SCSI_RESET_HOST_RESET)
+ scsi_mark_host_reset(host);
+ else if (temp & SCSI_RESET_BUS_RESET)
+ scsi_mark_bus_reset(host, SCpnt->channel);
+ else scsi_mark_device_reset(SCpnt->device);
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ scsi_request_sense (SCpnt);
+ /*
+		   * If a bus reset was performed, we need to wake up each and
+		   * every command that was active on that bus; if it was an HBA
+		   * reset, we wake up all active commands on all channels.
+ */
+ if( temp & SCSI_RESET_HOST_RESET )
+ {
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if (SCpnt1->request.rq_status != RQ_INACTIVE
+ && SCpnt1 != SCpnt)
+ scsi_request_sense (SCpnt1);
+ SCpnt1 = SCpnt1->next;
+ }
+ } else if( temp & SCSI_RESET_BUS_RESET ) {
+ SCpnt1 = host->host_queue;
+ while(SCpnt1) {
+ if(SCpnt1->request.rq_status != RQ_INACTIVE
+ && SCpnt1 != SCpnt
+ && SCpnt1->channel == SCpnt->channel)
+		      scsi_request_sense (SCpnt1);
+ SCpnt1 = SCpnt1->next;
+ }
+ }
+ return 0;
+ case SCSI_RESET_SNOOZE:
+ /* In this case, we set the timeout field to 0
+ * so that this command does not time out any more,
+ * and we return 1 so that we get a message on the
+ * screen.
+ */
+ save_flags(flags);
+ cli();
+ SCpnt->internal_timeout &= ~(IN_RESET|IN_RESET2|IN_RESET3);
+ update_timeout(SCpnt, 0);
+ restore_flags(flags);
+ /* If you snooze, you lose... */
+ case SCSI_RESET_ERROR:
+ default:
+ return 1;
+ }
+
+ return temp;
+ }
+ }
+}
+
+
+static void scsi_main_timeout(void)
+{
+ /*
+ * We must not enter update_timeout with a timeout condition still pending.
+ */
+
+ int timed_out;
+ unsigned long flags;
+ struct Scsi_Host * host;
+ Scsi_Cmnd * SCpnt = NULL;
+
+ save_flags(flags);
+ cli();
+
+ update_timeout(NULL, 0);
+
+ /*
+ * Find all timers such that they have 0 or negative (shouldn't happen)
+ * time remaining on them.
+ */
+ timed_out = 0;
+ for (host = scsi_hostlist; host; host = host->next) {
+ for (SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if (SCpnt->timeout == -1)
+ {
+ SCpnt->timeout = 0;
+ SCpnt->serial_number_at_timeout = SCpnt->serial_number;
+ ++timed_out;
+ }
+ }
+ if (timed_out > 0) {
+ for (host = scsi_hostlist; host; host = host->next) {
+ for (SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if (SCpnt->serial_number_at_timeout > 0 &&
+ SCpnt->serial_number_at_timeout == SCpnt->serial_number)
+ {
+ restore_flags(flags);
+ scsi_times_out(SCpnt);
+ SCpnt->serial_number_at_timeout = 0;
+ cli();
+ }
+ }
+ }
+ restore_flags(flags);
+}
+
+/*
+ * The strategy is to cause the timer code to call scsi_times_out()
+ * when the soonest timeout is pending.
+ * The arguments are used when we are queueing a new command, because
+ * we do not want to subtract the time used from this time, but when we
+ * set the timer, we want to take this value into account.
+ */
+
+static int update_timeout(Scsi_Cmnd * SCset, int timeout)
+{
+ unsigned int least, used;
+ unsigned int oldto;
+ unsigned long flags;
+ struct Scsi_Host * host;
+ Scsi_Cmnd * SCpnt = NULL;
+
+ save_flags(flags);
+ cli();
+
+ oldto = 0;
+
+ /*
+ * This routine can be a performance bottleneck under high loads, since
+ * it is called twice per SCSI operation: once when internal_cmnd is
+ * called, and again when scsi_done completes the command. To limit
+ * the load this routine can cause, we shortcut processing if no clock
+ * ticks have occurred since the last time it was called.
+ */
+
+ if (jiffies == time_start && timer_table[SCSI_TIMER].expires > 0) {
+ if(SCset){
+ oldto = SCset->timeout;
+ SCset->timeout = timeout;
+ if (timeout > 0 &&
+ jiffies + timeout < timer_table[SCSI_TIMER].expires)
+ timer_table[SCSI_TIMER].expires = jiffies + timeout;
+ }
+ restore_flags(flags);
+ return oldto;
+ }
+
+ /*
+ * Figure out how much time has passed since the last time the timeouts
+ * were updated
+ */
+ used = (time_start) ? (jiffies - time_start) : 0;
+
+ /*
+ * Find out what is due to timeout soonest, and adjust all timeouts for
+ * the amount of time that has passed since the last time we called
+ * update_timeout.
+ */
+
+ oldto = 0;
+
+ if(SCset){
+ oldto = SCset->timeout - used;
+ SCset->timeout = timeout;
+ }
+
+ least = 0xffffffff;
+
+ for(host = scsi_hostlist; host; host = host->next)
+ for(SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if (SCpnt->timeout > 0) {
+ if (SCpnt != SCset)
+ SCpnt->timeout -= used;
+ if(SCpnt->timeout <= 0) SCpnt->timeout = -1;
+ if(SCpnt->timeout > 0 && SCpnt->timeout < least)
+ least = SCpnt->timeout;
+ }
+
+ /*
+ * If something is due to timeout again, then we will set the next timeout
+ * interrupt to occur. Otherwise, timeouts are disabled.
+ */
+
+ if (least != 0xffffffff)
+ {
+ time_start = jiffies;
+ timer_table[SCSI_TIMER].expires = (time_elapsed = least) + jiffies;
+ timer_active |= 1 << SCSI_TIMER;
+ }
+ else
+ {
+ timer_table[SCSI_TIMER].expires = time_start = time_elapsed = 0;
+ timer_active &= ~(1 << SCSI_TIMER);
+ }
+ restore_flags(flags);
+ return oldto;
+}
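+
+/*
+ * Worked example (added comment): suppose time_start was recorded at jiffy
+ * 1000 and update_timeout() runs again at jiffy 1005, so used == 5. Every
+ * active command other than SCset has 5 ticks subtracted from its timeout;
+ * anything that reaches zero or below is flagged with timeout == -1 so that
+ * scsi_main_timeout() can collect it, and the SCSI timer is re-armed for the
+ * smallest remaining timeout (or disabled if nothing is pending).
+ */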
+
+#ifdef CONFIG_MODULES
+static int scsi_register_host(Scsi_Host_Template *);
+static void scsi_unregister_host(Scsi_Host_Template *);
+#endif
+
+void *scsi_malloc(unsigned int len)
+{
+ unsigned int nbits, mask;
+ unsigned long flags;
+ int i, j;
+ if(len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
+ return NULL;
+
+ save_flags(flags);
+ cli();
+ nbits = len >> 9;
+ mask = (1 << nbits) - 1;
+
+ for(i=0;i < dma_sectors / SECTORS_PER_PAGE; i++)
+ for(j=0; j<=SECTORS_PER_PAGE - nbits; j++){
+ if ((dma_malloc_freelist[i] & (mask << j)) == 0){
+ dma_malloc_freelist[i] |= (mask << j);
+ restore_flags(flags);
+ dma_free_sectors -= nbits;
+#ifdef DEBUG
+ printk("SMalloc: %d %p\n",len, dma_malloc_pages[i] + (j << 9));
+#endif
+ return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
+ }
+ }
+ restore_flags(flags);
+ return NULL; /* Nope. No more */
+}
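+
+/*
+ * Worked example (added comment): a request for 1024 bytes gives
+ * nbits = 1024 >> 9 = 2 and mask = 0x3. The loops above look for two
+ * adjacent clear bits in one page's dma_malloc_freelist word, set them, and
+ * return dma_malloc_pages[i] + (j << 9), i.e. the j-th 512-byte sector of
+ * that page. Requests must be a multiple of SECTOR_SIZE (512 bytes, matching
+ * the >> 9 shifts) and no larger than PAGE_SIZE.
+ */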
+
+int scsi_free(void *obj, unsigned int len)
+{
+ unsigned int page, sector, nbits, mask;
+ unsigned long flags;
+
+#ifdef DEBUG
+ unsigned long ret = 0;
+
+#ifdef __mips__
+ __asm__ __volatile__ ("move\t%0,$31":"=r"(ret));
+#else
+ ret = __builtin_return_address(0);
+#endif
+ printk("scsi_free %p %d\n",obj, len);
+#endif
+
+ for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
+ unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
+ if ((unsigned long) obj >= page_addr &&
+ (unsigned long) obj < page_addr + PAGE_SIZE)
+ {
+ sector = (((unsigned long) obj) - page_addr) >> 9;
+
+ nbits = len >> 9;
+ mask = (1 << nbits) - 1;
+
+ if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
+ panic ("scsi_free:Bad memory alignment");
+
+ save_flags(flags);
+ cli();
+ if((dma_malloc_freelist[page] &
+ (mask << sector)) != (mask<<sector)){
+#ifdef DEBUG
+ printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
+ obj, len, ret);
+#endif
+ panic("scsi_free:Trying to free unused memory");
+ }
+ dma_free_sectors += nbits;
+ dma_malloc_freelist[page] &= ~(mask << sector);
+ restore_flags(flags);
+ return 0;
+ }
+ }
+ panic("scsi_free:Bad offset");
+}
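+
+#if 0
+/*
+ * Illustrative usage sketch (not part of the original driver): scsi_malloc()
+ * hands out DMA-safe memory from the pool above in 512-byte sector units, so
+ * the length must be a multiple of SECTOR_SIZE and no larger than PAGE_SIZE,
+ * and every allocation must be released with scsi_free() of the same length.
+ */
+static void scsi_pool_usage_example(void)
+{
+    char *buf = (char *) scsi_malloc(1024);    /* two sectors from the pool */
+    if (buf) {
+        /* ... use buf as a DMA bounce buffer ... */
+        scsi_free(buf, 1024);                  /* length must match the allocation */
+    }
+}
+#endif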
+
+
+int scsi_loadable_module_flag; /* Set after we scan builtin drivers */
+
+void * scsi_init_malloc(unsigned int size, int priority)
+{
+ void * retval;
+
+ /*
+ * For buffers used by the DMA pool, we assume page aligned
+ * structures.
+ */
+ if ((size % PAGE_SIZE) == 0) {
+ int order, a_size;
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ retval = (void *) __get_dma_pages(priority & GFP_LEVEL_MASK,
+ order);
+ } else
+ retval = kmalloc(size, priority);
+
+ if (retval)
+ memset(retval, 0, size);
+ return retval;
+}
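+
+/*
+ * For example (illustrative, not from the original source): a request of
+ * 3 * PAGE_SIZE is a page multiple but not a power-of-two number of pages,
+ * so the loop above settles on order 2 (four pages), the smallest
+ * power-of-two block that covers the request, and that is what
+ * __get_dma_pages() hands back.
+ */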
+
+
+void scsi_init_free(char * ptr, unsigned int size)
+{
+ /*
+ * We need this special code here because the DMA pool assumes
+ * page aligned data. Besides, it is wasteful to allocate
+ * page sized chunks with kmalloc.
+ */
+ if ((size % PAGE_SIZE) == 0) {
+ int order, a_size;
+
+ for (order = 0, a_size = PAGE_SIZE;
+ a_size < size; order++, a_size <<= 1)
+ ;
+ free_pages((unsigned long)ptr, order);
+ } else
+ kfree(ptr);
+}
+
+void scsi_build_commandblocks(Scsi_Device * SDpnt)
+{
+ struct Scsi_Host *host = SDpnt->host;
+ int j;
+ Scsi_Cmnd * SCpnt;
+
+ if (SDpnt->queue_depth == 0)
+ SDpnt->queue_depth = host->cmd_per_lun;
+ SDpnt->device_queue = NULL;
+
+ for(j=0;j<SDpnt->queue_depth;j++){
+ SCpnt = (Scsi_Cmnd *)
+ scsi_init_malloc(sizeof(Scsi_Cmnd),
+ GFP_ATOMIC |
+ (host->unchecked_isa_dma ? GFP_DMA : 0));
+ SCpnt->host = host;
+ SCpnt->device = SDpnt;
+ SCpnt->target = SDpnt->id;
+ SCpnt->lun = SDpnt->lun;
+ SCpnt->channel = SDpnt->channel;
+ SCpnt->request.rq_status = RQ_INACTIVE;
+ SCpnt->use_sg = 0;
+ SCpnt->old_use_sg = 0;
+ SCpnt->old_cmd_len = 0;
+ SCpnt->timeout = 0;
+ SCpnt->underflow = 0;
+ SCpnt->transfersize = 0;
+ SCpnt->serial_number = 0;
+ SCpnt->serial_number_at_timeout = 0;
+ SCpnt->host_scribble = NULL;
+ if(host->host_queue)
+ host->host_queue->prev = SCpnt;
+ SCpnt->next = host->host_queue;
+ SCpnt->prev = NULL;
+ host->host_queue = SCpnt;
+ SCpnt->device_next = SDpnt->device_queue;
+ SDpnt->device_queue = SCpnt;
+ }
+ SDpnt->has_cmdblocks = 1;
+}
+
+/*
+ * scsi_dev_init() is our initialization routine, which in turn calls host
+ * initialization, bus scanning, and sd/st initialization routines.
+ */
+
+int scsi_dev_init(void)
+{
+ Scsi_Device * SDpnt;
+ struct Scsi_Host * shpnt;
+ struct Scsi_Device_Template * sdtpnt;
+#ifdef FOO_ON_YOU
+ return 0;
+#endif
+
+ /* Yes we're here... */
+#if CONFIG_PROC_FS
+ dispatch_scsi_info_ptr = dispatch_scsi_info;
+#endif
+
+ /* Init a few things so we can "malloc" memory. */
+ scsi_loadable_module_flag = 0;
+
+ timer_table[SCSI_TIMER].fn = scsi_main_timeout;
+ timer_table[SCSI_TIMER].expires = 0;
+
+#ifdef CONFIG_MODULES
+ register_symtab(&scsi_symbol_table);
+#endif
+
+ /* Register the /proc/scsi/scsi entry */
+#if CONFIG_PROC_FS
+ proc_scsi_register(0, &proc_scsi_scsi);
+#endif
+
+ /* initialize all hosts */
+ scsi_init();
+
+ scsi_devices = (Scsi_Device *) NULL;
+
+ for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+ scan_scsis(shpnt,0,0,0,0); /* scan for scsi devices */
+ if (shpnt->select_queue_depths != NULL)
+ (shpnt->select_queue_depths)(shpnt, scsi_devices);
+ }
+
+ printk("scsi : detected ");
+ for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if (sdtpnt->dev_noticed && sdtpnt->name)
+ printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name,
+ (sdtpnt->dev_noticed != 1) ? "s" : "");
+ printk("total.\n");
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
+ SDpnt->scsi_request_fn = NULL;
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
+ if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
+ }
+
+
+ /*
+ * This should build the DMA pool.
+ */
+ resize_dma_pool();
+
+ /*
+ * OK, now we finish the initialization by doing spin-up, read
+ * capacity, etc, etc
+ */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ (*sdtpnt->finish)();
+
+ scsi_loadable_module_flag = 1;
+
+ return 0;
+}
+
+static void print_inquiry(unsigned char *data)
+{
+ int i;
+
+ printk(" Vendor: ");
+ for (i = 8; i < 16; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk(" Model: ");
+ for (i = 16; i < 32; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk(" Rev: ");
+ for (i = 32; i < 36; i++)
+ {
+ if (data[i] >= 0x20 && i < data[4] + 5)
+ printk("%c", data[i]);
+ else
+ printk(" ");
+ }
+
+ printk("\n");
+
+ i = data[0] & 0x1f;
+
+ printk(" Type: %s ",
+ i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : "Unknown " );
+ printk(" ANSI SCSI revision: %02x", data[2] & 0x07);
+ if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
+ printk(" CCS\n");
+ else
+ printk("\n");
+}
+
+
+#ifdef CONFIG_PROC_FS
+int scsi_proc_info(char *buffer, char **start, off_t offset, int length,
+ int hostno, int inout)
+{
+ Scsi_Cmnd *SCpnt;
+ struct Scsi_Device_Template *SDTpnt;
+ Scsi_Device *scd, *scd_h = NULL;
+ struct Scsi_Host *HBA_ptr;
+ char *p;
+ int host, channel, id, lun;
+ int size, len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+
+ scd = scsi_devices;
+ HBA_ptr = scsi_hostlist;
+
+ if(inout == 0) {
+ size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none");
+ len += size;
+ pos = begin + len;
+ while (HBA_ptr) {
+#if 0
+ size += sprintf(buffer+len,"scsi%2d: %s\n", (int) HBA_ptr->host_no,
+ HBA_ptr->hostt->procname);
+ len += size;
+ pos = begin + len;
+#endif
+ scd = scsi_devices;
+ while (scd) {
+ if (scd->host == HBA_ptr) {
+ proc_print_scsidevice(scd, buffer, &size, len);
+ len += size;
+ pos = begin + len;
+
+ if (pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+ if (pos > offset + length)
+ goto stop_output;
+ }
+ scd = scd->next;
+ }
+ HBA_ptr = HBA_ptr->next;
+ }
+
+ stop_output:
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len = length; /* Ending slop */
+ return (len);
+ }
+
+ if(!buffer || length < 25 || strncmp("scsi", buffer, 4))
+ return(-EINVAL);
+
+ /*
+ * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
+ * with "0 1 2 3" replaced by your "Host Channel Id Lun".
+ * Consider this feature BETA.
+ * CAUTION: This is not for hot-plugging your peripherals. SCSI was
+ * not designed for this, and you could damage your hardware!
+ * Powering on an already connected device may be acceptable, but
+ * even then there is no guarantee that it will not corrupt an
+ * ongoing data transfer.
+ */
+ if(!strncmp("add-single-device", buffer + 5, 17)) {
+ p = buffer + 23;
+
+ host = simple_strtoul(p, &p, 0);
+ channel = simple_strtoul(p+1, &p, 0);
+ id = simple_strtoul(p+1, &p, 0);
+ lun = simple_strtoul(p+1, &p, 0);
+
+ printk("scsi singledevice %d %d %d %d\n", host, channel,
+ id, lun);
+
+ while(scd && (scd->host->host_no != host
+ || scd->channel != channel
+ || scd->id != id
+ || scd->lun != lun)) {
+ scd = scd->next;
+ }
+ if(scd)
+ return(-ENOSYS); /* We do not yet support unplugging */
+ while(HBA_ptr && HBA_ptr->host_no != host)
+ HBA_ptr = HBA_ptr->next;
+
+ if(!HBA_ptr)
+ return(-ENXIO);
+
+ scan_scsis (HBA_ptr, 1, channel, id, lun);
+ return(length);
+
+ }
+
+ /*
+ * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
+ * with "0 1 2 3" replaced by your "Host Channel Id Lun".
+ *
+ * Consider this feature pre-BETA.
+ *
+ * CAUTION: This is not for hotplugging your peripherals. As
+ * SCSI was not designed for this you could damage your
+ * hardware and thoroughly confuse the SCSI subsystem.
+ *
+ */
+ else if(!strncmp("remove-single-device", buffer + 5, 20)) {
+ p = buffer + 26;
+
+ host = simple_strtoul(p, &p, 0);
+ channel = simple_strtoul(p+1, &p, 0);
+ id = simple_strtoul(p+1, &p, 0);
+ lun = simple_strtoul(p+1, &p, 0);
+
+ while(scd != NULL) {
+ if(scd->host->host_no == host
+ && scd->channel == channel
+ && scd->id == id
+ && scd->lun == lun){
+ break;
+ }
+ scd_h = scd;
+ scd = scd->next;
+ }
+
+ if(scd == NULL)
+ return(-ENODEV); /* there is no such device attached */
+
+ if(scd->access_count)
+ return(-EBUSY);
+
+ SDTpnt = scsi_devicelist;
+ while(SDTpnt != NULL) {
+ if(SDTpnt->detach) (*SDTpnt->detach)(scd);
+ SDTpnt = SDTpnt->next;
+ }
+
+ if(scd->attached == 0) {
+ /*
+ * Nobody is using this device any more.
+ * Free all of the command structures.
+ */
+ for(SCpnt=scd->host->host_queue; SCpnt; SCpnt = SCpnt->next){
+ if(SCpnt->device == scd) {
+ if(SCpnt->prev != NULL)
+ SCpnt->prev->next = SCpnt->next;
+ if(SCpnt->next != NULL)
+ SCpnt->next->prev = SCpnt->prev;
+ if(SCpnt == scd->host->host_queue)
+ scd->host->host_queue = SCpnt->next;
+ scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
+ }
+ }
+ /* Now we can remove the device structure */
+ if(scd_h != NULL) {
+ scd_h->next = scd->next;
+ } else if (scsi_devices == scd) {
+ /* We had a hit on the first entry of the device list */
+ scsi_devices = scd->next;
+ }
+ scsi_init_free((char *) scd, sizeof(Scsi_Device));
+ } else {
+ return(-EBUSY);
+ }
+ return(0);
+ }
+ return(-EINVAL);
+}
+#endif
+
+/*
+ * Go through the device list and recompute the most appropriate size
+ * for the dma pool. Then grab more memory (as required).
+ */
+static void resize_dma_pool(void)
+{
+ int i;
+ unsigned long size;
+ struct Scsi_Host * shpnt;
+ struct Scsi_Host * host = NULL;
+ Scsi_Device * SDpnt;
+ unsigned long flags;
+ FreeSectorBitmap * new_dma_malloc_freelist = NULL;
+ unsigned int new_dma_sectors = 0;
+ unsigned int new_need_isa_buffer = 0;
+ unsigned char ** new_dma_malloc_pages = NULL;
+
+ if( !scsi_devices )
+ {
+ /*
+ * Free up the DMA pool.
+ */
+ if( dma_free_sectors != dma_sectors )
+ panic("SCSI DMA pool memory leak %d %d\n",dma_free_sectors,dma_sectors);
+
+ for(i=0; i < dma_sectors / SECTORS_PER_PAGE; i++)
+ scsi_init_free(dma_malloc_pages[i], PAGE_SIZE);
+ if (dma_malloc_pages)
+ scsi_init_free((char *) dma_malloc_pages,
+ (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages));
+ dma_malloc_pages = NULL;
+ if (dma_malloc_freelist)
+ scsi_init_free((char *) dma_malloc_freelist,
+ (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_freelist));
+ dma_malloc_freelist = NULL;
+ dma_sectors = 0;
+ dma_free_sectors = 0;
+ return;
+ }
+ /* Next, check to see if we need to extend the DMA buffer pool */
+
+ new_dma_sectors = 2*SECTORS_PER_PAGE; /* Base value we use */
+
+ if (high_memory-1 > ISA_DMA_THRESHOLD)
+ scsi_need_isa_bounce_buffers = 1;
+ else
+ scsi_need_isa_bounce_buffers = 0;
+
+ if (scsi_devicelist)
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ new_dma_sectors += SECTORS_PER_PAGE; /* Increment for each host */
+
+ for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) {
+ host = SDpnt->host;
+
+ /*
+ * sd and sr drivers allocate scatterlists.
+ * The sr driver may additionally allocate, per command, one 2048-byte
+ * or two 1024-byte extra buffers (2k sector size with a 1k filesystem).
+ * The sg driver allocates buffers smaller than 4k.
+ * The st driver does not need buffers from the dma pool.
+ * Estimate a 4k buffer per command for devices of unknown type
+ * (arguably this should panic).
+ */
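+ /*
+ * Worked example (illustrative figures, not from the original source):
+ * for a TYPE_DISK device on a host with sg_tablesize = 32 and a queue
+ * depth of 2, sizeof(struct scatterlist) is 12 bytes on i386, so
+ * (32 * 12 + 511) >> 9 = 1 sector per command, i.e. 2 extra sectors are
+ * added to new_dma_sectors for this device.
+ */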
+ if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
+ SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
+ new_dma_sectors += ((host->sg_tablesize *
+ sizeof(struct scatterlist) + 511) >> 9) *
+ SDpnt->queue_depth;
+ if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
+ new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
+ }
+ else if (SDpnt->type == TYPE_SCANNER ||
+ SDpnt->type == TYPE_PROCESSOR ||
+ SDpnt->type == TYPE_MEDIUM_CHANGER) {
+ new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
+ }
+ else {
+ if (SDpnt->type != TYPE_TAPE) {
+ printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
+ new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
+ }
+ }
+
+ if(host->unchecked_isa_dma &&
+ scsi_need_isa_bounce_buffers &&
+ SDpnt->type != TYPE_TAPE) {
+ new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
+ SDpnt->queue_depth;
+ new_need_isa_buffer++;
+ }
+ }
+
+#ifdef DEBUG_INIT
+ printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
+#endif
+
+ /* Round up to a multiple of 16 sectors; this also limits the pool to 32MB: */
+ new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
+
+ /*
+ * We never shrink the buffers - this leads to
+ * race conditions that I would rather not even think
+ * about right now.
+ */
+ if( new_dma_sectors < dma_sectors )
+ new_dma_sectors = dma_sectors;
+
+ if (new_dma_sectors)
+ {
+ size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ new_dma_malloc_freelist = (FreeSectorBitmap *) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(new_dma_malloc_freelist, 0, size);
+
+ size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(*new_dma_malloc_pages);
+ new_dma_malloc_pages = (unsigned char **) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(new_dma_malloc_pages, 0, size);
+ }
+
+ /*
+ * If we need more buffers, expand the list.
+ */
+ if( new_dma_sectors > dma_sectors ) {
+ for(i=dma_sectors / SECTORS_PER_PAGE; i< new_dma_sectors / SECTORS_PER_PAGE; i++)
+ new_dma_malloc_pages[i] = (unsigned char *)
+ scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
+ }
+
+ /* When we modify the actual DMA lists, we need to
+ * protect things
+ */
+ save_flags(flags);
+ cli();
+ if (dma_malloc_freelist)
+ {
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
+ scsi_init_free((char *) dma_malloc_freelist, size);
+ }
+ dma_malloc_freelist = new_dma_malloc_freelist;
+
+ if (dma_malloc_pages)
+ {
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages);
+ memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
+ scsi_init_free((char *) dma_malloc_pages, size);
+ }
+
+ dma_free_sectors += new_dma_sectors - dma_sectors;
+ dma_malloc_pages = new_dma_malloc_pages;
+ dma_sectors = new_dma_sectors;
+ need_isa_buffer = new_need_isa_buffer;
+ restore_flags(flags);
+
+#ifdef DEBUG_INIT
+ printk("resize_dma_pool: dma free sectors = %d\n", dma_free_sectors);
+ printk("resize_dma_pool: dma sectors = %d\n", dma_sectors);
+ printk("resize_dma_pool: need isa buffers = %d\n", need_isa_buffer);
+#endif
+}
+
+#ifdef CONFIG_MODULES /* a big #ifdef block... */
+
+/*
+ * This entry point should be called by a loadable module if it is trying
+ * to add a low level scsi driver to the system.
+ */
+static int scsi_register_host(Scsi_Host_Template * tpnt)
+{
+ int pcount;
+ struct Scsi_Host * shpnt;
+ Scsi_Device * SDpnt;
+ struct Scsi_Device_Template * sdtpnt;
+ const char * name;
+
+ if (tpnt->next || !tpnt->detect) return 1;/* Must be already loaded, or
+ * no detect routine available
+ */
+ pcount = next_scsi_host;
+ if ((tpnt->present = tpnt->detect(tpnt)))
+ {
+ if(pcount == next_scsi_host) {
+ if(tpnt->present > 1) {
+ printk("Failure to register low-level scsi driver");
+ scsi_unregister_host(tpnt);
+ return 1;
+ }
+ /* The low-level driver did not register a host of its own,
+ * so register one on its behalf now.
+ */
+ scsi_register(tpnt,0);
+ }
+ tpnt->next = scsi_hosts; /* Add to the linked list */
+ scsi_hosts = tpnt;
+
+ /* Add the new driver to /proc/scsi */
+#if CONFIG_PROC_FS
+ build_proc_dir_entries(tpnt);
+#endif
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ if(shpnt->hostt == tpnt)
+ {
+ if(tpnt->info)
+ name = tpnt->info(shpnt);
+ else
+ name = tpnt->name;
+ printk ("scsi%d : %s\n", /* And print a little message */
+ shpnt->host_no, name);
+ }
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+ scsi_make_blocked_list();
+
+ /* The next step is to call scan_scsis here. This generates the
+ * Scsi_Device entries.
+ */
+
+ for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
+ if(shpnt->hostt == tpnt) {
+ scan_scsis(shpnt,0,0,0,0);
+ if (shpnt->select_queue_depths != NULL)
+ (shpnt->select_queue_depths)(shpnt, scsi_devices);
+ }
+
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
+
+ /* Next we create the Scsi_Cmnd structures for this host */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ if(SDpnt->host->hostt == tpnt)
+ {
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
+ if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
+ }
+
+ /*
+ * Now that we have all of the devices, resize the DMA pool,
+ * as required. */
+ resize_dma_pool();
+
+
+ /* This does any final handling that is required. */
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->finish && sdtpnt->nr_dev)
+ (*sdtpnt->finish)();
+ }
+
+#if defined(USE_STATIC_SCSI_MEMORY)
+ printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
+ (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
+ (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
+ (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
+#endif
+
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Similarly, this entry point should be called by a loadable module if it
+ * is trying to remove a low level scsi driver from the system.
+ */
+static void scsi_unregister_host(Scsi_Host_Template * tpnt)
+{
+ Scsi_Host_Template * SHT, *SHTp;
+ Scsi_Device *sdpnt, * sdppnt, * sdpnt1;
+ Scsi_Cmnd * SCpnt;
+ unsigned long flags;
+ struct Scsi_Device_Template * sdtpnt;
+ struct Scsi_Host * shpnt, *sh1;
+ int pcount;
+
+ /* First verify that this host adapter is completely free with no pending
+ * commands */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt && sdpnt->host->hostt->usage_count
+ && *sdpnt->host->hostt->usage_count) return;
+
+ for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+ {
+ if (shpnt->hostt != tpnt) continue;
+ for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ save_flags(flags);
+ cli();
+ if(SCpnt->request.rq_status != RQ_INACTIVE) {
+ restore_flags(flags);
+ for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ if(SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
+ SCpnt->request.rq_status = RQ_INACTIVE;
+ printk("Device busy???\n");
+ return;
+ }
+ SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
+ restore_flags(flags);
+ }
+ }
+ /* Next we detach the high level drivers from the Scsi_Device structures */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt)
+ {
+ for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
+ if(sdtpnt->detach) (*sdtpnt->detach)(sdpnt);
+ /* If something still attached, punt */
+ if (sdpnt->attached) {
+ printk("Attached usage count = %d\n", sdpnt->attached);
+ return;
+ }
+ }
+
+ /* Next we free up the Scsi_Cmnd structures for this host */
+
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next)
+ if(sdpnt->host->hostt == tpnt)
+ while (sdpnt->host->host_queue) {
+ SCpnt = sdpnt->host->host_queue->next;
+ scsi_init_free((char *) sdpnt->host->host_queue, sizeof(Scsi_Cmnd));
+ sdpnt->host->host_queue = SCpnt;
+ if (SCpnt) SCpnt->prev = NULL;
+ sdpnt->has_cmdblocks = 0;
+ }
+
+ /* Next free up the Scsi_Device structures for this host */
+
+ sdppnt = NULL;
+ for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt1)
+ {
+ sdpnt1 = sdpnt->next;
+ if (sdpnt->host->hostt == tpnt) {
+ if (sdppnt)
+ sdppnt->next = sdpnt->next;
+ else
+ scsi_devices = sdpnt->next;
+ scsi_init_free((char *) sdpnt, sizeof (Scsi_Device));
+ } else
+ sdppnt = sdpnt;
+ }
+
+ /* Next we go through and remove the instances of the individual hosts
+ * that were detected */
+
+ shpnt = scsi_hostlist;
+ while(shpnt) {
+ sh1 = shpnt->next;
+ if(shpnt->hostt == tpnt) {
+ if(shpnt->loaded_as_module) {
+ pcount = next_scsi_host;
+ /* Remove the /proc/scsi directory entry */
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(tpnt->proc_dir,
+ shpnt->host_no + PROC_SCSI_FILE);
+#endif
+ if(tpnt->release)
+ (*tpnt->release)(shpnt);
+ else {
+ /* This is the default case for the release function.
+ * It should do the right thing for most correctly
+ * written host adapters.
+ */
+ if (shpnt->irq) free_irq(shpnt->irq, NULL);
+ if (shpnt->dma_channel != 0xff) free_dma(shpnt->dma_channel);
+ if (shpnt->io_port && shpnt->n_io_port)
+ release_region(shpnt->io_port, shpnt->n_io_port);
+ }
+ if(pcount == next_scsi_host) scsi_unregister(shpnt);
+ tpnt->present--;
+ }
+ }
+ shpnt = sh1;
+ }
+
+ /*
+ * If there are absolutely no more hosts left, it is safe
+ * to completely nuke the DMA pool. The resize operation will
+ * do the right thing and free everything.
+ */
+ if( !scsi_devices )
+ resize_dma_pool();
+
+ printk ("scsi : %d host%s.\n", next_scsi_host,
+ (next_scsi_host == 1) ? "" : "s");
+
+#if defined(USE_STATIC_SCSI_MEMORY)
+ printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
+ (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
+ (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
+ (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
+#endif
+
+ scsi_make_blocked_list();
+
+ /* There were some hosts that were loaded at boot time, so we cannot
+ do any more than this */
+ if (tpnt->present) return;
+
+ /* OK, this is the very last step. Remove this host adapter from the
+ linked list. */
+ for(SHTp=NULL, SHT=scsi_hosts; SHT; SHTp=SHT, SHT=SHT->next)
+ if(SHT == tpnt) {
+ if(SHTp)
+ SHTp->next = SHT->next;
+ else
+ scsi_hosts = SHT->next;
+ SHT->next = NULL;
+ break;
+ }
+
+ /* Rebuild the /proc/scsi directory entries */
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(tpnt->proc_dir, tpnt->proc_dir->low_ino);
+#endif
+ MOD_DEC_USE_COUNT;
+}
+
+/*
+ * This entry point should be called by a loadable module if it is trying
+ * to add a high level scsi driver to the system.
+ */
+static int scsi_register_device_module(struct Scsi_Device_Template * tpnt)
+{
+ Scsi_Device * SDpnt;
+
+ if (tpnt->next) return 1;
+
+ scsi_register_device(tpnt);
+ /*
+ * First scan the devices that we know about, and see if we notice them.
+ */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ if(tpnt->detect) SDpnt->attached += (*tpnt->detect)(SDpnt);
+
+ /*
+ * If any of the devices would match this driver, then perform the
+ * init function.
+ */
+ if(tpnt->init && tpnt->dev_noticed)
+ if ((*tpnt->init)()) return 1;
+
+ /*
+ * Now actually connect the devices to the new driver.
+ */
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ {
+ if(tpnt->attach) (*tpnt->attach)(SDpnt);
+ /*
+ * If this driver attached to the device and the device does not
+ * yet have command blocks, allocate them now.
+ */
+ if(SDpnt->attached && SDpnt->has_cmdblocks == 0)
+ scsi_build_commandblocks(SDpnt);
+ }
+
+ /*
+ * This does any final handling that is required.
+ */
+ if(tpnt->finish && tpnt->nr_dev) (*tpnt->finish)();
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int scsi_unregister_device(struct Scsi_Device_Template * tpnt)
+{
+ Scsi_Device * SDpnt;
+ Scsi_Cmnd * SCpnt;
+ struct Scsi_Device_Template * spnt;
+ struct Scsi_Device_Template * prev_spnt;
+
+ /*
+ * If we are busy, this is not going to fly.
+ */
+ if( *tpnt->usage_count != 0) return 0;
+ /*
+ * Next, detach the devices from the driver.
+ */
+
+ for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next)
+ {
+ if(tpnt->detach) (*tpnt->detach)(SDpnt);
+ if(SDpnt->attached == 0)
+ {
+ /*
+ * Nobody is using this device any more. Free all of the
+ * command structures.
+ */
+ for(SCpnt = SDpnt->host->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ if(SCpnt->device == SDpnt)
+ {
+ if(SCpnt->prev != NULL)
+ SCpnt->prev->next = SCpnt->next;
+ if(SCpnt->next != NULL)
+ SCpnt->next->prev = SCpnt->prev;
+ if(SCpnt == SDpnt->host->host_queue)
+ SDpnt->host->host_queue = SCpnt->next;
+ scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
+ }
+ }
+ SDpnt->has_cmdblocks = 0;
+ }
+ }
+ /*
+ * Extract the template from the linked list.
+ */
+ spnt = scsi_devicelist;
+ prev_spnt = NULL;
+ while(spnt != tpnt)
+ {
+ prev_spnt = spnt;
+ spnt = spnt->next;
+ }
+ if(prev_spnt == NULL)
+ scsi_devicelist = tpnt->next;
+ else
+ prev_spnt->next = spnt->next;
+
+ MOD_DEC_USE_COUNT;
+ /*
+ * Final cleanup for the driver is done in the driver sources in the
+ * cleanup function.
+ */
+ return 0;
+}
+
+
+int scsi_register_module(int module_type, void * ptr)
+{
+ switch(module_type){
+ case MODULE_SCSI_HA:
+ return scsi_register_host((Scsi_Host_Template *) ptr);
+
+ /* Load upper level device handler of some kind */
+ case MODULE_SCSI_DEV:
+#ifdef CONFIG_KERNELD
+ if (scsi_hosts == NULL)
+ request_module("scsi_hostadapter");
+#endif
+ return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
+ /* The rest of these are not yet implemented */
+
+ /* Load constants.o */
+ case MODULE_SCSI_CONST:
+
+ /* Load specialized ioctl handler for some device. Intended for
+ * cdroms that have non-SCSI2 audio command sets. */
+ case MODULE_SCSI_IOCTL:
+
+ default:
+ return 1;
+ }
+}
+
+void scsi_unregister_module(int module_type, void * ptr)
+{
+ switch(module_type) {
+ case MODULE_SCSI_HA:
+ scsi_unregister_host((Scsi_Host_Template *) ptr);
+ break;
+ case MODULE_SCSI_DEV:
+ scsi_unregister_device((struct Scsi_Device_Template *) ptr);
+ break;
+ /* The rest of these are not yet implemented. */
+ case MODULE_SCSI_CONST:
+ case MODULE_SCSI_IOCTL:
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+#endif /* CONFIG_MODULES */
+
+#ifdef DEBUG_TIMEOUT
+static void
+scsi_dump_status(void)
+{
+ int i;
+ struct Scsi_Host * shpnt;
+ Scsi_Cmnd * SCpnt;
+ printk("Dump of scsi parameters:\n");
+ i = 0;
+ for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
+ for(SCpnt=shpnt->host_queue; SCpnt; SCpnt = SCpnt->next)
+ {
+ /* (0) 0:0:0:0 (802 123434 8 8 0) (3 3 2) (%d %d %d) %d %x */
+ printk("(%d) %d:%d:%d:%d (%s %ld %ld %ld %d) (%d %d %x) (%d %d %d) %x %x %x\n",
+ i++, SCpnt->host->host_no,
+ SCpnt->channel,
+ SCpnt->target,
+ SCpnt->lun,
+ kdevname(SCpnt->request.rq_dev),
+ SCpnt->request.sector,
+ SCpnt->request.nr_sectors,
+ SCpnt->request.current_nr_sectors,
+ SCpnt->use_sg,
+ SCpnt->retries,
+ SCpnt->allowed,
+ SCpnt->flags,
+ SCpnt->timeout_per_command,
+ SCpnt->timeout,
+ SCpnt->internal_timeout,
+ SCpnt->cmnd[0],
+ SCpnt->sense_buffer[2],
+ SCpnt->result);
+ }
+ printk("wait_for_request = %p\n", wait_for_request);
+ /* Now dump the request lists for each block device */
+ printk("Dump of pending block device requests\n");
+ for(i=0; i<MAX_BLKDEV; i++)
+ if(blk_dev[i].current_request)
+ {
+ struct request * req;
+ printk("%d: ", i);
+ req = blk_dev[i].current_request;
+ while(req) {
+ printk("(%s %d %ld %ld %ld) ",
+ kdevname(req->rq_dev),
+ req->cmd,
+ req->sector,
+ req->nr_sectors,
+ req->current_nr_sectors);
+ req = req->next;
+ }
+ printk("\n");
+ }
+}
+#endif
+
+#ifdef MODULE
+
+int init_module(void) {
+ unsigned long size;
+
+ /*
+ * This makes /proc/scsi visible.
+ */
+#if CONFIG_PROC_FS
+ dispatch_scsi_info_ptr = dispatch_scsi_info;
+#endif
+
+ timer_table[SCSI_TIMER].fn = scsi_main_timeout;
+ timer_table[SCSI_TIMER].expires = 0;
+ register_symtab(&scsi_symbol_table);
+ scsi_loadable_module_flag = 1;
+
+ /* Register the /proc/scsi/scsi entry */
+#if CONFIG_PROC_FS
+ proc_scsi_register(0, &proc_scsi_scsi);
+#endif
+
+
+ dma_sectors = PAGE_SIZE / SECTOR_SIZE;
+ dma_free_sectors= dma_sectors;
+ /*
+ * Set up a minimal DMA buffer list - this will be used during scan_scsis
+ * in some cases.
+ */
+
+ /* One bit per sector to indicate free/busy */
+ size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
+ dma_malloc_freelist = (unsigned char *) scsi_init_malloc(size, GFP_ATOMIC);
+ memset(dma_malloc_freelist, 0, size);
+
+ /* One pointer per page for the page list */
+ dma_malloc_pages = (unsigned char **)
+ scsi_init_malloc((dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages), GFP_ATOMIC);
+ dma_malloc_pages[0] = (unsigned char *)
+ scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
+ return 0;
+}
+
+void cleanup_module( void)
+{
+#if CONFIG_PROC_FS
+ proc_scsi_unregister(0, PROC_SCSI_SCSI);
+
+ /* No, we're not here anymore. Don't show the /proc/scsi files. */
+ dispatch_scsi_info_ptr = 0L;
+#endif
+
+ /*
+ * Free up the DMA pool.
+ */
+ resize_dma_pool();
+
+ timer_table[SCSI_TIMER].fn = NULL;
+ timer_table[SCSI_TIMER].expires = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsi.h b/linux/src/drivers/scsi/scsi.h
new file mode 100644
index 0000000..13052ba
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi.h
@@ -0,0 +1,650 @@
+/*
+ * scsi.h Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ * generic SCSI package header file by
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+
+#ifndef _SCSI_H
+#define _SCSI_H
+
+/*
+ * Some of the public constants are being moved to this file.
+ * We include it here so that what came from where is transparent.
+ */
+#include <scsi/scsi.h>
+
+#include <linux/random.h>
+
+
+/*
+ * Some defs, in case these are not defined elsewhere.
+ */
+#ifndef TRUE
+# define TRUE 1
+#endif
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+
+extern void scsi_make_blocked_list(void);
+extern volatile int in_scan_scsis;
+extern const unsigned char scsi_command_size[8];
+#define COMMAND_SIZE(opcode) scsi_command_size[((opcode) >> 5) & 7]
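+/*
+ * For example (illustrative, standard SCSI opcode groups): a group 0 opcode
+ * such as TEST UNIT READY (0x00) gives COMMAND_SIZE() == 6, while a group 1
+ * opcode such as READ(10) (0x28) gives 10, per the scsi_command_size table.
+ */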
+#define IDENTIFY_BASE 0x80
+#define IDENTIFY(can_disconnect, lun) (IDENTIFY_BASE |\
+ ((can_disconnect) ? 0x40 : 0) |\
+ ((lun) & 0x07))
+#define MAX_SCSI_DEVICE_CODE 10
+extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
+
+
+
+/*
+ * The return of the status word will be in the following format:
+ * The low byte is the status returned by the SCSI command,
+ * with vendor specific bits masked.
+ *
+ * The next byte is the message which followed the SCSI status.
+ * This allows a stos to be used, since the Intel is a little
+ * endian machine.
+ *
+ * The next byte is a host return code, one of the DID_* values below;
+ * the top byte is a driver return code (see the DRIVER_* codes).
+ *
+ * I.e.
+ * lsb msb
+ * status msg host code
+ *
+ * These are errors returned by OUR driver, NOT SCSI message codes. They
+ * may be OR'd with the SCSI message passed back to the driver, if any.
+ */
+
+
+#define DID_OK 0x00 /* NO error */
+#define DID_NO_CONNECT 0x01 /* Couldn't connect before timeout period */
+#define DID_BUS_BUSY 0x02 /* BUS stayed busy through time out period */
+#define DID_TIME_OUT 0x03 /* TIMED OUT for other reason */
+#define DID_BAD_TARGET 0x04 /* BAD target. */
+#define DID_ABORT 0x05 /* Told to abort for some other reason */
+#define DID_PARITY 0x06 /* Parity error */
+#define DID_ERROR 0x07 /* Internal error */
+#define DID_RESET 0x08 /* Reset by somebody. */
+#define DID_BAD_INTR 0x09 /* Got an interrupt we weren't expecting. */
+#define DRIVER_OK 0x00 /* Driver status */
+
+/*
+ * These indicate the error that occurred, and what is available.
+ */
+
+#define DRIVER_BUSY 0x01
+#define DRIVER_SOFT 0x02
+#define DRIVER_MEDIA 0x03
+#define DRIVER_ERROR 0x04
+
+#define DRIVER_INVALID 0x05
+#define DRIVER_TIMEOUT 0x06
+#define DRIVER_HARD 0x07
+#define DRIVER_SENSE 0x08
+
+#define SUGGEST_RETRY 0x10
+#define SUGGEST_ABORT 0x20
+#define SUGGEST_REMAP 0x30
+#define SUGGEST_DIE 0x40
+#define SUGGEST_SENSE 0x80
+#define SUGGEST_IS_OK 0xff
+
+#define DRIVER_MASK 0x0f
+#define SUGGEST_MASK 0xf0
+
+#define MAX_COMMAND_SIZE 12
+
+/*
+ * SCSI command sets
+ */
+
+#define SCSI_UNKNOWN 0
+#define SCSI_1 1
+#define SCSI_1_CCS 2
+#define SCSI_2 3
+
+/*
+ * Every SCSI command starts with a one byte OP-code.
+ * The next byte's high three bits are the LUN of the
+ * device. Any multi-byte quantities are stored high byte
+ * first, and may have a 5 bit MSB in the same byte
+ * as the LUN.
+ */
+
+/*
+ * Manufacturers list
+ */
+
+#define SCSI_MAN_UNKNOWN 0
+#define SCSI_MAN_NEC 1
+#define SCSI_MAN_TOSHIBA 2
+#define SCSI_MAN_NEC_OLDCDR 3
+#define SCSI_MAN_SONY 4
+#define SCSI_MAN_PIONEER 5
+
+/*
+ * As the scsi do command functions are intelligent, and may need to
+ * redo a command, we need to keep track of the last command
+ * executed on each one.
+ */
+
+#define WAS_RESET 0x01
+#define WAS_TIMEDOUT 0x02
+#define WAS_SENSE 0x04
+#define IS_RESETTING 0x08
+#define IS_ABORTING 0x10
+#define ASKED_FOR_SENSE 0x20
+
+/*
+ * The scsi_device struct contains what we know about each given scsi
+ * device.
+ */
+
+typedef struct scsi_device {
+ struct scsi_device * next; /* Used for linked list */
+
+ unsigned char id, lun, channel;
+
+ unsigned int manufacturer; /* Manufacturer of device, for using
+ * vendor-specific cmd's */
+ int attached; /* # of high level drivers attached to
+ * this */
+ int access_count; /* Count of open channels/mounts */
+ struct wait_queue * device_wait;/* Used to wait if device is busy */
+ struct Scsi_Host * host;
+ void (*scsi_request_fn)(void); /* Used to jumpstart things after an
+ * ioctl */
+ struct scsi_cmnd *device_queue; /* queue of SCSI Command structures */
+ void *hostdata; /* available to low-level driver */
+ char type;
+ char scsi_level;
+ char vendor[8], model[16], rev[4];
+ unsigned char current_tag; /* current tag */
+ unsigned char sync_min_period; /* Not less than this period */
+ unsigned char sync_max_offset; /* Not greater than this offset */
+ unsigned char queue_depth; /* How deep a queue to use */
+
+ unsigned writeable:1;
+ unsigned removable:1;
+ unsigned random:1;
+ unsigned has_cmdblocks:1;
+ unsigned changed:1; /* Data invalid due to media change */
+ unsigned busy:1; /* Used to prevent races */
+ unsigned lockable:1; /* Able to prevent media removal */
+ unsigned borken:1; /* Tell the Seagate driver to be
+ * painfully slow on this device */
+ unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
+ unsigned tagged_queue:1; /* SCSI-II tagged queuing enabled */
+ unsigned disconnect:1; /* can disconnect */
+ unsigned soft_reset:1; /* Uses soft reset option */
+ unsigned sync:1; /* Negotiate for sync transfers */
+ unsigned single_lun:1; /* Indicates we should only allow I/O to
+ * one of the luns for the device at a
+ * time. */
+ unsigned was_reset:1; /* There was a bus reset on the bus for
+ * this device */
+ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
+ * because we did a bus reset. */
+} Scsi_Device;
+
+/*
+ * Use these to separate status msg and our bytes
+ */
+
+#define status_byte(result) (((result) >> 1) & 0x1f)
+#define msg_byte(result) (((result) >> 8) & 0xff)
+#define host_byte(result) (((result) >> 16) & 0xff)
+#define driver_byte(result) (((result) >> 24) & 0xff)
+#define suggestion(result) (driver_byte(result) & SUGGEST_MASK)
+
+#define sense_class(sense) (((sense) >> 4) & 0x7)
+#define sense_error(sense) ((sense) & 0xf)
+#define sense_valid(sense) ((sense) & 0x80)
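+
+/*
+ * Decoding sketch (illustrative, not part of the original header): a result
+ * word of ((DID_TIME_OUT << 16) | (DRIVER_SENSE << 24)) reports a timed-out
+ * command with sense data available, so host_byte(result) == DID_TIME_OUT,
+ * driver_byte(result) == DRIVER_SENSE, and status_byte(result) and
+ * msg_byte(result) are both 0.
+ */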
+
+/*
+ * These are the SCSI devices available on the system.
+ */
+
+extern Scsi_Device * scsi_devices;
+
+extern struct hd_struct * sd;
+
+#if defined(MAJOR_NR) && (MAJOR_NR == SCSI_DISK_MAJOR)
+extern struct hd_struct * sd;
+#endif
+
+/*
+ * Initializes all SCSI devices. This scans all scsi busses.
+ */
+
+extern int scsi_dev_init (void);
+
+struct scatterlist {
+ char * address; /* Location data is to be transferred to */
+ char * alt_address; /* Location of the actual buffer if address is a
+ * dma indirect (bounce) buffer. NULL otherwise */
+ unsigned int length;
+};
+
+#ifdef __alpha__
+# define ISA_DMA_THRESHOLD (~0UL)
+#else
+# define ISA_DMA_THRESHOLD (0x00ffffff)
+#endif
+#define CONTIGUOUS_BUFFERS(X,Y) ((X->b_data+X->b_size) == Y->b_data)
+
+
+/*
+ * These are the return codes for the abort and reset functions. The mid-level
+ * code uses these to decide what to do next. Each of the low level abort
+ * and reset functions must correctly indicate what it has done.
+ * The descriptions are written from the point of view of the mid-level code,
+ * so that the return code is telling the mid-level drivers exactly what
+ * the low level driver has already done, and what remains to be done.
+ */
+
+/* We did not do anything.
+ * Wait some more for this command to complete, and if this does not work,
+ * try something more serious. */
+#define SCSI_ABORT_SNOOZE 0
+
+/* This means that we were able to abort the command. We have already
+ * called the mid-level done function, and do not expect an interrupt that
+ * will lead to another call to the mid-level done function for this command */
+#define SCSI_ABORT_SUCCESS 1
+
+/* We called for an abort of this command, and we should get an interrupt
+ * when this succeeds. Thus we should not restore the timer for this
+ * command in the mid-level abort function. */
+#define SCSI_ABORT_PENDING 2
+
+/* Unable to abort - command is currently on the bus. Grin and bear it. */
+#define SCSI_ABORT_BUSY 3
+
+/* The command is not active in the low level code. Command probably
+ * finished. */
+#define SCSI_ABORT_NOT_RUNNING 4
+
+/* Something went wrong. The low level driver will indicate the correct
+ * error condition when it calls scsi_done, so the mid-level abort function
+ * can simply wait until this comes through */
+#define SCSI_ABORT_ERROR 5
+
+/* We do not know how to reset the bus, or we do not want to. Bummer.
+ * Anyway, just wait a little more for the command in question, and hope that
+ * it eventually finishes. If it never finishes, the SCSI device could
+ * hang, so use this with caution. */
+#define SCSI_RESET_SNOOZE 0
+
+/* We do not know how to reset the bus, or we do not want to. Bummer.
+ * We have given up on this ever completing. The mid-level code will
+ * request sense information to decide how to proceed from here. */
+#define SCSI_RESET_PUNT 1
+
+/* This means that we were able to reset the bus. We have restarted all of
+ * the commands that should be restarted, and we should be able to continue
+ * on normally from here. We do not expect any interrupts that will return
+ * DID_RESET to any of the other commands in the host_queue, and the mid-level
+ * code does not need to do anything special to keep the commands alive.
+ * If a hard reset was performed then all outstanding commands on the
+ * bus have been restarted. */
+#define SCSI_RESET_SUCCESS 2
+
+/* We called for a reset of this bus, and we should get an interrupt
+ * when this succeeds. Each command should get its own status
+ * passed up to scsi_done, but this has not happened yet.
+ * If a hard reset was performed, then we expect an interrupt
+ * for *each* of the outstanding commands that will have the
+ * effect of restarting the commands.
+ */
+#define SCSI_RESET_PENDING 3
+
+/* We did a reset, but do not expect an interrupt to signal DID_RESET.
+ * This tells the upper level code to request the sense info, and this
+ * should keep the command alive. */
+#define SCSI_RESET_WAKEUP 4
+
+/* The command is not active in the low level code. Command probably
+ finished. */
+#define SCSI_RESET_NOT_RUNNING 5
+
+/* Something went wrong, and we do not know how to fix it. */
+#define SCSI_RESET_ERROR 6
+
+#define SCSI_RESET_SYNCHRONOUS 0x01
+#define SCSI_RESET_ASYNCHRONOUS 0x02
+#define SCSI_RESET_SUGGEST_BUS_RESET 0x04
+#define SCSI_RESET_SUGGEST_HOST_RESET 0x08
+/*
+ * This is a bitmask that is ored with one of the above codes.
+ * It tells the mid-level code that we did a hard reset.
+ */
+#define SCSI_RESET_BUS_RESET 0x100
+/*
+ * This is a bitmask that is ored with one of the above codes.
+ * It tells the mid-level code that we did a host adapter reset.
+ */
+#define SCSI_RESET_HOST_RESET 0x200
+/*
+ * Used to mask off bits and to obtain the basic action that was
+ * performed.
+ */
+#define SCSI_RESET_ACTION 0xff
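+
+/*
+ * For example (illustrative): a low level driver that performed a hard bus
+ * reset and restarted the outstanding commands itself would return
+ * (SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET); masking that value with
+ * SCSI_RESET_ACTION recovers the basic SCSI_RESET_SUCCESS action.
+ */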
+
+void * scsi_malloc(unsigned int);
+int scsi_free(void *, unsigned int);
+extern unsigned int dma_free_sectors; /* How much room do we have left */
+extern unsigned int need_isa_buffer; /* True if some devices need indirection
+ * buffers */
+
+/*
+ * The Scsi_Cmnd structure is used by scsi.c internally, and for communication
+ * with low level drivers that support multiple outstanding commands.
+ */
+typedef struct scsi_pointer {
+ char * ptr; /* data pointer */
+ int this_residual; /* left in this buffer */
+ struct scatterlist *buffer; /* which buffer */
+ int buffers_residual; /* how many buffers left */
+
+ volatile int Status;
+ volatile int Message;
+ volatile int have_data_in;
+ volatile int sent_command;
+ volatile int phase;
+} Scsi_Pointer;
+
+typedef struct scsi_cmnd {
+ struct Scsi_Host * host;
+ Scsi_Device * device;
+ unsigned char target, lun, channel;
+ unsigned char cmd_len;
+ unsigned char old_cmd_len;
+ struct scsi_cmnd *next, *prev, *device_next, *reset_chain;
+
+ /* These elements define the operation we are about to perform */
+ unsigned char cmnd[12];
+ unsigned request_bufflen; /* Actual request size */
+
+ void * request_buffer; /* Actual requested buffer */
+
+ /* These elements define the operation we ultimately want to perform */
+ unsigned char data_cmnd[12];
+ unsigned short old_use_sg; /* We save use_sg here when requesting
+ * sense info */
+ unsigned short use_sg; /* Number of pieces of scatter-gather */
+ unsigned short sglist_len; /* size of malloc'd scatter-gather list */
+ unsigned short abort_reason;/* If the mid-level code requests an
+ * abort, this is the reason. */
+ unsigned bufflen; /* Size of data buffer */
+ void *buffer; /* Data buffer */
+
+ unsigned underflow; /* Return error if less than this amount is
+ * transferred */
+
+ unsigned transfersize; /* How much we are guaranteed to transfer with
+ * each SCSI transfer (i.e., between disconnect /
+ * reconnects). Probably == sector size */
+
+
+ struct request request; /* A copy of the command we are working on */
+
+ unsigned char sense_buffer[16]; /* Sense for this command, if needed */
+
+ /*
+ A SCSI Command is assigned a nonzero serial_number when internal_cmnd
+ passes it to the driver's queue command function. The serial_number
+ is cleared when scsi_done is entered indicating that the command has
+ been completed. If a timeout occurs, the serial number at the moment
+ of timeout is copied into serial_number_at_timeout. By subsequently
+ comparing the serial_number and serial_number_at_timeout fields
+ during abort or reset processing, we can detect whether the command
+ has already completed. This also detects cases where the command has
+ completed and the SCSI Command structure has already been reused
+ for another command, so that we can avoid incorrectly aborting or
+ resetting the new command.
+ */
+
+ unsigned long serial_number;
+ unsigned long serial_number_at_timeout;
+
+ int retries;
+ int allowed;
+ int timeout_per_command, timeout_total, timeout;
+
+ /*
+ * We handle the timeout differently if it happens when a reset,
+ * abort, etc are in process.
+ */
+ unsigned volatile char internal_timeout;
+
+ unsigned flags;
+
+ /* These variables are for the cdrom only. Once we have variable size
+ * buffers in the buffer cache, they will go away. */
+ int this_count;
+ /* End of special cdrom variables */
+
+ /* Low-level done function - can be used by low-level driver to point
+ * to completion function. Not used by mid/upper level code. */
+ void (*scsi_done)(struct scsi_cmnd *);
+ void (*done)(struct scsi_cmnd *); /* Mid-level done function */
+
+ /*
+ * The following fields can be written to by the host specific code.
+ * Everything else should be left alone.
+ */
+
+ Scsi_Pointer SCp; /* Scratchpad used by some host adapters */
+
+ unsigned char * host_scribble; /* The host adapter is allowed to
+ * call scsi_malloc and get some memory
+ * and hang it here. The host adapter
+ * is also expected to call scsi_free
+ * to release this memory. (The memory
+ * obtained by scsi_malloc is guaranteed
+ * to be at an address < 16Mb). */
+
+ int result; /* Status code from lower level driver */
+
+ unsigned char tag; /* SCSI-II queued command tag */
+ unsigned long pid; /* Process ID, starts at 0 */
+} Scsi_Cmnd;
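+
+/*
+ * Sketch of the serial number check described above (illustrative, not part
+ * of the original header): abort or reset handling can detect that a command
+ * already completed, or was even reused, after its timeout fired:
+ *
+ *	if (SCpnt->serial_number != SCpnt->serial_number_at_timeout)
+ *		return SCSI_ABORT_NOT_RUNNING;	(the command already finished)
+ */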
+
+/*
+ * scsi_abort aborts the current command executing on the given host.
+ * The error code, if nonzero, is returned in the host byte; otherwise
+ * DID_ABORT is returned in the host byte.
+ */
+
+extern int scsi_abort (Scsi_Cmnd *, int code);
+
+extern void scsi_do_cmd (Scsi_Cmnd *, const void *cmnd ,
+ void *buffer, unsigned bufflen,
+ void (*done)(struct scsi_cmnd *),
+ int timeout, int retries);
+
+
+extern Scsi_Cmnd * allocate_device(struct request **, Scsi_Device *, int);
+
+extern Scsi_Cmnd * request_queueable(struct request *, Scsi_Device *);
+extern int scsi_reset (Scsi_Cmnd *, unsigned int);
+
+extern int max_scsi_hosts;
+
+extern void proc_print_scsidevice(Scsi_Device *, char *, int *, int);
+
+extern void print_command(unsigned char *);
+extern void print_sense(const char *, Scsi_Cmnd *);
+extern void print_driverbyte(int scsiresult);
+extern void print_hostbyte(int scsiresult);
+
+extern void scsi_mark_host_reset(struct Scsi_Host *Host);
+extern void scsi_mark_bus_reset(struct Scsi_Host *Host, int channel);
+
+#if defined(MAJOR_NR) && (MAJOR_NR != SCSI_TAPE_MAJOR)
+#include "hosts.h"
+
+static Scsi_Cmnd * end_scsi_request(Scsi_Cmnd * SCpnt, int uptodate, int sectors)
+{
+ struct request * req;
+ struct buffer_head * bh;
+
+ req = &SCpnt->request;
+ req->errors = 0;
+ if (!uptodate) {
+#if defined(MAJOR_NR) && (MAJOR_NR == SCSI_DISK_MAJOR)
+ printk(DEVICE_NAME " I/O error: dev %s, sector %lu, absolute sector %lu\n",
+ kdevname(req->rq_dev), req->sector,
+ req->sector + sd[MINOR(SCpnt->request.rq_dev)].start_sect);
+#else
+ printk(DEVICE_NAME " I/O error: dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+#endif
+ }
+
+ do {
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ req->nr_sectors -= bh->b_size >> 9;
+ req->sector += bh->b_size >> 9;
+ bh->b_reqnext = NULL;
+ /*
+ * This is our 'MD IO has finished' event handler.
+ * Note that b_state should be cached in a register
+ * anyway, so the overhead of this check is almost
+ * zero. But then, we never get OO for free :)
+ */
+ if (test_bit(BH_MD, &bh->b_state)) {
+ struct md_personality * pers=(struct md_personality *)bh->personality;
+ pers->end_request(bh,uptodate);
+ }
+ /*
+ * the normal (nonmirrored and no RAID5) case:
+ */
+ else {
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ }
+ sectors -= bh->b_size >> 9;
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_scsi_request: buffer-list destroyed\n");
+ }
+ }
+ }
+ } while(sectors && bh);
+ if (req->bh){
+ req->buffer = bh->b_data;
+ return SCpnt;
+ }
+ DEVICE_OFF(req->rq_dev);
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+
+ if (SCpnt->host->block) {
+ struct Scsi_Host * next;
+
+ for (next = SCpnt->host->block; next != SCpnt->host;
+ next = next->block)
+ wake_up(&next->host_wait);
+ }
+
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+ wake_up(&SCpnt->device->device_wait);
+ return NULL;
+}
+
+
+/* This is just like INIT_REQUEST, but we need to be aware of the fact
+ * that an interrupt may start another request, so we run this with interrupts
+ * turned off
+ */
+#define INIT_SCSI_REQUEST \
+ if (!CURRENT) { \
+ CLEAR_INTR; \
+ restore_flags(flags); \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed");\
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+#endif
+
+#ifdef MACH
+#define SCSI_SLEEP(QUEUE, CONDITION) { \
+ if (CONDITION) { \
+ struct wait_queue wait = { NULL, NULL}; \
+ add_wait_queue(QUEUE, &wait); \
+ for(;;) { \
+ if (CONDITION) { \
+ if (intr_count) \
+ panic("scsi: trying to call schedule() in interrupt" \
+ ", file %s, line %d.\n", __FILE__, __LINE__); \
+ schedule(); \
+ } \
+ else \
+ break; \
+ } \
+ remove_wait_queue(QUEUE, &wait);\
+ }; }
+#else /* !MACH */
+#define SCSI_SLEEP(QUEUE, CONDITION) { \
+ if (CONDITION) { \
+ struct wait_queue wait = { current, NULL}; \
+ add_wait_queue(QUEUE, &wait); \
+ for(;;) { \
+ current->state = TASK_UNINTERRUPTIBLE; \
+ if (CONDITION) { \
+ if (intr_count) \
+ panic("scsi: trying to call schedule() in interrupt" \
+ ", file %s, line %d.\n", __FILE__, __LINE__); \
+ schedule(); \
+ } \
+ else \
+ break; \
+ } \
+ remove_wait_queue(QUEUE, &wait);\
+ current->state = TASK_RUNNING; \
+ }; }
+#endif /* !MACH */
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsi_ioctl.c b/linux/src/drivers/scsi/scsi_ioctl.c
new file mode 100644
index 0000000..7691859
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi_ioctl.c
@@ -0,0 +1,452 @@
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/page.h>
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include <scsi/scsi_ioctl.h>
+
+#define NORMAL_RETRIES 5
+#define NORMAL_TIMEOUT (10 * HZ)
+#define FORMAT_UNIT_TIMEOUT (2 * 60 * 60 * HZ)
+#define START_STOP_TIMEOUT (60 * HZ)
+#define MOVE_MEDIUM_TIMEOUT (5 * 60 * HZ)
+#define READ_ELEMENT_STATUS_TIMEOUT (5 * 60 * HZ)
+
+#define MAX_BUF PAGE_SIZE
+
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+/*
+ * If we are told to probe a host, we will return 0 if the host is not
+ * present, 1 if the host is present, and will return an identifying
+ * string at *arg, if arg is non null, filling to the length stored at
+ * (int *) arg
+ */
+
+static int ioctl_probe(struct Scsi_Host * host, void *buffer)
+{
+ int temp, result;
+ unsigned int len,slen;
+ const char * string;
+
+ if ((temp = host->hostt->present) && buffer) {
+ result = verify_area(VERIFY_READ, buffer, sizeof(long));
+ if (result) return result;
+
+ len = get_user ((unsigned int *) buffer);
+ if(host->hostt->info)
+ string = host->hostt->info(host);
+ else
+ string = host->hostt->name;
+ if(string) {
+ slen = strlen(string);
+ if (len > slen)
+ len = slen + 1;
+ result = verify_area(VERIFY_WRITE, buffer, len);
+ if (result) return result;
+
+ memcpy_tofs (buffer, string, len);
+ }
+ }
+ return temp;
+}
+
+/*
+ *
+ * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
+ * The NORMAL_TIMEOUT and NORMAL_RETRIES variables are used.
+ *
+ * dev is the SCSI device struct ptr, *(int *) arg is the length of the
+ * input data, if any, not including the command string & counts,
+ * *((int *)arg + 1) is the output buffer size in bytes.
+ *
+ * *(char *) ((int *) arg)[2] the actual command byte.
+ *
+ * Note that if more than MAX_BUF bytes are requested to be transferred,
+ * the ioctl will fail with error EINVAL. MAX_BUF can be increased in
+ * the future by increasing the size that scsi_malloc will accept.
+ *
+ * This size *does not* include the initial lengths that were passed.
+ *
+ * The SCSI command is read from the memory location immediately after the
+ * length words, and the input data is right after the command. The SCSI
+ * routines know the command size based on the opcode decode.
+ *
+ * The output area is then filled in starting from the command byte.
+ */
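+
+/*
+ * User space usage sketch (illustrative, not part of this file; fd is an
+ * assumed open file descriptor for a SCSI device): issuing a 6-byte
+ * TEST UNIT READY with no data transfer in either direction means laying
+ * out the two length words followed by the command bytes:
+ *
+ *	struct {
+ *		unsigned int inlen;	(bytes of data sent to the device)
+ *		unsigned int outlen;	(bytes of data expected back)
+ *		unsigned char cmd[6];	(the SCSI CDB itself)
+ *	} req = { 0, 0, { 0x00, 0, 0, 0, 0, 0 } };
+ *
+ *	ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &req);
+ */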
+
+static void scsi_ioctl_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+static int ioctl_internal_command(Scsi_Device *dev, char * cmd,
+ int timeout, int retries)
+{
+ int result;
+ Scsi_Cmnd * SCpnt;
+
+ SCpnt = allocate_device(NULL, dev, 1);
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt, cmd, NULL, 0, scsi_ioctl_done, timeout, retries);
+ down(&sem);
+ }
+
+ if(driver_byte(SCpnt->result) != 0)
+ switch(SCpnt->sense_buffer[2] & 0xf) {
+ case ILLEGAL_REQUEST:
+ if(cmd[0] == ALLOW_MEDIUM_REMOVAL) dev->lockable = 0;
+ else printk("SCSI device (ioctl) reports ILLEGAL REQUEST.\n");
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ if(dev->removable){
+ printk(KERN_INFO "Device not ready. Make sure there is a disc in the drive.\n");
+ break;
+ };
+ case UNIT_ATTENTION:
+ if (dev->removable){
+ dev->changed = 1;
+ SCpnt->result = 0; /* This is no longer considered an error */
+ printk(KERN_INFO "Disc change detected.\n");
+ break;
+ };
+ default: /* Fall through for non-removable media */
+ printk("SCSI error: host %d id %d lun %d return code = %x\n",
+ dev->host->host_no,
+ dev->id,
+ dev->lun,
+ SCpnt->result);
+ printk("\tSense class %x, sense error %x, extended sense %x\n",
+ sense_class(SCpnt->sense_buffer[0]),
+ sense_error(SCpnt->sense_buffer[0]),
+ SCpnt->sense_buffer[2] & 0xf);
+
+ };
+
+ result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE;
+
+ if (!SCpnt->device->was_reset && SCpnt->device->scsi_request_fn)
+ (*SCpnt->device->scsi_request_fn)();
+
+ wake_up(&SCpnt->device->device_wait);
+ return result;
+}
+
+/*
+ * This interface is deprecated - users should use the scsi generic (sg)
+ * interface instead, as it is a more flexible approach to performing
+ * generic SCSI commands on a device.
+ */
+int scsi_ioctl_send_command(Scsi_Device *dev, void *buffer)
+{
+ char * buf;
+ unsigned char cmd[12];
+ char * cmd_in;
+ Scsi_Cmnd * SCpnt;
+ unsigned char opcode;
+ int inlen, outlen, cmdlen;
+ int needed, buf_needed;
+ int timeout, retries, result;
+
+ if (!buffer)
+ return -EINVAL;
+
+
+ /*
+ * Verify that we can read at least this much.
+ */
+ result = verify_area(VERIFY_READ, buffer, 2*sizeof(long) + 1);
+ if (result) return result;
+
+ /*
+ * The structure that we are passed should look like:
+ *
+ * struct sdata {
+ * unsigned int inlen;
+ * unsigned int outlen;
+ * unsigned char cmd[]; # However many bytes are used for cmd.
+ * unsigned char data[];
+ * };
+ */
+ inlen = get_user((unsigned int *) buffer);
+ outlen = get_user( ((unsigned int *) buffer) + 1);
+
+ /*
+ * We do not transfer more than MAX_BUF with this interface.
+ * If the user needs to transfer more data than this, they
+ * should use scsi_generics instead.
+ */
+ if( inlen > MAX_BUF ) return -EINVAL;
+ if( outlen > MAX_BUF ) return -EINVAL;
+
+ cmd_in = (char *) ( ((int *)buffer) + 2);
+ opcode = get_user(cmd_in);
+
+ needed = buf_needed = (inlen > outlen ? inlen : outlen);
+ if(buf_needed){
+ buf_needed = (buf_needed + 511) & ~511;
+ if (buf_needed > MAX_BUF) buf_needed = MAX_BUF;
+ buf = (char *) scsi_malloc(buf_needed);
+ if (!buf) return -ENOMEM;
+ memset(buf, 0, buf_needed);
+ } else
+ buf = NULL;
+
+ /*
+ * Obtain the command from the user's address space.
+ */
+ cmdlen = COMMAND_SIZE(opcode);
+
+ result = verify_area(VERIFY_READ, cmd_in,
+ cmdlen + inlen > MAX_BUF ? MAX_BUF : inlen);
+ if (result) return result;
+
+ memcpy_fromfs ((void *) cmd, cmd_in, cmdlen);
+
+ /*
+ * Obtain the data to be sent to the device (if any).
+ */
+ memcpy_fromfs ((void *) buf,
+ (void *) (cmd_in + cmdlen),
+ inlen);
+
+ /*
+ * Set the lun field to the correct value.
+ */
+ cmd[1] = ( cmd[1] & 0x1f ) | (dev->lun << 5);
+
+ switch (opcode)
+ {
+ case FORMAT_UNIT:
+ timeout = FORMAT_UNIT_TIMEOUT;
+ retries = 1;
+ break;
+ case START_STOP:
+ timeout = START_STOP_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ case MOVE_MEDIUM:
+ timeout = MOVE_MEDIUM_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ case READ_ELEMENT_STATUS:
+ timeout = READ_ELEMENT_STATUS_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ default:
+ timeout = NORMAL_TIMEOUT;
+ retries = NORMAL_RETRIES;
+ break;
+ }
+
+#ifndef DEBUG_NO_CMD
+
+ SCpnt = allocate_device(NULL, dev, 1);
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt, cmd, buf, needed, scsi_ioctl_done,
+ timeout, retries);
+ down(&sem);
+ }
+
+ /*
+ * If there was an error condition, pass the info back to the user.
+ */
+ if(SCpnt->result) {
+ result = verify_area(VERIFY_WRITE,
+ cmd_in,
+ sizeof(SCpnt->sense_buffer));
+ if (result) return result;
+ memcpy_tofs((void *) cmd_in,
+ SCpnt->sense_buffer,
+ sizeof(SCpnt->sense_buffer));
+ } else {
+ result = verify_area(VERIFY_WRITE, cmd_in, outlen);
+ if (result) return result;
+ memcpy_tofs ((void *) cmd_in, buf, outlen);
+ }
+ result = SCpnt->result;
+
+ SCpnt->request.rq_status = RQ_INACTIVE;
+
+ if (buf) scsi_free(buf, buf_needed);
+
+ if(SCpnt->device->scsi_request_fn)
+ (*SCpnt->device->scsi_request_fn)();
+
+ wake_up(&SCpnt->device->device_wait);
+ return result;
+#else
+ {
+ int i;
+ printk("scsi_ioctl : device %d. command = ", dev->id);
+ for (i = 0; i < 12; ++i)
+ printk("%02x ", cmd[i]);
+ printk("\nbuffer =");
+ for (i = 0; i < 20; ++i)
+ printk("%02x ", buf[i]);
+ printk("\n");
+ printk("inlen = %d, outlen = %d, cmdlen = %d\n",
+ inlen, outlen, cmdlen);
+ printk("buffer = %d, cmd_in = %d\n", buffer, cmd_in);
+ }
+ return 0;
+#endif
+}
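
The buffer passed to scsi_ioctl_send_command() follows the layout sketched in the comment above: two unsigned ints (inlen, outlen) followed by the CDB, which is in turn followed by any data to be written. Below is a minimal user-space sketch of that layout for a TEST UNIT READY; the struct and function names are illustrative only, and it assumes the usual <scsi/scsi_ioctl.h> definition of SCSI_IOCTL_SEND_COMMAND.

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>    /* SCSI_IOCTL_SEND_COMMAND */

/* Illustrative only: mirrors the "struct sdata" layout parsed above. */
struct sdata_example {
    unsigned int  inlen;        /* bytes sent to the device after the CDB */
    unsigned int  outlen;       /* bytes expected back from the device    */
    unsigned char bytes[512];   /* CDB, immediately followed by data      */
};

static int example_test_unit_ready(int fd)      /* fd: an open SCSI device */
{
    struct sdata_example s;

    memset(&s, 0, sizeof(s));
    s.inlen    = 0;             /* TEST UNIT READY moves no data */
    s.outlen   = 0;
    s.bytes[0] = 0x00;          /* TEST UNIT READY opcode        */
    /* bytes[1..5] stay zero; the driver ORs in the LUN itself.  */

    return ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &s);
}

Note that on a failed command scsi_ioctl_send_command() copies the sense buffer back over the command bytes, so a caller that wants to inspect sense data should make the trailing array at least sizeof(sense_buffer) bytes long.
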
+
+/*
+ * The scsi_ioctl() function differs from most ioctls in that it does
+ * not take a major/minor number as the dev field. Rather, it takes
+ * a pointer to a scsi_devices[] element (a Scsi_Device structure).
+ */
+int scsi_ioctl (Scsi_Device *dev, int cmd, void *arg)
+{
+ int result;
+ char scsi_cmd[12];
+
+ /* No idea how this happens.... */
+ if (!dev) return -ENXIO;
+
+ switch (cmd) {
+ case SCSI_IOCTL_GET_IDLUN:
+ result = verify_area(VERIFY_WRITE, (void *) arg, 2*sizeof(long));
+ if (result) return result;
+
+ put_user(dev->id
+ + (dev->lun << 8)
+ + (dev->channel << 16)
+ + ((dev->host->hostt->proc_dir->low_ino & 0xff) << 24),
+ (unsigned long *) arg);
+ put_user( dev->host->unique_id, (unsigned long *) arg+1);
+ return 0;
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ result = verify_area(VERIFY_WRITE, (void *) arg, sizeof(int));
+ if (result) return result;
+ put_user( dev->host->host_no, (int *) arg);
+ return 0;
+ case SCSI_IOCTL_TAGGED_ENABLE:
+ if(!suser()) return -EACCES;
+ if(!dev->tagged_supported) return -EINVAL;
+ dev->tagged_queue = 1;
+ dev->current_tag = 1;
+ return 0;
+ case SCSI_IOCTL_TAGGED_DISABLE:
+ if(!suser()) return -EACCES;
+ if(!dev->tagged_supported) return -EINVAL;
+ dev->tagged_queue = 0;
+ dev->current_tag = 0;
+ return 0;
+ case SCSI_IOCTL_PROBE_HOST:
+ return ioctl_probe(dev->host, arg);
+ case SCSI_IOCTL_SEND_COMMAND:
+ if(!suser() || securelevel > 0) return -EACCES;
+ return scsi_ioctl_send_command((Scsi_Device *) dev, arg);
+ case SCSI_IOCTL_DOORLOCK:
+ if (!dev->removable || !dev->lockable) return 0;
+ scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = SCSI_REMOVAL_PREVENT;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ NORMAL_TIMEOUT, NORMAL_RETRIES);
+ break;
+ case SCSI_IOCTL_DOORUNLOCK:
+ if (!dev->removable || !dev->lockable) return 0;
+ scsi_cmd[0] = ALLOW_MEDIUM_REMOVAL;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = SCSI_REMOVAL_ALLOW;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ NORMAL_TIMEOUT, NORMAL_RETRIES);
+ case SCSI_IOCTL_TEST_UNIT_READY:
+ scsi_cmd[0] = TEST_UNIT_READY;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 0;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ NORMAL_TIMEOUT, NORMAL_RETRIES);
+ break;
+ case SCSI_IOCTL_START_UNIT:
+ scsi_cmd[0] = START_STOP;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 1;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ START_STOP_TIMEOUT, NORMAL_RETRIES);
+ break;
+ case SCSI_IOCTL_STOP_UNIT:
+ scsi_cmd[0] = START_STOP;
+ scsi_cmd[1] = dev->lun << 5;
+ scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
+ scsi_cmd[4] = 0;
+ return ioctl_internal_command((Scsi_Device *) dev, scsi_cmd,
+ START_STOP_TIMEOUT, NORMAL_RETRIES);
+ break;
+ default :
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Just like scsi_ioctl, only callable from kernel space with no
+ * fs segment fiddling.
+ */
+
+int kernel_scsi_ioctl (Scsi_Device *dev, int cmd, void *arg) {
+ unsigned long oldfs;
+ int tmp;
+ oldfs = get_fs();
+ set_fs(get_ds());
+ tmp = scsi_ioctl (dev, cmd, arg);
+ set_fs(oldfs);
+ return tmp;
+}
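
kernel_scsi_ioctl() lets in-kernel callers (for example the high-level device drivers) reuse the same ioctl switch even though verify_area()/put_user() normally expect user-space pointers: the temporary set_fs(get_ds()) makes kernel pointers acceptable for the duration of the call. A minimal sketch of such a call site; the helper name is hypothetical:

/* Hypothetical helper: spin down a unit from kernel code.  Uses only the
 * interface defined above; the SCSI_IOCTL_STOP_UNIT case never touches
 * arg, so passing NULL is fine. */
static int example_stop_unit(Scsi_Device *sdev)
{
    return kernel_scsi_ioctl(sdev, SCSI_IOCTL_STOP_UNIT, NULL);
}
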
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsi_proc.c b/linux/src/drivers/scsi/scsi_proc.c
new file mode 100644
index 0000000..d1fa28d
--- /dev/null
+++ b/linux/src/drivers/scsi/scsi_proc.c
@@ -0,0 +1,302 @@
+/*
+ * linux/drivers/scsi/scsi_proc.c
+ *
+ * The functions in this file provide an interface between
+ * the PROC file system and the SCSI device drivers.
+ * It is mainly used for debugging and statistics, and to pass
+ * information directly to the lowlevel driver.
+ *
+ * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de
+ * Version: 0.99.8 last change: 95/09/13
+ *
+ * generic command parser provided by:
+ * Andreas Heilwagen <crashcar@informatik.uni-koblenz.de>
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/proc_fs.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+
+extern int scsi_proc_info(char *, char **, off_t, int, int, int);
+
+struct scsi_dir {
+ struct proc_dir_entry entry;
+ char name[4];
+};
+
+
+/* generic_proc_info
+ * Used if the driver currently has no support of its own for /proc/scsi
+ */
+int generic_proc_info(char *buffer, char **start, off_t offset,
+ int length, int inode, int inout)
+{
+ int len, pos, begin;
+
+ if(inout == TRUE)
+ return(-ENOSYS); /* This is a no-op */
+
+ begin = 0;
+ pos = len = sprintf(buffer,
+ "The driver does not yet support the proc-fs\n");
+ if(pos < offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin);
+ if(len > length)
+ len = length;
+
+ return(len);
+}
+
+/* dispatch_scsi_info is the central dispatcher
+ * It is the interface between the proc-fs and the SCSI subsystem code
+ */
+extern int dispatch_scsi_info(int ino, char *buffer, char **start,
+ off_t offset, int length, int func)
+{
+ struct Scsi_Host *hpnt = scsi_hostlist;
+
+ if(ino == PROC_SCSI_SCSI) {
+ /*
+ * This is for the scsi core, rather than any specific
+ * lowlevel driver.
+ */
+ return(scsi_proc_info(buffer, start, offset, length, 0, func));
+ }
+
+ while(hpnt) {
+ if (ino == (hpnt->host_no + PROC_SCSI_FILE)) {
+ if(hpnt->hostt->proc_info == NULL)
+ return generic_proc_info(buffer, start, offset, length,
+ hpnt->host_no, func);
+ else
+ return(hpnt->hostt->proc_info(buffer, start, offset,
+ length, hpnt->host_no, func));
+ }
+ hpnt = hpnt->next;
+ }
+ return(-EBADF);
+}
+
+void build_proc_dir_entries(Scsi_Host_Template *tpnt)
+{
+ struct Scsi_Host *hpnt;
+
+ struct scsi_dir *scsi_hba_dir;
+
+ proc_scsi_register(0, tpnt->proc_dir);
+
+ hpnt = scsi_hostlist;
+ while (hpnt) {
+ if (tpnt == hpnt->hostt) {
+ scsi_hba_dir = scsi_init_malloc(sizeof(struct scsi_dir), GFP_KERNEL);
+ if(scsi_hba_dir == NULL)
+ panic("Not enough memory to register SCSI HBA in /proc/scsi !\n");
+ memset(scsi_hba_dir, 0, sizeof(struct scsi_dir));
+ scsi_hba_dir->entry.low_ino = PROC_SCSI_FILE + hpnt->host_no;
+ scsi_hba_dir->entry.namelen = sprintf(scsi_hba_dir->name,"%d",
+ hpnt->host_no);
+ scsi_hba_dir->entry.name = scsi_hba_dir->name;
+ scsi_hba_dir->entry.mode = S_IFREG | S_IRUGO | S_IWUSR;
+ proc_scsi_register(tpnt->proc_dir, &scsi_hba_dir->entry);
+ }
+ hpnt = hpnt->next;
+ }
+}
+
+/*
+ * parseHandle *parseInit(char *buf, char *cmdList, int cmdNum);
+ * gets a pointer to a null terminated data buffer
+ * and a list of commands with blanks as delimiter
+ * in between.
+ * The commands have to be alphanumerically sorted.
+ * cmdNum has to contain the number of commands.
+ * On success, a pointer to a handle structure
+ * is returned, NULL on failure
+ *
+ * int parseOpt(parseHandle *handle, char **param);
+ * processes the next parameter. On success, the
+ * index of the appropriate command in the cmdList
+ * is returned, starting with zero.
+ * param points to the null terminated parameter string.
+ * On failure, -1 is returned.
+ *
+ * The data buffer buf may only contain command/parameter
+ * pairs, separated by blanks:
+ * <Command> <Parameter> [<Command> <Parameter>]*
+ */
+
+typedef struct
+{
+ char *buf, /* command buffer */
+ *cmdList, /* command list */
+ *bufPos, /* actual position */
+ **cmdPos, /* cmdList index */
+ cmdNum; /* cmd number */
+} parseHandle;
+
+
+inline int parseFree (parseHandle *handle) /* free memory */
+{
+ kfree (handle->cmdPos);
+ kfree (handle);
+
+ return(-1);
+}
+
+
+parseHandle *parseInit(char *buf, char *cmdList, int cmdNum)
+{
+ char *ptr; /* temp pointer */
+ parseHandle *handle; /* new handle */
+
+ if (!buf || !cmdList) /* bad input ? */
+ return(NULL);
+ if ((handle = (parseHandle*) kmalloc(sizeof(parseHandle), GFP_KERNEL)) == 0)
+ return(NULL); /* out of memory */
+ if ((handle->cmdPos = (char**) kmalloc(sizeof(int) * cmdNum, GFP_KERNEL)) == 0) {
+ kfree(handle);
+ return(NULL); /* out of memory */
+ }
+
+ handle->buf = handle->bufPos = buf; /* init handle */
+ handle->cmdList = cmdList;
+ handle->cmdNum = cmdNum;
+
+ handle->cmdPos[cmdNum = 0] = cmdList;
+ for (ptr = cmdList; *ptr; ptr++) { /* scan command string */
+ if(*ptr == ' ') { /* and insert zeroes */
+ *ptr++ = 0;
+ handle->cmdPos[++cmdNum] = ptr++;
+ }
+ }
+ return(handle);
+}
+
+
+int parseOpt(parseHandle *handle, char **param)
+{
+ int cmdIndex = 0,
+ cmdLen = 0;
+ char *startPos;
+
+ if (!handle) /* invalid handle */
+ return(parseFree(handle));
+ /* skip spaces */
+ for (; *(handle->bufPos) && *(handle->bufPos) == ' '; handle->bufPos++);
+ if (!*(handle->bufPos))
+ return(parseFree(handle)); /* end of data */
+
+ startPos = handle->bufPos; /* store cmd start */
+ for (; handle->cmdPos[cmdIndex][cmdLen] && *(handle->bufPos); handle->bufPos++)
+ { /* no string end? */
+ for (;;)
+ {
+ if (*(handle->bufPos) == handle->cmdPos[cmdIndex][cmdLen])
+ break; /* char matches ? */
+ else
+ if (memcmp(startPos, (char*)(handle->cmdPos[++cmdIndex]), cmdLen))
+ return(parseFree(handle)); /* unknown command */
+
+ if (cmdIndex >= handle->cmdNum)
+ return(parseFree(handle)); /* unknown command */
+ }
+
+ cmdLen++; /* next char */
+ }
+
+ /* Get param. First skip all blanks, then insert zero after param */
+
+ for (; *(handle->bufPos) && *(handle->bufPos) == ' '; handle->bufPos++);
+ *param = handle->bufPos;
+
+ for (; *(handle->bufPos) && *(handle->bufPos) != ' '; handle->bufPos++);
+ *(handle->bufPos++) = 0;
+
+ return(cmdIndex);
+}
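
A minimal sketch of how a lowlevel driver's /proc write handler might use the parseInit()/parseOpt() interface documented above. The command list and the handler itself are hypothetical; buf is the null-terminated text a user wrote to the driver's /proc/scsi entry.

static int example_parse_proc_write(char *buf)
{
    char cmds[] = "debug reset";   /* blank-separated, alphabetically sorted */
    parseHandle *handle;
    char *param;
    int idx;

    handle = parseInit(buf, cmds, 2);
    if (handle == NULL)
        return -ENOMEM;            /* bad input or out of memory */

    /* parseOpt() returns the command index and points param at its
     * parameter; it returns -1 (and frees the handle) at end of data
     * or on an unknown command, so no explicit parseFree() is needed. */
    while ((idx = parseOpt(handle, &param)) >= 0) {
        if (idx == 0)
            printk("example: debug level %s\n", param);
        else
            printk("example: reset target %s\n", param);
    }
    return 0;
}
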
+
+void proc_print_scsidevice(Scsi_Device *scd, char *buffer, int *size, int len)
+{
+ int x, y = *size;
+
+ y = sprintf(buffer + len,
+ "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
+ scd->host->host_no, scd->channel, scd->id, scd->lun);
+ for (x = 0; x < 8; x++) {
+ if (scd->vendor[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->vendor[x]);
+ else
+ y += sprintf(buffer + len + y," ");
+ }
+ y += sprintf(buffer + len + y, " Model: ");
+ for (x = 0; x < 16; x++) {
+ if (scd->model[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->model[x]);
+ else
+ y += sprintf(buffer + len + y, " ");
+ }
+ y += sprintf(buffer + len + y, " Rev: ");
+ for (x = 0; x < 4; x++) {
+ if (scd->rev[x] >= 0x20)
+ y += sprintf(buffer + len + y, "%c", scd->rev[x]);
+ else
+ y += sprintf(buffer + len + y, " ");
+ }
+ y += sprintf(buffer + len + y, "\n");
+
+ y += sprintf(buffer + len + y, " Type: %s ",
+ scd->type < MAX_SCSI_DEVICE_CODE ?
+ scsi_device_types[(int)scd->type] : "Unknown " );
+ y += sprintf(buffer + len + y, " ANSI"
+ " SCSI revision: %02x", (scd->scsi_level < 3)?1:2);
+ if (scd->scsi_level == 2)
+ y += sprintf(buffer + len + y, " CCS\n");
+ else
+ y += sprintf(buffer + len + y, "\n");
+
+ *size = y;
+ return;
+}
+
+/*
+ * Overrides for Emacs so that we get a uniform tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/scsicam.c b/linux/src/drivers/scsi/scsicam.c
new file mode 100644
index 0000000..c3fb879
--- /dev/null
+++ b/linux/src/drivers/scsi/scsicam.c
@@ -0,0 +1,229 @@
+/*
+ * scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc.
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult the SCSI-CAM draft.
+ */
+
+/*
+ * Don't import our own symbols, as this would severely mess up our
+ * symbol tables.
+ */
+#define _SCSI_SYMS_VER_
+#define __NO_VERSION__
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/blk.h>
+#include <asm/unaligned.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsicam.h>
+
+static int setsize(unsigned long capacity,unsigned int *cyls,unsigned int *hds,
+ unsigned int *secs);
+
+/*
+ * Function : int scsicam_bios_param (Disk *disk, int dev, int *ip)
+ *
+ * Purpose : to determine the BIOS mapping used for a drive in a
+ * SCSI-CAM system, storing the results in ip as required
+ * by the HDIO_GETGEO ioctl().
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ */
+
+int scsicam_bios_param (Disk *disk, /* SCSI disk */
+ kdev_t dev, /* Device major, minor */
+ int *ip /* Heads, sectors, cylinders in that order */) {
+
+ struct buffer_head *bh;
+ int ret_code;
+ int size = disk->capacity;
+ unsigned long temp_cyl;
+
+ if (!(bh = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, 1024)))
+ return -1;
+
+ /* try to infer mapping from partition table */
+ ret_code = scsi_partsize (bh, (unsigned long) size, (unsigned int *) ip + 2,
+ (unsigned int *) ip + 0, (unsigned int *) ip + 1);
+ brelse (bh);
+
+ if (ret_code == -1) {
+ /* pick some standard mapping with at most 1024 cylinders,
+ and at most 62 sectors per track - this works up to
+ 7905 MB */
+ ret_code = setsize ((unsigned long) size, (unsigned int *) ip + 2,
+ (unsigned int *) ip + 0, (unsigned int *) ip + 1);
+ }
+
+ /* if something went wrong, then apparently we have to return
+ a geometry with more than 1024 cylinders */
+ if (ret_code || ip[0] > 255 || ip[1] > 63) {
+ ip[0] = 64;
+ ip[1] = 32;
+ temp_cyl = size / (ip[0] * ip[1]);
+ if (temp_cyl > 65534) {
+ ip[0] = 255;
+ ip[1] = 63;
+ }
+ ip[2] = size / (ip[0] * ip[1]);
+ }
+
+ return 0;
+}
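
The ip[] convention used here (heads in ip[0], sectors in ip[1], cylinders in ip[2]) is what an upper-level HDIO_GETGEO handler consumes. A hedged sketch of that consumer side; the helper name is made up, and it assumes the usual struct hd_geometry from <linux/hdreg.h>:

#include <linux/hdreg.h>   /* struct hd_geometry */

/* Hypothetical HDIO_GETGEO-style helper: translate the heads/sectors/
 * cylinders triple produced by scsicam_bios_param() into the structure
 * user space expects. */
static int example_fill_geometry(Disk *disk, kdev_t dev, struct hd_geometry *geo)
{
    int ip[3];

    if (scsicam_bios_param(disk, dev, ip) < 0)
        return -1;

    geo->heads     = ip[0];
    geo->sectors   = ip[1];
    geo->cylinders = ip[2];
    geo->start     = 0;    /* partition start sector filled in by the caller */
    return 0;
}
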
+
+/*
+ * Function : static int scsi_partsize(struct buffer_head *bh, unsigned long
+ * capacity,unsigned int *cyls, unsigned int *hds, unsigned int *secs);
+ *
+ * Purpose : to determine the BIOS mapping used to create the partition
+ * table, storing the results in *cyls, *hds, and *secs
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ */
+
+int scsi_partsize(struct buffer_head *bh, unsigned long capacity,
+ unsigned int *cyls, unsigned int *hds, unsigned int *secs) {
+ struct partition *p, *largest = NULL;
+ int i, largest_cyl;
+ int cyl, ext_cyl, end_head, end_cyl, end_sector;
+ unsigned int logical_end, physical_end, ext_physical_end;
+
+
+ if (*(unsigned short *) (bh->b_data+510) == 0xAA55) {
+ for (largest_cyl = -1, p = (struct partition *)
+ (0x1BE + bh->b_data), i = 0; i < 4; ++i, ++p) {
+ if (!p->sys_ind)
+ continue;
+#ifdef DEBUG
+ printk ("scsicam_bios_param : partition %d has system \n",
+ i);
+#endif
+ cyl = p->cyl + ((p->sector & 0xc0) << 2);
+ if (cyl > largest_cyl) {
+ largest_cyl = cyl;
+ largest = p;
+ }
+ }
+ }
+
+ if (largest) {
+ end_cyl = largest->end_cyl + ((largest->end_sector & 0xc0) << 2);
+ end_head = largest->end_head;
+ end_sector = largest->end_sector & 0x3f;
+
+ if( end_head + 1 == 0 || end_sector == 0 ) return -1;
+
+#ifdef DEBUG
+ printk ("scsicam_bios_param : end at h = %d, c = %d, s = %d\n",
+ end_head, end_cyl, end_sector);
+#endif
+
+ physical_end = end_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+ /* This is the actual _sector_ number at the end */
+ logical_end = get_unaligned(&largest->start_sect)
+ + get_unaligned(&largest->nr_sects);
+
+ /* This is for >1023 cylinders */
+ ext_cyl= (logical_end-(end_head * end_sector + end_sector))
+ /(end_head + 1) / end_sector;
+ ext_physical_end = ext_cyl * (end_head + 1) * end_sector +
+ end_head * end_sector + end_sector;
+
+#ifdef DEBUG
+ printk("scsicam_bios_param : logical_end=%d physical_end=%d ext_physical_end=%d ext_cyl=%d\n"
+ ,logical_end,physical_end,ext_physical_end,ext_cyl);
+#endif
+
+ if ((logical_end == physical_end) ||
+ (end_cyl==1023 && ext_physical_end==logical_end)) {
+ *secs = end_sector;
+ *hds = end_head + 1;
+ *cyls = capacity / ((end_head + 1) * end_sector);
+ return 0;
+ }
+
+#ifdef DEBUG
+ printk ("scsicam_bios_param : logical (%u) != physical (%u)\n",
+ logical_end, physical_end);
+#endif
+ }
+ return -1;
+}
+
+/*
+ * Function : static int setsize(unsigned long capacity,unsigned int *cyls,
+ * unsigned int *hds, unsigned int *secs);
+ *
+ * Purpose : to determine a near-optimal int 0x13 mapping for a
+ * SCSI disk in terms of lost space of size capacity, storing
+ * the results in *cyls, *hds, and *secs.
+ *
+ * Returns : -1 on failure, 0 on success.
+ *
+ * Extracted from
+ *
+ * WORKING X3T9.2
+ * DRAFT 792D
+ *
+ *
+ * Revision 6
+ * 10-MAR-94
+ * Information technology -
+ * SCSI-2 Common access method
+ * transport and SCSI interface module
+ *
+ * ANNEX A :
+ *
+ * setsize() converts a read capacity value to int 13h
+ * head-cylinder-sector requirements. It minimizes the value for
+ * number of heads and maximizes the number of cylinders. This
+ * will support rather large disks before the number of heads
+ * will not fit in 4 bits (or 6 bits). This algorithm also
+ * minimizes the number of sectors that will be unused at the end
+ * of the disk while allowing for very large disks to be
+ * accommodated. This algorithm does not use physical geometry.
+ */
+
+static int setsize(unsigned long capacity,unsigned int *cyls,unsigned int *hds,
+ unsigned int *secs) {
+ unsigned int rv = 0;
+ unsigned long heads, sectors, cylinders, temp;
+
+ cylinders = 1024L; /* Set number of cylinders to max */
+ sectors = 62L; /* Maximize sectors per track */
+
+ temp = cylinders * sectors; /* Compute divisor for heads */
+ heads = capacity / temp; /* Compute value for number of heads */
+ if (capacity % temp) { /* If no remainder, done! */
+ heads++; /* Else, increment number of heads */
+ temp = cylinders * heads; /* Compute divisor for sectors */
+ sectors = capacity / temp; /* Compute value for sectors per
+ track */
+ if (capacity % temp) { /* If no remainder, done! */
+ sectors++; /* Else, increment number of sectors */
+ temp = heads * sectors; /* Compute divisor for cylinders */
+ cylinders = capacity / temp;/* Compute number of cylinders */
+ }
+ }
+ if (cylinders == 0) rv=(unsigned)-1;/* Give error if 0 cylinders */
+
+ *cyls = (unsigned int) cylinders; /* Stuff return values */
+ *secs = (unsigned int) sectors;
+ *hds = (unsigned int) heads;
+ return(rv);
+}
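
As a worked example (illustrative numbers only): for a 2,097,152-sector (1 GiB) disk, the first pass uses 1024 cylinders x 62 sectors = 63,488, which gives 33 heads with a remainder, so heads becomes 34; 1024 x 34 = 34,816 then gives 60 sectors with a remainder, so sectors becomes 61; finally 34 x 61 = 2,074 gives 1,011 cylinders. The resulting 1011/34/61 geometry addresses 2,096,814 sectors, leaving only 338 sectors unused at the end of the disk.
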
diff --git a/linux/src/drivers/scsi/scsiio.c b/linux/src/drivers/scsi/scsiio.c
new file mode 100644
index 0000000..cea68b8
--- /dev/null
+++ b/linux/src/drivers/scsi/scsiio.c
@@ -0,0 +1,1537 @@
+/***********************************************************************
+ * FILE NAME : SCSIIO.C *
+ * BY : C.L. Huang *
+ * Description: Device Driver for Tekram DC-390W/U/F (T) PCI SCSI *
+ * Bus Master Host Adapter *
+ ***********************************************************************/
+
+
+static void
+PrepareSG( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ ULONG retAddr,wlval;
+ USHORT wval,i;
+ PSGL psgl;
+ PSGE psge;
+
+
+ retAddr = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ retAddr += jmp_table16;
+ wval = (USHORT)(pSRB->SGcount);
+ wval <<= 4; /* 16 bytes per entry, datain=8, dataout=8 */
+ /* (4 bytes for count, 4 bytes for addr) */
+ retAddr -= (ULONG)wval;
+ pSRB->ReturnAddr = retAddr; /* return address for SCRIPT */
+ if(wval)
+ {
+ wval >>= 1;
+ wlval = (ULONG) pSRB->SegmentPad;
+ wlval -= (ULONG)wval;
+ wval >>= 3;
+ psge = (PSGE) wlval;
+ psgl = pSRB->pSegmentList;
+ for(i=0; i<wval; i++)
+ {
+#ifndef VERSION_ELF_1_2_13
+ psge->SGXPtr = virt_to_phys( psgl->address );
+#else
+ psge->SGXPtr = (ULONG) psgl->address;
+#endif
+ psge->SGXLen = psgl->length;
+ psge++;
+ psgl++;
+ }
+ }
+}
+
+
+static void
+DC390W_StartSCSI( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ USHORT ioport;
+ UCHAR bval;
+
+ pSRB->TagNumber = 31;
+ ioport = pACB->IOPortBase;
+ bval = SIGNAL_PROC;
+ outb(bval,ioport+ISTAT);
+ pACB->pActiveDCB = pDCB;
+ pDCB->pActiveSRB = pSRB;
+ return;
+}
+
+
+#ifndef VERSION_ELF_1_2_13
+static void
+DC390W_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
+#else
+static void
+DC390W_Interrupt( int irq, struct pt_regs *regs)
+#endif
+{
+ PACB pACB;
+ PDCB pDCB;
+ ULONG wlval;
+ USHORT ioport = 0;
+ USHORT wval, i;
+ void (*stateV)( PACB );
+ UCHAR istat = 0;
+ UCHAR bval;
+
+ pACB = pACB_start;
+ if( pACB == NULL )
+ return;
+ for( i=0; i < adapterCnt; i++ )
+ {
+ if( pACB->IRQLevel == (UCHAR) irq )
+ {
+ ioport = pACB->IOPortBase;
+ istat = inb( ioport+ISTAT );
+ if( istat & (ABORT_OP+SCSI_INT_PENDING+DMA_INT_PENDING) )
+ break;
+ else
+ pACB = pACB->pNextACB;
+ }
+ else
+ {
+ pACB = pACB->pNextACB;
+ }
+ }
+
+ if( pACB == (PACB )-1 )
+ {
+ printk("DC390W_intr: Spurious interrupt detected!\n");
+ return;
+ }
+
+
+#ifdef DC390W_DEBUG1
+ printk("Istate=%2x,",istat);
+#endif
+ /* If an abort operation occurred, reset the abort bit before reading the
+ DMA status, to prevent further aborted interrupts. */
+
+ if(istat & ABORT_OP)
+ {
+ istat &= ~ABORT_OP;
+ outb(istat,ioport+ISTAT);
+ }
+
+ pDCB = pACB->pActiveDCB;
+ bval = inb(ioport+CTEST2); /* Clear Signal Bit */
+
+ /* If Scsi Interrupt, then clear Interrupt Status by reading
+ Scsi interrupt status register 0. */
+
+ wlval = 0;
+ if(istat & SCSI_INT_PENDING)
+ {
+ wlval = (ULONG)inw( ioport+SIST0 );
+ wlval <<= 8;
+ }
+
+ /* If DMA Interrupt, then read the DMA status register to see what happen */
+
+ if(istat & DMA_INT_PENDING)
+ {
+ bval = inb(ioport+DSTAT);
+ wlval |= (ULONG) bval;
+ }
+
+#ifdef DC390W_DEBUG1
+ printk("IDstate=%8x,",(UINT) wlval);
+#endif
+ if(wlval & ( (SEL_TIMEOUT << 16)+
+ ((SCSI_GERROR+UNEXPECT_DISC+SCSI_RESET) << 8)+
+ ILLEGAL_INSTRUC+ABORT_) )
+ {
+ ExceptionHandler( wlval, pACB, pDCB );
+ }
+ else if( wlval & SCRIPTS_INT )
+ {
+ wval = inw( ioport+DSPS );
+ stateV = (void *) IntVector[wval];
+ stateV( pACB );
+ }
+ else if( wlval & ( PARITY_ERROR << 8) )
+ ParityError( pACB, pDCB );
+ else if( wlval & ( PHASE_MISMATCH << 8) )
+ PhaseMismatch( pACB );
+ return;
+}
+
+
+static void
+ExceptionHandler(ULONG wlval, PACB pACB, PDCB pDCB)
+{
+ PSRB pSRB;
+ UCHAR bval;
+ USHORT ioport;
+
+/* disconnect/scsi reset/illegal instruction */
+
+ ioport = pACB->IOPortBase;
+ if(wlval & ( (SCSI_RESET+SCSI_GERROR) << 8) )
+ DC390W_ScsiRstDetect( pACB );
+ else if(wlval & ABORT_)
+ {
+#ifdef DC390W_DEBUG0
+ printk("AboRst,");
+#endif
+ if( !InitialTime )
+ DC390W_ResetSCSIBus2( pACB );
+ }
+ else if(wlval & (SEL_TIMEOUT << 16) )
+ {
+ pACB->status = SCSI_STAT_SEL_TIMEOUT;
+#ifdef DC390W_DEBUG1
+ printk("Selto,");
+#endif
+ DC390W_CmdCompleted( pACB );
+ }
+ else if(wlval & (UNEXPECT_DISC << 8) )
+ {
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ pSRB = pDCB->pActiveSRB;
+ if( pSRB->SRBState & DO_SYNC_NEGO )
+ {
+ pDCB->DevMode &= ~SYNC_NEGO_;
+ pACB->status = SCSI_STAT_CHECKCOND;
+ DC390W_CmdCompleted( pACB );
+ }
+ else if( pSRB->SRBState & DO_WIDE_NEGO )
+ {
+ pDCB->DevMode &= ~WIDE_NEGO_;
+ pACB->status = SCSI_STAT_CHECKCOND;
+ DC390W_CmdCompleted( pACB );
+ }
+ else
+ {
+ pACB->status = SCSI_STAT_UNEXP_BUS_F;
+ DC390W_CmdCompleted( pACB );
+ }
+#ifdef DC390W_DEBUG0
+ printk("Uxpbf,");
+#endif
+ }
+ else
+ {
+#ifdef DC390W_DEBUG0
+ printk("Except,");
+#endif
+ DC390W_ResetSCSIBus( pACB );
+ }
+}
+
+
+static void
+ParityError( PACB pACB, PDCB pDCB )
+{
+ ULONG ioport;
+ UCHAR bval,msg;
+ ULONG wlval;
+ PSRB pSRB;
+
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+SCRATCHA);
+ if(bval & RE_SELECTED_)
+ {
+#ifdef DC390W_DEBUG0
+ printk("ParityErr,");
+#endif
+ DC390W_ResetSCSIBus( pACB );
+ return;
+ }
+ else
+ {
+ pSRB = pDCB->pActiveSRB;
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+
+ bval = inb(ioport+DCMD);
+ bval &= 0x07; /* get phase bits */
+ if(bval == 0x07) /* message in phase */
+ {
+ msg = MSG_PARITY_ERROR;
+ wlval = pACB->jmp_clear_ack;
+ }
+ else
+ {
+ msg = MSG_INITIATOR_ERROR;
+ wlval = pACB->jmp_next;
+ }
+ pSRB->__msgout0[0] = 1;
+ pSRB->MsgOutBuf[0] = msg;
+ outl(wlval,(ioport+DSP));
+ return;
+ }
+}
+
+
+static void
+DC390W_Signal( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval, flags;
+ UCHAR bval,msgcnt,tagnum;
+
+ save_flags(flags);
+ cli();
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+#ifdef DC390W_DEBUG0
+ printk("Signal,Cmd=%2x", pSRB->CmdBlock[0]);
+#endif
+ wlval = pSRB->PhysSRB;
+ outl(wlval,(ioport+DSA));
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ msgcnt = 1;
+ bval = pDCB->IdentifyMsg;
+ pSRB->MsgOutBuf[0] = bval;
+ if( (pSRB->CmdBlock[0] != INQUIRY) &&
+ (pSRB->CmdBlock[0] != REQUEST_SENSE) )
+ {
+ if(pDCB->MaxCommand > 1)
+ {
+ wlval = 1;
+ tagnum = 0;
+ while( wlval & pDCB->TagMask )
+ {
+ wlval = wlval << 1;
+ tagnum++;
+ }
+ pDCB->TagMask |= wlval;
+ pSRB->TagNumber = tagnum;
+ pSRB->MsgOutBuf[1] = MSG_SIMPLE_QTAG;
+ pSRB->MsgOutBuf[2] = tagnum;
+ msgcnt = 3;
+ }
+ }
+ else
+ {
+ pSRB->MsgOutBuf[0] &= 0xBF; /* Disable disconnection */
+ if(pSRB->CmdBlock[0] == INQUIRY)
+ {
+ if(bval & 0x07)
+ goto type_6_3;
+ }
+ if(pDCB->DevMode & WIDE_NEGO_)
+ {
+ msgcnt = 5;
+ *((PULONG) &(pSRB->MsgOutBuf[1])) = 0x01030201;
+ pSRB->SRBState |= DO_WIDE_NEGO;
+ }
+ else if(pDCB->DevMode & SYNC_NEGO_)
+ {
+ msgcnt = 6;
+ *((PULONG) &(pSRB->MsgOutBuf[1])) = 0x00010301;
+ pSRB->MsgOutBuf[4] = pDCB->NegoPeriod;
+ pSRB->MsgOutBuf[5] = SYNC_NEGO_OFFSET;
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ }
+ }
+type_6_3:
+ pSRB->__msgout0[0] = (ULONG) msgcnt;
+ wlval = 0;
+ outl(wlval,(ioport+SCRATCHA));
+ bval = pDCB->DCBscntl0;
+ outb(bval,ioport+SCNTL0);
+ pSRB->__select = *((PULONG) &(pDCB->DCBselect));
+#ifdef DC390W_DEBUG0
+ printk("__sel=%8x,", (UINT)(pSRB->__select));
+#endif
+ wlval = pACB->jmp_select;
+ outl(wlval,(ioport+DSP));
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+DC390W_MessageWide( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ PUCHAR msgoutPtr;
+ USHORT ioport;
+ ULONG wlval;
+ UCHAR bval,msgcnt;
+
+
+#ifdef DC390W_DEBUG0
+ printk("MsgWide,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ msgcnt = 0;
+ pDCB->DCBscntl3 &= ~EN_WIDE_SCSI;
+ msgoutPtr = pSRB->MsgOutBuf;
+ if( pSRB->SRBState & DO_WIDE_NEGO )
+ {
+ pSRB->SRBState &= ~DO_WIDE_NEGO;
+ if( pACB->msgin123[0] == 3 )
+ {
+ bval = pACB->msgin123[1];
+ if(bval == 1)
+ {
+ pDCB->DCBscntl3 |= EN_WIDE_SCSI;
+ goto x5;
+ }
+ if(bval < 1)
+ goto x5;
+ }
+ }
+
+/*type_11_1:*/
+ msgcnt = 1;
+ *msgoutPtr = MSG_REJECT_;
+ msgoutPtr++;
+x5:
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ AdjustTemp(pACB,pDCB,pSRB);
+ SetXferRate(pACB,pDCB);
+ if( pDCB->DevMode & SYNC_NEGO_ )
+ {
+ *((PULONG)msgoutPtr) = 0x00010301;
+ *(msgoutPtr + 3) = pDCB->NegoPeriod;
+ *(msgoutPtr + 4) = SYNC_NEGO_OFFSET;
+ msgcnt += 5;
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ }
+
+ pSRB->__msgout0[0] = (ULONG) msgcnt;
+ wlval = pACB->jmp_clear_ack;
+ if(msgcnt)
+ wlval = pACB->jmp_set_atn;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+static void
+DC390W_MessageSync( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval;
+ USHORT wval,wval1;
+ UCHAR bval,bval1;
+
+#ifdef DC390W_DEBUG0
+ printk("MsgSync,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ if( !(pSRB->SRBState & DO_SYNC_NEGO) )
+ goto MessageExtnd;
+ pSRB->SRBState &= ~DO_SYNC_NEGO;
+ if(pACB->msgin123[0] != 1)
+ {
+MessageExtnd:
+ pSRB->__msgout0[0] = 1;
+ pSRB->MsgOutBuf[0] = MSG_REJECT_;
+ wlval = pACB->jmp_set_atn;
+ outl(wlval,(ioport+DSP));
+ return;
+ }
+ bval = pACB->msgin123[2]; /* offset */
+asyncx:
+ pDCB->DCBsxfer = bval;
+ if(bval == 0) /* if offset or period == 0, async */
+ {
+ if( pACB->AdaptType == DC390W )
+ bval = SYNC_CLK_F2+ASYNC_CLK_F2;
+ else
+ bval = SYNC_CLK_F4+ASYNC_CLK_F4;
+ pDCB->DCBscntl3 = bval;
+ }
+ else
+ {
+ bval = pACB->msgin123[1];
+ if(bval == 0)
+ goto asyncx;
+ pDCB->SyncPeriod = bval;
+ wval = (USHORT)bval;
+ wval <<= 3;
+ bval = pDCB->DCBscntl3;
+ bval &= 0x0f;
+ if(wval < 200) /* < 100 ns ==> Fast-20 */
+ {
+ bval |= 0x90; /* Fast-20 and div 1 */
+ bval1 = 25; /* 12.5 ns */
+ }
+ else if(wval < 400)
+ {
+ bval |= 0x30; /* 1 cycle = 25ns */
+ bval1 = 50;
+ }
+ else /* Non Fast */
+ {
+ bval |= 0x50; /* 1 cycle = 50ns */
+ bval1 = 100;
+ }
+ if( pACB->AdaptType == DC390W )
+ bval -= 0x20; /* turn down to 40 MHz SCSI clock */
+ /* assume 390W will not receive fast-20 */
+ wval1 = wval;
+ wval /= bval1;
+ if(wval * bval1 < wval1)
+ wval++;
+ /* XFERP TP2 TP1 TP0 */
+ wval -= 4; /* 4 0 0 0 */
+ /* 5 0 0 1 */
+ wval <<= 5;
+ pDCB->DCBsxfer |= (UCHAR)wval;
+ pDCB->DCBscntl3 = bval;
+ }
+/*sync_2:*/
+ SetXferRate( pACB,pDCB );
+ wlval = pACB->jmp_clear_ack;
+/*sync_3:*/
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ bval = pDCB->DCBsxfer;
+ outb(bval,ioport+SXFER);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+static void
+DC390W_MsgReject( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ ULONG wlval;
+ USHORT ioport;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("Msgrjt,");
+#endif
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ wlval = pACB->jmp_clear_ack;
+ if(pSRB->SRBState & DO_WIDE_NEGO)
+ {
+ pSRB->SRBState &= ~DO_WIDE_NEGO;
+ pDCB->DCBscntl3 &= ~EN_WIDE_SCSI;
+ AdjustTemp( pACB, pDCB, pSRB );
+ SetXferRate( pACB, pDCB );
+ if( pDCB->DevMode & SYNC_NEGO_ )
+ {
+ *((PULONG) &(pSRB->MsgOutBuf[0])) = 0x00010301;
+ pSRB->MsgOutBuf[3] = pDCB->NegoPeriod;
+ pSRB->MsgOutBuf[4] = SYNC_NEGO_OFFSET;
+ pSRB->__msgout0[0] = 5;
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ wlval = pACB->jmp_set_atn;
+ }
+ }
+ else
+ {
+ if(pSRB->SRBState & DO_SYNC_NEGO)
+ {
+ pSRB->SRBState &= ~DO_SYNC_NEGO;
+ pDCB->DCBsxfer = 0; /* reject sync msg, set async */
+ if( pACB->AdaptType == DC390W )
+ bval = SYNC_CLK_F2+ASYNC_CLK_F2;
+ else
+ bval = SYNC_CLK_F4+ASYNC_CLK_F4;
+ pDCB->DCBscntl3 = bval;
+ SetXferRate(pACB,pDCB);
+ wlval = pACB->jmp_clear_ack;
+ }
+ }
+ ioport = pACB->IOPortBase;
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ bval = pDCB->DCBsxfer;
+ outb(bval,ioport+SXFER);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+static void
+AdjustTemp( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ USHORT ioport;
+ ULONG wlval;
+
+ wlval = pSRB->ReturnAddr;
+ if(wlval <= pACB->jmp_table8)
+ {
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ wlval += jmp_table16;
+ }
+ else
+ {
+ if((pDCB->DCBscntl3 & EN_WIDE_SCSI) == 0)
+ wlval -= jmp_table16;
+ }
+ pSRB->ReturnAddr = wlval;
+ ioport = pACB->IOPortBase;
+ outl(wlval,(ioport+TEMP));
+ return;
+}
+
+
+static void
+SetXferRate( PACB pACB, PDCB pDCB )
+{
+ UCHAR bval;
+ USHORT cnt, i;
+ PDCB ptr;
+
+ if( !(pDCB->IdentifyMsg & 0x07) )
+ {
+ if( pACB->scan_devices )
+ {
+ CurrDCBscntl3 = pDCB->DCBscntl3;
+ }
+ else
+ {
+ ptr = pACB->pLinkDCB;
+ cnt = pACB->DeviceCnt;
+ bval = pDCB->UnitSCSIID;
+ for(i=0; i<cnt; i++)
+ {
+ if( ptr->UnitSCSIID == bval )
+ {
+ ptr->DCBsxfer = pDCB->DCBsxfer;
+ ptr->DCBscntl3 = pDCB->DCBscntl3;
+ }
+ ptr = ptr->pNextDCB;
+ }
+ }
+ }
+ return;
+}
+
+
+static void
+DC390W_UnknownMsg( PACB pACB )
+{
+ PSRB pSRB;
+ ULONG wlval;
+ USHORT ioport;
+
+ pSRB = pACB->pActiveDCB->pActiveSRB;
+ pSRB->__msgout0[0] = 1;
+ pSRB->MsgOutBuf[0] = MSG_REJECT_;
+ wlval = pACB->jmp_set_atn;
+ ioport = pACB->IOPortBase;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+static void
+DC390W_MessageExtnd( PACB pACB )
+{
+ DC390W_UnknownMsg( pACB );
+}
+
+
+static void
+DC390W_Disconnected( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ ULONG wlval, flags;
+ USHORT ioport;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("Discnet,");
+#endif
+ save_flags(flags);
+ cli();
+ pDCB = pACB->pActiveDCB;
+ if (! pDCB)
+ {
+#ifdef DC390W_DEBUG0
+ printk("ACB:%08lx->ActiveDCB:%08lx !,", (ULONG)pACB, (ULONG)pDCB);
+#endif
+ restore_flags(flags); return;
+ }
+
+ pSRB = pDCB->pActiveSRB;
+
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+SCRATCHA);
+ pSRB->ScratchABuf = bval;
+ pSRB->SRBState |= SRB_DISCONNECT; /* 1.02 */
+ wlval = pACB->jmp_reselect;
+ outl(wlval,(ioport+DSP));
+ pACB->pActiveDCB = 0;
+ DoWaitingSRB( pACB );
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+DC390W_Reselected( PACB pACB )
+{
+#ifdef DC390W_DEBUG0
+ printk("Rsel,");
+#endif
+ pACB->msgin123[0] = 0x80; /* set identify byte 80h */
+ DC390W_Reselected1(pACB);
+ return;
+}
+
+
+static void
+DC390W_Reselected1( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport, wval;
+ ULONG wlval, flags;
+ UCHAR bval;
+
+
+#ifdef DC390W_DEBUG0
+ printk("Rsel1,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ if(pDCB)
+ {
+ pSRB = pDCB->pActiveSRB;
+ RewaitSRB( pDCB, pSRB );
+ }
+
+ wval = (USHORT) (pACB->msgin123[0]);
+ wval = (wval & 7) << 8; /* get LUN */
+ wval |= (USHORT) (inb(ioport+SSID) & 0x0f); /* get ID */
+ pDCB = pACB->pLinkDCB;
+ while( *((PUSHORT) &pDCB->UnitSCSIID) != wval )
+ pDCB = pDCB->pNextDCB;
+ pACB->pActiveDCB = pDCB;
+ bval = pDCB->DCBscntl3;
+ outb(bval,ioport+SCNTL3);
+ bval = pDCB->DCBsxfer;
+ outb(bval,ioport+SXFER);
+ bval = pDCB->DCBscntl0;
+ outb(bval,ioport+SCNTL0);
+ if(pDCB->MaxCommand > 1)
+ {
+ wlval = pACB->jmp_reselecttag;
+ outl(wlval,(ioport+DSP));
+ }
+ else
+ {
+ pSRB = pDCB->pActiveSRB;
+ if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) )
+ {
+ save_flags(flags);
+ cli();
+ pSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB->pNextSRB;
+ restore_flags(flags);
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->MsgOutBuf[0] = MSG_ABORT;
+ pSRB->__msgout0[0] = 1;
+ }
+ pSRB->SRBState &= ~SRB_DISCONNECT;
+ wlval = pSRB->PhysSRB;
+ outl(wlval,(ioport+DSA));
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ bval = pSRB->ScratchABuf;
+ outb(bval,ioport+SCRATCHA);
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ wlval = pACB->jmp_set_atn;
+ else
+ wlval = pACB->jmp_clear_ack;
+ outl(wlval,(ioport+DSP));
+ }
+ return;
+}
+
+
+static void
+DC390W_ReselectedT( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB, psrb1;
+ USHORT ioport;
+ ULONG wlval, flags;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("RselT,");
+#endif
+ ioport = pACB->IOPortBase;
+ bval = pACB->msgin123[1];
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pGoingSRB;
+ psrb1 = pDCB->pGoingLast;
+ if( !pSRB )
+ goto UXP_RSL;
+ for(;;)
+ {
+ if(pSRB->TagNumber != bval)
+ {
+ if( pSRB != psrb1 )
+ pSRB = pSRB->pNextSRB;
+ else
+ goto UXP_RSL;
+ }
+ else
+ break;
+ }
+ if( !(pSRB->SRBState & SRB_DISCONNECT) )
+ {
+UXP_RSL:
+ save_flags(flags);
+ cli();
+ pSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB->pNextSRB;
+ restore_flags(flags);
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
+ pSRB->__msgout0[0] = 1;
+ }
+ else
+ {
+ pSRB->SRBState &= ~SRB_DISCONNECT;
+ pDCB->pActiveSRB = pSRB;
+ }
+ wlval = pSRB->PhysSRB;
+ outl(wlval,(ioport+DSA));
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ bval = pSRB->ScratchABuf;
+ outb(bval,ioport+SCRATCHA);
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ wlval = pACB->jmp_set_atn;
+ else
+ wlval = pACB->jmp_clear_ack;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+static void
+DC390W_RestorePtr( PACB pACB )
+{
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval;
+
+ pSRB = pACB->pActiveDCB->pActiveSRB;
+ wlval = pSRB->ReturnAddr;
+ ioport = pACB->IOPortBase;
+ outl(wlval,(ioport+TEMP));
+ wlval = inl(ioport+DSP);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+static void
+PhaseMismatch( PACB pACB )
+{
+ USHORT ioport;
+ ULONG wlval,swlval;
+ USHORT wval;
+ UCHAR bval,phase;
+ PDCB pDCB;
+
+#ifdef DC390W_DEBUG0
+ printk("Mismatch,");
+#endif
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+SCRATCHA);
+ if(bval & OVER_RUN_) /* xfer PAD */
+ {
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ wlval = pACB->jmp_next; /* check phase */
+ outl(wlval,(ioport+DSP));
+ return;
+ }
+ pDCB = pACB->pActiveDCB;
+ wlval = inl(ioport+DBC);
+ phase = (UCHAR)((wlval & 0x07000000) >> 24);
+ wlval &= 0xffffff; /* bytes not xferred */
+ if( phase == SCSI_DATA_IN )
+ {
+ swlval = pACB->jmp_din8;
+ if( pDCB->DCBscntl3 & EN_WIDE_SCSI )
+ swlval += jmp_din16;
+ DataIOcommon(pACB,swlval,wlval);
+ }
+ else if( phase == SCSI_DATA_OUT )
+ {
+ wval = (USHORT)inb(ioport+CTEST5);
+ wval <<= 8;
+ bval = inb(ioport+DFIFO);
+ wval |= (USHORT) bval;
+ wval -= ((USHORT)(wlval & 0xffff));
+ wval &= 0x3ff;
+ wlval += (ULONG)wval; /* # of bytes remains in FIFO */
+ bval = inb(ioport+SSTAT0);
+ if(bval & SODR_LSB_FULL)
+ wlval++; /* data left in Scsi Output Data Buffer */
+ if(bval & SODL_LSB_FULL)
+ wlval++; /* data left in Scsi Output Data Latch */
+ swlval = pACB->jmp_dout8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ {
+ swlval += jmp_dout16;
+ bval = inb(ioport+SSTAT2);
+ if(bval & SODR_MSB_FULL)
+ wlval++;
+ if(bval & SODL_MSB_FULL)
+ wlval++;
+ }
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ DataIOcommon(pACB,swlval,wlval);
+ }
+ else
+ {
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+ if(phase == SCSI_MSG_OUT)
+ wlval = pACB->jmp_clear_atn;
+ else
+ wlval = pACB->jmp_next; /* check phase */
+ outl(wlval,(ioport+DSP));
+ }
+ return;
+}
+
+
+static void
+DataIOcommon( PACB pACB, ULONG Swlval, ULONG Cwlval )
+{
+ /* Swlval - script address */
+ /* Cwlval - bytes not xferred */
+ PDCB pDCB;
+ PSRB pSRB;
+ PSGE Segptr;
+ USHORT ioport;
+ ULONG wlval,swlval,dataXferCnt;
+ UCHAR bval,bvald;
+
+ ioport = pACB->IOPortBase;
+ wlval = inl((ioport+DSP));
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ wlval -= Swlval;
+ bval = inb(ioport+SBCL);
+ bval &= 0x07;
+ if(bval == SCSI_MSG_IN)
+ {
+ bval = pDCB->DCBscntl3;
+ bval &= ~EN_WIDE_SCSI;
+ outb(bval,ioport+SCNTL3);
+ bval = inb(ioport+SBDL);
+ bvald = pDCB->DCBscntl3; /* enable WIDE SCSI */
+ outb(bvald,ioport+SCNTL3);
+ if(bval == MSG_DISCONNECT || bval == MSG_SAVE_PTR)
+ {
+ Segptr = (PSGE)((ULONG) &(pSRB->Segment0[0][0]) + wlval);
+ dataXferCnt = Segptr->SGXLen - Cwlval;
+ Segptr->SGXLen = Cwlval; /* modified count */
+ Segptr->SGXPtr += dataXferCnt; /* modified address */
+ swlval = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ swlval += jmp_table16;
+ wlval <<= 1;
+ swlval += wlval;
+ swlval = swlval - ((MAX_SG_LIST_BUF+1) * 16);
+ pSRB->ReturnAddr = swlval;
+ }
+ }
+ else if( Cwlval ) /* Remaining not xferred -- UNDER_RUN */
+ {
+ Segptr = (PSGE)((ULONG) &(pSRB->Segment0[0][0]) + wlval);
+ dataXferCnt = Segptr->SGXLen - Cwlval;
+ Segptr->SGXLen = Cwlval; /* modified count */
+ Segptr->SGXPtr += dataXferCnt; /* modified address */
+ swlval = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ swlval += jmp_table16;
+ wlval <<= 1;
+ swlval += wlval;
+ swlval = swlval - ((MAX_SG_LIST_BUF+1) * 16);
+ pSRB->RemainSegPtr = swlval;
+ }
+/* pm__1: */
+ wlval = pSRB->ReturnAddr;
+ outl(wlval,(ioport+TEMP));
+ wlval = pACB->jmp_next;
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
+static void
+DC390W_CmdCompleted( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport;
+ ULONG wlval, flags;
+ UCHAR bval;
+
+#ifdef DC390W_DEBUG0
+ printk("Cmplete,");
+#endif
+ save_flags(flags);
+ cli();
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ pDCB->pActiveSRB = NULL;
+ ioport = pACB->IOPortBase;
+
+ bval = inb(ioport+SCRATCHA);
+ pSRB->ScratchABuf = bval; /* save status */
+ bval = pSRB->TagNumber;
+ if(pDCB->MaxCommand > 1)
+ pDCB->TagMask &= (~(1 << bval)); /* free tag mask */
+ pACB->pActiveDCB = NULL; /* no active device */
+ wlval = pACB->jmp_reselect; /* enable reselection */
+ outl(wlval,(ioport+DSP));
+ SRBdone( pACB, pDCB, pSRB);
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb;
+ UCHAR bval, bval1, i, j, status;
+ PSCSICMD pcmd;
+ PSCSI_INQDATA ptr;
+ USHORT disable_tag;
+ ULONG flags;
+ PSGE ptr1;
+ PSGL ptr2;
+ ULONG wlval,swlval;
+
+ pcmd = pSRB->pcmd;
+ status = pACB->status;
+ if(pSRB->SRBFlag & AUTO_REQSENSE)
+ {
+ pSRB->SRBFlag &= ~AUTO_REQSENSE;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = SCSI_STAT_CHECKCOND;
+ if(status == SCSI_STAT_CHECKCOND)
+ {
+ pcmd->result = DID_BAD_TARGET << 16;
+ goto ckc_e;
+ }
+ if(pSRB->RetryCnt == 0)
+ {
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0][0];
+ pSRB->XferredLen = pSRB->Segment0[2][1];
+ if( (pSRB->XferredLen) &&
+ (pSRB->XferredLen >= pcmd->underflow) )
+ {
+ pcmd->result |= (DID_OK << 16);
+ }
+ else
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+ goto ckc_e;
+ }
+ else
+ {
+ pSRB->RetryCnt--;
+ pSRB->TargetStatus = 0;
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0][0];
+ *((PULONG) &(pSRB->CmdBlock[4])) = pSRB->Segment0[0][1];
+ *((PULONG) &(pSRB->CmdBlock[8])) = pSRB->Segment0[1][0];
+ pSRB->__command[0] = pSRB->Segment0[1][1] & 0xff;
+ pSRB->SGcount = (UCHAR) (pSRB->Segment0[1][1] >> 8);
+ *((PULONG) &(pSRB->pSegmentList))= pSRB->Segment0[2][0];
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+ goto ckc_e;
+ }
+ pcmd->result |= (DRIVER_SENSE << 24);
+ PrepareSG(pACB,pDCB,pSRB);
+ pSRB->XferredLen = 0;
+ DC390W_StartSCSI( pACB, pDCB, pSRB );
+ return;
+ }
+ }
+ if( status )
+ {
+ if( status == SCSI_STAT_CHECKCOND)
+ {
+ if( !(pSRB->ScratchABuf & SRB_OK) && (pSRB->SGcount) && (pSRB->RemainSegPtr) )
+ {
+ wlval = pSRB->RemainSegPtr;
+ swlval = pACB->jmp_table8;
+ if(pDCB->DCBscntl3 & EN_WIDE_SCSI)
+ swlval += jmp_table16;
+ swlval -= wlval;
+ swlval >>= 4;
+ bval = (UCHAR) swlval;
+ wlval = 0;
+ ptr1 = (PSGE) &pSRB->Segment0[MAX_SG_LIST_BUF+1][0];
+ for( i=0; i< bval; i++)
+ {
+ wlval += ptr1->SGXLen;
+ ptr1--;
+ }
+
+ bval = pSRB->SGcount;
+ swlval = 0;
+ ptr2 = pSRB->pSegmentList;
+ for( i=0; i< bval; i++)
+ {
+ swlval += ptr2->length;
+ ptr2++;
+ }
+ pSRB->XferredLen = swlval - wlval;
+ pSRB->RemainSegPtr = 0;
+#ifdef DC390W_DEBUG0
+ printk("XferredLen=%8x,NotXferLen=%8x,",(UINT) pSRB->XferredLen,(UINT) wlval);
+#endif
+ }
+ RequestSense( pACB, pDCB, pSRB );
+ return;
+ }
+ else if( status == SCSI_STAT_QUEUEFULL )
+ {
+ bval = (UCHAR) pDCB->GoingSRBCnt;
+ bval--;
+ pDCB->MaxCommand = bval;
+ RewaitSRB( pDCB, pSRB );
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ return;
+ }
+ else if(status == SCSI_STAT_SEL_TIMEOUT)
+ {
+ pSRB->AdaptStatus = H_SEL_TIMEOUT;
+ pSRB->TargetStatus = 0;
+ pcmd->result = DID_BAD_TARGET << 16;
+ }
+ else if(status == SCSI_STAT_UNEXP_BUS_F)
+ {
+ pSRB->AdaptStatus = H_UNEXP_BUS_FREE;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= DID_NO_CONNECT << 16;
+ }
+ else if(status == SCSI_STAT_BUS_RST_DETECT )
+ {
+ pSRB->AdaptStatus = H_ABORT;
+ pSRB->TargetStatus = 0;
+ pcmd->result = DID_RESET << 16;
+ }
+ else
+ {
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = status;
+ if( pSRB->RetryCnt )
+ {
+ pSRB->RetryCnt--;
+ pSRB->TargetStatus = 0;
+ PrepareSG(pACB,pDCB,pSRB);
+ pSRB->XferredLen = 0;
+ DC390W_StartSCSI( pACB, pDCB, pSRB );
+ return;
+ }
+ else
+ {
+ pcmd->result |= (DID_ERROR << 16) | (ULONG) (pACB->msgin123[0] << 8) |
+ (ULONG) status;
+ }
+ }
+ }
+ else
+ {
+ status = pSRB->ScratchABuf;
+ if(status & OVER_RUN_)
+ {
+ pSRB->AdaptStatus = H_OVER_UNDER_RUN;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16) | (pACB->msgin123[0] << 8);
+ }
+ else /* No error */
+ {
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16);
+ }
+ }
+ckc_e:
+
+ if( pACB->scan_devices )
+ {
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ if(pcmd->result != (DID_OK << 16))
+ {
+ if( pcmd->result & SCSI_STAT_CHECKCOND )
+ {
+ goto RTN_OK;
+ }
+ else
+ {
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) &&
+ ((pcmd->lun == 0) || (pcmd->lun == pACB->max_lun)) )
+ {
+ pACB->scan_devices = 0;
+ }
+ }
+ }
+ else
+ {
+RTN_OK:
+ pPrevDCB->pNextDCB = pDCB;
+ pDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) && (pcmd->lun == pACB->max_lun) )
+ pACB->scan_devices = END_SCAN;
+ }
+ }
+ else if( pSRB->CmdBlock[0] == INQUIRY )
+ {
+ if( (pcmd->target == pACB->max_id) &&
+ (pcmd->lun == pACB->max_lun) )
+ {
+ pACB->scan_devices = 0;
+ }
+ ptr = (PSCSI_INQDATA) (pcmd->request_buffer);
+ if( pcmd->use_sg )
+ ptr = (PSCSI_INQDATA) (((PSGL) ptr)->address);
+ bval1 = ptr->DevType & SCSI_DEVTYPE;
+ if(bval1 == SCSI_NODEV)
+ {
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+ else
+ {
+ pACB->DeviceCnt++;
+ pPrevDCB = pDCB;
+ pACB->pDCB_free = (PDCB) ((ULONG) (pACB->pDCB_free) + sizeof( DC390W_DCB ));
+ pDCB->DevType = bval1;
+ if(bval1 == TYPE_DISK || bval1 == TYPE_MOD)
+ {
+ if( (((ptr->Vers & 0x07) >= 2) || ((ptr->RDF & 0x0F) == 2)) &&
+ (ptr->Flags & SCSI_INQ_CMDQUEUE) &&
+ (pDCB->DevMode & TAG_QUEUING_) &&
+ (pDCB->DevMode & EN_DISCONNECT_) )
+ {
+ disable_tag = 0;
+ for(i=0; i<BADDEVCNT; i++)
+ {
+ for(j=0; j<28; j++)
+ {
+ if( ((PUCHAR)ptr)[8+j] != baddevname[i][j])
+ break;
+ }
+ if(j == 28)
+ {
+ disable_tag = 1;
+ break;
+ }
+ }
+
+ if( !disable_tag )
+ {
+ pDCB->MaxCommand = pACB->TagMaxNum;
+ pDCB->TagMask = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ save_flags( flags );
+ cli();
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ if(pSRB == pDCB->pGoingSRB )
+ {
+ pDCB->pGoingSRB = pSRB->pNextSRB;
+ }
+ else
+ {
+ psrb = pDCB->pGoingSRB;
+ while( psrb->pNextSRB != pSRB )
+ psrb = psrb->pNextSRB;
+ psrb->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pGoingLast )
+ pDCB->pGoingLast = psrb;
+ }
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pDCB->GoingSRBCnt--;
+
+ DoWaitingSRB( pACB );
+ restore_flags(flags);
+
+/* Notify cmd done */
+ pcmd->scsi_done( pcmd );
+
+ if( pDCB->QIORBCnt )
+ DoNextCmd( pACB, pDCB );
+ return;
+}
+
+
+static void
+DoingSRB_Done( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB psrb, psrb2;
+ USHORT cnt, i;
+ PSCSICMD pcmd;
+
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ do
+ {
+ cnt = pdcb->GoingSRBCnt;
+ psrb = pdcb->pGoingSRB;
+ for( i=0; i<cnt; i++)
+ {
+ psrb2 = psrb->pNextSRB;
+ pcmd = psrb->pcmd;
+ pcmd->result = DID_RESET << 16;
+
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ psrb->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = psrb;
+
+ pcmd->scsi_done( pcmd );
+ psrb = psrb2;
+ }
+ pdcb->GoingSRBCnt = 0;
+ pdcb->pGoingSRB = NULL;
+ pdcb->TagMask = 0;
+ pdcb = pdcb->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+static void
+DC390W_ResetSCSIBus( PACB pACB )
+{
+ USHORT ioport;
+ UCHAR bval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ pACB->ACBFlag |= RESET_DEV;
+ ioport = pACB->IOPortBase;
+ bval = ABORT_OP;
+ outb(bval,ioport+ISTAT);
+ udelay(25);
+ bval = 0;
+ outb(bval,ioport+ISTAT);
+
+ bval = ASSERT_RST;
+ outb(bval,ioport+SCNTL1);
+ udelay(25); /* 25 us */
+ bval = 0;
+ outb(bval,ioport+SCNTL1);
+ restore_flags(flags);
+ return;
+}
+
+
+
+static void
+DC390W_ResetSCSIBus2( PACB pACB )
+{
+ USHORT ioport;
+ UCHAR bval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ ioport = pACB->IOPortBase;
+ bval = ASSERT_RST;
+ outb(bval,ioport+SCNTL1);
+ udelay(25); /* 25 us */
+ bval = 0;
+ outb(bval,ioport+SCNTL1);
+ restore_flags(flags);
+ return;
+}
+
+
+
+static void
+DC390W_ScsiRstDetect( PACB pACB )
+{
+ ULONG wlval, flags;
+ USHORT ioport;
+ UCHAR bval;
+
+ save_flags(flags);
+ sti();
+#ifdef DC390W_DEBUG0
+ printk("Reset_Detect0,");
+#endif
+/* delay 1 sec */
+ wlval = jiffies + HZ;
+ while( jiffies < wlval );
+/* USHORT i;
+ for( i=0; i<1000; i++ )
+ udelay(1000); */
+
+ cli();
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+STEST3);
+ bval |= CLR_SCSI_FIFO;
+ outb(bval,ioport+STEST3);
+ bval = CLR_DMA_FIFO;
+ outb(bval,ioport+CTEST3);
+
+ if( pACB->ACBFlag & RESET_DEV )
+ pACB->ACBFlag |= RESET_DONE;
+ else
+ {
+ pACB->ACBFlag |= RESET_DETECT;
+
+ ResetDevParam( pACB );
+/* DoingSRB_Done( pACB ); ???? */
+ RecoverSRB( pACB );
+ pACB->pActiveDCB = NULL;
+ wlval = pACB->jmp_reselect;
+ outl(wlval,(ioport+DSP));
+ pACB->ACBFlag = 0;
+ DoWaitingSRB( pACB );
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+RequestSense( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSCSICMD pcmd;
+
+ pSRB->SRBFlag |= AUTO_REQSENSE;
+ pSRB->Segment0[0][0] = *((PULONG) &(pSRB->CmdBlock[0]));
+ pSRB->Segment0[0][1] = *((PULONG) &(pSRB->CmdBlock[4]));
+ pSRB->Segment0[1][0] = *((PULONG) &(pSRB->CmdBlock[8]));
+ pSRB->Segment0[1][1] = pSRB->__command[0] | (pSRB->SGcount << 8);
+ pSRB->Segment0[2][0] = *((PULONG) &(pSRB->pSegmentList));
+ pSRB->Segment0[2][1] = pSRB->XferredLen;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+
+ pcmd = pSRB->pcmd;
+
+ pSRB->Segmentx.address = (PUCHAR) &(pcmd->sense_buffer);
+ pSRB->Segmentx.length = sizeof(pcmd->sense_buffer);
+ pSRB->pSegmentList = &pSRB->Segmentx;
+ pSRB->SGcount = 1;
+
+ *((PULONG) &(pSRB->CmdBlock[0])) = 0x00000003;
+ pSRB->CmdBlock[1] = pDCB->IdentifyMsg << 5;
+ *((PUSHORT) &(pSRB->CmdBlock[4])) = sizeof(pcmd->sense_buffer);
+ pSRB->__command[0] = 6;
+ PrepareSG( pACB, pDCB, pSRB );
+ pSRB->XferredLen = 0;
+ DC390W_StartSCSI( pACB, pDCB, pSRB );
+ return;
+}
+
+
+static void
+DC390W_MessageOut( PACB pACB )
+{
+ DC390W_FatalError( pACB );
+}
+
+
+static void
+DC390W_FatalError( PACB pACB )
+{
+ PSRB pSRB;
+ PDCB pDCB;
+ ULONG flags;
+
+#ifdef DC390W_DEBUG0
+ printk("DC390W: Fatal Error!!\n");
+#endif
+
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ {
+ save_flags(flags);
+ cli();
+ pSRB->SRBState &= ~SRB_UNEXPECT_RESEL;
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pACB->pActiveDCB = NULL;
+ pDCB->pActiveSRB = NULL;
+ restore_flags(flags);
+ DoWaitingSRB( pACB );
+ }
+ else
+ DC390W_ResetSCSIBus(pACB);
+ return;
+}
+
+
+static void
+DC390W_Debug( PACB pACB )
+{
+ ULONG wlval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ wlval = inl(ioport+DSP);
+ outl(wlval,(ioport+DSP));
+ return;
+}
+
+
diff --git a/linux/src/drivers/scsi/scsiiom.c b/linux/src/drivers/scsi/scsiiom.c
new file mode 100644
index 0000000..97801d7
--- /dev/null
+++ b/linux/src/drivers/scsi/scsiiom.c
@@ -0,0 +1,1540 @@
+/***********************************************************************
+ * FILE NAME : SCSIIOM.C *
+ * BY : C.L. Huang, ching@tekram.com.tw *
+ * Description: Device Driver for Tekram DC-390 (T) PCI SCSI *
+ * Bus Master Host Adapter *
+ ***********************************************************************/
+
+
+static USHORT
+DC390_StartSCSI( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ USHORT ioport, rc;
+ UCHAR bval, bval1, i, cnt;
+ PUCHAR ptr;
+ ULONG wlval;
+
+ pSRB->TagNumber = 31;
+ ioport = pACB->IOPortBase;
+ bval = pDCB->UnitSCSIID;
+ outb(bval,ioport+Scsi_Dest_ID);
+ bval = pDCB->SyncPeriod;
+ outb(bval,ioport+Sync_Period);
+ bval = pDCB->SyncOffset;
+ outb(bval,ioport+Sync_Offset);
+ bval = pDCB->CtrlR1;
+ outb(bval,ioport+CtrlReg1);
+ bval = pDCB->CtrlR3;
+ outb(bval,ioport+CtrlReg3);
+ bval = pDCB->CtrlR4;
+ outb(bval,ioport+CtrlReg4);
+ bval = CLEAR_FIFO_CMD; /* Flush FIFO */
+ outb(bval,ioport+ScsiCmd);
+
+ pSRB->ScsiPhase = SCSI_NOP0;
+ bval = pDCB->IdentifyMsg;
+ if( !(pDCB->SyncMode & EN_ATN_STOP) )
+ {
+ if( (pSRB->CmdBlock[0] == INQUIRY) ||
+ (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
+ (pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ bval &= 0xBF; /* NO disconnection */
+ outb(bval,ioport+ScsiFifo);
+ bval1 = SELECT_W_ATN;
+ pSRB->SRBState = SRB_START_;
+ if( pDCB->SyncMode & SYNC_ENABLE )
+ {
+ if( !(pDCB->IdentifyMsg & 7) ||
+ (pSRB->CmdBlock[0] != INQUIRY) )
+ {
+ bval1 = SEL_W_ATN_STOP;
+ pSRB->SRBState = SRB_MSGOUT;
+ }
+ }
+ }
+ else
+ {
+ if(pDCB->SyncMode & EN_TAG_QUEUING)
+ {
+ outb(bval,ioport+ScsiFifo);
+ bval = MSG_SIMPLE_QTAG;
+ outb(bval,ioport+ScsiFifo);
+ wlval = 1;
+ bval = 0;
+ while( wlval & pDCB->TagMask )
+ {
+ wlval = wlval << 1;
+ bval++;
+ }
+ outb(bval,ioport+ScsiFifo);
+ pDCB->TagMask |= wlval;
+ pSRB->TagNumber = bval;
+ bval1 = SEL_W_ATN2;
+ pSRB->SRBState = SRB_START_;
+ }
+ else
+ {
+ outb(bval,ioport+ScsiFifo);
+ bval1 = SELECT_W_ATN;
+ pSRB->SRBState = SRB_START_;
+ }
+ }
+
+ if( pSRB->SRBFlag & AUTO_REQSENSE )
+ {
+ bval = REQUEST_SENSE;
+ outb(bval,ioport+ScsiFifo);
+ bval = pDCB->IdentifyMsg << 5;
+ outb(bval,ioport+ScsiFifo);
+ bval = 0;
+ outb(bval,ioport+ScsiFifo);
+ outb(bval,ioport+ScsiFifo);
+ bval = sizeof(pSRB->pcmd->sense_buffer);
+ outb(bval,ioport+ScsiFifo);
+ bval = 0;
+ outb(bval,ioport+ScsiFifo);
+ }
+ else
+ {
+ cnt = pSRB->ScsiCmdLen;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ for(i=0; i<cnt; i++)
+ {
+ bval = *ptr++;
+ outb(bval,ioport+ScsiFifo);
+ }
+ }
+ }
+ else /* ATN_STOP */
+ {
+ if( (pSRB->CmdBlock[0] == INQUIRY) ||
+ (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
+ (pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ bval &= 0xBF;
+ outb(bval,ioport+ScsiFifo);
+ bval1 = SELECT_W_ATN;
+ pSRB->SRBState = SRB_START_;
+ if( pDCB->SyncMode & SYNC_ENABLE )
+ {
+ if( !(pDCB->IdentifyMsg & 7) ||
+ (pSRB->CmdBlock[0] != INQUIRY) )
+ {
+ bval1 = SEL_W_ATN_STOP;
+ pSRB->SRBState = SRB_MSGOUT;
+ }
+ }
+ }
+ else
+ {
+ if(pDCB->SyncMode & EN_TAG_QUEUING)
+ {
+ outb(bval,ioport+ScsiFifo);
+ pSRB->MsgOutBuf[0] = MSG_SIMPLE_QTAG;
+ wlval = 1;
+ bval = 0;
+ while( wlval & pDCB->TagMask )
+ {
+ wlval = wlval << 1;
+ bval++;
+ }
+ pDCB->TagMask |= wlval;
+ pSRB->TagNumber = bval;
+ pSRB->MsgOutBuf[1] = bval;
+ pSRB->MsgCnt = 2;
+ bval1 = SEL_W_ATN_STOP;
+ pSRB->SRBState = SRB_START_;
+ }
+ else
+ {
+ outb(bval,ioport+ScsiFifo);
+ pSRB->MsgOutBuf[0] = MSG_NOP;
+ pSRB->MsgCnt = 1;
+ pSRB->SRBState = SRB_START_;
+ bval1 = SEL_W_ATN_STOP;
+ }
+ }
+ }
+ bval = inb( ioport+Scsi_Status );
+ if( bval & INTERRUPT )
+ {
+ pSRB->SRBState = SRB_READY;
+ pDCB->TagMask &= ~( 1 << pSRB->TagNumber );
+ rc = 1;
+ }
+ else
+ {
+ pSRB->ScsiPhase = SCSI_NOP1;
+ pACB->pActiveDCB = pDCB;
+ pDCB->pActiveSRB = pSRB;
+ rc = 0;
+ outb(bval1,ioport+ScsiCmd);
+ }
+ return( rc );
+}
+
+
+#ifndef VERSION_ELF_1_2_13
+static void
+DC390_Interrupt( int irq, void *dev_id, struct pt_regs *regs)
+#else
+static void
+DC390_Interrupt( int irq, struct pt_regs *regs)
+#endif
+{
+ PACB pACB;
+ PDCB pDCB;
+ PSRB pSRB;
+ USHORT ioport = 0;
+ USHORT phase, i;
+ void (*stateV)( PACB, PSRB, PUCHAR );
+ UCHAR istate = 0;
+ UCHAR sstatus=0, istatus;
+
+ pACB = pACB_start;
+ if( pACB == NULL )
+ return;
+ for( i=0; i < adapterCnt; i++ )
+ {
+ if( pACB->IRQLevel == (UCHAR) irq )
+ {
+ ioport = pACB->IOPortBase;
+ sstatus = inb( ioport+Scsi_Status );
+ if( sstatus & INTERRUPT )
+ break;
+ else
+ pACB = pACB->pNextACB;
+ }
+ else
+ {
+ pACB = pACB->pNextACB;
+ }
+ }
+
+#ifdef DC390_DEBUG1
+ printk("sstatus=%2x,",sstatus);
+#endif
+
+ if( pACB == (PACB )-1 )
+ {
+ printk("DC390: Spurious interrupt detected!\n");
+ return;
+ }
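+ /* Note: the walk above apparently relies on the adapter list being
+ * terminated with a (PACB)-1 sentinel (set up when the ACBs are
+ * initialized elsewhere in this driver) rather than NULL, which is
+ * why a failed lookup is detected by comparing against -1. */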
+
+ istate = inb( ioport+Intern_State );
+ istatus = inb( ioport+INT_Status );
+
+#ifdef DC390_DEBUG1
+ printk("Istatus=%2x,",istatus);
+#endif
+
+ if(istatus & DISCONNECTED)
+ {
+ DC390_Disconnect( pACB );
+ return;
+ }
+
+ if(istatus & RESELECTED)
+ {
+ DC390_Reselect( pACB );
+ return;
+ }
+
+ if(istatus & INVALID_CMD)
+ {
+ DC390_InvalidCmd( pACB );
+ return;
+ }
+
+ if(istatus & SCSI_RESET)
+ {
+ DC390_ScsiRstDetect( pACB );
+ return;
+ }
+
+ if( istatus & (SUCCESSFUL_OP+SERVICE_REQUEST) )
+ {
+ pDCB = pACB->pActiveDCB;
+ pSRB = pDCB->pActiveSRB;
+ if( pDCB )
+ {
+ if( pDCB->DCBFlag & ABORT_DEV_ )
+ EnableMsgOut( pACB, pSRB );
+ }
+
+ phase = (USHORT) pSRB->ScsiPhase;
+ stateV = (void *) DC390_phase0[phase];
+ stateV( pACB, pSRB, &sstatus );
+
+ pSRB->ScsiPhase = sstatus & 7;
+ phase = (USHORT) sstatus & 7;
+ stateV = (void *) DC390_phase1[phase];
+ stateV( pACB, pSRB, &sstatus );
+ }
+}
+
+
+static void
+DC390_DataOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR sstatus, bval;
+ USHORT ioport;
+ PSGL psgl;
+ ULONG ResidCnt, xferCnt;
+
+ ioport = pACB->IOPortBase;
+ sstatus = *psstatus;
+
+ if( !(pSRB->SRBState & SRB_XFERPAD) )
+ {
+ if( sstatus & PARITY_ERR )
+ pSRB->SRBStatus |= PARITY_ERROR;
+
+ if( sstatus & COUNT_2_ZERO )
+ {
+ bval = inb(ioport+DMA_Status);
+ while( !(bval & DMA_XFER_DONE) )
+ bval = inb(ioport+DMA_Status);
+ pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
+ pSRB->SGIndex++;
+ if( pSRB->SGIndex < pSRB->SGcount )
+ {
+ pSRB->pSegmentList++;
+ psgl = pSRB->pSegmentList;
+
+#ifndef VERSION_ELF_1_2_13
+ pSRB->SGPhysAddr = virt_to_phys( psgl->address );
+#else
+ pSRB->SGPhysAddr = (ULONG) psgl->address;
+#endif
+ pSRB->SGToBeXferLen = (ULONG) psgl->length;
+ }
+ else
+ pSRB->SGToBeXferLen = 0;
+ }
+ else
+ {
+ bval = inb( ioport+Current_Fifo );
+ bval &= 0x1f;
+ ResidCnt = (ULONG) inb(ioport+CtcReg_High);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Mid);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Low);
+ ResidCnt += (ULONG) bval;
+
+ xferCnt = pSRB->SGToBeXferLen - ResidCnt;
+ pSRB->SGPhysAddr += xferCnt;
+ pSRB->TotalXferredLen += xferCnt;
+ pSRB->SGToBeXferLen = ResidCnt;
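+ /* Residual bookkeeping, by example: the 24-bit transfer counter is
+ * reassembled from CtcReg_High/Mid/Low and the bytes still sitting in
+ * the FIFO are added back, since they never reached the target. If
+ * 0x10000 bytes were programmed and ResidCnt works out to 0x400, then
+ * xferCnt = 0xFC00 bytes actually went out, the segment address and
+ * totals advance by that much, and 0x400 bytes remain to be sent. */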
+ }
+ }
+ bval = WRITE_DIRECTION+DMA_IDLE_CMD;
+ outb( bval, ioport+DMA_Cmd);
+}
+
+static void
+DC390_DataIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR sstatus, bval;
+ USHORT i, ioport, residual;
+ PSGL psgl;
+ ULONG ResidCnt, xferCnt;
+ PUCHAR ptr;
+
+
+ ioport = pACB->IOPortBase;
+ sstatus = *psstatus;
+
+ if( !(pSRB->SRBState & SRB_XFERPAD) )
+ {
+ if( sstatus & PARITY_ERR )
+ pSRB->SRBStatus |= PARITY_ERROR;
+
+ if( sstatus & COUNT_2_ZERO )
+ {
+ bval = inb(ioport+DMA_Status);
+ while( !(bval & DMA_XFER_DONE) )
+ bval = inb(ioport+DMA_Status);
+
+ bval = READ_DIRECTION+DMA_IDLE_CMD;
+ outb( bval, ioport+DMA_Cmd);
+
+ pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
+ pSRB->SGIndex++;
+ if( pSRB->SGIndex < pSRB->SGcount )
+ {
+ pSRB->pSegmentList++;
+ psgl = pSRB->pSegmentList;
+
+#ifndef VERSION_ELF_1_2_13
+ pSRB->SGPhysAddr = virt_to_phys( psgl->address );
+#else
+ pSRB->SGPhysAddr = (ULONG) psgl->address;
+#endif
+ pSRB->SGToBeXferLen = (ULONG) psgl->length;
+ }
+ else
+ pSRB->SGToBeXferLen = 0;
+ }
+ else /* phase changed */
+ {
+ residual = 0;
+ bval = inb(ioport+Current_Fifo);
+ while( bval & 0x1f )
+ {
+ if( (bval & 0x1f) == 1 )
+ {
+ for(i=0; i< 0x100; i++)
+ {
+ bval = inb(ioport+Current_Fifo);
+ if( !(bval & 0x1f) )
+ goto din_1;
+ else if( i == 0x0ff )
+ {
+ residual = 1; /* ;1 residual byte */
+ goto din_1;
+ }
+ }
+ }
+ else
+ bval = inb(ioport+Current_Fifo);
+ }
+din_1:
+ bval = READ_DIRECTION+DMA_BLAST_CMD;
+ outb(bval, ioport+DMA_Cmd);
+ for(i=0; i<0x8000; i++)
+ {
+ bval = inb(ioport+DMA_Status);
+ if(bval & BLAST_COMPLETE)
+ break;
+ }
+ bval = READ_DIRECTION+DMA_IDLE_CMD;
+ outb(bval, ioport+DMA_Cmd);
+
+ ResidCnt = (ULONG) inb(ioport+CtcReg_High);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Mid);
+ ResidCnt = ResidCnt << 8;
+ ResidCnt |= (ULONG) inb(ioport+CtcReg_Low);
+
+ xferCnt = pSRB->SGToBeXferLen - ResidCnt;
+ pSRB->SGPhysAddr += xferCnt;
+ pSRB->TotalXferredLen += xferCnt;
+ pSRB->SGToBeXferLen = ResidCnt;
+
+ if( residual )
+ {
+ bval = inb(ioport+ScsiFifo); /* get residual byte */
+#ifndef VERSION_ELF_1_2_13
+ ptr = (PUCHAR) phys_to_virt( pSRB->SGPhysAddr );
+#else
+ ptr = (PUCHAR) pSRB->SGPhysAddr;
+#endif
+ *ptr = bval;
+ pSRB->SGPhysAddr++;
+ pSRB->TotalXferredLen++;
+ pSRB->SGToBeXferLen--;
+ }
+ }
+ }
+}
+
+static void
+DC390_Command_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+}
+
+static void
+DC390_Status_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ bval = inb(ioport+ScsiFifo);
+ pSRB->TargetStatus = bval;
+ bval++;
+ bval = inb(ioport+ScsiFifo); /* get message */
+ pSRB->EndMessage = bval;
+
+ *psstatus = SCSI_NOP0;
+ pSRB->SRBState = SRB_COMPLETED;
+ bval = MSG_ACCEPTED_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+static void
+DC390_MsgOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) )
+ *psstatus = SCSI_NOP0;
+}
+
+static void
+DC390_MsgIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport, wval, wval1;
+ PDCB pDCB;
+ PSRB psrb;
+
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+
+ bval = inb( ioport+ScsiFifo );
+ if( !(pSRB->SRBState & SRB_MSGIN_MULTI) )
+ {
+ if(bval == MSG_DISCONNECT)
+ {
+ pSRB->SRBState = SRB_DISCONNECT;
+ }
+ else if( bval == MSG_SAVE_PTR )
+ goto min6;
+ else if( (bval == MSG_EXTENDED) || ((bval >= MSG_SIMPLE_QTAG) &&
+ (bval <= MSG_ORDER_QTAG)) )
+ {
+ pSRB->SRBState |= SRB_MSGIN_MULTI;
+ pSRB->MsgInBuf[0] = bval;
+ pSRB->MsgCnt = 1;
+ pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
+ }
+ else if(bval == MSG_REJECT_)
+ {
+ bval = RESET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+ if( pSRB->SRBState & DO_SYNC_NEGO)
+ goto set_async;
+ }
+ else if( bval == MSG_RESTORE_PTR)
+ goto min6;
+ else
+ goto min6;
+ }
+ else
+ { /* minx: */
+
+ *pSRB->pMsgPtr = bval;
+ pSRB->MsgCnt++;
+ pSRB->pMsgPtr++;
+ if( (pSRB->MsgInBuf[0] >= MSG_SIMPLE_QTAG) &&
+ (pSRB->MsgInBuf[0] <= MSG_ORDER_QTAG) )
+ {
+ if( pSRB->MsgCnt == 2)
+ {
+ pSRB->SRBState = 0;
+ bval = pSRB->MsgInBuf[1];
+ pSRB = pDCB->pGoingSRB;
+ psrb = pDCB->pGoingLast;
+ if( pSRB )
+ {
+ for( ;; )
+ {
+ if(pSRB->TagNumber != bval)
+ {
+ if( pSRB == psrb )
+ goto mingx0;
+ pSRB = pSRB->pNextSRB;
+ }
+ else
+ break;
+ }
+ if( pDCB->DCBFlag & ABORT_DEV_ )
+ {
+ pSRB->SRBState = SRB_ABORT_SENT;
+ EnableMsgOut( pACB, pSRB );
+ }
+ if( !(pSRB->SRBState & SRB_DISCONNECT) )
+ goto mingx0;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->SRBState = SRB_DATA_XFER;
+ }
+ else
+ {
+mingx0:
+ pSRB = pACB->pTmpSRB;
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
+ EnableMsgOut2( pACB, pSRB );
+ }
+ }
+ }
+ else if( (pSRB->MsgInBuf[0] == MSG_EXTENDED) && (pSRB->MsgCnt == 5) )
+ {
+ pSRB->SRBState &= ~(SRB_MSGIN_MULTI+DO_SYNC_NEGO);
+ if( (pSRB->MsgInBuf[1] != 3) || (pSRB->MsgInBuf[2] != 1) )
+ { /* reject_msg: */
+ pSRB->MsgCnt = 1;
+ pSRB->MsgInBuf[0] = MSG_REJECT_;
+ bval = SET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+ }
+ else if( !(pSRB->MsgInBuf[3]) || !(pSRB->MsgInBuf[4]) )
+ {
+set_async:
+ pDCB = pSRB->pSRBDCB;
+ pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE);
+ pDCB->SyncPeriod = 0;
+ pDCB->SyncOffset = 0;
+ pDCB->CtrlR3 = FAST_CLK; /* ;non_fast */
+ pDCB->CtrlR4 &= 0x3f;
+ pDCB->CtrlR4 |= EATER_25NS; /* ; 25ns glitch eater */
+ goto re_prog;
+ }
+ else
+ { /* set_sync: */
+
+ pDCB = pSRB->pSRBDCB;
+ pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE;
+ pDCB->SyncOffset &= 0x0f0;
+ pDCB->SyncOffset |= pSRB->MsgInBuf[4];
+ pDCB->NegoPeriod = pSRB->MsgInBuf[3];
+ wval = (USHORT) pSRB->MsgInBuf[3];
+ wval = wval << 2;
+ wval--;
+ wval1 = wval / 25;
+ if( (wval1 * 25) != wval)
+ wval1++;
+ bval = FAST_CLK+FAST_SCSI;
+ pDCB->CtrlR4 &= 0x3f;
+ if(wval1 >= 8)
+ {
+ wval1--;
+ bval = FAST_CLK; /* ;fast clock/normal scsi */
+ pDCB->CtrlR4 |= EATER_25NS; /* ;25 ns glitch eater */
+ }
+ pDCB->CtrlR3 = bval;
+ pDCB->SyncPeriod = (UCHAR)wval1;
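+ /* Worked example of the arithmetic above (MsgInBuf[3] is the SDTR
+ * period factor in units of 4 ns; the divide by 25 suggests a 25 ns
+ * chip clock): a factor of 25 (100 ns) gives wval = 99 and
+ * wval1 = ceil(99/25) = 4 clocks, programmed with FAST_CLK+FAST_SCSI;
+ * a factor of 50 (200 ns) gives wval1 = 8 and takes the slow branch,
+ * dropping wval1 to 7, clearing FAST_SCSI and enabling the 25 ns
+ * glitch eater. Values are illustrative only. */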
+re_prog:
+ bval = pDCB->SyncPeriod;
+ outb(bval, ioport+Sync_Period);
+ bval = pDCB->SyncOffset;
+ outb(bval, ioport+Sync_Offset);
+ bval = pDCB->CtrlR3;
+ outb(bval, ioport+CtrlReg3);
+ bval = pDCB->CtrlR4;
+ outb(bval, ioport+CtrlReg4);
+ SetXferRate( pACB, pDCB);
+ }
+ }
+ }
+min6:
+ *psstatus = SCSI_NOP0;
+ bval = MSG_ACCEPTED_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+static void
+DataIO_Comm( PACB pACB, PSRB pSRB, UCHAR ioDir)
+{
+ PSGL psgl;
+ UCHAR bval;
+ USHORT ioport;
+ ULONG lval;
+
+
+ ioport = pACB->IOPortBase;
+ if( pSRB->SGIndex < pSRB->SGcount )
+ {
+ bval = DMA_IDLE_CMD | ioDir; /* ;+EN_DMA_INT */
+ outb( bval, ioport+DMA_Cmd);
+ if( !pSRB->SGToBeXferLen )
+ {
+ psgl = pSRB->pSegmentList;
+#ifndef VERSION_ELF_1_2_13
+ pSRB->SGPhysAddr = virt_to_phys( psgl->address );
+#else
+ pSRB->SGPhysAddr = (ULONG) psgl->address;
+#endif
+ pSRB->SGToBeXferLen = (ULONG) psgl->length;
+ }
+ lval = pSRB->SGToBeXferLen;
+ bval = (UCHAR) lval;
+ outb(bval,ioport+CtcReg_Low);
+ lval = lval >> 8;
+ bval = (UCHAR) lval;
+ outb(bval,ioport+CtcReg_Mid);
+ lval = lval >> 8;
+ bval = (UCHAR) lval;
+ outb(bval,ioport+CtcReg_High);
+
+ lval = pSRB->SGToBeXferLen;
+ outl(lval, ioport+DMA_XferCnt);
+
+ lval = pSRB->SGPhysAddr;
+ outl( lval, ioport+DMA_XferAddr);
+
+ bval = DMA_COMMAND+INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+
+ pSRB->SRBState = SRB_DATA_XFER;
+
+ bval = DMA_IDLE_CMD | ioDir; /* ;+EN_DMA_INT */
+ outb(bval, ioport+DMA_Cmd);
+
+ bval = DMA_START_CMD | ioDir; /* ;+EN_DMA_INT */
+ outb(bval, ioport+DMA_Cmd);
+ }
+ else /* xfer pad */
+ {
+ if( pSRB->SGcount )
+ {
+ pSRB->AdaptStatus = H_OVER_UNDER_RUN;
+ pSRB->SRBStatus |= OVER_RUN;
+ }
+ bval = 0;
+ outb(bval,ioport+CtcReg_Low);
+ outb(bval,ioport+CtcReg_Mid);
+ outb(bval,ioport+CtcReg_High);
+
+ pSRB->SRBState |= SRB_XFERPAD;
+ bval = DMA_COMMAND+XFER_PAD_BYTE;
+ outb(bval, ioport+ScsiCmd);
+/*
+ bval = DMA_IDLE_CMD | ioDir; ;+EN_DMA_INT
+ outb(bval, ioport+DMA_Cmd);
+ bval = DMA_START_CMD | ioDir; ;+EN_DMA_INT
+ outb(bval, ioport+DMA_Cmd);
+*/
+ }
+}
+
+
+static void
+DC390_DataOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR ioDir;
+
+ ioDir = WRITE_DIRECTION;
+ DataIO_Comm( pACB, pSRB, ioDir);
+}
+
+static void
+DC390_DataInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR ioDir;
+
+ ioDir = READ_DIRECTION;
+ DataIO_Comm( pACB, pSRB, ioDir);
+}
+
+static void
+DC390_CommandPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ PDCB pDCB;
+ UCHAR bval;
+ PUCHAR ptr;
+ USHORT ioport, i, cnt;
+
+
+ ioport = pACB->IOPortBase;
+ bval = RESET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ if( !(pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ cnt = (USHORT) pSRB->ScsiCmdLen;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ for(i=0; i < cnt; i++)
+ {
+ outb(*ptr, ioport+ScsiFifo);
+ ptr++;
+ }
+ }
+ else
+ {
+ bval = REQUEST_SENSE;
+ outb(bval, ioport+ScsiFifo);
+ pDCB = pACB->pActiveDCB;
+ bval = pDCB->IdentifyMsg << 5;
+ outb(bval, ioport+ScsiFifo);
+ bval = 0;
+ outb(bval, ioport+ScsiFifo);
+ outb(bval, ioport+ScsiFifo);
+ bval = sizeof(pSRB->pcmd->sense_buffer);
+ outb(bval, ioport+ScsiFifo);
+ bval = 0;
+ outb(bval, ioport+ScsiFifo);
+ }
+ pSRB->SRBState = SRB_COMMAND;
+ bval = INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+static void
+DC390_StatusPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ pSRB->SRBState = SRB_STATUS;
+ bval = INITIATOR_CMD_CMPLTE;
+ outb(bval, ioport+ScsiCmd);
+}
+
+static void
+DC390_MsgOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport, i, cnt;
+ PUCHAR ptr;
+ PDCB pDCB;
+
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ pDCB = pACB->pActiveDCB;
+ if( !(pSRB->SRBState & SRB_MSGOUT) )
+ {
+ cnt = pSRB->MsgCnt;
+ if( cnt )
+ {
+ ptr = (PUCHAR) pSRB->MsgOutBuf;
+ for(i=0; i < cnt; i++)
+ {
+ outb(*ptr, ioport+ScsiFifo);
+ ptr++;
+ }
+ pSRB->MsgCnt = 0;
+ if( (pDCB->DCBFlag & ABORT_DEV_) &&
+ (pSRB->MsgOutBuf[0] == MSG_ABORT) )
+ pSRB->SRBState = SRB_ABORT_SENT;
+ }
+ else
+ {
+ bval = MSG_ABORT; /* ??? MSG_NOP */
+ if( (pSRB->CmdBlock[0] == INQUIRY ) ||
+ (pSRB->CmdBlock[0] == REQUEST_SENSE) ||
+ (pSRB->SRBFlag & AUTO_REQSENSE) )
+ {
+ if( pDCB->SyncMode & SYNC_ENABLE )
+ goto mop1;
+ }
+ outb(bval, ioport+ScsiFifo);
+ }
+ bval = INFO_XFER_CMD;
+ outb( bval, ioport+ScsiCmd);
+ }
+ else
+ {
+mop1:
+ bval = MSG_EXTENDED;
+ outb(bval, ioport+ScsiFifo);
+ bval = 3; /* ;length of extended msg */
+ outb(bval, ioport+ScsiFifo);
+ bval = 1; /* ; sync nego */
+ outb(bval, ioport+ScsiFifo);
+ bval = pDCB->NegoPeriod;
+ outb(bval, ioport+ScsiFifo);
+ bval = SYNC_NEGO_OFFSET;
+ outb(bval, ioport+ScsiFifo);
+ pSRB->SRBState |= DO_SYNC_NEGO;
+ bval = INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+ }
+}
+
+static void
+DC390_MsgInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+ UCHAR bval;
+ USHORT ioport;
+
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval, ioport+ScsiCmd);
+ if( !(pSRB->SRBState & SRB_MSGIN) )
+ {
+ pSRB->SRBState &= SRB_DISCONNECT;
+ pSRB->SRBState |= SRB_MSGIN;
+ }
+ bval = INFO_XFER_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+static void
+DC390_Nop_0( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+}
+
+static void
+DC390_Nop_1( PACB pACB, PSRB pSRB, PUCHAR psstatus)
+{
+}
+
+
+static void
+SetXferRate( PACB pACB, PDCB pDCB )
+{
+ UCHAR bval;
+ USHORT cnt, i;
+ PDCB ptr;
+
+ if( !(pDCB->IdentifyMsg & 0x07) )
+ {
+ if( pACB->scan_devices )
+ {
+ CurrSyncOffset = pDCB->SyncOffset;
+ }
+ else
+ {
+ ptr = pACB->pLinkDCB;
+ cnt = pACB->DeviceCnt;
+ bval = pDCB->UnitSCSIID;
+ for(i=0; i<cnt; i++)
+ {
+ if( ptr->UnitSCSIID == bval )
+ {
+ ptr->SyncPeriod = pDCB->SyncPeriod;
+ ptr->SyncOffset = pDCB->SyncOffset;
+ ptr->CtrlR3 = pDCB->CtrlR3;
+ ptr->CtrlR4 = pDCB->CtrlR4;
+ ptr->SyncMode = pDCB->SyncMode;
+ }
+ ptr = ptr->pNextDCB;
+ }
+ }
+ }
+ return;
+}
+
+
+static void
+DC390_Disconnect( PACB pACB )
+{
+ PDCB pDCB;
+ PSRB pSRB, psrb;
+ ULONG flags;
+ USHORT ioport, i, cnt;
+ UCHAR bval;
+
+#ifdef DC390_DEBUG0
+ printk("DISC,");
+#endif
+
+ save_flags(flags);
+ cli();
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ if (!pDCB)
+ {
+#ifdef DC390_DEBUG0
+ printk("ACB:%08lx->ActiveDCB:%08lx !,",(ULONG)pACB,(ULONG)pDCB);
+#endif
+ restore_flags(flags); return;
+ }
+ pSRB = pDCB->pActiveSRB;
+ pACB->pActiveDCB = 0;
+ pSRB->ScsiPhase = SCSI_NOP0;
+ bval = EN_SEL_RESEL;
+ outb(bval, ioport+ScsiCmd);
+ if( pSRB->SRBState & SRB_UNEXPECT_RESEL )
+ {
+ pSRB->SRBState = 0;
+ DoWaitingSRB( pACB );
+ }
+ else if( pSRB->SRBState & SRB_ABORT_SENT )
+ {
+ pDCB->TagMask = 0;
+ pDCB->DCBFlag = 0;
+ cnt = pDCB->GoingSRBCnt;
+ pDCB->GoingSRBCnt = 0;
+ pSRB = pDCB->pGoingSRB;
+ for( i=0; i < cnt; i++)
+ {
+ psrb = pSRB->pNextSRB;
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pSRB = psrb;
+ }
+ pDCB->pGoingSRB = 0;
+ DoWaitingSRB( pACB );
+ }
+ else
+ {
+ if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
+ !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
+ { /* Selection time out */
+ if( !(pACB->scan_devices) )
+ {
+ pSRB->SRBState = SRB_READY;
+ RewaitSRB( pDCB, pSRB);
+ }
+ else
+ {
+ pSRB->TargetStatus = SCSI_STAT_SEL_TIMEOUT;
+ goto disc1;
+ }
+ }
+ else if( pSRB->SRBState & SRB_DISCONNECT )
+ {
+ DoWaitingSRB( pACB );
+ }
+ else if( pSRB->SRBState & SRB_COMPLETED )
+ {
+disc1:
+ if(pDCB->MaxCommand > 1)
+ {
+ bval = pSRB->TagNumber;
+ pDCB->TagMask &= (~(1 << bval)); /* free tag mask */
+ }
+ pDCB->pActiveSRB = 0;
+ pSRB->SRBState = SRB_FREE;
+ SRBdone( pACB, pDCB, pSRB);
+ }
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+DC390_Reselect( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB pSRB;
+ USHORT ioport, wval;
+ UCHAR bval, bval1;
+
+
+#ifdef DC390_DEBUG0
+ printk("RSEL,");
+#endif
+ ioport = pACB->IOPortBase;
+ pDCB = pACB->pActiveDCB;
+ if( pDCB )
+ { /* Arbitration lost but Reselection win */
+ pSRB = pDCB->pActiveSRB;
+ if( !( pACB->scan_devices ) )
+ {
+ pSRB->SRBState = SRB_READY;
+ RewaitSRB( pDCB, pSRB);
+ }
+ }
+ bval = inb(ioport+ScsiFifo); /* get ID */
+ bval = bval ^ pACB->HostID_Bit;
+ wval = 0;
+ bval1 = 1;
+ for(;;)
+ {
+ if( !(bval & bval1) )
+ {
+ bval1 = bval1 << 1;
+ wval++;
+ }
+ else
+ break;
+ }
+ wval |= ( (USHORT) inb(ioport+ScsiFifo) & 7) << 8; /* get LUN */
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ while( wval != *((PUSHORT) &pDCB->UnitSCSIID) )
+ {
+ pDCB = pDCB->pNextDCB;
+ if( pDCB == pdcb )
+ return;
+ }
+ pACB->pActiveDCB = pDCB;
+ if( pDCB->SyncMode & EN_TAG_QUEUING )
+ {
+ pSRB = pACB->pTmpSRB;
+ pDCB->pActiveSRB = pSRB;
+ }
+ else
+ {
+ pSRB = pDCB->pActiveSRB;
+ if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) )
+ {
+ pSRB= pACB->pTmpSRB;
+ pSRB->SRBState = SRB_UNEXPECT_RESEL;
+ pDCB->pActiveSRB = pSRB;
+ EnableMsgOut( pACB, pSRB );
+ }
+ else
+ {
+ if( pDCB->DCBFlag & ABORT_DEV_ )
+ {
+ pSRB->SRBState = SRB_ABORT_SENT;
+ EnableMsgOut( pACB, pSRB );
+ }
+ else
+ pSRB->SRBState = SRB_DATA_XFER;
+ }
+ }
+ pSRB->ScsiPhase = SCSI_NOP0;
+ bval = pDCB->UnitSCSIID;
+ outb( bval, ioport+Scsi_Dest_ID);
+ bval = pDCB->SyncPeriod;
+ outb(bval, ioport+Sync_Period);
+ bval = pDCB->SyncOffset;
+ outb( bval, ioport+Sync_Offset);
+ bval = pDCB->CtrlR1;
+ outb(bval, ioport+CtrlReg1);
+ bval = pDCB->CtrlR3;
+ outb(bval, ioport+CtrlReg3);
+ bval = pDCB->CtrlR4; /* ; Glitch eater */
+ outb(bval, ioport+CtrlReg4);
+ bval = MSG_ACCEPTED_CMD; /* ;to rls the /ACK signal */
+ outb(bval, ioport+ScsiCmd);
+}
+
+
+static void
+SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb;
+ UCHAR bval, bval1, i, j, status;
+ PSCSICMD pcmd;
+ PSCSI_INQDATA ptr;
+ USHORT disable_tag;
+ ULONG flags;
+ PSGL ptr2;
+ ULONG swlval;
+
+ pcmd = pSRB->pcmd;
+ status = pSRB->TargetStatus;
+ if(pSRB->SRBFlag & AUTO_REQSENSE)
+ {
+ pSRB->SRBFlag &= ~AUTO_REQSENSE;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = SCSI_STAT_CHECKCOND;
+ if(status == SCSI_STAT_CHECKCOND)
+ {
+ pcmd->result = DID_BAD_TARGET << 16;
+ goto ckc_e;
+ }
+ if(pSRB->RetryCnt == 0)
+ {
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
+ pSRB->TotalXferredLen = pSRB->Segment1[1];
+ if( (pSRB->TotalXferredLen) &&
+ (pSRB->TotalXferredLen >= pcmd->underflow) )
+ pcmd->result |= (DID_OK << 16);
+ else
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+#ifdef DC390_DEBUG0
+ printk("Cmd=%2x,Result=%8x,XferL=%8x,",pSRB->CmdBlock[0],
+ (UINT) pcmd->result, (UINT) pSRB->TotalXferredLen);
+#endif
+ goto ckc_e;
+ }
+ else
+ {
+ pSRB->RetryCnt--;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ *((PULONG) &(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
+ *((PULONG) &(pSRB->CmdBlock[4])) = pSRB->Segment0[1];
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ pcmd->result = (DRIVER_SENSE << 24) | (DRIVER_OK << 16) |
+ SCSI_STAT_CHECKCOND;
+ goto ckc_e;
+ }
+ pcmd->result |= (DRIVER_SENSE << 24);
+ pSRB->SGcount = (UCHAR) pSRB->Segment1[0];
+ pSRB->ScsiCmdLen = (UCHAR) (pSRB->Segment1[0] >> 8);
+ pSRB->SGIndex = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGToBeXferLen = 0;
+ if( pcmd->use_sg )
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ else if( pcmd->request_buffer )
+ {
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ if( DC390_StartSCSI( pACB, pDCB, pSRB ) )
+ RewaitSRB( pDCB, pSRB );
+ return;
+ }
+ }
+ if( status )
+ {
+ if( status == SCSI_STAT_CHECKCOND)
+ {
+ if( (pSRB->SGIndex < pSRB->SGcount) && (pSRB->SGcount) && (pSRB->SGToBeXferLen) )
+ {
+ bval = pSRB->SGcount;
+ swlval = 0;
+ ptr2 = pSRB->pSegmentList;
+ for( i=pSRB->SGIndex; i < bval; i++)
+ {
+ swlval += ptr2->length;
+ ptr2++;
+ }
+#ifdef DC390_DEBUG0
+ printk("XferredLen=%8x,NotXferLen=%8x,",
+ (UINT) pSRB->TotalXferredLen, (UINT) swlval);
+#endif
+ }
+ RequestSense( pACB, pDCB, pSRB );
+ return;
+ }
+ else if( status == SCSI_STAT_QUEUEFULL )
+ {
+ bval = (UCHAR) pDCB->GoingSRBCnt;
+ bval--;
+ pDCB->MaxCommand = bval;
+ RewaitSRB( pDCB, pSRB );
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ return;
+ }
+ else if(status == SCSI_STAT_SEL_TIMEOUT)
+ {
+ pSRB->AdaptStatus = H_SEL_TIMEOUT;
+ pSRB->TargetStatus = 0;
+ pcmd->result = DID_BAD_TARGET << 16;
+ }
+ else
+ {
+ pSRB->AdaptStatus = 0;
+ if( pSRB->RetryCnt )
+ {
+ pSRB->RetryCnt--;
+ pSRB->TargetStatus = 0;
+ pSRB->SGIndex = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGToBeXferLen = 0;
+ if( pcmd->use_sg )
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ else if( pcmd->request_buffer )
+ {
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ if( DC390_StartSCSI( pACB, pDCB, pSRB ) )
+ RewaitSRB( pDCB, pSRB );
+ return;
+ }
+ else
+ {
+ pcmd->result |= (DID_ERROR << 16) | (ULONG) (pSRB->EndMessage << 8) |
+ (ULONG) status;
+ }
+ }
+ }
+ else
+ {
+ status = pSRB->AdaptStatus;
+ if(status & H_OVER_UNDER_RUN)
+ {
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16) | (pSRB->EndMessage << 8);
+ }
+ else if( pSRB->SRBStatus & PARITY_ERROR)
+ {
+ pcmd->result |= (DID_PARITY << 16) | (pSRB->EndMessage << 8);
+ }
+ else /* No error */
+ {
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pcmd->result |= (DID_OK << 16);
+ }
+ }
+
+ckc_e:
+ if( pACB->scan_devices )
+ {
+ if( pSRB->CmdBlock[0] == TEST_UNIT_READY )
+ {
+ if(pcmd->result != (DID_OK << 16))
+ {
+ if( pcmd->result & SCSI_STAT_CHECKCOND )
+ {
+ goto RTN_OK;
+ }
+ else
+ {
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) &&
+ ((pcmd->lun == 0) || (pcmd->lun == pACB->max_lun)) )
+ {
+ pACB->scan_devices = 0;
+ }
+ }
+ }
+ else
+ {
+RTN_OK:
+ pPrevDCB->pNextDCB = pDCB;
+ pDCB->pNextDCB = pACB->pLinkDCB;
+ if( (pcmd->target == pACB->max_id) && (pcmd->lun == pACB->max_lun) )
+ pACB->scan_devices = END_SCAN;
+ }
+ }
+ else if( pSRB->CmdBlock[0] == INQUIRY )
+ {
+ if( (pcmd->target == pACB->max_id) &&
+ (pcmd->lun == pACB->max_lun) )
+ {
+ pACB->scan_devices = 0;
+ }
+ ptr = (PSCSI_INQDATA) (pcmd->request_buffer);
+ if( pcmd->use_sg )
+ ptr = (PSCSI_INQDATA) (((PSGL) ptr)->address);
+ bval1 = ptr->DevType & SCSI_DEVTYPE;
+ if(bval1 == SCSI_NODEV)
+ {
+ pACB->DCBmap[pcmd->target] &= ~(1 << pcmd->lun);
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+ else
+ {
+ pACB->DeviceCnt++;
+ pPrevDCB = pDCB;
+ pACB->pDCB_free = (PDCB) ((ULONG) (pACB->pDCB_free) + sizeof( DC390_DCB ));
+ pDCB->DevType = bval1;
+ if(bval1 == TYPE_DISK || bval1 == TYPE_MOD)
+ {
+ if( (((ptr->Vers & 0x07) >= 2) || ((ptr->RDF & 0x0F) == 2)) &&
+ (ptr->Flags & SCSI_INQ_CMDQUEUE) &&
+ (pDCB->DevMode & TAG_QUEUING_) &&
+ (pDCB->DevMode & EN_DISCONNECT_) )
+ {
+ disable_tag = 0;
+ for(i=0; i<BADDEVCNT; i++)
+ {
+ for(j=0; j<28; j++)
+ {
+ if( ((PUCHAR)ptr)[8+j] != baddevname1[i][j])
+ break;
+ }
+ if(j == 28)
+ {
+ disable_tag = 1;
+ break;
+ }
+ }
+
+ if( !disable_tag )
+ {
+ pDCB->MaxCommand = pACB->TagMaxNum;
+ pDCB->SyncMode |= EN_TAG_QUEUING;
+ pDCB->TagMask = 0;
+ }
+ else
+ {
+ pDCB->SyncMode |= EN_ATN_STOP;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ save_flags( flags );
+ cli();
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ if(pSRB == pDCB->pGoingSRB )
+ {
+ pDCB->pGoingSRB = pSRB->pNextSRB;
+ }
+ else
+ {
+ psrb = pDCB->pGoingSRB;
+ while( psrb->pNextSRB != pSRB )
+ psrb = psrb->pNextSRB;
+ psrb->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pGoingLast )
+ pDCB->pGoingLast = psrb;
+ }
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ pDCB->GoingSRBCnt--;
+
+ DoWaitingSRB( pACB );
+ restore_flags(flags);
+
+/* Notify cmd done */
+ pcmd->scsi_done( pcmd );
+
+ if( pDCB->QIORBCnt )
+ DoNextCmd( pACB, pDCB );
+ return;
+}
+
+
+static void
+DoingSRB_Done( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB psrb, psrb2;
+ USHORT cnt, i;
+ PSCSICMD pcmd;
+
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ do
+ {
+ cnt = pdcb->GoingSRBCnt;
+ psrb = pdcb->pGoingSRB;
+ for( i=0; i<cnt; i++)
+ {
+ psrb2 = psrb->pNextSRB;
+ pcmd = psrb->pcmd;
+ pcmd->result = DID_RESET << 16;
+
+/* ReleaseSRB( pDCB, pSRB ); */
+
+ psrb->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = psrb;
+
+ pcmd->scsi_done( pcmd );
+ psrb = psrb2;
+ }
+ pdcb->GoingSRBCnt = 0;
+ pdcb->pGoingSRB = NULL;
+ pdcb->TagMask = 0;
+ pdcb = pdcb->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+static void
+DC390_ResetSCSIBus( PACB pACB )
+{
+ USHORT ioport;
+ UCHAR bval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ pACB->ACBFlag |= RESET_DEV;
+ ioport = pACB->IOPortBase;
+
+ bval = DMA_IDLE_CMD;
+ outb(bval,ioport+DMA_Cmd);
+
+ bval = RST_SCSI_BUS_CMD;
+ outb(bval,ioport+ScsiCmd);
+
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+DC390_ScsiRstDetect( PACB pACB )
+{
+ ULONG wlval, flags;
+ USHORT ioport;
+ UCHAR bval;
+
+#ifdef DC390_DEBUG0
+ printk("RST_DETEC");
+#endif
+ save_flags(flags);
+ sti();
+ wlval = jiffies + HZ;
+ while( jiffies < wlval ); /* delay 1 sec */
+
+ cli();
+ ioport = pACB->IOPortBase;
+ bval = DMA_IDLE_CMD;
+ outb(bval,ioport+DMA_Cmd);
+ bval = CLEAR_FIFO_CMD;
+ outb(bval,ioport+ScsiCmd);
+
+ if( pACB->ACBFlag & RESET_DEV )
+ pACB->ACBFlag |= RESET_DONE;
+ else
+ {
+ pACB->ACBFlag |= RESET_DETECT;
+
+ ResetDevParam( pACB );
+/* DoingSRB_Done( pACB ); ???? */
+ RecoverSRB( pACB );
+ pACB->pActiveDCB = NULL;
+ pACB->ACBFlag = 0;
+ DoWaitingSRB( pACB );
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+RequestSense( PACB pACB, PDCB pDCB, PSRB pSRB )
+{
+ PSCSICMD pcmd;
+
+ pSRB->SRBFlag |= AUTO_REQSENSE;
+ pSRB->Segment0[0] = *((PULONG) &(pSRB->CmdBlock[0]));
+ pSRB->Segment0[1] = *((PULONG) &(pSRB->CmdBlock[4]));
+ pSRB->Segment1[0] = (ULONG) ((pSRB->ScsiCmdLen << 8) + pSRB->SGcount);
+ pSRB->Segment1[1] = pSRB->TotalXferredLen;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+
+ pcmd = pSRB->pcmd;
+
+ pSRB->Segmentx.address = (PUCHAR) &(pcmd->sense_buffer);
+ pSRB->Segmentx.length = sizeof(pcmd->sense_buffer);
+ pSRB->pSegmentList = &pSRB->Segmentx;
+ pSRB->SGcount = 1;
+ pSRB->SGIndex = 0;
+
+ *((PULONG) &(pSRB->CmdBlock[0])) = 0x00000003;
+ pSRB->CmdBlock[1] = pDCB->IdentifyMsg << 5;
+ *((PUSHORT) &(pSRB->CmdBlock[4])) = sizeof(pcmd->sense_buffer);
+ pSRB->ScsiCmdLen = 6;
+
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGToBeXferLen = 0;
+ if( DC390_StartSCSI( pACB, pDCB, pSRB ) )
+ RewaitSRB( pDCB, pSRB );
+}
+
+
+static void
+EnableMsgOut2( PACB pACB, PSRB pSRB )
+{
+ USHORT ioport;
+ UCHAR bval;
+
+ ioport = pACB->IOPortBase;
+ pSRB->MsgCnt = 1;
+ bval = SET_ATN_CMD;
+ outb(bval, ioport+ScsiCmd);
+}
+
+
+static void
+EnableMsgOut( PACB pACB, PSRB pSRB )
+{
+ pSRB->MsgOutBuf[0] = MSG_ABORT;
+ EnableMsgOut2( pACB, pSRB );
+}
+
+
+static void
+DC390_InvalidCmd( PACB pACB )
+{
+ UCHAR bval;
+ USHORT ioport;
+ PSRB pSRB;
+
+ pSRB = pACB->pActiveDCB->pActiveSRB;
+ if( pSRB->SRBState & (SRB_START_+SRB_MSGOUT) )
+ {
+ ioport = pACB->IOPortBase;
+ bval = CLEAR_FIFO_CMD;
+ outb(bval,(ioport+ScsiCmd));
+ }
+}
+
diff --git a/linux/src/drivers/scsi/sd.c b/linux/src/drivers/scsi/sd.c
new file mode 100644
index 0000000..eab64dd
--- /dev/null
+++ b/linux/src/drivers/scsi/sd.c
@@ -0,0 +1,1691 @@
+/*
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * Linux scsi disk driver
+ * Initial versions: Drew Eckhardt
+ * Subsequent revisions: Eric Youngdale
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale ericy@cais.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Modified by Eric Youngdale eric@aib.com to support loadable
+ * low-level scsi drivers.
+ */
+
+#include <linux/module.h>
+#ifdef MODULE
+/*
+ * This is a variable in scsi.c that is set when we are processing something
+ * after boot time. By definition, this is true when we are a loadable module
+ * ourselves.
+ */
+#define MODULE_FLAG 1
+#else
+#define MODULE_FLAG scsi_loadable_module_flag
+#endif /* MODULE */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+
+#define MAJOR_NR SCSI_DISK_MAJOR
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsi_ioctl.h>
+#include "constants.h"
+
+#include <linux/genhd.h>
+
+/*
+ * static const char RCSid[] = "$Header:";
+ */
+
+#define MAX_RETRIES 5
+
+/*
+ * Time out in seconds for disks and Magneto-opticals (which are slower).
+ */
+
+#define SD_TIMEOUT (20 * HZ)
+#define SD_MOD_TIMEOUT (25 * HZ)
+
+#define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
+ SC->device->type != TYPE_MOD)
+
+struct hd_struct * sd;
+
+Scsi_Disk * rscsi_disks = NULL;
+static int * sd_sizes;
+static int * sd_blocksizes;
+static int * sd_hardsizes; /* Hardware sector size */
+
+extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+static int check_scsidisk_media_change(kdev_t);
+static int fop_revalidate_scsidisk(kdev_t);
+
+static int sd_init_onedisk(int);
+
+static void requeue_sd_request (Scsi_Cmnd * SCpnt);
+
+static int sd_init(void);
+static void sd_finish(void);
+static int sd_attach(Scsi_Device *);
+static int sd_detect(Scsi_Device *);
+static void sd_detach(Scsi_Device *);
+
+struct Scsi_Device_Template sd_template =
+{ NULL, "disk", "sd", NULL, TYPE_DISK,
+ SCSI_DISK_MAJOR, 0, 0, 0, 1,
+ sd_detect, sd_init,
+ sd_finish, sd_attach, sd_detach
+};
+
+static int sd_open(struct inode * inode, struct file * filp)
+{
+ int target;
+ target = DEVICE_NR(inode->i_rdev);
+
+ if(target >= sd_template.dev_max || !rscsi_disks[target].device)
+ return -ENXIO; /* No such device */
+
+ /*
+ * Make sure that only one process can do a check_disk_change at one time.
+ * This is also used to lock out further access when the partition table
+ * is being re-read.
+ */
+
+ while (rscsi_disks[target].device->busy)
+ barrier();
+ if(rscsi_disks[target].device->removable) {
+ check_disk_change(inode->i_rdev);
+
+ /*
+ * If the drive is empty, just let the open fail.
+ */
+ if ( !rscsi_disks[target].ready )
+ return -ENXIO;
+
+ /*
+ * Similarly, if the device has the write protect tab set,
+ * have the open fail if the user expects to be able to write
+ * to the thing.
+ */
+ if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) )
+ return -EROFS;
+ }
+
+ /*
+ * See if we are requesting a non-existent partition. Do this
+ * after checking for disk change.
+ */
+ if(sd_sizes[MINOR(inode->i_rdev)] == 0)
+ return -ENXIO;
+
+ if(rscsi_disks[target].device->removable)
+ if(!rscsi_disks[target].device->access_count)
+ sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+
+ rscsi_disks[target].device->access_count++;
+ if (rscsi_disks[target].device->host->hostt->usage_count)
+ (*rscsi_disks[target].device->host->hostt->usage_count)++;
+ if(sd_template.usage_count) (*sd_template.usage_count)++;
+ return 0;
+}
+
+static void sd_release(struct inode * inode, struct file * file)
+{
+ int target;
+ fsync_dev(inode->i_rdev);
+
+ target = DEVICE_NR(inode->i_rdev);
+
+ rscsi_disks[target].device->access_count--;
+ if (rscsi_disks[target].device->host->hostt->usage_count)
+ (*rscsi_disks[target].device->host->hostt->usage_count)--;
+ if(sd_template.usage_count) (*sd_template.usage_count)--;
+
+ if(rscsi_disks[target].device->removable) {
+ if(!rscsi_disks[target].device->access_count)
+ sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ }
+}
+
+static void sd_geninit(struct gendisk *);
+
+static struct file_operations sd_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ sd_ioctl, /* ioctl */
+ NULL, /* mmap */
+ sd_open, /* open code */
+ sd_release, /* release */
+ block_fsync, /* fsync */
+ NULL, /* fasync */
+ check_scsidisk_media_change, /* Disk change */
+ fop_revalidate_scsidisk /* revalidate */
+};
+
+static struct gendisk sd_gendisk = {
+ MAJOR_NR, /* Major number */
+ "sd", /* Major name */
+ 4, /* Bits to shift to get real from partition */
+ 1 << 4, /* Number of partitions per real */
+ 0, /* maximum number of real */
+ sd_geninit, /* init function */
+ NULL, /* hd struct */
+ NULL, /* block sizes */
+ 0, /* number */
+ NULL, /* internal */
+ NULL /* next */
+};
+
+static void sd_geninit (struct gendisk *ignored)
+{
+ int i;
+
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if(rscsi_disks[i].device)
+ sd[i << 4].nr_sects = rscsi_disks[i].capacity;
+#if 0
+ /* No longer needed - we keep track of this as we attach/detach */
+ sd_gendisk.nr_real = sd_template.dev_max;
+#endif
+}
+
+/*
+ * rw_intr is the interrupt routine for the device driver.
+ * It will be notified on the end of a SCSI read / write, and
+ * will take one of several actions based on success or failure.
+ */
+
+static void rw_intr (Scsi_Cmnd *SCpnt)
+{
+ int result = SCpnt->result;
+ int this_count = SCpnt->bufflen >> 9;
+ int good_sectors = (result == 0 ? this_count : 0);
+ int block_sectors = 1;
+
+#ifdef DEBUG
+ printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
+ SCpnt->host->host_no, result);
+#endif
+
+ /*
+ Handle MEDIUM ERRORs that indicate partial success. Since this is a
+ relatively rare error condition, no care is taken to avoid unnecessary
+ additional work such as memcpy's that could be avoided.
+ */
+
+ if (driver_byte(result) != 0 && /* An error occurred */
+ SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
+ SCpnt->sense_buffer[2] == MEDIUM_ERROR)
+ {
+ long error_sector = (SCpnt->sense_buffer[3] << 24) |
+ (SCpnt->sense_buffer[4] << 16) |
+ (SCpnt->sense_buffer[5] << 8) |
+ SCpnt->sense_buffer[6];
+ int sector_size =
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].sector_size;
+ if (SCpnt->request.bh != NULL)
+ block_sectors = SCpnt->request.bh->b_size >> 9;
+ if (sector_size == 1024)
+ {
+ error_sector <<= 1;
+ if (block_sectors < 2) block_sectors = 2;
+ }
+ else if (sector_size == 256)
+ error_sector >>= 1;
+ error_sector -= sd[MINOR(SCpnt->request.rq_dev)].start_sect;
+ error_sector &= ~ (block_sectors - 1);
+ good_sectors = error_sector - SCpnt->request.sector;
+ if (good_sectors < 0 || good_sectors >= this_count)
+ good_sectors = 0;
+ }
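+ /*
+ Illustration of the partial-success math above (numbers are made up):
+ with 512-byte sectors and 1K buffer heads (block_sectors = 2), a
+ request starting at partition-relative sector 100 on a partition that
+ begins at absolute sector 2048, and a sense information field naming
+ absolute sector 2158 as the failing block, gives error_sector =
+ 2158 - 2048 = 110 (already aligned to the 2-sector buffer), so
+ good_sectors = 10: the first ten sectors are treated as transferred
+ and only the failing buffer is reported as an error.
+ */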
+
+ /*
+ * Handle RECOVERED ERRORs that indicate success after recovery action
+ * by the target device.
+ */
+
+ if (SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
+ SCpnt->sense_buffer[2] == RECOVERED_ERROR)
+ {
+ printk("scsidisk recovered I/O error: dev %s, sector %lu, absolute sector %lu\n",
+ kdevname(SCpnt->request.rq_dev), SCpnt->request.sector,
+ SCpnt->request.sector + sd[MINOR(SCpnt->request.rq_dev)].start_sect);
+ good_sectors = this_count;
+ result = 0;
+ }
+
+ /*
+ * First case : we assume that the command succeeded. One of two things
+ * will happen here. Either we will be finished, or there will be more
+ * sectors that we were unable to read last time.
+ */
+
+ if (good_sectors > 0) {
+
+#ifdef DEBUG
+ printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
+ SCpnt->request.nr_sectors);
+ printk("use_sg is %d\n ",SCpnt->use_sg);
+#endif
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+#ifdef DEBUG
+ printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
+ sgpnt[i].length);
+#endif
+ if (sgpnt[i].alt_address) {
+ if (SCpnt->request.cmd == READ)
+ memcpy(sgpnt[i].alt_address, sgpnt[i].address,
+ sgpnt[i].length);
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+
+ /* Free list of scatter-gather pointers */
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+ } else {
+ if (SCpnt->buffer != SCpnt->request.buffer) {
+#ifdef DEBUG
+ printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (SCpnt->request.cmd == READ)
+ memcpy(SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ }
+ }
+ /*
+ * If multiple sectors are requested in one buffer, then
+ * they will have been finished off by the first command.
+ * If not, then we have a multi-buffer command.
+ */
+ if (SCpnt->request.nr_sectors > this_count)
+ {
+ SCpnt->request.errors = 0;
+
+ if (!SCpnt->request.bh)
+ {
+#ifdef DEBUG
+ printk("sd%c : handling page request, no buffer\n",
+ 'a' + MINOR(SCpnt->request.rq_dev));
+#endif
+ /*
+ * The SCpnt->request.nr_sectors field is always done in
+ * 512 byte sectors, even if this really isn't the case.
+ */
+ panic("sd.c: linked page request (%lx %x)",
+ SCpnt->request.sector, this_count);
+ }
+ }
+ SCpnt = end_scsi_request(SCpnt, 1, good_sectors);
+ if (result == 0)
+ {
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ }
+
+ if (good_sectors == 0) {
+
+ /* Free up any indirection buffers we allocated for DMA purposes. */
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+#ifdef DEBUG
+ printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (sgpnt[i].alt_address) {
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ } else {
+#ifdef DEBUG
+ printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
+ SCpnt->bufflen);
+#endif
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ }
+ }
+
+ /*
+ * Now, if we were good little boys and girls, Santa left us a request
+ * sense buffer. We can extract information from this, so we
+ * can choose a block to remap, etc.
+ */
+
+ if (driver_byte(result) != 0) {
+ if (suggestion(result) == SUGGEST_REMAP) {
+#ifdef REMAP
+ /*
+ * Not yet implemented. A read will fail after being remapped,
+ * a write will call the strategy routine again.
+ */
+ if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap)
+ {
+ result = 0;
+ }
+#endif
+ }
+
+ if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
+ if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+ if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
+ /* detected disc change. set a bit and quietly refuse
+ * further access.
+ */
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ else
+ {
+ /*
+ * Must have been a power glitch, or a bus reset.
+ * Could not have been a media change, so we just retry
+ * the request and see what happens.
+ */
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ }
+ }
+
+
+ /* If we had an ILLEGAL REQUEST returned, then we may have
+ * performed an unsupported command. The only thing this should be
+ * would be a ten byte read where only a six byte read was supported.
+ * Also, on a system where READ CAPACITY failed, we have read past
+ * the end of the disk.
+ */
+
+ if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
+ if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
+ requeue_sd_request(SCpnt);
+ result = 0;
+ } else {
+ /* ???? */
+ }
+ }
+
+ if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
+ printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ",
+ SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ print_sense("sd", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+ } /* driver byte != 0 */
+ if (result) {
+ printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
+ rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
+
+ if (driver_byte(result) & DRIVER_SENSE)
+ print_sense("sd", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+ requeue_sd_request(SCpnt);
+ return;
+ }
+}
+
+/*
+ * requeue_sd_request() is the request handler function for the sd driver.
+ * Its function in life is to take block device requests, and translate
+ * them to SCSI commands.
+ */
+
+static void do_sd_request (void)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ Scsi_Device * SDev;
+ struct request * req = NULL;
+ unsigned long flags;
+ int flag = 0;
+
+ save_flags(flags);
+ while (1==1){
+ cli();
+ if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
+ restore_flags(flags);
+ return;
+ }
+
+ INIT_SCSI_REQUEST;
+ SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;
+
+ /*
+ * I am not sure where the best place to do this is. We need
+ * to hook in a place where we are likely to come if in user
+ * space.
+ */
+ if( SDev->was_reset )
+ {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ */
+ if( SDev->removable && !intr_count )
+ {
+ scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
+ /* scsi_ioctl may allow CURRENT to change, so start over. */
+ SDev->was_reset = 0;
+ continue;
+ }
+ SDev->was_reset = 0;
+ }
+
+ /* We have to be careful here. allocate_device will get a free pointer,
+ * but there is no guarantee that it is queueable. In normal usage,
+ * we want to call this, because other types of devices may have the
+ * host all tied up, and we want to make sure that we have at least
+ * one request pending for this type of device. We can also come
+ * through here while servicing an interrupt, because of the need to
+ * start another command. If we call allocate_device more than once,
+ * then the system can wedge if the command is not queueable. The
+ * request_queueable function is safe because it checks to make sure
+ * that the host is able to take another command before it returns
+ * a pointer.
+ */
+
+ if (flag++ == 0)
+ SCpnt = allocate_device(&CURRENT,
+ rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0);
+ else SCpnt = NULL;
+
+ /*
+ * The following restore_flags leads to latency problems. FIXME.
+ * Using a "sti()" gets rid of the latency problems but causes
+ * race conditions and crashes.
+ */
+ restore_flags(flags);
+
+ /* This is a performance enhancement. We dig down into the request
+ * list and try to find a queueable request (i.e. device not busy,
+ * and host able to accept another command). If we find one, then we
+ * queue it. This can make a big difference on systems with more than
+ * one disk drive. We want to have the interrupts off when monkeying
+ * with the request list, because otherwise the kernel might try to
+ * slip in a request in between somewhere.
+ */
+
+ if (!SCpnt && sd_template.nr_dev > 1){
+ struct request *req1;
+ req1 = NULL;
+ cli();
+ req = CURRENT;
+ while(req){
+ SCpnt = request_queueable(req,
+ rscsi_disks[DEVICE_NR(req->rq_dev)].device);
+ if(SCpnt) break;
+ req1 = req;
+ req = req->next;
+ }
+ if (SCpnt && req->rq_status == RQ_INACTIVE) {
+ if (req == CURRENT)
+ CURRENT = CURRENT->next;
+ else
+ req1->next = req->next;
+ }
+ restore_flags(flags);
+ }
+
+ if (!SCpnt) return; /* Could not find anything to do */
+
+ /* Queue command */
+ requeue_sd_request(SCpnt);
+ } /* While */
+}
+
+static void requeue_sd_request (Scsi_Cmnd * SCpnt)
+{
+ int dev, devm, block, this_count;
+ unsigned char cmd[12];
+ int bounce_size, contiguous;
+ int max_sg;
+ struct buffer_head * bh, *bhp;
+ char * buff, *bounce_buffer;
+
+ repeat:
+
+ if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
+ do_sd_request();
+ return;
+ }
+
+ devm = MINOR(SCpnt->request.rq_dev);
+ dev = DEVICE_NR(SCpnt->request.rq_dev);
+
+ block = SCpnt->request.sector;
+ this_count = 0;
+
+#ifdef DEBUG
+ printk("Doing sd request, dev = %d, block = %d\n", devm, block);
+#endif
+
+ if (devm >= (sd_template.dev_max << 4) ||
+ !rscsi_disks[dev].device ||
+ block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
+ {
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+ block += sd[devm].start_sect;
+
+ if (rscsi_disks[dev].device->changed)
+ {
+ /*
+ * quietly refuse to do anything to a changed disc until the changed
+ * bit has been reset
+ */
+ /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+#ifdef DEBUG
+ printk("sd%c : real dev = /dev/sd%c, block = %d\n",
+ 'a' + devm, dev, block);
+#endif
+
+ /*
+ * If we have a 1K hardware sectorsize, prevent access to single
+ * 512 byte sectors. In theory we could handle this - in fact
+ * the scsi cdrom driver must be able to handle this because
+ * we typically use 1K blocksizes, and cdroms typically have
+ * 2K hardware sectorsizes. Of course, things are simpler
+ * with the cdrom, since it is read-only. For performance
+ * reasons, the filesystems should be able to handle this
+ * and not force the scsi disk driver to use bounce buffers
+ * for this.
+ */
+ if (rscsi_disks[dev].sector_size == 1024)
+ if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
+ printk("sd.c:Bad block number requested");
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+
+ switch (SCpnt->request.cmd)
+ {
+ case WRITE :
+ if (!rscsi_disks[dev].device->writeable)
+ {
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ }
+ cmd[0] = WRITE_6;
+ break;
+ case READ :
+ cmd[0] = READ_6;
+ break;
+ default :
+ panic ("Unknown sd command %d\n", SCpnt->request.cmd);
+ }
+
+ SCpnt->this_count = 0;
+
+ /* If the host adapter can deal with very large scatter-gather
+ * requests, it is a waste of time to cluster
+ */
+ contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
+ bounce_buffer = NULL;
+ bounce_size = (SCpnt->request.nr_sectors << 9);
+
+ /* First see if we need a bounce buffer for this request. If we do, make
+ * sure that we can allocate a buffer. Do not waste space by allocating
+ * a bounce buffer if we are straddling the 16Mb line
+ */
+ if (contiguous && SCpnt->request.bh &&
+ ((long) SCpnt->request.bh->b_data)
+ + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
+ && SCpnt->host->unchecked_isa_dma) {
+ if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
+ bounce_buffer = (char *) scsi_malloc(bounce_size);
+ if(!bounce_buffer) contiguous = 0;
+ }
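+ /* The block above only allocates a bounce buffer when the whole request
+ * already starts above ISA_DMA_THRESHOLD (the 16Mb ISA DMA limit); a
+ * request that merely straddles the line, or one for which scsi_malloc
+ * fails, simply clears 'contiguous' and is handled buffer by buffer
+ * rather than as one clustered transfer. */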
+
+ if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
+ for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
+ bhp = bhp->b_reqnext) {
+ if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
+ if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
+ contiguous = 0;
+ break;
+ }
+ }
+ if (!SCpnt->request.bh || contiguous) {
+
+ /* case of page request (i.e. raw device), or unlinked buffer */
+ this_count = SCpnt->request.nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+
+ } else if (SCpnt->host->sg_tablesize == 0 ||
+ (need_isa_buffer && dma_free_sectors <= 10)) {
+
+ /* Case of host adapter that cannot scatter-gather. We also
+ * come here if we are running low on DMA buffer memory. We set
+ * a threshold higher than that we would need for this request so
+ * we leave room for other requests. Even though we would not need
+ * it all, we need to be conservative, because if we run low enough
+ * we have no choice but to panic.
+ */
+ if (SCpnt->host->sg_tablesize != 0 &&
+ need_isa_buffer &&
+ dma_free_sectors <= 10)
+ printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
+
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+
+ } else {
+
+ /* Scatter-gather capable host adapter */
+ struct scatterlist * sgpnt;
+ int count, this_count_max;
+ int counted;
+
+ bh = SCpnt->request.bh;
+ this_count = 0;
+ this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
+ count = 0;
+ bhp = NULL;
+ while(bh) {
+ if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
+ if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
+ !CLUSTERABLE_DEVICE(SCpnt) ||
+ (SCpnt->host->unchecked_isa_dma &&
+ ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
+ if (count < SCpnt->host->sg_tablesize) count++;
+ else break;
+ }
+ this_count += (bh->b_size >> 9);
+ bhp = bh;
+ bh = bh->b_reqnext;
+ }
+#if 0
+ if(SCpnt->host->unchecked_isa_dma &&
+ ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
+#endif
+ SCpnt->use_sg = count; /* Number of chains */
+ /* scsi_malloc can only allocate in chunks of 512 bytes */
+ count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
+
+ SCpnt->sglist_len = count;
+ max_sg = count / sizeof(struct scatterlist);
+ if(SCpnt->host->sg_tablesize < max_sg)
+ max_sg = SCpnt->host->sg_tablesize;
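+ /* For example (sizes are illustrative): with use_sg == 20 and a
+ * 12-byte struct scatterlist, 240 bytes of list round up to a single
+ * 512-byte scsi_malloc chunk, so max_sg becomes 512/12 = 42 entries
+ * before being clamped to the host's sg_tablesize. The slack allows
+ * extra chains to be inserted later when clustering has to be undone. */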
+ sgpnt = (struct scatterlist * ) scsi_malloc(count);
+ if (!sgpnt) {
+ printk("Warning - running *really* short on DMA buffers\n");
+ SCpnt->use_sg = 0; /* No memory left - bail out */
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ } else {
+ memset(sgpnt, 0, count); /* Zero so it is easy to fill, but only
+ * if memory is available
+ */
+ buff = (char *) sgpnt;
+ counted = 0;
+ for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
+ count < SCpnt->use_sg && bh;
+ count++, bh = bhp) {
+
+ bhp = bh->b_reqnext;
+
+ if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
+ sgpnt[count].length += bh->b_size;
+ counted += bh->b_size >> 9;
+
+ if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
+ ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
+ !sgpnt[count].alt_address) {
+ sgpnt[count].alt_address = sgpnt[count].address;
+ /* We try to avoid exhausting the DMA pool, since it is
+ * easier to control usage here. In other places we might
+ * have a more pressing need, and we would be screwed if
+ * we ran out */
+ if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
+ sgpnt[count].address = NULL;
+ } else {
+ sgpnt[count].address =
+ (char *) scsi_malloc(sgpnt[count].length);
+ }
+ /* If we start running low on DMA buffers, we abort the
+ * scatter-gather operation, and free all of the memory
+ * we have allocated. We want to ensure that all scsi
+ * operations are able to do at least a non-scatter/gather
+ * operation */
+ if(sgpnt[count].address == NULL){ /* Out of dma memory */
+#if 0
+ printk("Warning: Running low on SCSI DMA buffers");
+ /* Try switching back to a non s-g operation. */
+ while(--count >= 0){
+ if(sgpnt[count].alt_address)
+ scsi_free(sgpnt[count].address,
+ sgpnt[count].length);
+ }
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = SCpnt->request.buffer;
+ SCpnt->use_sg = 0;
+ scsi_free(sgpnt, SCpnt->sglist_len);
+#endif
+ SCpnt->use_sg = count;
+ this_count = counted -= bh->b_size >> 9;
+ break;
+ }
+ }
+
+ /* Only cluster buffers if we know that we can supply DMA
+ * buffers large enough to satisfy the request. Do not cluster
+ * a new request if this would mean that we suddenly need to
+ * start using DMA bounce buffers */
+ if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
+ && CLUSTERABLE_DEVICE(SCpnt)) {
+ char * tmp;
+
+ if (((long) sgpnt[count].address) + sgpnt[count].length +
+ bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
+ (SCpnt->host->unchecked_isa_dma) &&
+ !sgpnt[count].alt_address) continue;
+
+ if(!sgpnt[count].alt_address) {count--; continue; }
+ if(dma_free_sectors > 10)
+ tmp = (char *) scsi_malloc(sgpnt[count].length
+ + bhp->b_size);
+ else {
+ tmp = NULL;
+ max_sg = SCpnt->use_sg;
+ }
+ if(tmp){
+ scsi_free(sgpnt[count].address, sgpnt[count].length);
+ sgpnt[count].address = tmp;
+ count--;
+ continue;
+ }
+
+ /* If we are allowed another sg chain, then increment
+ * counter so we can insert it. Otherwise we will end
+ up truncating */
+
+ if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
+ } /* contiguous buffers */
+ } /* for loop */
+
+ /* This is actually how many we are going to transfer */
+ this_count = counted;
+
+ if(count < SCpnt->use_sg || SCpnt->use_sg
+ > SCpnt->host->sg_tablesize){
+ bh = SCpnt->request.bh;
+ printk("Use sg, count %d %x %d\n",
+ SCpnt->use_sg, count, dma_free_sectors);
+ printk("maxsg = %x, counted = %d this_count = %d\n",
+ max_sg, counted, this_count);
+ while(bh){
+ printk("[%p %lx] ", bh->b_data, bh->b_size);
+ bh = bh->b_reqnext;
+ }
+ if(SCpnt->use_sg < 16)
+ for(count=0; count<SCpnt->use_sg; count++)
+ printk("{%d:%p %p %d} ", count,
+ sgpnt[count].address,
+ sgpnt[count].alt_address,
+ sgpnt[count].length);
+ panic("Ooops");
+ }
+
+ if (SCpnt->request.cmd == WRITE)
+ for(count=0; count<SCpnt->use_sg; count++)
+ if(sgpnt[count].alt_address)
+ memcpy(sgpnt[count].address, sgpnt[count].alt_address,
+ sgpnt[count].length);
+ } /* Able to malloc sgpnt */
+ } /* Host adapter capable of scatter-gather */
+
+ /* Now handle the possibility of DMA to addresses > 16Mb */
+
+ if(SCpnt->use_sg == 0){
+ if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
+ (SCpnt->host->unchecked_isa_dma)) {
+ if(bounce_buffer)
+ buff = bounce_buffer;
+ else
+ buff = (char *) scsi_malloc(this_count << 9);
+ if(buff == NULL) { /* Try backing off a bit if we are low on mem*/
+ this_count = SCpnt->request.current_nr_sectors;
+ buff = (char *) scsi_malloc(this_count << 9);
+ if(!buff) panic("Ran out of DMA buffers.");
+ }
+ if (SCpnt->request.cmd == WRITE)
+ memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
+ }
+ }
+#ifdef DEBUG
+ printk("sd%c : %s %d/%d 512 byte blocks.\n",
+ 'a' + devm,
+ (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
+ this_count, SCpnt->request.nr_sectors);
+#endif
+
+ cmd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ if (rscsi_disks[dev].sector_size == 1024){
+ if(block & 1) panic("sd.c:Bad block number requested");
+ if(this_count & 1) panic("sd.c:Bad block number requested");
+ block = block >> 1;
+ this_count = this_count >> 1;
+ }
+
+ if (rscsi_disks[dev].sector_size == 256){
+ block = block << 1;
+ this_count = this_count << 1;
+ }
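+ /* The request arrives in 512-byte sectors and is rescaled to device
+ * blocks here: e.g. sector 64, count 8 becomes block 32, count 4 on a
+ * 1024-byte-sector disk, and block 128, count 16 on a 256-byte-sector
+ * one; odd values cannot be represented on a 1K disk, hence the panics
+ * above. */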
+
+ if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
+ {
+ if (this_count > 0xffff)
+ this_count = 0xffff;
+
+ cmd[0] += READ_10 - READ_6 ;
+ cmd[2] = (unsigned char) (block >> 24) & 0xff;
+ cmd[3] = (unsigned char) (block >> 16) & 0xff;
+ cmd[4] = (unsigned char) (block >> 8) & 0xff;
+ cmd[5] = (unsigned char) block & 0xff;
+ cmd[6] = cmd[9] = 0;
+ cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
+ cmd[8] = (unsigned char) this_count & 0xff;
+ }
+ else
+ {
+ if (this_count > 0xff)
+ this_count = 0xff;
+
+ cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ cmd[2] = (unsigned char) ((block >> 8) & 0xff);
+ cmd[3] = (unsigned char) block & 0xff;
+ cmd[4] = (unsigned char) this_count;
+ cmd[5] = 0;
+ }
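+ /* A 6-byte READ/WRITE CDB can only address a 21-bit LBA (<= 0x1fffff)
+ * and up to 255 blocks, so larger transfers on a "ten"-capable device
+ * use the 10-byte form built above (the 10-byte opcodes differ from
+ * their 6-byte counterparts by a fixed offset, hence the cmd[0]
+ * adjustment). For instance, block 0x123456 with 0x120 blocks gives
+ * cmd[2..5] = 00 12 34 56 and cmd[7..8] = 01 20; the same request on
+ * an old device would fall into the 6-byte branch and be truncated to
+ * 0xff blocks. */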
+
+ /*
+ * We shouldn't disconnect in the middle of a sector, so with a dumb
+ * host adapter, it's safe to assume that we can at least transfer
+ * this many bytes between each connect / disconnect.
+ */
+
+ SCpnt->transfersize = rscsi_disks[dev].sector_size;
+ SCpnt->underflow = this_count << 9;
+ scsi_do_cmd (SCpnt, (void *) cmd, buff,
+ this_count * rscsi_disks[dev].sector_size,
+ rw_intr,
+ (SCpnt->device->type == TYPE_DISK ?
+ SD_TIMEOUT : SD_MOD_TIMEOUT),
+ MAX_RETRIES);
+}
+
+static int check_scsidisk_media_change(kdev_t full_dev){
+ int retval;
+ int target;
+ struct inode inode;
+ int flag = 0;
+
+ target = DEVICE_NR(full_dev);
+
+ if (target >= sd_template.dev_max ||
+ !rscsi_disks[target].device) {
+ printk("SCSI disk request error: invalid device.\n");
+ return 0;
+ }
+
+ if(!rscsi_disks[target].device->removable) return 0;
+
+ inode.i_rdev = full_dev; /* This is all we really need here */
+
+ /* Using Start/Stop enables differentiation between drive with
+ * no cartridge loaded - NOT READY, drive with changed cartridge -
+ * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
+ * This also handles drives that auto spin down. eg iomega jaz 1GB
+ * as this will spin up the drive.
+ */
+ retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_START_UNIT, 0);
+
+ if(retval){ /* Unable to test, unit probably not ready. This usually
+ * means there is no disc in the drive. Mark as changed,
+ * and we will figure it out later once the drive is
+ * available again. */
+
+ rscsi_disks[target].ready = 0;
+ rscsi_disks[target].device->changed = 1;
+ return 1; /* This will force a flush, if called from
+ * check_disk_change */
+ }
+
+ /*
+   * For a removable scsi disk ( FLOPTICAL ) we have to recognise the
+   * presence of a disk in the drive. This is kept in the Scsi_Disk
+   * struct and tested at open time.  Daniel Roche ( dan@lectra.fr )
+ */
+
+ rscsi_disks[target].ready = 1; /* FLOPTICAL */
+
+ retval = rscsi_disks[target].device->changed;
+ if(!flag) rscsi_disks[target].device->changed = 0;
+ return retval;
+}
+
+static void sd_init_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+static int sd_init_onedisk(int i)
+{
+ unsigned char cmd[12];
+ unsigned char *buffer;
+ unsigned long spintime;
+ int the_result, retries;
+ Scsi_Cmnd * SCpnt;
+
+ /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
+ * considered a fatal error, and many devices report such an error
+ * just after a scsi bus reset.
+ */
+
+ SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
+ buffer = (unsigned char *) scsi_malloc(512);
+
+ spintime = 0;
+
+ /* Spin up drives, as required. Only do this at boot time */
+ /* Spinup needs to be done for module loads too. */
+ do{
+ retries = 0;
+ while(retries < 3)
+ {
+ cmd[0] = TEST_UNIT_READY;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ retries++;
+ if( the_result == 0
+ || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
+ break;
+ }
+
+ /* Look for non-removable devices that return NOT_READY.
+ * Issue command to spin up drive for these cases. */
+ if(the_result && !rscsi_disks[i].device->removable &&
+ SCpnt->sense_buffer[2] == NOT_READY) {
+ unsigned long time1;
+ if(!spintime){
+#ifdef MACH
+ printk( "sd%d: Spinning up disk...", i);
+#else
+ printk( "sd%c: Spinning up disk...", 'a' + i );
+#endif
+ cmd[0] = START_STOP;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ cmd[1] |= 1; /* Return immediately */
+ memset ((void *) &cmd[2], 0, 8);
+ cmd[4] = 1; /* Start spin cycle */
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ spintime = jiffies;
+ }
+
+ time1 = jiffies + HZ;
+ while(jiffies < time1); /* Wait 1 second for next try */
+ printk( "." );
+ }
+ } while(the_result && spintime && spintime+100*HZ > jiffies);
+ if (spintime) {
+ if (the_result)
+ printk( "not responding...\n" );
+ else
+ printk( "ready\n" );
+ }
+
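+  /* Now read the capacity.  The 8-byte response holds the address of
+   * the last block (big-endian) followed by the block length in bytes;
+   * retry a few times in case of transient failures.
+   */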
+ retries = 3;
+ do {
+ cmd[0] = READ_CAPACITY;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ memset ((void *) buffer, 0, 8);
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ /* Mark as really busy again */
+ SCpnt->request.rq_status = RQ_SCSI_BUSY;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 8, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem); /* sleep until it is ready */
+ }
+
+ the_result = SCpnt->result;
+ retries--;
+
+ } while(the_result && retries);
+
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+
+ wake_up(&SCpnt->device->device_wait);
+
+ /* Wake up a process waiting for device */
+
+ /*
+ * The SCSI standard says:
+ * "READ CAPACITY is necessary for self configuring software"
+ * While not mandatory, support of READ CAPACITY is strongly encouraged.
+ * We used to die if we couldn't successfully do a READ CAPACITY.
+ * But, now we go on about our way. The side effects of this are
+ *
+ * 1. We can't know block size with certainty. I have said "512 bytes
+ * is it" as this is most common.
+ *
+   *  2. Recovery when someone attempts to read past the end of the
+ * raw device will be slower.
+ */
+
+ if (the_result)
+ {
+#ifdef MACH
+ printk ("sd%d : READ CAPACITY failed.\n"
+ "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
+ i, i,
+#else
+ printk ("sd%c : READ CAPACITY failed.\n"
+ "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
+ 'a' + i, 'a' + i,
+#endif
+ status_byte(the_result),
+ msg_byte(the_result),
+ host_byte(the_result),
+ driver_byte(the_result)
+ );
+ if (driver_byte(the_result) & DRIVER_SENSE)
+#ifdef MACH
+ printk("sd%d : extended sense code = %1x \n",
+ i, SCpnt->sense_buffer[2] & 0xf);
+#else
+ printk("sd%c : extended sense code = %1x \n",
+ 'a' + i, SCpnt->sense_buffer[2] & 0xf);
+#endif
+ else
+#ifdef MACH
+ printk("sd%d : sense not available. \n", i);
+#else
+ printk("sd%c : sense not available. \n", 'a' + i);
+#endif
+
+#ifdef MACH
+ printk("sd%d : block size assumed to be 512 bytes, disk size 1GB. \n",
+ i);
+#else
+ printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
+ 'a' + i);
+#endif
+ rscsi_disks[i].capacity = 0x1fffff;
+ rscsi_disks[i].sector_size = 512;
+
+ /* Set dirty bit for removable devices if not ready - sometimes drives
+ * will not report this properly. */
+ if(rscsi_disks[i].device->removable &&
+ SCpnt->sense_buffer[2] == NOT_READY)
+ rscsi_disks[i].device->changed = 1;
+
+ }
+ else
+ {
+ /*
+ * FLOPTICAL , if read_capa is ok , drive is assumed to be ready
+ */
+ rscsi_disks[i].ready = 1;
+
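+      /* READ CAPACITY reports the address of the last block, so the
+       * total number of blocks is that value plus one.
+       */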
+ rscsi_disks[i].capacity = 1 + ((buffer[0] << 24) |
+ (buffer[1] << 16) |
+ (buffer[2] << 8) |
+ buffer[3]);
+
+ rscsi_disks[i].sector_size = (buffer[4] << 24) |
+ (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+
+ if (rscsi_disks[i].sector_size == 0) {
+ rscsi_disks[i].sector_size = 512;
+#ifdef MACH
+ printk("sd%d : sector size 0 reported, assuming 512.\n", i);
+#else
+ printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
+#endif
+ }
+
+
+ if (rscsi_disks[i].sector_size != 512 &&
+ rscsi_disks[i].sector_size != 1024 &&
+ rscsi_disks[i].sector_size != 256)
+ {
+#ifdef MACH
+ printk ("sd%d : unsupported sector size %d.\n",
+ i, rscsi_disks[i].sector_size);
+#else
+ printk ("sd%c : unsupported sector size %d.\n",
+ 'a' + i, rscsi_disks[i].sector_size);
+#endif
+ if(rscsi_disks[i].device->removable){
+ rscsi_disks[i].capacity = 0;
+ } else {
+ printk ("scsi : deleting disk entry.\n");
+ rscsi_disks[i].device = NULL;
+ sd_template.nr_dev--;
+ sd_gendisk.nr_real--;
+ return i;
+ }
+ }
+ {
+ /*
+ * The msdos fs needs to know the hardware sector size
+ * So I have created this table. See ll_rw_blk.c
+ * Jacques Gelinas (Jacques@solucorp.qc.ca)
+ */
+ int m, mb;
+ int sz_quot, sz_rem;
+ int hard_sector = rscsi_disks[i].sector_size;
+ /* There are 16 minors allocated for each major device */
+ for (m=i<<4; m<((i+1)<<4); m++){
+ sd_hardsizes[m] = hard_sector;
+ }
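+	  /* Express the capacity in megabytes and round it to tenths of
+	   * a gigabyte for the report printed below.
+	   */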
+ mb = rscsi_disks[i].capacity / 1024 * hard_sector / 1024;
+	  /* sz = div(m/100, 10); this does not seem to be in the library */
+ m = (mb + 50) / 100;
+ sz_quot = m / 10;
+ sz_rem = m - (10 * sz_quot);
+#ifdef MACH
+ printk ("SCSI device sd%d: hdwr sector= %d bytes."
+ " Sectors= %d [%d MB] [%d.%1d GB]\n",
+ i, hard_sector, rscsi_disks[i].capacity,
+ mb, sz_quot, sz_rem);
+#else
+ printk ("SCSI device sd%c: hdwr sector= %d bytes."
+ " Sectors= %d [%d MB] [%d.%1d GB]\n",
+ i+'a', hard_sector, rscsi_disks[i].capacity,
+ mb, sz_quot, sz_rem);
+#endif
+ }
+ if(rscsi_disks[i].sector_size == 1024)
+ rscsi_disks[i].capacity <<= 1; /* Change into 512 byte sectors */
+ if(rscsi_disks[i].sector_size == 256)
+ rscsi_disks[i].capacity >>= 1; /* Change into 512 byte sectors */
+ }
+
+
+ /*
+ * Unless otherwise specified, this is not write protected.
+ */
+ rscsi_disks[i].write_prot = 0;
+ if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
+ /* FLOPTICAL */
+
+ /*
+ * for removable scsi disk ( FLOPTICAL ) we have to recognise
+ * the Write Protect Flag. This flag is kept in the Scsi_Disk struct
+ * and tested at open !
+ * Daniel Roche ( dan@lectra.fr )
+ */
+
+ memset ((void *) &cmd[0], 0, 8);
+ cmd[0] = MODE_SENSE;
+ cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
+ cmd[2] = 1; /* page code 1 ?? */
+ cmd[4] = 12;
+ SCpnt->cmd_len = 0;
+ SCpnt->sense_buffer[0] = 0;
+ SCpnt->sense_buffer[2] = 0;
+
+ /* same code as READCAPA !! */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy again */
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sd_init_done, SD_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+ wake_up(&SCpnt->device->device_wait);
+
+ if ( the_result ) {
+#ifdef MACH
+ printk ("sd%d: test WP failed, assume Write Protected\n",i);
+#else
+ printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
+#endif
+ rscsi_disks[i].write_prot = 1;
+ } else {
+ rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
+#ifdef MACH
+ printk ("sd%d: Write Protect is %s\n",i,
+ rscsi_disks[i].write_prot ? "on" : "off");
+#else
+ printk ("sd%c: Write Protect is %s\n",i+'a',
+ rscsi_disks[i].write_prot ? "on" : "off");
+#endif
+ }
+
+ } /* check for write protect */
+
+ rscsi_disks[i].ten = 1;
+ rscsi_disks[i].remap = 1;
+ scsi_free(buffer, 512);
+ return i;
+}
+
+/*
+ * The sd_init() function looks at all SCSI drives present, determines
+ * their size, and reads partition table entries for them.
+ */
+
+static int sd_registered = 0;
+
+static int sd_init()
+{
+ int i;
+
+ if (sd_template.dev_noticed == 0) return 0;
+
+ if(!sd_registered) {
+ if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
+ printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
+ return 1;
+ }
+ sd_registered++;
+ }
+
+ /* We do not support attaching loadable devices yet. */
+ if(rscsi_disks) return 0;
+
+ sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
+
+ rscsi_disks = (Scsi_Disk *)
+ scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
+ memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
+
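+    /* Each disk gets 16 minors (the whole device plus its partitions),
+     * hence the << 4 in the per-minor array sizes below.
+     */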
+ sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+ memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
+
+ sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+
+ sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(int), GFP_ATOMIC);
+
+ for(i=0;i<(sd_template.dev_max << 4);i++){
+ sd_blocksizes[i] = 1024;
+ sd_hardsizes[i] = 512;
+ }
+ blksize_size[MAJOR_NR] = sd_blocksizes;
+ hardsect_size[MAJOR_NR] = sd_hardsizes;
+ sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
+ sizeof(struct hd_struct),
+ GFP_ATOMIC);
+
+
+ sd_gendisk.max_nr = sd_template.dev_max;
+ sd_gendisk.part = sd;
+ sd_gendisk.sizes = sd_sizes;
+ sd_gendisk.real_devices = (void *) rscsi_disks;
+ return 0;
+}
+
+static void sd_finish(void)
+{
+ struct gendisk *gendisk;
+ int i;
+
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+
+ for (gendisk = gendisk_head; gendisk != NULL; gendisk = gendisk->next)
+ if (gendisk == &sd_gendisk)
+ break;
+ if (gendisk == NULL)
+ {
+ sd_gendisk.next = gendisk_head;
+ gendisk_head = &sd_gendisk;
+ }
+
+ for (i = 0; i < sd_template.dev_max; ++i)
+ if (!rscsi_disks[i].capacity &&
+ rscsi_disks[i].device)
+ {
+ if (MODULE_FLAG
+ && !rscsi_disks[i].has_part_table) {
+ sd_sizes[i << 4] = rscsi_disks[i].capacity;
+ /* revalidate does sd_init_onedisk via MAYBE_REINIT*/
+ revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
+ }
+ else
+ i=sd_init_onedisk(i);
+ rscsi_disks[i].has_part_table = 1;
+ }
+
+ /* If our host adapter is capable of scatter-gather, then we increase
+     * the read-ahead to 120 sectors.  If not, we use
+ * a two block (4 sector) read ahead.
+ */
+ if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
+ read_ahead[MAJOR_NR] = 120; /* 120 sector read-ahead */
+ else
+ read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
+
+ return;
+}
+
+static int sd_detect(Scsi_Device * SDp){
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
+
+#ifdef MACH
+ printk("Detected scsi %sdisk sd%d at scsi%d, channel %d, id %d, lun %d\n",
+ SDp->removable ? "removable " : "",
+ sd_template.dev_noticed++,
+ SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+#else
+ printk("Detected scsi %sdisk sd%c at scsi%d, channel %d, id %d, lun %d\n",
+ SDp->removable ? "removable " : "",
+ 'a'+ (sd_template.dev_noticed++),
+ SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+#endif
+
+ return 1;
+}
+
+static int sd_attach(Scsi_Device * SDp){
+ Scsi_Disk * dpnt;
+ int i;
+
+ if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
+
+ if(sd_template.nr_dev >= sd_template.dev_max) {
+ SDp->attached--;
+ return 1;
+ }
+
+ for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
+ if(!dpnt->device) break;
+
+ if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
+
+ SDp->scsi_request_fn = do_sd_request;
+ rscsi_disks[i].device = SDp;
+ rscsi_disks[i].has_part_table = 0;
+ sd_template.nr_dev++;
+ sd_gendisk.nr_real++;
+ return 0;
+}
+
+#define DEVICE_BUSY rscsi_disks[target].device->busy
+#define USAGE rscsi_disks[target].device->access_count
+#define CAPACITY rscsi_disks[target].capacity
+#define MAYBE_REINIT sd_init_onedisk(target)
+#define GENDISK_STRUCT sd_gendisk
+
+/* This routine is called to flush all partitions and partition tables
+ * for a changed scsi disk, and then re-read the new partition table.
+ * If we are revalidating a disk because of a media change, then we
+ * enter with usage == 0. If we are using an ioctl, we automatically have
+ * usage == 1 (we need an open channel to use an ioctl :-), so this
+ * is our limit.
+ */
+int revalidate_scsidisk(kdev_t dev, int maxusage){
+ int target;
+ struct gendisk * gdev;
+ unsigned long flags;
+ int max_p;
+ int start;
+ int i;
+
+ target = DEVICE_NR(dev);
+ gdev = &GENDISK_STRUCT;
+
+ save_flags(flags);
+ cli();
+ if (DEVICE_BUSY || USAGE > maxusage) {
+ restore_flags(flags);
+ printk("Device busy for revalidation (usage=%d)\n", USAGE);
+ return -EBUSY;
+ }
+ DEVICE_BUSY = 1;
+ restore_flags(flags);
+
+ max_p = gdev->max_p;
+ start = target << gdev->minor_shift;
+
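+  /* Flush and invalidate every partition minor of this disk before
+   * the partition table is re-read.
+   */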
+ for (i=max_p - 1; i >=0 ; i--) {
+ int minor = start+i;
+ kdev_t devi = MKDEV(MAJOR_NR, minor);
+ sync_dev(devi);
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+ gdev->part[minor].start_sect = 0;
+ gdev->part[minor].nr_sects = 0;
+ /*
+ * Reset the blocksize for everything so that we can read
+ * the partition table.
+ */
+ blksize_size[MAJOR_NR][minor] = 1024;
+ }
+
+#ifdef MAYBE_REINIT
+ MAYBE_REINIT;
+#endif
+
+ gdev->part[start].nr_sects = CAPACITY;
+ resetup_one_dev(gdev, target);
+
+ DEVICE_BUSY = 0;
+ return 0;
+}
+
+static int fop_revalidate_scsidisk(kdev_t dev){
+ return revalidate_scsidisk(dev, 0);
+}
+
+
+static void sd_detach(Scsi_Device * SDp)
+{
+ Scsi_Disk * dpnt;
+ int i;
+ int max_p;
+ int start;
+
+ for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
+ if(dpnt->device == SDp) {
+
+ /* If we are disconnecting a disk driver, sync and invalidate
+ * everything */
+ max_p = sd_gendisk.max_p;
+ start = i << sd_gendisk.minor_shift;
+
+ for (i=max_p - 1; i >=0 ; i--) {
+ int minor = start+i;
+ kdev_t devi = MKDEV(MAJOR_NR, minor);
+ sync_dev(devi);
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+ sd_gendisk.part[minor].start_sect = 0;
+ sd_gendisk.part[minor].nr_sects = 0;
+ sd_sizes[minor] = 0;
+ }
+
+ dpnt->has_part_table = 0;
+ dpnt->device = NULL;
+ dpnt->capacity = 0;
+ SDp->attached--;
+ sd_template.dev_noticed--;
+ sd_template.nr_dev--;
+ sd_gendisk.nr_real--;
+ return;
+ }
+ return;
+}
+
+#ifdef MODULE
+
+int init_module(void) {
+ sd_template.usage_count = &mod_use_count_;
+ return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
+}
+
+void cleanup_module( void)
+{
+ struct gendisk * prev_sdgd;
+ struct gendisk * sdgd;
+
+ scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
+ unregister_blkdev(SCSI_DISK_MAJOR, "sd");
+ sd_registered--;
+ if( rscsi_disks != NULL )
+ {
+ scsi_init_free((char *) rscsi_disks,
+ (sd_template.dev_noticed + SD_EXTRA_DEVS)
+ * sizeof(Scsi_Disk));
+
+ scsi_init_free((char *) sd_sizes, sd_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sd_blocksizes, sd_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sd_hardsizes, sd_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sd,
+ (sd_template.dev_max << 4) * sizeof(struct hd_struct));
+ /*
+ * Now remove sd_gendisk from the linked list
+ */
+ sdgd = gendisk_head;
+ prev_sdgd = NULL;
+ while(sdgd != &sd_gendisk)
+ {
+ prev_sdgd = sdgd;
+ sdgd = sdgd->next;
+ }
+
+ if(sdgd != &sd_gendisk)
+ printk("sd_gendisk not in disk chain.\n");
+ else {
+ if(prev_sdgd != NULL)
+ prev_sdgd->next = sdgd->next;
+ else
+ gendisk_head = sdgd->next;
+ }
+ }
+
+ blksize_size[MAJOR_NR] = NULL;
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_size[MAJOR_NR] = NULL;
+ hardsect_size[MAJOR_NR] = NULL;
+ read_ahead[MAJOR_NR] = 0;
+ sd_template.dev_max = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we almost follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/sd.h b/linux/src/drivers/scsi/sd.h
new file mode 100644
index 0000000..02b3437
--- /dev/null
+++ b/linux/src/drivers/scsi/sd.h
@@ -0,0 +1,65 @@
+/*
+ * sd.h Copyright (C) 1992 Drew Eckhardt
+ * SCSI disk driver header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+#ifndef _SD_H
+#define _SD_H
+/*
+ $Header: cvs/gnumach/linux/src/drivers/scsi/Attic/sd.h,v 1.1 1999/04/26 05:55:03 tb Exp $
+*/
+
+#ifndef _SCSI_H
+#include "scsi.h"
+#endif
+
+#ifndef _GENDISK_H
+#include <linux/genhd.h>
+#endif
+
+extern struct hd_struct * sd;
+
+typedef struct scsi_disk {
+ unsigned capacity; /* size in blocks */
+ unsigned sector_size; /* size in bytes */
+ Scsi_Device *device;
+ unsigned char ready; /* flag ready for FLOPTICAL */
+ unsigned char write_prot; /* flag write_protect for rmvable dev */
+ unsigned char sector_bit_size; /* sector_size = 2 to the bit size power */
+ unsigned char sector_bit_shift; /* power of 2 sectors per FS block */
+ unsigned ten:1; /* support ten byte read / write */
+ unsigned remap:1; /* support remapping */
+ unsigned has_part_table:1; /* has partition table */
+} Scsi_Disk;
+
+extern Scsi_Disk * rscsi_disks;
+
+extern int revalidate_scsidisk(kdev_t dev, int maxusage);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
diff --git a/linux/src/drivers/scsi/sd_ioctl.c b/linux/src/drivers/scsi/sd_ioctl.c
new file mode 100644
index 0000000..4c58f04
--- /dev/null
+++ b/linux/src/drivers/scsi/sd_ioctl.c
@@ -0,0 +1,128 @@
+/*
+ * drivers/scsi/sd_ioctl.c
+ *
+ * ioctl handling for SCSI disks
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/errno.h>
+
+#include <asm/segment.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include <scsi/scsi_ioctl.h>
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsicam.h> /* must follow "hosts.h" */
+
+int sd_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
+{
+ kdev_t dev = inode->i_rdev;
+ int error;
+ struct Scsi_Host * host;
+ int diskinfo[4];
+ struct hd_geometry *loc = (struct hd_geometry *) arg;
+
+ switch (cmd) {
+ case HDIO_GETGEO: /* Return BIOS disk parameters */
+ if (!loc) return -EINVAL;
+#ifndef MACH
+ error = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+ if (error)
+ return error;
+#endif
+ host = rscsi_disks[MINOR(dev) >> 4].device->host;
+
+/* default to most commonly used values */
+
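+/* 64 heads x 32 sectors gives 2048 sectors per cylinder, hence the
+   capacity >> 11 below. */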
+ diskinfo[0] = 0x40;
+ diskinfo[1] = 0x20;
+ diskinfo[2] = rscsi_disks[MINOR(dev) >> 4].capacity >> 11;
+
+/* override with calculated, extended default, or driver values */
+
+ if(host->hostt->bios_param != NULL)
+ host->hostt->bios_param(&rscsi_disks[MINOR(dev) >> 4],
+ dev,
+ &diskinfo[0]);
+ else scsicam_bios_param(&rscsi_disks[MINOR(dev) >> 4],
+ dev, &diskinfo[0]);
+
+#ifdef MACH
+ loc->heads = diskinfo[0];
+ loc->sectors = diskinfo[1];
+ loc->cylinders = diskinfo[2];
+ loc->start = sd[MINOR(inode->i_rdev)].start_sect;
+#else
+ put_user(diskinfo[0], &loc->heads);
+ put_user(diskinfo[1], &loc->sectors);
+ put_user(diskinfo[2], &loc->cylinders);
+ put_user(sd[MINOR(inode->i_rdev)].start_sect, &loc->start);
+#endif
+ return 0;
+ case BLKGETSIZE: /* Return device size */
+ if (!arg) return -EINVAL;
+ error = verify_area(VERIFY_WRITE, (long *) arg, sizeof(long));
+ if (error)
+ return error;
+ put_user(sd[MINOR(inode->i_rdev)].nr_sects,
+ (long *) arg);
+ return 0;
+
+ case BLKRASET:
+ if (!suser())
+ return -EACCES;
+ if(!(inode->i_rdev)) return -EINVAL;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ case BLKRAGET:
+ if (!arg)
+ return -EINVAL;
+ error = verify_area(VERIFY_WRITE, (int *) arg, sizeof(int));
+ if (error)
+ return error;
+ put_user(read_ahead[MAJOR(inode->i_rdev)], (int *) arg);
+ return 0;
+
+ case BLKFLSBUF:
+ if(!suser()) return -EACCES;
+ if(!(inode->i_rdev)) return -EINVAL;
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case BLKRRPART: /* Re-read partition tables */
+ return revalidate_scsidisk(dev, 1);
+
+ RO_IOCTLS(dev, arg);
+
+ default:
+ return scsi_ioctl(rscsi_disks[MINOR(dev) >> 4].device , cmd, (void *) arg);
+ }
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/seagate.c b/linux/src/drivers/scsi/seagate.c
new file mode 100644
index 0000000..3dd8f9d
--- /dev/null
+++ b/linux/src/drivers/scsi/seagate.c
@@ -0,0 +1,1679 @@
+/*
+ * seagate.c Copyright (C) 1992, 1993 Drew Eckhardt
+ * low level scsi driver for ST01/ST02, Future Domain TMC-885,
+ * TMC-950 by
+ *
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Note : TMC-880 boards don't work because they have two bits in
+ * the status register flipped, I'll fix this "RSN"
+ *
+ * This card does all the I/O via memory mapped I/O, so there is no need
+ * to check or allocate a region of the I/O address space.
+ */
+
+/*
+ * Configuration :
+ * To use without BIOS -DOVERRIDE=base_address -DCONTROLLER=FD or SEAGATE
+ * -DIRQ will override the default of 5.
+ * Note: You can now set these options from the kernel's "command line".
+ * The syntax is:
+ *
+ * st0x=ADDRESS,IRQ (for a Seagate controller)
+ * or:
+ * tmc8xx=ADDRESS,IRQ (for a TMC-8xx or TMC-950 controller)
+ * eg:
+ * tmc8xx=0xC8000,15
+ *
+ * will configure the driver for a TMC-8xx style controller using IRQ 15
+ * with a base address of 0xC8000.
+ *
+ * -DFAST or -DFAST32 will use blind transfers where possible
+ *
+ * -DARBITRATE will cause the host adapter to arbitrate for the
+ * bus for better SCSI-II compatibility, rather than just
+ * waiting for BUS FREE and then doing its thing. Should
+ * let us do one command per Lun when I integrate my
+ * reorganization changes into the distribution sources.
+ *
+ * -DSLOW_HANDSHAKE will allow compatibility with broken devices that don't
+ * handshake fast enough (ie, some CD ROM's) for the Seagate
+ * code.
+ *
+ * -DSLOW_RATE=x, x some number will let you specify a default
+ * transfer rate if handshaking isn't working correctly.
+ */
+
+#ifdef MACH
+#define ARBITRATE
+#define SLOW_HANDSHAKE
+#define FAST32
+#endif
+
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/config.h>
+#include <linux/proc_fs.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "seagate.h"
+#include "constants.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_seagate = {
+ PROC_SCSI_SEAGATE, 7, "seagate",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+#ifndef IRQ
+#define IRQ 5
+#endif
+
+#if (defined(FAST32) && !defined(FAST))
+#define FAST
+#endif
+
+#if defined(SLOW_RATE) && !defined(SLOW_HANDSHAKE)
+#define SLOW_HANDSHAKE
+#endif
+
+#if defined(SLOW_HANDSHAKE) && !defined(SLOW_RATE)
+#define SLOW_RATE 50
+#endif
+
+
+#if defined(LINKED)
+#undef LINKED /* Linked commands are currently broken ! */
+#endif
+
+static int internal_command(unsigned char target, unsigned char lun,
+ const void *cmnd,
+ void *buff, int bufflen, int reselect);
+
+static int incommand; /*
+ set if arbitration has finished and we are
+ in some command phase.
+ */
+
+static const void *base_address = NULL; /*
+ Where the card ROM starts,
+ used to calculate memory mapped
+ register location.
+ */
+#ifdef notyet
+static volatile int abort_confirm = 0;
+#endif
+
+static volatile void *st0x_cr_sr; /*
+ control register write,
+ status register read.
+ 256 bytes in length.
+
+ Read is status of SCSI BUS,
+ as per STAT masks.
+
+ */
+
+
+static volatile void *st0x_dr; /*
+ data register, read write
+ 256 bytes in length.
+ */
+
+
+static volatile int st0x_aborted=0; /*
+ set when we are aborted, ie by a time out, etc.
+ */
+
+static unsigned char controller_type = 0; /* set to SEAGATE for ST0x boards or FD for TMC-8xx boards */
+static unsigned char irq = IRQ;
+
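+/*
+ * Pack the host adapter code into bits 16-23, the target's message
+ * byte into bits 8-15 and its SCSI status into bits 0-7 of the result
+ * word returned to the midlevel driver.
+ */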
+#define retcode(result) (((result) << 16) | (message << 8) | status)
+#define STATUS (*(volatile unsigned char *) st0x_cr_sr)
+#define CONTROL STATUS
+#define DATA (*(volatile unsigned char *) st0x_dr)
+#define WRITE_CONTROL(d) { writeb((d), st0x_cr_sr); }
+#define WRITE_DATA(d) { writeb((d), st0x_dr); }
+
+void st0x_setup (char *str, int *ints) {
+ controller_type = SEAGATE;
+ base_address = (void *) ints[1];
+ irq = ints[2];
+}
+
+void tmc8xx_setup (char *str, int *ints) {
+ controller_type = FD;
+ base_address = (void *) ints[1];
+ irq = ints[2];
+}
+
+
+#ifndef OVERRIDE
+static const char * seagate_bases[] = {
+ (char *) 0xc8000, (char *) 0xca000, (char *) 0xcc000,
+ (char *) 0xce000, (char *) 0xdc000, (char *) 0xde000
+};
+
+typedef struct {
+ const char *signature ;
+ unsigned offset;
+ unsigned length;
+ unsigned char type;
+} Signature;
+
+static const Signature signatures[] = {
+#ifdef CONFIG_SCSI_SEAGATE
+{"ST01 v1.7 (C) Copyright 1987 Seagate", 15, 37, SEAGATE},
+{"SCSI BIOS 2.00 (C) Copyright 1987 Seagate", 15, 40, SEAGATE},
+
+/*
+ * The following two lines are NOT mistakes. One detects ROM revision
+ * 3.0.0, the other 3.2. Since seagate has only one type of SCSI adapter,
+ * and this is not going to change, the "SEAGATE" and "SCSI" together
+ * are probably "good enough"
+ */
+
+{"SEAGATE SCSI BIOS ",16, 17, SEAGATE},
+{"SEAGATE SCSI BIOS ",17, 17, SEAGATE},
+
+/*
+ * However, future domain makes several incompatible SCSI boards, so specific
+ * signatures must be used.
+ */
+
+{"FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90",5, 47, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90",5, 47, FD},
+{"FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90", 5, 46, FD},
+{"FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92", 5, 44, FD},
+{"IBM F1 BIOS V1.1004/30/92", 5, 25, FD},
+{"FUTURE DOMAIN TMC-950", 5, 21, FD},
+#endif /* CONFIG_SCSI_SEAGATE */
+}
+;
+
+#define NUM_SIGNATURES (sizeof(signatures) / sizeof(Signature))
+#endif /* n OVERRIDE */
+
+/*
+ * hostno stores the hostnumber, as told to us by the init routine.
+ */
+
+static int hostno = -1;
+static void seagate_reconnect_intr(int, void *, struct pt_regs *);
+
+#ifdef FAST
+static int fast = 1;
+#endif
+
+#ifdef SLOW_HANDSHAKE
+/*
+ * Support for broken devices :
+ * The Seagate board has a handshaking problem. Namely, a lack
+ * thereof for slow devices. You can blast 600K/second through
+ * it if you are polling for each byte, more if you do a blind
+ * transfer. In the first case, with a fast device, REQ will
+ * transition high-low or high-low-high before your loop restarts
+ * and you'll have no problems. In the second case, the board
+ * will insert wait states for up to 13.2 usecs for REQ to
+ * transition low->high, and everything will work.
+ *
+ * However, there's nothing in the state machine that says
+ * you *HAVE* to see a high-low-high set of transitions before
+ * sending the next byte, and slow things like the Trantor CD ROMS
+ * will break because of this.
+ *
+ * So, we need to slow things down, which isn't as simple as it
+ * seems. We can't slow things down period, because then people
+ * who don't recompile their kernels will shoot me for ruining
+ * their performance. We need to do it on a case per case basis.
+ *
+ * The best thing for performance is, only for borken devices
+ * (this is stored on a per-target basis in the scsi_devices array),
+ * to wait for a low->high transition before continuing with that
+ * transfer.  If we time out, continue anyway.  We don't need
+ * a long timeout, because REQ should only be asserted until the
+ * corresponding ACK is received and processed.
+ *
+ * Note that we can't use the system timer for this, because of
+ * resolution, and we *really* can't use the timer chip since
+ * gettimeofday() and the beeper routines use that. So,
+ * the best thing for us to do will be to calibrate a timing
+ * loop in the initialization code using the timer chip before
+ * gettimeofday() can screw with it.
+ */
+
+static int borken_calibration = 0;
+static void borken_init (void) {
+ register int count = 0, start = jiffies + 1, stop = start + 25;
+
+ while (jiffies < start);
+ for (;jiffies < stop; ++count);
+
+/*
+ * Ok, we now have a count for .25 seconds. Convert to a
+ * count per second and divide by transfer rate in K.
+ */
+
+ borken_calibration = (count * 4) / (SLOW_RATE*1024);
+
+ if (borken_calibration < 1)
+ borken_calibration = 1;
+#if (DEBUG & DEBUG_BORKEN)
+ printk("scsi%d : borken calibrated to %dK/sec, %d cycles per transfer\n",
+ hostno, BORKEN_RATE, borken_calibration);
+#endif
+}
+
+static inline void borken_wait(void) {
+ register int count;
+ for (count = borken_calibration; count && (STATUS & STAT_REQ);
+ --count);
+#if (DEBUG & DEBUG_BORKEN)
+ if (count)
+ printk("scsi%d : borken timeout\n", hostno);
+#endif
+}
+
+#endif /* def SLOW_HANDSHAKE */
+
+int seagate_st0x_detect (Scsi_Host_Template * tpnt)
+ {
+ struct Scsi_Host *instance;
+#ifndef OVERRIDE
+ int i,j;
+#endif
+
+ tpnt->proc_dir = &proc_scsi_seagate;
+/*
+ * First, we try for the manual override.
+ */
+#ifdef DEBUG
+ printk("Autodetecting ST0x / TMC-8xx\n");
+#endif
+
+ if (hostno != -1)
+ {
+ printk ("ERROR : seagate_st0x_detect() called twice.\n");
+ return 0;
+ }
+
+ /* If the user specified the controller type from the command line,
+ controller_type will be non-zero, so don't try to detect one */
+
+ if (!controller_type) {
+#ifdef OVERRIDE
+ base_address = (void *) OVERRIDE;
+
+/* CONTROLLER is used to override controller (SEAGATE or FD). PM: 07/01/93 */
+#ifdef CONTROLLER
+ controller_type = CONTROLLER;
+#else
+#error Please use -DCONTROLLER=SEAGATE or -DCONTROLLER=FD to override controller type
+#endif /* CONTROLLER */
+#ifdef DEBUG
+ printk("Base address overridden to %x, controller type is %s\n",
+ base_address,controller_type == SEAGATE ? "SEAGATE" : "FD");
+#endif
+#else /* OVERRIDE */
+/*
+ * To detect this card, we simply look for the signature
+ * from the BIOS version notice in all the possible locations
+ * of the ROM's. This has a nice side effect of not trashing
+ * any register locations that might be used by something else.
+ *
+ * XXX - note that we probably should be probing the address
+ * space for the on-board RAM instead.
+ */
+
+ for (i = 0; i < (sizeof (seagate_bases) / sizeof (char * )); ++i)
+ for (j = 0; !base_address && j < NUM_SIGNATURES; ++j)
+ if (!memcmp ((const void *) (seagate_bases[i] +
+ signatures[j].offset), (const void *) signatures[j].signature,
+ signatures[j].length)) {
+ base_address = (const void *) seagate_bases[i];
+ controller_type = signatures[j].type;
+ }
+#endif /* OVERRIDE */
+ } /* (! controller_type) */
+
+ tpnt->this_id = (controller_type == SEAGATE) ? 7 : 6;
+ tpnt->name = (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR;
+
+ if (base_address)
+ {
+ st0x_cr_sr =(void *) (((const unsigned char *) base_address) + (controller_type == SEAGATE ? 0x1a00 : 0x1c00));
+ st0x_dr = (void *) (((const unsigned char *) base_address ) + (controller_type == SEAGATE ? 0x1c00 : 0x1e00));
+#ifdef DEBUG
+ printk("%s detected. Base address = %x, cr = %x, dr = %x\n", tpnt->name, base_address, st0x_cr_sr, st0x_dr);
+#endif
+/*
+ * At all times, we will use IRQ 5. Should also check for IRQ3 if we
+ * lose our first interrupt.
+ */
+ instance = scsi_register(tpnt, 0);
+ hostno = instance->host_no;
+ if (request_irq((int) irq, seagate_reconnect_intr, SA_INTERRUPT,
+ (controller_type == SEAGATE) ? "seagate" : "tmc-8xx", NULL)) {
+ printk("scsi%d : unable to allocate IRQ%d\n",
+ hostno, (int) irq);
+ return 0;
+ }
+ instance->irq = irq;
+ instance->io_port = (unsigned int) base_address;
+#ifdef SLOW_HANDSHAKE
+ borken_init();
+#endif
+
+ printk("%s options:"
+#ifdef ARBITRATE
+ " ARBITRATE"
+#endif
+#ifdef SLOW_HANDSHAKE
+ " SLOW_HANDSHAKE"
+#endif
+#ifdef FAST
+#ifdef FAST32
+ " FAST32"
+#else
+ " FAST"
+#endif
+#endif
+#ifdef LINKED
+ " LINKED"
+#endif
+ "\n", tpnt->name);
+ return 1;
+ }
+ else
+ {
+#ifdef DEBUG
+ printk("ST0x / TMC-8xx not detected.\n");
+#endif
+ return 0;
+ }
+ }
+
+const char *seagate_st0x_info(struct Scsi_Host * shpnt) {
+ static char buffer[64];
+ sprintf(buffer, "%s at irq %d, address 0x%05X",
+ (controller_type == SEAGATE) ? ST0X_ID_STR : FD_ID_STR,
+ irq, (unsigned int)base_address);
+ return buffer;
+}
+
+int seagate_st0x_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout)
+{
+ const char *info = seagate_st0x_info(NULL);
+ int len;
+ int pos;
+ int begin;
+
+ if (inout) return(-ENOSYS);
+
+ begin = 0;
+ strcpy(buffer,info);
+ strcat(buffer,"\n");
+
+ pos = len = strlen(buffer);
+
+ if (pos<offset) {
+ len = 0;
+ begin = pos;
+ }
+
+ *start = buffer + (offset - begin);
+ len -= (offset - begin);
+ if ( len > length ) len = length;
+ return(len);
+}
+
+/*
+ * These are our saved pointers for the outstanding command that is
+ * waiting for a reconnect
+ */
+
+static unsigned char current_target, current_lun;
+static unsigned char *current_cmnd, *current_data;
+static int current_nobuffs;
+static struct scatterlist *current_buffer;
+static int current_bufflen;
+
+#ifdef LINKED
+
+/*
+ * linked_connected indicates whether or not we are currently connected to
+ * linked_target, linked_lun and in an INFORMATION TRANSFER phase,
+ * using linked commands.
+ */
+
+static int linked_connected = 0;
+static unsigned char linked_target, linked_lun;
+#endif
+
+
+static void (*done_fn)(Scsi_Cmnd *) = NULL;
+static Scsi_Cmnd * SCint = NULL;
+
+/*
+ * These control whether or not disconnect / reconnect will be attempted,
+ * or are being attempted.
+ */
+
+#define NO_RECONNECT 0
+#define RECONNECT_NOW 1
+#define CAN_RECONNECT 2
+
+#ifdef LINKED
+
+/*
+ * LINKED_RIGHT indicates that we are currently connected to the correct target
+ * for this command, LINKED_WRONG indicates that we are connected to the wrong
+ * target. Note that these imply CAN_RECONNECT.
+ */
+
+#define LINKED_RIGHT 3
+#define LINKED_WRONG 4
+#endif
+
+/*
+ * This determines if we are expecting to reconnect or not.
+ */
+
+static int should_reconnect = 0;
+
+/*
+ * The seagate_reconnect_intr routine is called when a target reselects the
+ * host adapter. This occurs on the interrupt triggered by the target
+ * asserting SEL.
+ */
+
+static void seagate_reconnect_intr(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ int temp;
+ Scsi_Cmnd * SCtmp;
+
+/* enable all other interrupts. */
+ sti();
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : seagate_reconnect_intr() called\n", hostno);
+#endif
+
+ if (!should_reconnect)
+ printk("scsi%d: unexpected interrupt.\n", hostno);
+ else {
+ should_reconnect = 0;
+
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : internal_command("
+	    "%d, %08x, %d, RECONNECT_NOW\n", hostno,
+ current_target, current_data, current_bufflen);
+#endif
+
+ temp = internal_command (current_target, current_lun,
+ current_cmnd, current_data, current_bufflen,
+ RECONNECT_NOW);
+
+ if (msg_byte(temp) != DISCONNECT) {
+ if (done_fn) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : done_fn(%d,%08x)", hostno,
+ hostno, temp);
+#endif
+ if(!SCint) panic("SCint == NULL in seagate");
+ SCtmp = SCint;
+ SCint = NULL;
+ SCtmp->result = temp;
+ done_fn (SCtmp);
+ } else
+ printk("done_fn() not defined.\n");
+ }
+ }
+ }
+
+/*
+ * The seagate_st0x_queue_command() function provides a queued interface
+ * to the seagate SCSI driver. Basically, it just passes control onto the
+ * seagate_command() function, after fixing it so that the done_fn()
+ * is set to the one passed to the function. We have to be very careful,
+ * because there are some commands on some devices that do not disconnect,
+ * and if we simply call the done_fn when the command is done then another
+ * command is started and queue_command is called again... We end up
+ * overflowing the kernel stack, and this tends not to be such a good idea.
+ */
+
+static int recursion_depth = 0;
+
+int seagate_st0x_queue_command (Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
+ {
+ int result, reconnect;
+ Scsi_Cmnd * SCtmp;
+
+ done_fn = done;
+ current_target = SCpnt->target;
+ current_lun = SCpnt->lun;
+ current_cmnd = SCpnt->cmnd;
+ current_data = (unsigned char *) SCpnt->request_buffer;
+ current_bufflen = SCpnt->request_bufflen;
+ SCint = SCpnt;
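+/*
+ * If we are already inside queue_command (a command completed without
+ * disconnecting and its done function queued another one), just record
+ * the new command; the outer invocation's loop below will issue it.
+ */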
+ if(recursion_depth) {
+ return 0;
+ };
+ recursion_depth++;
+ do{
+#ifdef LINKED
+/*
+ * Set linked command bit in control field of SCSI command.
+ */
+
+ current_cmnd[SCpnt->cmd_len] |= 0x01;
+ if (linked_connected) {
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : using linked commands, current I_T_L nexus is ",
+ hostno);
+#endif
+ if ((linked_target == current_target) &&
+ (linked_lun == current_lun)) {
+#if (DEBUG & DEBUG_LINKED)
+ printk("correct\n");
+#endif
+ reconnect = LINKED_RIGHT;
+ } else {
+#if (DEBUG & DEBUG_LINKED)
+ printk("incorrect\n");
+#endif
+ reconnect = LINKED_WRONG;
+ }
+ } else
+#endif /* LINKED */
+ reconnect = CAN_RECONNECT;
+
+
+
+
+
+ result = internal_command (SCint->target, SCint->lun, SCint->cmnd, SCint->request_buffer,
+ SCint->request_bufflen,
+ reconnect);
+ if (msg_byte(result) == DISCONNECT) break;
+ SCtmp = SCint;
+ SCint = NULL;
+ SCtmp->result = result;
+ done_fn (SCtmp);
+ } while(SCint);
+ recursion_depth--;
+ return 0;
+ }
+
+int seagate_st0x_command (Scsi_Cmnd * SCpnt) {
+ return internal_command (SCpnt->target, SCpnt->lun, SCpnt->cmnd, SCpnt->request_buffer,
+ SCpnt->request_bufflen,
+ (int) NO_RECONNECT);
+}
+
+static int internal_command(unsigned char target, unsigned char lun, const void *cmnd,
+ void *buff, int bufflen, int reselect) {
+ int len = 0;
+ unsigned char *data = NULL;
+ struct scatterlist *buffer = NULL;
+ int nobuffs = 0;
+ int clock;
+ int temp;
+#ifdef SLOW_HANDSHAKE
+ int borken; /* Does the current target require Very Slow I/O ? */
+#endif
+
+
+#if (DEBUG & PHASE_DATAIN) || (DEBUG & PHASE_DATAOUT)
+ int transfered = 0;
+#endif
+
+#if (((DEBUG & PHASE_ETC) == PHASE_ETC) || (DEBUG & PRINT_COMMAND) || \
+ (DEBUG & PHASE_EXIT))
+ int i;
+#endif
+
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ int phase=0, newphase;
+#endif
+
+ int done = 0;
+ unsigned char status = 0;
+ unsigned char message = 0;
+ register unsigned char status_read;
+
+ unsigned transfersize = 0, underflow = 0;
+
+ incommand = 0;
+ st0x_aborted = 0;
+
+#ifdef SLOW_HANDSHAKE
+ borken = (int) SCint->device->borken;
+#endif
+
+#if (DEBUG & PRINT_COMMAND)
+ printk ("scsi%d : target = %d, command = ", hostno, target);
+ print_command((unsigned char *) cmnd);
+ printk("\n");
+#endif
+
+#if (DEBUG & PHASE_RESELECT)
+ switch (reselect) {
+ case RECONNECT_NOW :
+ printk("scsi%d : reconnecting\n", hostno);
+ break;
+#ifdef LINKED
+ case LINKED_RIGHT :
+ printk("scsi%d : connected, can reconnect\n", hostno);
+ break;
+ case LINKED_WRONG :
+ printk("scsi%d : connected to wrong target, can reconnect\n",
+ hostno);
+ break;
+#endif
+ case CAN_RECONNECT :
+ printk("scsi%d : allowed to reconnect\n", hostno);
+ break;
+ default :
+ printk("scsi%d : not allowed to reconnect\n", hostno);
+ }
+#endif
+
+
+ if (target == (controller_type == SEAGATE ? 7 : 6))
+ return DID_BAD_TARGET;
+
+/*
+ * We work it differently depending on if this is "the first time,"
+ * or a reconnect. If this is a reselect phase, then SEL will
+ * be asserted, and we must skip selection / arbitration phases.
+ */
+
+ switch (reselect) {
+ case RECONNECT_NOW:
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : phase RESELECT \n", hostno);
+#endif
+
+/*
+ * At this point, we should find the logical or of our ID and the original
+ * target's ID on the BUS, with BSY, SEL, and I/O signals asserted.
+ *
+ * After ARBITRATION phase is completed, only SEL, BSY, and the
+ * target ID are asserted. A valid initiator ID is not on the bus
+ * until IO is asserted, so we must wait for that.
+ */
+ clock = jiffies + 10;
+ for (;;) {
+ temp = STATUS;
+ if ((temp & STAT_IO) && !(temp & STAT_BSY))
+ break;
+
+ if (jiffies > clock) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : RESELECT timed out while waiting for IO .\n",
+ hostno);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+ }
+
+/*
+ * After I/O is asserted by the target, we can read our ID and its
+ * ID off of the BUS.
+ */
+
+ if (!((temp = DATA) & (controller_type == SEAGATE ? 0x80 : 0x40)))
+ {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : detected reconnect request to different target.\n"
+ "\tData bus = %d\n", hostno, temp);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+
+ if (!(temp & (1 << current_target)))
+ {
+ printk("scsi%d : Unexpected reselect interrupt. Data bus = %d\n",
+ hostno, temp);
+ return (DID_BAD_INTR << 16);
+ }
+
+ buffer=current_buffer;
+ cmnd=current_cmnd; /* WDE add */
+ data=current_data; /* WDE add */
+ len=current_bufflen; /* WDE add */
+ nobuffs=current_nobuffs;
+
+/*
+ * We have determined that we have been selected. At this point,
+ * we must respond to the reselection by asserting BSY ourselves
+ */
+
+#if 1
+ CONTROL = (BASE_CMD | CMD_DRVR_ENABLE | CMD_BSY);
+#else
+ CONTROL = (BASE_CMD | CMD_BSY);
+#endif
+
+/*
+ * The target will drop SEL, and raise BSY, at which time we must drop
+ * BSY.
+ */
+
+ for (clock = jiffies + 10; (jiffies < clock) && (STATUS & STAT_SEL););
+
+ if (jiffies >= clock)
+ {
+ CONTROL = (BASE_CMD | CMD_INTR);
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : RESELECT timed out while waiting for SEL.\n",
+ hostno);
+#endif
+ return (DID_BAD_INTR << 16);
+ }
+
+ CONTROL = BASE_CMD;
+
+/*
+ * At this point, we have connected with the target and can get
+ * on with our lives.
+ */
+ break;
+ case CAN_RECONNECT:
+
+#ifdef LINKED
+/*
+ * This is a bletcherous hack, just as bad as the Unix #! interpreter stuff.
+ * If it turns out we are using the wrong I_T_L nexus, the easiest way to deal
+ * with it is to go into our INFORMATION TRANSFER PHASE code, send a ABORT
+ * message on MESSAGE OUT phase, and then loop back to here.
+ */
+
+connect_loop :
+
+#endif
+
+#if (DEBUG & PHASE_BUS_FREE)
+ printk ("scsi%d : phase = BUS FREE \n", hostno);
+#endif
+
+/*
+ * BUS FREE PHASE
+ *
+ * On entry, we make sure that the BUS is in a BUS FREE
+ * phase, by ensuring that both BSY and SEL are low for
+ * at least one bus settle delay. Several reads help
+ * eliminate wire glitch.
+ */
+
+ clock = jiffies + ST0X_BUS_FREE_DELAY;
+
+#if !defined (ARBITRATE)
+ while (((STATUS | STATUS | STATUS) &
+ (STAT_BSY | STAT_SEL)) &&
+ (!st0x_aborted) && (jiffies < clock));
+
+ if (jiffies > clock)
+ return retcode(DID_BUS_BUSY);
+ else if (st0x_aborted)
+ return retcode(st0x_aborted);
+#endif
+
+#if (DEBUG & PHASE_SELECTION)
+ printk("scsi%d : phase = SELECTION\n", hostno);
+#endif
+
+ clock = jiffies + ST0X_SELECTION_DELAY;
+
+/*
+ * Arbitration/selection procedure :
+ * 1. Disable drivers
+ * 2. Write HOST adapter address bit
+ * 3. Set start arbitration.
+ * 4. We get either ARBITRATION COMPLETE or SELECT at this
+ * point.
+ * 5. OR our ID and targets on bus.
+ * 6. Enable SCSI drivers and asserted SEL and ATTN
+ */
+
+#if defined(ARBITRATE)
+ cli();
+ CONTROL = 0;
+ DATA = (controller_type == SEAGATE) ? 0x80 : 0x40;
+ CONTROL = CMD_START_ARB;
+ sti();
+ while (!((status_read = STATUS) & (STAT_ARB_CMPL | STAT_SEL)) &&
+ (jiffies < clock) && !st0x_aborted);
+
+ if (!(status_read & STAT_ARB_CMPL)) {
+#if (DEBUG & PHASE_SELECTION)
+ if (status_read & STAT_SEL)
+ printk("scsi%d : arbitration lost\n", hostno);
+ else
+ printk("scsi%d : arbitration timeout.\n", hostno);
+#endif
+ CONTROL = BASE_CMD;
+ return retcode(DID_NO_CONNECT);
+ };
+
+#if (DEBUG & PHASE_SELECTION)
+ printk("scsi%d : arbitration complete\n", hostno);
+#endif
+#endif
+
+
+/*
+ * When the SCSI device decides that we're gawking at it, it will
+ * respond by asserting BUSY on the bus.
+ *
+ * Note : the Seagate ST-01/02 product manual says that we should
+ * twiddle the DATA register before the control register. However,
+ * this does not work reliably so we do it the other way around.
+ *
+ * Probably could be a problem with arbitration too, we really should
+ * try this with a SCSI protocol or logic analyzer to see what is
+ * going on.
+ */
+ cli();
+ DATA = (unsigned char) ((1 << target) | (controller_type == SEAGATE ? 0x80 : 0x40));
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE | CMD_SEL |
+ (reselect ? CMD_ATTN : 0);
+ sti();
+ while (!((status_read = STATUS) & STAT_BSY) &&
+ (jiffies < clock) && !st0x_aborted)
+
+#if 0 && (DEBUG & PHASE_SELECTION)
+ {
+ temp = clock - jiffies;
+
+ if (!(jiffies % 5))
+ printk("seagate_st0x_timeout : %d \r",temp);
+
+ }
+ printk("Done. \n");
+ printk("scsi%d : status = %02x, seagate_st0x_timeout = %d, aborted = %02x \n",
+ hostno, status_read, temp, st0x_aborted);
+#else
+ ;
+#endif
+
+
+ if ((jiffies >= clock) && !(status_read & STAT_BSY))
+ {
+#if (DEBUG & PHASE_SELECTION)
+ printk ("scsi%d : NO CONNECT with target %d, status = %x \n",
+ hostno, target, STATUS);
+#endif
+ return retcode(DID_NO_CONNECT);
+ }
+
+/*
+ * If we have been aborted, and we have a command in progress, IE the
+ * target still has BSY asserted, then we will reset the bus, and
+ * notify the midlevel driver to expect sense.
+ */
+
+ if (st0x_aborted) {
+ CONTROL = BASE_CMD;
+ if (STATUS & STAT_BSY) {
+ printk("scsi%d : BST asserted after we've been aborted.\n",
+ hostno);
+ seagate_st0x_reset(NULL, 0);
+ return retcode(DID_RESET);
+ }
+ return retcode(st0x_aborted);
+ }
+
+/* Establish current pointers. Take into account scatter / gather */
+
+ if ((nobuffs = SCint->use_sg)) {
+#if (DEBUG & DEBUG_SG)
+ {
+ int i;
+ printk("scsi%d : scatter gather requested, using %d buffers.\n",
+ hostno, nobuffs);
+ for (i = 0; i < nobuffs; ++i)
+ printk("scsi%d : buffer %d address = %08x length = %d\n",
+ hostno, i, buffer[i].address, buffer[i].length);
+ }
+#endif
+
+ buffer = (struct scatterlist *) SCint->buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+ } else {
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : scatter gather not requested.\n", hostno);
+#endif
+ buffer = NULL;
+ len = SCint->request_bufflen;
+ data = (unsigned char *) SCint->request_buffer;
+ }
+
+#if (DEBUG & (PHASE_DATAIN | PHASE_DATAOUT))
+ printk("scsi%d : len = %d\n", hostno, len);
+#endif
+
+ break;
+#ifdef LINKED
+ case LINKED_RIGHT:
+ break;
+ case LINKED_WRONG:
+ break;
+#endif
+ }
+
+/*
+ * There are several conditions under which we wish to send a message :
+ * 1. When we are allowing disconnect / reconnect, and need to establish
+ * the I_T_L nexus via an IDENTIFY with the DiscPriv bit set.
+ *
+ * 2.  When we are doing linked commands, and have the wrong I_T_L nexus
+ * established and want to send an ABORT message.
+ */
+
+
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE |
+ (((reselect == CAN_RECONNECT)
+#ifdef LINKED
+ || (reselect == LINKED_WRONG)
+#endif
+ ) ? CMD_ATTN : 0) ;
+
+/*
+ * INFORMATION TRANSFER PHASE
+ *
+ * The nasty looking read / write inline assembler loops we use for
+ * DATAIN and DATAOUT phases are approximately 4-5 times as fast as
+ * the 'C' versions - since we're moving 1024 bytes of data, this
+ * really adds up.
+ */
+
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ printk("scsi%d : phase = INFORMATION TRANSFER\n", hostno);
+#endif
+
+ incommand = 1;
+ transfersize = SCint->transfersize;
+ underflow = SCint->underflow;
+
+
+/*
+ * Now, we poll the device for status information,
+ * and handle any requests it makes. Note that since we are unsure of
+ * how much data will be flowing across the system, etc and cannot
+ * make reasonable timeouts, that we will instead have the midlevel
+ * driver handle any timeouts that occur in this phase.
+ */
+
+ while (((status_read = STATUS) & STAT_BSY) && !st0x_aborted && !done)
+ {
+#ifdef PARITY
+ if (status_read & STAT_PARITY)
+ {
+ printk("scsi%d : got parity error\n", hostno);
+ st0x_aborted = DID_PARITY;
+ }
+#endif
+
+ if (status_read & STAT_REQ)
+ {
+#if ((DEBUG & PHASE_ETC) == PHASE_ETC)
+ if ((newphase = (status_read & REQ_MASK)) != phase)
+ {
+ phase = newphase;
+ switch (phase)
+ {
+ case REQ_DATAOUT:
+ printk("scsi%d : phase = DATA OUT\n",
+ hostno);
+ break;
+ case REQ_DATAIN :
+ printk("scsi%d : phase = DATA IN\n",
+ hostno);
+ break;
+ case REQ_CMDOUT :
+ printk("scsi%d : phase = COMMAND OUT\n",
+ hostno);
+ break;
+ case REQ_STATIN :
+ printk("scsi%d : phase = STATUS IN\n",
+ hostno);
+ break;
+ case REQ_MSGOUT :
+ printk("scsi%d : phase = MESSAGE OUT\n",
+ hostno);
+ break;
+ case REQ_MSGIN :
+ printk("scsi%d : phase = MESSAGE IN\n",
+ hostno);
+ break;
+ default :
+ printk("scsi%d : phase = UNKNOWN\n",
+ hostno);
+ st0x_aborted = DID_ERROR;
+ }
+ }
+#endif
+ switch (status_read & REQ_MASK)
+ {
+ case REQ_DATAOUT :
+/*
+ * If we are in fast mode, then we simply splat the data out
+ * in word-sized chunks as fast as we can.
+ */
+
+#ifdef FAST
+if (!len) {
+#if 0
+ printk("scsi%d: underflow to target %d lun %d \n",
+ hostno, target, lun);
+ st0x_aborted = DID_ERROR;
+ fast = 0;
+#endif
+ break;
+}
+
+if (fast && transfersize && !(len % transfersize) && (len >= transfersize)
+#ifdef FAST32
+ && !(transfersize % 4)
+#endif
+ ) {
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
+ " len = %d, data = %08x\n", hostno, SCint->underflow,
+ SCint->transfersize, len, data);
+#endif
+
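+/*
+ * Blind transfer: the board inserts wait states for us, so we can
+ * write the whole chunk to the memory-mapped data register without
+ * polling REQ for every byte.
+ */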
+ {
+#ifdef FAST32
+ unsigned int *iop = phys_to_virt (st0x_dr);
+ const unsigned int *dp = (unsigned int *) data;
+ int xferlen = transfersize >> 2;
+#else
+ unsigned char *iop = phys_to_virt (st0x_dr);
+ const unsigned char *dp = data;
+ int xferlen = transfersize;
+#endif
+ for (; xferlen; --xferlen)
+ *iop = *dp++;
+ }
+
+ len -= transfersize;
+ data += transfersize;
+
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer complete len = %d data = %08x\n",
+ hostno, len, data);
+#endif
+
+
+} else
+#endif
+
+{
+/*
+ * We loop as long as we are in a data out phase, there is data to send,
+ * and BSY is still active.
+ */
+
+ while (len)
+ {
+ unsigned char stat;
+
+ stat = STATUS;
+ if (!(stat & STAT_BSY) || ((stat & REQ_MASK) != REQ_DATAOUT))
+ break;
+ if (stat & STAT_REQ)
+ {
+ WRITE_DATA (*data++);
+ --len;
+ }
+ }
+}
+
+ if (!len && nobuffs) {
+ --nobuffs;
+ ++buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : next scatter-gather buffer len = %d address = %08x\n",
+ hostno, len, data);
+#endif
+ }
+ break;
+
+ case REQ_DATAIN :
+#ifdef SLOW_HANDSHAKE
+ if (borken) {
+#if (DEBUG & (PHASE_DATAIN))
+ transfered += len;
+#endif
+ for (; len && (STATUS & (REQ_MASK | STAT_REQ)) == (REQ_DATAIN |
+ STAT_REQ); --len) {
+ *data++ = DATA;
+ borken_wait();
+}
+#if (DEBUG & (PHASE_DATAIN))
+ transfered -= len;
+#endif
+ } else
+#endif
+#ifdef FAST
+if (fast && transfersize && !(len % transfersize) && (len >= transfersize)
+#ifdef FAST32
+ && !(transfersize % 4)
+#endif
+ ) {
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer, underflow = %d, transfersize = %d\n"
+ " len = %d, data = %08x\n", hostno, SCint->underflow,
+ SCint->transfersize, len, data);
+#endif
+ {
+#ifdef FAST32
+ const unsigned int *iop = phys_to_virt (st0x_dr);
+ unsigned int *dp = (unsigned int *) data;
+ int xferlen = len >> 2;
+#else
+ const unsigned char *iop = phys_to_virt (st0x_dr);
+ unsigned char *dp = data;
+ int xferlen = len;
+#endif
+ for (; xferlen; --xferlen)
+ *dp++ = *iop;
+ }
+
+ len -= transfersize;
+ data += transfersize;
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered += %d\n", hostno, transfersize);
+ transfered += transfersize;
+#endif
+
+#if (DEBUG & DEBUG_FAST)
+ printk("scsi%d : FAST transfer complete len = %d data = %08x\n",
+ hostno, len, data);
+#endif
+
+} else
+#endif
+{
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered += %d\n", hostno, len);
+ transfered += len; /* Assume we'll transfer it all, then
+ subtract what we *didn't* transfer */
+#endif
+
+/*
+ * We loop as long as we are in a data in phase, there is room to read,
+ * and BSY is still active
+ */
+
+ while (len)
+ {
+ unsigned char stat;
+
+ stat = STATUS;
+ if (!(stat & STAT_BSY) || ((stat & REQ_MASK) != REQ_DATAIN))
+ break;
+ if (stat & STAT_REQ)
+ {
+ *data++ = DATA;
+ --len;
+ }
+ }
+
+#if (DEBUG & PHASE_DATAIN)
+ printk("scsi%d: transfered -= %d\n", hostno, len);
+	transfered -= len;	/* Since we assumed all of len got
+				 * transferred, correct our mistake */
+#endif
+}
+
+ if (!len && nobuffs) {
+ --nobuffs;
+ ++buffer;
+ len = buffer->length;
+ data = (unsigned char *) buffer->address;
+#if (DEBUG & DEBUG_SG)
+ printk("scsi%d : next scatter-gather buffer len = %d address = %08x\n",
+ hostno, len, data);
+#endif
+ }
+
+ break;
+
+ case REQ_CMDOUT :
+ while (((status_read = STATUS) & STAT_BSY) &&
+ ((status_read & REQ_MASK) == REQ_CMDOUT))
+ if (status_read & STAT_REQ) {
+ DATA = *(const unsigned char *) cmnd;
+ cmnd = 1+(const unsigned char *) cmnd;
+#ifdef SLOW_HANDSHAKE
+ if (borken)
+ borken_wait();
+#endif
+ }
+ break;
+
+ case REQ_STATIN :
+ status = DATA;
+ break;
+
+ case REQ_MSGOUT :
+/*
+ * We can only have sent a MSG OUT if we requested to do this
+ * by raising ATTN. So, we must drop ATTN.
+ */
+
+ CONTROL = BASE_CMD | CMD_DRVR_ENABLE;
+/*
+ * If we are reconnecting, then we must send an IDENTIFY message in
+ * response to MSGOUT.
+ */
+ switch (reselect) {
+ case CAN_RECONNECT:
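+/*
+ * The IDENTIFY message carries the LUN and, with the first argument
+ * set, the DiscPriv bit that allows the target to disconnect.
+ */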
+ DATA = IDENTIFY(1, lun);
+
+#if (DEBUG & (PHASE_RESELECT | PHASE_MSGOUT))
+ printk("scsi%d : sent IDENTIFY message.\n", hostno);
+#endif
+ break;
+#ifdef LINKED
+ case LINKED_WRONG:
+ DATA = ABORT;
+ linked_connected = 0;
+ reselect = CAN_RECONNECT;
+ goto connect_loop;
+#if (DEBUG & (PHASE_MSGOUT | DEBUG_LINKED))
+ printk("scsi%d : sent ABORT message to cancel incorrect I_T_L nexus.\n", hostno);
+#endif
+#endif /* LINKED */
+#if (DEBUG & DEBUG_LINKED)
+ printk("correct\n");
+#endif
+ default:
+ DATA = NOP;
+ printk("scsi%d : target %d requested MSGOUT, sent NOP message.\n", hostno, target);
+ }
+ break;
+
+ case REQ_MSGIN :
+ switch (message = DATA) {
+ case DISCONNECT :
+ should_reconnect = 1;
+ current_data = data; /* WDE add */
+ current_buffer = buffer;
+ current_bufflen = len; /* WDE add */
+ current_nobuffs = nobuffs;
+#ifdef LINKED
+ linked_connected = 0;
+#endif
+ done=1;
+#if (DEBUG & (PHASE_RESELECT | PHASE_MSGIN))
+ printk("scsi%d : disconnected.\n", hostno);
+#endif
+ break;
+
+#ifdef LINKED
+ case LINKED_CMD_COMPLETE:
+ case LINKED_FLG_CMD_COMPLETE:
+#endif
+ case COMMAND_COMPLETE :
+/*
+ * Note : we should check for underflow here.
+ */
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : command complete.\n", hostno);
+#endif
+ done = 1;
+ break;
+ case ABORT :
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : abort message.\n", hostno);
+#endif
+ done=1;
+ break;
+ case SAVE_POINTERS :
+ current_buffer = buffer;
+ current_bufflen = len; /* WDE add */
+ current_data = data; /* WDE mod */
+ current_nobuffs = nobuffs;
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : pointers saved.\n", hostno);
+#endif
+ break;
+ case RESTORE_POINTERS:
+ buffer=current_buffer;
+ cmnd=current_cmnd;
+ data=current_data; /* WDE mod */
+ len=current_bufflen;
+ nobuffs=current_nobuffs;
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : pointers restored.\n", hostno);
+#endif
+ break;
+ default:
+
+/*
+ * IDENTIFY distinguishes itself from the other messages by setting the
+ * high bit.
+ *
+ * Note : we need to handle at least one outstanding command per LUN,
+ * and need to hash the SCSI command for that I_T_L nexus based on the
+ * known ID (at this point) and LUN.
+ */
+
+ if (message & 0x80) {
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : IDENTIFY message received from id %d, lun %d.\n",
+ hostno, target, message & 7);
+#endif
+ } else {
+
+/*
+ * We should go into a MESSAGE OUT phase, and send a MESSAGE_REJECT
+ * if we run into a message that we don't like. The seagate driver
+ * needs some serious restructuring first though.
+ */
+
+#if (DEBUG & PHASE_MSGIN)
+ printk("scsi%d : unknown message %d from target %d.\n",
+ hostno, message, target);
+#endif
+ }
+ }
+ break;
+
+ default :
+ printk("scsi%d : unknown phase.\n", hostno);
+ st0x_aborted = DID_ERROR;
+ }
+
+#ifdef SLOW_HANDSHAKE
+/*
+ * I really don't care to deal with borken devices in each single
+ * byte transfer case (ie, message in, message out, status), so
+ * I'll do the wait here if necessary.
+ */
+ if (borken)
+ borken_wait();
+#endif
+
+ } /* if ends */
+ } /* while ends */
+
+#if (DEBUG & (PHASE_DATAIN | PHASE_DATAOUT | PHASE_EXIT))
+ printk("scsi%d : Transfered %d bytes\n", hostno, transfered);
+#endif
+
+#if (DEBUG & PHASE_EXIT)
+#if 0 /* Doesn't work for scatter / gather */
+ printk("Buffer : \n");
+ for (i = 0; i < 20; ++i)
+ printk ("%02x ", ((unsigned char *) data)[i]); /* WDE mod */
+ printk("\n");
+#endif
+ printk("scsi%d : status = ", hostno);
+ print_status(status);
+ printk("message = %02x\n", message);
+#endif
+
+
+/* We shouldn't reach this until *after* BSY has been deasserted */
+#ifdef notyet
+ if (st0x_aborted) {
+ if (STATUS & STAT_BSY) {
+ seagate_st0x_reset(NULL);
+ st0x_aborted = DID_RESET;
+ }
+ abort_confirm = 1;
+ }
+#endif
+
+#ifdef LINKED
+else {
+/*
+ * Fix the message byte so that unsuspecting high level drivers don't
+ * puke when they see a LINKED COMMAND message in place of the COMMAND
+ * COMPLETE they may be expecting. Shouldn't be necessary, but it's
+ * better to be on the safe side.
+ *
+ * A non LINKED* message byte will indicate that the command completed,
+ * and we are now disconnected.
+ */
+
+ switch (message) {
+ case LINKED_CMD_COMPLETE :
+ case LINKED_FLG_CMD_COMPLETE :
+ message = COMMAND_COMPLETE;
+ linked_target = current_target;
+ linked_lun = current_lun;
+ linked_connected = 1;
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : keeping I_T_L nexus established for linked command.\n",
+ hostno);
+#endif
+/*
+ * We also will need to adjust status to accommodate intermediate conditions.
+ */
+ if ((status == INTERMEDIATE_GOOD) ||
+ (status == INTERMEDIATE_C_GOOD))
+ status = GOOD;
+
+ break;
+/*
+ * We should also handle the "normal" termination messages here
+ * (ABORT, BUS_DEVICE_RESET?, and COMMAND_COMPLETE) individually,
+ * and flake if things aren't right.
+ */
+
+ default :
+#if (DEBUG & DEBUG_LINKED)
+ printk("scsi%d : closing I_T_L nexus.\n", hostno);
+#endif
+ linked_connected = 0;
+ }
+ }
+#endif /* LINKED */
+
+
+
+
+ if (should_reconnect) {
+#if (DEBUG & PHASE_RESELECT)
+ printk("scsi%d : exiting seagate_st0x_queue_command() with reconnect enabled.\n",
+ hostno);
+#endif
+ CONTROL = BASE_CMD | CMD_INTR ;
+ } else
+ CONTROL = BASE_CMD;
+
+ return retcode (st0x_aborted);
+ }
+
+int seagate_st0x_abort (Scsi_Cmnd * SCpnt)
+ {
+ st0x_aborted = DID_ABORT;
+
+ return SCSI_ABORT_PENDING;
+ }
+
+/*
+ the seagate_st0x_reset function resets the SCSI bus
+*/
+
+int seagate_st0x_reset (Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+ {
+ unsigned clock;
+ /*
+ No timeouts - this command is going to fail because
+ it was reset.
+ */
+
+#ifdef DEBUG
+ printk("In seagate_st0x_reset()\n");
+#endif
+
+
+ /* assert RESET signal on SCSI bus. */
+
+ CONTROL = BASE_CMD | CMD_RST;
+ clock=jiffies+2;
+
+
+ /* Wait. */
+
+ while (jiffies < clock);
+
+ CONTROL = BASE_CMD;
+
+ st0x_aborted = DID_RESET;
+
+#ifdef DEBUG
+ printk("SCSI bus reset.\n");
+#endif
+ return SCSI_RESET_WAKEUP;
+ }
+
+#include <asm/segment.h>
+#include "sd.h"
+#include <scsi/scsi_ioctl.h>
+
+int seagate_st0x_biosparam(Disk * disk, kdev_t dev, int* ip) {
+ unsigned char buf[256 + sizeof(int) * 2], cmd[6], *data, *page;
+ int *sizes, result, formatted_sectors, total_sectors;
+ int cylinders, heads, sectors;
+ int capacity;
+
+/*
+ * Only SCSI-I CCS drives and later implement the necessary mode sense
+ * pages.
+ */
+
+ if (disk->device->scsi_level < 2)
+ return -1;
+
+ sizes = (int *) buf;
+ data = (unsigned char *) (sizes + 2);
+
+ cmd[0] = MODE_SENSE;
+ cmd[1] = (disk->device->lun << 5) & 0xe5;
+ cmd[2] = 0x04; /* Read page 4, rigid disk geometry page current values */
+ cmd[3] = 0;
+ cmd[4] = 255;
+ cmd[5] = 0;
+
+/*
+ * We are transferring 0 bytes in the out direction, and expect to get back
+ * 24 bytes for each mode page.
+ */
+
+ sizes[0] = 0;
+ sizes[1] = 256;
+
+ memcpy (data, cmd, 6);
+
+ if (!(result = kernel_scsi_ioctl (disk->device, SCSI_IOCTL_SEND_COMMAND, (void *) buf))) {
+/*
+ * The mode page lies beyond the 4-byte MODE SENSE header and the
+ * BLOCK DESCRIPTOR, whose length is given in header[3].
+ */
+
+ page = data + 4 + data[3];
+ heads = (int) page[5];
+ cylinders = (page[2] << 16) | (page[3] << 8) | page[4];
+
+ cmd[2] = 0x03; /* Read page 3, format page current values */
+ memcpy (data, cmd, 6);
+
+ if (!(result = kernel_scsi_ioctl (disk->device, SCSI_IOCTL_SEND_COMMAND, (void *) buf))) {
+ page = data + 4 + data[3];
+ sectors = (page[10] << 8) | page[11];
+
+
+/*
+ * Get the total number of formatted sectors from the block descriptor,
+ * so we can tell how many are being used for alternates.
+ */
+
+ formatted_sectors = (data[4 + 1] << 16) | (data[4 + 2] << 8) |
+ data[4 + 3] ;
+
+ total_sectors = (heads * cylinders * sectors);
+
+/*
+ * Adjust the real geometry by subtracting
+ * (spare sectors / (heads * sectors)) cylinders from the number of cylinders.
+ *
+ * It appears that the CE cylinder CAN be a partial cylinder.
+ */
+
+
+printk("scsi%d : heads = %d cylinders = %d sectors = %d total = %d formatted = %d\n",
+ hostno, heads, cylinders, sectors, total_sectors, formatted_sectors);
+
+ if (!heads || !sectors || !cylinders)
+ result = -1;
+ else
+ cylinders -= ((total_sectors - formatted_sectors) / (heads * sectors));
+
+/*
+ * Now, we need to do a sanity check on the geometry to see if it is
+ * BIOS compatible. The maximum BIOS geometry is 1024 cylinders *
+ * 256 heads * 64 sectors.
+ */
+
+ if ((cylinders > 1024) || (sectors > 64)) {
+ /* The Seagates seem to have some mapping:
+ * multiply heads * sectors * cylinders to get the capacity,
+ * then start rounding down. */
+ capacity = heads * sectors * cylinders;
+ sectors = 17; /* Old MFM Drives use this, so does the Seagate */
+ heads = 2;
+ capacity = capacity / sectors;
+ while (cylinders > 1024)
+ {
+ heads *= 2; /* For some reason, they go in multiples */
+ cylinders = capacity / heads;
+ }
+ }
+ ip[0] = heads;
+ ip[1] = sectors;
+ ip[2] = cylinders;
+
+/*
+ * There should be an alternate mapping for things the seagate doesn't
+ * understand, but I couldn't say what it is with reasonable certainty.
+ */
+
+ }
+ }
+
+ return result;
+}
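+
+/*
+ * Illustrative sketch (not part of the original driver, compiled out):
+ * the fallback mapping used above when the reported geometry exceeds
+ * the 1024-cylinder / 64-sector BIOS limit -- keep the capacity,
+ * assume 17 sectors per track, and double the head count until the
+ * cylinder count fits.  The helper name is made up for the example.
+ */
+#if 0
+static void clamp_bios_geometry (int *heads, int *sectors, int *cylinders)
+{
+  int capacity = *heads * *sectors * *cylinders;
+
+  *sectors = 17;
+  *heads = 2;
+  capacity /= *sectors;
+  while (*cylinders > 1024) {
+    *heads *= 2;                       /* double heads, recompute cylinders */
+    *cylinders = capacity / *heads;
+  }
+}
+#endif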
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but that will come later */
+Scsi_Host_Template driver_template = SEAGATE_ST0X;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/seagate.h b/linux/src/drivers/scsi/seagate.h
new file mode 100644
index 0000000..da18dbe
--- /dev/null
+++ b/linux/src/drivers/scsi/seagate.h
@@ -0,0 +1,139 @@
+/*
+ * seagate.h Copyright (C) 1992 Drew Eckhardt
+ * low level scsi driver header for ST01/ST02 by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#ifndef _SEAGATE_H
+ #define _SEAGATE_H
+/*
+ $Header
+*/
+#ifndef ASM
+int seagate_st0x_detect(Scsi_Host_Template *);
+int seagate_st0x_command(Scsi_Cmnd *);
+int seagate_st0x_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+
+int seagate_st0x_abort(Scsi_Cmnd *);
+const char *seagate_st0x_info(struct Scsi_Host *);
+int seagate_st0x_reset(Scsi_Cmnd *, unsigned int);
+int seagate_st0x_proc_info(char *,char **,off_t,int,int,int);
+
+#ifndef NULL
+ #define NULL 0
+#endif
+
+#include <linux/kdev_t.h>
+int seagate_st0x_biosparam(Disk *, kdev_t, int*);
+
+#define SEAGATE_ST0X { NULL, NULL, NULL, seagate_st0x_proc_info, \
+ NULL, seagate_st0x_detect, \
+ NULL, \
+ seagate_st0x_info, seagate_st0x_command, \
+ seagate_st0x_queue_command, seagate_st0x_abort, \
+ seagate_st0x_reset, NULL, seagate_st0x_biosparam, \
+ 1, 7, SG_ALL, 1, 0, 0, DISABLE_CLUSTERING}
+#endif
+
+
+/*
+ defining PARITY causes parity data to be checked
+*/
+
+#define PARITY
+
+
+/*
+ Thanks to Brian Antoine for the example code in his Messy-Loss ST-01
+ driver, and Mitsugu Suzuki for information on the ST-01
+ SCSI host.
+*/
+
+/*
+ CONTROL defines
+*/
+
+#define CMD_RST 0x01
+#define CMD_SEL 0x02
+#define CMD_BSY 0x04
+#define CMD_ATTN 0x08
+#define CMD_START_ARB 0x10
+#define CMD_EN_PARITY 0x20
+#define CMD_INTR 0x40
+#define CMD_DRVR_ENABLE 0x80
+
+/*
+ STATUS
+*/
+
+#define STAT_BSY 0x01
+#define STAT_MSG 0x02
+#define STAT_IO 0x04
+#define STAT_CD 0x08
+#define STAT_REQ 0x10
+#define STAT_SEL 0x20
+#define STAT_PARITY 0x40
+#define STAT_ARB_CMPL 0x80
+
+/*
+ REQUESTS
+*/
+
+#define REQ_MASK (STAT_CD | STAT_IO | STAT_MSG)
+#define REQ_DATAOUT 0
+#define REQ_DATAIN STAT_IO
+#define REQ_CMDOUT STAT_CD
+#define REQ_STATIN (STAT_CD | STAT_IO)
+#define REQ_MSGOUT (STAT_MSG | STAT_CD)
+#define REQ_MSGIN (STAT_MSG | STAT_CD | STAT_IO)
+
+extern volatile int seagate_st0x_timeout;
+
+#ifdef PARITY
+ #define BASE_CMD CMD_EN_PARITY
+#else
+ #define BASE_CMD 0
+#endif
+
+/*
+ Debugging code
+*/
+
+#define PHASE_BUS_FREE 1
+#define PHASE_ARBITRATION 2
+#define PHASE_SELECTION 4
+#define PHASE_DATAIN 8
+#define PHASE_DATAOUT 0x10
+#define PHASE_CMDOUT 0x20
+#define PHASE_MSGIN 0x40
+#define PHASE_MSGOUT 0x80
+#define PHASE_STATUSIN 0x100
+#define PHASE_ETC (PHASE_DATAIN | PHASE_DATAOUT | PHASE_CMDOUT | PHASE_MSGIN | PHASE_MSGOUT | PHASE_STATUSIN)
+#define PRINT_COMMAND 0x200
+#define PHASE_EXIT 0x400
+#define PHASE_RESELECT 0x800
+#define DEBUG_FAST 0x1000
+#define DEBUG_SG 0x2000
+#define DEBUG_LINKED 0x4000
+#define DEBUG_BORKEN 0x8000
+
+/*
+ * Control options - these are timeouts specified in .01 seconds.
+ */
+
+/* 30, 20 work */
+#define ST0X_BUS_FREE_DELAY 25
+#define ST0X_SELECTION_DELAY 25
+
+#define eoi() __asm__("push %%eax\nmovb $0x20, %%al\noutb %%al, $0x20\npop %%eax"::)
+
+#define SEAGATE 1 /* these determine the type of the controller */
+#define FD 2
+
+#define ST0X_ID_STR "Seagate ST-01/ST-02"
+#define FD_ID_STR "TMC-8XX/TMC-950"
+
+#endif
+
diff --git a/linux/src/drivers/scsi/sr.c b/linux/src/drivers/scsi/sr.c
new file mode 100644
index 0000000..be64fb1
--- /dev/null
+++ b/linux/src/drivers/scsi/sr.c
@@ -0,0 +1,1290 @@
+/*
+ * sr.c Copyright (C) 1992 David Giller
+ * Copyright (C) 1993, 1994, 1995 Eric Youngdale
+ *
+ * adapted from:
+ * sd.c Copyright (C) 1992 Drew Eckhardt
+ * Linux scsi disk driver by
+ * Drew Eckhardt <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale ericy@cais.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ *
+ * Modified by Eric Youngdale eric@aib.com to support loadable
+ * low-level scsi drivers.
+ *
+ * Modified by Thomas Quinot thomas@melchior.cuivre.fdn.fr to
+ * provide auto-eject.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cdrom.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+
+#define MAJOR_NR SCSI_CDROM_MAJOR
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sr.h"
+#include <scsi/scsi_ioctl.h> /* For the door lock/unlock commands */
+#include "constants.h"
+
+#define MAX_RETRIES 3
+#define SR_TIMEOUT (30 * HZ)
+
+static int sr_init(void);
+static void sr_finish(void);
+static int sr_attach(Scsi_Device *);
+static int sr_detect(Scsi_Device *);
+static void sr_detach(Scsi_Device *);
+
+struct Scsi_Device_Template sr_template = {NULL, "cdrom", "sr", NULL, TYPE_ROM,
+ SCSI_CDROM_MAJOR, 0, 0, 0, 1,
+ sr_detect, sr_init,
+ sr_finish, sr_attach, sr_detach};
+
+Scsi_CD * scsi_CDs = NULL;
+static int * sr_sizes;
+
+static int * sr_blocksizes;
+
+static int sr_open(struct inode *, struct file *);
+void get_sectorsize(int);
+void sr_photocd(struct inode *);
+
+extern int sr_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+void requeue_sr_request (Scsi_Cmnd * SCpnt);
+static int check_cdrom_media_change(kdev_t);
+
+static void sr_release(struct inode * inode, struct file * file)
+{
+ sync_dev(inode->i_rdev);
+ if(! --scsi_CDs[MINOR(inode->i_rdev)].device->access_count)
+ {
+ sr_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ if (scsi_CDs[MINOR(inode->i_rdev)].auto_eject)
+ sr_ioctl(inode, NULL, CDROMEJECT, 0);
+ }
+ if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
+ (*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)--;
+ if(sr_template.usage_count) (*sr_template.usage_count)--;
+}
+
+static struct file_operations sr_fops =
+{
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ sr_ioctl, /* ioctl */
+ NULL, /* mmap */
+ sr_open, /* special open code */
+ sr_release, /* release */
+ NULL, /* fsync */
+ NULL, /* fasync */
+ check_cdrom_media_change, /* Disk change */
+ NULL /* revalidate */
+};
+
+/*
+ * This function checks to see if the media has been changed in the
+ * CDROM drive. It is possible that we have already sensed a change,
+ * or the drive may have sensed one and not yet reported it. We must
+ * be ready for either case. This function always reports the current
+ * value of the changed bit. If flag is 0, then the changed bit is reset.
+ * This function could be done as an ioctl, but we would need to have
+ * an inode for that to work, and we do not always have one.
+ */
+
+int check_cdrom_media_change(kdev_t full_dev){
+ int retval, target;
+ struct inode inode;
+ int flag = 0;
+
+ target = MINOR(full_dev);
+
+ if (target >= sr_template.nr_dev) {
+ printk("CD-ROM request error: invalid device.\n");
+ return 0;
+ };
+
+ inode.i_rdev = full_dev; /* This is all we really need here */
+ retval = sr_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
+
+ if(retval){ /* Unable to test, unit probably not ready. This usually
+ * means there is no disc in the drive. Mark as changed,
+ * and we will figure it out later once the drive is
+ * available again. */
+
+ scsi_CDs[target].device->changed = 1;
+ return 1; /* This will force a flush, if called from
+ * check_disk_change */
+ };
+
+ retval = scsi_CDs[target].device->changed;
+ if(!flag) {
+ scsi_CDs[target].device->changed = 0;
+ /* If the disk changed, the capacity will now be different,
+ * so we force a re-read of this information */
+ if (retval) scsi_CDs[target].needs_sector_size = 1;
+ };
+ return retval;
+}
+
+/*
+ * rw_intr is the interrupt routine for the device driver. It is notified at the
+ * end of a SCSI read / write, and takes one of several actions based on success or failure.
+ */
+
+static void rw_intr (Scsi_Cmnd * SCpnt)
+{
+ int result = SCpnt->result;
+ int this_count = SCpnt->this_count;
+ int good_sectors = (result == 0 ? this_count : 0);
+ int block_sectors = 0;
+
+#ifdef DEBUG
+ printk("sr.c done: %x %x\n",result, SCpnt->request.bh->b_data);
+#endif
+ /*
+ Handle MEDIUM ERRORs or VOLUME OVERFLOWs that indicate partial success.
+ Since this is a relatively rare error condition, no care is taken to
+ avoid additional work such as memcpy's that could otherwise be skipped.
+ */
+
+ if (driver_byte(result) != 0 && /* An error occurred */
+ SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */
+ (SCpnt->sense_buffer[2] == MEDIUM_ERROR ||
+ SCpnt->sense_buffer[2] == VOLUME_OVERFLOW ||
+ SCpnt->sense_buffer[2] == ILLEGAL_REQUEST))
+ {
+ long error_sector = (SCpnt->sense_buffer[3] << 24) |
+ (SCpnt->sense_buffer[4] << 16) |
+ (SCpnt->sense_buffer[5] << 8) |
+ SCpnt->sense_buffer[6];
+ int device_nr = DEVICE_NR(SCpnt->request.rq_dev);
+ if (SCpnt->request.bh != NULL)
+ block_sectors = SCpnt->request.bh->b_size >> 9;
+ if (block_sectors < 4) block_sectors = 4;
+ if (scsi_CDs[device_nr].sector_size == 2048)
+ error_sector <<= 2;
+ error_sector &= ~ (block_sectors - 1);
+ good_sectors = error_sector - SCpnt->request.sector;
+ if (good_sectors < 0 || good_sectors >= this_count)
+ good_sectors = 0;
+ /*
+ The SCSI specification allows for the value returned by READ
+ CAPACITY to be up to 75 2K sectors past the last readable
+ block. Therefore, if we hit a medium error within the last
+ 75 2K sectors, we decrease the saved size value.
+ */
+ if ((error_sector >> 1) < sr_sizes[device_nr] &&
+ scsi_CDs[device_nr].capacity - error_sector < 4*75)
+ sr_sizes[device_nr] = error_sector >> 1;
+ }
+
+ if (good_sectors > 0)
+ { /* Some sectors were read successfully. */
+ if (SCpnt->use_sg == 0) {
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ {
+ int offset;
+ offset = (SCpnt->request.sector % 4) << 9;
+ memcpy((char *)SCpnt->request.buffer,
+ (char *)SCpnt->buffer + offset,
+ good_sectors << 9);
+ /* Even though we are not using scatter-gather, we look
+ * ahead and see if there is a linked request for the
+ * other half of this buffer. If there is, then satisfy
+ * it. */
+ if((offset == 0) && good_sectors == 2 &&
+ SCpnt->request.nr_sectors > good_sectors &&
+ SCpnt->request.bh &&
+ SCpnt->request.bh->b_reqnext &&
+ SCpnt->request.bh->b_reqnext->b_size == 1024) {
+ memcpy((char *)SCpnt->request.bh->b_reqnext->b_data,
+ (char *)SCpnt->buffer + 1024,
+ 1024);
+ good_sectors += 2;
+ };
+
+ scsi_free(SCpnt->buffer, 2048);
+ }
+ } else {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ if (sgpnt[i].alt_address != sgpnt[i].address) {
+ memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
+ };
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ };
+ };
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ if(SCpnt->request.sector % 4) good_sectors -= 2;
+ /* See if there is a padding record at the end that needs to be removed */
+ if(good_sectors > SCpnt->request.nr_sectors)
+ good_sectors -= 2;
+ };
+
+#ifdef DEBUG
+ printk("(%x %x %x) ",SCpnt->request.bh, SCpnt->request.nr_sectors,
+ good_sectors);
+#endif
+ if (SCpnt->request.nr_sectors > this_count)
+ {
+ SCpnt->request.errors = 0;
+ if (!SCpnt->request.bh)
+ panic("sr.c: linked page request (%lx %x)",
+ SCpnt->request.sector, this_count);
+ }
+
+ SCpnt = end_scsi_request(SCpnt, 1, good_sectors); /* All done */
+ if (result == 0)
+ {
+ requeue_sr_request(SCpnt);
+ return;
+ }
+ }
+
+ if (good_sectors == 0) {
+ /* We only come through here if no sectors were read successfully. */
+
+ /* Free up any indirection buffers we allocated for DMA purposes. */
+ if (SCpnt->use_sg) {
+ struct scatterlist * sgpnt;
+ int i;
+ sgpnt = (struct scatterlist *) SCpnt->buffer;
+ for(i=0; i<SCpnt->use_sg; i++) {
+ if (sgpnt[i].alt_address) {
+ scsi_free(sgpnt[i].address, sgpnt[i].length);
+ }
+ }
+ scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */
+ } else {
+ if (SCpnt->buffer != SCpnt->request.buffer)
+ scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ }
+
+ }
+
+ if (driver_byte(result) != 0) {
+ if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
+ if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
+ /* detected disc change. set a bit and quietly refuse
+ * further access. */
+
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt);
+ return;
+ }
+ }
+
+ if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
+ printk("CD-ROM error: ");
+ print_sense("sr", SCpnt);
+ printk("command was: ");
+ print_command(SCpnt->cmnd);
+ if (scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
+ requeue_sr_request(SCpnt);
+ result = 0;
+ return;
+ } else {
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt); /* Do next request */
+ return;
+ }
+
+ }
+
+ if (SCpnt->sense_buffer[2] == NOT_READY) {
+ printk("CD-ROM not ready. Make sure you have a disc in the drive.\n");
+ SCpnt = end_scsi_request(SCpnt, 0, this_count);
+ requeue_sr_request(SCpnt); /* Do next request */
+ return;
+ }
+
+ if (SCpnt->sense_buffer[2] == MEDIUM_ERROR) {
+ printk("scsi%d: MEDIUM ERROR on "
+ "channel %d, id %d, lun %d, CDB: ",
+ SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ print_sense("sr", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
+ requeue_sr_request(SCpnt);
+ return;
+ }
+
+ if (SCpnt->sense_buffer[2] == VOLUME_OVERFLOW) {
+ printk("scsi%d: VOLUME OVERFLOW on "
+ "channel %d, id %d, lun %d, CDB: ",
+ SCpnt->host->host_no, (int) SCpnt->channel,
+ (int) SCpnt->target, (int) SCpnt->lun);
+ print_command(SCpnt->cmnd);
+ print_sense("sr", SCpnt);
+ SCpnt = end_scsi_request(SCpnt, 0, block_sectors);
+ requeue_sr_request(SCpnt);
+ return;
+ }
+ }
+
+ /* We only get this far if we have an error we have not recognized */
+ if(result) {
+ printk("SCSI CD error : host %d id %d lun %d return code = %03x\n",
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
+ scsi_CDs[DEVICE_NR(SCpnt->request.rq_dev)].device->lun,
+ result);
+
+ if (status_byte(result) == CHECK_CONDITION)
+ print_sense("sr", SCpnt);
+
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
+ requeue_sr_request(SCpnt);
+ }
+}
+
+/*
+ * Here I tried to implement support for multisession-CD's
+ *
+ * Much of this has to be done with vendor-specific SCSI-commands, because
+ * multisession is newer than the SCSI-II standard.
+ * So I have to complete it step by step. Useful information is welcome.
+ *
+ * Actually works:
+ * - NEC: Detection and support of multisession CD's. Special handling
+ * for XA-disks is not necessary.
+ *
+ * - TOSHIBA: setting density is done here now, mounting PhotoCD's should
+ * work now without running the program "set_density"
+ * Multisession CD's are supported too.
+ *
+ * Gerd Knorr <kraxel@cs.tu-berlin.de>
+ */
+/*
+ * 19950704 operator@melchior.cuivre.fdn.fr (Thomas Quinot)
+ *
+ * - SONY: Same as Nec.
+ *
+ * - PIONEER: works with SONY code (may be others too ?)
+ */
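+
+/*
+ * Illustrative sketch (not part of the original driver, compiled out):
+ * the NEC and TOSHIBA branches below return the start of the last
+ * session as BCD-encoded minute/second/frame values; converting them
+ * to an absolute sector number works like this.  CD_SECS and CD_FRAMES
+ * come from <linux/cdrom.h>; the helper name is made up for the example.
+ */
+#if 0
+static unsigned long bcd_msf_to_sector (unsigned char m, unsigned char s,
+                                         unsigned char f)
+{
+    unsigned long min   = (m >> 4) * 10 + (m & 0x0f);   /* BCD -> binary */
+    unsigned long sec   = (s >> 4) * 10 + (s & 0x0f);
+    unsigned long frame = (f >> 4) * 10 + (f & 0x0f);
+
+    return min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
+}
+#endif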
+
+void sr_photocd(struct inode *inode)
+{
+ unsigned long sector,min,sec,frame;
+ unsigned char buf[40]; /* the buffer for the ioctl */
+ unsigned char *cmd; /* the scsi-command */
+ unsigned char *send; /* the data we send to the drive ... */
+ unsigned char *rec; /* ... and get back */
+ int rc,is_xa,no_multi;
+
+ if (scsi_CDs[MINOR(inode->i_rdev)].xa_flags & 0x02) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: CDROM and/or driver do not support multisession CD's");
+#endif
+ return;
+ }
+
+ if (!suser()) {
+ /* I'm not the superuser, so SCSI_IOCTL_SEND_COMMAND isn't allowed
+ * for me. That's why mpcd_sector will be initialized with zero,
+ * because I'm not able to get the right value. Necessary only if
+ * access_count is 1, else no disk change happened since the last
+ * call of this function and we can keep the old value.
+ */
+ if (1 == scsi_CDs[MINOR(inode->i_rdev)].device->access_count) {
+ scsi_CDs[MINOR(inode->i_rdev)].mpcd_sector = 0;
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags &= ~0x01;
+ }
+ return;
+ }
+
+ sector = 0;
+ is_xa = 0;
+ no_multi = 0;
+ cmd = rec = &buf[8];
+
+ switch(scsi_CDs[MINOR(inode->i_rdev)].device->manufacturer) {
+
+ case SCSI_MAN_NEC:
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: use NEC code\n");
+#endif
+ memset(buf,0,40);
+ *((unsigned int*)buf) = 0x0; /* we send nothing... */
+ *((unsigned int*)buf+1) = 0x16; /* and receive 0x16 bytes */
+ cmd[0] = 0xde;
+ cmd[1] = 0x03;
+ cmd[2] = 0xb0;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ if (rc != 0x28000002) /* drop "not ready" */
+ printk(KERN_WARNING"sr_photocd: ioctl error (NEC): 0x%x\n",rc);
+ break;
+ }
+ if (rec[14] != 0 && rec[14] != 0xb0) {
+ printk(KERN_INFO"sr_photocd: (NEC) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ min = (unsigned long) rec[15]/16*10 + (unsigned long) rec[15]%16;
+ sec = (unsigned long) rec[16]/16*10 + (unsigned long) rec[16]%16;
+ frame = (unsigned long) rec[17]/16*10 + (unsigned long) rec[17]%16;
+ sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
+ is_xa = (rec[14] == 0xb0);
+#ifdef DEBUG
+ if (sector) {
+ printk(KERN_DEBUG "sr_photocd: multisession CD detected. start: %lu\n",sector);
+ }
+#endif
+ break;
+
+ case SCSI_MAN_TOSHIBA:
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: use TOSHIBA code\n");
+#endif
+
+ /* we request some disc information (is it an XA-CD?
+ * where does the last session start?) */
+ memset(buf,0,40);
+ *((unsigned int*)buf) = (unsigned int) 0;
+ *((unsigned int*)buf+1) = (unsigned int) 4; /* receive 4 bytes */
+ cmd[0] = (unsigned char) 0x00c7;
+ cmd[1] = (unsigned char) 3;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ if (rc == 0x28000002) {
+ /* Got a "not ready" error. No chance to find out if this is
+ * because there is no CD in the drive or because the drive
+ * doesn't know multisession CD's. So I need to do an extra
+ * check... */
+ if (!kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_TEST_UNIT_READY, NULL)) {
+ printk(KERN_INFO "sr_photocd: (TOSHIBA) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ }
+ } else
+ printk(KERN_INFO"sr_photocd: ioctl error (TOSHIBA #1): 0x%x\n",rc);
+ break; /* if the first ioctl fails, we don't call the second one */
+ }
+ is_xa = (rec[0] == 0x20);
+ min = (unsigned long) rec[1]/16*10 + (unsigned long) rec[1]%16;
+ sec = (unsigned long) rec[2]/16*10 + (unsigned long) rec[2]%16;
+ frame = (unsigned long) rec[3]/16*10 + (unsigned long) rec[3]%16;
+ sector = min*CD_SECS*CD_FRAMES + sec*CD_FRAMES + frame;
+ if (sector) {
+ sector -= CD_BLOCK_OFFSET;
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: multisession CD detected: start: %lu\n",sector);
+#endif
+ }
+
+ /* now we do a get_density... */
+ memset(buf,0,40);
+ *((unsigned int*)buf) = (unsigned int) 0;
+ *((unsigned int*)buf+1) = (unsigned int) 12;
+ cmd[0] = (unsigned char) MODE_SENSE;
+ cmd[2] = (unsigned char) 1;
+ cmd[4] = (unsigned char) 12;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ printk(KERN_WARNING "sr_photocd: ioctl error (TOSHIBA #2): 0x%x\n",rc);
+ break;
+ }
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: get_density: 0x%x\n",rec[4]);
+#endif
+
+ /* ...and only if necessary a set_density */
+ if ((rec[4] != 0x81 && is_xa) || (rec[4] != 0 && !is_xa)) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: doing set_density\n");
+#endif
+ memset(buf,0,40);
+ *((unsigned int*)buf) = (unsigned int) 12; /* send 12 bytes */
+ *((unsigned int*)buf+1) = (unsigned int) 0;
+ cmd[0] = (unsigned char) MODE_SELECT;
+ cmd[1] = (unsigned char) (1 << 4);
+ cmd[4] = (unsigned char) 12;
+ send = &cmd[6]; /* this is a 6-Byte command */
+ send[ 3] = (unsigned char) 0x08; /* data for cmd */
+ /* density 0x81 for XA, 0 else */
+ send[ 4] = (is_xa) ?
+ (unsigned char) 0x81 : (unsigned char) 0;
+ send[10] = (unsigned char) 0x08;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+ if (rc != 0) {
+ printk(KERN_WARNING "sr_photocd: ioctl error (TOSHIBA #3): 0x%x\n",rc);
+ }
+ /* The set_density command may have changed the
+ * sector size or capacity. */
+ scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size = 1;
+ }
+ break;
+
+ case SCSI_MAN_SONY: /* Thomas QUINOT <thomas@melchior.cuivre.fdn.fr> */
+ case SCSI_MAN_PIONEER:
+ case SCSI_MAN_UNKNOWN:
+#ifdef DEBUG
+ printk(KERN_DEBUG "sr_photocd: use SONY/PIONEER code\n");
+#endif
+ get_sectorsize(MINOR(inode->i_rdev)); /* spinup (avoid timeout) */
+ memset(buf,0,40);
+ *((unsigned int*)buf) = 0x0; /* we send nothing... */
+ *((unsigned int*)buf+1) = 0x0c; /* and receive 0x0c bytes */
+ cmd[0] = READ_TOC;
+ cmd[8] = 0x0c;
+ cmd[9] = 0x40;
+ rc = kernel_scsi_ioctl(scsi_CDs[MINOR(inode->i_rdev)].device,
+ SCSI_IOCTL_SEND_COMMAND, buf);
+
+ if (rc != 0) {
+ if (rc != 0x28000002) /* drop "not ready" */
+ printk(KERN_WARNING "sr_photocd: ioctl error (SONY/PIONEER): 0x%x\n",rc);
+ break;
+ }
+ if ((rec[0] << 8) + rec[1] < 0x0a) {
+ printk(KERN_INFO "sr_photocd: (SONY/PIONEER) Hmm, seems the CDROM doesn't support multisession CD's\n");
+ no_multi = 1;
+ break;
+ }
+ sector = rec[11] + (rec[10] << 8) + (rec[9] << 16) + (rec[8] << 24);
+ is_xa = !!sector;
+#ifdef DEBUG
+ if (sector)
+ printk (KERN_DEBUG "sr_photocd: multisession CD detected. start: %lu\n",sector);
+#endif
+ break;
+
+ case SCSI_MAN_NEC_OLDCDR:
+ default:
+ sector = 0;
+ no_multi = 1;
+ break; }
+
+ scsi_CDs[MINOR(inode->i_rdev)].mpcd_sector = sector;
+ if (is_xa)
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags |= 0x01;
+ else
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags &= ~0x01;
+ if (no_multi)
+ scsi_CDs[MINOR(inode->i_rdev)].xa_flags |= 0x02;
+ return;
+}
+
+static int sr_open(struct inode * inode, struct file * filp)
+{
+ if(MINOR(inode->i_rdev) >= sr_template.nr_dev ||
+ !scsi_CDs[MINOR(inode->i_rdev)].device) return -ENXIO; /* No such device */
+
+ if (filp->f_mode & 2)
+ return -EROFS;
+
+ if(sr_template.usage_count) (*sr_template.usage_count)++;
+
+ sr_ioctl(inode,filp,CDROMCLOSETRAY,0);
+ check_disk_change(inode->i_rdev);
+
+ if(!scsi_CDs[MINOR(inode->i_rdev)].device->access_count++)
+ sr_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+ if (scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)
+ (*scsi_CDs[MINOR(inode->i_rdev)].device->host->hostt->usage_count)++;
+
+ sr_photocd(inode);
+
+ /* If this device did not have media in the drive at boot time, then
+ * we would have been unable to get the sector size. Check to see if
+ * this is the case, and try again.
+ */
+
+ if(scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size)
+ get_sectorsize(MINOR(inode->i_rdev));
+
+ return 0;
+}
+
+
+/*
+ * do_sr_request() is the request handler function for the sr driver.
+ * Its function in life is to take block device requests, and
+ * translate them to SCSI commands.
+ */
+
+static void do_sr_request (void)
+{
+ Scsi_Cmnd * SCpnt = NULL;
+ struct request * req = NULL;
+ Scsi_Device * SDev;
+ unsigned long flags;
+ int flag = 0;
+
+ while (1==1){
+ save_flags(flags);
+ cli();
+ if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
+ restore_flags(flags);
+ return;
+ };
+
+ INIT_SCSI_REQUEST;
+
+ SDev = scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device;
+
+ /*
+ * I am not sure where the best place to do this is. We need
+ * to hook in at a place we are likely to reach when called from
+ * user space.
+ */
+ if( SDev->was_reset )
+ {
+ /*
+ * We need to relock the door, but we might
+ * be in an interrupt handler. Only do this
+ * from user space, since we do not want to
+ * sleep from an interrupt.
+ */
+ if( SDev->removable && !intr_count )
+ {
+ scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
+ }
+ SDev->was_reset = 0;
+ }
+
+ if (flag++ == 0)
+ SCpnt = allocate_device(&CURRENT,
+ scsi_CDs[DEVICE_NR(CURRENT->rq_dev)].device, 0);
+ else SCpnt = NULL;
+ restore_flags(flags);
+
+ /* This is a performance enhancement. We dig down into the request list and
+ * try to find a queueable request (i.e. device not busy, and host able to
+ * accept another command). If we find one, then we queue it. This can
+ * make a big difference on systems with more than one disk drive. We want
+ * to have the interrupts off when monkeying with the request list, because
+ * otherwise the kernel might try to slip in a request in between somewhere. */
+
+ if (!SCpnt && sr_template.nr_dev > 1){
+ struct request *req1;
+ req1 = NULL;
+ save_flags(flags);
+ cli();
+ req = CURRENT;
+ while(req){
+ SCpnt = request_queueable(req,
+ scsi_CDs[DEVICE_NR(req->rq_dev)].device);
+ if(SCpnt) break;
+ req1 = req;
+ req = req->next;
+ };
+ if (SCpnt && req->rq_status == RQ_INACTIVE) {
+ if (req == CURRENT)
+ CURRENT = CURRENT->next;
+ else
+ req1->next = req->next;
+ };
+ restore_flags(flags);
+ };
+
+ if (!SCpnt)
+ return; /* Could not find anything to do */
+
+ wake_up(&wait_for_request);
+
+ /* Queue command */
+ requeue_sr_request(SCpnt);
+ }; /* While */
+}
+
+void requeue_sr_request (Scsi_Cmnd * SCpnt)
+{
+ unsigned int dev, block, realcount;
+ unsigned char cmd[12], *buffer, tries;
+ int this_count, start, end_rec;
+
+ tries = 2;
+
+ repeat:
+ if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
+ do_sr_request();
+ return;
+ }
+
+ dev = MINOR(SCpnt->request.rq_dev);
+ block = SCpnt->request.sector;
+ buffer = NULL;
+ this_count = 0;
+
+ if (dev >= sr_template.nr_dev) {
+ /* printk("CD-ROM request error: invalid device.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ if (!scsi_CDs[dev].use) {
+ /* printk("CD-ROM request error: device marked not in use.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ if (scsi_CDs[dev].device->changed) {
+ /*
+ * quietly refuse to do anything to a changed disc
+ * until the changed bit has been reset
+ */
+ /* printk("CD-ROM has been changed. Prohibiting further I/O.\n"); */
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ tries = 2;
+ goto repeat;
+ }
+
+ switch (SCpnt->request.cmd)
+ {
+ case WRITE:
+ SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
+ goto repeat;
+ break;
+ case READ :
+ cmd[0] = READ_6;
+ break;
+ default :
+ panic ("Unknown sr command %d\n", SCpnt->request.cmd);
+ }
+
+ cmd[1] = (SCpnt->lun << 5) & 0xe0;
+
+ /*
+ * Now do the grungy work of figuring out which sectors we need, and
+ * where in memory we are going to put them.
+ *
+ * The variables we need are:
+ *
+ * this_count= number of 512 byte sectors being read
+ * block = starting cdrom sector to read.
+ * realcount = # of cdrom sectors to read
+ *
+ * The major difference between a scsi disk and a scsi cdrom
+ * is that we will always use scatter-gather if we can, because we can
+ * work around the fact that the buffer cache has a block size of 1024,
+ * and we have 2048 byte sectors. This code should work for buffers that
+ * are any multiple of 512 bytes long.
+ */
+
+ SCpnt->use_sg = 0;
+
+ if (SCpnt->host->sg_tablesize > 0 &&
+ (!need_isa_buffer ||
+ dma_free_sectors >= 10)) {
+ struct buffer_head * bh;
+ struct scatterlist * sgpnt;
+ int count, this_count_max;
+ bh = SCpnt->request.bh;
+ this_count = 0;
+ count = 0;
+ this_count_max = (scsi_CDs[dev].ten ? 0xffff : 0xff) << 4;
+ /* Calculate how many links we can use. First see if we need
+ * a padding record at the start */
+ this_count = SCpnt->request.sector % 4;
+ if(this_count) count++;
+ while(bh && count < SCpnt->host->sg_tablesize) {
+ if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
+ this_count += (bh->b_size >> 9);
+ count++;
+ bh = bh->b_reqnext;
+ };
+ /* Fix up in case of an odd record at the end */
+ end_rec = 0;
+ if(this_count % 4) {
+ if (count < SCpnt->host->sg_tablesize) {
+ count++;
+ end_rec = (4 - (this_count % 4)) << 9;
+ this_count += 4 - (this_count % 4);
+ } else {
+ count--;
+ this_count -= (this_count % 4);
+ };
+ };
+ SCpnt->use_sg = count; /* Number of chains */
+ /* scsi_malloc can only allocate in chunks of 512 bytes */
+ count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511;
+
+ SCpnt->sglist_len = count;
+ sgpnt = (struct scatterlist * ) scsi_malloc(count);
+ if (!sgpnt) {
+ printk("Warning - running *really* short on DMA buffers\n");
+ SCpnt->use_sg = 0; /* No memory left - bail out */
+ } else {
+ buffer = (unsigned char *) sgpnt;
+ count = 0;
+ bh = SCpnt->request.bh;
+ if(SCpnt->request.sector % 4) {
+ sgpnt[count].length = (SCpnt->request.sector % 4) << 9;
+ sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
+ if(!sgpnt[count].address) panic("SCSI DMA pool exhausted.");
+ sgpnt[count].alt_address = sgpnt[count].address; /* Flag to delete
+ if needed */
+ count++;
+ };
+ for(bh = SCpnt->request.bh; count < SCpnt->use_sg;
+ count++, bh = bh->b_reqnext) {
+ if (bh) { /* Need a placeholder at the end of the record? */
+ sgpnt[count].address = bh->b_data;
+ sgpnt[count].length = bh->b_size;
+ sgpnt[count].alt_address = NULL;
+ } else {
+ sgpnt[count].address = (char *) scsi_malloc(end_rec);
+ if(!sgpnt[count].address) panic("SCSI DMA pool exhausted.");
+ sgpnt[count].length = end_rec;
+ sgpnt[count].alt_address = sgpnt[count].address;
+ if (count+1 != SCpnt->use_sg) panic("Bad sr request list");
+ break;
+ };
+ if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
+ ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
+ sgpnt[count].alt_address = sgpnt[count].address;
+ /* We try to avoid exhausting the DMA pool, since it is easier
+ * to control usage here. In other places we might have a more
+ * pressing need, and we would be screwed if we ran out */
+ if(dma_free_sectors < (sgpnt[count].length >> 9) + 5) {
+ sgpnt[count].address = NULL;
+ } else {
+ sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
+ };
+ /* If we start running low on DMA buffers, we abort the scatter-gather
+ * operation, and free all of the memory we have allocated. We want to
+ * ensure that all scsi operations are able to do at least a non-scatter/gather
+ * operation */
+ if(sgpnt[count].address == NULL){ /* Out of dma memory */
+ printk("Warning: Running low on SCSI DMA buffers");
+ /* Try switching back to a non scatter-gather operation. */
+ while(--count >= 0){
+ if(sgpnt[count].alt_address)
+ scsi_free(sgpnt[count].address, sgpnt[count].length);
+ };
+ SCpnt->use_sg = 0;
+ scsi_free(buffer, SCpnt->sglist_len);
+ break;
+ }; /* if address == NULL */
+ }; /* if need DMA fixup */
+ }; /* for loop to fill list */
+#ifdef DEBUG
+ printk("SR: %d %d %d %d %d *** ",SCpnt->use_sg, SCpnt->request.sector,
+ this_count,
+ SCpnt->request.current_nr_sectors,
+ SCpnt->request.nr_sectors);
+ for(count=0; count<SCpnt->use_sg; count++)
+ printk("SGlist: %d %x %x %x\n", count,
+ sgpnt[count].address,
+ sgpnt[count].alt_address,
+ sgpnt[count].length);
+#endif
+ }; /* Able to allocate scatter-gather list */
+ };
+
+ if (SCpnt->use_sg == 0){
+ /* We cannot use scatter-gather. Do this the old-fashioned way */
+ if (!SCpnt->request.bh)
+ this_count = SCpnt->request.nr_sectors;
+ else
+ this_count = (SCpnt->request.bh->b_size >> 9);
+
+ start = block % 4;
+ if (start)
+ {
+ this_count = ((this_count > 4 - start) ?
+ (4 - start) : (this_count));
+ buffer = (unsigned char *) scsi_malloc(2048);
+ }
+ else if (this_count < 4)
+ {
+ buffer = (unsigned char *) scsi_malloc(2048);
+ }
+ else
+ {
+ this_count -= this_count % 4;
+ buffer = (unsigned char *) SCpnt->request.buffer;
+ if (((long) buffer) + (this_count << 9) > ISA_DMA_THRESHOLD &&
+ SCpnt->host->unchecked_isa_dma)
+ buffer = (unsigned char *) scsi_malloc(this_count << 9);
+ }
+ };
+
+ if (scsi_CDs[dev].sector_size == 2048)
+ block = block >> 2; /* These are the sectors that the cdrom uses */
+ else
+ block = block & 0xfffffffc;
+
+ realcount = (this_count + 3) / 4;
+
+ if (scsi_CDs[dev].sector_size == 512) realcount = realcount << 2;
+
+ if (((realcount > 0xff) || (block > 0x1fffff)) && scsi_CDs[dev].ten)
+ {
+ if (realcount > 0xffff)
+ {
+ realcount = 0xffff;
+ this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
+ }
+
+ cmd[0] += READ_10 - READ_6 ;
+ cmd[2] = (unsigned char) (block >> 24) & 0xff;
+ cmd[3] = (unsigned char) (block >> 16) & 0xff;
+ cmd[4] = (unsigned char) (block >> 8) & 0xff;
+ cmd[5] = (unsigned char) block & 0xff;
+ cmd[6] = cmd[9] = 0;
+ cmd[7] = (unsigned char) (realcount >> 8) & 0xff;
+ cmd[8] = (unsigned char) realcount & 0xff;
+ }
+ else
+ {
+ if (realcount > 0xff)
+ {
+ realcount = 0xff;
+ this_count = realcount * (scsi_CDs[dev].sector_size >> 9);
+ }
+
+ cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
+ cmd[2] = (unsigned char) ((block >> 8) & 0xff);
+ cmd[3] = (unsigned char) block & 0xff;
+ cmd[4] = (unsigned char) realcount;
+ cmd[5] = 0;
+ }
+
+#ifdef DEBUG
+ {
+ int i;
+ printk("ReadCD: %d %d %d %d\n",block, realcount, buffer, this_count);
+ printk("Use sg: %d\n", SCpnt->use_sg);
+ printk("Dumping command: ");
+ for(i=0; i<12; i++) printk("%2.2x ", cmd[i]);
+ printk("\n");
+ };
+#endif
+
+ /* Some dumb host adapters can speed transfers by knowing the
+ * minimum transfersize in advance.
+ *
+ * We shouldn't disconnect in the middle of a sector, but the cdrom
+ * sector size can be larger than the size of a buffer and the
+ * transfer may be split to the size of a buffer. So it's safe to
+ * assume that we can at least transfer the minimum of the buffer
+ * size (1024) and the sector size between each connect / disconnect.
+ */
+
+ SCpnt->transfersize = (scsi_CDs[dev].sector_size > 1024) ?
+ 1024 : scsi_CDs[dev].sector_size;
+
+ SCpnt->this_count = this_count;
+ scsi_do_cmd (SCpnt, (void *) cmd, buffer,
+ realcount * scsi_CDs[dev].sector_size,
+ rw_intr, SR_TIMEOUT, MAX_RETRIES);
+}
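+
+/*
+ * Illustrative sketch (not part of the original driver, compiled out):
+ * requeue_sr_request() above maps 512-byte buffer-cache sectors onto
+ * CD-ROM blocks.  For the usual 2048-byte medium the arithmetic
+ * reduces to this; the helper name is made up for the example.
+ */
+#if 0
+static void map_to_cd_blocks (unsigned int sector, unsigned int nr_sectors,
+                              unsigned int *block, unsigned int *realcount)
+{
+    *block = sector >> 2;                /* four 512-byte sectors per 2048-byte block */
+    *realcount = (nr_sectors + 3) / 4;   /* round up to whole CD-ROM blocks */
+}
+#endif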
+
+static int sr_detect(Scsi_Device * SDp){
+
+ if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return 0;
+
+#ifdef MACH
+ printk("Detected scsi CD-ROM cd%d at scsi%d, channel %d, id %d, lun %d\n",
+#else
+ printk("Detected scsi CD-ROM sr%d at scsi%d, channel %d, id %d, lun %d\n",
+#endif
+ sr_template.dev_noticed++,
+ SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+
+ return 1;
+}
+
+static int sr_attach(Scsi_Device * SDp){
+ Scsi_CD * cpnt;
+ int i;
+
+ if(SDp->type != TYPE_ROM && SDp->type != TYPE_WORM) return 1;
+
+ if (sr_template.nr_dev >= sr_template.dev_max)
+ {
+ SDp->attached--;
+ return 1;
+ }
+
+ for(cpnt = scsi_CDs, i=0; i<sr_template.dev_max; i++, cpnt++)
+ if(!cpnt->device) break;
+
+ if(i >= sr_template.dev_max) panic ("scsi_devices corrupt (sr)");
+
+ SDp->scsi_request_fn = do_sr_request;
+ scsi_CDs[i].device = SDp;
+ sr_template.nr_dev++;
+ if(sr_template.nr_dev > sr_template.dev_max)
+ panic ("scsi_devices corrupt (sr)");
+ return 0;
+}
+
+
+static void sr_init_done (Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+void get_sectorsize(int i){
+ unsigned char cmd[12];
+ unsigned char *buffer;
+ int the_result, retries;
+ Scsi_Cmnd * SCpnt;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ SCpnt = allocate_device(NULL, scsi_CDs[i].device, 1);
+
+ retries = 3;
+ do {
+ cmd[0] = READ_CAPACITY;
+ cmd[1] = (scsi_CDs[i].device->lun << 5) & 0xe0;
+ memset ((void *) &cmd[2], 0, 8);
+ SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Mark as really busy */
+ SCpnt->cmd_len = 0;
+
+ memset(buffer, 0, 8);
+
+ /* Do the command and wait.. */
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd (SCpnt,
+ (void *) cmd, (void *) buffer,
+ 512, sr_init_done, SR_TIMEOUT,
+ MAX_RETRIES);
+ down(&sem);
+ }
+
+ the_result = SCpnt->result;
+ retries--;
+
+ } while(the_result && retries);
+
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Mark as not busy */
+
+ wake_up(&SCpnt->device->device_wait);
+
+ if (the_result) {
+ scsi_CDs[i].capacity = 0x1fffff;
+ scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ scsi_CDs[i].needs_sector_size = 1;
+ } else {
+ scsi_CDs[i].capacity = 1 + ((buffer[0] << 24) |
+ (buffer[1] << 16) |
+ (buffer[2] << 8) |
+ buffer[3]);
+ scsi_CDs[i].sector_size = (buffer[4] << 24) |
+ (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ switch (scsi_CDs[i].sector_size) {
+ /*
+ * HP 4020i CD-Recorder reports 2340 byte sectors
+ * Philips CD-Writers report 2352 byte sectors
+ *
+ * Use 2k sectors for them..
+ */
+ case 0: case 2340: case 2352:
+ scsi_CDs[i].sector_size = 2048;
+ /* fall through */
+ case 2048:
+ scsi_CDs[i].capacity *= 4;
+ /* fall through */
+ case 512:
+ break;
+ default:
+#ifdef MACH
+ printk ("cd%d : unsupported sector size %d.\n",
+ i, scsi_CDs[i].sector_size);
+#else
+ printk ("scd%d : unsupported sector size %d.\n",
+ i, scsi_CDs[i].sector_size);
+#endif
+ scsi_CDs[i].capacity = 0;
+ scsi_CDs[i].needs_sector_size = 1;
+ }
+ scsi_CDs[i].needs_sector_size = 0;
+ sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
+ };
+ scsi_free(buffer, 512);
+}
+
+static int sr_registered = 0;
+
+static int sr_init()
+{
+ int i;
+
+ if(sr_template.dev_noticed == 0) return 0;
+
+ if(!sr_registered) {
+ if (register_blkdev(MAJOR_NR,"sr",&sr_fops)) {
+ printk("Unable to get major %d for SCSI-CD\n",MAJOR_NR);
+ return 1;
+ }
+ sr_registered++;
+ }
+
+
+ if (scsi_CDs) return 0;
+ sr_template.dev_max = sr_template.dev_noticed + SR_EXTRA_DEVS;
+ scsi_CDs = (Scsi_CD *) scsi_init_malloc(sr_template.dev_max * sizeof(Scsi_CD), GFP_ATOMIC);
+ memset(scsi_CDs, 0, sr_template.dev_max * sizeof(Scsi_CD));
+
+ sr_sizes = (int *) scsi_init_malloc(sr_template.dev_max * sizeof(int), GFP_ATOMIC);
+ memset(sr_sizes, 0, sr_template.dev_max * sizeof(int));
+
+ sr_blocksizes = (int *) scsi_init_malloc(sr_template.dev_max *
+ sizeof(int), GFP_ATOMIC);
+ for(i=0;i<sr_template.dev_max;i++) sr_blocksizes[i] = 2048;
+ blksize_size[MAJOR_NR] = sr_blocksizes;
+ return 0;
+}
+
+void sr_finish()
+{
+ int i;
+
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ blk_size[MAJOR_NR] = sr_sizes;
+
+ for (i = 0; i < sr_template.nr_dev; ++i)
+ {
+ /* If we have already seen this, then skip it. Comes up
+ * with loadable modules. */
+ if (scsi_CDs[i].capacity) continue;
+ scsi_CDs[i].capacity = 0x1fffff;
+ scsi_CDs[i].sector_size = 2048; /* A guess, just in case */
+ scsi_CDs[i].needs_sector_size = 1;
+#if 0
+ /* seems better to leave this for later */
+ get_sectorsize(i);
+ printk("Scd sectorsize = %d bytes.\n", scsi_CDs[i].sector_size);
+#endif
+ scsi_CDs[i].use = 1;
+ scsi_CDs[i].ten = 1;
+ scsi_CDs[i].remap = 1;
+ scsi_CDs[i].auto_eject = 0; /* Default is not to eject upon unmount. */
+ sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
+ }
+
+
+ /* If our host adapter is capable of scatter-gather, then we increase
+ * the read-ahead to 16 blocks (32 sectors). If not, we use
+ * a two block (4 sector) read ahead. */
+ if(scsi_CDs[0].device && scsi_CDs[0].device->host->sg_tablesize)
+ read_ahead[MAJOR_NR] = 32; /* 32 sector read-ahead. Always removable. */
+ else
+ read_ahead[MAJOR_NR] = 4; /* 4 sector read-ahead */
+
+ return;
+}
+
+static void sr_detach(Scsi_Device * SDp)
+{
+ Scsi_CD * cpnt;
+ int i;
+
+ for(cpnt = scsi_CDs, i=0; i<sr_template.dev_max; i++, cpnt++)
+ if(cpnt->device == SDp) {
+ kdev_t devi = MKDEV(MAJOR_NR, i);
+
+ /*
+ * Since the cdrom is read-only, no need to sync the device.
+ * We should be kind to our buffer cache, however.
+ */
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+
+ /*
+ * Reset things back to a sane state so that one can re-load a new
+ * driver (perhaps the same one).
+ */
+ cpnt->device = NULL;
+ cpnt->capacity = 0;
+ SDp->attached--;
+ sr_template.nr_dev--;
+ sr_template.dev_noticed--;
+ sr_sizes[i] = 0;
+ return;
+ }
+ return;
+}
+
+
+#ifdef MODULE
+
+int init_module(void) {
+ sr_template.usage_count = &mod_use_count_;
+ return scsi_register_module(MODULE_SCSI_DEV, &sr_template);
+}
+
+void cleanup_module( void)
+{
+ scsi_unregister_module(MODULE_SCSI_DEV, &sr_template);
+ unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
+ sr_registered--;
+ if(scsi_CDs != NULL) {
+ scsi_init_free((char *) scsi_CDs,
+ (sr_template.dev_noticed + SR_EXTRA_DEVS)
+ * sizeof(Scsi_CD));
+
+ scsi_init_free((char *) sr_sizes, sr_template.dev_max * sizeof(int));
+ scsi_init_free((char *) sr_blocksizes, sr_template.dev_max * sizeof(int));
+ }
+
+ blksize_size[MAJOR_NR] = NULL;
+ blk_dev[MAJOR_NR].request_fn = NULL;
+ blk_size[MAJOR_NR] = NULL;
+ read_ahead[MAJOR_NR] = 0;
+
+ sr_template.dev_max = 0;
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/sr.h b/linux/src/drivers/scsi/sr.h
new file mode 100644
index 0000000..381678a
--- /dev/null
+++ b/linux/src/drivers/scsi/sr.h
@@ -0,0 +1,40 @@
+/*
+ * sr.h by David Giller
+ * CD-ROM disk driver header file
+ *
+ * adapted from:
+ * sd.h Copyright (C) 1992 Drew Eckhardt
+ * SCSI disk driver header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ *
+ * Modified by Eric Youngdale eric@aib.com to
+ * add scatter-gather, multiple outstanding request, and other
+ * enhancements.
+ */
+
+#ifndef _SR_H
+#define _SR_H
+
+#include "scsi.h"
+
+typedef struct
+ {
+ unsigned capacity; /* size in blocks */
+ unsigned sector_size; /* size in bytes */
+ Scsi_Device *device;
+ unsigned long mpcd_sector; /* for reading multisession-CD's */
+ char xa_flags; /* some flags for handling XA-CD's */
+ unsigned char sector_bit_size; /* sector size = 2^sector_bit_size */
+ unsigned char sector_bit_shift; /* sectors/FS block = 2^sector_bit_shift*/
+ unsigned needs_sector_size:1; /* needs to get sector size */
+ unsigned ten:1; /* support ten byte commands */
+ unsigned remap:1; /* support remapping */
+ unsigned use:1; /* is this device still supportable */
+ unsigned auto_eject:1; /* auto-eject medium on last release. */
+ } Scsi_CD;
+
+extern Scsi_CD * scsi_CDs;
+
+#endif
diff --git a/linux/src/drivers/scsi/sr_ioctl.c b/linux/src/drivers/scsi/sr_ioctl.c
new file mode 100644
index 0000000..6d8b633
--- /dev/null
+++ b/linux/src/drivers/scsi/sr_ioctl.c
@@ -0,0 +1,607 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <asm/segment.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sr.h"
+#include <scsi/scsi_ioctl.h>
+
+#include <linux/cdrom.h>
+
+extern void get_sectorsize(int);
+extern void sr_photocd(struct inode *);
+
+#define IOCTL_RETRIES 3
+/* The CDROM is fairly slow, so we need a little extra time */
+/* In fact, it is very slow if it has to spin up first */
+#define IOCTL_TIMEOUT 3000
+
+static void sr_ioctl_done(Scsi_Cmnd * SCpnt)
+{
+ struct request * req;
+
+ req = &SCpnt->request;
+ req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+
+ if (req->sem != NULL) {
+ up(req->sem);
+ }
+}
+
+/* We do our own retries because we want to know what the specific
+ error code is. Normally the UNIT_ATTENTION code will automatically
+ clear after one error */
+
+static int do_ioctl(int target, unsigned char * sr_cmd, void * buffer, unsigned buflength)
+{
+ Scsi_Cmnd * SCpnt;
+ int result;
+
+ SCpnt = allocate_device(NULL, scsi_CDs[target].device, 1);
+ {
+ struct semaphore sem = MUTEX_LOCKED;
+ SCpnt->request.sem = &sem;
+ scsi_do_cmd(SCpnt,
+ (void *) sr_cmd, buffer, buflength, sr_ioctl_done,
+ IOCTL_TIMEOUT, IOCTL_RETRIES);
+ down(&sem);
+ }
+
+ result = SCpnt->result;
+
+ /* Minimal error checking. Ignore cases we know about, and report the rest. */
+ if(driver_byte(result) != 0)
+ switch(SCpnt->sense_buffer[2] & 0xf) {
+ case UNIT_ATTENTION:
+ scsi_CDs[target].device->changed = 1;
+ printk("Disc change detected.\n");
+ break;
+ case NOT_READY: /* This happens if there is no disc in drive */
+ printk("CDROM not ready. Make sure there is a disc in the drive.\n");
+ break;
+ case ILLEGAL_REQUEST:
+ /* CDROMCLOSETRAY should not print an error for caddy drives. */
+ if (!(sr_cmd[0] == START_STOP && sr_cmd[4] == 0x03))
+ printk("CDROM (ioctl) reports ILLEGAL REQUEST.\n");
+ break;
+ default:
+ printk("SCSI CD error: host %d id %d lun %d return code = %03x\n",
+ scsi_CDs[target].device->host->host_no,
+ scsi_CDs[target].device->id,
+ scsi_CDs[target].device->lun,
+ result);
+ printk("\tSense class %x, sense error %x, extended sense %x\n",
+ sense_class(SCpnt->sense_buffer[0]),
+ sense_error(SCpnt->sense_buffer[0]),
+ SCpnt->sense_buffer[2] & 0xf);
+
+ };
+
+ result = SCpnt->result;
+ SCpnt->request.rq_status = RQ_INACTIVE; /* Deallocate */
+ wake_up(&SCpnt->device->device_wait);
+ /* Wake up a process waiting for device*/
+ return result;
+}
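+
+/*
+ * Illustrative sketch (not part of the original driver, compiled out):
+ * a minimal TEST UNIT READY issued through do_ioctl() above, the same
+ * pattern the CDROM* handlers below follow with their own commands.
+ * The helper name is made up for the example; TEST_UNIT_READY comes
+ * from "scsi.h".
+ */
+#if 0
+static int sr_test_unit_ready (int target)
+{
+    u_char sr_cmd[12];
+
+    memset (sr_cmd, 0, sizeof (sr_cmd));
+    sr_cmd[0] = TEST_UNIT_READY;
+    sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+
+    return do_ioctl (target, sr_cmd, NULL, 0);  /* no data transferred */
+}
+#endif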
+
+int sr_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
+{
+ u_char sr_cmd[12];
+
+ kdev_t dev = inode->i_rdev;
+ int result, target, err;
+
+ target = MINOR(dev);
+
+ if (target >= sr_template.nr_dev ||
+ !scsi_CDs[target].device) return -ENXIO;
+
+ switch (cmd)
+ {
+ /* Sun-compatible */
+ case CDROMPAUSE:
+
+ sr_cmd[0] = SCMD_PAUSE_RESUME;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = 0;
+ sr_cmd[5] = sr_cmd[6] = sr_cmd[7] = 0;
+ sr_cmd[8] = 0;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMRESUME:
+
+ sr_cmd[0] = SCMD_PAUSE_RESUME;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = 0;
+ sr_cmd[5] = sr_cmd[6] = sr_cmd[7] = 0;
+ sr_cmd[8] = 1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+
+ return result;
+
+ case CDROMPLAYMSF:
+ {
+ struct cdrom_msf msf;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (msf));
+ if (err) return err;
+
+ memcpy_fromfs(&msf, (void *) arg, sizeof(msf));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO_MSF;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = 0;
+ sr_cmd[3] = msf.cdmsf_min0;
+ sr_cmd[4] = msf.cdmsf_sec0;
+ sr_cmd[5] = msf.cdmsf_frame0;
+ sr_cmd[6] = msf.cdmsf_min1;
+ sr_cmd[7] = msf.cdmsf_sec1;
+ sr_cmd[8] = msf.cdmsf_frame1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+ }
+
+ case CDROMPLAYBLK:
+ {
+ struct cdrom_blk blk;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (blk));
+ if (err) return err;
+
+ memcpy_fromfs(&blk, (void *) arg, sizeof(blk));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO10;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = blk.from >> 24;
+ sr_cmd[3] = blk.from >> 16;
+ sr_cmd[4] = blk.from >> 8;
+ sr_cmd[5] = blk.from;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = blk.len >> 8;
+ sr_cmd[8] = blk.len;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+ }
+
+ case CDROMPLAYTRKIND:
+ {
+ struct cdrom_ti ti;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (ti));
+ if (err) return err;
+
+ memcpy_fromfs(&ti, (void *) arg, sizeof(ti));
+
+ sr_cmd[0] = SCMD_PLAYAUDIO_TI;
+ sr_cmd[1] = scsi_CDs[target].device->lun << 5;
+ sr_cmd[2] = 0;
+ sr_cmd[3] = 0;
+ sr_cmd[4] = ti.cdti_trk0;
+ sr_cmd[5] = ti.cdti_ind0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = ti.cdti_trk1;
+ sr_cmd[8] = ti.cdti_ind1;
+ sr_cmd[9] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+
+ return result;
+ }
+
+ case CDROMREADTOCHDR:
+ {
+ struct cdrom_tochdr tochdr;
+ char * buffer;
+
+ sr_cmd[0] = SCMD_READ_TOC;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 12);
+
+ tochdr.cdth_trk0 = buffer[2];
+ tochdr.cdth_trk1 = buffer[3];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tochdr));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &tochdr, sizeof (struct cdrom_tochdr));
+
+ return result;
+ }
+
+ case CDROMREADTOCENTRY:
+ {
+ struct cdrom_tocentry tocentry;
+ unsigned char * buffer;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (struct cdrom_tocentry));
+ if (err) return err;
+
+ memcpy_fromfs (&tocentry, (void *) arg, sizeof (struct cdrom_tocentry));
+
+ sr_cmd[0] = SCMD_READ_TOC;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) |
+ (tocentry.cdte_format == CDROM_MSF ? 0x02 : 0);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = tocentry.cdte_track;
+ sr_cmd[7] = 0; /* MSB of length (12) */
+ sr_cmd[8] = 12; /* LSB of length */
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl (target, sr_cmd, buffer, 12);
+
+ tocentry.cdte_ctrl = buffer[5] & 0xf;
+ tocentry.cdte_adr = buffer[5] >> 4;
+ tocentry.cdte_datamode = (tocentry.cdte_ctrl & 0x04) ? 1 : 0;
+ if (tocentry.cdte_format == CDROM_MSF) {
+ tocentry.cdte_addr.msf.minute = buffer[9];
+ tocentry.cdte_addr.msf.second = buffer[10];
+ tocentry.cdte_addr.msf.frame = buffer[11];
+ }
+ else
+ tocentry.cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ + buffer[10]) << 8) + buffer[11];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tocentry));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &tocentry, sizeof (struct cdrom_tocentry));
+
+ return result;
+ }
+
+ case CDROMSTOP:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 1;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMSTART:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 1;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 1;
+
+ result = do_ioctl(target, sr_cmd, NULL, 255);
+ return result;
+
+ case CDROMCLOSETRAY:
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5);
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0x03;
+
+ if ((result = do_ioctl(target, sr_cmd, NULL, 255)))
+ return result;
+
+ /* Gather information about newly inserted disc */
+ check_disk_change (inode->i_rdev);
+ sr_ioctl (inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
+ sr_photocd (inode);
+
+ if (scsi_CDs[MINOR(inode->i_rdev)].needs_sector_size)
+ get_sectorsize (MINOR(inode->i_rdev));
+
+ return 0;
+
+ case CDROMEJECT:
+ /*
+ * Allow 0 for access count for auto-eject feature.
+ */
+ if (scsi_CDs[target].device -> access_count > 1)
+ return -EBUSY;
+
+ sr_ioctl (inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
+ sr_cmd[0] = START_STOP;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5) | 1;
+ sr_cmd[2] = sr_cmd[3] = sr_cmd[5] = 0;
+ sr_cmd[4] = 0x02;
+
+ if (!(result = do_ioctl(target, sr_cmd, NULL, 255)))
+ scsi_CDs[target].device -> changed = 1;
+
+ return result;
+
+ case CDROMEJECT_SW:
+ scsi_CDs[target].auto_eject = !!arg;
+ return 0;
+
+ case CDROMVOLCTRL:
+ {
+ char * buffer, * mask;
+ struct cdrom_volctrl volctrl;
+
+ err = verify_area (VERIFY_READ, (void *) arg, sizeof (struct cdrom_volctrl));
+ if (err) return err;
+
+ memcpy_fromfs (&volctrl, (void *) arg, sizeof (struct cdrom_volctrl));
+
+ /* First we get the current params so we can just twiddle the volume */
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0xe; /* Want mode page 0xe, CDROM audio params */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ if ((result = do_ioctl (target, sr_cmd, buffer, 28))) {
+ printk ("Hosed while obtaining audio mode page\n");
+ scsi_free(buffer, 512);
+ return result;
+ }
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0x4e; /* Want the mask for mode page 0xe */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ mask = (unsigned char *) scsi_malloc(512);
+ if(!mask) {
+ scsi_free(buffer, 512);
+ return -ENOMEM;
+ };
+
+ if ((result = do_ioctl (target, sr_cmd, mask, 28))) {
+ printk ("Hosed while obtaining mask for audio mode page\n");
+ scsi_free(buffer, 512);
+ scsi_free(mask, 512);
+ return result;
+ }
+
+ /* Now mask and substitute our own volume and reuse the rest */
+ buffer[0] = 0; /* Clear reserved field */
+
+ buffer[21] = volctrl.channel0 & mask[21];
+ buffer[23] = volctrl.channel1 & mask[23];
+ buffer[25] = volctrl.channel2 & mask[25];
+ buffer[27] = volctrl.channel3 & mask[27];
+
+ sr_cmd[0] = MODE_SELECT;
+ sr_cmd[1] = ((scsi_CDs[target].device -> lun) << 5) | 0x10; /* Params are SCSI-2 */
+ sr_cmd[2] = sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ result = do_ioctl (target, sr_cmd, buffer, 28);
+ scsi_free(buffer, 512);
+ scsi_free(mask, 512);
+ return result;
+ }
+
+ case CDROMVOLREAD:
+ {
+ char * buffer;
+ struct cdrom_volctrl volctrl;
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_volctrl));
+ if (err) return err;
+
+ /* Get the current params */
+
+ sr_cmd[0] = MODE_SENSE;
+ sr_cmd[1] = (scsi_CDs[target].device -> lun) << 5;
+ sr_cmd[2] = 0xe; /* Want mode page 0xe, CDROM audio params */
+ sr_cmd[3] = 0;
+ sr_cmd[4] = 28;
+ sr_cmd[5] = 0;
+
+ buffer = (unsigned char *) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ if ((result = do_ioctl (target, sr_cmd, buffer, 28))) {
+ printk ("(CDROMVOLREAD) Hosed while obtaining audio mode page\n");
+ scsi_free(buffer, 512);
+ return result;
+ }
+
+ volctrl.channel0 = buffer[21];
+ volctrl.channel1 = buffer[23];
+ volctrl.channel2 = buffer[25];
+ volctrl.channel3 = buffer[27];
+
+ memcpy_tofs ((void *) arg, &volctrl, sizeof (struct cdrom_volctrl));
+
+ scsi_free(buffer, 512);
+
+ return 0;
+ }
+
+ case CDROMSUBCHNL:
+ {
+ struct cdrom_subchnl subchnl;
+ char * buffer;
+
+ sr_cmd[0] = SCMD_READ_SUBCHANNEL;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5) | 0x02; /* MSF format */
+ sr_cmd[2] = 0x40; /* I do want the subchannel info */
+ sr_cmd[3] = 0x01; /* Give me current position info */
+ sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0;
+ sr_cmd[8] = 16;
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char*) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 16);
+
+ subchnl.cdsc_audiostatus = buffer[1];
+ subchnl.cdsc_format = CDROM_MSF;
+ subchnl.cdsc_ctrl = buffer[5] & 0xf;
+ subchnl.cdsc_trk = buffer[6];
+ subchnl.cdsc_ind = buffer[7];
+
+ subchnl.cdsc_reladdr.msf.minute = buffer[13];
+ subchnl.cdsc_reladdr.msf.second = buffer[14];
+ subchnl.cdsc_reladdr.msf.frame = buffer[15];
+ subchnl.cdsc_absaddr.msf.minute = buffer[9];
+ subchnl.cdsc_absaddr.msf.second = buffer[10];
+ subchnl.cdsc_absaddr.msf.frame = buffer[11];
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_subchnl));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &subchnl, sizeof (struct cdrom_subchnl));
+ return result;
+ }
+
+ case CDROM_GET_UPC:
+ {
+ struct cdrom_mcn mcn;
+ char * buffer;
+
+ sr_cmd[0] = SCMD_READ_SUBCHANNEL;
+ sr_cmd[1] = ((scsi_CDs[target].device->lun) << 5);
+ sr_cmd[2] = 0x40; /* I do want the subchannel info */
+ sr_cmd[3] = 0x02; /* Give me medium catalog number info */
+ sr_cmd[4] = sr_cmd[5] = 0;
+ sr_cmd[6] = 0;
+ sr_cmd[7] = 0;
+ sr_cmd[8] = 24;
+ sr_cmd[9] = 0;
+
+ buffer = (unsigned char*) scsi_malloc(512);
+ if(!buffer) return -ENOMEM;
+
+ result = do_ioctl(target, sr_cmd, buffer, 24);
+
+ memcpy (mcn.medium_catalog_number, buffer + 9, 13);
+ mcn.medium_catalog_number[13] = 0;
+
+ scsi_free(buffer, 512);
+
+ err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_mcn));
+ if (err)
+ return err;
+ memcpy_tofs ((void *) arg, &mcn, sizeof (struct cdrom_mcn));
+ return result;
+ }
+
+ /* these are compatible with the ide-cd driver */
+ case CDROMREADRAW:
+ case CDROMREADMODE1:
+ case CDROMREADMODE2:
+ return -EINVAL;
+
+ /* block-copy from ../block/sbpcd.c with some adjustments... */
+ case CDROMMULTISESSION: /* tell start-of-last-session to user */
+ {
+ struct cdrom_multisession ms_info;
+ long lba;
+
+ err = verify_area(VERIFY_READ, (void *) arg,
+ sizeof(struct cdrom_multisession));
+ if (err) return (err);
+
+ memcpy_fromfs(&ms_info, (void *) arg, sizeof(struct cdrom_multisession));
+
+ if (ms_info.addr_format==CDROM_MSF) { /* MSF-bin requested */
+ lba = scsi_CDs[target].mpcd_sector+CD_BLOCK_OFFSET;
+ ms_info.addr.msf.minute = lba / (CD_SECS*CD_FRAMES);
+ lba %= CD_SECS*CD_FRAMES;
+ ms_info.addr.msf.second = lba / CD_FRAMES;
+ ms_info.addr.msf.frame = lba % CD_FRAMES;
+ } else if (ms_info.addr_format==CDROM_LBA) /* lba requested */
+ ms_info.addr.lba=scsi_CDs[target].mpcd_sector;
+ else return (-EINVAL);
+
+ ms_info.xa_flag=scsi_CDs[target].xa_flags & 0x01;
+
+ err=verify_area(VERIFY_WRITE,(void *) arg,
+ sizeof(struct cdrom_multisession));
+ if (err) return (err);
+
+ memcpy_tofs((void *) arg, &ms_info, sizeof(struct cdrom_multisession));
+ return (0);
+ }
+
+ case BLKRAGET:
+ if (!arg)
+ return -EINVAL;
+ err = verify_area(VERIFY_WRITE, (int *) arg, sizeof(int));
+ if (err)
+ return err;
+ put_user(read_ahead[MAJOR(inode->i_rdev)], (int *) arg);
+ return 0;
+
+ case BLKRASET:
+ if(!suser())
+ return -EACCES;
+ if(!(inode->i_rdev))
+ return -EINVAL;
+ if(arg > 0xff)
+ return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ RO_IOCTLS(dev,arg);
+
+ case CDROMRESET:
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ default:
+ return scsi_ioctl(scsi_CDs[target].device,cmd,(void *) arg);
+ }
+}
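+
+/*
+ * Illustrative sketch (not referenced above) of the LBA to MSF conversion
+ * performed in the CDROMMULTISESSION case: the 2 second lead-in
+ * (CD_BLOCK_OFFSET frames) is added back before splitting the frame count
+ * into minutes, seconds and frames.  The helper name is only a placeholder.
+ */
+#if 0
+static void demo_lba_to_msf(long lba, struct cdrom_msf0 *msf)
+{
+    lba += CD_BLOCK_OFFSET;               /* 75 frames/s * 2 s lead-in */
+    msf->minute = lba / (CD_SECS * CD_FRAMES);
+    lba %= CD_SECS * CD_FRAMES;
+    msf->second = lba / CD_FRAMES;
+    msf->frame  = lba % CD_FRAMES;
+}
+#endif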
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/linux/src/drivers/scsi/sym53c8xx.c b/linux/src/drivers/scsi/sym53c8xx.c
new file mode 100644
index 0000000..f496954
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx.c
@@ -0,0 +1,14696 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+/*
+** Supported SCSI features:
+** Synchronous data transfers
+** Wide16 SCSI BUS
+** Disconnection/Reselection
+** Tagged command queuing
+** SCSI Parity checking
+**
+** Supported NCR/SYMBIOS chips:
+** 53C810A (8 bits, Fast 10, no rom BIOS)
+** 53C825A (Wide, Fast 10, on-board rom BIOS)
+** 53C860 (8 bits, Fast 20, no rom BIOS)
+** 53C875 (Wide, Fast 20, on-board rom BIOS)
+** 53C876 (Wide, Fast 20 Dual, on-board rom BIOS)
+** 53C895 (Wide, Fast 40, on-board rom BIOS)
+** 53C895A (Wide, Fast 40, on-board rom BIOS)
+** 53C896 (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C897 (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C1510D (Wide, Fast 40 Dual, on-board rom BIOS)
+** 53C1010 (Wide, Fast 80 Dual, on-board rom BIOS)
+** 53C1010_66(Wide, Fast 80 Dual, on-board rom BIOS, 33/66MHz PCI)
+**
+** Other features:
+** Memory mapped IO
+** Module
+** Shared IRQ
+*/
+
+/*
+** Name and version of the driver
+*/
+#define SCSI_NCR_DRIVER_NAME "sym53c8xx-1.7.1-20000726"
+
+#define SCSI_NCR_DEBUG_FLAGS (0)
+
+#define NAME53C "sym53c"
+#define NAME53C8XX "sym53c8xx"
+
+/*==========================================================
+**
+** Include files
+**
+**==========================================================
+*/
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,17)
+#include <linux/spinlock.h>
+#elif LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+#include <asm/spinlock.h>
+#endif
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/stat.h>
+
+#include <linux/version.h>
+#include <linux/blk.h>
+
+#ifdef CONFIG_ALL_PPC
+#include <asm/prom.h>
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,35)
+#include <linux/init.h>
+#endif
+
+#ifndef __init
+#define __init
+#endif
+#ifndef __initdata
+#define __initdata
+#endif
+
+#if LINUX_VERSION_CODE <= LinuxVersionCode(2,1,92)
+#include <linux/bios32.h>
+#endif
+
+#include "scsi.h"
+#include "hosts.h"
+#include "constants.h"
+#include "sd.h"
+
+#include <linux/types.h>
+
+/*
+** Define BITS_PER_LONG for earlier linux versions.
+*/
+#ifndef BITS_PER_LONG
+#if (~0UL) == 0xffffffffUL
+#define BITS_PER_LONG 32
+#else
+#define BITS_PER_LONG 64
+#endif
+#endif
+
+/*
+**	Define the BSD style u_int32 and u_int64 types.
+**	They are in fact u_int32_t and u_int64_t. :-)
+*/
+typedef u32 u_int32;
+typedef u64 u_int64;
+
+#include "sym53c8xx.h"
+
+/*
+**	Do not compile the integrity checking code for Linux-2.3.0
+**	and above, since the SCSI data structures are not ready yet.
+*/
+/* #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0) */
+#if 0
+#define SCSI_NCR_INTEGRITY_CHECKING
+#endif
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+
+/*
+**	Hmmm... How complex some PCI-HOST bridges actually are,
+**	despite the fact that the PCI specifications look
+**	so smart and simple! ;-)
+*/
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,47)
+#define SCSI_NCR_DYNAMIC_DMA_MAPPING
+#endif
+
+/*==========================================================
+**
+** A la VMS/CAM-3 queue management.
+** Implemented from linux list management.
+**
+**==========================================================
+*/
+
+typedef struct xpt_quehead {
+ struct xpt_quehead *flink; /* Forward pointer */
+ struct xpt_quehead *blink; /* Backward pointer */
+} XPT_QUEHEAD;
+
+#define xpt_que_init(ptr) do { \
+ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+static inline void __xpt_que_add(struct xpt_quehead * new,
+ struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = new;
+ new->flink = flink;
+ new->blink = blink;
+ blink->flink = new;
+}
+
+static inline void __xpt_que_del(struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = blink;
+ blink->flink = flink;
+}
+
+static inline int xpt_que_empty(struct xpt_quehead *head)
+{
+ return head->flink == head;
+}
+
+static inline void xpt_que_splice(struct xpt_quehead *list,
+ struct xpt_quehead *head)
+{
+ struct xpt_quehead *first = list->flink;
+
+ if (first != list) {
+ struct xpt_quehead *last = list->blink;
+ struct xpt_quehead *at = head->flink;
+
+ first->blink = head;
+ head->flink = first;
+
+ last->flink = at;
+ at->blink = last;
+ }
+}
+
+#define xpt_que_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+
+#define xpt_insque(new, pos) __xpt_que_add(new, pos, (pos)->flink)
+
+#define xpt_remque(el) __xpt_que_del((el)->blink, (el)->flink)
+
+#define xpt_insque_head(new, head) __xpt_que_add(new, head, (head)->flink)
+
+static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->flink;
+
+ if (elem != head)
+ __xpt_que_del(head, elem->flink);
+ else
+ elem = 0;
+ return elem;
+}
+
+#define xpt_insque_tail(new, head) __xpt_que_add(new, (head)->blink, head)
+
+static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->blink;
+
+ if (elem != head)
+ __xpt_que_del(elem->blink, head);
+ else
+ elem = 0;
+ return elem;
+}
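+
+/*
+** Illustrative sketch (not referenced by the driver code): a structure
+** is queued by embedding an XPT_QUEHEAD and is linked, unlinked and
+** recovered with the macros above.  The 'demo' names are placeholders.
+*/
+#if 0
+struct demo_job {
+	XPT_QUEHEAD link_jobq;	/* embedded queue linkage */
+	int id;
+};
+
+static void demo_queue_usage(struct demo_job *jp)
+{
+	XPT_QUEHEAD jobq;
+	XPT_QUEHEAD *qp;
+
+	xpt_que_init(&jobq);			/* empty circular list */
+	xpt_insque_tail(&jp->link_jobq, &jobq);	/* FIFO style insertion */
+	qp = xpt_remque_head(&jobq);		/* 0 when the queue is empty */
+	if (qp)
+		jp = xpt_que_entry(qp, struct demo_job, link_jobq);
+}
+#endif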
+
+/*==========================================================
+**
+** Configuration and Debugging
+**
+**==========================================================
+*/
+
+/*
+** SCSI address of this device.
+** The boot routines should have set it.
+** If not, use this.
+*/
+
+#ifndef SCSI_NCR_MYADDR
+#define SCSI_NCR_MYADDR (7)
+#endif
+
+/*
+**	The maximum number of tags per logical unit.
+** Used only for devices that support tags.
+*/
+
+#ifndef SCSI_NCR_MAX_TAGS
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+** TAGS are actually unlimited (256 tags/lun).
+** But Linux only supports 255. :)
+*/
+#if SCSI_NCR_MAX_TAGS > 255
+#define MAX_TAGS 255
+#else
+#define MAX_TAGS SCSI_NCR_MAX_TAGS
+#endif
+
+/*
+**	Since the ncr chips only have an 8 bit ALU, we try to be clever
+**	about offset calculations in the TASK TABLE per LUN, which is an
+**	array of DWORDS = 4 bytes.
+*/
+#if MAX_TAGS > (512/4)
+#define MAX_TASKS (1024/4)
+#elif MAX_TAGS > (256/4)
+#define MAX_TASKS (512/4)
+#else
+#define MAX_TASKS (256/4)
+#endif
+
+/*
+** This one means 'NO TAG for this job'
+*/
+#define NO_TAG (256)
+
+/*
+** Number of targets supported by the driver.
+** n permits target numbers 0..n-1.
+** Default is 16, meaning targets #0..#15.
+**	Target #7 is the host adapter itself.
+*/
+
+#ifdef SCSI_NCR_MAX_TARGET
+#define MAX_TARGET (SCSI_NCR_MAX_TARGET)
+#else
+#define MAX_TARGET (16)
+#endif
+
+/*
+**	Number of logical units supported by the driver.
+**	n enables logical unit numbers 0..n-1.
+** The common SCSI devices require only
+** one lun, so take 1 as the default.
+*/
+
+#ifdef SCSI_NCR_MAX_LUN
+#define MAX_LUN 64
+#else
+#define MAX_LUN (1)
+#endif
+
+/*
+** Asynchronous pre-scaler (ns). Shall be 40 for
+** the SCSI timings to be compliant.
+*/
+
+#ifndef SCSI_NCR_MIN_ASYNC
+#define SCSI_NCR_MIN_ASYNC (40)
+#endif
+
+/*
+** The maximum number of jobs scheduled for starting.
+** We allocate 4 entries more than the value we announce
+** to the SCSI upper layer. Guess why ! :-)
+*/
+
+#ifdef SCSI_NCR_CAN_QUEUE
+#define MAX_START (SCSI_NCR_CAN_QUEUE + 4)
+#else
+#define MAX_START (MAX_TARGET + 7 * MAX_TAGS)
+#endif
+
+/*
+**	We do not want to allocate more than 1 PAGE for the
+**	start queue and the done queue. We hard-code the entry
+**	size to 8 in order to let cpp do the checking.
+**	This allows 512-4=508 pending IOs on i386, but Linux does not
+**	yet seem able to provide the driver with that many IOs.
+*/
+#if MAX_START > PAGE_SIZE/8
+#undef MAX_START
+#define MAX_START (PAGE_SIZE/8)
+#endif
+
+/*
+** The maximum number of segments a transfer is split into.
+** We support up to 127 segments for both read and write.
+*/
+
+#define MAX_SCATTER (SCSI_NCR_MAX_SCATTER)
+#define SCR_SG_SIZE (2)
+
+/*
+** other
+*/
+
+#define NCR_SNOOP_TIMEOUT (1000000)
+
+/*==========================================================
+**
+**	Miscellaneous BSDish defines.
+**
+**==========================================================
+*/
+
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef bcopy
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#endif
+
+#ifndef bzero
+#define bzero(d, n) memset((d), 0, (n))
+#endif
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*
+** Simple Wrapper to kernel PCI bus interface.
+**
+**	This wrapper lets us get rid of the old kernel PCI interface
+**	while still preserving linux-2.0 compatibility.
+**	In fact, it is mostly an incomplete emulation of the new
+**	PCI code for pre-2.2 kernels. When kernel-2.0 support
+**	is dropped, we will just have to remove most of this
+** code.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0)
+
+typedef struct pci_dev *pcidev_t;
+#define PCIDEV_NULL (0)
+#define PciBusNumber(d) (d)->bus->number
+#define PciDeviceFn(d) (d)->devfn
+#define PciVendorId(d) (d)->vendor
+#define PciDeviceId(d) (d)->device
+#define PciIrqLine(d) (d)->irq
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->resource[index].start;
+ if ((pdev->resource[index].flags & 0x7) == 0x4)
+ ++index;
+ return ++index;
+}
+#else
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->base_address[index++];
+ if ((*base & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ *base |= (((u_long)pdev->base_address[index]) << 32);
+#endif
+ ++index;
+ }
+ return index;
+}
+#endif
+
+#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
+
+typedef unsigned int pcidev_t;
+#define PCIDEV_NULL (~0u)
+#define PciBusNumber(d) ((d)>>8)
+#define PciDeviceFn(d) ((d)&0xff)
+#define __PciDev(busn, devfn) (((busn)<<8)+(devfn))
+
+#define pci_present pcibios_present
+
+#define pci_read_config_byte(d, w, v) \
+ pcibios_read_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_word(d, w, v) \
+ pcibios_read_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_dword(d, w, v) \
+ pcibios_read_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+#define pci_write_config_byte(d, w, v) \
+ pcibios_write_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_word(d, w, v) \
+ pcibios_write_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_dword(d, w, v) \
+ pcibios_write_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+static pcidev_t __init
+pci_find_device(unsigned int vendor, unsigned int device, pcidev_t prev)
+{
+ static unsigned short pci_index;
+ int retv;
+ unsigned char bus_number, device_fn;
+
+ if (prev == PCIDEV_NULL)
+ pci_index = 0;
+ else
+ ++pci_index;
+ retv = pcibios_find_device (vendor, device, pci_index,
+ &bus_number, &device_fn);
+ return retv ? PCIDEV_NULL : __PciDev(bus_number, device_fn);
+}
+
+static u_short __init PciVendorId(pcidev_t dev)
+{
+ u_short vendor_id;
+ pci_read_config_word(dev, PCI_VENDOR_ID, &vendor_id);
+ return vendor_id;
+}
+
+static u_short __init PciDeviceId(pcidev_t dev)
+{
+ u_short device_id;
+ pci_read_config_word(dev, PCI_DEVICE_ID, &device_id);
+ return device_id;
+}
+
+static u_int __init PciIrqLine(pcidev_t dev)
+{
+ u_char irq;
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ return irq;
+}
+
+static int __init
+pci_get_base_address(pcidev_t dev, int offset, u_long *base)
+{
+ u_int32 tmp;
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base = tmp;
+ offset += sizeof(u_int32);
+ if ((tmp & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base |= (((u_long)tmp) << 32);
+#endif
+ offset += sizeof(u_int32);
+ }
+ return offset;
+}
+
+#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
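+
+/*
+** Illustrative sketch (not referenced by the driver code): thanks to the
+** wrapper above, the same probe loop compiles against both the old
+** pcibios interface and the post-2.2 struct pci_dev interface.  The
+** 0x1000 (NCR/Symbios) vendor id and 0x000f device id are only examples.
+*/
+#if 0
+static void __init demo_pci_probe(void)
+{
+	pcidev_t pdev = PCIDEV_NULL;
+	u_long base;
+	int idx;
+
+	while ((pdev = pci_find_device(0x1000, 0x000f, pdev)) != PCIDEV_NULL) {
+		/* read BAR #0; the return value selects the next BAR */
+		idx = pci_get_base_address(pdev, 0, &base);
+		printk("demo: controller on irq %d, bar0 0x%lx, next bar %d\n",
+			(int) PciIrqLine(pdev), base, idx);
+	}
+}
+#endif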
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_POINTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_IC (0x0800)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+/*
+** SMP threading.
+**
+**	Assuming that SMP systems are generally high end systems and may
+**	use several SCSI adapters, we use one lock per controller
+**	instead of a single global one. For the moment (linux-2.1.95), the
+**	driver's entry points are called with the 'io_request_lock' held, so:
+**	- We uselessly lose a couple of micro-seconds locking the
+**	  controller data structure.
+**	- But the driver is not broken by design for SMP and so can be
+**	  more resistant to bugs or bad changes in the IO sub-system code.
+**	- A small advantage is that the interrupt code can be grained as
+**	  finely as wished (e.g. threaded per controller).
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+
+spinlock_t sym53c8xx_lock = SPIN_LOCK_UNLOCKED;
+#define NCR_LOCK_DRIVER(flags) spin_lock_irqsave(&sym53c8xx_lock, flags)
+#define NCR_UNLOCK_DRIVER(flags) spin_unlock_irqrestore(&sym53c8xx_lock,flags)
+
+#define NCR_INIT_LOCK_NCB(np) spin_lock_init(&np->smp_lock);
+#define NCR_LOCK_NCB(np, flags) spin_lock_irqsave(&np->smp_lock, flags)
+#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) \
+ spin_lock_irqsave(&io_request_lock, flags)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) \
+ spin_unlock_irqrestore(&io_request_lock, flags)
+
+#else
+
+#define NCR_LOCK_DRIVER(flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_DRIVER(flags) do { restore_flags(flags); } while (0)
+
+#define NCR_INIT_LOCK_NCB(np) do { } while (0)
+#define NCR_LOCK_NCB(np, flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_NCB(np, flags) do { restore_flags(flags); } while (0)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) do {;} while (0)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) do {;} while (0)
+
+#endif
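+
+/*
+** Illustrative sketch (not referenced by the driver code): any resource
+** shared by all controllers is protected with the driver-wide lock
+** defined above; per-controller data would use NCR_LOCK_NCB() instead.
+*/
+#if 0
+static void demo_driver_lock_usage(void)
+{
+	u_long flags;
+
+	NCR_LOCK_DRIVER(flags);		/* spinlock on SMP, cli() otherwise */
+	/* ... touch driver-global data here ... */
+	NCR_UNLOCK_DRIVER(flags);
+}
+#endif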
+
+/*
+** Memory mapped IO
+**
+**	Since linux-2.1, we must use ioremap() to map the io memory space
+**	and iounmap() to unmap it. That allows portability.
+**	Linux 1.3.X and 2.0.X allow remapping physical page addresses greater
+**	than the highest physical memory address to kernel virtual pages with
+**	vremap() / vfree(). That was not portable but worked on the i386
+**	architecture.
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif
+
+#ifdef __sparc__
+# include <asm/irq.h>
+# if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)
+ /* ioremap/iounmap broken in 2.2.x on Sparc. -DaveM */
+# define ioremap(base, size) ((u_long) __va(base))
+# define iounmap(vaddr)
+# endif
+# define pcivtobus(p) bus_dvma_to_mem(p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((void *)(a), (const void *)(b), (c))
+#elif defined(__alpha__)
+# define pcivtobus(p) ((p) & 0xfffffffful)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#else /* others */
+# define pcivtobus(p) (p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#endif
+
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+static u_long __init remap_pci_mem(u_long base, u_long size)
+{
+ u_long page_base = ((u_long) base) & PAGE_MASK;
+ u_long page_offs = ((u_long) base) - page_base;
+ u_long page_remapped = (u_long) ioremap(page_base, page_offs+size);
+
+ return page_remapped? (page_remapped + page_offs) : 0UL;
+}
+
+static void __init unmap_pci_mem(u_long vaddr, u_long size)
+{
+ if (vaddr)
+ iounmap((void *) (vaddr & PAGE_MASK));
+}
+
+#endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
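+
+/*
+** Illustrative sketch (not referenced by the driver code): the chip's
+** register window is mapped once at attach time with remap_pci_mem()
+** and unmapped at detach.  The 0x400 window size is only an example.
+*/
+#if 0
+static void __init demo_mmio_usage(u_long bar_base)
+{
+	u_long vaddr = remap_pci_mem(bar_base, 0x400);
+
+	if (vaddr) {
+		/* ... readb()/writeb() style accesses through vaddr ... */
+		unmap_pci_mem(vaddr, 0x400);
+	}
+}
+#endif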
+
+/*
+** Insert a delay in micro-seconds and milli-seconds.
+** -------------------------------------------------
+**	Under Linux, udelay() is restricted to delays < 1 milli-second.
+**	In fact, it generally works for delays of up to 1 second.
+** Since 2.1.105, the mdelay() function is provided for delays
+** in milli-seconds.
+** Under 2.0 kernels, udelay() is an inline function that is very
+** inaccurate on Pentium processors.
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
+#define UDELAY udelay
+#define MDELAY mdelay
+#else
+static void UDELAY(long us) { udelay(us); }
+static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
+#endif
+
+/*
+** Simple power of two buddy-like allocator
+** ----------------------------------------
+** This simple code is not intended to be fast, but to provide
+** power of 2 aligned memory allocations.
+** Since the SCRIPTS processor only supplies 8 bit arithmetic,
+** this allocator allows simple and fast address calculations
+** from the SCRIPTS code. In addition, cache line alignment
+**	is guaranteed for power of 2 cache line sizes.
+**	Enhanced in linux-2.3.44 to provide a memory pool per pcidev
+**	to support dynamic dma mapping. (I would have preferred a
+**	real bus abstraction, btw).
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+#define __GetFreePages(flags, order) __get_free_pages(flags, order)
+#else
+#define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
+#endif
+
+#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
+#if PAGE_SIZE >= 8192
+#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
+#else
+#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
+#endif
+#define MEMO_FREE_UNUSED /* Free unused pages immediately */
+#define MEMO_WARN 1
+#define MEMO_GFP_FLAGS GFP_ATOMIC
+#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
+#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
+#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
+
+typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
+typedef pcidev_t m_bush_t; /* Something that addresses DMAable */
+
+typedef struct m_link { /* Link between free memory chunks */
+ struct m_link *next;
+} m_link_s;
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+typedef struct m_vtob { /* Virtual to Bus address translation */
+ struct m_vtob *next;
+ m_addr_t vaddr;
+ m_addr_t baddr;
+} m_vtob_s;
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+#endif
+
+typedef struct m_pool { /* Memory pool of a given kind */
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ m_bush_t bush;
+ m_addr_t (*getp)(struct m_pool *);
+ void (*freep)(struct m_pool *, m_addr_t);
+#define M_GETP() mp->getp(mp)
+#define M_FREEP(p) mp->freep(mp, p)
+#define GetPages() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define FreePages(p) free_pages(p, MEMO_PAGE_ORDER)
+ int nump;
+ m_vtob_s *(vtob[VTOB_HASH_SIZE]);
+ struct m_pool *next;
+#else
+#define M_GETP() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define M_FREEP(p) free_pages(p, MEMO_PAGE_ORDER)
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+ struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
+} m_pool_s;
+
+static void *___m_alloc(m_pool_s *mp, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ int j;
+ m_addr_t a;
+ m_link_s *h = mp->h;
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return 0;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ j = i;
+ while (!h[j].next) {
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ h[j].next = (m_link_s *) M_GETP();
+ if (h[j].next)
+ h[j].next->next = 0;
+ break;
+ }
+ ++j;
+ s <<= 1;
+ }
+ a = (m_addr_t) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (m_link_s *) (a+s);
+ h[j].next->next = 0;
+ }
+ }
+#ifdef DEBUG
+ printk("___m_alloc(%d) = %p\n", size, (void *) a);
+#endif
+ return (void *) a;
+}
+
+static void ___m_free(m_pool_s *mp, void *ptr, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ m_link_s *q;
+ m_addr_t a, b;
+ m_link_s *h = mp->h;
+
+#ifdef DEBUG
+ printk("___m_free(%p, %d)\n", ptr, size);
+#endif
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ a = (m_addr_t) ptr;
+
+ while (1) {
+#ifdef MEMO_FREE_UNUSED
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ M_FREEP(a);
+ break;
+ }
+#endif
+ b = a ^ s;
+ q = &h[i];
+ while (q->next && q->next != (m_link_s *) b) {
+ q = q->next;
+ }
+ if (!q->next) {
+ ((m_link_s *) a)->next = h[i].next;
+ h[i].next = (m_link_s *) a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ ++i;
+ }
+}
+
+static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
+{
+ void *p;
+
+ p = ___m_alloc(mp, size);
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("new %-10s[%4d] @%p.\n", name, size, p);
+
+ if (p)
+ bzero(p, size);
+ else if (uflags & MEMO_WARN)
+ printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
+
+ return p;
+}
+
+#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
+
+static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
+{
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+ ___m_free(mp, ptr, size);
+
+}
+
+/*
+ * With pci bus iommu support, we use a default pool of unmapped memory
+ * for memory we do not need to DMA from/to, and one pool per pcidev for
+ * memory accessed by the PCI chip. `mp0' is the default non-DMAable pool.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+static m_pool_s mp0;
+
+#else
+
+static m_addr_t ___mp0_getp(m_pool_s *mp)
+{
+ m_addr_t m = GetPages();
+ if (m)
+ ++mp->nump;
+ return m;
+}
+
+static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
+{
+ FreePages(m);
+ --mp->nump;
+}
+
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+static void *m_calloc(int size, char *name)
+{
+ u_long flags;
+ void *m;
+ NCR_LOCK_DRIVER(flags);
+ m = __m_calloc(&mp0, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+ return m;
+}
+
+static void m_free(void *ptr, int size, char *name)
+{
+ u_long flags;
+ NCR_LOCK_DRIVER(flags);
+ __m_free(&mp0, ptr, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+}
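+
+/*
+** Illustrative sketch (not referenced by the driver code): the allocator
+** above hands back zeroed, power-of-2 sized and aligned chunks, so a
+** 24 byte request is served from the 32 byte bucket, and the same size
+** must be passed back on free.
+*/
+#if 0
+static void demo_pool_usage(void)
+{
+	void *p = m_calloc(24, "DEMO");	/* rounded up to a 32 byte chunk */
+
+	if (p)
+		m_free(p, 24, "DEMO");	/* size must match the allocation */
+}
+#endif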
+
+/*
+ * DMAable pools.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Without pci bus iommu support, all the memory is assumed DMAable */
+
+#define __m_calloc_dma(b, s, n) m_calloc(s, n)
+#define __m_free_dma(b, p, s, n) m_free(p, s, n)
+#define __vtobus(b, p) virt_to_bus(p)
+
+#else
+
+/*
+ * With pci bus iommu support, we maintain one pool per pcidev and a
+ * hashed reverse table for virtual to bus physical address translations.
+ */
+static m_addr_t ___dma_getp(m_pool_s *mp)
+{
+ m_addr_t vp;
+ m_vtob_s *vbp;
+
+ vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
+ if (vbp) {
+ dma_addr_t daddr;
+ vp = (m_addr_t) pci_alloc_consistent(mp->bush,
+ PAGE_SIZE<<MEMO_PAGE_ORDER,
+ &daddr);
+ if (vp) {
+ int hc = VTOB_HASH_CODE(vp);
+ vbp->vaddr = vp;
+ vbp->baddr = daddr;
+ vbp->next = mp->vtob[hc];
+ mp->vtob[hc] = vbp;
+ ++mp->nump;
+ return vp;
+ }
+ else
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ }
+ return 0;
+}
+
+static void ___dma_freep(m_pool_s *mp, m_addr_t m)
+{
+ m_vtob_s **vbpp, *vbp;
+ int hc = VTOB_HASH_CODE(m);
+
+ vbpp = &mp->vtob[hc];
+ while (*vbpp && (*vbpp)->vaddr != m)
+ vbpp = &(*vbpp)->next;
+ if (*vbpp) {
+ vbp = *vbpp;
+ *vbpp = (*vbpp)->next;
+ pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
+ (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ --mp->nump;
+ }
+}
+
+static inline m_pool_s *___get_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
+ return mp;
+}
+
+static m_pool_s *___cre_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
+ if (mp) {
+ bzero(mp, sizeof(*mp));
+ mp->bush = bush;
+ mp->getp = ___dma_getp;
+ mp->freep = ___dma_freep;
+ mp->next = mp0.next;
+ mp0.next = mp;
+ }
+ return mp;
+}
+
+static void ___del_dma_pool(m_pool_s *p)
+{
+ struct m_pool **pp = &mp0.next;
+
+ while (*pp && *pp != p)
+ pp = &(*pp)->next;
+ if (*pp) {
+ *pp = (*pp)->next;
+ __m_free(&mp0, p, sizeof(*p), "MPOOL");
+ }
+}
+
+static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+ void *m = 0;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (!mp)
+ mp = ___cre_dma_pool(bush);
+ if (mp)
+ m = __m_calloc(mp, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+
+ return m;
+}
+
+static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp)
+ __m_free(mp, m, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+}
+
+static m_addr_t __vtobus(m_bush_t bush, void *m)
+{
+ u_long flags;
+ m_pool_s *mp;
+ int hc = VTOB_HASH_CODE(m);
+ m_vtob_s *vp = 0;
+ m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp) {
+ vp = mp->vtob[hc];
+ while (vp && (m_addr_t) vp->vaddr != a)
+ vp = vp->next;
+ }
+ NCR_UNLOCK_DRIVER(flags);
+ return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
+}
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->pdev, s, n)
+#define _m_free_dma(np, p, s, n) __m_free_dma(np->pdev, p, s, n)
+#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
+#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
+#define _vtobus(np, p) __vtobus(np->pdev, p)
+#define vtobus(p) _vtobus(np, p)
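+
+/*
+** Illustrative sketch (not referenced by the driver code): DMAable memory
+** is allocated from the pool tied to the PCI device, and the bus address
+** handed to the chip is obtained with __vtobus().  It compiles against
+** both the iommu and the non-iommu variants of the macros above.
+*/
+#if 0
+static void demo_dma_pool_usage(pcidev_t pdev)
+{
+	void *p = __m_calloc_dma(pdev, 128, "DEMO");	/* zeroed 128 byte chunk */
+
+	if (p) {
+		u_long baddr = __vtobus(pdev, p);	/* bus address for the SCRIPTS */
+		__m_free_dma(pdev, p, 128, "DEMO");
+		(void) baddr;
+	}
+}
+#endif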
+
+/*
+ * Deal with DMA mapping/unmapping.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Linux versions prior to pci bus iommu kernel interface */
+
+#define __unmap_scsi_data(pdev, cmd) do {; } while (0)
+#define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
+#define __map_scsi_sg_data(pdev, cmd) ((cmd)->use_sg)
+#define __sync_scsi_data(pdev, cmd) do {; } while (0)
+
+#define scsi_sg_dma_address(sc) vtobus((sc)->address)
+#define scsi_sg_dma_len(sc) ((sc)->length)
+
+#else
+
+/* Linux version with pci bus iommu kernel interface */
+
+/* To keep track of the dma mapping (sg/single) that has been set */
+#define __data_mapped SCp.phase
+#define __data_mapping SCp.have_data_in
+
+static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_unmap_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+ cmd->__data_mapped = 0;
+}
+
+static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ dma_addr_t mapping;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->request_bufflen == 0)
+ return 0;
+
+ mapping = pci_map_single(pdev, cmd->request_buffer,
+ cmd->request_bufflen, dma_dir);
+ cmd->__data_mapped = 1;
+ cmd->__data_mapping = mapping;
+
+ return mapping;
+}
+
+static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int use_sg;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->use_sg == 0)
+ return 0;
+
+ use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ cmd->__data_mapped = 2;
+ cmd->__data_mapping = use_sg;
+
+ return use_sg;
+}
+
+static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_dma_sync_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+}
+
+#define scsi_sg_dma_address(sc) sg_dma_address(sc)
+#define scsi_sg_dma_len(sc) sg_dma_len(sc)
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->pdev, cmd)
+#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->pdev, cmd)
+#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->pdev, cmd)
+#define sync_scsi_data(np, cmd) __sync_scsi_data(np->pdev, cmd)
+
+
+/*
+ * Print out some buffer.
+ */
+static void ncr_print_hex(u_char *p, int n)
+{
+ while (n-- > 0)
+ printk (" %x", *p++);
+}
+
+static void ncr_printl_hex(char *label, u_char *p, int n)
+{
+ printk("%s", label);
+ ncr_print_hex(p, n);
+ printk (".\n");
+}
+
+/*
+** Transfer direction
+**
+** Until some linux kernel version near 2.3.40, low-level scsi
+** drivers were not told about data transfer direction.
+**	We check for the existence of this feature, which SCSI driver
+**	developers had expected for a _long_ time, by simply testing
+**	for the definition of SCSI_DATA_UNKNOWN. Indeed
+** this is a hack, but testing against a kernel version would
+** have been a shame. ;-)
+*/
+#ifdef SCSI_DATA_UNKNOWN
+
+#define scsi_data_direction(cmd) (cmd->sc_data_direction)
+
+#else
+
+#define SCSI_DATA_UNKNOWN 0
+#define SCSI_DATA_WRITE 1
+#define SCSI_DATA_READ 2
+#define SCSI_DATA_NONE 3
+
+static __inline__ int scsi_data_direction(Scsi_Cmnd *cmd)
+{
+ int direction;
+
+ switch((int) cmd->cmnd[0]) {
+ case 0x08: /* READ(6) 08 */
+ case 0x28: /* READ(10) 28 */
+ case 0xA8: /* READ(12) A8 */
+ direction = SCSI_DATA_READ;
+ break;
+ case 0x0A: /* WRITE(6) 0A */
+ case 0x2A: /* WRITE(10) 2A */
+ case 0xAA: /* WRITE(12) AA */
+ direction = SCSI_DATA_WRITE;
+ break;
+ default:
+ direction = SCSI_DATA_UNKNOWN;
+ break;
+ }
+
+ return direction;
+}
+
+#endif /* SCSI_DATA_UNKNOWN */
+
+/*
+** Head of list of NCR boards
+**
+**	For kernel versions < 1.3.70, the host is retrieved by its irq level.
+**	For later kernels, the internal host control block address
+**	(struct ncb) is used as the device id parameter of the irq handling.
+*/
+
+static struct Scsi_Host *first_host = NULL;
+
+
+/*
+** /proc directory entry and proc_info function
+*/
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
+static struct proc_dir_entry proc_scsi_sym53c8xx = {
+ PROC_SCSI_SYM53C8XX, 9, NAME53C8XX,
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#endif
+static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func);
+#endif
+
+/*
+** Driver setup.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+# ifdef MODULE
+char *sym53c8xx = 0; /* command line passed by insmod */
+# if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,30)
+MODULE_PARM(sym53c8xx, "s");
+# endif
+# endif
+#endif
+
+/*
+** Other Linux definitions
+*/
+#define SetScsiResult(cmd, h_sts, s_sts) \
+ cmd->result = (((h_sts) << 16) + ((s_sts) & 0x7f))
+
+/* We may have to remind our amnesiac SCSI layer of the reason for the abort */
+#if 0
+#define SetScsiAbortResult(cmd) \
+ SetScsiResult( \
+ cmd, \
+ (cmd)->abort_reason == DID_TIME_OUT ? DID_TIME_OUT : DID_ABORT, \
+ 0xff)
+#else
+#define SetScsiAbortResult(cmd) SetScsiResult(cmd, DID_ABORT, 0xff)
+#endif
+
+static void sym53c8xx_select_queue_depths(
+ struct Scsi_Host *host, struct scsi_device *devlist);
+static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs);
+static void sym53c8xx_timeout(unsigned long np);
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static u_char Tekram_sync[16] __initdata =
+ {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10};
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Structures used by sym53c8xx_detect/sym53c8xx_pci_init to
+** transmit device configuration to the ncr_attach() function.
+*/
+typedef struct {
+ int bus;
+ u_char device_fn;
+ u_long base;
+ u_long base_2;
+ u_long io_port;
+ int irq;
+/* port and reg fields to use INB, OUTB macros */
+ u_long base_io;
+ volatile struct ncr_reg *reg;
+} ncr_slot;
+
+typedef struct {
+ int type;
+#define SCSI_NCR_SYMBIOS_NVRAM (1)
+#define SCSI_NCR_TEKRAM_NVRAM (2)
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ } data;
+#endif
+} ncr_nvram;
+
+/*
+** Structure used by sym53c8xx_detect/sym53c8xx_pci_init
+** to save data on each detected board for ncr_attach().
+*/
+typedef struct {
+ pcidev_t pdev;
+ ncr_slot slot;
+ ncr_chip chip;
+ ncr_nvram *nvram;
+ u_char host_id;
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ u_char pqs_pds;
+#endif
+ int attach_done;
+} ncr_device;
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)panic( \
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+
+/*==========================================================
+**
+** Command control block states.
+**
+**==========================================================
+*/
+
+#define HS_IDLE (0)
+#define HS_BUSY (1)
+#define HS_NEGOTIATE (2) /* sync/wide data transfer*/
+#define HS_DISCONNECT (3) /* Disconnected by target */
+
+#define HS_DONEMASK (0x80)
+#define HS_COMPLETE (4|HS_DONEMASK)
+#define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
+#define HS_RESET (6|HS_DONEMASK) /* SCSI reset */
+#define HS_ABORTED (7|HS_DONEMASK) /* Transfer aborted */
+#define HS_TIMEOUT (8|HS_DONEMASK) /* Software timeout */
+#define HS_FAIL (9|HS_DONEMASK) /* SCSI or PCI bus errors */
+#define HS_UNEXPECTED (10|HS_DONEMASK)/* Unexpected disconnect */
+
+#define DSA_INVALID 0xffffffff
+
+/*==========================================================
+**
+** Software Interrupt Codes
+**
+**==========================================================
+*/
+
+#define SIR_BAD_STATUS (1)
+#define SIR_SEL_ATN_NO_MSG_OUT (2)
+#define SIR_MSG_RECEIVED (3)
+#define SIR_MSG_WEIRD (4)
+#define SIR_NEGO_FAILED (5)
+#define SIR_NEGO_PROTO (6)
+#define SIR_SCRIPT_STOPPED (7)
+#define SIR_REJECT_TO_SEND (8)
+#define SIR_SWIDE_OVERRUN (9)
+#define SIR_SODL_UNDERRUN (10)
+#define SIR_RESEL_NO_MSG_IN (11)
+#define SIR_RESEL_NO_IDENTIFY (12)
+#define SIR_RESEL_BAD_LUN (13)
+#define SIR_TARGET_SELECTED (14)
+#define SIR_RESEL_BAD_I_T_L (15)
+#define SIR_RESEL_BAD_I_T_L_Q (16)
+#define SIR_ABORT_SENT (17)
+#define SIR_RESEL_ABORTED (18)
+#define SIR_MSG_OUT_DONE (19)
+#define SIR_AUTO_SENSE_DONE (20)
+#define SIR_DUMMY_INTERRUPT (21)
+#define SIR_DATA_OVERRUN (22)
+#define SIR_BAD_PHASE (23)
+#define SIR_MAX (23)
+
+/*==========================================================
+**
+** Extended error bits.
+** xerr_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define XE_EXTRA_DATA (1) /* unexpected data phase */
+#define XE_BAD_PHASE (2) /* illegal phase (4/5) */
+#define XE_PARITY_ERR (4) /* unrecovered SCSI parity error */
+#define XE_SODL_UNRUN (1<<3)
+#define XE_SWIDE_OVRUN (1<<4)
+
+/*==========================================================
+**
+** Negotiation status.
+** nego_status field of struct ccb.
+**
+**==========================================================
+*/
+
+#define NS_NOCHANGE (0)
+#define NS_SYNC (1)
+#define NS_WIDE (2)
+#define NS_PPR (4)
+
+/*==========================================================
+**
+** "Special features" of targets.
+** quirks field of struct tcb.
+** actualquirks field of struct ccb.
+**
+**==========================================================
+*/
+
+#define QUIRK_AUTOSAVE (0x01)
+
+/*==========================================================
+**
+** Capability bits in Inquire response byte 7.
+**
+**==========================================================
+*/
+
+#define INQ7_QUEUE (0x02)
+#define INQ7_SYNC (0x10)
+#define INQ7_WIDE16 (0x20)
+
+/*==========================================================
+**
+** A CCB hashed table is used to retrieve CCB address
+** from DSA value.
+**
+**==========================================================
+*/
+
+#define CCB_HASH_SHIFT 8
+#define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
+#define CCB_HASH_MASK (CCB_HASH_SIZE-1)
+#define CCB_HASH_CODE(dsa) (((dsa) >> 11) & CCB_HASH_MASK)
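+
+/*
+** Illustrative sketch (not referenced by the driver code) of the lookup
+** this hashing supports: the DSA value read back from the chip selects a
+** bucket, and the bucket's chain is walked until the CCB whose bus
+** address matches.  The structure and field names below are only
+** placeholders for the real struct ccb declared further down.
+*/
+#if 0
+struct demo_ccb {
+	struct demo_ccb *link_ccbh;	/* hash chain link */
+	u_long p_ccb;			/* bus address of this CCB */
+};
+
+static struct demo_ccb *demo_ccb_from_dsa(struct demo_ccb **ccbh, u_long dsa)
+{
+	struct demo_ccb *cp = ccbh[CCB_HASH_CODE(dsa)];
+
+	while (cp && cp->p_ccb != dsa)
+		cp = cp->link_ccbh;
+	return cp;
+}
+#endif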
+
+/*==========================================================
+**
+** Declaration of structs.
+**
+**==========================================================
+*/
+
+struct tcb;
+struct lcb;
+struct ccb;
+struct ncb;
+struct script;
+
+typedef struct ncb * ncb_p;
+typedef struct tcb * tcb_p;
+typedef struct lcb * lcb_p;
+typedef struct ccb * ccb_p;
+
+struct link {
+ ncrcmd l_cmd;
+ ncrcmd l_paddr;
+};
+
+struct usrcmd {
+ u_long target;
+ u_long lun;
+ u_long data;
+ u_long cmd;
+};
+
+#define UC_SETSYNC 10
+#define UC_SETTAGS 11
+#define UC_SETDEBUG 12
+#define UC_SETORDER 13
+#define UC_SETWIDE 14
+#define UC_SETFLAG 15
+#define UC_SETVERBOSE 17
+#define UC_RESETDEV 18
+#define UC_CLEARDEV 19
+
+#define UF_TRACE (0x01)
+#define UF_NODISC (0x02)
+#define UF_NOSCAN (0x04)
+
+/*========================================================================
+**
+** Declaration of structs: target control block
+**
+**========================================================================
+*/
+struct tcb {
+ /*----------------------------------------------------------------
+ ** LUN tables.
+ ** An array of bus addresses is used on reselection by
+ ** the SCRIPT.
+ **----------------------------------------------------------------
+ */
+ u_int32 *luntbl; /* lcbs bus address table */
+ u_int32 b_luntbl; /* bus address of this table */
+ u_int32 b_lun0; /* bus address of lun0 */
+ lcb_p l0p; /* lcb of LUN #0 (normal case) */
+#if MAX_LUN > 1
+ lcb_p *lmp; /* Other lcb's [1..MAX_LUN] */
+#endif
+ /*----------------------------------------------------------------
+ ** Target capabilities.
+ **----------------------------------------------------------------
+ */
+ u_char inq_done; /* Target capabilities received */
+ u_char inq_byte7; /* Contains these capabilities */
+
+ /*----------------------------------------------------------------
+ ** Some flags.
+ **----------------------------------------------------------------
+ */
+ u_char to_reset; /* This target is to be reset */
+
+ /*----------------------------------------------------------------
+ ** Pointer to the ccb used for negotiation.
+	**	This prevents starting a negotiation for every queued command
+	**	when tagged command queuing is enabled.
+ **----------------------------------------------------------------
+ */
+ ccb_p nego_cp;
+
+ /*----------------------------------------------------------------
+ ** negotiation of wide and synch transfer and device quirks.
+ ** sval, wval and uval are read from SCRIPTS and so have alignment
+ ** constraints.
+ **----------------------------------------------------------------
+ */
+/*0*/ u_char minsync;
+/*1*/ u_char sval;
+/*2*/ u_short period;
+/*0*/ u_char maxoffs;
+/*1*/ u_char quirks;
+/*2*/ u_char widedone;
+/*3*/ u_char wval;
+/*0*/ u_char uval;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ u_char ic_min_sync;
+ u_char ic_max_width;
+ u_char ic_done;
+#endif
+ u_char ic_maximums_set;
+ u_char ppr_negotiation;
+
+ /*----------------------------------------------------------------
+ ** User settable limits and options.
+ ** These limits are read from the NVRAM if present.
+ **----------------------------------------------------------------
+ */
+ u_char usrsync;
+ u_char usrwide;
+ u_short usrtags;
+ u_char usrflag;
+};
+
+/*========================================================================
+**
+** Declaration of structs: lun control block
+**
+**========================================================================
+*/
+struct lcb {
+ /*----------------------------------------------------------------
+ ** On reselection, SCRIPTS use this value as a JUMP address
+ ** after the IDENTIFY has been successfully received.
+ ** This field is set to 'resel_tag' if TCQ is enabled and
+ ** to 'resel_notag' if TCQ is disabled.
+ ** (Must be at zero due to bad lun handling on reselection)
+ **----------------------------------------------------------------
+ */
+/*0*/ u_int32 resel_task;
+
+ /*----------------------------------------------------------------
+ ** Task table used by the script processor to retrieve the
+ ** task corresponding to a reselected nexus. The TAG is used
+ ** as offset to determine the corresponding entry.
+ ** Each entry contains the associated CCB bus address.
+ **----------------------------------------------------------------
+ */
+ u_int32 tasktbl_0; /* Used if TCQ not enabled */
+ u_int32 *tasktbl;
+ u_int32 b_tasktbl;
+
+ /*----------------------------------------------------------------
+ ** CCB queue management.
+ **----------------------------------------------------------------
+ */
+ XPT_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
+ XPT_QUEHEAD wait_ccbq; /* Queue of waiting for IO CCBs */
+ u_short busyccbs; /* CCBs busy for this lun */
+ u_short queuedccbs; /* CCBs queued to the controller*/
+ u_short queuedepth; /* Queue depth for this lun */
+ u_short scdev_depth; /* SCSI device queue depth */
+ u_short maxnxs; /* Max possible nexuses */
+
+ /*----------------------------------------------------------------
+ ** Control of tagged command queuing.
+	**	Tag allocation is performed using a circular buffer.
+ ** This avoids using a loop for tag allocation.
+ **----------------------------------------------------------------
+ */
+ u_short ia_tag; /* Tag allocation index */
+ u_short if_tag; /* Tag release index */
+ u_char *cb_tags; /* Circular tags buffer */
+ u_char inq_byte7; /* Store unit CmdQ capability */
+ u_char usetags; /* Command queuing is active */
+ u_char to_clear; /* User wants to clear all tasks*/
+ u_short maxtags; /* Max NR of tags asked by user */
+ u_short numtags; /* Current number of tags */
+
+ /*----------------------------------------------------------------
+ ** QUEUE FULL and ORDERED tag control.
+ **----------------------------------------------------------------
+ */
+ u_short num_good; /* Nr of GOOD since QUEUE FULL */
+ u_short tags_sum[2]; /* Tags sum counters */
+ u_char tags_si; /* Current index to tags sum */
+ u_long tags_stime; /* Last time we switch tags_sum */
+};
+
+/*========================================================================
+**
+** Declaration of structs: actions for a task.
+**
+**========================================================================
+**
+** It is part of the CCB and is called by the scripts processor to
+** start or restart the data structure (nexus).
+**
+**------------------------------------------------------------------------
+*/
+struct action {
+ u_int32 start;
+ u_int32 restart;
+};
+
+/*========================================================================
+**
+** Declaration of structs: Phase mismatch context.
+**
+**========================================================================
+**
+** It is part of the CCB and is used as parameters for the DATA
+** pointer. We need two contexts to handle correctly the SAVED
+** DATA POINTER.
+**
+**------------------------------------------------------------------------
+*/
+struct pm_ctx {
+ struct scr_tblmove sg; /* Updated interrupted SG block */
+ u_int32 ret; /* SCRIPT return address */
+};
+
+/*========================================================================
+**
+** Declaration of structs: global HEADER.
+**
+**========================================================================
+**
+** In earlier driver versions, this substructure was copied from the
+** ccb to a global address after selection (or reselection) and copied
+** back before disconnect. Since we are now using LOAD/STORE DSA
+**	RELATIVE instructions, the script is able to access these fields
+**	directly, so this header is no longer copied.
+**
+**------------------------------------------------------------------------
+*/
+
+struct head {
+ /*----------------------------------------------------------------
+ ** Start and restart SCRIPTS addresses (must be at 0).
+ **----------------------------------------------------------------
+ */
+ struct action go;
+
+ /*----------------------------------------------------------------
+ ** Saved data pointer.
+ ** Points to the position in the script responsible for the
+ ** actual transfer of data.
+ ** It's written after reception of a SAVE_DATA_POINTER message.
+	**	The goal pointer (goalp) points after the last transfer command.
+ **----------------------------------------------------------------
+ */
+ u_int32 savep;
+ u_int32 lastp;
+ u_int32 goalp;
+
+ /*----------------------------------------------------------------
+ ** Alternate data pointer.
+ ** They are copied back to savep/lastp/goalp by the SCRIPTS
+ ** when the direction is unknown and the device claims data out.
+ **----------------------------------------------------------------
+ */
+ u_int32 wlastp;
+ u_int32 wgoalp;
+
+ /*----------------------------------------------------------------
+ ** Status fields.
+ **----------------------------------------------------------------
+ */
+ u_char status[4]; /* host status */
+};
+
+/*
+** LUN control block lookup.
+** We use a direct pointer for LUN #0, and a table of pointers
+** which is only allocated for devices that support LUN(s) > 0.
+*/
+#if MAX_LUN <= 1
+#define ncr_lp(np, tp, lun) (!lun) ? (tp)->l0p : 0
+#else
+#define ncr_lp(np, tp, lun) \
+ (!lun) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(lun)] : 0
+#endif
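+
+/*
+**	Example use of the lookup above (illustrative only):
+**
+**	    lcb_p lp = ncr_lp(np, tp, ln);
+**	    if (!lp) {
+**	        ... no LCB has been allocated yet for this LUN ...
+**	    }
+*/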
+
+/*
+** The status bytes are used by the host and the script processor.
+**
+** The four bytes (status[4]) are copied to the scratchb register
+** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect,
+** and copied back just after disconnecting.
+** Inside the script the XX_REG are used.
+*/
+
+/*
+** Last four bytes (script)
+*/
+#define QU_REG scr0
+#define HS_REG scr1
+#define HS_PRT nc_scr1
+#define SS_REG scr2
+#define SS_PRT nc_scr2
+#define HF_REG scr3
+#define HF_PRT nc_scr3
+
+/*
+** Last four bytes (host)
+*/
+#define actualquirks phys.header.status[0]
+#define host_status phys.header.status[1]
+#define scsi_status phys.header.status[2]
+#define host_flags phys.header.status[3]
+
+/*
+** Host flags
+*/
+#define HF_IN_PM0 1u
+#define HF_IN_PM1 (1u<<1)
+#define HF_ACT_PM (1u<<2)
+#define HF_DP_SAVED (1u<<3)
+#define HF_AUTO_SENSE (1u<<4)
+#define HF_DATA_IN (1u<<5)
+#define HF_PM_TO_C (1u<<6)
+#define HF_EXT_ERR (1u<<7)
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define HF_HINT_IARB (1u<<7)
+#endif
+
+/*
+** This one is stolen from QU_REG.:)
+*/
+#define HF_DATA_ST (1u<<7)
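+
+/*
+**	Illustrative example (not part of the driver): thanks to the byte
+**	aliases and flags above, the C code can test the same bytes that
+**	the SCRIPTS maintain through scr0..scr3, e.g. on completion:
+**
+**	    if (cp->host_status == HS_COMPLETE &&
+**	        cp->scsi_status == S_GOOD &&
+**	        !(cp->host_flags & HF_EXT_ERR)) {
+**	        ... command completed without extended error ...
+**	    }
+*/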
+
+/*==========================================================
+**
+** Declaration of structs: Data structure block
+**
+**==========================================================
+**
+** During execution of a ccb by the script processor,
+** the DSA (data structure address) register points
+** to this substructure of the ccb.
+** This substructure contains the header with
+** the script-processor-changeable data and
+** data blocks for the indirect move commands.
+**
+**----------------------------------------------------------
+*/
+
+struct dsb {
+
+ /*
+ ** Header.
+ */
+
+ struct head header;
+
+ /*
+ ** Table data for Script
+ */
+
+ struct scr_tblsel select;
+ struct scr_tblmove smsg ;
+ struct scr_tblmove smsg_ext ;
+ struct scr_tblmove cmd ;
+ struct scr_tblmove sense ;
+ struct scr_tblmove wresid;
+ struct scr_tblmove data [MAX_SCATTER];
+
+ /*
+ ** Phase mismatch contexts.
+ ** We need two to handle correctly the
+ ** SAVED DATA POINTER.
+ */
+
+ struct pm_ctx pm0;
+ struct pm_ctx pm1;
+};
+
+
+/*========================================================================
+**
+** Declaration of structs: Command control block.
+**
+**========================================================================
+*/
+struct ccb {
+ /*----------------------------------------------------------------
+ ** This is the data structure which is pointed by the DSA
+ ** register when it is executed by the script processor.
+ ** It must be the first entry.
+ **----------------------------------------------------------------
+ */
+ struct dsb phys;
+
+ /*----------------------------------------------------------------
+ ** The general SCSI driver provides a
+ ** pointer to a control block.
+ **----------------------------------------------------------------
+ */
+ Scsi_Cmnd *cmd; /* SCSI command */
+ u_char cdb_buf[16]; /* Copy of CDB */
+ u_char sense_buf[64];
+ int data_len; /* Total data length */
+ int segments; /* Number of SG segments */
+
+ /*----------------------------------------------------------------
+ ** Message areas.
+ ** We prepare a message to be sent after selection.
+ ** We may use a second one if the command is rescheduled
+ ** due to CHECK_CONDITION or QUEUE FULL status.
+ ** Contents are IDENTIFY and SIMPLE_TAG.
+ ** While negotiating sync or wide transfer,
+ ** a SDTR or WDTR message is appended.
+ **----------------------------------------------------------------
+ */
+ u_char scsi_smsg [12];
+ u_char scsi_smsg2[12];
+
+ /*----------------------------------------------------------------
+	**	Miscellaneous status fields.
+ **----------------------------------------------------------------
+ */
+ u_char nego_status; /* Negotiation status */
+ u_char xerr_status; /* Extended error flags */
+ u_int32 extra_bytes; /* Extraneous bytes transferred */
+
+ /*----------------------------------------------------------------
+ ** Saved info for auto-sense
+ **----------------------------------------------------------------
+ */
+ u_char sv_scsi_status;
+ u_char sv_xerr_status;
+
+ /*----------------------------------------------------------------
+ ** Other fields.
+ **----------------------------------------------------------------
+ */
+ u_long p_ccb; /* BUS address of this CCB */
+ u_char sensecmd[6]; /* Sense command */
+ u_char to_abort; /* This CCB is to be aborted */
+ u_short tag; /* Tag for this transfer */
+ /* NO_TAG means no tag */
+ u_char tags_si; /* Lun tags sum index (0,1) */
+
+ u_char target;
+ u_char lun;
+ u_short queued;
+ ccb_p link_ccb; /* Host adapter CCB chain */
+ ccb_p link_ccbh; /* Host adapter CCB hash chain */
+ XPT_QUEHEAD link_ccbq; /* Link to unit CCB queue */
+ u_int32 startp; /* Initial data pointer */
+ u_int32 lastp0; /* Initial 'lastp' */
+ int ext_sg; /* Extreme data pointer, used */
+ int ext_ofs; /* to calculate the residual. */
+ int resid;
+};
+
+#define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
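+
+/*
+**	Illustrative example: CCB_PHYS() yields the bus address of a CCB
+**	field, e.g. CCB_PHYS(cp, phys) is the bus address of the DSA-
+**	addressed data structure block of 'cp' (it equals cp->p_ccb since
+**	'phys' is the first member), and CCB_PHYS(cp, phys.header.savep)
+**	is the bus address of its saved data pointer.
+*/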
+
+
+/*========================================================================
+**
+** Declaration of structs: NCR device descriptor
+**
+**========================================================================
+*/
+struct ncb {
+ /*----------------------------------------------------------------
+ ** Idle task and invalid task actions and their bus
+ ** addresses.
+ **----------------------------------------------------------------
+ */
+ struct action idletask;
+ struct action notask;
+ struct action bad_i_t_l;
+ struct action bad_i_t_l_q;
+ u_long p_idletask;
+ u_long p_notask;
+ u_long p_bad_i_t_l;
+ u_long p_bad_i_t_l_q;
+
+ /*----------------------------------------------------------------
+	**	Dummy lun table to protect us against a target returning a
+	**	bad lun number on reselection.
+ **----------------------------------------------------------------
+ */
+ u_int32 *badluntbl; /* Table physical address */
+ u_int32 resel_badlun; /* SCRIPT handler BUS address */
+
+ /*----------------------------------------------------------------
+	**	Bits 32-63 of the on-chip RAM bus address in LE format.
+ ** The START_RAM64 script loads the MMRS and MMWS from this
+ ** field.
+ **----------------------------------------------------------------
+ */
+ u_int32 scr_ram_seg;
+
+ /*----------------------------------------------------------------
+ ** CCBs management queues.
+ **----------------------------------------------------------------
+ */
+ Scsi_Cmnd *waiting_list; /* Commands waiting for a CCB */
+ /* when lcb is not allocated. */
+ Scsi_Cmnd *done_list; /* Commands waiting for done() */
+ /* callback to be invoked. */
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+ spinlock_t smp_lock; /* Lock for SMP threading */
+#endif
+
+ /*----------------------------------------------------------------
+	**	Chip and controller identification.
+ **----------------------------------------------------------------
+ */
+ int unit; /* Unit number */
+ char chip_name[8]; /* Chip name */
+ char inst_name[16]; /* ncb instance name */
+
+ /*----------------------------------------------------------------
+ ** Initial value of some IO register bits.
+ ** These values are assumed to have been set by BIOS, and may
+ ** be used for probing adapter implementation differences.
+ **----------------------------------------------------------------
+ */
+ u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
+ sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_stest1, sv_scntl4;
+
+ /*----------------------------------------------------------------
+ ** Actual initial value of IO register bits used by the
+ ** driver. They are loaded at initialisation according to
+ ** features that are to be enabled.
+ **----------------------------------------------------------------
+ */
+ u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
+ rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;
+
+ /*----------------------------------------------------------------
+ ** Target data.
+ ** Target control block bus address array used by the SCRIPT
+ ** on reselection.
+ **----------------------------------------------------------------
+ */
+ struct tcb target[MAX_TARGET];
+ u_int32 *targtbl;
+
+ /*----------------------------------------------------------------
+ ** Virtual and physical bus addresses of the chip.
+ **----------------------------------------------------------------
+ */
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ u_long base_va; /* MMIO base virtual address */
+ u_long base2_va; /* On-chip RAM virtual address */
+#endif
+ u_long base_ba; /* MMIO base bus address */
+ u_long base_io; /* IO space base address */
+ u_long base_ws; /* (MM)IO window size */
+ u_long base2_ba; /* On-chip RAM bus address */
+ u_long base2_ws; /* On-chip RAM window size */
+ u_int irq; /* IRQ number */
+ volatile /* Pointer to volatile for */
+ struct ncr_reg *reg; /* memory mapped IO. */
+
+ /*----------------------------------------------------------------
+ ** SCRIPTS virtual and physical bus addresses.
+ ** 'script' is loaded in the on-chip RAM if present.
+ ** 'scripth' stays in main memory for all chips except the
+ ** 53C895A and 53C896 that provide 8K on-chip RAM.
+ **----------------------------------------------------------------
+ */
+ struct script *script0; /* Copies of script and scripth */
+ struct scripth *scripth0; /* relocated for this ncb. */
+ u_long p_script; /* Actual script and scripth */
+ u_long p_scripth; /* bus addresses. */
+ u_long p_scripth0;
+
+ /*----------------------------------------------------------------
+ ** General controller parameters and configuration.
+ **----------------------------------------------------------------
+ */
+ pcidev_t pdev;
+ u_short device_id; /* PCI device id */
+ u_char revision_id; /* PCI device revision id */
+ u_char bus; /* PCI BUS number */
+ u_char device_fn; /* PCI BUS device and function */
+ u_char myaddr; /* SCSI id of the adapter */
+ u_char maxburst; /* log base 2 of dwords burst */
+ u_char maxwide; /* Maximum transfer width */
+ u_char minsync; /* Minimum sync period factor */
+ u_char maxsync; /* Maximum sync period factor */
+ u_char maxoffs; /* Max scsi offset */
+ u_char multiplier; /* Clock multiplier (1,2,4) */
+ u_char clock_divn; /* Number of clock divisors */
+ u_long clock_khz; /* SCSI clock frequency in KHz */
+ u_int features; /* Chip features map */
+
+ /*----------------------------------------------------------------
+ ** Range for the PCI clock frequency measurement result
+ ** that ensures the algorithm used by the driver can be
+ ** trusted for the SCSI clock frequency measurement.
+ ** (Assuming a PCI clock frequency of 33 MHz).
+ **----------------------------------------------------------------
+ */
+ u_int pciclock_min;
+ u_int pciclock_max;
+
+ /*----------------------------------------------------------------
+ ** Start queue management.
+ ** It is filled up by the host processor and accessed by the
+ ** SCRIPTS processor in order to start SCSI commands.
+ **----------------------------------------------------------------
+ */
+ u_long p_squeue; /* Start queue BUS address */
+ u_int32 *squeue; /* Start queue virtual address */
+ u_short squeueput; /* Next free slot of the queue */
+ u_short actccbs; /* Number of allocated CCBs */
+ u_short queuedepth; /* Start queue depth */
+
+ /*----------------------------------------------------------------
+ ** Command completion queue.
+ ** It is the same size as the start queue to avoid overflow.
+ **----------------------------------------------------------------
+ */
+ u_short dqueueget; /* Next position to scan */
+ u_int32 *dqueue; /* Completion (done) queue */
+
+ /*----------------------------------------------------------------
+ ** Timeout handler.
+ **----------------------------------------------------------------
+ */
+ struct timer_list timer; /* Timer handler link header */
+ u_long lasttime;
+ u_long settle_time; /* Resetting the SCSI BUS */
+
+ /*----------------------------------------------------------------
+ ** Debugging and profiling.
+ **----------------------------------------------------------------
+ */
+ struct ncr_reg regdump; /* Register dump */
+ u_long regtime; /* Time it has been done */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous buffers accessed by the scripts-processor.
+ ** They shall be DWORD aligned, because they may be read or
+ ** written with a script command.
+ **----------------------------------------------------------------
+ */
+ u_char msgout[12]; /* Buffer for MESSAGE OUT */
+ u_char msgin [12]; /* Buffer for MESSAGE IN */
+ u_int32 lastmsg; /* Last SCSI message sent */
+ u_char scratch; /* Scratch for SCSI receive */
+
+ /*----------------------------------------------------------------
+ ** Miscellaneous configuration and status parameters.
+ **----------------------------------------------------------------
+ */
+ u_char scsi_mode; /* Current SCSI BUS mode */
+ u_char order; /* Tag order to use */
+ u_char verbose; /* Verbosity for this controller*/
+ u_int32 ncr_cache; /* Used for cache test at init. */
+ u_long p_ncb; /* BUS address of this NCB */
+
+ /*----------------------------------------------------------------
+ ** CCB lists and queue.
+ **----------------------------------------------------------------
+ */
+ ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */
+ struct ccb *ccbc; /* CCB chain */
+ XPT_QUEHEAD free_ccbq; /* Queue of available CCBs */
+
+ /*----------------------------------------------------------------
+ ** IMMEDIATE ARBITRATION (IARB) control.
+ ** We keep track in 'last_cp' of the last CCB that has been
+ ** queued to the SCRIPTS processor and clear 'last_cp' when
+ ** this CCB completes. If last_cp is not zero at the moment
+ ** we queue a new CCB, we set a flag in 'last_cp' that is
+ ** used by the SCRIPTS as a hint for setting IARB.
+	**	We do not set more than 'iarb_max' consecutive hints for
+	**	IARB in order to leave devices a chance to reselect.
+	**	By the way, any non-zero value of 'iarb_max' is unfair. :)
+ **----------------------------------------------------------------
+ */
+#ifdef SCSI_NCR_IARB_SUPPORT
+	struct ccb *last_cp;	/* Last queued CCB used for IARB */
+ u_short iarb_max; /* Max. # consecutive IARB hints*/
+ u_short iarb_count; /* Actual # of these hints */
+#endif
+
+ /*----------------------------------------------------------------
+ ** We need the LCB in order to handle disconnections and
+ ** to count active CCBs for task management. So, we use
+	**	a single CCB for LUNs for which we do not have an LCB yet.
+	**	This queue should normally have at most 1 element.
+ **----------------------------------------------------------------
+ */
+ XPT_QUEHEAD b0_ccbq;
+
+ /*----------------------------------------------------------------
+ ** We use a different scatter function for 896 rev 1.
+ **----------------------------------------------------------------
+ */
+ int (*scatter) (ncb_p, ccb_p, Scsi_Cmnd *);
+
+ /*----------------------------------------------------------------
+ ** Command abort handling.
+ ** We need to synchronize tightly with the SCRIPTS
+ ** processor in order to handle things correctly.
+ **----------------------------------------------------------------
+ */
+ u_char abrt_msg[4]; /* Message to send buffer */
+ struct scr_tblmove abrt_tbl; /* Table for the MOV of it */
+ struct scr_tblsel abrt_sel; /* Sync params for selection */
+ u_char istat_sem; /* Tells the chip to stop (SEM) */
+
+ /*----------------------------------------------------------------
+ ** Fields that should be removed or changed.
+ **----------------------------------------------------------------
+ */
+ struct usrcmd user; /* Command from user */
+ volatile u_char release_stage; /* Synchronisation stage on release */
+
+ /*----------------------------------------------------------------
+ ** Fields that are used (primarily) for integrity check
+ **----------------------------------------------------------------
+ */
+ unsigned char check_integrity; /* Enable midlayer integ. check on
+ * bus scan. */
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ unsigned char check_integ_par; /* Set if par or Init. Det. error
+ * used only during integ check */
+#endif
+};
+
+#define NCB_PHYS(np, lbl) (np->p_ncb + offsetof(struct ncb, lbl))
+#define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl))
+#define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl))
+#define NCB_SCRIPTH0_PHYS(np,lbl) (np->p_scripth0+offsetof (struct scripth,lbl))
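+
+/*
+**	Illustrative example: these macros give the bus addresses used to
+**	point the chip at driver data or script entry points, e.g.
+**	NCB_SCRIPT_PHYS(np, dispatch) is the bus address of the 'dispatch'
+**	label in the on-chip copy of the script, and NCB_PHYS(np, msgin)
+**	is the bus address of the MESSAGE IN buffer of this ncb.
+*/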
+
+/*==========================================================
+**
+**
+** Script for NCR-Processor.
+**
+** Use ncr_script_fill() to create the variable parts.
+** Use ncr_script_copy_and_bind() to make a copy and
+** bind to physical addresses.
+**
+**
+**==========================================================
+**
+** We have to know the offsets of all labels before
+** we reach them (for forward jumps).
+** Therefore we declare a struct here.
+** If you make changes inside the script,
+**	DON'T FORGET TO CHANGE THE LENGTHS HERE!
+**	(A sanity-check sketch follows the two struct declarations below.)
+**
+**----------------------------------------------------------
+*/
+
+/*
+** Script fragments which are loaded into the on-chip RAM
+** of 825A, 875, 876, 895, 895A and 896 chips.
+*/
+struct script {
+ ncrcmd start [ 14];
+ ncrcmd getjob_begin [ 4];
+ ncrcmd getjob_end [ 4];
+ ncrcmd select [ 8];
+ ncrcmd wf_sel_done [ 2];
+ ncrcmd send_ident [ 2];
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd select2 [ 8];
+#else
+ ncrcmd select2 [ 2];
+#endif
+ ncrcmd command [ 2];
+ ncrcmd dispatch [ 28];
+ ncrcmd sel_no_cmd [ 10];
+ ncrcmd init [ 6];
+ ncrcmd clrack [ 4];
+ ncrcmd disp_status [ 4];
+ ncrcmd datai_done [ 26];
+ ncrcmd datao_done [ 12];
+ ncrcmd ign_i_w_r_msg [ 4];
+ ncrcmd datai_phase [ 2];
+ ncrcmd datao_phase [ 4];
+ ncrcmd msg_in [ 2];
+ ncrcmd msg_in2 [ 10];
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd status [ 14];
+#else
+ ncrcmd status [ 10];
+#endif
+ ncrcmd complete [ 8];
+#ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+ ncrcmd complete2 [ 12];
+#else
+ ncrcmd complete2 [ 10];
+#endif
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ ncrcmd done [ 18];
+#else
+ ncrcmd done [ 14];
+#endif
+ ncrcmd done_end [ 2];
+ ncrcmd save_dp [ 8];
+ ncrcmd restore_dp [ 4];
+ ncrcmd disconnect [ 20];
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd idle [ 4];
+#else
+ ncrcmd idle [ 2];
+#endif
+#ifdef SCSI_NCR_IARB_SUPPORT
+ ncrcmd ungetjob [ 6];
+#else
+ ncrcmd ungetjob [ 4];
+#endif
+ ncrcmd reselect [ 4];
+ ncrcmd reselected [ 20];
+ ncrcmd resel_scntl4 [ 30];
+#if MAX_TASKS*4 > 512
+ ncrcmd resel_tag [ 18];
+#elif MAX_TASKS*4 > 256
+ ncrcmd resel_tag [ 12];
+#else
+ ncrcmd resel_tag [ 8];
+#endif
+ ncrcmd resel_go [ 6];
+ ncrcmd resel_notag [ 2];
+ ncrcmd resel_dsa [ 8];
+ ncrcmd data_in [MAX_SCATTER * SCR_SG_SIZE];
+ ncrcmd data_in2 [ 4];
+ ncrcmd data_out [MAX_SCATTER * SCR_SG_SIZE];
+ ncrcmd data_out2 [ 4];
+ ncrcmd pm0_data [ 12];
+ ncrcmd pm0_data_out [ 6];
+ ncrcmd pm0_data_end [ 6];
+ ncrcmd pm1_data [ 12];
+ ncrcmd pm1_data_out [ 6];
+ ncrcmd pm1_data_end [ 6];
+};
+
+/*
+** Script fragments which stay in main memory for all chips
+** except for the 895A and 896 that support 8K on-chip RAM.
+*/
+struct scripth {
+ ncrcmd start64 [ 2];
+ ncrcmd no_data [ 2];
+ ncrcmd sel_for_abort [ 18];
+ ncrcmd sel_for_abort_1 [ 2];
+ ncrcmd select_no_atn [ 8];
+ ncrcmd wf_sel_done_no_atn [ 4];
+
+ ncrcmd msg_in_etc [ 14];
+ ncrcmd msg_received [ 4];
+ ncrcmd msg_weird_seen [ 4];
+ ncrcmd msg_extended [ 20];
+ ncrcmd msg_bad [ 6];
+ ncrcmd msg_weird [ 4];
+ ncrcmd msg_weird1 [ 8];
+
+ ncrcmd wdtr_resp [ 6];
+ ncrcmd send_wdtr [ 4];
+ ncrcmd sdtr_resp [ 6];
+ ncrcmd send_sdtr [ 4];
+ ncrcmd ppr_resp [ 6];
+ ncrcmd send_ppr [ 4];
+ ncrcmd nego_bad_phase [ 4];
+ ncrcmd msg_out [ 4];
+ ncrcmd msg_out_done [ 4];
+ ncrcmd data_ovrun [ 2];
+ ncrcmd data_ovrun1 [ 22];
+ ncrcmd data_ovrun2 [ 8];
+ ncrcmd abort_resel [ 16];
+ ncrcmd resend_ident [ 4];
+ ncrcmd ident_break [ 4];
+ ncrcmd ident_break_atn [ 4];
+ ncrcmd sdata_in [ 6];
+ ncrcmd data_io [ 2];
+ ncrcmd data_io_com [ 8];
+ ncrcmd data_io_out [ 12];
+ ncrcmd resel_bad_lun [ 4];
+ ncrcmd bad_i_t_l [ 4];
+ ncrcmd bad_i_t_l_q [ 4];
+ ncrcmd bad_status [ 6];
+ ncrcmd tweak_pmj [ 12];
+ ncrcmd pm_handle [ 20];
+ ncrcmd pm_handle1 [ 4];
+ ncrcmd pm_save [ 4];
+ ncrcmd pm0_save [ 14];
+ ncrcmd pm1_save [ 14];
+
+ /* WSR handling */
+#ifdef SYM_DEBUG_PM_WITH_WSR
+ ncrcmd pm_wsr_handle [ 44];
+#else
+ ncrcmd pm_wsr_handle [ 42];
+#endif
+ ncrcmd wsr_ma_helper [ 4];
+
+ /* Data area */
+ ncrcmd zero [ 1];
+ ncrcmd scratch [ 1];
+ ncrcmd scratch1 [ 1];
+ ncrcmd pm0_data_addr [ 1];
+ ncrcmd pm1_data_addr [ 1];
+ ncrcmd saved_dsa [ 1];
+ ncrcmd saved_drs [ 1];
+ ncrcmd done_pos [ 1];
+ ncrcmd startpos [ 1];
+ ncrcmd targtbl [ 1];
+ /* End of data area */
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ ncrcmd start_ram [ 1];
+ ncrcmd script0_ba [ 4];
+ ncrcmd start_ram64 [ 3];
+ ncrcmd script0_ba64 [ 3];
+ ncrcmd scripth0_ba64 [ 6];
+ ncrcmd ram_seg64 [ 1];
+#endif
+ ncrcmd snooptest [ 6];
+ ncrcmd snoopend [ 2];
+};
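+
+/*
+**	Illustrative sanity check (an assumption, not necessarily what the
+**	driver does): the per-label lengths declared above must match the
+**	script fragments defined further down, and the on-chip fragment
+**	must fit the assumed 4K RAM of the smaller chips (the 895A/896
+**	provide 8K).  A size mismatch could be caught at attach time
+**	before anything is copied to the chip, e.g.:
+**
+**	    if (sizeof(struct script) > 4096)
+**	        printk("%s: 'struct script' exceeds 4K on-chip RAM\n",
+**	               ncr_name(np));
+*/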
+
+/*==========================================================
+**
+**
+** Function headers.
+**
+**
+**==========================================================
+*/
+
+static ccb_p ncr_alloc_ccb (ncb_p np);
+static void ncr_complete (ncb_p np, ccb_p cp);
+static void ncr_exception (ncb_p np);
+static void ncr_free_ccb (ncb_p np, ccb_p cp);
+static ccb_p ncr_ccb_from_dsa(ncb_p np, u_long dsa);
+static void ncr_init_tcb (ncb_p np, u_char tn);
+static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln);
+static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln,
+ u_char *inq_data);
+static void ncr_getclock (ncb_p np, int mult);
+static u_int ncr_getpciclock (ncb_p np);
+static void ncr_selectclock (ncb_p np, u_char scntl3);
+static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln);
+static void ncr_init (ncb_p np, int reset, char * msg, u_long code);
+static void ncr_int_sbmc (ncb_p np);
+static void ncr_int_par (ncb_p np, u_short sist);
+static void ncr_int_ma (ncb_p np);
+static void ncr_int_sir (ncb_p np);
+static void ncr_int_sto (ncb_p np);
+static void ncr_int_udc (ncb_p np);
+static void ncr_negotiate (ncb_p np, tcb_p tp);
+static int ncr_prepare_nego(ncb_p np, ccb_p cp, u_char *msgptr);
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr);
+#endif
+static void ncr_script_copy_and_bind
+ (ncb_p np, ncrcmd *src, ncrcmd *dst, int len);
+static void ncr_script_fill (struct script * scr, struct scripth * scripth);
+static int ncr_scatter_896R1 (ncb_p np, ccb_p cp, Scsi_Cmnd *cmd);
+static int ncr_scatter (ncb_p np, ccb_p cp, Scsi_Cmnd *cmd);
+static void ncr_getsync (ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p);
+static void ncr_get_xfer_info(ncb_p np, tcb_p tp, u_char *factor, u_char *offset, u_char *width);
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4);
+static void ncr_set_sync_wide_status (ncb_p np, u_char target);
+static void ncr_setup_tags (ncb_p np, u_char tn, u_char ln);
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack);
+static void ncr_setsyncwide (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4, u_char wide);
+static int ncr_show_msg (u_char * msg);
+static void ncr_print_msg (ccb_p cp, char *label, u_char * msg);
+static int ncr_snooptest (ncb_p np);
+static void ncr_timeout (ncb_p np);
+static void ncr_wakeup (ncb_p np, u_long code);
+static int ncr_wakeup_done (ncb_p np);
+static void ncr_start_next_ccb (ncb_p np, lcb_p lp, int maxn);
+static void ncr_put_start_queue(ncb_p np, ccb_p cp);
+static void ncr_chip_reset (ncb_p np);
+static void ncr_soft_reset (ncb_p np);
+static void ncr_start_reset (ncb_p np);
+static int ncr_reset_scsi_bus (ncb_p np, int enab_int, int settle_delay);
+static int ncr_compute_residual (ncb_p np, ccb_p cp);
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+static void ncr_usercmd (ncb_p np);
+#endif
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+static void ncr_free_resources(ncb_p np);
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd);
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd);
+static void process_waiting_list(ncb_p np, int sts);
+
+#define remove_from_waiting_list(np, cmd) \
+ retrieve_from_waiting_list(1, (np), (cmd))
+#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
+#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static void ncr_get_nvram (ncr_device *devp, ncr_nvram *nvp);
+static int sym_read_Tekram_nvram (ncr_slot *np, u_short device_id,
+ Tekram_nvram *nvram);
+static int sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram);
+#endif
+
+/*==========================================================
+**
+**
+** Global static data.
+**
+**
+**==========================================================
+*/
+
+static inline char *ncr_name (ncb_p np)
+{
+ return np->inst_name;
+}
+
+
+/*==========================================================
+**
+**
+** Scripts for NCR-Processor.
+**
+**	Use ncr_script_copy_and_bind() for binding to physical addresses.
+**
+**
+**==========================================================
+**
+** NADDR generates a reference to a field of the controller data.
+** PADDR generates a reference to another part of the script.
+** RADDR generates a reference to a script processor register.
+** FADDR generates a reference to a script processor register
+** with offset.
+**
+**----------------------------------------------------------
+*/
+
+#define RELOC_SOFTC 0x40000000
+#define RELOC_LABEL 0x50000000
+#define RELOC_REGISTER 0x60000000
+#if 0
+#define RELOC_KVAR 0x70000000
+#endif
+#define RELOC_LABELH 0x80000000
+#define RELOC_MASK 0xf0000000
+
+#define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label))
+#define PADDR(label) (RELOC_LABEL | offsetof(struct script, label))
+#define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label))
+#define RADDR(label) (RELOC_REGISTER | REG(label))
+#define FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
+#define KVAR(which) (RELOC_KVAR | (which))
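+
+/*
+**	Rough sketch of how these relocation tokens are resolved when the
+**	scripts are copied (the real work, including opcode decoding and
+**	byte ordering, is done by ncr_script_copy_and_bind(); the use of
+**	np->base_ba for register references is an assumption):
+**
+**	    u_int32 v = *src++;
+**	    switch (v & RELOC_MASK) {
+**	    case RELOC_SOFTC:
+**	        v = (v & ~RELOC_MASK) + np->p_ncb;      break;
+**	    case RELOC_LABEL:
+**	        v = (v & ~RELOC_MASK) + np->p_script;   break;
+**	    case RELOC_LABELH:
+**	        v = (v & ~RELOC_MASK) + np->p_scripth;  break;
+**	    case RELOC_REGISTER:
+**	        v = (v & ~RELOC_MASK) + np->base_ba;    break;
+**	    default:
+**	        break;
+**	    }
+**	    *dst++ = v;
+*/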
+
+#define SCR_DATA_ZERO 0xf00ff00f
+
+#ifdef RELOC_KVAR
+#define SCRIPT_KVAR_JIFFIES (0)
+#define SCRIPT_KVAR_FIRST SCRIPT_KVAR_JIFFIES
+#define SCRIPT_KVAR_LAST SCRIPT_KVAR_JIFFIES
+/*
+ * Kernel variables referenced in the scripts.
+ * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
+ */
+static void *script_kvars[] __initdata =
+ { (void *)&jiffies };
+#endif
+
+static struct script script0 __initdata = {
+/*--------------------------< START >-----------------------*/ {
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** Clear SIGP.
+ */
+ SCR_FROM_REG (ctest2),
+ 0,
+
+ /*
+ ** Stop here if the C code wants to perform
+ ** some error recovery procedure manually.
+ ** (Indicate this by setting SEM in ISTAT)
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ /*
+ ** Report to the C code the next position in
+ ** the start queue the SCRIPTS will schedule.
+ ** The C code must not change SCRATCHA.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (startpos),
+ SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
+ SIR_SCRIPT_STOPPED,
+
+ /*
+ ** Start the next job.
+ **
+ ** @DSA = start point for this job.
+ ** SCRATCHA = address of this job in the start queue.
+ **
+	**	We will restore startpos with SCRATCHA if we fail
+	**	arbitration or if it is the idle job.
+ **
+ ** The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS
+	**	is a critical path. If it is partially executed, it may then
+	**	happen that the job address is not yet in the DSA
+	**	and the next queue position points to the next JOB.
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (startpos),
+ SCR_LOAD_REL (temp, 4),
+ 4,
+}/*-------------------------< GETJOB_BEGIN >------------------*/,{
+ SCR_STORE_ABS (temp, 4),
+ PADDRH (startpos),
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+}/*-------------------------< GETJOB_END >--------------------*/,{
+ SCR_LOAD_REL (temp, 4),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< SELECT >----------------------*/,{
+ /*
+ ** DSA contains the address of a scheduled
+ ** data structure.
+ **
+ ** SCRATCHA contains the address of the start queue
+ ** entry which points to the next job.
+ **
+ ** Set Initiator mode.
+ **
+ ** (Target mode is left as an exercise for the reader)
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
+ PADDR (ungetjob),
+ /*
+ ** Now there are 4 possibilities:
+ **
+	**	(1) The ncr loses arbitration.
+ ** This is ok, because it will try again,
+ ** when the bus becomes idle.
+ ** (But beware of the timeout function!)
+ **
+ ** (2) The ncr is reselected.
+ ** Then the script processor takes the jump
+ ** to the RESELECT label.
+ **
+ ** (3) The ncr wins arbitration.
+ ** Then it will execute SCRIPTS instruction until
+ ** the next instruction that checks SCSI phase.
+ ** Then will stop and wait for selection to be
+ ** complete or selection time-out to occur.
+ **
+ ** After having won arbitration, the ncr SCRIPTS
+ ** processor is able to execute instructions while
+ ** the SCSI core is performing SCSI selection. But
+	**	any script instruction that does not wait for
+	**	a valid phase (or selection timeout) to occur
+	**	breaks the selection procedure, probably by
+	**	violating timing requirements.
+ ** So we have to wait immediately for the next phase
+ ** or the selection to complete or time-out.
+ */
+
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+}/*-------------------------< WF_SEL_DONE >----------------------*/,{
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ SIR_SEL_ATN_NO_MSG_OUT,
+}/*-------------------------< SEND_IDENT >----------------------*/,{
+ /*
+ ** Selection complete.
+ ** Send the IDENTIFY and SIMPLE_TAG messages
+ ** (and the M_X_SYNC_REQ / M_X_WIDE_REQ message)
+ */
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct dsb, smsg),
+}/*-------------------------< SELECT2 >----------------------*/,{
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** Set IMMEDIATE ARBITRATION if we have been given
+ ** a hint to do so. (Some job to do after this one).
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ ** Anticipate the COMMAND phase.
+ ** This is the PHASE we expect at this point.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_COMMAND)),
+ PADDR (sel_no_cmd),
+
+}/*-------------------------< COMMAND >--------------------*/,{
+ /*
+ ** ... and send the command
+ */
+ SCR_MOVE_TBL ^ SCR_COMMAND,
+ offsetof (struct dsb, cmd),
+
+}/*-----------------------< DISPATCH >----------------------*/,{
+ /*
+ ** MSG_IN is the only phase that shall be
+ ** entered at least once for each (re)selection.
+ ** So we test it first.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (msg_in),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_OUT)),
+ PADDR (datao_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_DATA_IN)),
+ PADDR (datai_phase),
+ SCR_JUMP ^ IFTRUE (IF (SCR_STATUS)),
+ PADDR (status),
+ SCR_JUMP ^ IFTRUE (IF (SCR_COMMAND)),
+ PADDR (command),
+ SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
+ PADDRH (msg_out),
+ /*
+ * Discard as many illegal phases as
+	 * required and tell the C code about it.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+ -16,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+ 16,
+ SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
+ NADDR (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+ -16,
+ SCR_INT,
+ SIR_BAD_PHASE,
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*---------------------< SEL_NO_CMD >----------------------*/,{
+ /*
+ ** The target does not switch to command
+ ** phase after IDENTIFY has been sent.
+ **
+ ** If it stays in MSG OUT phase send it
+ ** the IDENTIFY again.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDRH (resend_ident),
+ /*
+ ** If target does not switch to MSG IN phase
+ ** and we sent a negotiation, assert the
+ ** failure immediately.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ SCR_FROM_REG (HS_REG),
+ 0,
+ SCR_INT ^ IFTRUE (DATA (HS_NEGOTIATE)),
+ SIR_NEGO_FAILED,
+ /*
+ ** Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< INIT >------------------------*/,{
+ /*
+ ** Wait for the SCSI RESET signal to be
+ ** inactive before restarting operations,
+ ** since the chip may hang on SEL_ATN
+ ** if SCSI RESET is active.
+ */
+ SCR_FROM_REG (sstat0),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (IRST, IRST)),
+ -16,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< CLRACK >----------------------*/,{
+ /*
+ ** Terminate possible pending message phase.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DISP_STATUS >----------------------*/,{
+ /*
+ ** Anticipate STATUS phase.
+ **
+	**	This spares 3 SCRIPTS instructions when we have
+	**	completed the INPUT of the data.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_STATUS)),
+ PADDR (status),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATAI_DONE >-------------------*/,{
+ /*
+ * If the device wants us to send more data,
+ * we must count the extra bytes.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+ ** If the SWIDE is not full, jump to dispatcher.
+ ** We anticipate a STATUS phase.
+	**	If we later get an IGNORE WIDE RESIDUE, we
+ ** will alias it as a MODIFY DP (-1).
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (WSR, WSR)),
+ PADDR (disp_status),
+ /*
+ ** The SWIDE is full.
+ ** Clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ /*
+ * We are expecting an IGNORE RESIDUE message
+ * from the device, otherwise we are in data
+ * overrun condition. Check against MSG_IN phase.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (disp_status),
+ /*
+ * We are in MSG_IN phase,
+ * Read the first byte of the message.
+ * If it is not an IGNORE RESIDUE message,
+ * signal overrun and jump to message
+ * processing.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+ SCR_INT ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ SIR_SWIDE_OVERRUN,
+ SCR_JUMP ^ IFFALSE (DATA (M_IGN_RESIDUE)),
+ PADDR (msg_in2),
+
+ /*
+ * We got the message we expected.
+ * Read the 2nd byte, and jump to dispatcher.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP,
+ PADDR (disp_status),
+
+}/*-------------------------< DATAO_DONE >-------------------*/,{
+ /*
+ * If the device wants us to send more data,
+ * we must count the extra bytes.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ PADDRH (data_ovrun),
+ /*
+ ** If the SODL is not full jump to dispatcher.
+ ** We anticipate a MSG IN phase or a STATUS phase.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (WSS, WSS)),
+ PADDR (disp_status),
+ /*
+ ** The SODL is full, clear this condition.
+ */
+ SCR_REG_REG (scntl2, SCR_OR, WSS),
+ 0,
+ /*
+ ** And signal a DATA UNDERRUN condition
+ ** to the C code.
+ */
+ SCR_INT,
+ SIR_SODL_UNDERRUN,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< IGN_I_W_R_MSG >--------------*/,{
+ /*
+ ** We jump here from the phase mismatch interrupt,
+	**	when we have a SWIDE and the device has presented
+	**	an IGNORE WIDE RESIDUE message on the BUS.
+ ** We just have to throw away this message and then
+ ** to jump to dispatcher.
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (scratch),
+ /*
+ ** Clear ACK and jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DATAI_PHASE >------------------*/,{
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATAO_PHASE >------------------*/,{
+ /*
+ ** Patch for 53c1010_66 only - to allow A0 part
+ ** to operate properly in a 33MHz PCI bus.
+ **
+ ** SCR_REG_REG(scntl4, SCR_OR, 0x0c),
+ ** 0,
+ */
+ SCR_NO_OP,
+ 0,
+ SCR_RETURN,
+ 0,
+}/*-------------------------< MSG_IN >--------------------*/,{
+ /*
+ ** Get the first byte of the message.
+ **
+ ** The script processor doesn't negate the
+ ** ACK signal after this transfer.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[0]),
+}/*-------------------------< MSG_IN2 >--------------------*/,{
+ /*
+ ** Check first against 1 byte messages
+ ** that we handle from SCRIPTS.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_COMPLETE)),
+ PADDR (complete),
+ SCR_JUMP ^ IFTRUE (DATA (M_DISCONNECT)),
+ PADDR (disconnect),
+ SCR_JUMP ^ IFTRUE (DATA (M_SAVE_DP)),
+ PADDR (save_dp),
+ SCR_JUMP ^ IFTRUE (DATA (M_RESTORE_DP)),
+ PADDR (restore_dp),
+ /*
+ ** We handle all other messages from the
+ ** C code, so no need to waste on-chip RAM
+ ** for those ones.
+ */
+ SCR_JUMP,
+ PADDRH (msg_in_etc),
+
+}/*-------------------------< STATUS >--------------------*/,{
+ /*
+ ** get the status
+ */
+ SCR_MOVE_ABS (1) ^ SCR_STATUS,
+ NADDR (scratch),
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If STATUS is not GOOD, clear IMMEDIATE ARBITRATION,
+	**	since we may have to tamper with the start queue from
+ ** the C code.
+ */
+ SCR_JUMPR ^ IFTRUE (DATA (S_GOOD)),
+ 8,
+ SCR_REG_REG (scntl1, SCR_AND, ~IARB),
+ 0,
+#endif
+ /*
+ ** save status to scsi_status.
+ ** mark as complete.
+ */
+ SCR_TO_REG (SS_REG),
+ 0,
+ SCR_LOAD_REG (HS_REG, HS_COMPLETE),
+ 0,
+ /*
+ ** Anticipate the MESSAGE PHASE for
+ ** the TASK COMPLETE message.
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN)),
+ PADDR (msg_in),
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< COMPLETE >-----------------*/,{
+ /*
+ ** Complete message.
+ **
+ ** Copy the data pointer to LASTP in header.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ /*
+ ** When we terminate the cycle by clearing ACK,
+ ** the target may disconnect immediately.
+ **
+ ** We don't want to be told of an
+ ** "unexpected disconnect",
+ ** so we disable this feature.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ /*
+ ** Terminate cycle ...
+ */
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** ... and wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+}/*-------------------------< COMPLETE2 >-----------------*/,{
+ /*
+ ** Save host status to header.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+#ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+ /*
+ ** Some bridges may reorder DMA writes to memory.
+	**	We do not want the CPU to deal with completions
+	**	before all the posted writes have been flushed
+ ** to memory. This DUMMY READ should flush posted
+ ** buffers prior to the CPU having to deal with
+ ** completions.
+ */
+ SCR_LOAD_REL (scr0, 4), /* DUMMY READ */
+ offsetof (struct ccb, phys.header.status),
+#endif
+ /*
+ ** If command resulted in not GOOD status,
+ ** call the C code if needed.
+ */
+ SCR_FROM_REG (SS_REG),
+ 0,
+ SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ PADDRH (bad_status),
+
+ /*
+ ** If we performed an auto-sense, call
+	**	the C code to synchronize task aborts
+ ** with UNIT ATTENTION conditions.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ SCR_INT ^ IFTRUE (MASK (HF_AUTO_SENSE, HF_AUTO_SENSE)),
+ SIR_AUTO_SENSE_DONE,
+
+}/*------------------------< DONE >-----------------*/,{
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ /*
+ ** It seems that some bridges flush everything
+ ** when the INTR line is raised. For these ones,
+ ** we can just ensure that the INTR line will be
+ ** raised before each completion. So, if it happens
+	**	that we have been faster than the CPU, we just
+ ** have to synchronize with it. A dummy programmed
+ ** interrupt will do the trick.
+ ** Note that we overlap at most 1 IO with the CPU
+ ** in this situation and that the IRQ line must not
+ ** be shared.
+ */
+ SCR_FROM_REG (istat),
+ 0,
+ SCR_INT ^ IFTRUE (MASK (INTF, INTF)),
+ SIR_DUMMY_INTERRUPT,
+#endif
+ /*
+ ** Copy the DSA to the DONE QUEUE and
+ ** signal completion to the host.
+ ** If we are interrupted between DONE
+ ** and DONE_END, we must reset, otherwise
+ ** the completed CCB will be lost.
+ */
+ SCR_STORE_ABS (dsa, 4),
+ PADDRH (saved_dsa),
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (done_pos),
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (saved_dsa),
+ SCR_STORE_REL (scratcha, 4),
+ 0,
+ /*
+ ** The instruction below reads the DONE QUEUE next
+ ** free position from memory.
+ ** In addition it ensures that all PCI posted writes
+ ** are flushed and so the DSA value of the done
+ ** CCB is visible by the CPU before INTFLY is raised.
+ */
+ SCR_LOAD_REL (temp, 4),
+ 4,
+ SCR_INT_FLY,
+ 0,
+ SCR_STORE_ABS (temp, 4),
+ PADDRH (done_pos),
+}/*------------------------< DONE_END >-----------------*/,{
+ SCR_JUMP,
+ PADDR (start),
+
+}/*-------------------------< SAVE_DP >------------------*/,{
+ /*
+ ** Clear ACK immediately.
+ ** No need to delay it.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+	**	Keep track that we received a SAVE DP, so
+ ** we will switch to the other PM context
+ ** on the next PM since the DP may point
+ ** to the current PM context.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ /*
+ ** SAVE_DP message:
+ ** Copy the data pointer to SAVEP in header.
+ */
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< RESTORE_DP >---------------*/,{
+ /*
+ ** RESTORE_DP message:
+ ** Copy SAVEP in header to actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< DISCONNECT >---------------*/,{
+ /*
+ ** DISCONNECTing ...
+ **
+ ** disable the "unexpected disconnect" feature,
+ ** and remove the ACK signal.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ /*
+ ** Wait for the disconnect.
+ */
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Status is: DISCONNECTED.
+ */
+ SCR_LOAD_REG (HS_REG, HS_DISCONNECT),
+ 0,
+ /*
+ ** Save host status to header.
+ */
+ SCR_STORE_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+ /*
+ ** If QUIRK_AUTOSAVE is set,
+ ** do an "save pointer" operation.
+ */
+ SCR_FROM_REG (QU_REG),
+ 0,
+ SCR_JUMP ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE)),
+ PADDR (start),
+ /*
+ ** like SAVE_DP message:
+ ** Remember we saved the data pointer.
+ ** Copy data pointer to SAVEP in header.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED),
+ 0,
+ SCR_STORE_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_JUMP,
+ PADDR (start),
+
+}/*-------------------------< IDLE >------------------------*/,{
+ /*
+ ** Nothing to do?
+ ** Wait for reselect.
+ ** This NOP will be patched with LED OFF
+ ** SCR_REG_REG (gpreg, SCR_OR, 0x01)
+ */
+ SCR_NO_OP,
+ 0,
+#ifdef SCSI_NCR_IARB_SUPPORT
+ SCR_JUMPR,
+ 8,
+#endif
+}/*-------------------------< UNGETJOB >-----------------*/,{
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** Set IMMEDIATE ARBITRATION, for the next time.
+ ** This will give us better chance to win arbitration
+ ** for the job we just wanted to do.
+ */
+ SCR_REG_REG (scntl1, SCR_OR, IARB),
+ 0,
+#endif
+ /*
+ ** We are not able to restart the SCRIPTS if we are
+	**	interrupted and these instructions haven't all
+	**	been executed. BTW, this is very unlikely to
+ ** happen, but we check that from the C code.
+ */
+ SCR_LOAD_REG (dsa, 0xff),
+ 0,
+ SCR_STORE_ABS (scratcha, 4),
+ PADDRH (startpos),
+}/*-------------------------< RESELECT >--------------------*/,{
+ /*
+ ** make the host status invalid.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** Sleep waiting for a reselection.
+ ** If SIGP is set, special treatment.
+ **
+	**	Ready for anything ..
+ */
+ SCR_WAIT_RESEL,
+ PADDR(start),
+}/*-------------------------< RESELECTED >------------------*/,{
+ /*
+ ** This NOP will be patched with LED ON
+ ** SCR_REG_REG (gpreg, SCR_AND, 0xfe)
+ */
+ SCR_NO_OP,
+ 0,
+ /*
+ ** load the target id into the sdid
+ */
+ SCR_REG_SFBR (ssid, SCR_AND, 0x8F),
+ 0,
+ SCR_TO_REG (sdid),
+ 0,
+ /*
+ ** load the target control block address
+ */
+ SCR_LOAD_ABS (dsa, 4),
+ PADDRH (targtbl),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0x3c),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ /*
+ ** Load the synchronous transfer registers.
+ */
+ SCR_LOAD_REL (scntl3, 1),
+ offsetof(struct tcb, wval),
+ SCR_LOAD_REL (sxfer, 1),
+ offsetof(struct tcb, sval),
+}/*-------------------------< RESEL_SCNTL4 >------------------*/,{
+ /*
+ ** Write with uval value. Patch if device
+ ** does not support Ultra3.
+ **
+ ** SCR_LOAD_REL (scntl4, 1),
+ ** offsetof(struct tcb, uval),
+ */
+
+ SCR_NO_OP,
+ 0,
+ /*
+ * We expect MESSAGE IN phase.
+ * If not, get help from the C code.
+ */
+ SCR_INT ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ SIR_RESEL_NO_MSG_IN,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin),
+
+ /*
+ * If IDENTIFY LUN #0, use a faster path
+ * to find the LCB structure.
+ */
+ SCR_JUMPR ^ IFTRUE (MASK (0x80, 0xbf)),
+ 56,
+ /*
+ * If message isn't an IDENTIFY,
+ * tell the C code about.
+ */
+ SCR_INT ^ IFFALSE (MASK (0x80, 0x80)),
+ SIR_RESEL_NO_IDENTIFY,
+ /*
+ * It is an IDENTIFY message,
+ * Load the LUN control block address.
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct tcb, b_luntbl),
+ SCR_SFBR_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_SHL, 0),
+ 0,
+ SCR_REG_REG (dsa, SCR_AND, 0xfc),
+ 0,
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_JUMPR,
+ 8,
+ /*
+ ** LUN 0 special case (but usual one :))
+ */
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct tcb, b_lun0),
+
+ /*
+ ** Load the reselect task action for this LUN.
+ ** Load the tasks DSA array for this LUN.
+ ** Call the action.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct lcb, resel_task),
+ SCR_LOAD_REL (dsa, 4),
+ offsetof(struct lcb, b_tasktbl),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< RESEL_TAG >-------------------*/,{
+ /*
+ ** ACK the IDENTIFY or TAG previously received
+ */
+
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** Read IDENTIFY + SIMPLE + TAG using a single MOVE.
+	**	Aggressive optimization, isn't it?
+ ** No need to test the SIMPLE TAG message, since the
+ ** driver only supports conformant devices for tags. ;-)
+ */
+ SCR_MOVE_ABS (2) ^ SCR_MSG_IN,
+ NADDR (msgin),
+ /*
+ ** Read the TAG from the SIDL.
+ ** Still an aggressive optimization. ;-)
+ ** Compute the CCB indirect jump address which
+ ** is (#TAG*2 & 0xfc) due to tag numbering using
+ ** 1,3,5..MAXTAGS*2+1 actual values.
+ */
+ SCR_REG_SFBR (sidl, SCR_SHL, 0),
+ 0,
+#if MAX_TASKS*4 > 512
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 2),
+ 0,
+ SCR_REG_REG (sfbr, SCR_SHL, 0),
+ 0,
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#elif MAX_TASKS*4 > 256
+ SCR_JUMPR ^ IFFALSE (CARRYSET),
+ 8,
+ SCR_REG_REG (dsa1, SCR_OR, 1),
+ 0,
+#endif
+ /*
+ ** Retrieve the DSA of this task.
+ ** JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_SFBR_REG (dsa, SCR_AND, 0xfc),
+ 0,
+}/*-------------------------< RESEL_GO >-------------------*/,{
+ SCR_LOAD_REL (dsa, 4),
+ 0,
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct ccb, phys.header.go.restart),
+ SCR_RETURN,
+ 0,
+ /* In normal situations we branch to RESEL_DSA */
+}/*-------------------------< RESEL_NOTAG >-------------------*/,{
+ /*
+ ** JUMP indirectly to the restart point of the CCB.
+ */
+ SCR_JUMP,
+ PADDR (resel_go),
+
+}/*-------------------------< RESEL_DSA >-------------------*/,{
+ /*
+ ** Ack the IDENTIFY or TAG previously received.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+ /*
+ ** Jump to dispatcher.
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATA_IN >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CHMOV_TBL ^ SCR_DATA_IN,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_IN2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (datai_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-------------------------< DATA_OUT >--------------------*/,{
+/*
+** Because the size depends on the
+** #define MAX_SCATTER parameter,
+** it is filled in at runtime.
+**
+** ##===========< i=0; i<MAX_SCATTER >=========
+** || SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+** || offsetof (struct dsb, data[ i]),
+** ##==========================================
+**
+**---------------------------------------------------------
+*/
+0
+}/*-------------------------< DATA_OUT2 >-------------------*/,{
+ SCR_CALL,
+ PADDR (datao_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+
+}/*-------------------------< PM0_DATA >--------------------*/,{
+ /*
+ ** Read our host flags to SFBR, so we will be able
+ ** to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR (pm0_data_out),
+ /*
+ ** Actual phase is DATA IN.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+	**	Keep track that we are moving data in the
+	**	PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ ** Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.pm0.sg),
+ SCR_JUMP,
+ PADDR (pm0_data_end),
+}/*-------------------------< PM0_DATA_OUT >----------------*/,{
+ /*
+ ** Actual phase is DATA OUT.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+	**	Keep track that we are moving data in the
+	**	PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0),
+ 0,
+ /*
+ ** Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct ccb, phys.pm0.sg),
+}/*-------------------------< PM0_DATA_END >----------------*/,{
+ /*
+	**	Clear the flag that said we were moving
+	**	data in the PM0 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0)),
+ 0,
+ /*
+ ** Return to the previous DATA script which
+ ** is guaranteed by design (if no bug) to be
+ ** the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.pm0.ret),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< PM1_DATA >--------------------*/,{
+ /*
+ ** Read our host flags to SFBR, so we will be able
+ ** to check against the data direction we expect.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+ ** Check against actual DATA PHASE.
+ */
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ PADDR (pm1_data_out),
+ /*
+ ** Actual phase is DATA IN.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+	**	Keep track that we are moving data in the
+	**	PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ ** Move the data to memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.pm1.sg),
+ SCR_JUMP,
+ PADDR (pm1_data_end),
+}/*-------------------------< PM1_DATA_OUT >----------------*/,{
+ /*
+ ** Actual phase is DATA OUT.
+ ** Check against expected direction.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN)),
+ PADDRH (data_ovrun),
+ /*
+	**	Keep track that we are moving data in the
+	**	PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1),
+ 0,
+ /*
+ ** Move the data from memory.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_OUT,
+ offsetof (struct ccb, phys.pm1.sg),
+}/*-------------------------< PM1_DATA_END >----------------*/,{
+ /*
+	**	Clear the flag that said we were moving
+	**	data in the PM1 DATA mini-script.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1)),
+ 0,
+ /*
+ ** Return to the previous DATA script which
+ ** is guaranteed by design (if no bug) to be
+ ** the main DATA script for this transfer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.pm1.ret),
+ SCR_RETURN,
+ 0,
+}/*---------------------------------------------------------*/
+};
+
+
+static struct scripth scripth0 __initdata = {
+/*------------------------< START64 >-----------------------*/{
+ /*
+ ** SCRIPT entry point for the 895A and the 896.
+ ** For now, there is no specific stuff for that
+ ** chip at this point, but this may come.
+ */
+ SCR_JUMP,
+ PADDR (init),
+}/*-------------------------< NO_DATA >-------------------*/,{
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-----------------------< SEL_FOR_ABORT >------------------*/,{
+ /*
+ ** We are jumped here by the C code, if we have
+ ** some target to reset or some disconnected
+ ** job to abort. Since error recovery is a serious
+	**	business, we will really reset the SCSI BUS in
+	**	case a SCSI interrupt occurs in this path.
+ */
+
+ /*
+ ** Set initiator mode.
+ */
+ SCR_CLR (SCR_TRG),
+ 0,
+ /*
+ ** And try to select this target.
+ */
+ SCR_SEL_TBL_ATN ^ offsetof (struct ncb, abrt_sel),
+ PADDR (reselect),
+
+ /*
+ ** Wait for the selection to complete or
+ ** the selection to time out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ -8,
+ /*
+ ** Call the C code.
+ */
+ SCR_INT,
+ SIR_TARGET_SELECTED,
+ /*
+ ** The C code should let us continue here.
+ ** Send the 'kiss of death' message.
+ ** We expect an immediate disconnect once
+ ** the target has eaten the message.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_OUT,
+ offsetof (struct ncb, abrt_tbl),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ /*
+ ** Tell the C code that we are done.
+ */
+ SCR_INT,
+ SIR_ABORT_SENT,
+}/*-----------------------< SEL_FOR_ABORT_1 >--------------*/,{
+ /*
+ ** Jump at scheduler.
+ */
+ SCR_JUMP,
+ PADDR (start),
+
+}/*------------------------< SELECT_NO_ATN >-----------------*/,{
+ /*
+ ** Set Initiator mode.
+ ** And try to select this target without ATN.
+ */
+
+ SCR_CLR (SCR_TRG),
+ 0,
+ SCR_SEL_TBL ^ offsetof (struct dsb, select),
+ PADDR (ungetjob),
+ /*
+ ** load the savep (saved pointer) into
+ ** the actual data pointer.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ /*
+ ** Initialize the status registers
+ */
+ SCR_LOAD_REL (scr0, 4),
+ offsetof (struct ccb, phys.header.status),
+
+}/*------------------------< WF_SEL_DONE_NO_ATN >-----------------*/,{
+ /*
+ ** Wait immediately for the next phase or
+ ** the selection to complete or time-out.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+
+}/*-------------------------< MSG_IN_ETC >--------------------*/,{
+ /*
+ ** If it is an EXTENDED (variable size message)
+ ** Handle it.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (M_EXTENDED)),
+ PADDRH (msg_extended),
+ /*
+ ** Let the C code handle any other
+ ** 1 byte message.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0x00, 0xf0)),
+ PADDRH (msg_received),
+ SCR_JUMP ^ IFTRUE (MASK (0x10, 0xf0)),
+ PADDRH (msg_received),
+ /*
+	**	We do not handle 2-byte messages from SCRIPTS.
+	**	So, let the C code deal with these too.
+ */
+ SCR_JUMP ^ IFFALSE (MASK (0x20, 0xf0)),
+ PADDRH (msg_weird_seen),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ SCR_JUMP,
+ PADDRH (msg_received),
+
+}/*-------------------------< MSG_RECEIVED >--------------------*/,{
+ SCR_LOAD_REL (scratcha, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_RECEIVED,
+
+}/*-------------------------< MSG_WEIRD_SEEN >------------------*/,{
+ SCR_LOAD_REL (scratcha1, 4), /* DUMMY READ */
+ 0,
+ SCR_INT,
+ SIR_MSG_WEIRD,
+
+}/*-------------------------< MSG_EXTENDED >--------------------*/,{
+ /*
+ ** Clear ACK and get the next byte
+ ** assumed to be the message length.
+ */
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (msgin[1]),
+ /*
+	**	Try to catch some unlikely situations, such as a
+	**	zero length or a length that is too large.
+ */
+ SCR_JUMP ^ IFTRUE (DATA (0)),
+ PADDRH (msg_weird_seen),
+ SCR_TO_REG (scratcha),
+ 0,
+ SCR_REG_REG (sfbr, SCR_ADD, (256-8)),
+ 0,
+ SCR_JUMP ^ IFTRUE (CARRYSET),
+ PADDRH (msg_weird_seen),
+ /*
+	**	We do not handle extended messages from SCRIPTS.
+	**	Read the amount of data corresponding to the
+ ** message length and call the C code.
+ */
+ SCR_STORE_REL (scratcha, 1),
+ offsetof (struct dsb, smsg_ext.size),
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_MOVE_TBL ^ SCR_MSG_IN,
+ offsetof (struct dsb, smsg_ext),
+ SCR_JUMP,
+ PADDRH (msg_received),
+
+}/*-------------------------< MSG_BAD >------------------*/,{
+ /*
+ ** unimplemented message - reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (clrack),
+
+}/*-------------------------< MSG_WEIRD >--------------------*/,{
+ /*
+ ** weird message received
+ ** ignore all MSG IN phases and reject it.
+ */
+ SCR_INT,
+ SIR_REJECT_TO_SEND,
+ SCR_SET (SCR_ATN),
+ 0,
+}/*-------------------------< MSG_WEIRD1 >--------------------*/,{
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_IN)),
+ PADDR (dispatch),
+ SCR_MOVE_ABS (1) ^ SCR_MSG_IN,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDRH (msg_weird1),
+}/*-------------------------< WDTR_RESP >----------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_WDTR >----------------*/,{
+ /*
+ ** Send the M_X_WIDE_REQ
+ */
+ SCR_MOVE_ABS (4) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< SDTR_RESP >-------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_SDTR >-------------*/,{
+ /*
+ ** Send the M_X_SYNC_REQ
+ */
+ SCR_MOVE_ABS (5) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< PPR_RESP >-------------*/,{
+ /*
+ ** let the target fetch our answer.
+ */
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ SCR_JUMP ^ IFFALSE (WHEN (SCR_MSG_OUT)),
+ PADDRH (nego_bad_phase),
+
+}/*-------------------------< SEND_PPR >-------------*/,{
+ /*
+ ** Send the M_X_PPR_REQ
+ */
+ SCR_MOVE_ABS (8) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_JUMP,
+ PADDRH (msg_out_done),
+
+}/*-------------------------< NEGO_BAD_PHASE >------------*/,{
+ SCR_INT,
+ SIR_NEGO_PROTO,
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< MSG_OUT >-------------------*/,{
+ /*
+ ** The target requests a message.
+ */
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ /*
+ ** ... wait for the next phase
+ ** if it's a message out, send it again, ...
+ */
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_OUT)),
+ PADDRH (msg_out),
+}/*-------------------------< MSG_OUT_DONE >--------------*/,{
+ /*
+ ** ... else clear the message ...
+ */
+ SCR_INT,
+ SIR_MSG_OUT_DONE,
+ /*
+ ** ... and process the next phase
+ */
+ SCR_JUMP,
+ PADDR (dispatch),
+
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+ /*
+ * Use scratcha to count the extra bytes.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (zero),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
+ /*
+ * The target may want to transfer too much data.
+ *
+ * If phase is DATA OUT write 1 byte and count it.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_OUT)),
+ 16,
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
+ NADDR (scratch),
+ SCR_JUMP,
+ PADDRH (data_ovrun2),
+ /*
+ * If WSR is set, clear this condition, and
+ * count this byte.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_JUMPR ^ IFFALSE (MASK (WSR, WSR)),
+ 16,
+ SCR_REG_REG (scntl2, SCR_OR, WSR),
+ 0,
+ SCR_JUMP,
+ PADDRH (data_ovrun2),
+ /*
+ * Finally check against DATA IN phase.
+ * Signal data overrun to the C code
+ * and jump to dispatcher if not so.
+ * Read 1 byte otherwise and count it.
+ */
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ 16,
+ SCR_INT,
+ SIR_DATA_OVERRUN,
+ SCR_JUMP,
+ PADDR (dispatch),
+ SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
+ NADDR (scratch),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
+ /*
+	 * Count this byte in scratcha[0..2], used as a 24-bit
+	 * counter (ADDC propagates the carry).
+	 * This will allow returning a negative residual to the user.
+ */
+ SCR_REG_REG (scratcha, SCR_ADD, 0x01),
+ 0,
+ SCR_REG_REG (scratcha1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (scratcha2, SCR_ADDC, 0),
+ 0,
+ /*
+ * .. and repeat as required.
+ */
+ SCR_JUMP,
+ PADDRH (data_ovrun1),
+
+}/*-------------------------< ABORT_RESEL >----------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_CLR (SCR_ACK),
+ 0,
+ /*
+	**	Send the abort/abort tag/reset message.
+	**	We expect an immediate disconnect.
+ */
+ SCR_REG_REG (scntl2, SCR_AND, 0x7f),
+ 0,
+ SCR_MOVE_ABS (1) ^ SCR_MSG_OUT,
+ NADDR (msgout),
+ SCR_CLR (SCR_ACK|SCR_ATN),
+ 0,
+ SCR_WAIT_DISC,
+ 0,
+ SCR_INT,
+ SIR_RESEL_ABORTED,
+ SCR_JUMP,
+ PADDR (start),
+}/*-------------------------< RESEND_IDENT >-------------------*/,{
+ /*
+ ** The target stays in MSG OUT phase after having acked
+ ** Identify [+ Tag [+ Extended message ]]. Targets shall
+ ** behave this way on parity error.
+	**	We must send all the messages again.
+ */
+	SCR_SET (SCR_ATN),      /* Shall be asserted 2 deskew delays before the */
+		0,              /* 1st ACK = 90 ns. Hope the NCR isn't too fast */
+ SCR_JUMP,
+ PADDR (send_ident),
+}/*-------------------------< IDENT_BREAK >-------------------*/,{
+ SCR_CLR (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+}/*-------------------------< IDENT_BREAK_ATN >----------------*/,{
+ SCR_SET (SCR_ATN),
+ 0,
+ SCR_JUMP,
+ PADDR (select2),
+}/*-------------------------< SDATA_IN >-------------------*/,{
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct dsb, sense),
+ SCR_CALL,
+ PADDR (datai_done),
+ SCR_JUMP,
+ PADDRH (data_ovrun),
+}/*-------------------------< DATA_IO >--------------------*/,{
+ /*
+ ** We jump here if the data direction was unknown at the
+ ** time we had to queue the command to the scripts processor.
+	**	Pointers had been set as follows in this situation:
+ ** savep --> DATA_IO
+ ** lastp --> start pointer when DATA_IN
+ ** goalp --> goal pointer when DATA_IN
+ ** wlastp --> start pointer when DATA_OUT
+ ** wgoalp --> goal pointer when DATA_OUT
+ ** This script sets savep/lastp/goalp according to the
+ ** direction chosen by the target.
+ */
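+	/*
+	**	For instance, when the target goes to DATA OUT, the
+	**	DATA_IO_OUT fragment below copies wlastp/wgoalp into
+	**	lastp/goalp and jumps back to DATA_IO_COM, which then
+	**	copies lastp into savep and returns through it.
+	*/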
+ SCR_JUMP ^ IFTRUE (WHEN (SCR_DATA_OUT)),
+ PADDRH(data_io_out),
+}/*-------------------------< DATA_IO_COM >-----------------*/,{
+ /*
+ ** Direction is DATA IN.
+ ** Warning: we jump here, even when phase is DATA OUT.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.savep),
+
+ /*
+ ** Jump to the SCRIPTS according to actual direction.
+ */
+ SCR_LOAD_REL (temp, 4),
+ offsetof (struct ccb, phys.header.savep),
+ SCR_RETURN,
+ 0,
+}/*-------------------------< DATA_IO_OUT >-----------------*/,{
+ /*
+ ** Direction is DATA OUT.
+ */
+ SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN)),
+ 0,
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.wlastp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.lastp),
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.wgoalp),
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.header.goalp),
+ SCR_JUMP,
+ PADDRH(data_io_com),
+
+}/*-------------------------< RESEL_BAD_LUN >---------------*/,{
+ /*
+ ** Message is an IDENTIFY, but lun is unknown.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORT to clear all pending tasks.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_LUN,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_I_T_L >------------------*/,{
+ /*
+	**	We do not have a task for that I_T_L.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORT message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_I_T_L_Q >----------------*/,{
+ /*
+	**	We do not have a task that matches the tag.
+ ** Signal problem to C code for logging the event.
+ ** Send a M_ABORTTAG message.
+ */
+ SCR_INT,
+ SIR_RESEL_BAD_I_T_L_Q,
+ SCR_JUMP,
+ PADDRH (abort_resel),
+}/*-------------------------< BAD_STATUS >-----------------*/,{
+ /*
+ ** Anything different from INTERMEDIATE
+ ** CONDITION MET should be a bad SCSI status,
+ ** given that GOOD status has already been tested.
+ ** Call the C code.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (startpos),
+ SCR_INT ^ IFFALSE (DATA (S_COND_MET)),
+ SIR_BAD_STATUS,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< TWEAK_PMJ >------------------*/,{
+ /*
+ ** Disable PM handling from SCRIPTS for the data phase
+ ** and so force PM to be handled from C code if HF_PM_TO_C
+ ** flag is set.
+ */
+ SCR_FROM_REG(HF_REG),
+ 0,
+ SCR_JUMPR ^ IFTRUE (MASK (HF_PM_TO_C, HF_PM_TO_C)),
+ 16,
+ SCR_REG_REG (ccntl0, SCR_OR, ENPMJ),
+ 0,
+ SCR_RETURN,
+ 0,
+ SCR_REG_REG (ccntl0, SCR_AND, (~ENPMJ)),
+ 0,
+ SCR_RETURN,
+ 0,
+
+}/*-------------------------< PM_HANDLE >------------------*/,{
+ /*
+ ** Phase mismatch handling.
+ **
+ ** Since we have to deal with 2 SCSI data pointers
+ ** (current and saved), we need at least 2 contexts.
+ ** Each context (pm0 and pm1) has a saved area, a
+ ** SAVE mini-script and a DATA phase mini-script.
+ */
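+	/*
+	**	Roughly, the flow is: a phase mismatch during the main
+	**	DATA script saves the context of the interrupted CHMOV
+	**	into pm0 (or pm1), points TEMP at the PM0_DATA (PM1_DATA)
+	**	mini-script and returns to the dispatcher; the mini-script
+	**	later resumes the move and returns to the main DATA script
+	**	through the saved return address.
+	*/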
+ /*
+ ** Get the PM handling flags.
+ */
+ SCR_FROM_REG (HF_REG),
+ 0,
+ /*
+	**	If no flags are set (the 1st PM, for example), avoid
+	**	all the heavy flag testing below.
+ ** This makes the normal case a bit faster.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED))),
+ PADDRH (pm_handle1),
+ /*
+ ** If we received a SAVE DP, switch to the
+ ** other PM context since the savep may point
+ ** to the current PM context.
+ */
+ SCR_JUMPR ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED)),
+ 8,
+ SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM),
+ 0,
+ /*
+	**	If we have been interrupted in a PM DATA mini-script,
+	**	we take the return address from the corresponding
+	**	saved area.
+	**	This ensures the return address always points to the
+	**	main DATA script for this transfer.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1))),
+ PADDRH (pm_handle1),
+ SCR_JUMPR ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0)),
+ 16,
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct ccb, phys.pm0.ret),
+ SCR_JUMP,
+ PADDRH (pm_save),
+ SCR_LOAD_REL (ia, 4),
+ offsetof(struct ccb, phys.pm1.ret),
+ SCR_JUMP,
+ PADDRH (pm_save),
+}/*-------------------------< PM_HANDLE1 >-----------------*/,{
+ /*
+ ** Normal case.
+ ** Update the return address so that it
+ ** will point after the interrupted MOVE.
+ */
+ SCR_REG_REG (ia, SCR_ADD, 8),
+ 0,
+ SCR_REG_REG (ia1, SCR_ADDC, 0),
+ 0,
+}/*-------------------------< PM_SAVE >--------------------*/,{
+ /*
+ ** Clear all the flags that told us if we were
+ ** interrupted in a PM DATA mini-script and/or
+ ** we received a SAVE DP.
+ */
+ SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED))),
+ 0,
+ /*
+ ** Choose the current PM context.
+ */
+ SCR_JUMP ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM)),
+ PADDRH (pm1_save),
+}/*-------------------------< PM0_SAVE >-------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct ccb, phys.pm0.ret),
+ /*
+	**	If the WSR bit is set, UA and RBC may have to be
+	**	changed, whether or not the device wants to ignore
+	**	this residue.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDRH (pm_wsr_handle),
+ /*
+ ** Save the remaining byte count, the updated
+ ** address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct ccb, phys.pm0.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct ccb, phys.pm0.sg.addr),
+ /*
+ ** Set the current pointer at the PM0 DATA mini-script.
+ */
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (pm0_data_addr),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< PM1_SAVE >-------------------*/,{
+ SCR_STORE_REL (ia, 4),
+ offsetof(struct ccb, phys.pm1.ret),
+ /*
+	**	If the WSR bit is set, UA and RBC may have been
+	**	changed, whether or not the device wants to ignore
+	**	this residue.
+ */
+ SCR_FROM_REG (scntl2),
+ 0,
+ SCR_CALL ^ IFTRUE (MASK (WSR, WSR)),
+ PADDRH (pm_wsr_handle),
+ /*
+ ** Save the remaining byte count, the updated
+ ** address and the return address.
+ */
+ SCR_STORE_REL (rbc, 4),
+ offsetof(struct ccb, phys.pm1.sg.size),
+ SCR_STORE_REL (ua, 4),
+ offsetof(struct ccb, phys.pm1.sg.addr),
+ /*
+ ** Set the current pointer at the PM1 DATA mini-script.
+ */
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (pm1_data_addr),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*--------------------------< PM_WSR_HANDLE >-----------------------*/,{
+ /*
+ * Phase mismatch handling from SCRIPT with WSR set.
+ * Such a condition can occur if the chip wants to
+ * execute a CHMOV(size > 1) when the WSR bit is
+ * set and the target changes PHASE.
+ */
+#ifdef SYM_DEBUG_PM_WITH_WSR
+ /*
+ * Some debugging may still be needed.:)
+ */
+ SCR_INT,
+ SIR_PM_WITH_WSR,
+#endif
+ /*
+ * We must move the residual byte to memory.
+ *
+ * UA contains bit 0..31 of the address to
+ * move the residual byte.
+ * Move it to the table indirect.
+ */
+ SCR_STORE_REL (ua, 4),
+ offsetof (struct ccb, phys.wresid.addr),
+ /*
+ * Increment UA (move address to next position).
+ */
+ SCR_REG_REG (ua, SCR_ADD, 1),
+ 0,
+ SCR_REG_REG (ua1, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua2, SCR_ADDC, 0),
+ 0,
+ SCR_REG_REG (ua3, SCR_ADDC, 0),
+ 0,
+ /*
+ * Compute SCRATCHA as:
+ * - size to transfer = 1 byte.
+ * - bit 24..31 = high address bit [32...39].
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (zero),
+ SCR_REG_REG (scratcha, SCR_OR, 1),
+ 0,
+ SCR_FROM_REG (rbc3),
+ 0,
+ SCR_TO_REG (scratcha3),
+ 0,
+ /*
+ * Move this value to the table indirect.
+ */
+ SCR_STORE_REL (scratcha, 4),
+ offsetof (struct ccb, phys.wresid.size),
+ /*
+ * Wait for a valid phase.
+ * While testing with bogus QUANTUM drives, the C1010
+ * sometimes raised a spurious phase mismatch with
+ * WSR and the CHMOV(1) triggered another PM.
+	 * Waiting explicitly for the PHASE seemed to avoid
+ * the nested phase mismatch. Btw, this didn't happen
+ * using my IBM drives.
+ */
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_DATA_IN)),
+ 0,
+ /*
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.wresid),
+ /*
+ * We can now handle the phase mismatch with UA fixed.
+ * RBC[0..23]=0 is a special case that does not require
+ * a PM context. The C code also checks against this.
+ */
+ SCR_FROM_REG (rbc),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc1),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ SCR_FROM_REG (rbc2),
+ 0,
+ SCR_RETURN ^ IFFALSE (DATA (0)),
+ 0,
+ /*
+ * RBC[0..23]=0.
+	 * Not only do we not need a PM context, but this would also
+	 * lead to a bogus CHMOV(0). This condition means that
+ * the residual was the last byte to move from this CHMOV.
+ * So, we just have to move the current data script pointer
+ * (i.e. TEMP) to the SCRIPTS address following the
+ * interrupted CHMOV and jump to dispatcher.
+ */
+ SCR_STORE_ABS (ia, 4),
+ PADDRH (scratch),
+ SCR_LOAD_ABS (temp, 4),
+ PADDRH (scratch),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*--------------------------< WSR_MA_HELPER >-----------------------*/,{
+ /*
+ * Helper for the C code when WSR bit is set.
+ * Perform the move of the residual byte.
+ */
+ SCR_CHMOV_TBL ^ SCR_DATA_IN,
+ offsetof (struct ccb, phys.wresid),
+ SCR_JUMP,
+ PADDR (dispatch),
+}/*-------------------------< ZERO >------------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH >---------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SCRATCH1 >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM0_DATA_ADDR >---------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< PM1_DATA_ADDR >---------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SAVED_DSA >-------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< SAVED_DRS >-------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< DONE_POS >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< STARTPOS >--------------------*/,{
+ SCR_DATA_ZERO,
+}/*-------------------------< TARGTBL >---------------------*/,{
+ SCR_DATA_ZERO,
+
+
+/*
+** We may use MEMORY MOVE instructions to load the on-chip RAM,
+** if it happens that mapping PCI memory is not possible.
+** But writing the RAM from the CPU is the preferred method,
+** since PCI 2.2 seems to disallow PCI self-mastering.
+*/
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+
+}/*-------------------------< START_RAM >-------------------*/,{
+ /*
+ ** Load the script into on-chip RAM,
+ ** and jump to start point.
+ */
+ SCR_COPY (sizeof (struct script)),
+}/*-------------------------< SCRIPT0_BA >--------------------*/,{
+ 0,
+ PADDR (start),
+ SCR_JUMP,
+ PADDR (init),
+
+}/*-------------------------< START_RAM64 >--------------------*/,{
+ /*
+ ** Load the RAM and start for 64 bit PCI (895A,896).
+ ** Both scripts (script and scripth) are loaded into
+ ** the RAM which is 8K (4K for 825A/875/895).
+	**	We also need to load the 32-63 bit segment
+	**	address used by the SCRIPTS processor.
+ ** LOAD/STORE ABSOLUTE always refers to on-chip RAM
+ ** in our implementation. The main memory is
+ ** accessed using LOAD/STORE DSA RELATIVE.
+ */
+ SCR_LOAD_REL (mmws, 4),
+ offsetof (struct ncb, scr_ram_seg),
+ SCR_COPY (sizeof(struct script)),
+}/*-------------------------< SCRIPT0_BA64 >--------------------*/,{
+ 0,
+ PADDR (start),
+ SCR_COPY (sizeof(struct scripth)),
+}/*-------------------------< SCRIPTH0_BA64 >--------------------*/,{
+ 0,
+ PADDRH (start64),
+ SCR_LOAD_REL (mmrs, 4),
+ offsetof (struct ncb, scr_ram_seg),
+ SCR_JUMP64,
+ PADDRH (start64),
+}/*-------------------------< RAM_SEG64 >--------------------*/,{
+ 0,
+
+#endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+
+}/*-------------------------< SNOOPTEST >-------------------*/,{
+ /*
+ ** Read the variable.
+ */
+ SCR_LOAD_REL (scratcha, 4),
+ offsetof(struct ncb, ncr_cache),
+ SCR_STORE_REL (temp, 4),
+ offsetof(struct ncb, ncr_cache),
+ SCR_LOAD_REL (temp, 4),
+ offsetof(struct ncb, ncr_cache),
+}/*-------------------------< SNOOPEND >-------------------*/,{
+ /*
+ ** And stop.
+ */
+ SCR_INT,
+ 99,
+}/*--------------------------------------------------------*/
+};
+
+/*==========================================================
+**
+**
+** Fill in #define dependent parts of the script
+**
+**
+**==========================================================
+*/
+
+void __init ncr_script_fill (struct script * scr, struct scripth * scrh)
+{
+ int i;
+ ncrcmd *p;
+
+ p = scr->data_in;
+ for (i=0; i<MAX_SCATTER; i++) {
+ *p++ =SCR_CHMOV_TBL ^ SCR_DATA_IN;
+ *p++ =offsetof (struct dsb, data[i]);
+ };
+
+ assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in));
+
+ p = scr->data_out;
+
+ for (i=0; i<MAX_SCATTER; i++) {
+ *p++ =SCR_CHMOV_TBL ^ SCR_DATA_OUT;
+ *p++ =offsetof (struct dsb, data[i]);
+ };
+
+ assert ((u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out));
+}
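+
+/*
+**	After filling, scr->data_in (and likewise scr->data_out) holds
+**	MAX_SCATTER instruction pairs, one per scatter segment, roughly:
+**
+**		SCR_CHMOV_TBL ^ SCR_DATA_IN, offsetof (struct dsb, data[0]),
+**		SCR_CHMOV_TBL ^ SCR_DATA_IN, offsetof (struct dsb, data[1]),
+**		...
+*/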
+
+/*==========================================================
+**
+**
+** Copy and rebind a script.
+**
+**
+**==========================================================
+*/
+
+static void __init
+ncr_script_copy_and_bind (ncb_p np,ncrcmd *src,ncrcmd *dst,int len)
+{
+ ncrcmd opcode, new, old, tmp1, tmp2;
+ ncrcmd *start, *end;
+ int relocs;
+ int opchanged = 0;
+
+ start = src;
+ end = src + len/4;
+
+ while (src < end) {
+
+ opcode = *src++;
+ *dst++ = cpu_to_scr(opcode);
+
+ /*
+ ** If we forget to change the length
+ ** in struct script, a field will be
+ ** padded with 0. This is an illegal
+ ** command.
+ */
+
+ if (opcode == 0) {
+ printk (KERN_INFO "%s: ERROR0 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ MDELAY (10000);
+ continue;
+ };
+
+ /*
+ ** We use the bogus value 0xf00ff00f ;-)
+ ** to reserve data area in SCRIPTS.
+ */
+ if (opcode == SCR_DATA_ZERO) {
+ dst[-1] = 0;
+ continue;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_SCRIPT)
+ printk (KERN_INFO "%p: <%x>\n",
+ (src-1), (unsigned)opcode);
+
+ /*
+ ** We don't have to decode ALL commands
+ */
+ switch (opcode >> 28) {
+
+ case 0xf:
+ /*
+ ** LOAD / STORE DSA relative, don't relocate.
+ */
+ relocs = 0;
+ break;
+ case 0xe:
+ /*
+ ** LOAD / STORE absolute.
+ */
+ relocs = 1;
+ break;
+ case 0xc:
+ /*
+ ** COPY has TWO arguments.
+ */
+ relocs = 2;
+ tmp1 = src[0];
+ tmp2 = src[1];
+#ifdef RELOC_KVAR
+ if ((tmp1 & RELOC_MASK) == RELOC_KVAR)
+ tmp1 = 0;
+ if ((tmp2 & RELOC_MASK) == RELOC_KVAR)
+ tmp2 = 0;
+#endif
+ if ((tmp1 ^ tmp2) & 3) {
+ printk (KERN_ERR"%s: ERROR1 IN SCRIPT at %d.\n",
+ ncr_name(np), (int) (src-start-1));
+ MDELAY (1000);
+ }
+ /*
+ ** If PREFETCH feature not enabled, remove
+ ** the NO FLUSH bit if present.
+ */
+ if ((opcode & SCR_NO_FLUSH) &&
+ !(np->features & FE_PFEN)) {
+ dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH);
+ ++opchanged;
+ }
+ break;
+
+ case 0x0:
+ /*
+ ** MOVE/CHMOV (absolute address)
+ */
+ if (!(np->features & FE_WIDE))
+ dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
+ relocs = 1;
+ break;
+
+ case 0x1:
+ /*
+ ** MOVE/CHMOV (table indirect)
+ */
+ if (!(np->features & FE_WIDE))
+ dst[-1] = cpu_to_scr(opcode | OPC_MOVE);
+ relocs = 0;
+ break;
+
+ case 0x8:
+ /*
+ ** JUMP / CALL
+		**	don't relocate if relative :-)
+ */
+ if (opcode & 0x00800000)
+ relocs = 0;
+ else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
+ relocs = 2;
+ else
+ relocs = 1;
+ break;
+
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ relocs = 1;
+ break;
+
+ default:
+ relocs = 0;
+ break;
+ };
+
+ if (!relocs) {
+ *dst++ = cpu_to_scr(*src++);
+ continue;
+ }
+ while (relocs--) {
+ old = *src++;
+
+ switch (old & RELOC_MASK) {
+ case RELOC_REGISTER:
+ new = (old & ~RELOC_MASK) + pcivtobus(np->base_ba);
+ break;
+ case RELOC_LABEL:
+ new = (old & ~RELOC_MASK) + np->p_script;
+ break;
+ case RELOC_LABELH:
+ new = (old & ~RELOC_MASK) + np->p_scripth;
+ break;
+ case RELOC_SOFTC:
+ new = (old & ~RELOC_MASK) + np->p_ncb;
+ break;
+#ifdef RELOC_KVAR
+ case RELOC_KVAR:
+ new=0;
+ if (((old & ~RELOC_MASK) < SCRIPT_KVAR_FIRST) ||
+ ((old & ~RELOC_MASK) > SCRIPT_KVAR_LAST))
+ panic("ncr KVAR out of range");
+ new = vtobus(script_kvars[old & ~RELOC_MASK]);
+#endif
+ break;
+ case 0:
+ /* Don't relocate a 0 address. */
+ if (old == 0) {
+ new = old;
+ break;
+ }
+ /* fall through */
+ default:
+ new = 0; /* For 'cc' not to complain */
+ panic("ncr_script_copy_and_bind: "
+ "weird relocation %x\n", old);
+ break;
+ }
+
+ *dst++ = cpu_to_scr(new);
+ }
+ };
+}
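+
+/*
+**	For example, assuming PADDR(label) encodes the offset of 'label'
+**	in struct script tagged with RELOC_LABEL (as the SCRIPTS above
+**	use it), the RELOC_LABEL case rebinds such an argument to
+**	np->p_script + offset, i.e. the bus address the SCRIPTS
+**	processor will actually jump to.
+*/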
+
+/*==========================================================
+**
+**
+** Auto configuration: attach and init a host adapter.
+**
+**
+**==========================================================
+*/
+
+/*
+** Linux host data structure.
+*/
+
+struct host_data {
+ struct ncb *ncb;
+};
+
+/*
+**	Print something which allows retrieving the controller type, unit,
+**	target and lun concerned by a kernel message.
+*/
+
+static void PRINT_TARGET(ncb_p np, int target)
+{
+ printk(KERN_INFO "%s-<%d,*>: ", ncr_name(np), target);
+}
+
+static void PRINT_LUN(ncb_p np, int target, int lun)
+{
+ printk(KERN_INFO "%s-<%d,%d>: ", ncr_name(np), target, lun);
+}
+
+static void PRINT_ADDR(Scsi_Cmnd *cmd)
+{
+ struct host_data *host_data = (struct host_data *) cmd->host->hostdata;
+ PRINT_LUN(host_data->ncb, cmd->target, cmd->lun);
+}
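+
+/*
+**	For example, assuming the usual instance naming built in
+**	ncr_attach() (NAME53C "<chip>-<unit>"), PRINT_LUN(np, 2, 0) on
+**	the first 53C896 would print a prefix such as
+**	"sym53c896-0-<2,0>: " in front of the kernel message.
+*/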
+
+/*==========================================================
+**
+** NCR chip clock divisor table.
+** Divisors are multiplied by 10,000,000 in order to make
+**	calculations simpler.
+**
+**==========================================================
+*/
+
+#define _5M 5000000
+static u_long div_10M[] =
+ {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
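+
+/*
+**	The entries correspond to the chip clock divisors 1, 1.5, 2, 3,
+**	4, 6 and 8, each multiplied by 10,000,000 (e.g. 3*_5M stands
+**	for divisor 1.5).
+*/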
+
+
+/*===============================================================
+**
+** Prepare io register values used by ncr_init() according
+** to selected and supported features.
+**
+** NCR/SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64,
+** 128 transfers. All chips support at least 16-transfer bursts.
+** The 825A, 875 and 895 chips support bursts of up to 128
+** transfers and the 895A and 896 support bursts of up to 64
+** transfers. All other chips support up to 16-transfer bursts.
+**
+** For PCI 32 bit data transfers each transfer is a DWORD (4 bytes).
+** It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
+** Only the 896 is able to perform 64 bit data transfers.
+**
+** We use log base 2 (burst length) as internal code, with
+** value 0 meaning "burst disabled".
+**
+**===============================================================
+*/
+
+/*
+ * Burst length from burst code.
+ */
+#define burst_length(bc) ((!(bc)) ? 0 : 1 << (bc))
+
+/*
+ * Burst code from io register bits.
+ */
+#define burst_code(dmode, ctest4, ctest5) \
+	(((ctest4) & 0x80) ? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1)
+
+/*
+ * Set initial io register bits from burst code.
+ */
+static inline void ncr_init_burst(ncb_p np, u_char bc)
+{
+ np->rv_ctest4 &= ~0x80;
+ np->rv_dmode &= ~(0x3 << 6);
+ np->rv_ctest5 &= ~0x4;
+
+ if (!bc) {
+ np->rv_ctest4 |= 0x80;
+ }
+ else {
+ --bc;
+ np->rv_dmode |= ((bc & 0x3) << 6);
+ np->rv_ctest5 |= (bc & 0x4);
+ }
+}
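+
+/*
+ * Worked example (illustrative): a burst code of 6 is stored as
+ * dmode bits 7:6 = (6-1) & 3 = 1 and ctest5 bit 2 = (6-1) & 4 = 4;
+ * burst_code(0x40, 0x00, 0x04) = 1 + 4 + 1 = 6 recovers it, and
+ * burst_length(6) = 1 << 6 = 64 transfers per burst.
+ */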
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+** Get target set-up from Symbios format NVRAM.
+*/
+
+static void __init
+ncr_Symbios_setup_target(ncb_p np, int target, Symbios_nvram *nvram)
+{
+ tcb_p tp = &np->target[target];
+ Symbios_target *tn = &nvram->target[target];
+
+ tp->usrsync = tn->sync_period ? (tn->sync_period + 3) / 4 : 255;
+ tp->usrwide = tn->bus_width == 0x10 ? 1 : 0;
+ tp->usrtags =
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? MAX_TAGS : 0;
+
+ if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
+ tp->usrflag |= UF_NODISC;
+ if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
+ tp->usrflag |= UF_NOSCAN;
+}
+
+/*
+** Get target set-up from Tekram format NVRAM.
+*/
+
+static void __init
+ncr_Tekram_setup_target(ncb_p np, int target, Tekram_nvram *nvram)
+{
+ tcb_p tp = &np->target[target];
+ struct Tekram_target *tn = &nvram->target[target];
+ int i;
+
+ if (tn->flags & TEKRAM_SYNC_NEGO) {
+ i = tn->sync_index & 0xf;
+ tp->usrsync = Tekram_sync[i];
+ }
+
+ tp->usrwide = (tn->flags & TEKRAM_WIDE_NEGO) ? 1 : 0;
+
+ if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
+ tp->usrtags = 2 << nvram->max_tags_index;
+ }
+
+ if (!(tn->flags & TEKRAM_DISCONNECT_ENABLE))
+ tp->usrflag = UF_NODISC;
+
+ /* If any device does not support parity, we will not use this option */
+ if (!(tn->flags & TEKRAM_PARITY_CHECK))
+ np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Save initial settings of some IO registers.
+** Assumed to have been set by BIOS.
+*/
+static void __init ncr_save_initial_setting(ncb_p np)
+{
+ np->sv_scntl0 = INB(nc_scntl0) & 0x0a;
+ np->sv_dmode = INB(nc_dmode) & 0xce;
+ np->sv_dcntl = INB(nc_dcntl) & 0xa8;
+ np->sv_ctest3 = INB(nc_ctest3) & 0x01;
+ np->sv_ctest4 = INB(nc_ctest4) & 0x80;
+ np->sv_gpcntl = INB(nc_gpcntl);
+ np->sv_stest2 = INB(nc_stest2) & 0x20;
+ np->sv_stest4 = INB(nc_stest4);
+ np->sv_stest1 = INB(nc_stest1);
+
+ np->sv_scntl3 = INB(nc_scntl3) & 0x07;
+
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66) ){
+ /*
+ ** C1010 always uses large fifo, bit 5 rsvd
+ ** scntl4 used ONLY with C1010
+ */
+ np->sv_ctest5 = INB(nc_ctest5) & 0x04 ;
+ np->sv_scntl4 = INB(nc_scntl4);
+ }
+ else {
+ np->sv_ctest5 = INB(nc_ctest5) & 0x24 ;
+ np->sv_scntl4 = 0;
+ }
+}
+
+/*
+** Prepare io register values used by ncr_init()
+** according to selected and supported features.
+*/
+static int __init ncr_prepare_setting(ncb_p np, ncr_nvram *nvram)
+{
+ u_char burst_max;
+ u_long period;
+ int i;
+
+ /*
+ ** Wide ?
+ */
+
+ np->maxwide = (np->features & FE_WIDE)? 1 : 0;
+
+ /*
+ ** Get the frequency of the chip's clock.
+ ** Find the right value for scntl3.
+ */
+
+ if (np->features & FE_QUAD)
+ np->multiplier = 4;
+ else if (np->features & FE_DBLR)
+ np->multiplier = 2;
+ else
+ np->multiplier = 1;
+
+ np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
+ np->clock_khz *= np->multiplier;
+
+ if (np->clock_khz != 40000)
+ ncr_getclock(np, np->multiplier);
+
+ /*
+ * Divisor to be used for async (timer pre-scaler).
+ *
+	 * Note: For the C1010 the async divisor is 2 (8) if the
+ * quadrupler is disabled (enabled).
+ */
+
+ if ( (np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+
+ np->rv_scntl3 = 0;
+ }
+ else
+ {
+ i = np->clock_divn - 1;
+ while (--i >= 0) {
+ if (10ul * SCSI_NCR_MIN_ASYNC * np->clock_khz
+ > div_10M[i]) {
+ ++i;
+ break;
+ }
+ }
+ np->rv_scntl3 = i+1;
+ }
+
+
+ /*
+ * Save the ultra3 register for the C1010/C1010_66
+ */
+
+ np->rv_scntl4 = np->sv_scntl4;
+
+ /*
+ * Minimum synchronous period factor supported by the chip.
+ * Btw, 'period' is in tenths of nanoseconds.
+ */
+
+ period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
+ if (period <= 250) np->minsync = 10;
+ else if (period <= 303) np->minsync = 11;
+ else if (period <= 500) np->minsync = 12;
+ else np->minsync = (period + 40 - 1) / 40;
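+
+	/*
+	 * For example: with a 40000 KHz clock, period evaluates to
+	 * 1000 (100 ns), hence minsync = 25; with a 160000 KHz clock
+	 * (quadrupled 40 MHz), period is 250 (25 ns), hence minsync = 10
+	 * (9 after the Ultra3 fix-up below).
+	 */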
+
+ /*
+	 * Fix up. If the sync factor is 10 (160000 KHz clock) and the chip
+	 * supports Ultra3, then the minimum sync period is 12.5 ns and the factor is 9.
+ */
+
+ if ((np->minsync == 10) && (np->features & FE_ULTRA3))
+ np->minsync = 9;
+
+ /*
+ * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
+ *
+ * Transfer period minimums: SCSI-1 200 (50); Fast 100 (25)
+	 * Ultra 50 (12); Ultra2 25 (10); Ultra3 12.5 (9)
+ */
+
+ if (np->minsync < 25 && !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
+ np->minsync = 25;
+ else if (np->minsync < 12 && (np->features & FE_ULTRA))
+ np->minsync = 12;
+ else if (np->minsync < 10 && (np->features & FE_ULTRA2))
+ np->minsync = 10;
+ else if (np->minsync < 9 && (np->features & FE_ULTRA3))
+ np->minsync = 9;
+
+ /*
+ * Maximum synchronous period factor supported by the chip.
+ */
+
+ period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
+ np->maxsync = period > 2540 ? 254 : period / 10;
+
+ /*
+ ** 64 bit (53C895A or 53C896) ?
+ */
+ if (np->features & FE_64BIT)
+#ifdef SCSI_NCR_USE_64BIT_DAC
+ np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
+#else
+ np->rv_ccntl1 |= (DDAC);
+#endif
+
+ /*
+ ** Phase mismatch handled by SCRIPTS (53C895A, 53C896 or C1010) ?
+ */
+ if (np->features & FE_NOPM)
+ np->rv_ccntl0 |= (ENPMJ);
+
+ /*
+ ** Prepare initial value of other IO registers
+ */
+#if defined SCSI_NCR_TRUST_BIOS_SETTING
+ np->rv_scntl0 = np->sv_scntl0;
+ np->rv_dmode = np->sv_dmode;
+ np->rv_dcntl = np->sv_dcntl;
+ np->rv_ctest3 = np->sv_ctest3;
+ np->rv_ctest4 = np->sv_ctest4;
+ np->rv_ctest5 = np->sv_ctest5;
+ burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+#else
+
+ /*
+ ** Select burst length (dwords)
+ */
+ burst_max = driver_setup.burst_max;
+ if (burst_max == 255)
+ burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5);
+ if (burst_max > 7)
+ burst_max = 7;
+ if (burst_max > np->maxburst)
+ burst_max = np->maxburst;
+
+ /*
+ ** DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
+ ** This chip and the 860 Rev 1 may wrongly use PCI cache line
+ ** based transactions on LOAD/STORE instructions. So we have
+ ** to prevent these chips from using such PCI transactions in
+ ** this driver. The generic sym53c8xx driver that does not use
+ ** LOAD/STORE instructions does not need this work-around.
+ */
+ if ((np->device_id == PCI_DEVICE_ID_NCR_53C810 &&
+ np->revision_id >= 0x10 && np->revision_id <= 0x11) ||
+ (np->device_id == PCI_DEVICE_ID_NCR_53C860 &&
+ np->revision_id <= 0x1))
+ np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
+
+ /*
+ ** DEL ? - 53C1010 Rev 1 - Part Number 609-0393638
+ ** 64-bit Slave Cycles must be disabled.
+ */
+ if ( ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) && (np->revision_id < 0x02) )
+ || (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 ) )
+ np->rv_ccntl1 |= 0x10;
+
+ /*
+ ** Select all supported special features.
+ ** If we are using on-board RAM for scripts, prefetch (PFEN)
+ ** does not help, but burst op fetch (BOF) does.
+ ** Disabling PFEN makes sure BOF will be used.
+ */
+ if (np->features & FE_ERL)
+ np->rv_dmode |= ERL; /* Enable Read Line */
+ if (np->features & FE_BOF)
+ np->rv_dmode |= BOF; /* Burst Opcode Fetch */
+ if (np->features & FE_ERMP)
+ np->rv_dmode |= ERMP; /* Enable Read Multiple */
+#if 1
+ if ((np->features & FE_PFEN) && !np->base2_ba)
+#else
+ if (np->features & FE_PFEN)
+#endif
+ np->rv_dcntl |= PFEN; /* Prefetch Enable */
+ if (np->features & FE_CLSE)
+ np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
+ if (np->features & FE_WRIE)
+ np->rv_ctest3 |= WRIE; /* Write and Invalidate */
+
+
+ if ( (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) &&
+ (np->features & FE_DFS))
+ np->rv_ctest5 |= DFS; /* Dma Fifo Size */
+ /* C1010/C1010_66 always large fifo */
+
+ /*
+	**	Select some other features.
+ */
+ if (driver_setup.master_parity)
+ np->rv_ctest4 |= MPEE; /* Master parity checking */
+ if (driver_setup.scsi_parity)
+ np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ /*
+ ** Get parity checking, host ID and verbose mode from NVRAM
+ **/
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_TEKRAM_NVRAM:
+ np->myaddr = nvram->data.Tekram.host_id & 0x0f;
+ break;
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE))
+ np->rv_scntl0 &= ~0x0a;
+ np->myaddr = nvram->data.Symbios.host_id & 0x0f;
+ if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS)
+ np->verbose += 1;
+ break;
+ }
+ }
+#endif
+ /*
+ ** Get SCSI addr of host adapter (set by bios?).
+ */
+ if (np->myaddr == 255) {
+ np->myaddr = INB(nc_scid) & 0x07;
+ if (!np->myaddr)
+ np->myaddr = SCSI_NCR_MYADDR;
+ }
+
+#endif /* SCSI_NCR_TRUST_BIOS_SETTING */
+
+ /*
+ * Prepare initial io register bits for burst length
+ */
+ ncr_init_burst(np, burst_max);
+
+ /*
+ ** Set SCSI BUS mode.
+ **
+ ** - ULTRA2 chips (895/895A/896)
+ ** and ULTRA 3 chips (1010) report the current
+ ** BUS mode through the STEST4 IO register.
+ ** - For previous generation chips (825/825A/875),
+ ** user has to tell us how to check against HVD,
+ ** since a 100% safe algorithm is not possible.
+ */
+ np->scsi_mode = SMODE_SE;
+ if (np->features & (FE_ULTRA2 | FE_ULTRA3))
+ np->scsi_mode = (np->sv_stest4 & SMODE);
+ else if (np->features & FE_DIFF) {
+ switch(driver_setup.diff_support) {
+ case 4: /* Trust previous settings if present, then GPIO3 */
+ if (np->sv_scntl3) {
+ if (np->sv_stest2 & 0x20)
+ np->scsi_mode = SMODE_HVD;
+ break;
+ }
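+			/* fall through */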
+ case 3: /* SYMBIOS controllers report HVD through GPIO3 */
+ if (nvram && nvram->type != SCSI_NCR_SYMBIOS_NVRAM)
+ break;
+ if (INB(nc_gpreg) & 0x08)
+ break;
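+			/* fall through */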
+ case 2: /* Set HVD unconditionally */
+ np->scsi_mode = SMODE_HVD;
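+			/* fall through */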
+ case 1: /* Trust previous settings for HVD */
+ if (np->sv_stest2 & 0x20)
+ np->scsi_mode = SMODE_HVD;
+ break;
+ default:/* Don't care about HVD */
+ break;
+ }
+ }
+ if (np->scsi_mode == SMODE_HVD)
+ np->rv_stest2 |= 0x20;
+
+ /*
+ ** Set LED support from SCRIPTS.
+ ** Ignore this feature for boards known to use a
+ ** specific GPIO wiring and for the 895A or 896
+ ** that drive the LED directly.
+ ** Also probe initial setting of GPIO0 as output.
+ */
+ if ((driver_setup.led_pin ||
+ (nvram && nvram->type == SCSI_NCR_SYMBIOS_NVRAM)) &&
+ !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
+ np->features |= FE_LED0;
+
+ /*
+ ** Set irq mode.
+ */
+ switch(driver_setup.irqm & 3) {
+ case 2:
+ np->rv_dcntl |= IRQM;
+ break;
+ case 1:
+ np->rv_dcntl |= (np->sv_dcntl & IRQM);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ ** Configure targets according to driver setup.
+ ** If NVRAM present get targets setup from NVRAM.
+	**	Allow overriding sync, wide and NOSCAN from
+	**	the boot command line.
+ */
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ tcb_p tp = &np->target[i];
+
+ tp->usrsync = 255;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_TEKRAM_NVRAM:
+ ncr_Tekram_setup_target(np, i, &nvram->data.Tekram);
+ break;
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ ncr_Symbios_setup_target(np, i, &nvram->data.Symbios);
+ break;
+ }
+ if (driver_setup.use_nvram & 0x2)
+ tp->usrsync = driver_setup.default_sync;
+ if (driver_setup.use_nvram & 0x4)
+ tp->usrwide = driver_setup.max_wide;
+ if (driver_setup.use_nvram & 0x8)
+ tp->usrflag &= ~UF_NOSCAN;
+ }
+ else {
+#else
+ if (1) {
+#endif
+ tp->usrsync = driver_setup.default_sync;
+ tp->usrwide = driver_setup.max_wide;
+ tp->usrtags = MAX_TAGS;
+ if (!driver_setup.disconnection)
+ np->target[i].usrflag = UF_NODISC;
+ }
+ }
+
+ /*
+ ** Announce all that stuff to user.
+ */
+
+ i = nvram ? nvram->type : 0;
+ printk(KERN_INFO "%s: %sID %d, Fast-%d%s%s\n", ncr_name(np),
+ i == SCSI_NCR_SYMBIOS_NVRAM ? "Symbios format NVRAM, " :
+ (i == SCSI_NCR_TEKRAM_NVRAM ? "Tekram format NVRAM, " : ""),
+ np->myaddr,
+ np->minsync < 10 ? 80 :
+ (np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10) ),
+ (np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity",
+ (np->rv_stest2 & 0x20) ? ", Differential" : "");
+
+ if (bootverbose > 1) {
+ printk (KERN_INFO "%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
+ np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
+
+ printk (KERN_INFO "%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
+ "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
+ ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
+ np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
+ }
+
+ if (bootverbose && np->base2_ba)
+ printk (KERN_INFO "%s: on-chip RAM at 0x%lx\n",
+ ncr_name(np), np->base2_ba);
+
+ return 0;
+}
+
+
+#ifdef SCSI_NCR_DEBUG_NVRAM
+
+void __init ncr_display_Symbios_nvram(ncb_p np, Symbios_nvram *nvram)
+{
+ int i;
+
+ /* display Symbios nvram host data */
+ printk(KERN_DEBUG "%s: HOST ID=%d%s%s%s%s%s\n",
+ ncr_name(np), nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+ (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
+ (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+ /* display Symbios nvram drive data */
+ for (i = 0 ; i < 15 ; i++) {
+ struct Symbios_target *tn = &nvram->target[i];
+ printk(KERN_DEBUG "%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+ ncr_name(np), i,
+ (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+ (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+ tn->bus_width,
+ tn->sync_period / 4,
+ tn->timeout);
+ }
+}
+
+static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
+
+void __init ncr_display_Tekram_nvram(ncb_p np, Tekram_nvram *nvram)
+{
+ int i, tags, boot_delay;
+ char *rem;
+
+ /* display Tekram nvram host data */
+ tags = 2 << nvram->max_tags_index;
+ boot_delay = 0;
+ if (nvram->boot_delay_index < 6)
+ boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+ switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+ default:
+ case 0: rem = ""; break;
+ case 1: rem = " REMOVABLE=boot device"; break;
+ case 2: rem = " REMOVABLE=all"; break;
+ }
+
+ printk(KERN_DEBUG
+ "%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ ncr_name(np), nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES" :"",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+ rem, boot_delay, tags);
+
+ /* display Tekram nvram drive data */
+ for (i = 0; i <= 15; i++) {
+ int sync, j;
+ struct Tekram_target *tn = &nvram->target[i];
+ j = tn->sync_index & 0xf;
+ sync = Tekram_sync[j];
+ printk(KERN_DEBUG "%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
+ ncr_name(np), i,
+ (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+ (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+ (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & TEKRAM_START_CMD) ? " START" : "",
+ (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+ (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+ sync);
+ }
+}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+
+/*
+** Host attach and initialisations.
+**
+** Allocate host data and ncb structure.
+** Request IO region and remap MMIO region.
+** Do chip initialization.
+** If all is OK, install interrupt handling and
+** start the timer daemon.
+*/
+
+static int __init
+ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device)
+{
+ struct host_data *host_data;
+ ncb_p np = 0;
+ struct Scsi_Host *instance = 0;
+ u_long flags = 0;
+ ncr_nvram *nvram = device->nvram;
+ int i;
+
+ printk(KERN_INFO NAME53C "%s-%d: rev 0x%x on pci bus %d device %d function %d "
+#ifdef __sparc__
+ "irq %s\n",
+#else
+ "irq %d\n",
+#endif
+ device->chip.name, unit, device->chip.revision_id,
+ device->slot.bus, (device->slot.device_fn & 0xf8) >> 3,
+ device->slot.device_fn & 7,
+#ifdef __sparc__
+ __irq_itoa(device->slot.irq));
+#else
+ device->slot.irq);
+#endif
+
+ /*
+ ** Allocate host_data structure
+ */
+ if (!(instance = scsi_register(tpnt, sizeof(*host_data))))
+ goto attach_error;
+ host_data = (struct host_data *) instance->hostdata;
+
+ /*
+ ** Allocate the host control block.
+ */
+ np = __m_calloc_dma(device->pdev, sizeof(struct ncb), "NCB");
+ if (!np)
+ goto attach_error;
+ NCR_INIT_LOCK_NCB(np);
+ np->pdev = device->pdev;
+ np->p_ncb = vtobus(np);
+ host_data->ncb = np;
+
+ /*
+	**	Store input information in the host data structure.
+ */
+ strncpy(np->chip_name, device->chip.name, sizeof(np->chip_name) - 1);
+ np->unit = unit;
+ np->verbose = driver_setup.verbose;
+ sprintf(np->inst_name, NAME53C "%s-%d", np->chip_name, np->unit);
+ np->device_id = device->chip.device_id;
+ np->revision_id = device->chip.revision_id;
+ np->bus = device->slot.bus;
+ np->device_fn = device->slot.device_fn;
+ np->features = device->chip.features;
+ np->clock_divn = device->chip.nr_divisor;
+ np->maxoffs = device->chip.offset_max;
+ np->maxburst = device->chip.burst_max;
+ np->myaddr = device->host_id;
+
+ /*
+ ** Allocate the start queue.
+ */
+ np->squeue = (ncrcmd *)
+ m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "SQUEUE");
+ if (!np->squeue)
+ goto attach_error;
+ np->p_squeue = vtobus(np->squeue);
+
+ /*
+ ** Allocate the done queue.
+ */
+ np->dqueue = (ncrcmd *)
+ m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "DQUEUE");
+ if (!np->dqueue)
+ goto attach_error;
+
+ /*
+ ** Allocate the target bus address array.
+ */
+ np->targtbl = (u_int32 *) m_calloc_dma(256, "TARGTBL");
+ if (!np->targtbl)
+ goto attach_error;
+
+ /*
+ ** Allocate SCRIPTS areas
+ */
+ np->script0 = (struct script *)
+ m_calloc_dma(sizeof(struct script), "SCRIPT");
+ if (!np->script0)
+ goto attach_error;
+ np->scripth0 = (struct scripth *)
+ m_calloc_dma(sizeof(struct scripth), "SCRIPTH");
+ if (!np->scripth0)
+ goto attach_error;
+
+ /*
+	**	Initialize the CCB free queue and
+	**	allocate some CCBs. We need at least ONE.
+ */
+ xpt_que_init(&np->free_ccbq);
+ xpt_que_init(&np->b0_ccbq);
+ if (!ncr_alloc_ccb(np))
+ goto attach_error;
+
+ /*
+ ** Initialize timer structure
+ **
+ */
+ init_timer(&np->timer);
+ np->timer.data = (unsigned long) np;
+ np->timer.function = sym53c8xx_timeout;
+
+ /*
+ ** Try to map the controller chip to
+ ** virtual and physical memory.
+ */
+
+ np->base_ba = device->slot.base;
+ np->base_ws = (np->features & FE_IO256)? 256 : 128;
+ np->base2_ba = (np->features & FE_RAM)? device->slot.base_2 : 0;
+
+#ifndef SCSI_NCR_IOMAPPED
+ np->base_va = remap_pci_mem(np->base_ba, np->base_ws);
+ if (!np->base_va) {
+ printk(KERN_ERR "%s: can't map PCI MMIO region\n",ncr_name(np));
+ goto attach_error;
+ }
+ else if (bootverbose > 1)
+ printk(KERN_INFO "%s: using memory mapped IO\n", ncr_name(np));
+
+ /*
+ ** Make the controller's registers available.
+ ** Now the INB INW INL OUTB OUTW OUTL macros
+ ** can be used safely.
+ */
+
+ np->reg = (struct ncr_reg *) np->base_va;
+
+#endif /* !defined SCSI_NCR_IOMAPPED */
+
+ /*
+ ** If on-chip RAM is used, make sure SCRIPTS isn't too large.
+ */
+ if (np->base2_ba && sizeof(struct script) > 4096) {
+ printk(KERN_ERR "%s: script too large.\n", ncr_name(np));
+ goto attach_error;
+ }
+
+ /*
+ ** Try to map the controller chip into iospace.
+ */
+
+ if (device->slot.io_port) {
+ request_region(device->slot.io_port, np->base_ws, NAME53C8XX);
+ np->base_io = device->slot.io_port;
+ }
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvram) {
+ switch(nvram->type) {
+ case SCSI_NCR_SYMBIOS_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Symbios_nvram(np, &nvram->data.Symbios);
+#endif
+ break;
+ case SCSI_NCR_TEKRAM_NVRAM:
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ ncr_display_Tekram_nvram(np, &nvram->data.Tekram);
+#endif
+ break;
+ default:
+ nvram = 0;
+#ifdef SCSI_NCR_DEBUG_NVRAM
+ printk(KERN_DEBUG "%s: NVRAM: None or invalid data.\n", ncr_name(np));
+#endif
+ }
+ }
+#endif
+
+ /*
+ ** Save setting of some IO registers, so we will
+ ** be able to probe specific implementations.
+ */
+ ncr_save_initial_setting (np);
+
+ /*
+ ** Reset the chip now, since it has been reported
+ ** that SCSI clock calibration may not work properly
+ ** if the chip is currently active.
+ */
+ ncr_chip_reset (np);
+
+ /*
+ ** Do chip dependent initialization.
+ */
+ (void) ncr_prepare_setting(np, nvram);
+
+ /*
+ ** Check the PCI clock frequency if needed.
+ **
+ ** Must be done after ncr_prepare_setting since it destroys
+ ** STEST1 that is used to probe for the clock multiplier.
+ **
+ ** The range is currently [22688 - 45375 Khz], given
+ ** the values used by ncr_getclock().
+	**	This calibration of the frequency measurement
+ ** algorithm against the PCI clock frequency is only
+ ** performed if the driver has had to measure the SCSI
+ ** clock due to other heuristics not having been enough
+ ** to deduce the SCSI clock frequency.
+ **
+ ** When the chip has been initialized correctly by the
+ ** SCSI BIOS, the driver deduces the presence of the
+ ** clock multiplier and the value of the SCSI clock from
+ ** initial values of IO registers, and therefore no
+ ** clock measurement is performed.
+ ** Normally the driver should never have to measure any
+ ** clock, unless the controller may use a 80 MHz clock
+ ** or has a clock multiplier and any of the following
+ ** condition is met:
+ **
+ ** - No SCSI BIOS is present.
+	**	- SCSI BIOS didn't enable the multiplier for some reason.
+ ** - User has disabled the controller from the SCSI BIOS.
+	**	- User booted the O/S from another O/S that didn't enable
+ ** the multiplier for some reason.
+ **
+ ** As a result, the driver may only have to measure some
+ ** frequency in very unusual situations.
+ **
+ ** For this reality test against the PCI clock to really
+ ** protect against flaws in the udelay() calibration or
+ ** driver problem that affect the clock measurement
+ ** algorithm, the actual PCI clock frequency must be 33 MHz.
+ */
+ i = np->pciclock_max ? ncr_getpciclock(np) : 0;
+ if (i && (i < np->pciclock_min || i > np->pciclock_max)) {
+ printk(KERN_ERR "%s: PCI clock (%u KHz) is out of range "
+ "[%u KHz - %u KHz].\n",
+ ncr_name(np), i, np->pciclock_min, np->pciclock_max);
+ goto attach_error;
+ }
+
+ /*
+ ** Patch script to physical addresses
+ */
+ ncr_script_fill (&script0, &scripth0);
+
+ np->p_script = vtobus(np->script0);
+ np->p_scripth = vtobus(np->scripth0);
+ np->p_scripth0 = np->p_scripth;
+
+ if (np->base2_ba) {
+ np->p_script = pcivtobus(np->base2_ba);
+ if (np->features & FE_RAM8K) {
+ np->base2_ws = 8192;
+ np->p_scripth = np->p_script + 4096;
+#if BITS_PER_LONG > 32
+ np->scr_ram_seg = cpu_to_scr(np->base2_ba >> 32);
+#endif
+ }
+ else
+ np->base2_ws = 4096;
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ np->base2_va = remap_pci_mem(np->base2_ba, np->base2_ws);
+ if (!np->base2_va) {
+ printk(KERN_ERR "%s: can't map PCI MEMORY region\n",
+ ncr_name(np));
+ goto attach_error;
+ }
+#endif
+ }
+
+ ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
+ ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));
+
+ /*
+ ** Patch some variables in SCRIPTS
+ */
+ np->scripth0->pm0_data_addr[0] =
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, pm0_data));
+ np->scripth0->pm1_data_addr[0] =
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, pm1_data));
+
+ /*
+	**	Patch only for Ultra3 chips - others must not write to scntl4.
+ */
+ if (np->features & FE_ULTRA3) {
+ np->script0->resel_scntl4[0] = cpu_to_scr(SCR_LOAD_REL (scntl4, 1));
+ np->script0->resel_scntl4[1] = cpu_to_scr(offsetof(struct tcb, uval));
+ }
+
+
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ np->scripth0->script0_ba[0] = cpu_to_scr(vtobus(np->script0));
+ np->scripth0->script0_ba64[0] = cpu_to_scr(vtobus(np->script0));
+ np->scripth0->scripth0_ba64[0] = cpu_to_scr(vtobus(np->scripth0));
+ np->scripth0->ram_seg64[0] = np->scr_ram_seg;
+#endif
+ /*
+ ** Prepare the idle and invalid task actions.
+ */
+ np->idletask.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->idletask.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
+ np->p_idletask = NCB_PHYS(np, idletask);
+
+ np->notask.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->notask.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
+ np->p_notask = NCB_PHYS(np, notask);
+
+ np->bad_i_t_l.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->bad_i_t_l.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l));
+ np->p_bad_i_t_l = NCB_PHYS(np, bad_i_t_l);
+
+ np->bad_i_t_l_q.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ np->bad_i_t_l_q.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np,bad_i_t_l_q));
+ np->p_bad_i_t_l_q = NCB_PHYS(np, bad_i_t_l_q);
+
+ /*
+ ** Allocate and prepare the bad lun table.
+ */
+ np->badluntbl = m_calloc_dma(256, "BADLUNTBL");
+ if (!np->badluntbl)
+ goto attach_error;
+
+ assert (offsetof(struct lcb, resel_task) == 0);
+ np->resel_badlun = cpu_to_scr(NCB_SCRIPTH_PHYS(np, resel_bad_lun));
+
+ for (i = 0 ; i < 64 ; i++)
+ np->badluntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun));
+
+ /*
+ ** Prepare the target bus address array.
+ */
+ np->scripth0->targtbl[0] = cpu_to_scr(vtobus(np->targtbl));
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ np->targtbl[i] = cpu_to_scr(NCB_PHYS(np, target[i]));
+ np->target[i].b_luntbl = cpu_to_scr(vtobus(np->badluntbl));
+ np->target[i].b_lun0 = cpu_to_scr(NCB_PHYS(np, resel_badlun));
+ }
+
+ /*
+ ** Patch the script for LED support.
+ */
+
+ if (np->features & FE_LED0) {
+ np->script0->idle[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01));
+ np->script0->reselected[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ np->script0->start[0] =
+ cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe));
+ }
+
+ /*
+ ** Patch the script to provide an extra clock cycle on
+ ** data out phase - 53C1010_66MHz part only.
+ */
+ if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66){
+ np->script0->datao_phase[0] =
+ cpu_to_scr(SCR_REG_REG(scntl4, SCR_OR, 0x0c));
+ }
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If user does not want to use IMMEDIATE ARBITRATION
+ ** when we are reselected while attempting to arbitrate,
+ ** patch the SCRIPTS accordingly with a SCRIPT NO_OP.
+ */
+ if (!(driver_setup.iarb & 1))
+ np->script0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
+ /*
+ ** If user wants IARB to be set when we win arbitration
+ ** and have other jobs, compute the max number of consecutive
+	**	settings of the IARB hint before we give devices a chance to
+ ** arbitrate for reselection.
+ */
+ np->iarb_max = (driver_setup.iarb >> 4);
+#endif
+
+ /*
+ ** DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5.
+ */
+ if (np->device_id == PCI_DEVICE_ID_NCR_53C896 &&
+ np->revision_id <= 0x1 && (np->features & FE_NOPM)) {
+ np->scatter = ncr_scatter_896R1;
+ np->script0->datai_phase[0] = cpu_to_scr(SCR_JUMP);
+ np->script0->datai_phase[1] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
+ np->script0->datao_phase[0] = cpu_to_scr(SCR_JUMP);
+ np->script0->datao_phase[1] =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
+ }
+ else
+#ifdef DEBUG_896R1
+ np->scatter = ncr_scatter_896R1;
+#else
+ np->scatter = ncr_scatter;
+#endif
+
+ /*
+ ** Reset chip.
+	**	We should use ncr_soft_reset(), but we do not want to do
+	**	so, since we may not be safe if an ABRT interrupt occurs due
+	**	to the BIOS or a previous O/S having enabled this interrupt.
+ **
+	**	For the C1010, the ABRT bit must be set prior to SRST if
+	**	SCRIPTS are running, which is not the case here.
+ */
+ ncr_chip_reset(np);
+
+ /*
+ ** Now check the cache handling of the pci chipset.
+ */
+
+ if (ncr_snooptest (np)) {
+ printk (KERN_ERR "CACHE INCORRECTLY CONFIGURED.\n");
+ goto attach_error;
+ };
+
+ /*
+ ** Install the interrupt handler.
+	**	If we synchronize the C code with SCRIPTS on interrupt,
+	**	we do not want to share the INTR line at all.
+ */
+ if (request_irq(device->slot.irq, sym53c8xx_intr,
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ ((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT),
+#else
+ ((driver_setup.irqm & 0x10) ? 0 : SA_SHIRQ) |
+
+#if 0 && LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+ ((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT),
+#else
+ 0,
+#endif
+#endif
+ NAME53C8XX, np)) {
+ printk(KERN_ERR "%s: request irq %d failure\n",
+ ncr_name(np), device->slot.irq);
+ goto attach_error;
+ }
+ np->irq = device->slot.irq;
+
+ /*
+ ** After SCSI devices have been opened, we cannot
+ ** reset the bus safely, so we do it here.
+ ** Interrupt handler does the real work.
+ ** Process the reset exception,
+ ** if interrupts are not enabled yet.
+ ** Then enable disconnects.
+ */
+ NCR_LOCK_NCB(np, flags);
+ if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) {
+ printk(KERN_ERR "%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np));
+
+ NCR_UNLOCK_NCB(np, flags);
+ goto attach_error;
+ }
+ ncr_exception (np);
+
+ /*
+ ** The middle-level SCSI driver does not
+ ** wait for devices to settle.
+	**	Wait synchronously if the settle delay is more than 2 seconds.
+ */
+ if (driver_setup.settle_delay > 2) {
+ printk(KERN_INFO "%s: waiting %d seconds for scsi devices to settle...\n",
+ ncr_name(np), driver_setup.settle_delay);
+ MDELAY (1000 * driver_setup.settle_delay);
+ }
+
+ /*
+ ** start the timeout daemon
+ */
+ np->lasttime=0;
+ ncr_timeout (np);
+
+ /*
+ ** use SIMPLE TAG messages by default
+ */
+#ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG
+ np->order = M_SIMPLE_TAG;
+#endif
+
+ /*
+ ** Done.
+ */
+ if (!first_host)
+ first_host = instance;
+
+ /*
+ ** Fill Linux host instance structure
+ ** and return success.
+ */
+ instance->max_channel = 0;
+ instance->this_id = np->myaddr;
+ instance->max_id = np->maxwide ? 16 : 8;
+ instance->max_lun = MAX_LUN;
+#ifndef SCSI_NCR_IOMAPPED
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,29)
+ instance->base = (unsigned long) np->reg;
+#else
+ instance->base = (char *) np->reg;
+#endif
+#endif
+ instance->irq = np->irq;
+ instance->unique_id = np->base_io;
+ instance->io_port = np->base_io;
+ instance->n_io_port = np->base_ws;
+ instance->dma_channel = 0;
+ instance->cmd_per_lun = MAX_TAGS;
+ instance->can_queue = (MAX_START-4);
+
+ np->check_integrity = 0;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ instance->check_integrity = 0;
+
+#ifdef SCSI_NCR_ENABLE_INTEGRITY_CHECK
+ if ( !(driver_setup.bus_check & 0x04) ) {
+ np->check_integrity = 1;
+ instance->check_integrity = 1;
+ }
+#endif
+#endif
+
+ instance->select_queue_depths = sym53c8xx_select_queue_depths;
+
+ NCR_UNLOCK_NCB(np, flags);
+
+ /*
+ ** Now let the generic SCSI driver
+ ** look for the SCSI devices on the bus ..
+ */
+ return 0;
+
+attach_error:
+ if (!instance) return -1;
+ printk(KERN_INFO "%s: giving up ...\n", ncr_name(np));
+ if (np)
+ ncr_free_resources(np);
+ scsi_unregister(instance);
+
+ return -1;
+ }
+
+
+/*
+** Free controller resources.
+*/
+static void ncr_free_resources(ncb_p np)
+{
+ ccb_p cp;
+ tcb_p tp;
+ lcb_p lp;
+ int target, lun;
+
+ if (np->irq)
+ free_irq(np->irq, np);
+ if (np->base_io)
+ release_region(np->base_io, np->base_ws);
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ if (np->base_va)
+ unmap_pci_mem(np->base_va, np->base_ws);
+ if (np->base2_va)
+ unmap_pci_mem(np->base2_va, np->base2_ws);
+#endif
+ if (np->scripth0)
+ m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH");
+ if (np->script0)
+ m_free_dma(np->script0, sizeof(struct script), "SCRIPT");
+ if (np->squeue)
+ m_free_dma(np->squeue, sizeof(ncrcmd)*(MAX_START*2), "SQUEUE");
+ if (np->dqueue)
+ m_free_dma(np->dqueue, sizeof(ncrcmd)*(MAX_START*2),"DQUEUE");
+
+ while ((cp = np->ccbc) != NULL) {
+ np->ccbc = cp->link_ccb;
+ m_free_dma(cp, sizeof(*cp), "CCB");
+ }
+
+ if (np->badluntbl)
+ m_free_dma(np->badluntbl, 256,"BADLUNTBL");
+
+ for (target = 0; target < MAX_TARGET ; target++) {
+ tp = &np->target[target];
+ for (lun = 0 ; lun < MAX_LUN ; lun++) {
+ lp = ncr_lp(np, tp, lun);
+ if (!lp)
+ continue;
+ if (lp->tasktbl != &lp->tasktbl_0)
+ m_free_dma(lp->tasktbl, MAX_TASKS*4, "TASKTBL");
+ if (lp->cb_tags)
+ m_free(lp->cb_tags, MAX_TAGS, "CB_TAGS");
+ m_free_dma(lp, sizeof(*lp), "LCB");
+ }
+#if MAX_LUN > 1
+ if (tp->lmp)
+ m_free(tp->lmp, MAX_LUN * sizeof(lcb_p), "LMP");
+ if (tp->luntbl)
+ m_free_dma(tp->luntbl, 256, "LUNTBL");
+#endif
+ }
+
+ if (np->targtbl)
+ m_free_dma(np->targtbl, 256, "TARGTBL");
+
+ m_free_dma(np, sizeof(*np), "NCB");
+}
+
+
+/*==========================================================
+**
+**
+** Done SCSI commands list management.
+**
+** We do not enter the scsi_done() callback immediately
+** after a command has been seen as completed, but we
+** insert it into a list which is flushed outside any kind
+** of driver critical section.
+** This lets us do minimal work under interrupt and inside
+** critical sections, and also avoids locking up on
+** recursive calls to driver entry points under SMP.
+** In fact, the only kernel entry point that is called by the
+** driver with a driver lock held is get_free_pages(GFP_ATOMIC...),
+** which shall not reenter the driver under any circumstance.
+**
+**==========================================================
+*/
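+/*
+** A typical usage pattern (sketch): completion paths call
+** ncr_queue_done_cmd() while the driver lock is held; once the
+** lock has been released, the collected list is handed to
+** ncr_flush_done_cmds(), which calls scsi_done() for each
+** command outside any critical section.
+*/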
+static inline void ncr_queue_done_cmd(ncb_p np, Scsi_Cmnd *cmd)
+{
+ unmap_scsi_data(np, cmd);
+ cmd->host_scribble = (char *) np->done_list;
+ np->done_list = cmd;
+}
+
+static inline void ncr_flush_done_cmds(Scsi_Cmnd *lcmd)
+{
+ Scsi_Cmnd *cmd;
+
+ while (lcmd) {
+ cmd = lcmd;
+ lcmd = (Scsi_Cmnd *) cmd->host_scribble;
+ cmd->scsi_done(cmd);
+ }
+}
+
+/*==========================================================
+**
+**
+** Prepare the next negotiation message for integrity check,
+** if needed.
+**
+** Fill in the part of message buffer that contains the
+** negotiation and the nego_status field of the CCB.
+** Returns the size of the message in bytes.
+**
+** If tp->ppr_negotiation is 1 and a M_REJECT occurs, then
+** we disable ppr_negotiation. If the first ppr_negotiation is
+** successful, set this flag to 2.
+**
+**==========================================================
+*/
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr)
+{
+ tcb_p tp = &np->target[cp->target];
+ int msglen = 0;
+ int nego = 0;
+ u_char new_width, new_offset, new_period;
+ u_char no_increase;
+
+ if (tp->ppr_negotiation == 1) /* PPR message successful */
+ tp->ppr_negotiation = 2;
+
+ if (tp->inq_done) {
+
+ if (!tp->ic_maximums_set) {
+ tp->ic_maximums_set = 1;
+
+ /*
+ * Check against target, host and user limits
+ */
+ if ( (tp->inq_byte7 & INQ7_WIDE16) &&
+ np->maxwide && tp->usrwide)
+ tp->ic_max_width = 1;
+ else
+ tp->ic_max_width = 0;
+
+
+ if ((tp->inq_byte7 & INQ7_SYNC) && tp->maxoffs)
+ tp->ic_min_sync = (tp->minsync < np->minsync) ?
+ np->minsync : tp->minsync;
+ else
+ tp->ic_min_sync = 255;
+
+ tp->period = 1;
+ tp->widedone = 1;
+
+ /*
+ * Enable PPR negotiation - only if Ultra3 support
+ * is accessible.
+ */
+
+#if 0
+ if (tp->ic_max_width && (tp->ic_min_sync != 255 ))
+ tp->ppr_negotiation = 1;
+#endif
+ tp->ppr_negotiation = 0;
+ if (np->features & FE_ULTRA3) {
+ if (tp->ic_max_width && (tp->ic_min_sync == 0x09))
+ tp->ppr_negotiation = 1;
+ }
+
+ if (!tp->ppr_negotiation)
+ cmd->ic_nego &= ~NS_PPR;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_IC) {
+ printk("%s: cmd->ic_nego %d, 1st byte 0x%2X\n",
+ ncr_name(np), cmd->ic_nego, cmd->cmnd[0]);
+ }
+
+ /* Previous command recorded a parity or an initiator
+ * detected error condition. Force bus to narrow for this
+		 * target. Clear flag. Negotiation on request sense.
+ * Note: kernel forces 2 bus resets :o( but clears itself out.
+ * Minor bug? in scsi_obsolete.c (ugly)
+ */
+ if (np->check_integ_par) {
+ printk("%s: Parity Error. Target set to narrow.\n",
+ ncr_name(np));
+ tp->ic_max_width = 0;
+ tp->widedone = tp->period = 0;
+ }
+
+ /* Initializing:
+ * If ic_nego == NS_PPR, we are in the initial test for
+ * PPR messaging support. If driver flag is clear, then
+ * either we don't support PPR nego (narrow or async device)
+ * or this is the second TUR and we have had a M. REJECT
+ * or unexpected disconnect on the first PPR negotiation.
+ * Do not negotiate, reset nego flags (in case a reset has
+ * occurred), clear ic_nego and return.
+ * General case: Kernel will clear flag on a fallback.
+ * Do only SDTR or WDTR in the future.
+ */
+ if (!tp->ppr_negotiation && (cmd->ic_nego == NS_PPR )) {
+ tp->ppr_negotiation = 0;
+ cmd->ic_nego &= ~NS_PPR;
+ tp->widedone = tp->period = 1;
+ return msglen;
+ }
+ else if (( tp->ppr_negotiation && !(cmd->ic_nego & NS_PPR )) ||
+ (!tp->ppr_negotiation && (cmd->ic_nego & NS_PPR )) ) {
+ tp->ppr_negotiation = 0;
+ cmd->ic_nego &= ~NS_PPR;
+ }
+
+ /*
+ * Always check the PPR nego. flag bit if ppr_negotiation
+ * is set. If the ic_nego PPR bit is clear,
+ * there must have been a fallback. Do only
+ * WDTR / SDTR in the future.
+ */
+ if ((tp->ppr_negotiation) && (!(cmd->ic_nego & NS_PPR)))
+ tp->ppr_negotiation = 0;
+
+ /* In case of a bus reset, ncr_negotiate will reset
+ * the flags tp->widedone and tp->period to 0, forcing
+ * a new negotiation. Do WDTR then SDTR. If PPR, do both.
+ * Do NOT increase the period. It is possible for the Scsi_Cmnd
+ * flags to be set to increase the period when a bus reset
+ * occurs - we don't want to change anything.
+ */
+
+ no_increase = 0;
+
+ if (tp->ppr_negotiation && (!tp->widedone) && (!tp->period) ) {
+ cmd->ic_nego = NS_PPR;
+ tp->widedone = tp->period = 1;
+ no_increase = 1;
+ }
+ else if (!tp->widedone) {
+ cmd->ic_nego = NS_WIDE;
+ tp->widedone = 1;
+ no_increase = 1;
+ }
+ else if (!tp->period) {
+ cmd->ic_nego = NS_SYNC;
+ tp->period = 1;
+ no_increase = 1;
+ }
+
+ new_width = cmd->ic_nego_width & tp->ic_max_width;
+
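+		/*
+		** When asked to increase the period (case 2 below), the
+		** factor steps through 0x09 -> 0x0A -> 0x0C -> 0x19, then
+		** doubles, and finally falls back to asynchronous
+		** (factor 255, offset 0) when nothing slower is left.
+		*/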
+ switch (cmd->ic_nego_sync) {
+ case 2: /* increase the period */
+ if (!no_increase) {
+ if (tp->ic_min_sync <= 0x09)
+ tp->ic_min_sync = 0x0A;
+ else if (tp->ic_min_sync <= 0x0A)
+ tp->ic_min_sync = 0x0C;
+ else if (tp->ic_min_sync <= 0x0C)
+ tp->ic_min_sync = 0x19;
+ else if (tp->ic_min_sync <= 0x19)
+ tp->ic_min_sync *= 2;
+ else {
+ tp->ic_min_sync = 255;
+ cmd->ic_nego_sync = 0;
+ tp->maxoffs = 0;
+ }
+ }
+ new_period = tp->maxoffs?tp->ic_min_sync:0;
+ new_offset = tp->maxoffs;
+ break;
+
+ case 1: /* nego. to maximum */
+ new_period = tp->maxoffs?tp->ic_min_sync:0;
+ new_offset = tp->maxoffs;
+ break;
+
+ case 0: /* nego to async */
+ default:
+ new_period = 0;
+ new_offset = 0;
+ break;
+ };
+
+
+ nego = NS_NOCHANGE;
+ if (tp->ppr_negotiation) {
+ u_char options_byte = 0;
+
+ /*
+ ** Must make sure data is consistent.
+ ** If period is 9 and sync, must be wide and DT bit set.
+ ** else period must be larger. If the width is 0,
+ ** reset bus to wide but increase the period to 0x0A.
+ ** Note: The strange else clause is due to the integrity check.
+		** If it fails at 0x09, wide, the integrity check code will
+		** redo the check at the same speed but on a narrow bus. The
+		** driver must take care of slowing the bus speed down.
+ **
+ ** The maximum offset in ST mode is 31, in DT mode 62 (1010/1010_66 only)
+ */
+ if ( (new_period==0x09) && new_offset) {
+ if (new_width)
+ options_byte = 0x02;
+ else {
+ tp->ic_min_sync = 0x0A;
+ new_period = 0x0A;
+ cmd->ic_nego_width = 1;
+ new_width = 1;
+ new_offset &= 0x1f;
+ }
+ }
+ else if (new_period > 0x09)
+ new_offset &= 0x1f;
+
+ nego = NS_PPR;
+
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 6;
+ msgptr[msglen++] = M_X_PPR_REQ;
+ msgptr[msglen++] = new_period;
+ msgptr[msglen++] = 0;
+ msgptr[msglen++] = new_offset;
+ msgptr[msglen++] = new_width;
+ msgptr[msglen++] = options_byte;
+
+ }
+ else {
+ switch (cmd->ic_nego & ~NS_PPR) {
+ case NS_WIDE:
+ /*
+			** Do WDTR negotiation if the device supports
+			** wide transfers, or if a wide device was forced
+			** narrow due to a parity error.
+ */
+
+ cmd->ic_nego_width &= tp->ic_max_width;
+
+ if (tp->ic_max_width | np->check_integ_par) {
+ nego = NS_WIDE;
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 2;
+ msgptr[msglen++] = M_X_WIDE_REQ;
+ msgptr[msglen++] = new_width;
+ }
+ break;
+
+ case NS_SYNC:
+ /*
+			** Negotiate synchronous transfers.
+			** The target must support sync transfers.
+			** Min. period = 0x0A, maximum offset 31 (0x1f).
+ */
+
+ if (tp->inq_byte7 & INQ7_SYNC) {
+
+ if (new_offset && (new_period < 0x0A)) {
+ tp->ic_min_sync = 0x0A;
+ new_period = 0x0A;
+ }
+ nego = NS_SYNC;
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 3;
+ msgptr[msglen++] = M_X_SYNC_REQ;
+ msgptr[msglen++] = new_period;
+ msgptr[msglen++] = new_offset & 0x1f;
+ }
+ else
+ cmd->ic_nego_sync = 0;
+ break;
+
+ case NS_NOCHANGE:
+ break;
+ }
+ }
+
+ };
+
+ cp->nego_status = nego;
+ np->check_integ_par = 0;
+
+ if (nego) {
+ tp->nego_cp = cp;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, nego == NS_WIDE ?
+ "wide/narrow msgout":
+ (nego == NS_SYNC ? "sync/async msgout" : "ppr msgout"),
+ msgptr);
+ };
+ };
+
+ return msglen;
+}
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+/*==========================================================
+**
+**
+** Prepare the next negotiation message if needed.
+**
+** Fill in the part of message buffer that contains the
+** negotiation and the nego_status field of the CCB.
+** Returns the size of the message in bytes.
+**
+**
+**==========================================================
+*/
+
+
+static int ncr_prepare_nego(ncb_p np, ccb_p cp, u_char *msgptr)
+{
+ tcb_p tp = &np->target[cp->target];
+ int msglen = 0;
+ int nego = 0;
+ u_char width, offset, factor, last_byte;
+
+ if (!np->check_integrity) {
+ /* If integrity checking disabled, enable PPR messaging
+ * if device supports wide, sync and ultra 3
+ */
+ if (tp->ppr_negotiation == 1) /* PPR message successful */
+ tp->ppr_negotiation = 2;
+
+ if ((tp->inq_done) && (!tp->ic_maximums_set)) {
+ tp->ic_maximums_set = 1;
+
+ /*
+ * Issue PPR only if board is capable
+ * and set-up for Ultra3 transfers.
+ */
+ tp->ppr_negotiation = 0;
+ if ( (np->features & FE_ULTRA3) &&
+ (tp->usrwide) && (tp->maxoffs) &&
+ (tp->minsync == 0x09) )
+ tp->ppr_negotiation = 1;
+ }
+ }
+
+ if (tp->inq_done) {
+ /*
+ * Get the current width, offset and period
+ */
+ ncr_get_xfer_info( np, tp, &factor,
+ &offset, &width);
+
+ /*
+ ** negotiate wide transfers ?
+ */
+
+ if (!tp->widedone) {
+ if (tp->inq_byte7 & INQ7_WIDE16) {
+ if (tp->ppr_negotiation)
+ nego = NS_PPR;
+ else
+ nego = NS_WIDE;
+
+ width = tp->usrwide;
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ if (tp->ic_done)
+ width &= tp->ic_max_width;
+#endif
+ } else
+ tp->widedone=1;
+
+ };
+
+ /*
+ ** negotiate synchronous transfers?
+ */
+
+ if ((nego != NS_WIDE) && !tp->period) {
+ if (tp->inq_byte7 & INQ7_SYNC) {
+ if (tp->ppr_negotiation)
+ nego = NS_PPR;
+ else
+ nego = NS_SYNC;
+
+ /* Check for async flag */
+ if (tp->maxoffs == 0) {
+ offset = 0;
+ factor = 0;
+ }
+ else {
+ offset = tp->maxoffs;
+ factor = tp->minsync;
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ if ((tp->ic_done) &&
+ (factor < tp->ic_min_sync))
+ factor = tp->ic_min_sync;
+#endif
+ }
+
+ } else {
+ offset = 0;
+ factor = 0;
+ tp->period =0xffff;
+ PRINT_TARGET(np, cp->target);
+ printk ("target did not report SYNC.\n");
+ };
+ };
+ };
+
+ switch (nego) {
+ case NS_PPR:
+ /*
+ ** Must make sure data is consistent.
+ ** If period is 9 and sync, must be wide and DT bit set
+ ** else period must be larger.
+		** Maximum offset is 31 (0x1f) in ST mode, 62 in DT mode.
+ */
+ last_byte = 0;
+ if ( (factor==9) && offset) {
+ if (!width) {
+ factor = 0x0A;
+ offset &= 0x1f;
+ }
+ else
+ last_byte = 0x02;
+ }
+ else if (factor > 0x09)
+ offset &= 0x1f;
+
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 6;
+ msgptr[msglen++] = M_X_PPR_REQ;
+ msgptr[msglen++] = factor;
+ msgptr[msglen++] = 0;
+ msgptr[msglen++] = offset;
+ msgptr[msglen++] = width;
+ msgptr[msglen++] = last_byte;
+ break;
+ case NS_SYNC:
+ /*
+ ** Never negotiate faster than Ultra 2 (25ns periods)
+ */
+ if (offset && (factor < 0x0A)) {
+ factor = 0x0A;
+ tp->minsync = 0x0A;
+ }
+
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 3;
+ msgptr[msglen++] = M_X_SYNC_REQ;
+ msgptr[msglen++] = factor;
+ msgptr[msglen++] = offset & 0x1f;
+ break;
+ case NS_WIDE:
+ msgptr[msglen++] = M_EXTENDED;
+ msgptr[msglen++] = 2;
+ msgptr[msglen++] = M_X_WIDE_REQ;
+ msgptr[msglen++] = width;
+ break;
+ };
+
+ cp->nego_status = nego;
+
+ if (nego) {
+ tp->nego_cp = cp;
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, nego == NS_WIDE ?
+ "wide msgout":
+ (nego == NS_SYNC ? "sync msgout" : "ppr msgout"),
+ msgptr);
+ };
+ };
+
+ return msglen;
+}
+
+/*==========================================================
+**
+**
+** Start execution of a SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_queue_command (ncb_p np, Scsi_Cmnd *cmd)
+{
+/* Scsi_Device *device = cmd->device; */
+ tcb_p tp = &np->target[cmd->target];
+ lcb_p lp = ncr_lp(np, tp, cmd->lun);
+ ccb_p cp;
+
+ u_char idmsg, *msgptr;
+ u_int msglen;
+ int direction;
+ u_int32 lastp, goalp;
+
+ /*---------------------------------------------
+ **
+ ** Some shortcuts ...
+ **
+ **---------------------------------------------
+ */
+ if ((cmd->target == np->myaddr ) ||
+ (cmd->target >= MAX_TARGET) ||
+ (cmd->lun >= MAX_LUN )) {
+ return(DID_BAD_TARGET);
+ }
+
+ /*---------------------------------------------
+ **
+ ** Complete the 1st TEST UNIT READY command
+ ** with error condition if the device is
+ ** flagged NOSCAN, in order to speed up
+ ** the boot.
+ **
+ **---------------------------------------------
+ */
+ if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {
+ tp->usrflag &= ~UF_NOSCAN;
+ return DID_BAD_TARGET;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_TINY) {
+ PRINT_ADDR(cmd);
+ printk ("CMD=%x ", cmd->cmnd[0]);
+ }
+
+ /*---------------------------------------------------
+ **
+ ** Assign a ccb / bind cmd.
+ ** If resetting, shorten settle_time if necessary
+ ** in order to avoid spurious timeouts.
+ ** If resetting or no free ccb,
+ ** insert cmd into the waiting list.
+ **
+ **----------------------------------------------------
+ */
+ if (np->settle_time && cmd->timeout_per_command >= HZ) {
+ u_long tlimit = ktime_get(cmd->timeout_per_command - HZ);
+ if (ktime_dif(np->settle_time, tlimit) > 0)
+ np->settle_time = tlimit;
+ }
+
+ if (np->settle_time || !(cp=ncr_get_ccb (np, cmd->target, cmd->lun))) {
+ insert_into_waiting_list(np, cmd);
+ return(DID_OK);
+ }
+ cp->cmd = cmd;
+
+ /*---------------------------------------------------
+ **
+ ** Enable tagged queue if asked by scsi ioctl
+ **
+ **----------------------------------------------------
+ */
+#if 0	/* This stuff was only useful for linux-1.2.13 */
+ if (lp && !lp->numtags && cmd->device && cmd->device->tagged_queue) {
+ lp->numtags = tp->usrtags;
+ ncr_setup_tags (np, cp->target, cp->lun);
+ }
+#endif
+
+ /*----------------------------------------------------
+ **
+ ** Build the identify / tag / sdtr message
+ **
+ **----------------------------------------------------
+ */
+
+ idmsg = M_IDENTIFY | cp->lun;
+
+ if (cp ->tag != NO_TAG || (lp && !(tp->usrflag & UF_NODISC)))
+ idmsg |= 0x40;
+
+ msgptr = cp->scsi_smsg;
+ msglen = 0;
+ msgptr[msglen++] = idmsg;
+
+ if (cp->tag != NO_TAG) {
+ char order = np->order;
+
+ /*
+ ** Force ordered tag if necessary to avoid timeouts
+ ** and to preserve interactivity.
+ */
+ if (lp && ktime_exp(lp->tags_stime)) {
+ lp->tags_si = !(lp->tags_si);
+ if (lp->tags_sum[lp->tags_si]) {
+ order = M_ORDERED_TAG;
+ if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>0){
+ PRINT_ADDR(cmd);
+ printk("ordered tag forced.\n");
+ }
+ }
+ lp->tags_stime = ktime_get(3*HZ);
+ }
+
+ if (order == 0) {
+ /*
+ ** Ordered write ops, unordered read ops.
+ */
+ switch (cmd->cmnd[0]) {
+ case 0x08: /* READ_SMALL (6) */
+ case 0x28: /* READ_BIG (10) */
+ case 0xa8: /* READ_HUGE (12) */
+ order = M_SIMPLE_TAG;
+ break;
+ default:
+ order = M_ORDERED_TAG;
+ }
+ }
+ msgptr[msglen++] = order;
+ /*
+		** For fewer than 128 tags, the actual tag message bytes are
+		** the odd numbers 1, 3, 5, ..., 2*MAX_TAGS+1, since we may
+		** have to deal with devices that have problems with tag #0
+		** or with too large tag numbers. For more tags (up to 256),
+		** we use our tag number directly.
+ */
+#if MAX_TASKS > (512/4)
+ msgptr[msglen++] = cp->tag;
+#else
+ msgptr[msglen++] = (cp->tag << 1) + 1;
+#endif
+ }
+
+ cp->host_flags = 0;
+
+ /*----------------------------------------------------
+ **
+ ** Build the data descriptors
+ **
+ **----------------------------------------------------
+ */
+
+ direction = scsi_data_direction(cmd);
+ if (direction != SCSI_DATA_NONE) {
+ cp->segments = np->scatter (np, cp, cp->cmd);
+ if (cp->segments < 0) {
+ ncr_free_ccb(np, cp);
+ return(DID_ERROR);
+ }
+ }
+ else {
+ cp->data_len = 0;
+ cp->segments = 0;
+ }
+
+ /*---------------------------------------------------
+ **
+ ** negotiation required?
+ **
+ ** (nego_status is filled by ncr_prepare_nego())
+ **
+ **---------------------------------------------------
+ */
+
+ cp->nego_status = 0;
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ if ((np->check_integrity && tp->ic_done) || !np->check_integrity) {
+ if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) {
+ msglen += ncr_prepare_nego (np, cp, msgptr + msglen);
+ }
+ }
+ else if (np->check_integrity && (cmd->ic_in_progress)) {
+ msglen += ncr_ic_nego (np, cp, cmd, msgptr + msglen);
+ }
+ else if (np->check_integrity && cmd->ic_complete) {
+ u_long current_period;
+ u_char current_offset, current_width, current_factor;
+
+ ncr_get_xfer_info (np, tp, &current_factor,
+ &current_offset, &current_width);
+
+ tp->ic_max_width = current_width;
+ tp->ic_min_sync = current_factor;
+
+ if (current_factor == 9) current_period = 125;
+ else if (current_factor == 10) current_period = 250;
+ else if (current_factor == 11) current_period = 303;
+ else if (current_factor == 12) current_period = 500;
+ else current_period = current_factor * 40;
+
+ /*
+ * Negotiation for this target is complete. Update flags.
+ */
+ tp->period = current_period;
+ tp->widedone = 1;
+ tp->ic_done = 1;
+
+ printk("%s: Integrity Check Complete: \n", ncr_name(np));
+
+ printk("%s: %s %s SCSI", ncr_name(np),
+ current_offset?"SYNC":"ASYNC",
+ tp->ic_max_width?"WIDE":"NARROW");
+ if (current_offset) {
+ u_long mbs = 10000 * (tp->ic_max_width + 1);
+
+ printk(" %d.%d MB/s",
+ (int) (mbs / current_period), (int) (mbs % current_period));
+
+ printk(" (%d ns, %d offset)\n",
+ (int) current_period/10, current_offset);
+ }
+ else
+ printk(" %d MB/s. \n ", (tp->ic_max_width+1)*5);
+ }
+#else
+ if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) {
+ msglen += ncr_prepare_nego (np, cp, msgptr + msglen);
+ }
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+
+ /*----------------------------------------------------
+ **
+ ** Determine xfer direction.
+ **
+ **----------------------------------------------------
+ */
+ if (!cp->data_len)
+ direction = SCSI_DATA_NONE;
+
+ /*
+	**	If the data direction is UNKNOWN, speculate DATA_READ
+	**	but prepare alternate pointers for WRITE in case our
+	**	speculation turns out to be wrong.
+ ** SCRIPTS will swap values if needed.
+ */
+ switch(direction) {
+ case SCSI_DATA_UNKNOWN:
+ case SCSI_DATA_WRITE:
+ goalp = NCB_SCRIPT_PHYS (np, data_out2) + 8;
+ lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
+ if (direction != SCSI_DATA_UNKNOWN)
+ break;
+ cp->phys.header.wgoalp = cpu_to_scr(goalp);
+ cp->phys.header.wlastp = cpu_to_scr(lastp);
+ /* fall through */
+ case SCSI_DATA_READ:
+ cp->host_flags |= HF_DATA_IN;
+ goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
+ lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
+ break;
+ default:
+ case SCSI_DATA_NONE:
+ lastp = goalp = NCB_SCRIPTH_PHYS (np, no_data);
+ break;
+ }
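+	/*
+	** goalp points just past the end of the data transfer SCRIPTS
+	** for the chosen direction; lastp is computed from it by
+	** stepping back over the scatter/gather MOVE instructions
+	** (SCR_SG_SIZE*4 bytes each), so that execution begins at the
+	** first segment.
+	*/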
+
+ /*
+ ** Set all pointers values needed by SCRIPTS.
+ ** If direction is unknown, start at data_io.
+ */
+ cp->phys.header.lastp = cpu_to_scr(lastp);
+ cp->phys.header.goalp = cpu_to_scr(goalp);
+
+ if (direction == SCSI_DATA_UNKNOWN)
+ cp->phys.header.savep =
+ cpu_to_scr(NCB_SCRIPTH_PHYS (np, data_io));
+ else
+ cp->phys.header.savep= cpu_to_scr(lastp);
+
+ /*
+ ** Save the initial data pointer in order to be able
+ ** to redo the command.
+ ** We also have to save the initial lastp, since it
+ ** will be changed to DATA_IO if we don't know the data
+ ** direction and the device completes the command with
+ ** QUEUE FULL status (without entering the data phase).
+ */
+ cp->startp = cp->phys.header.savep;
+ cp->lastp0 = cp->phys.header.lastp;
+
+ /*----------------------------------------------------
+ **
+ ** fill in ccb
+ **
+ **----------------------------------------------------
+ **
+ **
+ ** physical -> virtual backlink
+ ** Generic SCSI command
+ */
+
+ /*
+ ** Startqueue
+ */
+ cp->phys.header.go.start = cpu_to_scr(NCB_SCRIPT_PHYS (np,select));
+ cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPT_PHYS (np,resel_dsa));
+ /*
+ ** select
+ */
+ cp->phys.select.sel_id = cp->target;
+ cp->phys.select.sel_scntl3 = tp->wval;
+ cp->phys.select.sel_sxfer = tp->sval;
+ cp->phys.select.sel_scntl4 = tp->uval;
+ /*
+ ** message
+ */
+ cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
+ cp->phys.smsg.size = cpu_to_scr(msglen);
+
+ /*
+ ** command
+ */
+ memcpy(cp->cdb_buf, cmd->cmnd, MIN(cmd->cmd_len, sizeof(cp->cdb_buf)));
+ cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, cdb_buf[0]));
+ cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
+
+ /*
+ ** status
+ */
+ cp->actualquirks = tp->quirks;
+ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+ cp->xerr_status = 0;
+ cp->extra_bytes = 0;
+
+ /*
+ ** extreme data pointer.
+ ** shall be positive, so -1 is lower than lowest.:)
+ */
+ cp->ext_sg = -1;
+ cp->ext_ofs = 0;
+
+ /*----------------------------------------------------
+ **
+ ** Critical region: start this job.
+ **
+ **----------------------------------------------------
+ */
+
+ /*
+ ** activate this job.
+ */
+
+ /*
+ ** insert next CCBs into start queue.
+ ** 2 max at a time is enough to flush the CCB wait queue.
+ */
+ if (lp)
+ ncr_start_next_ccb(np, lp, 2);
+ else
+ ncr_put_start_queue(np, cp);
+
+ /*
+ ** Command is successfully queued.
+ */
+
+ return(DID_OK);
+}
+
+
+/*==========================================================
+**
+**
+** Insert a CCB into the start queue and wake up the
+** SCRIPTS processor.
+**
+**
+**==========================================================
+*/
+
+static void ncr_start_next_ccb(ncb_p np, lcb_p lp, int maxn)
+{
+ XPT_QUEHEAD *qp;
+ ccb_p cp;
+
+ while (maxn-- && lp->queuedccbs < lp->queuedepth) {
+ qp = xpt_remque_head(&lp->wait_ccbq);
+ if (!qp)
+ break;
+ ++lp->queuedccbs;
+ cp = xpt_que_entry(qp, struct ccb, link_ccbq);
+ xpt_insque_tail(qp, &lp->busy_ccbq);
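+		/*
+		** Untagged commands use entry 0 of the task table;
+		** tagged commands are indexed by their tag number.
+		*/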
+ lp->tasktbl[cp->tag == NO_TAG ? 0 : cp->tag] =
+ cpu_to_scr(cp->p_ccb);
+ ncr_put_start_queue(np, cp);
+ }
+}
+
+static void ncr_put_start_queue(ncb_p np, ccb_p cp)
+{
+ u_short qidx;
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If the previously queued CCB is not yet done,
+ ** set the IARB hint. The SCRIPTS will go with IARB
+ ** for this job when starting the previous one.
+ ** We leave devices a chance to win arbitration by
+ ** not using more than 'iarb_max' consecutive
+ ** immediate arbitrations.
+ */
+ if (np->last_cp && np->iarb_count < np->iarb_max) {
+ np->last_cp->host_flags |= HF_HINT_IARB;
+ ++np->iarb_count;
+ }
+ else
+ np->iarb_count = 0;
+ np->last_cp = cp;
+#endif
+
+ /*
+ ** insert into start queue.
+ */
+ qidx = np->squeueput + 2;
+ if (qidx >= MAX_START*2) qidx = 0;
+
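+	/*
+	** The slot following the new entry is first set to the idle
+	** task so that the queue always stays properly terminated; a
+	** memory barrier orders that store before the new CCB is made
+	** visible at the current put position, and a second barrier
+	** orders the whole update before SIGP wakes up the SCRIPTS
+	** processor.
+	*/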
+ np->squeue [qidx] = cpu_to_scr(np->p_idletask);
+ MEMORY_BARRIER();
+ np->squeue [np->squeueput] = cpu_to_scr(cp->p_ccb);
+
+ np->squeueput = qidx;
+ cp->queued = 1;
+
+ if (DEBUG_FLAGS & DEBUG_QUEUE)
+ printk ("%s: queuepos=%d.\n", ncr_name (np), np->squeueput);
+
+ /*
+ ** Script processor may be waiting for reselect.
+ ** Wake it up.
+ */
+ MEMORY_BARRIER();
+ OUTB (nc_istat, SIGP|np->istat_sem);
+}
+
+
+/*==========================================================
+**
+** Soft reset the chip.
+**
+** Some 896 and 876 chip revisions may hang-up if we set
+** the SRST (soft reset) bit at the wrong time when SCRIPTS
+** are running.
+** So, we need to abort the current operation prior to
+** soft resetting the chip.
+**
+**==========================================================
+*/
+
+static void ncr_chip_reset (ncb_p np)
+{
+ OUTB (nc_istat, SRST);
+ UDELAY (10);
+ OUTB (nc_istat, 0);
+}
+
+static void ncr_soft_reset(ncb_p np)
+{
+ u_char istat;
+ int i;
+
+ OUTB (nc_istat, CABRT);
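+	/*
+	** Poll ISTAT until the abort completes: SCSI interrupts (SIP)
+	** are drained by reading SIST, and once a DMA interrupt (DIP)
+	** shows up, DSTAT is read and the chip can then be reset.
+	*/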
+ for (i = 1000000 ; i ; --i) {
+ istat = INB (nc_istat);
+ if (istat & SIP) {
+ INW (nc_sist);
+ continue;
+ }
+ if (istat & DIP) {
+ OUTB (nc_istat, 0);
+ INB (nc_dstat);
+ break;
+ }
+ }
+ if (!i)
+ printk("%s: unable to abort current chip operation.\n",
+ ncr_name(np));
+ ncr_chip_reset(np);
+}
+
+/*==========================================================
+**
+**
+** Start reset process.
+** The interrupt handler will reinitialize the chip.
+** The timeout handler will wait for settle_time before
+** clearing it and so resuming command processing.
+**
+**
+**==========================================================
+*/
+static void ncr_start_reset(ncb_p np)
+{
+ (void) ncr_reset_scsi_bus(np, 1, driver_setup.settle_delay);
+}
+
+static int ncr_reset_scsi_bus(ncb_p np, int enab_int, int settle_delay)
+{
+ u_int32 term;
+ int retv = 0;
+
+ np->settle_time = ktime_get(settle_delay * HZ);
+
+ if (bootverbose > 1)
+ printk("%s: resetting, "
+ "command processing suspended for %d seconds\n",
+ ncr_name(np), settle_delay);
+
+ ncr_soft_reset(np); /* Soft reset the chip */
+ UDELAY (2000); /* The 895/6 need time for the bus mode to settle */
+ if (enab_int)
+ OUTW (nc_sien, RST);
+ /*
+ ** Enable Tolerant, reset IRQD if present and
+ ** properly set IRQ mode, prior to resetting the bus.
+ */
+ OUTB (nc_stest3, TE);
+ OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
+ OUTB (nc_scntl1, CRST);
+ UDELAY (200);
+
+ if (!driver_setup.bus_check)
+ goto out;
+ /*
+ ** Check for no terminators or SCSI bus shorts to ground.
+ ** Read SCSI data bus, data parity bits and control signals.
+ ** We are expecting RESET to be TRUE and other signals to be
+ ** FALSE.
+ */
+ term = INB(nc_sstat0);
+ term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */
+ term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */
+ ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */
+ ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */
+ INB(nc_sbcl); /* req ack bsy sel atn msg cd io */
+
+ if (!(np->features & FE_WIDE))
+ term &= 0x3ffff;
+
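+	/*
+	** Only the RST bit (2<<7) is expected to be set at this point;
+	** anything else suggests missing terminators or a SCSI bus
+	** short to ground.
+	*/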
+ if (term != (2<<7)) {
+ printk("%s: suspicious SCSI data while resetting the BUS.\n",
+ ncr_name(np));
+ printk("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
+ "0x%lx, expecting 0x%lx\n",
+ ncr_name(np),
+ (np->features & FE_WIDE) ? "dp1,d15-8," : "",
+ (u_long)term, (u_long)(2<<7));
+ if (driver_setup.bus_check == 1)
+ retv = 1;
+ }
+out:
+ OUTB (nc_scntl1, 0);
+ return retv;
+}
+
+/*==========================================================
+**
+**
+** Reset the SCSI BUS.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_reset_bus (ncb_p np, Scsi_Cmnd *cmd, int sync_reset)
+{
+/* Scsi_Device *device = cmd->device; */
+ ccb_p cp;
+ int found;
+
+/*
+ * Return immediately if reset is in progress.
+ */
+ if (np->settle_time) {
+ return SCSI_RESET_PUNT;
+ }
+/*
+ * Start the reset process.
+ * The script processor is then assumed to be stopped.
+ * Commands will now be queued in the waiting list until a settle
+ * delay of 2 seconds will be completed.
+ */
+ ncr_start_reset(np);
+/*
+ * First, look in the wakeup list
+ */
+ for (found=0, cp=np->ccbc; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd) {
+ found = 1;
+ break;
+ }
+ }
+/*
+ * Then, look in the waiting list
+ */
+ if (!found && retrieve_from_waiting_list(0, np, cmd))
+ found = 1;
+/*
+ * Wake-up all awaiting commands with DID_RESET.
+ */
+ reset_waiting_list(np);
+/*
+ * Wake-up all pending commands with HS_RESET -> DID_RESET.
+ */
+ ncr_wakeup(np, HS_RESET);
+/*
+ * If the involved command was not in a driver queue, and the
+ * scsi driver told us reset is synchronous, and the command is not
+ * currently in the waiting list, complete it with DID_RESET status,
+ * in order to keep it alive.
+ */
+ if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
+ SetScsiResult(cmd, DID_RESET, 0);
+ ncr_queue_done_cmd(np, cmd);
+ }
+
+ return SCSI_RESET_SUCCESS;
+}
+
+/*==========================================================
+**
+**
+** Abort an SCSI command.
+** This is called from the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+static int ncr_abort_command (ncb_p np, Scsi_Cmnd *cmd)
+{
+/* Scsi_Device *device = cmd->device; */
+ ccb_p cp;
+
+/*
+ * First, look for the scsi command in the waiting list
+ */
+ if (remove_from_waiting_list(np, cmd)) {
+ SetScsiAbortResult(cmd);
+ ncr_queue_done_cmd(np, cmd);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+/*
+ * Then, look in the wakeup list
+ */
+ for (cp=np->ccbc; cp; cp=cp->link_ccb) {
+ /*
+ ** look for the ccb of this command.
+ */
+ if (cp->host_status == HS_IDLE) continue;
+ if (cp->cmd == cmd)
+ break;
+ }
+
+ if (!cp) {
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ /*
+	**	Keep track that we have to abort this job.
+ */
+ cp->to_abort = 1;
+
+ /*
+ ** Tell the SCRIPTS processor to stop
+ ** and synchronize with us.
+ */
+ np->istat_sem = SEM;
+
+ /*
+ ** If there are no requests, the script
+ ** processor will sleep on SEL_WAIT_RESEL.
+ ** Let's wake it up, since it may have to work.
+ */
+ OUTB (nc_istat, SIGP|SEM);
+
+ /*
+ ** Tell user we are working for him.
+ */
+ return SCSI_ABORT_PENDING;
+}
+
+/*==========================================================
+**
+** Linux release module stuff.
+**
+** Called before unloading the module
+** Detach the host.
+** We have to free resources and halt the NCR chip
+**
+**==========================================================
+*/
+
+#ifdef MODULE
+static int ncr_detach(ncb_p np)
+{
+ int i;
+
+ printk("%s: detaching ...\n", ncr_name(np));
+
+/*
+** Stop the ncr_timeout process
+**	Set release_stage to 1 and wait for ncr_timeout() to set it to 2.
+*/
+ np->release_stage = 1;
+ for (i = 50 ; i && np->release_stage != 2 ; i--) MDELAY (100);
+ if (np->release_stage != 2)
+ printk("%s: the timer seems to be already stopped\n",
+ ncr_name(np));
+ else np->release_stage = 2;
+
+/*
+** Reset NCR chip.
+**	We should use ncr_soft_reset(), but we do not want to do
+** so, since we may not be safe if interrupts occur.
+*/
+
+ printk("%s: resetting chip\n", ncr_name(np));
+ ncr_chip_reset(np);
+
+/*
+** Restore bios setting for automatic clock detection.
+*/
+ OUTB(nc_dmode, np->sv_dmode);
+ OUTB(nc_dcntl, np->sv_dcntl);
+ OUTB(nc_ctest3, np->sv_ctest3);
+ OUTB(nc_ctest4, np->sv_ctest4);
+ OUTB(nc_ctest5, np->sv_ctest5);
+ OUTB(nc_gpcntl, np->sv_gpcntl);
+ OUTB(nc_stest2, np->sv_stest2);
+
+ ncr_selectclock(np, np->sv_scntl3);
+/*
+** Free host resources
+*/
+ ncr_free_resources(np);
+
+ return 1;
+}
+#endif
+
+/*==========================================================
+**
+**
+** Complete execution of a SCSI command.
+** Signal completion to the generic SCSI driver.
+**
+**
+**==========================================================
+*/
+
+void ncr_complete (ncb_p np, ccb_p cp)
+{
+ Scsi_Cmnd *cmd;
+ tcb_p tp;
+ lcb_p lp;
+
+ /*
+ ** Sanity check
+ */
+ if (!cp || !cp->cmd)
+ return;
+
+ /*
+ ** Print some debugging info.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp,
+ cp->host_status,cp->scsi_status);
+
+ /*
+ ** Get command, target and lun pointers.
+ */
+
+ cmd = cp->cmd;
+ cp->cmd = NULL;
+ tp = &np->target[cp->target];
+ lp = ncr_lp(np, tp, cp->lun);
+
+ /*
+	**	We do not queue more than 1 ccb per target
+ ** with negotiation at any time. If this ccb was
+ ** used for negotiation, clear this info in the tcb.
+ */
+
+ if (cp == tp->nego_cp)
+ tp->nego_cp = 0;
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+	**	We have just completed the last queued CCB.
+	**	Clear this info, which is no longer relevant.
+ */
+ if (cp == np->last_cp)
+ np->last_cp = 0;
+#endif
+
+ /*
+ ** If auto-sense performed, change scsi status,
+ ** Otherwise, compute the residual.
+ */
+ if (cp->host_flags & HF_AUTO_SENSE) {
+ cp->scsi_status = cp->sv_scsi_status;
+ cp->xerr_status = cp->sv_xerr_status;
+ }
+ else {
+ cp->resid = 0;
+ if (cp->xerr_status ||
+ cp->phys.header.lastp != cp->phys.header.goalp)
+ cp->resid = ncr_compute_residual(np, cp);
+ }
+
+ /*
+ ** Check for extended errors.
+ */
+
+ if (cp->xerr_status) {
+ if (cp->xerr_status & XE_PARITY_ERR) {
+ PRINT_ADDR(cmd);
+ printk ("unrecovered SCSI parity error.\n");
+ }
+ if (cp->xerr_status & XE_EXTRA_DATA) {
+ PRINT_ADDR(cmd);
+ printk ("extraneous data discarded.\n");
+ }
+ if (cp->xerr_status & XE_BAD_PHASE) {
+ PRINT_ADDR(cmd);
+ printk ("illegal scsi phase (4/5).\n");
+ }
+ if (cp->xerr_status & XE_SODL_UNRUN) {
+ PRINT_ADDR(cmd);
+ printk ("ODD transfer in DATA OUT phase.\n");
+ }
+ if (cp->xerr_status & XE_SWIDE_OVRUN){
+ PRINT_ADDR(cmd);
+ printk ("ODD transfer in DATA IN phase.\n");
+ }
+
+ if (cp->host_status==HS_COMPLETE)
+ cp->host_status = HS_FAIL;
+ }
+
+ /*
+ ** Print out any error for debugging purpose.
+ */
+ if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+ if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD ||
+ cp->resid) {
+ PRINT_ADDR(cmd);
+ printk ("ERROR: cmd=%x host_status=%x scsi_status=%x "
+ "data_len=%d residual=%d\n",
+ cmd->cmnd[0], cp->host_status, cp->scsi_status,
+ cp->data_len, cp->resid);
+ }
+ }
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,99)
+ /*
+ ** Move residual byte count to user structure.
+ */
+ cmd->resid = cp->resid;
+#endif
+ /*
+ ** Check the status.
+ */
+ if ( (cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_GOOD ||
+ cp->scsi_status == S_COND_MET)) {
+ /*
+ ** All went well (GOOD status).
+ ** CONDITION MET status is returned on
+ ** `Pre-Fetch' or `Search data' success.
+ */
+ SetScsiResult(cmd, DID_OK, cp->scsi_status);
+
+ /*
+ ** Allocate the lcb if not yet.
+ */
+ if (!lp)
+ ncr_alloc_lcb (np, cp->target, cp->lun);
+
+ /*
+ ** On standard INQUIRY response (EVPD and CmDt
+ ** not set), setup logical unit according to
+		**	announced capabilities (we need the first 7 bytes).
+ */
+ if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3) &&
+ cmd->cmnd[4] >= 7 && !cmd->use_sg) {
+ sync_scsi_data(np, cmd); /* SYNC the data */
+ ncr_setup_lcb (np, cp->target, cp->lun,
+ (char *) cmd->request_buffer);
+ }
+
+ /*
+		**	If the tag count was reduced due to QUEUE FULL,
+		**	increase it again once 1000 GOOD statuses have been received.
+ */
+ if (lp && lp->usetags && lp->numtags < lp->maxtags) {
+ ++lp->num_good;
+ if (lp->num_good >= 1000) {
+ lp->num_good = 0;
+ ++lp->numtags;
+ ncr_setup_tags (np, cp->target, cp->lun);
+ }
+ }
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_CHECK_COND)) {
+ /*
+ ** Check condition code
+ */
+ SetScsiResult(cmd, DID_OK, S_CHECK_COND);
+
+ if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
+ PRINT_ADDR(cmd);
+ ncr_printl_hex("sense data:", cmd->sense_buffer, 14);
+ }
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_CONFLICT)) {
+ /*
+ ** Reservation Conflict condition code
+ */
+ SetScsiResult(cmd, DID_OK, S_CONFLICT);
+
+ } else if ((cp->host_status == HS_COMPLETE)
+ && (cp->scsi_status == S_BUSY ||
+ cp->scsi_status == S_QUEUE_FULL)) {
+
+ /*
+ ** Target is busy.
+ */
+ SetScsiResult(cmd, DID_OK, cp->scsi_status);
+
+ } else if ((cp->host_status == HS_SEL_TIMEOUT)
+ || (cp->host_status == HS_TIMEOUT)) {
+
+ /*
+ ** No response
+ */
+ SetScsiResult(cmd, DID_TIME_OUT, cp->scsi_status);
+
+ } else if (cp->host_status == HS_RESET) {
+
+ /*
+ ** SCSI bus reset
+ */
+ SetScsiResult(cmd, DID_RESET, cp->scsi_status);
+
+ } else if (cp->host_status == HS_ABORTED) {
+
+ /*
+ ** Transfer aborted
+ */
+ SetScsiAbortResult(cmd);
+
+ } else {
+ int did_status;
+
+ /*
+ ** Other protocol messes
+ */
+ PRINT_ADDR(cmd);
+ printk ("COMMAND FAILED (%x %x) @%p.\n",
+ cp->host_status, cp->scsi_status, cp);
+
+ did_status = DID_ERROR;
+ if (cp->xerr_status & XE_PARITY_ERR)
+ did_status = DID_PARITY;
+
+ SetScsiResult(cmd, did_status, cp->scsi_status);
+ }
+
+ /*
+ ** trace output
+ */
+
+ if (tp->usrflag & UF_TRACE) {
+ PRINT_ADDR(cmd);
+ printk (" CMD:");
+ ncr_print_hex(cmd->cmnd, cmd->cmd_len);
+
+ if (cp->host_status==HS_COMPLETE) {
+ switch (cp->scsi_status) {
+ case S_GOOD:
+ printk (" GOOD");
+ break;
+ case S_CHECK_COND:
+ printk (" SENSE:");
+ ncr_print_hex(cmd->sense_buffer, 14);
+ break;
+ default:
+ printk (" STAT: %x\n", cp->scsi_status);
+ break;
+ }
+ } else printk (" HOSTERROR: %x", cp->host_status);
+ printk ("\n");
+ }
+
+ /*
+ ** Free this ccb
+ */
+ ncr_free_ccb (np, cp);
+
+ /*
+ ** requeue awaiting scsi commands for this lun.
+ */
+ if (lp && lp->queuedccbs < lp->queuedepth &&
+ !xpt_que_empty(&lp->wait_ccbq))
+ ncr_start_next_ccb(np, lp, 2);
+
+ /*
+ ** requeue awaiting scsi commands for this controller.
+ */
+ if (np->waiting_list)
+ requeue_waiting_list(np);
+
+ /*
+ ** signal completion to generic driver.
+ */
+ ncr_queue_done_cmd(np, cmd);
+}
+
+/*==========================================================
+**
+**
+** Signal all (or one) control block done.
+**
+**
+**==========================================================
+*/
+
+/*
+** The NCR has completed CCBs.
+** Look at the DONE QUEUE.
+**
+** On architectures that may reorder LOAD/STORE operations,
+** a memory barrier may be needed after the reading of the
+** so-called `flag' and prior to dealing with the data.
+*/
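+/*
+** Each even slot of the done queue holds the DSA (physical CCB
+** address) of a completed command, a zero entry marking the end
+** of the valid part; the get index therefore advances two slots
+** at a time, skipping the link words that make the queue circular.
+*/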
+int ncr_wakeup_done (ncb_p np)
+{
+ ccb_p cp;
+ int i, n;
+ u_long dsa;
+
+ n = 0;
+ i = np->dqueueget;
+ while (1) {
+ dsa = scr_to_cpu(np->dqueue[i]);
+ if (!dsa)
+ break;
+ np->dqueue[i] = 0;
+ if ((i = i+2) >= MAX_START*2)
+ i = 0;
+
+ cp = ncr_ccb_from_dsa(np, dsa);
+ if (cp) {
+ MEMORY_BARRIER();
+ ncr_complete (np, cp);
+ ++n;
+ }
+ else
+ printk (KERN_ERR "%s: bad DSA (%lx) in done queue.\n",
+ ncr_name(np), dsa);
+ }
+ np->dqueueget = i;
+
+ return n;
+}
+
+/*
+** Complete all active CCBs.
+*/
+void ncr_wakeup (ncb_p np, u_long code)
+{
+ ccb_p cp = np->ccbc;
+
+ while (cp) {
+ if (cp->host_status != HS_IDLE) {
+ cp->host_status = code;
+ ncr_complete (np, cp);
+ }
+ cp = cp->link_ccb;
+ }
+}
+
+/*==========================================================
+**
+**
+** Start NCR chip.
+**
+**
+**==========================================================
+*/
+
+void ncr_init (ncb_p np, int reset, char * msg, u_long code)
+{
+ int i;
+ u_long phys;
+
+ /*
+ ** Reset chip if asked, otherwise just clear fifos.
+ */
+
+ if (reset)
+ ncr_soft_reset(np);
+ else {
+ OUTB (nc_stest3, TE|CSF);
+ OUTONB (nc_ctest3, CLF);
+ }
+
+ /*
+ ** Message.
+ */
+
+ if (msg) printk (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg);
+
+ /*
+ ** Clear Start Queue
+ */
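+	/*
+	**	Each queue slot pair holds a job address followed by the
+	**	bus address of the next pair; the last pair links back to
+	**	the first, so the SCRIPTS walk the queue as a circular list.
+	*/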
+ phys = np->p_squeue;
+ np->queuedepth = MAX_START - 1; /* 1 entry needed as end marker */
+ for (i = 0; i < MAX_START*2; i += 2) {
+ np->squeue[i] = cpu_to_scr(np->p_idletask);
+ np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
+ }
+ np->squeue[MAX_START*2-1] = cpu_to_scr(phys);
+
+
+ /*
+ ** Start at first entry.
+ */
+ np->squeueput = 0;
+ np->scripth0->startpos[0] = cpu_to_scr(phys);
+
+ /*
+ ** Clear Done Queue
+ */
+ phys = vtobus(np->dqueue);
+ for (i = 0; i < MAX_START*2; i += 2) {
+ np->dqueue[i] = 0;
+ np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
+ }
+ np->dqueue[MAX_START*2-1] = cpu_to_scr(phys);
+
+ /*
+ ** Start at first entry.
+ */
+ np->scripth0->done_pos[0] = cpu_to_scr(phys);
+ np->dqueueget = 0;
+
+ /*
+ ** Wakeup all pending jobs.
+ */
+ ncr_wakeup (np, code);
+
+ /*
+ ** Init chip.
+ */
+
+ OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */
+ UDELAY (2000); /* The 895 needs time for the bus mode to settle */
+
+ OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
+ /* full arb., ena parity, par->ATN */
+ OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
+
+ ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
+
+ OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
+ OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
+ OUTB (nc_istat , SIGP ); /* Signal Process */
+ OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
+ OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
+
+ OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
+ OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
+ OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66)){
+ OUTB (nc_stest2, EXT|np->rv_stest2);
+ /* Extended Sreq/Sack filtering, not supported in C1010/C1010_66 */
+ }
+ OUTB (nc_stest3, TE); /* TolerANT enable */
+ OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
+
+ /*
+ ** DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
+ ** Disable overlapped arbitration for all dual-function
+	**	devices, regardless of revision id.
+ ** We may consider it is a post-chip-design feature. ;-)
+ **
+ ** Errata applies to all 896 and 1010 parts.
+ */
+ if (np->device_id == PCI_DEVICE_ID_NCR_53C875)
+ OUTB (nc_ctest0, (1<<5));
+ else if (np->device_id == PCI_DEVICE_ID_NCR_53C896 ||
+ np->device_id == PCI_DEVICE_ID_LSI_53C1010 ||
+ np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 )
+ np->rv_ccntl0 |= DPR;
+
+ /*
+	**	C1010_66MHz rev 0 part requires AIPCNTL1 bit 3 to be set.
+ */
+ if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)
+ OUTB(nc_aipcntl1, (1<<3));
+
+ /*
+ ** If 64 bit (895A/896/1010/1010_66) write the CCNTL1 register to
+ ** enable 40 bit address table indirect addressing for MOVE.
+ ** Also write CCNTL0 if 64 bit chip, since this register seems
+ ** to only be used by 64 bit cores.
+ */
+ if (np->features & FE_64BIT) {
+ OUTB (nc_ccntl0, np->rv_ccntl0);
+ OUTB (nc_ccntl1, np->rv_ccntl1);
+ }
+
+ /*
+ ** If phase mismatch handled by scripts (53C895A or 53C896
+ ** or 53C1010 or 53C1010_66), set PM jump addresses.
+ */
+
+ if (np->features & FE_NOPM) {
+ printk(KERN_INFO "%s: handling phase mismatch from SCRIPTS.\n",
+ ncr_name(np));
+ OUTL (nc_pmjad1, NCB_SCRIPTH_PHYS (np, pm_handle));
+ OUTL (nc_pmjad2, NCB_SCRIPTH_PHYS (np, pm_handle));
+ }
+
+ /*
+ ** Enable GPIO0 pin for writing if LED support from SCRIPTS.
+ ** Also set GPIO5 and clear GPIO6 if hardware LED control.
+ */
+
+ if (np->features & FE_LED0)
+ OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
+ else if (np->features & FE_LEDC)
+ OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);
+
+
+ /*
+ ** enable ints
+ */
+
+ OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
+ OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);
+
+ /*
+ ** For 895/895A/896/c1010
+ ** Enable SBMC interrupt and save current SCSI bus mode.
+ */
+ if ( (np->features & FE_ULTRA2) || (np->features & FE_ULTRA3) ) {
+ OUTONW (nc_sien, SBMC);
+ np->scsi_mode = INB (nc_stest4) & SMODE;
+ }
+
+ /*
+ ** Fill in target structure.
+ ** Reinitialize usrsync.
+ ** Reinitialize usrwide.
+ ** Prepare sync negotiation according to actual SCSI bus mode.
+ */
+
+ for (i=0;i<MAX_TARGET;i++) {
+ tcb_p tp = &np->target[i];
+
+ tp->to_reset = 0;
+
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+ tp->uval = np->rv_scntl4;
+
+ if (tp->usrsync != 255) {
+ if (tp->usrsync <= np->maxsync) {
+ if (tp->usrsync < np->minsync) {
+ tp->usrsync = np->minsync;
+ }
+ }
+ else
+ tp->usrsync = 255;
+ };
+
+ if (tp->usrwide > np->maxwide)
+ tp->usrwide = np->maxwide;
+
+ ncr_negotiate (np, tp);
+ }
+
+ /*
+ ** Download SCSI SCRIPTS to on-chip RAM if present,
+ ** and start script processor.
+	**	We prefer to do the download from the CPU.
+ ** For platforms that may not support PCI memory mapping,
+ ** we use a simple SCRIPTS that performs MEMORY MOVEs.
+ */
+ if (np->base2_ba) {
+ if (bootverbose)
+ printk ("%s: Downloading SCSI SCRIPTS.\n",
+ ncr_name(np));
+#ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+ if (np->base2_ws == 8192)
+ phys = NCB_SCRIPTH0_PHYS (np, start_ram64);
+ else
+ phys = NCB_SCRIPTH_PHYS (np, start_ram);
+#else
+ if (np->base2_ws == 8192) {
+ memcpy_to_pci(np->base2_va + 4096,
+ np->scripth0, sizeof(struct scripth));
+ OUTL (nc_mmws, np->scr_ram_seg);
+ OUTL (nc_mmrs, np->scr_ram_seg);
+ OUTL (nc_sfs, np->scr_ram_seg);
+ phys = NCB_SCRIPTH_PHYS (np, start64);
+ }
+ else
+ phys = NCB_SCRIPT_PHYS (np, init);
+ memcpy_to_pci(np->base2_va, np->script0, sizeof(struct script));
+#endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
+ }
+ else
+ phys = NCB_SCRIPT_PHYS (np, init);
+
+ np->istat_sem = 0;
+
+ OUTL (nc_dsa, np->p_ncb);
+ OUTL_DSP (phys);
+}
+
+/*==========================================================
+**
+** Prepare the negotiation values for wide and
+** synchronous transfers.
+**
+**==========================================================
+*/
+
+static void ncr_negotiate (struct ncb* np, struct tcb* tp)
+{
+ /*
+ ** minsync unit is 4ns !
+ */
+
+ u_long minsync = tp->usrsync;
+
+ /*
+ ** SCSI bus mode limit
+ */
+
+ if (np->scsi_mode && np->scsi_mode == SMODE_SE) {
+ if (minsync < 12) minsync = 12;
+ }
+
+ /*
+ ** our limit ..
+ */
+
+ if (minsync < np->minsync)
+ minsync = np->minsync;
+
+ /*
+ ** divider limit
+ */
+
+ if (minsync > np->maxsync)
+ minsync = 255;
+
+ tp->minsync = minsync;
+ tp->maxoffs = (minsync<255 ? np->maxoffs : 0);
+
+ /*
+ ** period=0: has to negotiate sync transfer
+ */
+
+ tp->period=0;
+
+ /*
+ ** widedone=0: has to negotiate wide transfer
+ */
+ tp->widedone=0;
+}
+
+/*==========================================================
+**
+** Get clock factor and sync divisor for a given
+** synchronous factor period.
+** Returns the clock factor (in sxfer) and scntl3
+** synchronous divisor field.
+**
+**==========================================================
+*/
+
+static void ncr_getsync(ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p)
+{
+ u_long clk = np->clock_khz; /* SCSI clock frequency in kHz */
+ int div = np->clock_divn; /* Number of divisors supported */
+ u_long fak; /* Sync factor in sxfer */
+ u_long per; /* Period in tenths of ns */
+ u_long kpc; /* (per * clk) */
+
+ /*
+ ** Compute the synchronous period in tenths of nano-seconds
+ ** from sfac.
+ **
+ ** Note, if sfac == 9, DT is being used. Double the period of 125
+ ** to 250.
+ */
+ if (sfac <= 10) per = 250;
+ else if (sfac == 11) per = 303;
+ else if (sfac == 12) per = 500;
+ else per = 40 * sfac;
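+	/*
+	** For instance: sfac 9 or 10 -> 250 (25 ns), sfac 11 -> 303
+	** (30.3 ns), sfac 12 -> 500 (50 ns); larger factors give
+	** 4*sfac ns.
+	*/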
+
+ /*
+ ** Look for the greatest clock divisor that allows an
+ ** input speed faster than the period.
+ */
+ kpc = per * clk;
+ while (--div >= 0)
+ if (kpc >= (div_10M[div] << 2)) break;
+
+ /*
+ ** Calculate the lowest clock factor that allows an output
+ ** speed not faster than the period.
+ */
+ fak = (kpc - 1) / div_10M[div] + 1;
+
+#if 0	/* This optimization does not seem very useful */
+
+ per = (fak * div_10M[div]) / clk;
+
+ /*
+	**	Why not try the next lower divisor and choose the one
+	**	that allows the fastest output speed?
+	**	We don't want the input speed to be much greater than the output speed.
+ */
+ if (div >= 1 && fak < 8) {
+ u_long fak2, per2;
+ fak2 = (kpc - 1) / div_10M[div-1] + 1;
+ per2 = (fak2 * div_10M[div-1]) / clk;
+ if (per2 < per && fak2 <= 8) {
+ fak = fak2;
+ per = per2;
+ --div;
+ }
+ }
+#endif
+
+ if (fak < 4) fak = 4; /* Should never happen, too bad ... */
+
+ /*
+ ** Compute and return sync parameters for the ncr
+ */
+ *fakp = fak - 4;
+
+ /*
+	**	If sfac < 25 on 8xx parts, we want the chip to operate at
+	**	least at Ultra speeds, so bit 7 of scntl3 must be set.
+ ** For C1010, do not set this bit. If operating at Ultra3 speeds,
+ ** set the U3EN bit instead.
+ */
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ *scntl3p = (div+1) << 4;
+ *fakp = 0;
+ }
+ else {
+ *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0);
+ *fakp = fak - 4;
+ }
+}
+
+/*==========================================================
+**
+** Utility routine to return the current bus width
+** synchronous period and offset.
+** Utilizes target sval, wval and uval
+**
+**==========================================================
+*/
+static void ncr_get_xfer_info(ncb_p np, tcb_p tp, u_char *factor,
+ u_char *offset, u_char *width)
+{
+
+ u_char idiv;
+ u_long period;
+
+ *width = (tp->wval & EWS) ? 1 : 0;
+
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+ *offset = (tp->sval & 0x3f);
+ else
+ *offset = (tp->sval & 0x1f);
+
+ /*
+	 * The midlayer signals the driver that all of the scsi commands
+ * for the integrity check have completed. Save the negotiated
+ * parameters (extracted from sval, wval and uval).
+ * See ncr_setsync for alg. details.
+ */
+
+ idiv = (tp->wval>>4) & 0x07;
+
+ if ( *offset && idiv ) {
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)){
+ if (tp->uval & 0x80)
+ period = (2*div_10M[idiv-1])/np->clock_khz;
+ else
+ period = (4*div_10M[idiv-1])/np->clock_khz;
+ }
+ else
+ period = (((tp->sval>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+ }
+ else
+ period = 0xffff;
+
+ if (period <= 125) *factor = 9;
+ else if (period <= 250) *factor = 10;
+ else if (period <= 303) *factor = 11;
+ else if (period <= 500) *factor = 12;
+ else *factor = (period + 40 - 1) / 40;
+
+}
+
+
+/*==========================================================
+**
+** Set actual values, sync status and patch all ccbs of
+** a target according to new sync/wide agreement.
+**
+**==========================================================
+*/
+
+static void ncr_set_sync_wide_status (ncb_p np, u_char target)
+{
+ ccb_p cp = np->ccbc;
+ tcb_p tp = &np->target[target];
+
+ /*
+ ** set actual value and sync_status
+ **
+ ** TEMP register contains current scripts address
+ ** which is data type/direction/dependent.
+ */
+ OUTB (nc_sxfer, tp->sval);
+ OUTB (nc_scntl3, tp->wval);
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+ OUTB (nc_scntl4, tp->uval);
+
+ /*
+ ** patch ALL ccbs of this target.
+ */
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status == HS_IDLE)
+ continue;
+ if (cp->target != target)
+ continue;
+ cp->phys.select.sel_scntl3 = tp->wval;
+ cp->phys.select.sel_sxfer = tp->sval;
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+ cp->phys.select.sel_scntl4 = tp->uval;
+ };
+}
+
+/*==========================================================
+**
+**	Switch sync mode for current job and its target
+**
+**==========================================================
+*/
+
+static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer,
+ u_char scntl4)
+{
+ tcb_p tp;
+ u_char target = INB (nc_sdid) & 0x0f;
+ u_char idiv;
+ u_char offset;
+
+ assert (cp);
+ if (!cp) return;
+
+ assert (target == (cp->target & 0xf));
+
+ tp = &np->target[target];
+
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ offset = sxfer & 0x3f; /* bits 5-0 */
+ scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS);
+ scntl4 = (scntl4 & 0x80);
+ }
+ else {
+ offset = sxfer & 0x1f; /* bits 4-0 */
+ if (!scntl3 || !offset)
+ scntl3 = np->rv_scntl3;
+
+ scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS) |
+ (np->rv_scntl3 & 0x07);
+ }
+
+
+ /*
+ ** Deduce the value of controller sync period from scntl3.
+ ** period is in tenths of nano-seconds.
+ */
+
+ idiv = ((scntl3 >> 4) & 0x7);
+ if ( offset && idiv) {
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ /* Note: If extra data hold clocks are used,
+ * the formulas below must be modified.
+ * When scntl4 == 0, ST mode.
+ */
+ if (scntl4 & 0x80)
+ tp->period = (2*div_10M[idiv-1])/np->clock_khz;
+ else
+ tp->period = (4*div_10M[idiv-1])/np->clock_khz;
+ }
+ else
+ tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+ }
+ else
+ tp->period = 0xffff;
+
+
+ /*
+ ** Stop there if sync parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+ tp->uval = scntl4;
+
+ /*
+ ** Bells and whistles ;-)
+	**	Do not announce negotiations due to auto-sense,
+	**	unless the user really wants us to be verbose. :)
+ */
+ if ( bootverbose < 2 && (cp->host_flags & HF_AUTO_SENSE))
+ goto next;
+ PRINT_TARGET(np, target);
+ if (offset) {
+ unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+ unsigned mb10 = (f10 + tp->period/2) / tp->period;
+ char *scsi;
+
+ /*
+ ** Disable extended Sreq/Sack filtering
+ */
+ if ((tp->period <= 2000) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ OUTOFFB (nc_stest2, EXT);
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (tp->period < 250) scsi = "FAST-80";
+ else if (tp->period < 500) scsi = "FAST-40";
+ else if (tp->period < 1000) scsi = "FAST-20";
+ else if (tp->period < 2000) scsi = "FAST-10";
+ else scsi = "FAST-5";
+
+ printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+ tp->widedone > 1 ? "WIDE " : "",
+ mb10 / 10, mb10 % 10, tp->period / 10, offset);
+ } else
+ printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+next:
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+
+/*==========================================================
+**
+**	Switch wide mode for current job and its target
+** SCSI specs say: a SCSI device that accepts a WDTR
+** message shall reset the synchronous agreement to
+** asynchronous mode.
+**
+**==========================================================
+*/
+
+static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack)
+{
+ u_short target = INB (nc_sdid) & 0x0f;
+ tcb_p tp;
+ u_char scntl3;
+ u_char sxfer;
+
+ assert (cp);
+ if (!cp) return;
+
+ assert (target == (cp->target & 0xf));
+
+ tp = &np->target[target];
+ tp->widedone = wide+1;
+ scntl3 = (tp->wval & (~EWS)) | (wide ? EWS : 0);
+
+ sxfer = ack ? 0 : tp->sval;
+
+ /*
+ ** Stop there if sync/wide parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (bootverbose >= 2) {
+ PRINT_TARGET(np, target);
+ if (scntl3 & EWS)
+ printk ("WIDE SCSI (16 bit) enabled.\n");
+ else
+ printk ("WIDE SCSI disabled.\n");
+ }
+
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+
+/*==========================================================
+**
+ ** Switch sync/wide mode for current job and its target
+** PPR negotiations only
+**
+**==========================================================
+*/
+
+static void ncr_setsyncwide (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer,
+ u_char scntl4, u_char wide)
+{
+ tcb_p tp;
+ u_char target = INB (nc_sdid) & 0x0f;
+ u_char idiv;
+ u_char offset;
+
+ assert (cp);
+ if (!cp) return;
+
+ assert (target == (cp->target & 0xf));
+
+ tp = &np->target[target];
+ tp->widedone = wide+1;
+
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ offset = sxfer & 0x3f; /* bits 5-0 */
+ scntl3 = (scntl3 & 0xf0) | (wide ? EWS : 0);
+ scntl4 = (scntl4 & 0x80);
+ }
+ else {
+ offset = sxfer & 0x1f; /* bits 4-0 */
+ if (!scntl3 || !offset)
+ scntl3 = np->rv_scntl3;
+
+ scntl3 = (scntl3 & 0xf0) | (wide ? EWS : 0) |
+ (np->rv_scntl3 & 0x07);
+ }
+
+
+ /*
+ ** Deduce the value of controller sync period from scntl3.
+ ** period is in tenths of nano-seconds.
+ */
+
+ idiv = ((scntl3 >> 4) & 0x7);
+ if ( offset && idiv) {
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ /* Note: If extra data hold clocks are used,
+ * the formulas below must be modified.
+ * When scntl4 == 0, ST mode.
+ */
+ if (scntl4 & 0x80)
+ tp->period = (2*div_10M[idiv-1])/np->clock_khz;
+ else
+ tp->period = (4*div_10M[idiv-1])/np->clock_khz;
+ }
+ else
+ tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz;
+ }
+ else
+ tp->period = 0xffff;
+
+
+ /*
+ ** Stop there if sync parameters are unchanged
+ */
+ if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return;
+ tp->sval = sxfer;
+ tp->wval = scntl3;
+ tp->uval = scntl4;
+
+ /*
+ ** Bells and whistles ;-)
+ ** Do not announce negotiations due to auto-sense,
+ ** unless the user really wants us to be verbose. :)
+ */
+ if ( bootverbose < 2 && (cp->host_flags & HF_AUTO_SENSE))
+ goto next;
+ PRINT_TARGET(np, target);
+ if (offset) {
+ unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0);
+ unsigned mb10 = (f10 + tp->period/2) / tp->period;
+ char *scsi;
+
+ /*
+ ** Disable extended Sreq/Sack filtering
+ */
+ if ((tp->period <= 2000) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ OUTOFFB (nc_stest2, EXT);
+
+ /*
+ ** Bells and whistles ;-)
+ */
+ if (tp->period < 250) scsi = "FAST-80";
+ else if (tp->period < 500) scsi = "FAST-40";
+ else if (tp->period < 1000) scsi = "FAST-20";
+ else if (tp->period < 2000) scsi = "FAST-10";
+ else scsi = "FAST-5";
+
+ printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+ tp->widedone > 1 ? "WIDE " : "",
+ mb10 / 10, mb10 % 10, tp->period / 10, offset);
+ } else
+ printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
+next:
+ /*
+ ** set actual value and sync_status
+ ** patch ALL ccbs of this target.
+ */
+ ncr_set_sync_wide_status(np, target);
+}
+
+
+
+
+/*==========================================================
+**
+** Switch tagged mode for a target.
+**
+**==========================================================
+*/
+
+static void ncr_setup_tags (ncb_p np, u_char tn, u_char ln)
+{
+ tcb_p tp = &np->target[tn];
+ lcb_p lp = ncr_lp(np, tp, ln);
+ u_short reqtags, maxdepth;
+
+ /*
+ ** Just in case ...
+ */
+ if ((!tp) || (!lp))
+ return;
+
+ /*
+ ** If SCSI device queue depth is not yet set, leave here.
+ */
+ if (!lp->scdev_depth)
+ return;
+
+ /*
+ ** Do not allow more tags than the SCSI driver can queue
+ ** for this device.
+ ** Do not allow more tags than we can handle.
+ */
+ maxdepth = lp->scdev_depth;
+ if (maxdepth > lp->maxnxs) maxdepth = lp->maxnxs;
+ if (lp->maxtags > maxdepth) lp->maxtags = maxdepth;
+ if (lp->numtags > maxdepth) lp->numtags = maxdepth;
+
+ /*
+ ** only devices conformant to ANSI Version >= 2
+ ** only devices capable of tagged commands
+ ** only if enabled by user ..
+ */
+ if ((lp->inq_byte7 & INQ7_QUEUE) && lp->numtags > 1) {
+ reqtags = lp->numtags;
+ } else {
+ reqtags = 1;
+ };
+
+ /*
+ ** Update max number of tags
+ */
+ lp->numtags = reqtags;
+ if (lp->numtags > lp->maxtags)
+ lp->maxtags = lp->numtags;
+
+ /*
+ ** If we want to switch tag mode, we must wait
+ ** until no CCB is active.
+ */
+ if (reqtags > 1 && lp->usetags) { /* Stay in tagged mode */
+ if (lp->queuedepth == reqtags) /* Already announced */
+ return;
+ lp->queuedepth = reqtags;
+ }
+ else if (reqtags <= 1 && !lp->usetags) { /* Stay in untagged mode */
+ lp->queuedepth = reqtags;
+ return;
+ }
+ else { /* Want to switch tag mode */
+ if (lp->busyccbs) /* If not yet safe, return */
+ return;
+ lp->queuedepth = reqtags;
+ lp->usetags = reqtags > 1 ? 1 : 0;
+ }
+
+ /*
+ ** Patch the lun mini-script, according to tag mode.
+ */
+ lp->resel_task = lp->usetags?
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_tag)) :
+ cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag));
+
+ /*
+ ** Announce change to user.
+ */
+ if (bootverbose) {
+ PRINT_LUN(np, tn, ln);
+ if (lp->usetags)
+ printk("tagged command queue depth set to %d\n", reqtags);
+ else
+ printk("tagged command queueing disabled\n");
+ }
+}
+
+/*----------------------------------------------------
+**
+** handle user commands
+**
+**----------------------------------------------------
+*/
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+static void ncr_usercmd (ncb_p np)
+{
+ u_char t;
+ tcb_p tp;
+ int ln;
+ u_long size;
+
+ switch (np->user.cmd) {
+ case 0: return;
+
+ case UC_SETDEBUG:
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ ncr_debug = np->user.data;
+#endif
+ break;
+
+ case UC_SETORDER:
+ np->order = np->user.data;
+ break;
+
+ case UC_SETVERBOSE:
+ np->verbose = np->user.data;
+ break;
+
+ default:
+ /*
+ ** We assume that other commands apply to targets.
+ ** This should always be the case and avoids repeating
+ ** the 4 lines below 5 times.
+ */
+ for (t = 0; t < MAX_TARGET; t++) {
+ if (!((np->user.target >> t) & 1))
+ continue;
+ tp = &np->target[t];
+
+ switch (np->user.cmd) {
+
+ case UC_SETSYNC:
+ tp->usrsync = np->user.data;
+ ncr_negotiate (np, tp);
+ break;
+
+ case UC_SETWIDE:
+ size = np->user.data;
+ if (size > np->maxwide)
+ size=np->maxwide;
+ tp->usrwide = size;
+ ncr_negotiate (np, tp);
+ break;
+
+ case UC_SETTAGS:
+ tp->usrtags = np->user.data;
+ for (ln = 0; ln < MAX_LUN; ln++) {
+ lcb_p lp;
+ lp = ncr_lp(np, tp, ln);
+ if (!lp)
+ continue;
+ lp->numtags = np->user.data;
+ lp->maxtags = lp->numtags;
+ ncr_setup_tags (np, t, ln);
+ }
+ break;
+
+ case UC_RESETDEV:
+ tp->to_reset = 1;
+ np->istat_sem = SEM;
+ OUTB (nc_istat, SIGP|SEM);
+ break;
+
+ case UC_CLEARDEV:
+ for (ln = 0; ln < MAX_LUN; ln++) {
+ lcb_p lp;
+ lp = ncr_lp(np, tp, ln);
+ if (lp)
+ lp->to_clear = 1;
+ }
+ np->istat_sem = SEM;
+ OUTB (nc_istat, SIGP|SEM);
+ break;
+
+ case UC_SETFLAG:
+ tp->usrflag = np->user.data;
+ break;
+ }
+ }
+ break;
+ }
+ np->user.cmd=0;
+}
+#endif
+
+/*==========================================================
+**
+**
+** ncr timeout handler.
+**
+**
+**==========================================================
+**
+** Misused to keep the driver running when
+** interrupts are not configured correctly.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_timeout (ncb_p np)
+{
+ u_long thistime = ktime_get(0);
+
+ /*
+ ** If the release process is in progress, let it proceed:
+ ** Set the release stage from 1 to 2 to synchronize
+ ** with the release process.
+ */
+
+ if (np->release_stage) {
+ if (np->release_stage == 1) np->release_stage = 2;
+ return;
+ }
+
+#ifdef SCSI_NCR_PCIQ_BROKEN_INTR
+ np->timer.expires = ktime_get((HZ+9)/10);
+#else
+ np->timer.expires = ktime_get(SCSI_NCR_TIMER_INTERVAL);
+#endif
+ add_timer(&np->timer);
+
+ /*
+ ** If we are resetting the ncr, wait for settle_time before
+ ** clearing it. Then command processing will be resumed.
+ */
+ if (np->settle_time) {
+ if (np->settle_time <= thistime) {
+ if (bootverbose > 1)
+ printk("%s: command processing resumed\n", ncr_name(np));
+ np->settle_time = 0;
+ requeue_waiting_list(np);
+ }
+ return;
+ }
+
+ /*
+ ** Nothing to do for now, but that may come.
+ */
+ if (np->lasttime + 4*HZ < thistime) {
+ np->lasttime = thistime;
+ }
+
+#ifdef SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+ /*
+ ** Some way-broken PCI bridges may lead to
+ ** completions being lost when the clearing
+ ** of the INTFLY flag by the CPU occurs
+ ** concurrently with the chip raising this flag.
+ ** If this ever happens, lost completions will
+ ** be reaped here.
+ */
+ ncr_wakeup_done(np);
+#endif
+
+#ifdef SCSI_NCR_PCIQ_BROKEN_INTR
+ if (INB(nc_istat) & (INTF|SIP|DIP)) {
+
+ /*
+ ** Process pending interrupts.
+ */
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("{");
+ ncr_exception (np);
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("}");
+ }
+#endif /* SCSI_NCR_PCIQ_BROKEN_INTR */
+}
+
+/*==========================================================
+**
+** log message for real hard errors
+**
+** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)."
+** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf."
+**
+** exception register:
+** ds: dstat
+** si: sist
+**
+** SCSI bus lines:
+** so: control lines as driven by NCR.
+** si: control lines as seen by NCR.
+** sd: scsi data lines as seen by NCR.
+**
+** wide/fastmode:
+** sxfer: (see the manual)
+** scntl3: (see the manual)
+**
+** current script command:
+** dsp: script address (relative to start of script).
+** dbc: first word of script command.
+**
+ ** First 24 registers of the chip:
+** r0..rf
+**
+**==========================================================
+*/
+
+static void ncr_log_hard_error(ncb_p np, u_short sist, u_char dstat)
+{
+ u_int32 dsp;
+ int script_ofs;
+ int script_size;
+ char *script_name;
+ u_char *script_base;
+ int i;
+
+ dsp = INL (nc_dsp);
+
+ if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) {
+ script_ofs = dsp - np->p_script;
+ script_size = sizeof(struct script);
+ script_base = (u_char *) np->script0;
+ script_name = "script";
+ }
+ else if (np->p_scripth < dsp &&
+ dsp <= np->p_scripth + sizeof(struct scripth)) {
+ script_ofs = dsp - np->p_scripth;
+ script_size = sizeof(struct scripth);
+ script_base = (u_char *) np->scripth0;
+ script_name = "scripth";
+ } else {
+ script_ofs = dsp;
+ script_size = 0;
+ script_base = 0;
+ script_name = "mem";
+ }
+
+ printk ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
+ ncr_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
+ (unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl), (unsigned)INB (nc_sbdl),
+ (unsigned)INB (nc_sxfer),(unsigned)INB (nc_scntl3), script_name, script_ofs,
+ (unsigned)INL (nc_dbc));
+
+ if (((script_ofs & 3) == 0) &&
+ (unsigned)script_ofs < script_size) {
+ printk ("%s: script cmd = %08x\n", ncr_name(np),
+ scr_to_cpu((int) *(ncrcmd *)(script_base + script_ofs)));
+ }
+
+ printk ("%s: regdump:", ncr_name(np));
+ for (i=0; i<24;i++)
+ printk (" %02x", (unsigned)INB_OFF(i));
+ printk (".\n");
+}
+
+/*============================================================
+**
+** ncr chip exception handler.
+**
+**============================================================
+**
+** In normal situations, interrupt conditions occur one at
+** a time. But when something bad happens on the SCSI BUS,
+** the chip may raise several interrupt flags before
+ ** stopping and interrupting the CPU. The additional
+** interrupt flags are stacked in some extra registers
+** after the SIP and/or DIP flag has been raised in the
+** ISTAT. After the CPU has read the interrupt condition
+** flag from SIST or DSTAT, the chip unstacks the other
+** interrupt flags and sets the corresponding bits in
+** SIST or DSTAT. Since the chip starts stacking once the
+** SIP or DIP flag is set, there is a small window of time
+** where the stacking does not occur.
+**
+** Typically, multiple interrupt conditions may happen in
+** the following situations:
+**
+** - SCSI parity error + Phase mismatch (PAR|MA)
+ ** When a parity error is detected in an input phase
+** and the device switches to msg-in phase inside a
+** block MOV.
+** - SCSI parity error + Unexpected disconnect (PAR|UDC)
+** When a stupid device does not want to handle the
+ ** recovery of a SCSI parity error.
+** - Some combinations of STO, PAR, UDC, ...
+ ** When using non-compliant SCSI stuff, when the user is
+ ** doing non-compliant hot tampering on the BUS, when
+** something really bad happens to a device, etc ...
+**
+** The heuristic suggested by SYMBIOS to handle
+** multiple interrupts is to try unstacking all
+ ** interrupt conditions and to handle them in some
+ ** priority order based on error severity.
+** This will work when the unstacking has been
+** successful, but we cannot be 100 % sure of that,
+** since the CPU may have been faster to unstack than
+** the chip is able to stack. Hmmm ... But it seems that
+** such a situation is very unlikely to happen.
+**
+ ** If this happens, for example an STO caught by the CPU
+ ** and then a UDC happening before the CPU has restarted
+** the SCRIPTS, the driver may wrongly complete the
+** same command on UDC, since the SCRIPTS didn't restart
+** and the DSA still points to the same command.
+** We avoid this situation by setting the DSA to an
+** invalid value when the CCB is completed and before
+** restarting the SCRIPTS.
+**
+** Another issue is that we need some section of our
+** recovery procedures to be somehow uninterruptible and
+ ** that the SCRIPTS processor does not provide such a
+ ** feature. For this reason, we handle recovery preferably
+** from the C code and check against some SCRIPTS
+** critical sections from the C code.
+**
+** Hopefully, the interrupt handling of the driver is now
+ ** able to resist weird BUS error conditions, but do not
+ ** ask me for any guarantee that it will never fail. :-)
+ ** Use at your own risk.
+**
+**============================================================
+*/
+
+void ncr_exception (ncb_p np)
+{
+ u_char istat, istatc;
+ u_char dstat;
+ u_short sist;
+ int i;
+
+ /*
+ ** interrupt on the fly ?
+ **
+ ** A `dummy read' is needed to ensure that the
+ ** clear of the INTF flag reaches the device
+ ** before the scanning of the DONE queue.
+ */
+ istat = INB (nc_istat);
+ if (istat & INTF) {
+ OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
+ istat = INB (nc_istat); /* DUMMY READ */
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("F ");
+ (void)ncr_wakeup_done (np);
+ };
+
+ if (!(istat & (SIP|DIP)))
+ return;
+
+#if 0 /* We should never get this one */
+ if (istat & CABRT)
+ OUTB (nc_istat, CABRT);
+#endif
+
+ /*
+ ** Steinbach's Guideline for Systems Programming:
+ ** Never test for an error condition you don't know how to handle.
+ */
+
+ /*========================================================
+ ** PAR and MA interrupts may occur at the same time,
+ ** and we need to know of both in order to handle
+ ** this situation properly. We try to unstack SCSI
+ ** interrupts for that reason. BTW, I dislike a LOT
+ ** such a loop inside the interrupt routine.
+ ** Even if DMA interrupt stacking is very unlikely to
+ ** happen, we also try unstacking these ones, since
+ ** this has no performance impact.
+ **=========================================================
+ */
+ sist = 0;
+ dstat = 0;
+ istatc = istat;
+ do {
+ if (istatc & SIP)
+ sist |= INW (nc_sist);
+ if (istatc & DIP)
+ dstat |= INB (nc_dstat);
+ istatc = INB (nc_istat);
+ istat |= istatc;
+ } while (istatc & (SIP|DIP));
+
+ if (DEBUG_FLAGS & DEBUG_TINY)
+ printk ("<%d|%x:%x|%x:%x>",
+ (int)INB(nc_scr0),
+ dstat,sist,
+ (unsigned)INL(nc_dsp),
+ (unsigned)INL(nc_dbc));
+
+ /*
+ ** On paper, a memory barrier may be needed here.
+ ** And since we are paranoid ... :)
+ */
+ MEMORY_BARRIER();
+
+ /*========================================================
+ ** First, interrupts we want to service cleanly.
+ **
+ ** Phase mismatch (MA) is the most frequent interrupt
+ ** for chips earlier than the 896 and so we have to service
+ ** it as quickly as possible.
+ ** A SCSI parity error (PAR) may be combined with a phase
+ ** mismatch condition (MA).
+ ** Programmed interrupts (SIR) are used to call the C code
+ ** from SCRIPTS.
+ ** The single step interrupt (SSI) is not used in this
+ ** driver.
+ **=========================================================
+ */
+
+ if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if (sist & PAR) ncr_int_par (np, sist);
+ else if (sist & MA) ncr_int_ma (np);
+ else if (dstat & SIR) ncr_int_sir (np);
+ else if (dstat & SSI) OUTONB_STD ();
+ else goto unknown_int;
+ return;
+ };
+
+ /*========================================================
+ ** Now, interrupts that do not happen in normal
+ ** situations and that we may need to recover from.
+ **
+ ** On SCSI RESET (RST), we reset everything.
+ ** On SCSI BUS MODE CHANGE (SBMC), we complete all
+ ** active CCBs with RESET status, prepare all devices
+ ** for negotiating again and restart the SCRIPTS.
+ ** On STO and UDC, we complete the CCB with the corres-
+ ** ponding status and restart the SCRIPTS.
+ **=========================================================
+ */
+
+ if (sist & RST) {
+ ncr_init (np, 1, bootverbose ? "scsi reset" : NULL, HS_RESET);
+ return;
+ };
+
+ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
+ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
+
+ if (!(sist & (GEN|HTH|SGE)) &&
+ !(dstat & (MDPE|BF|ABRT|IID))) {
+ if (sist & SBMC) ncr_int_sbmc (np);
+ else if (sist & STO) ncr_int_sto (np);
+ else if (sist & UDC) ncr_int_udc (np);
+ else goto unknown_int;
+ return;
+ };
+
+ /*=========================================================
+ ** Now, interrupts we are not able to recover cleanly.
+ **
+ ** Do the register dump.
+ ** Log message for hard errors.
+ ** Reset everything.
+ **=========================================================
+ */
+ if (ktime_exp(np->regtime)) {
+ np->regtime = ktime_get(10*HZ);
+ for (i = 0; i<sizeof(np->regdump); i++)
+ ((char*)&np->regdump)[i] = INB_OFF(i);
+ np->regdump.nc_dstat = dstat;
+ np->regdump.nc_sist = sist;
+ };
+
+ ncr_log_hard_error(np, sist, dstat);
+
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ u_char ctest4_o, ctest4_m;
+ u_char shadow;
+
+ /*
+ * Get shadow register data
+ * Write 1 to ctest4
+ */
+ ctest4_o = INB(nc_ctest4);
+
+ OUTB(nc_ctest4, ctest4_o | 0x10);
+
+ ctest4_m = INB(nc_ctest4);
+ shadow = INW_OFF(0x42);
+
+ OUTB(nc_ctest4, ctest4_o);
+
+ printk("%s: ctest4/sist original 0x%x/0x%X mod: 0x%X/0x%x\n",
+ ncr_name(np), ctest4_o, sist, ctest4_m, shadow);
+ }
+
+ if ((sist & (GEN|HTH|SGE)) ||
+ (dstat & (MDPE|BF|ABRT|IID))) {
+ ncr_start_reset(np);
+ return;
+ };
+
+unknown_int:
+ /*=========================================================
+ ** We just miss the cause of the interrupt. :(
+ ** Print a message. The timeout will do the real work.
+ **=========================================================
+ */
+ printk( "%s: unknown interrupt(s) ignored, "
+ "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
+ ncr_name(np), istat, dstat, sist);
+}
+
+
+/*==========================================================
+**
+** generic recovery from scsi interrupt
+**
+**==========================================================
+**
+ ** The doc says that when the chip gets a SCSI interrupt,
+** it tries to stop in an orderly fashion, by completing
+** an instruction fetch that had started or by flushing
+** the DMA fifo for a write to memory that was executing.
+** Such a fashion is not enough to know if the instruction
+** that was just before the current DSP value has been
+** executed or not.
+**
+** There are 3 small SCRIPTS sections that deal with the
+** start queue and the done queue that may break any
+ ** assumption from the C code if we are interrupted
+** inside, so we reset if it happens. Btw, since these
+** SCRIPTS sections are executed while the SCRIPTS hasn't
+** started SCSI operations, it is very unlikely to happen.
+**
+** All the driver data structures are supposed to be
+** allocated from the same 4 GB memory window, so there
+** is a 1 to 1 relationship between DSA and driver data
+** structures. Since we are careful :) to invalidate the
+** DSA when we complete a command or when the SCRIPTS
+** pushes a DSA into a queue, we can trust it when it
+** points to a CCB.
+**
+**----------------------------------------------------------
+*/
+static void ncr_recover_scsi_int (ncb_p np, u_char hsts)
+{
+ u_int32 dsp = INL (nc_dsp);
+ u_int32 dsa = INL (nc_dsa);
+ ccb_p cp = ncr_ccb_from_dsa(np, dsa);
+
+ /*
+ ** If we haven't been interrupted inside the SCRIPTS
+ ** critical paths, we can safely restart the SCRIPTS
+ ** and trust the DSA value if it matches a CCB.
+ */
+ if ((!(dsp > NCB_SCRIPT_PHYS (np, getjob_begin) &&
+ dsp < NCB_SCRIPT_PHYS (np, getjob_end) + 1)) &&
+ (!(dsp > NCB_SCRIPT_PHYS (np, ungetjob) &&
+ dsp < NCB_SCRIPT_PHYS (np, reselect) + 1)) &&
+ (!(dsp > NCB_SCRIPTH_PHYS (np, sel_for_abort) &&
+ dsp < NCB_SCRIPTH_PHYS (np, sel_for_abort_1) + 1)) &&
+ (!(dsp > NCB_SCRIPT_PHYS (np, done) &&
+ dsp < NCB_SCRIPT_PHYS (np, done_end) + 1))) {
+ if (cp) {
+ cp->host_status = hsts;
+ ncr_complete (np, cp);
+ }
+ OUTL (nc_dsa, DSA_INVALID);
+ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
+ OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+ }
+ else
+ goto reset_all;
+
+ return;
+
+reset_all:
+ ncr_start_reset(np);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for selection timeout
+**
+**==========================================================
+**
+** There seems to be a bug in the 53c810.
+** Although a STO-Interrupt is pending,
+** it continues executing script commands.
+** But it will fail and interrupt (IID) on
+** the next instruction where it's looking
+** for a valid phase.
+**
+**----------------------------------------------------------
+*/
+
+void ncr_int_sto (ncb_p np)
+{
+ u_int32 dsp = INL (nc_dsp);
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("T");
+
+ if (dsp == NCB_SCRIPT_PHYS (np, wf_sel_done) + 8 ||
+ !(driver_setup.recovery & 1))
+ ncr_recover_scsi_int(np, HS_SEL_TIMEOUT);
+ else
+ ncr_start_reset(np);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for unexpected disconnect
+**
+**==========================================================
+**
+**----------------------------------------------------------
+*/
+void ncr_int_udc (ncb_p np)
+{
+ u_int32 dsa = INL (nc_dsa);
+ ccb_p cp = ncr_ccb_from_dsa(np, dsa);
+ tcb_p tp = &np->target[cp->target];
+
+ /*
+ * Fix up. Some disks respond to a PPR negotiation with
+ * a bus free instead of a message reject.
+ * Disable PPR negotiation if this is the first time
+ * PPR negotiation has been tried.
+ */
+
+ if (tp->ppr_negotiation == 1)
+ tp->ppr_negotiation = 0;
+
+ printk ("%s: unexpected disconnect\n", ncr_name(np));
+ ncr_recover_scsi_int(np, HS_UNEXPECTED);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI bus mode change
+**
+**==========================================================
+**
+** spi2-r12 11.2.3 says a transceiver mode change must
+** generate a reset event and a device that detects a reset
+** event shall initiate a hard reset. It says also that a
+** device that detects a mode change shall set data transfer
+** mode to eight bit asynchronous, etc...
+** So, just resetting should be enough.
+**
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_int_sbmc (ncb_p np)
+{
+ u_char scsi_mode = INB (nc_stest4) & SMODE;
+
+ printk("%s: SCSI bus mode change from %x to %x.\n",
+ ncr_name(np), np->scsi_mode, scsi_mode);
+
+ np->scsi_mode = scsi_mode;
+
+
+ /*
+ ** Suspend command processing for 1 second and
+ ** reinitialize all except the chip.
+ */
+ np->settle_time = ktime_get(1*HZ);
+ ncr_init (np, 0, bootverbose ? "scsi mode change" : NULL, HS_RESET);
+}
+
+/*==========================================================
+**
+** ncr chip exception handler for SCSI parity error.
+**
+**==========================================================
+**
+** When the chip detects a SCSI parity error and is
+** currently executing a (CH)MOV instruction, it does
+** not interrupt immediately, but tries to finish the
+** transfer of the current scatter entry before
+** interrupting. The following situations may occur:
+**
+** - The complete scatter entry has been transferred
+** without the device having changed phase.
+** The chip will then interrupt with the DSP pointing
+** to the instruction that follows the MOV.
+**
+** - A phase mismatch occurs before the MOV finished
+** and phase errors are to be handled by the C code.
+** The chip will then interrupt with both PAR and MA
+** conditions set.
+**
+** - A phase mismatch occurs before the MOV finished and
+** phase errors are to be handled by SCRIPTS (895A or 896).
+** The chip will load the DSP with the phase mismatch
+** JUMP address and interrupt the host processor.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_int_par (ncb_p np, u_short sist)
+{
+ u_char hsts = INB (HS_PRT);
+ u_int32 dsp = INL (nc_dsp);
+ u_int32 dbc = INL (nc_dbc);
+ u_int32 dsa = INL (nc_dsa);
+ u_char sbcl = INB (nc_sbcl);
+ u_char cmd = dbc >> 24;
+ int phase = cmd & 7;
+ ccb_p cp = ncr_ccb_from_dsa(np, dsa);
+
+ printk("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
+ ncr_name(np), hsts, dbc, sbcl);
+
+ /*
+ ** Check that the chip is connected to the SCSI BUS.
+ */
+ if (!(INB (nc_scntl1) & ISCON)) {
+ if (!(driver_setup.recovery & 1)) {
+ ncr_recover_scsi_int(np, HS_FAIL);
+ return;
+ }
+ goto reset_all;
+ }
+
+ /*
+ ** If the nexus is not clearly identified, reset the bus.
+ ** We will try to do better later.
+ */
+ if (!cp)
+ goto reset_all;
+
+ ** Check that the instruction was a MOVE, the direction was INPUT,
+ ** and ATN is asserted.
+ ** ATN is asserted.
+ */
+ if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
+ goto reset_all;
+
+ /*
+ ** Keep track of the parity error.
+ */
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_PARITY_ERR;
+
+ /*
+ ** Prepare the message to send to the device.
+ */
+ np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
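+ /*
+ ** Phase 7 is MSG IN, so we answer with MESSAGE PARITY ERROR;
+ ** for the other input phases we send INITIATOR DETECTED ERROR.
+ */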
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ /*
+ ** Save error message. For integrity check use only.
+ */
+ if (np->check_integrity)
+ np->check_integ_par = np->msgout[0];
+#endif
+
+ /*
+ ** If the old phase was DATA IN or DT DATA IN phase,
+ ** we have to deal with the 3 situations described above.
+ ** For other input phases (MSG IN and STATUS), the device
+ ** must resend the whole thing that failed parity checking
+ ** or signal error. So, jumping to dispatcher should be OK.
+ */
+ if ((phase == 1) || (phase == 5)) {
+ /* Phase mismatch handled by SCRIPTS */
+ if (dsp == NCB_SCRIPTH_PHYS (np, pm_handle))
+ OUTL_DSP (dsp);
+ /* Phase mismatch handled by the C code */
+ else if (sist & MA)
+ ncr_int_ma (np);
+ /* No phase mismatch occurred */
+ else {
+ OUTL (nc_temp, dsp);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch));
+ }
+ }
+ else
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ return;
+
+reset_all:
+ ncr_start_reset(np);
+ return;
+}
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for phase errors.
+**
+**
+**==========================================================
+**
+** We have to construct a new transfer descriptor,
+** to transfer the rest of the current block.
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_int_ma (ncb_p np)
+{
+ u_int32 dbc;
+ u_int32 rest;
+ u_int32 dsp;
+ u_int32 dsa;
+ u_int32 nxtdsp;
+ u_int32 *vdsp;
+ u_int32 oadr, olen;
+ u_int32 *tblp;
+ u_int32 newcmd;
+ u_int delta;
+ u_char cmd;
+ u_char hflags, hflags0;
+ struct pm_ctx *pm;
+ ccb_p cp;
+
+ dsp = INL (nc_dsp);
+ dbc = INL (nc_dbc);
+ dsa = INL (nc_dsa);
+
+ cmd = dbc >> 24;
+ rest = dbc & 0xffffff;
+ delta = 0;
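+ /*
+ ** DCMD/DBC were read as one 32-bit register: the top byte is
+ ** the opcode of the interrupted SCRIPTS instruction, the low
+ ** 24 bits the residual byte count of the interrupted MOVE.
+ */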
+
+ /*
+ ** locate matching cp.
+ */
+ cp = ncr_ccb_from_dsa(np, dsa);
+
+ if (DEBUG_FLAGS & DEBUG_PHASE)
+ printk("CCB = %2x %2x %2x %2x %2x %2x\n",
+ cp->cmd->cmnd[0], cp->cmd->cmnd[1], cp->cmd->cmnd[2],
+ cp->cmd->cmnd[3], cp->cmd->cmnd[4], cp->cmd->cmnd[5]);
+
+ /*
+ ** Do not take into account the DMA fifo and various buffers in
+ ** INPUT phase since the chip flushes everything before
+ ** raising the MA interrupt for interrupted INPUT phases.
+ ** For DATA IN phase, we will check for the SWIDE later.
+ */
+ if ((cmd & 7) != 1 && (cmd & 7) != 5) {
+ u_int32 dfifo;
+ u_char ss0, ss2;
+
+ /*
+ ** If C1010, DFBC contains number of bytes in DMA fifo.
+ ** else read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
+ */
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+ delta = INL(nc_dfbc) & 0xffff;
+ else {
+ dfifo = INL(nc_dfifo);
+
+ /*
+ ** Calculate remaining bytes in DMA fifo.
+ ** C1010 - always large fifo, value in dfbc
+ ** Otherwise, (CTEST5 = dfifo >> 16)
+ */
+ if (dfifo & (DFS << 16))
+ delta = ((((dfifo >> 8) & 0x300) |
+ (dfifo & 0xff)) - rest) & 0x3ff;
+ else
+ delta = ((dfifo & 0xff) - rest) & 0x7f;
+
+ /*
+ ** The data in the dma fifo has not been
+ ** transferred to the target -> add the amount
+ ** to the rest and clear the data.
+ ** Check the sstat2 register in case of wide
+ ** transfer.
+ */
+
+ }
+
+ rest += delta;
+ ss0 = INB (nc_sstat0);
+ if (ss0 & OLF) rest++;
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss0 & ORF))
+ rest++;
+ if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
+ ss2 = INB (nc_sstat2);
+ if (ss2 & OLF1) rest++;
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss2 & ORF))
+ rest++;
+ };
+
+ /*
+ ** Clear fifos.
+ */
+ OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
+ OUTB (nc_stest3, TE|CSF); /* scsi fifo */
+ }
+
+ /*
+ ** log the information
+ */
+
+ if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
+ printk ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
+ (unsigned) rest, (unsigned) delta);
+
+ /*
+ ** try to find the interrupted script command,
+ ** and the address at which to continue.
+ */
+ vdsp = 0;
+ nxtdsp = 0;
+ if (dsp > np->p_script &&
+ dsp <= np->p_script + sizeof(struct script)) {
+ vdsp = (u_int32 *)((char*)np->script0 + (dsp-np->p_script-8));
+ nxtdsp = dsp;
+ }
+ else if (dsp > np->p_scripth &&
+ dsp <= np->p_scripth + sizeof(struct scripth)) {
+ vdsp = (u_int32 *)((char*)np->scripth0 + (dsp-np->p_scripth-8));
+ nxtdsp = dsp;
+ }
+
+ /*
+ ** log the information
+ */
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printk ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
+ cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
+ };
+
+ if (!vdsp) {
+ printk ("%s: interrupted SCRIPT address not found.\n",
+ ncr_name (np));
+ goto reset_all;
+ }
+
+ if (!cp) {
+ printk ("%s: SCSI phase error fixup: CCB already dequeued.\n",
+ ncr_name (np));
+ goto reset_all;
+ }
+
+ /*
+ ** get the old start address and old length.
+ */
+
+ oadr = scr_to_cpu(vdsp[1]);
+
+ if (cmd & 0x10) { /* Table indirect */
+ tblp = (u_int32 *) ((char*) &cp->phys + oadr);
+ olen = scr_to_cpu(tblp[0]);
+ oadr = scr_to_cpu(tblp[1]);
+ } else {
+ tblp = (u_int32 *) 0;
+ olen = scr_to_cpu(vdsp[0]) & 0xffffff;
+ };
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ printk ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
+ (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
+ tblp,
+ (unsigned) olen,
+ (unsigned) oadr);
+ };
+
+ /*
+ ** check cmd against the assumed interrupted script command.
+ ** In a DT data phase, the MOVE instruction does not have bit 4
+ ** of the phase set.
+ */
+
+ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
+ PRINT_ADDR(cp->cmd);
+ printk ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
+ (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
+
+ goto reset_all;
+ };
+
+ /*
+ ** If the old phase was not a data phase, leave here.
+ ** The C/D line is low for data phases.
+ */
+
+ if (cmd & 0x02) {
+ PRINT_ADDR(cp->cmd);
+ printk ("phase change %x-%x %d@%08x resid=%d.\n",
+ cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
+ (unsigned)oadr, (unsigned)rest);
+ goto unexpected_phase;
+ };
+
+ /*
+ ** Choose the correct PM save area.
+ **
+ ** Look at the PM_SAVE SCRIPT if you want to understand
+ ** this stuff. The equivalent code is implemented in
+ ** SCRIPTS for the 895A and 896 that are able to handle
+ ** PM from the SCRIPTS processor.
+ */
+
+ hflags0 = INB (HF_PRT);
+ hflags = hflags0;
+
+ if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
+ if (hflags & HF_IN_PM0)
+ nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
+ else if (hflags & HF_IN_PM1)
+ nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
+
+ if (hflags & HF_DP_SAVED)
+ hflags ^= HF_ACT_PM;
+ }
+
+ if (!(hflags & HF_ACT_PM)) {
+ pm = &cp->phys.pm0;
+ newcmd = NCB_SCRIPT_PHYS(np, pm0_data);
+ }
+ else {
+ pm = &cp->phys.pm1;
+ newcmd = NCB_SCRIPT_PHYS(np, pm1_data);
+ }
+
+ hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
+ if (hflags != hflags0)
+ OUTB (HF_PRT, hflags);
+
+ /*
+ ** fill in the phase mismatch context
+ */
+
+ pm->sg.addr = cpu_to_scr(oadr + olen - rest);
+ pm->sg.size = cpu_to_scr(rest);
+ pm->ret = cpu_to_scr(nxtdsp);
+
+ /*
+ ** If we have a SWIDE,
+ ** - prepare the address to write the SWIDE from SCRIPTS,
+ ** - compute the SCRIPTS address to restart from,
+ ** - move current data pointer context by one byte.
+ */
+ nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+ if ( ((cmd & 7) == 1 || (cmd & 7) == 5)
+ && cp && (cp->phys.select.sel_scntl3 & EWS) &&
+ (INB (nc_scntl2) & WSR)) {
+ u32 tmp;
+
+#ifdef SYM_DEBUG_PM_WITH_WSR
+ PRINT_ADDR(cp);
+ printf ("MA interrupt with WSR set - "
+ "pm->sg.addr=%x - pm->sg.size=%d\n",
+ pm->sg.addr, pm->sg.size);
+#endif
+ /*
+ * Set up the table indirect for the MOVE
+ * of the residual byte and adjust the data
+ * pointer context.
+ */
+ tmp = scr_to_cpu(pm->sg.addr);
+ cp->phys.wresid.addr = cpu_to_scr(tmp);
+ pm->sg.addr = cpu_to_scr(tmp + 1);
+ tmp = scr_to_cpu(pm->sg.size);
+ cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
+ pm->sg.size = cpu_to_scr(tmp - 1);
+
+ /*
+ * If only the residual byte is to be moved,
+ * no PM context is needed.
+ */
+ if ((tmp&0xffffff) == 1)
+ newcmd = pm->ret;
+
+ /*
+ * Prepare the address of SCRIPTS that will
+ * move the residual byte to memory.
+ */
+ nxtdsp = NCB_SCRIPTH_PHYS (np, wsr_ma_helper);
+ }
+
+ if (DEBUG_FLAGS & DEBUG_PHASE) {
+ PRINT_ADDR(cp->cmd);
+ printk ("PM %x %x %x / %x %x %x.\n",
+ hflags0, hflags, newcmd,
+ (unsigned)scr_to_cpu(pm->sg.addr),
+ (unsigned)scr_to_cpu(pm->sg.size),
+ (unsigned)scr_to_cpu(pm->ret));
+ }
+
+ /*
+ ** Restart the SCRIPTS processor.
+ */
+
+ OUTL (nc_temp, newcmd);
+ OUTL_DSP (nxtdsp);
+ return;
+
+ /*
+ ** Unexpected phase changes that occur when the current phase
+ ** is not a DATA IN or DATA OUT phase are due to error conditions.
+ ** Such an event may only happen when the SCRIPTS is using a
+ ** multibyte SCSI MOVE.
+ **
+ ** Phase change Some possible cause
+ **
+ ** COMMAND --> MSG IN SCSI parity error detected by target.
+ ** COMMAND --> STATUS Bad command or refused by target.
+ ** MSG OUT --> MSG IN Message rejected by target.
+ ** MSG OUT --> COMMAND Bogus target that discards extended
+ ** negotiation messages.
+ **
+ ** The code below does not care about the new phase and so
+ ** trusts the target. Why annoy it?
+ ** If the interrupted phase is COMMAND phase, we restart at
+ ** dispatcher.
+ ** If a target does not get all the messages after selection,
+ ** the code assumes blindly that the target discards extended
+ ** messages and clears the negotiation status.
+ ** If the target does not want all our response to negotiation,
+ ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
+ ** bloat for such a should_not_happen situation).
+ ** In all other situations, we reset the BUS.
+ ** Are these assumptions reasonable? (Wait and see ...)
+ */
+unexpected_phase:
+ dsp -= 8;
+ nxtdsp = 0;
+
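+ /*
+ ** cmd & 7 is the SCSI phase encoded in the interrupted MOVE:
+ ** 0 = DATA OUT, 1 = DATA IN, 2 = COMMAND, 3 = STATUS,
+ ** 6 = MSG OUT, 7 = MSG IN (4 and 5 carry the DT data phases
+ ** on the C1010).
+ */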
+ switch (cmd & 7) {
+ case 2: /* COMMAND phase */
+ nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+ break;
+#if 0
+ case 3: /* STATUS phase */
+ nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
+ break;
+#endif
+ case 6: /* MSG OUT phase */
+ /*
+ ** If the device may want to use untagged when we want
+ ** tagged, we prepare an IDENTIFY without disc. granted,
+ ** since we will not be able to handle reselect.
+ ** Otherwise, we just don't care.
+ */
+ if (dsp == NCB_SCRIPT_PHYS (np, send_ident)) {
+ if (cp->tag != NO_TAG && olen - rest <= 3) {
+ cp->host_status = HS_BUSY;
+ np->msgout[0] = M_IDENTIFY | cp->lun;
+ nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break_atn);
+ }
+ else
+ nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break);
+ }
+ else if (dsp == NCB_SCRIPTH_PHYS (np, send_wdtr) ||
+ dsp == NCB_SCRIPTH_PHYS (np, send_sdtr) ||
+ dsp == NCB_SCRIPTH_PHYS (np, send_ppr)) {
+ nxtdsp = NCB_SCRIPTH_PHYS (np, nego_bad_phase);
+ }
+ break;
+#if 0
+ case 7: /* MSG IN phase */
+ nxtdsp = NCB_SCRIPT_PHYS (np, clrack);
+ break;
+#endif
+ }
+
+ if (nxtdsp) {
+ OUTL_DSP (nxtdsp);
+ return;
+ }
+
+reset_all:
+ ncr_start_reset(np);
+}
+
+/*==========================================================
+**
+** ncr chip handler for QUEUE FULL and CHECK CONDITION
+**
+**==========================================================
+**
+** On QUEUE FULL status, we set the actual tagged command
+ ** queue depth to the number of disconnected CCBs, which is
+** hopefully a good value to avoid further QUEUE FULL.
+**
+** On CHECK CONDITION or COMMAND TERMINATED, we use the
+** CCB of the failed command for performing a REQUEST
+** SENSE SCSI command.
+**
+** We do not want to change the order commands will be
+** actually queued to the device after we received a
+** QUEUE FULL status. We also want to properly deal with
+** contingent allegiance condition. For these reasons,
+** we remove from the start queue all commands for this
+** LUN that haven't been yet queued to the device and
+ ** put them back in the corresponding LUN queue, then
+ ** requeue the CCB that failed in front of the LUN queue.
+ ** I just hope this does not have to be done too often. :)
+**
+** If we are using IMMEDIATE ARBITRATION, we clear the
+ ** IARB hint for every command we encounter in order not
+** to be stuck with a won arbitration and no job to queue
+** to a device.
+**----------------------------------------------------------
+*/
+
+static void ncr_sir_to_redo(ncb_p np, int num, ccb_p cp)
+{
+ Scsi_Cmnd *cmd = cp->cmd;
+ tcb_p tp = &np->target[cp->target];
+ lcb_p lp = ncr_lp(np, tp, cp->lun);
+ ccb_p cp2;
+ int busyccbs = 1;
+ u_int32 startp;
+ u_char s_status = INB (SS_PRT);
+ int msglen;
+ int i, j;
+
+
+ /*
+ ** If the LCB is not yet available, then only
+ ** 1 IO is accepted, so we should have it.
+ */
+ if (!lp)
+ goto next;
+ /*
+ ** Remove all CCBs queued to the chip for that LUN and put
+ ** them back in the LUN CCB wait queue.
+ */
+ busyccbs = lp->queuedccbs;
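+ /*
+ ** SCRATCHA is expected to hold the bus address of the next
+ ** start queue slot the SCRIPTS will fetch (see the same
+ ** computation in ncr_sir_task_recovery); dividing by 4 turns
+ ** it into an index into the 32-bit squeue[] array, which the
+ ** loop below walks two words per slot.
+ */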
+ i = (INL (nc_scratcha) - np->p_squeue) / 4;
+ j = i;
+ while (i != np->squeueput) {
+ cp2 = ncr_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
+ assert(cp2);
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /* IARB hints may not be relevant any more. Forget them. */
+ cp2->host_flags &= ~HF_HINT_IARB;
+#endif
+ if (cp2 && cp2->target == cp->target && cp2->lun == cp->lun) {
+ xpt_remque(&cp2->link_ccbq);
+ xpt_insque_head(&cp2->link_ccbq, &lp->wait_ccbq);
+ --lp->queuedccbs;
+ cp2->queued = 0;
+ }
+ else {
+ if (i != j)
+ np->squeue[j] = np->squeue[i];
+ if ((j += 2) >= MAX_START*2) j = 0;
+ }
+ if ((i += 2) >= MAX_START*2) i = 0;
+ }
+ if (i != j) /* Copy back the idle task if needed */
+ np->squeue[j] = np->squeue[i];
+ np->squeueput = j; /* Update our current start queue pointer */
+
+ /*
+ ** Requeue the interrupted CCB in front of the
+ ** LUN CCB wait queue to preserve ordering.
+ */
+ xpt_remque(&cp->link_ccbq);
+ xpt_insque_head(&cp->link_ccbq, &lp->wait_ccbq);
+ --lp->queuedccbs;
+ cp->queued = 0;
+
+next:
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /* IARB hint may not be relevant any more. Forget it. */
+ cp->host_flags &= ~HF_HINT_IARB;
+ if (np->last_cp)
+ np->last_cp = 0;
+#endif
+
+ /*
+ ** Now we can restart the SCRIPTS processor safely.
+ */
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
+
+ switch(s_status) {
+ default:
+ case S_BUSY:
+ ncr_complete(np, cp);
+ break;
+ case S_QUEUE_FULL:
+ if (!lp || !lp->queuedccbs) {
+ ncr_complete(np, cp);
+ break;
+ }
+ if (bootverbose >= 1) {
+ PRINT_ADDR(cmd);
+ printk ("QUEUE FULL! %d busy, %d disconnected CCBs\n",
+ busyccbs, lp->queuedccbs);
+ }
+ /*
+ ** Decrease number of tags to the number of
+ ** disconnected commands.
+ */
+ if (lp->queuedccbs < lp->numtags) {
+ lp->numtags = lp->queuedccbs;
+ lp->num_good = 0;
+ ncr_setup_tags (np, cp->target, cp->lun);
+ }
+ /*
+ ** Repair the offending CCB.
+ */
+ cp->phys.header.savep = cp->startp;
+ cp->phys.header.lastp = cp->lastp0;
+ cp->host_status = HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+ cp->xerr_status = 0;
+ cp->extra_bytes = 0;
+ cp->host_flags &= (HF_PM_TO_C|HF_DATA_IN);
+
+ break;
+
+ case S_TERMINATED:
+ case S_CHECK_COND:
+ /*
+ ** If we were requesting sense, give up.
+ */
+ if (cp->host_flags & HF_AUTO_SENSE) {
+ ncr_complete(np, cp);
+ break;
+ }
+
+ /*
+ ** Save SCSI status and extended error.
+ ** Compute the data residual now.
+ */
+ cp->sv_scsi_status = cp->scsi_status;
+ cp->sv_xerr_status = cp->xerr_status;
+ cp->resid = ncr_compute_residual(np, cp);
+
+ /*
+ ** Device returned CHECK CONDITION status.
+ ** Prepare all needed data structures for getting
+ ** sense data.
+ */
+
+ /*
+ ** identify message
+ */
+ cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
+ msglen = 1;
+
+ /*
+ ** If we are currently using anything different from
+ ** async. 8 bit data transfers with that target,
+ ** start a negotiation, since the device may want
+ ** to report a UNIT ATTENTION condition due to
+ ** a cause we currently ignore, and we do not want
+ ** to be stuck with WIDE and/or SYNC data transfers.
+ **
+ ** cp->nego_status is filled by ncr_prepare_nego().
+ **
+ ** Do NOT negotiate if performing integrity check
+ ** or if integrity check has completed, all check
+ ** conditions will have been cleared.
+ */
+
+#ifdef SCSI_NCR_INTEGRITY_CHECKING
+ if (DEBUG_FLAGS & DEBUG_IC) {
+ printk("%s: ncr_sir_to_redo: ic_done %2X, in_progress %2X\n",
+ ncr_name(np), tp->ic_done, cp->cmd->ic_in_progress);
+ }
+
+ /*
+ ** If parity error during integrity check,
+ ** set the target width to narrow. Otherwise,
+ ** do not negotiate on a request sense.
+ */
+ if ( np->check_integ_par && np->check_integrity
+ && cp->cmd->ic_in_progress ) {
+ cp->nego_status = 0;
+ msglen +=
+ ncr_ic_nego (np, cp, cmd ,&cp->scsi_smsg2[msglen]);
+ }
+
+ if (!np->check_integrity ||
+ (np->check_integrity &&
+ (!cp->cmd->ic_in_progress && !tp->ic_done)) ) {
+ ncr_negotiate(np, tp);
+ cp->nego_status = 0;
+ {
+ u_char sync_offset;
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
+ sync_offset = tp->sval & 0x3f;
+ else
+ sync_offset = tp->sval & 0x1f;
+
+ if ((tp->wval & EWS) || sync_offset)
+ msglen +=
+ ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]);
+ }
+
+ }
+#else
+ ncr_negotiate(np, tp);
+ cp->nego_status = 0;
+ if ((tp->wval & EWS) || (tp->sval & 0x1f))
+ msglen +=
+ ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]);
+#endif /* SCSI_NCR_INTEGRITY_CHECKING */
+
+ /*
+ ** Message table indirect structure.
+ */
+ cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2));
+ cp->phys.smsg.size = cpu_to_scr(msglen);
+
+ /*
+ ** sense command
+ */
+ cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd));
+ cp->phys.cmd.size = cpu_to_scr(6);
+
+ /*
+ ** patch requested size into sense command
+ */
+ cp->sensecmd[0] = 0x03;
+ cp->sensecmd[1] = cp->lun << 5;
+ cp->sensecmd[4] = sizeof(cp->sense_buf);
+
+ /*
+ ** sense data
+ */
+ bzero(cp->sense_buf, sizeof(cp->sense_buf));
+ cp->phys.sense.addr = cpu_to_scr(CCB_PHYS(cp,sense_buf[0]));
+ cp->phys.sense.size = cpu_to_scr(sizeof(cp->sense_buf));
+
+ /*
+ ** requeue the command.
+ */
+ startp = NCB_SCRIPTH_PHYS (np, sdata_in);
+
+ cp->phys.header.savep = cpu_to_scr(startp);
+ cp->phys.header.goalp = cpu_to_scr(startp + 16);
+ cp->phys.header.lastp = cpu_to_scr(startp);
+ cp->phys.header.wgoalp = cpu_to_scr(startp + 16);
+ cp->phys.header.wlastp = cpu_to_scr(startp);
+
+ cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
+ cp->scsi_status = S_ILLEGAL;
+ cp->host_flags = (HF_AUTO_SENSE|HF_DATA_IN);
+
+ cp->phys.header.go.start =
+ cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
+
+ /*
+ ** If lp not yet allocated, requeue the command.
+ */
+ if (!lp)
+ ncr_put_start_queue(np, cp);
+ break;
+ }
+
+ /*
+ ** requeue awaiting scsi commands for this lun.
+ */
+ if (lp)
+ ncr_start_next_ccb(np, lp, 1);
+
+ return;
+}
+
+/*----------------------------------------------------------
+**
+** After a device has accepted some management message
+** as BUS DEVICE RESET, ABORT TASK, etc ..., or when
+** a device signals a UNIT ATTENTION condition, some
+** tasks are thrown away by the device. We are required
+** to reflect that on our tasks list since the device
+** will never complete these tasks.
+**
+** This function completes all disconnected CCBs for a
+** given target that matches the following criteria:
+ ** - lun=-1 means any logical UNIT, otherwise a given one.
+** - task=-1 means any task, otherwise a given one.
+**----------------------------------------------------------
+*/
+static int ncr_clear_tasks(ncb_p np, u_char hsts,
+ int target, int lun, int task)
+{
+ int i = 0;
+ ccb_p cp;
+
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status != HS_DISCONNECT)
+ continue;
+ if (cp->target != target)
+ continue;
+ if (lun != -1 && cp->lun != lun)
+ continue;
+ if (task != -1 && cp->tag != NO_TAG && cp->scsi_smsg[2] != task)
+ continue;
+ cp->host_status = hsts;
+ cp->scsi_status = S_ILLEGAL;
+ ncr_complete(np, cp);
+ ++i;
+ }
+ return i;
+}
+
+/*==========================================================
+**
+** ncr chip handler for TASKS recovery.
+**
+**==========================================================
+**
+ ** We cannot safely abort a command while the SCRIPTS
+ ** processor is running, since we would just be racing
+ ** with it.
+**
+** As long as we have tasks to abort, we keep the SEM
+** bit set in the ISTAT. When this bit is set, the
+** SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
+** each time it enters the scheduler.
+**
+** If we have to reset a target, clear tasks of a unit,
+** or to perform the abort of a disconnected job, we
+** restart the SCRIPTS for selecting the target. Once
+** selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
+** If it loses arbitration, the SCRIPTS will interrupt again
+ ** the next time it enters its scheduler, and so on ...
+ **
+ ** On SIR_TARGET_SELECTED, we scan for the most
+** appropriate thing to do:
+**
+ ** - If nothing, we just send an M_ABORT message to the
+** target to get rid of the useless SCSI bus ownership.
+** According to the specs, no tasks shall be affected.
+** - If the target is to be reset, we send it a M_RESET
+** message.
+ ** - If a logical UNIT is to be cleared, we send the
+** IDENTIFY(lun) + M_ABORT.
+** - If an untagged task is to be aborted, we send the
+** IDENTIFY(lun) + M_ABORT.
+** - If a tagged task is to be aborted, we send the
+** IDENTIFY(lun) + task attributes + M_ABORT_TAG.
+**
+** Once our 'kiss of death' :) message has been accepted
+** by the target, the SCRIPTS interrupts again
+** (SIR_ABORT_SENT). On this interrupt, we complete
+** all the CCBs that should have been aborted by the
+** target according to our message.
+**
+**----------------------------------------------------------
+*/
+static void ncr_sir_task_recovery(ncb_p np, int num)
+{
+ ccb_p cp;
+ tcb_p tp;
+ int target=-1, lun=-1, task;
+ int i, k;
+ u_char *p;
+
+ switch(num) {
+ /*
+ ** The SCRIPTS processor stopped before starting
+ ** the next command in order to allow us to perform
+ ** some task recovery.
+ */
+ case SIR_SCRIPT_STOPPED:
+
+ /*
+ ** Do we have any target to reset or unit to clear ?
+ */
+ for (i = 0 ; i < MAX_TARGET ; i++) {
+ tp = &np->target[i];
+ if (tp->to_reset || (tp->l0p && tp->l0p->to_clear)) {
+ target = i;
+ break;
+ }
+ if (!tp->lmp)
+ continue;
+ for (k = 1 ; k < MAX_LUN ; k++) {
+ if (tp->lmp[k] && tp->lmp[k]->to_clear) {
+ target = i;
+ break;
+ }
+ }
+ if (target != -1)
+ break;
+ }
+
+ /*
+ ** If not, look at the CCB list for any
+ ** disconnected CCB to be aborted.
+ */
+ if (target == -1) {
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status != HS_DISCONNECT)
+ continue;
+ if (cp->to_abort) {
+ target = cp->target;
+ break;
+ }
+ }
+ }
+
+ /*
+ ** If some target is to be selected,
+ ** prepare and start the selection.
+ */
+ if (target != -1) {
+ tp = &np->target[target];
+ np->abrt_sel.sel_id = target;
+ np->abrt_sel.sel_scntl3 = tp->wval;
+ np->abrt_sel.sel_sxfer = tp->sval;
+ np->abrt_sel.sel_scntl4 = tp->uval;
+ OUTL(nc_dsa, np->p_ncb);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sel_for_abort));
+ return;
+ }
+
+ /*
+ ** Nothing is to be selected, so we do not need
+ ** to synchronize with the SCRIPTS anymore.
+ ** Remove the SEM flag from the ISTAT.
+ */
+ np->istat_sem = 0;
+ OUTB (nc_istat, SIGP);
+
+ /*
+ ** Now look at CCBs to abort that haven't started yet.
+ ** Remove all those CCBs from the start queue and
+ ** complete them with appropriate status.
+ ** Btw, the SCRIPTS processor is still stopped, so
+ ** we are not in race.
+ */
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status != HS_BUSY &&
+ cp->host_status != HS_NEGOTIATE)
+ continue;
+ if (!cp->to_abort)
+ continue;
+#ifdef SCSI_NCR_IARB_SUPPORT
+ /*
+ ** If we are using IMMEDIATE ARBITRATION, we do not
+ ** want to cancel the last queued CCB, since the
+ ** SCRIPTS may have anticipated the selection.
+ */
+ if (cp == np->last_cp) {
+ cp->to_abort = 0;
+ continue;
+ }
+#endif
+ /*
+ ** Compute index of next position in the start
+ ** queue the SCRIPTS will schedule.
+ */
+ i = (INL (nc_scratcha) - np->p_squeue) / 4;
+
+ /*
+ ** Remove the job from the start queue.
+ */
+ k = -1;
+ while (1) {
+ if (i == np->squeueput)
+ break;
+ if (k == -1) { /* Not found yet */
+ if (cp == ncr_ccb_from_dsa(np,
+ scr_to_cpu(np->squeue[i])))
+ k = i; /* Found */
+ }
+ else {
+ /*
+ ** Once found, we have to move
+ ** back all jobs by 1 position.
+ */
+ np->squeue[k] = np->squeue[i];
+ k += 2;
+ if (k >= MAX_START*2)
+ k = 0;
+ }
+
+ i += 2;
+ if (i >= MAX_START*2)
+ i = 0;
+ }
+ if (k != -1) {
+ np->squeue[k] = np->squeue[i]; /* Idle task */
+ np->squeueput = k; /* Start queue pointer */
+ }
+ cp->host_status = HS_ABORTED;
+ cp->scsi_status = S_ILLEGAL;
+ ncr_complete(np, cp);
+ }
+ break;
+ /*
+ ** The SCRIPTS processor has selected a target
+ ** we may have some manual recovery to perform for.
+ */
+ case SIR_TARGET_SELECTED:
+ target = (INB (nc_sdid) & 0xf);
+ tp = &np->target[target];
+
+ np->abrt_tbl.addr = vtobus(np->abrt_msg);
+
+ /*
+ ** If the target is to be reset, prepare a
+ ** M_RESET message and clear the to_reset flag
+ ** since we do not expect this operation to fail.
+ */
+ if (tp->to_reset) {
+ np->abrt_msg[0] = M_RESET;
+ np->abrt_tbl.size = 1;
+ tp->to_reset = 0;
+ break;
+ }
+
+ /*
+ ** Otherwise, look for some logical unit to be cleared.
+ */
+ if (tp->l0p && tp->l0p->to_clear)
+ lun = 0;
+ else if (tp->lmp) {
+ for (k = 1 ; k < MAX_LUN ; k++) {
+ if (tp->lmp[k] && tp->lmp[k]->to_clear) {
+ lun = k;
+ break;
+ }
+ }
+ }
+
+ /*
+ ** If a logical unit is to be cleared, prepare
+ ** an IDENTIFY(lun) + ABORT MESSAGE.
+ */
+ if (lun != -1) {
+ lcb_p lp = ncr_lp(np, tp, lun);
+ lp->to_clear = 0; /* We do not expect to fail here */
+ np->abrt_msg[0] = M_IDENTIFY | lun;
+ np->abrt_msg[1] = M_ABORT;
+ np->abrt_tbl.size = 2;
+ break;
+ }
+
+ /*
+ ** Otherwise, look for some disconnected job to
+ ** abort for this target.
+ */
+ for (cp = np->ccbc; cp; cp = cp->link_ccb) {
+ if (cp->host_status != HS_DISCONNECT)
+ continue;
+ if (cp->target != target)
+ continue;
+ if (cp->to_abort)
+ break;
+ }
+
+ /*
+ ** If we have none, probably because the device has
+ ** completed the command before we won arbitration,
+ ** send an M_ABORT message without IDENTIFY.
+ ** According to the specs, the device must just
+ ** disconnect the BUS and not abort any task.
+ */
+ if (!cp) {
+ np->abrt_msg[0] = M_ABORT;
+ np->abrt_tbl.size = 1;
+ break;
+ }
+
+ /*
+ ** We have some task to abort.
+ ** Set the IDENTIFY(lun)
+ */
+ np->abrt_msg[0] = M_IDENTIFY | cp->lun;
+
+ /*
+ ** If we want to abort an untagged command, we
+ ** will send an IDENTIFY + M_ABORT.
+ ** Otherwise (tagged command), we will send
+ ** an IDENTIFY + task attributes + ABORT TAG.
+ */
+ if (cp->tag == NO_TAG) {
+ np->abrt_msg[1] = M_ABORT;
+ np->abrt_tbl.size = 2;
+ }
+ else {
+ np->abrt_msg[1] = cp->scsi_smsg[1];
+ np->abrt_msg[2] = cp->scsi_smsg[2];
+ np->abrt_msg[3] = M_ABORT_TAG;
+ np->abrt_tbl.size = 4;
+ }
+ cp->to_abort = 0; /* We do not expect to fail here */
+ break;
+
+ /*
+ ** The target has accepted our message and switched
+ ** to BUS FREE phase as we expected.
+ */
+ case SIR_ABORT_SENT:
+ target = (INB (nc_sdid) & 0xf);
+ tp = &np->target[target];
+
+ /*
+ ** If we didn't abort anything, leave here.
+ */
+ if (np->abrt_msg[0] == M_ABORT)
+ break;
+
+ /*
+ ** If we sent an M_RESET, then a hardware reset has
+ ** been performed by the target.
+ ** - Reset everything to async 8 bit
+ ** - Tell ourselves to negotiate next time :-)
+ ** - Prepare to clear all disconnected CCBs for
+ ** this target from our task list (lun=task=-1)
+ */
+ lun = -1;
+ task = -1;
+ if (np->abrt_msg[0] == M_RESET) {
+ tp->sval = 0;
+ tp->wval = np->rv_scntl3;
+ tp->uval = np->rv_scntl4;
+ ncr_set_sync_wide_status(np, target);
+ ncr_negotiate(np, tp);
+ }
+
+ /*
+ ** Otherwise, check for the LUN and TASK(s)
+ ** concerned by the cancellation.
+ ** If it is not ABORT_TAG then it is CLEAR_QUEUE
+ ** or an ABORT message :-)
+ */
+ else {
+ lun = np->abrt_msg[0] & 0x3f;
+ if (np->abrt_msg[1] == M_ABORT_TAG)
+ task = np->abrt_msg[2];
+ }
+
+ /*
+ ** Complete all the CCBs the device should have
+ ** aborted due to our 'kiss of death' message.
+ */
+ (void) ncr_clear_tasks(np, HS_ABORTED, target, lun, task);
+ break;
+
+ /*
+ ** We have performed an auto-sense that succeeded.
+ ** If the device reports a UNIT ATTENTION condition
+ ** due to a RESET condition, we must complete all
+ ** disconnected CCBs for this unit since the device
+ ** shall have thrown them away.
+ ** Since I haven't time to guess what the specs are
+ ** expecting for other UNIT ATTENTION conditions, I
+ ** decided to only care about RESET conditions. :)
+ */
+ case SIR_AUTO_SENSE_DONE:
+ cp = ncr_ccb_from_dsa(np, INL (nc_dsa));
+ if (!cp)
+ break;
+ memcpy(cp->cmd->sense_buffer, cp->sense_buf,
+ sizeof(cp->cmd->sense_buffer));
+ p = &cp->cmd->sense_buffer[0];
+
+ if (p[0] != 0x70 || p[2] != 0x6 || p[12] != 0x29)
+ break;
+#if 0
+ (void) ncr_clear_tasks(np, HS_RESET, cp->target, cp->lun, -1);
+#endif
+ break;
+ }
+
+ /*
+ ** Print to the log the message we intend to send.
+ */
+ if (num == SIR_TARGET_SELECTED) {
+ PRINT_TARGET(np, target);
+ ncr_printl_hex("control msgout:", np->abrt_msg,
+ np->abrt_tbl.size);
+ np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
+ }
+
+ /*
+ ** Let the SCRIPTS processor continue.
+ */
+ OUTONB_STD ();
+}
+
+
+/*==========================================================
+**
+ ** Gérard's alchemy:) that deals with the data
+** pointer for both MDP and the residual calculation.
+**
+**==========================================================
+**
+** I didn't want to bloat the code by more than 200
+** lignes for the handling of both MDP and the residual.
+** This has been achieved by using a data pointer
+** representation consisting of an index in the data
+** array (dp_sg) and a negative offset (dp_ofs) that
+** have the following meaning:
+**
+** - dp_sg = MAX_SCATTER
+** we are at the end of the data script.
+** - dp_sg < MAX_SCATTER
+** dp_sg points to the next entry of the scatter array
+** we want to transfer.
+** - dp_ofs < 0
+** dp_ofs represents the residual bytes of the
+** previous scatter entry that we will send first.
+** - dp_ofs = 0
+** no residual to send first.
+**
+** The function ncr_evaluate_dp() accepts an arbitrary
+** offset (basically from the MDP message) and returns
+** the corresponding values of dp_sg and dp_ofs.
+**
+**----------------------------------------------------------
+*/
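+/*
+** Illustrative example (added; numbers are made up): suppose
+** MAX_SCATTER is 128 and a command uses 3 scatter entries of 4 KB
+** each, stored in cp->phys.data[125..127].  A data pointer standing
+** 1 KB inside the middle entry (index 126) is represented as
+** dp_sg = 127, dp_ofs = -3072: the last 3072 bytes of entry 126 are
+** to be transferred first, then the script resumes at entry 127.
+*/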
+
+static int ncr_evaluate_dp(ncb_p np, ccb_p cp, u_int32 scr, int *ofs)
+{
+ u_int32 dp_scr;
+ int dp_ofs, dp_sg, dp_sgmin;
+ int tmp;
+ struct pm_ctx *pm;
+
+ /*
+ ** Compute the resulting data pointer in terms of a script
+ ** address within some DATA script and a signed byte offset.
+ */
+ dp_scr = scr;
+ dp_ofs = *ofs;
+ if (dp_scr == NCB_SCRIPT_PHYS (np, pm0_data))
+ pm = &cp->phys.pm0;
+ else if (dp_scr == NCB_SCRIPT_PHYS (np, pm1_data))
+ pm = &cp->phys.pm1;
+ else
+ pm = 0;
+
+ if (pm) {
+ dp_scr = scr_to_cpu(pm->ret);
+ dp_ofs -= scr_to_cpu(pm->sg.size);
+ }
+
+ /*
+ ** Deduce the index of the sg entry.
+ ** Keep track of the index of the first valid entry.
+ ** If result is dp_sg = MAX_SCATTER, then we are at the
+ ** end of the data and vice-versa.
+ */
+ tmp = scr_to_cpu(cp->phys.header.goalp);
+ dp_sg = MAX_SCATTER;
+ if (dp_scr != tmp)
+ dp_sg -= (tmp - 8 - (int)dp_scr) / (SCR_SG_SIZE*4);
+ dp_sgmin = MAX_SCATTER - cp->segments;
+
+ /*
+ ** Move to the sg entry the data pointer belongs to.
+ **
+ ** If we are inside the data area, we expect the result to be:
+ **
+ ** Either,
+ ** dp_ofs = 0 and dp_sg is the index of the sg entry
+ ** the data pointer belongs to (or the end of the data)
+ ** Or,
+ ** dp_ofs < 0 and dp_sg is the index of the sg entry
+ ** the data pointer belongs to + 1.
+ */
+ if (dp_ofs < 0) {
+ int n;
+ while (dp_sg > dp_sgmin) {
+ --dp_sg;
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ n = dp_ofs + (tmp & 0xffffff);
+ if (n > 0) {
+ ++dp_sg;
+ break;
+ }
+ dp_ofs = n;
+ }
+ }
+ else if (dp_ofs > 0) {
+ while (dp_sg < MAX_SCATTER) {
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ dp_ofs -= (tmp & 0xffffff);
+ ++dp_sg;
+ if (dp_ofs <= 0)
+ break;
+ }
+ }
+
+ /*
+ ** Make sure the data pointer is inside the data area.
+ ** If not, return some error.
+ */
+ if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
+ goto out_err;
+ else if (dp_sg > MAX_SCATTER || (dp_sg == MAX_SCATTER && dp_ofs > 0))
+ goto out_err;
+
+ /*
+ ** Save the extreme pointer if needed.
+ */
+ if (dp_sg > cp->ext_sg ||
+ (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
+ cp->ext_sg = dp_sg;
+ cp->ext_ofs = dp_ofs;
+ }
+
+ /*
+ ** Return data.
+ */
+ *ofs = dp_ofs;
+ return dp_sg;
+
+out_err:
+ return -1;
+}
+
+/*==========================================================
+**
+** ncr chip handler for MODIFY DATA POINTER MESSAGE
+**
+**==========================================================
+**
+** We also call this function on IGNORE WIDE RESIDUE
+** messages that do not match a SWIDE full condition.
+** Btw, we assume in that situation that such a message
+** is equivalent to a MODIFY DATA POINTER (offset=-1).
+**
+**----------------------------------------------------------
+*/
+
+static void ncr_modify_dp(ncb_p np, tcb_p tp, ccb_p cp, int ofs)
+{
+ int dp_ofs = ofs;
+ u_int32 dp_scr = INL (nc_temp);
+ u_int32 dp_ret;
+ u_int32 tmp;
+ u_char hflags;
+ int dp_sg;
+ struct pm_ctx *pm;
+
+ /*
+ ** Not supported for auto_sense;
+ */
+ if (cp->host_flags & HF_AUTO_SENSE)
+ goto out_reject;
+
+ /*
+ ** Apply our alchemy:) (see comments in ncr_evaluate_dp()),
+ ** to the resulting data pointer.
+ */
+ dp_sg = ncr_evaluate_dp(np, cp, dp_scr, &dp_ofs);
+ if (dp_sg < 0)
+ goto out_reject;
+
+ /*
+ ** And our alchemy:) allows us to easily calculate the data
+ ** script address we want to return for the next data phase.
+ */
+ dp_ret = cpu_to_scr(cp->phys.header.goalp);
+ dp_ret = dp_ret - 8 - (MAX_SCATTER - dp_sg) * (SCR_SG_SIZE*4);
+
+ /*
+ ** If offset / scatter entry is zero, we do not need
+ ** a context for the new current data pointer.
+ */
+ if (dp_ofs == 0) {
+ dp_scr = dp_ret;
+ goto out_ok;
+ }
+
+ /*
+ ** Get a context for the new current data pointer.
+ */
+ hflags = INB (HF_PRT);
+
+ if (hflags & HF_DP_SAVED)
+ hflags ^= HF_ACT_PM;
+
+ if (!(hflags & HF_ACT_PM)) {
+ pm = &cp->phys.pm0;
+ dp_scr = NCB_SCRIPT_PHYS (np, pm0_data);
+ }
+ else {
+ pm = &cp->phys.pm1;
+ dp_scr = NCB_SCRIPT_PHYS (np, pm1_data);
+ }
+
+ hflags &= ~(HF_DP_SAVED);
+
+ OUTB (HF_PRT, hflags);
+
+ /*
+ ** Set up the new current data pointer.
+ ** ofs < 0 there, and for the next data phase, we
+ ** want to transfer part of the data of the sg entry
+ ** corresponding to index dp_sg-1 prior to returning
+ ** to the main data script.
+ */
+ pm->ret = cpu_to_scr(dp_ret);
+ tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
+ tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
+ pm->sg.addr = cpu_to_scr(tmp);
+ pm->sg.size = cpu_to_scr(-dp_ofs);
+
+out_ok:
+ OUTL (nc_temp, dp_scr);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ return;
+
+out_reject:
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+}
+
+
+/*==========================================================
+**
+** ncr chip calculation of the data residual.
+**
+**==========================================================
+**
+** As I used to say, the requirement of data residual
+** in SCSI is broken, useless and cannot be achieved
+** without huge complexity.
+** But most OSes and even the official CAM require it.
+** When stupidity happens to be so widely spread inside
+** a community, it gets hard to convince.
+**
+** Anyway, I don't care, since I am not going to use
+** any software that considers this data residual as
+** a relevant information. :)
+**
+**----------------------------------------------------------
+*/
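+/*
+** Added note on the actual computation below: ncr_evaluate_dp()
+** yields the extreme data pointer (cp->ext_sg, cp->ext_ofs), and the
+** residual is then -ext_ofs plus the full sizes of all scatter
+** entries from ext_sg up to the end of the array.  For instance
+** (made-up numbers, 4 KB entries ending at index 127): ext_sg = 126
+** and ext_ofs = -1024 means 1024 bytes of entry 125 plus entries 126
+** and 127 remain, i.e. 1024 + 4096 + 4096 = 9216 bytes of residual.
+*/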
+
+static int ncr_compute_residual(ncb_p np, ccb_p cp)
+{
+ int dp_sg, dp_sgmin, tmp;
+ int resid=0;
+ int dp_ofs = 0;
+
+ /*
+ * Check for some data lost or just thrown away.
+ * We are not required to be quite accurate in this
+ * situation. Btw, if we are odd for output and the
+ * device claims some more data, it may well happen
+ * that our residual is zero. :-)
+ */
+ if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
+ if (cp->xerr_status & XE_EXTRA_DATA)
+ resid -= cp->extra_bytes;
+ if (cp->xerr_status & XE_SODL_UNRUN)
+ ++resid;
+ if (cp->xerr_status & XE_SWIDE_OVRUN)
+ --resid;
+ }
+
+
+ /*
+ ** If SCRIPTS reaches its goal point, then
+ ** there is no additional residual.
+ */
+ if (cp->phys.header.lastp == cp->phys.header.goalp)
+ return resid;
+
+ /*
+ ** If the last data pointer is data_io (direction
+ ** unknown), then no data transfer should have
+ ** taken place.
+ */
+ if (cp->phys.header.lastp == NCB_SCRIPTH_PHYS (np, data_io))
+ return cp->data_len;
+
+ /*
+ ** If no data transfer occurs, or if the data
+ ** pointer is weird, return full residual.
+ */
+ if (cp->startp == cp->phys.header.lastp ||
+ ncr_evaluate_dp(np, cp, scr_to_cpu(cp->phys.header.lastp),
+ &dp_ofs) < 0) {
+ return cp->data_len;
+ }
+
+ /*
+ ** We are now fully comfortable with the computation
+ ** of the data residual (2's complement).
+ */
+ dp_sgmin = MAX_SCATTER - cp->segments;
+ resid = -cp->ext_ofs;
+ for (dp_sg = cp->ext_sg; dp_sg < MAX_SCATTER; ++dp_sg) {
+ tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
+ resid += (tmp & 0xffffff);
+ }
+
+ /*
+ ** Hopefully, the result is not too wrong.
+ */
+ return resid;
+}
+
+/*==========================================================
+**
+** Print out the content of a SCSI message.
+**
+**==========================================================
+*/
+
+static int ncr_show_msg (u_char * msg)
+{
+ u_char i;
+ printk ("%x",*msg);
+ if (*msg==M_EXTENDED) {
+ for (i=1;i<8;i++) {
+ if (i-1>msg[1]) break;
+ printk ("-%x",msg[i]);
+ };
+ return (i+1);
+ } else if ((*msg & 0xf0) == 0x20) {
+ printk ("-%x",msg[1]);
+ return (2);
+ };
+ return (1);
+}
+
+static void ncr_print_msg (ccb_p cp, char *label, u_char *msg)
+{
+ if (cp)
+ PRINT_ADDR(cp->cmd);
+ if (label)
+ printk ("%s: ", label);
+
+ (void) ncr_show_msg (msg);
+ printk (".\n");
+}
+
+/*===================================================================
+**
+** Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
+**
+**===================================================================
+**
+** Everything you always wanted to know about transfer mode negotiation ...
+**
+** We try to negotiate sync and wide transfer only after
+** a successful INQUIRY command. We look at byte 7 of the
+** INQUIRY data to determine the capabilities of the target.
+**
+** When we try to negotiate, we append the negotiation message
+** to the identify and (maybe) simple tag message.
+** The host status field is set to HS_NEGOTIATE to mark this
+** situation.
+**
+** If the target doesn't answer this message immediately
+** (as required by the standard), the SIR_NEGO_FAILED interrupt
+** will be raised eventually.
+** The handler removes the HS_NEGOTIATE status, and sets the
+** negotiated value to the default (async / nowide).
+**
+** If we receive a matching answer immediately, we check it
+** for validity, and set the values.
+**
+** If we receive a Reject message immediately, we assume the
+** negotiation has failed, and fall back to standard values.
+**
+** If we receive a negotiation message while not in HS_NEGOTIATE
+** state, it's a target initiated negotiation. We prepare a
+** (hopefully) valid answer, set our parameters, and send back
+** this answer to the target.
+**
+** If the target doesn't fetch the answer (no message out phase),
+** we assume the negotiation has failed, and fall back to default
+** settings (SIR_NEGO_PROTO interrupt).
+**
+** When we set the values, we adjust them in all ccbs belonging
+** to this target, in the controller's register, and in the "phys"
+** field of the controller's struct ncb.
+**
+**---------------------------------------------------------------------
+*/
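+/*
+** Added illustration (standard SCSI extended-message framing, which
+** the msgout construction below follows): an SDTR request for a
+** 100 ns period (factor 0x19) and offset 15 is the 5-byte message
+**	{ M_EXTENDED, 3, M_X_SYNC_REQ, 0x19, 0x0f }.
+** WDTR and PPR use the same framing with message lengths 2 and 6
+** respectively, as can be seen in the msgout[] setups below.
+*/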
+
+/*==========================================================
+**
+** ncr chip handler for SYNCHRONOUS DATA TRANSFER
+** REQUEST (SDTR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+static void ncr_sync_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ u_char scntl3, scntl4;
+ u_char chg, ofs, per, fak;
+
+ /*
+ ** Synchronous request message received.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "sync msg in", np->msgin);
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[4];
+ if (ofs==0) per=255;
+
+ /*
+ ** if target sends SDTR message,
+ ** it CAN transfer sync.
+ */
+
+ if (ofs)
+ tp->inq_byte7 |= INQ7_SYNC;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ if (per < tp->minsync)
+ {chg = 1; per = tp->minsync;}
+ if (ofs > tp->maxoffs)
+ {chg = 1; ofs = tp->maxoffs;}
+
+ /*
+ ** Check against controller limits.
+ */
+ fak = 7;
+ scntl3 = 0;
+ scntl4 = 0;
+ if (ofs != 0) {
+ ncr_getsync(np, per, &fak, &scntl3);
+ if (fak > 7) {
+ chg = 1;
+ ofs = 0;
+ }
+ }
+ if (ofs == 0) {
+ fak = 7;
+ per = 0;
+ scntl3 = 0;
+ scntl4 = 0;
+ tp->minsync = 0;
+ }
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printk ("sync: per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n",
+ per, scntl3, scntl4, ofs, fak, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+ case NS_SYNC:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0);
+ else
+ ncr_setsync (np, cp, scntl3, ofs, scntl4);
+
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request. Set value and
+ ** prepare an answer message
+ */
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0);
+ else
+ ncr_setsync (np, cp, scntl3, ofs, scntl4);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 3;
+ np->msgout[2] = M_X_SYNC_REQ;
+ np->msgout[3] = per;
+ np->msgout[4] = ofs;
+
+ cp->nego_status = NS_SYNC;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "sync msgout", np->msgout);
+ }
+
+ np->msgin [0] = M_NOOP;
+
+ if (!ofs)
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ else
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sdtr_resp));
+}
+
+/*==========================================================
+**
+** ncr chip handler for WIDE DATA TRANSFER REQUEST
+** (WDTR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+static void ncr_wide_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ u_char chg, wide;
+
+ /*
+ ** Wide request message received.
+ */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "wide msgin", np->msgin);
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ wide = np->msgin[3];
+
+ /*
+ ** if target sends WDTR message,
+ ** it CAN transfer wide.
+ */
+
+ if (wide)
+ tp->inq_byte7 |= INQ7_WIDE16;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (wide > tp->usrwide)
+ {chg = 1; wide = tp->usrwide;}
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printk ("wide: wide=%d chg=%d.\n", wide, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+ case NS_WIDE:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ ncr_setwide (np, cp, 0, 1);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+ ncr_setwide (np, cp, wide, 1);
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ };
+ return;
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request, set value and
+ ** prepare an answer message
+ */
+
+ ncr_setwide (np, cp, wide, 1);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 2;
+ np->msgout[2] = M_X_WIDE_REQ;
+ np->msgout[3] = wide;
+
+ np->msgin [0] = M_NOOP;
+
+ cp->nego_status = NS_WIDE;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "wide msgout", np->msgout);
+ }
+
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, wdtr_resp));
+}
+/*==========================================================
+**
+** ncr chip handler for PARALLEL PROTOCOL REQUEST
+** (PPR) message.
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+static void ncr_ppr_nego(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ u_char scntl3, scntl4;
+ u_char chg, ofs, per, fak, wth, dt;
+
+ /*
+ ** PPR message received.
+ */
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "ppr msg in", np->msgin);
+ };
+
+ /*
+ ** get requested values.
+ */
+
+ chg = 0;
+ per = np->msgin[3];
+ ofs = np->msgin[5];
+ wth = np->msgin[6];
+ dt = np->msgin[7];
+ if (ofs==0) per=255;
+
+ /*
+ ** if target sends sync (wide),
+ ** it CAN transfer sync (wide).
+ */
+
+ if (ofs)
+ tp->inq_byte7 |= INQ7_SYNC;
+
+ if (wth)
+ tp->inq_byte7 |= INQ7_WIDE16;
+
+ /*
+ ** check values against driver limits.
+ */
+
+ if (wth > tp->usrwide)
+ {chg = 1; wth = tp->usrwide;}
+ if (per < np->minsync)
+ {chg = 1; per = np->minsync;}
+ if (per < tp->minsync)
+ {chg = 1; per = tp->minsync;}
+ if (ofs > tp->maxoffs)
+ {chg = 1; ofs = tp->maxoffs;}
+
+ /*
+ ** Check against controller limits.
+ */
+ fak = 7;
+ scntl3 = 0;
+ scntl4 = 0;
+ if (ofs != 0) {
+ scntl4 = dt ? 0x80 : 0;
+ ncr_getsync(np, per, &fak, &scntl3);
+ if (fak > 7) {
+ chg = 1;
+ ofs = 0;
+ }
+ }
+ if (ofs == 0) {
+ fak = 7;
+ per = 0;
+ scntl3 = 0;
+ scntl4 = 0;
+ tp->minsync = 0;
+ }
+
+ /*
+ ** If target responds with Ultra 3 speed
+ ** but narrow or not DT, reject.
+ ** If target responds with DT request
+ ** but not Ultra3 speeds, reject message,
+ ** reset min sync for target to 0x0A and
+ ** set flags to re-negotiate.
+ */
+
+ if ((per == 0x09) && ofs && (!wth || !dt))
+ chg = 1;
+ else if (( (per > 0x09) && dt) )
+ chg = 2;
+
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ PRINT_ADDR(cp->cmd);
+ printk ("ppr: wth=%d per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n",
+ wth, per, scntl3, scntl4, ofs, fak, chg);
+ }
+
+ if (INB (HS_PRT) == HS_NEGOTIATE) {
+ OUTB (HS_PRT, HS_BUSY);
+ switch (cp->nego_status) {
+ case NS_PPR:
+ /*
+ ** This was an answer message
+ */
+ if (chg) {
+ /*
+ ** Answer wasn't acceptable.
+ */
+ if (chg == 2) {
+ /* Send message reject and reset flags for
+ ** host to re-negotiate with min period 0x0A.
+ */
+ tp->minsync = 0x0A;
+ tp->period = 0;
+ tp->widedone = 0;
+ }
+ ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ } else {
+ /*
+ ** Answer is ok.
+ */
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth);
+ else
+ ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth);
+
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+
+ };
+ return;
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ break;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+ };
+ };
+
+ /*
+ ** It was a request. Set value and
+ ** prepare an answer message
+ **
+ ** If narrow or not DT and requesting Ultra3
+ ** slow the bus down and force ST. If not
+ ** requesting Ultra3, force ST.
+ ** Max offset is 31=0x1f if ST mode.
+ */
+
+ if ((per == 0x09) && ofs && (!wth || !dt)) {
+ per = 0x0A;
+ dt = 0;
+ ofs &= 0x1f;
+ }
+ else if ( (per > 0x09) && dt) {
+ dt = 0;
+ ofs &= 0x1f;
+ }
+
+ if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
+ ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth);
+ else
+ ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth);
+
+ np->msgout[0] = M_EXTENDED;
+ np->msgout[1] = 6;
+ np->msgout[2] = M_X_PPR_REQ;
+ np->msgout[3] = per;
+ np->msgout[4] = 0;
+ np->msgout[5] = ofs;
+ np->msgout[6] = wth;
+ np->msgout[7] = dt;
+
+ cp->nego_status = NS_PPR;
+
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ ncr_print_msg(cp, "ppr msgout", np->msgout);
+ }
+
+ np->msgin [0] = M_NOOP;
+
+ if (!ofs)
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ else
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, ppr_resp));
+}
+
+
+
+/*
+** Reset SYNC or WIDE to default settings.
+** Called when a negotiation does not succeed either
+** on rejection or on protocol error.
+*/
+static void ncr_nego_default(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ /*
+ ** any error in negotiation:
+ ** fall back to default mode.
+ */
+ switch (cp->nego_status) {
+
+ case NS_SYNC:
+ ncr_setsync (np, cp, 0, 0xe0, 0);
+ break;
+
+ case NS_WIDE:
+ ncr_setwide (np, cp, 0, 0);
+ break;
+
+ case NS_PPR:
+ /*
+ * ppr_negotiation is set to 1 on the first ppr nego command.
+ * If ppr is successful, it is reset to 2.
+ * If unsuccessful it is reset to 0.
+ */
+ if (DEBUG_FLAGS & DEBUG_NEGO) {
+ tcb_p tp=&np->target[cp->target];
+ u_char factor, offset, width;
+
+ ncr_get_xfer_info ( np, tp, &factor, &offset, &width);
+
+ printk("Current factor %d offset %d width %d\n",
+ factor, offset, width);
+ }
+ if (tp->ppr_negotiation == 2)
+ ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0);
+ else if (tp->ppr_negotiation == 1) {
+
+ /* First ppr command has received an M_REJECT.
+ * Do not change the existing wide/sync parameter
+ * values (async/narrow if this is the first nego;
+ * may be different if the target initiates the nego.).
+ */
+ tp->ppr_negotiation = 0;
+ }
+ else
+ {
+ tp->ppr_negotiation = 0;
+ ncr_setwide (np, cp, 0, 0);
+ }
+ break;
+ };
+ np->msgin [0] = M_NOOP;
+ np->msgout[0] = M_NOOP;
+ cp->nego_status = 0;
+}
+
+/*==========================================================
+**
+** ncr chip handler for MESSAGE REJECT received for
+** a WIDE or SYNCHRONOUS negotiation.
+**
+** clear the PPR negotiation flag, all future nego.
+** will be SDTR and WDTR
+**
+**==========================================================
+**
+** Read comments above.
+**
+**----------------------------------------------------------
+*/
+static void ncr_nego_rejected(ncb_p np, tcb_p tp, ccb_p cp)
+{
+ ncr_nego_default(np, tp, cp);
+ OUTB (HS_PRT, HS_BUSY);
+}
+
+
+/*==========================================================
+**
+**
+** ncr chip exception handler for programmed interrupts.
+**
+**
+**==========================================================
+*/
+
+void ncr_int_sir (ncb_p np)
+{
+ u_char num = INB (nc_dsps);
+ u_long dsa = INL (nc_dsa);
+ ccb_p cp = ncr_ccb_from_dsa(np, dsa);
+ u_char target = INB (nc_sdid) & 0x0f;
+ tcb_p tp = &np->target[target];
+ int tmp;
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("I#%d", num);
+
+ switch (num) {
+ /*
+ ** See comments in the SCRIPTS code.
+ */
+#ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR
+ case SIR_DUMMY_INTERRUPT:
+ goto out;
+#endif
+
+ /*
+ ** The C code is currently trying to recover from something.
+ ** Typically, the user wants to abort some command.
+ */
+ case SIR_SCRIPT_STOPPED:
+ case SIR_TARGET_SELECTED:
+ case SIR_ABORT_SENT:
+ case SIR_AUTO_SENSE_DONE:
+ ncr_sir_task_recovery(np, num);
+ return;
+ /*
+ ** The device didn't go to MSG OUT phase after having
+ ** been selected with ATN. We do not want to handle
+ ** that.
+ */
+ case SIR_SEL_ATN_NO_MSG_OUT:
+ printk ("%s:%d: No MSG OUT phase after selection with ATN.\n",
+ ncr_name (np), target);
+ goto out_stuck;
+ /*
+ ** The device didn't switch to MSG IN phase after
+ ** having reselected the initiator.
+ */
+ case SIR_RESEL_NO_MSG_IN:
+ /*
+ ** After reselection, the device sent a message that wasn't
+ ** an IDENTIFY.
+ */
+ case SIR_RESEL_NO_IDENTIFY:
+ /*
+ ** If devices reselecting without sending an IDENTIFY
+ ** message still exist, this should help.
+ ** We just assume lun=0, 1 CCB, no tag.
+ */
+ if (tp->l0p) {
+ OUTL (nc_dsa, scr_to_cpu(tp->l0p->tasktbl[0]));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, resel_go));
+ return;
+ }
+ /*
+ ** The device reselected a LUN we do not know of.
+ */
+ case SIR_RESEL_BAD_LUN:
+ np->msgout[0] = M_RESET;
+ goto out;
+ /*
+ ** The device reselected for an untagged nexus and we
+ ** haven't any.
+ */
+ case SIR_RESEL_BAD_I_T_L:
+ np->msgout[0] = M_ABORT;
+ goto out;
+ /*
+ ** The device reselected for a tagged nexus that we do not
+ ** have.
+ */
+ case SIR_RESEL_BAD_I_T_L_Q:
+ np->msgout[0] = M_ABORT_TAG;
+ goto out;
+ /*
+ ** The SCRIPTS let us know that the device has grabbed
+ ** our message and will abort the job.
+ */
+ case SIR_RESEL_ABORTED:
+ np->lastmsg = np->msgout[0];
+ np->msgout[0] = M_NOOP;
+ printk ("%s:%d: message %x sent on bad reselection.\n",
+ ncr_name (np), target, np->lastmsg);
+ goto out;
+ /*
+ ** The SCRIPTS let us know that a message has been
+ ** successfully sent to the device.
+ */
+ case SIR_MSG_OUT_DONE:
+ np->lastmsg = np->msgout[0];
+ np->msgout[0] = M_NOOP;
+ /* Should we really care about that? */
+ if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
+ if (cp) {
+ cp->xerr_status &= ~XE_PARITY_ERR;
+ if (!cp->xerr_status)
+ OUTOFFB (HF_PRT, HF_EXT_ERR);
+ }
+ }
+ goto out;
+ /*
+ ** The device didn't send a GOOD SCSI status.
+ ** We may have some work to do prior to allowing
+ ** the SCRIPTS processor to continue.
+ */
+ case SIR_BAD_STATUS:
+ if (!cp)
+ goto out;
+ ncr_sir_to_redo(np, num, cp);
+ return;
+ /*
+ ** We are asked by the SCRIPTS to prepare a
+ ** REJECT message.
+ */
+ case SIR_REJECT_TO_SEND:
+ ncr_print_msg(cp, "M_REJECT to send for ", np->msgin);
+ np->msgout[0] = M_REJECT;
+ goto out;
+ /*
+ ** We have been ODD at the end of a DATA IN
+ ** transfer and the device didn't send a
+ ** IGNORE WIDE RESIDUE message.
+ ** It is a data overrun condition.
+ */
+ case SIR_SWIDE_OVERRUN:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_SWIDE_OVRUN;
+ }
+ goto out;
+ /*
+ ** We have been ODD at the end of a DATA OUT
+ ** transfer.
+ ** It is a data underrun condition.
+ */
+ case SIR_SODL_UNDERRUN:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_SODL_UNRUN;
+ }
+ goto out;
+ /*
+ ** The device wants us to transfer more data than
+ ** expected or in the wrong direction.
+ ** The number of extra bytes is in scratcha.
+ ** It is a data overrun condition.
+ */
+ case SIR_DATA_OVERRUN:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_EXTRA_DATA;
+ cp->extra_bytes += INL (nc_scratcha);
+ }
+ goto out;
+ /*
+ ** The device switched to an illegal phase (4/5).
+ */
+ case SIR_BAD_PHASE:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_BAD_PHASE;
+ }
+ goto out;
+ /*
+ ** We received a message.
+ */
+ case SIR_MSG_RECEIVED:
+ if (!cp)
+ goto out_stuck;
+ switch (np->msgin [0]) {
+ /*
+ ** We received an extended message.
+ ** We handle MODIFY DATA POINTER, SDTR, WDTR, PPR
+ ** and reject all other extended messages.
+ */
+ case M_EXTENDED:
+ switch (np->msgin [2]) {
+ case M_X_MODIFY_DP:
+ if (DEBUG_FLAGS & DEBUG_POINTER)
+ ncr_print_msg(cp,"modify DP",np->msgin);
+ tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
+ (np->msgin[5]<<8) + (np->msgin[6]);
+ ncr_modify_dp(np, tp, cp, tmp);
+ return;
+ case M_X_SYNC_REQ:
+ ncr_sync_nego(np, tp, cp);
+ return;
+ case M_X_WIDE_REQ:
+ ncr_wide_nego(np, tp, cp);
+ return;
+ case M_X_PPR_REQ:
+ ncr_ppr_nego(np, tp, cp);
+ return;
+ default:
+ goto out_reject;
+ }
+ break;
+ /*
+ ** We received a 1/2 byte message not handled by SCRIPTS.
+ ** We are only expecting MESSAGE REJECT and IGNORE WIDE
+ ** RESIDUE messages that haven't been anticipated by
+ ** SCRIPTS on SWIDE full condition. Unanticipated IGNORE
+ ** WIDE RESIDUE messages are aliased as MODIFY DP (-1).
+ */
+ case M_IGN_RESIDUE:
+ if (DEBUG_FLAGS & DEBUG_POINTER)
+ ncr_print_msg(cp,"ign wide residue", np->msgin);
+ ncr_modify_dp(np, tp, cp, -1);
+ return;
+ case M_REJECT:
+ if (INB (HS_PRT) == HS_NEGOTIATE)
+ ncr_nego_rejected(np, tp, cp);
+ else {
+ PRINT_ADDR(cp->cmd);
+ printk ("M_REJECT received (%x:%x).\n",
+ scr_to_cpu(np->lastmsg), np->msgout[0]);
+ }
+ goto out_clrack;
+ break;
+ default:
+ goto out_reject;
+ }
+ break;
+ /*
+ ** We received an unknown message.
+ ** Ignore all MSG IN phases and reject it.
+ */
+ case SIR_MSG_WEIRD:
+ ncr_print_msg(cp, "WEIRD message received", np->msgin);
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_weird));
+ return;
+ /*
+ ** Negotiation failed.
+ ** Target does not send us the reply.
+ ** Remove the HS_NEGOTIATE status.
+ */
+ case SIR_NEGO_FAILED:
+ OUTB (HS_PRT, HS_BUSY);
+ /*
+ ** Negotiation failed.
+ ** Target does not want answer message.
+ */
+ case SIR_NEGO_PROTO:
+ ncr_nego_default(np, tp, cp);
+ goto out;
+ };
+
+out:
+ OUTONB_STD ();
+ return;
+out_reject:
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
+ return;
+out_clrack:
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
+ return;
+out_stuck:
+ return;
+}
+
+
+/*==========================================================
+**
+**
+** Acquire a control block
+**
+**
+**==========================================================
+*/
+
+static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln)
+{
+ tcb_p tp = &np->target[tn];
+ lcb_p lp = ncr_lp(np, tp, ln);
+ u_short tag = NO_TAG;
+ XPT_QUEHEAD *qp;
+ ccb_p cp = (ccb_p) 0;
+
+ /*
+ ** Allocate a new CCB if needed.
+ */
+ if (xpt_que_empty(&np->free_ccbq))
+ (void) ncr_alloc_ccb(np);
+
+ /*
+ ** Look for a free CCB
+ */
+ qp = xpt_remque_head(&np->free_ccbq);
+ if (!qp)
+ goto out;
+ cp = xpt_que_entry(qp, struct ccb, link_ccbq);
+
+ /*
+ ** If the LCB is not yet available and we already
+ ** have queued a CCB for a LUN without LCB,
+ ** give up. Otherwise all is fine. :-)
+ */
+ if (!lp) {
+ if (xpt_que_empty(&np->b0_ccbq))
+ xpt_insque_head(&cp->link_ccbq, &np->b0_ccbq);
+ else
+ goto out_free;
+ } else {
+ /*
+ ** Tune tag mode if asked by user.
+ */
+ if (lp->queuedepth != lp->numtags) {
+ ncr_setup_tags(np, tn, ln);
+ }
+
+ /*
+ ** Get a tag for this nexus if required.
+ ** Keep from using more tags than we can handle.
+ */
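+ /*
+ ** Added note: lp->cb_tags[] works as a circular ring of free
+ ** tag numbers; tags are handed out at index ia_tag here and
+ ** given back at index if_tag in ncr_free_ccb(), both indices
+ ** wrapping at MAX_TAGS.
+ */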
+ if (lp->usetags) {
+ if (lp->busyccbs < lp->maxnxs) {
+ tag = lp->cb_tags[lp->ia_tag];
+ ++lp->ia_tag;
+ if (lp->ia_tag == MAX_TAGS)
+ lp->ia_tag = 0;
+ cp->tags_si = lp->tags_si;
+ ++lp->tags_sum[cp->tags_si];
+ }
+ else
+ goto out_free;
+ }
+
+ /*
+ ** Put the CCB in the LUN wait queue and
+ ** count it as busy.
+ */
+ xpt_insque_tail(&cp->link_ccbq, &lp->wait_ccbq);
+ ++lp->busyccbs;
+ }
+
+ /*
+ ** Remember all information needed to free this CCB.
+ */
+ cp->to_abort = 0;
+ cp->tag = tag;
+ cp->target = tn;
+ cp->lun = ln;
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_LUN(np, tn, ln);
+ printk ("ccb @%p using tag %d.\n", cp, tag);
+ }
+
+out:
+ return cp;
+out_free:
+ xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+ return (ccb_p) 0;
+}
+
+/*==========================================================
+**
+**
+** Release one control block
+**
+**
+**==========================================================
+*/
+
+static void ncr_free_ccb (ncb_p np, ccb_p cp)
+{
+ tcb_p tp = &np->target[cp->target];
+ lcb_p lp = ncr_lp(np, tp, cp->lun);
+
+ if (DEBUG_FLAGS & DEBUG_TAGS) {
+ PRINT_LUN(np, cp->target, cp->lun);
+ printk ("ccb @%p freeing tag %d.\n", cp, cp->tag);
+ }
+
+ /*
+ ** If a lun control block is available, release
+ ** the task slot and the tag, if any.
+ ** Decrement counters.
+ */
+ if (lp) {
+ if (cp->tag != NO_TAG) {
+ lp->cb_tags[lp->if_tag++] = cp->tag;
+ if (lp->if_tag == MAX_TAGS)
+ lp->if_tag = 0;
+ --lp->tags_sum[cp->tags_si];
+ lp->tasktbl[cp->tag] = cpu_to_scr(np->p_bad_i_t_l_q);
+ } else {
+ lp->tasktbl[0] = cpu_to_scr(np->p_bad_i_t_l);
+ }
+ --lp->busyccbs;
+ if (cp->queued) {
+ --lp->queuedccbs;
+ }
+ }
+
+ /*
+ ** Make this CCB available.
+ */
+ xpt_remque(&cp->link_ccbq);
+ xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+ cp -> host_status = HS_IDLE;
+ cp -> queued = 0;
+}
+
+/*------------------------------------------------------------------------
+** Allocate a CCB and initialize its fixed part.
+**------------------------------------------------------------------------
+**------------------------------------------------------------------------
+*/
+static ccb_p ncr_alloc_ccb(ncb_p np)
+{
+ ccb_p cp = 0;
+ int hcode;
+
+ /*
+ ** Allocate memory for this CCB.
+ */
+ cp = m_calloc_dma(sizeof(struct ccb), "CCB");
+ if (!cp)
+ return 0;
+
+ /*
+ ** Count it and initialize it.
+ */
+ np->actccbs++;
+
+ /*
+ ** Remember virtual and bus address of this ccb.
+ */
+ cp->p_ccb = vtobus(cp);
+
+ /*
+ ** Insert this ccb into the hashed list.
+ */
+ hcode = CCB_HASH_CODE(cp->p_ccb);
+ cp->link_ccbh = np->ccbh[hcode];
+ np->ccbh[hcode] = cp;
+
+ /*
+ ** Initialize the start and restart actions.
+ */
+ cp->phys.header.go.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle));
+ cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPTH_PHYS(np,bad_i_t_l));
+
+ /*
+ ** Initialize some other fields.
+ */
+ cp->phys.smsg_ext.addr = cpu_to_scr(NCB_PHYS(np, msgin[2]));
+
+ /*
+ ** Chain into wakeup list and free ccb queue.
+ */
+ cp->link_ccb = np->ccbc;
+ np->ccbc = cp;
+
+ xpt_insque_head(&cp->link_ccbq, &np->free_ccbq);
+
+ return cp;
+}
+
+/*------------------------------------------------------------------------
+** Look up a CCB from a DSA value.
+**------------------------------------------------------------------------
+**------------------------------------------------------------------------
+*/
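+/*
+** Added note: the DSA register holds the bus address of the CCB the
+** SCRIPTS processor is currently working on.  CCBs are chained into
+** small hash buckets keyed on that bus address when they are created
+** in ncr_alloc_ccb() above, so this lookup is a plain bucket walk.
+*/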
+static ccb_p ncr_ccb_from_dsa(ncb_p np, u_long dsa)
+{
+ int hcode;
+ ccb_p cp;
+
+ hcode = CCB_HASH_CODE(dsa);
+ cp = np->ccbh[hcode];
+ while (cp) {
+ if (cp->p_ccb == dsa)
+ break;
+ cp = cp->link_ccbh;
+ }
+
+ return cp;
+}
+
+/*==========================================================
+**
+**
+** Allocation of resources for Targets/Luns/Tags.
+**
+**
+**==========================================================
+*/
+
+
+/*------------------------------------------------------------------------
+** Target control block initialisation.
+**------------------------------------------------------------------------
+** This data structure is fully initialized after a SCSI command
+** has been successfully completed for this target.
+**------------------------------------------------------------------------
+*/
+static void ncr_init_tcb (ncb_p np, u_char tn)
+{
+ /*
+ ** Check some alignments required by the chip.
+ */
+ assert (( (offsetof(struct ncr_reg, nc_sxfer) ^
+ offsetof(struct tcb , sval )) &3) == 0);
+ assert (( (offsetof(struct ncr_reg, nc_scntl3) ^
+ offsetof(struct tcb , wval )) &3) == 0);
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)){
+ assert (( (offsetof(struct ncr_reg, nc_scntl4) ^
+ offsetof(struct tcb , uval )) &3) == 0);
+ }
+}
+
+/*------------------------------------------------------------------------
+** Lun control block allocation and initialization.
+**------------------------------------------------------------------------
+** This data structure is allocated and initialized after a SCSI
+** command has been successfully completed for this target/lun.
+**------------------------------------------------------------------------
+*/
+static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln)
+{
+ tcb_p tp = &np->target[tn];
+ lcb_p lp = ncr_lp(np, tp, ln);
+
+ /*
+ ** Already done, return.
+ */
+ if (lp)
+ return lp;
+
+ /*
+ ** Initialize the target control block if not yet.
+ */
+ ncr_init_tcb(np, tn);
+
+ /*
+ ** Allocate the lcb bus address array.
+ ** Compute the bus address of this table.
+ */
+ if (ln && !tp->luntbl) {
+ int i;
+
+ tp->luntbl = m_calloc_dma(256, "LUNTBL");
+ if (!tp->luntbl)
+ goto fail;
+ for (i = 0 ; i < 64 ; i++)
+ tp->luntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun));
+ tp->b_luntbl = cpu_to_scr(vtobus(tp->luntbl));
+ }
+
+ /*
+ ** Allocate the table of pointers for LUN(s) > 0, if needed.
+ */
+ if (ln && !tp->lmp) {
+ tp->lmp = m_calloc(MAX_LUN * sizeof(lcb_p), "LMP");
+ if (!tp->lmp)
+ goto fail;
+ }
+
+ /*
+ ** Allocate the lcb.
+ ** Make it available to the chip.
+ */
+ lp = m_calloc_dma(sizeof(struct lcb), "LCB");
+ if (!lp)
+ goto fail;
+ if (ln) {
+ tp->lmp[ln] = lp;
+ tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
+ }
+ else {
+ tp->l0p = lp;
+ tp->b_lun0 = cpu_to_scr(vtobus(lp));
+ }
+
+ /*
+ ** Initialize the CCB queue headers.
+ */
+ xpt_que_init(&lp->busy_ccbq);
+ xpt_que_init(&lp->wait_ccbq);
+
+ /*
+ ** Set max CCBs to 1 and use the default task array
+ ** by default.
+ */
+ lp->maxnxs = 1;
+ lp->tasktbl = &lp->tasktbl_0;
+ lp->b_tasktbl = cpu_to_scr(vtobus(lp->tasktbl));
+ lp->tasktbl[0] = cpu_to_scr(np->p_notask);
+ lp->resel_task = cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag));
+
+ /*
+ ** Initialize command queuing control.
+ */
+ lp->busyccbs = 1;
+ lp->queuedccbs = 1;
+ lp->queuedepth = 1;
+fail:
+ return lp;
+}
+
+
+/*------------------------------------------------------------------------
+** Lun control block setup on INQUIRY data received.
+**------------------------------------------------------------------------
+** We only support WIDE, SYNC for targets and CMDQ for logical units.
+** This setup is done on each INQUIRY since we are expecting user
+** will play with CHANGE DEFINITION commands. :-)
+**------------------------------------------------------------------------
+*/
+static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln, u_char *inq_data)
+{
+ tcb_p tp = &np->target[tn];
+ lcb_p lp = ncr_lp(np, tp, ln);
+ u_char inq_byte7;
+ int i;
+
+ /*
+ ** If no lcb, try to allocate it.
+ */
+ if (!lp && !(lp = ncr_alloc_lcb(np, tn, ln)))
+ goto fail;
+
+#if 0 /* No more used. Left here as provision */
+ /*
+ ** Get device quirks.
+ */
+ tp->quirks = 0;
+ if (tp->quirks && bootverbose) {
+ PRINT_LUN(np, tn, ln);
+ printk ("quirks=%x.\n", tp->quirks);
+ }
+#endif
+
+ /*
+ ** Evaluate the trustworthy target/unit capabilities.
+ ** We only believe devices with version >= SCSI-2 that
+ ** use the appropriate response data format (2).
+ ** But it seems that some CCS devices also
+ ** support SYNC and I do not want to frustrate
+ ** anybody. ;-)
+ */
+ inq_byte7 = 0;
+ if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2)
+ inq_byte7 = inq_data[7];
+ else if ((inq_data[2] & 0x7) == 1 && (inq_data[3] & 0xf) == 1)
+ inq_byte7 = INQ7_SYNC;
+
+ /*
+ ** Throw away announced LUN capabilities if we are told
+ ** that there is no real device supported by the logical unit.
+ */
+ if ((inq_data[0] & 0xe0) > 0x20 || (inq_data[0] & 0x1f) == 0x1f)
+ inq_byte7 &= (INQ7_SYNC | INQ7_WIDE16);
+
+ /*
+ ** If user is wanting SYNC, force this feature.
+ */
+ if (driver_setup.force_sync_nego)
+ inq_byte7 |= INQ7_SYNC;
+
+ /*
+ ** Prepare negotiation if SIP capabilities have changed.
+ */
+ tp->inq_done = 1;
+ if ((inq_byte7 ^ tp->inq_byte7) & (INQ7_SYNC | INQ7_WIDE16)) {
+ tp->inq_byte7 = inq_byte7;
+ ncr_negotiate(np, tp);
+ }
+
+ /*
+ ** If the unit supports tagged commands, allocate and
+ ** initialize the task table if not done yet.
+ */
+ if ((inq_byte7 & INQ7_QUEUE) && lp->tasktbl == &lp->tasktbl_0) {
+ lp->tasktbl = m_calloc_dma(MAX_TASKS*4, "TASKTBL");
+ if (!lp->tasktbl) {
+ lp->tasktbl = &lp->tasktbl_0;
+ goto fail;
+ }
+ lp->b_tasktbl = cpu_to_scr(vtobus(lp->tasktbl));
+ for (i = 0 ; i < MAX_TASKS ; i++)
+ lp->tasktbl[i] = cpu_to_scr(np->p_notask);
+
+ lp->cb_tags = m_calloc(MAX_TAGS, "CB_TAGS");
+ if (!lp->cb_tags)
+ goto fail;
+ for (i = 0 ; i < MAX_TAGS ; i++)
+ lp->cb_tags[i] = i;
+
+ lp->maxnxs = MAX_TAGS;
+ lp->tags_stime = ktime_get(3*HZ);
+ }
+
+ /*
+ ** Adjust tagged queueing status if needed.
+ */
+ if ((inq_byte7 ^ lp->inq_byte7) & INQ7_QUEUE) {
+ lp->inq_byte7 = inq_byte7;
+ lp->numtags = lp->maxtags;
+ ncr_setup_tags (np, tn, ln);
+ }
+
+fail:
+ return lp;
+}
+
+/*==========================================================
+**
+**
+** Build Scatter Gather Block
+**
+**
+**==========================================================
+**
+** The transfer area may be scattered among
+** several non adjacent physical pages.
+**
+** We may use MAX_SCATTER blocks.
+**
+**----------------------------------------------------------
+*/
+
+/*
+** We try to reduce the number of interrupts caused
+** by unexpected phase changes due to disconnects.
+** A typical hard disk may disconnect before ANY block.
+** If we wanted to avoid unexpected phase changes at all,
+** we would have to use a breakpoint every 512 bytes.
+** Of course the number of scatter/gather blocks is
+** limited.
+** Under Linux, the scatter/gather blocks are provided by
+** the generic driver. We just have to copy addresses and
+** sizes to the data segment array.
+*/
+
+/*
+** For 64 bit systems, we use the 8 upper bits of the size field
+** to provide bus address bits 32-39 to the SCRIPTS processor.
+** This allows the 895A and 896 to address up to 1 TB of memory.
+** For 32 bit chips on 64 bit systems, we must be provided with
+** memory addresses that fit into the first 32 bit bus address
+** range, so this does not matter and we expect an error from
+** the chip if this ever happens.
+**
+** We use a separate function for the case Linux does not provide
+** a scatter list in order to allow better code optimization
+** for the case we have a scatter list (BTW, for now this just wastes
+** about 40 bytes of code for x86, but my guess is that the scatter
+** code will get more complex later).
+*/
+
+#ifdef SCSI_NCR_USE_64BIT_DAC
+#define SCATTER_ONE(data, badd, len) \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len);
+#else
+#define SCATTER_ONE(data, badd, len) \
+ (data)->addr = cpu_to_scr(badd); \
+ (data)->size = cpu_to_scr(len);
+#endif
+
+#define CROSS_16MB(p, n) (((((u_long) p) + n - 1) ^ ((u_long) p)) & ~0xffffff)
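+/*
+** Worked example (added; addresses are made up): with
+** SCSI_NCR_USE_64BIT_DAC defined, a segment at bus address
+** 0x1234567890 with length 0x1000 is stored as
+**	addr = 0x34567890           (low 32 bits)
+**	size = 0x12000000 + 0x1000  (address bits 32-39 in the top byte,
+**	                             length in the low 24 bits)
+** CROSS_16MB(p, n) is non-zero exactly when the first and last byte
+** of the transfer differ in some address bit above bit 23, i.e. when
+** the segment straddles a 16 MB boundary.
+*/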
+
+static int ncr_scatter_no_sglist(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+{
+ struct scr_tblmove *data = &cp->phys.data[MAX_SCATTER-1];
+ int segment;
+
+ cp->data_len = cmd->request_bufflen;
+
+ if (cmd->request_bufflen) {
+ u_long baddr = map_scsi_single_data(np, cmd);
+
+ SCATTER_ONE(data, baddr, cmd->request_bufflen);
+ if (CROSS_16MB(baddr, cmd->request_bufflen)) {
+ cp->host_flags |= HF_PM_TO_C;
+#ifdef DEBUG_896R1
+printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n",
+ baddr, cmd->request_bufflen);
+#endif
+ }
+ segment = 1;
+ }
+ else
+ segment = 0;
+
+ return segment;
+}
+
+/*
+** DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5.
+**
+** We disable data phase mismatch handling from SCRIPTS for data
+** transfers that contains scatter/gather entries that cross
+** a 16 MB boundary.
+** We use a different scatter function for 896 rev. 1 that needs
+** such a work-around. Doing so, we do not affect performance for
+** other chips.
+** This problem should not be triggered for disk IOs under Linux,
+** since such IOs are performed using pages and buffers that are
+** nicely power-of-two sized and aligned. But, since this may change
+** at any time, a work-around was required.
+*/
+static int ncr_scatter_896R1(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+{
+ int segn;
+ int use_sg = (int) cmd->use_sg;
+
+ cp->data_len = 0;
+
+ if (!use_sg)
+ segn = ncr_scatter_no_sglist(np, cp, cmd);
+ else if (use_sg > MAX_SCATTER)
+ segn = -1;
+ else {
+ struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+ struct scr_tblmove *data;
+
+ use_sg = map_scsi_sg_data(np, cmd);
+ data = &cp->phys.data[MAX_SCATTER - use_sg];
+
+ for (segn = 0; segn < use_sg; segn++) {
+ u_long baddr = scsi_sg_dma_address(&scatter[segn]);
+ unsigned int len = scsi_sg_dma_len(&scatter[segn]);
+
+ SCATTER_ONE(&data[segn],
+ baddr,
+ len);
+ if (CROSS_16MB(baddr, scatter[segn].length)) {
+ cp->host_flags |= HF_PM_TO_C;
+#ifdef DEBUG_896R1
+printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n",
+ baddr, scatter[segn].length);
+#endif
+ }
+ cp->data_len += len;
+ }
+ }
+
+ return segn;
+}
+
+static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
+{
+ int segment;
+ int use_sg = (int) cmd->use_sg;
+
+ cp->data_len = 0;
+
+ if (!use_sg)
+ segment = ncr_scatter_no_sglist(np, cp, cmd);
+ else if (use_sg > MAX_SCATTER)
+ segment = -1;
+ else {
+ struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
+ struct scr_tblmove *data;
+
+ use_sg = map_scsi_sg_data(np, cmd);
+ data = &cp->phys.data[MAX_SCATTER - use_sg];
+
+ for (segment = 0; segment < use_sg; segment++) {
+ u_long baddr = scsi_sg_dma_address(&scatter[segment]);
+ unsigned int len = scsi_sg_dma_len(&scatter[segment]);
+
+ SCATTER_ONE(&data[segment],
+ baddr,
+ len);
+ cp->data_len += len;
+ }
+ }
+
+ return segment;
+}
+
+/*==========================================================
+**
+**
+** Test the pci bus snoop logic :-(
+**
+** Has to be called with interrupts disabled.
+**
+**
+**==========================================================
+*/
+
+#ifndef SCSI_NCR_IOMAPPED
+static int __init ncr_regtest (struct ncb* np)
+{
+ register volatile u_int32 data;
+ /*
+ ** ncr registers may NOT be cached.
+ ** write 0xffffffff to a read only register area,
+ ** and try to read it back.
+ */
+ data = 0xffffffff;
+ OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
+ data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
+#if 1
+ if (data == 0xffffffff) {
+#else
+ if ((data & 0xe2f0fffd) != 0x02000080) {
+#endif
+ printk ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
+ (unsigned) data);
+ return (0x10);
+ };
+ return (0);
+}
+#endif
+
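+/*
+** Added note on the principle of the snoop test below: the host
+** writes 1 into a memory cell (np->ncr_cache) and 2 into a chip
+** register (nc_temp), then lets the 'snooptest' SCRIPT exchange the
+** two values by DMA.  If either side reads back a stale value
+** afterwards, PCI bus snooping / cache coherency is broken and the
+** corresponding error bit is returned.
+*/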
+static int __init ncr_snooptest (struct ncb* np)
+{
+ u_int32 ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
+ int i, err=0;
+#ifndef SCSI_NCR_IOMAPPED
+ if (np->reg) {
+ err |= ncr_regtest (np);
+ if (err) return (err);
+ }
+#endif
+ /*
+ ** init
+ */
+ pc = NCB_SCRIPTH0_PHYS (np, snooptest);
+ host_wr = 1;
+ ncr_wr = 2;
+ /*
+ ** Set memory and register.
+ */
+ np->ncr_cache = cpu_to_scr(host_wr);
+ OUTL (nc_temp, ncr_wr);
+ /*
+ ** Start script (exchange values)
+ */
+ OUTL (nc_dsa, np->p_ncb);
+ OUTL_DSP (pc);
+ /*
+ ** Wait 'til done (with timeout)
+ */
+ for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
+ if (INB(nc_istat) & (INTF|SIP|DIP))
+ break;
+ /*
+ ** Save termination position.
+ */
+ pc = INL (nc_dsp);
+ /*
+ ** Read memory and register.
+ */
+ host_rd = scr_to_cpu(np->ncr_cache);
+ ncr_rd = INL (nc_scratcha);
+ ncr_bk = INL (nc_temp);
+
+ /*
+ ** check for timeout
+ */
+ if (i>=NCR_SNOOP_TIMEOUT) {
+ printk ("CACHE TEST FAILED: timeout.\n");
+ return (0x20);
+ };
+ /*
+ ** Check termination position.
+ */
+ if (pc != NCB_SCRIPTH0_PHYS (np, snoopend)+8) {
+ printk ("CACHE TEST FAILED: script execution failed.\n");
+ printk ("start=%08lx, pc=%08lx, end=%08lx\n",
+ (u_long) NCB_SCRIPTH0_PHYS (np, snooptest), (u_long) pc,
+ (u_long) NCB_SCRIPTH0_PHYS (np, snoopend) +8);
+ return (0x40);
+ };
+ /*
+ ** Show results.
+ */
+ if (host_wr != ncr_rd) {
+ printk ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n",
+ (int) host_wr, (int) ncr_rd);
+ err |= 1;
+ };
+ if (host_rd != ncr_wr) {
+ printk ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n",
+ (int) ncr_wr, (int) host_rd);
+ err |= 2;
+ };
+ if (ncr_bk != ncr_wr) {
+ printk ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n",
+ (int) ncr_wr, (int) ncr_bk);
+ err |= 4;
+ };
+ return (err);
+}
+
+/*==========================================================
+**
+** Determine the ncr's clock frequency.
+** This is essential for the negotiation
+** of the synchronous transfer rate.
+**
+**==========================================================
+**
+** Note: we have to return the correct value.
+** THERE IS NO SAFE DEFAULT VALUE.
+**
+** Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
+** 53C860 and 53C875 rev. 1 support fast20 transfers but
+** do not have a clock doubler and so are provided with an
+** 80 MHz clock. All other fast20 boards incorporate a doubler
+** and so should be delivered with a 40 MHz clock.
+** The recent fast40 chips (895/896/895A) and the
+** fast80 chip (C1010) use a 40 MHz base clock
+** and provide a clock quadrupler (160 MHz). The code below
+** tries to deal as cleverly as possible with all this stuff.
+**
+**----------------------------------------------------------
+*/
+
+/*
+ * Select NCR SCSI clock frequency
+ */
+static void ncr_selectclock(ncb_p np, u_char scntl3)
+{
+ if (np->multiplier < 2) {
+ OUTB(nc_scntl3, scntl3);
+ return;
+ }
+
+ if (bootverbose >= 2)
+ printk ("%s: enabling clock multiplier\n", ncr_name(np));
+
+ OUTB(nc_stest1, DBLEN); /* Enable clock multiplier */
+
+ if ( (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
+ (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) &&
+ (np->multiplier > 2)) {
+ int i = 20; /* Poll bit 5 of stest4 for quadrupler */
+ while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
+ UDELAY (20);
+ if (!i)
+ printk("%s: the chip cannot lock the frequency\n",
+ ncr_name(np));
+
+ } else /* Wait 120 micro-seconds for multiplier*/
+ UDELAY (120);
+
+ OUTB(nc_stest3, HSC); /* Halt the scsi clock */
+ OUTB(nc_scntl3, scntl3);
+ OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
+ OUTB(nc_stest3, 0x00); /* Restart scsi clock */
+}
+
+
+/*
+ * calculate NCR SCSI clock frequency (in KHz)
+ */
+static unsigned __init ncrgetfreq (ncb_p np, int gen)
+{
+ unsigned int ms = 0;
+ unsigned int f;
+ int count;
+
+ /*
+ * Measure GEN timer delay in order
+ * to calculate SCSI clock frequency
+ *
+ * This code will never execute too
+ * many loop iterations (if DELAY is
+ * reasonably correct). It could get
+ * too low a delay (too high a freq.)
+ * if the CPU is slow executing the
+ * loop for some reason (an NMI, for
+ * example). For this reason, when multiple
+ * measurements are performed, we trust the
+ * higher delay (i.e. the lower returned
+ * frequency).
+ */
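+ /*
+ * Added note: with gen = 11 (the value used by ncr_getfreq) the
+ * GEN timer nominally expires after (1<<11) * 125us = 256 ms,
+ * the actual delay scaling with the real SCSI clock frequency.
+ * The loop below counts elapsed milliseconds until the GEN
+ * interrupt fires and the formula further down turns that back
+ * into a frequency; e.g. a (made-up) measurement of ms = 222
+ * gives (1<<11) * 4340 / 222 ~= 40000 KHz, i.e. a 40 MHz clock.
+ */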
+ OUTW (nc_sien , 0x0);/* mask all scsi interrupts */
+ /* enable general purpose timer */
+ (void) INW (nc_sist); /* clear pending scsi interrupt */
+ OUTB (nc_dien , 0); /* mask all dma interrupts */
+ (void) INW (nc_sist); /* another one, just to be sure :) */
+ OUTB (nc_scntl3, 4); /* set pre-scaler to divide by 3 */
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ OUTB (nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
+ /* Temporary fix for udelay issue with Alpha
+ platform */
+ while (!(INW(nc_sist) & GEN) && ms++ < 100000) {
+ /* count 1ms */
+ for (count = 0; count < 10; count++)
+ UDELAY (100);
+ }
+ OUTB (nc_stime1, 0); /* disable general purpose timer */
+ /*
+ * set prescaler to divide by whatever 0 means
+ * 0 ought to choose divide by 2, but appears
+ * to set divide by 3.5 mode in my 53c810 ...
+ */
+ OUTB (nc_scntl3, 0);
+
+ /*
+ * adjust for prescaler, and convert into KHz
+ * scale values derived empirically. C1010 uses
+ * different dividers
+ */
+#if 0
+ if (np->device_id == PCI_DEVICE_ID_LSI_53C1010)
+ f = ms ? ((1 << gen) * 2866 ) / ms : 0;
+ else
+#endif
+ f = ms ? ((1 << gen) * 4340) / ms : 0;
+
+ if (bootverbose >= 2)
+ printk ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
+ ncr_name(np), gen, ms, f);
+
+ return f;
+}
+
+static unsigned __init ncr_getfreq (ncb_p np)
+{
+ u_int f1, f2;
+ int gen = 11;
+
+ (void) ncrgetfreq (np, gen); /* throw away first result */
+ f1 = ncrgetfreq (np, gen);
+ f2 = ncrgetfreq (np, gen);
+ if (f1 > f2) f1 = f2; /* trust lower result */
+ return f1;
+}
+
+/*
+ * Get/probe NCR SCSI clock frequency
+ */
+static void __init ncr_getclock (ncb_p np, int mult)
+{
+ unsigned char scntl3 = np->sv_scntl3;
+ unsigned char stest1 = np->sv_stest1;
+ unsigned f1;
+
+ np->multiplier = 1;
+ f1 = 40000;
+
+ /*
+ ** True with 875/895/896/895A with clock multiplier selected
+ */
+ if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier found\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+
+ /*
+ ** If multiplier not found but a C1010, assume a mult of 4.
+ ** If multiplier not found or scntl3 not 7,5,3,
+ ** reset chip and get frequency from general purpose timer.
+ ** Otherwise trust scntl3 BIOS setting.
+ */
+ if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
+ f1=40000;
+ np->multiplier = mult;
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier assumed\n", ncr_name(np));
+ }
+ else if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+ OUTB (nc_stest1, 0); /* make sure doubler is OFF */
+ f1 = ncr_getfreq (np);
+
+ if (bootverbose)
+ printk ("%s: NCR clock is %uKHz\n", ncr_name(np), f1);
+
+ if (f1 < 55000) f1 = 40000;
+ else f1 = 80000;
+
+ /*
+ ** Suggest to also check the PCI clock frequency
+ ** to make sure our frequency calculation algorithm
+ ** is not too biased.
+ */
+ if (np->features & FE_66MHZ) {
+ np->pciclock_min = (66000*55+80-1)/80;
+ np->pciclock_max = (66000*55)/40;
+ }
+ else {
+ np->pciclock_min = (33000*55+80-1)/80;
+ np->pciclock_max = (33000*55)/40;
+ }
+
+ if (f1 == 40000 && mult > 1) {
+ if (bootverbose >= 2)
+ printk ("%s: clock multiplier assumed\n", ncr_name(np));
+ np->multiplier = mult;
+ }
+ } else {
+ if ((scntl3 & 7) == 3) f1 = 40000;
+ else if ((scntl3 & 7) == 5) f1 = 80000;
+ else f1 = 160000;
+
+ f1 /= np->multiplier;
+ }
+
+ /*
+ ** Compute controller synchronous parameters.
+ */
+ f1 *= np->multiplier;
+ np->clock_khz = f1;
+}
+
+/*
+ * Get/probe PCI clock frequency
+ */
+static u_int __init ncr_getpciclock (ncb_p np)
+{
+ static u_int f;
+
+ OUTB (nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
+ f = ncr_getfreq (np);
+ OUTB (nc_stest1, 0);
+
+ return f;
+}
+
+/*===================== LINUX ENTRY POINTS SECTION ==========================*/
+
+#ifndef uchar
+#define uchar unsigned char
+#endif
+
+#ifndef ushort
+#define ushort unsigned short
+#endif
+
+#ifndef ulong
+#define ulong unsigned long
+#endif
+
+/* ---------------------------------------------------------------------
+**
+** Driver setup from the boot command line
+**
+** ---------------------------------------------------------------------
+*/
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+#define OPT_TAGS 1
+#define OPT_MASTER_PARITY 2
+#define OPT_SCSI_PARITY 3
+#define OPT_DISCONNECTION 4
+#define OPT_SPECIAL_FEATURES 5
+#define OPT_ULTRA_SCSI 6
+#define OPT_FORCE_SYNC_NEGO 7
+#define OPT_REVERSE_PROBE 8
+#define OPT_DEFAULT_SYNC 9
+#define OPT_VERBOSE 10
+#define OPT_DEBUG 11
+#define OPT_BURST_MAX 12
+#define OPT_LED_PIN 13
+#define OPT_MAX_WIDE 14
+#define OPT_SETTLE_DELAY 15
+#define OPT_DIFF_SUPPORT 16
+#define OPT_IRQM 17
+#define OPT_PCI_FIX_UP 18
+#define OPT_BUS_CHECK 19
+#define OPT_OPTIMIZE 20
+#define OPT_RECOVERY 21
+#define OPT_SAFE_SETUP 22
+#define OPT_USE_NVRAM 23
+#define OPT_EXCLUDE 24
+#define OPT_HOST_ID 25
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define OPT_IARB 26
+#endif
+
+static char setup_token[] __initdata =
+ "tags:" "mpar:"
+ "spar:" "disc:"
+ "specf:" "ultra:"
+ "fsn:" "revprob:"
+ "sync:" "verb:"
+ "debug:" "burst:"
+ "led:" "wide:"
+ "settle:" "diff:"
+ "irqm:" "pcifix:"
+ "buschk:" "optim:"
+ "recovery:"
+ "safe:" "nvram:"
+ "excl:" "hostid:"
+#ifdef SCSI_NCR_IARB_SUPPORT
+ "iarb:"
+#endif
+ ; /* DONNOT REMOVE THIS ';' */
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+static int __init get_setup_token(char *p)
+{
+ char *cur = setup_token;
+ char *pc;
+ int i = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ ++pc;
+ ++i;
+ if (!strncmp(p, cur, pc - cur))
+ return i;
+ cur = pc;
+ }
+ return 0;
+}
+
+
+int __init sym53c8xx_setup(char *str)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+ char *cur = str;
+ char *pc, *pv;
+ unsigned long val;
+ int i, c;
+ int xi = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ char *pe;
+
+ val = 0;
+ pv = pc;
+ c = *++pv;
+
+ if (c == 'n')
+ val = 0;
+ else if (c == 'y')
+ val = 1;
+ else
+ val = (int) simple_strtoul(pv, &pe, 0);
+
+ switch (get_setup_token(cur)) {
+ case OPT_TAGS:
+ driver_setup.default_tags = val;
+ if (pe && *pe == '/') {
+ i = 0;
+ while (*pe && *pe != ARG_SEP &&
+ i < sizeof(driver_setup.tag_ctrl)-1) {
+ driver_setup.tag_ctrl[i++] = *pe++;
+ }
+ driver_setup.tag_ctrl[i] = '\0';
+ }
+ break;
+ case OPT_MASTER_PARITY:
+ driver_setup.master_parity = val;
+ break;
+ case OPT_SCSI_PARITY:
+ driver_setup.scsi_parity = val;
+ break;
+ case OPT_DISCONNECTION:
+ driver_setup.disconnection = val;
+ break;
+ case OPT_SPECIAL_FEATURES:
+ driver_setup.special_features = val;
+ break;
+ case OPT_ULTRA_SCSI:
+ driver_setup.ultra_scsi = val;
+ break;
+ case OPT_FORCE_SYNC_NEGO:
+ driver_setup.force_sync_nego = val;
+ break;
+ case OPT_REVERSE_PROBE:
+ driver_setup.reverse_probe = val;
+ break;
+ case OPT_DEFAULT_SYNC:
+ driver_setup.default_sync = val;
+ break;
+ case OPT_VERBOSE:
+ driver_setup.verbose = val;
+ break;
+ case OPT_DEBUG:
+ driver_setup.debug = val;
+ break;
+ case OPT_BURST_MAX:
+ driver_setup.burst_max = val;
+ break;
+ case OPT_LED_PIN:
+ driver_setup.led_pin = val;
+ break;
+ case OPT_MAX_WIDE:
+ driver_setup.max_wide = val? 1:0;
+ break;
+ case OPT_SETTLE_DELAY:
+ driver_setup.settle_delay = val;
+ break;
+ case OPT_DIFF_SUPPORT:
+ driver_setup.diff_support = val;
+ break;
+ case OPT_IRQM:
+ driver_setup.irqm = val;
+ break;
+ case OPT_PCI_FIX_UP:
+ driver_setup.pci_fix_up = val;
+ break;
+ case OPT_BUS_CHECK:
+ driver_setup.bus_check = val;
+ break;
+ case OPT_OPTIMIZE:
+ driver_setup.optimize = val;
+ break;
+ case OPT_RECOVERY:
+ driver_setup.recovery = val;
+ break;
+ case OPT_USE_NVRAM:
+ driver_setup.use_nvram = val;
+ break;
+ case OPT_SAFE_SETUP:
+ memcpy(&driver_setup, &driver_safe_setup,
+ sizeof(driver_setup));
+ break;
+ case OPT_EXCLUDE:
+ if (xi < SCSI_NCR_MAX_EXCLUDES)
+ driver_setup.excludes[xi++] = val;
+ break;
+ case OPT_HOST_ID:
+ driver_setup.host_id = val;
+ break;
+#ifdef SCSI_NCR_IARB_SUPPORT
+ case OPT_IARB:
+ driver_setup.iarb = val;
+ break;
+#endif
+ default:
+ printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+ break;
+ }
+
+ if ((cur = strchr(cur, ARG_SEP)) != NULL)
+ ++cur;
+ }
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+ return 1;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,13)
+#ifndef MODULE
+__setup("sym53c8xx=", sym53c8xx_setup);
+#endif
+#endif
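+
+/*
+** Usage sketch (added; option names come from setup_token[] above and
+** the values are examples only):
+**
+**	sym53c8xx=tags:4,sync:10,burst:7,led:1
+**
+** Each option is a "name:value" pair; pairs are separated by ARG_SEP,
+** i.e. ',' on the kernel command line and ' ' when passed as a module
+** option.  Boolean options also accept 'y' or 'n', as handled in
+** sym53c8xx_setup() above.
+*/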
+
+static int
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device);
+
+/*
+** Linux entry point for SYM53C8XX devices detection routine.
+**
+** Called by the middle-level scsi drivers at initialization time,
+** or at module installation.
+**
+** Read the PCI configuration and try to attach each
+** detected NCR board.
+**
+** If NVRAM is present, try to attach boards according to
+** the user defined boot order.
+**
+** Returns the number of boards successfully attached.
+*/
+
+static void __init ncr_print_driver_setup(void)
+{
+#define YesNo(y) y ? 'y' : 'n'
+ printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+ "burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
+ YesNo(driver_setup.disconnection),
+ driver_setup.special_features,
+ driver_setup.ultra_scsi,
+ driver_setup.default_tags,
+ driver_setup.default_sync,
+ driver_setup.burst_max,
+ YesNo(driver_setup.max_wide),
+ driver_setup.diff_support,
+ YesNo(driver_setup.reverse_probe),
+ driver_setup.bus_check);
+
+ printk (NAME53C8XX ": setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,"
+ "led:%c,settle:%d,irqm:0x%x,nvram:0x%x,pcifix:0x%x\n",
+ YesNo(driver_setup.master_parity),
+ YesNo(driver_setup.scsi_parity),
+ YesNo(driver_setup.force_sync_nego),
+ driver_setup.verbose,
+ driver_setup.debug,
+ YesNo(driver_setup.led_pin),
+ driver_setup.settle_delay,
+ driver_setup.irqm,
+ driver_setup.use_nvram,
+ driver_setup.pci_fix_up);
+#undef YesNo
+}
+
+/*===================================================================
+** SYM53C8XX devices description table and chip ids list.
+**===================================================================
+*/
+
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+static ushort ncr_chip_ids[] __initdata = SCSI_NCR_CHIP_IDS;
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+/*===================================================================
+** Detect all NCR PQS/PDS boards and keep track of their bus nr.
+**
+** The NCR PQS or PDS card is constructed as a DEC bridge
+** behind which sit a proprietary NCR memory controller and
+** four or two 53c875s as separate devices. In its usual mode
+** of operation, the 875s are slaved to the memory controller
+** for all transfers. We can tell if an 875 is part of a
+** PQS/PDS or not since if it is, it will be on the same bus
+** as the memory controller. To operate with the Linux
+** driver, the memory controller is disabled and the 875s
+** freed to function independently. The only wrinkle is that
+** the preset SCSI ID (which may be zero) must be read in from
+** a special configuration space register of the 875.
+**===================================================================
+*/
+#define SCSI_NCR_MAX_PQS_BUS 16
+static int pqs_bus[SCSI_NCR_MAX_PQS_BUS] __initdata = { 0 };
+
+static void __init ncr_detect_pqs_pds(void)
+{
+ short index;
+ pcidev_t dev = PCIDEV_NULL;
+
+ for(index=0; index < SCSI_NCR_MAX_PQS_BUS; index++) {
+ u_char tmp;
+
+ dev = pci_find_device(0x101a, 0x0009, dev);
+ if (dev == PCIDEV_NULL) {
+ pqs_bus[index] = -1;
+ break;
+ }
+ printk(KERN_INFO NAME53C8XX ": NCR PQS/PDS memory controller detected on bus %d\n", PciBusNumber(dev));
+ pci_read_config_byte(dev, 0x44, &tmp);
+ /* bit 1: allow individual 875 configuration */
+ tmp |= 0x2;
+ pci_write_config_byte(dev, 0x44, tmp);
+ pci_read_config_byte(dev, 0x45, &tmp);
+ /* bit 2: drive individual 875 interrupts to the bus */
+ tmp |= 0x4;
+ pci_write_config_byte(dev, 0x45, tmp);
+
+ pqs_bus[index] = PciBusNumber(dev);
+ }
+}
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+/*===================================================================
+** Detect all 53c8xx hosts and then attach them.
+**
+** If we are using NVRAM, once all hosts are detected, we need to
+** check any NVRAM for boot order in case detect and boot order
+** differ and attach them using the order in the NVRAM.
+**
+** If no NVRAM is found or the data appears invalid, attach boards
+** in the order they are detected.
+**===================================================================
+*/
+int __init sym53c8xx_detect(Scsi_Host_Template *tpnt)
+{
+ pcidev_t pcidev;
+ int i, j, chips, hosts, count;
+ int attach_count = 0;
+ ncr_device *devtbl, *devp;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_nvram nvram0, nvram, *nvp;
+#endif
+
+ /*
+ ** PCI is required.
+ */
+ if (!pci_present())
+ return 0;
+
+ /*
+ ** Initialize driver general stuff.
+ */
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
+ tpnt->proc_dir = &proc_scsi_sym53c8xx;
+#else
+ tpnt->proc_name = NAME53C8XX;
+#endif
+ tpnt->proc_info = sym53c8xx_proc_info;
+#endif
+
+#if defined(SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT) && defined(MODULE)
+if (sym53c8xx)
+ sym53c8xx_setup(sym53c8xx);
+#endif
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ ncr_debug = driver_setup.debug;
+#endif
+
+ if (initverbose >= 2)
+ ncr_print_driver_setup();
+
+ /*
+	** Allocate the device table since we do not want to
+ ** overflow the kernel stack.
+ ** 1 x 4K PAGE is enough for more than 40 devices for i386.
+ */
+ devtbl = m_calloc(PAGE_SIZE, "devtbl");
+ if (!devtbl)
+ return 0;
+
+ /*
+ ** Detect all NCR PQS/PDS memory controllers.
+ */
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ ncr_detect_pqs_pds();
+#endif
+
+ /*
+ ** Detect all 53c8xx hosts.
+ ** Save the first Symbios NVRAM content if any
+ ** for the boot order.
+ */
+ chips = sizeof(ncr_chip_ids) / sizeof(ncr_chip_ids[0]);
+ hosts = PAGE_SIZE / sizeof(*devtbl);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ nvp = (driver_setup.use_nvram & 0x1) ? &nvram0 : 0;
+#endif
+ j = 0;
+ count = 0;
+ pcidev = PCIDEV_NULL;
+ while (1) {
+ char *msg = "";
+ if (count >= hosts)
+ break;
+ if (j >= chips)
+ break;
+ i = driver_setup.reverse_probe ? chips - 1 - j : j;
+ pcidev = pci_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+ pcidev);
+ if (pcidev == PCIDEV_NULL) {
+ ++j;
+ continue;
+ }
+	/* Some HW such as the HP LH4 may report PCI devices twice */
+ for (i = 0; i < count ; i++) {
+ if (devtbl[i].slot.bus == PciBusNumber(pcidev) &&
+ devtbl[i].slot.device_fn == PciDeviceFn(pcidev))
+ break;
+ }
+ if (i != count) /* Ignore this device if we already have it */
+ continue;
+ devp = &devtbl[count];
+ devp->host_id = driver_setup.host_id;
+ devp->attach_done = 0;
+ if (sym53c8xx_pci_init(tpnt, pcidev, devp)) {
+ continue;
+ }
+ ++count;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvp) {
+ ncr_get_nvram(devp, nvp);
+ switch(nvp->type) {
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ /*
+ * Switch to the other nvram buffer, so that
+ * nvram0 will contain the first Symbios
+ * format NVRAM content with boot order.
+ */
+ nvp = &nvram;
+ msg = "with Symbios NVRAM";
+ break;
+ case SCSI_NCR_TEKRAM_NVRAM:
+ msg = "with Tekram NVRAM";
+ break;
+ }
+ }
+#endif
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ if (devp->pqs_pds)
+ msg = "(NCR PQS/PDS)";
+#endif
+ printk(KERN_INFO NAME53C8XX ": 53c%s detected %s\n",
+ devp->chip.name, msg);
+ }
+
+ /*
+	** If we have found a SYMBIOS NVRAM, first use the NVRAM boot
+	** sequence as the device boot order.
+	** Check devices in the boot record against devices detected.
+	** Attach devices if we find a match. Boot table records that
+	** do not match any detected devices will be ignored.
+	** Devices that do not match any boot table entry will not be
+	** attached here but will be attached during the device table
+	** rescan.
+ */
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (!nvp || nvram0.type != SCSI_NCR_SYMBIOS_NVRAM)
+ goto next;
+ for (i = 0; i < 4; i++) {
+ Symbios_host *h = &nvram0.data.Symbios.host[i];
+ for (j = 0 ; j < count ; j++) {
+ devp = &devtbl[j];
+ if (h->device_fn != devp->slot.device_fn ||
+ h->bus_nr != devp->slot.bus ||
+ h->device_id != devp->chip.device_id)
+ continue;
+ if (devp->attach_done)
+ continue;
+ if (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) {
+ ncr_get_nvram(devp, nvp);
+ if (!ncr_attach (tpnt, attach_count, devp))
+ attach_count++;
+ }
+ else if (!(driver_setup.use_nvram & 0x80))
+ printk(KERN_INFO NAME53C8XX
+ ": 53c%s state OFF thus not attached\n",
+ devp->chip.name);
+ else
+ continue;
+
+ devp->attach_done = 1;
+ break;
+ }
+ }
+next:
+#endif
+
+ /*
+ ** Rescan device list to make sure all boards attached.
+ ** Devices without boot records will not be attached yet
+ ** so try to attach them here.
+ */
+ for (i= 0; i < count; i++) {
+ devp = &devtbl[i];
+ if (!devp->attach_done) {
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_get_nvram(devp, nvp);
+#endif
+ if (!ncr_attach (tpnt, attach_count, devp))
+ attach_count++;
+ }
+ }
+
+ m_free(devtbl, PAGE_SIZE, "devtbl");
+
+ return attach_count;
+}
+
+/*===================================================================
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+**===================================================================
+*/
+static int __init
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device)
+{
+ u_short vendor_id, device_id, command, status_reg;
+ u_char cache_line_size, latency_timer;
+ u_char suggested_cache_line_size = 0;
+ u_char pci_fix_up = driver_setup.pci_fix_up;
+ u_char revision;
+ u_int irq;
+ u_long base, base_2, io_port;
+ int i;
+ ncr_chip *chip;
+
+ printk(KERN_INFO NAME53C8XX ": at PCI bus %d, device %d, function %d\n",
+ PciBusNumber(pdev),
+ (int) (PciDeviceFn(pdev) & 0xf8) >> 3,
+ (int) (PciDeviceFn(pdev) & 7));
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ if (!pci_dma_supported(pdev, (dma_addr_t) (0xffffffffUL))) {
+ printk(KERN_WARNING NAME53C8XX
+ "32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
+ return -1;
+ }
+#endif
+
+ /*
+ ** Read info from the PCI config space.
+ ** pci_read_config_xxx() functions are assumed to be used for
+ ** successfully detected PCI devices.
+ */
+ vendor_id = PciVendorId(pdev);
+ device_id = PciDeviceId(pdev);
+ irq = PciIrqLine(pdev);
+ i = 0;
+ i = pci_get_base_address(pdev, i, &io_port);
+ i = pci_get_base_address(pdev, i, &base);
+ (void) pci_get_base_address(pdev, i, &base_2);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
+ pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ /*
+ ** Match the BUS number for PQS/PDS devices.
+ ** Read the SCSI ID from a special register mapped
+ ** into the configuration space of the individual
+	** 875s. This register is set up by the PQS bios.
+ */
+ for(i = 0; i < SCSI_NCR_MAX_PQS_BUS && pqs_bus[i] != -1; i++) {
+ u_char tmp;
+ if (pqs_bus[i] == PciBusNumber(pdev)) {
+ pci_read_config_byte(pdev, 0x84, &tmp);
+ device->pqs_pds = 1;
+ device->host_id = tmp;
+ break;
+ }
+ }
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+ /*
+	** If the user excludes this chip, do not initialize it.
+ */
+ for (i = 0 ; i < SCSI_NCR_MAX_EXCLUDES ; i++) {
+ if (driver_setup.excludes[i] ==
+ (io_port & PCI_BASE_ADDRESS_IO_MASK))
+ return -1;
+ }
+ /*
+ ** Check if the chip is supported
+ */
+ chip = 0;
+ for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+ if (device_id != ncr_chip_table[i].device_id)
+ continue;
+ if (revision > ncr_chip_table[i].revision_id)
+ continue;
+ if (!(ncr_chip_table[i].features & FE_LDSTR))
+ break;
+ chip = &device->chip;
+ memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+ chip->revision_id = revision;
+ break;
+ }
+
+ /*
+ ** Ignore Symbios chips controlled by SISL RAID controller.
+ ** This controller sets value 0x52414944 at RAM end - 16.
+ */
+#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
+ if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+ unsigned int ram_size, ram_val;
+ u_long ram_ptr;
+
+ if (chip->features & FE_RAM8K)
+ ram_size = 8192;
+ else
+ ram_size = 4096;
+
+ ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+ ram_size);
+ if (ram_ptr) {
+ ram_val = readl_raw(ram_ptr + ram_size - 16);
+ unmap_pci_mem(ram_ptr, ram_size);
+ if (ram_val == 0x52414944) {
+ printk(NAME53C8XX": not initializing, "
+ "driven by SISL RAID controller.\n");
+ return -1;
+ }
+ }
+ }
+#endif /* i386 and PCI MEMORY accessible */
+
+ if (!chip) {
+ printk(NAME53C8XX ": not initializing, device not supported\n");
+ return -1;
+ }
+
+#ifdef __powerpc__
+ /*
+ ** Fix-up for power/pc.
+ ** Should not be performed by the driver.
+ */
+ if ((command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ != (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": setting%s%s...\n",
+ (command & PCI_COMMAND_IO) ? "" : " PCI_COMMAND_IO",
+ (command & PCI_COMMAND_MEMORY) ? "" : " PCI_COMMAND_MEMORY");
+ command |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+ if ( is_prep ) {
+ if (io_port >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating io_port (Wacky IBM)");
+ io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_0, io_port);
+ }
+ if (base >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base (Wacky IBM)");
+ base = (base & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_1, base);
+ }
+ if (base_2 >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base2 (Wacky IBM)");
+ base_2 = (base_2 & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_2, base_2);
+ }
+ }
+#endif
+#endif /* __powerpc__ */
+
+#if defined(__sparc__) && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0))
+ /*
+ ** Fix-ups for sparc.
+ **
+ ** I wrote: Should not be performed by the driver,
+ ** Guy wrote: but how can OBP know each and every PCI card,
+ ** if they don't use Fcode?
+ ** I replied: no need to know each and every PCI card, just
+ ** be skilled enough to understand the PCI specs.
+ */
+
+ /*
+ ** PCI configuration is based on configuration registers being
+ ** coherent with hardware and software resource identifications.
+ ** This is fairly simple, but seems still too complex for Sparc.
+ */
+ base = __pa(base);
+ base_2 = __pa(base_2);
+
+ if (!cache_line_size)
+ suggested_cache_line_size = 16;
+
+ driver_setup.pci_fix_up |= 0x7;
+
+#endif /* __sparc__ */
+
+#if defined(__i386__) && !defined(MODULE)
+ if (!cache_line_size) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+ extern char x86;
+ switch(x86) {
+#else
+ switch(boot_cpu_data.x86) {
+#endif
+ case 4: suggested_cache_line_size = 4; break;
+ case 6:
+ case 5: suggested_cache_line_size = 8; break;
+ }
+ }
+#endif /* __i386__ */
+
+ /*
+ ** Check availability of IO space, memory space.
+ ** Enable master capability if not yet.
+ **
+ ** We shouldn't have to care about the IO region when
+ ** we are using MMIO. But calling check_region() from
+ ** both the ncr53c8xx and the sym53c8xx drivers prevents
+	** the same device from being attached by both drivers.
+ ** If you have a better idea, let me know.
+ */
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (!(command & PCI_COMMAND_IO)) {
+ printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
+ (long) io_port);
+ io_port = 0;
+ }
+#endif
+ if (!(command & PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": PCI_COMMAND_MEMORY not set.\n");
+ base = 0;
+ base_2 = 0;
+ }
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (io_port && check_region (io_port, 128)) {
+ printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
+ (long) io_port);
+ io_port = 0;
+ }
+ if (!io_port)
+ return -1;
+#endif
+#ifndef SCSI_NCR_IOMAPPED
+ if (!base) {
+ printk(NAME53C8XX ": MMIO base address disabled.\n");
+ return -1;
+ }
+#endif
+
+ /*
+ ** Set MASTER capable and PARITY bit, if not yet.
+ */
+ if ((command & (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY))
+ != (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY)) {
+ printk(NAME53C8XX ": setting%s%s...(fix-up)\n",
+ (command & PCI_COMMAND_MASTER) ? "" : " PCI_COMMAND_MASTER",
+ (command & PCI_COMMAND_PARITY) ? "" : " PCI_COMMAND_PARITY");
+ command |= (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+ /*
+ ** Fix some features according to driver setup.
+ */
+ if (!(driver_setup.special_features & 1))
+ chip->features &= ~FE_SPECIAL_SET;
+ else {
+ if (driver_setup.special_features & 2)
+ chip->features &= ~FE_WRIE;
+ if (driver_setup.special_features & 4)
+ chip->features &= ~FE_NOPM;
+ }
+
+ /*
+ ** Work around for errant bit in 895A. The 66Mhz
+ ** capable bit is set erroneously. Clear this bit.
+ ** (Item 1 DEL 533)
+ **
+ ** Make sure Config space and Features agree.
+ **
+ ** Recall: writes are not normal to status register -
+ ** write a 1 to clear and a 0 to leave unchanged.
+ ** Can only reset bits.
+ */
+ if (chip->features & FE_66MHZ) {
+ if (!(status_reg & PCI_STATUS_66MHZ))
+ chip->features &= ~FE_66MHZ;
+ }
+ else {
+ if (status_reg & PCI_STATUS_66MHZ) {
+ status_reg = PCI_STATUS_66MHZ;
+ pci_write_config_word(pdev, PCI_STATUS, status_reg);
+ pci_read_config_word(pdev, PCI_STATUS, &status_reg);
+ }
+ }
+
+ if (driver_setup.ultra_scsi < 3 && (chip->features & FE_ULTRA3)) {
+ chip->features |= FE_ULTRA2;
+ chip->features &= ~FE_ULTRA3;
+ }
+ if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+ chip->features |= FE_ULTRA;
+ chip->features &= ~FE_ULTRA2;
+ }
+ if (driver_setup.ultra_scsi < 1)
+ chip->features &= ~FE_ULTRA;
+
+ if (!driver_setup.max_wide)
+ chip->features &= ~FE_WIDE;
+
+ /*
+ * C1010 Ultra3 support requires 16 bit data transfers.
+ */
+ if (!driver_setup.max_wide && (chip->features & FE_ULTRA3)) {
+ chip->features |= FE_ULTRA2;
+		chip->features &= ~FE_ULTRA3;
+ }
+
+ /*
+ ** Some features are required to be enabled in order to
+ ** work around some chip problems. :) ;)
+ ** (ITEM 12 of a DEL about the 896 I haven't yet).
+ ** We must ensure the chip will use WRITE AND INVALIDATE.
+ ** The revision number limit is for now arbitrary.
+ */
+ if (device_id == PCI_DEVICE_ID_NCR_53C896 && revision <= 0x10) {
+ chip->features |= (FE_WRIE | FE_CLSE);
+ pci_fix_up |= 3; /* Force appropriate PCI fix-up */
+ }
+
+#ifdef SCSI_NCR_PCI_FIX_UP_SUPPORT
+ /*
+ ** Try to fix up PCI config according to wished features.
+ */
+ if ((pci_fix_up & 1) && (chip->features & FE_CLSE) &&
+ !cache_line_size && suggested_cache_line_size) {
+ cache_line_size = suggested_cache_line_size;
+ pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ printk(NAME53C8XX ": PCI_CACHE_LINE_SIZE set to %d (fix-up).\n",
+ cache_line_size);
+ }
+
+ if ((pci_fix_up & 2) && cache_line_size &&
+ (chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ printk(NAME53C8XX": setting PCI_COMMAND_INVALIDATE (fix-up)\n");
+ command |= PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+ /*
+ ** Tune PCI LATENCY TIMER according to burst max length transfer.
+ ** (latency timer >= burst length + 6, we add 10 to be quite sure)
+ */
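+	/*
+	** For example, with burst_max = 7 (128-transfer bursts) the
+	** lower bound computed below is (1 << 7) + 6 + 10 = 144.
+	*/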
+
+ if (chip->burst_max && (latency_timer == 0 || (pci_fix_up & 4))) {
+ uchar lt = (1 << chip->burst_max) + 6 + 10;
+ if (latency_timer < lt) {
+ printk(NAME53C8XX
+ ": changing PCI_LATENCY_TIMER from %d to %d.\n",
+ (int) latency_timer, (int) lt);
+ latency_timer = lt;
+ pci_write_config_byte(pdev,
+ PCI_LATENCY_TIMER, latency_timer);
+ }
+ }
+
+#endif /* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+ /*
+ ** Initialise ncr_device structure with items required by ncr_attach.
+ */
+ device->pdev = pdev;
+ device->slot.bus = PciBusNumber(pdev);
+ device->slot.device_fn = PciDeviceFn(pdev);
+ device->slot.base = base;
+ device->slot.base_2 = base_2;
+ device->slot.io_port = io_port;
+ device->slot.irq = irq;
+ device->attach_done = 0;
+
+ return 0;
+}
+
+
+/*===================================================================
+** Detect and try to read SYMBIOS and TEKRAM NVRAM.
+**
+** Data can be used to order booting of boards.
+**
+** Data is saved in ncr_device structure if NVRAM found. This
+** is then used to find drive boot order for ncr_attach().
+**
+** NVRAM data is passed to Scsi_Host_Template later during
+** ncr_attach() for any device set up.
+**===================================================================
+*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static void __init ncr_get_nvram(ncr_device *devp, ncr_nvram *nvp)
+{
+ devp->nvram = nvp;
+ if (!nvp)
+ return;
+ /*
+ ** Get access to chip IO registers
+ */
+#ifdef SCSI_NCR_IOMAPPED
+ request_region(devp->slot.io_port, 128, NAME53C8XX);
+ devp->slot.base_io = devp->slot.io_port;
+#else
+ devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+ if (!devp->slot.reg)
+ return;
+#endif
+
+ /*
+ ** Try to read SYMBIOS nvram.
+ ** Try to read TEKRAM nvram if Symbios nvram not found.
+ */
+ if (!sym_read_Symbios_nvram(&devp->slot, &nvp->data.Symbios))
+ nvp->type = SCSI_NCR_SYMBIOS_NVRAM;
+ else if (!sym_read_Tekram_nvram(&devp->slot, devp->chip.device_id,
+ &nvp->data.Tekram))
+ nvp->type = SCSI_NCR_TEKRAM_NVRAM;
+ else {
+ nvp->type = 0;
+ devp->nvram = 0;
+ }
+
+ /*
+ ** Release access to chip IO registers
+ */
+#ifdef SCSI_NCR_IOMAPPED
+ release_region(devp->slot.base_io, 128);
+#else
+ unmap_pci_mem((u_long) devp->slot.reg, 128ul);
+#endif
+
+}
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Linux select queue depths function
+*/
+
+#define DEF_DEPTH (driver_setup.default_tags)
+#define ALL_TARGETS -2
+#define NO_TARGET -1
+#define ALL_LUNS -2
+#define NO_LUN -1
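+
+/*
+**	The tag_ctrl string filled in by the OPT_TAGS handling of
+**	sym53c8xx_setup() is a small per-device override language:
+**	'/' moves to the next host, 't<n>' selects a target, 'u<n>'
+**	selects a lun, 'q<n>' gives the queue depth for the current
+**	selection and '-' resets the selection to all targets/luns.
+**	For example, a tag_ctrl of "/t2q16" would give 16 tags to
+**	target 2 of the first host and DEF_DEPTH to everything else.
+*/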
+
+static int device_queue_depth(ncb_p np, int target, int lun)
+{
+ int c, h, t, u, v;
+ char *p = driver_setup.tag_ctrl;
+ char *ep;
+
+ h = -1;
+ t = NO_TARGET;
+ u = NO_LUN;
+ while ((c = *p++) != 0) {
+ v = simple_strtoul(p, &ep, 0);
+ switch(c) {
+ case '/':
+ ++h;
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ case 't':
+ if (t != target)
+ t = (target == v) ? v : NO_TARGET;
+ u = ALL_LUNS;
+ break;
+ case 'u':
+ if (u != lun)
+ u = (lun == v) ? v : NO_LUN;
+ break;
+ case 'q':
+ if (h == np->unit &&
+ (t == ALL_TARGETS || t == target) &&
+ (u == ALL_LUNS || u == lun))
+ return v;
+ break;
+ case '-':
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ default:
+ break;
+ }
+ p = ep;
+ }
+ return DEF_DEPTH;
+}
+
+static void sym53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist)
+{
+ struct scsi_device *device;
+
+ for (device = devlist; device; device = device->next) {
+ ncb_p np;
+ tcb_p tp;
+ lcb_p lp;
+ int numtags;
+
+ if (device->host != host)
+ continue;
+
+ np = ((struct host_data *) host->hostdata)->ncb;
+ tp = &np->target[device->id];
+ lp = ncr_lp(np, tp, device->lun);
+
+ /*
+ ** Select queue depth from driver setup.
+		** Do not use more than configured by the user.
+		** Use at least 2.
+		** Do not use more than our maximum.
+ */
+ numtags = device_queue_depth(np, device->id, device->lun);
+ if (numtags > tp->usrtags)
+ numtags = tp->usrtags;
+ if (!device->tagged_supported)
+ numtags = 1;
+ device->queue_depth = numtags;
+ if (device->queue_depth < 2)
+ device->queue_depth = 2;
+ if (device->queue_depth > MAX_TAGS)
+ device->queue_depth = MAX_TAGS;
+
+ /*
+ ** Since the queue depth is not tunable under Linux,
+ ** we need to know this value in order not to
+		** announce stupid things to the user.
+ */
+ if (lp) {
+ lp->numtags = lp->maxtags = numtags;
+ lp->scdev_depth = device->queue_depth;
+ }
+ ncr_setup_tags (np, device->id, device->lun);
+
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n",
+ np->unit, device->id, device->lun, device->queue_depth);
+#endif
+ }
+}
+
+/*
+** Linux entry point for info() function
+*/
+const char *sym53c8xx_info (struct Scsi_Host *host)
+{
+ return SCSI_NCR_DRIVER_NAME;
+}
+
+/*
+** Linux entry point of queuecommand() function
+*/
+
+int sym53c8xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+ ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+ unsigned long flags;
+ int sts;
+
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx_queue_command\n");
+#endif
+
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->SCp.ptr = NULL;
+ cmd->SCp.buffer = NULL;
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ cmd->__data_mapped = 0;
+ cmd->__data_mapping = 0;
+#endif
+
+ NCR_LOCK_NCB(np, flags);
+
+ if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
+ SetScsiResult(cmd, sts, 0);
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx : command not queued - result=%d\n", sts);
+#endif
+ }
+#ifdef DEBUG_SYM53C8XX
+ else
+printk("sym53c8xx : command successfully queued\n");
+#endif
+
+ NCR_UNLOCK_NCB(np, flags);
+
+ if (sts != DID_OK) {
+ unmap_scsi_data(np, cmd);
+ done(cmd);
+ }
+
+ return sts;
+}
+
+/*
+** Linux entry point of the interrupt handler.
+** Since linux versions > 1.3.70, we trust the kernel for
+** passing the internal host descriptor as 'dev_id'.
+** Otherwise, we scan the host list and call the interrupt
+** routine for each host that uses this IRQ.
+*/
+
+static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs)
+{
+ unsigned long flags;
+ ncb_p np = (ncb_p) dev_id;
+ Scsi_Cmnd *done_list;
+
+#ifdef DEBUG_SYM53C8XX
+ printk("sym53c8xx : interrupt received\n");
+#endif
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("[");
+
+ NCR_LOCK_NCB(np, flags);
+ ncr_exception(np);
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
+
+ if (done_list) {
+ NCR_LOCK_SCSI_DONE(np, flags);
+ ncr_flush_done_cmds(done_list);
+ NCR_UNLOCK_SCSI_DONE(np, flags);
+ }
+}
+
+/*
+** Linux entry point of the timer handler
+*/
+
+static void sym53c8xx_timeout(unsigned long npref)
+{
+ ncb_p np = (ncb_p) npref;
+ unsigned long flags;
+ Scsi_Cmnd *done_list;
+
+ NCR_LOCK_NCB(np, flags);
+ ncr_timeout((ncb_p) np);
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ if (done_list) {
+ NCR_LOCK_SCSI_DONE(np, flags);
+ ncr_flush_done_cmds(done_list);
+ NCR_UNLOCK_SCSI_DONE(np, flags);
+ }
+}
+
+/*
+** Linux entry point of reset() function
+*/
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+int sym53c8xx_reset(Scsi_Cmnd *cmd, unsigned int reset_flags)
+#else
+int sym53c8xx_reset(Scsi_Cmnd *cmd)
+#endif
+{
+ ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+ int sts;
+ unsigned long flags;
+ Scsi_Cmnd *done_list;
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ printk("sym53c8xx_reset: pid=%lu reset_flags=%x serial_number=%ld serial_number_at_timeout=%ld\n",
+ cmd->pid, reset_flags, cmd->serial_number, cmd->serial_number_at_timeout);
+#else
+ printk("sym53c8xx_reset: command pid %lu\n", cmd->pid);
+#endif
+
+ NCR_LOCK_NCB(np, flags);
+
+ /*
+ * We have to just ignore reset requests in some situations.
+ */
+#if defined SCSI_RESET_NOT_RUNNING
+ if (cmd->serial_number != cmd->serial_number_at_timeout) {
+ sts = SCSI_RESET_NOT_RUNNING;
+ goto out;
+ }
+#endif
+ /*
+ * If the mid-level driver told us reset is synchronous, it seems
+ * that we must call the done() callback for the involved command,
+ * even if this command was not queued to the low-level driver,
+ * before returning SCSI_RESET_SUCCESS.
+ */
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ sts = ncr_reset_bus(np, cmd,
+ (reset_flags & (SCSI_RESET_SYNCHRONOUS | SCSI_RESET_ASYNCHRONOUS)) == SCSI_RESET_SYNCHRONOUS);
+#else
+ sts = ncr_reset_bus(np, cmd, 0);
+#endif
+
+ /*
+ * Since we always reset the controller, when we return success,
+ * we add this information to the return code.
+ */
+#if defined SCSI_RESET_HOST_RESET
+ if (sts == SCSI_RESET_SUCCESS)
+ sts |= SCSI_RESET_HOST_RESET;
+#endif
+
+out:
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ ncr_flush_done_cmds(done_list);
+
+ return sts;
+}
+
+/*
+** Linux entry point of abort() function
+*/
+
+int sym53c8xx_abort(Scsi_Cmnd *cmd)
+{
+ ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb;
+ int sts;
+ unsigned long flags;
+ Scsi_Cmnd *done_list;
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ printk("sym53c8xx_abort: pid=%lu serial_number=%ld serial_number_at_timeout=%ld\n",
+ cmd->pid, cmd->serial_number, cmd->serial_number_at_timeout);
+#else
+ printk("sym53c8xx_abort: command pid %lu\n", cmd->pid);
+#endif
+
+ NCR_LOCK_NCB(np, flags);
+
+#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
+ /*
+ * We have to just ignore abort requests in some situations.
+ */
+ if (cmd->serial_number != cmd->serial_number_at_timeout) {
+ sts = SCSI_ABORT_NOT_RUNNING;
+ goto out;
+ }
+#endif
+
+ sts = ncr_abort_command(np, cmd);
+out:
+ done_list = np->done_list;
+ np->done_list = 0;
+ NCR_UNLOCK_NCB(np, flags);
+
+ ncr_flush_done_cmds(done_list);
+
+ return sts;
+}
+
+
+#ifdef MODULE
+int sym53c8xx_release(struct Scsi_Host *host)
+{
+#ifdef DEBUG_SYM53C8XX
+printk("sym53c8xx : release\n");
+#endif
+ ncr_detach(((struct host_data *) host->hostdata)->ncb);
+
+ return 1;
+}
+#endif
+
+
+/*
+** Scsi command waiting list management.
+**
+** It may happen that we cannot insert a scsi command into the start queue,
+** in the following circumstances:
+** too few preallocated ccb(s),
+** maxtags < cmd_per_lun of the Linux host control block,
+** etc...
+** Such scsi commands are inserted into a waiting list.
+** When a scsi command completes, we try to requeue the commands of the
+** waiting list.
+*/
+
+#define next_wcmd host_scribble
+
+static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd *wcmd;
+
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ cmd->next_wcmd = 0;
+ if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
+ else {
+ while ((wcmd->next_wcmd) != 0)
+ wcmd = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = (char *) cmd;
+ }
+}
+
+static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd)
+{
+ Scsi_Cmnd **pcmd = &np->waiting_list;
+
+ while (*pcmd) {
+ if (cmd == *pcmd) {
+ if (to_remove) {
+ *pcmd = (Scsi_Cmnd *) cmd->next_wcmd;
+ cmd->next_wcmd = 0;
+ }
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
+#endif
+ return cmd;
+ }
+ pcmd = (Scsi_Cmnd **) &(*pcmd)->next_wcmd;
+ }
+ return 0;
+}
+
+static void process_waiting_list(ncb_p np, int sts)
+{
+ Scsi_Cmnd *waiting_list, *wcmd;
+
+ waiting_list = np->waiting_list;
+ np->waiting_list = 0;
+
+#ifdef DEBUG_WAITING_LIST
+ if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
+#endif
+ while ((wcmd = waiting_list) != 0) {
+ waiting_list = (Scsi_Cmnd *) wcmd->next_wcmd;
+ wcmd->next_wcmd = 0;
+ if (sts == DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
+#endif
+ sts = ncr_queue_command(np, wcmd);
+ }
+ if (sts != DID_OK) {
+#ifdef DEBUG_WAITING_LIST
+ printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
+#endif
+ SetScsiResult(wcmd, sts, 0);
+ ncr_queue_done_cmd(np, wcmd);
+ }
+ }
+}
+
+#undef next_wcmd
+
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+
+/*=========================================================================
+** Proc file system stuff
+**
+** A read operation returns adapter information.
+** A write operation is a control command.
+** The string is parsed in the driver code and the command is passed
+** to the ncr_usercmd() function.
+**=========================================================================
+*/
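+
+/*
+**	Typical usage, assuming the usual /proc/scsi/<driver>/<host>
+**	location for this kernel generation:
+**
+**	    cat /proc/scsi/sym53c8xx/0			(adapter info)
+**	    echo "settags 2 8" >/proc/scsi/sym53c8xx/0	(control command)
+**	    echo "setdebug tiny queue" >/proc/scsi/sym53c8xx/0
+**
+**	The accepted verbs and their arguments are parsed by
+**	ncr_user_command() below.
+*/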
+
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+#define digit_to_bin(c) ((c) - '0')
+#define is_space(c) ((c) == ' ' || (c) == '\t')
+
+static int skip_spaces(char *ptr, int len)
+{
+ int cnt, c;
+
+ for (cnt = len; cnt > 0 && (c = *ptr++) && is_space(c); cnt--);
+
+ return (len - cnt);
+}
+
+static int get_int_arg(char *ptr, int len, u_long *pv)
+{
+ int cnt, c;
+ u_long v;
+
+ for (v = 0, cnt = len; cnt > 0 && (c = *ptr++) && is_digit(c); cnt--) {
+ v = (v * 10) + digit_to_bin(c);
+ }
+
+ if (pv)
+ *pv = v;
+
+ return (len - cnt);
+}
+
+static int is_keyword(char *ptr, int len, char *verb)
+{
+ int verb_len = strlen(verb);
+
+ if (len >= strlen(verb) && !memcmp(verb, ptr, verb_len))
+ return verb_len;
+ else
+ return 0;
+
+}
+
+#define SKIP_SPACES(min_spaces) \
+ if ((arg_len = skip_spaces(ptr, len)) < (min_spaces)) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+#define GET_INT_ARG(v) \
+ if (!(arg_len = get_int_arg(ptr, len, &(v)))) \
+ return -EINVAL; \
+ ptr += arg_len; len -= arg_len;
+
+
+/*
+** Parse a control command
+*/
+
+static int ncr_user_command(ncb_p np, char *buffer, int length)
+{
+ char *ptr = buffer;
+ int len = length;
+ struct usrcmd *uc = &np->user;
+ int arg_len;
+ u_long target;
+
+ bzero(uc, sizeof(*uc));
+
+ if (len > 0 && ptr[len-1] == '\n')
+ --len;
+
+ if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
+ uc->cmd = UC_SETSYNC;
+ else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
+ uc->cmd = UC_SETTAGS;
+ else if ((arg_len = is_keyword(ptr, len, "setorder")) != 0)
+ uc->cmd = UC_SETORDER;
+ else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
+ uc->cmd = UC_SETVERBOSE;
+ else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
+ uc->cmd = UC_SETWIDE;
+ else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
+ uc->cmd = UC_SETDEBUG;
+ else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
+ uc->cmd = UC_SETFLAG;
+ else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
+ uc->cmd = UC_RESETDEV;
+ else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
+ uc->cmd = UC_CLEARDEV;
+ else
+ arg_len = 0;
+
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd);
+#endif
+
+ if (!arg_len)
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+
+ switch(uc->cmd) {
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ case UC_SETFLAG:
+ case UC_RESETDEV:
+ case UC_CLEARDEV:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "all")) != 0) {
+ ptr += arg_len; len -= arg_len;
+ uc->target = ~0;
+ } else {
+ GET_INT_ARG(target);
+ uc->target = (1<<target);
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: target=%ld\n", target);
+#endif
+ }
+ break;
+ }
+
+ switch(uc->cmd) {
+ case UC_SETVERBOSE:
+ case UC_SETSYNC:
+ case UC_SETTAGS:
+ case UC_SETWIDE:
+ SKIP_SPACES(1);
+ GET_INT_ARG(uc->data);
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETORDER:
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "simple")))
+ uc->data = M_SIMPLE_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "ordered")))
+ uc->data = M_ORDERED_TAG;
+ else if ((arg_len = is_keyword(ptr, len, "default")))
+ uc->data = 0;
+ else
+ return -EINVAL;
+ break;
+ case UC_SETDEBUG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "alloc")))
+ uc->data |= DEBUG_ALLOC;
+ else if ((arg_len = is_keyword(ptr, len, "phase")))
+ uc->data |= DEBUG_PHASE;
+ else if ((arg_len = is_keyword(ptr, len, "queue")))
+ uc->data |= DEBUG_QUEUE;
+ else if ((arg_len = is_keyword(ptr, len, "result")))
+ uc->data |= DEBUG_RESULT;
+ else if ((arg_len = is_keyword(ptr, len, "pointer")))
+ uc->data |= DEBUG_POINTER;
+ else if ((arg_len = is_keyword(ptr, len, "script")))
+ uc->data |= DEBUG_SCRIPT;
+ else if ((arg_len = is_keyword(ptr, len, "tiny")))
+ uc->data |= DEBUG_TINY;
+ else if ((arg_len = is_keyword(ptr, len, "timing")))
+ uc->data |= DEBUG_TIMING;
+ else if ((arg_len = is_keyword(ptr, len, "nego")))
+ uc->data |= DEBUG_NEGO;
+ else if ((arg_len = is_keyword(ptr, len, "tags")))
+ uc->data |= DEBUG_TAGS;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+#ifdef DEBUG_PROC_INFO
+printk("ncr_user_command: data=%ld\n", uc->data);
+#endif
+ break;
+ case UC_SETFLAG:
+ while (len > 0) {
+ SKIP_SPACES(1);
+ if ((arg_len = is_keyword(ptr, len, "trace")))
+ uc->data |= UF_TRACE;
+ else if ((arg_len = is_keyword(ptr, len, "no_disc")))
+ uc->data |= UF_NODISC;
+ else
+ return -EINVAL;
+ ptr += arg_len; len -= arg_len;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (len)
+ return -EINVAL;
+ else {
+ long flags;
+
+ NCR_LOCK_NCB(np, flags);
+ ncr_usercmd (np);
+ NCR_UNLOCK_NCB(np, flags);
+ }
+ return length;
+}
+
+#endif /* SCSI_NCR_USER_COMMAND_SUPPORT */
+
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+struct info_str
+{
+ char *buffer;
+ int length;
+ int offset;
+ int pos;
+};
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+ if (info->pos + len > info->length)
+ len = info->length - info->pos;
+
+ if (info->pos + len < info->offset) {
+ info->pos += len;
+ return;
+ }
+ if (info->pos < info->offset) {
+ data += (info->offset - info->pos);
+ len -= (info->offset - info->pos);
+ }
+
+ if (len > 0) {
+ memcpy(info->buffer + info->pos, data, len);
+ info->pos += len;
+ }
+}
+
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+ va_list args;
+ char buf[81];
+ int len;
+
+ va_start(args, fmt);
+ len = vsprintf(buf, fmt, args);
+ va_end(args);
+
+ copy_mem_info(info, buf, len);
+ return len;
+}
+
+/*
+** Copy formatted information into the input buffer.
+*/
+
+static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len)
+{
+ struct info_str info;
+#ifdef CONFIG_ALL_PPC
+ struct device_node* of_node;
+#endif
+
+ info.buffer = ptr;
+ info.length = len;
+ info.offset = offset;
+ info.pos = 0;
+
+ copy_info(&info, "General information:\n");
+ copy_info(&info, " Chip " NAME53C "%s, device id 0x%x, "
+ "revision id 0x%x\n",
+ np->chip_name, np->device_id, np->revision_id);
+ copy_info(&info, " On PCI bus %d, device %d, function %d, "
+#ifdef __sparc__
+ "IRQ %s\n",
+#else
+ "IRQ %d\n",
+#endif
+ np->bus, (np->device_fn & 0xf8) >> 3, np->device_fn & 7,
+#ifdef __sparc__
+ __irq_itoa(np->irq));
+#else
+ (int) np->irq);
+#endif
+#ifdef CONFIG_ALL_PPC
+ of_node = find_pci_device_OFnode(np->bus, np->device_fn);
+ if (of_node && of_node->full_name)
+ copy_info(&info, "PPC OpenFirmware path : %s\n", of_node->full_name);
+#endif
+ copy_info(&info, " Synchronous period factor %d, "
+ "max commands per lun %d\n",
+ (int) np->minsync, MAX_TAGS);
+
+ if (driver_setup.debug || driver_setup.verbose > 1) {
+ copy_info(&info, " Debug flags 0x%x, verbosity level %d\n",
+ driver_setup.debug, driver_setup.verbose);
+ }
+
+ return info.pos > info.offset? info.pos - info.offset : 0;
+}
+
+#endif /* SCSI_NCR_USER_INFO_SUPPORT */
+
+/*
+** Entry point of the scsi proc fs of the driver.
+** - func = 0 means read (returns adapter infos)
+** - func = 1 means write (parse user control command)
+*/
+
+static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset,
+ int length, int hostno, int func)
+{
+ struct Scsi_Host *host;
+ struct host_data *host_data;
+ ncb_p ncb = 0;
+ int retv;
+
+#ifdef DEBUG_PROC_INFO
+printk("sym53c8xx_proc_info: hostno=%d, func=%d\n", hostno, func);
+#endif
+
+ for (host = first_host; host; host = host->next) {
+ if (host->hostt != first_host->hostt)
+ continue;
+ if (host->host_no == hostno) {
+ host_data = (struct host_data *) host->hostdata;
+ ncb = host_data->ncb;
+ break;
+ }
+ }
+
+ if (!ncb)
+ return -EINVAL;
+
+ if (func) {
+#ifdef SCSI_NCR_USER_COMMAND_SUPPORT
+ retv = ncr_user_command(ncb, buffer, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+ else {
+ if (start)
+ *start = buffer;
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+ retv = ncr_host_info(ncb, buffer, offset, length);
+#else
+ retv = -EINVAL;
+#endif
+ }
+
+ return retv;
+}
+
+
+/*=========================================================================
+** End of proc file system stuff
+**=========================================================================
+*/
+#endif
+
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+
+/*
+ * 24C16 EEPROM reading.
+ *
+ * GPIO0 - data in/data out
+ * GPIO1 - clock
+ * Symbios NVRAM wiring now also used by Tekram.
+ */
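+
+/*
+ * The read sequence implemented by sym_read_S24C16_nvram() below is
+ * the usual serial EEPROM one: START, device code 0xa0 merged with
+ * the block-select bits of the offset, the offset LSB, a repeated
+ * START with the read bit set (0xa1), then sequential byte reads
+ * (only the last one not acknowledged), and finally STOP.
+ */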
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ * Set/clear data/clock bit in GPIO0
+ */
+static void __init
+S24C16_set_bit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+{
+ UDELAY (5);
+ switch (bit_mode){
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (5);
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+static void __init S24C16_start(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void __init S24C16_stop(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+static void __init
+S24C16_do_bit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+{
+ S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB (nc_gpreg);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+static void __init
+S24C16_write_ack(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl & 0xfe);
+ S24C16_do_bit(np, 0, write_bit, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+static void __init
+S24C16_read_ack(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl | 0x01);
+ S24C16_do_bit(np, read_bit, 1, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+static void __init
+S24C16_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+
+ for (x = 0; x < 8; x++)
+ S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+static void __init
+S24C16_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+ u_char read_bit;
+
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ S24C16_do_bit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Read 'len' bytes starting at 'offset'.
+ */
+static int __init
+sym_read_S24C16_nvram (ncr_slot *np, int offset, u_char *data, int len)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_char ack_data;
+ int retv = 1;
+ int x;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+ gpcntl = old_gpcntl & 0xfc;
+
+	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */
+ OUTB (nc_gpreg, old_gpreg);
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ S24C16_stop(np, &gpreg);
+
+ /* activate NVRAM */
+ S24C16_start(np, &gpreg);
+
+ /* write device code and random address MSB */
+ S24C16_write_byte(np, &ack_data,
+ 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* write random address LSB */
+ S24C16_write_byte(np, &ack_data,
+ offset & 0xff, &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* regenerate START state to set up for reading */
+ S24C16_start(np, &gpreg);
+
+ /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+ S24C16_write_byte(np, &ack_data,
+ 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* now set up GPIO0 for inputting data */
+ gpcntl |= 0x01;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all requested data - only part of total NVRAM */
+ for (x = 0; x < len; x++)
+ S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+ /* finally put NVRAM back in inactive mode */
+ gpcntl &= 0xfe;
+ OUTB (nc_gpcntl, gpcntl);
+ S24C16_stop(np, &gpreg);
+ retv = 0;
+out:
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ * Try reading Symbios NVRAM.
+ * Return 0 if OK.
+ */
+static int __init sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+{
+ static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ /* probe the 24c16 and read the SYMBIOS 24c16 area */
+ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+ return 1;
+
+ /* check valid NVRAM signature, verify byte count and checksum */
+ if (nvram->type != 0 ||
+ memcmp(nvram->trailer, Symbios_trailer, 6) ||
+ nvram->byte_count != len - 12)
+ return 1;
+
+ /* verify checksum */
+ for (x = 6, csum = 0; x < len - 6; x++)
+ csum += data[x];
+ if (csum != nvram->checksum)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * 93C46 EEPROM reading.
+ *
+ * GPIO0 - data in
+ * GPIO1 - data out
+ * GPIO2 - clock
+ * GPIO4 - chip select
+ *
+ * Used by Tekram.
+ */
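+
+/*
+ * Each 16-bit word is fetched by shifting out a 9-bit frame built
+ * as 0x180 | word_address (start bit, READ opcode, 6-bit address)
+ * and then clocking 16 data bits back in; see T93C46_Read_Data()
+ * and T93C46_Send_Command() below.
+ */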
+
+/*
+ * Pulse clock bit in GPIO2
+ */
+static void __init T93C46_Clk(ncr_slot *np, u_char *gpreg)
+{
+ OUTB (nc_gpreg, *gpreg | 0x04);
+ UDELAY (2);
+ OUTB (nc_gpreg, *gpreg);
+}
+
+/*
+ * Read bit from NVRAM
+ */
+static void __init T93C46_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+{
+ UDELAY (2);
+ T93C46_Clk(np, gpreg);
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Write bit to GPIO1
+ */
+static void __init T93C46_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+{
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ *gpreg |= 0x10;
+
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void __init T93C46_Stop(ncr_slot *np, u_char *gpreg)
+{
+ *gpreg &= 0xef;
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+static void __init
+T93C46_Send_Command(ncr_slot *np, u_short write_data,
+ u_char *read_bit, u_char *gpreg)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * READ 2 bytes from the NVRAM
+ */
+static void __init
+T93C46_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+{
+ int x;
+ u_char read_bit;
+
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ T93C46_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read Tekram NVRAM data.
+ */
+static int __init
+T93C46_Read_Data(ncr_slot *np, u_short *data,int len,u_char *gpreg)
+{
+ u_char read_bit;
+ int x;
+
+ for (x = 0; x < len; x++) {
+
+ /* output read command and address */
+ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 1; /* Bad */
+ T93C46_Read_Word(np, &data[x], gpreg);
+ T93C46_Stop(np, gpreg);
+ }
+
+ return 0;
+}
+
+/*
+ * Try reading 93C46 Tekram NVRAM.
+ */
+static int __init
+sym_read_T93C46_nvram (ncr_slot *np, Tekram_nvram *nvram)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ int retv = 1;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+
+	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 into a known state, 0 in,
+ 1/2/4 out */
+ gpreg = old_gpreg & 0xe9;
+ OUTB (nc_gpreg, gpreg);
+ gpcntl = (old_gpcntl & 0xe9) | 0x09;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all of NVRAM, 64 words */
+ retv = T93C46_Read_Data(np, (u_short *) nvram,
+ sizeof(*nvram) / sizeof(short), &gpreg);
+
+ /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+/*
+ * Try reading Tekram NVRAM.
+ * Return 0 if OK.
+ */
+static int __init
+sym_read_Tekram_nvram (ncr_slot *np, u_short device_id, Tekram_nvram *nvram)
+{
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ switch (device_id) {
+ case PCI_DEVICE_ID_NCR_53C885:
+ case PCI_DEVICE_ID_NCR_53C895:
+ case PCI_DEVICE_ID_NCR_53C896:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ break;
+ case PCI_DEVICE_ID_NCR_53C875:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ if (!x)
+ break;
+ default:
+ x = sym_read_T93C46_nvram(np, nvram);
+ break;
+ }
+ if (x)
+ return 1;
+
+ /* verify checksum */
+ for (x = 0, csum = 0; x < len - 1; x += 2)
+ csum += data[x] + (data[x+1] << 8);
+ if (csum != 0x1234)
+ return 1;
+
+ return 0;
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*
+** Module stuff
+*/
+
+#ifdef MODULE
+Scsi_Host_Template driver_template = SYM53C8XX;
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/sym53c8xx.h b/linux/src/drivers/scsi/sym53c8xx.h
new file mode 100644
index 0000000..128fe16
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx.h
@@ -0,0 +1,116 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+#ifndef SYM53C8XX_H
+#define SYM53C8XX_H
+
+#include "sym53c8xx_defs.h"
+
+/*
+** Define Scsi_Host_Template parameters
+**
+** Used by hosts.c and sym53c8xx.c with module configuration.
+*/
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#include <scsi/scsicam.h>
+
+int sym53c8xx_abort(Scsi_Cmnd *);
+int sym53c8xx_detect(Scsi_Host_Template *tpnt);
+const char *sym53c8xx_info(struct Scsi_Host *host);
+int sym53c8xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int sym53c8xx_reset(Scsi_Cmnd *, unsigned int);
+
+#ifdef MODULE
+int sym53c8xx_release(struct Scsi_Host *);
+#else
+#define sym53c8xx_release NULL
+#endif
+
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,75)
+
+#define SYM53C8XX { name: "", \
+ detect: sym53c8xx_detect, \
+ release: sym53c8xx_release, \
+ info: sym53c8xx_info, \
+ queuecommand: sym53c8xx_queue_command,\
+ abort: sym53c8xx_abort, \
+ reset: sym53c8xx_reset, \
+ bios_param: scsicam_bios_param, \
+ can_queue: SCSI_NCR_CAN_QUEUE, \
+ this_id: 7, \
+ sg_tablesize: SCSI_NCR_SG_TABLESIZE, \
+ cmd_per_lun: SCSI_NCR_CMD_PER_LUN, \
+ use_clustering: DISABLE_CLUSTERING}
+
+#else
+
+#define SYM53C8XX { NULL, NULL, NULL, NULL, \
+ NULL, sym53c8xx_detect, \
+ sym53c8xx_release, sym53c8xx_info, NULL, \
+ sym53c8xx_queue_command,sym53c8xx_abort, \
+ sym53c8xx_reset, NULL, scsicam_bios_param, \
+ SCSI_NCR_CAN_QUEUE, 7, \
+ SCSI_NCR_SG_TABLESIZE, SCSI_NCR_CMD_PER_LUN, \
+ 0, 0, DISABLE_CLUSTERING}
+
+#endif /* LINUX_VERSION_CODE */
+
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
+#endif /* SYM53C8XX_H */
diff --git a/linux/src/drivers/scsi/sym53c8xx_comm.h b/linux/src/drivers/scsi/sym53c8xx_comm.h
new file mode 100644
index 0000000..ba961db
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx_comm.h
@@ -0,0 +1,2717 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+/*
+** This file contains definitions and code that the
+** sym53c8xx and ncr53c8xx drivers should share.
+** The sharing will be achieved in a further version
+** of the driver bundle. For now, only the ncr53c8xx
+** driver includes this file.
+*/
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+
+/*==========================================================
+**
+** Hmmm... What complex some PCI-HOST bridges actually
+** are, despite the fact that the PCI specifications
+**	Hmmm... How complex some PCI-HOST bridges actually
+**	are, despite the fact that the PCI specifications
+**	look so smart and simple! ;-)
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,47)
+#define SCSI_NCR_DYNAMIC_DMA_MAPPING
+#endif
+
+/*==========================================================
+**
+**	Miscellaneous defines.
+**
+**==========================================================
+*/
+
+#define u_char unsigned char
+#define u_short unsigned short
+#define u_int unsigned int
+#define u_long unsigned long
+
+#ifndef bcopy
+#define bcopy(s, d, n) memcpy((d), (s), (n))
+#endif
+
+#ifndef bcmp
+#define bcmp(s, d, n) memcmp((d), (s), (n))
+#endif
+
+#ifndef bzero
+#define bzero(d, n) memset((d), 0, (n))
+#endif
+
+#ifndef offsetof
+#define offsetof(t, m) ((size_t) (&((t *)0)->m))
+#endif
+
+/*==========================================================
+**
+** assert ()
+**
+**==========================================================
+**
+** modified copy from 386bsd:/usr/include/sys/assert.h
+**
+**----------------------------------------------------------
+*/
+
+#define assert(expression) { \
+ if (!(expression)) { \
+ (void)panic( \
+ "assertion \"%s\" failed: file \"%s\", line %d\n", \
+ #expression, \
+ __FILE__, __LINE__); \
+ } \
+}
+
+/*==========================================================
+**
+** Debugging tags
+**
+**==========================================================
+*/
+
+#define DEBUG_ALLOC (0x0001)
+#define DEBUG_PHASE (0x0002)
+#define DEBUG_QUEUE (0x0008)
+#define DEBUG_RESULT (0x0010)
+#define DEBUG_POINTER (0x0020)
+#define DEBUG_SCRIPT (0x0040)
+#define DEBUG_TINY (0x0080)
+#define DEBUG_TIMING (0x0100)
+#define DEBUG_NEGO (0x0200)
+#define DEBUG_TAGS (0x0400)
+#define DEBUG_SCATTER (0x0800)
+#define DEBUG_IC (0x1000)
+
+/*
+** Enable/Disable debug messages.
+** Can be changed at runtime too.
+*/
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
+ #define DEBUG_FLAGS ncr_debug
+#else
+ #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
+#endif
+
+/*==========================================================
+**
+** A la VMS/CAM-3 queue management.
+** Implemented from linux list management.
+**
+**==========================================================
+*/
+
+typedef struct xpt_quehead {
+ struct xpt_quehead *flink; /* Forward pointer */
+ struct xpt_quehead *blink; /* Backward pointer */
+} XPT_QUEHEAD;
+
+#define xpt_que_init(ptr) do { \
+ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \
+} while (0)
+
+static inline void __xpt_que_add(struct xpt_quehead * new,
+ struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = new;
+ new->flink = flink;
+ new->blink = blink;
+ blink->flink = new;
+}
+
+static inline void __xpt_que_del(struct xpt_quehead * blink,
+ struct xpt_quehead * flink)
+{
+ flink->blink = blink;
+ blink->flink = flink;
+}
+
+static inline int xpt_que_empty(struct xpt_quehead *head)
+{
+ return head->flink == head;
+}
+
+static inline void xpt_que_splice(struct xpt_quehead *list,
+ struct xpt_quehead *head)
+{
+ struct xpt_quehead *first = list->flink;
+
+ if (first != list) {
+ struct xpt_quehead *last = list->blink;
+ struct xpt_quehead *at = head->flink;
+
+ first->blink = head;
+ head->flink = first;
+
+ last->flink = at;
+ at->blink = last;
+ }
+}
+
+#define xpt_que_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+
+#define xpt_insque(new, pos) __xpt_que_add(new, pos, (pos)->flink)
+
+#define xpt_remque(el) __xpt_que_del((el)->blink, (el)->flink)
+
+#define xpt_insque_head(new, head) __xpt_que_add(new, head, (head)->flink)
+
+static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->flink;
+
+ if (elem != head)
+ __xpt_que_del(head, elem->flink);
+ else
+ elem = 0;
+ return elem;
+}
+
+#define xpt_insque_tail(new, head) __xpt_que_add(new, (head)->blink, head)
+
+static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
+{
+ struct xpt_quehead *elem = head->blink;
+
+ if (elem != head)
+ __xpt_que_del(elem->blink, head);
+ else
+ elem = 0;
+ return elem;
+}
+
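+/*
+** Illustrative sketch, not part of the driver: typical use of the
+** queue primitives above.  The `ccb' structure and its `link_ccbq'
+** member are hypothetical names chosen for the example.
+**
+**	struct ccb { XPT_QUEHEAD link_ccbq; int tag; };
+**	static struct ccb a_ccb;
+**	XPT_QUEHEAD free_ccbq;
+**	struct xpt_quehead *qp;
+**	struct ccb *cp;
+**
+**	xpt_que_init(&free_ccbq);
+**	xpt_insque_tail(&a_ccb.link_ccbq, &free_ccbq);
+**	qp = xpt_remque_head(&free_ccbq);
+**	if (qp)
+**		cp = xpt_que_entry(qp, struct ccb, link_ccbq);
+*/
+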
+/*==========================================================
+**
+** Simple Wrapper to kernel PCI bus interface.
+**
+** This wrapper lets us get rid of the old kernel PCI
+** interface while still preserving linux-2.0
+** compatibility. In fact, it is mostly an incomplete
+** emulation of the new PCI code for pre-2.2 kernels.
+** When kernel-2.0 support is dropped, we will
+** just have to remove most of this code.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0)
+
+typedef struct pci_dev *pcidev_t;
+#define PCIDEV_NULL (0)
+#define PciBusNumber(d) (d)->bus->number
+#define PciDeviceFn(d) (d)->devfn
+#define PciVendorId(d) (d)->vendor
+#define PciDeviceId(d) (d)->device
+#define PciIrqLine(d) (d)->irq
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->resource[index].start;
+ if ((pdev->resource[index].flags & 0x7) == 0x4)
+ ++index;
+ return ++index;
+}
+#else
+static int __init
+pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+{
+ *base = pdev->base_address[index++];
+ if ((*base & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ *base |= (((u_long)pdev->base_address[index]) << 32);
+#endif
+ ++index;
+ }
+ return index;
+}
+#endif
+
+#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
+
+typedef unsigned int pcidev_t;
+#define PCIDEV_NULL (~0u)
+#define PciBusNumber(d) ((d)>>8)
+#define PciDeviceFn(d) ((d)&0xff)
+#define __PciDev(busn, devfn) (((busn)<<8)+(devfn))
+
+#define pci_present pcibios_present
+
+#define pci_read_config_byte(d, w, v) \
+ pcibios_read_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_word(d, w, v) \
+ pcibios_read_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_read_config_dword(d, w, v) \
+ pcibios_read_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+#define pci_write_config_byte(d, w, v) \
+ pcibios_write_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_word(d, w, v) \
+ pcibios_write_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
+#define pci_write_config_dword(d, w, v) \
+ pcibios_write_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
+
+static pcidev_t __init
+pci_find_device(unsigned int vendor, unsigned int device, pcidev_t prev)
+{
+ static unsigned short pci_index;
+ int retv;
+ unsigned char bus_number, device_fn;
+
+ if (prev == PCIDEV_NULL)
+ pci_index = 0;
+ else
+ ++pci_index;
+ retv = pcibios_find_device (vendor, device, pci_index,
+ &bus_number, &device_fn);
+ return retv ? PCIDEV_NULL : __PciDev(bus_number, device_fn);
+}
+
+static u_short __init PciVendorId(pcidev_t dev)
+{
+ u_short vendor_id;
+ pci_read_config_word(dev, PCI_VENDOR_ID, &vendor_id);
+ return vendor_id;
+}
+
+static u_short __init PciDeviceId(pcidev_t dev)
+{
+ u_short device_id;
+ pci_read_config_word(dev, PCI_DEVICE_ID, &device_id);
+ return device_id;
+}
+
+static u_int __init PciIrqLine(pcidev_t dev)
+{
+ u_char irq;
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ return irq;
+}
+
+static int __init
+pci_get_base_address(pcidev_t dev, int offset, u_long *base)
+{
+ u_int32 tmp;
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base = tmp;
+ offset += sizeof(u_int32);
+ if ((tmp & 0x7) == 0x4) {
+#if BITS_PER_LONG > 32
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
+ *base |= (((u_long)tmp) << 32);
+#endif
+ offset += sizeof(u_int32);
+ }
+ return offset;
+}
+
+#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
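+
+/*
+** Illustrative sketch, not part of the driver: with the wrapper
+** above, BARs are read back-to-back; the helper returns the index
+** of the next BAR, so a 64 bit BAR (which occupies two registers)
+** is skipped correctly.  The detection code further down uses the
+** same pattern.
+**
+**	int i = 0;
+**	u_long io_port, base;
+**
+**	i = pci_get_base_address(pdev, i, &io_port);
+**	i = pci_get_base_address(pdev, i, &base);
+*/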
+
+/*==========================================================
+**
+** SMP threading.
+**
+** Assuming that SMP systems are generally high end
+** systems and may use several SCSI adapters, we are
+** using one lock per controller instead of some global
+** one. For the moment (linux-2.1.95), the driver's entry
+** points are called with the 'io_request_lock' lock
+** held, so:
+** - We are uselessly losing a couple of micro-seconds
+** to lock the controller data structure.
+** - But the driver is not broken by design for SMP and
+** so can be more resistant to bugs or bad changes in
+** the IO sub-system code.
+** - A small advantage could be that the interrupt code
+**   is locked at the granularity we want (e.g.: per controller).
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
+spinlock_t DRIVER_SMP_LOCK = SPIN_LOCK_UNLOCKED;
+#define NCR_LOCK_DRIVER(flags) spin_lock_irqsave(&DRIVER_SMP_LOCK, flags)
+#define NCR_UNLOCK_DRIVER(flags) \
+ spin_unlock_irqrestore(&DRIVER_SMP_LOCK, flags)
+
+#define NCR_INIT_LOCK_NCB(np) spin_lock_init(&np->smp_lock)
+#define NCR_LOCK_NCB(np, flags) spin_lock_irqsave(&np->smp_lock, flags)
+#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) \
+ spin_lock_irqsave(&io_request_lock, flags)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) \
+ spin_unlock_irqrestore(&io_request_lock, flags)
+
+#else
+
+#define NCR_LOCK_DRIVER(flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_DRIVER(flags) do { restore_flags(flags); } while (0)
+
+#define NCR_INIT_LOCK_NCB(np) do { } while (0)
+#define NCR_LOCK_NCB(np, flags) do { save_flags(flags); cli(); } while (0)
+#define NCR_UNLOCK_NCB(np, flags) do { restore_flags(flags); } while (0)
+
+#define NCR_LOCK_SCSI_DONE(np, flags) do {;} while (0)
+#define NCR_UNLOCK_SCSI_DONE(np, flags) do {;} while (0)
+
+#endif
+
+/*==========================================================
+**
+** Memory mapped IO
+**
+** Since linux-2.1, we must use ioremap() to map the io
+** memory space and iounmap() to unmap it. This allows
+** portability. Linux 1.3.X and 2.0.X allow remapping
+** physical page addresses greater than the highest
+** physical memory address to kernel virtual pages with
+** vremap() / vfree(). That was not portable but worked
+** on the i386 architecture.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#define ioremap vremap
+#define iounmap vfree
+#endif
+
+#ifdef __sparc__
+# include <asm/irq.h>
+# define pcivtobus(p) bus_dvma_to_mem(p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#elif defined(__alpha__)
+# define pcivtobus(p) ((p) & 0xfffffffful)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#else /* others */
+# define pcivtobus(p) (p)
+# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
+#endif
+
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+static u_long __init remap_pci_mem(u_long base, u_long size)
+{
+ u_long page_base = ((u_long) base) & PAGE_MASK;
+ u_long page_offs = ((u_long) base) - page_base;
+ u_long page_remapped = (u_long) ioremap(page_base, page_offs+size);
+
+ return page_remapped? (page_remapped + page_offs) : 0UL;
+}
+
+static void __init unmap_pci_mem(u_long vaddr, u_long size)
+{
+ if (vaddr)
+ iounmap((void *) (vaddr & PAGE_MASK));
+}
+
+#endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */
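+
+/*
+** Illustrative sketch, not part of the driver: mapping a chip's
+** register window and releasing it again, as the NVRAM code below
+** does.  `base' is the physical BAR value, 128 the window size.
+**
+**	u_long vaddr = remap_pci_mem(base, 128ul);
+**	if (vaddr) {
+**		... access the chip registers through vaddr ...
+**		unmap_pci_mem(vaddr, 128ul);
+**	}
+*/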
+
+/*==========================================================
+**
+** Insert a delay in micro-seconds and milli-seconds.
+**
+** Under Linux, udelay() is restricted to delay <
+** 1 milli-second. In fact, it generally works for up
+** to 1 second delay. Since 2.1.105, the mdelay() function
+** is provided for delays in milli-seconds.
+** Under 2.0 kernels, udelay() is an inline function
+** that is very inaccurate on Pentium processors.
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
+#define UDELAY udelay
+#define MDELAY mdelay
+#else
+static void UDELAY(long us) { udelay(us); }
+static void MDELAY(long ms) { while (ms--) UDELAY(1000); }
+#endif
+
+/*==========================================================
+**
+** Simple power of two buddy-like allocator.
+**
+** This simple code is not intended to be fast, but to
+** provide power of 2 aligned memory allocations.
+** Since the SCRIPTS processor only supplies 8 bit
+** arithmetic, this allocator allows simple and fast
+** address calculations from the SCRIPTS code.
+** In addition, cache line alignment is guaranteed for
+** power of 2 cache line size.
+** Enhanced in linux-2.3.44 to provide a memory pool
+** per pcidev to support dynamic dma mapping. (I would
+** have preferred a real bus abstraction, btw).
+**
+**==========================================================
+*/
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
+#define __GetFreePages(flags, order) __get_free_pages(flags, order)
+#else
+#define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
+#endif
+
+#define MEMO_SHIFT 4 /* 16 bytes minimum memory chunk */
+#if PAGE_SIZE >= 8192
+#define MEMO_PAGE_ORDER 0 /* 1 PAGE maximum */
+#else
+#define MEMO_PAGE_ORDER 1 /* 2 PAGES maximum */
+#endif
+#define MEMO_FREE_UNUSED /* Free unused pages immediately */
+#define MEMO_WARN 1
+#define MEMO_GFP_FLAGS GFP_ATOMIC
+#define MEMO_CLUSTER_SHIFT (PAGE_SHIFT+MEMO_PAGE_ORDER)
+#define MEMO_CLUSTER_SIZE (1UL << MEMO_CLUSTER_SHIFT)
+#define MEMO_CLUSTER_MASK (MEMO_CLUSTER_SIZE-1)
+
+typedef u_long m_addr_t; /* Enough bits to bit-hack addresses */
+typedef pcidev_t m_bush_t; /* Something that addresses DMAable */
+
+typedef struct m_link { /* Link between free memory chunks */
+ struct m_link *next;
+} m_link_s;
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+typedef struct m_vtob { /* Virtual to Bus address translation */
+ struct m_vtob *next;
+ m_addr_t vaddr;
+ m_addr_t baddr;
+} m_vtob_s;
+#define VTOB_HASH_SHIFT 5
+#define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT)
+#define VTOB_HASH_MASK (VTOB_HASH_SIZE-1)
+#define VTOB_HASH_CODE(m) \
+ ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
+#endif
+
+typedef struct m_pool { /* Memory pool of a given kind */
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ m_bush_t bush;
+ m_addr_t (*getp)(struct m_pool *);
+ void (*freep)(struct m_pool *, m_addr_t);
+#define M_GETP() mp->getp(mp)
+#define M_FREEP(p) mp->freep(mp, p)
+#define GetPages() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define FreePages(p) free_pages(p, MEMO_PAGE_ORDER)
+ int nump;
+ m_vtob_s *(vtob[VTOB_HASH_SIZE]);
+ struct m_pool *next;
+#else
+#define M_GETP() __GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
+#define M_FREEP(p) free_pages(p, MEMO_PAGE_ORDER)
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+ struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
+} m_pool_s;
+
+static void *___m_alloc(m_pool_s *mp, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ int j;
+ m_addr_t a;
+ m_link_s *h = mp->h;
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return 0;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ j = i;
+ while (!h[j].next) {
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ h[j].next = (m_link_s *) M_GETP();
+ if (h[j].next)
+ h[j].next->next = 0;
+ break;
+ }
+ ++j;
+ s <<= 1;
+ }
+ a = (m_addr_t) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (m_link_s *) (a+s);
+ h[j].next->next = 0;
+ }
+ }
+#ifdef DEBUG
+ printk("___m_alloc(%d) = %p\n", size, (void *) a);
+#endif
+ return (void *) a;
+}
+
+static void ___m_free(m_pool_s *mp, void *ptr, int size)
+{
+ int i = 0;
+ int s = (1 << MEMO_SHIFT);
+ m_link_s *q;
+ m_addr_t a, b;
+ m_link_s *h = mp->h;
+
+#ifdef DEBUG
+ printk("___m_free(%p, %d)\n", ptr, size);
+#endif
+
+ if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
+ return;
+
+ while (size > s) {
+ s <<= 1;
+ ++i;
+ }
+
+ a = (m_addr_t) ptr;
+
+ while (1) {
+#ifdef MEMO_FREE_UNUSED
+ if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
+ M_FREEP(a);
+ break;
+ }
+#endif
+ b = a ^ s;
+ q = &h[i];
+ while (q->next && q->next != (m_link_s *) b) {
+ q = q->next;
+ }
+ if (!q->next) {
+ ((m_link_s *) a)->next = h[i].next;
+ h[i].next = (m_link_s *) a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ ++i;
+ }
+}
+
+static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags)
+{
+ void *p;
+
+ p = ___m_alloc(mp, size);
+
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("new %-10s[%4d] @%p.\n", name, size, p);
+
+ if (p)
+ bzero(p, size);
+ else if (uflags & MEMO_WARN)
+ printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
+
+ return p;
+}
+
+#define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN)
+
+static void __m_free(m_pool_s *mp, void *ptr, int size, char *name)
+{
+ if (DEBUG_FLAGS & DEBUG_ALLOC)
+ printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
+
+ ___m_free(mp, ptr, size);
+
+}
+
+/*
+ * With pci bus iommu support, we use a default pool of unmapped memory
+ * for memory we do not need to DMA from/to, and one pool per pcidev for
+ * memory accessed by the PCI chip. `mp0' is the default non-DMAable pool.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+static m_pool_s mp0;
+
+#else
+
+static m_addr_t ___mp0_getp(m_pool_s *mp)
+{
+ m_addr_t m = GetPages();
+ if (m)
+ ++mp->nump;
+ return m;
+}
+
+static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
+{
+ FreePages(m);
+ --mp->nump;
+}
+
+static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep};
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+static void *m_calloc(int size, char *name)
+{
+ u_long flags;
+ void *m;
+ NCR_LOCK_DRIVER(flags);
+ m = __m_calloc(&mp0, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+ return m;
+}
+
+static void m_free(void *ptr, int size, char *name)
+{
+ u_long flags;
+ NCR_LOCK_DRIVER(flags);
+ __m_free(&mp0, ptr, size, name);
+ NCR_UNLOCK_DRIVER(flags);
+}
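+
+/*
+** Illustrative sketch, not part of the driver: allocating a zeroed,
+** power-of-2 aligned chunk from the default pool and releasing it.
+** `struct foo' is a hypothetical structure.
+**
+**	struct foo *p = m_calloc(sizeof(*p), "FOO");
+**	if (p) {
+**		... use p ...
+**		m_free(p, sizeof(*p), "FOO");
+**	}
+*/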
+
+/*
+ * DMAable pools.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Without pci bus iommu support, all the memory is assumed DMAable */
+
+#define __m_calloc_dma(b, s, n) m_calloc(s, n)
+#define __m_free_dma(b, p, s, n) m_free(p, s, n)
+#define __vtobus(b, p) virt_to_bus(p)
+
+#else
+
+/*
+ * With pci bus iommu support, we maintain one pool per pcidev and a
+ * hashed reverse table for virtual to bus physical address translations.
+ */
+static m_addr_t ___dma_getp(m_pool_s *mp)
+{
+ m_addr_t vp;
+ m_vtob_s *vbp;
+
+ vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
+ if (vbp) {
+ dma_addr_t daddr;
+ vp = (m_addr_t) pci_alloc_consistent(mp->bush,
+ PAGE_SIZE<<MEMO_PAGE_ORDER,
+ &daddr);
+ if (vp) {
+ int hc = VTOB_HASH_CODE(vp);
+ vbp->vaddr = vp;
+ vbp->baddr = daddr;
+ vbp->next = mp->vtob[hc];
+ mp->vtob[hc] = vbp;
+ ++mp->nump;
+ return vp;
+ }
+ }
+ if (vbp)
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ return 0;
+}
+
+static void ___dma_freep(m_pool_s *mp, m_addr_t m)
+{
+ m_vtob_s **vbpp, *vbp;
+ int hc = VTOB_HASH_CODE(m);
+
+ vbpp = &mp->vtob[hc];
+ while (*vbpp && (*vbpp)->vaddr != m)
+ vbpp = &(*vbpp)->next;
+ if (*vbpp) {
+ vbp = *vbpp;
+ *vbpp = (*vbpp)->next;
+ pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
+ (void *)vbp->vaddr, (dma_addr_t)vbp->baddr);
+ __m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
+ --mp->nump;
+ }
+}
+
+static inline m_pool_s *___get_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next);
+ return mp;
+}
+
+static m_pool_s *___cre_dma_pool(m_bush_t bush)
+{
+ m_pool_s *mp;
+ mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
+ if (mp) {
+ bzero(mp, sizeof(*mp));
+ mp->bush = bush;
+ mp->getp = ___dma_getp;
+ mp->freep = ___dma_freep;
+ mp->next = mp0.next;
+ mp0.next = mp;
+ }
+ return mp;
+}
+
+static void ___del_dma_pool(m_pool_s *p)
+{
+ struct m_pool **pp = &mp0.next;
+
+ while (*pp && *pp != p)
+ pp = &(*pp)->next;
+ if (*pp) {
+ *pp = (*pp)->next;
+ __m_free(&mp0, p, sizeof(*p), "MPOOL");
+ }
+}
+
+static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+ void *m = 0;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (!mp)
+ mp = ___cre_dma_pool(bush);
+ if (mp)
+ m = __m_calloc(mp, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+
+ return m;
+}
+
+static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
+{
+ u_long flags;
+ struct m_pool *mp;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp)
+ __m_free(mp, m, size, name);
+ if (mp && !mp->nump)
+ ___del_dma_pool(mp);
+ NCR_UNLOCK_DRIVER(flags);
+}
+
+static m_addr_t __vtobus(m_bush_t bush, void *m)
+{
+ u_long flags;
+ m_pool_s *mp;
+ int hc = VTOB_HASH_CODE(m);
+ m_vtob_s *vp = 0;
+ m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
+
+ NCR_LOCK_DRIVER(flags);
+ mp = ___get_dma_pool(bush);
+ if (mp) {
+ vp = mp->vtob[hc];
+ while (vp && (m_addr_t) vp->vaddr != a)
+ vp = vp->next;
+ }
+ NCR_UNLOCK_DRIVER(flags);
+ return vp ? vp->baddr + (((m_addr_t) m) - a) : 0;
+}
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define _m_calloc_dma(np, s, n) __m_calloc_dma(np->pdev, s, n)
+#define _m_free_dma(np, p, s, n) __m_free_dma(np->pdev, p, s, n)
+#define m_calloc_dma(s, n) _m_calloc_dma(np, s, n)
+#define m_free_dma(p, s, n) _m_free_dma(np, p, s, n)
+#define _vtobus(np, p) __vtobus(np->pdev, p)
+#define vtobus(p) _vtobus(np, p)
+
+/*
+ * Deal with DMA mapping/unmapping.
+ */
+
+#ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING
+
+/* Linux versions prior to pci bus iommu kernel interface */
+
+#define __unmap_scsi_data(pdev, cmd) do {; } while (0)
+#define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
+#define __map_scsi_sg_data(pdev, cmd) ((cmd)->use_sg)
+#define __sync_scsi_data(pdev, cmd) do {; } while (0)
+
+#define scsi_sg_dma_address(sc) vtobus((sc)->address)
+#define scsi_sg_dma_len(sc) ((sc)->length)
+
+#else
+
+/* Linux version with pci bus iommu kernel interface */
+
+/* To keep track of the dma mapping (sg/single) that has been set */
+#define __data_mapped SCp.phase
+#define __data_mapping SCp.have_data_in
+
+static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_unmap_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+ cmd->__data_mapped = 0;
+}
+
+static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ dma_addr_t mapping;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->request_bufflen == 0)
+ return 0;
+
+ mapping = pci_map_single(pdev, cmd->request_buffer,
+ cmd->request_bufflen, dma_dir);
+ cmd->__data_mapped = 1;
+ cmd->__data_mapping = mapping;
+
+ return mapping;
+}
+
+static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int use_sg;
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ if (cmd->use_sg == 0)
+ return 0;
+
+ use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ cmd->__data_mapped = 2;
+ cmd->__data_mapping = use_sg;
+
+ return use_sg;
+}
+
+static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
+{
+ int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+
+ switch(cmd->__data_mapped) {
+ case 2:
+ pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir);
+ break;
+ case 1:
+ pci_dma_sync_single(pdev, cmd->__data_mapping,
+ cmd->request_bufflen, dma_dir);
+ break;
+ }
+}
+
+#define scsi_sg_dma_address(sc) sg_dma_address(sc)
+#define scsi_sg_dma_len(sc) sg_dma_len(sc)
+
+#endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
+
+#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->pdev, cmd)
+#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->pdev, cmd)
+#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->pdev, cmd)
+#define sync_scsi_data(np, cmd) __sync_scsi_data(np->pdev, cmd)
+
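+/*
+** Illustrative sketch, not part of the driver: how a queuecommand
+** path would typically use the mapping macros above.  `np', `cmd',
+** `segments' and `lastp' are hypothetical locals.
+**
+**	if (cmd->use_sg)
+**		segments = map_scsi_sg_data(np, cmd);
+**	else
+**		lastp = map_scsi_single_data(np, cmd);
+**
+** and, once the command completes:
+**
+**	unmap_scsi_data(np, cmd);
+*/
+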
+/*==========================================================
+**
+** SCSI data transfer direction
+**
+** Until some linux kernel version near 2.3.40,
+** low-level scsi drivers were not told about data
+** transfer direction. We check for this feature, one
+** that SCSI driver developers have expected for a
+** _long_ time, by simply testing whether
+** SCSI_DATA_UNKNOWN is defined. Indeed this is
+** a hack, but testing against a kernel version would
+** have been a shame. ;-)
+**
+**==========================================================
+*/
+#ifdef SCSI_DATA_UNKNOWN
+
+#define scsi_data_direction(cmd) (cmd->sc_data_direction)
+
+#else
+
+#define SCSI_DATA_UNKNOWN 0
+#define SCSI_DATA_WRITE 1
+#define SCSI_DATA_READ 2
+#define SCSI_DATA_NONE 3
+
+static __inline__ int scsi_data_direction(Scsi_Cmnd *cmd)
+{
+ int direction;
+
+ switch((int) cmd->cmnd[0]) {
+ case 0x08: /* READ(6) 08 */
+ case 0x28: /* READ(10) 28 */
+ case 0xA8: /* READ(12) A8 */
+ direction = SCSI_DATA_READ;
+ break;
+ case 0x0A: /* WRITE(6) 0A */
+ case 0x2A: /* WRITE(10) 2A */
+ case 0xAA: /* WRITE(12) AA */
+ direction = SCSI_DATA_WRITE;
+ break;
+ default:
+ direction = SCSI_DATA_UNKNOWN;
+ break;
+ }
+
+ return direction;
+}
+
+#endif /* SCSI_DATA_UNKNOWN */
+
+/*==========================================================
+**
+** Driver setup.
+**
+** This structure is initialized from linux config
+** options. It can be overridden at boot-up by the boot
+** command line.
+**
+**==========================================================
+*/
+static struct ncr_driver_setup
+ driver_setup = SCSI_NCR_DRIVER_SETUP;
+
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+static struct ncr_driver_setup
+ driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
+#endif
+
+#define initverbose (driver_setup.verbose)
+#define bootverbose (np->verbose)
+
+
+/*==========================================================
+**
+** Structures used by the detection routine to transmit
+** device configuration to the attach function.
+**
+**==========================================================
+*/
+typedef struct {
+ int bus;
+ u_char device_fn;
+ u_long base;
+ u_long base_2;
+ u_long io_port;
+ int irq;
+/* port and reg fields to use INB, OUTB macros */
+ u_long base_io;
+ volatile struct ncr_reg *reg;
+} ncr_slot;
+
+/*==========================================================
+**
+** Structure used to store the NVRAM content.
+**
+**==========================================================
+*/
+typedef struct {
+ int type;
+#define SCSI_NCR_SYMBIOS_NVRAM (1)
+#define SCSI_NCR_TEKRAM_NVRAM (2)
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ union {
+ Symbios_nvram Symbios;
+ Tekram_nvram Tekram;
+ } data;
+#endif
+} ncr_nvram;
+
+/*==========================================================
+**
+** Structure used by detection routine to save data on
+** each detected board for attach.
+**
+**==========================================================
+*/
+typedef struct {
+ pcidev_t pdev;
+ ncr_slot slot;
+ ncr_chip chip;
+ ncr_nvram *nvram;
+ u_char host_id;
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ u_char pqs_pds;
+#endif
+ int attach_done;
+} ncr_device;
+
+static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device);
+
+/*==========================================================
+**
+** NVRAM detection and reading.
+**
+** Currently supported:
+** - 24C16 EEPROM with both Symbios and Tekram layout.
+** - 93C46 EEPROM with Tekram layout.
+**
+**==========================================================
+*/
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+ * 24C16 EEPROM reading.
+ *
+ * GPIO0 - data in/data out
+ * GPIO1 - clock
+ * Symbios NVRAM wiring now also used by Tekram.
+ */
+
+#define SET_BIT 0
+#define CLR_BIT 1
+#define SET_CLK 2
+#define CLR_CLK 3
+
+/*
+ * Set/clear the data bit (GPIO0) or the clock bit (GPIO1)
+ */
+static void __init
+S24C16_set_bit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
+{
+ UDELAY (5);
+ switch (bit_mode){
+ case SET_BIT:
+ *gpreg |= write_bit;
+ break;
+ case CLR_BIT:
+ *gpreg &= 0xfe;
+ break;
+ case SET_CLK:
+ *gpreg |= 0x02;
+ break;
+ case CLR_CLK:
+ *gpreg &= 0xfd;
+ break;
+
+ }
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (5);
+}
+
+/*
+ * Send START condition to NVRAM to wake it up.
+ */
+static void __init S24C16_start(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
+ */
+static void __init S24C16_stop(ncr_slot *np, u_char *gpreg)
+{
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ S24C16_set_bit(np, 1, gpreg, SET_BIT);
+}
+
+/*
+ * Read or write a bit to the NVRAM,
+ * read if GPIO0 input else write if GPIO0 output
+ */
+static void __init
+S24C16_do_bit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
+{
+ S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
+ S24C16_set_bit(np, 0, gpreg, SET_CLK);
+ if (read_bit)
+ *read_bit = INB (nc_gpreg);
+ S24C16_set_bit(np, 0, gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, gpreg, CLR_BIT);
+}
+
+/*
+ * Output an ACK to the NVRAM after reading,
+ * change GPIO0 to output and when done back to an input
+ */
+static void __init
+S24C16_write_ack(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl & 0xfe);
+ S24C16_do_bit(np, 0, write_bit, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * Input an ACK from NVRAM after writing,
+ * change GPIO0 to input and when done back to an output
+ */
+static void __init
+S24C16_read_ack(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
+{
+ OUTB (nc_gpcntl, *gpcntl | 0x01);
+ S24C16_do_bit(np, read_bit, 1, gpreg);
+ OUTB (nc_gpcntl, *gpcntl);
+}
+
+/*
+ * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
+ * GPIO0 must already be set as an output
+ */
+static void __init
+S24C16_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+
+ for (x = 0; x < 8; x++)
+ S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);
+
+ S24C16_read_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * READ a byte from the NVRAM and then send an ACK to say we have got it,
+ * GPIO0 must already be set as an input
+ */
+static void __init
+S24C16_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data,
+ u_char *gpreg, u_char *gpcntl)
+{
+ int x;
+ u_char read_bit;
+
+ *read_data = 0;
+ for (x = 0; x < 8; x++) {
+ S24C16_do_bit(np, &read_bit, 1, gpreg);
+ *read_data |= ((read_bit & 0x01) << (7 - x));
+ }
+
+ S24C16_write_ack(np, ack_data, gpreg, gpcntl);
+}
+
+/*
+ * Read 'len' bytes starting at 'offset'.
+ */
+static int __init
+sym_read_S24C16_nvram (ncr_slot *np, int offset, u_char *data, int len)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ u_char ack_data;
+ int retv = 1;
+ int x;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+ gpcntl = old_gpcntl & 0xfc;
+
+	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 into a known state */
+ OUTB (nc_gpreg, old_gpreg);
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* this is to set NVRAM into a known state with GPIO0/1 both low */
+ gpreg = old_gpreg;
+ S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
+ S24C16_set_bit(np, 0, &gpreg, CLR_BIT);
+
+ /* now set NVRAM inactive with GPIO0/1 both high */
+ S24C16_stop(np, &gpreg);
+
+ /* activate NVRAM */
+ S24C16_start(np, &gpreg);
+
+ /* write device code and random address MSB */
+ S24C16_write_byte(np, &ack_data,
+ 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* write random address LSB */
+ S24C16_write_byte(np, &ack_data,
+ offset & 0xff, &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* regenerate START state to set up for reading */
+ S24C16_start(np, &gpreg);
+
+ /* rewrite device code and address MSB with read bit set (lsb = 0x01) */
+ S24C16_write_byte(np, &ack_data,
+ 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
+ if (ack_data & 0x01)
+ goto out;
+
+ /* now set up GPIO0 for inputting data */
+ gpcntl |= 0x01;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all requested data - only part of total NVRAM */
+ for (x = 0; x < len; x++)
+ S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl);
+
+ /* finally put NVRAM back in inactive mode */
+ gpcntl &= 0xfe;
+ OUTB (nc_gpcntl, gpcntl);
+ S24C16_stop(np, &gpreg);
+ retv = 0;
+out:
+ /* return GPIO0/1 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
+
+/*
+ * Try reading Symbios NVRAM.
+ * Return 0 if OK.
+ */
+static int __init sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram)
+{
+ static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ /* probe the 24c16 and read the SYMBIOS 24c16 area */
+ if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
+ return 1;
+
+ /* check valid NVRAM signature, verify byte count and checksum */
+ if (nvram->type != 0 ||
+ memcmp(nvram->trailer, Symbios_trailer, 6) ||
+ nvram->byte_count != len - 12)
+ return 1;
+
+ /* verify checksum */
+ for (x = 6, csum = 0; x < len - 6; x++)
+ csum += data[x];
+ if (csum != nvram->checksum)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * 93C46 EEPROM reading.
+ *
+ * GPIO0 - data in
+ * GPIO1 - data out
+ * GPIO2 - clock
+ * GPIO4 - chip select
+ *
+ * Used by Tekram.
+ */
+
+/*
+ * Pulse the clock bit (GPIO2)
+ */
+static void __init T93C46_Clk(ncr_slot *np, u_char *gpreg)
+{
+ OUTB (nc_gpreg, *gpreg | 0x04);
+ UDELAY (2);
+ OUTB (nc_gpreg, *gpreg);
+}
+
+/*
+ * Read bit from NVRAM
+ */
+static void __init T93C46_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
+{
+ UDELAY (2);
+ T93C46_Clk(np, gpreg);
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * Write bit to GPIO1 (data out)
+ */
+static void __init T93C46_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
+{
+ if (write_bit & 0x01)
+ *gpreg |= 0x02;
+ else
+ *gpreg &= 0xfd;
+
+ *gpreg |= 0x10;
+
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
+ */
+static void __init T93C46_Stop(ncr_slot *np, u_char *gpreg)
+{
+ *gpreg &= 0xef;
+ OUTB (nc_gpreg, *gpreg);
+ UDELAY (2);
+
+ T93C46_Clk(np, gpreg);
+}
+
+/*
+ * Send read command and address to NVRAM
+ */
+static void __init
+T93C46_Send_Command(ncr_slot *np, u_short write_data,
+ u_char *read_bit, u_char *gpreg)
+{
+ int x;
+
+ /* send 9 bits, start bit (1), command (2), address (6) */
+ for (x = 0; x < 9; x++)
+ T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
+
+ *read_bit = INB (nc_gpreg);
+}
+
+/*
+ * READ 2 bytes from the NVRAM
+ */
+static void __init
+T93C46_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
+{
+ int x;
+ u_char read_bit;
+
+ *nvram_data = 0;
+ for (x = 0; x < 16; x++) {
+ T93C46_Read_Bit(np, &read_bit, gpreg);
+
+ if (read_bit & 0x01)
+ *nvram_data |= (0x01 << (15 - x));
+ else
+ *nvram_data &= ~(0x01 << (15 - x));
+ }
+}
+
+/*
+ * Read Tekram NvRAM data.
+ */
+static int __init
+T93C46_Read_Data(ncr_slot *np, u_short *data,int len,u_char *gpreg)
+{
+ u_char read_bit;
+ int x;
+
+ for (x = 0; x < len; x++) {
+
+ /* output read command and address */
+ T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
+ if (read_bit & 0x01)
+ return 1; /* Bad */
+ T93C46_Read_Word(np, &data[x], gpreg);
+ T93C46_Stop(np, gpreg);
+ }
+
+ return 0;
+}
+
+/*
+ * Try reading 93C46 Tekram NVRAM.
+ */
+static int __init
+sym_read_T93C46_nvram (ncr_slot *np, Tekram_nvram *nvram)
+{
+ u_char gpcntl, gpreg;
+ u_char old_gpcntl, old_gpreg;
+ int retv = 1;
+
+ /* save current state of GPCNTL and GPREG */
+ old_gpreg = INB (nc_gpreg);
+ old_gpcntl = INB (nc_gpcntl);
+
+	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 into a known state, 0 in,
+ 1/2/4 out */
+ gpreg = old_gpreg & 0xe9;
+ OUTB (nc_gpreg, gpreg);
+ gpcntl = (old_gpcntl & 0xe9) | 0x09;
+ OUTB (nc_gpcntl, gpcntl);
+
+ /* input all of NVRAM, 64 words */
+ retv = T93C46_Read_Data(np, (u_short *) nvram,
+ sizeof(*nvram) / sizeof(short), &gpreg);
+
+ /* return GPIO0/1/2/4 to original states after having accessed NVRAM */
+ OUTB (nc_gpcntl, old_gpcntl);
+ OUTB (nc_gpreg, old_gpreg);
+
+ return retv;
+}
+
+/*
+ * Try reading Tekram NVRAM.
+ * Return 0 if OK.
+ */
+static int __init
+sym_read_Tekram_nvram (ncr_slot *np, u_short device_id, Tekram_nvram *nvram)
+{
+ u_char *data = (u_char *) nvram;
+ int len = sizeof(*nvram);
+ u_short csum;
+ int x;
+
+ switch (device_id) {
+ case PCI_DEVICE_ID_NCR_53C885:
+ case PCI_DEVICE_ID_NCR_53C895:
+ case PCI_DEVICE_ID_NCR_53C896:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ break;
+ case PCI_DEVICE_ID_NCR_53C875:
+ x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
+ data, len);
+ if (!x)
+ break;
+ default:
+ x = sym_read_T93C46_nvram(np, nvram);
+ break;
+ }
+ if (x)
+ return 1;
+
+ /* verify checksum */
+ for (x = 0, csum = 0; x < len - 1; x += 2)
+ csum += data[x] + (data[x+1] << 8);
+ if (csum != 0x1234)
+ return 1;
+
+ return 0;
+}
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/*===================================================================
+**
+** Detect and try to read SYMBIOS and TEKRAM NVRAM.
+**
+** Data can be used to order booting of boards.
+**
+**	Data is saved in the ncr_device structure if NVRAM is found. This
+** is then used to find drive boot order for ncr_attach().
+**
+** NVRAM data is passed to Scsi_Host_Template later during
+** ncr_attach() for any device set up.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+static void __init ncr_get_nvram(ncr_device *devp, ncr_nvram *nvp)
+{
+ devp->nvram = nvp;
+ if (!nvp)
+ return;
+ /*
+ ** Get access to chip IO registers
+ */
+#ifdef SCSI_NCR_IOMAPPED
+ request_region(devp->slot.io_port, 128, NAME53C8XX);
+ devp->slot.base_io = devp->slot.io_port;
+#else
+ devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+ if (!devp->slot.reg)
+ return;
+#endif
+
+ /*
+ ** Try to read SYMBIOS nvram.
+ ** Try to read TEKRAM nvram if Symbios nvram not found.
+ */
+ if (!sym_read_Symbios_nvram(&devp->slot, &nvp->data.Symbios))
+ nvp->type = SCSI_NCR_SYMBIOS_NVRAM;
+ else if (!sym_read_Tekram_nvram(&devp->slot, devp->chip.device_id,
+ &nvp->data.Tekram))
+ nvp->type = SCSI_NCR_TEKRAM_NVRAM;
+ else {
+ nvp->type = 0;
+ devp->nvram = 0;
+ }
+
+ /*
+ ** Release access to chip IO registers
+ */
+#ifdef SCSI_NCR_IOMAPPED
+ release_region(devp->slot.base_io, 128);
+#else
+ unmap_pci_mem((u_long) devp->slot.reg, 128ul);
+#endif
+
+}
+
+/*===================================================================
+**
+** Display the content of NVRAM for debugging purposes.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_DEBUG_NVRAM
+static void __init ncr_display_Symbios_nvram(Symbios_nvram *nvram)
+{
+ int i;
+
+ /* display Symbios nvram host data */
+ printk(KERN_DEBUG NAME53C8XX ": HOST ID=%d%s%s%s%s%s\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & SYMBIOS_PARITY_ENABLE) ? " PARITY" :"",
+ (nvram->flags & SYMBIOS_VERBOSE_MSGS) ? " VERBOSE" :"",
+ (nvram->flags & SYMBIOS_CHS_MAPPING) ? " CHS_ALT" :"",
+ (nvram->flags1 & SYMBIOS_SCAN_HI_LO) ? " HI_LO" :"");
+
+ /* display Symbios nvram drive data */
+ for (i = 0 ; i < 15 ; i++) {
+ struct Symbios_target *tn = &nvram->target[i];
+ printk(KERN_DEBUG NAME53C8XX
+ "-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
+ i,
+ (tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT" : "",
+ (tn->flags & SYMBIOS_SCAN_LUNS) ? " SCAN_LUNS" : "",
+ (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ" : "",
+ tn->bus_width,
+ tn->sync_period / 4,
+ tn->timeout);
+ }
+}
+
+static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
+
+static void __init ncr_display_Tekram_nvram(Tekram_nvram *nvram)
+{
+ int i, tags, boot_delay;
+ char *rem;
+
+ /* display Tekram nvram host data */
+ tags = 2 << nvram->max_tags_index;
+ boot_delay = 0;
+ if (nvram->boot_delay_index < 6)
+ boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
+ switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
+ default:
+ case 0: rem = ""; break;
+ case 1: rem = " REMOVABLE=boot device"; break;
+ case 2: rem = " REMOVABLE=all"; break;
+ }
+
+ printk(KERN_DEBUG NAME53C8XX
+ ": HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
+ nvram->host_id & 0x0f,
+ (nvram->flags1 & SYMBIOS_SCAM_ENABLE) ? " SCAM" :"",
+ (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
+ (nvram->flags & TEKRAM_DRIVES_SUP_1GB) ? " >1GB" :"",
+ (nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET" :"",
+ (nvram->flags & TEKRAM_ACTIVE_NEGATION) ? " ACT_NEG" :"",
+ (nvram->flags & TEKRAM_IMMEDIATE_SEEK) ? " IMM_SEEK" :"",
+ (nvram->flags & TEKRAM_SCAN_LUNS) ? " SCAN_LUNS" :"",
+ (nvram->flags1 & TEKRAM_F2_F6_ENABLED) ? " F2_F6" :"",
+ rem, boot_delay, tags);
+
+ /* display Tekram nvram drive data */
+ for (i = 0; i <= 15; i++) {
+ int sync, j;
+ struct Tekram_target *tn = &nvram->target[i];
+ j = tn->sync_index & 0xf;
+ sync = Tekram_sync[j];
+ printk(KERN_DEBUG NAME53C8XX "-%d:%s%s%s%s%s%s PERIOD=%d\n",
+ i,
+ (tn->flags & TEKRAM_PARITY_CHECK) ? " PARITY" : "",
+ (tn->flags & TEKRAM_SYNC_NEGO) ? " SYNC" : "",
+ (tn->flags & TEKRAM_DISCONNECT_ENABLE) ? " DISC" : "",
+ (tn->flags & TEKRAM_START_CMD) ? " START" : "",
+ (tn->flags & TEKRAM_TAGGED_COMMANDS) ? " TCQ" : "",
+ (tn->flags & TEKRAM_WIDE_NEGO) ? " WIDE" : "",
+ sync);
+ }
+}
+#endif /* SCSI_NCR_DEBUG_NVRAM */
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+
+/*===================================================================
+**
+**	Utility routines that properly return data through /proc FS.
+**
+**===================================================================
+*/
+#ifdef SCSI_NCR_USER_INFO_SUPPORT
+
+struct info_str
+{
+ char *buffer;
+ int length;
+ int offset;
+ int pos;
+};
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+ if (info->pos + len > info->length)
+ len = info->length - info->pos;
+
+ if (info->pos + len < info->offset) {
+ info->pos += len;
+ return;
+ }
+ if (info->pos < info->offset) {
+ data += (info->offset - info->pos);
+ len -= (info->offset - info->pos);
+ }
+
+ if (len > 0) {
+ memcpy(info->buffer + info->pos, data, len);
+ info->pos += len;
+ }
+}
+
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+ va_list args;
+ char buf[81];
+ int len;
+
+ va_start(args, fmt);
+ len = vsprintf(buf, fmt, args);
+ va_end(args);
+
+ copy_mem_info(info, buf, len);
+ return len;
+}
+
+#endif
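+
+/*
+** Illustrative sketch, not part of the driver: a proc_info handler
+** would typically fill an info_str and emit its lines through
+** copy_info().  The variables shown are hypothetical.
+**
+**	struct info_str info;
+**
+**	info.buffer = buffer;
+**	info.length = length;
+**	info.offset = offset;
+**	info.pos    = 0;
+**
+**	copy_info(&info, "Chip " NAME53C8XX ", revision %d\n", revision);
+**
+**	return info.pos > info.offset ? info.pos - info.offset : 0;
+*/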
+
+/*===================================================================
+**
+** Driver setup from the boot command line
+**
+**===================================================================
+*/
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+#define OPT_TAGS 1
+#define OPT_MASTER_PARITY 2
+#define OPT_SCSI_PARITY 3
+#define OPT_DISCONNECTION 4
+#define OPT_SPECIAL_FEATURES 5
+#define OPT_ULTRA_SCSI 6
+#define OPT_FORCE_SYNC_NEGO 7
+#define OPT_REVERSE_PROBE 8
+#define OPT_DEFAULT_SYNC 9
+#define OPT_VERBOSE 10
+#define OPT_DEBUG 11
+#define OPT_BURST_MAX 12
+#define OPT_LED_PIN 13
+#define OPT_MAX_WIDE 14
+#define OPT_SETTLE_DELAY 15
+#define OPT_DIFF_SUPPORT 16
+#define OPT_IRQM 17
+#define OPT_PCI_FIX_UP 18
+#define OPT_BUS_CHECK 19
+#define OPT_OPTIMIZE 20
+#define OPT_RECOVERY 21
+#define OPT_SAFE_SETUP 22
+#define OPT_USE_NVRAM 23
+#define OPT_EXCLUDE 24
+#define OPT_HOST_ID 25
+
+#ifdef SCSI_NCR_IARB_SUPPORT
+#define OPT_IARB 26
+#endif
+
+static char setup_token[] __initdata =
+ "tags:" "mpar:"
+ "spar:" "disc:"
+ "specf:" "ultra:"
+ "fsn:" "revprob:"
+ "sync:" "verb:"
+ "debug:" "burst:"
+ "led:" "wide:"
+ "settle:" "diff:"
+ "irqm:" "pcifix:"
+ "buschk:" "optim:"
+ "recovery:"
+ "safe:" "nvram:"
+ "excl:" "hostid:"
+#ifdef SCSI_NCR_IARB_SUPPORT
+ "iarb:"
+#endif
+	; /* DO NOT REMOVE THIS ';' */
+
+#ifdef MODULE
+#define ARG_SEP ' '
+#else
+#define ARG_SEP ','
+#endif
+
+static int __init get_setup_token(char *p)
+{
+ char *cur = setup_token;
+ char *pc;
+ int i = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ ++pc;
+ ++i;
+ if (!strncmp(p, cur, pc - cur))
+ return i;
+ cur = pc;
+ }
+ return 0;
+}
+
+
+static int __init sym53c8xx__setup(char *str)
+{
+#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+ char *cur = str;
+ char *pc, *pv;
+ int i, val, c;
+ int xi = 0;
+
+ while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
+ char *pe;
+
+ val = 0;
+ pv = pc;
+ c = *++pv;
+
+ if (c == 'n')
+ val = 0;
+ else if (c == 'y')
+ val = 1;
+ else
+ val = (int) simple_strtoul(pv, &pe, 0);
+
+ switch (get_setup_token(cur)) {
+ case OPT_TAGS:
+ driver_setup.default_tags = val;
+ if (pe && *pe == '/') {
+ i = 0;
+ while (*pe && *pe != ARG_SEP &&
+ i < sizeof(driver_setup.tag_ctrl)-1) {
+ driver_setup.tag_ctrl[i++] = *pe++;
+ }
+ driver_setup.tag_ctrl[i] = '\0';
+ }
+ break;
+ case OPT_MASTER_PARITY:
+ driver_setup.master_parity = val;
+ break;
+ case OPT_SCSI_PARITY:
+ driver_setup.scsi_parity = val;
+ break;
+ case OPT_DISCONNECTION:
+ driver_setup.disconnection = val;
+ break;
+ case OPT_SPECIAL_FEATURES:
+ driver_setup.special_features = val;
+ break;
+ case OPT_ULTRA_SCSI:
+ driver_setup.ultra_scsi = val;
+ break;
+ case OPT_FORCE_SYNC_NEGO:
+ driver_setup.force_sync_nego = val;
+ break;
+ case OPT_REVERSE_PROBE:
+ driver_setup.reverse_probe = val;
+ break;
+ case OPT_DEFAULT_SYNC:
+ driver_setup.default_sync = val;
+ break;
+ case OPT_VERBOSE:
+ driver_setup.verbose = val;
+ break;
+ case OPT_DEBUG:
+ driver_setup.debug = val;
+ break;
+ case OPT_BURST_MAX:
+ driver_setup.burst_max = val;
+ break;
+ case OPT_LED_PIN:
+ driver_setup.led_pin = val;
+ break;
+ case OPT_MAX_WIDE:
+ driver_setup.max_wide = val? 1:0;
+ break;
+ case OPT_SETTLE_DELAY:
+ driver_setup.settle_delay = val;
+ break;
+ case OPT_DIFF_SUPPORT:
+ driver_setup.diff_support = val;
+ break;
+ case OPT_IRQM:
+ driver_setup.irqm = val;
+ break;
+ case OPT_PCI_FIX_UP:
+ driver_setup.pci_fix_up = val;
+ break;
+ case OPT_BUS_CHECK:
+ driver_setup.bus_check = val;
+ break;
+ case OPT_OPTIMIZE:
+ driver_setup.optimize = val;
+ break;
+ case OPT_RECOVERY:
+ driver_setup.recovery = val;
+ break;
+ case OPT_USE_NVRAM:
+ driver_setup.use_nvram = val;
+ break;
+ case OPT_SAFE_SETUP:
+ memcpy(&driver_setup, &driver_safe_setup,
+ sizeof(driver_setup));
+ break;
+ case OPT_EXCLUDE:
+ if (xi < SCSI_NCR_MAX_EXCLUDES)
+ driver_setup.excludes[xi++] = val;
+ break;
+ case OPT_HOST_ID:
+ driver_setup.host_id = val;
+ break;
+#ifdef SCSI_NCR_IARB_SUPPORT
+ case OPT_IARB:
+ driver_setup.iarb = val;
+ break;
+#endif
+ default:
+ printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
+ break;
+ }
+
+ if ((cur = strchr(cur, ARG_SEP)) != NULL)
+ ++cur;
+ }
+#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
+ return 1;
+}
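+
+/*
+** Illustrative sketch, not part of the driver: with the parser
+** above, a boot command line option such as
+**
+**	sym53c8xx=tags:4,sync:10,disc:y,verb:1
+**
+** sets driver_setup.default_tags to 4, default_sync to 10, enables
+** disconnections and sets the verbosity level to 1.  (The option
+** name before '=' depends on which driver includes this file.)
+*/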
+
+/*===================================================================
+**
+** Get device queue depth from boot command line.
+**
+**===================================================================
+*/
+#define DEF_DEPTH (driver_setup.default_tags)
+#define ALL_TARGETS -2
+#define NO_TARGET -1
+#define ALL_LUNS -2
+#define NO_LUN -1
+
+static int device_queue_depth(int unit, int target, int lun)
+{
+ int c, h, t, u, v;
+ char *p = driver_setup.tag_ctrl;
+ char *ep;
+
+ h = -1;
+ t = NO_TARGET;
+ u = NO_LUN;
+ while ((c = *p++) != 0) {
+ v = simple_strtoul(p, &ep, 0);
+ switch(c) {
+ case '/':
+ ++h;
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ case 't':
+ if (t != target)
+ t = (target == v) ? v : NO_TARGET;
+ u = ALL_LUNS;
+ break;
+ case 'u':
+ if (u != lun)
+ u = (lun == v) ? v : NO_LUN;
+ break;
+ case 'q':
+ if (h == unit &&
+ (t == ALL_TARGETS || t == target) &&
+ (u == ALL_LUNS || u == lun))
+ return v;
+ break;
+ case '-':
+ t = ALL_TARGETS;
+ u = ALL_LUNS;
+ break;
+ default:
+ break;
+ }
+ p = ep;
+ }
+ return DEF_DEPTH;
+}
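+
+/*
+** Illustrative sketch, not part of the driver: with the parser
+** above, a "tags:" sub-option such as
+**
+**	tags:4/t2q10-q20
+**
+** yields a default depth of 4, a depth of 10 for target 2 of the
+** first host and a depth of 20 for its other targets ('/' selects
+** the next host, 't' a target, 'q' the depth, '-' resets the
+** target/lun selection).
+*/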
+
+/*===================================================================
+**
+** Print out information about driver configuration.
+**
+**===================================================================
+*/
+static void __init ncr_print_driver_setup(void)
+{
+#define YesNo(y) y ? 'y' : 'n'
+ printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+ "burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
+ YesNo(driver_setup.disconnection),
+ driver_setup.special_features,
+ driver_setup.ultra_scsi,
+ driver_setup.default_tags,
+ driver_setup.default_sync,
+ driver_setup.burst_max,
+ YesNo(driver_setup.max_wide),
+ driver_setup.diff_support,
+ YesNo(driver_setup.reverse_probe),
+ driver_setup.bus_check);
+
+ printk (NAME53C8XX ": setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,"
+ "led:%c,settle:%d,irqm:0x%x,nvram:0x%x,pcifix:0x%x\n",
+ YesNo(driver_setup.master_parity),
+ YesNo(driver_setup.scsi_parity),
+ YesNo(driver_setup.force_sync_nego),
+ driver_setup.verbose,
+ driver_setup.debug,
+ YesNo(driver_setup.led_pin),
+ driver_setup.settle_delay,
+ driver_setup.irqm,
+ driver_setup.use_nvram,
+ driver_setup.pci_fix_up);
+#undef YesNo
+}
+
+/*===================================================================
+**
+** SYM53C8XX devices description table.
+**
+**===================================================================
+*/
+
+static ncr_chip ncr_chip_table[] __initdata = SCSI_NCR_CHIP_TABLE;
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+/*===================================================================
+**
+** Detect all NCR PQS/PDS boards and keep track of their bus nr.
+**
+** The NCR PQS or PDS card is constructed as a DEC bridge
+** behind which sit a proprietary NCR memory controller and
+** four or two 53c875s as separate devices. In its usual mode
+** of operation, the 875s are slaved to the memory controller
+** for all transfers. We can tell if an 875 is part of a
+** PQS/PDS or not since if it is, it will be on the same bus
+** as the memory controller. To operate with the Linux
+** driver, the memory controller is disabled and the 875s
+** freed to function independently. The only wrinkle is that
+** the preset SCSI ID (which may be zero) must be read in from
+** a special configuration space register of the 875.
+**
+**===================================================================
+*/
+#define SCSI_NCR_MAX_PQS_BUS 16
+static int pqs_bus[SCSI_NCR_MAX_PQS_BUS] __initdata = { 0 };
+
+static void __init ncr_detect_pqs_pds(void)
+{
+ short index;
+ pcidev_t dev = PCIDEV_NULL;
+
+ for(index=0; index < SCSI_NCR_MAX_PQS_BUS; index++) {
+ u_char tmp;
+
+ dev = pci_find_device(0x101a, 0x0009, dev);
+ if (dev == PCIDEV_NULL) {
+ pqs_bus[index] = -1;
+ break;
+ }
+ printk(KERN_INFO NAME53C8XX ": NCR PQS/PDS memory controller detected on bus %d\n", PciBusNumber(dev));
+ pci_read_config_byte(dev, 0x44, &tmp);
+ /* bit 1: allow individual 875 configuration */
+ tmp |= 0x2;
+ pci_write_config_byte(dev, 0x44, tmp);
+ pci_read_config_byte(dev, 0x45, &tmp);
+ /* bit 2: drive individual 875 interrupts to the bus */
+ tmp |= 0x4;
+ pci_write_config_byte(dev, 0x45, tmp);
+
+ pqs_bus[index] = PciBusNumber(dev);
+ }
+}
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+/*===================================================================
+**
+** Read and check the PCI configuration for any detected NCR
+** boards and save data for attaching after all boards have
+** been detected.
+**
+**===================================================================
+*/
+static int __init
+sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device)
+{
+ u_short vendor_id, device_id, command;
+ u_char cache_line_size, latency_timer;
+ u_char suggested_cache_line_size = 0;
+ u_char pci_fix_up = driver_setup.pci_fix_up;
+ u_char revision;
+ u_int irq;
+ u_long base, base_2, io_port;
+ int i;
+ ncr_chip *chip;
+
+ printk(KERN_INFO NAME53C8XX ": at PCI bus %d, device %d, function %d\n",
+ PciBusNumber(pdev),
+ (int) (PciDeviceFn(pdev) & 0xf8) >> 3,
+ (int) (PciDeviceFn(pdev) & 7));
+
+#ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING
+ if (!pci_dma_supported(pdev, (dma_addr_t) (0xffffffffUL))) {
+ printk(KERN_WARNING NAME53C8XX
+			": 32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
+ return -1;
+ }
+#endif
+
+ /*
+ ** Read info from the PCI config space.
+ ** pci_read_config_xxx() functions are assumed to be used for
+ ** successfully detected PCI devices.
+ */
+ vendor_id = PciVendorId(pdev);
+ device_id = PciDeviceId(pdev);
+ irq = PciIrqLine(pdev);
+ i = 0;
+ i = pci_get_base_address(pdev, i, &io_port);
+ i = pci_get_base_address(pdev, i, &base);
+ (void) pci_get_base_address(pdev, i, &base_2);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
+
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ /*
+ ** Match the BUS number for PQS/PDS devices.
+ ** Read the SCSI ID from a special register mapped
+ ** into the configuration space of the individual
+	** 875s. This register is set up by the PQS BIOS.
+ */
+ for(i = 0; i < SCSI_NCR_MAX_PQS_BUS && pqs_bus[i] != -1; i++) {
+ u_char tmp;
+ if (pqs_bus[i] == PciBusNumber(pdev)) {
+ pci_read_config_byte(pdev, 0x84, &tmp);
+ device->pqs_pds = 1;
+ device->host_id = tmp;
+ break;
+ }
+ }
+#endif /* SCSI_NCR_PQS_PDS_SUPPORT */
+
+ /*
+	** If the user excludes this chip, do not initialize it.
+ */
+ for (i = 0 ; i < SCSI_NCR_MAX_EXCLUDES ; i++) {
+ if (driver_setup.excludes[i] ==
+ (io_port & PCI_BASE_ADDRESS_IO_MASK))
+ return -1;
+ }
+ /*
+ ** Check if the chip is supported
+ */
+ if ((device_id == PCI_DEVICE_ID_LSI_53C1010) ||
+ (device_id == PCI_DEVICE_ID_LSI_53C1010_66)){
+ printk(NAME53C8XX ": not initializing, device not supported\n");
+ return -1;
+ }
+ chip = 0;
+ for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) {
+ if (device_id != ncr_chip_table[i].device_id)
+ continue;
+ if (revision > ncr_chip_table[i].revision_id)
+ continue;
+ chip = &device->chip;
+ memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
+ chip->revision_id = revision;
+ break;
+ }
+
+ /*
+ ** Ignore Symbios chips controlled by SISL RAID controller.
+ ** This controller sets value 0x52414944 at RAM end - 16.
+ */
+#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
+ if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+ unsigned int ram_size, ram_val;
+ u_long ram_ptr;
+
+ if (chip->features & FE_RAM8K)
+ ram_size = 8192;
+ else
+ ram_size = 4096;
+
+ ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+ ram_size);
+ if (ram_ptr) {
+ ram_val = readl_raw(ram_ptr + ram_size - 16);
+ unmap_pci_mem(ram_ptr, ram_size);
+ if (ram_val == 0x52414944) {
+ printk(NAME53C8XX": not initializing, "
+ "driven by SISL RAID controller.\n");
+ return -1;
+ }
+ }
+ }
+#endif /* i386 and PCI MEMORY accessible */
+
+ if (!chip) {
+ printk(NAME53C8XX ": not initializing, device not supported\n");
+ return -1;
+ }
+
+#ifdef __powerpc__
+ /*
+ ** Fix-up for power/pc.
+ ** Should not be performed by the driver.
+ */
+ if ((command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ != (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": setting%s%s...\n",
+ (command & PCI_COMMAND_IO) ? "" : " PCI_COMMAND_IO",
+ (command & PCI_COMMAND_MEMORY) ? "" : " PCI_COMMAND_MEMORY");
+ command |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,2,0)
+ if ( is_prep ) {
+ if (io_port >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating io_port (Wacky IBM)");
+ io_port = (io_port & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_0, io_port);
+ }
+ if (base >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base (Wacky IBM)");
+ base = (base & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_1, base);
+ }
+ if (base_2 >= 0x10000000) {
+ printk(NAME53C8XX ": reallocating base2 (Wacky IBM)");
+ base_2 = (base_2 & 0x00FFFFFF) | 0x01000000;
+ pci_write_config_dword(pdev,
+ PCI_BASE_ADDRESS_2, base_2);
+ }
+ }
+#endif
+#endif /* __powerpc__ */
+
+#if defined(__sparc__) && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0))
+ /*
+	 * Several fix-ups for sparc.
+ *
+ * Should not be performed by the driver, which is why all
+ * this crap is cleaned up in 2.4.x
+ */
+
+ base = __pa(base);
+ base_2 = __pa(base_2);
+
+ if (!(command & PCI_COMMAND_MASTER)) {
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_COMMAND_MASTER bit (fixup)\n");
+ command |= PCI_COMMAND_MASTER;
+ pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+ pcibios_read_config_word(bus, device_fn, PCI_COMMAND, &command);
+ }
+
+ if ((chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_COMMAND_INVALIDATE bit (fixup)\n");
+ command |= PCI_COMMAND_INVALIDATE;
+ pcibios_write_config_word(bus, device_fn, PCI_COMMAND, command);
+ pcibios_read_config_word(bus, device_fn, PCI_COMMAND, &command);
+ }
+
+ if ((chip->features & FE_CLSE) && !cache_line_size) {
+ /* PCI_CACHE_LINE_SIZE value is in 32-bit words. */
+ cache_line_size = 64 / sizeof(u_int32);
+ if (initverbose >= 2)
+ printk("ncr53c8xx: setting PCI_CACHE_LINE_SIZE to %d (fixup)\n",
+ cache_line_size);
+ pcibios_write_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_CACHE_LINE_SIZE, &cache_line_size);
+ }
+
+ if (!latency_timer) {
+ unsigned char min_gnt;
+
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_MIN_GNT, &min_gnt);
+ if (min_gnt == 0)
+ latency_timer = 128;
+ else
+ latency_timer = ((min_gnt << 3) & 0xff);
+ printk("ncr53c8xx: setting PCI_LATENCY_TIMER to %d bus clocks (fixup)\n", latency_timer);
+ pcibios_write_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, latency_timer);
+ pcibios_read_config_byte(bus, device_fn,
+ PCI_LATENCY_TIMER, &latency_timer);
+ }
+#endif /* __sparc__ && (LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)) */
+
+#if defined(__i386__) && !defined(MODULE)
+ if (!cache_line_size) {
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,75)
+ extern char x86;
+ switch(x86) {
+#else
+ switch(boot_cpu_data.x86) {
+#endif
+ case 4: suggested_cache_line_size = 4; break;
+ case 6:
+ case 5: suggested_cache_line_size = 8; break;
+ }
+ }
+#endif /* __i386__ */
+
+ /*
+ ** Check availability of IO space, memory space.
+	**	Enable bus master capability if not yet set.
+ **
+ ** We shouldn't have to care about the IO region when
+ ** we are using MMIO. But calling check_region() from
+	**	both the ncr53c8xx and the sym53c8xx drivers prevents
+	**	the same device from being attached by both drivers.
+ ** If you have a better idea, let me know.
+ */
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (!(command & PCI_COMMAND_IO)) {
+ printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
+ (long) io_port);
+ io_port = 0;
+ }
+#endif
+ if (!(command & PCI_COMMAND_MEMORY)) {
+ printk(NAME53C8XX ": PCI_COMMAND_MEMORY not set.\n");
+ base = 0;
+ base_2 = 0;
+ }
+ io_port &= PCI_BASE_ADDRESS_IO_MASK;
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+/* #ifdef SCSI_NCR_IOMAPPED */
+#if 1
+ if (io_port && check_region (io_port, 128)) {
+ printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
+ (long) io_port);
+ io_port = 0;
+ }
+ if (!io_port)
+ return -1;
+#endif
+#ifndef SCSI_NCR_IOMAPPED
+ if (!base) {
+ printk(NAME53C8XX ": MMIO base address disabled.\n");
+ return -1;
+ }
+#endif
+
+/* The ncr53c8xx driver never did set the PCI parity bit. */
+/* Since setting this bit is known to trigger spurious MDPE */
+/* errors on some 895 controllers when noise on power lines is */
+/* too high, I do not want to change previous ncr53c8xx driver	*/
+/* behaviour on that point (the sym53c8xx driver set this bit). */
+#if 0
+ /*
+ ** Set MASTER capable and PARITY bit, if not yet.
+ */
+ if ((command & (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY))
+ != (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY)) {
+ printk(NAME53C8XX ": setting%s%s...(fix-up)\n",
+ (command & PCI_COMMAND_MASTER) ? "" : " PCI_COMMAND_MASTER",
+ (command & PCI_COMMAND_PARITY) ? "" : " PCI_COMMAND_PARITY");
+ command |= (PCI_COMMAND_MASTER | PCI_COMMAND_PARITY);
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+#else
+ /*
+ ** Set MASTER capable if not yet.
+ */
+ if ((command & PCI_COMMAND_MASTER) != PCI_COMMAND_MASTER) {
+ printk(NAME53C8XX ": setting PCI_COMMAND_MASTER...(fix-up)\n");
+ command |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+#endif
+
+ /*
+ ** Fix some features according to driver setup.
+ */
+ if (!(driver_setup.special_features & 1))
+ chip->features &= ~FE_SPECIAL_SET;
+ else {
+ if (driver_setup.special_features & 2)
+ chip->features &= ~FE_WRIE;
+ if (driver_setup.special_features & 4)
+ chip->features &= ~FE_NOPM;
+ }
+ if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
+ chip->features |= FE_ULTRA;
+ chip->features &= ~FE_ULTRA2;
+ }
+ if (driver_setup.ultra_scsi < 1)
+ chip->features &= ~FE_ULTRA;
+ if (!driver_setup.max_wide)
+ chip->features &= ~FE_WIDE;
+
+ /*
+ ** Some features are required to be enabled in order to
+ ** work around some chip problems. :) ;)
+	**	(ITEM 12 of a DEL about the 896 that I don't have yet).
+ ** We must ensure the chip will use WRITE AND INVALIDATE.
+ ** The revision number limit is for now arbitrary.
+ */
+ if (device_id == PCI_DEVICE_ID_NCR_53C896 && revision <= 0x10) {
+ chip->features |= (FE_WRIE | FE_CLSE);
+ pci_fix_up |= 3; /* Force appropriate PCI fix-up */
+ }
+
+#ifdef SCSI_NCR_PCI_FIX_UP_SUPPORT
+ /*
+ ** Try to fix up PCI config according to wished features.
+ */
+ if ((pci_fix_up & 1) && (chip->features & FE_CLSE) &&
+ !cache_line_size && suggested_cache_line_size) {
+ cache_line_size = suggested_cache_line_size;
+ pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE, cache_line_size);
+ printk(NAME53C8XX ": PCI_CACHE_LINE_SIZE set to %d (fix-up).\n",
+ cache_line_size);
+ }
+
+ if ((pci_fix_up & 2) && cache_line_size &&
+ (chip->features & FE_WRIE) && !(command & PCI_COMMAND_INVALIDATE)) {
+ printk(NAME53C8XX": setting PCI_COMMAND_INVALIDATE (fix-up)\n");
+ command |= PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND, command);
+ }
+
+ /*
+ ** Tune PCI LATENCY TIMER according to burst max length transfer.
+ ** (latency timer >= burst length + 6, we add 10 to be quite sure)
+ */
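+	/*
+	**	For instance, a chip with burst_max == 6 (bursts of up to
+	**	1<<6 = 64 transfers) gives lt = 64 + 6 + 10 = 80, so a
+	**	latency timer below 80 bus clocks is raised to 80.
+	*/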
+
+ if (chip->burst_max && (latency_timer == 0 || (pci_fix_up & 4))) {
+ u_char lt = (1 << chip->burst_max) + 6 + 10;
+ if (latency_timer < lt) {
+ printk(NAME53C8XX
+ ": changing PCI_LATENCY_TIMER from %d to %d.\n",
+ (int) latency_timer, (int) lt);
+ latency_timer = lt;
+ pci_write_config_byte(pdev,
+ PCI_LATENCY_TIMER, latency_timer);
+ }
+ }
+
+#endif /* SCSI_NCR_PCI_FIX_UP_SUPPORT */
+
+ /*
+ ** Initialise ncr_device structure with items required by ncr_attach.
+ */
+ device->pdev = pdev;
+ device->slot.bus = PciBusNumber(pdev);
+ device->slot.device_fn = PciDeviceFn(pdev);
+ device->slot.base = base;
+ device->slot.base_2 = base_2;
+ device->slot.io_port = io_port;
+ device->slot.irq = irq;
+ device->attach_done = 0;
+
+ return 0;
+}
+
+/*===================================================================
+**
+** Detect all 53c8xx hosts and then attach them.
+**
+** If we are using NVRAM, once all hosts are detected, we need to
+** check any NVRAM for boot order in case detect and boot order
+** differ and attach them using the order in the NVRAM.
+**
+** If no NVRAM is found or the data appears invalid, attach boards
+** in the order they are detected.
+**
+**===================================================================
+*/
+static int __init
+sym53c8xx__detect(Scsi_Host_Template *tpnt, u_short ncr_chip_ids[], int chips)
+{
+ pcidev_t pcidev;
+ int i, j, hosts, count;
+ int attach_count = 0;
+ ncr_device *devtbl, *devp;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_nvram nvram0, nvram, *nvp;
+#endif
+
+ /*
+ ** PCI is required.
+ */
+ if (!pci_present())
+ return 0;
+
+#ifdef SCSI_NCR_DEBUG_INFO_SUPPORT
+ ncr_debug = driver_setup.debug;
+#endif
+ if (initverbose >= 2)
+ ncr_print_driver_setup();
+
+ /*
+	**	Allocate the device table since we do not want to
+ ** overflow the kernel stack.
+ ** 1 x 4K PAGE is enough for more than 40 devices for i386.
+ */
+ devtbl = m_calloc(PAGE_SIZE, "devtbl");
+ if (!devtbl)
+ return 0;
+
+ /*
+ ** Detect all NCR PQS/PDS memory controllers.
+ */
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ ncr_detect_pqs_pds();
+#endif
+
+ /*
+ ** Detect all 53c8xx hosts.
+ ** Save the first Symbios NVRAM content if any
+ ** for the boot order.
+ */
+ hosts = PAGE_SIZE / sizeof(*devtbl);
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ nvp = (driver_setup.use_nvram & 0x1) ? &nvram0 : 0;
+#endif
+ j = 0;
+ count = 0;
+ pcidev = PCIDEV_NULL;
+ while (1) {
+ char *msg = "";
+ if (count >= hosts)
+ break;
+ if (j >= chips)
+ break;
+ i = driver_setup.reverse_probe ? chips - 1 - j : j;
+ pcidev = pci_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
+ pcidev);
+ if (pcidev == PCIDEV_NULL) {
+ ++j;
+ continue;
+ }
+		/* Some hardware such as the HP LH4 may report PCI devices twice */
+ for (i = 0; i < count ; i++) {
+ if (devtbl[i].slot.bus == PciBusNumber(pcidev) &&
+ devtbl[i].slot.device_fn == PciDeviceFn(pcidev))
+ break;
+ }
+ if (i != count) /* Ignore this device if we already have it */
+ continue;
+ devp = &devtbl[count];
+ devp->host_id = driver_setup.host_id;
+ devp->attach_done = 0;
+ if (sym53c8xx_pci_init(tpnt, pcidev, devp)) {
+ continue;
+ }
+ ++count;
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (nvp) {
+ ncr_get_nvram(devp, nvp);
+ switch(nvp->type) {
+ case SCSI_NCR_SYMBIOS_NVRAM:
+ /*
+ * Switch to the other nvram buffer, so that
+ * nvram0 will contain the first Symbios
+ * format NVRAM content with boot order.
+ */
+ nvp = &nvram;
+ msg = "with Symbios NVRAM";
+ break;
+ case SCSI_NCR_TEKRAM_NVRAM:
+ msg = "with Tekram NVRAM";
+ break;
+ }
+ }
+#endif
+#ifdef SCSI_NCR_PQS_PDS_SUPPORT
+ if (devp->pqs_pds)
+ msg = "(NCR PQS/PDS)";
+#endif
+ printk(KERN_INFO NAME53C8XX ": 53c%s detected %s\n",
+ devp->chip.name, msg);
+ }
+
+ /*
+	**	If we have found a SYMBIOS NVRAM, first use the NVRAM boot
+	**	sequence as the device boot order.
+	**	Check devices in the boot record against devices detected.
+	**	Attach devices if we find a match. Boot table records that
+	**	do not match any detected devices will be ignored.
+	**	Devices that do not match any boot table entry will not be
+	**	attached here; an attach will be attempted for them during
+	**	the device table rescan.
+ */
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ if (!nvp || nvram0.type != SCSI_NCR_SYMBIOS_NVRAM)
+ goto next;
+ for (i = 0; i < 4; i++) {
+ Symbios_host *h = &nvram0.data.Symbios.host[i];
+ for (j = 0 ; j < count ; j++) {
+ devp = &devtbl[j];
+ if (h->device_fn != devp->slot.device_fn ||
+ h->bus_nr != devp->slot.bus ||
+ h->device_id != devp->chip.device_id)
+ continue;
+ if (devp->attach_done)
+ continue;
+ if (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) {
+ ncr_get_nvram(devp, nvp);
+ if (!ncr_attach (tpnt, attach_count, devp))
+ attach_count++;
+ }
+#if 0 /* Restore previous behaviour of ncr53c8xx driver */
+ else if (!(driver_setup.use_nvram & 0x80))
+ printk(KERN_INFO NAME53C8XX
+ ": 53c%s state OFF thus not attached\n",
+ devp->chip.name);
+#endif
+ else
+ continue;
+
+ devp->attach_done = 1;
+ break;
+ }
+ }
+next:
+#endif
+
+ /*
+	**	Rescan the device list to make sure all boards are attached.
+	**	Devices without boot records will not have been attached yet,
+	**	so try to attach them here.
+ */
+ for (i= 0; i < count; i++) {
+ devp = &devtbl[i];
+ if (!devp->attach_done) {
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+ ncr_get_nvram(devp, nvp);
+#endif
+ if (!ncr_attach (tpnt, attach_count, devp))
+ attach_count++;
+ }
+ }
+
+ m_free(devtbl, PAGE_SIZE, "devtbl");
+
+ return attach_count;
+}
diff --git a/linux/src/drivers/scsi/sym53c8xx_defs.h b/linux/src/drivers/scsi/sym53c8xx_defs.h
new file mode 100644
index 0000000..10acf78
--- /dev/null
+++ b/linux/src/drivers/scsi/sym53c8xx_defs.h
@@ -0,0 +1,1767 @@
+/******************************************************************************
+** High Performance device driver for the Symbios 53C896 controller.
+**
+** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+**
+** This driver also supports all the Symbios 53C8XX controller family,
+** except 53C810 revisions < 16, 53C825 revisions < 16 and all
+** revisions of 53C815 controllers.
+**
+** This driver is based on the Linux port of the FreeBSD ncr driver.
+**
+** Copyright (C) 1994 Wolfgang Stanglmeier
+**
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+**
+** The Linux port of the FreeBSD ncr driver has been achieved in
+** november 1995 by:
+**
+** Gerard Roudier <groudier@club-internet.fr>
+**
+** Being given that this driver originates from the FreeBSD version, and
+** in order to keep synergy on both, any suggested enhancements and corrections
+** received on Linux are automatically a potential candidate for the FreeBSD
+** version.
+**
+** The original driver has been written for 386bsd and FreeBSD by
+** Wolfgang Stanglmeier <wolf@cologne.de>
+** Stefan Esser <se@mi.Uni-Koeln.de>
+**
+**-----------------------------------------------------------------------------
+**
+** Major contributions:
+** --------------------
+**
+** NVRAM detection and reading.
+** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
+**
+*******************************************************************************
+*/
+
+#ifndef SYM53C8XX_DEFS_H
+#define SYM53C8XX_DEFS_H
+
+/*
+** Check supported Linux versions
+*/
+
+#if !defined(LINUX_VERSION_CODE)
+#include <linux/version.h>
+#endif
+#include <linux/config.h>
+
+#ifndef LinuxVersionCode
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+#endif
+
+/*
+ * NCR PQS/PDS special device support.
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_PQS_PDS
+#define SCSI_NCR_PQS_PDS_SUPPORT
+#endif
+
+/*
+ * No more an option, enabled by default.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#endif
+
+/*
+** These options are not tunable from 'make config'
+*/
+//#define SCSI_NCR_PROC_INFO_SUPPORT
+
+/*
+** If you want a driver as small as possible, do not define the
+** following options.
+*/
+#define SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
+#define SCSI_NCR_DEBUG_INFO_SUPPORT
+#define SCSI_NCR_PCI_FIX_UP_SUPPORT
+#ifdef SCSI_NCR_PROC_INFO_SUPPORT
+# define SCSI_NCR_USER_COMMAND_SUPPORT
+# define SCSI_NCR_USER_INFO_SUPPORT
+#endif
+
+/*
+** To disable integrity checking, do not define the
+** following option.
+*/
+#ifdef CONFIG_SCSI_NCR53C8XX_INTEGRITY_CHECK
+# define SCSI_NCR_ENABLE_INTEGRITY_CHECK
+#endif
+
+/*==========================================================
+**
+** nvram settings - #define SCSI_NCR_NVRAM_SUPPORT to enable
+**
+**==========================================================
+*/
+
+#ifdef CONFIG_SCSI_NCR53C8XX_NVRAM_DETECT
+#define SCSI_NCR_NVRAM_SUPPORT
+/* #define SCSI_NCR_DEBUG_NVRAM */
+#endif
+
+/* ---------------------------------------------------------------------
+** Take into account kernel configured parameters.
+** Most of these options can be overridden at startup by a command line.
+** ---------------------------------------------------------------------
+*/
+
+/*
+ * For Ultra2 and Ultra3 SCSI support option, use special features.
+ *
+ * Value (default) means:
+ * bit 0 : all features enabled, except:
+ * bit 1 : PCI Write And Invalidate.
+ * bit 2 : Data Phase Mismatch handling from SCRIPTS.
+ *
+ * Use boot options ncr53c8xx=specf:1 if you want all chip features to be
+ * enabled by the driver.
+ */
+#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3)
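+/*
+ * With the default value of 3 (bits 0 and 1 set), the chip special
+ * features are thus enabled except PCI Write And Invalidate.
+ */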
+
+/*
+ * For Ultra2 and Ultra3 SCSI support, allow 80 MHz synchronous data transfers.
+ * Value means:
+ * 0 - Ultra speeds disabled
+ * 1 - Ultra enabled (Maximum 20Mtrans/sec)
+ * 2 - Ultra2 enabled (Maximum 40Mtrans/sec)
+ * 3 - Ultra3 enabled (Maximum 80Mtrans/sec)
+ *
+ * Use boot options sym53c8xx=ultra:3 to enable Ultra3 support.
+ */
+
+#define SCSI_NCR_SETUP_ULTRA_SCSI (3)
+#define SCSI_NCR_MAX_SYNC (80)
+
+/*
+ * Allow tags from 2 to 256, default 8
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#if CONFIG_SCSI_NCR53C8XX_MAX_TAGS < 2
+#define SCSI_NCR_MAX_TAGS (2)
+#elif CONFIG_SCSI_NCR53C8XX_MAX_TAGS > 256
+#define SCSI_NCR_MAX_TAGS (256)
+#else
+#define SCSI_NCR_MAX_TAGS CONFIG_SCSI_NCR53C8XX_MAX_TAGS
+#endif
+#else
+#define SCSI_NCR_MAX_TAGS (8)
+#endif
+
+/*
+ * Allow tagged command queuing support if configured with default number
+ * of tags set to max (see above).
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#define SCSI_NCR_SETUP_DEFAULT_TAGS CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS
+#elif defined CONFIG_SCSI_NCR53C8XX_TAGGED_QUEUE
+#define SCSI_NCR_SETUP_DEFAULT_TAGS SCSI_NCR_MAX_TAGS
+#else
+#define SCSI_NCR_SETUP_DEFAULT_TAGS (0)
+#endif
+
+/*
+ * Use normal IO if configured. Forced for alpha and powerpc.
+ * Powerpc fails copying to on-chip RAM using memcpy_toio().
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IOMAPPED)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__alpha__)
+#define SCSI_NCR_IOMAPPED
+#elif defined(__powerpc__)
+#define SCSI_NCR_IOMAPPED
+#define SCSI_NCR_PCI_MEM_NOT_SUPPORTED
+#elif defined(__sparc__)
+#undef SCSI_NCR_IOMAPPED
+#endif
+
+/*
+ * Should we enable DAC cycles on this platform?
+ * Until further investigation we do not enable it
+ * anywhere at the moment.
+ */
+#undef SCSI_NCR_USE_64BIT_DAC
+
+/*
+ * Immediate arbitration
+ */
+#if defined(CONFIG_SCSI_NCR53C8XX_IARB)
+#define SCSI_NCR_IARB_SUPPORT
+#endif
+
+/*
+ * Should we enable DAC cycles on sparc64 platforms?
+ * Until further investigation we do not enable it
+ * anywhere at the moment.
+ */
+#undef SCSI_NCR_USE_64BIT_DAC
+
+/*
+ * Sync transfer frequency at startup.
+ * Allow from 5 MHz to 80 MHz, default 20 MHz.
+ */
+#ifndef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC (20)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC > SCSI_NCR_MAX_SYNC
+#undef CONFIG_SCSI_NCR53C8XX_SYNC
+#define CONFIG_SCSI_NCR53C8XX_SYNC SCSI_NCR_MAX_SYNC
+#endif
+
+#if CONFIG_SCSI_NCR53C8XX_SYNC == 0
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (255)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 5
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (50)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 20
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (250/(CONFIG_SCSI_NCR53C8XX_SYNC))
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 33
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (11)
+#elif CONFIG_SCSI_NCR53C8XX_SYNC <= 40
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (10)
+#else
+#define SCSI_NCR_SETUP_DEFAULT_SYNC (9)
+#endif
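+/*
+ * For example, the default setting of 20 (MHz) gives a period factor of
+ * 250/20 = 12, i.e. the 50 ns Fast-20 minimum synchronous period, while a
+ * setting of 10 gives 250/10 = 25, i.e. 100 ns.
+ */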
+
+/*
+ * Disallow disconnections at boot-up
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_NO_DISCONNECT
+#define SCSI_NCR_SETUP_DISCONNECTION (0)
+#else
+#define SCSI_NCR_SETUP_DISCONNECTION (1)
+#endif
+
+/*
+ * Force synchronous negotiation for all targets
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_FORCE_SYNC_NEGO
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (1)
+#else
+#define SCSI_NCR_SETUP_FORCE_SYNC_NEGO (0)
+#endif
+
+/*
+ * Disable master parity checking (flawed hardware needs that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_MPARITY_CHECK
+#define SCSI_NCR_SETUP_MASTER_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_MASTER_PARITY (1)
+#endif
+
+/*
+ * Disable scsi parity checking (flawed devices may need that)
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_DISABLE_PARITY_CHECK
+#define SCSI_NCR_SETUP_SCSI_PARITY (0)
+#else
+#define SCSI_NCR_SETUP_SCSI_PARITY (1)
+#endif
+
+/*
+ * Vendor specific stuff
+ */
+#ifdef CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT
+#define SCSI_NCR_SETUP_LED_PIN (1)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (4)
+#else
+#define SCSI_NCR_SETUP_LED_PIN (0)
+#define SCSI_NCR_SETUP_DIFF_SUPPORT (0)
+#endif
+
+/*
+ * Settle time after reset at boot-up
+ */
+#define SCSI_NCR_SETUP_SETTLE_TIME (2)
+
+/*
+** Bridge quirks work-around option defaulted to 1.
+*/
+#ifndef SCSI_NCR_PCIQ_WORK_AROUND_OPT
+#define SCSI_NCR_PCIQ_WORK_AROUND_OPT 1
+#endif
+
+/*
+** Work-around common bridge misbehaviour.
+**
+** - Do not flush posted writes in the opposite
+** direction on read.
+** - May reorder DMA writes to memory.
+**
+**	This option should not affect performance
+** significantly, so it is the default.
+*/
+#if SCSI_NCR_PCIQ_WORK_AROUND_OPT == 1
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+
+/*
+** Same as option 1, but also deal with
+** misconfigured interrupts.
+**
+**	- Edge triggered instead of level sensitive.
+** - No interrupt line connected.
+** - IRQ number misconfigured.
+**
+** If no interrupt is delivered, the driver will
+** catch the interrupt conditions 10 times per
+** second. No need to say that this option is
+** not recommended.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 2
+#define SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
+#define SCSI_NCR_PCIQ_MAY_REORDER_WRITES
+#define SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS
+#define SCSI_NCR_PCIQ_BROKEN_INTR
+
+/*
+** Some bridge designers decided to flush
+**	everything prior to delivering the interrupt.
+** This option tries to deal with such a
+** behaviour.
+*/
+#elif SCSI_NCR_PCIQ_WORK_AROUND_OPT == 3
+#define SCSI_NCR_PCIQ_SYNC_ON_INTR
+#endif
+
+/*
+** Other parameters not configurable with "make config"
+** Avoid changing these constants unless you know what you are doing.
+*/
+
+#define SCSI_NCR_ALWAYS_SIMPLE_TAG
+#define SCSI_NCR_MAX_SCATTER (127)
+#define SCSI_NCR_MAX_TARGET (16)
+
+/*
+** Compute some desirable value for CAN_QUEUE
+** and CMD_PER_LUN.
+** The driver will use lower values if these
+** appear to be too large.
+*/
+#define SCSI_NCR_CAN_QUEUE (8*SCSI_NCR_MAX_TAGS + 2*SCSI_NCR_MAX_TARGET)
+#define SCSI_NCR_CMD_PER_LUN (SCSI_NCR_MAX_TAGS)
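+/*
+** With the default SCSI_NCR_MAX_TAGS of 8 and SCSI_NCR_MAX_TARGET of 16,
+** this gives CAN_QUEUE = 8*8 + 2*16 = 96 and CMD_PER_LUN = 8.
+*/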
+
+#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER)
+#define SCSI_NCR_TIMER_INTERVAL (HZ)
+
+#if 1 /* defined CONFIG_SCSI_MULTI_LUN */
+#define SCSI_NCR_MAX_LUN (16)
+#else
+#define SCSI_NCR_MAX_LUN (1)
+#endif
+
+#ifndef HOSTS_C
+
+/*
+**	These simple macros limit expressions involving
+**	kernel time values (jiffies) to forms that have
+**	a chance of not being too incorrect. :-)
+*/
+#define ktime_get(o) (jiffies + (u_long) o)
+#define ktime_exp(b) ((long)(jiffies) - (long)(b) >= 0)
+#define ktime_dif(a, b) ((long)(a) - (long)(b))
+/* These ones are not used in this driver */
+#define ktime_add(a, o) ((a) + (u_long)(o))
+#define ktime_sub(a, o) ((a) - (u_long)(o))
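+/*
+**	For example, a 2 second deadline may be armed and tested as:
+**		u_long deadline = ktime_get(2*HZ);
+**		if (ktime_exp(deadline)) ... timed out ...
+**	The signed subtraction in ktime_exp() keeps the comparison
+**	correct across a jiffies wrap-around.
+*/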
+
+
+/*
+ * IO function definitions for big/little endian CPU support.
+ * For now, the NCR is only supported in little endian addressing mode.
+ */
+
+#ifdef __BIG_ENDIAN
+
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
+#error "BIG ENDIAN byte ordering needs kernel version >= 2.1.0"
+#endif
+
+#define inw_l2b inw
+#define inl_l2b inl
+#define outw_b2l outw
+#define outl_b2l outl
+#define readw_l2b readw
+#define readl_l2b readl
+#define writew_b2l writew
+#define writel_b2l writel
+
+#else /* little endian */
+
+#if defined(__i386__) /* i386 implements full FLAT memory/MMIO model */
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readb_raw(a) (*(volatile unsigned char *) (a))
+#define readw_raw(a) (*(volatile unsigned short *) (a))
+#define readl_raw(a) (*(volatile unsigned int *) (a))
+#define writeb_raw(b,a) ((*(volatile unsigned char *) (a)) = (b))
+#define writew_raw(b,a) ((*(volatile unsigned short *) (a)) = (b))
+#define writel_raw(b,a) ((*(volatile unsigned int *) (a)) = (b))
+
+#else /* Other little-endian */
+#define inw_raw inw
+#define inl_raw inl
+#define outw_raw outw
+#define outl_raw outl
+#define readw_raw readw
+#define readl_raw readl
+#define writew_raw writew
+#define writel_raw writel
+
+#endif
+#endif
+
+#ifdef SCSI_NCR_BIG_ENDIAN
+#error "The NCR in BIG ENDIAN addressing mode is not (yet) supported"
+#endif
+
+
+/*
+ * IA32 architecture does not reorder STORES and prevents
+ * LOADS from passing STORES. It is called `program order'
+ * by Intel and allows device drivers to deal with memory
+ * ordering by only ensuring that the code is not reordered
+ * by the compiler when ordering is required.
+ * Other architectures implement a weaker ordering that
+ * requires memory barriers (and also IO barriers when they
+ * make sense) to be used.
+ * We want to be paranoid for ppc and ia64. :)
+ */
+
+#if defined __i386__
+#define MEMORY_BARRIER() do { ; } while(0)
+#elif defined __powerpc__
+#define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
+#elif defined __ia64__
+#define MEMORY_BARRIER() __asm__ volatile("mf.a; mf" : : : "memory")
+#else
+#define MEMORY_BARRIER() mb()
+#endif
+
+
+/*
+ * If the NCR uses big endian addressing mode over the
+ * PCI, actual io register addresses for byte and word
+ * accesses must be changed according to lane routing.
+ * Btw, ncr_offb() and ncr_offw() macros only apply to
+ * constants and so do not generate bloated code.
+ */
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+
+#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
+#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
+
+#else
+
+#define ncr_offb(o) (o)
+#define ncr_offw(o) (o)
+
+#endif
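+/*
+ * For instance, in big endian addressing mode a byte access to register
+ * offset 1 is routed to ncr_offb(1) = 2 and a word access to offset 2 is
+ * routed to ncr_offw(2) = 0, matching the PCI byte lane routing; in little
+ * endian mode the offsets are left unchanged.
+ */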
+
+/*
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for script patching.
+ * Macro cpu_to_scr() is to be used for script patching.
+ * Macro scr_to_cpu() is to be used for getting a DWORD
+ * from the script.
+ */
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_be32(dw)
+#define scr_to_cpu(dw) be32_to_cpu(dw)
+
+#else
+
+#define cpu_to_scr(dw) (dw)
+#define scr_to_cpu(dw) (dw)
+
+#endif
+
+/*
+ * Access to the controller chip.
+ *
+ * If SCSI_NCR_IOMAPPED is defined, the driver will use
+ * normal IOs instead of the MEMORY MAPPED IO method
+ * recommended by PCI specifications.
+ * If all PCI bridges, host bridges and architectures
+ * had been correctly designed for PCI, this
+ * option would be useless.
+ *
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for accessing chip io
+ * registers. Functions suffixed by '_raw' are assumed
+ * to access the chip over the PCI without doing byte
+ * reordering. Functions suffixed by '_l2b' are
+ * assumed to perform little-endian to big-endian byte
+ * reordering, those suffixed by '_b2l' blah, blah,
+ * blah, ...
+ */
+
+#if defined(SCSI_NCR_IOMAPPED)
+
+/*
+ * IO mapped only input / output
+ */
+
+#define INB_OFF(o) inb (np->base_io + ncr_offb(o))
+#define OUTB_OFF(o, val) outb ((val), np->base_io + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_l2b (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_l2b (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_b2l ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_b2l ((val), np->base_io + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_b2l (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_b2l (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_l2b ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_l2b ((val), np->base_io + (o))
+
+#else
+
+#define INW_OFF(o) inw_raw (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_raw (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_raw ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_raw ((val), np->base_io + (o))
+
+#endif /* ENDIANs */
+
+#else /* defined SCSI_NCR_IOMAPPED */
+
+/*
+ * MEMORY mapped IO input / output
+ */
+
+#define INB_OFF(o) readb((char *)np->reg + ncr_offb(o))
+#define OUTB_OFF(o, val) writeb((val), (char *)np->reg + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
+
+#else
+
+#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_raw((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
+
+#endif
+
+#endif /* defined SCSI_NCR_IOMAPPED */
+
+#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
+#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
+#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
+
+#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
+
+/*
+ * Set bit field ON, OFF
+ */
+
+#define OUTONB(r, m) OUTB(r, INB(r) | (m))
+#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
+#define OUTONW(r, m) OUTW(r, INW(r) | (m))
+#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
+#define OUTONL(r, m) OUTL(r, INL(r) | (m))
+#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
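+/*
+ * For example, OUTONB(nc_ctest3, FM) sets the fetch pin mode bit of the
+ * CTEST3 register without disturbing its other bits, and
+ * OUTOFFB(nc_ctest3, FM) clears it again.
+ */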
+
+/*
+ * We normally want the chip to have a consistent view
+ * of driver internal data structures when we restart it.
+ * Thus these macros.
+ */
+#define OUTL_DSP(v) \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTL (nc_dsp, (v)); \
+ } while (0)
+
+#define OUTONB_STD() \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTONB (nc_dcntl, (STD|NOCOM)); \
+ } while (0)
+
+
+/*
+** NCR53C8XX Device Ids
+*/
+
+#ifndef PCI_DEVICE_ID_NCR_53C810
+#define PCI_DEVICE_ID_NCR_53C810 1
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C810AP
+#define PCI_DEVICE_ID_NCR_53C810AP 5
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C815
+#define PCI_DEVICE_ID_NCR_53C815 4
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C820
+#define PCI_DEVICE_ID_NCR_53C820 2
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C825
+#define PCI_DEVICE_ID_NCR_53C825 3
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C860
+#define PCI_DEVICE_ID_NCR_53C860 6
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875
+#define PCI_DEVICE_ID_NCR_53C875 0xf
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C875J
+#define PCI_DEVICE_ID_NCR_53C875J 0x8f
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C885
+#define PCI_DEVICE_ID_NCR_53C885 0xd
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895
+#define PCI_DEVICE_ID_NCR_53C895 0xc
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C896
+#define PCI_DEVICE_ID_NCR_53C896 0xb
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C895A
+#define PCI_DEVICE_ID_NCR_53C895A 0x12
+#endif
+
+#ifndef PCI_DEVICE_ID_NCR_53C1510D
+#define PCI_DEVICE_ID_NCR_53C1510D 0xa
+#endif
+
+#ifndef PCI_DEVICE_ID_LSI_53C1010
+#define PCI_DEVICE_ID_LSI_53C1010 0x20
+#endif
+
+#ifndef PCI_DEVICE_ID_LSI_53C1010_66
+#define PCI_DEVICE_ID_LSI_53C1010_66 0x21
+#endif
+
+
+/*
+** NCR53C8XX devices features table.
+*/
+typedef struct {
+ unsigned short device_id;
+ unsigned short revision_id;
+ char *name;
+ unsigned char burst_max; /* log-base-2 of max burst */
+ unsigned char offset_max;
+ unsigned char nr_divisor;
+ unsigned int features;
+#define FE_LED0 (1<<0)
+#define FE_WIDE (1<<1) /* Wide data transfers */
+#define FE_ULTRA (1<<2) /* Ultra speed 20Mtrans/sec */
+#define FE_ULTRA2 (1<<3) /* Ultra 2 - 40 Mtrans/sec */
+#define FE_DBLR (1<<4) /* Clock doubler present */
+#define FE_QUAD (1<<5) /* Clock quadrupler present */
+#define FE_ERL (1<<6) /* Enable read line */
+#define FE_CLSE (1<<7) /* Cache line size enable */
+#define FE_WRIE (1<<8) /* Write & Invalidate enable */
+#define FE_ERMP (1<<9) /* Enable read multiple */
+#define FE_BOF (1<<10) /* Burst opcode fetch */
+#define FE_DFS (1<<11) /* DMA fifo size */
+#define FE_PFEN (1<<12) /* Prefetch enable */
+#define FE_LDSTR (1<<13) /* Load/Store supported */
+#define FE_RAM (1<<14) /* On chip RAM present */
+#define FE_CLK80 (1<<15) /* Board clock is 80 MHz */
+#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */
+#define FE_64BIT (1<<17) /* Supports 64-bit addressing */
+#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */
+#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */
+#define FE_LEDC (1<<20) /* Hardware control of LED */
+#define FE_DIFF (1<<21) /* Support Differential SCSI */
+#define FE_ULTRA3 (1<<22) /* Ultra-3 80Mtrans/sec */
+#define FE_66MHZ (1<<23) /* 66MHz PCI Support */
+
+#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
+#define FE_SCSI_SET	(FE_WIDE|FE_ULTRA|FE_ULTRA2|FE_DBLR|FE_QUAD|FE_CLK80)
+#define FE_SPECIAL_SET (FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM)
+} ncr_chip;
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 3.
+** Memory Read transaction terminated by a retry followed by
+** Memory Read Line command.
+*/
+#define FE_CACHE0_SET (FE_CACHE_SET & ~FE_ERL)
+
+/*
+** DEL 397 - 53C875 Rev 3 - Part Number 609-0392410 - ITEM 5.
+** On paper, this erratum is harmless. But it is a good reason for
+** using a shorter programmed burst length (64 DWORDS instead of 128).
+*/
+
+#define SCSI_NCR_CHIP_TABLE \
+{ \
+ {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, \
+ FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, \
+ FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, \
+ FE_ERL|FE_BOF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C820, 0xff, "820", 4, 8, 4, \
+ FE_WIDE|FE_ERL} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 4, 8, 4, \
+ FE_WIDE|FE_ERL|FE_BOF|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, \
+ FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, \
+ FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|\
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x0f, "875", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x1f, "876", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0x2f, "875E", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "876", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, \
+ FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C895A, 0xff, "895a", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C1510D, 0xff, "1510D", 7, 31, 7, \
+ FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_IO256} \
+ , \
+ {PCI_DEVICE_ID_LSI_53C1010, 0xff, "1010", 6, 31, 7, \
+ FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3} \
+ , \
+ {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010_66", 6, 31, 7, \
+ FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3|FE_66MHZ} \
+}
+
+/*
+ * List of supported NCR chip ids
+ */
+#define SCSI_NCR_CHIP_IDS \
+{ \
+ PCI_DEVICE_ID_NCR_53C810, \
+ PCI_DEVICE_ID_NCR_53C815, \
+ PCI_DEVICE_ID_NCR_53C820, \
+ PCI_DEVICE_ID_NCR_53C825, \
+ PCI_DEVICE_ID_NCR_53C860, \
+ PCI_DEVICE_ID_NCR_53C875, \
+ PCI_DEVICE_ID_NCR_53C875J, \
+ PCI_DEVICE_ID_NCR_53C885, \
+ PCI_DEVICE_ID_NCR_53C895, \
+ PCI_DEVICE_ID_NCR_53C896, \
+ PCI_DEVICE_ID_NCR_53C895A, \
+ PCI_DEVICE_ID_NCR_53C1510D, \
+ PCI_DEVICE_ID_LSI_53C1010, \
+ PCI_DEVICE_ID_LSI_53C1010_66 \
+}
+
+/*
+** Driver setup structure.
+**
+** This structure is initialized from linux config options.
+** It can be overridden at boot-up by the boot command line.
+*/
+#define SCSI_NCR_MAX_EXCLUDES 8
+struct ncr_driver_setup {
+ u_char master_parity;
+ u_char scsi_parity;
+ u_char disconnection;
+ u_char special_features;
+ u_char ultra_scsi;
+ u_char force_sync_nego;
+ u_char reverse_probe;
+ u_char pci_fix_up;
+ u_char use_nvram;
+ u_char verbose;
+ u_char default_tags;
+ u_short default_sync;
+ u_short debug;
+ u_char burst_max;
+ u_char led_pin;
+ u_char max_wide;
+ u_char settle_delay;
+ u_char diff_support;
+ u_char irqm;
+ u_char bus_check;
+ u_char optimize;
+ u_char recovery;
+ u_char host_id;
+ u_short iarb;
+ u_long excludes[SCSI_NCR_MAX_EXCLUDES];
+ char tag_ctrl[100];
+};
+
+/*
+** Initial setup.
+**	Can be overridden at startup by a command line.
+*/
+#define SCSI_NCR_DRIVER_SETUP \
+{ \
+ SCSI_NCR_SETUP_MASTER_PARITY, \
+ SCSI_NCR_SETUP_SCSI_PARITY, \
+ SCSI_NCR_SETUP_DISCONNECTION, \
+ SCSI_NCR_SETUP_SPECIAL_FEATURES, \
+ SCSI_NCR_SETUP_ULTRA_SCSI, \
+ SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \
+ 0, \
+ 0, \
+ 1, \
+ 0, \
+ SCSI_NCR_SETUP_DEFAULT_TAGS, \
+ SCSI_NCR_SETUP_DEFAULT_SYNC, \
+ 0x00, \
+ 7, \
+ SCSI_NCR_SETUP_LED_PIN, \
+ 1, \
+ SCSI_NCR_SETUP_SETTLE_TIME, \
+ SCSI_NCR_SETUP_DIFF_SUPPORT, \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 255, \
+ 0x00 \
+}
+
+/*
+** Boot fail safe setup.
+** Override initial setup from boot command line:
+** ncr53c8xx=safe:y
+*/
+#define SCSI_NCR_DRIVER_SAFE_SETUP \
+{ \
+ 0, \
+ 1, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 1, \
+ 2, \
+ 0, \
+ 255, \
+ 0x00, \
+ 255, \
+ 0, \
+ 0, \
+ 10, \
+ 1, \
+ 1, \
+ 1, \
+ 0, \
+ 0, \
+ 255 \
+}
+
+#ifdef SCSI_NCR_NVRAM_SUPPORT
+/*
+** Symbios NvRAM data format
+*/
+#define SYMBIOS_NVRAM_SIZE 368
+#define SYMBIOS_NVRAM_ADDRESS 0x100
+
+struct Symbios_nvram {
+/* Header 6 bytes */
+ u_short type; /* 0x0000 */
+ u_short byte_count; /* excluding header/trailer */
+ u_short checksum;
+
+/* Controller set up 20 bytes */
+ u_char v_major; /* 0x00 */
+ u_char v_minor; /* 0x30 */
+ u_int32 boot_crc;
+ u_short flags;
+#define SYMBIOS_SCAM_ENABLE (1)
+#define SYMBIOS_PARITY_ENABLE (1<<1)
+#define SYMBIOS_VERBOSE_MSGS (1<<2)
+#define SYMBIOS_CHS_MAPPING (1<<3)
+#define SYMBIOS_NO_NVRAM (1<<3) /* ??? */
+ u_short flags1;
+#define SYMBIOS_SCAN_HI_LO (1)
+ u_short term_state;
+#define SYMBIOS_TERM_CANT_PROGRAM (0)
+#define SYMBIOS_TERM_ENABLED (1)
+#define SYMBIOS_TERM_DISABLED (2)
+ u_short rmvbl_flags;
+#define SYMBIOS_RMVBL_NO_SUPPORT (0)
+#define SYMBIOS_RMVBL_BOOT_DEVICE (1)
+#define SYMBIOS_RMVBL_MEDIA_INSTALLED (2)
+ u_char host_id;
+ u_char num_hba; /* 0x04 */
+ u_char num_devices; /* 0x10 */
+ u_char max_scam_devices; /* 0x04 */
+ u_char num_valid_scam_devives; /* 0x00 */
+ u_char rsvd;
+
+/* Boot order 14 bytes * 4 */
+ struct Symbios_host{
+ u_short type; /* 4:8xx / 0:nok */
+ u_short device_id; /* PCI device id */
+ u_short vendor_id; /* PCI vendor id */
+ u_char bus_nr; /* PCI bus number */
+ u_char device_fn; /* PCI device/function number << 3*/
+ u_short word8;
+ u_short flags;
+#define SYMBIOS_INIT_SCAN_AT_BOOT (1)
+ u_short io_port; /* PCI io_port address */
+ } host[4];
+
+/* Targets 8 bytes * 16 */
+ struct Symbios_target {
+ u_char flags;
+#define SYMBIOS_DISCONNECT_ENABLE (1)
+#define SYMBIOS_SCAN_AT_BOOT_TIME (1<<1)
+#define SYMBIOS_SCAN_LUNS (1<<2)
+#define SYMBIOS_QUEUE_TAGS_ENABLED (1<<3)
+ u_char rsvd;
+ u_char bus_width; /* 0x08/0x10 */
+ u_char sync_offset;
+ u_short sync_period; /* 4*period factor */
+ u_short timeout;
+ } target[16];
+/* Scam table 8 bytes * 4 */
+ struct Symbios_scam {
+ u_short id;
+ u_short method;
+#define SYMBIOS_SCAM_DEFAULT_METHOD (0)
+#define SYMBIOS_SCAM_DONT_ASSIGN (1)
+#define SYMBIOS_SCAM_SET_SPECIFIC_ID (2)
+#define SYMBIOS_SCAM_USE_ORDER_GIVEN (3)
+ u_short status;
+#define SYMBIOS_SCAM_UNKNOWN (0)
+#define SYMBIOS_SCAM_DEVICE_NOT_FOUND (1)
+#define SYMBIOS_SCAM_ID_NOT_SET (2)
+#define SYMBIOS_SCAM_ID_VALID (3)
+ u_char target_id;
+ u_char rsvd;
+ } scam[4];
+
+ u_char spare_devices[15*8];
+ u_char trailer[6]; /* 0xfe 0xfe 0x00 0x00 0x00 0x00 */
+};
+typedef struct Symbios_nvram Symbios_nvram;
+typedef struct Symbios_host Symbios_host;
+typedef struct Symbios_target Symbios_target;
+typedef struct Symbios_scam Symbios_scam;
+
+/*
+** Tekram NvRAM data format.
+*/
+#define TEKRAM_NVRAM_SIZE 64
+#define TEKRAM_93C46_NVRAM_ADDRESS 0
+#define TEKRAM_24C16_NVRAM_ADDRESS 0x40
+
+struct Tekram_nvram {
+ struct Tekram_target {
+ u_char flags;
+#define TEKRAM_PARITY_CHECK (1)
+#define TEKRAM_SYNC_NEGO (1<<1)
+#define TEKRAM_DISCONNECT_ENABLE (1<<2)
+#define TEKRAM_START_CMD (1<<3)
+#define TEKRAM_TAGGED_COMMANDS (1<<4)
+#define TEKRAM_WIDE_NEGO (1<<5)
+ u_char sync_index;
+ u_short word2;
+ } target[16];
+ u_char host_id;
+ u_char flags;
+#define TEKRAM_MORE_THAN_2_DRIVES (1)
+#define TEKRAM_DRIVES_SUP_1GB (1<<1)
+#define TEKRAM_RESET_ON_POWER_ON (1<<2)
+#define TEKRAM_ACTIVE_NEGATION (1<<3)
+#define TEKRAM_IMMEDIATE_SEEK (1<<4)
+#define TEKRAM_SCAN_LUNS (1<<5)
+#define TEKRAM_REMOVABLE_FLAGS (3<<6) /* 0: disable; 1: boot device; 2:all */
+ u_char boot_delay_index;
+ u_char max_tags_index;
+ u_short flags1;
+#define TEKRAM_F2_F6_ENABLED (1)
+ u_short spare[29];
+};
+typedef struct Tekram_nvram Tekram_nvram;
+typedef struct Tekram_target Tekram_target;
+
+#endif /* SCSI_NCR_NVRAM_SUPPORT */
+
+/**************** ORIGINAL CONTENT of ncrreg.h from FreeBSD ******************/
+
+/*-----------------------------------------------------------------
+**
+** The ncr 53c810 register structure.
+**
+**-----------------------------------------------------------------
+*/
+
+struct ncr_reg {
+/*00*/ u_char nc_scntl0; /* full arb., ena parity, par->ATN */
+
+/*01*/ u_char nc_scntl1; /* no reset */
+ #define ISCON 0x10 /* connected to scsi */
+ #define CRST 0x08 /* force reset */
+ #define IARB 0x02 /* immediate arbitration */
+
+/*02*/ u_char nc_scntl2; /* no disconnect expected */
+ #define SDU 0x80 /* cmd: disconnect will raise error */
+ #define CHM 0x40 /* sta: chained mode */
+ #define WSS 0x08 /* sta: wide scsi send [W]*/
+ #define WSR 0x01 /* sta: wide scsi received [W]*/
+
+/*03*/ u_char nc_scntl3; /* cnf system clock dependent */
+ #define EWS 0x08 /* cmd: enable wide scsi [W]*/
+ #define ULTRA 0x80 /* cmd: ULTRA enable */
+ /* bits 0-2, 7 rsvd for C1010 */
+
+/*04*/ u_char nc_scid; /* cnf host adapter scsi address */
+ #define RRE 0x40 /* r/w:e enable response to resel. */
+ #define SRE 0x20 /* r/w:e enable response to select */
+
+/*05*/ u_char nc_sxfer; /* ### Sync speed and count */
+ /* bits 6-7 rsvd for C1010 */
+
+/*06*/ u_char nc_sdid; /* ### Destination-ID */
+
+/*07*/ u_char nc_gpreg; /* ??? IO-Pins */
+
+/*08*/ u_char nc_sfbr; /* ### First byte in phase */
+
+/*09*/ u_char nc_socl;
+ #define CREQ 0x80 /* r/w: SCSI-REQ */
+ #define CACK 0x40 /* r/w: SCSI-ACK */
+ #define CBSY 0x20 /* r/w: SCSI-BSY */
+ #define CSEL 0x10 /* r/w: SCSI-SEL */
+ #define CATN 0x08 /* r/w: SCSI-ATN */
+ #define CMSG 0x04 /* r/w: SCSI-MSG */
+ #define CC_D 0x02 /* r/w: SCSI-C_D */
+ #define CI_O 0x01 /* r/w: SCSI-I_O */
+
+/*0a*/ u_char nc_ssid;
+
+/*0b*/ u_char nc_sbcl;
+
+/*0c*/ u_char nc_dstat;
+ #define DFE 0x80 /* sta: dma fifo empty */
+ #define MDPE 0x40 /* int: master data parity error */
+ #define BF 0x20 /* int: script: bus fault */
+ #define ABRT 0x10 /* int: script: command aborted */
+ #define SSI 0x08 /* int: script: single step */
+ #define SIR 0x04 /* int: script: interrupt instruct. */
+ #define IID 0x01 /* int: script: illegal instruct. */
+
+/*0d*/ u_char nc_sstat0;
+ #define ILF 0x80 /* sta: data in SIDL register lsb */
+ #define ORF 0x40 /* sta: data in SODR register lsb */
+ #define OLF 0x20 /* sta: data in SODL register lsb */
+ #define AIP 0x10 /* sta: arbitration in progress */
+ #define LOA 0x08 /* sta: arbitration lost */
+ #define WOA 0x04 /* sta: arbitration won */
+ #define IRST 0x02 /* sta: scsi reset signal */
+ #define SDP 0x01 /* sta: scsi parity signal */
+
+/*0e*/ u_char nc_sstat1;
+ #define FF3210 0xf0 /* sta: bytes in the scsi fifo */
+
+/*0f*/ u_char nc_sstat2;
+ #define ILF1 0x80 /* sta: data in SIDL register msb[W]*/
+ #define ORF1 0x40 /* sta: data in SODR register msb[W]*/
+ #define OLF1 0x20 /* sta: data in SODL register msb[W]*/
+ #define DM 0x04 /* sta: DIFFSENS mismatch (895/6 only) */
+ #define LDSC 0x02 /* sta: disconnect & reconnect */
+
+/*10*/ u_char nc_dsa; /* --> Base page */
+/*11*/ u_char nc_dsa1;
+/*12*/ u_char nc_dsa2;
+/*13*/ u_char nc_dsa3;
+
+/*14*/ u_char nc_istat; /* --> Main Command and status */
+ #define CABRT 0x80 /* cmd: abort current operation */
+ #define SRST 0x40 /* mod: reset chip */
+ #define SIGP 0x20 /* r/w: message from host to ncr */
+ #define SEM 0x10 /* r/w: message between host + ncr */
+ #define CON 0x08 /* sta: connected to scsi */
+ #define INTF 0x04 /* sta: int on the fly (reset by wr)*/
+ #define SIP 0x02 /* sta: scsi-interrupt */
+ #define DIP 0x01 /* sta: host/script interrupt */
+
+/*15*/ u_char nc_istat1; /* 896 only */
+/*16*/ u_char nc_mbox0; /* 896 only */
+/*17*/ u_char nc_mbox1; /* 896 only */
+
+/*18*/ u_char nc_ctest0;
+/*19*/ u_char nc_ctest1;
+
+/*1a*/ u_char nc_ctest2;
+ #define CSIGP 0x40
+ /* bits 0-2,7 rsvd for C1010 */
+
+/*1b*/ u_char nc_ctest3;
+ #define FLF 0x08 /* cmd: flush dma fifo */
+ #define CLF 0x04 /* cmd: clear dma fifo */
+ #define FM 0x02 /* mod: fetch pin mode */
+ #define WRIE 0x01 /* mod: write and invalidate enable */
+ /* bits 4-7 rsvd for C1010 */
+
+/*1c*/ u_int32 nc_temp; /* ### Temporary stack */
+
+/*20*/ u_char nc_dfifo;
+/*21*/ u_char nc_ctest4;
+ #define BDIS 0x80 /* mod: burst disable */
+ #define MPEE 0x08 /* mod: master parity error enable */
+
+/*22*/ u_char nc_ctest5;
+ #define DFS 0x20 /* mod: dma fifo size */
+ /* bits 0-1, 3-7 rsvd for C1010 */
+/*23*/ u_char nc_ctest6;
+
+/*24*/ u_int32 nc_dbc; /* ### Byte count and command */
+/*28*/ u_int32 nc_dnad; /* ### Next command register */
+/*2c*/ u_int32 nc_dsp; /* --> Script Pointer */
+/*30*/ u_int32 nc_dsps; /* --> Script pointer save/opcode#2 */
+
+/*34*/ u_char nc_scratcha; /* Temporary register a */
+/*35*/ u_char nc_scratcha1;
+/*36*/ u_char nc_scratcha2;
+/*37*/ u_char nc_scratcha3;
+
+/*38*/ u_char nc_dmode;
+ #define BL_2 0x80 /* mod: burst length shift value +2 */
+ #define BL_1 0x40 /* mod: burst length shift value +1 */
+ #define ERL 0x08 /* mod: enable read line */
+ #define ERMP 0x04 /* mod: enable read multiple */
+ #define BOF 0x02 /* mod: burst op code fetch */
+
+/*39*/ u_char nc_dien;
+/*3a*/ u_char nc_sbr;
+
+/*3b*/ u_char nc_dcntl; /* --> Script execution control */
+ #define CLSE 0x80 /* mod: cache line size enable */
+ #define PFF 0x40 /* cmd: pre-fetch flush */
+ #define PFEN 0x20 /* mod: pre-fetch enable */
+ #define SSM 0x10 /* mod: single step mode */
+ #define IRQM 0x08 /* mod: irq mode (1 = totem pole !) */
+ #define STD 0x04 /* cmd: start dma mode */
+ #define IRQD 0x02 /* mod: irq disable */
+ #define NOCOM 0x01 /* cmd: protect sfbr while reselect */
+ /* bits 0-1 rsvd for C1010 */
+
+/*3c*/ u_int32 nc_adder;
+
+/*40*/ u_short nc_sien; /* -->: interrupt enable */
+/*42*/ u_short nc_sist; /* <--: interrupt status */
+ #define SBMC 0x1000/* sta: SCSI Bus Mode Change (895/6 only) */
+ #define STO 0x0400/* sta: timeout (select) */
+ #define GEN 0x0200/* sta: timeout (general) */
+ #define HTH 0x0100/* sta: timeout (handshake) */
+ #define MA 0x80 /* sta: phase mismatch */
+ #define CMP 0x40 /* sta: arbitration complete */
+ #define SEL 0x20 /* sta: selected by another device */
+ #define RSL 0x10 /* sta: reselected by another device*/
+ #define SGE 0x08 /* sta: gross error (over/underflow)*/
+ #define UDC 0x04 /* sta: unexpected disconnect */
+ #define RST 0x02 /* sta: scsi bus reset detected */
+ #define PAR 0x01 /* sta: scsi parity error */
+
+/*44*/ u_char nc_slpar;
+/*45*/ u_char nc_swide;
+/*46*/ u_char nc_macntl;
+/*47*/ u_char nc_gpcntl;
+/*48*/ u_char nc_stime0; /* cmd: timeout for select&handshake*/
+/*49*/ u_char nc_stime1; /* cmd: timeout user defined */
+/*4a*/ u_short nc_respid; /* sta: Reselect-IDs */
+
+/*4c*/ u_char nc_stest0;
+
+/*4d*/ u_char nc_stest1;
+ #define SCLK 0x80 /* Use the PCI clock as SCSI clock */
+ #define DBLEN 0x08 /* clock doubler running */
+ #define DBLSEL 0x04 /* clock doubler selected */
+
+
+/*4e*/ u_char nc_stest2;
+ #define ROF 0x40 /* reset scsi offset (after gross error!) */
+ #define EXT 0x02 /* extended filtering */
+
+/*4f*/ u_char nc_stest3;
+ #define TE 0x80 /* c: tolerAnt enable */
+ #define HSC 0x20 /* c: Halt SCSI Clock */
+ #define CSF 0x02 /* c: clear scsi fifo */
+
+/*50*/ u_short nc_sidl; /* Lowlevel: latched from scsi data */
+/*52*/ u_char nc_stest4;
+ #define SMODE 0xc0 /* SCSI bus mode (895/6 only) */
+ #define SMODE_HVD 0x40 /* High Voltage Differential */
+ #define SMODE_SE 0x80 /* Single Ended */
+ #define SMODE_LVD 0xc0 /* Low Voltage Differential */
+ #define LCKFRQ 0x20 /* Frequency Lock (895/6 only) */
+ /* bits 0-5 rsvd for C1010 */
+
+/*53*/ u_char nc_53_;
+/*54*/ u_short nc_sodl; /* Lowlevel: data out to scsi data */
+/*56*/ u_char nc_ccntl0; /* Chip Control 0 (896) */
+ #define ENPMJ 0x80 /* Enable Phase Mismatch Jump */
+ #define PMJCTL 0x40 /* Phase Mismatch Jump Control */
+ #define ENNDJ 0x20 /* Enable Non Data PM Jump */
+ #define DISFC 0x10 /* Disable Auto FIFO Clear */
+ #define DILS 0x02 /* Disable Internal Load/Store */
+ #define DPR 0x01 /* Disable Pipe Req */
+
+/*57*/ u_char nc_ccntl1; /* Chip Control 1 (896) */
+ #define ZMOD 0x80 /* High Impedance Mode */
+ #define DIC 0x10 /* Disable Internal Cycles */
+ #define DDAC 0x08 /* Disable Dual Address Cycle */
+ #define XTIMOD 0x04 /* 64-bit Table Ind. Indexing Mode */
+ #define EXTIBMV 0x02 /* Enable 64-bit Table Ind. BMOV */
+ #define EXDBMV 0x01 /* Enable 64-bit Direct BMOV */
+
+/*58*/ u_short nc_sbdl; /* Lowlevel: data from scsi data */
+/*5a*/ u_short nc_5a_;
+
+/*5c*/ u_char nc_scr0; /* Working register B */
+/*5d*/ u_char nc_scr1; /* */
+/*5e*/ u_char nc_scr2; /* */
+/*5f*/ u_char nc_scr3; /* */
+
+/*60*/ u_char nc_scrx[64]; /* Working register C-R */
+/*a0*/ u_int32 nc_mmrs; /* Memory Move Read Selector */
+/*a4*/ u_int32 nc_mmws; /* Memory Move Write Selector */
+/*a8*/ u_int32 nc_sfs; /* Script Fetch Selector */
+/*ac*/ u_int32 nc_drs; /* DSA Relative Selector */
+/*b0*/ u_int32 nc_sbms; /* Static Block Move Selector */
+/*b4*/ u_int32 nc_dbms; /* Dynamic Block Move Selector */
+/*b8*/ u_int32 nc_dnad64; /* DMA Next Address 64 */
+/*bc*/ u_short nc_scntl4; /* C1010 only */
+ #define U3EN 0x80 /* Enable Ultra 3 */
+ #define AIPEN 0x40 /* Allow check upper byte lanes */
+ #define XCLKH_DT 0x08 /* Extra clock of data hold on DT
+ transfer edge */
+ #define XCLKH_ST 0x04 /* Extra clock of data hold on ST
+ transfer edge */
+
+/*be*/ u_char nc_aipcntl0; /* Epat Control 1 C1010 only */
+/*bf*/ u_char nc_aipcntl1; /* AIP Control C1010_66 Only */
+
+/*c0*/ u_int32 nc_pmjad1; /* Phase Mismatch Jump Address 1 */
+/*c4*/ u_int32 nc_pmjad2; /* Phase Mismatch Jump Address 2 */
+/*c8*/ u_char nc_rbc; /* Remaining Byte Count */
+/*c9*/ u_char nc_rbc1; /* */
+/*ca*/ u_char nc_rbc2; /* */
+/*cb*/ u_char nc_rbc3; /* */
+
+/*cc*/ u_char nc_ua; /* Updated Address */
+/*cd*/ u_char nc_ua1; /* */
+/*ce*/ u_char nc_ua2; /* */
+/*cf*/ u_char nc_ua3; /* */
+/*d0*/ u_int32 nc_esa; /* Entry Storage Address */
+/*d4*/ u_char nc_ia; /* Instruction Address */
+/*d5*/ u_char nc_ia1;
+/*d6*/ u_char nc_ia2;
+/*d7*/ u_char nc_ia3;
+/*d8*/ u_int32 nc_sbc; /* SCSI Byte Count (3 bytes only) */
+/*dc*/ u_int32 nc_csbc; /* Cumulative SCSI Byte Count */
+
+ /* Following for C1010 only */
+/*e0*/ u_short nc_crcpad; /* CRC Value */
+/*e2*/ u_char nc_crccntl0; /* CRC control register */
+ #define SNDCRC 0x10 /* Send CRC Request */
+/*e3*/ u_char nc_crccntl1; /* CRC control register */
+/*e4*/ u_int32 nc_crcdata; /* CRC data register */
+/*e8*/ u_int32 nc_e8_; /* rsvd */
+/*ec*/ u_int32 nc_ec_; /* rsvd */
+/*f0*/ u_short nc_dfbc; /* DMA FIFO byte count */
+
+};
+
+/*-----------------------------------------------------------
+**
+** Utility macros for the script.
+**
+**-----------------------------------------------------------
+*/
+
+#define REGJ(p,r) (offsetof(struct ncr_reg, p ## r))
+#define REG(r) REGJ (nc_, r)
+
+typedef u_int32 ncrcmd;
+
+/*-----------------------------------------------------------
+**
+** SCSI phases
+**
+** DT phases illegal for ncr driver.
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_DATA_OUT 0x00000000
+#define SCR_DATA_IN 0x01000000
+#define SCR_COMMAND 0x02000000
+#define SCR_STATUS 0x03000000
+#define SCR_DT_DATA_OUT 0x04000000
+#define SCR_DT_DATA_IN 0x05000000
+#define SCR_MSG_OUT 0x06000000
+#define SCR_MSG_IN 0x07000000
+
+#define SCR_ILG_OUT 0x04000000
+#define SCR_ILG_IN 0x05000000
+
+/*-----------------------------------------------------------
+**
+** Data transfer via SCSI.
+**
+**-----------------------------------------------------------
+**
+** MOVE_ABS (LEN)
+** <<start address>>
+**
+** MOVE_IND (LEN)
+** <<dnad_offset>>
+**
+** MOVE_TBL
+** <<dnad_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define OPC_MOVE 0x08000000
+
+#define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l))
+#define SCR_MOVE_TBL (0x10000000 | OPC_MOVE)
+
+#define SCR_CHMOV_ABS(l) ((0x00000000) | (l))
+#define SCR_CHMOV_IND(l) ((0x20000000) | (l))
+#define SCR_CHMOV_TBL (0x10000000)
+
+struct scr_tblmove {
+ u_int32 size;
+ u_int32 addr;
+};
+
+/*-----------------------------------------------------------
+**
+** Selection
+**
+**-----------------------------------------------------------
+**
+** SEL_ABS | SCR_ID (0..15) [ | REL_JMP]
+** <<alternate_address>>
+**
+** SEL_TBL | << dnad_offset>> [ | REL_JMP]
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SEL_ABS 0x40000000
+#define SCR_SEL_ABS_ATN 0x41000000
+#define SCR_SEL_TBL 0x42000000
+#define SCR_SEL_TBL_ATN 0x43000000
+
+struct scr_tblsel {
+ u_char sel_scntl4;
+ u_char sel_sxfer;
+ u_char sel_id;
+ u_char sel_scntl3;
+};
+
+#define SCR_JMP_REL 0x04000000
+#define SCR_ID(id) (((u_int32)(id)) << 16)
+
+/*-----------------------------------------------------------
+**
+** Waiting for Disconnect or Reselect
+**
+**-----------------------------------------------------------
+**
+** WAIT_DISC
+** dummy: <<alternate_address>>
+**
+** WAIT_RESEL
+** <<alternate_address>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_WAIT_DISC 0x48000000
+#define SCR_WAIT_RESEL 0x50000000
+
+/*-----------------------------------------------------------
+**
+** Bit Set / Reset
+**
+**-----------------------------------------------------------
+**
+** SET (flags {|.. })
+**
+** CLR (flags {|.. })
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_SET(f) (0x58000000 | (f))
+#define SCR_CLR(f) (0x60000000 | (f))
+
+#define SCR_CARRY 0x00000400
+#define SCR_TRG 0x00000200
+#define SCR_ACK 0x00000040
+#define SCR_ATN 0x00000008
+
+
+
+
+/*-----------------------------------------------------------
+**
+** Memory to memory move
+**
+**-----------------------------------------------------------
+**
+** COPY (bytecount)
+** << source_address >>
+** << destination_address >>
+**
+** SCR_COPY sets the NO FLUSH option by default.
+** SCR_COPY_F does not set this option.
+**
+** For chips which do not support this option,
+** ncr_copy_and_bind() will remove this bit.
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_FLUSH 0x01000000
+
+#define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n))
+#define SCR_COPY_F(n) (0xc0000000 | (n))
+
+/*-----------------------------------------------------------
+**
+** Register move and binary operations
+**
+**-----------------------------------------------------------
+**
+** SFBR_REG (reg, op, data) reg = SFBR op data
+** << 0 >>
+**
+** REG_SFBR (reg, op, data) SFBR = reg op data
+** << 0 >>
+**
+** REG_REG (reg, op, data) reg = reg op data
+** << 0 >>
+**
+**-----------------------------------------------------------
+** On 810A, 860, 825A, 875, 895 and 896 chips the content
+** of SFBR register can be used as data (SCR_SFBR_DATA).
+**	The 896 has additional IO registers starting at
+** offset 0x80. Bit 7 of register offset is stored in
+** bit 7 of the SCRIPTS instruction first DWORD.
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS(ofs) ((((ofs) & 0x7f) << 16ul) + ((ofs) & 0x80))
+
+#define SCR_SFBR_REG(reg,op,data) \
+ (0x68000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_SFBR(reg,op,data) \
+ (0x70000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+#define SCR_REG_REG(reg,op,data) \
+ (0x78000000 | (SCR_REG_OFS(REG(reg))) | (op) | (((data)&0xff)<<8ul))
+
+
+#define SCR_LOAD 0x00000000
+#define SCR_SHL 0x01000000
+#define SCR_OR 0x02000000
+#define SCR_XOR 0x03000000
+#define SCR_AND 0x04000000
+#define SCR_SHR 0x05000000
+#define SCR_ADD 0x06000000
+#define SCR_ADDC 0x07000000
+
+#define SCR_SFBR_DATA (0x00800000>>8ul) /* Use SFBR as data */
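+
+/*
+** Usage sketch (illustrative only): add one to a register, or read a
+** register into SFBR.  REG() maps a register name to its offset and is
+** supplied by the driver that includes this file; scratcha is used here
+** only as an example register name.
+**
+**   scr[0] = SCR_REG_REG (scratcha, SCR_ADD, 1);    (scratcha += 1)
+**   scr[1] = 0;                                     (<< 0 >>)
+**
+**   scr[0] = SCR_REG_SFBR (scratcha, SCR_OR, 0);    (SFBR = scratcha)
+**   scr[1] = 0;
+*/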
+
+/*-----------------------------------------------------------
+**
+** FROM_REG (reg) SFBR = reg
+** << 0 >>
+**
+** TO_REG (reg) reg = SFBR
+** << 0 >>
+**
+** LOAD_REG (reg, data) reg = <data>
+** << 0 >>
+**
+** LOAD_SFBR(data) SFBR = <data>
+** << 0 >>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_FROM_REG(reg) \
+ SCR_REG_SFBR(reg,SCR_OR,0)
+
+#define SCR_TO_REG(reg) \
+ SCR_SFBR_REG(reg,SCR_OR,0)
+
+#define SCR_LOAD_REG(reg,data) \
+ SCR_REG_REG(reg,SCR_LOAD,data)
+
+#define SCR_LOAD_SFBR(data) \
+ (SCR_REG_SFBR (gpreg, SCR_LOAD, data))
+
+/*-----------------------------------------------------------
+**
+** LOAD from memory to register.
+** STORE from register to memory.
+**
+** Only supported by 810A, 860, 825A, 875, 895 and 896.
+**
+**-----------------------------------------------------------
+**
+** LOAD_ABS (LEN)
+** <<start address>>
+**
+** LOAD_REL (LEN) (DSA relative)
+** <<dsa_offset>>
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_REG_OFS2(ofs) (((ofs) & 0xff) << 16ul)
+#define SCR_NO_FLUSH2 0x02000000
+#define SCR_DSA_REL2 0x10000000
+
+#define SCR_LOAD_R(reg, how, n) \
+ (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_STORE_R(reg, how, n) \
+ (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n))
+
+#define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n)
+#define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n)
+#define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n)
+
+#define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n)
+#define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n)
+#define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n)
+#define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n)
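+
+/*
+** Usage sketch (illustrative only, hypothetical names): load the four
+** bytes of scratcha from a DSA-relative location, then store them back
+** to an absolute address:
+**
+**   scr[0] = SCR_LOAD_REL (scratcha, 4);
+**   scr[1] = dsa_off;                   (<<dsa_offset>>)
+**
+**   scr[0] = SCR_STORE_ABS (scratcha, 4);
+**   scr[1] = mem_phys;                  (<<start address>>)
+*/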
+
+
+/*-----------------------------------------------------------
+**
+** Jump / Call / Return / Interrupt
+**
+**-----------------------------------------------------------
+**
+** JUMP [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** JUMPR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** CALL [ | IFTRUE/IFFALSE ( ... ) ]
+** <<address>>
+**
+** CALLR [ | IFTRUE/IFFALSE ( ... ) ]
+** <<distance>>
+**
+** RETURN [ | IFTRUE/IFFALSE ( ... ) ]
+** <<dummy>>
+**
+** INT [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** INT_FLY [ | IFTRUE/IFFALSE ( ... ) ]
+** <<ident>>
+**
+** Conditions:
+** WHEN (phase)
+** IF (phase)
+** CARRYSET
+** DATA (data)
+** MASK (data, mask)
+**
+**-----------------------------------------------------------
+*/
+
+#define SCR_NO_OP 0x80000000
+#define SCR_JUMP 0x80080000
+#define SCR_JUMP64 0x80480000
+#define SCR_JUMPR 0x80880000
+#define SCR_CALL 0x88080000
+#define SCR_CALLR 0x88880000
+#define SCR_RETURN 0x90080000
+#define SCR_INT 0x98080000
+#define SCR_INT_FLY 0x98180000
+
+#define IFFALSE(arg) (0x00080000 | (arg))
+#define IFTRUE(arg) (0x00000000 | (arg))
+
+#define WHEN(phase) (0x00030000 | (phase))
+#define IF(phase) (0x00020000 | (phase))
+
+#define DATA(D) (0x00040000 | ((D) & 0xff))
+#define MASK(D,M) (0x00040000 | (((M ^ 0xff) & 0xff) << 8ul)|((D) & 0xff))
+
+#define CARRYSET (0x00200000)
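+
+/*
+** Usage sketch (illustrative only, hypothetical names): jump to target
+** when the chip is in MSG IN phase, and raise a SCRIPTS interrupt when
+** the byte in SFBR equals MESSAGE REJECT (M_REJECT, defined below).
+** SCR_MSG_IN is the phase code defined earlier in this header; the
+** ident word is handed back to the driver in the DSPS register.
+**
+**   scr[0] = SCR_JUMP ^ IFTRUE (WHEN (SCR_MSG_IN));
+**   scr[1] = target;                    (<<address>>)
+**
+**   scr[0] = SCR_INT ^ IFTRUE (DATA (0x07));
+**   scr[1] = ident;                     (<<ident>>)
+*/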
+
+/*-----------------------------------------------------------
+**
+** SCSI constants.
+**
+**-----------------------------------------------------------
+*/
+
+/*
+** Messages
+*/
+
+#define M_COMPLETE (0x00)
+#define M_EXTENDED (0x01)
+#define M_SAVE_DP (0x02)
+#define M_RESTORE_DP (0x03)
+#define M_DISCONNECT (0x04)
+#define M_ID_ERROR (0x05)
+#define M_ABORT (0x06)
+#define M_REJECT (0x07)
+#define M_NOOP (0x08)
+#define M_PARITY (0x09)
+#define M_LCOMPLETE (0x0a)
+#define M_FCOMPLETE (0x0b)
+#define M_RESET (0x0c)
+#define M_ABORT_TAG (0x0d)
+#define M_CLEAR_QUEUE (0x0e)
+#define M_INIT_REC (0x0f)
+#define M_REL_REC (0x10)
+#define M_TERMINATE (0x11)
+#define M_SIMPLE_TAG (0x20)
+#define M_HEAD_TAG (0x21)
+#define M_ORDERED_TAG (0x22)
+#define M_IGN_RESIDUE (0x23)
+#define M_IDENTIFY (0x80)
+
+#define M_X_MODIFY_DP (0x00)
+#define M_X_SYNC_REQ (0x01)
+#define M_X_WIDE_REQ (0x03)
+#define M_X_PPR_REQ (0x04)
+
+/*
+** Status
+*/
+
+#define S_GOOD (0x00)
+#define S_CHECK_COND (0x02)
+#define S_COND_MET (0x04)
+#define S_BUSY (0x08)
+#define S_INT (0x10)
+#define S_INT_COND_MET (0x14)
+#define S_CONFLICT (0x18)
+#define S_TERMINATED (0x20)
+#define S_QUEUE_FULL (0x28)
+#define S_ILLEGAL (0xff)
+#define S_SENSE (0x80)
+
+/*
+ * End of ncrreg from FreeBSD
+ */
+
+#endif /* !defined HOSTS_C */
+
+#endif /* defined SYM53C8XX_DEFS_H */
diff --git a/linux/src/drivers/scsi/t128.c b/linux/src/drivers/scsi/t128.c
new file mode 100644
index 0000000..198e910
--- /dev/null
+++ b/linux/src/drivers/scsi/t128.c
@@ -0,0 +1,400 @@
+#define AUTOSENSE
+#define PSEUDO_DMA
+
+/*
+ * Trantor T128/T128F/T228 driver
+ * Note : architecturally, the T100 and T130 are different and won't
+ * work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 3.
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+/*
+ * Options :
+ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
+ * for commands that return with a CHECK CONDITION status.
+ *
+ * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
+ * increase compared to polled I/O.
+ *
+ * PARITY - enable parity checking. Not supported.
+ *
+ * SCSI2 - enable support for SCSI-II tagged queueing. Untested.
+ *
+ *
+ * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
+ * only really want to use this if you're having a problem with
+ * dropped characters during high speed communications, and even
+ * then, you're going to be better off twiddling with transfersize.
+ *
+ * USLEEP - enable support for devices that don't disconnect. Untested.
+ *
+ * The card is detected and initialized in one of several ways :
+ * 1. Autoprobe (default) - since the board is memory mapped,
+ * a BIOS signature is scanned for to locate the registers.
+ * An interrupt is triggered to autoprobe for the interrupt
+ * line.
+ *
+ * 2. With command line overrides - t128=address,irq may be
+ * used on the LILO command line to override the defaults.
+ *
+ * 3. With the T128_OVERRIDE compile time define. This is
+ * specified as an array of address, irq tuples. Ie, for
+ * one board at the default 0xcc000 address, IRQ5, I could say
+ * -DT128_OVERRIDE={{0xcc000, 5}}
+ *
+ * Note that if the override methods are used, place holders must
+ * be specified for other boards in the system.
+ *
+ * T128/T128F jumper/dipswitch settings (note : on my sample, the switches
+ * were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) :
+ *
+ * T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot
+ * T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable
+ * cc000 off off
+ * c8000 off on
+ * dc000 on off
+ * d8000 on on
+ *
+ *
+ * Interrupts
+ * There is a 12 pin jumper block, jp1, numbered as follows :
+ * T128 (JP1) T128F (J5)
+ * 2 4 6 8 10 12 11 9 7 5 3 1
+ * 1 3 5 7 9 11 12 10 8 6 4 2
+ *
+ * 3 2-4
+ * 5 1-3
+ * 7 3-5
+ * T128F only
+ * 10 8-10
+ * 12 7-9
+ * 14 10-12
+ * 15 9-11
+ */
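+
+/*
+ * Example (illustrative) : to force a single board at the default
+ * 0xcc000 address on IRQ 5 from the boot loader, something like
+ *
+ *	t128=0xcc000,5
+ *
+ * can be added to the kernel command line; t128_setup() below parses it.
+ */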
+
+#include <asm/system.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "t128.h"
+#define AUTOPROBE_IRQ
+#include "NCR5380.h"
+#include "constants.h"
+#include "sd.h"
+#include<linux/stat.h>
+
+struct proc_dir_entry proc_scsi_t128 = {
+ PROC_SCSI_T128, 4, "t128",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+
+static struct override {
+ unsigned char *address;
+ int irq;
+} overrides
+#ifdef T128_OVERRIDE
+ [] = T128_OVERRIDE;
+#else
+ [4] = {{NULL,IRQ_AUTO}, {NULL,IRQ_AUTO}, {NULL,IRQ_AUTO},
+ {NULL,IRQ_AUTO}};
+#endif
+
+#define NO_OVERRIDES (sizeof(overrides) / sizeof(struct override))
+
+static struct base {
+ unsigned char *address;
+ int noauto;
+} bases[] = {{(unsigned char *) 0xcc000, 0}, {(unsigned char *) 0xc8000, 0},
+ {(unsigned char *) 0xdc000, 0}, {(unsigned char *) 0xd8000, 0}};
+
+#define NO_BASES (sizeof (bases) / sizeof (struct base))
+
+static const struct signature {
+ const char *string;
+ int offset;
+} signatures[] = {
+{"TSROM: SCSI BIOS, Version 1.12", 0x36},
+};
+
+#define NO_SIGNATURES (sizeof (signatures) / sizeof (struct signature))
+
+/*
+ * Function : t128_setup(char *str, int *ints)
+ *
+ * Purpose : LILO command line initialization of the overrides array.
+ *
+ * Inputs : str - unused, ints - array of integer parameters with ints[0]
+ * equal to the number of ints.
+ *
+ */
+
+void t128_setup(char *str, int *ints) {
+ static int commandline_current = 0;
+ int i;
+ if (ints[0] != 2)
+ printk("t128_setup : usage t128=address,irq\n");
+ else
+ if (commandline_current < NO_OVERRIDES) {
+ overrides[commandline_current].address = (unsigned char *) ints[1];
+ overrides[commandline_current].irq = ints[2];
+ for (i = 0; i < NO_BASES; ++i)
+ if (bases[i].address == (unsigned char *) ints[1]) {
+ bases[i].noauto = 1;
+ break;
+ }
+ ++commandline_current;
+ }
+}
+
+/*
+ * Function : int t128_detect(Scsi_Host_Template * tpnt)
+ *
+ * Purpose : detects and initializes T128,T128F, or T228 controllers
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : tpnt - template for this SCSI adapter.
+ *
+ * Returns : 1 if a host adapter was found, 0 if not.
+ *
+ */
+
+int t128_detect(Scsi_Host_Template * tpnt) {
+ static int current_override = 0, current_base = 0;
+ struct Scsi_Host *instance;
+ unsigned char *base;
+ int sig, count;
+
+ tpnt->proc_dir = &proc_scsi_t128;
+ tpnt->proc_info = &t128_proc_info;
+
+ for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
+ base = NULL;
+
+ if (overrides[current_override].address)
+ base = overrides[current_override].address;
+ else
+ for (; !base && (current_base < NO_BASES); ++current_base) {
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi : probing address %08x\n", (unsigned int) bases[current_base].address);
+#endif
+ for (sig = 0; sig < NO_SIGNATURES; ++sig)
+ if (!bases[current_base].noauto && !memcmp
+ (bases[current_base].address + signatures[sig].offset,
+ signatures[sig].string, strlen(signatures[sig].string))) {
+ base = bases[current_base].address;
+#if (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : detected board.\n");
+#endif
+ break;
+ }
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi-t128 : base = %08x\n", (unsigned int) base);
+#endif
+
+ if (!base)
+ break;
+
+ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
+ instance->base = base;
+
+ NCR5380_init(instance, 0);
+
+ if (overrides[current_override].irq != IRQ_AUTO)
+ instance->irq = overrides[current_override].irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
+
+ if (instance->irq != IRQ_NONE)
+ if (request_irq(instance->irq, t128_intr, SA_INTERRUPT, "t128", NULL)) {
+ printk("scsi%d : IRQ%d not free, interrupts disabled\n",
+ instance->host_no, instance->irq);
+ instance->irq = IRQ_NONE;
+ }
+
+ if (instance->irq == IRQ_NONE) {
+ printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ }
+
+#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
+ printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
+#endif
+
+ printk("scsi%d : at 0x%08x", instance->host_no, (int)
+ instance->base);
+ if (instance->irq == IRQ_NONE)
+ printk (" interrupts disabled");
+ else
+ printk (" irq %d", instance->irq);
+ printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
+ CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE);
+ NCR5380_print_options(instance);
+ printk("\n");
+
+ ++current_override;
+ ++count;
+ }
+ return count;
+}
+
+/*
+ * Function : int t128_biosparam(Disk * disk, kdev_t dev, int *ip)
+ *
+ * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
+ * the specified device / size.
+ *
+ * Inputs : size = size of device in sectors (512 bytes), dev = block device
+ * major / minor, ip[] = {heads, sectors, cylinders}
+ *
+ * Returns : always 0 (success), initializes ip
+ *
+ */
+
+/*
+ * XXX Most SCSI boards use this mapping, I could be incorrect.  Someone
+ * using hard disks on a Trantor should verify that this mapping corresponds
+ * to that used by the BIOS / ASPI driver by running the Linux fdisk program
+ * and matching the H-C-S coordinates to what DOS uses.
+ */
+
+int t128_biosparam(Disk * disk, kdev_t dev, int * ip)
+{
+ int size = disk->capacity;
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = size >> 11;
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * unsigned char *dst, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
+ * dst
+ *
+ * Inputs : dst = destination, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
+ int len) {
+ register unsigned char *reg = (unsigned char *) (instance->base +
+ T_DATA_REG_OFFSET), *d = dst;
+ register int i = len;
+
+
+#if 0
+ for (; i; --i) {
+ while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
+#else
+ while (!((instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ *d++ = *reg;
+ }
+
+ if (*(instance->base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ volatile unsigned char *foo;
+ foo = instance->base + T_CONTROL_REG_OFFSET;
+ tmp = *foo;
+ *foo = tmp | T_CR_CT;
+ *foo = tmp;
+ printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+/*
+ * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * unsigned char *src, int len)
+ *
+ * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
+ * src
+ *
+ * Inputs : src = source, len = length in bytes
+ *
+ * Returns : 0 on success, non zero on a failure such as a watchdog
+ * timeout.
+ */
+
+static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
+ int len) {
+ register unsigned char *reg = (unsigned char *) (instance->base +
+ T_DATA_REG_OFFSET), *s = src;
+ register int i = len;
+
+#if 0
+ for (; i; --i) {
+ while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
+#else
+ while (!((instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY)) barrier();
+ for (; i; --i) {
+#endif
+ *reg = *s++;
+ }
+
+ if (*(instance->base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
+ unsigned char tmp;
+ volatile unsigned char *foo;
+ foo = instance->base + T_CONTROL_REG_OFFSET;
+ tmp = *foo;
+ *foo = tmp | T_CR_CT;
+ *foo = tmp;
+ printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
+ instance->host_no);
+ return -1;
+ } else
+ return 0;
+}
+
+#include "NCR5380.c"
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = TRANTOR_T128;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/t128.h b/linux/src/drivers/scsi/t128.h
new file mode 100644
index 0000000..2a3c3cb
--- /dev/null
+++ b/linux/src/drivers/scsi/t128.h
@@ -0,0 +1,169 @@
+/*
+ * Trantor T128/T128F/T228 defines
+ * Note : architecturally, the T100 and T130 are different and won't work
+ *
+ * Copyright 1993, Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@colorado.edu
+ * +1 (303) 440-4894
+ *
+ * DISTRIBUTION RELEASE 3.
+ *
+ * For more information, please consult
+ *
+ * Trantor Systems, Ltd.
+ * T128/T128F/T228 SCSI Host Adapter
+ * Hardware Specifications
+ *
+ * Trantor Systems, Ltd.
+ * 5415 Randall Place
+ * Fremont, CA 94538
+ * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
+ *
+ * and
+ *
+ * NCR 5380 Family
+ * SCSI Protocol Controller
+ * Databook
+ *
+ * NCR Microelectronics
+ * 1635 Aeroplaza Drive
+ * Colorado Springs, CO 80916
+ * 1+ (719) 578-3400
+ * 1+ (800) 334-5454
+ */
+
+#ifndef T128_H
+#define T128_H
+
+#define T128_PUBLIC_RELEASE 3
+
+#define TDEBUG_INIT 0x1
+#define TDEBUG_TRANSFER 0x2
+
+/*
+ * The trantor boards are memory mapped. They use an NCR5380 or
+ * equivalent (my sample board had part second sourced from ZILOG).
+ * NCR's recommended "Pseudo-DMA" architecture is used, where
+ * a PAL drives the DMA signals on the 5380 allowing fast, blind
+ * transfers with proper handshaking.
+ */
+
+/*
+ * Note : a boot switch is provided for the purpose of informing the
+ * firmware to boot or not boot from attached SCSI devices. So, I imagine
+ * there are fewer people who've yanked the ROM like they do on the Seagate
+ * to make bootup faster, and I'll probably use this for autodetection.
+ */
+#define T_ROM_OFFSET 0
+
+/*
+ * Note : my sample board *WAS NOT* populated with the SRAM, so this
+ * can't be used for autodetection without a ROM present.
+ */
+#define T_RAM_OFFSET 0x1800
+
+/*
+ * All of the registers are allocated 32 bytes of address space, except
+ * for the data register (read/write to/from the 5380 in pseudo-DMA mode)
+ */
+#define T_CONTROL_REG_OFFSET 0x1c00 /* rw */
+#define T_CR_INT 0x10 /* Enable interrupts */
+#define T_CR_CT 0x02 /* Reset watchdog timer */
+
+#define T_STATUS_REG_OFFSET 0x1c20 /* ro */
+#define T_ST_BOOT 0x80 /* Boot switch */
+#define T_ST_S3 0x40 /* User settable switches, */
+#define T_ST_S2 0x20 /* read 0 when switch is on, 1 off */
+#define T_ST_S1 0x10
+#define T_ST_PS2 0x08 /* Set for Microchannel 228 */
+#define T_ST_RDY 0x04 /* 5380 DRQ */
+#define T_ST_TIM 0x02 /* indicates 40us watchdog timer fired */
+#define T_ST_ZERO 0x01 /* Always zero */
+
+#define T_5380_OFFSET 0x1d00 /* 8 registers here, see NCR5380.h */
+
+#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
+
+#ifndef ASM
+int t128_abort(Scsi_Cmnd *);
+int t128_biosparam(Disk *, kdev_t, int*);
+int t128_detect(Scsi_Host_Template *);
+int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int t128_reset(Scsi_Cmnd *, unsigned int reset_flags);
+int t128_proc_info (char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout);
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef CMD_PER_LUN
+#define CMD_PER_LUN 2
+#endif
+
+#ifndef CAN_QUEUE
+#define CAN_QUEUE 32
+#endif
+
+/*
+ * I hadn't thought of this with the earlier drivers - but to prevent
+ * macro definition conflicts, we shouldn't define all of the internal
+ * macros when this is being used solely for the host stub.
+ */
+
+#if defined(HOSTS_C) || defined(MODULE)
+
+#define TRANTOR_T128 {NULL, NULL, NULL, NULL, \
+ "Trantor T128/T128F/T228", t128_detect, NULL, \
+ NULL, \
+ NULL, t128_queue_command, t128_abort, t128_reset, NULL, \
+ t128_biosparam, \
+ /* can queue */ CAN_QUEUE, /* id */ 7, SG_ALL, \
+ /* cmd per lun */ CMD_PER_LUN , 0, 0, DISABLE_CLUSTERING}
+
+#endif
+
+#ifndef HOSTS_C
+
+#define NCR5380_implementation_fields \
+ volatile unsigned char *base
+
+#define NCR5380_local_declare() \
+ volatile unsigned char *base
+
+#define NCR5380_setup(instance) \
+ base = (volatile unsigned char *) (instance)->base
+
+#define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
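+
+/*
+ * For example, with a board at the default 0xcc000 address, 5380
+ * register 3 is reached as T128_address(3) = base + 0x1d00 + 3 * 0x20,
+ * i.e. memory location 0xcdd60.  (Illustrative only; the register
+ * numbers are the ones used by NCR5380.h.)
+ */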
+
+#if !(TDEBUG & TDEBUG_TRANSFER)
+#define NCR5380_read(reg) (*(T128_address(reg)))
+#define NCR5380_write(reg, value) (*(T128_address(reg)) = (value))
+#else
+#define NCR5380_read(reg) \
+    (((unsigned char) printk("scsi%d : read register %d at address %08x\n"\
+    , instance->host_no, (reg), T128_address(reg))), *(T128_address(reg)))
+
+#define NCR5380_write(reg, value) { \
+    printk("scsi%d : write %02x to register %d at address %08x\n", \
+	    instance->host_no, (value), (reg), T128_address(reg)); \
+    *(T128_address(reg)) = (value); \
+}
+#endif
+
+#define NCR5380_intr t128_intr
+#define NCR5380_queue_command t128_queue_command
+#define NCR5380_abort t128_abort
+#define NCR5380_reset t128_reset
+#define NCR5380_proc_info t128_proc_info
+
+/* 15 14 12 10 7 5 3
+ 1101 0100 1010 1000 */
+
+#define T128_IRQS 0xc4a8
+
+#endif /* else def HOSTS_C */
+#endif /* ndef ASM */
+#endif /* T128_H */
diff --git a/linux/src/drivers/scsi/tmscsim.c b/linux/src/drivers/scsi/tmscsim.c
new file mode 100644
index 0000000..be986ff
--- /dev/null
+++ b/linux/src/drivers/scsi/tmscsim.c
@@ -0,0 +1,1930 @@
+/***********************************************************************
+ * FILE NAME : TMSCSIM.C *
+ * BY : C.L. Huang, ching@tekram.com.tw *
+ * Description: Device Driver for Tekram DC-390(T) PCI SCSI *
+ * Bus Master Host Adapter *
+ * (C)Copyright 1995-1996 Tekram Technology Co., Ltd. *
+ ***********************************************************************/
+/* Minor enhancements and bugfixes by *
+ * Kurt Garloff <K.Garloff@ping.de> *
+ ***********************************************************************/
+/* HISTORY: *
+ * *
+ * REV# DATE NAME DESCRIPTION *
+ * 1.00 04/24/96 CLH First release *
+ * 1.01 06/12/96 CLH Fixed bug of Media Change for Removable *
+ * Device, scan all LUN. Support Pre2.0.10 *
+ * 1.02 06/18/96 CLH Fixed bug of Command timeout ... *
+ * 1.03 09/25/96 KG Added tmscsim_proc_info() *
+ * 1.04 10/11/96 CLH Updating for support KV 2.0.x *
+ * 1.05 10/18/96 KG Fixed bug in DC390_abort(null ptr deref)*
+ * 1.06 10/25/96 KG Fixed module support *
+ * 1.07 11/09/96 KG Fixed tmscsim_proc_info() *
+ * 1.08 11/18/96 KG Fixed null ptr in DC390_Disconnect() *
+ * 1.09 11/30/96 KG Added register the allocated IO space *
+ * 1.10 12/05/96 CLH Modified tmscsim_proc_info(), and reset *
+ * pending interrupt in DC390_detect() *
+ * 1.11   02/05/97  KG/CLH  Fixed problem with partitions greater    *
+ * than 1GB *
+ ***********************************************************************/
+
+
+#define DC390_DEBUG
+
+#define SCSI_MALLOC
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/bios32.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/config.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < 66354 /* 1.3.50 */
+#include "../block/blk.h"
+#else
+#include <linux/blk.h>
+#endif
+
+#include "scsi.h"
+#include "hosts.h"
+#include "tmscsim.h"
+#include "constants.h"
+#include "sd.h"
+#include <linux/stat.h>
+
+#include "dc390.h"
+
+#define PCI_DEVICE_ID_AMD53C974 PCI_DEVICE_ID_AMD_SCSI
+
+
+#ifndef VERSION_ELF_1_2_13
+struct proc_dir_entry proc_scsi_tmscsim ={
+ PROC_SCSI_DC390T, 7 ,"tmscsim",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+ };
+#endif
+
+static USHORT DC390_StartSCSI( PACB pACB, PDCB pDCB, PSRB pSRB );
+static void DC390_DataOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_DataIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Command_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Status_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgOut_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgIn_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_DataOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_DataInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_CommandPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_StatusPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgOutPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_MsgInPhase( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Nop_0( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+static void DC390_Nop_1( PACB pACB, PSRB pSRB, PUCHAR psstatus);
+
+static void SetXferRate( PACB pACB, PDCB pDCB );
+static void DC390_Disconnect( PACB pACB );
+static void DC390_Reselect( PACB pACB );
+static void SRBdone( PACB pACB, PDCB pDCB, PSRB pSRB );
+static void DoingSRB_Done( PACB pACB );
+static void DC390_ScsiRstDetect( PACB pACB );
+static void DC390_ResetSCSIBus( PACB pACB );
+static void RequestSense( PACB pACB, PDCB pDCB, PSRB pSRB );
+static void EnableMsgOut2( PACB pACB, PSRB pSRB );
+static void EnableMsgOut( PACB pACB, PSRB pSRB );
+static void DC390_InvalidCmd( PACB pACB );
+
+int DC390_initAdapter( PSH psh, ULONG io_port, UCHAR Irq, USHORT index );
+void DC390_initDCB( PACB pACB, PDCB pDCB, PSCSICMD cmd );
+
+#ifdef MODULE
+static int DC390_release(struct Scsi_Host *host);
+static int DC390_shutdown (struct Scsi_Host *host);
+#endif
+
+
+static PSHT pSHT_start = NULL;
+static PSH pSH_start = NULL;
+static PSH pSH_current = NULL;
+static PACB pACB_start= NULL;
+static PACB pACB_current = NULL;
+static PDCB pPrevDCB = NULL;
+static USHORT adapterCnt = 0;
+static USHORT InitialTime = 0;
+static USHORT CurrSyncOffset = 0;
+static ULONG mech1addr;
+static UCHAR mech2bus, mech2Agent, mech2CfgSPenR;
+
+static PVOID DC390_phase0[]={
+ DC390_DataOut_0,
+ DC390_DataIn_0,
+ DC390_Command_0,
+ DC390_Status_0,
+ DC390_Nop_0,
+ DC390_Nop_0,
+ DC390_MsgOut_0,
+ DC390_MsgIn_0,
+ DC390_Nop_1
+ };
+
+static PVOID DC390_phase1[]={
+ DC390_DataOutPhase,
+ DC390_DataInPhase,
+ DC390_CommandPhase,
+ DC390_StatusPhase,
+ DC390_Nop_0,
+ DC390_Nop_0,
+ DC390_MsgOutPhase,
+ DC390_MsgInPhase,
+ DC390_Nop_1,
+ };
+
+UCHAR eepromBuf[MAX_ADAPTER_NUM][128];
+
+
+UCHAR clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20};
+
+UCHAR baddevname1[2][28] ={
+ "SEAGATE ST3390N 9546",
+ "HP C3323-300 4269"};
+
+#define BADDEVCNT 2
+
+
+/***********************************************************************
+ *
+ *
+ *
+ **********************************************************************/
+static void
+QLinkcmd( PSCSICMD cmd, PDCB pDCB )
+{
+ ULONG flags;
+ PSCSICMD pcmd;
+
+ save_flags(flags);
+ cli();
+
+ if( !pDCB->QIORBCnt )
+ {
+ pDCB->pQIORBhead = cmd;
+ pDCB->pQIORBtail = cmd;
+ pDCB->QIORBCnt++;
+ cmd->next = NULL;
+ }
+ else
+ {
+ pcmd = pDCB->pQIORBtail;
+ pcmd->next = cmd;
+ pDCB->pQIORBtail = cmd;
+ pDCB->QIORBCnt++;
+ cmd->next = NULL;
+ }
+
+ restore_flags(flags);
+}
+
+
+static PSCSICMD
+Getcmd( PDCB pDCB )
+{
+ ULONG flags;
+ PSCSICMD pcmd;
+
+ save_flags(flags);
+ cli();
+
+ pcmd = pDCB->pQIORBhead;
+ pDCB->pQIORBhead = pcmd->next;
+ pcmd->next = NULL;
+ pDCB->QIORBCnt--;
+
+ restore_flags(flags);
+ return( pcmd );
+}
+
+
+static PSRB
+GetSRB( PACB pACB )
+{
+ ULONG flags;
+ PSRB pSRB;
+
+ save_flags(flags);
+ cli();
+
+ pSRB = pACB->pFreeSRB;
+ if( pSRB )
+ {
+ pACB->pFreeSRB = pSRB->pNextSRB;
+ pSRB->pNextSRB = NULL;
+ }
+ restore_flags(flags);
+ return( pSRB );
+}
+
+
+static void
+RewaitSRB0( PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb1;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+
+ if( (psrb1 = pDCB->pWaitingSRB) )
+ {
+ pSRB->pNextSRB = psrb1;
+ pDCB->pWaitingSRB = pSRB;
+ }
+ else
+ {
+ pSRB->pNextSRB = NULL;
+ pDCB->pWaitingSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ }
+ restore_flags(flags);
+}
+
+
+static void
+RewaitSRB( PDCB pDCB, PSRB pSRB )
+{
+ PSRB psrb1;
+ ULONG flags;
+ UCHAR bval;
+
+ save_flags(flags);
+ cli();
+ pDCB->GoingSRBCnt--;
+ psrb1 = pDCB->pGoingSRB;
+ if( pSRB == psrb1 )
+ {
+ pDCB->pGoingSRB = psrb1->pNextSRB;
+ }
+ else
+ {
+ while( pSRB != psrb1->pNextSRB )
+ psrb1 = psrb1->pNextSRB;
+ psrb1->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pGoingLast )
+ pDCB->pGoingLast = psrb1;
+ }
+ if( (psrb1 = pDCB->pWaitingSRB) )
+ {
+ pSRB->pNextSRB = psrb1;
+ pDCB->pWaitingSRB = pSRB;
+ }
+ else
+ {
+ pSRB->pNextSRB = NULL;
+ pDCB->pWaitingSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ }
+
+ bval = pSRB->TagNumber;
+ pDCB->TagMask &= (~(1 << bval)); /* Free TAG number */
+ restore_flags(flags);
+}
+
+
+static void
+DoWaitingSRB( PACB pACB )
+{
+ ULONG flags;
+ PDCB ptr, ptr1;
+ PSRB pSRB;
+
+ save_flags(flags);
+ cli();
+
+ if( !(pACB->pActiveDCB) && !(pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV) ) )
+ {
+ ptr = pACB->pDCBRunRobin;
+ if( !ptr )
+ {
+ ptr = pACB->pLinkDCB;
+ pACB->pDCBRunRobin = ptr;
+ }
+ ptr1 = ptr;
+ for( ;ptr1; )
+ {
+ pACB->pDCBRunRobin = ptr1->pNextDCB;
+ if( !( ptr1->MaxCommand > ptr1->GoingSRBCnt ) ||
+ !( pSRB = ptr1->pWaitingSRB ) )
+ {
+ if(pACB->pDCBRunRobin == ptr)
+ break;
+ ptr1 = ptr1->pNextDCB;
+ }
+ else
+ {
+ if( !DC390_StartSCSI(pACB, ptr1, pSRB) )
+ {
+ ptr1->GoingSRBCnt++;
+ if( ptr1->pWaitLast == pSRB )
+ {
+ ptr1->pWaitingSRB = NULL;
+ ptr1->pWaitLast = NULL;
+ }
+ else
+ {
+ ptr1->pWaitingSRB = pSRB->pNextSRB;
+ }
+ pSRB->pNextSRB = NULL;
+
+ if( ptr1->pGoingSRB )
+ ptr1->pGoingLast->pNextSRB = pSRB;
+ else
+ ptr1->pGoingSRB = pSRB;
+ ptr1->pGoingLast = pSRB;
+ }
+ break;
+ }
+ }
+ }
+ restore_flags(flags);
+ return;
+}
+
+
+static void
+SRBwaiting( PDCB pDCB, PSRB pSRB)
+{
+ if( pDCB->pWaitingSRB )
+ {
+ pDCB->pWaitLast->pNextSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ pSRB->pNextSRB = NULL;
+ }
+ else
+ {
+ pDCB->pWaitingSRB = pSRB;
+ pDCB->pWaitLast = pSRB;
+ }
+}
+
+
+static void
+SendSRB( PSCSICMD pcmd, PACB pACB, PSRB pSRB )
+{
+ ULONG flags;
+ PDCB pDCB;
+
+ save_flags(flags);
+ cli();
+
+ pDCB = pSRB->pSRBDCB;
+ if( !(pDCB->MaxCommand > pDCB->GoingSRBCnt) || (pACB->pActiveDCB) ||
+ (pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV)) )
+ {
+ SRBwaiting(pDCB, pSRB);
+ goto SND_EXIT;
+ }
+
+ if( pDCB->pWaitingSRB )
+ {
+ SRBwaiting(pDCB, pSRB);
+/* pSRB = GetWaitingSRB(pDCB); */
+ pSRB = pDCB->pWaitingSRB;
+ pDCB->pWaitingSRB = pSRB->pNextSRB;
+ pSRB->pNextSRB = NULL;
+ }
+
+ if( !DC390_StartSCSI(pACB, pDCB, pSRB) )
+ {
+ pDCB->GoingSRBCnt++;
+ if( pDCB->pGoingSRB )
+ {
+ pDCB->pGoingLast->pNextSRB = pSRB;
+ pDCB->pGoingLast = pSRB;
+ }
+ else
+ {
+ pDCB->pGoingSRB = pSRB;
+ pDCB->pGoingLast = pSRB;
+ }
+ }
+ else
+ RewaitSRB0( pDCB, pSRB );
+
+SND_EXIT:
+ restore_flags(flags);
+ return;
+}
+
+
+/***********************************************************************
+ * Function : static int DC390_queue_command (Scsi_Cmnd *cmd,
+ * void (*done)(Scsi_Cmnd *))
+ *
+ * Purpose : enqueues a SCSI command
+ *
+ * Inputs : cmd - SCSI command, done - function called on completion, with
+ * a pointer to the command descriptor.
+ *
+ * Returns : 0
+ *
+ ***********************************************************************/
+
+int
+DC390_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
+{
+ USHORT ioport, i;
+ Scsi_Cmnd *pcmd;
+ struct Scsi_Host *psh;
+ PACB pACB;
+ PDCB pDCB;
+ PSRB pSRB;
+ ULONG flags;
+ PUCHAR ptr,ptr1;
+
+ psh = cmd->host;
+ pACB = (PACB ) psh->hostdata;
+ ioport = pACB->IOPortBase;
+
+#ifdef DC390_DEBUG0
+/* if(pACB->scan_devices) */
+ printk("Cmd=%2x,ID=%d,LUN=%d,",cmd->cmnd[0],cmd->target,cmd->lun);
+#endif
+
+ if( (pACB->scan_devices == END_SCAN) && (cmd->cmnd[0] != INQUIRY) )
+ {
+ pACB->scan_devices = 0;
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+ else if( (pACB->scan_devices) && (cmd->cmnd[0] == 8) )
+ {
+ pACB->scan_devices = 0;
+ pPrevDCB->pNextDCB = pACB->pLinkDCB;
+ }
+
+ if ( ( cmd->target > pACB->max_id ) || (cmd->lun > pACB->max_lun) )
+ {
+/* printk("DC390: Ignore target %d lun %d\n",
+ cmd->target, cmd->lun); */
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ return( 0 );
+ }
+
+ if( (pACB->scan_devices) && !(pACB->DCBmap[cmd->target] & (1 << cmd->lun)) )
+ {
+ if( pACB->DeviceCnt < MAX_DEVICES )
+ {
+ pACB->DCBmap[cmd->target] |= (1 << cmd->lun);
+ pDCB = pACB->pDCB_free;
+#ifdef DC390_DEBUG0
+ printk("pDCB=%8x,ID=%2x,", (UINT) pDCB, cmd->target);
+#endif
+ DC390_initDCB( pACB, pDCB, cmd );
+ }
+ else /* ???? */
+ {
+/* printk("DC390: Ignore target %d lun %d\n",
+ cmd->target, cmd->lun); */
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ return(0);
+ }
+ }
+ else if( !(pACB->scan_devices) && !(pACB->DCBmap[cmd->target] & (1 << cmd->lun)) )
+ {
+/* printk("DC390: Ignore target %d lun %d\n",
+ cmd->target, cmd->lun); */
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ return(0);
+ }
+ else
+ {
+ pDCB = pACB->pLinkDCB;
+ while( (pDCB->UnitSCSIID != cmd->target) ||
+ (pDCB->UnitSCSILUN != cmd->lun) )
+ {
+ pDCB = pDCB->pNextDCB;
+ }
+#ifdef DC390_DEBUG0
+ printk("pDCB=%8x,ID=%2x,", (UINT) pDCB, cmd->target);
+#endif
+ }
+
+ cmd->scsi_done = done;
+ cmd->result = 0;
+
+ save_flags(flags);
+ cli();
+
+ if( pDCB->QIORBCnt )
+ {
+ QLinkcmd( cmd, pDCB );
+ pcmd = Getcmd( pDCB );
+ }
+ else
+ pcmd = cmd;
+
+ pSRB = GetSRB( pACB );
+
+ if( !pSRB )
+ {
+ QLinkcmd( pcmd, pDCB );
+ restore_flags(flags);
+ return(0);
+ }
+
+/* BuildSRB(pSRB); */
+
+ pSRB->pSRBDCB = pDCB;
+ pSRB->pcmd = pcmd;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ ptr1 = (PUCHAR) pcmd->cmnd;
+ pSRB->ScsiCmdLen = pcmd->cmd_len;
+ for(i=0; i< pcmd->cmd_len; i++)
+ {
+ *ptr = *ptr1;
+ ptr++;
+ ptr1++;
+ }
+ if( pcmd->use_sg )
+ {
+ pSRB->SGcount = (UCHAR) pcmd->use_sg;
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ }
+ else if( pcmd->request_buffer )
+ {
+ pSRB->SGcount = 1;
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ else
+ pSRB->SGcount = 0;
+
+ pSRB->SGIndex = 0;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pSRB->MsgCnt = 0;
+ if( pDCB->DevType != TYPE_TAPE )
+ pSRB->RetryCnt = 1;
+ else
+ pSRB->RetryCnt = 0;
+ pSRB->SRBStatus = 0;
+ pSRB->SRBFlag = 0;
+ pSRB->SRBState = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGPhysAddr = 0;
+ pSRB->SGToBeXferLen = 0;
+ pSRB->ScsiPhase = 0;
+ pSRB->EndMessage = 0;
+ SendSRB( pcmd, pACB, pSRB );
+
+ restore_flags(flags);
+ return(0);
+}
+
+
+static void
+DoNextCmd( PACB pACB, PDCB pDCB )
+{
+ Scsi_Cmnd *pcmd;
+ PSRB pSRB;
+ ULONG flags;
+ PUCHAR ptr,ptr1;
+ USHORT i;
+
+
+ if( pACB->ACBFlag & (RESET_DETECT+RESET_DONE+RESET_DEV) )
+ return;
+ save_flags(flags);
+ cli();
+
+ pcmd = Getcmd( pDCB );
+ pSRB = GetSRB( pACB );
+ if( !pSRB )
+ {
+ QLinkcmd( pcmd, pDCB );
+ restore_flags(flags);
+ return;
+ }
+
+ pSRB->pSRBDCB = pDCB;
+ pSRB->pcmd = pcmd;
+ ptr = (PUCHAR) pSRB->CmdBlock;
+ ptr1 = (PUCHAR) pcmd->cmnd;
+ pSRB->ScsiCmdLen = pcmd->cmd_len;
+ for(i=0; i< pcmd->cmd_len; i++)
+ {
+ *ptr = *ptr1;
+ ptr++;
+ ptr1++;
+ }
+ if( pcmd->use_sg )
+ {
+ pSRB->SGcount = (UCHAR) pcmd->use_sg;
+ pSRB->pSegmentList = (PSGL) pcmd->request_buffer;
+ }
+ else if( pcmd->request_buffer )
+ {
+ pSRB->SGcount = 1;
+ pSRB->pSegmentList = (PSGL) &pSRB->Segmentx;
+ pSRB->Segmentx.address = (PUCHAR) pcmd->request_buffer;
+ pSRB->Segmentx.length = pcmd->request_bufflen;
+ }
+ else
+ pSRB->SGcount = 0;
+
+ pSRB->SGIndex = 0;
+ pSRB->AdaptStatus = 0;
+ pSRB->TargetStatus = 0;
+ pSRB->MsgCnt = 0;
+ if( pDCB->DevType != TYPE_TAPE )
+ pSRB->RetryCnt = 1;
+ else
+ pSRB->RetryCnt = 0;
+ pSRB->SRBStatus = 0;
+ pSRB->SRBFlag = 0;
+ pSRB->SRBState = 0;
+ pSRB->TotalXferredLen = 0;
+ pSRB->SGPhysAddr = 0;
+ pSRB->SGToBeXferLen = 0;
+ pSRB->ScsiPhase = 0;
+ pSRB->EndMessage = 0;
+ SendSRB( pcmd, pACB, pSRB );
+
+ restore_flags(flags);
+ return;
+}
+
+
+/***********************************************************************
+ * Function:
+ * DC390_bios_param
+ *
+ * Description:
+ * Return the disk geometry for the given SCSI device.
+ ***********************************************************************/
+#ifdef VERSION_ELF_1_2_13
+int DC390_bios_param(Disk *disk, int devno, int geom[])
+#else
+int DC390_bios_param(Disk *disk, kdev_t devno, int geom[])
+#endif
+{
+ int heads, sectors, cylinders;
+ PACB pACB;
+
+ pACB = (PACB) disk->device->host->hostdata;
+ heads = 64;
+ sectors = 32;
+ cylinders = disk->capacity / (heads * sectors);
+
+ if ( (pACB->Gmode2 & GREATER_1G) && (cylinders > 1024) )
+ {
+ heads = 255;
+ sectors = 63;
+ cylinders = disk->capacity / (heads * sectors);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return (0);
+}
+
+
+/***********************************************************************
+ * Function : int DC390_abort (Scsi_Cmnd *cmd)
+ *
+ * Purpose : Abort an errant SCSI command
+ *
+ * Inputs : cmd - command to abort
+ *
+ * Returns : 0 on success, -1 on failure.
+ ***********************************************************************/
+
+int
+DC390_abort (Scsi_Cmnd *cmd)
+{
+ ULONG flags;
+ PACB pACB;
+ PDCB pDCB, pdcb;
+ PSRB pSRB, psrb;
+ USHORT count, i;
+ PSCSICMD pcmd, pcmd1;
+ int status;
+
+
+#ifdef DC390_DEBUG0
+ printk("DC390 : Abort Cmd.");
+#endif
+
+ save_flags(flags);
+ cli();
+
+ pACB = (PACB) cmd->host->hostdata;
+ pDCB = pACB->pLinkDCB;
+ pdcb = pDCB;
+ while( (pDCB->UnitSCSIID != cmd->target) ||
+ (pDCB->UnitSCSILUN != cmd->lun) )
+ {
+ pDCB = pDCB->pNextDCB;
+ if( pDCB == pdcb )
+ goto NOT_RUN;
+ }
+
+ if( pDCB->QIORBCnt )
+ {
+ pcmd = pDCB->pQIORBhead;
+ if( pcmd == cmd )
+ {
+ pDCB->pQIORBhead = pcmd->next;
+ pcmd->next = NULL;
+ pDCB->QIORBCnt--;
+ status = SCSI_ABORT_SUCCESS;
+ goto ABO_X;
+ }
+ for( count = pDCB->QIORBCnt, i=0; i<count-1; i++)
+ {
+ if( pcmd->next == cmd )
+ {
+ pcmd1 = pcmd->next;
+ pcmd->next = pcmd1->next;
+ pcmd1->next = NULL;
+ pDCB->QIORBCnt--;
+ status = SCSI_ABORT_SUCCESS;
+ goto ABO_X;
+ }
+ else
+ {
+ pcmd = pcmd->next;
+ }
+ }
+ }
+
+ pSRB = pDCB->pWaitingSRB;
+ if( !pSRB )
+ goto ON_GOING;
+ if( pSRB->pcmd == cmd )
+ {
+ pDCB->pWaitingSRB = pSRB->pNextSRB;
+ goto IN_WAIT;
+ }
+ else
+ {
+ psrb = pSRB;
+ if( !(psrb->pNextSRB) )
+ goto ON_GOING;
+ while( psrb->pNextSRB->pcmd != cmd )
+ {
+ psrb = psrb->pNextSRB;
+ if( !(psrb->pNextSRB) )
+ goto ON_GOING;
+ }
+ pSRB = psrb->pNextSRB;
+ psrb->pNextSRB = pSRB->pNextSRB;
+ if( pSRB == pDCB->pWaitLast )
+ pDCB->pWaitLast = psrb; /* No check for psrb == NULL ? */
+IN_WAIT:
+ pSRB->pNextSRB = pACB->pFreeSRB;
+ pACB->pFreeSRB = pSRB;
+ cmd->next = NULL;
+ status = SCSI_ABORT_SUCCESS;
+ goto ABO_X;
+ }
+
+ON_GOING:
+ pSRB = pDCB->pGoingSRB;
+ for( count = pDCB->GoingSRBCnt, i=0; i<count; i++)
+ {
+ if( pSRB->pcmd != cmd )
+ pSRB = pSRB->pNextSRB;
+ else
+ {
+ if( (pACB->pActiveDCB == pDCB) && (pDCB->pActiveSRB == pSRB) )
+ {
+ status = SCSI_ABORT_BUSY;
+ goto ABO_X;
+ }
+ else
+ {
+ status = SCSI_ABORT_SNOOZE;
+ goto ABO_X;
+ }
+ }
+ }
+
+NOT_RUN:
+ status = SCSI_ABORT_NOT_RUNNING;
+
+ABO_X:
+ cmd->result = DID_ABORT << 16;
+ cmd->scsi_done(cmd);
+ restore_flags(flags);
+ return( status );
+}
+
+
+static void
+ResetDevParam( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+
+ pDCB = pACB->pLinkDCB;
+ if( pDCB == NULL )
+ return;
+ pdcb = pDCB;
+ do
+ {
+ pDCB->SyncMode &= ~SYNC_NEGO_DONE;
+ pDCB->SyncPeriod = 0;
+ pDCB->SyncOffset = 0;
+ pDCB->CtrlR3 = FAST_CLK;
+ pDCB->CtrlR4 &= NEGATE_REQACKDATA;
+ pDCB->CtrlR4 |= EATER_25NS;
+ pDCB = pDCB->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+static void
+RecoverSRB( PACB pACB )
+{
+ PDCB pDCB, pdcb;
+ PSRB psrb, psrb2;
+ USHORT cnt, i;
+
+ pDCB = pACB->pLinkDCB;
+ if( pDCB == NULL )
+ return;
+ pdcb = pDCB;
+ do
+ {
+ cnt = pdcb->GoingSRBCnt;
+ psrb = pdcb->pGoingSRB;
+ for (i=0; i<cnt; i++)
+ {
+ psrb2 = psrb;
+ psrb = psrb->pNextSRB;
+/* RewaitSRB( pDCB, psrb ); */
+ if( pdcb->pWaitingSRB )
+ {
+ psrb2->pNextSRB = pdcb->pWaitingSRB;
+ pdcb->pWaitingSRB = psrb2;
+ }
+ else
+ {
+ pdcb->pWaitingSRB = psrb2;
+ pdcb->pWaitLast = psrb2;
+ psrb2->pNextSRB = NULL;
+ }
+ }
+ pdcb->GoingSRBCnt = 0;
+ pdcb->pGoingSRB = NULL;
+ pdcb->TagMask = 0;
+ pdcb = pdcb->pNextDCB;
+ }
+ while( pdcb != pDCB );
+}
+
+
+/***********************************************************************
+ * Function : int DC390_reset (Scsi_Cmnd *cmd, ...)
+ *
+ * Purpose : perform a hard reset on the SCSI bus
+ *
+ * Inputs : cmd - command which caused the SCSI RESET
+ *
+ * Returns : 0 on success.
+ ***********************************************************************/
+
+#ifdef VERSION_2_0_0
+int DC390_reset(Scsi_Cmnd *cmd, unsigned int resetFlags)
+#else
+int DC390_reset (Scsi_Cmnd *cmd)
+#endif
+{
+ USHORT ioport;
+ unsigned long flags;
+ PACB pACB;
+ UCHAR bval;
+ USHORT i;
+
+
+#ifdef DC390_DEBUG1
+ printk("DC390: RESET,");
+#endif
+
+ pACB = (PACB ) cmd->host->hostdata;
+ ioport = pACB->IOPortBase;
+ save_flags(flags);
+ cli();
+ bval = inb(ioport+CtrlReg1);
+ bval |= DIS_INT_ON_SCSI_RST;
+ outb(bval,ioport+CtrlReg1); /* disable interrupt */
+ DC390_ResetSCSIBus( pACB );
+ for( i=0; i<500; i++ )
+ udelay(1000);
+ bval = inb(ioport+CtrlReg1);
+ bval &= ~DIS_INT_ON_SCSI_RST;
+ outb(bval,ioport+CtrlReg1); /* re-enable interrupt */
+
+ bval = DMA_IDLE_CMD;
+ outb(bval,ioport+DMA_Cmd);
+ bval = CLEAR_FIFO_CMD;
+ outb(bval,ioport+ScsiCmd);
+
+ ResetDevParam( pACB );
+ DoingSRB_Done( pACB );
+ pACB->pActiveDCB = NULL;
+
+ pACB->ACBFlag = 0;
+ DoWaitingSRB( pACB );
+
+ restore_flags(flags);
+#ifdef DC390_DEBUG1
+ printk("DC390: RESET1,");
+#endif
+ return( SCSI_RESET_SUCCESS );
+}
+
+
+#include "scsiiom.c"
+
+
+/***********************************************************************
+ * Function : static void DC390_initDCB
+ *
+ * Purpose : initialize the internal structures for a given DCB
+ *
+ * Inputs : cmd - pointer to this scsi cmd request block structure
+ *
+ ***********************************************************************/
+void DC390_initDCB( PACB pACB, PDCB pDCB, PSCSICMD cmd )
+{
+ PEEprom prom;
+ UCHAR bval;
+ USHORT index;
+
+ if( pACB->DeviceCnt == 0 )
+ {
+ pACB->pLinkDCB = pDCB;
+ pACB->pDCBRunRobin = pDCB;
+ pDCB->pNextDCB = pDCB;
+ pPrevDCB = pDCB;
+ }
+ else
+ pPrevDCB->pNextDCB = pDCB;
+
+ pDCB->pDCBACB = pACB;
+ pDCB->QIORBCnt = 0;
+ pDCB->UnitSCSIID = cmd->target;
+ pDCB->UnitSCSILUN = cmd->lun;
+ pDCB->pWaitingSRB = NULL;
+ pDCB->pGoingSRB = NULL;
+ pDCB->GoingSRBCnt = 0;
+ pDCB->pActiveSRB = NULL;
+ pDCB->TagMask = 0;
+ pDCB->MaxCommand = 1;
+ pDCB->AdaptIndex = pACB->AdapterIndex;
+ index = pACB->AdapterIndex;
+ pDCB->DCBFlag = 0;
+
+ prom = (PEEprom) &eepromBuf[index][cmd->target << 2];
+ pDCB->DevMode = prom->EE_MODE1;
+ pDCB->AdpMode = eepromBuf[index][EE_MODE2];
+
+ if( pDCB->DevMode & EN_DISCONNECT_ )
+ bval = 0xC0;
+ else
+ bval = 0x80;
+ bval |= cmd->lun;
+ pDCB->IdentifyMsg = bval;
+
+ pDCB->SyncMode = 0;
+ if( pDCB->DevMode & SYNC_NEGO_ )
+ {
+ if( !(cmd->lun) || CurrSyncOffset )
+ pDCB->SyncMode = SYNC_ENABLE;
+ }
+
+ pDCB->SyncPeriod = 0;
+ pDCB->SyncOffset = 0;
+ pDCB->NegoPeriod = (clock_period1[prom->EE_SPEED] * 25) >> 2;
+
+ pDCB->CtrlR1 = pACB->AdaptSCSIID;
+ if( pDCB->DevMode & PARITY_CHK_ )
+ pDCB->CtrlR1 |= PARITY_ERR_REPO;
+
+ pDCB->CtrlR3 = FAST_CLK;
+
+ pDCB->CtrlR4 = EATER_25NS;
+ if( pDCB->AdpMode & ACTIVE_NEGATION)
+ pDCB->CtrlR4 |= NEGATE_REQACKDATA;
+}
+
+
+/***********************************************************************
+ * Function : static void DC390_initSRB
+ *
+ * Purpose : initialize the internal structures for a given SRB
+ *
+ * Inputs : psrb - pointer to this scsi request block structure
+ *
+ ***********************************************************************/
+void DC390_initSRB( PSRB psrb )
+{
+#ifndef VERSION_ELF_1_2_13
+#ifdef DC390_DEBUG0
+ printk("DC390 init: %08lx %08lx,",(ULONG)psrb,(ULONG)virt_to_bus(psrb));
+#endif
+ psrb->PhysSRB = virt_to_bus( psrb );
+#else
+ psrb->PhysSRB = (ULONG) psrb;
+#endif
+}
+
+
+void DC390_linkSRB( PACB pACB )
+{
+ USHORT count, i;
+ PSRB psrb;
+
+ count = pACB->SRBCount;
+
+ for( i=0; i< count; i++)
+ {
+ if( i != count - 1)
+ pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1];
+ else
+ pACB->SRB_array[i].pNextSRB = NULL;
+ psrb = (PSRB) &pACB->SRB_array[i];
+ DC390_initSRB( psrb );
+ }
+}
+
+
+/***********************************************************************
+ * Function : static void DC390_initACB
+ *
+ * Purpose : initialize the internal structures for a given SCSI host
+ *
+ * Inputs : psh - pointer to this host adapter's structure
+ *
+ ***********************************************************************/
+void DC390_initACB( PSH psh, ULONG io_port, UCHAR Irq, USHORT index )
+{
+ PACB pACB;
+ USHORT i;
+
+ psh->can_queue = MAX_CMD_QUEUE;
+ psh->cmd_per_lun = MAX_CMD_PER_LUN;
+ psh->this_id = (int) eepromBuf[index][EE_ADAPT_SCSI_ID];
+ psh->io_port = io_port;
+ psh->n_io_port = 0x80;
+ psh->irq = Irq;
+
+ pACB = (PACB) psh->hostdata;
+
+#ifndef VERSION_ELF_1_2_13
+ psh->max_id = 8;
+#ifdef CONFIG_SCSI_MULTI_LUN
+ if( eepromBuf[index][EE_MODE2] & LUN_CHECK )
+ psh->max_lun = 8;
+ else
+#endif
+ psh->max_lun = 1;
+#endif
+
+ pACB->max_id = 7;
+ if( pACB->max_id == eepromBuf[index][EE_ADAPT_SCSI_ID] )
+ pACB->max_id--;
+#ifdef CONFIG_SCSI_MULTI_LUN
+ if( eepromBuf[index][EE_MODE2] & LUN_CHECK )
+ pACB->max_lun = 7;
+ else
+#endif
+ pACB->max_lun = 0;
+
+ pACB->pScsiHost = psh;
+ pACB->IOPortBase = (USHORT) io_port;
+ pACB->pLinkDCB = NULL;
+ pACB->pDCBRunRobin = NULL;
+ pACB->pActiveDCB = NULL;
+ pACB->pFreeSRB = pACB->SRB_array;
+ pACB->SRBCount = MAX_SRB_CNT;
+ pACB->AdapterIndex = index;
+ pACB->status = 0;
+ pACB->AdaptSCSIID = eepromBuf[index][EE_ADAPT_SCSI_ID];
+ pACB->HostID_Bit = (1 << pACB->AdaptSCSIID);
+ pACB->AdaptSCSILUN = 0;
+ pACB->DeviceCnt = 0;
+ pACB->IRQLevel = Irq;
+ pACB->TagMaxNum = eepromBuf[index][EE_TAG_CMD_NUM] << 2;
+ pACB->ACBFlag = 0;
+ pACB->scan_devices = 1;
+ pACB->Gmode2 = eepromBuf[index][EE_MODE2];
+ if( eepromBuf[index][EE_MODE2] & LUN_CHECK )
+ pACB->LUNchk = 1;
+ pACB->pDCB_free = &pACB->DCB_array[0];
+ DC390_linkSRB( pACB );
+ pACB->pTmpSRB = &pACB->TmpSRB;
+ DC390_initSRB( pACB->pTmpSRB );
+ for(i=0; i<MAX_SCSI_ID; i++)
+ pACB->DCBmap[i] = 0;
+}
+
+
+/***********************************************************************
+ * Function : static int DC390_initAdapter
+ *
+ * Purpose : initialize the SCSI chip ctrl registers
+ *
+ * Inputs : psh - pointer to this host adapter's structure
+ *
+ ***********************************************************************/
+int DC390_initAdapter( PSH psh, ULONG io_port, UCHAR Irq, USHORT index )
+{
+ USHORT ioport;
+ UCHAR bval;
+ PACB pACB, pacb;
+ USHORT used_irq = 0;
+
+ pacb = pACB_start;
+ if( pacb != NULL )
+ {
+ for ( ; (pacb != (PACB) -1) ; )
+ {
+ if( pacb->IRQLevel == Irq )
+ {
+ used_irq = 1;
+ break;
+ }
+ else
+ pacb = pacb->pNextACB;
+ }
+ }
+
+ if( !used_irq )
+ {
+#ifdef VERSION_ELF_1_2_13
+ if( request_irq(Irq, DC390_Interrupt, SA_INTERRUPT, "tmscsim"))
+#else
+ if( request_irq(Irq, DC390_Interrupt, SA_INTERRUPT | SA_SHIRQ, "tmscsim", NULL))
+#endif
+ {
+ printk("DC390: register IRQ error!\n");
+ return( -1 );
+ }
+ }
+
+ request_region(io_port,psh->n_io_port,"tmscsim");
+
+ ioport = (USHORT) io_port;
+
+ pACB = (PACB) psh->hostdata;
+ bval = SEL_TIMEOUT; /* 250ms selection timeout */
+ outb(bval,ioport+Scsi_TimeOut);
+
+ bval = CLK_FREQ_40MHZ; /* Conversion factor = 0 , 40MHz clock */
+ outb(bval,ioport+Clk_Factor);
+
+ bval = NOP_CMD; /* NOP cmd - clear command register */
+ outb(bval,ioport+ScsiCmd);
+
+ bval = EN_FEATURE+EN_SCSI2_CMD; /* Enable Feature and SCSI-2 */
+ outb(bval,ioport+CtrlReg2);
+
+ bval = FAST_CLK; /* fast clock */
+ outb(bval,ioport+CtrlReg3);
+
+ bval = EATER_25NS;
+ if( eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION )
+ bval |= NEGATE_REQACKDATA;
+ outb(bval,ioport+CtrlReg4);
+
+ bval = DIS_INT_ON_SCSI_RST; /* Disable SCSI bus reset interrupt */
+ outb(bval,ioport+CtrlReg1);
+
+ return(0);
+}
+
+
+void
+DC390_EnableCfg( USHORT mechnum, UCHAR regval )
+{
+ ULONG wlval;
+
+ if(mechnum == 2)
+ {
+ outb(mech2bus, PCI_CFG2_FORWARD_REG);
+ outb(mech2CfgSPenR, PCI_CFG2_ENABLE_REG);
+ }
+ else
+ {
+ regval &= 0xFC;
+ wlval = mech1addr;
+ wlval |= (((ULONG)regval) & 0xff);
+ outl(wlval, PCI_CFG1_ADDRESS_REG);
+ }
+}
+
+
+void
+DC390_DisableCfg( USHORT mechnum )
+{
+
+ if(mechnum == 2)
+ outb(0, PCI_CFG2_ENABLE_REG);
+ else
+ outl(0, PCI_CFG1_ADDRESS_REG);
+}
+
+
+UCHAR
+DC390_inByte( USHORT mechnum, UCHAR regval )
+{
+ UCHAR bval;
+ ULONG wval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg( mechnum, regval );
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= ((USHORT) regval) & 0xff;
+ bval = inb(wval);
+ }
+ else
+ {
+ regval &= 3;
+ bval = inb(PCI_CFG1_DATA_REG | regval);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+ return(bval);
+}
+
+
+USHORT
+DC390_inWord( USHORT mechnum, UCHAR regval )
+{
+ USHORT wval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg(mechnum,regval);
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= regval;
+ wval = inw(wval);
+ }
+ else
+ {
+ regval &= 3;
+ wval = inw(PCI_CFG1_DATA_REG | regval);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+ return(wval);
+}
+
+
+ULONG
+DC390_inDword(USHORT mechnum, UCHAR regval )
+{
+ ULONG wlval;
+ ULONG flags;
+ USHORT wval;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg(mechnum,regval);
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= regval;
+ wlval = inl(wval);
+ }
+ else
+ {
+ wlval = inl(PCI_CFG1_DATA_REG);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+ return(wlval);
+}
+
+
+void
+DC390_OutB(USHORT mechnum, UCHAR regval, UCHAR bval )
+{
+
+ USHORT wval;
+ ULONG flags;
+
+ save_flags(flags);
+ cli();
+ DC390_EnableCfg(mechnum,regval);
+ if(mechnum == 2)
+ {
+ wval = mech2Agent;
+ wval <<= 8;
+ wval |= regval;
+ outb(bval, wval);
+ }
+ else
+ {
+ regval &= 3;
+ outb(bval, PCI_CFG1_DATA_REG | regval);
+ }
+ DC390_DisableCfg(mechnum);
+ restore_flags(flags);
+}
+
+
+void
+DC390_EnDisableCE( UCHAR mode, USHORT mechnum, PUCHAR regval )
+{
+
+ UCHAR bval;
+
+ bval = 0;
+ if(mode == ENABLE_CE)
+ *regval = 0xc0;
+ else
+ *regval = 0x80;
+ DC390_OutB(mechnum,*regval,bval);
+ if(mode == DISABLE_CE)
+ DC390_OutB(mechnum,*regval,bval);
+ udelay(160);
+}
+
+
+void
+DC390_EEpromOutDI( USHORT mechnum, PUCHAR regval, USHORT Carry )
+{
+ UCHAR bval;
+
+ bval = 0;
+ if(Carry)
+ {
+ bval = 0x40;
+ *regval = 0x80;
+ DC390_OutB(mechnum,*regval,bval);
+ }
+ udelay(160);
+ bval |= 0x80;
+ DC390_OutB(mechnum,*regval,bval);
+ udelay(160);
+ bval = 0;
+ DC390_OutB(mechnum,*regval,bval);
+ udelay(160);
+}
+
+
+UCHAR
+DC390_EEpromInDO( USHORT mechnum )
+{
+ UCHAR bval,regval;
+
+ regval = 0x80;
+ bval = 0x80;
+ DC390_OutB(mechnum,regval,bval);
+ udelay(160);
+ bval = 0x40;
+ DC390_OutB(mechnum,regval,bval);
+ udelay(160);
+ regval = 0x0;
+ bval = DC390_inByte(mechnum,regval);
+ if(bval == 0x22)
+ return(1);
+ else
+ return(0);
+}
+
+
+USHORT
+EEpromGetData1( USHORT mechnum )
+{
+ UCHAR i;
+ UCHAR carryFlag;
+ USHORT wval;
+
+ wval = 0;
+ for(i=0; i<16; i++)
+ {
+ wval <<= 1;
+ carryFlag = DC390_EEpromInDO(mechnum);
+ wval |= carryFlag;
+ }
+ return(wval);
+}
+
+
+void
+DC390_Prepare( USHORT mechnum, PUCHAR regval, UCHAR EEpromCmd )
+{
+ UCHAR i,j;
+ USHORT carryFlag;
+
+ carryFlag = 1;
+ j = 0x80;
+ for(i=0; i<9; i++)
+ {
+ DC390_EEpromOutDI(mechnum,regval,carryFlag);
+ carryFlag = (EEpromCmd & j) ? 1 : 0;
+ j >>= 1;
+ }
+}
+
+
+void
+DC390_ReadEEprom( USHORT mechnum, USHORT index )
+{
+ UCHAR regval,cmd;
+ PUSHORT ptr;
+ USHORT i;
+
+ ptr = (PUSHORT) &eepromBuf[index][0];
+ cmd = EEPROM_READ;
+ for(i=0; i<0x40; i++)
+ {
+ DC390_EnDisableCE(ENABLE_CE, mechnum, &regval);
+ DC390_Prepare(mechnum, &regval, cmd);
+ *ptr = EEpromGetData1(mechnum);
+ ptr++;
+ cmd++;
+ DC390_EnDisableCE(DISABLE_CE,mechnum,&regval);
+ }
+}
+
+
+USHORT
+DC390_CheckEEpromCheckSum( USHORT MechNum, USHORT index )
+{
+ USHORT wval, rc, *ptr;
+ UCHAR i;
+
+ DC390_ReadEEprom( MechNum, index );
+ wval = 0;
+ ptr = (PUSHORT) &eepromBuf[index][0];
+ for(i=0; i<128 ;i+=2, ptr++)
+ wval += *ptr;
+ if( wval == 0x1234 )
+ rc = 0;
+ else
+ rc = -1;
+ return( rc );
+}
+
+
+USHORT
+DC390_ToMech( USHORT Mechnum, USHORT BusDevFunNum )
+{
+ USHORT devnum;
+
+ devnum = BusDevFunNum;
+
+ if(Mechnum == 2)
+ {
+ if(devnum & 0x80)
+ return(-1);
+ mech2bus = (UCHAR)((devnum & 0xff00) >> 8); /* Bus num */
+ mech2Agent = ((UCHAR)(devnum & 0xff)) >> 3; /* Dev num */
+ mech2Agent |= 0xc0;
+ mech2CfgSPenR = ((UCHAR)(devnum & 0xff)) & 0x07; /* Fun num */
+ mech2CfgSPenR = (mech2CfgSPenR << 1) | 0x20;
+ }
+ else /* use mech #1 method */
+ {
+ mech1addr = 0x80000000 | ((ULONG)devnum << 8);
+ }
+ return(0);
+}
+
+/***********************************************************************
+ * Function : static int DC390_init (PSHT psht, ULONG io_port, UCHAR Irq,
+ *            USHORT index, USHORT MechNum)
+ *
+ * Purpose : initialize the internal structures for a given SCSI host
+ *
+ * Inputs : psht - SCSI host template; io_port, Irq - adapter resources;
+ *          index - adapter index; MechNum - PCI config access mechanism
+ *
+ * Preconditions : when this function is called, the chip_type
+ * field of the pACB structure MUST have been set.
+ ***********************************************************************/
+
+static int
+DC390_init (PSHT psht, ULONG io_port, UCHAR Irq, USHORT index, USHORT MechNum)
+{
+ PSH psh;
+ PACB pACB;
+
+ if( !DC390_CheckEEpromCheckSum( MechNum, index) )
+ {
+ psh = scsi_register( psht, sizeof(DC390_ACB) );
+ if( !psh )
+ return( -1 );
+ if( !pSH_start )
+ {
+ pSH_start = psh;
+ pSH_current = psh;
+ }
+ else
+ {
+ pSH_current->next = psh;
+ pSH_current = psh;
+ }
+
+#ifdef DC390_DEBUG0
+ printk("DC390: pSH = %8x,", (UINT) psh);
+ printk("DC390: Index %02i,", index);
+#endif
+
+ DC390_initACB( psh, io_port, Irq, index );
+ if( !DC390_initAdapter( psh, io_port, Irq, index ) )
+ {
+ pACB = (PACB) psh->hostdata;
+ if( !pACB_start )
+ {
+ pACB_start = pACB;
+ pACB_current = pACB;
+ pACB->pNextACB = (PACB) -1;
+ }
+ else
+ {
+ pACB_current->pNextACB = pACB;
+ pACB_current = pACB;
+ pACB->pNextACB = (PACB) -1;
+ }
+
+#ifdef DC390_DEBUG0
+ printk("DC390: pACB = %8x, pDCB_array = %8x, pSRB_array = %8x\n",
+ (UINT) pACB, (UINT) pACB->DCB_array, (UINT) pACB->SRB_array);
+ printk("DC390: ACB size= %4x, DCB size= %4x, SRB size= %4x\n",
+ sizeof(DC390_ACB), sizeof(DC390_DCB), sizeof(DC390_SRB) );
+#endif
+
+ }
+ else
+ {
+ pSH_start = NULL;
+ scsi_unregister( psh );
+ return( -1 );
+ }
+ return( 0 );
+ }
+ else
+ {
+ printk("DC390_init: EEPROM reading error!\n");
+ return( -1 );
+ }
+}
+
+
+/***********************************************************************
+ * Function : int DC390_detect(Scsi_Host_Template *psht)
+ *
+ * Purpose : detects and initializes AMD53C974 SCSI chips
+ * that were autoprobed, overridden on the LILO command line,
+ * or specified at compile time.
+ *
+ * Inputs : psht - template for this SCSI adapter
+ *
+ * Returns : number of host adapters detected
+ *
+ ***********************************************************************/
+
+int
+DC390_detect(Scsi_Host_Template *psht)
+{
+#ifdef FOR_PCI_OK
+ UCHAR pci_bus, pci_device_fn;
+ int error = 0;
+ USHORT chipType = 0;
+ USHORT i;
+#endif
+
+ UCHAR irq;
+ UCHAR istatus;
+#ifndef VERSION_ELF_1_2_13
+ UINT io_port;
+#else
+ ULONG io_port;
+#endif
+ USHORT adaptCnt = 0; /* Number of boards detected */
+ USHORT pci_index = 0; /* Device index to PCI BIOS calls */
+ USHORT MechNum, BusDevFunNum;
+ ULONG wlval;
+
+#ifndef VERSION_ELF_1_2_13
+ psht->proc_dir = &proc_scsi_tmscsim;
+#endif
+
+ InitialTime = 1;
+ pSHT_start = psht;
+ pACB_start = NULL;
+
+ MechNum = 1;
+ for( ; (MechNum < 3) && (!adaptCnt); MechNum++)
+ {
+ BusDevFunNum = 0;
+ for (; adaptCnt < MAX_ADAPTER_NUM ;)
+ {
+ if( !DC390_ToMech( MechNum, BusDevFunNum) )
+ {
+ wlval = DC390_inDword( MechNum, PCI_VENDOR_ID);
+ if(wlval == ( (PCI_DEVICE_ID_AMD53C974 << 16)+
+ PCI_VENDOR_ID_AMD) )
+ {
+ io_port =DC390_inDword(MechNum,PCI_BASE_ADDRESS_0) & 0xFFFE;
+ irq = DC390_inByte( MechNum, PCI_INTERRUPT_LINE);
+#ifdef DC390_DEBUG0
+ printk("DC390: IO_PORT=%4x,IRQ=%x,\n",(UINT) io_port, irq);
+#endif
+ if( !DC390_init(psht, io_port, irq, pci_index, MechNum) )
+ {
+ adaptCnt++;
+ pci_index++;
+ istatus = inb( (USHORT)io_port+INT_Status ); /* Reset Pending INT */
+#ifdef DC390_DEBUG0
+ printk("DC390: Mech=%2x,\n",(UCHAR) MechNum);
+#endif
+ }
+ }
+ }
+ if( BusDevFunNum != 0xfff8 )
+ BusDevFunNum += 8; /* next device # */
+ else
+ break;
+ }
+ }
+
+#ifdef FOR_PCI_OK
+ if ( pcibios_present() )
+ {
+ for (i = 0; i < MAX_ADAPTER_NUM; ++i)
+ {
+ if( !pcibios_find_device( PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD53C974,
+ pci_index, &pci_bus, &pci_device_fn) )
+ {
+ chipType = PCI_DEVICE_ID_AMD53C974;
+ pci_index++;
+ }
+
+ if( chipType )
+ {
+
+ error = pcibios_read_config_dword(pci_bus, pci_device_fn,
+ PCI_BASE_ADDRESS_0, &io_port);
+ error |= pcibios_read_config_byte(pci_bus, pci_device_fn,
+ PCI_INTERRUPT_LINE, &irq);
+ if( error )
+ {
+ printk("DC390_detect: reading configuration registers error!\n");
+ InitialTime = 0;
+ return( 0 );
+ }
+
+ io_port = (USHORT) io_port & 0xFFFE;
+#ifdef DC390_DEBUG0
+ printk("DC390: IO_PORT=%4x,IRQ=%x,\n",(UINT) io_port, irq);
+#endif
+ if( !DC390_init(psht, io_port, irq, i) )
+ adaptCnt++;
+ chipType = 0;
+ }
+ else
+ break;
+ }
+ }
+#endif
+
+ InitialTime = 0;
+ adapterCnt = adaptCnt;
+ return( adaptCnt );
+}
+
+
+#ifndef VERSION_ELF_1_2_13
+
+/********************************************************************
+ * Function: tmscsim_set_info()
+ *
+ * Purpose: Set adapter info (!)
+ *
+ * Not yet implemented
+ *
+ *******************************************************************/
+
+int tmscsim_set_info(char *buffer, int length, struct Scsi_Host *shpnt)
+{
+ return(-ENOSYS); /* Currently this is a no-op */
+}
+
+/********************************************************************
+ * Function: tmscsim_proc_info(char* buffer, char **start,
+ * off_t offset, int length, int hostno, int inout)
+ *
+ * Purpose: return SCSI Adapter/Device Info
+ *
+ * Input: buffer: Pointer to a buffer where to write info
+ * start :
+ * offset:
+ * hostno: Host adapter index
+ * inout : Read (=0) or set(!=0) info
+ *
+ * Output: buffer: contains info
+ * length: length of info in buffer
+ *
+ * return value: length
+ *
+ ********************************************************************/
+
+/* KG: proc_info taken from driver aha152x.c */
+
+#undef SPRINTF
+#define SPRINTF(args...) pos += sprintf(pos, ## args)
+
+#define YESNO(YN)\
+if (YN) SPRINTF(" Yes ");\
+else SPRINTF(" No ")
+
+int tmscsim_proc_info(char *buffer, char **start,
+ off_t offset, int length, int hostno, int inout)
+{
+ int dev, spd, spd1;
+ char *pos = buffer;
+ PSH shpnt = NULL;
+ PACB acbpnt;
+ PDCB dcbpnt;
+ unsigned long flags;
+/* Scsi_Cmnd *ptr; */
+
+ acbpnt = pACB_start;
+
+ while(acbpnt != (PACB)-1)
+ {
+ shpnt = acbpnt->pScsiHost;
+ if (shpnt->host_no == hostno) break;
+ acbpnt = acbpnt->pNextACB;
+ }
+
+ if (acbpnt == (PACB)-1) return(-ESRCH);
+ if(!shpnt) return(-ESRCH);
+
+ if(inout) /* Has data been written to the file ? */
+ return(tmscsim_set_info(buffer, length, shpnt));
+
+ SPRINTF("Tekram DC390(T) PCI SCSI Host Adadpter, ");
+ SPRINTF("Driver Version 1.10, 1996/12/05\n");
+
+ save_flags(flags);
+ cli();
+
+ SPRINTF("SCSI Host Nr %i, ", shpnt->host_no);
+ SPRINTF("DC390 Adapter Nr %i\n", acbpnt->AdapterIndex);
+ SPRINTF("IOPortBase 0x%04x, ", acbpnt->IOPortBase);
+ SPRINTF("IRQLevel 0x%02x\n", acbpnt->IRQLevel);
+
+ SPRINTF("MaxID %i, MaxLUN %i, ",acbpnt->max_id, acbpnt->max_lun);
+ SPRINTF("AdapterID %i, AdapterLUN %i\n", acbpnt->AdaptSCSIID, acbpnt->AdaptSCSILUN);
+
+ SPRINTF("TagMaxNum %i, Status %i\n", acbpnt->TagMaxNum, acbpnt->status);
+
+ SPRINTF("Nr of attached devices: %i\n", acbpnt->DeviceCnt);
+
+ SPRINTF("Un ID LUN Prty Sync DsCn SndS TagQ NegoPeriod SyncSpeed SyncOffs\n");
+
+ dcbpnt = acbpnt->pLinkDCB;
+ for (dev = 0; dev < acbpnt->DeviceCnt; dev++)
+ {
+ SPRINTF("%02i %02i %02i ", dev, dcbpnt->UnitSCSIID, dcbpnt->UnitSCSILUN);
+ YESNO(dcbpnt->DevMode & PARITY_CHK_);
+ YESNO(dcbpnt->SyncMode & SYNC_NEGO_DONE);
+ YESNO(dcbpnt->DevMode & EN_DISCONNECT_);
+ YESNO(dcbpnt->DevMode & SEND_START_);
+ YESNO(dcbpnt->SyncMode & EN_TAG_QUEUING);
+ SPRINTF(" %03i ns ", (dcbpnt->NegoPeriod) << 2);
+ if (dcbpnt->SyncOffset & 0x0f)
+ {
+ spd = 1000/(dcbpnt->NegoPeriod <<2);
+ spd1 = 1000%(dcbpnt->NegoPeriod <<2);
+ spd1 = (spd1 * 10)/(dcbpnt->NegoPeriod <<2);
+ SPRINTF(" %2i.%1i M %02i\n", spd, spd1, (dcbpnt->SyncOffset & 0x0f));
+ }
+ else SPRINTF("\n");
+ /* Add more info ...*/
+ dcbpnt = dcbpnt->pNextDCB;
+ }
+
+ restore_flags(flags);
+ *start = buffer + offset;
+
+ if (pos - buffer < offset)
+ return 0;
+ else if (pos - buffer - offset < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
+#endif /* VERSION_ELF_1_2_13 */
+
+
+#ifdef MODULE
+
+/***********************************************************************
+ * Function : static int DC390_shutdown (struct Scsi_Host *host)
+ *
+ * Purpose : does a clean (we hope) shutdown of the SCSI chip.
+ * Use prior to dumping core, unloading the driver, etc.
+ *
+ * Returns : 0 on success
+ ***********************************************************************/
+static int
+DC390_shutdown (struct Scsi_Host *host)
+{
+ UCHAR bval;
+ USHORT ioport;
+ unsigned long flags;
+ PACB pACB = (PACB)(host->hostdata);
+
+ ioport = (unsigned int) pACB->IOPortBase;
+
+ save_flags (flags);
+ cli();
+
+/* pACB->soft_reset(host); */
+
+#ifdef DC390_DEBUG0
+ printk("DC390: shutdown,");
+#endif
+
+ bval = inb(ioport+CtrlReg1);
+ bval |= DIS_INT_ON_SCSI_RST;
+ outb(bval,ioport+CtrlReg1); /* disable interrupt */
+ DC390_ResetSCSIBus( pACB );
+
+ restore_flags (flags);
+ return( 0 );
+}
+
+
+int DC390_release(struct Scsi_Host *host)
+{
+ int irq_count;
+ struct Scsi_Host *tmp;
+
+ DC390_shutdown (host);
+
+ if (host->irq != IRQ_NONE)
+ {
+ for (irq_count = 0, tmp = pSH_start; tmp; tmp = tmp->next)
+ {
+ if ( tmp->irq == host->irq )
+ ++irq_count;
+ }
+ if (irq_count == 1)
+ {
+#ifdef DC390_DEBUG0
+ printk("DC390: Free IRQ %i.",host->irq);
+#endif
+#ifndef VERSION_ELF_1_2_13
+ free_irq(host->irq,NULL);
+#else
+ free_irq(host->irq);
+#endif
+ }
+ }
+
+ release_region(host->io_port,host->n_io_port);
+
+ return( 1 );
+}
+
+Scsi_Host_Template driver_template = DC390_T;
+#include "scsi_module.c"
+#endif /* def MODULE */
+
diff --git a/linux/src/drivers/scsi/tmscsim.h b/linux/src/drivers/scsi/tmscsim.h
new file mode 100644
index 0000000..361c488
--- /dev/null
+++ b/linux/src/drivers/scsi/tmscsim.h
@@ -0,0 +1,680 @@
+/***********************************************************************
+;* File Name : TMSCSIM.H *
+;* TEKRAM DC-390(T) PCI SCSI Bus Master Host Adapter *
+;* Device Driver *
+;***********************************************************************/
+
+#ifndef TMSCSIM_H
+#define TMSCSIM_H
+
+#define IRQ_NONE 255
+
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned long ULONG;
+typedef unsigned int UINT;
+
+typedef UCHAR *PUCHAR;
+typedef USHORT *PUSHORT;
+typedef ULONG *PULONG;
+typedef Scsi_Host_Template *PSHT;
+typedef struct Scsi_Host *PSH;
+typedef Scsi_Device *PSCSIDEV;
+typedef Scsi_Cmnd *PSCSICMD;
+typedef void *PVOID;
+typedef struct scatterlist *PSGL, SGL;
+
+
+/*;-----------------------------------------------------------------------*/
+typedef struct _SyncMsg
+{
+UCHAR ExtendMsg;
+UCHAR ExtMsgLen;
+UCHAR SyncXferReq;
+UCHAR Period;
+UCHAR ReqOffset;
+} SyncMsg;
+/*;-----------------------------------------------------------------------*/
+typedef struct _Capacity
+{
+ULONG BlockCount;
+ULONG BlockLength;
+} Capacity;
+/*;-----------------------------------------------------------------------*/
+typedef struct _SGentry
+{
+ULONG SGXferDataPtr;
+ULONG SGXferDataLen;
+} SGentry;
+
+typedef struct _SGentry1
+{
+ULONG SGXLen;
+ULONG SGXPtr;
+} SGentry1, *PSGE;
+
+
+#define MAX_ADAPTER_NUM 4
+#define MAX_DEVICES 10
+#define MAX_SG_LIST_BUF 16
+#define MAX_CMD_QUEUE 20
+#define MAX_CMD_PER_LUN 8
+#define MAX_SCSI_ID 8
+#define MAX_SRB_CNT (MAX_CMD_QUEUE+4)
+#define END_SCAN 2
+
+#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */
+
+/*
+;-----------------------------------------------------------------------
+; SCSI Request Block
+;-----------------------------------------------------------------------
+*/
+struct _SRB
+{
+UCHAR CmdBlock[12];
+
+struct _SRB *pNextSRB;
+struct _DCB *pSRBDCB;
+PSCSICMD pcmd;
+PSGL pSegmentList;
+
+ULONG PhysSRB;
+ULONG TotalXferredLen;
+ULONG SGPhysAddr; /*;a segment starting address */
+ULONG SGToBeXferLen; /*; to be xfer length */
+
+SGL Segmentx; /* make a one entry of S/G list table */
+
+PUCHAR pMsgPtr;
+USHORT SRBState;
+USHORT Revxx2; /* ??? */
+
+UCHAR MsgInBuf[6];
+UCHAR MsgOutBuf[6];
+
+UCHAR AdaptStatus;
+UCHAR TargetStatus;
+UCHAR MsgCnt;
+UCHAR EndMessage;
+UCHAR TagNumber;
+UCHAR SGcount;
+UCHAR SGIndex;
+UCHAR IORBFlag; /*;81h-Reset, 2-retry */
+
+UCHAR SRBStatus;
+UCHAR RetryCnt;
+UCHAR SRBFlag; /*; b0-AutoReqSense,b6-Read,b7-write */
+ /*; b4-settimeout,b5-Residual valid */
+UCHAR ScsiCmdLen;
+UCHAR ScsiPhase;
+UCHAR Reserved3[3]; /*;for dword alignment */
+ULONG Segment0[2];
+ULONG Segment1[2];
+};
+
+typedef struct _SRB DC390_SRB, *PSRB;
+
+/*
+;-----------------------------------------------------------------------
+; Device Control Block
+;-----------------------------------------------------------------------
+*/
+struct _DCB
+{
+struct _DCB *pNextDCB;
+struct _ACB *pDCBACB;
+
+PSCSICMD pQIORBhead;
+PSCSICMD pQIORBtail;
+PSCSICMD AboIORBhead;
+PSCSICMD AboIORBtail;
+USHORT QIORBCnt;
+USHORT AboIORBcnt;
+
+PSRB pWaitingSRB;
+PSRB pWaitLast;
+PSRB pGoingSRB;
+PSRB pGoingLast;
+PSRB pActiveSRB;
+USHORT GoingSRBCnt;
+USHORT WaitSRBCnt; /* ??? */
+
+ULONG TagMask;
+
+USHORT MaxCommand;
+USHORT AdaptIndex; /*; UnitInfo struc start */
+USHORT UnitIndex; /*; nth Unit on this card */
+UCHAR UnitSCSIID; /*; SCSI Target ID (SCSI Only) */
+UCHAR UnitSCSILUN; /*; SCSI Log. Unit (SCSI Only) */
+
+UCHAR IdentifyMsg;
+UCHAR CtrlR1;
+UCHAR CtrlR3;
+UCHAR CtrlR4;
+
+UCHAR InqDataBuf[8];
+UCHAR CapacityBuf[8];
+UCHAR DevMode;
+UCHAR AdpMode;
+UCHAR SyncMode; /*; 0:async mode */
+UCHAR NegoPeriod; /*;for nego. */
+UCHAR SyncPeriod; /*;for reg. */
+UCHAR SyncOffset; /*;for reg. and nego.(low nibble) */
+UCHAR UnitCtrlFlag;
+UCHAR DCBFlag;
+UCHAR DevType;
+UCHAR Reserved2[3]; /*;for dword alignment */
+};
+
+typedef struct _DCB DC390_DCB, *PDCB;
+/*
+;-----------------------------------------------------------------------
+; Adapter Control Block
+;-----------------------------------------------------------------------
+*/
+struct _ACB
+{
+ULONG PhysACB;
+PSH pScsiHost;
+struct _ACB *pNextACB;
+USHORT IOPortBase;
+USHORT Revxx1; /* ??? */
+
+PDCB pLinkDCB;
+PDCB pDCBRunRobin;
+PDCB pActiveDCB;
+PDCB pDCB_free;
+PSRB pFreeSRB;
+PSRB pTmpSRB;
+USHORT SRBCount;
+USHORT AdapterIndex; /*; nth Adapter this driver */
+USHORT max_id;
+USHORT max_lun;
+
+UCHAR msgin123[4];
+UCHAR status;
+UCHAR AdaptSCSIID; /*; Adapter SCSI Target ID */
+UCHAR AdaptSCSILUN; /*; Adapter SCSI LUN */
+UCHAR DeviceCnt;
+UCHAR IRQLevel;
+UCHAR TagMaxNum;
+UCHAR ACBFlag;
+UCHAR Gmode2;
+UCHAR LUNchk;
+UCHAR scan_devices;
+UCHAR HostID_Bit;
+UCHAR Reserved1[1]; /*;for dword alignment */
+UCHAR DCBmap[MAX_SCSI_ID];
+DC390_DCB DCB_array[MAX_DEVICES]; /* +74h, Len=3E8 */
+DC390_SRB SRB_array[MAX_SRB_CNT]; /* +45Ch, Len= */
+DC390_SRB TmpSRB;
+};
+
+typedef struct _ACB DC390_ACB, *PACB;
+
+/*;-----------------------------------------------------------------------*/
+
+
+#define BIT31 0x80000000
+#define BIT30 0x40000000
+#define BIT29 0x20000000
+#define BIT28 0x10000000
+#define BIT27 0x08000000
+#define BIT26 0x04000000
+#define BIT25 0x02000000
+#define BIT24 0x01000000
+#define BIT23 0x00800000
+#define BIT22 0x00400000
+#define BIT21 0x00200000
+#define BIT20 0x00100000
+#define BIT19 0x00080000
+#define BIT18 0x00040000
+#define BIT17 0x00020000
+#define BIT16 0x00010000
+#define BIT15 0x00008000
+#define BIT14 0x00004000
+#define BIT13 0x00002000
+#define BIT12 0x00001000
+#define BIT11 0x00000800
+#define BIT10 0x00000400
+#define BIT9 0x00000200
+#define BIT8 0x00000100
+#define BIT7 0x00000080
+#define BIT6 0x00000040
+#define BIT5 0x00000020
+#define BIT4 0x00000010
+#define BIT3 0x00000008
+#define BIT2 0x00000004
+#define BIT1 0x00000002
+#define BIT0 0x00000001
+
+/*;---UnitCtrlFlag */
+#define UNIT_ALLOCATED BIT0
+#define UNIT_INFO_CHANGED BIT1
+#define FORMATING_MEDIA BIT2
+#define UNIT_RETRY BIT3
+
+/*;---UnitFlags */
+#define DASD_SUPPORT BIT0
+#define SCSI_SUPPORT BIT1
+#define ASPI_SUPPORT BIT2
+
+/*;----SRBState machine definition */
+#define SRB_FREE 0
+#define SRB_WAIT BIT0
+#define SRB_READY BIT1
+#define SRB_MSGOUT BIT2 /*;arbitration+msg_out 1st byte*/
+#define SRB_MSGIN BIT3
+#define SRB_MSGIN_MULTI BIT4
+#define SRB_COMMAND BIT5
+#define SRB_START_ BIT6 /*;arbitration+msg_out+command_out*/
+#define SRB_DISCONNECT BIT7
+#define SRB_DATA_XFER BIT8
+#define SRB_XFERPAD BIT9
+#define SRB_STATUS BIT10
+#define SRB_COMPLETED BIT11
+#define SRB_ABORT_SENT BIT12
+#define DO_SYNC_NEGO BIT13
+#define SRB_UNEXPECT_RESEL BIT14
+
+/*;---ACBFlag */
+#define RESET_DEV BIT0
+#define RESET_DETECT BIT1
+#define RESET_DONE BIT2
+
+/*;---DCBFlag */
+#define ABORT_DEV_ BIT0
+
+/*;---SRBstatus */
+#define SRB_OK BIT0
+#define ABORTION BIT1
+#define OVER_RUN BIT2
+#define UNDER_RUN BIT3
+#define PARITY_ERROR BIT4
+#define SRB_ERROR BIT5
+
+/*;---SRBFlag */
+#define DATAOUT BIT7
+#define DATAIN BIT6
+#define RESIDUAL_VALID BIT5
+#define ENABLE_TIMER BIT4
+#define RESET_DEV0 BIT2
+#define ABORT_DEV BIT1
+#define AUTO_REQSENSE BIT0
+
+/*;---Adapter status */
+#define H_STATUS_GOOD 0
+#define H_SEL_TIMEOUT 0x11
+#define H_OVER_UNDER_RUN 0x12
+#define H_UNEXP_BUS_FREE 0x13
+#define H_TARGET_PHASE_F 0x14
+#define H_INVALID_CCB_OP 0x16
+#define H_LINK_CCB_BAD 0x17
+#define H_BAD_TARGET_DIR 0x18
+#define H_DUPLICATE_CCB 0x19
+#define H_BAD_CCB_OR_SG 0x1A
+#define H_ABORT 0x0FF
+
+/*; SCSI Status byte codes*/
+#define SCSI_STAT_GOOD 0x0 /*; Good status */
+#define SCSI_STAT_CHECKCOND 0x02 /*; SCSI Check Condition */
+#define SCSI_STAT_CONDMET 0x04 /*; Condition Met */
+#define SCSI_STAT_BUSY 0x08 /*; Target busy status */
+#define SCSI_STAT_INTER 0x10 /*; Intermediate status */
+#define SCSI_STAT_INTERCONDMET 0x14 /*; Intermediate condition met */
+#define SCSI_STAT_RESCONFLICT 0x18 /*; Reservation conflict */
+#define SCSI_STAT_CMDTERM 0x22 /*; Command Terminated */
+#define SCSI_STAT_QUEUEFULL 0x28 /*; Queue Full */
+
+#define SCSI_STAT_UNEXP_BUS_F 0xFD /*; Unexpected Bus Free */
+#define SCSI_STAT_BUS_RST_DETECT 0xFE /*; Scsi Bus Reset detected */
+#define SCSI_STAT_SEL_TIMEOUT 0xFF /*; Selection Time out */
+
+/*;---Sync_Mode */
+#define SYNC_DISABLE 0
+#define SYNC_ENABLE BIT0
+#define SYNC_NEGO_DONE BIT1
+#define WIDE_ENABLE BIT2
+#define WIDE_NEGO_DONE BIT3
+#define EN_TAG_QUEUING BIT4
+#define EN_ATN_STOP BIT5
+
+#define SYNC_NEGO_OFFSET 15
+
+/*;---SCSI bus phase*/
+#define SCSI_DATA_OUT 0
+#define SCSI_DATA_IN 1
+#define SCSI_COMMAND 2
+#define SCSI_STATUS_ 3
+#define SCSI_NOP0 4
+#define SCSI_NOP1 5
+#define SCSI_MSG_OUT 6
+#define SCSI_MSG_IN 7
+
+/*;----SCSI MSG BYTE*/
+#define MSG_COMPLETE 0x00
+#define MSG_EXTENDED 0x01
+#define MSG_SAVE_PTR 0x02
+#define MSG_RESTORE_PTR 0x03
+#define MSG_DISCONNECT 0x04
+#define MSG_INITIATOR_ERROR 0x05
+#define MSG_ABORT 0x06
+#define MSG_REJECT_ 0x07
+#define MSG_NOP 0x08
+#define MSG_PARITY_ERROR 0x09
+#define MSG_LINK_CMD_COMPL 0x0A
+#define MSG_LINK_CMD_COMPL_FLG 0x0B
+#define MSG_BUS_RESET 0x0C
+#define MSG_ABORT_TAG 0x0D
+#define MSG_SIMPLE_QTAG 0x20
+#define MSG_HEAD_QTAG 0x21
+#define MSG_ORDER_QTAG 0x22
+#define MSG_IDENTIFY 0x80
+#define MSG_HOST_ID 0x0C0
+
+/*;----SCSI STATUS BYTE*/
+#define STATUS_GOOD 0x00
+#define CHECK_CONDITION_ 0x02
+#define STATUS_BUSY 0x08
+#define STATUS_INTERMEDIATE 0x10
+#define RESERVE_CONFLICT 0x18
+
+/* cmd->result */
+#define STATUS_MASK_ 0xFF
+#define MSG_MASK 0xFF00
+#define RETURN_MASK 0xFF0000
+
+/*
+** Inquiry Data format
+*/
+
+typedef struct _SCSIInqData { /* INQ */
+
+ UCHAR DevType; /* Periph Qualifier & Periph Dev Type*/
+ UCHAR RMB_TypeMod; /* rem media bit & Dev Type Modifier */
+ UCHAR Vers; /* ISO, ECMA, & ANSI versions */
+ UCHAR RDF; /* AEN, TRMIOP, & response data format*/
+ UCHAR AddLen; /* length of additional data */
+ UCHAR Res1; /* reserved */
+ UCHAR Res2; /* reserved */
+ UCHAR Flags; /* RelADr,Wbus32,Wbus16,Sync,etc. */
+ UCHAR VendorID[8]; /* Vendor Identification */
+ UCHAR ProductID[16]; /* Product Identification */
+ UCHAR ProductRev[4]; /* Product Revision */
+
+
+} SCSI_INQDATA, *PSCSI_INQDATA;
+
+
+/* Inquiry byte 0 masks */
+
+
+#define SCSI_DEVTYPE 0x1F /* Peripheral Device Type */
+#define SCSI_PERIPHQUAL 0xE0 /* Peripheral Qualifier */
+
+
+/* Inquiry byte 1 mask */
+
+#define SCSI_REMOVABLE_MEDIA 0x80 /* Removable Media bit (1=removable) */
+
+
+/* Peripheral Device Type definitions */
+
+#define SCSI_DASD 0x00 /* Direct-access Device */
+#define SCSI_SEQACESS 0x01 /* Sequential-access device */
+#define SCSI_PRINTER 0x02 /* Printer device */
+#define SCSI_PROCESSOR 0x03 /* Processor device */
+#define SCSI_WRITEONCE 0x04 /* Write-once device */
+#define SCSI_CDROM 0x05 /* CD-ROM device */
+#define SCSI_SCANNER 0x06 /* Scanner device */
+#define SCSI_OPTICAL 0x07 /* Optical memory device */
+#define SCSI_MEDCHGR 0x08 /* Medium changer device */
+#define SCSI_COMM 0x09 /* Communications device */
+#define SCSI_NODEV 0x1F /* Unknown or no device type */
+
+/*
+** Inquiry flag definitions (Inq data byte 7)
+*/
+
+#define SCSI_INQ_RELADR 0x80 /* device supports relative addressing*/
+#define SCSI_INQ_WBUS32 0x40 /* device supports 32 bit data xfers */
+#define SCSI_INQ_WBUS16 0x20 /* device supports 16 bit data xfers */
+#define SCSI_INQ_SYNC 0x10 /* device supports synchronous xfer */
+#define SCSI_INQ_LINKED 0x08 /* device supports linked commands */
+#define SCSI_INQ_CMDQUEUE 0x02 /* device supports command queueing */
+#define SCSI_INQ_SFTRE 0x01 /* device supports soft resets */
+
+
+/*
+;==========================================================
+; EEPROM byte offset
+;==========================================================
+*/
+typedef struct _EEprom
+{
+UCHAR EE_MODE1;
+UCHAR EE_SPEED;
+UCHAR xx1;
+UCHAR xx2;
+} EEprom, *PEEprom;
+
+#define EE_ADAPT_SCSI_ID 64
+#define EE_MODE2 65
+#define EE_DELAY 66
+#define EE_TAG_CMD_NUM 67
+
+/*; EE_MODE1 bits definition*/
+#define PARITY_CHK_ BIT0
+#define SYNC_NEGO_ BIT1
+#define EN_DISCONNECT_ BIT2
+#define SEND_START_ BIT3
+#define TAG_QUEUING_ BIT4
+
+/*; EE_MODE2 bits definition*/
+#define MORE2_DRV BIT0
+#define GREATER_1G BIT1
+#define RST_SCSI_BUS BIT2
+#define ACTIVE_NEGATION BIT3
+#define NO_SEEK BIT4
+#define LUN_CHECK BIT5
+
+#define ENABLE_CE 1
+#define DISABLE_CE 0
+#define EEPROM_READ 0x80
+
+/*
+;==========================================================
+; AMD 53C974 Registers bit Definition
+;==========================================================
+*/
+/*
+;====================
+; SCSI Register
+;====================
+*/
+
+/*; Command Reg.(+0CH) */
+#define DMA_COMMAND BIT7
+#define NOP_CMD 0
+#define CLEAR_FIFO_CMD 1
+#define RST_DEVICE_CMD 2
+#define RST_SCSI_BUS_CMD 3
+#define INFO_XFER_CMD 0x10
+#define INITIATOR_CMD_CMPLTE 0x11
+#define MSG_ACCEPTED_CMD 0x12
+#define XFER_PAD_BYTE 0x18
+#define SET_ATN_CMD 0x1A
+#define RESET_ATN_CMD 0x1B
+#define SELECT_W_ATN 0x42
+#define SEL_W_ATN_STOP 0x43
+#define EN_SEL_RESEL 0x44
+#define SEL_W_ATN2 0x46
+#define DATA_XFER_CMD INFO_XFER_CMD
+
+
+/*; SCSI Status Reg.(+10H) */
+#define INTERRUPT BIT7
+#define ILLEGAL_OP_ERR BIT6
+#define PARITY_ERR BIT5
+#define COUNT_2_ZERO BIT4
+#define GROUP_CODE_VALID BIT3
+#define SCSI_PHASE_MASK (BIT2+BIT1+BIT0)
+
+/*; Interrupt Status Reg.(+14H) */
+#define SCSI_RESET BIT7
+#define INVALID_CMD BIT6
+#define DISCONNECTED BIT5
+#define SERVICE_REQUEST BIT4
+#define SUCCESSFUL_OP BIT3
+#define RESELECTED BIT2
+#define SEL_ATTENTION BIT1
+#define SELECTED BIT0
+
+/*; Internal State Reg.(+18H) */
+#define SYNC_OFFSET_FLAG BIT3
+#define INTRN_STATE_MASK (BIT2+BIT1+BIT0)
+
+/*; Clock Factor Reg.(+24H) */
+#define CLK_FREQ_40MHZ 0
+#define CLK_FREQ_35MHZ (BIT2+BIT1+BIT0)
+#define CLK_FREQ_30MHZ (BIT2+BIT1)
+#define CLK_FREQ_25MHZ (BIT2+BIT0)
+#define CLK_FREQ_20MHZ BIT2
+#define CLK_FREQ_15MHZ (BIT1+BIT0)
+#define CLK_FREQ_10MHZ BIT1
+
+/*; Control Reg. 1(+20H) */
+#define EXTENDED_TIMING BIT7
+#define DIS_INT_ON_SCSI_RST BIT6
+#define PARITY_ERR_REPO BIT4
+#define SCSI_ID_ON_BUS (BIT2+BIT1+BIT0)
+
+/*; Control Reg. 2(+2CH) */
+#define EN_FEATURE BIT6
+#define EN_SCSI2_CMD BIT3
+
+/*; Control Reg. 3(+30H) */
+#define ID_MSG_CHECK BIT7
+#define EN_QTAG_MSG BIT6
+#define EN_GRP2_CMD BIT5
+#define FAST_SCSI BIT4 /* ;10MB/SEC */
+#define FAST_CLK BIT3 /* ;25 - 40 MHZ */
+
+/*; Control Reg. 4(+34H) */
+#define EATER_12NS 0
+#define EATER_25NS BIT7
+#define EATER_35NS BIT6
+#define EATER_0NS (BIT7+BIT6)
+#define NEGATE_REQACKDATA BIT2
+#define NEGATE_REQACK BIT3
+/*
+;====================
+; DMA Register
+;====================
+*/
+/*; DMA Command Reg.(+40H) */
+#define READ_DIRECTION BIT7
+#define WRITE_DIRECTION 0
+#define EN_DMA_INT BIT6
+#define MAP_TO_MDL BIT5
+#define DIAGNOSTIC BIT4
+#define DMA_IDLE_CMD 0
+#define DMA_BLAST_CMD BIT0
+#define DMA_ABORT_CMD BIT1
+#define DMA_START_CMD (BIT1+BIT0)
+
+/*; DMA Status Reg.(+54H) */
+#define PCI_MS_ABORT BIT6
+#define BLAST_COMPLETE BIT5
+#define SCSI_INTERRUPT BIT4
+#define DMA_XFER_DONE BIT3
+#define DMA_XFER_ABORT BIT2
+#define DMA_XFER_ERROR BIT1
+#define POWER_DOWN BIT0
+
+/*
+; DMA SCSI Bus and Ctrl.(+70H)
+;EN_INT_ON_PCI_ABORT
+*/
+
+/*
+;==========================================================
+; SCSI Chip register address offset
+;==========================================================
+*/
+#define CtcReg_Low 0x00
+#define CtcReg_Mid 0x04
+#define ScsiFifo 0x08
+#define ScsiCmd 0x0C
+#define Scsi_Status 0x10
+#define INT_Status 0x14
+#define Sync_Period 0x18
+#define Sync_Offset 0x1C
+#define CtrlReg1 0x20
+#define Clk_Factor 0x24
+#define CtrlReg2 0x2C
+#define CtrlReg3 0x30
+#define CtrlReg4 0x34
+#define CtcReg_High 0x38
+#define DMA_Cmd 0x40
+#define DMA_XferCnt 0x44
+#define DMA_XferAddr 0x48
+#define DMA_Wk_ByteCntr 0x4C
+#define DMA_Wk_AddrCntr 0x50
+#define DMA_Status 0x54
+#define DMA_MDL_Addr 0x58
+#define DMA_Wk_MDL_Cntr 0x5C
+#define DMA_ScsiBusCtrl 0x70
+
+#define StcReg_Low CtcReg_Low
+#define StcReg_Mid CtcReg_Mid
+#define Scsi_Dest_ID Scsi_Status
+#define Scsi_TimeOut INT_Status
+#define Intern_State Sync_Period
+#define Current_Fifo Sync_Offset
+#define StcReg_High CtcReg_High
+
+#define am_target Scsi_Status
+#define am_timeout INT_Status
+#define am_seq_step Sync_Period
+#define am_fifo_count Sync_Offset
+
+
+#define DC390_read8(address) \
+ inb(DC390_ioport + (address))
+
+#define DC390_read16(address) \
+ inw(DC390_ioport + (address))
+
+#define DC390_read32(address) \
+ inl(DC390_ioport + (address))
+
+#define DC390_write8(address,value) \
+ outb((value), DC390_ioport + (address))
+
+#define DC390_write16(address,value) \
+ outw((value), DC390_ioport + (address))
+
+#define DC390_write32(address,value) \
+ outl((value), DC390_ioport + (address))
+
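+/*
+ * Illustrative usage only: these wrappers assume a variable or macro named
+ * DC390_ioport holding the adapter's I/O base address, which is not declared
+ * in this header.  With such a definition in scope one would write e.g.
+ *
+ *   istatus = DC390_read8 (INT_Status);
+ *   DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
+ *
+ * The tmscsim.c code shown above accesses the chip directly with inb()/outb()
+ * on a local ioport variable instead.
+ */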
+
+/* Configuration method #1 */
+#define PCI_CFG1_ADDRESS_REG 0xcf8
+#define PCI_CFG1_DATA_REG 0xcfc
+#define PCI_CFG1_ENABLE 0x80000000
+#define PCI_CFG1_TUPPLE(bus, device, function, register) \
+ (PCI_CFG1_ENABLE | (((bus) << 16) & 0xff0000) | \
+ (((device) << 11) & 0xf800) | (((function) << 8) & 0x700)| \
+ (((register) << 2) & 0xfc))
+
+/* Configuration method #2 */
+#define PCI_CFG2_ENABLE_REG 0xcf8
+#define PCI_CFG2_FORWARD_REG 0xcfa
+#define PCI_CFG2_ENABLE 0x0f0
+#define PCI_CFG2_TUPPLE(function) \
+ (PCI_CFG2_ENABLE | (((function) << 1) & 0xe))
+
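+/*
+ * Illustrative sketch (not part of the driver): reading a configuration
+ * dword with method #1.  The address tupple is written to the address
+ * register at 0xcf8 and the data read back from 0xcfc.  Note that the
+ * register argument of PCI_CFG1_TUPPLE is a dword index (it is shifted
+ * left by 2), so index 0 selects the vendor/device ID register.  The
+ * helper name below is made up for the example.
+ */
+#if 0
+static ULONG pci_cfg1_read (UCHAR bus, UCHAR device, UCHAR function, UCHAR reg)
+{
+    outl (PCI_CFG1_TUPPLE (bus, device, function, reg), PCI_CFG1_ADDRESS_REG);
+    return inl (PCI_CFG1_DATA_REG);
+}
+#endif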
+
+#endif /* TMSCSIM_H */
diff --git a/linux/src/drivers/scsi/u14-34f.c b/linux/src/drivers/scsi/u14-34f.c
new file mode 100644
index 0000000..ece5330
--- /dev/null
+++ b/linux/src/drivers/scsi/u14-34f.c
@@ -0,0 +1,1996 @@
+/*
+ * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters.
+ *
+ * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
+ * Added command line option (et:[y|n]) to use the existing
+ * translation (returned by scsicam_bios_param) as disk geometry.
+ * The default is et:n, which uses the disk geometry jumpered
+ * on the board.
+ * The default value et:n is compatible with all previous revisions
+ * of this driver.
+ *
+ * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
+ * Increased busy timeout from 10 msec. to 200 msec. while
+ * processing interrupts.
+ *
+ * 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
+ * Improved abort handling during the eh recovery process.
+ *
+ * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
+ * The driver is now fully SMP safe, including the
+ * abort and reset routines.
+ * Added command line options (eh:[y|n]) to choose between
+ * new_eh_code and the old scsi code.
+ * If linux version >= 2.1.101 the default is eh:y, while the eh
+ * option is ignored for previous releases and the old scsi code
+ * is used.
+ *
+ * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
+ * Reworked interrupt handler.
+ *
+ * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
+ * Major reliability improvement: when a batch with overlapping
+ * requests is detected, requests are queued one at a time
+ * eliminating any possible board or drive reordering.
+ *
+ * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
+ * Improved SMP support (if linux version >= 2.1.95).
+ *
+ * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
+ * Performance improvement: when sequential i/o is detected,
+ * always use direct sort instead of reverse sort.
+ *
+ * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
+ * io_port is now unsigned long.
+ *
+ * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
+ * Use new scsi error handling code (if linux version >= 2.1.88).
+ * Use new interrupt code.
+ *
+ * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
+ * Use of udelay inside the wait loops to avoid timeout
+ * problems with fast cpus.
+ * Removed check about useless calls to the interrupt service
+ * routine (reported on SMP systems only).
+ * At initialization time "sorted/unsorted" is displayed instead
+ * of "linked/unlinked" to reinforce the fact that "linking" is
+ * nothing but "elevator sorting" in the actual implementation.
+ *
+ * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
+ * Use of serial_number_at_timeout in abort and reset processing.
+ * Use of the __initfunc and __initdata macro in setup code.
+ * Minor cleanups in the list_statistics code.
+ *
+ * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
+ * When loading as a module, parameter passing is now supported
+ * both in 2.0 and in 2.1 style.
+ * Fixed data transfer direction for some SCSI opcodes.
+ * Immediate acknowledge to request sense commands.
+ * Linked commands to each disk device are now reordered by elevator
+ * sorting. Rare cases in which reordering of write requests could
+ * cause wrong results are managed.
+ *
+ * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
+ * Added command line options to enable/disable linked commands
+ * (lc:[y|n]), old firmware support (of:[y|n]) and to set the max
+ * queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8".
+ * Improved command linking.
+ *
+ * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
+ * Added linked command support.
+ *
+ * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
+ * Added queue depth adjustment.
+ *
+ * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
+ * The list of i/o ports to be probed can be overwritten by the
+ * "u14-34f=port0,port1,...." boot command line option.
+ * Scatter/gather lists are now allocated by a number of kmalloc
+ * calls, in order to avoid the previous size limit of 64Kb.
+ *
+ * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
+ * Added multichannel support.
+ *
+ * 27 Sep 1996 rev. 2.12 for linux 2.1.0
+ * Portability cleanups (virtual/bus addressing, little/big endian
+ * support).
+ *
+ * 09 Jul 1996 rev. 2.11 for linux 2.0.4
+ * "Data over/under-run" no longer implies a redo on all targets.
+ * Number of internal retries is now limited.
+ *
+ * 16 Apr 1996 rev. 2.10 for linux 1.3.90
+ * New argument "reset_flags" to the reset routine.
+ *
+ * 21 Jul 1995 rev. 2.02 for linux 1.3.11
+ * Fixed Data Transfer Direction for some SCSI commands.
+ *
+ * 13 Jun 1995 rev. 2.01 for linux 1.2.10
+ * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when
+ * the firmware prom is not the latest one (28008-006).
+ *
+ * 11 Mar 1995 rev. 2.00 for linux 1.2.0
+ * Fixed a bug which prevented media change detection for removable
+ * disk drives.
+ *
+ * 23 Feb 1995 rev. 1.18 for linux 1.1.94
+ * Added a check for scsi_register returning NULL.
+ *
+ * 11 Feb 1995 rev. 1.17 for linux 1.1.91
+ * U14F qualified to run with 32 sglists.
+ * Now DEBUG_RESET is disabled by default.
+ *
+ * 9 Feb 1995 rev. 1.16 for linux 1.1.90
+ * Use host->wish_block instead of host->block.
+ *
+ * 8 Feb 1995 rev. 1.15 for linux 1.1.89
+ * Cleared target_time_out counter while performing a reset.
+ *
+ * 28 Jan 1995 rev. 1.14 for linux 1.1.86
+ * Added module support.
+ * Log and do a retry when a disk drive returns a target status
+ * different from zero on a recovered error.
+ * Auto detects if U14F boards have an old firmware revision.
+ * Max number of scatter/gather lists set to 16 for all boards
+ * (most installations run fine using 33 sglists, while others
+ * have problems when using more than 16).
+ *
+ * 16 Jan 1995 rev. 1.13 for linux 1.1.81
+ * Display a message if check_region detects a port address
+ * already in use.
+ *
+ * 15 Dec 1994 rev. 1.12 for linux 1.1.74
+ * The host->block flag is set for all the detected ISA boards.
+ *
+ * 30 Nov 1994 rev. 1.11 for linux 1.1.68
+ * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
+ * Added optional support for using a single board at a time.
+ *
+ * 14 Nov 1994 rev. 1.10 for linux 1.1.63
+ *
+ * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release.
+ * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release.
+ *
+ * This driver is a total replacement of the original UltraStor
+ * scsi driver, but it supports ONLY the 14F and 34F boards.
+ * It can be configured in the same kernel in which the original
+ * ultrastor driver is configured to allow the original U24F
+ * support.
+ *
+ * Multiple U14F and/or U34F host adapters are supported.
+ *
+ * Copyright (C) 1994-1998 Dario Ballabio (dario@milano.europe.dg.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that redistributions of source
+ * code retain the above copyright notice and this comment without
+ * modification.
+ *
+ * WARNING: if your 14/34F board has an old firmware revision (see below)
+ * you must change "#undef" into "#define" in the following
+ * statement.
+ */
+#undef HAVE_OLD_UX4F_FIRMWARE
+/*
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters.
+ * Here is the scoop on the various models:
+ *
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation).
+ *
+ * This code has been tested with up to two U14F boards, using both
+ * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware
+ * 28004-006/38004-005 (BIOS rev. 2.01).
+ *
+ * The latest firmware is required in order to get reliable operations when
+ * clustering is enabled. ENABLE_CLUSTERING provides a performance increase
+ * up to 50% on sequential access.
+ *
+ * Since the Scsi_Host_Template structure is shared among all 14F and 34F,
+ * the last setting of use_clustering is in effect for all of these boards.
+ *
+ * Here a sample configuration using two U14F boards:
+ *
+ U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8.
+ U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8.
+ *
+ * The boot controller must have its BIOS enabled, while other boards can
+ * have their BIOS disabled, or enabled to a higher address.
+ * Boards are named Ux4F0, Ux4F1..., according to the port address order in
+ * the io_port[] array.
+ *
+ * The following facts are based on real testing results (not on
+ * documentation) on the above U14F board.
+ *
+ * - The U14F board should be jumpered for a bus on time less than or equal
+ * to 7 microseconds, while the default is 11 microseconds. This is in order
+ * to get acceptable performance while using a floppy drive and hard disk
+ * together. The jumpering for 7 microseconds is: JP13 pin 15-16,
+ * JP14 pin 7-8 and pin 9-10.
+ * The reduction has a little impact on scsi performance.
+ *
+ * - If scsi bus length exceeds 3m., the scsi bus speed needs to be reduced
+ * from 10Mhz to 5Mhz (do this by inserting a jumper on JP13 pin 7-8).
+ *
+ * - If U14F on board firmware is older than 28004-006/38004-005,
+ * the U14F board is unable to provide reliable operations if the scsi
+ * request length exceeds 16Kbyte. When this length is exceeded the
+ * behavior is:
+ * - adapter_status equal 0x96 or 0xa3 or 0x93 or 0x94;
+ * - adapter_status equal 0 and target_status equal 2 for all targets
+ * in the next operation following the reset.
+ * This sequence takes a long time (>3 seconds), so in the meantime
+ * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts
+ * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31).
+ * Because of this I had to DISABLE_CLUSTERING and to work around the
+ * bus reset in the interrupt service routine, returning DID_BUS_BUSY
+ * so that the operations are retried without complaints from the scsi.c
+ * code.
+ * Any reset of the scsi bus is going to kill tape operations, since
+ * no retry is allowed for tapes. Bus resets are more likely when the
+ * scsi bus is under heavy load.
+ * Requests using scatter/gather have a maximum length of 16 x 1024 bytes
+ * when DISABLE_CLUSTERING is in effect, but unscattered requests could be
+ * larger than 16Kbyte.
+ *
+ * The new firmware has fixed all the above problems.
+ *
+ * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01),
+ * the latest firmware prom is 28008-006. Older firmware 28008-005 has
+ * problems when using more than 16 scatter/gather lists.
+ *
+ * The list of i/o ports to be probed can be totally replaced by the
+ * boot command line option: "u14-34f=port0,port1,port2,...", where the
+ * port0, port1... arguments are ISA/VESA addresses to be probed.
+ * For example using "u14-34f=0x230,0x340", the driver probes only the two
+ * addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables
+ * this driver.
+ *
+ * After the optional list of detection probes, other possible command line
+ * options are:
+ *
+ * eh:y use new scsi code (linux 2.2 only);
+ * eh:n use old scsi code;
+ * et:y use disk geometry returned by scsicam_bios_param;
+ * et:n use disk geometry jumpered on the board;
+ * lc:y enables linked commands;
+ * lc:n disables linked commands;
+ * of:y enables old firmware support;
+ * of:n disables old firmware support;
+ * mq:xx set the max queue depth to the value xx (2 <= xx <= 8).
+ *
+ * The default value is: "u14-34f=lc:n,of:n,mq:8,et:n".
+ * An example using the list of detection probes could be:
+ * "u14-34f=0x230,0x340,lc:y,of:n,mq:4,eh:n,et:n".
+ *
+ * When loading as a module, parameters can be specified as well.
+ * The above example would be (use 1 in place of y and 0 in place of n):
+ *
+ * modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \
+ * max_queue_depth=4 use_new_eh_code=0 ext_tran=0
+ *
+ * ----------------------------------------------------------------------------
+ * In this implementation, linked commands are designed to work with any DISK
+ * or CD-ROM, since this linking has only the intent of clustering (time-wise)
+ * and reordering by elevator sorting commands directed to each device,
+ * without any relation with the actual SCSI protocol between the controller
+ * and the device.
+ * If Q is the queue depth reported at boot time for each device (also named
+ * cmds/lun) and Q > 2, whenever there is already an active command to the
+ * device all other commands to the same device (up to Q-1) are kept waiting
+ * in the elevator sorting queue. When the active command completes, the
+ * commands in this queue are sorted by sector address. The sort is chosen
+ * between increasing or decreasing by minimizing the seek distance between
+ * the sector of the commands just completed and the sector of the first
+ * command in the list to be sorted.
+ * Trivial math assures that the unsorted average seek distance when doing
+ * random seeks over S sectors is S/3.
+ * When (Q-1) requests are uniformly distributed over S sectors, the average
+ * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
+ * average seek distance for (Q-1) random requests over S sectors is S/Q.
+ * The elevator sorting hence divides the seek distance by a factor Q/3.
+ * The above pure geometric remarks are valid in all cases and the
+ * driver effectively reduces the seek distance by the predicted factor
+ * when there are Q concurrent read i/o operations on the device, but this
+ * does not necessarily result in a noticeable performance improvement:
+ * your mileage may vary....  (An illustrative sketch of the sort-direction
+ * choice follows this comment block.)
+ *
+ * Note: command reordering inside a batch of queued commands could cause
+ * wrong results only if there is at least one write request and the
+ * intersection (sector-wise) of all requests is not empty.
+ * When the driver detects a batch including overlapping requests
+ * (a really rare event) strict serial (pid) order is enforced.
+ * ----------------------------------------------------------------------------
+ *
+ * The boards are named Ux4F0, Ux4F1,... according to the detection order.
+ *
+ * In order to support multiple ISA boards in a reliable way,
+ * the driver sets host->wish_block = TRUE for all ISA boards.
+ */
+
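+/*
+ * Illustrative sketch only (not driver code): the sort-direction choice for
+ * the elevator sorting described in the comment above.  Given the sector of
+ * the request that just completed and the lowest/highest sectors among the
+ * queued requests, ascending order is chosen when the lowest queued sector
+ * is closer to the current position than the highest one.  All names below
+ * are made up for the example.
+ */
+#if 0
+static int sort_ascending(unsigned int current_sector,
+                          unsigned int lowest_queued, unsigned int highest_queued) {
+   unsigned int up   = (lowest_queued  > current_sector) ?
+                       (lowest_queued  - current_sector) : (current_sector - lowest_queued);
+   unsigned int down = (highest_queued > current_sector) ?
+                       (highest_queued - current_sector) : (current_sector - highest_queued);
+   return (up <= down); /* nonzero: issue requests in increasing sector order */
+}
+#endif
+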
+#include <linux/version.h>
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+#define MAX_INT_PARAM 10
+
+#if defined(MODULE)
+#include <linux/module.h>
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,26)
+MODULE_PARM(io_port, "1-" __MODULE_STRING(MAX_INT_PARAM) "i");
+MODULE_PARM(linked_comm, "i");
+MODULE_PARM(have_old_firmware, "i");
+MODULE_PARM(link_statistics, "i");
+MODULE_PARM(max_queue_depth, "i");
+MODULE_PARM(use_new_eh_code, "i");
+MODULE_PARM(ext_tran, "i");
+MODULE_AUTHOR("Dario Ballabio");
+#endif
+
+#endif
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/byteorder.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include "u14-34f.h"
+#include <linux/stat.h>
+#include <linux/config.h>
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,36)
+#include <linux/init.h>
+#else
+#define __initfunc(A) A
+#define __initdata
+#define __init
+#endif
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+#include <asm/spinlock.h>
+#define IRQ_FLAGS
+#define IRQ_LOCK
+#define IRQ_LOCK_SAVE
+#define IRQ_UNLOCK
+#define IRQ_UNLOCK_RESTORE
+#define SPIN_FLAGS unsigned long spin_flags;
+#define SPIN_LOCK spin_lock_irq(&io_request_lock);
+#define SPIN_LOCK_SAVE spin_lock_irqsave(&io_request_lock, spin_flags);
+#define SPIN_UNLOCK spin_unlock_irq(&io_request_lock);
+#define SPIN_UNLOCK_RESTORE \
+ spin_unlock_irqrestore(&io_request_lock, spin_flags);
+static int use_new_eh_code = TRUE;
+#else
+#define IRQ_FLAGS unsigned long irq_flags;
+#define IRQ_LOCK cli();
+#define IRQ_LOCK_SAVE do {save_flags(irq_flags); cli();} while (0);
+#define IRQ_UNLOCK sti();
+#define IRQ_UNLOCK_RESTORE do {restore_flags(irq_flags);} while (0);
+#define SPIN_FLAGS
+#define SPIN_LOCK
+#define SPIN_LOCK_SAVE
+#define SPIN_UNLOCK
+#define SPIN_UNLOCK_RESTORE
+static int use_new_eh_code = FALSE;
+#endif
+
+struct proc_dir_entry proc_scsi_u14_34f = {
+ PROC_SCSI_U14_34F, 6, "u14_34f",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+/* Values for the PRODUCT_ID ports for the 14/34F */
+#define PRODUCT_ID1 0x56
+#define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */
+
+/* Subversion values */
+#define ISA 0
+#define ESA 1
+
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#undef DEBUG_LINKED_COMMANDS
+#undef DEBUG_DETECT
+#undef DEBUG_INTERRUPT
+#undef DEBUG_RESET
+#undef DEBUG_GENERATE_ERRORS
+#undef DEBUG_GENERATE_ABORTS
+#undef DEBUG_GEOMETRY
+
+#define MAX_ISA 3
+#define MAX_VESA 1
+#define MAX_EISA 0
+#define MAX_PCI 0
+#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
+#define MAX_CHANNEL 1
+#define MAX_LUN 8
+#define MAX_TARGET 8
+#define MAX_MAILBOXES 16
+#define MAX_SGLIST 32
+#define MAX_SAFE_SGLIST 16
+#define MAX_INTERNAL_RETRIES 64
+#define MAX_CMD_PER_LUN 2
+#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
+
+#define SKIP ULONG_MAX
+#define FALSE 0
+#define TRUE 1
+#define FREE 0
+#define IN_USE 1
+#define LOCKED 2
+#define IN_RESET 3
+#define IGNORE 4
+#define READY 5
+#define ABORTING 6
+#define NO_DMA 0xff
+#define MAXLOOP 10000
+
+#define REG_LCL_MASK 0
+#define REG_LCL_INTR 1
+#define REG_SYS_MASK 2
+#define REG_SYS_INTR 3
+#define REG_PRODUCT_ID1 4
+#define REG_PRODUCT_ID2 5
+#define REG_CONFIG1 6
+#define REG_CONFIG2 7
+#define REG_OGM 8
+#define REG_ICM 12
+#define REGION_SIZE 13
+#define BSY_ASSERTED 0x01
+#define IRQ_ASSERTED 0x01
+#define CMD_RESET 0xc0
+#define CMD_OGM_INTR 0x01
+#define CMD_CLR_INTR 0x01
+#define CMD_ENA_INTR 0x81
+#define ASOK 0x00
+#define ASST 0x91
+
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+#define YESNO(a) ((a) ? 'y' : 'n')
+#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
+
+#define PACKED __attribute__((packed))
+
+struct sg_list {
+ unsigned int address; /* Segment Address */
+ unsigned int num_bytes; /* Segment Length */
+ };
+
+/* MailBox SCSI Command Packet */
+struct mscp {
+ unsigned char opcode: 3; /* type of command */
+ unsigned char xdir: 2; /* data transfer direction */
+ unsigned char dcn: 1; /* disable disconnect */
+ unsigned char ca: 1; /* use cache (if available) */
+ unsigned char sg: 1; /* scatter/gather operation */
+ unsigned char target: 3; /* SCSI target id */
+ unsigned char channel: 2; /* SCSI channel number */
+ unsigned char lun: 3; /* SCSI logical unit number */
+ unsigned int data_address PACKED; /* transfer data pointer */
+ unsigned int data_len PACKED; /* length in bytes */
+ unsigned int link_address PACKED; /* for linking command chains */
+ unsigned char clink_id; /* identifies command in chain */
+ unsigned char use_sg; /* (if sg is set) 8 bytes per list */
+ unsigned char sense_len;
+ unsigned char scsi_cdbs_len; /* 6, 10, or 12 */
+ unsigned char scsi_cdbs[12]; /* SCSI commands */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ unsigned int sense_addr PACKED;
+ Scsi_Cmnd *SCpnt;
+ unsigned int index; /* cp index */
+ struct sg_list *sglist;
+ };
+
+struct hostdata {
+ struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
+ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
+ unsigned int last_cp_used; /* Index of last mailbox used */
+ unsigned int iocount; /* Total i/o done for this board */
+ int board_number; /* Number of this board */
+ char board_name[16]; /* Name of this board */
+ char board_id[256]; /* data from INQUIRY on this board */
+ int in_reset; /* True if board is doing a reset */
+ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
+ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
+ unsigned int retries; /* Number of internal retries */
+ unsigned long last_retried_pid; /* Pid of last retried command */
+ unsigned char subversion; /* Bus type, either ISA or ESA */
+ unsigned char heads;
+ unsigned char sectors;
+
+ /* slot != 0 for the U24F, slot == 0 for both the U14F and U34F */
+ unsigned char slot;
+ };
+
+static struct Scsi_Host *sh[MAX_BOARDS + 1];
+static const char *driver_name = "Ux4F";
+static char sha[MAX_BOARDS];
+
+/* Initialize num_boards so that ihdlr can work while detect is in progress */
+static unsigned int num_boards = MAX_BOARDS;
+
+static unsigned long io_port[] __initdata = {
+
+ /* Space for MAX_INT_PARAM ports usable while loading as a module */
+ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
+ SKIP, SKIP,
+
+ /* Possible ISA/VESA ports */
+ 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140,
+
+ /* End of list */
+ 0x0
+ };
+
+#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
+#define BN(board) (HD(board)->board_name)
+
+#define SWAP_BYTE(x) ((unsigned long)( \
+ (((unsigned long)(x) & 0x000000ffU) << 24) | \
+ (((unsigned long)(x) & 0x0000ff00U) << 8) | \
+ (((unsigned long)(x) & 0x00ff0000U) >> 8) | \
+ (((unsigned long)(x) & 0xff000000U) >> 24)))
+
+#if defined(__BIG_ENDIAN)
+#define H2DEV(x) SWAP_BYTE(x)
+#else
+#define H2DEV(x) (x)
+#endif
+
+#define DEV2H(x) H2DEV(x)
+#define V2DEV(addr) ((addr) ? H2DEV(virt_to_bus((void *)addr)) : 0)
+#define DEV2V(addr) ((addr) ? DEV2H(bus_to_virt((unsigned long)addr)) : 0)
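+/* For reference: SWAP_BYTE(0x12345678) is 0x78563412, so on a big-endian
+   host H2DEV()/DEV2H() reverse the byte order of 32-bit values exchanged
+   with the board, while on a little-endian host they are no-ops. */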
+
+static void do_interrupt_handler(int, void *, struct pt_regs *);
+static void flush_dev(Scsi_Device *, unsigned long, unsigned int, unsigned int);
+static int do_trace = FALSE;
+static int setup_done = FALSE;
+static int link_statistics = 0;
+static int ext_tran = FALSE;
+
+#if defined(HAVE_OLD_UX4F_FIRMWARE)
+static int have_old_firmware = TRUE;
+#else
+static int have_old_firmware = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS)
+static int linked_comm = TRUE;
+#else
+static int linked_comm = FALSE;
+#endif
+
+#if defined(CONFIG_SCSI_U14_34F_MAX_TAGS)
+static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS;
+#else
+static int max_queue_depth = MAX_CMD_PER_LUN;
+#endif
+
+static void select_queue_depths(struct Scsi_Host *host, Scsi_Device *devlist) {
+ Scsi_Device *dev;
+ int j, ntag = 0, nuntag = 0, tqd, utqd;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ j = ((struct hostdata *) host->hostdata)->board_number;
+
+ for(dev = devlist; dev; dev = dev->next) {
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ ntag++;
+ else
+ nuntag++;
+ }
+
+ utqd = MAX_CMD_PER_LUN;
+
+ tqd = (host->can_queue - utqd * nuntag) / (ntag ? ntag : 1);
+
+ if (tqd > max_queue_depth) tqd = max_queue_depth;
+
+ if (tqd < MAX_CMD_PER_LUN) tqd = MAX_CMD_PER_LUN;
+
+ for(dev = devlist; dev; dev = dev->next) {
+ char *tag_suffix = "", *link_suffix = "";
+
+ if (dev->host != host) continue;
+
+ if (TLDEV(dev->type) && (dev->tagged_supported || linked_comm))
+ dev->queue_depth = tqd;
+ else
+ dev->queue_depth = utqd;
+
+ if (TLDEV(dev->type)) {
+ if (linked_comm && dev->queue_depth > 2)
+ link_suffix = ", sorted";
+ else
+ link_suffix = ", unsorted";
+ }
+
+ if (dev->tagged_supported && TLDEV(dev->type) && dev->tagged_queue)
+ tag_suffix = ", tagged";
+ else if (dev->tagged_supported && TLDEV(dev->type))
+ tag_suffix = ", untagged";
+
+ printk("%s: scsi%d, channel %d, id %d, lun %d, cmds/lun %d%s%s.\n",
+ BN(j), host->host_no, dev->channel, dev->id, dev->lun,
+ dev->queue_depth, link_suffix, tag_suffix);
+ }
+
+ IRQ_UNLOCK_RESTORE
+ return;
+}
+
+static inline int wait_on_busy(unsigned long iobase, unsigned int loop) {
+
+ while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) {
+ udelay(1L);
+ if (--loop == 0) return TRUE;
+ }
+
+ return FALSE;
+}
+
+static int board_inquiry(unsigned int j) {
+ struct mscp *cpp;
+ unsigned int time, limit = 0;
+
+ cpp = &HD(j)->cp[0];
+ memset(cpp, 0, sizeof(struct mscp));
+ cpp->opcode = OP_HOST_ADAPTER;
+ cpp->xdir = DTD_IN;
+ cpp->data_address = V2DEV(HD(j)->board_id);
+ cpp->data_len = H2DEV(sizeof(HD(j)->board_id));
+ cpp->scsi_cdbs_len = 6;
+ cpp->scsi_cdbs[0] = HA_CMD_INQUIRY;
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: board_inquiry, adapter busy.\n", BN(j));
+ return TRUE;
+ }
+
+ HD(j)->cp_stat[0] = IGNORE;
+
+ /* Clear the interrupt indication */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+ /* Store pointer in OGM address bytes */
+ outl(V2DEV(cpp), sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+
+ if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
+ HD(j)->cp_stat[0] = FREE;
+ printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+__initfunc (static inline int port_detect \
+ (unsigned long port_base, unsigned int j, Scsi_Host_Template *tpnt)) {
+ unsigned char irq, dma_channel, subversion, i;
+ unsigned char in_byte;
+ char *bus_type, dma_name[16];
+
+ /* Allowed BIOS base addresses (NULL indicates reserved) */
+ void *bios_segment_table[8] = {
+ NULL,
+ (void *) 0xc4000, (void *) 0xc8000, (void *) 0xcc000, (void *) 0xd0000,
+ (void *) 0xd4000, (void *) 0xd8000, (void *) 0xdc000
+ };
+
+ /* Allowed IRQs */
+ unsigned char interrupt_table[4] = { 15, 14, 11, 10 };
+
+ /* Allowed DMA channels for ISA (0 indicates reserved) */
+ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
+
+ /* Head/sector mappings */
+ struct {
+ unsigned char heads;
+ unsigned char sectors;
+ } mapping_table[4] = {
+ { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
+ };
+
+ struct config_1 {
+ unsigned char bios_segment: 3;
+ unsigned char removable_disks_as_fixed: 1;
+ unsigned char interrupt: 2;
+ unsigned char dma_channel: 2;
+ } config_1;
+
+ struct config_2 {
+ unsigned char ha_scsi_id: 3;
+ unsigned char mapping_mode: 2;
+ unsigned char bios_drive_number: 1;
+ unsigned char tfr_port: 2;
+ } config_2;
+
+ char name[16];
+
+ sprintf(name, "%s%d", driver_name, j);
+
+ if(check_region(port_base, REGION_SIZE)) {
+ printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
+ return FALSE;
+ }
+
+ if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) return FALSE;
+
+ in_byte = inb(port_base + REG_PRODUCT_ID2);
+
+ if ((in_byte & 0xf0) != PRODUCT_ID2) return FALSE;
+
+ *(char *)&config_1 = inb(port_base + REG_CONFIG1);
+ *(char *)&config_2 = inb(port_base + REG_CONFIG2);
+
+ irq = interrupt_table[config_1.interrupt];
+ dma_channel = dma_channel_table[config_1.dma_channel];
+ subversion = (in_byte & 0x0f);
+
+ /* Board detected, allocate its IRQ */
+ if (request_irq(irq, do_interrupt_handler,
+ SA_INTERRUPT | ((subversion == ESA) ? SA_SHIRQ : 0),
+ driver_name, (void *) &sha[j])) {
+ printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
+ return FALSE;
+ }
+
+ if (subversion == ISA && request_dma(dma_channel, driver_name)) {
+ printk("%s: unable to allocate DMA channel %u, detaching.\n",
+ name, dma_channel);
+ free_irq(irq, &sha[j]);
+ return FALSE;
+ }
+
+ if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING;
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+ tpnt->use_new_eh_code = use_new_eh_code;
+#else
+ use_new_eh_code = FALSE;
+#endif
+
+ sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
+
+ if (sh[j] == NULL) {
+ printk("%s: unable to register host, detaching.\n", name);
+
+ free_irq(irq, &sha[j]);
+
+ if (subversion == ISA) free_dma(dma_channel);
+
+ return FALSE;
+ }
+
+ sh[j]->io_port = port_base;
+ sh[j]->unique_id = port_base;
+ sh[j]->n_io_port = REGION_SIZE;
+ sh[j]->base = bios_segment_table[config_1.bios_segment];
+ sh[j]->irq = irq;
+ sh[j]->sg_tablesize = MAX_SGLIST;
+ sh[j]->this_id = config_2.ha_scsi_id;
+ sh[j]->can_queue = MAX_MAILBOXES;
+ sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
+ sh[j]->select_queue_depths = select_queue_depths;
+
+#if defined(DEBUG_DETECT)
+ {
+ unsigned char sys_mask, lcl_mask;
+
+ sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
+ lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
+ printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", sys_mask, lcl_mask);
+ }
+#endif
+
+ /* Probably a bogus host scsi id, set it to the dummy value */
+ if (sh[j]->this_id == 0) sh[j]->this_id = -1;
+
+ /* If BIOS is disabled, force enable interrupts */
+ if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);
+
+ /* Register the I/O space that we use */
+ request_region(sh[j]->io_port, sh[j]->n_io_port, driver_name);
+
+ memset(HD(j), 0, sizeof(struct hostdata));
+ HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
+ HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
+ HD(j)->subversion = subversion;
+ HD(j)->board_number = j;
+
+ if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+
+ if (HD(j)->subversion == ESA) {
+ sh[j]->unchecked_isa_dma = FALSE;
+ sh[j]->dma_channel = NO_DMA;
+ sprintf(BN(j), "U34F%d", j);
+ bus_type = "VESA";
+ }
+ else {
+ sh[j]->wish_block = TRUE;
+ sh[j]->unchecked_isa_dma = TRUE;
+ disable_dma(dma_channel);
+ clear_dma_ff(dma_channel);
+ set_dma_mode(dma_channel, DMA_MODE_CASCADE);
+ enable_dma(dma_channel);
+ sh[j]->dma_channel = dma_channel;
+ sprintf(BN(j), "U14F%d", j);
+ bus_type = "ISA";
+ }
+
+ sh[j]->max_channel = MAX_CHANNEL - 1;
+ sh[j]->max_id = MAX_TARGET;
+ sh[j]->max_lun = MAX_LUN;
+
+ if (HD(j)->subversion == ISA && !board_inquiry(j)) {
+ HD(j)->board_id[40] = 0;
+
+ if (strcmp(&HD(j)->board_id[32], "06000600")) {
+ printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
+ printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
+ BN(j), &HD(j)->board_id[32]);
+ sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
+ sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
+ }
+ }
+
+ if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
+ else sprintf(dma_name, "DMA %u", dma_channel);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if (! ((&HD(j)->cp[i])->sglist = kmalloc(
+ sh[j]->sg_tablesize * sizeof(struct sg_list),
+ (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
+ printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
+ u14_34f_release(sh[j]);
+ return FALSE;
+ }
+
+ if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
+ max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
+
+ if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;
+
+ if (j == 0) {
+ printk("UltraStor 14F/34F: Copyright (C) 1994-1998 Dario Ballabio.\n");
+ printk("%s config options -> of:%c, lc:%c, mq:%d, eh:%c, et:%c.\n",
+ driver_name, YESNO(have_old_firmware), YESNO(linked_comm),
+ max_queue_depth, YESNO(use_new_eh_code), YESNO(ext_tran));
+ }
+
+ printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n",
+ BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base,
+ sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
+
+ if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
+ printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
+ BN(j), sh[j]->max_id, sh[j]->max_lun);
+
+ for (i = 0; i <= sh[j]->max_channel; i++)
+ printk("%s: SCSI channel %u enabled, host target ID %d.\n",
+ BN(j), i, sh[j]->this_id);
+
+ return TRUE;
+}
+
+__initfunc (void u14_34f_setup(char *str, int *ints)) {
+ int i, argc = ints[0];
+ char *cur = str, *pc;
+
+ if (argc > 0) {
+
+ if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;
+
+ for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];
+
+ io_port[i] = 0;
+ setup_done = TRUE;
+ }
+
+ while (cur && (pc = strchr(cur, ':'))) {
+ int val = 0, c = *++pc;
+
+ if (c == 'n' || c == 'N') val = FALSE;
+ else if (c == 'y' || c == 'Y') val = TRUE;
+ else val = (int) simple_strtoul(pc, NULL, 0);
+
+ if (!strncmp(cur, "lc:", 3)) linked_comm = val;
+ else if (!strncmp(cur, "of:", 3)) have_old_firmware = val;
+ else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
+ else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
+ else if (!strncmp(cur, "eh:", 3)) use_new_eh_code = val;
+ else if (!strncmp(cur, "et:", 3)) ext_tran = val;
+
+ if ((cur = strchr(cur, ','))) ++cur;
+ }
+
+ return;
+}
+
+__initfunc (int u14_34f_detect(Scsi_Host_Template *tpnt)) {
+ unsigned int j = 0, k;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ tpnt->proc_dir = &proc_scsi_u14_34f;
+
+#if defined(MODULE)
+ /* io_port could have been modified when loading as a module */
+ if(io_port[0] != SKIP) {
+ setup_done = TRUE;
+ io_port[MAX_INT_PARAM] = 0;
+ }
+#endif
+
+ for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
+
+ for (k = 0; io_port[k]; k++) {
+
+ if (io_port[k] == SKIP) continue;
+
+ if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
+ }
+
+ num_boards = j;
+ IRQ_UNLOCK_RESTORE
+ return j;
+}
+
+static inline void build_sg_list(struct mscp *cpp, Scsi_Cmnd *SCpnt) {
+ unsigned int k, data_len = 0;
+ struct scatterlist *sgpnt;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ for (k = 0; k < SCpnt->use_sg; k++) {
+ cpp->sglist[k].address = V2DEV(sgpnt[k].address);
+ cpp->sglist[k].num_bytes = H2DEV(sgpnt[k].length);
+ data_len += sgpnt[k].length;
+ }
+
+ cpp->use_sg = SCpnt->use_sg;
+ cpp->data_address = V2DEV(cpp->sglist);
+ cpp->data_len = H2DEV(data_len);
+}
+
+static inline int do_qcomm(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ unsigned int i, j, k;
+ struct mscp *cpp;
+
+ static const unsigned char data_out_cmds[] = {
+ 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
+ 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
+ 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b
+ };
+
+ static const unsigned char data_none_cmds[] = {
+ 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
+ 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
+ 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5
+ };
+
+ /* j is the board number */
+ j = ((struct hostdata *) SCpnt->host->hostdata)->board_number;
+
+ if (SCpnt->host_scribble)
+ panic("%s: qcomm, pid %ld, SCpnt %p already active.\n",
+ BN(j), SCpnt->pid, SCpnt);
+
+ /* i is the mailbox number, look for the first free mailbox
+ starting from last_cp_used */
+ i = HD(j)->last_cp_used + 1;
+
+ for (k = 0; k < sh[j]->can_queue; k++, i++) {
+
+ if (i >= sh[j]->can_queue) i = 0;
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ HD(j)->last_cp_used = i;
+ break;
+ }
+ }
+
+ if (k == sh[j]->can_queue) {
+ printk("%s: qcomm, no free mailbox.\n", BN(j));
+ return 1;
+ }
+
+ /* Set pointer to control packet structure */
+ cpp = &HD(j)->cp[i];
+
+ memset(cpp, 0, sizeof(struct mscp) - sizeof(struct sg_list *));
+ SCpnt->scsi_done = done;
+ cpp->index = i;
+ SCpnt->host_scribble = (unsigned char *) &cpp->index;
+
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid);
+
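+   /* Classify the data transfer direction from the SCSI opcode: default to
+      data-in, then switch to DTD_OUT or DTD_NONE if the opcode appears in
+      the corresponding table above. */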
+ cpp->xdir = DTD_IN;
+
+ for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
+ if (SCpnt->cmnd[0] == data_out_cmds[k]) {
+ cpp->xdir = DTD_OUT;
+ break;
+ }
+
+ if (cpp->xdir == DTD_IN)
+ for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
+ if (SCpnt->cmnd[0] == data_none_cmds[k]) {
+ cpp->xdir = DTD_NONE;
+ break;
+ }
+
+ cpp->opcode = OP_SCSI;
+ cpp->channel = SCpnt->channel;
+ cpp->target = SCpnt->target;
+ cpp->lun = SCpnt->lun;
+ cpp->SCpnt = SCpnt;
+ cpp->sense_addr = V2DEV(SCpnt->sense_buffer);
+ cpp->sense_len = sizeof SCpnt->sense_buffer;
+
+ if (SCpnt->use_sg) {
+ cpp->sg = TRUE;
+ build_sg_list(cpp, SCpnt);
+ }
+ else {
+ cpp->data_address = V2DEV(SCpnt->request_buffer);
+ cpp->data_len = H2DEV(SCpnt->request_bufflen);
+ }
+
+ cpp->scsi_cdbs_len = SCpnt->cmd_len;
+ memcpy(cpp->scsi_cdbs, SCpnt->cmnd, cpp->scsi_cdbs_len);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type)) {
+ HD(j)->cp_stat[i] = READY;
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, FALSE);
+ return 0;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ SCpnt->host_scribble = NULL;
+ printk("%s: qcomm, target %d.%d:%d, pid %ld, adapter busy.\n",
+ BN(j), SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid);
+ return 1;
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl(V2DEV(cpp), sh[j]->io_port + REG_OGM);
+
+ /* Issue OGM interrupt */
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+
+ HD(j)->cp_stat[i] = IN_USE;
+ return 0;
+}
+
+int u14_34f_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_qcomm(SCpnt, done);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
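+/* Abort handler for the old error handling code: locate the command's mailbox
+   via host_scribble and act on its state; only commands still in the READY or
+   ABORTING state can be completed here with DID_ABORT. */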
+static inline int do_old_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL ||
+ (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout))) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ return SCSI_ABORT_SNOOZE;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return SCSI_ABORT_ERROR;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SCSI_ABORT_SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int u14_34f_old_abort(Scsi_Cmnd *SCarg) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_abort(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+static inline int do_abort(Scsi_Cmnd *SCarg) {
+ unsigned int i, j;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+
+ if (SCarg->host_scribble == NULL) {
+ printk("%s: abort, target %d.%d:%d, pid %ld inactive.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+ return SUCCESS;
+ }
+
+ i = *(unsigned int *)SCarg->host_scribble;
+ printk("%s: abort, mbox %d, target %d.%d:%d, pid %ld.\n",
+ BN(j), i, SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (i >= sh[j]->can_queue)
+ panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: abort, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: abort, mbox %d is free.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_USE) {
+ printk("%s: abort, mbox %d is in use.\n", BN(j), i);
+
+ if (SCarg != HD(j)->cp[i].SCpnt)
+ panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
+ BN(j), i, SCarg, HD(j)->cp[i].SCpnt);
+
+ if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
+ printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);
+
+ if (SCarg->eh_state == SCSI_STATE_TIMEOUT) {
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d, eh_state timeout, pid %ld.\n",
+ BN(j), i, SCarg->pid);
+ return SUCCESS;
+ }
+
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
+ return FAILED;
+ }
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ printk("%s: abort, mbox %d is locked.\n", BN(j), i);
+ return SUCCESS;
+ }
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ SCarg->result = DID_ABORT << 16;
+ SCarg->host_scribble = NULL;
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s, abort, mbox %d ready, DID_ABORT, pid %ld done.\n",
+ BN(j), i, SCarg->pid);
+ SCarg->scsi_done(SCarg);
+ return SUCCESS;
+ }
+
+ panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
+}
+
+int u14_34f_abort(Scsi_Cmnd *SCarg) {
+
+ return do_abort(SCarg);
+}
+
+#endif /* new_eh_code */
+
+static inline int do_old_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (SCarg->serial_number_at_timeout &&
+ (SCarg->serial_number != SCarg->serial_number_at_timeout)) {
+ printk("%s: reset, pid %ld, reset not running.\n", BN(j), SCarg->pid);
+ return SCSI_RESET_NOT_RUNNING;
+ }
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ HD(j)->retries = 0;
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return SCSI_RESET_ERROR;
+ }
+
+ outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
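+   /* Drop the locks and busy-wait up to 10 seconds so that mailboxes still
+      owned by the adapter can complete through the interrupt handler. */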
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) {
+ printk("%s: reset, exit, success.\n", BN(j));
+ return SCSI_RESET_SUCCESS;
+ }
+ else {
+ printk("%s: reset, exit, wakeup.\n", BN(j));
+ return SCSI_RESET_PUNT;
+ }
+}
+
+int u14_34f_old_reset(Scsi_Cmnd *SCarg, unsigned int reset_flags) {
+ int rtn;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+ rtn = do_old_reset(SCarg);
+ IRQ_UNLOCK_RESTORE
+ return rtn;
+}
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+static inline int do_reset(Scsi_Cmnd *SCarg) {
+ unsigned int i, j, time, k, c, limit = 0;
+ int arg_done = FALSE;
+ Scsi_Cmnd *SCpnt;
+
+ j = ((struct hostdata *) SCarg->host->hostdata)->board_number;
+ printk("%s: reset, enter, target %d.%d:%d, pid %ld.\n",
+ BN(j), SCarg->channel, SCarg->target, SCarg->lun, SCarg->pid);
+
+ if (SCarg->host_scribble == NULL)
+ printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->pid);
+
+ if (HD(j)->in_reset) {
+ printk("%s: reset, exit, already in reset.\n", BN(j));
+ return FAILED;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, exit, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ HD(j)->retries = 0;
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++) {
+ HD(j)->target_redo[k][c] = TRUE;
+ HD(j)->target_to[k][c] = 0;
+ }
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == FREE) continue;
+
+ if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
+ continue;
+ }
+
+ if (!(SCpnt = HD(j)->cp[i].SCpnt))
+ panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
+ HD(j)->cp_stat[i] = ABORTING;
+ printk("%s: reset, mbox %d aborting, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else {
+ HD(j)->cp_stat[i] = IN_RESET;
+ printk("%s: reset, mbox %d in reset, pid %ld.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);
+
+ if (SCpnt->scsi_done == NULL)
+ panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);
+
+ if (SCpnt == SCarg) arg_done = TRUE;
+ }
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: reset, cannot reset, timeout error.\n", BN(j));
+ return FAILED;
+ }
+
+ outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
+ printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));
+
+#if defined(DEBUG_RESET)
+ do_trace = TRUE;
+#endif
+
+ HD(j)->in_reset = TRUE;
+ SPIN_UNLOCK
+ IRQ_UNLOCK
+ time = jiffies;
+ while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
+ IRQ_LOCK
+ SPIN_LOCK
+ printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
+
+ for (i = 0; i < sh[j]->can_queue; i++) {
+
+ if (HD(j)->cp_stat[i] == IN_RESET) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox is still waiting for its interrupt */
+ HD(j)->cp_stat[i] = LOCKED;
+
+ printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else if (HD(j)->cp_stat[i] == ABORTING) {
+ SCpnt = HD(j)->cp[i].SCpnt;
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->host_scribble = NULL;
+
+ /* This mailbox was never queued to the adapter */
+ HD(j)->cp_stat[i] = FREE;
+
+ printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n",
+ BN(j), i, SCpnt->pid);
+ }
+
+ else
+
+ /* Any other mailbox has already been set free by interrupt */
+ continue;
+
+ SCpnt->scsi_done(SCpnt);
+ IRQ_LOCK
+ }
+
+ HD(j)->in_reset = FALSE;
+ do_trace = FALSE;
+
+ if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->pid);
+ else printk("%s: reset, exit.\n", BN(j));
+
+ return SUCCESS;
+}
+
+int u14_34f_reset(Scsi_Cmnd *SCarg) {
+
+ return do_reset(SCarg);
+}
+
+#endif /* new_eh_code */
+
+int u14_34f_biosparam(Disk *disk, kdev_t dev, int *dkinfo) {
+ unsigned int j = 0;
+ int size = disk->capacity;
+
+ dkinfo[0] = HD(j)->heads;
+ dkinfo[1] = HD(j)->sectors;
+ dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors);
+
+ if (ext_tran && (scsicam_bios_param(disk, dev, dkinfo) < 0)) {
+ dkinfo[0] = 255;
+ dkinfo[1] = 63;
+ dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
+ }
+
+#if defined (DEBUG_GEOMETRY)
+ printk ("%s: biosparam, head=%d, sec=%d, cyl=%d.\n", driver_name,
+ dkinfo[0], dkinfo[1], dkinfo[2]);
+#endif
+
+ return FALSE;
+}
+
+static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
+ unsigned int rev) {
+ unsigned int i, j, k, y;
+ unsigned long x;
+
+ for (i = 0; i < n - 1; i++) {
+ k = i;
+
+ for (j = k + 1; j < n; j++)
+ if (rev) {
+ if (sk[j] > sk[k]) k = j;
+ }
+ else {
+ if (sk[j] < sk[k]) k = j;
+ }
+
+ if (k != i) {
+ x = sk[k]; sk[k] = sk[i]; sk[i] = x;
+ y = da[k]; da[k] = da[i]; da[i] = y;
+ }
+ }
+
+ return;
+}
+
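+/* Sort the list of ready mailboxes for a device by starting sector (ascending
+   or descending, chosen by a heuristic based on the current sector position)
+   to shorten seeks. Returns TRUE if overlapping requests were detected, in
+   which case they are re-sorted by pid and the caller queues only the first. */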
+static inline int reorder(unsigned int j, unsigned long cursec,
+ unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n;
+ unsigned int rev = FALSE, s = TRUE, r = TRUE;
+ unsigned int input_only = TRUE, overlap = FALSE;
+ unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
+ unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
+ unsigned long ioseek = 0;
+
+ static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
+ static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
+ static unsigned int readysorted = 0, revcount = 0;
+ static unsigned long seeksorted = 0, seeknosort = 0;
+
+ if (link_statistics && !(++flushcount % link_statistics))
+ printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
+ " av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
+ ovlcount, readycount, readysorted, sortcount, revcount,
+ seeknosort / (readycount + 1),
+ seeksorted / (readycount + 1));
+
+ if (n_ready <= 1) return FALSE;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
+
+ if (SCpnt->request.sector < minsec) minsec = SCpnt->request.sector;
+ if (SCpnt->request.sector > maxsec) maxsec = SCpnt->request.sector;
+
+ sl[n] = SCpnt->request.sector;
+ ioseek += SCpnt->request.nr_sectors;
+
+ if (!n) continue;
+
+ if (sl[n] < sl[n - 1]) s = FALSE;
+ if (sl[n] > sl[n - 1]) r = FALSE;
+
+ if (link_statistics) {
+ if (sl[n] > sl[n - 1])
+ seek += sl[n] - sl[n - 1];
+ else
+ seek += sl[n - 1] - sl[n];
+ }
+
+ }
+
+ if (link_statistics) {
+ if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
+ }
+
+ if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;
+
+ if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;
+
+ if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);
+
+ if (!input_only) for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ ll[n] = SCpnt->request.nr_sectors; pl[n] = SCpnt->pid;
+
+ if (!n) continue;
+
+ if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
+ || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
+ }
+
+ if (overlap) sort(pl, il, n_ready, FALSE);
+
+ if (link_statistics) {
+ if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
+ batchcount++; readycount += n_ready, seeknosort += seek / 1024;
+ if (input_only) inputcount++;
+ if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
+ else seeksorted += (iseek + maxsec - minsec) / 1024;
+ if (rev && !r) { revcount++; readysorted += n_ready; }
+ if (!rev && !s) { sortcount++; readysorted += n_ready; }
+ }
+
+#if defined(DEBUG_LINKED_COMMANDS)
+ if (link_statistics && (overlap || !(flushcount % link_statistics)))
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+ printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
+ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
+ (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
+ SCpnt->lun, SCpnt->pid, k, flushcount, n_ready,
+ SCpnt->request.sector, SCpnt->request.nr_sectors, cursec,
+ YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
+ YESNO(overlap), cpp->xdir);
+ }
+#endif
+ return overlap;
+}
+
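+/* Queue to the adapter all READY commands pending for this device, in the
+   order chosen by reorder(); give up early if one of them is already being
+   processed (IN_USE) by the board. */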
+static void flush_dev(Scsi_Device *dev, unsigned long cursec, unsigned int j,
+ unsigned int ihdlr) {
+ Scsi_Cmnd *SCpnt;
+ struct mscp *cpp;
+ unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];
+
+ for (k = 0; k < sh[j]->can_queue; k++) {
+
+ if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;
+
+ cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (SCpnt->device != dev) continue;
+
+ if (HD(j)->cp_stat[k] == IN_USE) return;
+
+ il[n_ready++] = k;
+ }
+
+ if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;
+
+ for (n = 0; n < n_ready; n++) {
+ k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
+
+ if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
+ printk("%s: %s, target %d.%d:%d, pid %ld, mbox %d, adapter"\
+ " busy, will abort.\n", BN(j), (ihdlr ? "ihdlr" : "qcomm"),
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid, k);
+ HD(j)->cp_stat[k] = ABORTING;
+ continue;
+ }
+
+ outl(V2DEV(cpp), sh[j]->io_port + REG_OGM);
+ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
+ HD(j)->cp_stat[k] = IN_USE;
+ }
+
+}
+
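+/* Per-board interrupt service routine: read the completed mscp address from
+   the ICM register, validate the corresponding mailbox, map the adapter and
+   target status to a host status (DID_*) code, and complete the command via
+   scsi_done. */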
+static inline void ihdlr(int irq, unsigned int j) {
+ Scsi_Cmnd *SCpnt;
+ unsigned int i, k, c, status, tstatus, reg, ret;
+ struct mscp *spp, *cpp;
+
+ if (sh[j]->irq != irq)
+ panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+
+   /* Check if this board needs to be serviced */
+ if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) return;
+
+ HD(j)->iocount++;
+
+ if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ /* Check if this board is still busy */
+ if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+ printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
+ BN(j), irq, reg, HD(j)->iocount);
+ return;
+ }
+
+ ret = inl(sh[j]->io_port + REG_ICM);
+ spp = (struct mscp *)DEV2V(ret);
+ cpp = spp;
+
+ /* Clear interrupt pending flag */
+ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
+
+#if defined(DEBUG_GENERATE_ABORTS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) return;
+#endif
+
+ /* Find the mailbox to be serviced on this board */
+ i = cpp - HD(j)->cp;
+
+ if (cpp < HD(j)->cp || cpp >= HD(j)->cp + sh[j]->can_queue
+ || i >= sh[j]->can_queue)
+ panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
+ (void *)ret, HD(j)->cp);
+
+ if (HD(j)->cp_stat[i] == IGNORE) {
+ HD(j)->cp_stat[i] = FREE;
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == LOCKED) {
+ HD(j)->cp_stat[i] = FREE;
+ printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == FREE) {
+ printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
+ HD(j)->iocount);
+ return;
+ }
+ else if (HD(j)->cp_stat[i] == IN_RESET)
+ printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
+ else if (HD(j)->cp_stat[i] != IN_USE)
+ panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
+ BN(j), i, HD(j)->cp_stat[i]);
+
+ HD(j)->cp_stat[i] = FREE;
+ SCpnt = cpp->SCpnt;
+
+ if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
+
+ if (SCpnt->host_scribble == NULL)
+ panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i,
+ SCpnt->pid, SCpnt);
+
+ if (*(unsigned int *)SCpnt->host_scribble != i)
+ panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n",
+ BN(j), i, SCpnt->pid, *(unsigned int *)SCpnt->host_scribble);
+
+ if (linked_comm && SCpnt->device->queue_depth > 2
+ && TLDEV(SCpnt->device->type))
+ flush_dev(SCpnt->device, SCpnt->request.sector, j, TRUE);
+
+ tstatus = status_byte(spp->target_status);
+
+#if defined(DEBUG_GENERATE_ERRORS)
+ if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
+ spp->adapter_status = 0x01;
+#endif
+
+ switch (spp->adapter_status) {
+ case ASOK: /* status OK */
+
+ /* Forces a reset if a disk drive keeps returning BUSY */
+ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
+ status = DID_ERROR << 16;
+
+ /* If there was a bus reset, redo operation on each target */
+ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
+ && HD(j)->target_redo[SCpnt->target][SCpnt->channel])
+ status = DID_BUS_BUSY << 16;
+
+ /* Works around a flaw in scsi.c */
+ else if (tstatus == CHECK_CONDITION
+ && SCpnt->device->type == TYPE_DISK
+ && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
+ status = DID_BUS_BUSY << 16;
+
+ else
+ status = DID_OK << 16;
+
+ if (tstatus == GOOD)
+ HD(j)->target_redo[SCpnt->target][SCpnt->channel] = FALSE;
+
+ if (spp->target_status && SCpnt->device->type == TYPE_DISK)
+ printk("%s: ihdlr, target %d.%d:%d, pid %ld, "\
+ "target_status 0x%x, sense key 0x%x.\n", BN(j),
+ SCpnt->channel, SCpnt->target, SCpnt->lun,
+ SCpnt->pid, spp->target_status,
+ SCpnt->sense_buffer[2]);
+
+ HD(j)->target_to[SCpnt->target][SCpnt->channel] = 0;
+
+ if (HD(j)->last_retried_pid == SCpnt->pid) HD(j)->retries = 0;
+
+ break;
+ case ASST: /* Selection Time Out */
+
+ if (HD(j)->target_to[SCpnt->target][SCpnt->channel] > 1)
+ status = DID_ERROR << 16;
+ else {
+ status = DID_TIME_OUT << 16;
+ HD(j)->target_to[SCpnt->target][SCpnt->channel]++;
+ }
+
+ break;
+
+ /* Perform a limited number of internal retries */
+ case 0x93: /* Unexpected bus free */
+ case 0x94: /* Target bus phase sequence failure */
+ case 0x96: /* Illegal SCSI command */
+ case 0xa3: /* SCSI bus reset error */
+
+ for (c = 0; c <= sh[j]->max_channel; c++)
+ for (k = 0; k < sh[j]->max_id; k++)
+ HD(j)->target_redo[k][c] = TRUE;
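+         /* no break: these bus errors share the limited retry handling below */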
+
+
+ case 0x92: /* Data over/under-run */
+
+ if (SCpnt->device->type != TYPE_TAPE
+ && HD(j)->retries < MAX_INTERNAL_RETRIES) {
+
+#if defined(DID_SOFT_ERROR)
+ status = DID_SOFT_ERROR << 16;
+#else
+ status = DID_BUS_BUSY << 16;
+#endif
+
+ HD(j)->retries++;
+ HD(j)->last_retried_pid = SCpnt->pid;
+ }
+ else
+ status = DID_ERROR << 16;
+
+ break;
+ case 0x01: /* Invalid command */
+ case 0x02: /* Invalid parameters */
+ case 0x03: /* Invalid data list */
+ case 0x84: /* SCSI bus abort error */
+ case 0x9b: /* Auto request sense error */
+ case 0x9f: /* Unexpected command complete message error */
+ case 0xff: /* Invalid parameter in the S/G list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ }
+
+ SCpnt->result = status | spp->target_status;
+
+#if defined(DEBUG_INTERRUPT)
+ if (SCpnt->result || do_trace)
+#else
+ if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
+ (spp->adapter_status != ASOK &&
+ spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
+ do_trace || msg_byte(spp->target_status))
+#endif
+ printk("%s: ihdlr, mbox %2d, err 0x%x:%x,"\
+ " target %d.%d:%d, pid %ld, reg 0x%x, count %d.\n",
+ BN(j), i, spp->adapter_status, spp->target_status,
+ SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->pid,
+ reg, HD(j)->iocount);
+
+ /* Set the command state to inactive */
+ SCpnt->host_scribble = NULL;
+
+ SCpnt->scsi_done(SCpnt);
+
+ if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
+ HD(j)->iocount);
+
+ return;
+}
+
+static void do_interrupt_handler(int irq, void *shap, struct pt_regs *regs) {
+ unsigned int j;
+ IRQ_FLAGS
+ SPIN_FLAGS
+
+ /* Check if the interrupt must be processed by this handler */
+ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
+
+ SPIN_LOCK_SAVE
+ IRQ_LOCK_SAVE
+ ihdlr(irq, j);
+ IRQ_UNLOCK_RESTORE
+ SPIN_UNLOCK_RESTORE
+}
+
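+/* Undo everything done at detection time for one host: free the per-mailbox
+   scatter/gather lists, the IRQ, the DMA channel and the I/O region, then
+   unregister the host. */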
+int u14_34f_release(struct Scsi_Host *shpnt) {
+ unsigned int i, j;
+ IRQ_FLAGS
+
+ IRQ_LOCK_SAVE
+
+ for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);
+
+ if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n",
+ driver_name);
+
+ for (i = 0; i < sh[j]->can_queue; i++)
+ if ((&HD(j)->cp[i])->sglist) kfree((&HD(j)->cp[i])->sglist);
+
+ free_irq(sh[j]->irq, &sha[j]);
+
+ if (sh[j]->dma_channel != NO_DMA) free_dma(sh[j]->dma_channel);
+
+ release_region(sh[j]->io_port, sh[j]->n_io_port);
+ scsi_unregister(sh[j]);
+ IRQ_UNLOCK_RESTORE
+ return FALSE;
+}
+
+#if defined(MODULE)
+Scsi_Host_Template driver_template = ULTRASTOR_14_34F;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/u14-34f.h b/linux/src/drivers/scsi/u14-34f.h
new file mode 100644
index 0000000..943b8cb
--- /dev/null
+++ b/linux/src/drivers/scsi/u14-34f.h
@@ -0,0 +1,60 @@
+/*
+ * u14-34f.h - used by the low-level driver for UltraStor 14F/34F
+ */
+#ifndef _U14_34F_H
+#define _U14_34F_H
+
+#include <scsi/scsicam.h>
+#include <linux/version.h>
+
+int u14_34f_detect(Scsi_Host_Template *);
+int u14_34f_release(struct Scsi_Host *);
+int u14_34f_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int u14_34f_abort(Scsi_Cmnd *);
+int u14_34f_old_abort(Scsi_Cmnd *);
+int u14_34f_reset(Scsi_Cmnd *);
+int u14_34f_old_reset(Scsi_Cmnd *, unsigned int);
+int u14_34f_biosparam(Disk *, kdev_t, int *);
+
+#define U14_34F_VERSION "4.33.00"
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,101)
+
+#define ULTRASTOR_14_34F { \
+ name: "UltraStor 14F/34F rev. " U14_34F_VERSION " ", \
+ detect: u14_34f_detect, \
+ release: u14_34f_release, \
+ queuecommand: u14_34f_queuecommand, \
+ abort: u14_34f_old_abort, \
+ reset: u14_34f_old_reset, \
+ eh_abort_handler: u14_34f_abort, \
+ eh_device_reset_handler: NULL, \
+ eh_bus_reset_handler: NULL, \
+ eh_host_reset_handler: u14_34f_reset, \
+ bios_param: u14_34f_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 1 /* Enable new error code */ \
+ }
+
+#else /* Use old scsi code */
+
+#define ULTRASTOR_14_34F { \
+ name: "UltraStor 14F/34F rev. " U14_34F_VERSION " ", \
+ detect: u14_34f_detect, \
+ release: u14_34f_release, \
+ queuecommand: u14_34f_queuecommand, \
+ abort: u14_34f_old_abort, \
+ reset: u14_34f_old_reset, \
+ bios_param: u14_34f_biosparam, \
+ this_id: 7, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING \
+ }
+
+#endif
+
+#endif
diff --git a/linux/src/drivers/scsi/ultrastor.c b/linux/src/drivers/scsi/ultrastor.c
new file mode 100644
index 0000000..de82472
--- /dev/null
+++ b/linux/src/drivers/scsi/ultrastor.c
@@ -0,0 +1,1165 @@
+/*
+ * ultrastor.c Copyright (C) 1992 David B. Gentzel
+ * Low-level SCSI driver for UltraStor 14F, 24F, and 34F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F and multiple command support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (ericy@cais.com).
+ * Thanks to UltraStor for providing the necessary documentation
+ */
+
+/*
+ * TODO:
+ * 1. Find out why scatter/gather is limited to 16 requests per command.
+ * This is fixed, at least on the 24F, as of version 1.12 - CAE.
+ * 2. Look at command linking (mscp.command_link and
+ * mscp.command_link_id). (Does not work with many disks,
+ * and no performance increase. ERY).
+ * 3. Allow multiple adapters.
+ */
+
+/*
+ * NOTES:
+ * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
+ * performance SCSI-2 host adapters. They all support command queueing
+ * and scatter/gather I/O. Some of them can also emulate the standard
+ * WD1003 interface for use with OS's which don't support SCSI. Here
+ * is the scoop on the various models:
+ * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
+ * 14N - ISA HA with floppy support. I think that this is a non-DMA
+ * HA. Nothing further known.
+ * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
+ * 34F - VL-Bus Bus Master HA with floppy support (no WD1003 emulation).
+ *
+ * The 14F, 24F, and 34F are supported by this driver.
+ *
+ * Places flagged with a triple question-mark are things which are either
+ * unfinished, questionable, or wrong.
+ */
+
+/* Changes from version 1.11 alpha to 1.12
+ *
+ * Increased the size of the scatter-gather list to 33 entries for
+ * the 24F adapter (it was 16). I don't have the specs for the 14F
+ * or the 34F, so they may support larger s-g lists as well.
+ *
+ * Caleb Epstein <cae@jpmorgan.com>
+ */
+
+/* Changes from version 1.9 to 1.11
+ *
+ * Patches to bring this driver up to speed with the default kernel
+ * driver which supports only the 14F and 34F adapters. This version
+ * should compile cleanly into 0.99.13, 0.99.12 and probably 0.99.11.
+ *
+ * Fixes from Eric Youngdale to fix a few possible race conditions and
+ * several problems with bit testing operations (insufficient
+ * parentheses).
+ *
+ * Removed the ultrastor_abort() and ultrastor_reset() functions
+ * (enclosed them in #if 0 / #endif). These functions, at least on
+ * the 24F, cause the SCSI bus to do odd things and generally lead to
+ * kernel panics and machine hangs. This is like the Adaptec code.
+ *
+ * Use check/snarf_region for 14f, 34f to avoid I/O space address conflicts.
+ */
+
+/* Changes from version 1.8 to version 1.9
+ *
+ * 0.99.11 patches (cae@jpmorgan.com) */
+
+/* Changes from version 1.7 to version 1.8
+ *
+ * Better error reporting.
+ */
+
+/* Changes from version 1.6 to version 1.7
+ *
+ * Removed CSIR command code.
+ *
+ * Better race condition avoidance (xchgb function added).
+ *
+ * Set ICM and OGM status to zero at probe (24F)
+ *
+ * reset sends soft reset to UltraStor adapter
+ *
+ * reset adapter if adapter interrupts with an invalid MSCP address
+ *
+ * handle aborted command interrupt (24F)
+ *
+ */
+
+/* Changes from version 1.5 to version 1.6:
+ *
+ * Read MSCP address from ICM _before_ clearing the interrupt flag.
+ * This fixes a race condition.
+ */
+
+/* Changes from version 1.4 to version 1.5:
+ *
+ * Abort now calls done when multiple commands are enabled.
+ *
+ * Clear busy when aborted command finishes, not when abort is called.
+ *
+ * More debugging messages for aborts.
+ */
+
+/* Changes from version 1.3 to version 1.4:
+ *
+ * Enable automatic request of sense data on error (requires newer version
+ * of scsi.c to be useful).
+ *
+ * Fix PORT_OVERRIDE for 14F.
+ *
+ * Fix abort and reset to work properly (config.aborted wasn't cleared
+ * after it was tested, so after a command abort no further commands would
+ * work).
+ *
+ * Boot time test to enable SCSI bus reset (defaults to not allowing reset).
+ *
+ * Fix test for OGM busy -- the busy bit is in different places on the 24F.
+ *
+ * Release ICM slot by clearing first byte on 24F.
+ */
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+
+#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
+#include <linux/blk.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "ultrastor.h"
+#include "sd.h"
+#include <linux/stat.h>
+
+struct proc_dir_entry proc_scsi_ultrastor = {
+ PROC_SCSI_ULTRASTOR, 9, "ultrastor",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+#define FALSE 0
+#define TRUE 1
+
+#ifndef ULTRASTOR_DEBUG
+#define ULTRASTOR_DEBUG (UD_ABORT|UD_CSIR|UD_RESET)
+#endif
+
+#define VERSION "1.12"
+
+#define ARRAY_SIZE(arr) (sizeof (arr) / sizeof (arr)[0])
+
+#define PACKED __attribute__((packed))
+#define ALIGNED(x) __attribute__((aligned(x)))
+
+
+/* The 14F uses an array of 4-byte ints for its scatter/gather list.
+ The data can be unaligned, but need not be. It's easier to give
+ the list normal alignment since it doesn't need to fit into a
+ packed structure. */
+
+typedef struct {
+ unsigned int address;
+ unsigned int num_bytes;
+} ultrastor_sg_list;
+
+
+/* MailBox SCSI Command Packet. Basic command structure for communicating
+ with controller. */
+struct mscp {
+ unsigned char opcode: 3; /* type of command */
+ unsigned char xdir: 2; /* data transfer direction */
+ unsigned char dcn: 1; /* disable disconnect */
+ unsigned char ca: 1; /* use cache (if available) */
+ unsigned char sg: 1; /* scatter/gather operation */
+ unsigned char target_id: 3; /* target SCSI id */
+ unsigned char ch_no: 2; /* SCSI channel (always 0 for 14f) */
+ unsigned char lun: 3; /* logical unit number */
+ unsigned int transfer_data PACKED; /* transfer data pointer */
+ unsigned int transfer_data_length PACKED; /* length in bytes */
+ unsigned int command_link PACKED; /* for linking command chains */
+ unsigned char scsi_command_link_id; /* identifies command in chain */
+ unsigned char number_of_sg_list; /* (if sg is set) 8 bytes per list */
+ unsigned char length_of_sense_byte;
+ unsigned char length_of_scsi_cdbs; /* 6, 10, or 12 */
+ unsigned char scsi_cdbs[12]; /* SCSI commands */
+ unsigned char adapter_status; /* non-zero indicates HA error */
+ unsigned char target_status; /* non-zero indicates target error */
+ unsigned int sense_data PACKED;
+ /* The following fields are for software only. They are included in
+ the MSCP structure because they are associated with SCSI requests. */
+ void (*done)(Scsi_Cmnd *);
+ Scsi_Cmnd *SCint;
+ ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
+};
+
+
+/* Port addresses (relative to the base address) */
+#define U14F_PRODUCT_ID(port) ((port) + 0x4)
+#define CONFIG(port) ((port) + 0x6)
+
+/* Port addresses relative to the doorbell base address. */
+#define LCL_DOORBELL_MASK(port) ((port) + 0x0)
+#define LCL_DOORBELL_INTR(port) ((port) + 0x1)
+#define SYS_DOORBELL_MASK(port) ((port) + 0x2)
+#define SYS_DOORBELL_INTR(port) ((port) + 0x3)
+
+
+/* Used to store configuration info read from config i/o registers. Most of
+ this is not used yet, but might as well save it.
+
+ This structure also holds port addresses that are not at the same offset
+ on the 14F and 24F.
+
+ This structure holds all data that must be duplicated to support multiple
+ adapters. */
+
+static struct ultrastor_config
+{
+ unsigned short port_address; /* base address of card */
+ unsigned short doorbell_address; /* base address of doorbell CSRs */
+ unsigned short ogm_address; /* base address of OGM */
+ unsigned short icm_address; /* base address of ICM */
+ const void *bios_segment;
+ unsigned char interrupt: 4;
+ unsigned char dma_channel: 3;
+ unsigned char bios_drive_number: 1;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned char ha_scsi_id: 3;
+ unsigned char subversion: 4;
+ unsigned char revision;
+ /* The slot number is used to distinguish the 24F (slot != 0) from
+ the 14F and 34F (slot == 0). */
+ unsigned char slot;
+
+#ifdef PRINT_U24F_VERSION
+ volatile int csir_done;
+#endif
+
+ /* A pool of MSCP structures for this adapter, and a bitmask of
+ busy structures. (If ULTRASTOR_14F_MAX_CMDS == 1, a 1 byte
+ busy flag is used instead.) */
+
+#if ULTRASTOR_MAX_CMDS == 1
+ unsigned char mscp_busy;
+#else
+ unsigned short mscp_free;
+#endif
+ volatile unsigned char aborted[ULTRASTOR_MAX_CMDS];
+ struct mscp mscp[ULTRASTOR_MAX_CMDS];
+} config = {0};
+
+/* Set this to 1 to reset the SCSI bus on error. */
+int ultrastor_bus_reset = 0;
+
+
+/* Allowed BIOS base addresses (NULL indicates reserved) */
+static const void *const bios_segment_table[8] = {
+ NULL, (void *)0xC4000, (void *)0xC8000, (void *)0xCC000,
+ (void *)0xD0000, (void *)0xD4000, (void *)0xD8000, (void *)0xDC000,
+};
+
+/* Allowed IRQs for 14f */
+static const unsigned char interrupt_table_14f[4] = { 15, 14, 11, 10 };
+
+/* Allowed DMA channels for 14f (0 indicates reserved) */
+static const unsigned char dma_channel_table_14f[4] = { 5, 6, 7, 0 };
+
+/* Head/sector mappings allowed by 14f */
+static const struct {
+ unsigned char heads;
+ unsigned char sectors;
+} mapping_table[4] = { { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 } };
+
+#ifndef PORT_OVERRIDE
+/* ??? A probe of address 0x310 screws up NE2000 cards */
+static const unsigned short ultrastor_ports_14f[] = {
+ 0x330, 0x340, /*0x310,*/ 0x230, 0x240, 0x210, 0x130, 0x140,
+};
+#endif
+
+static void ultrastor_interrupt(int, void *, struct pt_regs *);
+static inline void build_sg_list(struct mscp *, Scsi_Cmnd *SCpnt);
+
+
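+/* With interrupts disabled, find the lowest set bit in the 16-bit free-MSCP
+   mask, clear it, and return its index; panics if the mask is empty. */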
+static inline int find_and_clear_bit_16(unsigned short *field)
+{
+ int rv;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if (*field == 0) panic("No free mscp");
+ asm("xorl %0,%0\n0:\tbsfw %1,%w0\n\tbtr %0,%1\n\tjnc 0b"
+ : "=&r" (rv), "+m" (*field));
+ restore_flags(flags);
+ return rv;
+}
+
+/* This has been re-implemented with the help of Richard Earnshaw,
+ <rwe@pegasus.esprit.ec.org> and works with gcc-2.5.8 and gcc-2.6.0.
+ The instability noted by jfc below appears to be a bug in
+ gcc-2.5.x when compiling w/o optimization. --Caleb
+
+ This asm is fragile: it doesn't work without the casts and it may
+ not work without optimization. Maybe I should add a swap builtin
+ to gcc. --jfc */
+static inline unsigned char xchgb(unsigned char reg,
+ volatile unsigned char *mem)
+{
+ __asm__ ("xchgb %0,%1" : "=q" (reg), "=m" (*mem) : "0" (reg));
+ return reg;
+}
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+
+static void log_ultrastor_abort(register struct ultrastor_config *config,
+ int command)
+{
+ static char fmt[80] = "abort %d (%x); MSCP free pool: %x;";
+ register int i;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ fmt[20 + i*2] = ' ';
+ if (! (config->mscp_free & (1 << i)))
+ fmt[21 + i*2] = '0' + config->mscp[i].target_id;
+ else
+ fmt[21 + i*2] = '-';
+ }
+ fmt[20 + ULTRASTOR_MAX_CMDS * 2] = '\n';
+ fmt[21 + ULTRASTOR_MAX_CMDS * 2] = 0;
+ printk(fmt, command, &config->mscp[command], config->mscp_free);
+ restore_flags(flags);
+}
+#endif
+
+static int ultrastor_14f_detect(Scsi_Host_Template * tpnt)
+{
+ size_t i;
+ unsigned char in_byte, version_byte = 0;
+ struct config_1 {
+ unsigned char bios_segment: 3;
+ unsigned char removable_disks_as_fixed: 1;
+ unsigned char interrupt: 2;
+ unsigned char dma_channel: 2;
+ } config_1;
+ struct config_2 {
+ unsigned char ha_scsi_id: 3;
+ unsigned char mapping_mode: 2;
+ unsigned char bios_drive_number: 1;
+ unsigned char tfr_port: 2;
+ } config_2;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: called\n");
+#endif
+
+ /* If a 24F has already been configured, don't look for a 14F. */
+ if (config.bios_segment)
+ return FALSE;
+
+#ifdef PORT_OVERRIDE
+ if(check_region(PORT_OVERRIDE, 0xc)) {
+ printk("Ultrastor I/O space already in use\n");
+ return FALSE;
+ };
+ config.port_address = PORT_OVERRIDE;
+#else
+ for (i = 0; i < ARRAY_SIZE(ultrastor_ports_14f); i++) {
+ if(check_region(ultrastor_ports_14f[i], 0x0c)) continue;
+ config.port_address = ultrastor_ports_14f[i];
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: testing port address %03X\n", config.port_address);
+#endif
+
+ in_byte = inb(U14F_PRODUCT_ID(config.port_address));
+ if (in_byte != US14F_PRODUCT_ID_0) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+ printk("US14F: detect: wrong product ID 0 - %02X\n", in_byte);
+# else
+ printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+ return FALSE;
+#else
+ continue;
+#endif
+ }
+ in_byte = inb(U14F_PRODUCT_ID(config.port_address) + 1);
+ /* Only upper nibble is significant for Product ID 1 */
+ if ((in_byte & 0xF0) != US14F_PRODUCT_ID_1) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+# ifdef PORT_OVERRIDE
+ printk("US14F: detect: wrong product ID 1 - %02X\n", in_byte);
+# else
+ printk("US14F: detect: no adapter at port %03X\n", config.port_address);
+# endif
+#endif
+#ifdef PORT_OVERRIDE
+ return FALSE;
+#else
+ continue;
+#endif
+ }
+ version_byte = in_byte;
+#ifndef PORT_OVERRIDE
+ break;
+ }
+ if (i == ARRAY_SIZE(ultrastor_ports_14f)) {
+# if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: no port address found!\n");
+# endif
+ return FALSE;
+ }
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: adapter found at port address %03X\n",
+ config.port_address);
+#endif
+
+ /* Set local doorbell mask to disallow bus reset unless
+ ultrastor_bus_reset is true. */
+ outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(config.port_address));
+
+ /* All above tests passed, must be the right thing. Get some useful
+ info. */
+
+ request_region(config.port_address, 0x0c,"ultrastor");
+ /* Register the I/O space that we use */
+
+ *(char *)&config_1 = inb(CONFIG(config.port_address + 0));
+ *(char *)&config_2 = inb(CONFIG(config.port_address + 1));
+ config.bios_segment = bios_segment_table[config_1.bios_segment];
+ config.doorbell_address = config.port_address;
+ config.ogm_address = config.port_address + 0x8;
+ config.icm_address = config.port_address + 0xC;
+ config.interrupt = interrupt_table_14f[config_1.interrupt];
+ config.ha_scsi_id = config_2.ha_scsi_id;
+ config.heads = mapping_table[config_2.mapping_mode].heads;
+ config.sectors = mapping_table[config_2.mapping_mode].sectors;
+ config.bios_drive_number = config_2.bios_drive_number;
+ config.subversion = (version_byte & 0x0F);
+ if (config.subversion == U34F)
+ config.dma_channel = 0;
+ else
+ config.dma_channel = dma_channel_table_14f[config_1.dma_channel];
+
+ if (!config.bios_segment) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: not detected.\n");
+#endif
+ return FALSE;
+ }
+
+ /* Final consistency check, verify previous info. */
+ if (config.subversion != U34F)
+ if (!config.dma_channel || !(config_2.tfr_port & 0x2)) {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: consistency check failed\n");
+#endif
+ return FALSE;
+ }
+
+ /* If we were TRULY paranoid, we could issue a host adapter inquiry
+ command here and verify the data returned. But frankly, I'm
+ exhausted! */
+
+ /* Finally! Now I'm satisfied... */
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US14F: detect: detect succeeded\n"
+ " Port address: %03X\n"
+ " BIOS segment: %05X\n"
+ " Interrupt: %u\n"
+ " DMA channel: %u\n"
+ " H/A SCSI ID: %u\n"
+ " Subversion: %u\n",
+ config.port_address, config.bios_segment, config.interrupt,
+ config.dma_channel, config.ha_scsi_id, config.subversion);
+#endif
+ tpnt->this_id = config.ha_scsi_id;
+ tpnt->unchecked_isa_dma = (config.subversion != U34F);
+
+#if ULTRASTOR_MAX_CMDS > 1
+ config.mscp_free = ~0;
+#endif
+
+ if (request_irq(config.interrupt, ultrastor_interrupt, 0, "Ultrastor", NULL)) {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ return FALSE;
+ }
+ if (config.dma_channel && request_dma(config.dma_channel,"Ultrastor")) {
+ printk("Unable to allocate DMA channel %u for UltraStor controller.\n",
+ config.dma_channel);
+ free_irq(config.interrupt, NULL);
+ return FALSE;
+ }
+ tpnt->sg_tablesize = ULTRASTOR_14F_MAX_SG;
+    printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
+ ULTRASTOR_14F_MAX_SG);
+
+ return TRUE;
+}
+
+static int ultrastor_24f_detect(Scsi_Host_Template * tpnt)
+{
+ register int i;
+ struct Scsi_Host * shpnt = NULL;
+
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US24F: detect");
+#endif
+
+ /* probe each EISA slot at slot address C80 */
+ for (i = 1; i < 15; i++)
+ {
+ unsigned char config_1, config_2;
+ unsigned short addr = (i << 12) | ULTRASTOR_24F_PORT;
+
+ if (inb(addr) != US24F_PRODUCT_ID_0 &&
+ inb(addr+1) != US24F_PRODUCT_ID_1 &&
+ inb(addr+2) != US24F_PRODUCT_ID_2)
+ continue;
+
+ config.revision = inb(addr+3);
+ config.slot = i;
+ if (! (inb(addr+4) & 1))
+ {
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("U24F: found disabled card in slot %u\n", i);
+#endif
+ continue;
+ }
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("U24F: found card in slot %u\n", i);
+#endif
+ config_1 = inb(addr + 5);
+ config.bios_segment = bios_segment_table[config_1 & 7];
+ switch(config_1 >> 4)
+ {
+ case 1:
+ config.interrupt = 15;
+ break;
+ case 2:
+ config.interrupt = 14;
+ break;
+ case 4:
+ config.interrupt = 11;
+ break;
+ case 8:
+ config.interrupt = 10;
+ break;
+ default:
+ printk("U24F: invalid IRQ\n");
+ return FALSE;
+ }
+ if (request_irq(config.interrupt, ultrastor_interrupt, 0, "Ultrastor", NULL))
+ {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ return FALSE;
+ }
+ /* BIOS addr set */
+ /* base port set */
+ config.port_address = addr;
+ config.doorbell_address = addr + 12;
+ config.ogm_address = addr + 0x17;
+ config.icm_address = addr + 0x1C;
+ config_2 = inb(addr + 7);
+ config.ha_scsi_id = config_2 & 7;
+ config.heads = mapping_table[(config_2 >> 3) & 3].heads;
+ config.sectors = mapping_table[(config_2 >> 3) & 3].sectors;
+#if (ULTRASTOR_DEBUG & UD_DETECT)
+ printk("US24F: detect: detect succeeded\n"
+ " Port address: %03X\n"
+ " BIOS segment: %05X\n"
+ " Interrupt: %u\n"
+ " H/A SCSI ID: %u\n",
+ config.port_address, config.bios_segment,
+ config.interrupt, config.ha_scsi_id);
+#endif
+ tpnt->this_id = config.ha_scsi_id;
+ tpnt->unchecked_isa_dma = 0;
+ tpnt->sg_tablesize = ULTRASTOR_24F_MAX_SG;
+
+ shpnt = scsi_register(tpnt, 0);
+ shpnt->irq = config.interrupt;
+ shpnt->dma_channel = config.dma_channel;
+ shpnt->io_port = config.port_address;
+
+#if ULTRASTOR_MAX_CMDS > 1
+ config.mscp_free = ~0;
+#endif
+ /* Mark ICM and OGM free */
+ outb(0, addr + 0x16);
+ outb(0, addr + 0x1B);
+
+ /* Set local doorbell mask to disallow bus reset unless
+ ultrastor_bus_reset is true. */
+ outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(addr+12));
+ outb(0x02, SYS_DOORBELL_MASK(addr+12));
+ printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
+ tpnt->sg_tablesize);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+int ultrastor_detect(Scsi_Host_Template * tpnt)
+{
+ tpnt->proc_dir = &proc_scsi_ultrastor;
+ return ultrastor_14f_detect(tpnt) || ultrastor_24f_detect(tpnt);
+}
+
+const char *ultrastor_info(struct Scsi_Host * shpnt)
+{
+ static char buf[64];
+
+ if (config.slot)
+ sprintf(buf, "UltraStor 24F SCSI @ Slot %u IRQ%u",
+ config.slot, config.interrupt);
+ else if (config.subversion)
+ sprintf(buf, "UltraStor 34F SCSI @ Port %03X BIOS %05X IRQ%u",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt);
+ else
+ sprintf(buf, "UltraStor 14F SCSI @ Port %03X BIOS %05X IRQ%u DMA%u",
+ config.port_address, (int)config.bios_segment,
+ config.interrupt, config.dma_channel);
+ return buf;
+}
+
+static inline void build_sg_list(register struct mscp *mscp, Scsi_Cmnd *SCpnt)
+{
+ struct scatterlist *sl;
+ long transfer_length = 0;
+ int i, max;
+
+ sl = (struct scatterlist *) SCpnt->request_buffer;
+ max = SCpnt->use_sg;
+ for (i = 0; i < max; i++) {
+ mscp->sglist[i].address = (unsigned int)sl[i].address;
+ mscp->sglist[i].num_bytes = sl[i].length;
+ transfer_length += sl[i].length;
+ }
+ mscp->number_of_sg_list = max;
+ mscp->transfer_data = (unsigned int)mscp->sglist;
+ /* ??? May not be necessary. Docs are unclear as to whether transfer
+ length field is ignored or whether it should be set to the total
+ number of bytes of the transfer. */
+ mscp->transfer_data_length = transfer_length;
+}
+
+int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ register struct mscp *my_mscp;
+#if ULTRASTOR_MAX_CMDS > 1
+ int mscp_index;
+#endif
+ unsigned int status;
+ unsigned long flags;
+
+ /* Next test is for debugging; "can't happen" */
+ if ((config.mscp_free & ((1U << ULTRASTOR_MAX_CMDS) - 1)) == 0)
+ panic("ultrastor_queuecommand: no free MSCP\n");
+ mscp_index = find_and_clear_bit_16(&config.mscp_free);
+
+ /* Has the command been aborted? */
+ if (xchgb(0xff, &config.aborted[mscp_index]) != 0)
+ {
+ status = DID_ABORT << 16;
+ goto aborted;
+ }
+
+ my_mscp = &config.mscp[mscp_index];
+
+#if 1
+ /* This way is faster. */
+ *(unsigned char *)my_mscp = OP_SCSI | (DTD_SCSI << 3);
+#else
+ my_mscp->opcode = OP_SCSI;
+ my_mscp->xdir = DTD_SCSI;
+ my_mscp->dcn = FALSE;
+#endif
+ /* Tape drives don't work properly if the cache is used. The SCSI
+ READ command for a tape doesn't have a block offset, and the adapter
+ incorrectly assumes that all reads from the tape read the same
+ blocks. Results will depend on read buffer size and other disk
+ activity.
+
+ ??? Which other device types should never use the cache? */
+ my_mscp->ca = SCpnt->device->type != TYPE_TAPE;
+ my_mscp->target_id = SCpnt->target;
+ my_mscp->ch_no = 0;
+ my_mscp->lun = SCpnt->lun;
+ if (SCpnt->use_sg) {
+ /* Set scatter/gather flag in SCSI command packet */
+ my_mscp->sg = TRUE;
+ build_sg_list(my_mscp, SCpnt);
+ } else {
+ /* Unset scatter/gather flag in SCSI command packet */
+ my_mscp->sg = FALSE;
+ my_mscp->transfer_data = (unsigned int)SCpnt->request_buffer;
+ my_mscp->transfer_data_length = SCpnt->request_bufflen;
+ }
+ my_mscp->command_link = 0; /*???*/
+ my_mscp->scsi_command_link_id = 0; /*???*/
+ my_mscp->length_of_sense_byte = sizeof SCpnt->sense_buffer;
+ my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
+ memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
+ my_mscp->adapter_status = 0;
+ my_mscp->target_status = 0;
+ my_mscp->sense_data = (unsigned int)&SCpnt->sense_buffer;
+ my_mscp->done = done;
+ my_mscp->SCint = SCpnt;
+ SCpnt->host_scribble = (unsigned char *)my_mscp;
+
+ /* Find free OGM slot. On 24F, look for OGM status byte == 0.
+ On 14F and 34F, wait for local interrupt pending flag to clear. */
+
+ retry:
+ if (config.slot)
+ while (inb(config.ogm_address - 1) != 0 &&
+ config.aborted[mscp_index] == 0xff) barrier();
+
+ /* else??? */
+
+ while ((inb(LCL_DOORBELL_INTR(config.doorbell_address)) &
+ (config.slot ? 2 : 1))
+ && config.aborted[mscp_index] == 0xff) barrier();
+
+ /* To avoid race conditions, make the code to write to the adapter
+ atomic. This simplifies the abort code. */
+
+ save_flags(flags);
+ cli();
+
+ if (inb(LCL_DOORBELL_INTR(config.doorbell_address)) &
+ (config.slot ? 2 : 1))
+ {
+ restore_flags(flags);
+ goto retry;
+ }
+
+ status = xchgb(0, &config.aborted[mscp_index]);
+ if (status != 0xff) {
+ restore_flags(flags);
+
+#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
+ printk("USx4F: queuecommand: aborted\n");
+#if ULTRASTOR_MAX_CMDS > 1
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+#endif
+ status <<= 16;
+
+ aborted:
+ set_bit(mscp_index, &config.mscp_free);
+ /* If the driver queues commands, call the done proc here. Otherwise
+ return an error. */
+#if ULTRASTOR_MAX_CMDS > 1
+ SCpnt->result = status;
+ done(SCpnt);
+ return 0;
+#else
+ return status;
+#endif
+ }
+
+ /* Store pointer in OGM address bytes */
+ outl((unsigned int)my_mscp, config.ogm_address);
+
+ /* Issue OGM interrupt */
+ if (config.slot) {
+ /* Write OGM command register on 24F */
+ outb(1, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+ } else {
+ outb(0x1, LCL_DOORBELL_INTR(config.doorbell_address));
+ }
+
+ restore_flags(flags);
+
+#if (ULTRASTOR_DEBUG & UD_COMMAND)
+ printk("USx4F: queuecommand: returning\n");
+#endif
+
+ return 0;
+}
+
+/* This code must deal with 2 cases:
+
+ 1. The command has not been written to the OGM. In this case, set
+ the abort flag and return.
+
+ 2. The command has been written to the OGM and is stuck somewhere in
+ the adapter.
+
+ 2a. On a 24F, ask the adapter to abort the command. It will interrupt
+ when it does.
+
+ 2b. Call the command's done procedure.
+
+ */
+
+int ultrastor_abort(Scsi_Cmnd *SCpnt)
+{
+#if ULTRASTOR_DEBUG & UD_ABORT
+ char out[108];
+ unsigned char icm_status = 0, ogm_status = 0;
+ unsigned int icm_addr = 0, ogm_addr = 0;
+#endif
+ unsigned int mscp_index;
+ unsigned char old_aborted;
+ void (*done)(Scsi_Cmnd *);
+
+ if(config.slot)
+ return SCSI_ABORT_SNOOZE; /* Do not attempt an abort for the 24f */
+
+ /* Simple consistency checking */
+ if(!SCpnt->host_scribble)
+ return SCSI_ABORT_NOT_RUNNING;
+
+ mscp_index = ((struct mscp *)SCpnt->host_scribble) - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS)
+ panic("Ux4F aborting invalid MSCP");
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.slot)
+ {
+ int port0 = (config.slot << 12) | 0xc80;
+ int i;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ strcpy(out, "OGM %d:%x ICM %d:%x ports: ");
+ for (i = 0; i < 16; i++)
+ {
+ unsigned char p = inb(port0 + i);
+ out[28 + i * 3] = "0123456789abcdef"[p >> 4];
+ out[29 + i * 3] = "0123456789abcdef"[p & 15];
+ out[30 + i * 3] = ' ';
+ }
+ out[28 + i * 3] = '\n';
+ out[29 + i * 3] = 0;
+ ogm_status = inb(port0 + 22);
+ ogm_addr = inl(port0 + 23);
+ icm_status = inb(port0 + 27);
+ icm_addr = inl(port0 + 28);
+ restore_flags(flags);
+ }
+
+ /* First check to see if an interrupt is pending. I suspect the SiS
+     chipset loses interrupts.  (I also suspect it mangles data, but
+     one bug at a time...) */
+ if (config.slot ? inb(config.icm_address - 1) == 2 :
+ (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+ {
+ unsigned long flags;
+ save_flags(flags);
+ printk("Ux4F: abort while completed command pending\n");
+ restore_flags(flags);
+ cli();
+ ultrastor_interrupt(0, NULL, NULL);
+ restore_flags(flags);
+ return SCSI_ABORT_SUCCESS; /* FIXME - is this correct? -ERY */
+ }
+#endif
+
+ old_aborted = xchgb(DID_ABORT, &config.aborted[mscp_index]);
+
+ /* aborted == 0xff is the signal that queuecommand has not yet sent
+ the command. It will notice the new abort flag and fail. */
+ if (old_aborted == 0xff)
+ return SCSI_ABORT_SUCCESS;
+
+ /* On 24F, send an abort MSCP request. The adapter will interrupt
+ and the interrupt handler will call done. */
+ if (config.slot && inb(config.ogm_address - 1) == 0)
+ {
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ outl((int)&config.mscp[mscp_index], config.ogm_address);
+ inb(0xc80); /* delay */
+ outb(0x80, config.ogm_address - 1);
+ outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+ printk(out, ogm_status, ogm_addr, icm_status, icm_addr);
+#endif
+ restore_flags(flags);
+ return SCSI_ABORT_PENDING;
+ }
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ log_ultrastor_abort(&config, mscp_index);
+#endif
+
+ /* Can't request a graceful abort. Either this is not a 24F or
+ the OGM is busy. Don't free the command -- the adapter might
+ still be using it. Setting SCint = 0 causes the interrupt
+ handler to ignore the command. */
+
+ /* FIXME - devices that implement soft resets will still be running
+ the command after a bus reset. We would probably rather leave
+ the command in the queue. The upper level code will automatically
+ leave the command in the active state instead of requeueing it. ERY */
+
+#if ULTRASTOR_DEBUG & UD_ABORT
+ if (config.mscp[mscp_index].SCint != SCpnt)
+ printk("abort: command mismatch, %p != %p\n",
+ config.mscp[mscp_index].SCint, SCpnt);
+#endif
+ if (config.mscp[mscp_index].SCint == 0)
+ return SCSI_ABORT_NOT_RUNNING;
+
+ if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
+ config.mscp[mscp_index].SCint = 0;
+ done = config.mscp[mscp_index].done;
+ config.mscp[mscp_index].done = 0;
+ SCpnt->result = DID_ABORT << 16;
+ /* I worry about reentrancy in scsi.c */
+ done(SCpnt);
+
+ /* Need to set a timeout here in case command never completes. */
+ return SCSI_ABORT_SUCCESS;
+}
+
+int ultrastor_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ unsigned long flags;
+ register int i;
+#if (ULTRASTOR_DEBUG & UD_RESET)
+ printk("US14F: reset: called\n");
+#endif
+
+ if(config.slot)
+ return SCSI_RESET_PUNT; /* Do not attempt a reset for the 24f */
+
+ save_flags(flags);
+ cli();
+
+ /* Reset the adapter and SCSI bus. The SCSI bus reset can be
+ inhibited by clearing ultrastor_bus_reset before probe. */
+ outb(0xc0, LCL_DOORBELL_INTR(config.doorbell_address));
+ if (config.slot)
+ {
+ outb(0, config.ogm_address - 1);
+ outb(0, config.icm_address - 1);
+ }
+
+#if ULTRASTOR_MAX_CMDS == 1
+ if (config.mscp_busy && config.mscp->done && config.mscp->SCint)
+ {
+ config.mscp->SCint->result = DID_RESET << 16;
+ config.mscp->done(config.mscp->SCint);
+ }
+ config.mscp->SCint = 0;
+#else
+ for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
+ {
+ if (! (config.mscp_free & (1 << i)) &&
+ config.mscp[i].done && config.mscp[i].SCint)
+ {
+ config.mscp[i].SCint->result = DID_RESET << 16;
+ config.mscp[i].done(config.mscp[i].SCint);
+ config.mscp[i].done = 0;
+ }
+ config.mscp[i].SCint = 0;
+ }
+#endif
+
+ /* FIXME - if the device implements soft resets, then the command
+ will still be running. ERY */
+
+ memset((unsigned char *)config.aborted, 0, sizeof config.aborted);
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = 0;
+#else
+ config.mscp_free = ~0;
+#endif
+
+ restore_flags(flags);
+ return SCSI_RESET_SUCCESS;
+
+}
+
+int ultrastor_biosparam(Disk * disk, kdev_t dev, int * dkinfo)
+{
+ int size = disk->capacity;
+ unsigned int s = config.heads * config.sectors;
+
+ dkinfo[0] = config.heads;
+ dkinfo[1] = config.sectors;
+ dkinfo[2] = size / s; /* Ignore partial cylinders */
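+ /* Illustration (added note, not in the original source): a hypothetical
+    drive of 1057000 blocks on an adapter reporting 64 heads and 32
+    sectors gives s = 2048 and dkinfo[2] = 1057000 / 2048 = 516; the
+    final partial cylinder is simply dropped. */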
+#if 0
+ if (dkinfo[2] > 1024)
+ dkinfo[2] = 1024;
+#endif
+ return 0;
+}
+
+static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned int status;
+#if ULTRASTOR_MAX_CMDS > 1
+ unsigned int mscp_index;
+#endif
+ register struct mscp *mscp;
+ void (*done)(Scsi_Cmnd *);
+ Scsi_Cmnd *SCtmp;
+
+#if ULTRASTOR_MAX_CMDS == 1
+ mscp = &config.mscp[0];
+#else
+ mscp = (struct mscp *)inl(config.icm_address);
+ mscp_index = mscp - config.mscp;
+ if (mscp_index >= ULTRASTOR_MAX_CMDS) {
+ printk("Ux4F interrupt: bad MSCP address %x\n", (unsigned int) mscp);
+ /* A command has been lost. Reset and report an error
+ for all commands. */
+ ultrastor_reset(NULL, 0);
+ return;
+ }
+#endif
+
+ /* Clean ICM slot (set ICMINT bit to 0) */
+ if (config.slot) {
+ unsigned char icm_status = inb(config.icm_address - 1);
+#if ULTRASTOR_DEBUG & (UD_INTERRUPT|UD_ERROR|UD_ABORT)
+ if (icm_status != 1 && icm_status != 2)
+ printk("US24F: ICM status %x for MSCP %d (%x)\n", icm_status,
+ mscp_index, (unsigned int) mscp);
+#endif
+ /* The manual says clear interrupt then write 0 to ICM status.
+ This seems backwards, but I'll do it anyway. --jfc */
+ outb(2, SYS_DOORBELL_INTR(config.doorbell_address));
+ outb(0, config.icm_address - 1);
+ if (icm_status == 4) {
+ printk("UltraStor abort command failed\n");
+ return;
+ }
+ if (icm_status == 3) {
+ void (*done)(Scsi_Cmnd *) = mscp->done;
+ if (done) {
+ mscp->done = 0;
+ mscp->SCint->result = DID_ABORT << 16;
+ done(mscp->SCint);
+ }
+ return;
+ }
+ } else {
+ outb(1, SYS_DOORBELL_INTR(config.doorbell_address));
+ }
+
+ SCtmp = mscp->SCint;
+ mscp->SCint = NULL;
+
+ if (SCtmp == 0)
+ {
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
+#endif
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+ config.aborted[mscp_index] = 0;
+ return;
+ }
+
+ /* Save done locally and zero before calling. This is needed as
+ once we call done, we may get another command queued before this
+ interrupt service routine can return. */
+ done = mscp->done;
+ mscp->done = 0;
+
+ /* Let the higher levels know that we're done */
+ switch (mscp->adapter_status)
+ {
+ case 0:
+ status = DID_OK << 16;
+ break;
+ case 0x01: /* invalid command */
+ case 0x02: /* invalid parameters */
+ case 0x03: /* invalid data list */
+ default:
+ status = DID_ERROR << 16;
+ break;
+ case 0x84: /* SCSI bus abort */
+ status = DID_ABORT << 16;
+ break;
+ case 0x91:
+ status = DID_TIME_OUT << 16;
+ break;
+ }
+
+ SCtmp->result = status | mscp->target_status;
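+ /* Illustration (added note, not in the original source): the result
+    word assembled above packs the host code into bits 16-23 and the
+    SCSI target status into the low byte, so a clean completion that
+    returned CHECK CONDITION ends up as (DID_OK << 16) | 0x02. */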
+
+ SCtmp->host_scribble = 0;
+
+ /* Free up mscp block for next command */
+#if ULTRASTOR_MAX_CMDS == 1
+ config.mscp_busy = FALSE;
+#else
+ set_bit(mscp_index, &config.mscp_free);
+#endif
+
+#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
+ if (config.aborted[mscp_index])
+ printk("Ux4 interrupt: MSCP %d (%x) aborted = %d\n",
+ mscp_index, (unsigned int) mscp, config.aborted[mscp_index]);
+#endif
+ config.aborted[mscp_index] = 0;
+
+ if (done)
+ done(SCtmp);
+ else
+ printk("US14F: interrupt: unexpected interrupt\n");
+
+ if (config.slot ? inb(config.icm_address - 1) :
+ (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
+#if (ULTRASTOR_DEBUG & UD_MULTI_CMD)
+ printk("Ux4F: multiple commands completed\n");
+#else
+ ;
+#endif
+
+#if (ULTRASTOR_DEBUG & UD_INTERRUPT)
+ printk("USx4F: interrupt: returning\n");
+#endif
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = ULTRASTOR_14F;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/ultrastor.h b/linux/src/drivers/scsi/ultrastor.h
new file mode 100644
index 0000000..7a40acc
--- /dev/null
+++ b/linux/src/drivers/scsi/ultrastor.h
@@ -0,0 +1,102 @@
+/*
+ * ultrastor.c (C) 1991 David B. Gentzel
+ * Low-level scsi driver for UltraStor 14F
+ * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
+ * (gentzel@nova.enet.dec.com)
+ * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
+ * 24F support by John F. Carr (jfc@athena.mit.edu)
+ * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
+ * Eric Youngdale (eric@tantalus.nrl.navy.mil).
+ * Thanks to UltraStor for providing the necessary documentation
+ */
+
+#ifndef _ULTRASTOR_H
+#define _ULTRASTOR_H
+#include <linux/kdev_t.h>
+
+int ultrastor_detect(Scsi_Host_Template *);
+const char *ultrastor_info(struct Scsi_Host * shpnt);
+int ultrastor_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int ultrastor_abort(Scsi_Cmnd *);
+int ultrastor_reset(Scsi_Cmnd *, unsigned int);
+int ultrastor_biosparam(Disk *, kdev_t, int *);
+
+
+#define ULTRASTOR_14F_MAX_SG 16
+#define ULTRASTOR_24F_MAX_SG 33
+
+#define ULTRASTOR_MAX_CMDS_PER_LUN 5
+#define ULTRASTOR_MAX_CMDS 16
+
+#define ULTRASTOR_24F_PORT 0xC80
+
+
+#define ULTRASTOR_14F { NULL, NULL, /* Ptr for modules*/ \
+ NULL, \
+ NULL, \
+ "UltraStor 14F/24F/34F", \
+ ultrastor_detect, \
+ NULL, /* Release */ \
+ ultrastor_info, \
+ 0, \
+ ultrastor_queuecommand, \
+ ultrastor_abort, \
+ ultrastor_reset, \
+ 0, \
+ ultrastor_biosparam, \
+ ULTRASTOR_MAX_CMDS, \
+ 0, \
+ ULTRASTOR_14F_MAX_SG, \
+ ULTRASTOR_MAX_CMDS_PER_LUN, \
+ 0, \
+ 1, \
+ ENABLE_CLUSTERING }
+
+
+#ifdef ULTRASTOR_PRIVATE
+
+#define UD_ABORT 0x0001
+#define UD_COMMAND 0x0002
+#define UD_DETECT 0x0004
+#define UD_INTERRUPT 0x0008
+#define UD_RESET 0x0010
+#define UD_MULTI_CMD 0x0020
+#define UD_CSIR 0x0040
+#define UD_ERROR 0x0080
+
+/* #define PORT_OVERRIDE 0x330 */
+
+/* Values for the PRODUCT_ID ports for the 14F */
+#define US14F_PRODUCT_ID_0 0x56
+#define US14F_PRODUCT_ID_1 0x40 /* NOTE: Only upper nibble is used */
+
+#define US24F_PRODUCT_ID_0 0x56
+#define US24F_PRODUCT_ID_1 0x63
+#define US24F_PRODUCT_ID_2 0x02
+
+/* Subversion values */
+#define U14F 0
+#define U34F 1
+
+/* MSCP field values */
+
+/* Opcode */
+#define OP_HOST_ADAPTER 0x1
+#define OP_SCSI 0x2
+#define OP_RESET 0x4
+
+/* Data Transfer Direction */
+#define DTD_SCSI 0x0
+#define DTD_IN 0x1
+#define DTD_OUT 0x2
+#define DTD_NONE 0x3
+
+/* Host Adapter command subcodes */
+#define HA_CMD_INQUIRY 0x1
+#define HA_CMD_SELF_DIAG 0x2
+#define HA_CMD_READ_BUFF 0x3
+#define HA_CMD_WRITE_BUFF 0x4
+
+#endif
+
+#endif
diff --git a/linux/src/drivers/scsi/wd7000.c b/linux/src/drivers/scsi/wd7000.c
new file mode 100644
index 0000000..08d3ac3
--- /dev/null
+++ b/linux/src/drivers/scsi/wd7000.c
@@ -0,0 +1,1452 @@
+/* $Id: wd7000.c,v 1.1 1999/04/26 05:55:18 tb Exp $
+ * linux/drivers/scsi/wd7000.c
+ *
+ * Copyright (C) 1992 Thomas Wuensche
+ * closely related to the aha1542 driver from Tommy Thorn
+ * ( as close as different hardware allows on a lowlevel-driver :-) )
+ *
+ * Revised (and renamed) by John Boyd <boyd@cis.ohio-state.edu> to
+ * accommodate Eric Youngdale's modifications to scsi.c. Nov 1992.
+ *
+ * Additional changes to support scatter/gather. Dec. 1992. tw/jb
+ *
+ * No longer tries to reset SCSI bus at boot (it wasn't working anyway).
+ * Rewritten to support multiple host adapters.
+ * Miscellaneous cleanup.
+ * So far, still doesn't do reset or abort correctly, since I have no idea
+ * how to do them with this board (8^(. Jan 1994 jb
+ *
+ * This driver now supports both of the two standard configurations (per
+ * the 3.36 Owner's Manual, my latest reference) by the same method as
+ * before; namely, by looking for a BIOS signature. Thus, the location of
+ * the BIOS signature determines the board configuration. Until I have
+ * time to do something more flexible, users should stick to one of the
+ * following:
+ *
+ * Standard configuration for single-adapter systems:
+ * - BIOS at CE00h
+ * - I/O base address 350h
+ * - IRQ level 15
+ * - DMA channel 6
+ * Standard configuration for a second adapter in a system:
+ * - BIOS at C800h
+ * - I/O base address 330h
+ * - IRQ level 11
+ * - DMA channel 5
+ *
+ * Anyone who can recompile the kernel is welcome to add others as need
+ * arises, but unpredictable results may occur if there are conflicts.
+ * In any event, if there are multiple adapters in a system, they MUST
+ * use different I/O bases, IRQ levels, and DMA channels, since they will be
+ * indistinguishable (and in direct conflict) otherwise.
+ *
+ * As a point of information, the NO_OP command toggles the CMD_RDY bit
+ * of the status port, and this fact could be used as a test for the I/O
+ * base address (or more generally, board detection). There is an interrupt
+ * status port, so IRQ probing could also be done. I suppose the full
+ * DMA diagnostic could be used to detect the DMA channel being used. I
+ * haven't done any of this, though, because I think there's too much of
+ * a chance that such explorations could be destructive, if some other
+ * board's resources are used inadvertently. So, call me a wimp, but I
+ * don't want to try it. The only kind of exploration I trust is memory
+ * exploration, since it's more certain that reading memory won't be
+ * destructive.
+ *
+ * More to my liking would be a LILO boot command line specification, such
+ * as is used by the aha152x driver (and possibly others). I'll look into
+ * it, as I have time...
+ *
+ * I get mail occasionally from people who either are using or are
+ * considering using a WD7000 with Linux. There is a variety of
+ * nomenclature describing WD7000's. To the best of my knowledge, the
+ * following is a brief summary (from an old WD doc - I don't work for
+ * them or anything like that):
+ *
+ * WD7000-FASST2: This is a WD7000 board with the real-mode SST ROM BIOS
+ * installed. Last I heard, the BIOS was actually done by Columbia
+ * Data Products. The BIOS is only used by this driver (and thus
+ * by Linux) to identify the board; none of it can be executed under
+ * Linux.
+ *
+ * WD7000-ASC: This is the original adapter board, with or without BIOS.
+ * The board uses a WD33C93 or WD33C93A SBIC, which in turn is
+ * controlled by an onboard Z80 processor. The board interface
+ * visible to the host CPU is defined effectively by the Z80's
+ * firmware, and it is this firmware's revision level that is
+ * determined and reported by this driver. (The version of the
+ * on-board BIOS is of no interest whatsoever.) The host CPU has
+ * no access to the SBIC; hence the fact that it is a WD33C93 is
+ * also of no interest to this driver.
+ *
+ * WD7000-AX:
+ * WD7000-MX:
+ * WD7000-EX: These are newer versions of the WD7000-ASC. The -ASC is
+ * largely built from discrete components; these boards use more
+ * integration. The -AX is an ISA bus board (like the -ASC),
+ * the -MX is an MCA (i.e., PS/2) bus board, and the -EX is an
+ * EISA bus board.
+ *
+ * At the time of my documentation, the -?X boards were "future" products,
+ * and were not yet available. However, I vaguely recall that Thomas
+ * Wuensche had an -AX, so I believe at least it is supported by this
+ * driver. I have no personal knowledge of either -MX or -EX boards.
+ *
+ * P.S. Just recently, I've discovered (directly from WD and Future
+ * Domain) that all but the WD7000-EX have been out of production for
+ * two years now. FD has production rights to the 7000-EX, and are
+ * producing it under a new name, and with a new BIOS. If anyone has
+ * one of the FD boards, it would be nice to come up with a signature
+ * for it.
+ * J.B. Jan 1994.
+ *
+ *
+ * Revisions by Miroslav Zagorac <zaga@fly.cc.fer.hr>
+ *
+ * -- 08/24/1996. --------------------------------------------------------------
+ * Enhancement for wd7000_detect function has been made, so you don't have
+ * to enter BIOS ROM address in initialisation data (see struct Config).
+ * We cannot detect IRQ, DMA and I/O base address for now, so we have to
+ * enter them as arguments while wd_7000 is detected. If someone has IRQ,
+ * DMA or an I/O base address set to some other value, he can enter them in
+ * a configuration without any problem.
+ * Also I wrote a function wd7000_setup, so now you can enter WD-7000
+ * definition as kernel arguments, as in lilo.conf:
+ *
+ * append="wd7000=IRQ,DMA,IO"
+ *
+ * PS: If card BIOS ROM is disabled, function wd7000_detect now will recognize
+ * adapter, unlike the old one. Anyway, BIOS ROM from WD7000 adapter is
+ * useless for Linux. B^)
+ *
+ * -- 09/06/1996. --------------------------------------------------------------
+ * Auto detecting of an I/O base address from wd7000_detect function is
+ * removed, some little bugs too...
+ *
+ * Thanks to Roger Scott for driver debugging.
+ *
+ * -- 06/07/1997. --------------------------------------------------------------
+ * Added support for /proc file system (/proc/scsi/wd7000/[0...] files).
+ * Now, the driver can handle hard disks with capacity >1GB.
+ *
+ * -- 01/15/1998. --------------------------------------------------------------
+ * Added support for BUS_ON and BUS_OFF parameters in config line.
+ * Miscellaneous cleanups. Syntax of the append line is changed to:
+ *
+ * append="wd7000=IRQ,DMA,IO[,BUS_ON[,BUS_OFF]]"
+ *
+ * , where BUS_ON and BUS_OFF are times in nanoseconds.
+ *
+ * -- 03/01/1998. --------------------------------------------------------------
+ * The WD7000 driver now works on kernels >= 2.1.x
+ *
+ * -- 06/11/1998. --------------------------------------------------------------
+ * Ugly init_scbs, alloc_scbs and free_scb functions are changed with
+ * scbs_init, scb_alloc and scb_free. Now, source code is identical on
+ * 2.0.xx and 2.1.xx kernels.
+ * WD7000 specific definitions are moved from this file to wd7000.h.
+ *
+ */
+#ifdef MODULE
+# include <linux/module.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+# include <asm/spinlock.h>
+#endif
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/head.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <asm/system.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include <linux/version.h>
+#include <linux/stat.h>
+#include "scsi.h"
+#include "hosts.h"
+#include "sd.h"
+#include <scsi/scsicam.h>
+
+#undef WD7000_DEBUG /* general debug */
+#define WD7000_DEFINES /* This must be defined! */
+
+#include "wd7000.h"
+
+
+struct proc_dir_entry proc_scsi_wd7000 =
+{
+ PROC_SCSI_7000FASST,
+ 6,
+ "wd7000",
+ S_IFDIR | S_IRUGO | S_IXUGO,
+ 2
+};
+
+/*
+ * (linear) base address for ROM BIOS
+ */
+static const long wd7000_biosaddr[] = {
+ 0xc0000, 0xc2000, 0xc4000, 0xc6000, 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0xde000
+};
+#define NUM_ADDRS (sizeof (wd7000_biosaddr) / sizeof (long))
+
+static const ushort wd7000_iobase[] = {
+ 0x0300, 0x0308, 0x0310, 0x0318, 0x0320, 0x0328, 0x0330, 0x0338,
+ 0x0340, 0x0348, 0x0350, 0x0358, 0x0360, 0x0368, 0x0370, 0x0378,
+ 0x0380, 0x0388, 0x0390, 0x0398, 0x03a0, 0x03a8, 0x03b0, 0x03b8,
+ 0x03c0, 0x03c8, 0x03d0, 0x03d8, 0x03e0, 0x03e8, 0x03f0, 0x03f8
+};
+#define NUM_IOPORTS (sizeof (wd7000_iobase) / sizeof (ushort))
+
+static const short wd7000_irq[] = { 3, 4, 5, 7, 9, 10, 11, 12, 14, 15 };
+#define NUM_IRQS (sizeof (wd7000_irq) / sizeof (short))
+
+static const short wd7000_dma[] = { 5, 6, 7 };
+#define NUM_DMAS (sizeof (wd7000_dma) / sizeof (short))
+
+/*
+ * The following is set up by wd7000_detect, and used thereafter by
+ * wd7000_intr_handle to map the irq level to the corresponding Adapter.
+ * Note that if SA_INTERRUPT is not used, wd7000_intr_handle must be
+ * changed to pick up the IRQ level correctly.
+ */
+static struct Scsi_Host *wd7000_host[IRQS];
+
+/*
+ * Add here your configuration...
+ */
+static Config configs[] =
+{
+ { 15, 6, 0x350, BUS_ON, BUS_OFF }, /* defaults for single adapter */
+ { 11, 5, 0x320, BUS_ON, BUS_OFF }, /* defaults for second adapter */
+ { 7, 6, 0x350, BUS_ON, BUS_OFF }, /* My configuration (Zaga) */
+ { -1, -1, 0x0, BUS_ON, BUS_OFF } /* Empty slot */
+};
+#define NUM_CONFIGS (sizeof(configs)/sizeof(Config))
+
+static const Signature signatures[] =
+{
+ {"SSTBIOS", 0x0000d, 7} /* "SSTBIOS" @ offset 0x0000d */
+};
+#define NUM_SIGNATURES (sizeof(signatures)/sizeof(Signature))
+
+/*
+ * Driver SCB structure pool.
+ *
+ * The SCBs declared here are shared by all host adapters; hence, this
+ * structure is not part of the Adapter structure.
+ */
+static Scb scbs[MAX_SCBS];
+
+
+/*
+ * END of data/declarations - code follows.
+ */
+static void setup_error (char *mesg, int *ints)
+{
+ if (ints[0] == 3)
+ printk ("wd7000_setup: \"wd7000=%d,%d,0x%x\" -> %s\n",
+ ints[1], ints[2], ints[3], mesg);
+ else if (ints[0] == 4)
+ printk ("wd7000_setup: \"wd7000=%d,%d,0x%x,%d\" -> %s\n",
+ ints[1], ints[2], ints[3], ints[4], mesg);
+ else
+ printk ("wd7000_setup: \"wd7000=%d,%d,0x%x,%d,%d\" -> %s\n",
+ ints[1], ints[2], ints[3], ints[4], ints[5], mesg);
+}
+
+
+/*
+ * Note: You can now set these options from the kernel's "command line".
+ * The syntax is:
+ *
+ * wd7000=<IRQ>,<DMA>,<IO>[,<BUS_ON>[,<BUS_OFF>]]
+ *
+ * , where BUS_ON and BUS_OFF are in nanoseconds. BIOS default values
+ * are 8000ns for BUS_ON and 1875ns for BUS_OFF.
+ *
+ * eg:
+ * wd7000=7,6,0x350
+ *
+ * will configure the driver for a WD-7000 controller
+ * using IRQ 7 with DMA channel 6, at IO base address 0x350.
+ */
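+/*
+ * Illustration (added note, not in the original driver): the kernel's
+ * option parser delivers the comma-separated values in ints[], with
+ * ints[0] holding the count.  "wd7000=15,6,0x350" therefore arrives
+ * here as ints[] = { 3, 15, 6, 0x350 }, and "wd7000=15,6,0x350,8000,1875"
+ * as { 5, 15, 6, 0x350, 8000, 1875 }; the BUS_ON/BUS_OFF values are then
+ * stored in 125ns units (8000 / 125 = 64, 1875 / 125 = 15).
+ */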
+void wd7000_setup (char *str, int *ints)
+{
+ static short wd7000_card_num = 0;
+ short i, j;
+
+ if (wd7000_card_num >= NUM_CONFIGS) {
+ printk ("%s: Too many \"wd7000=\" configurations in "
+ "command line!\n", __FUNCTION__);
+ return;
+ }
+
+ if ((ints[0] < 3) || (ints[0] > 5))
+ printk ("%s: Error in command line! "
+ "Usage: wd7000=<IRQ>,<DMA>,<IO>[,<BUS_ON>[,<BUS_OFF>]]\n",
+ __FUNCTION__);
+ else {
+ for (i = 0; i < NUM_IRQS; i++)
+ if (ints[1] == wd7000_irq[i])
+ break;
+
+ if (i == NUM_IRQS) {
+ setup_error ("invalid IRQ.", ints);
+ return;
+ }
+ else
+ configs[wd7000_card_num].irq = ints[1];
+
+ for (i = 0; i < NUM_DMAS; i++)
+ if (ints[2] == wd7000_dma[i])
+ break;
+
+ if (i == NUM_DMAS) {
+ setup_error ("invalid DMA channel.", ints);
+ return;
+ }
+ else
+ configs[wd7000_card_num].dma = ints[2];
+
+ for (i = 0; i < NUM_IOPORTS; i++)
+ if (ints[3] == wd7000_iobase[i])
+ break;
+
+ if (i == NUM_IOPORTS) {
+ setup_error ("invalid I/O base address.", ints);
+ return;
+ }
+ else
+ configs[wd7000_card_num].iobase = ints[3];
+
+ if (ints[0] > 3) {
+ if ((ints[4] < 500) || (ints[4] > 31875)) {
+ setup_error ("BUS_ON value is out of range (500 to 31875 nanoseconds)!",
+ ints);
+ configs[wd7000_card_num].bus_on = BUS_ON;
+ }
+ else
+ configs[wd7000_card_num].bus_on = ints[4] / 125;
+ }
+ else
+ configs[wd7000_card_num].bus_on = BUS_ON;
+
+ if (ints[0] > 4) {
+ if ((ints[5] < 500) || (ints[5] > 31875)) {
+ setup_error ("BUS_OFF value is out of range (500 to 31875 nanoseconds)!",
+ ints);
+ configs[wd7000_card_num].bus_off = BUS_OFF;
+ }
+ else
+ configs[wd7000_card_num].bus_off = ints[5] / 125;
+ }
+ else
+ configs[wd7000_card_num].bus_off = BUS_OFF;
+
+ if (wd7000_card_num) {
+ for (i = 0; i < (wd7000_card_num - 1); i++)
+ for (j = i + 1; j < wd7000_card_num; j++)
+ if (configs[i].irq == configs[j].irq) {
+ setup_error ("duplicated IRQ!", ints);
+ return;
+ }
+ else if (configs[i].dma == configs[j].dma) {
+ setup_error ("duplicated DMA channel!", ints);
+ return;
+ }
+ else if (configs[i].iobase == configs[j].iobase) {
+ setup_error ("duplicated I/O base address!", ints);
+ return;
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ printk ("%s: IRQ=%d, DMA=%d, I/O=0x%x, BUS_ON=%dns, BUS_OFF=%dns\n",
+ __FUNCTION__,
+ configs[wd7000_card_num].irq,
+ configs[wd7000_card_num].dma,
+ configs[wd7000_card_num].iobase,
+ configs[wd7000_card_num].bus_on * 125,
+ configs[wd7000_card_num].bus_off * 125);
+#endif
+
+ wd7000_card_num++;
+ }
+}
+
+
+/*
+ * Since they're used a lot, I've redone the following from the macros
+ * formerly in wd7000.h, hopefully to speed them up by getting rid of
+ * all the shifting (it may not matter; GCC might have done as well anyway).
+ *
+ * xany2scsi and xscsi2int were not being used, and are no longer defined.
+ * (They were simply 4-byte versions of these routines).
+ */
+static inline void any2scsi (unchar *scsi, int any)
+{
+ *scsi++ = ((i_u) any).u[2];
+ *scsi++ = ((i_u) any).u[1];
+ *scsi = ((i_u) any).u[0];
+}
+
+
+static inline int scsi2int (unchar *scsi)
+{
+ i_u result;
+
+ result.i = 0; /* clears unused bytes */
+ result.u[2] = *scsi++;
+ result.u[1] = *scsi++;
+ result.u[0] = *scsi;
+
+ return (result.i);
+}
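+/*
+ * Illustration (added note, not in the original driver): assuming i_u is
+ * the int/byte-array union declared in wd7000.h, on the little-endian
+ * i386 u[0] is the least significant byte, so any2scsi (p, 0x123456)
+ * stores p[0] = 0x12, p[1] = 0x34, p[2] = 0x56 (the low 24 bits, MSB
+ * first), and scsi2int (p) recovers 0x123456 from the same three bytes.
+ */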
+
+
+static inline void wd7000_enable_intr (Adapter *host)
+{
+ host->control |= INT_EN;
+ outb (host->control, host->iobase + ASC_CONTROL);
+}
+
+
+static inline void wd7000_enable_dma (Adapter *host)
+{
+ host->control |= DMA_EN;
+ outb (host->control, host->iobase + ASC_CONTROL);
+ set_dma_mode (host->dma, DMA_MODE_CASCADE);
+ enable_dma (host->dma);
+}
+
+
+static inline short WAIT (uint port, uint mask, uint allof, uint noneof)
+{
+ register uint WAITbits;
+ register ulong WAITtimeout = jiffies + WAITnexttimeout;
+
+ while (jiffies <= WAITtimeout) {
+ WAITbits = inb (port) & mask;
+
+ if (((WAITbits & allof) == allof) && ((WAITbits & noneof) == 0))
+ return (0);
+ }
+
+ return (1);
+}
+
+
+static inline void delay (uint how_long)
+{
+ register ulong time = jiffies + how_long;
+
+ while (jiffies < time);
+}
+
+
+static inline int wd7000_command_out (Adapter *host, unchar *cmd, int len)
+{
+ if (! WAIT (host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
+ for ( ; len--; cmd++)
+ do {
+ outb (*cmd, host->iobase + ASC_COMMAND);
+ WAIT (host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0);
+ } while (inb (host->iobase + ASC_STAT) & CMD_REJ);
+
+ return (1);
+ }
+
+ printk ("%s: WAIT failed (%d)\n", __FUNCTION__, len + 1);
+
+ return (0);
+}
+
+
+static inline void scbs_init (void)
+{
+ short i;
+
+ for (i = 0; i < MAX_SCBS; i++)
+ memset ((void *) &(scbs[i]), 0, sizeof (Scb));
+}
+
+
+static inline Scb *scb_alloc (void)
+{
+ Scb *scb = NULL;
+ ulong flags;
+ short i;
+#ifdef WD7000_DEBUG
+ short free_scbs = 0;
+#endif
+
+ save_flags (flags);
+ cli ();
+
+ for (i = 0; i < MAX_SCBS; i++)
+ if (! scbs[i].used) {
+ scbs[i].used = 1;
+ scb = &(scbs[i]);
+
+ break;
+ }
+
+#ifdef WD7000_DEBUG
+ for (i = 0; i < MAX_SCBS; i++)
+ free_scbs += scbs[i].used ? 0 : 1;
+
+ printk ("wd7000_%s: allocating scb (0x%08x), %d scbs free\n",
+ __FUNCTION__, (int) scb, free_scbs);
+#endif
+
+ restore_flags (flags);
+
+ return (scb);
+}
+
+
+static inline void scb_free (Scb *scb)
+{
+ short i;
+ ulong flags;
+
+ save_flags (flags);
+ cli ();
+
+ for (i = 0; i < MAX_SCBS; i++)
+ if (&(scbs[i]) == scb) {
+ memset ((void *) &(scbs[i]), 0, sizeof (Scb));
+
+ break;
+ }
+
+ if (i == MAX_SCBS)
+ printk ("wd7000_%s: trying to free alien scb (0x%08x)...\n",
+ __FUNCTION__, (int) scb);
+#ifdef WD7000_DEBUG
+ else
+ printk ("wd7000_%s: freeing scb (0x%08x)\n", __FUNCTION__, (int) scb);
+#endif
+
+ restore_flags (flags);
+}
+
+
+static int mail_out (Adapter *host, Scb *scbptr)
+/*
+ * Note: this can also be used for ICBs; just cast to the parm type.
+ */
+{
+ register int i, ogmb;
+ ulong flags;
+ unchar start_ogmb;
+ Mailbox *ogmbs = host->mb.ogmb;
+ int *next_ogmb = &(host->next_ogmb);
+
+#ifdef WD7000_DEBUG
+ printk ("wd7000_%s: 0x%08x", __FUNCTION__, (int) scbptr);
+#endif
+
+ /* We first look for a free outgoing mailbox */
+ save_flags (flags);
+ cli ();
+
+ ogmb = *next_ogmb;
+ for (i = 0; i < OGMB_CNT; i++) {
+ if (ogmbs[ogmb].status == 0) {
+#ifdef WD7000_DEBUG
+ printk (" using OGMB 0x%x", ogmb);
+#endif
+ ogmbs[ogmb].status = 1;
+ any2scsi ((unchar *) ogmbs[ogmb].scbptr, (int) scbptr);
+
+ *next_ogmb = (ogmb + 1) % OGMB_CNT;
+ break;
+ }
+ else
+ ogmb = (ogmb + 1) % OGMB_CNT;
+ }
+
+ restore_flags (flags);
+
+#ifdef WD7000_DEBUG
+ printk (", scb is 0x%08x", (int) scbptr);
+#endif
+
+ if (i >= OGMB_CNT) {
+ /*
+ * Alternatively, we might issue the "interrupt on free OGMB",
+ * and sleep, but it must be ensured that it isn't the init
+ * task running. Instead, this version assumes that the caller
+ * will be persistent, and try again. Since it's the adapter
+ * that marks OGMB's free, waiting even with interrupts off
+ * should work, since they are freed very quickly in most cases.
+ */
+#ifdef WD7000_DEBUG
+ printk (", no free OGMBs.\n");
+#endif
+ return (0);
+ }
+
+ wd7000_enable_intr (host);
+
+ start_ogmb = START_OGMB | ogmb;
+ wd7000_command_out (host, &start_ogmb, 1);
+
+#ifdef WD7000_DEBUG
+ printk (", awaiting interrupt.\n");
+#endif
+
+ return (1);
+}
+
+
+int make_code (uint hosterr, uint scsierr)
+{
+#ifdef WD7000_DEBUG
+ int in_error = hosterr;
+#endif
+
+ switch ((hosterr >> 8) & 0xff) {
+ case 0: /* Reserved */
+ hosterr = DID_ERROR;
+ break;
+
+ case 1: /* Command Complete, no errors */
+ hosterr = DID_OK;
+ break;
+
+ case 2: /* Command complete, error logged in scb status (scsierr) */
+ hosterr = DID_OK;
+ break;
+
+ case 4: /* Command failed to complete - timeout */
+ hosterr = DID_TIME_OUT;
+ break;
+
+ case 5: /* Command terminated; Bus reset by external device */
+ hosterr = DID_RESET;
+ break;
+
+ case 6: /* Unexpected Command Received w/ host as target */
+ hosterr = DID_BAD_TARGET;
+ break;
+
+ case 80: /* Unexpected Reselection */
+ case 81: /* Unexpected Selection */
+ hosterr = DID_BAD_INTR;
+ break;
+
+ case 82: /* Abort Command Message */
+ hosterr = DID_ABORT;
+ break;
+
+ case 83: /* SCSI Bus Software Reset */
+ case 84: /* SCSI Bus Hardware Reset */
+ hosterr = DID_RESET;
+ break;
+
+ default: /* Reserved */
+ hosterr = DID_ERROR;
+ }
+
+#ifdef WD7000_DEBUG
+ if (scsierr || hosterr)
+ printk ("\nSCSI command error: SCSI 0x%02x host 0x%04x return %d\n",
+ scsierr, in_error, hosterr);
+#endif
+
+ return (scsierr | (hosterr << 16));
+}
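+/*
+ * Illustration (added note, not in the original driver): an adapter
+ * completion code of 4 ("failed to complete - timeout") with no SCSI
+ * status gives make_code (0x0400, 0) == DID_TIME_OUT << 16, while a
+ * normal completion that returned CHECK CONDITION gives
+ * make_code (0x0200, 0x02) == (DID_OK << 16) | 0x02.
+ */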
+
+
+static void wd7000_scsi_done (Scsi_Cmnd *SCpnt)
+{
+#ifdef WD7000_DEBUG
+ printk ("%s: 0x%08x\n", __FUNCTION__, (int) SCpnt);
+#endif
+
+ SCpnt->SCp.phase = 0;
+}
+
+
+static inline void wd7000_intr_ack (Adapter *host)
+{
+ outb (0, host->iobase + ASC_INTR_ACK);
+}
+
+
+void wd7000_intr_handle (int irq, void *dev_id, struct pt_regs *regs)
+{
+ register int flag, icmb, errstatus, icmb_status;
+ register int host_error, scsi_error;
+ register Scb *scb; /* for SCSI commands */
+ register IcbAny *icb; /* for host commands */
+ register Scsi_Cmnd *SCpnt;
+ Adapter *host = (Adapter *) wd7000_host[irq - IRQ_MIN]->hostdata; /* This MUST be set!!! */
+ Mailbox *icmbs = host->mb.icmb;
+
+ host->int_counter++;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: irq = %d, host = 0x%08x\n", __FUNCTION__, irq, (int) host);
+#endif
+
+ flag = inb (host->iobase + ASC_INTR_STAT);
+
+#ifdef WD7000_DEBUG
+ printk ("%s: intr stat = 0x%02x\n", __FUNCTION__, flag);
+#endif
+
+ if (! (inb (host->iobase + ASC_STAT) & INT_IM)) {
+ /* NB: these are _very_ possible if IRQ 15 is being used, since
+ * it's the "garbage collector" on the 2nd 8259 PIC. Specifically,
+ * any interrupt signal into the 8259 which can't be identified
+ * comes out as 7 from the 8259, which is 15 to the host. Thus, it
+ * is a good thing the WD7000 has an interrupt status port, so we
+ * can sort these out. Otherwise, electrical noise and other such
+ * problems would be indistinguishable from valid interrupts...
+ */
+#ifdef WD7000_DEBUG
+ printk ("%s: phantom interrupt...\n", __FUNCTION__);
+#endif
+ wd7000_intr_ack (host);
+ return;
+ }
+
+ if (flag & MB_INTR) {
+ /* The interrupt is for a mailbox */
+ if (! (flag & IMB_INTR)) {
+#ifdef WD7000_DEBUG
+ printk ("%s: free outgoing mailbox\n", __FUNCTION__);
+#endif
+ /*
+ * If sleep_on() and the "interrupt on free OGMB" command are
+ * used in mail_out(), wake_up() should correspondingly be called
+ * here. For now, we don't need to do anything special.
+ */
+ wd7000_intr_ack (host);
+ return;
+ }
+ else {
+ /* The interrupt is for an incoming mailbox */
+ icmb = flag & MB_MASK;
+ icmb_status = icmbs[icmb].status;
+
+ if (icmb_status & 0x80) { /* unsolicited - result in ICMB */
+#ifdef WD7000_DEBUG
+ printk ("%s: unsolicited interrupt 0x%02x\n",
+ __FUNCTION__, icmb_status);
+#endif
+ wd7000_intr_ack (host);
+ return;
+ }
+
+ /* Aaaargh! (Zaga) */
+ scb = (Scb *) bus_to_virt (scsi2int ((unchar *) icmbs[icmb].scbptr));
+
+ icmbs[icmb].status = 0;
+ if (!(scb->op & ICB_OP_MASK)) { /* an SCB is done */
+ SCpnt = scb->SCpnt;
+ if (--(SCpnt->SCp.phase) <= 0) { /* all scbs are done */
+ host_error = scb->vue | (icmb_status << 8);
+ scsi_error = scb->status;
+ errstatus = make_code (host_error, scsi_error);
+ SCpnt->result = errstatus;
+
+ scb_free (scb);
+
+ SCpnt->scsi_done (SCpnt);
+ }
+ }
+ else { /* an ICB is done */
+ icb = (IcbAny *) scb;
+ icb->status = icmb_status;
+ icb->phase = 0;
+ }
+ } /* incoming mailbox */
+ }
+
+ wd7000_intr_ack (host);
+
+#ifdef WD7000_DEBUG
+ printk ("%s: return from interrupt handler\n", __FUNCTION__);
+#endif
+}
+
+
+void do_wd7000_intr_handle (int irq, void *dev_id, struct pt_regs *regs)
+{
+#if (LINUX_VERSION_CODE >= 0x020100)
+ ulong flags;
+
+ spin_lock_irqsave (&io_request_lock, flags);
+#endif
+
+ wd7000_intr_handle (irq, dev_id, regs);
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+ spin_unlock_irqrestore (&io_request_lock, flags);
+#endif
+}
+
+
+int wd7000_queuecommand (Scsi_Cmnd *SCpnt, void (*done) (Scsi_Cmnd *))
+{
+ register Scb *scb;
+ register Sgb *sgb;
+ register Adapter *host = (Adapter *) SCpnt->host->hostdata;
+
+ if ((scb = scb_alloc ()) == NULL) {
+ printk ("%s: Cannot allocate SCB!\n", __FUNCTION__);
+ return (0);
+ }
+
+ SCpnt->scsi_done = done;
+ SCpnt->SCp.phase = 1;
+ SCpnt->host_scribble = (unchar *) scb;
+ scb->idlun = ((SCpnt->target << 5) & 0xe0) | (SCpnt->lun & 7);
+ scb->direc = 0x40; /* Disable direction check */
+ scb->SCpnt = SCpnt; /* so we can find stuff later */
+ scb->host = host;
+ memcpy (scb->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+ if (SCpnt->use_sg) {
+ struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer;
+ uint i;
+
+ if (SCpnt->host->sg_tablesize == SG_NONE)
+ panic ("%s: scatter/gather not supported.\n", __FUNCTION__);
+#ifdef WD7000_DEBUG
+ else
+ printk ("Using scatter/gather with %d elements.\n", SCpnt->use_sg);
+#endif
+
+ sgb = scb->sgb;
+ scb->op = 1;
+ any2scsi (scb->dataptr, (int) sgb);
+ any2scsi (scb->maxlen, SCpnt->use_sg * sizeof (Sgb));
+
+ for (i = 0; i < SCpnt->use_sg; i++) {
+ any2scsi (sgb[i].ptr, (int) sg[i].address);
+ any2scsi (sgb[i].len, sg[i].length);
+ }
+ }
+ else {
+ scb->op = 0;
+ any2scsi (scb->dataptr, (int) SCpnt->request_buffer);
+ any2scsi (scb->maxlen, SCpnt->request_bufflen);
+ }
+
+ while (! mail_out (host, scb)); /* keep trying */
+
+ return (1);
+}
+
+
+int wd7000_command (Scsi_Cmnd *SCpnt)
+{
+ if (! wd7000_queuecommand (SCpnt, wd7000_scsi_done))
+ return (-1);
+
+ while (SCpnt->SCp.phase > 0)
+ barrier (); /* phase counts scbs down to 0 */
+
+ return (SCpnt->result);
+}
+
+
+int wd7000_diagnostics (Adapter *host, int code)
+{
+ static IcbDiag icb = { ICB_OP_DIAGNOSTICS };
+ static unchar buf[256];
+ ulong timeout;
+
+ /*
+ * This routine is only called at init, so there should be OGMBs
+ * available. I'm assuming so here. If this is going to
+ * fail, I can just let the timeout catch the failure.
+ */
+ icb.type = code;
+ any2scsi (icb.len, sizeof (buf));
+ any2scsi (icb.ptr, (int) &buf);
+ icb.phase = 1;
+
+ mail_out (host, (Scb *) &icb);
+
+ /*
+ * Wait up to 2 seconds for completion.
+ */
+ for (timeout = jiffies + WAITnexttimeout; icb.phase && (jiffies < timeout); )
+ barrier ();
+
+ if (icb.phase) {
+ printk ("%s: timed out.\n", __FUNCTION__);
+ return (0);
+ }
+
+ if (make_code (icb.vue | (icb.status << 8), 0)) {
+ printk ("%s: failed (0x%02x,0x%02x)\n", __FUNCTION__, icb.vue, icb.status);
+ return (0);
+ }
+
+ return (1);
+}
+
+
+int wd7000_init (Adapter *host)
+{
+ InitCmd init_cmd =
+ {
+ INITIALIZATION,
+ 7,
+ host->bus_on,
+ host->bus_off,
+ 0,
+ { 0, 0, 0 },
+ OGMB_CNT,
+ ICMB_CNT
+ };
+ int diag;
+
+ /*
+ * Reset the adapter - only. The SCSI bus was initialized at power-up,
+ * and we need to do this just so we control the mailboxes, etc.
+ */
+ outb (ASC_RES, host->iobase + ASC_CONTROL);
+ delay (1); /* reset pulse: this is 10ms, only need 25us */
+ outb (0, host->iobase + ASC_CONTROL);
+ host->control = 0; /* this must always shadow ASC_CONTROL */
+
+ if (WAIT (host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
+ printk ("%s: WAIT timed out.\n", __FUNCTION__);
+ return (0); /* 0 = not ok */
+ }
+
+ if ((diag = inb (host->iobase + ASC_INTR_STAT)) != 1) {
+ printk ("%s: ", __FUNCTION__);
+
+ switch (diag) {
+ case 2: printk ("RAM failure.\n");
+ break;
+
+ case 3: printk ("FIFO R/W failed\n");
+ break;
+
+ case 4: printk ("SBIC register R/W failed\n");
+ break;
+
+ case 5: printk ("Initialization D-FF failed.\n");
+ break;
+
+ case 6: printk ("Host IRQ D-FF failed.\n");
+ break;
+
+ case 7: printk ("ROM checksum error.\n");
+ break;
+
+ default: printk ("diagnostic code 0x%02Xh received.\n", diag);
+ }
+
+ return (0);
+ }
+
+ /* Clear mailboxes */
+ memset (&(host->mb), 0, sizeof (host->mb));
+
+ /* Execute init command */
+ any2scsi ((unchar *) &(init_cmd.mailboxes), (int) &(host->mb));
+
+ if (! wd7000_command_out (host, (unchar *) &init_cmd, sizeof (init_cmd))) {
+ printk ("%s: adapter initialization failed.\n", __FUNCTION__);
+ return (0);
+ }
+
+ if (WAIT (host->iobase + ASC_STAT, ASC_STATMASK, ASC_INIT, 0)) {
+ printk ("%s: WAIT timed out.\n", __FUNCTION__);
+ return (0);
+ }
+
+ if (request_irq (host->irq, do_wd7000_intr_handle, SA_INTERRUPT, "wd7000", NULL)) {
+ printk ("%s: can't get IRQ %d.\n", __FUNCTION__, host->irq);
+ return (0);
+ }
+
+ if (request_dma (host->dma, "wd7000")) {
+ printk ("%s: can't get DMA channel %d.\n", __FUNCTION__, host->dma);
+ free_irq (host->irq, NULL);
+ return (0);
+ }
+
+ wd7000_enable_dma (host);
+ wd7000_enable_intr (host);
+
+ if (! wd7000_diagnostics (host, ICB_DIAG_FULL)) {
+ free_dma (host->dma);
+ free_irq (host->irq, NULL);
+ return (0);
+ }
+
+ return (1);
+}
+
+
+void wd7000_revision (Adapter *host)
+{
+ static IcbRevLvl icb = { ICB_OP_GET_REVISION };
+
+ /*
+ * Like diagnostics, this is only done at init time, in fact, from
+ * wd7000_detect, so there should be OGMBs available. If it fails,
+ * the only damage will be that the revision will show up as 0.0,
+ * which in turn means that scatter/gather will be disabled.
+ */
+ icb.phase = 1;
+ mail_out (host, (Scb *) &icb);
+
+ while (icb.phase)
+ barrier (); /* wait for completion */
+
+ host->rev1 = icb.primary;
+ host->rev2 = icb.secondary;
+}
+
+
+#undef SPRINTF
+#define SPRINTF(args...) { if (pos < (buffer + length)) pos += sprintf (pos, ## args); }
+
+int wd7000_set_info (char *buffer, int length, struct Scsi_Host *host)
+{
+ ulong flags;
+
+ save_flags (flags);
+ cli ();
+
+#ifdef WD7000_DEBUG
+ printk ("Buffer = <%.*s>, length = %d\n", length, buffer, length);
+#endif
+
+ /*
+ * Currently this is a no-op
+ */
+ printk ("Sorry, this function is currently out of order...\n");
+
+ restore_flags (flags);
+
+ return (length);
+}
+
+
+int wd7000_proc_info (char *buffer, char **start, off_t offset, int length, int hostno, int inout)
+{
+ struct Scsi_Host *host = NULL;
+ Scsi_Device *scd;
+ Adapter *adapter;
+ ulong flags;
+ char *pos = buffer;
+ short i;
+
+#ifdef WD7000_DEBUG
+ Mailbox *ogmbs, *icmbs;
+ short count;
+#endif
+
+ /*
+ * Find the specified host board.
+ */
+ for (i = 0; i < IRQS; i++)
+ if (wd7000_host[i] && (wd7000_host[i]->host_no == hostno)) {
+ host = wd7000_host[i];
+
+ break;
+ }
+
+ /*
+ * Host not found!
+ */
+ if (! host)
+ return (-ESRCH);
+
+ /*
+ * Has data been written to the file ?
+ */
+ if (inout)
+ return (wd7000_set_info (buffer, length, host));
+
+ adapter = (Adapter *) host->hostdata;
+
+ save_flags (flags);
+ cli ();
+
+ SPRINTF ("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", hostno, adapter->rev1, adapter->rev2);
+ SPRINTF (" IO base: 0x%x\n", adapter->iobase);
+ SPRINTF (" IRQ: %d\n", adapter->irq);
+ SPRINTF (" DMA channel: %d\n", adapter->dma);
+ SPRINTF (" Interrupts: %d\n", adapter->int_counter);
+ SPRINTF (" BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125);
+ SPRINTF (" BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
+
+#ifdef WD7000_DEBUG
+ ogmbs = adapter->mb.ogmb;
+ icmbs = adapter->mb.icmb;
+
+ SPRINTF ("\nControl port value: 0x%x\n", adapter->control);
+ SPRINTF ("Incoming mailbox:\n");
+ SPRINTF (" size: %d\n", ICMB_CNT);
+ SPRINTF (" queued messages: ");
+
+ for (i = count = 0; i < ICMB_CNT; i++)
+ if (icmbs[i].status) {
+ count++;
+ SPRINTF ("0x%x ", i);
+ }
+
+ SPRINTF (count ? "\n" : "none\n");
+
+ SPRINTF ("Outgoing mailbox:\n");
+ SPRINTF (" size: %d\n", OGMB_CNT);
+ SPRINTF (" next message: 0x%x\n", adapter->next_ogmb);
+ SPRINTF (" queued messages: ");
+
+ for (i = count = 0; i < OGMB_CNT; i++)
+ if (ogmbs[i].status) {
+ count++;
+ SPRINTF ("0x%x ", i);
+ }
+
+ SPRINTF (count ? "\n" : "none\n");
+#endif
+
+ /*
+ * Display driver information for each device attached to the board.
+ */
+#if (LINUX_VERSION_CODE >= 0x020100)
+ scd = host->host_queue;
+#else
+ scd = scsi_devices;
+#endif
+
+ SPRINTF ("\nAttached devices: %s\n", scd ? "" : "none");
+
+ for ( ; scd; scd = scd->next)
+ if (scd->host->host_no == hostno) {
+ SPRINTF (" [Channel: %02d, Id: %02d, Lun: %02d] ",
+ scd->channel, scd->id, scd->lun);
+ SPRINTF ("%s ", (scd->type < MAX_SCSI_DEVICE_CODE) ?
+ scsi_device_types[(short) scd->type] : "Unknown device");
+
+ for (i = 0; (i < 8) && (scd->vendor[i] >= 0x20); i++)
+ SPRINTF ("%c", scd->vendor[i]);
+ SPRINTF (" ");
+
+ for (i = 0; (i < 16) && (scd->model[i] >= 0x20); i++)
+ SPRINTF ("%c", scd->model[i]);
+ SPRINTF ("\n");
+ }
+
+ SPRINTF ("\n");
+
+ restore_flags (flags);
+
+ /*
+ * Calculate start of next buffer, and return value.
+ */
+ *start = buffer + offset;
+
+ if ((pos - buffer) < offset)
+ return (0);
+ else if ((pos - buffer - offset) < length)
+ return (pos - buffer - offset);
+ else
+ return (length);
+}
+
+
+/*
+ * Returns the number of adapters this driver is supporting.
+ *
+ * The source for hosts.c says to wait to call scsi_register until 100%
+ * sure about an adapter. We need to do it a little sooner here; we
+ * need the storage set up by scsi_register before wd7000_init, and
+ * changing the location of an Adapter structure is more trouble than
+ * calling scsi_unregister.
+ *
+ */
+int wd7000_detect (Scsi_Host_Template *tpnt)
+{
+ short present = 0, biosaddr_ptr, sig_ptr, i, pass;
+ short biosptr[NUM_CONFIGS];
+ uint iobase;
+ Adapter *host = NULL;
+ struct Scsi_Host *sh;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: started\n", __FUNCTION__);
+#endif
+
+ /*
+ * Set up SCB free list, which is shared by all adapters
+ */
+ scbs_init ();
+
+ for (i = 0; i < IRQS; wd7000_host[i++] = NULL);
+ for (i = 0; i < NUM_CONFIGS; biosptr[i++] = -1);
+
+ tpnt->proc_dir = &proc_scsi_wd7000;
+ tpnt->proc_info = &wd7000_proc_info;
+
+ for (pass = 0; pass < NUM_CONFIGS; pass++) {
+ short bios_match = 1;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: pass %d\n", __FUNCTION__, pass + 1);
+#endif
+
+ /*
+ * First, search for BIOS SIGNATURE...
+ */
+ for (biosaddr_ptr = 0; bios_match && (biosaddr_ptr < NUM_ADDRS); biosaddr_ptr++)
+ for (sig_ptr = 0; bios_match && (sig_ptr < NUM_SIGNATURES); sig_ptr++) {
+ for (i = 0; i < pass; i++)
+ if (biosptr[i] == biosaddr_ptr)
+ break;
+
+ if (i == pass) {
+#if (LINUX_VERSION_CODE >= 0x020100)
+ char *biosaddr = (char *) ioremap (wd7000_biosaddr[biosaddr_ptr] +
+ signatures[sig_ptr].ofs,
+ signatures[sig_ptr].len);
+#else
+ char *biosaddr = (char *) (wd7000_biosaddr[biosaddr_ptr] +
+ signatures[sig_ptr].ofs);
+#endif
+ bios_match = memcmp (biosaddr, signatures[sig_ptr].sig,
+ signatures[sig_ptr].len);
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+ iounmap (biosaddr);
+#else
+#endif
+ if (! bios_match) {
+ /*
+ * BIOS SIGNATURE has been found.
+ */
+ biosptr[pass] = biosaddr_ptr;
+#ifdef WD7000_DEBUG
+ printk ("WD-7000 SST BIOS detected at 0x%lx: checking...\n",
+ wd7000_biosaddr[biosaddr_ptr]);
+#endif
+ }
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ if (bios_match)
+ printk ("WD-7000 SST BIOS not detected...\n");
+#endif
+
+ if (configs[pass].irq < 0)
+ continue;
+
+ iobase = configs[pass].iobase;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: check IO 0x%x region...\n", __FUNCTION__, iobase);
+#endif
+
+ if (! check_region (iobase, 4)) {
+#ifdef WD7000_DEBUG
+ printk ("%s: ASC reset (IO 0x%x) ...", __FUNCTION__, iobase);
+#endif
+ /*
+ * ASC reset...
+ */
+ outb (ASC_RES, iobase + ASC_CONTROL);
+ delay (1);
+ outb (0, iobase + ASC_CONTROL);
+
+ if (WAIT (iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0))
+#ifdef WD7000_DEBUG
+ {
+ printk ("failed!\n");
+ continue;
+ }
+ else
+ printk ("ok!\n");
+#else
+ continue;
+#endif
+
+ if (inb (iobase + ASC_INTR_STAT) == 1) {
+ /*
+ * We register here, to get a pointer to the extra space,
+ * which we'll use as the Adapter structure (host) for
+ * this adapter. It is located just after the registered
+ * Scsi_Host structure (sh), and is located by the empty
+ * array hostdata.
+ */
+ sh = scsi_register (tpnt, sizeof (Adapter));
+ host = (Adapter *) sh->hostdata;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: adapter allocated at 0x%x\n", __FUNCTION__, (int) host);
+#endif
+
+ memset (host, 0, sizeof (Adapter));
+
+ host->irq = configs[pass].irq;
+ host->dma = configs[pass].dma;
+ host->iobase = iobase;
+ host->int_counter = 0;
+ host->bus_on = configs[pass].bus_on;
+ host->bus_off = configs[pass].bus_off;
+ host->sh = wd7000_host[host->irq - IRQ_MIN] = sh;
+
+#ifdef WD7000_DEBUG
+ printk ("%s: Trying to init WD-7000 card at IO 0x%x, IRQ %d, DMA %d...\n",
+ __FUNCTION__, host->iobase, host->irq, host->dma);
+#endif
+
+ if (! wd7000_init (host)) { /* Initialization failed */
+ scsi_unregister (sh);
+ continue;
+ }
+
+ /*
+ * OK from here - we'll use this adapter/configuration.
+ */
+ wd7000_revision (host); /* important for scatter/gather */
+
+ /*
+ * Register our ports.
+ */
+ request_region (host->iobase, 4, "wd7000");
+
+ /*
+ * For boards before rev 6.0, scatter/gather isn't supported.
+ */
+ if (host->rev1 < 6)
+ sh->sg_tablesize = SG_NONE;
+
+ present++; /* count it */
+
+ printk ("Western Digital WD-7000 (rev %d.%d) ",
+ host->rev1, host->rev2);
+ printk ("using IO 0x%x, IRQ %d, DMA %d.\n",
+ host->iobase, host->irq, host->dma);
+ printk (" BUS_ON time: %dns, BUS_OFF time: %dns\n",
+ host->bus_on * 125, host->bus_off * 125);
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ else
+ printk ("%s: IO 0x%x region is already allocated!\n", __FUNCTION__, iobase);
+#endif
+
+ }
+
+ if (! present)
+ printk ("Failed initialization of WD-7000 SCSI card!\n");
+
+ return (present);
+}
+
+
+/*
+ * I have absolutely NO idea how to do an abort with the WD7000...
+ */
+int wd7000_abort (Scsi_Cmnd *SCpnt)
+{
+ Adapter *host = (Adapter *) SCpnt->host->hostdata;
+
+ if (inb (host->iobase + ASC_STAT) & INT_IM) {
+ printk ("%s: lost interrupt\n", __FUNCTION__);
+ wd7000_intr_handle (host->irq, NULL, NULL);
+
+ return (SCSI_ABORT_SUCCESS);
+ }
+
+ return (SCSI_ABORT_SNOOZE);
+}
+
+
+/*
+ * I also have no idea how to do a reset...
+ */
+int wd7000_reset (Scsi_Cmnd *SCpnt, uint flags)
+{
+ return (SCSI_RESET_PUNT);
+}
+
+
+/*
+ * This was borrowed directly from aha1542.c. (Zaga)
+ */
+int wd7000_biosparam (Disk *disk, kdev_t dev, int *ip)
+{
+#ifdef WD7000_DEBUG
+ printk ("%s: dev=%s, size=%d, ", __FUNCTION__, kdevname (dev), disk->capacity);
+#endif
+
+ /*
+ * try default translation
+ */
+ ip[0] = 64;
+ ip[1] = 32;
+ ip[2] = disk->capacity / (64 * 32);
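+ /* Illustration (added note, not in the original source): a hypothetical
+    2 GB disk of 4194304 blocks yields 4194304 / 2048 = 2048 cylinders
+    here; since that exceeds 1024, the code below consults the partition
+    table and otherwise falls back to the extended 255/63 translation,
+    giving 4194304 / 16065 = 261 cylinders. */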
+
+ /*
+ * for disks >1GB do some guessing
+ */
+ if (ip[2] >= 1024) {
+ int info[3];
+
+ /*
+ * try to figure out the geometry from the partition table
+ */
+ if ((scsicam_bios_param (disk, dev, info) < 0) ||
+ !(((info[0] == 64) && (info[1] == 32)) ||
+ ((info[0] == 255) && (info[1] == 63)))) {
+ printk ("%s: unable to verify geometry for disk with >1GB.\n"
+ " using extended translation.\n",
+ __FUNCTION__);
+
+ ip[0] = 255;
+ ip[1] = 63;
+ ip[2] = disk->capacity / (255 * 63);
+ }
+ else {
+ ip[0] = info[0];
+ ip[1] = info[1];
+ ip[2] = info[2];
+
+ if (info[0] == 255)
+ printk ("%s: current partition table is using extended translation.\n",
+ __FUNCTION__);
+ }
+ }
+
+#ifdef WD7000_DEBUG
+ printk ("bios geometry: head=%d, sec=%d, cyl=%d\n", ip[0], ip[1], ip[2]);
+ printk ("WARNING: check, if the bios geometry is correct.\n");
+#endif
+
+ return (0);
+}
+
+#ifdef MODULE
+/* Eventually this will go into an include file, but this will be later */
+Scsi_Host_Template driver_template = WD7000;
+
+#include "scsi_module.c"
+#endif
diff --git a/linux/src/drivers/scsi/wd7000.h b/linux/src/drivers/scsi/wd7000.h
new file mode 100644
index 0000000..e17a69b
--- /dev/null
+++ b/linux/src/drivers/scsi/wd7000.h
@@ -0,0 +1,446 @@
+/* $Id: wd7000.h,v 1.1 1999/04/26 05:55:19 tb Exp $
+ *
+ * Header file for the WD-7000 driver for Linux
+ *
+ * John Boyd <boyd@cis.ohio-state.edu> Jan 1994:
+ * This file has been reduced to only the definitions needed for the
+ * WD7000 host structure.
+ *
+ * Revision by Miroslav Zagorac <zaga@fly.cc.fer.hr> Jun 1997.
+ */
+#ifndef _WD7000_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+#ifndef NULL
+#define NULL 0L
+#endif
+
+/*
+ * In this version, sg_tablesize now defaults to WD7000_SG, and will
+ * be set to SG_NONE for older boards. This is the reverse of the
+ * previous default, and was changed so that the driver-level
+ * Scsi_Host_Template would reflect the driver's support for scatter/
+ * gather.
+ *
+ * Also, it has been reported that boards at Revision 6 support scatter/
+ * gather, so the new definition of an "older" board has been changed
+ * accordingly.
+ */
+#define WD7000_Q 16
+#define WD7000_SG 16
+
+#ifdef WD7000_DEFINES
+/*
+ * Mailbox structure sizes.
+ * I prefer to keep the number of ICMBs much larger than the number of
+ * OGMBs. OGMBs are used very quickly by the driver to start one or
+ * more commands, while ICMBs are used by the host adapter per command.
+ */
+#define OGMB_CNT 16
+#define ICMB_CNT 32
+
+/*
+ * Scb's are shared by all active adapters. If you'd rather conserve
+ * memory, use a smaller number (> 0, of course) - things should
+ * still work OK.
+ */
+#define MAX_SCBS (4 * WD7000_Q)
+
+/*
+ * WD7000-specific mailbox structure
+ */
+typedef volatile struct {
+ unchar status;
+ unchar scbptr[3]; /* SCSI-style - MSB first (big endian) */
+} Mailbox;
+
+/*
+ * This structure should contain all per-adapter global data. I.e., any
+ * new global per-adapter data should put in here.
+ */
+typedef struct {
+ struct Scsi_Host *sh; /* Pointer to Scsi_Host structure */
+ int iobase; /* This adapter's I/O base address */
+ int irq; /* This adapter's IRQ level */
+ int dma; /* This adapter's DMA channel */
+ int int_counter; /* This adapter's interrupt counter */
+ int bus_on; /* This adapter's BUS_ON time */
+ int bus_off; /* This adapter's BUS_OFF time */
+ struct { /* This adapter's mailboxes */
+ Mailbox ogmb[OGMB_CNT]; /* Outgoing mailboxes */
+ Mailbox icmb[ICMB_CNT]; /* Incoming mailboxes */
+ } mb;
+ int next_ogmb; /* to reduce contention at mailboxes */
+ unchar control; /* shadows CONTROL port value */
+ unchar rev1; /* filled in by wd7000_revision */
+ unchar rev2;
+} Adapter;
+
+
+/*
+ * possible irq range
+ */
+#define IRQ_MIN 3
+#define IRQ_MAX 15
+#define IRQS (IRQ_MAX - IRQ_MIN + 1)
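+/*
+ * Note (added, not in the original header): wd7000_host[] and the
+ * interrupt handler index adapters by (irq - IRQ_MIN), so the default
+ * IRQ 15 configuration occupies slot 12 of this 13-entry range.
+ */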
+
+#define BUS_ON 64 /* x 125ns = 8000ns (BIOS default) */
+#define BUS_OFF 15 /* x 125ns = 1875ns (BIOS default) */
+
+/*
+ * Standard Adapter Configurations - used by wd7000_detect
+ */
+typedef struct {
+ short irq; /* IRQ level */
+ short dma; /* DMA channel */
+ uint iobase; /* I/O base address */
+ short bus_on; /* Time that WD7000 spends on the AT-bus when */
+ /* transferring data. BIOS default is 8000ns. */
+ short bus_off; /* Time that WD7000 spends OFF THE BUS */
+ /* between bursts while it is transferring data. */
+ /* BIOS default is 1875ns */
+} Config;
+
+
+/*
+ * The following list defines strings to look for in the BIOS that identify
+ * it as the WD7000-FASST2 SST BIOS. I suspect that something should be
+ * added for the Future Domain version.
+ */
+typedef struct {
+ const char *sig; /* String to look for */
+ ulong ofs; /* offset from BIOS base address */
+ uint len; /* length of string */
+} Signature;
+
+/*
+ * I/O Port Offsets and Bit Definitions
+ * 4 addresses are used. Those not defined here are reserved.
+ */
+#define ASC_STAT 0 /* Status, Read */
+#define ASC_COMMAND 0 /* Command, Write */
+#define ASC_INTR_STAT 1 /* Interrupt Status, Read */
+#define ASC_INTR_ACK 1 /* Acknowledge, Write */
+#define ASC_CONTROL 2 /* Control, Write */
+
+/*
+ * ASC Status Port
+ */
+#define INT_IM 0x80 /* Interrupt Image Flag */
+#define CMD_RDY 0x40 /* Command Port Ready */
+#define CMD_REJ 0x20 /* Command Port Byte Rejected */
+#define ASC_INIT 0x10 /* ASC Initialized Flag */
+#define ASC_STATMASK 0xf0 /* The lower 4 bits are reserved */
+
+/*
+ * COMMAND opcodes
+ *
+ * Unfortunately, I have no idea how to properly use some of these commands,
+ * as the OEM manual does not make it clear. I have not been able to use
+ * enable/disable unsolicited interrupts or the reset commands with any
+ * discernible effect whatsoever. I think they may be related to certain
+ * ICB commands, but again, the OEM manual doesn't make that clear.
+ */
+#define NO_OP 0 /* NO-OP toggles CMD_RDY bit in ASC_STAT */
+#define INITIALIZATION 1 /* initialization (10 bytes) */
+#define DISABLE_UNS_INTR 2 /* disable unsolicited interrupts */
+#define ENABLE_UNS_INTR 3 /* enable unsolicited interrupts */
+#define INTR_ON_FREE_OGMB 4 /* interrupt on free OGMB */
+#define SOFT_RESET 5 /* SCSI bus soft reset */
+#define HARD_RESET_ACK 6 /* SCSI bus hard reset acknowledge */
+#define START_OGMB 0x80 /* start command in OGMB (n) */
+#define SCAN_OGMBS 0xc0 /* start multiple commands, signature (n) */
+ /* where (n) = lower 6 bits */
+/*
+ * For INITIALIZATION:
+ */
+typedef struct {
+ unchar op; /* command opcode (= 1) */
+ unchar ID; /* Adapter's SCSI ID */
+ unchar bus_on; /* Bus on time, x 125ns (see below) */
+ unchar bus_off; /* Bus off time, "" "" */
+ unchar rsvd; /* Reserved */
+ unchar mailboxes[3]; /* Address of Mailboxes, MSB first */
+ unchar ogmbs; /* Number of outgoing MBs, max 64, 0,1 = 1 */
+ unchar icmbs; /* Number of incoming MBs, "" "" */
+} InitCmd;
+
+/*
+ * Interrupt Status Port - also returns diagnostic codes at ASC reset
+ *
+ * if msb is zero, the lower bits are diagnostic status
+ * Diagnostics:
+ * 01 No diagnostic error occurred
+ * 02 RAM failure
+ * 03 FIFO R/W failed
+ * 04 SBIC register read/write failed
+ * 05 Initialization D-FF failed
+ * 06 Host IRQ D-FF failed
+ * 07 ROM checksum error
+ * Interrupt status (bitwise):
+ * 10NNNNNN outgoing mailbox NNNNNN is free
+ * 11NNNNNN incoming mailbox NNNNNN needs service
+ */
+#define MB_INTR 0xC0 /* Mailbox Service possible/required */
+#define IMB_INTR 0x40 /* 1 Incoming / 0 Outgoing */
+#define MB_MASK 0x3f /* mask for mailbox number */
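+/*
+ * Illustration (added note, not in the original header): an interrupt
+ * status of 0xC5 (binary 11000101) has MB_INTR and IMB_INTR set, so
+ * incoming mailbox (0xC5 & MB_MASK) == 5 needs service, while 0x85
+ * (binary 10000101) instead reports outgoing mailbox 5 as free.
+ */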
+
+/*
+ * CONTROL port bits
+ */
+#define INT_EN 0x08 /* Interrupt Enable */
+#define DMA_EN 0x04 /* DMA Enable */
+#define SCSI_RES 0x02 /* SCSI Reset */
+#define ASC_RES 0x01 /* ASC Reset */
+
+/*
+ * Driver data structures:
+ * - mb and scbs are required for interfacing with the host adapter.
+ * An SCB has extra fields not visible to the adapter; mb's
+ * _cannot_ do this, since the adapter assumes they are contiguous in
+ * memory, 4 bytes each, with ICMBs following OGMBs, and uses this fact
+ * to access them.
+ * - An icb is for host-only (non-SCSI) commands. ICBs are 16 bytes each;
+ * the additional bytes are used only by the driver.
+ * - For now, a pool of SCBs are kept in global storage by this driver,
+ * and are allocated and freed as needed.
+ *
+ * The 7000-FASST2 marks OGMBs empty as soon as it has _started_ a command,
+ * not when it has finished. Since the SCB must be around for completion,
+ * problems arise when SCBs correspond to OGMBs, which may be reallocated
+ * earlier (or delayed unnecessarily until a command completes).
+ * Mailboxes are used as transient data structures, simply for
+ * carrying SCB addresses to/from the 7000-FASST2.
+ *
+ * Note also since SCBs are not "permanently" associated with mailboxes,
+ * there is no need to keep a global list of Scsi_Cmnd pointers indexed
+ * by OGMB. Again, SCBs reference their Scsi_Cmnds directly, so mailbox
+ * indices need not be involved.
+ */
+
+/*
+ * WD7000-specific scatter/gather element structure
+ */
+typedef struct {
+ unchar len[3];
+ unchar ptr[3]; /* Also SCSI-style - MSB first */
+} Sgb;
+
+typedef struct { /* Command Control Block 5.4.1 */
+ unchar op; /* Command Control Block Operation Code */
+ unchar idlun; /* op=0,2:Target Id, op=1:Initiator Id */
+ /* Outbound data transfer, length is checked */
+ /* Inbound data transfer, length is checked */
+ /* Logical Unit Number */
+ unchar cdb[12]; /* SCSI Command Block */
+ volatile unchar status; /* SCSI Return Status */
+ volatile unchar vue; /* Vendor Unique Error Code */
+ unchar maxlen[3]; /* Maximum Data Transfer Length */
+ unchar dataptr[3]; /* SCSI Data Block Pointer */
+ unchar linkptr[3]; /* Next Command Link Pointer */
+ unchar direc; /* Transfer Direction */
+ unchar reserved2[6]; /* SCSI Command Descriptor Block */
+ /* end of hardware SCB */
+ Scsi_Cmnd *SCpnt; /* Scsi_Cmnd using this SCB */
+ Sgb sgb[WD7000_SG]; /* Scatter/gather list for this SCB */
+ Adapter *host; /* host adapter */
+ unchar used; /* flag */
+} Scb;
+
+/*
+ * This driver is written to allow host-only commands to be executed.
+ * These use a 16-byte block called an ICB. The format is extended by the
+ * driver to 18 bytes, to support the status returned in the ICMB and
+ * an execution phase code.
+ *
+ * There are other formats besides these; these are the ones I've tried
+ * to use. Formats for some of the defined ICB opcodes are not defined
+ * (notably, get/set unsolicited interrupt status) in my copy of the OEM
+ * manual, and others are ambiguous/hard to follow.
+ */
+#define ICB_OP_MASK 0x80 /* distinguishes scbs from icbs */
+#define ICB_OP_OPEN_RBUF 0x80 /* open receive buffer */
+#define ICB_OP_RECV_CMD 0x81 /* receive command from initiator */
+#define ICB_OP_RECV_DATA 0x82 /* receive data from initiator */
+#define ICB_OP_RECV_SDATA 0x83 /* receive data with status from init. */
+#define ICB_OP_SEND_DATA 0x84 /* send data with status to initiator */
+#define ICB_OP_SEND_STAT 0x86 /* send command status to initiator */
+ /* 0x87 is reserved */
+#define ICB_OP_READ_INIT 0x88 /* read initialization bytes */
+#define ICB_OP_READ_ID 0x89 /* read adapter's SCSI ID */
+#define ICB_OP_SET_UMASK 0x8A /* set unsolicited interrupt mask */
+#define ICB_OP_GET_UMASK 0x8B /* read unsolicited interrupt mask */
+#define ICB_OP_GET_REVISION 0x8C /* read firmware revision level */
+#define ICB_OP_DIAGNOSTICS 0x8D /* execute diagnostics */
+#define ICB_OP_SET_EPARMS 0x8E /* set execution parameters */
+#define ICB_OP_GET_EPARMS 0x8F /* read execution parameters */
+
+typedef struct {
+ unchar op;
+ unchar IDlun; /* Initiator SCSI ID/lun */
+ unchar len[3]; /* command buffer length */
+ unchar ptr[3]; /* command buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRecvCmd;
+
+typedef struct {
+ unchar op;
+ unchar IDlun; /* Target SCSI ID/lun */
+ unchar stat; /* (outgoing) completion status byte 1 */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbSendStat;
+
+typedef struct {
+ unchar op;
+ volatile unchar primary; /* primary revision level (returned) */
+ volatile unchar secondary; /* secondary revision level (returned) */
+ unchar rsvd[12]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbRevLvl;
+
+typedef struct { /* I'm totally guessing here */
+ unchar op;
+ volatile unchar mask[14]; /* mask bits */
+#if 0
+ unchar rsvd[12]; /* reserved */
+#endif
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbUnsMask;
+
+typedef struct {
+ unchar op;
+ unchar type; /* diagnostics type code (0-3) */
+ unchar len[3]; /* buffer length */
+ unchar ptr[3]; /* buffer address */
+ unchar rsvd[7]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbDiag;
+
+#define ICB_DIAG_POWERUP 0 /* Power-up diags only */
+#define ICB_DIAG_WALKING 1 /* walking 1's pattern */
+#define ICB_DIAG_DMA 2 /* DMA - system memory diags */
+#define ICB_DIAG_FULL 3 /* do both 1 & 2 */
+
+typedef struct {
+ unchar op;
+ unchar rsvd1; /* reserved */
+ unchar len[3]; /* parms buffer length */
+ unchar ptr[3]; /* parms buffer address */
+ unchar idx[2]; /* index (MSB-LSB) */
+ unchar rsvd2[5]; /* reserved */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbParms;
+
+typedef struct {
+ unchar op;
+ unchar data[14]; /* format-specific data */
+ volatile unchar vue; /* vendor-unique error code */
+ volatile unchar status; /* returned (icmb) status */
+ volatile unchar phase; /* used by interrupt handler */
+} IcbAny;
+
+typedef union {
+ unchar op; /* ICB opcode */
+ IcbRecvCmd recv_cmd; /* format for receive command */
+ IcbSendStat send_stat; /* format for send status */
+ IcbRevLvl rev_lvl; /* format for get revision level */
+ IcbDiag diag; /* format for execute diagnostics */
+ IcbParms eparms; /* format for get/set exec parms */
+ IcbAny icb; /* generic format */
+ unchar data[18];
+} Icb;
+
+#define WAITnexttimeout 200 /* 2 seconds */
+
+typedef union { /* let's cheat... */
+ int i;
+ unchar u[sizeof (int)]; /* the sizeof(int) makes it more portable */
+} i_u;
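
The 3-byte, MSB-first len/ptr fields used throughout these structures have to be packed and unpacked by hand, which is what unions like i_u help with. The standalone sketch below shows the arithmetic; the helper names are illustrative only and are not taken from this header:

#include <stdio.h>

typedef unsigned char unchar;

/* Pack the low 24 bits of an int into a 3-byte, MSB-first field
 * (the layout of the Sgb and Scb len/ptr members), and back again. */
static void int_to_msb3(unchar *dst, int val)
{
    dst[0] = (val >> 16) & 0xff;
    dst[1] = (val >> 8) & 0xff;
    dst[2] = val & 0xff;
}

static int msb3_to_int(const unchar *src)
{
    return (src[0] << 16) | (src[1] << 8) | src[2];
}

int main(void)
{
    unchar field[3];

    int_to_msb3(field, 0x12345);
    printf("packed %02x %02x %02x -> 0x%x\n",
           field[0], field[1], field[2], msb3_to_int(field));
    return 0;
}
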
+
+#endif /* WD7000_DEFINES */
+
+
+#if (LINUX_VERSION_CODE >= 0x020100)
+
+#define WD7000 { \
+ proc_dir: &proc_scsi_wd7000, \
+ proc_info: wd7000_proc_info, \
+ name: "Western Digital WD-7000", \
+ detect: wd7000_detect, \
+ command: wd7000_command, \
+ queuecommand: wd7000_queuecommand, \
+ abort: wd7000_abort, \
+ reset: wd7000_reset, \
+ bios_param: wd7000_biosparam, \
+ can_queue: WD7000_Q, \
+ this_id: 7, \
+ sg_tablesize: WD7000_SG, \
+ cmd_per_lun: 1, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+ use_new_eh_code: 0 \
+}
+
+#else /* Use old scsi code */
+
+#define WD7000 { \
+ proc_dir: &proc_scsi_wd7000, \
+ proc_info: wd7000_proc_info, \
+ name: "Western Digital WD-7000", \
+ detect: wd7000_detect, \
+ command: wd7000_command, \
+ queuecommand: wd7000_queuecommand, \
+ abort: wd7000_abort, \
+ reset: wd7000_reset, \
+ bios_param: wd7000_biosparam, \
+ can_queue: WD7000_Q, \
+ this_id: 7, \
+ sg_tablesize: WD7000_SG, \
+ cmd_per_lun: 1, \
+ unchecked_isa_dma: 1, \
+ use_clustering: ENABLE_CLUSTERING, \
+}
+
+#endif /* LINUX_VERSION_CODE */
+
+
+extern struct proc_dir_entry proc_scsi_wd7000;
+
+
+#ifdef WD7000_DEFINES
+int wd7000_diagnostics (Adapter *, int);
+int wd7000_init (Adapter *);
+void wd7000_revision (Adapter *);
+#endif /* WD7000_DEFINES */
+
+void wd7000_setup (char *, int *);
+int make_code (uint, uint);
+void wd7000_intr_handle (int, void *, struct pt_regs *);
+void do_wd7000_intr_handle (int, void *, struct pt_regs *);
+int wd7000_queuecommand (Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+int wd7000_command (Scsi_Cmnd *);
+int wd7000_set_info (char *, int, struct Scsi_Host *);
+int wd7000_proc_info (char *, char **, off_t, int, int, int);
+int wd7000_detect (Scsi_Host_Template *);
+int wd7000_abort (Scsi_Cmnd *);
+int wd7000_reset (Scsi_Cmnd *, uint);
+int wd7000_biosparam (Disk *, kdev_t, int *);
+
+#endif /* _WD7000_H */
diff --git a/linux/src/include/asm-i386/atomic.h b/linux/src/include/asm-i386/atomic.h
new file mode 100644
index 0000000..7e5dd06
--- /dev/null
+++ b/linux/src/include/asm-i386/atomic.h
@@ -0,0 +1,69 @@
+#ifndef __ARCH_I386_ATOMIC__
+#define __ARCH_I386_ATOMIC__
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+#ifdef __SMP__
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
+
+typedef int atomic_t;
+
+#define atomic_read(v) (*v)
+
+static __inline__ void atomic_add(atomic_t i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "addl %1,%0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"ir" (i), "m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "subl %1,%0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"ir" (i), "m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "incl %0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ __asm__ __volatile__(
+ LOCK "decl %0"
+ :"=m" (__atomic_fool_gcc(v))
+ :"m" (__atomic_fool_gcc(v)));
+}
+
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ LOCK "decl %0; sete %1"
+ :"=m" (__atomic_fool_gcc(v)), "=qm" (c)
+ :"m" (__atomic_fool_gcc(v)));
+ return c != 0;
+}
+
+#endif
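
The header above only provides the primitives. As a rough illustration of the "resource counting" use mentioned in its comment, here is a userspace analogue of the same pattern, built on GCC's __atomic builtins instead of the inline assembly; the buffer type and function names are invented for the example:

#include <stdio.h>

typedef int atomic_t;

static void atomic_inc(atomic_t *v)
{
    __atomic_add_fetch(v, 1, __ATOMIC_SEQ_CST);
}

static int atomic_dec_and_test(atomic_t *v)
{
    return __atomic_sub_fetch(v, 1, __ATOMIC_SEQ_CST) == 0;
}

struct buffer {
    atomic_t refcount;
    char data[64];
};

static void buffer_get(struct buffer *b) { atomic_inc(&b->refcount); }

static void buffer_put(struct buffer *b)
{
    if (atomic_dec_and_test(&b->refcount))
        printf("last reference dropped, freeing buffer\n");
}

int main(void)
{
    struct buffer b = { 1, "" };   /* creator holds one reference */
    buffer_get(&b);                /* a second user takes a reference */
    buffer_put(&b);                /* ...and releases it */
    buffer_put(&b);                /* creator's release frees the object */
    return 0;
}
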
diff --git a/linux/src/include/asm-i386/bitops.h b/linux/src/include/asm-i386/bitops.h
new file mode 100644
index 0000000..e2a4c14
--- /dev/null
+++ b/linux/src/include/asm-i386/bitops.h
@@ -0,0 +1,201 @@
+#ifndef _I386_BITOPS_H
+#define _I386_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#ifdef __SMP__
+#define LOCK_PREFIX "lock ; "
+#define SMPVOL volatile
+#else
+#define LOCK_PREFIX ""
+#define SMPVOL
+#endif
+
+/*
+ * Some hacks to defeat gcc over-optimizations..
+ */
+struct __dummy { unsigned long a[100]; };
+#define ADDR (*(struct __dummy *) addr)
+#define CONST_ADDR (*(const struct __dummy *) addr)
+
+static __inline__ int set_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int clear_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int change_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static __inline__ int test_bit(int nr, const SMPVOL void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+/*
+ * Find-bit routines..
+ */
+static __inline__ int find_first_zero_bit(void * addr, unsigned size)
+{
+ int d0, d1, d2;
+ int res;
+
+ if (!size)
+ return 0;
+ __asm__("cld\n\t"
+ "movl $-1,%%eax\n\t"
+ "xorl %%edx,%%edx\n\t"
+ "repe; scasl\n\t"
+ "je 1f\n\t"
+ "xorl -4(%%edi),%%eax\n\t"
+ "subl $4,%%edi\n\t"
+ "bsfl %%eax,%%edx\n"
+ "1:\tsubl %%ebx,%%edi\n\t"
+ "shll $3,%%edi\n\t"
+ "addl %%edi,%%edx"
+ :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+ :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
+ return res;
+}
+
+static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+{
+ unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
+ int set = 0, bit = offset & 31, res;
+
+ if (bit) {
+ /*
+		 * Look for a zero bit in the first word
+ */
+ __asm__("bsfl %1,%0\n\t"
+ "jne 1f\n\t"
+ "movl $32, %0\n"
+ "1:"
+ : "=r" (set)
+ : "r" (~(*p >> bit)));
+ if (set < (32 - bit))
+ return set + offset;
+ set = 32 - bit;
+ p++;
+ }
+ /*
+ * No zero yet, search remaining full bytes for a zero
+ */
+ res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
+ return (offset + set + res);
+}
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static __inline__ unsigned long ffz(unsigned long word)
+{
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"r" (~word));
+ return word;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static __inline__ int ffs(int x)
+{
+ int r;
+
+ __asm__("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "g" (x));
+ return r+1;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_BITOPS_H */
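
A typical use of these routines is a simple allocation bitmap: find a clear bit, then claim it. The snippet below is a plain-C, single-threaded stand-in with the same bit numbering (bit 0 is the LSB of word 0); it shows only the calling pattern, not the atomicity the inline assembly provides:

#include <stdio.h>

static int test_and_set_bit_sw(int nr, unsigned int *addr)
{
    unsigned int mask = 1U << (nr & 31);
    unsigned int *p = addr + (nr >> 5);
    int oldbit = (*p & mask) != 0;

    *p |= mask;
    return oldbit;
}

static int find_first_zero_bit_sw(const unsigned int *addr, unsigned size)
{
    unsigned i;

    for (i = 0; i < size; i++)
        if (!(addr[i >> 5] & (1U << (i & 31))))
            return i;
    return size;
}

int main(void)
{
    unsigned int map[2] = { 0, 0 };   /* a 64-slot allocation bitmap */
    int i;

    for (i = 0; i < 3; i++) {
        int slot = find_first_zero_bit_sw(map, 64);

        test_and_set_bit_sw(slot, map);   /* claim the slot */
        printf("allocated slot %d\n", slot);
    }
    return 0;
}
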
diff --git a/linux/src/include/asm-i386/byteorder.h b/linux/src/include/asm-i386/byteorder.h
new file mode 100644
index 0000000..3f40767
--- /dev/null
+++ b/linux/src/include/asm-i386/byteorder.h
@@ -0,0 +1,90 @@
+#ifndef _I386_BYTEORDER_H
+#define _I386_BYTEORDER_H
+
+#undef ntohl
+#undef ntohs
+#undef htonl
+#undef htons
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+/* For avoiding bswap on i386 */
+#ifdef __KERNEL__
+#include <linux/config.h>
+#endif
+
+extern unsigned long int ntohl(unsigned long int);
+extern unsigned short int ntohs(unsigned short int);
+extern unsigned long int htonl(unsigned long int);
+extern unsigned short int htons(unsigned short int);
+
+extern __inline__ unsigned long int __ntohl(unsigned long int);
+extern __inline__ unsigned short int __ntohs(unsigned short int);
+extern __inline__ unsigned long int __constant_ntohl(unsigned long int);
+extern __inline__ unsigned short int __constant_ntohs(unsigned short int);
+
+extern __inline__ unsigned long int
+__ntohl(unsigned long int x)
+{
+#if defined(__KERNEL__) && !defined(CONFIG_M386)
+ __asm__("bswap %0" : "=r" (x) : "0" (x));
+#else
+ __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
+ "rorl $16,%0\n\t" /* swap words */
+ "xchgb %b0,%h0" /* swap higher bytes */
+ :"=q" (x)
+ : "0" (x));
+#endif
+ return x;
+}
+
+#define __constant_ntohl(x) \
+ ((unsigned long int)((((unsigned long int)(x) & 0x000000ffU) << 24) | \
+ (((unsigned long int)(x) & 0x0000ff00U) << 8) | \
+ (((unsigned long int)(x) & 0x00ff0000U) >> 8) | \
+ (((unsigned long int)(x) & 0xff000000U) >> 24)))
+
+extern __inline__ unsigned short int
+__ntohs(unsigned short int x)
+{
+ __asm__("xchgb %b0,%h0" /* swap bytes */
+ : "=q" (x)
+ : "0" (x));
+ return x;
+}
+
+#define __constant_ntohs(x) \
+ ((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
+ (((unsigned short int)(x) & 0xff00) >> 8))) \
+
+#define __htonl(x) __ntohl(x)
+#define __htons(x) __ntohs(x)
+#define __constant_htonl(x) __constant_ntohl(x)
+#define __constant_htons(x) __constant_ntohs(x)
+
+#ifdef __OPTIMIZE__
+# define ntohl(x) \
+(__builtin_constant_p((long)(x)) ? \
+ __constant_ntohl((x)) : \
+ __ntohl((x)))
+# define ntohs(x) \
+(__builtin_constant_p((short)(x)) ? \
+ __constant_ntohs((x)) : \
+ __ntohs((x)))
+# define htonl(x) \
+(__builtin_constant_p((long)(x)) ? \
+ __constant_htonl((x)) : \
+ __htonl((x)))
+# define htons(x) \
+(__builtin_constant_p((short)(x)) ? \
+ __constant_htons((x)) : \
+ __htons((x)))
+#endif
+
+#endif
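
For reference, the constant-folding macros above reduce to an ordinary 32-bit byte swap on this little-endian architecture. A standalone version of that swap, just to show what ntohl/htonl compute:

#include <stdio.h>

/* Same operation as __constant_ntohl(), written as a plain function. */
static unsigned int swap32(unsigned int x)
{
    return ((x & 0x000000ffU) << 24) |
           ((x & 0x0000ff00U) <<  8) |
           ((x & 0x00ff0000U) >>  8) |
           ((x & 0xff000000U) >> 24);
}

int main(void)
{
    unsigned int host = 0x0A000001;   /* 10.0.0.1 as a host-order word */

    printf("host 0x%08x -> network 0x%08x\n", host, swap32(host));
    return 0;
}
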
diff --git a/linux/src/include/asm-i386/cache.h b/linux/src/include/asm-i386/cache.h
new file mode 100644
index 0000000..cea6c85
--- /dev/null
+++ b/linux/src/include/asm-i386/cache.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-i386/cache.h
+ */
+#ifndef __ARCH_I386_CACHE_H
+#define __ARCH_I386_CACHE_H
+
+/* bytes per L1 cache line */
+#if CPU==586 || CPU==686
+#define L1_CACHE_BYTES 32
+#else
+#define L1_CACHE_BYTES 16
+#endif
+
+#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#endif
diff --git a/linux/src/include/asm-i386/checksum.h b/linux/src/include/asm-i386/checksum.h
new file mode 100644
index 0000000..ac49b14
--- /dev/null
+++ b/linux/src/include/asm-i386/checksum.h
@@ -0,0 +1,121 @@
+#ifndef _I386_CHECKSUM_H
+#define _I386_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
+
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+unsigned int csum_partial_copy_fromuser(const char *src, char *dst, int len, int sum);
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
+ * Arnt Gulbrandsen.
+ */
+static inline unsigned short ip_fast_csum(unsigned char * iph,
+ unsigned int ihl) {
+ unsigned int sum;
+
+ __asm__ __volatile__("
+ movl (%1), %0
+ subl $4, %2
+ jbe 2f
+ addl 4(%1), %0
+ adcl 8(%1), %0
+ adcl 12(%1), %0
+1: adcl 16(%1), %0
+ lea 4(%1), %1
+ decl %2
+ jne 1b
+ adcl $0, %0
+ movl %0, %2
+ shrl $16, %0
+ addw %w2, %w0
+ adcl $0, %0
+ notl %0
+2:
+ "
+ /* Since the input registers which are loaded with iph and ipl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl));
+ return(sum);
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static inline unsigned int csum_fold(unsigned int sum)
+{
+ __asm__("
+ addl %1, %0
+ adcl $0xffff, %0
+ "
+ : "=r" (sum)
+ : "r" (sum << 16), "0" (sum & 0xffff0000)
+ );
+ return (~sum) >> 16;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+
+static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum) {
+ __asm__("
+ addl %1, %0
+ adcl %2, %0
+ adcl %3, %0
+ adcl $0, %0
+ "
+ : "=r" (sum)
+ : "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
+ return csum_fold(sum);
+}
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#endif
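
A portable sketch of the same one's-complement algorithm that the assembly above implements: sum 16-bit words, fold the carries back in, then complement. The sample header bytes are a generic IPv4 header example, not data taken from this code:

#include <stdio.h>

static unsigned short ip_csum(const unsigned char *buf, int len)
{
    unsigned long sum = 0;

    while (len > 1) {
        sum += (buf[0] << 8) | buf[1];   /* one 16-bit word */
        buf += 2;
        len -= 2;
    }
    if (len)                             /* odd trailing byte */
        sum += buf[0] << 8;
    while (sum >> 16)                    /* fold carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (unsigned short)~sum;
}

int main(void)
{
    /* A 20-byte IPv4 header with its checksum field zeroed. */
    unsigned char hdr[20] = {
        0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
        0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
        0xac, 0x10, 0x0a, 0x0c
    };

    printf("header checksum: 0x%04x\n", ip_csum(hdr, sizeof hdr)); /* 0xb1e6 */
    return 0;
}
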
diff --git a/linux/src/include/asm-i386/delay.h b/linux/src/include/asm-i386/delay.h
new file mode 100644
index 0000000..2166c4c
--- /dev/null
+++ b/linux/src/include/asm-i386/delay.h
@@ -0,0 +1,18 @@
+#ifndef _I386_DELAY_H
+#define _I386_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines calling functions in arch/i386/lib/delay.c
+ */
+
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) (__builtin_constant_p(n) ? \
+ __const_udelay((n) * 0x10c6ul) : \
+ __udelay(n))
+
+#endif /* defined(_I386_DELAY_H) */
diff --git a/linux/src/include/asm-i386/dma.h b/linux/src/include/asm-i386/dma.h
new file mode 100644
index 0000000..c323d40
--- /dev/null
+++ b/linux/src/include/asm-i386/dma.h
@@ -0,0 +1,271 @@
+/* $Id: dma.h,v 1.1 1999/04/26 05:55:43 tb Exp $
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+#include <asm/io.h> /* need byte IO */
+
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
+#define MAX_DMA_CHANNELS 8
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS 0x1000000
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
+
+#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr<=3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr<=3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
+{
+ switch(dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_PAGE_3);
+ break;
+ case 5:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+ break;
+ case 6:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+ break;
+ case 7:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+ break;
+ }
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ set_dma_page(dmanr, a>>16);
+ if (dmanr <= 3) {
+ dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ } else {
+ dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
+ }
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
+ } else {
+ dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
+ : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr<=3)? count : (count<<1);
+}
+
+
+/* These are in kernel/dma.c: */
+extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+extern void free_dma(unsigned int dmanr); /* release it again */
+
+
+#endif /* _ASM_DMA_H */
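
To make the address-mapping table above concrete, this standalone snippet computes the page, base-address and count register values that set_dma_addr()/set_dma_count() would program for an 8-bit channel (0-3); the address and length are arbitrary example values:

#include <stdio.h>

int main(void)
{
    unsigned int addr  = 0x00234567;   /* physical address, below 16MB */
    unsigned int count = 512;          /* bytes to transfer */

    unsigned char page     = (addr >> 16) & 0xff;  /* P7..P0 */
    unsigned char addr_lsb = addr & 0xff;          /* A7..A0  */
    unsigned char addr_msb = (addr >> 8) & 0xff;   /* A15..A8 */
    unsigned int  hw_count = count - 1;            /* controller counts N-1 */

    printf("page=0x%02x addr=0x%02x%02x count=0x%04x\n",
           page, addr_msb, addr_lsb, hw_count);

    /* transfers must not cross a physical 64K boundary on channels 0-3 */
    if ((addr & 0xffff) + count > 0x10000)
        printf("warning: transfer crosses a 64K boundary\n");
    return 0;
}
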
diff --git a/linux/src/include/asm-i386/errno.h b/linux/src/include/asm-i386/errno.h
new file mode 100644
index 0000000..7cf599f
--- /dev/null
+++ b/linux/src/include/asm-i386/errno.h
@@ -0,0 +1,132 @@
+#ifndef _I386_ERRNO_H
+#define _I386_ERRNO_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+#endif
diff --git a/linux/src/include/asm-i386/fcntl.h b/linux/src/include/asm-i386/fcntl.h
new file mode 100644
index 0000000..369ac51
--- /dev/null
+++ b/linux/src/include/asm-i386/fcntl.h
@@ -0,0 +1,59 @@
+#ifndef _I386_FCNTL_H
+#define _I386_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+ located on an ext2 file system */
+#define O_ACCMODE 0003
+#define O_RDONLY 00
+#define O_WRONLY 01
+#define O_RDWR 02
+#define O_CREAT 0100 /* not fcntl */
+#define O_EXCL 0200 /* not fcntl */
+#define O_NOCTTY 0400 /* not fcntl */
+#define O_TRUNC 01000 /* not fcntl */
+#define O_APPEND 02000
+#define O_NONBLOCK 04000
+#define O_NDELAY O_NONBLOCK
+#define O_SYNC 010000
+#define FASYNC 020000 /* fcntl, for BSD compatibility */
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get f_flags */
+#define F_SETFD 2 /* set f_flags */
+#define F_GETFL 3 /* more flags (cloexec) */
+#define F_SETFL 4
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+struct flock {
+ short l_type;
+ short l_whence;
+ off_t l_start;
+ off_t l_len;
+ pid_t l_pid;
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/floppy.h b/linux/src/include/asm-i386/floppy.h
new file mode 100644
index 0000000..033a20f
--- /dev/null
+++ b/linux/src/include/asm-i386/floppy.h
@@ -0,0 +1,289 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef __ASM_I386_FLOPPY_H
+#define __ASM_I386_FLOPPY_H
+
+
+#define SW fd_routine[use_virtual_dma&1]
+
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(port,value) outb_p(port,value)
+
+#define fd_enable_dma() SW._enable_dma(FLOPPY_DMA)
+#define fd_disable_dma() SW._disable_dma(FLOPPY_DMA)
+#define fd_request_dma() SW._request_dma(FLOPPY_DMA,"floppy")
+#define fd_free_dma() SW._free_dma(FLOPPY_DMA)
+#define fd_clear_dma_ff() SW._clear_dma_ff(FLOPPY_DMA)
+#define fd_set_dma_mode(mode) SW._set_dma_mode(FLOPPY_DMA,mode)
+#define fd_set_dma_addr(addr) SW._set_dma_addr(FLOPPY_DMA,addr)
+#define fd_set_dma_count(count) SW._set_dma_count(FLOPPY_DMA,count)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_cacheflush(addr,size) /* nothing */
+#define fd_request_irq() SW._request_irq(FLOPPY_IRQ, floppy_interrupt, \
+ SA_INTERRUPT|SA_SAMPLE_RANDOM, \
+ "floppy", NULL)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
+#define fd_dma_mem_free(addr,size) SW._dma_mem_free(addr,size)
+
+static int virtual_dma_count=0;
+static int virtual_dma_residue=0;
+static unsigned long virtual_dma_addr=0;
+static int virtual_dma_mode=0;
+static int doing_pdma=0;
+
+static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
+{
+ register unsigned char st;
+
+#undef TRACE_FLPY_INT
+#undef NO_FLOPPY_ASSEMBLER
+
+#ifdef TRACE_FLPY_INT
+ static int calls=0;
+ static int bytes=0;
+ static int dma_wait=0;
+#endif
+ if(!doing_pdma) {
+ floppy_interrupt(irq, dev_id, regs);
+ return;
+ }
+
+#ifdef TRACE_FLPY_INT
+ if(!calls)
+ bytes = virtual_dma_count;
+#endif
+
+#ifndef NO_FLOPPY_ASSEMBLER
+ __asm__ ("testl %1,%1\n"
+ "je 3f\n"
+ "1: inb %w4,%b0\n"
+ "andb $160,%b0\n"
+ "cmpb $160,%b0\n"
+ "jne 2f\n"
+ "incw %w4\n"
+ "testl %3,%3\n"
+ "jne 4f\n"
+ "inb %w4,%b0\n"
+ "movb %0,(%2)\n"
+ "jmp 5f\n"
+ "4: movb (%2),%0\n"
+ "outb %b0,%w4\n"
+ "5: decw %w4\n"
+ "outb %0,$0x80\n"
+ "decl %1\n"
+ "incl %2\n"
+ "testl %1,%1\n"
+ "jne 1b\n"
+ "3: inb %w4,%b0\n"
+ "2:\n"
+ : "=a" ((char) st),
+ "=c" ((long) virtual_dma_count),
+ "=S" ((long) virtual_dma_addr)
+ : "b" ((long) virtual_dma_mode),
+ "d" ((short) virtual_dma_port+4),
+ "1" ((long) virtual_dma_count),
+ "2" ((long) virtual_dma_addr));
+#else
+ {
+ register int lcount;
+ register char *lptr;
+
+ st = 1;
+ for(lcount=virtual_dma_count, lptr=(char *)virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st=inb(virtual_dma_port+4) & 0xa0 ;
+ if(st != 0xa0)
+ break;
+ if(virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port+5);
+ else
+ *lptr = inb_p(virtual_dma_port+5);
+ st = inb(virtual_dma_port+4);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = (int) lptr;
+ }
+#endif
+
+#ifdef TRACE_FLPY_INT
+ calls++;
+#endif
+ if(st == 0x20)
+ return;
+ if(!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+#ifdef TRACE_FLPY_INT
+ printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ virtual_dma_count, virtual_dma_residue, calls, bytes,
+ dma_wait);
+ calls = 0;
+ dma_wait=0;
+#endif
+ doing_pdma = 0;
+ floppy_interrupt(irq, dev_id, regs);
+ return;
+ }
+#ifdef TRACE_FLPY_INT
+ if(!virtual_dma_count)
+ dma_wait++;
+#endif
+}
+
+static void vdma_enable_dma(unsigned int dummy)
+{
+ doing_pdma = 1;
+}
+
+static void vdma_disable_dma(unsigned int dummy)
+{
+ doing_pdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count=0;
+}
+
+static int vdma_request_dma(unsigned int dmanr, const char * device_id)
+{
+ return 0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+static void vdma_set_dma_mode(unsigned int dummy,char mode)
+{
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+}
+
+static void vdma_set_dma_addr(unsigned int dummy,unsigned int addr)
+{
+ virtual_dma_addr = addr;
+}
+
+static void vdma_set_dma_count(unsigned int dummy,unsigned int count)
+{
+ virtual_dma_count = count;
+ virtual_dma_residue = 0;
+}
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int vdma_request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id)
+{
+ return request_irq(irq, floppy_hardint,SA_INTERRUPT,device, dev_id);
+
+}
+
+static unsigned long dma_mem_alloc(unsigned long size)
+{
+ return __get_dma_pages(GFP_KERNEL,__get_order(size));
+}
+
+static void dma_mem_free(unsigned long addr, unsigned long size)
+{
+ free_pages(addr, __get_order(size));
+}
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{
+ return (unsigned long) vmalloc(size);
+}
+
+static void vdma_mem_free(unsigned long addr, unsigned long size)
+{
+ return vfree((void *)addr);
+}
+
+struct fd_routine_l {
+ void (*_enable_dma)(unsigned int dummy);
+ void (*_disable_dma)(unsigned int dummy);
+ int (*_request_dma)(unsigned int dmanr, const char * device_id);
+ void (*_free_dma)(unsigned int dmanr);
+ void (*_clear_dma_ff)(unsigned int dummy);
+ void (*_set_dma_mode)(unsigned int dummy, char mode);
+ void (*_set_dma_addr)(unsigned int dummy, unsigned int addr);
+ void (*_set_dma_count)(unsigned int dummy, unsigned int count);
+ int (*_get_dma_residue)(unsigned int dummy);
+ int (*_request_irq)(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id);
+ unsigned long (*_dma_mem_alloc) (unsigned long size);
+ void (*_dma_mem_free)(unsigned long addr, unsigned long size);
+} fd_routine[] = {
+ {
+ enable_dma,
+ disable_dma,
+ request_dma,
+ free_dma,
+ clear_dma_ff,
+ set_dma_mode,
+ set_dma_addr,
+ set_dma_count,
+ get_dma_residue,
+ request_irq,
+ dma_mem_alloc,
+ dma_mem_free
+ },
+ {
+ vdma_enable_dma,
+ vdma_disable_dma,
+ vdma_request_dma,
+ vdma_nop,
+ vdma_nop,
+ vdma_set_dma_mode,
+ vdma_set_dma_addr,
+ vdma_set_dma_count,
+ vdma_get_dma_residue,
+ vdma_request_irq,
+ vdma_mem_alloc,
+ vdma_mem_free
+ }
+};
+
+__inline__ void virtual_dma_init(void)
+{
+ /* Nothing to do on an i386 */
+}
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+#define FLOPPY0_TYPE ((CMOS_READ(0x10) >> 4) & 15)
+#define FLOPPY1_TYPE (CMOS_READ(0x10) & 15)
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * Went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define CROSS_64KB(a,s) (((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64) && ! (use_virtual_dma & 1))
+
+#endif /* __ASM_I386_FLOPPY_H */
diff --git a/linux/src/include/asm-i386/hardirq.h b/linux/src/include/asm-i386/hardirq.h
new file mode 100644
index 0000000..10dae41
--- /dev/null
+++ b/linux/src/include/asm-i386/hardirq.h
@@ -0,0 +1,66 @@
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/tasks.h>
+
+extern unsigned int local_irq_count[NR_CPUS];
+extern unsigned int local_bh_count[NR_CPUS];
+
+/*
+ * Are we in an interrupt context? Either doing bottom half
+ * or hardware interrupt processing?
+ */
+#define in_interrupt() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+
+#ifndef __SMP__
+
+#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+#define hardirq_endlock(cpu) do { } while (0)
+
+#define hardirq_enter(cpu) (local_irq_count[cpu]++)
+#define hardirq_exit(cpu) (local_irq_count[cpu]--)
+
+#define synchronize_irq() barrier()
+
+#else
+
+#include <asm/atomic.h>
+
+extern unsigned char global_irq_holder;
+extern unsigned volatile int global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore.. */
+ if (global_irq_holder == (unsigned char) cpu) {
+ global_irq_holder = NO_PROC_ID;
+ clear_bit(0,&global_irq_lock);
+ }
+}
+
+static inline void hardirq_enter(int cpu)
+{
+ ++local_irq_count[cpu];
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu)
+{
+ atomic_dec(&global_irq_count);
+ --local_irq_count[cpu];
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ return !atomic_read(&global_irq_count) && !test_bit(0,&global_irq_lock);
+}
+
+#define hardirq_endlock(cpu) do { } while (0)
+
+extern void synchronize_irq(void);
+
+#endif /* __SMP__ */
+
+#endif /* __ASM_HARDIRQ_H */
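
A toy, single-CPU userspace analogue of the counters above, only to show why hardirq_enter()/hardirq_exit() make in_interrupt() report the right context; the fake handler and the decision it guards are invented for the example:

#include <stdio.h>

static unsigned int local_irq_count_0;
static unsigned int local_bh_count_0;

#define in_interrupt()  (local_irq_count_0 + local_bh_count_0 != 0)
#define hardirq_enter() (local_irq_count_0++)
#define hardirq_exit()  (local_irq_count_0--)

static void do_allocation(void)
{
    /* A real caller would use this to pick a non-sleeping code path. */
    printf("in_interrupt() = %d\n", in_interrupt());
}

static void fake_interrupt_handler(void)
{
    hardirq_enter();
    do_allocation();       /* prints 1: interrupt context */
    hardirq_exit();
}

int main(void)
{
    do_allocation();       /* prints 0: process context */
    fake_interrupt_handler();
    return 0;
}
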
diff --git a/linux/src/include/asm-i386/io.h b/linux/src/include/asm-i386/io.h
new file mode 100644
index 0000000..e5c0744
--- /dev/null
+++ b/linux/src/include/asm-i386/io.h
@@ -0,0 +1,216 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) trying to avoid writing the same thing
+ * over and over again with slight variations and possibly making a
+ * mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+#include <machine/vm_param.h>
+#include <intel/pmap.h>
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
+#else
+#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
+#endif
+
+#ifdef REALLY_SLOW_IO
+#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
+#else
+#define SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ * These are trivial on the 1:1 Linux/i386 mapping (but if we ever
+ * make the kernel segment mapped at 0, we need to do translation
+ * on the i386 as well)
+ */
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return (unsigned long) kvtophys((vm_offset_t) address);
+}
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return (void *) phystokv(address);
+}
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+#define readb(addr) (*(volatile unsigned char *) (addr))
+#define readw(addr) (*(volatile unsigned short *) (addr))
+#define readl(addr) (*(volatile unsigned int *) (addr))
+
+#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))
+
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+
+/*
+ * Again, i386 does not require mem IO specific function.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
+
+/*
+ * Talk about misusing macros..
+ */
+
+#define __OUT1(s,x) \
+static inline void __out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
+__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
+__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }
+
+#define __IN1(s) \
+static inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
+__IN1(s##c) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
+__IN1(s##c_p) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }
+
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+/* __IN(b,"b","0" (0)) */
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+/* __IN(w,"w","0" (0)) */
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
+/*
+ * Note that due to the way __builtin_constant_p() works, you
+ * - can't use it inside an inline function (it will never be true)
+ * - don't have to worry about side effects within the __builtin..
+ */
+#define outb(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outbc((val),(port)) : \
+ __outb((val),(port)))
+
+#define inb(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inbc(port) : \
+ __inb(port))
+
+#define outb_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outbc_p((val),(port)) : \
+ __outb_p((val),(port)))
+
+#define inb_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inbc_p(port) : \
+ __inb_p(port))
+
+#define outw(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outwc((val),(port)) : \
+ __outw((val),(port)))
+
+#define inw(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inwc(port) : \
+ __inw(port))
+
+#define outw_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outwc_p((val),(port)) : \
+ __outw_p((val),(port)))
+
+#define inw_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inwc_p(port) : \
+ __inw_p(port))
+
+#define outl(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outlc((val),(port)) : \
+ __outl((val),(port)))
+
+#define inl(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inlc(port) : \
+ __inl(port))
+
+#define outl_p(val,port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __outlc_p((val),(port)) : \
+ __outl_p((val),(port)))
+
+#define inl_p(port) \
+((__builtin_constant_p((port)) && (port) < 256) ? \
+ __inlc_p(port) : \
+ __inl_p(port))
+
+#endif
diff --git a/linux/src/include/asm-i386/ioctl.h b/linux/src/include/asm-i386/ioctl.h
new file mode 100644
index 0000000..44df7b0
--- /dev/null
+++ b/linux/src/include/asm-i386/ioctl.h
@@ -0,0 +1,75 @@
+/* $Id: ioctl.h,v 1.1 1999/04/26 05:55:47 tb Exp $
+ *
+ * linux/ioctl.h for Linux by H.H. Bergman.
+ */
+
+#ifndef _ASMI386_IOCTL_H
+#define _ASMI386_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The i386 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _ASMI386_IOCTL_H */
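
As a usage sketch, here is how a request number is built and decoded with this encoding. The shift values are repeated so the example stands alone; FOO_GETCFG and its struct are hypothetical, not part of any real driver:

#include <stdio.h>

#define _IOC_NRSHIFT    0
#define _IOC_TYPESHIFT  8
#define _IOC_SIZESHIFT  16
#define _IOC_DIRSHIFT   30
#define _IOC_READ       2U

#define _IOC(dir,type,nr,size) \
        (((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | \
         ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT))
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))

struct foo_config { int speed; int duplex; };        /* hypothetical */
#define FOO_GETCFG _IOR('f', 3, struct foo_config)   /* hypothetical */

int main(void)
{
    unsigned int nr = FOO_GETCFG;

    printf("dir=%u type='%c' nr=%u size=%u\n",
           (nr >> _IOC_DIRSHIFT) & 3,
           (char)((nr >> _IOC_TYPESHIFT) & 0xff),
           (nr >> _IOC_NRSHIFT) & 0xff,
           (nr >> _IOC_SIZESHIFT) & 0x3fff);
    return 0;
}
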
diff --git a/linux/src/include/asm-i386/ioctls.h b/linux/src/include/asm-i386/ioctls.h
new file mode 100644
index 0000000..60e0806
--- /dev/null
+++ b/linux/src/include/asm-i386/ioctls.h
@@ -0,0 +1,74 @@
+#ifndef __ARCH_I386_IOCTLS_H__
+#define __ARCH_I386_IOCTLS_H__
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif
diff --git a/linux/src/include/asm-i386/irq.h b/linux/src/include/asm-i386/irq.h
new file mode 100644
index 0000000..c75744a
--- /dev/null
+++ b/linux/src/include/asm-i386/irq.h
@@ -0,0 +1,421 @@
+#ifndef _ASM_IRQ_H
+#define _ASM_IRQ_H
+
+/*
+ * linux/include/asm/irq.h
+ *
+ * (C) 1992, 1993 Linus Torvalds
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+
+#define NR_IRQS 16
+
+#define TIMER_IRQ 0
+
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define SAVE_ALL \
+ "cld\n\t" \
+ "push %gs\n\t" \
+ "push %fs\n\t" \
+ "push %es\n\t" \
+ "push %ds\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %ebp\n\t" \
+ "pushl %edi\n\t" \
+ "pushl %esi\n\t" \
+ "pushl %edx\n\t" \
+ "pushl %ecx\n\t" \
+ "pushl %ebx\n\t" \
+ "movl $" STR(KERNEL_DS) ",%edx\n\t" \
+ "mov %dx,%ds\n\t" \
+ "mov %dx,%es\n\t" \
+ "mov %dx,%gs\n\t" \
+ "movl $" STR(USER_DS) ",%edx\n\t" \
+ "mov %dx,%fs\n\t" \
+ "movl $0,%edx\n\t" \
+ "movl %edx,%db7\n\t"
+
+/*
+ * SAVE_MOST/RESTORE_MOST is used for the faster version of IRQ handlers,
+ * installed by using the SA_INTERRUPT flag. These kinds of IRQ's don't
+ * call the routines that do signal handling etc on return, and can have
+ * more relaxed register-saving etc. They are also atomic, and are thus
+ * suited for small, fast interrupts like the serial lines or the harddisk
+ * drivers, which don't actually need signal handling etc.
+ *
+ * Also note that we actually save only those registers that are used in
+ * C subroutines (%eax, %edx and %ecx), so if you do something weird,
+ * you're on your own. The only segments that are saved (not counting the
+ * automatic stack and code segment handling) are %ds and %es, and they
+ * point to kernel space. No messing around with %fs here.
+ */
+#define SAVE_MOST \
+ "cld\n\t" \
+ "push %es\n\t" \
+ "push %ds\n\t" \
+ "pushl %eax\n\t" \
+ "pushl %edx\n\t" \
+ "pushl %ecx\n\t" \
+ "movl $" STR(KERNEL_DS) ",%edx\n\t" \
+ "mov %dx,%ds\n\t" \
+ "mov %dx,%es\n\t"
+
+#define RESTORE_MOST \
+ "popl %ecx\n\t" \
+ "popl %edx\n\t" \
+ "popl %eax\n\t" \
+ "pop %ds\n\t" \
+ "pop %es\n\t" \
+ "iret"
+
+/*
+ * The "inb" instructions are not needed, but seem to change the timings
+ * a bit - without them it seems that the harddisk driver won't work on
+ * all hardware. Arghh.
+ */
+#define ACK_FIRST(mask,nr) \
+ "inb $0x21,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_21)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
+ "outb %al,$0x21\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0x20\n\t"
+
+#define ACK_SECOND(mask,nr) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\torb $" #mask ","SYMBOL_NAME_STR(cache_A1)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
+ "outb %al,$0xA1\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0xA0\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\toutb %al,$0x20\n\t"
+
+/* do not modify the ISR nor the cache_A1 variable */
+#define MSGACK_SECOND(mask,nr) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tmovb $0x20,%al\n\t" \
+ "outb %al,$0xA0\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\toutb %al,$0x20\n\t"
+
+#define UNBLK_FIRST(mask) \
+ "inb $0x21,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_21)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_21)",%al\n\t" \
+ "outb %al,$0x21\n\t"
+
+#define UNBLK_SECOND(mask) \
+ "inb $0xA1,%al\n\t" \
+ "jmp 1f\n" \
+ "1:\tjmp 1f\n" \
+ "1:\tandb $~(" #mask "),"SYMBOL_NAME_STR(cache_A1)"\n\t" \
+ "movb "SYMBOL_NAME_STR(cache_A1)",%al\n\t" \
+ "outb %al,$0xA1\n\t"
+
+#define IRQ_NAME2(nr) nr##_interrupt(void)
+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
+#define FAST_IRQ_NAME(nr) IRQ_NAME2(fast_IRQ##nr)
+#define BAD_IRQ_NAME(nr) IRQ_NAME2(bad_IRQ##nr)
+
+#ifdef __SMP__
+
+#ifndef __SMP_PROF__
+#define SMP_PROF_INT_SPINS
+#define SMP_PROF_IPI_CNT
+#else
+#define SMP_PROF_INT_SPINS "incl "SYMBOL_NAME_STR(smp_spins)"(,%eax,4)\n\t"
+#define SMP_PROF_IPI_CNT "incl "SYMBOL_NAME_STR(ipi_count)"\n\t"
+#endif
+
+#define GET_PROCESSOR_ID \
+ "movl "SYMBOL_NAME_STR(apic_reg)", %edx\n\t" \
+ "movl 32(%edx), %eax\n\t" \
+ "shrl $24,%eax\n\t" \
+ "andl $0x0F,%eax\n"
+
+#define ENTER_KERNEL \
+ "pushl %eax\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl %ecx\n\t" \
+ "pushl %edx\n\t" \
+ "pushfl\n\t" \
+ "cli\n\t" \
+ "movl $6000, %ebx\n\t" \
+ "movl "SYMBOL_NAME_STR(smp_loops_per_tick)", %ecx\n\t" \
+ GET_PROCESSOR_ID \
+ "btsl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \
+ "1: " \
+ "lock\n\t" \
+ "btsl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "jnc 3f\n\t" \
+ "cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \
+ "je 4f\n\t" \
+ "cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \
+ "jne 2f\n\t" \
+ "movb $1, "SYMBOL_NAME_STR(smp_blocked_interrupt_pending)"\n\t" \
+ "2: " \
+ SMP_PROF_INT_SPINS \
+ "btl %eax, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
+ "jnc 5f\n\t" \
+ "lock\n\t" \
+ "btrl %eax, "SYMBOL_NAME_STR(smp_invalidate_needed)"\n\t" \
+ "jnc 5f\n\t" \
+ "movl %cr3,%edx\n\t" \
+ "movl %edx,%cr3\n" \
+ "5: btl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "jnc 1b\n\t" \
+ "cmpb "SYMBOL_NAME_STR(active_kernel_processor)", %al\n\t" \
+ "je 4f\n\t" \
+ "decl %ecx\n\t" \
+ "jne 2b\n\t" \
+ "decl %ebx\n\t" \
+ "jne 6f\n\t" \
+ "call "SYMBOL_NAME_STR(irq_deadlock_detected)"\n\t" \
+ "6: movl "SYMBOL_NAME_STR(smp_loops_per_tick)", %ecx\n\t" \
+ "cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \
+ "jne 2b\n\t" \
+ "incl "SYMBOL_NAME_STR(jiffies)"\n\t" \
+ "jmp 2b\n\t" \
+ "3: " \
+ "movb %al, "SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
+ "4: " \
+ "incl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
+ "cmpb "SYMBOL_NAME_STR(boot_cpu_id)", %al\n\t" \
+ "jne 7f\n\t" \
+ "movb $0, "SYMBOL_NAME_STR(smp_blocked_interrupt_pending)"\n\t" \
+ "7: " \
+ "popfl\n\t" \
+ "popl %edx\n\t" \
+ "popl %ecx\n\t" \
+ "popl %ebx\n\t" \
+ "popl %eax\n\t"
+
+#define LEAVE_KERNEL \
+ GET_PROCESSOR_ID \
+ "btrl $" STR(SMP_FROM_INT) ","SYMBOL_NAME_STR(smp_proc_in_lock)"(,%eax,4)\n\t" \
+ "pushfl\n\t" \
+ "cli\n\t" \
+ "decl "SYMBOL_NAME_STR(kernel_counter)"\n\t" \
+ "jnz 1f\n\t" \
+ "movb "SYMBOL_NAME_STR(saved_active_kernel_processor)",%al\n\t" \
+ "movb %al,"SYMBOL_NAME_STR(active_kernel_processor)"\n\t" \
+ "cmpb $" STR (NO_PROC_ID) ",%al\n\t" \
+ "jne 1f\n\t" \
+ "lock\n\t" \
+ "btrl $0, "SYMBOL_NAME_STR(kernel_flag)"\n\t" \
+ "1: " \
+ "popfl\n\t"
+
+
+/*
+ * the syscall count inc is a gross hack because ret_from_syscall is used by both irq and
+ * syscall return paths (urghh).
+ */
+
+#define BUILD_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n" \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ LEAVE_KERNEL \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ LEAVE_KERNEL \
+ RESTORE_MOST);
+
+
+#define BUILD_TIMER_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n");
+
+
+/*
+ * Message pass must be a fast IRQ..
+ */
+
+#define BUILD_MSGIRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ MSGACK_##chip(mask,(nr&7)) \
+ SMP_PROF_IPI_CNT \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask,(nr&7)) \
+ RESTORE_MOST);
+
+#define BUILD_RESCHEDIRQ(nr) \
+asmlinkage void IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ENTER_KERNEL \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(smp_reschedule_irq)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "incl "SYMBOL_NAME_STR(syscall_count)"\n\t" \
+ "jmp ret_from_sys_call\n");
+#else
+
+#define BUILD_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "sti\n\t" \
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "jmp ret_from_sys_call\n" \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_fast_IRQ)"\n\t" \
+ "addl $4,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ RESTORE_MOST \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+ SAVE_MOST \
+ ACK_##chip(mask,(nr&7)) \
+ RESTORE_MOST);
+
+#define BUILD_TIMER_IRQ(chip,nr,mask) \
+asmlinkage void IRQ_NAME(nr); \
+asmlinkage void FAST_IRQ_NAME(nr); \
+asmlinkage void BAD_IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+SYMBOL_NAME_STR(fast_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(bad_IRQ) #nr "_interrupt:\n\t" \
+SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $-"#nr"-2\n\t" \
+ SAVE_ALL \
+ ACK_##chip(mask,(nr&7)) \
+ "incl "SYMBOL_NAME_STR(intr_count)"\n\t"\
+ "movl %esp,%ebx\n\t" \
+ "pushl %ebx\n\t" \
+ "pushl $" #nr "\n\t" \
+ "call "SYMBOL_NAME_STR(do_IRQ)"\n\t" \
+ "addl $8,%esp\n\t" \
+ "cli\n\t" \
+ UNBLK_##chip(mask) \
+ "decl "SYMBOL_NAME_STR(intr_count)"\n\t" \
+ "jmp ret_from_sys_call\n");
+
+#endif
+#endif
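The BUILD_IRQ/BUILD_TIMER_IRQ macros above stamp out three entry stubs per interrupt, whose symbol names come from the IRQ_NAME token-pasting helpers. The assembly bodies only make sense inside the kernel, but the name generation can be shown in isolation; the sketch below is a minimal standalone illustration of that expansion, with made-up handler bodies.

#include <stdio.h>

/* Same pattern as the header: paste the IRQ number into a function name. */
#define IRQ_NAME2(nr)     nr##_interrupt(void)
#define IRQ_NAME(nr)      IRQ_NAME2(IRQ##nr)
#define FAST_IRQ_NAME(nr) IRQ_NAME2(fast_IRQ##nr)

/* IRQ_NAME(3) expands to IRQ3_interrupt(void), and so on. */
void IRQ_NAME(3)      { puts("slow path for IRQ 3"); }
void FAST_IRQ_NAME(3) { puts("fast path for IRQ 3"); }

int main(void)
{
	IRQ3_interrupt();       /* the function defined via IRQ_NAME(3) */
	fast_IRQ3_interrupt();  /* the one defined via FAST_IRQ_NAME(3) */
	return 0;
}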
diff --git a/linux/src/include/asm-i386/math_emu.h b/linux/src/include/asm-i386/math_emu.h
new file mode 100644
index 0000000..92083a5
--- /dev/null
+++ b/linux/src/include/asm-i386/math_emu.h
@@ -0,0 +1,57 @@
+#ifndef _I386_MATH_EMU_H
+#define _I386_MATH_EMU_H
+
+#include <asm/sigcontext.h>
+
+void restore_i387_soft(struct _fpstate *buf);
+struct _fpstate * save_i387_soft(struct _fpstate * buf);
+
+struct fpu_reg {
+ char sign;
+ char tag;
+ long exp;
+ unsigned sigl;
+ unsigned sigh;
+};
+
+
+/* This structure matches the layout of the data saved to the stack
+ following a device-not-present interrupt, part of it saved
+ automatically by the 80386/80486.
+ */
+struct info {
+ long ___orig_eip;
+ long ___ret_from_system_call;
+ long ___ebx;
+ long ___ecx;
+ long ___edx;
+ long ___esi;
+ long ___edi;
+ long ___ebp;
+ long ___eax;
+ long ___ds;
+ long ___es;
+ long ___fs;
+ long ___gs;
+ long ___orig_eax;
+ long ___eip;
+ long ___cs;
+ long ___eflags;
+ long ___esp;
+ long ___ss;
+ long ___vm86_es; /* This and the following only in vm86 mode */
+ long ___vm86_ds;
+ long ___vm86_fs;
+ long ___vm86_gs;
+};
+
+/* Interface for converting data between the emulator format
+ * and the hardware format. Used for core dumping and for
+ * ptrace(2) */
+void hardreg_to_softreg(const char hardreg[10],
+ struct fpu_reg *soft_reg);
+
+void softreg_to_hardreg(const struct fpu_reg *rp, char d[10],
+ long int control_word);
+
+#endif
diff --git a/linux/src/include/asm-i386/page.h b/linux/src/include/asm-i386/page.h
new file mode 100644
index 0000000..f315634
--- /dev/null
+++ b/linux/src/include/asm-i386/page.h
@@ -0,0 +1,62 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map.. */
+#define PAGE_OFFSET 0
+#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
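The STRICT_MM_TYPECHECKS branch above wraps each page-table entry in a one-member struct precisely so the compiler will reject code that mixes, say, a pgd_t where a pte_t is expected. A small standalone sketch of that effect, together with PAGE_ALIGN, follows; the install_pte helper is invented for the example.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

/* One-member structs: pte_t and pgd_t become distinct types. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
#define __pte(x)   ((pte_t) { (x) })
#define pte_val(x) ((x).pte)

static void install_pte(pte_t e) { printf("pte = %#lx\n", pte_val(e)); }

int main(void)
{
	pte_t e = __pte(0x1007);
	pgd_t g = { 0x2003 };
	(void)g;

	install_pte(e);
	/* install_pte(g);  -- with the struct typedefs this is a compile
	 * error; with the plain unsigned long variant it would silently
	 * pass the wrong kind of entry. */
	printf("PAGE_ALIGN(0x%lx) = 0x%lx\n", 0x12345UL, PAGE_ALIGN(0x12345UL));
	return 0;
}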
diff --git a/linux/src/include/asm-i386/param.h b/linux/src/include/asm-i386/param.h
new file mode 100644
index 0000000..f821b86
--- /dev/null
+++ b/linux/src/include/asm-i386/param.h
@@ -0,0 +1,20 @@
+#ifndef _ASMi386_PARAM_H
+#define _ASMi386_PARAM_H
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#define EXEC_PAGESIZE 4096
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif
diff --git a/linux/src/include/asm-i386/posix_types.h b/linux/src/include/asm-i386/posix_types.h
new file mode 100644
index 0000000..6a04605
--- /dev/null
+++ b/linux/src/include/asm-i386/posix_types.h
@@ -0,0 +1,63 @@
+#ifndef __ARCH_I386_POSIX_TYPES_H
+#define __ARCH_I386_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned short __kernel_dev_t;
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+typedef struct {
+#if defined(__KERNEL__) || defined(__USE_ALL)
+ int val[2];
+#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+ int __val[2];
+#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
+} __kernel_fsid_t;
+
+#undef __FD_SET
+#define __FD_SET(fd,fdsetp) \
+ __asm__ __volatile__("btsl %1,%0": \
+ "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef __FD_CLR
+#define __FD_CLR(fd,fdsetp) \
+ __asm__ __volatile__("btrl %1,%0": \
+ "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+
+#undef __FD_ISSET
+#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
+ unsigned char __result; \
+ __asm__ __volatile__("btl %1,%2 ; setb %0" \
+ :"=q" (__result) :"r" ((int) (fd)), \
+ "m" (*(__kernel_fd_set *) (fdsetp))); \
+ __result; }))
+
+#undef __FD_ZERO
+#define __FD_ZERO(fdsetp) \
+ __asm__ __volatile__("cld ; rep ; stosl" \
+ :"=m" (*(__kernel_fd_set *) (fdsetp)) \
+ :"a" (0), "c" (__FDSET_LONGS), \
+ "D" ((__kernel_fd_set *) (fdsetp)) :"cx","di")
+
+#endif
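The __FD_SET/__FD_CLR/__FD_ISSET macros above are thin wrappers around the btsl/btrl/btl bit instructions. As a plain-C approximation of what those instructions do to the descriptor bitmap, here is a sketch that uses an assumed array-of-longs layout rather than the real __kernel_fd_set type.

#include <stdio.h>

#define FDSET_LONGS 8   /* assumed size, for this sketch only */
typedef struct { unsigned long bits[FDSET_LONGS]; } fdset_model;

/* Portable equivalents of the bit-test/set/clear operations. */
static void fd_set_bit(int fd, fdset_model *s)
{ s->bits[fd / (8 * sizeof(long))] |=  (1UL << (fd % (8 * sizeof(long)))); }

static void fd_clr_bit(int fd, fdset_model *s)
{ s->bits[fd / (8 * sizeof(long))] &= ~(1UL << (fd % (8 * sizeof(long)))); }

static int fd_test_bit(int fd, const fdset_model *s)
{ return (s->bits[fd / (8 * sizeof(long))] >> (fd % (8 * sizeof(long)))) & 1; }

int main(void)
{
	fdset_model s = { { 0 } };

	fd_set_bit(5, &s);
	printf("fd 5 set? %d\n", fd_test_bit(5, &s));   /* 1 */
	fd_clr_bit(5, &s);
	printf("fd 5 set? %d\n", fd_test_bit(5, &s));   /* 0 */
	return 0;
}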
diff --git a/linux/src/include/asm-i386/processor.h b/linux/src/include/asm-i386/processor.h
new file mode 100644
index 0000000..b067940
--- /dev/null
+++ b/linux/src/include/asm-i386/processor.h
@@ -0,0 +1,204 @@
+/*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+#ifndef __ASM_I386_PROCESSOR_H
+#define __ASM_I386_PROCESSOR_H
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+
+/*
+ * System setup and hardware bug flags..
+ * [Note we don't test the 386 multiply bug or popad bug]
+ */
+
+extern char hard_math;
+extern char x86; /* lower 4 bits */
+extern char x86_vendor_id[13];
+extern char x86_model; /* lower 4 bits */
+extern char x86_mask; /* lower 4 bits */
+extern int x86_capability; /* field of flags */
+extern int fdiv_bug;
+extern char ignore_irq13;
+extern char wp_works_ok; /* doesn't work on a 386 */
+extern char hlt_works_ok; /* problems on some 486Dx4's and old 386's */
+extern int have_cpuid; /* We have a CPUID */
+
+extern unsigned long cpu_hz; /* CPU clock frequency from time.c */
+
+#if 0
+/*
+ * Detection of CPU model (CPUID).
+ */
+extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "a" (op)
+ : "cc");
+}
+#endif
+
+/*
+ * Cyrix CPU register indexes (use special macros to access these)
+ */
+#define CX86_CCR2 0xc2
+#define CX86_CCR3 0xc3
+#define CX86_CCR4 0xe8
+#define CX86_CCR5 0xe9
+#define CX86_DIR0 0xfe
+#define CX86_DIR1 0xff
+
+/*
+ * Cyrix CPU register access macros
+ */
+
+extern inline unsigned char getCx86(unsigned char reg)
+{
+ unsigned char data;
+
+ __asm__ __volatile__("movb %1,%%al\n\t"
+ "outb %%al,$0x22\n\t"
+ "inb $0x23,%%al" : "=a" (data) : "q" (reg));
+ return data;
+}
+
+extern inline void setCx86(unsigned char reg, unsigned char data)
+{
+ __asm__ __volatile__("outb %%al,$0x22\n\t"
+ "movb %1,%%al\n\t"
+ "outb %%al,$0x23" : : "a" (reg), "q" (data));
+}
+
+/*
+ * Bus types (default is ISA, but people can check others with these..)
+ * MCA_bus hardcoded to 0 for now.
+ */
+extern int EISA_bus;
+#define MCA_bus 0
+#define MCA_bus__is_a_macro /* for versions in ksyms.c */
+
+/*
+ * User space process size: 3GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE (0xC0000000UL)
+#define MAX_USER_ADDR TASK_SIZE
+#define MMAP_SEARCH_START (TASK_SIZE/3)
+
+/*
+ * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
+ */
+#define IO_BITMAP_SIZE 32
+
+struct i387_hard_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ long status; /* software status information */
+};
+
+struct i387_soft_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long top;
+ struct fpu_reg regs[8]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ unsigned char lookahead;
+ struct info *info;
+ unsigned long entry_eip;
+};
+
+union i387_union {
+ struct i387_hard_struct hard;
+ struct i387_soft_struct soft;
+};
+
+struct thread_struct {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+ unsigned long esp1;
+ unsigned short ss1,__ss1h;
+ unsigned long esp2;
+ unsigned short ss2,__ss2h;
+ unsigned long cr3;
+ unsigned long eip;
+ unsigned long eflags;
+ unsigned long eax,ecx,edx,ebx;
+ unsigned long esp;
+ unsigned long ebp;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, bitmap;
+ unsigned long io_bitmap[IO_BITMAP_SIZE+1];
+ unsigned long tr;
+ unsigned long cr2, trap_no, error_code;
+/* floating point info */
+ union i387_union i387;
+/* virtual 86 mode info */
+ struct vm86_struct * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, v86mode;
+};
+
+#define INIT_MMAP { &init_mm, 0, 0x40000000, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC }
+
+#define INIT_TSS { \
+ 0,0, \
+ sizeof(init_kernel_stack) + (long) &init_kernel_stack, \
+ KERNEL_DS, 0, \
+ 0,0,0,0,0,0, \
+ (long) &swapper_pg_dir, \
+ 0,0,0,0,0,0,0,0,0,0, \
+ USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0,USER_DS,0, \
+ _LDT(0),0, \
+ 0, 0x8000, \
+ {~0, }, /* ioperm */ \
+ _TSS(0), 0, 0,0, \
+ { { 0, }, }, /* 387 state */ \
+ NULL, 0, 0, 0, 0 /* vm86_info */ \
+}
+
+#define alloc_kernel_stack() __get_free_page(GFP_KERNEL)
+#define free_kernel_stack(page) free_page((page))
+
+static inline void start_thread(struct pt_regs * regs, unsigned long eip, unsigned long esp)
+{
+ regs->cs = USER_CS;
+ regs->ds = regs->es = regs->ss = regs->fs = regs->gs = USER_DS;
+ regs->eip = eip;
+ regs->esp = esp;
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+extern inline unsigned long thread_saved_pc(struct thread_struct *t)
+{
+ return ((unsigned long *)t->esp)[3];
+}
+
+#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/linux/src/include/asm-i386/ptrace.h b/linux/src/include/asm-i386/ptrace.h
new file mode 100644
index 0000000..ae94ede
--- /dev/null
+++ b/linux/src/include/asm-i386/ptrace.h
@@ -0,0 +1,60 @@
+#ifndef _I386_PTRACE_H
+#define _I386_PTRACE_H
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ unsigned short ds, __dsu;
+ unsigned short es, __esu;
+ unsigned short fs, __fsu;
+ unsigned short gs, __gsu;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csu;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssu;
+};
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+
+#ifdef __KERNEL__
+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->cs))
+#define instruction_pointer(regs) ((regs)->eip)
+extern void show_regs(struct pt_regs *);
+struct task_struct;
+extern void get_pt_regs_for_task(struct pt_regs *, struct task_struct *p);
+#endif
+
+#endif
diff --git a/linux/src/include/asm-i386/resource.h b/linux/src/include/asm-i386/resource.h
new file mode 100644
index 0000000..3143b5b
--- /dev/null
+++ b/linux/src/include/asm-i386/resource.h
@@ -0,0 +1,39 @@
+#ifndef _I386_RESOURCE_H
+#define _I386_RESOURCE_H
+
+/*
+ * Resource limits
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in ms */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+#define RLIMIT_RSS 5 /* max resident set size */
+#define RLIMIT_NPROC 6 /* max number of processes */
+#define RLIMIT_NOFILE 7 /* max number of open files */
+#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#define RLIMIT_AS 9 /* address space limit */
+
+#define RLIM_NLIMITS 10
+
+#ifdef __KERNEL__
+
+#define INIT_RLIMITS \
+{ \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { _STK_LIM, _STK_LIM }, \
+ { 0, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+ { MAX_TASKS_PER_USER, MAX_TASKS_PER_USER }, \
+ { NR_OPEN, NR_OPEN }, \
+ { LONG_MAX, LONG_MAX }, \
+ { LONG_MAX, LONG_MAX }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/asm-i386/segment.h b/linux/src/include/asm-i386/segment.h
new file mode 100644
index 0000000..d23aa17
--- /dev/null
+++ b/linux/src/include/asm-i386/segment.h
@@ -0,0 +1,380 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#ifdef MACH
+
+#include <machine/gdt.h>
+#include <machine/ldt.h>
+
+#else /* !MACH */
+
+#define KERNEL_CS 0x10
+#define KERNEL_DS 0x18
+
+#define USER_CS 0x23
+#define USER_DS 0x2B
+
+#endif /* !MACH */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Uh, these should become the main single-value transfer routines..
+ * They automatically use the right size if we just have the right
+ * pointer type..
+ */
+#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr)))
+#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr))))
+
+/*
+ * This is a silly but good way to make sure that
+ * the __put_user function is indeed always optimized,
+ * and that we use the correct sizes..
+ */
+extern int bad_user_access_length(void);
+
+/*
+ * dummy pointer type structure.. gcc won't try to do something strange
+ * this way..
+ */
+struct __segment_dummy { unsigned long a[100]; };
+#define __sd(x) ((struct __segment_dummy *) (x))
+#define __const_sd(x) ((const struct __segment_dummy *) (x))
+
+static inline void __attribute__((always_inline)) __put_user(unsigned long x, void * y, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ ("movb %b1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"iq" ((unsigned char) x), "m" (*__sd(y)));
+ break;
+ case 2:
+ __asm__ ("movw %w1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" ((unsigned short) x), "m" (*__sd(y)));
+ break;
+ case 4:
+ __asm__ ("movl %1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" (x), "m" (*__sd(y)));
+ break;
+ default:
+#ifdef __OPTIMIZE__
+ bad_user_access_length();
+#else
+ asm volatile("ud2");
+#endif
+ }
+}
+
+static inline unsigned long __attribute__((always_inline)) __get_user(const void * y, int size)
+{
+ unsigned long result;
+
+ switch (size) {
+ case 1:
+ __asm__ ("movb %%fs:%1,%b0"
+ :"=q" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned char) result;
+ case 2:
+ __asm__ ("movw %%fs:%1,%w0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned short) result;
+ case 4:
+ __asm__ ("movl %%fs:%1,%0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return result;
+ default:
+#ifdef __OPTIMIZE__
+ return bad_user_access_length();
+#else
+ asm volatile("ud2");
+#endif
+ }
+}
+
+#if defined(__GNUC__) && (__GNUC__ == 2) && (__GNUC_MINOR__ < 95)
+static inline void __generic_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ ("cld\n"
+ "push %%es\n"
+ "push %%fs\n"
+ "cmpl $3,%0\n"
+ "pop %%es\n"
+ "jbe 1f\n"
+ "movl %%edi,%%ecx\n"
+ "negl %%ecx\n"
+ "andl $3,%%ecx\n"
+ "subl %%ecx,%0\n"
+ "rep; movsb\n"
+ "movl %0,%%ecx\n"
+ "shrl $2,%%ecx\n"
+ "rep; movsl\n"
+ "andl $3,%0\n"
+ "1: movl %0,%%ecx\n"
+ "rep; movsb\n"
+ "pop %%es\n"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si");
+}
+
+static inline void __constant_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ __put_user(*(const char *) from, (char *) to, 1);
+ return;
+ case 2:
+ __put_user(*(const short *) from, (short *) to, 2);
+ return;
+ case 3:
+ __put_user(*(const short *) from, (short *) to, 2);
+ __put_user(*(2+(const char *) from), 2+(char *) to, 1);
+ return;
+ case 4:
+ __put_user(*(const int *) from, (int *) to, 4);
+ return;
+ case 8:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ return;
+ case 12:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ return;
+ case 16:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ __put_user(*(3+(const int *) from), 3+(int *) to, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "push %%es\n\t" \
+ "push %%fs\n\t" \
+ "pop %%es\n\t" \
+ "rep ; movsl\n\t" \
+ x \
+ "pop %%es" \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("movsb\n\t");
+ return;
+ case 2:
+ COMMON("movsw\n\t");
+ return;
+ case 3:
+ COMMON("movsw\n\tmovsb\n\t");
+ return;
+ }
+#undef COMMON
+}
+
+static inline void __generic_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ ("cld\n"
+ "cmpl $3,%0\n"
+ "jbe 1f\n"
+ "movl %%edi,%%ecx\n"
+ "negl %%ecx\n"
+ "andl $3,%%ecx\n"
+ "subl %%ecx,%0\n"
+ "fs; rep; movsb\n"
+ "movl %0,%%ecx\n"
+ "shrl $2,%%ecx\n"
+ "fs; rep; movsl\n"
+ "andl $3,%0\n"
+ "1:movl %0,%%ecx\n"
+ "fs; rep; movsb\n"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si", "memory");
+}
+
+static inline void __constant_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ *(char *)to = __get_user((const char *) from, 1);
+ return;
+ case 2:
+ *(short *)to = __get_user((const short *) from, 2);
+ return;
+ case 3:
+ *(short *) to = __get_user((const short *) from, 2);
+ *((char *) to + 2) = __get_user(2+(const char *) from, 1);
+ return;
+ case 4:
+ *(int *) to = __get_user((const int *) from, 4);
+ return;
+ case 8:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ return;
+ case 12:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ return;
+ case 16:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ *(3+(int *) to) = __get_user(3+(const int *) from, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; fs ; movsl\n\t" \
+ x \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si","memory")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("fs ; movsb");
+ return;
+ case 2:
+ COMMON("fs ; movsw");
+ return;
+ case 3:
+ COMMON("fs ; movsw\n\tfs ; movsb");
+ return;
+ }
+#undef COMMON
+}
+
+#define memcpy_fromfs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_fromfs((to),(from),(n)) : \
+ __generic_memcpy_fromfs((to),(from),(n)))
+
+#define memcpy_tofs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_tofs((to),(from),(n)) : \
+ __generic_memcpy_tofs((to),(from),(n)))
+
+
+#else /* code for gcc-2.95.x and newer follows */
+
+static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ char *d = (char *)to;
+ const char *s = (const char *)from;
+ while (n-- > 0) {
+ *d++ = __get_user(s++, 1);
+ }
+}
+
+static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ char *d = (char *)to;
+ const char *s = (const char *)from;
+ while (n-- > 0) {
+ __put_user(*s++, d++, 1);
+ }
+}
+
+#endif /* not gcc-2.95 */
+
+/*
+ * These are deprecated..
+ *
+ * Use "put_user()" and "get_user()" with the proper pointer types instead.
+ */
+
+#define get_fs_byte(addr) __get_user((const unsigned char *)(addr),1)
+#define get_fs_word(addr) __get_user((const unsigned short *)(addr),2)
+#define get_fs_long(addr) __get_user((const unsigned int *)(addr),4)
+
+#define put_fs_byte(x,addr) __put_user((x),(unsigned char *)(addr),1)
+#define put_fs_word(x,addr) __put_user((x),(unsigned short *)(addr),2)
+#define put_fs_long(x,addr) __put_user((x),(unsigned int *)(addr),4)
+
+#ifdef WE_REALLY_WANT_TO_USE_A_BROKEN_INTERFACE
+
+static inline unsigned short get_user_word(const short *addr)
+{
+ return __get_user(addr, 2);
+}
+
+static inline unsigned char get_user_byte(const char * addr)
+{
+ return __get_user(addr,1);
+}
+
+static inline unsigned long get_user_long(const int *addr)
+{
+ return __get_user(addr, 4);
+}
+
+static inline void put_user_byte(char val,char *addr)
+{
+ __put_user(val, addr, 1);
+}
+
+static inline void put_user_word(short val,short * addr)
+{
+ __put_user(val, addr, 2);
+}
+
+static inline void put_user_long(unsigned long val,int * addr)
+{
+ __put_user(val, addr, 4);
+}
+
+#endif
+
+/*
+ * Someone who knows GNU asm better than I should double check the following.
+ * It seems to work, but I don't know if I'm doing something subtly wrong.
+ * --- TYT, 11/24/91
+ * [ nothing wrong here, Linus: I just changed the ax to be any reg ]
+ */
+
+static inline unsigned long get_fs(void)
+{
+ unsigned long _v;
+ __asm__("mov %%fs,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline unsigned long get_ds(void)
+{
+ unsigned long _v;
+ __asm__("mov %%ds,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline void set_fs(unsigned long val)
+{
+ __asm__ __volatile__("mov %w0,%%fs": /* no output */ :"r" (val));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SEGMENT_H */
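put_user/get_user at the top of this header choose the 1-, 2- or 4-byte transfer from sizeof(*(ptr)) and fall back to bad_user_access_length for anything else. The sketch below models only that size dispatch, with an ordinary memcpy standing in for the %fs-relative moves so it can run outside the kernel; the names prefixed with model_ are invented for the example.

#include <stdio.h>
#include <string.h>

/* Stand-in for __put_user: dispatch on the pointee size, copy that many
 * bytes.  The real header does the same dispatch but writes through the
 * user segment instead of calling memcpy. */
static void model_put_user(unsigned long x, void *dst, int size)
{
	switch (size) {
	case 1: { unsigned char  v = x; memcpy(dst, &v, 1); break; }
	case 2: { unsigned short v = x; memcpy(dst, &v, 2); break; }
	case 4: { unsigned int   v = x; memcpy(dst, &v, 4); break; }
	default: fprintf(stderr, "bad_user_access_length\n"); break;
	}
}
#define put_user_model(x, ptr) \
	model_put_user((unsigned long)(x), (ptr), sizeof(*(ptr)))

int main(void)
{
	unsigned char  b = 0;
	unsigned short w = 0;
	unsigned int   l = 0;

	put_user_model(0xAB, &b);        /* picks the 1-byte case */
	put_user_model(0xBEEF, &w);      /* picks the 2-byte case */
	put_user_model(0xDEADBEEF, &l);  /* picks the 4-byte case */
	printf("%#x %#x %#x\n", b, w, l);
	return 0;
}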
diff --git a/linux/src/include/asm-i386/semaphore.h b/linux/src/include/asm-i386/semaphore.h
new file mode 100644
index 0000000..18e12c1
--- /dev/null
+++ b/linux/src/include/asm-i386/semaphore.h
@@ -0,0 +1,133 @@
+#ifndef _I386_SEMAPHORE_H
+#define _I386_SEMAPHORE_H
+
+#include <linux/linkage.h>
+#include <asm/system.h>
+
+/*
+ * SMP- and interrupt-safe semaphores..
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ *
+ * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
+ * the original code and to make semaphore waits
+ * interruptible so that processes waiting on
+ * semaphores can be killed.
+ *
+ * If you would like to see an analysis of this implementation, please
+ * ftp to gcom.com and download the file
+ * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
+ *
+ */
+
+struct semaphore {
+ int count;
+ int waking;
+ int lock ; /* to make waking testing atomic */
+ struct wait_queue * wait;
+};
+
+#define MUTEX ((struct semaphore) { 1, 0, 0, NULL })
+#define MUTEX_LOCKED ((struct semaphore) { 0, 0, 0, NULL })
+
+/* Special register calling convention:
+ * eax contains return address
+ * ecx contains semaphore address
+ */
+asmlinkage void down_failed(void /* special register calling convention */);
+asmlinkage void up_wakeup(void /* special register calling convention */);
+
+extern void __down(struct semaphore * sem);
+extern void __up(struct semaphore * sem);
+
+/*
+ * This is ugly, but we want the default case to fall through.
+ * "down_failed" is a special asm handler that calls the C
+ * routine that actually waits. See arch/i386/lib/semaphore.S
+ */
+static inline void down(struct semaphore * sem)
+{
+ int d0;
+ __asm__ __volatile__(
+ "# atomic down operation\n\t"
+ "movl $1f,%%eax\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "decl %1\n\t"
+ "js " SYMBOL_NAME_STR(down_failed) "\n"
+ "1:\n"
+ :"=&a" (d0), "=m" (sem->count)
+ :"c" (sem)
+ :"memory");
+}
+
+/*
+ * Primitives to spin on a lock. Needed only for SMP version.
+ */
+extern inline void get_buzz_lock(int *lock_ptr)
+{
+#ifdef __SMP__
+ while (xchg(lock_ptr,1) != 0) ;
+#endif
+} /* get_buzz_lock */
+
+extern inline void give_buzz_lock(int *lock_ptr)
+{
+#ifdef __SMP__
+ *lock_ptr = 0 ;
+#endif
+} /* give_buzz_lock */
+
+asmlinkage int down_failed_interruptible(void); /* params in registers */
+
+/*
+ * This version waits in interruptible state so that the waiting
+ * process can be killed. The down_failed_interruptible routine
+ * returns negative for signalled and zero for semaphore acquired.
+ */
+static inline int down_interruptible(struct semaphore * sem)
+{
+ int ret ;
+
+ __asm__ __volatile__(
+ "# atomic interruptible down operation\n\t"
+ "movl $2f,%%eax\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "decl %1\n\t"
+ "js " SYMBOL_NAME_STR(down_failed_interruptible) "\n\t"
+ "xorl %%eax,%%eax\n"
+ "2:\n"
+ :"=&a" (ret), "=m" (sem->count)
+ :"c" (sem)
+ :"memory");
+
+ return(ret) ;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ * The default case (no contention) will result in NO
+ * jumps for both down() and up().
+ */
+static inline void up(struct semaphore * sem)
+{
+ int d0;
+ __asm__ __volatile__(
+ "# atomic up operation\n\t"
+ "movl $1f,%%eax\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "incl %1\n\t"
+ "jle " SYMBOL_NAME_STR(up_wakeup)
+ "\n1:"
+ :"=&a" (d0), "=m" (sem->count)
+ :"c" (sem)
+ :"memory");
+}
+
+#endif
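The comments around down() and up() above describe the counting convention: a mutex starts at 1, down() sleeps when its decrement drives the count negative, and up() wakes a waiter when its increment still leaves the count at or below zero. A standalone model of just that arithmetic, with prints standing in for down_failed/up_wakeup, is sketched here.

#include <stdio.h>

/* Single-threaded model of the semaphore count; the real code uses locked
 * decl/incl and jumps to the down_failed/up_wakeup handlers instead of
 * printing. */
struct sem_model { int count; };

static void down_model(struct sem_model *s)
{
	if (--s->count < 0)
		puts("contended: would call down_failed and sleep");
}

static void up_model(struct sem_model *s)
{
	if (++s->count <= 0)
		puts("waiters present: would call up_wakeup");
}

int main(void)
{
	struct sem_model mutex = { 1 };

	down_model(&mutex);   /* uncontended: count 1 -> 0, no sleep    */
	down_model(&mutex);   /* contended:   count 0 -> -1, sleeps     */
	up_model(&mutex);     /* count -1 -> 0, a waiter gets woken     */
	up_model(&mutex);     /* count  0 -> 1, semaphore free again    */
	return 0;
}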
diff --git a/linux/src/include/asm-i386/sigcontext.h b/linux/src/include/asm-i386/sigcontext.h
new file mode 100644
index 0000000..df06596
--- /dev/null
+++ b/linux/src/include/asm-i386/sigcontext.h
@@ -0,0 +1,54 @@
+#ifndef _ASMi386_SIGCONTEXT_H
+#define _ASMi386_SIGCONTEXT_H
+
+/*
+ * As documented in the iBCS2 standard..
+ *
+ * The first part of "struct _fpstate" is just the
+ * normal i387 hardware setup, the extra "status"
+ * word is used to save the coprocessor status word
+ * before entering the handler.
+ */
+struct _fpreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+};
+
+struct _fpstate {
+ unsigned long cw,
+ sw,
+ tag,
+ ipoff,
+ cssel,
+ dataoff,
+ datasel;
+ struct _fpreg _st[8];
+ unsigned long status;
+};
+
+struct sigcontext_struct {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long esp;
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long eip;
+ unsigned short cs, __csh;
+ unsigned long eflags;
+ unsigned long esp_at_signal;
+ unsigned short ss, __ssh;
+ struct _fpstate * fpstate;
+ unsigned long oldmask;
+ unsigned long cr2;
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/signal.h b/linux/src/include/asm-i386/signal.h
new file mode 100644
index 0000000..c68928c
--- /dev/null
+++ b/linux/src/include/asm-i386/signal.h
@@ -0,0 +1,97 @@
+#ifndef _ASMi386_SIGNAL_H
+#define _ASMi386_SIGNAL_H
+
+typedef unsigned long sigset_t; /* at least 32 bits */
+
+#define _NSIG 32
+#define NSIG _NSIG
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGUNUSED 31
+
+/*
+ * sa_flags values: SA_STACK is not currently supported, but will allow the
+ * usage of signal stacks by using the (now obsolete) sa_restorer field in
+ * the sigaction structure as a stack pointer. This is now possible due to
+ * the changes in signal handling. LBT 010493.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_SHIRQ flag is for shared interrupt support on PCI and EISA.
+ */
+#define SA_NOCLDSTOP 1
+#define SA_SHIRQ 0x04000000
+#define SA_STACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_INTERRUPT 0x20000000
+#define SA_NOMASK 0x40000000
+#define SA_ONESHOT 0x80000000
+
+#ifdef __KERNEL__
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#endif
+
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+#endif
+
+#endif
diff --git a/linux/src/include/asm-i386/socket.h b/linux/src/include/asm-i386/socket.h
new file mode 100644
index 0000000..7301511
--- /dev/null
+++ b/linux/src/include/asm-i386/socket.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add :#define SO_REUSEPORT 15 */
+
+#define SO_BINDTODEVICE 25
+
+#endif /* _ASM_SOCKET_H */
diff --git a/linux/src/include/asm-i386/sockios.h b/linux/src/include/asm-i386/sockios.h
new file mode 100644
index 0000000..6b747f8
--- /dev/null
+++ b/linux/src/include/asm-i386/sockios.h
@@ -0,0 +1,12 @@
+#ifndef __ARCH_I386_SOCKIOS__
+#define __ARCH_I386_SOCKIOS__
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp */
+
+#endif
diff --git a/linux/src/include/asm-i386/spinlock.h b/linux/src/include/asm-i386/spinlock.h
new file mode 100644
index 0000000..18119d4
--- /dev/null
+++ b/linux/src/include/asm-i386/spinlock.h
@@ -0,0 +1,262 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifndef __SMP__
+
+#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
+
+#if (DEBUG_SPINLOCKS < 1)
+
+/*
+ * Your basic spinlocks, allowing only a single CPU anywhere
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
+ */
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#else
+ typedef struct { int gcc_is_buggy; } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#endif
+
+#define spin_lock_init(lock) do { } while(0)
+#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
+#define spin_trylock(lock) (1)
+#define spin_unlock_wait(lock) do { } while(0)
+#define spin_unlock(lock) do { } while(0)
+#define spin_lock_irq(lock) cli()
+#define spin_unlock_irq(lock) sti()
+
+#define spin_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define spin_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+
+#elif (DEBUG_SPINLOCKS < 2)
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do { (x)->lock = 1; } while (0)
+#define spin_unlock_wait(x) do { } while (0)
+#define spin_unlock(x) do { (x)->lock = 0; } while (0)
+#define spin_lock_irq(x) do { cli(); spin_lock(x); } while (0)
+#define spin_unlock_irq(x) do { spin_unlock(x); sti(); } while (0)
+
+#define spin_lock_irqsave(x, flags) \
+ do { save_flags(flags); spin_lock_irq(x); } while (0)
+#define spin_unlock_irqrestore(x, flags) \
+ do { spin_unlock(x); restore_flags(flags); } while (0)
+
+#else /* (DEBUG_SPINLOCKS >= 2) */
+
+typedef struct {
+ volatile unsigned int lock;
+ volatile unsigned int babble;
+ const char *module;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
+
+#include <linux/kernel.h>
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
+#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
+#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
+#define spin_lock_irq(x) do {cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irq(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irq(x) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irq(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; sti();} while (0)
+
+#define spin_lock_irqsave(x,flags) do {save_flags(flags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irqsave(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irqrestore(x,flags) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irqrestore(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(flags);} while (0)
+
+#endif /* DEBUG_SPINLOCKS */
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
+ */
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { }
+#else
+ typedef struct { int gcc_is_buggy; } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+#endif
+
+#define read_lock(lock) (void)(lock) /* Not "unused variable." */
+#define read_unlock(lock) do { } while(0)
+#define write_lock(lock) (void)(lock) /* Not "unused variable." */
+#define write_unlock(lock) do { } while(0)
+#define read_lock_irq(lock) cli()
+#define read_unlock_irq(lock) sti()
+#define write_lock_irq(lock) cli()
+#define write_unlock_irq(lock) sti()
+
+#define read_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+#define write_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+
+#else /* __SMP__ */
+
+/*
+ * Your basic spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while(0)
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+/*
+ * Intel PIV would benefit from using 'rep nop' here but on older
+ * processors and non intel it is listed as 'undefined' so cannot be
+ * blindly used. On 2.4 we should add a PIV CPU type for this one.
+ */
+#define spin_lock_string \
+ "\n1:\t" \
+ "lock ; btsl $0,%0\n\t" \
+ "jc 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\t" \
+ "rep; nop\n\t" \
+ "testb $1,%0\n\t" \
+ "jne 2b\n\t" \
+ "jmp 1b\n" \
+ ".previous"
+
+#define spin_unlock_string \
+ "lock ; btrl $0,%0"
+
+#define spin_lock(lock) \
+__asm__ __volatile__( \
+ spin_lock_string \
+ :"=m" (__dummy_lock(lock)))
+
+#define spin_unlock(lock) \
+__asm__ __volatile__( \
+ spin_unlock_string \
+ :"=m" (__dummy_lock(lock)))
+
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock_irq(lock) \
+ do { __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irq(lock) \
+ do { spin_unlock(lock); __sti(); } while (0)
+
+#define spin_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { spin_unlock(lock); __restore_flags(flags); } while (0)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+ volatile unsigned int lock;
+ unsigned long previous;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+
+/*
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "write" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ */
+#define read_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; incl %0\n\t" \
+ "js 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\tlock ; decl %0\n" \
+ "3:\trep; nop\n\t" \
+ "cmpl $0,%0\n\t" \
+ "js 3b\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_unlock(rw) \
+ asm volatile("lock ; decl %0" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; btsl $31,%0\n\t" \
+ "jc 4f\n" \
+ "2:\ttestl $0x7fffffff,%0\n\t" \
+ "jne 3f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "3:\tlock ; btrl $31,%0\n" \
+ "4:\trep; nop\n\t" \
+ "cmp $0,%0\n\t" \
+ "jne 4b\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_unlock(rw) \
+ asm volatile("lock ; btrl $31,%0":"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
+#define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
+#define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
+
+#define read_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ do { read_unlock(lock); __restore_flags(flags); } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ do { write_unlock(lock); __restore_flags(flags); } while (0)
+
+#endif /* __SMP__ */
+#endif /* __ASM_SPINLOCK_H */
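The read-write locks at the end of this header are described as a 32-bit counter whose sign bit is the write bit. The sketch below is a single-threaded, plain-C approximation of that layout (no locked instructions, so it is not a usable lock), intended only to make the counter arithmetic behind the "non-obvious" inline assembly easier to follow.

#include <stdio.h>
#include <stdint.h>

/* Readers increment the counter and back out if the result went negative;
 * a writer claims bit 31 and needs the low 31 bits (the reader count) to
 * be zero. */
static int32_t lock_word;

static int try_read_lock(void)
{
	if (++lock_word < 0) {  /* lock ; incl + js: a writer is present */
		--lock_word;    /* the backed-out path: lock ; decl      */
		return 0;
	}
	return 1;
}

static void read_unlock_model(void)
{
	--lock_word;            /* lock ; decl */
}

static int try_write_lock(void)
{
	int was_set = lock_word < 0;  /* btsl $31 reports the old bit  */
	lock_word |= INT32_MIN;
	if (was_set)
		return 0;             /* another writer owns bit 31    */
	if (lock_word & INT32_MAX) {  /* readers still in the low bits */
		lock_word &= INT32_MAX;  /* btrl $31: back off          */
		return 0;
	}
	return 1;
}

int main(void)
{
	printf("reader: %d\n", try_read_lock());   /* 1: no writer         */
	printf("writer: %d\n", try_write_lock());  /* 0: a reader holds it */
	read_unlock_model();
	printf("writer: %d\n", try_write_lock());  /* 1: counter was zero  */
	printf("reader: %d\n", try_read_lock());   /* 0: write bit is set  */
	return 0;
}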
diff --git a/linux/src/include/asm-i386/stat.h b/linux/src/include/asm-i386/stat.h
new file mode 100644
index 0000000..b4c6486
--- /dev/null
+++ b/linux/src/include/asm-i386/stat.h
@@ -0,0 +1,41 @@
+#ifndef _I386_STAT_H
+#define _I386_STAT_H
+
+struct old_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+struct new_stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long __unused1;
+ unsigned long st_mtime;
+ unsigned long __unused2;
+ unsigned long st_ctime;
+ unsigned long __unused3;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/statfs.h b/linux/src/include/asm-i386/statfs.h
new file mode 100644
index 0000000..113d5d4
--- /dev/null
+++ b/linux/src/include/asm-i386/statfs.h
@@ -0,0 +1,25 @@
+#ifndef _I386_STATFS_H
+#define _I386_STATFS_H
+
+#ifndef __KERNEL_STRICT_NAMES
+
+#include <linux/types.h>
+
+typedef __kernel_fsid_t fsid_t;
+
+#endif
+
+struct statfs {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_spare[6];
+};
+
+#endif
diff --git a/linux/src/include/asm-i386/string.h b/linux/src/include/asm-i386/string.h
new file mode 100644
index 0000000..8417d4a
--- /dev/null
+++ b/linux/src/include/asm-i386/string.h
@@ -0,0 +1,487 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ *
+ * Also, the byte strings actually work correctly. Forget
+ * the i486 routines for now as they may be broken..
+ */
+#if FIXED_486_STRING && (CPU == 486 || CPU == 586)
+#include <asm/string-486.h>
+#else
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, this should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strtok,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used through-out, making for "slightly" unclear code :-)
+ *
+ * NO Copyright (C) 1991, 1992 Linus Torvalds,
+ * consider these trivial functions to be PD.
+ */
+
+#define __HAVE_ARCH_STRCPY
+extern inline char * strcpy(char * dest,const char *src)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2)
+ :"0" (src),"1" (dest) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+extern inline char * strncpy(char * dest,const char *src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %2\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "rep\n\t"
+ "stosb\n"
+ "2:"
+ : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
+ :"0" (src),"1" (dest),"2" (count) : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+extern inline char * strcat(char * dest,const char * src)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+extern inline char * strncat(char * dest,const char * src,size_t count)
+{
+int d0, d1, d2, d3;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n\t"
+ "movl %8,%3\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %2,%2\n\t"
+ "stosb"
+ : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+ : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
+ : "memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+extern inline int strcmp(const char * cs,const char * ct)
+{
+int d0, d1;
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "scasb\n\t"
+ "jne 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "jmp 3f\n"
+ "2:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "3:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1)
+ :"1" (cs),"2" (ct));
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+extern inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "scasb\n\t"
+ "jne 3f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %%eax,%%eax\n\t"
+ "jmp 4f\n"
+ "3:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "4:"
+ :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+ :"1" (cs),"2" (ct),"3" (count));
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+extern inline char * strchr(const char * s, int c)
+{
+int d0;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "je 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "movl $1,%1\n"
+ "2:\tmovl %1,%0\n\t"
+ "decl %0"
+ :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+extern inline char * strrchr(const char * s, int c)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "jne 2f\n\t"
+ "leal -1(%%esi),%0\n"
+ "2:\ttestb %%al,%%al\n\t"
+ "jne 1b"
+ :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+extern inline size_t strlen(const char * s)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %0\n\t"
+ "decl %0"
+ :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
+return __res;
+}
+
+extern inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+ : "memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+ switch (n) {
+ case 0:
+ return to;
+ case 1:
+ *(unsigned char *)to = *(const unsigned char *)from;
+ return to;
+ case 2:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ return to;
+ case 3:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ *(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
+ return to;
+ case 4:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ return to;
+ case 6: /* for Ethernet addresses */
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
+ return to;
+ case 8:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ return to;
+ case 12:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ return to;
+ case 16:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ return to;
+ case 20:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ *(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
+ return to;
+ }
+#define COMMON(x) \
+__asm__ __volatile__( \
+ "cld\n\t" \
+ "rep ; movsl" \
+ x \
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2) \
+ : "0" (n/4),"1" ((long) to),"2" ((long) from) \
+ : "memory");
+{
+ int d0, d1, d2;
+ switch (n % 4) {
+ case 0: COMMON(""); return to;
+ case 1: COMMON("\n\tmovsb"); return to;
+ case 2: COMMON("\n\tmovsw"); return to;
+ default: COMMON("\n\tmovsw\n\tmovsb"); return to;
+ }
+}
+
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#define __HAVE_ARCH_MEMMOVE
+extern inline void * memmove(void * dest,const void * src, size_t n)
+{
+int d0, d1, d2;
+if (dest<src)
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "movsb"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),"1" (src),"2" (dest)
+ : "memory");
+else
+__asm__ __volatile__(
+ "std\n\t"
+ "rep\n\t"
+ "movsb\n\t"
+ "cld"
+ : "=&c" (d0), "=&S" (d1), "=&D" (d2)
+ :"0" (n),
+ "1" (n-1+(const char *)src),
+ "2" (n-1+(char *)dest)
+ :"memory");
+return dest;
+}
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+extern inline void * memchr(const void * cs,int c,size_t count)
+{
+int d0;
+register void * __res;
+if (!count)
+ return NULL;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1f\n\t"
+ "movl $1,%0\n"
+ "1:\tdecl %0"
+ :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
+return __res;
+}
+
+extern inline void * __memset_generic(void * s, char c,size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "stosb"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c),"1" (s),"0" (count)
+ :"memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+int d0, d1;
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; stosl\n\t"
+ "testb $2,%b3\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b3\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+ :"memory");
+return (s);
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+extern inline size_t strnlen(const char * s, size_t count)
+{
+int d0;
+register int __res;
+__asm__ __volatile__(
+ "movl %2,%0\n\t"
+ "jmp 2f\n"
+ "1:\tcmpb $0,(%0)\n\t"
+ "je 3f\n\t"
+ "incl %0\n"
+ "2:\tdecl %1\n\t"
+ "cmpl $-1,%1\n\t"
+ "jne 1b\n"
+ "3:\tsubl %2,%0"
+ :"=a" (__res), "=&d" (d0)
+ :"c" (s),"1" (count));
+return __res;
+}
+/* end of additional stuff */
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count is constant..
+ */
+extern inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern;
+ *(2+(unsigned char *)s) = pattern;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+#define COMMON(x) \
+__asm__ __volatile__("cld\n\t" \
+ "rep ; stosl" \
+ x \
+ : "=&c" (d0), "=&D" (d1) \
+ : "a" (pattern),"0" (count/4),"1" ((long) s) \
+ : "memory")
+{
+ int d0, d1;
+ switch (count % 4) {
+ case 0: COMMON(""); return s;
+ case 1: COMMON("\n\tstosb"); return s;
+ case 2: COMMON("\n\tstosw"); return s;
+ default: COMMON("\n\tstosw\n\tstosb"); return s;
+ }
+}
+
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+extern inline void * memscan(void * addr, int c, size_t size)
+{
+ if (!size)
+ return addr;
+ __asm__("cld
+ repnz; scasb
+ jnz 1f
+ dec %%edi
+1: "
+ : "=D" (addr), "=c" (size)
+ : "0" (addr), "1" (size), "a" (c));
+ return addr;
+}
+
+#endif
+#endif
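A quick standalone illustration, not part of the patch, of the byte-replication
trick the memset() macro above relies on: multiplying an 8-bit value by
0x01010101 copies it into every byte of a 32-bit word, which is what lets
__constant_c_memset() fill memory four bytes at a time with "rep ; stosl".

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char c = 0xab;
        unsigned int word = 0x01010101U * c;      /* 0xabababab */
        unsigned char buf[4];

        memset(buf, c, sizeof(buf));              /* byte-at-a-time reference */
        printf("word = %#x, matches memset: %d\n",
               word, memcmp(buf, &word, sizeof(word)) == 0);
        return 0;
    }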
diff --git a/linux/src/include/asm-i386/system.h b/linux/src/include/asm-i386/system.h
new file mode 100644
index 0000000..f186393
--- /dev/null
+++ b/linux/src/include/asm-i386/system.h
@@ -0,0 +1,334 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <asm/segment.h>
+
+/*
+ * Entry into gdt where to find first TSS. GDT layout:
+ * 0 - null
+ * 1 - not used
+ * 2 - kernel code segment
+ * 3 - kernel data segment
+ * 4 - user code segment
+ * 5 - user data segment
+ * ...
+ * 8 - TSS #0
+ * 9 - LDT #0
+ * 10 - TSS #1
+ * 11 - LDT #1
+ */
+#define FIRST_TSS_ENTRY 8
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
+#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
+#define store_TR(n) \
+__asm__("str %%ax\n\t" \
+ "subl %2,%%eax\n\t" \
+ "shrl $4,%%eax" \
+ :"=a" (n) \
+ :"0" (0),"i" (FIRST_TSS_ENTRY<<3))
+
+/* This special macro can be used to load a debugging register */
+
+#define loaddebug(tsk,register) \
+ __asm__("movl %0,%%edx\n\t" \
+ "movl %%edx,%%db" #register "\n\t" \
+ : /* no output */ \
+ :"m" (tsk->debugreg[register]) \
+ :"dx");
+
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * This also clears the TS-flag if the task we switched to has used
+ * the math co-processor latest.
+ *
+ * It also reloads the debug regs if necessary..
+ */
+
+
+#ifdef __SMP__
+ /*
+ * Keep the lock depth straight. If we switch on an interrupt from
+ * kernel->user task we need to lose a depth, and if we switch the
+ * other way we need to gain a depth. Same layer switches come out
+ * the same.
+ *
+ * We spot a switch in user mode because the kernel counter is the
+ * same as the interrupt counter depth. (We never switch during the
+ * message/invalidate IPI).
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process.
+ */
+
+#define switch_to(prev,next) do { \
+ cli();\
+ if(prev->flags&PF_USEDFPU) \
+ { \
+ __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
+ __asm__ __volatile__("fwait"); \
+ prev->flags&=~PF_USEDFPU; \
+ } \
+ prev->lock_depth=syscall_count; \
+ kernel_counter+=next->lock_depth-prev->lock_depth; \
+ syscall_count=next->lock_depth; \
+__asm__("pushl %%edx\n\t" \
+ "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
+ "movl 0x20(%%edx), %%edx\n\t" \
+ "shrl $22,%%edx\n\t" \
+ "and $0x3C,%%edx\n\t" \
+ "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
+ "popl %%edx\n\t" \
+ "ljmp %0\n\t" \
+ "sti\n\t" \
+ : /* no output */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "c" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+
+#else
+#define switch_to(prev,next) do { \
+__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \
+ "ljmp %0\n\t" \
+ "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
+ "jne 1f\n\t" \
+ "clts\n" \
+ "1:" \
+ : /* no outputs */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "r" (prev), "r" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+#endif
+
+#define _set_base(addr,base) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%1\n\t" \
+ "movb %%dh,%2" \
+ : /* no output */ \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "d" (base) \
+ :"dx")
+
+#define _set_limit(addr,limit) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %1,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%1" \
+ : /* no output */ \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "d" (limit) \
+ :"dx")
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define stts() \
+__asm__ __volatile__ ( \
+ "movl %%cr0,%%eax\n\t" \
+ "orl $8,%%eax\n\t" \
+ "movl %%eax,%%cr0" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ :"ax")
+
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+#define mb() __asm__ __volatile__ ("" : : :"memory")
+#define sti() __asm__ __volatile__ ("sti": : :"memory")
+#define cli() __asm__ __volatile__ ("cli": : :"memory")
+
+#define save_flags(x) \
+__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
+
+#define restore_flags(x) \
+__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
+
+#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+ "movw %2,%%dx\n\t" \
+ "movl %%eax,%0\n\t" \
+ "movl %%edx,%1" \
+ :"=m" (*((long *) (gate_addr))), \
+ "=m" (*(1+(long *) (gate_addr))) \
+ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+ "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
+ :"ax","dx")
+
+#define set_intr_gate(n,addr) \
+ _set_gate(&idt[n],14,0,addr)
+
+#define set_trap_gate(n,addr) \
+ _set_gate(&idt[n],15,0,addr)
+
+#define set_system_gate(n,addr) \
+ _set_gate(&idt[n],15,3,addr)
+
+#define set_call_gate(a,addr) \
+ _set_gate(a,12,3,addr)
+
+#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
+ *((gate_addr)+1) = ((base) & 0xff000000) | \
+ (((base) & 0x00ff0000)>>16) | \
+ ((limit) & 0xf0000) | \
+ ((dpl)<<13) | \
+ (0x00408000) | \
+ ((type)<<8); \
+ *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
+ ((limit) & 0x0ffff); }
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
+ "movw %%ax,%2\n\t" \
+ "rorl $16,%%eax\n\t" \
+ "movb %%al,%3\n\t" \
+ "movb $" type ",%4\n\t" \
+ "movb $0x00,%5\n\t" \
+ "movb %%ah,%6\n\t" \
+ "rorl $16,%%eax" \
+ : /* no output */ \
+ :"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
+ "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
+ )
+
+#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
+#define set_ldt_desc(n,addr,size) \
+ _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt;
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
+void disable_hlt(void);
+void enable_hlt(void);
+
+static __inline__ unsigned long long rdmsr(unsigned int msr)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdmsr"
+ : "=A" (ret)
+ : "c" (msr));
+ return ret;
+}
+
+static __inline__ void wrmsr(unsigned int msr,unsigned long long val)
+{
+ __asm__ __volatile__("wrmsr"
+ : /* no Outputs */
+ : "c" (msr), "A" (val));
+}
+
+
+static __inline__ unsigned long long rdtsc(void)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdtsc"
+ : "=A" (ret)
+ : /* no inputs */);
+ return ret;
+}
+
+static __inline__ unsigned long long rdpmc(unsigned int counter)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdpmc"
+ : "=A" (ret)
+ : "c" (counter));
+ return ret;
+}
+
+#endif
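The xchg()/tas() pair above is the building block for a test-and-set spinlock:
xchg is atomic on the i386 even without a lock prefix, so exchanging 1 into the
lock word reads the old value and marks the lock taken in a single step.  A
minimal user-space sketch of the idea, not part of the patch, x86 only:

    #include <stdio.h>

    /* same idea as the 4-byte case of __xchg() above */
    static unsigned int my_xchg(volatile unsigned int *ptr, unsigned int x)
    {
        __asm__ __volatile__("xchgl %0,%1"
                             : "=r" (x), "+m" (*ptr)
                             : "0" (x)
                             : "memory");
        return x;
    }

    static volatile unsigned int lock_word;

    int main(void)
    {
        while (my_xchg(&lock_word, 1) != 0)
            ;                          /* spin until the old value was 0 */
        puts("lock acquired");
        lock_word = 0;                 /* release */
        return 0;
    }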
diff --git a/linux/src/include/asm-i386/termbits.h b/linux/src/include/asm-i386/termbits.h
new file mode 100644
index 0000000..c40e6f0
--- /dev/null
+++ b/linux/src/include/asm-i386/termbits.h
@@ -0,0 +1,160 @@
+#ifndef __ARCH_I386_TERMBITS_H__
+#define __ARCH_I386_TERMBITS_H__
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif
diff --git a/linux/src/include/asm-i386/termios.h b/linux/src/include/asm-i386/termios.h
new file mode 100644
index 0000000..9f65b4d
--- /dev/null
+++ b/linux/src/include/asm-i386/termios.h
@@ -0,0 +1,92 @@
+#ifndef _I386_TERMIOS_H
+#define _I386_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+#ifdef __KERNEL__
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+#endif
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* line disciplines */
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline void trans_from_termio(struct termio * termio,
+ struct termios * termios)
+{
+#define SET_LOW_BITS(x,y) (*(unsigned short *)(&x) = (y))
+ SET_LOW_BITS(termios->c_iflag, termio->c_iflag);
+ SET_LOW_BITS(termios->c_oflag, termio->c_oflag);
+ SET_LOW_BITS(termios->c_cflag, termio->c_cflag);
+ SET_LOW_BITS(termios->c_lflag, termio->c_lflag);
+#undef SET_LOW_BITS
+ memcpy(termios->c_cc, termio->c_cc, NCC);
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline void trans_to_termio(struct termios * termios,
+ struct termio * termio)
+{
+ termio->c_iflag = termios->c_iflag;
+ termio->c_oflag = termios->c_oflag;
+ termio->c_cflag = termios->c_cflag;
+ termio->c_lflag = termios->c_lflag;
+ termio->c_line = termios->c_line;
+ memcpy(termio->c_cc, termios->c_cc, NCC);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_TERMIOS_H */
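What the SET_LOW_BITS() helper in trans_from_termio() above does can be seen
with a plain integer: storing through an unsigned short pointer replaces only
the low 16 bits of the termios flag word and leaves any mode bits the old
16-bit termio cannot express untouched.  Standalone sketch, not part of the
patch, little-endian x86 assumed:

    #include <stdio.h>

    int main(void)
    {
        unsigned int c_iflag = 0xabcd1234;           /* pretend termios flags */
        unsigned short termio_iflag = 0x5678;        /* 16-bit termio flags */

        *(unsigned short *)&c_iflag = termio_iflag;  /* SET_LOW_BITS(c_iflag, ...) */
        printf("%#x\n", c_iflag);                    /* prints 0xabcd5678 */
        return 0;
    }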
diff --git a/linux/src/include/asm-i386/types.h b/linux/src/include/asm-i386/types.h
new file mode 100644
index 0000000..d792546
--- /dev/null
+++ b/linux/src/include/asm-i386/types.h
@@ -0,0 +1,46 @@
+#ifndef _I386_TYPES_H
+#define _I386_TYPES_H
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#define BITS_PER_LONG 32
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/asm-i386/unaligned.h b/linux/src/include/asm-i386/unaligned.h
new file mode 100644
index 0000000..282ce19
--- /dev/null
+++ b/linux/src/include/asm-i386/unaligned.h
@@ -0,0 +1,16 @@
+#ifndef __I386_UNALIGNED_H
+#define __I386_UNALIGNED_H
+
+/*
+ * The i386 can do unaligned accesses itself.
+ *
+ * The strange macros are there to make sure these can't
+ * be misused in a way that makes them not work on other
+ * architectures where unaligned accesses aren't as simple.
+ */
+
+#define get_unaligned(ptr) (*(ptr))
+
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+#endif
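Typical use of these macros is pulling a multi-byte field out of a packed wire
format or on-disk structure whose alignment is not guaranteed.  On i386 the
plain dereference is enough, as the comment says; other ports supply byte-wise
versions behind the same names.  Standalone sketch, not part of the patch:

    #include <stdio.h>

    #define get_unaligned(ptr) (*(ptr))   /* the i386 definition from above */

    int main(void)
    {
        /* a 32-bit value starting at offset 1, i.e. misaligned */
        unsigned char pkt[8] = { 0x01, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };
        unsigned int val = get_unaligned((unsigned int *)(pkt + 1));

        printf("%#x\n", val);   /* prints 0x12345678 on little-endian x86 */
        return 0;
    }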
diff --git a/linux/src/include/asm-i386/unistd.h b/linux/src/include/asm-i386/unistd.h
new file mode 100644
index 0000000..c5f8f3a
--- /dev/null
+++ b/linux/src/include/asm-i386/unistd.h
@@ -0,0 +1,328 @@
+#ifndef _ASM_I386_UNISTD_H_
+#define _ASM_I386_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_setup 0 /* used only by init, to get system going */
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_chown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_phys 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_poll 168
+#define __NR_getpmsg 188
+#define __NR_putpmsg 189
+
+/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name)); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1))); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1,type2 arg2) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1,type2 arg2,type3 arg3) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+ type5,arg5) \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
+{ \
+long __res; \
+__asm__ volatile ("int $0x80" \
+ : "=a" (__res) \
+ : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#ifdef __KERNEL_SYSCALLS__
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem, but for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+#define __NR__exit __NR_exit
+static inline _syscall0(int,idle)
+static inline _syscall0(int,fork)
+static inline _syscall2(int,clone,unsigned long,flags,char *,esp)
+static inline _syscall0(int,pause)
+static inline _syscall0(int,setup)
+static inline _syscall0(int,sync)
+static inline _syscall0(pid_t,setsid)
+static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static inline _syscall1(int,dup,int,fd)
+static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
+static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
+static inline _syscall1(int,close,int,fd)
+static inline _syscall1(int,_exit,int,exitcode)
+static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
+
+static inline pid_t wait(int * wait_stat)
+{
+ return waitpid(-1,wait_stat,0);
+}
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be free'd until both the parent and the child have exited.
+ */
+static inline pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ long retval;
+
+ __asm__ __volatile__(
+ "movl %%esp,%%esi\n\t"
+ "int $0x80\n\t" /* Linux/i386 system call */
+ "cmpl %%esp,%%esi\n\t" /* child or parent? */
+ "je 1f\n\t" /* parent - jump */
+ "pushl %3\n\t" /* push argument */
+ "call *%4\n\t" /* call fn */
+ "movl %2,%0\n\t" /* exit */
+ "int $0x80\n"
+ "1:\t"
+ :"=a" (retval)
+ :"0" (__NR_clone), "i" (__NR_exit),
+ "r" (arg), "r" (fn),
+ "b" (flags | CLONE_VM)
+ :"si");
+ return retval;
+}
+
+#endif
+
+#endif /* _ASM_I386_UNISTD_H_ */
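What a _syscall3() expansion boils down to, using write() as an example: the
syscall number goes in %eax, the arguments in %ebx/%ecx/%edx, "int $0x80"
traps into the kernel, and %eax comes back holding the result or a negated
errno.  Standalone user-space sketch, not part of the patch; Linux/i386 ABI,
so build with -m32 on a 64-bit host:

    #define MY_NR_write 4   /* same value as __NR_write above */

    static long my_write(int fd, const char *buf, unsigned long count)
    {
        long res;
        __asm__ volatile ("int $0x80"
                          : "=a" (res)
                          : "0" (MY_NR_write), "b" ((long) fd),
                            "c" ((long) buf), "d" ((long) count)
                          : "memory");
        return res;          /* >= 0: bytes written, < 0: -errno */
    }

    int main(void)
    {
        my_write(1, "hello from int $0x80\n", 21);
        return 0;
    }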
diff --git a/linux/src/include/asm-i386/vm86.h b/linux/src/include/asm-i386/vm86.h
new file mode 100644
index 0000000..42ef92e
--- /dev/null
+++ b/linux/src/include/asm-i386/vm86.h
@@ -0,0 +1,175 @@
+#ifndef _LINUX_VM86_H
+#define _LINUX_VM86_H
+
+/*
+ * I'm guessing at the VIF/VIP flag usage, but hope that this is how
+ * the Pentium uses them. Linux will return from vm86 mode when both
+ * VIF and VIP is set.
+ *
+ * On a Pentium, we could probably optimize the virtual flags directly
+ * in the eflags register instead of doing it "by hand" in vflags...
+ *
+ * Linus
+ */
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
+#define ID_MASK 0x00200000
+
+#define BIOSSEG 0x0f000
+
+#define CPU_086 0
+#define CPU_186 1
+#define CPU_286 2
+#define CPU_386 3
+#define CPU_486 4
+#define CPU_586 5
+
+/*
+ * Return values for the 'vm86()' system call
+ */
+#define VM86_TYPE(retval) ((retval) & 0xff)
+#define VM86_ARG(retval) ((retval) >> 8)
+
+#define VM86_SIGNAL 0 /* return due to signal */
+#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */
+#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
+#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */
+
+/*
+ * Additional return values when invoking new vm86()
+ */
+#define VM86_PICRETURN 4 /* return due to pending PIC request */
+#define VM86_TRAP 6 /* return due to DOS-debugger request */
+
+/*
+ * function codes when invoking new vm86()
+ */
+#define VM86_PLUS_INSTALL_CHECK 0
+#define VM86_ENTER 1
+#define VM86_ENTER_NO_BYPASS 2
+#define VM86_REQUEST_IRQ 3
+#define VM86_FREE_IRQ 4
+#define VM86_GET_IRQ_BITS 5
+#define VM86_GET_AND_RESET_IRQ 6
+
+/*
+ * This is the stack-layout when we have done a "SAVE_ALL" from vm86
+ * mode - the main change is that the old segment descriptors aren't
+ * useful any more and are forced to be zero by the kernel (and the
+ * hardware when a trap occurs), and the real segment descriptors are
+ * at the end of the structure. Look at ptrace.h to see the "normal"
+ * setup.
+ */
+
+struct vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ long __null_ds;
+ long __null_es;
+ long __null_fs;
+ long __null_gs;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csh;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct revectored_struct {
+ unsigned long __map[8]; /* 256 bits */
+};
+
+struct vm86_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+};
+
+/*
+ * flags masks
+ */
+#define VM86_SCREEN_BITMAP 0x0001
+
+struct vm86plus_info_struct {
+ unsigned long force_return_for_pic:1;
+ unsigned long vm86dbg_active:1; /* for debugger */
+ unsigned long vm86dbg_TFpendig:1; /* for debugger */
+ unsigned long unused:28;
+ unsigned long is_vm86pus:1; /* for vm86 internal use */
+ unsigned char vm86dbg_intxxtab[32]; /* for debugger */
+};
+
+struct vm86plus_struct {
+ struct vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+};
+
+#ifdef __KERNEL__
+
+struct kernel_vm86_struct {
+ struct vm86_regs regs;
+/*
+ * the below part remains on the kernel stack while we are in VM86 mode.
+ * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
+ * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
+ * 'struct kernel_vm86_regs' with the then actual values.
+ * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
+ * in kernelspace, hence we need not reget the data from userspace.
+ */
+#define VM86_TSS_ESP0 flags
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+ struct pt_regs *regs32; /* here we save the pointer to the old regs */
+/*
+ * The below is not part of the structure, but the stack layout continues
+ * this way. In front of 'return-eip' may be some data, depending on
+ * compilation, so we don't rely on this and save the pointer to 'oldregs'
+ * in 'regs32' above.
+ * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
+
+ long return-eip; from call to vm86()
+ struct pt_regs oldregs; user space registers as saved by syscall
+ */
+};
+
+void handle_vm86_fault(struct vm86_regs *, long);
+int handle_vm86_trap(struct vm86_regs *, long, int);
+
+#endif /* __KERNEL__ */
+
+#endif
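Decoding a vm86() return value with the VM86_TYPE()/VM86_ARG() convention
defined above: the low byte says why the kernel dropped back out of vm86 mode,
and the remaining bits carry its argument (for VM86_INTx, the interrupt
number).  Standalone sketch, not part of the patch:

    #include <stdio.h>

    #define MY_VM86_TYPE(retval)  ((retval) & 0xff)   /* as defined above */
    #define MY_VM86_ARG(retval)   ((retval) >> 8)
    #define MY_VM86_INTx 2

    int main(void)
    {
        int retval = (0x21 << 8) | MY_VM86_INTx;  /* "the task executed int 21h" */

        if (MY_VM86_TYPE(retval) == MY_VM86_INTx)
            printf("vm86 task executed int 0x%02x\n",
                   (unsigned) MY_VM86_ARG(retval));
        return 0;
    }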
diff --git a/linux/src/include/linux/affs_hardblocks.h b/linux/src/include/linux/affs_hardblocks.h
new file mode 100644
index 0000000..3331548
--- /dev/null
+++ b/linux/src/include/linux/affs_hardblocks.h
@@ -0,0 +1,66 @@
+#ifndef AFFS_HARDBLOCKS_H
+#define AFFS_HARDBLOCKS_H
+
+/* Just the needed definitions for the RDB of an Amiga HD. */
+
+struct RigidDiskBlock {
+ __u32 rdb_ID;
+ __u32 rdb_SummedLongs;
+ __s32 rdb_ChkSum;
+ __u32 rdb_HostID;
+ __u32 rdb_BlockBytes;
+ __u32 rdb_Flags;
+ __u32 rdb_BadBlockList;
+ __u32 rdb_PartitionList;
+ __u32 rdb_FileSysHeaderList;
+ __u32 rdb_DriveInit;
+ __u32 rdb_Reserved1[6];
+ __u32 rdb_Cylinders;
+ __u32 rdb_Sectors;
+ __u32 rdb_Heads;
+ __u32 rdb_Interleave;
+ __u32 rdb_Park;
+ __u32 rdb_Reserved2[3];
+ __u32 rdb_WritePreComp;
+ __u32 rdb_ReducedWrite;
+ __u32 rdb_StepRate;
+ __u32 rdb_Reserved3[5];
+ __u32 rdb_RDBBlocksLo;
+ __u32 rdb_RDBBlocksHi;
+ __u32 rdb_LoCylinder;
+ __u32 rdb_HiCylinder;
+ __u32 rdb_CylBlocks;
+ __u32 rdb_AutoParkSeconds;
+ __u32 rdb_HighRDSKBlock;
+ __u32 rdb_Reserved4;
+ char rdb_DiskVendor[8];
+ char rdb_DiskProduct[16];
+ char rdb_DiskRevision[4];
+ char rdb_ControllerVendor[8];
+ char rdb_ControllerProduct[16];
+ char rdb_ControllerRevision[4];
+ __u32 rdb_Reserved5[10];
+};
+
+#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */
+
+struct PartitionBlock {
+ __u32 pb_ID;
+ __u32 pb_SummedLongs;
+ __s32 pb_ChkSum;
+ __u32 pb_HostID;
+ __u32 pb_Next;
+ __u32 pb_Flags;
+ __u32 pb_Reserved1[2];
+ __u32 pb_DevFlags;
+ __u8 pb_DriveName[32];
+ __u32 pb_Reserved2[15];
+ __u32 pb_Environment[17];
+ __u32 pb_EReserved[15];
+};
+
+#define IDNAME_PARTITION 0x50415254 /* "PART" */
+
+#define RDB_ALLOCATION_LIMIT 16
+
+#endif /* AFFS_HARDBLOCKS_H */
diff --git a/linux/src/include/linux/atalk.h b/linux/src/include/linux/atalk.h
new file mode 100644
index 0000000..c1a5d64
--- /dev/null
+++ b/linux/src/include/linux/atalk.h
@@ -0,0 +1,157 @@
+/*
+ * Appletalk networking structures
+ *
+ * The following are directly referenced from the University Of Michigan
+ * netatalk for compatibility reasons.
+ */
+
+#ifndef __LINUX_ATALK_H__
+#define __LINUX_ATALK_H__
+
+#define SIOCATALKDIFADDR (SIOCPROTOPRIVATE + 0)
+
+#define ATPORT_FIRST 1
+#define ATPORT_RESERVED 128
+#define ATPORT_LAST 255
+#define ATADDR_ANYNET (__u16)0
+#define ATADDR_ANYNODE (__u8)0
+#define ATADDR_ANYPORT (__u8)0
+#define ATADDR_BCAST (__u8)255
+#define DDP_MAXSZ 587
+
+struct at_addr
+{
+ __u16 s_net;
+ __u8 s_node;
+};
+
+struct sockaddr_at
+{
+ short sat_family;
+ __u8 sat_port;
+ struct at_addr sat_addr;
+ char sat_zero[ 8 ];
+};
+
+struct netrange
+{
+ __u8 nr_phase;
+ __u16 nr_firstnet;
+ __u16 nr_lastnet;
+};
+
+struct atalk_route
+{
+ struct device *dev;
+ struct at_addr target;
+ struct at_addr gateway;
+ int flags;
+ struct atalk_route *next;
+};
+
+struct atalk_iface
+{
+ struct device *dev;
+ struct at_addr address; /* Our address */
+ int status; /* What are we doing ?? */
+#define ATIF_PROBE 1 /* Probing for an address */
+#define ATIF_PROBE_FAIL 2 /* Probe collided */
+ struct netrange nets; /* Associated direct netrange */
+ struct atalk_iface *next;
+};
+
+struct atalk_sock
+{
+ unsigned short dest_net;
+ unsigned short src_net;
+ unsigned char dest_node;
+ unsigned char src_node;
+ unsigned char dest_port;
+ unsigned char src_port;
+};
+
+#define DDP_MAXHOPS 15 /* 4 bits of hop counter */
+
+#ifdef __KERNEL__
+
+#include <asm/byteorder.h>
+
+struct ddpehdr
+{
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ __u16 deh_len:10, deh_hops:4, deh_pad:2;
+#else
+ __u16 deh_pad:2, deh_hops:4, deh_len:10;
+#endif
+ __u16 deh_sum;
+ __u16 deh_dnet;
+ __u16 deh_snet;
+ __u8 deh_dnode;
+ __u8 deh_snode;
+ __u8 deh_dport;
+ __u8 deh_sport;
+ /* And netatalk apps expect to stick the type in themselves */
+};
+
+/*
+ * Unused (and currently unsupported)
+ */
+
+struct ddpshdr
+{
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ __u16 dsh_len:10, dsh_pad:6;
+#else
+ __u16 dsh_pad:6, dsh_len:10;
+#endif
+ __u8 dsh_dport;
+ __u8 dsh_sport;
+ /* And netatalk apps expect to stick the type in themselves */
+};
+
+/* Appletalk AARP headers */
+
+struct elapaarp
+{
+ __u16 hw_type;
+#define AARP_HW_TYPE_ETHERNET 1
+#define AARP_HW_TYPE_TOKENRING 2
+ __u16 pa_type;
+ __u8 hw_len;
+ __u8 pa_len;
+#define AARP_PA_ALEN 4
+ __u16 function;
+#define AARP_REQUEST 1
+#define AARP_REPLY 2
+#define AARP_PROBE 3
+ __u8 hw_src[ETH_ALEN] __attribute__ ((packed));
+ __u8 pa_src_zero __attribute__ ((packed));
+ __u16 pa_src_net __attribute__ ((packed));
+ __u8 pa_src_node __attribute__ ((packed));
+ __u8 hw_dst[ETH_ALEN] __attribute__ ((packed));
+ __u8 pa_dst_zero __attribute__ ((packed));
+ __u16 pa_dst_net __attribute__ ((packed));
+ __u8 pa_dst_node __attribute__ ((packed));
+};
+
+typedef struct sock atalk_socket;
+
+#define AARP_EXPIRY_TIME (5*60*HZ) /* Not specified - how long till we drop a resolved entry */
+#define AARP_HASH_SIZE 16 /* Size of hash table */
+#define AARP_TICK_TIME (HZ/5) /* Fast retransmission timer when resolving */
+#define AARP_RETRANSMIT_LIMIT 10 /* Send 10 requests then give up (2 seconds) */
+#define AARP_RESOLVE_TIME (10*HZ) /* Some value bigger than total retransmit time + a bit for last reply to appear and to stop continual requests */
+
+extern struct datalink_proto *ddp_dl, *aarp_dl;
+extern void aarp_proto_init(void);
+/* Inter module exports */
+extern struct atalk_iface *atalk_find_dev(struct device *dev);
+extern struct at_addr *atalk_find_dev_addr(struct device *dev);
+extern int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr);
+extern void aarp_send_probe(struct device *dev, struct at_addr *addr);
+#ifdef MODULE
+extern void aarp_cleanup_module(void);
+#endif
+
+#endif
+#endif
diff --git a/linux/src/include/linux/ax25.h b/linux/src/include/linux/ax25.h
new file mode 100644
index 0000000..f4bc5a0
--- /dev/null
+++ b/linux/src/include/linux/ax25.h
@@ -0,0 +1,96 @@
+/*
+ * These are the public elements of the Linux kernel AX.25 code. A similar
+ * file netrom.h exists for the NET/ROM protocol.
+ */
+
+#ifndef AX25_KERNEL_H
+#define AX25_KERNEL_H
+
+#define AX25_MTU 256
+#define AX25_MAX_DIGIS 6 /* This is wrong, should be 8 */
+
+#define AX25_WINDOW 1
+#define AX25_T1 2
+#define AX25_N2 3
+#define AX25_T3 4
+#define AX25_T2 5
+#define AX25_BACKOFF 6
+#define AX25_EXTSEQ 7
+#define AX25_PIDINCL 8
+#define AX25_IDLE 9
+#define AX25_PACLEN 10
+#define AX25_IAMDIGI 12
+
+#define AX25_KILL 99
+
+#define SIOCAX25GETUID (SIOCPROTOPRIVATE+0)
+#define SIOCAX25ADDUID (SIOCPROTOPRIVATE+1)
+#define SIOCAX25DELUID (SIOCPROTOPRIVATE+2)
+#define SIOCAX25NOUID (SIOCPROTOPRIVATE+3)
+#define SIOCAX25OPTRT (SIOCPROTOPRIVATE+7)
+#define SIOCAX25CTLCON (SIOCPROTOPRIVATE+8)
+#define SIOCAX25GETINFO (SIOCPROTOPRIVATE+9)
+#define SIOCAX25ADDFWD (SIOCPROTOPRIVATE+10)
+#define SIOCAX25DELFWD (SIOCPROTOPRIVATE+11)
+
+#define AX25_SET_RT_IPMODE 2
+
+#define AX25_NOUID_DEFAULT 0
+#define AX25_NOUID_BLOCK 1
+
+typedef struct {
+ char ax25_call[7]; /* 6 call + SSID (shifted ascii!) */
+} ax25_address;
+
+struct sockaddr_ax25 {
+ unsigned short sax25_family;
+ ax25_address sax25_call;
+ int sax25_ndigis;
+ /* Digipeater ax25_address sets follow */
+};
+
+#define sax25_uid sax25_ndigis
+
+struct full_sockaddr_ax25 {
+ struct sockaddr_ax25 fsa_ax25;
+ ax25_address fsa_digipeater[AX25_MAX_DIGIS];
+};
+
+struct ax25_routes_struct {
+ ax25_address port_addr;
+ ax25_address dest_addr;
+ unsigned char digi_count;
+ ax25_address digi_addr[AX25_MAX_DIGIS];
+};
+
+struct ax25_route_opt_struct {
+ ax25_address port_addr;
+ ax25_address dest_addr;
+ int cmd;
+ int arg;
+};
+
+struct ax25_ctl_struct {
+ ax25_address port_addr;
+ ax25_address source_addr;
+ ax25_address dest_addr;
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+struct ax25_info_struct {
+ unsigned int n2, n2count;
+ unsigned int t1, t1timer;
+ unsigned int t2, t2timer;
+ unsigned int t3, t3timer;
+ unsigned int idle, idletimer;
+ unsigned int state;
+ unsigned int rcv_q, snd_q;
+};
+
+struct ax25_fwd_struct {
+ ax25_address port_from;
+ ax25_address port_to;
+};
+
+#endif
diff --git a/linux/src/include/linux/binfmts.h b/linux/src/include/linux/binfmts.h
new file mode 100644
index 0000000..ae7167e
--- /dev/null
+++ b/linux/src/include/linux/binfmts.h
@@ -0,0 +1,65 @@
+#ifndef _LINUX_BINFMTS_H
+#define _LINUX_BINFMTS_H
+
+#include <linux/ptrace.h>
+
+/*
+ * MAX_ARG_PAGES defines the number of pages allocated for arguments
+ * and envelope for the new program. 32 should suffice, this gives
+ * a maximum env+arg of 128kB w/4KB pages!
+ */
+#define MAX_ARG_PAGES 32
+
+/*
+ * This structure is used to hold the arguments that are used when loading binaries.
+ */
+struct linux_binprm{
+ char buf[128];
+ unsigned long page[MAX_ARG_PAGES];
+ unsigned long p;
+ int sh_bang;
+ struct inode * inode;
+ int e_uid, e_gid;
+ int argc, envc;
+ char * filename; /* Name of binary */
+ unsigned long loader, exec;
+ int dont_iput; /* binfmt handler has put inode */
+};
+
+/*
+ * This structure defines the functions that are used to load the binary formats that
+ * linux accepts.
+ */
+struct linux_binfmt {
+ struct linux_binfmt * next;
+ long *use_count;
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(int fd);
+ int (*core_dump)(long signr, struct pt_regs * regs);
+};
+
+extern int register_binfmt(struct linux_binfmt *);
+extern int unregister_binfmt(struct linux_binfmt *);
+
+extern int read_exec(struct inode *inode, unsigned long offset,
+ char * addr, unsigned long count, int to_kmem);
+
+extern int open_inode(struct inode * inode, int mode);
+
+extern int init_elf_binfmt(void);
+extern int init_aout_binfmt(void);
+extern int init_script_binfmt(void);
+extern int init_java_binfmt(void);
+
+extern int prepare_binprm(struct linux_binprm *);
+extern void remove_arg_zero(struct linux_binprm *);
+extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
+extern int flush_old_exec(struct linux_binprm * bprm);
+extern unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm);
+extern unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
+ unsigned long p, int from_kmem);
+
+/* this eventually goes away */
+#define change_ldt(a,b) setup_arg_pages(a,b)
+
+#endif
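The "128kB" in the MAX_ARG_PAGES comment above is just 32 pages times the 4 KB
i386 page size, which bounds the total size of the argv[] and envp[] strings a
program can pass to execve().  Trivial standalone check, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        unsigned long max_arg_pages = 32, page_size = 4096;    /* i386 PAGE_SIZE */

        printf("%lu kB\n", max_arg_pages * page_size / 1024);  /* prints 128 kB */
        return 0;
    }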
diff --git a/linux/src/include/linux/bios32.h b/linux/src/include/linux/bios32.h
new file mode 100644
index 0000000..7944a53
--- /dev/null
+++ b/linux/src/include/linux/bios32.h
@@ -0,0 +1,61 @@
+/*
+ * BIOS32, PCI BIOS functions and defines
+ * Copyright 1994, Drew Eckhardt
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * P.O. Box 14070
+ * Portland, OR 97214
+ * U. S. A.
+ * Phone: 800-433-5177 / +1-503-797-4207
+ * Fax: +1-503-234-6762
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ */
+
+#ifndef BIOS32_H
+#define BIOS32_H
+
+/*
+ * Error values that may be returned by the PCI bios. Use
+ * pcibios_strerror() to convert to a printable string.
+ */
+#define PCIBIOS_SUCCESSFUL 0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
+#define PCIBIOS_BAD_VENDOR_ID 0x83
+#define PCIBIOS_DEVICE_NOT_FOUND 0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
+#define PCIBIOS_SET_FAILED 0x88
+#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+
+extern int pcibios_present (void);
+extern unsigned long pcibios_init (unsigned long memory_start,
+ unsigned long memory_end);
+extern unsigned long pcibios_fixup (unsigned long memory_start,
+ unsigned long memory_end);
+extern int pcibios_find_class (unsigned int class_code, unsigned short index,
+ unsigned char *bus, unsigned char *dev_fn);
+extern int pcibios_find_device (unsigned short vendor, unsigned short dev_id,
+ unsigned short index, unsigned char *bus,
+ unsigned char *dev_fn);
+extern int pcibios_read_config_byte (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned char *val);
+extern int pcibios_read_config_word (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned short *val);
+extern int pcibios_read_config_dword (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned int *val);
+extern int pcibios_write_config_byte (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned char val);
+extern int pcibios_write_config_word (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned short val);
+extern int pcibios_write_config_dword (unsigned char bus, unsigned char dev_fn,
+ unsigned char where, unsigned int val);
+extern const char *pcibios_strerror (int error);
+
+#endif /* BIOS32_H */
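A sketch of how a driver of this era consumes the interface declared above:
check that a PCI BIOS is present, then iterate pcibios_find_device() with an
increasing index until it stops returning PCIBIOS_SUCCESSFUL, reading config
space for each hit.  Kernel-context sketch only, not a standalone program and
not part of the patch; the vendor/device IDs are placeholders:

    static void example_pci_probe(void)
    {
        unsigned short index;
        unsigned char bus, dev_fn, irq;
        int err;

        if (!pcibios_present())
            return;

        for (index = 0; ; index++) {
            err = pcibios_find_device(0x8086, 0x1229, index, &bus, &dev_fn);
            if (err != PCIBIOS_SUCCESSFUL) {
                if (err != PCIBIOS_DEVICE_NOT_FOUND)
                    printk("pci probe: %s\n", pcibios_strerror(err));
                break;
            }
            /* 0x3c is the standard PCI_INTERRUPT_LINE config offset */
            pcibios_read_config_byte(bus, dev_fn, 0x3c, &irq);
            printk("device %d/%#x uses irq %d\n", bus, dev_fn, irq);
        }
    }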
diff --git a/linux/src/include/linux/blk.h b/linux/src/include/linux/blk.h
new file mode 100644
index 0000000..92634d7
--- /dev/null
+++ b/linux/src/include/linux/blk.h
@@ -0,0 +1,454 @@
+#ifndef _BLK_H
+#define _BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/locks.h>
+#include <linux/malloc.h>
+#include <linux/config.h>
+#include <linux/md.h>
+
+/*
+ * NR_REQUEST is the number of entries in the request-queue.
+ * NOTE that writes may use only the low 2/3 of these: reads
+ * take precedence.
+ */
+#define NR_REQUEST 64
+
+/*
+ * This is used in the elevator algorithm. We don't prioritise reads
+ * over writes any more --- although reads are more time-critical than
+ * writes, by treating them equally we increase filesystem throughput.
+ * This turns out to give better overall performance. -- sct
+ */
+#define IN_ORDER(s1,s2) \
+((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \
+(s1)->sector < (s2)->sector)))
+
+/*
+ * These will have to be changed to be aware of different buffer
+ * sizes etc.. It actually needs a major cleanup.
+ */
+#if defined(IDE_DRIVER) || defined(MD_DRIVER)
+#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
+#else
+#define SECTOR_MASK (blksize_size[MAJOR_NR] && \
+ blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] ? \
+ ((blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] >> 9) - 1) : \
+ ((BLOCK_SIZE >> 9) - 1))
+#endif /* IDE_DRIVER */
+
+#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0)
+
+#ifdef CONFIG_CDU31A
+extern int cdu31a_init(void);
+#endif /* CONFIG_CDU31A */
+#ifdef CONFIG_MCD
+extern int mcd_init(void);
+#endif /* CONFIG_MCD */
+#ifdef CONFIG_MCDX
+extern int mcdx_init(void);
+#endif /* CONFIG_MCDX */
+#ifdef CONFIG_SBPCD
+extern int sbpcd_init(void);
+#endif /* CONFIG_SBPCD */
+#ifdef CONFIG_AZTCD
+extern int aztcd_init(void);
+#endif /* CONFIG_AZTCD */
+#ifdef CONFIG_CDU535
+extern int sony535_init(void);
+#endif /* CONFIG_CDU535 */
+#ifdef CONFIG_GSCD
+extern int gscd_init(void);
+#endif /* CONFIG_GSCD */
+#ifdef CONFIG_CM206
+extern int cm206_init(void);
+#endif /* CONFIG_CM206 */
+#ifdef CONFIG_OPTCD
+extern int optcd_init(void);
+#endif /* CONFIG_OPTCD */
+#ifdef CONFIG_SJCD
+extern int sjcd_init(void);
+#endif /* CONFIG_SJCD */
+#ifdef CONFIG_CDI_INIT
+extern int cdi_init(void);
+#endif /* CONFIG_CDI_INIT */
+#ifdef CONFIG_BLK_DEV_HD
+extern int hd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_IDE
+extern int ide_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+extern int xd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_LOOP
+extern int loop_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_MD
+extern int md_init(void);
+#endif /* CONFIG_BLK_DEV_MD */
+
+extern void set_device_ro(kdev_t dev,int flag);
+void add_blkdev_randomness(int major);
+
+extern int floppy_init(void);
+extern void rd_load(void);
+extern int rd_init(void);
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+extern unsigned long initrd_start,initrd_end;
+extern int mount_initrd; /* zero if initrd should not be mounted */
+void initrd_init(void);
+
+#endif
+
+#define RO_IOCTLS(dev,where) \
+ case BLKROSET: { int __err; if (!suser()) return -EACCES; \
+ __err = verify_area(VERIFY_READ, (void *) (where), sizeof(long)); \
+ if (!__err) set_device_ro((dev),get_fs_long((long *) (where))); return __err; } \
+ case BLKROGET: { int __err = verify_area(VERIFY_WRITE, (void *) (where), sizeof(long)); \
+ if (!__err) put_fs_long(0!=is_read_only(dev),(long *) (where)); return __err; }
+
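+/*
+ * Usage sketch (illustrative; mydev_ioctl is a made-up name): a block
+ * driver picks up the generic read-only ioctls by expanding RO_IOCTLS()
+ * inside its ioctl switch:
+ *
+ *	static int mydev_ioctl(struct inode *inode, struct file *file,
+ *			       unsigned int cmd, unsigned long arg)
+ *	{
+ *		switch (cmd) {
+ *			RO_IOCTLS(inode->i_rdev, arg);
+ *		}
+ *		return -EINVAL;
+ *	}
+ */
+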
+#if defined(MAJOR_NR) || defined(IDE_DRIVER)
+
+/*
+ * Add entries as needed.
+ */
+
+#ifdef IDE_DRIVER
+
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+#define DEVICE_ON(device) /* nothing */
+#define DEVICE_OFF(device) /* nothing */
+
+#elif (MAJOR_NR == RAMDISK_MAJOR)
+
+/* ram disk */
+#define DEVICE_NAME "ramdisk"
+#define DEVICE_REQUEST rd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+#define DEVICE_NO_RANDOM
+
+#elif (MAJOR_NR == FLOPPY_MAJOR)
+
+static void floppy_off(unsigned int nr);
+
+#define DEVICE_NAME "floppy"
+#define DEVICE_INTR do_floppy
+#define DEVICE_REQUEST do_fd_request
+#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
+
+#elif (MAJOR_NR == HD_MAJOR)
+
+/* harddisk: timeout is 6 seconds.. */
+#define DEVICE_NAME "harddisk"
+#define DEVICE_INTR do_hd
+#define DEVICE_TIMEOUT HD_TIMER
+#define TIMEOUT_VALUE (6*HZ)
+#define DEVICE_REQUEST do_hd_request
+#define DEVICE_NR(device) (MINOR(device)>>6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_DISK_MAJOR)
+
+#define DEVICE_NAME "scsidisk"
+#define DEVICE_INTR do_sd
+#define TIMEOUT_VALUE (2*HZ)
+#define DEVICE_REQUEST do_sd_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+/* Kludge to use the same number for both char and block major numbers */
+#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
+
+#define DEVICE_NAME "scsitape"
+#define DEVICE_INTR do_st
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
+
+#define DEVICE_NAME "CD-ROM"
+#define DEVICE_INTR do_sr
+#define DEVICE_REQUEST do_sr_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == XT_DISK_MAJOR)
+
+#define DEVICE_NAME "xt disk"
+#define DEVICE_REQUEST do_xd_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
+
+#define DEVICE_NAME "CDU31A"
+#define DEVICE_REQUEST do_cdu31a_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcd */
+#define DEVICE_REQUEST do_mcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcdx */
+#define DEVICE_REQUEST do_mcdx_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #1"
+#define DEVICE_REQUEST do_sbpcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #2"
+#define DEVICE_REQUEST do_sbpcd2_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #3"
+#define DEVICE_REQUEST do_sbpcd3_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #4"
+#define DEVICE_REQUEST do_sbpcd4_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
+
+#define DEVICE_NAME "Aztech CD-ROM"
+#define DEVICE_REQUEST do_aztcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
+
+#define DEVICE_NAME "SONY-CDU535"
+#define DEVICE_INTR do_cdu535
+#define DEVICE_REQUEST do_cdu535_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
+
+#define DEVICE_NAME "Goldstar R420"
+#define DEVICE_REQUEST do_gscd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CM206_CDROM_MAJOR)
+#define DEVICE_NAME "Philips/LMS cd-rom cm206"
+#define DEVICE_REQUEST do_cm206_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
+
+#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
+#define DEVICE_REQUEST do_optcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
+
+#define DEVICE_NAME "Sanyo H94A CD-ROM"
+#define DEVICE_REQUEST do_sjcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif /* MAJOR_NR == whatever */
+
+#if (MAJOR_NR != SCSI_TAPE_MAJOR)
+#if !defined(IDE_DRIVER)
+
+#ifndef CURRENT
+#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#endif
+
+#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
+
+#ifdef DEVICE_INTR
+static void (*DEVICE_INTR)(void) = NULL;
+#endif
+#ifdef DEVICE_TIMEOUT
+
+#define SET_TIMER \
+((timer_table[DEVICE_TIMEOUT].expires = jiffies + TIMEOUT_VALUE), \
+(timer_active |= 1<<DEVICE_TIMEOUT))
+
+#define CLEAR_TIMER \
+timer_active &= ~(1<<DEVICE_TIMEOUT)
+
+#define SET_INTR(x) \
+if ((DEVICE_INTR = (x)) != NULL) \
+ SET_TIMER; \
+else \
+ CLEAR_TIMER;
+
+#else
+
+#define SET_INTR(x) (DEVICE_INTR = (x))
+
+#endif /* DEVICE_TIMEOUT */
+
+static void (DEVICE_REQUEST)(void);
+
+#ifdef DEVICE_INTR
+#define CLEAR_INTR SET_INTR(NULL)
+#else
+#define CLEAR_INTR
+#endif
+
+#define INIT_REQUEST \
+ if (!CURRENT) {\
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+
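+/*
+ * Usage sketch (illustrative; do_mydev_request and transfer are made-up
+ * names): the routine named by DEVICE_REQUEST typically loops with
+ * INIT_REQUEST and completes each request through end_request() below:
+ *
+ *	static void do_mydev_request(void)
+ *	{
+ *	repeat:
+ *		INIT_REQUEST;
+ *		transfer(CURRENT->cmd, CURRENT->sector,
+ *			 CURRENT->current_nr_sectors, CURRENT->buffer);
+ *		end_request(1);
+ *		goto repeat;
+ *	}
+ *
+ * INIT_REQUEST returns from the function when the queue is empty;
+ * end_request(1) marks the chunk successful and advances CURRENT.
+ */
+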
+#endif /* !defined(IDE_DRIVER) */
+
+/* end_request() - SCSI devices have their own version */
+/* - IDE drivers have their own copy too */
+
+#if ! SCSI_BLK_MAJOR(MAJOR_NR)
+
+#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
+#else
+
+#ifdef IDE_DRIVER
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
+ struct request *req = hwgroup->rq;
+#else
+static void end_request(int uptodate) {
+ struct request *req = CURRENT;
+#endif /* IDE_DRIVER */
+ struct buffer_head * bh;
+ int nsect;
+
+ req->errors = 0;
+ if (!uptodate) {
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+ if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
+ req->nr_sectors--;
+ req->nr_sectors &= ~(nsect - 1);
+ req->sector += nsect;
+ req->sector &= ~(nsect - 1);
+ }
+ }
+
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+
+ /*
+ * This is our 'MD IO has finished' event handler.
+ * note that b_state should be cached in a register
+ * anyways, so the overhead of this checking is almost
+ * zero. But anyways .. we never get OO for free :)
+ */
+ if (test_bit(BH_MD, &bh->b_state)) {
+ struct md_personality * pers=(struct md_personality *)bh->personality;
+ pers->end_request(bh,uptodate);
+ }
+ /*
+ * the normal (nonmirrored and no RAID5) case:
+ */
+ else {
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ }
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ return;
+ }
+ }
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+#endif
+#ifdef IDE_DRIVER
+ blk_dev[MAJOR(req->rq_dev)].current_request = req->next;
+ hwgroup->rq = NULL;
+#else
+ DEVICE_OFF(req->rq_dev);
+ CURRENT = req->next;
+#endif /* IDE_DRIVER */
+ if (req->sem != NULL)
+ up(req->sem);
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
+#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
+#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */
+
+#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
+
+#endif /* _BLK_H */
diff --git a/linux/src/include/linux/blkdev.h b/linux/src/include/linux/blkdev.h
new file mode 100644
index 0000000..5bfc84e
--- /dev/null
+++ b/linux/src/include/linux/blkdev.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+
+/*
+ * Ok, this is an expanded form so that we can use the same
+ * request for paging requests when that is implemented. In
+ * paging, 'bh' is NULL, and the semaphore is used to wait
+ * for read/write completion.
+ */
+struct request {
+ volatile int rq_status; /* should split this into a few status bits */
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+ kdev_t rq_dev;
+ int cmd; /* READ or WRITE */
+ int errors;
+ unsigned long sector;
+ unsigned long nr_sectors;
+ unsigned long current_nr_sectors;
+ char * buffer;
+ struct semaphore * sem;
+ struct buffer_head * bh;
+ struct buffer_head * bhtail;
+ struct request * next;
+};
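+
+/*
+ * Illustrative sketch (not from the original header): for a synchronous,
+ * paging-style transfer the caller leaves bh NULL and sleeps on the
+ * semaphore; the driver's end_request() does up(req->sem) when the I/O
+ * completes.  Assuming 'req' is a free slot claimed from the request
+ * table, and MUTEX_LOCKED / down() / up() are the 2.0-era primitives
+ * from <asm/semaphore.h>:
+ *
+ *	struct semaphore sem = MUTEX_LOCKED;
+ *	req->cmd = READ;
+ *	req->bh = NULL;
+ *	req->sem = &sem;
+ *	(queue req for the device and kick its request function)
+ *	down(&sem);
+ */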
+
+struct blk_dev_struct {
+ void (*request_fn)(void);
+ struct request * current_request;
+ struct request plug;
+ struct tq_struct plug_tq;
+};
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern struct wait_queue * wait_for_request;
+extern void resetup_one_dev(struct gendisk *dev, int drive);
+extern void unplug_device(void * data);
+extern void make_request(int major,int rw, struct buffer_head * bh);
+
+/* md needs this function to remap requests */
+extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
+extern int md_make_request (int minor, int rw, struct buffer_head * bh);
+extern int md_error (kdev_t mddev, kdev_t rdev);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * hardsect_size[MAX_BLKDEV];
+
+#endif
diff --git a/linux/src/include/linux/cdrom.h b/linux/src/include/linux/cdrom.h
new file mode 100644
index 0000000..022d6e1
--- /dev/null
+++ b/linux/src/include/linux/cdrom.h
@@ -0,0 +1,453 @@
+/*
+ * -- <linux/cdrom.h>
+ * general (not only SCSI) header library for linux CDROM drivers
+ * (C) 1992 David Giller rafetmad@oxy.edu
+ * 1994, 1995 Eberhard Moenkeberg emoenke@gwdg.de
+ *
+ */
+
+#ifndef _LINUX_CDROM_H
+#define _LINUX_CDROM_H
+
+/*
+ * some fix numbers
+ */
+#define CD_MINS 74 /* max. minutes per CD, not really a limit */
+#define CD_SECS 60 /* seconds per minute */
+#define CD_FRAMES 75 /* frames per second */
+
+#define CD_SYNC_SIZE 12 /* 12 sync bytes per raw data frame, not transferred by the drive */
+#define CD_HEAD_SIZE 4 /* header (address) bytes per raw data frame */
+#define CD_SUBHEAD_SIZE 8 /* subheader bytes per raw XA data frame */
+#define CD_XA_HEAD (CD_HEAD_SIZE+CD_SUBHEAD_SIZE) /* "before data" part of raw XA frame */
+#define CD_XA_SYNC_HEAD (CD_SYNC_SIZE+CD_XA_HEAD)/* sync bytes + header of XA frame */
+
+#define CD_FRAMESIZE 2048 /* bytes per frame, "cooked" mode */
+#define CD_FRAMESIZE_RAW 2352 /* bytes per frame, "raw" mode */
+/* most drives don't deliver everything: */
+#define CD_FRAMESIZE_RAW1 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE) /* 2340 */
+#define CD_FRAMESIZE_RAW0 (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE) /* 2336 */
+/* Optics drive also has a 'read all' mode: */
+#define CD_FRAMESIZE_RAWER 2646 /* bytes per frame */
+
+#define CD_EDC_SIZE 4 /* bytes EDC per most raw data frame types */
+#define CD_ZERO_SIZE 8 /* bytes zero per yellow book mode 1 frame */
+#define CD_ECC_SIZE 276 /* bytes ECC per most raw data frame types */
+#define CD_XA_TAIL (CD_EDC_SIZE+CD_ECC_SIZE) /* "after data" part of raw XA frame */
+
+#define CD_FRAMESIZE_SUB 96 /* subchannel data "frame" size */
+#define CD_MSF_OFFSET 150 /* MSF numbering offset of first frame */
+
+#define CD_CHUNK_SIZE 24 /* lowest-level "data bytes piece" */
+#define CD_NUM_OF_CHUNKS 98 /* chunks per frame */
+
+#define CD_FRAMESIZE_XA CD_FRAMESIZE_RAW1 /* obsolete name */
+#define CD_BLOCK_OFFSET CD_MSF_OFFSET /* obsolete name */
+
+/*
+ * the raw frame layout:
+ *
+ * - audio (red): | audio_sample_bytes |
+ * | 2352 |
+ *
+ * - data (yellow, mode1): | sync - head - data - EDC - zero - ECC |
+ * | 12 - 4 - 2048 - 4 - 8 - 276 |
+ *
+ * - data (yellow, mode2): | sync - head - data |
+ * | 12 - 4 - 2336 |
+ *
+ * - XA data (green, mode2 form1): | sync - head - sub - data - EDC - ECC |
+ * | 12 - 4 - 8 - 2048 - 4 - 276 |
+ *
+ * - XA data (green, mode2 form2): | sync - head - sub - data - EDC |
+ * | 12 - 4 - 8 - 2324 - 4 |
+ */
+
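+/*
+ * As a cross-check of the layout above (derived from the macros in this
+ * file), every variant adds up to one raw 2352-byte frame:
+ *
+ *	mode 1:		12 + 4 + 2048 + 4 + 8 + 276	= 2352
+ *	mode 2:		12 + 4 + 2336			= 2352
+ *	XA form 1:	12 + 4 + 8 + 2048 + 4 + 276	= 2352
+ *	XA form 2:	12 + 4 + 8 + 2324 + 4		= 2352
+ *
+ * i.e. CD_SYNC_SIZE + CD_HEAD_SIZE + CD_FRAMESIZE + CD_EDC_SIZE +
+ * CD_ZERO_SIZE + CD_ECC_SIZE == CD_FRAMESIZE_RAW, and likewise
+ * CD_XA_SYNC_HEAD + CD_FRAMESIZE + CD_XA_TAIL == CD_FRAMESIZE_RAW.
+ */
+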
+/*
+ * CDROM IOCTL structures
+ */
+
+struct cdrom_blk
+{
+ unsigned from;
+ unsigned short len;
+};
+
+
+struct cdrom_msf
+{
+ u_char cdmsf_min0; /* start minute */
+ u_char cdmsf_sec0; /* start second */
+ u_char cdmsf_frame0; /* start frame */
+ u_char cdmsf_min1; /* end minute */
+ u_char cdmsf_sec1; /* end second */
+ u_char cdmsf_frame1; /* end frame */
+};
+
+struct cdrom_ti
+{
+ u_char cdti_trk0; /* start track */
+ u_char cdti_ind0; /* start index */
+ u_char cdti_trk1; /* end track */
+ u_char cdti_ind1; /* end index */
+};
+
+struct cdrom_tochdr
+{
+ u_char cdth_trk0; /* start track */
+ u_char cdth_trk1; /* end track */
+};
+
+struct cdrom_msf0 /* address in MSF format */
+{
+ u_char minute;
+ u_char second;
+ u_char frame;
+};
+
+union cdrom_addr /* address in either MSF or logical format */
+{
+ struct cdrom_msf0 msf;
+ int lba;
+};
+
+struct cdrom_tocentry
+{
+ u_char cdte_track;
+ u_char cdte_adr :4;
+ u_char cdte_ctrl :4;
+ u_char cdte_format;
+ union cdrom_addr cdte_addr;
+ u_char cdte_datamode;
+};
+
+/*
+ * CD-ROM address types (cdrom_tocentry.cdte_format)
+ */
+#define CDROM_LBA 0x01 /* "logical block": first frame is #0 */
+#define CDROM_MSF 0x02 /* "minute-second-frame": binary, not bcd here! */
+
+/*
+ * bit to tell whether track is data or audio (cdrom_tocentry.cdte_ctrl)
+ */
+#define CDROM_DATA_TRACK 0x04
+
+/*
+ * The leadout track is always 0xAA, regardless of # of tracks on disc
+ */
+#define CDROM_LEADOUT 0xAA
+
+struct cdrom_subchnl
+{
+ u_char cdsc_format;
+ u_char cdsc_audiostatus;
+ u_char cdsc_adr: 4;
+ u_char cdsc_ctrl: 4;
+ u_char cdsc_trk;
+ u_char cdsc_ind;
+ union cdrom_addr cdsc_absaddr;
+ union cdrom_addr cdsc_reladdr;
+};
+
+struct cdrom_mcn {
+ u_char medium_catalog_number[14]; /* 13 ASCII digits, null-terminated */
+};
+
+/*
+ * audio states (from SCSI-2, but seen with other drives, too)
+ */
+#define CDROM_AUDIO_INVALID 0x00 /* audio status not supported */
+#define CDROM_AUDIO_PLAY 0x11 /* audio play operation in progress */
+#define CDROM_AUDIO_PAUSED 0x12 /* audio play operation paused */
+#define CDROM_AUDIO_COMPLETED 0x13 /* audio play successfully completed */
+#define CDROM_AUDIO_ERROR 0x14 /* audio play stopped due to error */
+#define CDROM_AUDIO_NO_STATUS 0x15 /* no current audio status to return */
+
+struct cdrom_volctrl
+{
+ u_char channel0;
+ u_char channel1;
+ u_char channel2;
+ u_char channel3;
+};
+
+struct cdrom_read
+{
+ int cdread_lba;
+ caddr_t cdread_bufaddr;
+ int cdread_buflen;
+};
+
+/*
+ * extensions for transferring audio frames
+ * currently used by sbpcd.c, cdu31a.c, ide-cd.c
+ */
+struct cdrom_read_audio
+{
+ union cdrom_addr addr; /* frame address */
+ u_char addr_format; /* CDROM_LBA or CDROM_MSF */
+ int nframes; /* number of 2352-byte-frames to read at once, limited by the drivers */
+ u_char *buf; /* frame buffer (size: nframes*2352 bytes) */
+};
+
+/*
+ * this has to be the "arg" of the CDROMMULTISESSION ioctl
+ * for obtaining multi session info.
+ * The returned "addr" is valid only if "xa_flag" is true.
+ */
+struct cdrom_multisession
+{
+ union cdrom_addr addr; /* frame address: start-of-last-session (not the new "frame 16"!)*/
+ u_char xa_flag; /* 1: "is XA disk" */
+ u_char addr_format; /* CDROM_LBA or CDROM_MSF */
+};
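+
+/*
+ * Usage sketch (illustrative; fd, ms and start are local names, not part
+ * of the interface): a user program asks for the start of the last
+ * session with
+ *
+ *	struct cdrom_multisession ms;
+ *	ms.addr_format = CDROM_LBA;
+ *	if (ioctl(fd, CDROMMULTISESSION, &ms) == 0 && ms.xa_flag)
+ *		start = ms.addr.lba;
+ *
+ * addr_format selects LBA or MSF form for the returned address, which is
+ * only meaningful when xa_flag is set.
+ */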
+
+#ifdef FIVETWELVE
+#define CDROM_MODE1_SIZE 512
+#else
+#define CDROM_MODE1_SIZE 2048
+#endif /* FIVETWELVE */
+#define CDROM_MODE2_SIZE 2336
+
+/*
+ * CD-ROM IOCTL commands
+ * For IOCTL calls, we will commandeer byte 0x53, or 'S'.
+ */
+
+#define CDROMPAUSE 0x5301
+#define CDROMRESUME 0x5302
+#define CDROMPLAYMSF 0x5303 /* (struct cdrom_msf) */
+#define CDROMPLAYTRKIND 0x5304 /* (struct cdrom_ti) */
+
+#define CDROMREADTOCHDR 0x5305 /* (struct cdrom_tochdr) */
+#define CDROMREADTOCENTRY 0x5306 /* (struct cdrom_tocentry) */
+
+#define CDROMSTOP 0x5307 /* stop the drive motor */
+#define CDROMSTART 0x5308 /* turn the motor on */
+
+#define CDROMEJECT 0x5309 /* eject CD-ROM media */
+
+#define CDROMVOLCTRL 0x530a /* (struct cdrom_volctrl) */
+
+#define CDROMSUBCHNL 0x530b /* (struct cdrom_subchnl) */
+
+#define CDROMREADMODE2 0x530c /* (struct cdrom_read) */
+ /* read type-2 data */
+
+#define CDROMREADMODE1 0x530d /* (struct cdrom_read) */
+ /* read type-1 data */
+
+#define CDROMREADAUDIO 0x530e /* (struct cdrom_read_audio) */
+
+/*
+ * enable (1) / disable (0) auto-ejecting
+ */
+#define CDROMEJECT_SW 0x530f /* arg: 0 or 1 */
+
+/*
+ * obtain the start-of-last-session address of multi session disks
+ */
+#define CDROMMULTISESSION 0x5310 /* (struct cdrom_multisession) */
+
+/*
+ * obtain the "universal product code" number
+ * (only some data disks have it coded)
+ */
+#define CDROM_GET_UPC 0x5311 /* 8 bytes returned */
+
+#define CDROMRESET 0x5312 /* hard-reset the drive */
+#define CDROMVOLREAD 0x5313 /* let the drive tell its volume setting */
+ /* (struct cdrom_volctrl) */
+
+/*
+ * these ioctls are used in aztcd.c and optcd.c
+ */
+#define CDROMREADRAW 0x5314 /* read data in raw mode */
+#define CDROMREADCOOKED 0x5315 /* read data in cooked mode */
+#define CDROMSEEK 0x5316 /* seek msf address */
+
+/*
+ * for playing audio in logical block addressing mode
+ */
+#define CDROMPLAYBLK 0x5317 /* (struct cdrom_blk) */
+
+/*
+ * these ioctls are used in optcd.c
+ */
+#define CDROMREADALL 0x5318 /* read all 2646 bytes */
+#define CDROMCLOSETRAY 0x5319 /* pendant of CDROMEJECT */
+
+
+/*
+ * For controlling a changer. (Used by ATAPI driver.)
+ * This ioctl is deprecated in favor of CDROM_SELECT_DISC from
+ * ucdrom.h. It will probably be deleted during the 2.1 kernel series.
+ */
+#define CDROMLOADFROMSLOT 0x531a /* LOAD disk from slot*/
+
+
+/*
+ * CD-ROM-specific SCSI command opcodes
+ */
+
+/*
+ * Group 2 (10-byte). All of these are called 'optional' by SCSI-II.
+ */
+#define SCMD_READ_TOC 0x43 /* read table of contents */
+#define SCMD_PLAYAUDIO_MSF 0x47 /* play data at time offset */
+#define SCMD_PLAYAUDIO_TI 0x48 /* play data at track/index */
+#define SCMD_PAUSE_RESUME 0x4B /* pause/resume audio */
+#define SCMD_READ_SUBCHANNEL 0x42 /* read SC info on playing disc */
+#define SCMD_PLAYAUDIO10 0x45 /* play data at logical block */
+#define SCMD_READ_HEADER 0x44 /* read TOC header */
+
+/*
+ * Group 5
+ */
+#define SCMD_PLAYAUDIO12 0xA5 /* play data at logical block */
+#define SCMD_PLAYTRACK_REL12 0xA9 /* play track at relative offset */
+
+/*
+ * Group 6 Commands
+ */
+#define SCMD_CD_PLAYBACK_CONTROL 0xC9 /* Sony vendor-specific audio */
+#define SCMD_CD_PLAYBACK_STATUS 0xC4 /* control opcodes */
+
+/*
+ * CD-ROM capacity structure.
+ */
+struct scsi_capacity
+{
+ u_long capacity;
+ u_long lbasize;
+};
+
+/*
+ * CD-ROM MODE_SENSE/MODE_SELECT parameters
+ */
+#define ERR_RECOVERY_PARMS 0x01
+#define DISCO_RECO_PARMS 0x02
+#define FORMAT_PARMS 0x03
+#define GEOMETRY_PARMS 0x04
+#define CERTIFICATION_PARMS 0x06
+#define CACHE_PARMS 0x38
+
+/*
+ * standard mode-select header prepended to all mode-select commands
+ */
+struct ccs_modesel_head
+{
+ u_char _r1; /* reserved */
+ u_char medium; /* device-specific medium type */
+ u_char _r2; /* reserved */
+ u_char block_desc_length; /* block descriptor length */
+ u_char density; /* device-specific density code */
+ u_char number_blocks_hi; /* number of blocks in this block desc */
+ u_char number_blocks_med;
+ u_char number_blocks_lo;
+ u_char _r3;
+ u_char block_length_hi; /* block length for blocks in this desc */
+ u_short block_length;
+};
+
+/*
+ * error recovery parameters
+ */
+struct ccs_err_recovery
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char awre : 1; /* auto write realloc enabled */
+ u_char arre : 1; /* auto read realloc enabled */
+ u_char tb : 1; /* transfer block */
+ u_char rc : 1; /* read continuous */
+ u_char eec : 1; /* enable early correction */
+ u_char per : 1; /* post error */
+ u_char dte : 1; /* disable transfer on error */
+ u_char dcr : 1; /* disable correction */
+ u_char retry_count; /* error retry count */
+ u_char correction_span; /* largest recov. to be attempted, bits */
+ u_char head_offset_count; /* head offset (2's C) for each retry */
+ u_char strobe_offset_count; /* data strobe */
+ u_char recovery_time_limit; /* time limit on recovery attempts */
+};
+
+/*
+ * disco/reco parameters
+ */
+struct ccs_disco_reco
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char buffer_full_ratio; /* write buffer reconnect threshold */
+ u_char buffer_empty_ratio; /* read */
+ u_short bus_inactivity_limit; /* limit on bus inactivity time */
+ u_short disconnect_time_limit; /* minimum disconnect time */
+ u_short connect_time_limit; /* minimum connect time */
+ u_short _r2; /* reserved */
+};
+
+/*
+ * drive geometry parameters
+ */
+struct ccs_geometry
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char cyl_ub; /* #cyls */
+ u_char cyl_mb;
+ u_char cyl_lb;
+ u_char heads; /* #heads */
+ u_char precomp_cyl_ub; /* precomp start */
+ u_char precomp_cyl_mb;
+ u_char precomp_cyl_lb;
+ u_char current_cyl_ub; /* reduced current start */
+ u_char current_cyl_mb;
+ u_char current_cyl_lb;
+ u_short step_rate; /* stepping motor rate */
+ u_char landing_cyl_ub; /* landing zone */
+ u_char landing_cyl_mb;
+ u_char landing_cyl_lb;
+ u_char _r2;
+ u_char _r3;
+ u_char _r4;
+};
+
+/*
+ * cache parameters
+ */
+struct ccs_cache
+{
+ u_char _r1 : 2; /* reserved */
+ u_char page_code : 6; /* page code */
+ u_char page_length; /* page length */
+ u_char mode; /* cache control byte */
+ u_char threshold; /* prefetch threshold */
+ u_char max_prefetch; /* maximum prefetch size */
+ u_char max_multiplier; /* maximum prefetch multiplier */
+ u_char min_prefetch; /* minimum prefetch size */
+ u_char min_multiplier; /* minimum prefetch multiplier */
+ u_char _r2[8];
+};
+
+#endif /* _LINUX_CDROM_H */
+/*==========================================================================*/
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 8
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -8
+ * c-argdecl-indent: 8
+ * c-label-offset: -8
+ * c-continued-statement-offset: 8
+ * c-continued-brace-offset: 0
+ * End:
+ */
diff --git a/linux/src/include/linux/compatmac.h b/linux/src/include/linux/compatmac.h
new file mode 100644
index 0000000..9537070
--- /dev/null
+++ b/linux/src/include/linux/compatmac.h
@@ -0,0 +1,153 @@
+ /*
+ * This header tries to allow you to write 2.3-compatible drivers
+ * that will still build and run on 2.2 and 2.0 kernels.
+ *
+ * Sometimes, a #define replaces a "construct" that older kernels
+ * had. For example,
+ *
+ * DECLARE_MUTEX(name);
+ *
+ * replaces the older
+ *
+ * struct semaphore name = MUTEX;
+ *
+ * This file then declares the DECLARE_MUTEX macro to compile into the
+ * older version.
+ *
+ * In some cases, a macro or function changes the number of arguments.
+ * In that case, there is nothing we can do except define an access
+ * macro that provides the same functionality on both versions of Linux.
+ *
+ * This is the case for example with the "get_user" macro 2.0 kernels use:
+ *
+ * a = get_user (b);
+ *
+ * while newer kernels use
+ *
+ * get_user (a,b);
+ *
+ * This is unfortunate. We therefore define "Get_user (a,b)" which looks
+ * almost the same as the 2.2+ construct, and translates into the
+ * appropriate sequence for earlier constructs.
+ *
+ * Supported by this file are the 2.0 kernels, 2.2 kernels, and the
+ * most recent 2.3 kernel. 2.3 support will be dropped as soon as 2.4
+ * comes out. 2.0 support may someday be dropped. But then again, maybe
+ * not.
+ *
+ * I'll try to maintain this, provided that Linus agrees with the setup.
+ * Feel free to mail updates or suggestions.
+ *
+ * -- R.E.Wolff@BitWizard.nl
+ *
+ */
+
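+/*
+ * Illustrative example (not from the original header): driver code
+ * written against the wrappers below compiles unchanged on both kernel
+ * generations,
+ *
+ *	int val;
+ *	Get_user (val, (int *) arg);
+ *
+ * which expands to "val = get_user ((int *) arg)" on 2.0 and to
+ * "get_user (val, (int *) arg)" on 2.2 and later.
+ */
+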
+#ifndef COMPATMAC_H
+#define COMPATMAC_H
+
+#include <linux/version.h>
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE < 0x020100 /* Less than 2.1.0 */
+#define TWO_ZERO
+#else
+#if LINUX_VERSION_CODE < 0x020200 /* less than 2.2.x */
+#warning "Please use a 2.2.x kernel. "
+#else
+#if LINUX_VERSION_CODE < 0x020300 /* less than 2.3.x */
+#define TWO_TWO
+#else
+#define TWO_THREE
+#endif
+#endif
+#endif
+
+#ifdef TWO_ZERO
+
+/* Here is the section that makes the 2.2 compatible driver source
+ work for 2.0 too! We mostly try to adopt the "new thingies" from 2.2,
+ and provide for compatibility stuff here if possible. */
+
+/* Some 200 days (on intel) */
+#define MAX_SCHEDULE_TIMEOUT ((long)(~0UL>>1))
+
+#include <linux/bios32.h>
+
+#define Get_user(a,b) a = get_user(b)
+#define Put_user(a,b) 0,put_user(a,b)
+#define copy_to_user(a,b,c) memcpy_tofs(a,b,c)
+
+static inline int copy_from_user(void *to,const void *from, int c)
+{
+ memcpy_fromfs(to, from, c);
+ return 0;
+}
+
+#define pci_present pcibios_present
+#define pci_read_config_word pcibios_read_config_word
+#define pci_read_config_dword pcibios_read_config_dword
+
+static inline unsigned char get_irq (unsigned char bus, unsigned char fn)
+{
+ unsigned char t;
+ pcibios_read_config_byte (bus, fn, PCI_INTERRUPT_LINE, &t);
+ return t;
+}
+
+static inline void *ioremap(unsigned long base, long length)
+{
+ if (base < 0x100000) return phys_to_virt(base);
+ return vremap (base, length);
+}
+
+#define my_iounmap(x, b) (((long)x<(long)phys_to_virt(0x100000))?0:vfree ((void*)x))
+
+#define capable(x) suser()
+
+#define queue_task queue_task_irq_off
+#define tty_flip_buffer_push(tty) queue_task(&tty->flip.tqueue, &tq_timer)
+#define signal_pending(current) (current->signal & ~current->blocked)
+#define schedule_timeout(to) do {current->timeout = jiffies + (to);schedule ();} while (0)
+#define time_after(t1,t2) (((long)(t1)-(long)(t2)) > 0)
+
+
+//#define test_and_set_bit(nr, addr) set_bit(nr, addr)
+//#define test_and_clear_bit(nr, addr) clear_bit(nr, addr)
+
+/* Not yet implemented on 2.0 */
+#define ASYNC_SPD_SHI -1
+#define ASYNC_SPD_WARP -1
+
+
+/* Ugly hack: the driver_name doesn't exist in 2.0.x . So we define it
+ to the "name" field that does exist. As long as the assignments are
+ done in the right order, there is nothing to worry about. */
+#define driver_name name
+
+/* Should be in a header somewhere. They are in tty.h on 2.2 */
+#define TTY_HW_COOK_OUT 14 /* Flag to tell ntty what we can handle */
+#define TTY_HW_COOK_IN 15 /* in hardware - output and input */
+
+/* The return type of a "close" routine. */
+#define INT void
+#define NO_ERROR /* Nothing */
+
+#else
+
+/* The 2.2.x compatibility section. */
+#include <asm/uaccess.h>
+
+
+#define Get_user(a,b) get_user(a,b)
+#define Put_user(a,b) put_user(a,b)
+#define get_irq(pdev) pdev->irq
+
+#define INT int
+#define NO_ERROR 0
+
+#define my_iounmap(x,b) (iounmap((char *)(b)))
+
+#endif
+
+#endif
diff --git a/linux/src/include/linux/compiler-gcc.h b/linux/src/include/linux/compiler-gcc.h
new file mode 100644
index 0000000..d9426df
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc.h
@@ -0,0 +1,112 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#ifndef barrier
+#define barrier() __asm__ __volatile__("": : :"memory")
+#endif /* barrier */
+
+/*
+ * This macro obfuscates arithmetic on a variable address so that gcc
+ * shouldn't recognize the original var, and make assumptions about it.
+ *
+ * This is needed because the C standard makes it undefined to do
+ * pointer arithmetic on "objects" outside their boundaries and the
+ * gcc optimizers assume this is the case. In particular they
+ * assume such arithmetic does not wrap.
+ *
+ * A miscompilation has been observed because of this on PPC.
+ * To work around it we hide the relationship of the pointer and the object
+ * using this macro.
+ *
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+#define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); })
+
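+/*
+ * Usage sketch (illustrative; the names are made up): form a pointer that
+ * is 'off' bytes away from another object without letting gcc reason
+ * about the arithmetic:
+ *
+ *	struct item *p = RELOC_HIDE(&first_item, per_cpu_offset);
+ *
+ * gcc only sees the register copy of the address, so it cannot apply its
+ * out-of-bounds assumptions to the result.
+ */
+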
+#ifdef __CHECKER__
+#define __must_be_array(arr) 0
+#else
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#endif
+
+/*
+ * Force always-inline if the user requests it so via the .config,
+ * or if gcc is too old:
+ */
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
+# define inline inline __attribute__((always_inline))
+# define __inline__ __inline__ __attribute__((always_inline))
+# define __inline __inline __attribute__((always_inline))
+#endif
+
+#define __deprecated __attribute__((deprecated))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked) to trace
+ * naked functions because then mcount is called without stack and frame pointer
+ * being set up and there is no chance to restore the lr register to the value
+ * before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling conventions,
+ * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
+ * this, so we must do so ourselves. See GCC PR44290.
+ */
+#define __naked __attribute__((naked)) noinline __noclone notrace
+
+#define __noreturn __attribute__((noreturn))
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
+ */
+#define __pure __attribute__((pure))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __printf(a,b) __attribute__((format(printf,a,b)))
+#define noinline __attribute__((noinline))
+#define __attribute_const__ __attribute__((__const__))
+#define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
+
+#define __gcc_header(x) #x
+#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+#define gcc_header(x) _gcc_header(x)
+#if __GNUC__ < 5
+#include gcc_header(__GNUC__)
+#else
+#include gcc_header(5)
+#endif
+
+#if !defined(__noclone)
+#define __noclone /* not needed */
+#endif
+
+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
+
+#define __always_inline inline __attribute__((always_inline))
diff --git a/linux/src/include/linux/compiler-gcc3.h b/linux/src/include/linux/compiler-gcc3.h
new file mode 100644
index 0000000..37d4124
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc3.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#if __GNUC_MINOR__ < 2
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if __GNUC_MINOR__ >= 3
+# define __used __attribute__((__used__))
+#else
+# define __used __attribute__((__unused__))
+#endif
+
+#if __GNUC_MINOR__ >= 4
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if __GNUC_MINOR__ < 4
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
diff --git a/linux/src/include/linux/compiler-gcc4.h b/linux/src/include/linux/compiler-gcc4.h
new file mode 100644
index 0000000..dfadc96
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc4.h
@@ -0,0 +1,57 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+
+#if __GNUC_MINOR__ >= 3
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+
+#if __GNUC_MINOR__ >= 5
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+#endif
+#endif
+
+#if __GNUC_MINOR__ > 0
+#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__)
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#define __compiletime_error(message) __attribute__((error(message)))
+#endif
diff --git a/linux/src/include/linux/compiler-gcc5.h b/linux/src/include/linux/compiler-gcc5.h
new file mode 100644
index 0000000..efee493
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc5.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/linux/src/include/linux/compiler.h b/linux/src/include/linux/compiler.h
new file mode 100644
index 0000000..eb3dd94
--- /dev/null
+++ b/linux/src/include/linux/compiler.h
@@ -0,0 +1,315 @@
+#ifndef __LINUX_COMPILER_H
+#define __LINUX_COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+# define __kernel __attribute__((address_space(0)))
+# define __safe __attribute__((safe))
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __iomem __attribute__((noderef, address_space(2)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu __attribute__((noderef, address_space(4)))
+#else
+# define __rcu
+#endif
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+#else
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __nocast
+# define __iomem
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+# define __builtin_warning(x, y...) (1)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+# define __percpu
+# define __rcu
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#define notrace __attribute__((no_instrument_function))
+
+/* Intel compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __INTEL_COMPILER
+# include <linux/compiler-intel.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros required for kernel
+ * build go below this comment. Actual compiler/compiler version
+ * specific implementations come from the above header files
+ */
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x) __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = likely_notrace(x); \
+ ftrace_likely_update(&______f, ______r, expect); \
+ ______r; \
+ })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same. This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+ if (__builtin_constant_p((cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = !!(cond); \
+ ______f.miss_hit[______r]++; \
+ ______r; \
+ }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# ifndef likely
+# define likely(x) __builtin_expect(!!(x), 1)
+# endif /* likely */
+# ifndef unlikely
+# define unlikely(x) __builtin_expect(!!(x), 0)
+# endif /* unlikely */
+#endif
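+
+/*
+ * Usage sketch (illustrative): annotate branches whose outcome is heavily
+ * biased so the compiler lays out the common path first:
+ *
+ *	if (unlikely(buf == NULL))
+ *		return -ENOMEM;
+ *
+ * With CONFIG_TRACE_BRANCH_PROFILING the same annotation also feeds the
+ * ftrace_branch_data statistics declared above.
+ */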
+
+/* Optimization barrier */
+#ifndef barrier
+# define barrier() __memory_barrier()
+#endif
+
+/* Unreachable code */
+#ifndef unreachable
+# define unreachable() do { } while (1)
+#endif
+
+#ifndef RELOC_HIDE
+# define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __ptr = (unsigned long) (ptr); \
+ (typeof(ptr)) (__ptr + (off)); })
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
+ * warning for each use, in hopes of speeding the functions removal.
+ * Usage is:
+ * int __deprecated foo(void)
+ */
+#ifndef __deprecated
+# define __deprecated /* unimplemented */
+#endif
+
+#ifdef MODULE
+#define __deprecated_for_modules __deprecated
+#else
+#define __deprecated_for_modules
+#endif
+
+#ifndef __must_check
+#define __must_check
+#endif
+
+#ifndef CONFIG_ENABLE_MUST_CHECK
+#undef __must_check
+#define __must_check
+#endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
+
+/*
+ * Allow us to avoid 'defined but not used' warnings on functions and data,
+ * as well as force them to be emitted to the assembly file.
+ *
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.4, static data not so
+ * marked will not be elided, but this may change in a future gcc version.
+ *
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
+ * In prior versions of gcc, such functions and data would be emitted, but
+ * would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
+ */
+#ifndef __used
+# define __used /* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused /* unimplemented */
+#endif
+
+#ifndef __always_unused
+# define __always_unused /* unimplemented */
+#endif
+
+#ifndef noinline
+#define noinline
+#endif
+
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead, for documentation reasons.
+ */
+#define noinline_for_stack noinline
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions do not examine any values except their arguments,
+ * and have no effects except the return value. Basically this is
+ * just slightly more strict class than the `pure' attribute above,
+ * since function is not allowed to read global memory.
+ *
+ * Note that a function that has pointer arguments and examines the
+ * data pointed to must _not_ be declared `const'. Likewise, a
+ * function that calls a non-`const' function usually must not be
+ * `const'. It does not make sense for a `const' function to return
+ * `void'.
+ */
+#ifndef __attribute_const__
+# define __attribute_const__ /* unimplemented */
+#endif
+
+/*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+ */
+
+#ifndef __cold
+#define __cold
+#endif
+
+/* Simple shorthand for a section definition */
+#ifndef __section
+# define __section(S) __attribute__ ((__section__(#S)))
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/* Compile time object size, -1 for unknown */
+#ifndef __compiletime_object_size
+# define __compiletime_object_size(obj) -1
+#endif
+#ifndef __compiletime_warning
+# define __compiletime_warning(message)
+#endif
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
+/*
+ * Prevent the compiler from merging or refetching accesses. The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering. One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time. Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
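+
+/*
+ * Usage sketch (illustrative; 'flag' is a made-up variable): force a
+ * fresh load on every iteration when polling something an interrupt
+ * handler may change:
+ *
+ *	while (!ACCESS_ONCE(flag))
+ *		continue;
+ *
+ * Without ACCESS_ONCE the compiler may legally hoist the load out of the
+ * loop and spin on a stale register copy.
+ */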
+
+#endif /* __LINUX_COMPILER_H */
diff --git a/linux/src/include/linux/config.h b/linux/src/include/linux/config.h
new file mode 100644
index 0000000..da47f8c
--- /dev/null
+++ b/linux/src/include/linux/config.h
@@ -0,0 +1,43 @@
+#ifndef _LINUX_CONFIG_H
+#define _LINUX_CONFIG_H
+
+#include <linux/autoconf.h>
+
+/*
+ * Defines for what uname() should return
+ */
+#ifndef UTS_SYSNAME
+#define UTS_SYSNAME "Linux"
+#endif
+
+#ifndef UTS_MACHINE
+#define UTS_MACHINE "unknown"
+#endif
+
+#ifndef UTS_NODENAME
+#define UTS_NODENAME "(none)" /* set by sethostname() */
+#endif
+
+#ifndef UTS_DOMAINNAME
+#define UTS_DOMAINNAME "(none)" /* set by setdomainname() */
+#endif
+
+/*
+ * The definitions for UTS_RELEASE and UTS_VERSION are now defined
+ * in linux/version.h, and should only be used by linux/version.c
+ */
+
+/* Shouldn't these be defined somewhere in a i386 definition? */
+
+/* Don't touch these, unless you really know what you're doing. */
+#define DEF_INITSEG 0x9000
+#define DEF_SYSSEG 0x1000
+#define DEF_SETUPSEG 0x9020
+#define DEF_SYSSIZE 0x7F00
+
+/* internal svga startup constants */
+#define NORMAL_VGA 0xffff /* 80x25 mode */
+#define EXTENDED_VGA 0xfffe /* 80x50 mode */
+#define ASK_VGA 0xfffd /* ask for it at bootup */
+
+#endif
diff --git a/linux/src/include/linux/ctype.h b/linux/src/include/linux/ctype.h
new file mode 100644
index 0000000..8acfe31
--- /dev/null
+++ b/linux/src/include/linux/ctype.h
@@ -0,0 +1,64 @@
+#ifndef _LINUX_CTYPE_H
+#define _LINUX_CTYPE_H
+
+/*
+ * NOTE! This ctype does not handle EOF like the standard C
+ * library is required to.
+ */
+
+#define _U 0x01 /* upper */
+#define _L 0x02 /* lower */
+#define _D 0x04 /* digit */
+#define _C 0x08 /* cntrl */
+#define _P 0x10 /* punct */
+#define _S 0x20 /* white space (space/lf/tab) */
+#define _X 0x40 /* hex digit */
+#define _SP 0x80 /* hard space (0x20) */
+
+extern const unsigned char _ctype[];
+
+#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
+
+#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
+#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
+#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
+#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
+#define islower(c) ((__ismask(c)&(_L)) != 0)
+#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
+#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
+#define isspace(c) ((__ismask(c)&(_S)) != 0)
+#define isupper(c) ((__ismask(c)&(_U)) != 0)
+#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
+
+#define isascii(c) (((unsigned char)(c))<=0x7f)
+#define toascii(c) (((unsigned char)(c))&0x7f)
+
+static inline unsigned char __tolower(unsigned char c)
+{
+ if (isupper(c))
+ c -= 'A'-'a';
+ return c;
+}
+
+static inline unsigned char __toupper(unsigned char c)
+{
+ if (islower(c))
+ c -= 'a'-'A';
+ return c;
+}
+
+#define tolower(c) __tolower(c)
+#define toupper(c) __toupper(c)
+
+/*
+ * Fast implementation of tolower() for internal usage. Do not use in your
+ * code.
+ */
+static inline char _tolower(const char c)
+{
+ return c | 0x20;
+}
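+
+/*
+ * For illustration: _tolower('A') is 0x41 | 0x20 == 0x61 == 'a', but
+ * _tolower('[') is 0x5b | 0x20 == 0x7b == '{', which is why it must only
+ * be given characters already known to be letters.
+ */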
+
+#endif
diff --git a/linux/src/include/linux/delay.h b/linux/src/include/linux/delay.h
new file mode 100644
index 0000000..50b5d0b
--- /dev/null
+++ b/linux/src/include/linux/delay.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_DELAY_H
+#define _LINUX_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+
+extern unsigned long loops_per_sec;
+
+#include <asm/delay.h>
+
+#endif /* defined(_LINUX_DELAY_H) */
diff --git a/linux/src/include/linux/errno.h b/linux/src/include/linux/errno.h
new file mode 100644
index 0000000..ac21284
--- /dev/null
+++ b/linux/src/include/linux/errno.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_ERRNO_H
+#define _LINUX_ERRNO_H
+
+#include <asm/errno.h>
+
+#ifdef __KERNEL__
+
+/* Should never be seen by user programs */
+#define ERESTARTSYS 512
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514 /* restart if no handler.. */
+#define ENOIOCTLCMD 515 /* No ioctl command */
+
+#endif
+
+#endif
diff --git a/linux/src/include/linux/etherdevice.h b/linux/src/include/linux/etherdevice.h
new file mode 100644
index 0000000..9f8b97c
--- /dev/null
+++ b/linux/src/include/linux/etherdevice.h
@@ -0,0 +1,46 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_ETHERDEVICE_H
+#define _LINUX_ETHERDEVICE_H
+
+
+#include <linux/if_ether.h>
+
+#ifdef __KERNEL__
+extern int eth_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int eth_rebuild_header(void *buff, struct device *dev,
+ unsigned long dst, struct sk_buff *skb);
+extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev);
+extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev,
+ unsigned short htype, __u32 daddr);
+extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+extern void eth_copy_and_sum(struct sk_buff *dest,
+ unsigned char *src, int length, int base);
+extern struct device * init_etherdev(struct device *, int);
+
+#endif
+
+#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/linux/src/include/linux/fcntl.h b/linux/src/include/linux/fcntl.h
new file mode 100644
index 0000000..9de3512
--- /dev/null
+++ b/linux/src/include/linux/fcntl.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_FCNTL_H
+#define _LINUX_FCNTL_H
+
+#include <asm/fcntl.h>
+
+#endif
diff --git a/linux/src/include/linux/fd.h b/linux/src/include/linux/fd.h
new file mode 100644
index 0000000..a05cf39
--- /dev/null
+++ b/linux/src/include/linux/fd.h
@@ -0,0 +1,377 @@
+#ifndef _LINUX_FD_H
+#define _LINUX_FD_H
+
+#include <linux/ioctl.h>
+
+/* New file layout: Now the ioctl definitions immediately follow the
+ * definitions of the structures that they use */
+
+/*
+ * Geometry
+ */
+struct floppy_struct {
+ unsigned int size, /* nr of sectors total */
+ sect, /* sectors per track */
+ head, /* nr of heads */
+ track, /* nr of tracks */
+ stretch; /* !=0 means double track steps */
+#define FD_STRETCH 1
+#define FD_SWAPSIDES 2
+
+ unsigned char gap, /* gap1 size */
+
+ rate, /* data rate. |= 0x40 for perpendicular */
+#define FD_2M 0x4
+#define FD_SIZECODEMASK 0x38
+#define FD_SIZECODE(floppy) (((((floppy)->rate&FD_SIZECODEMASK)>> 3)+ 2) %8)
+#define FD_SECTSIZE(floppy) ( (floppy)->rate & FD_2M ? \
+ 512 : 128 << FD_SIZECODE(floppy) )
+#define FD_PERP 0x40
+
+ spec1, /* stepping rate, head unload time */
+ fmt_gap; /* gap2 size */
+ const char * name; /* used only for predefined formats */
+};
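+
+/*
+ * For illustration: with the FD_2M bit clear and the size-code bits of
+ * 'rate' all zero, FD_SIZECODE() evaluates to (0 + 2) % 8 == 2 and
+ * FD_SECTSIZE() to 128 << 2 == 512 bytes, the usual PC sector size.
+ */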
+
+
+/* commands needing write access have 0x40 set */
+/* commands needing super user access have 0x80 set */
+
+#define FDCLRPRM _IO(2, 0x41)
+/* clear user-defined parameters */
+
+#define FDSETPRM _IOW(2, 0x42, struct floppy_struct)
+#define FDSETMEDIAPRM FDSETPRM
+/* set user-defined parameters for current media */
+
+#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct)
+#define FDGETPRM _IOR(2, 0x04, struct floppy_struct)
+#define FDDEFMEDIAPRM FDDEFPRM
+#define FDGETMEDIAPRM FDGETPRM
+/* set/get disk parameters */
+
+
+#define FDMSGON _IO(2,0x45)
+#define FDMSGOFF _IO(2,0x46)
+/* issue/don't issue kernel messages on media type change */
+
+
+/*
+ * Formatting (obsolete)
+ */
+#define FD_FILL_BYTE 0xF6 /* format fill byte. */
+
+struct format_descr {
+ unsigned int device,head,track;
+};
+
+#define FDFMTBEG _IO(2,0x47)
+/* begin formatting a disk */
+#define FDFMTTRK _IOW(2,0x48, struct format_descr)
+/* format the specified track */
+#define FDFMTEND _IO(2,0x49)
+/* end formatting a disk */
+
+
+/*
+ * Error thresholds
+ */
+struct floppy_max_errors {
+ unsigned int
+ abort, /* number of errors to be reached before aborting */
+ read_track, /* maximal number of errors permitted to read an
+ * entire track at once */
+ reset, /* maximal number of errors before a reset is tried */
+ recal, /* maximal number of errors before a recalibrate is
+ * tried */
+
+ /*
+ * Threshold for reporting FDC errors to the console.
+ * Setting this to zero may flood your screen when using
+ * ultra cheap floppies ;-)
+ */
+ reporting;
+
+};
+
+#define FDSETEMSGTRESH _IO(2,0x4a)
+/* set fdc error reporting threshold */
+
+#define FDFLUSH _IO(2,0x4b)
+/* flush buffers for media; either for verifying media, or for
+ * handling a media change without closing the file descriptor */
+
+#define FDSETMAXERRS _IOW(2, 0x4c, struct floppy_max_errors)
+#define FDGETMAXERRS _IOR(2, 0x0e, struct floppy_max_errors)
+/* set/get abort and read_track thresholds. See also floppy_drive_params
+ * structure */
+
+
+typedef char floppy_drive_name[16];
+#define FDGETDRVTYP _IOR(2, 0x0f, floppy_drive_name)
+/* get drive type: 5 1/4 or 3 1/2 */
+
+
+/*
+ * Drive parameters (user modifiable)
+ */
+struct floppy_drive_params {
+ char cmos; /* cmos type */
+
+ /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms
+ * etc) and ND is set means no DMA. Hardcoded to 6 (HLD=6ms, use DMA).
+ */
+ unsigned long max_dtr; /* Max data transfer rate */
+ unsigned long hlt; /* Head load/settle time, msec */
+ unsigned long hut; /* Head unload time (remnant of
+ * 8" drives) */
+ unsigned long srt; /* Step rate, usec */
+
+ unsigned long spinup; /* time needed for spinup (expressed
+ * in jiffies) */
+ unsigned long spindown; /* timeout needed for spindown */
+ unsigned char spindown_offset; /* decides in which position the disk
+ * will stop */
+ unsigned char select_delay; /* delay to wait after select */
+ unsigned char rps; /* rotations per second */
+ unsigned char tracks; /* maximum number of tracks */
+ unsigned long timeout; /* timeout for interrupt requests */
+
+ unsigned char interleave_sect; /* if there are more sectors, use
+ * interleave */
+
+ struct floppy_max_errors max_errors;
+
+ char flags; /* various flags, including ftd_msg */
+/*
+ * Announce successful media type detection and media information loss after
+ * disk changes.
+ * Also used to enable/disable printing of overrun warnings.
+ */
+
+#define FTD_MSG 0x10
+#define FD_BROKEN_DCL 0x20
+#define FD_DEBUG 0x02
+#define FD_SILENT_DCL_CLEAR 0x4
+#define FD_INVERTED_DCL 0x80
+
+ char read_track; /* use readtrack during probing? */
+
+/*
+ * Auto-detection. Each drive type has eight formats which are
+ * used in succession to try to read the disk. If the FDC cannot lock onto
+ * the disk, the next format is tried. This uses the variable 'probing'.
+ */
+ short autodetect[8]; /* autodetected formats */
+
+ int checkfreq; /* how often should the drive be checked for disk
+ * changes */
+ int native_format; /* native format of this drive */
+};
+
+enum {
+ FD_NEED_TWADDLE_BIT, /* more magic */
+ FD_VERIFY_BIT, /* inquire for write protection */
+ FD_DISK_NEWCHANGE_BIT, /* change detected, and no action undertaken yet
+ * to clear media change status */
+ FD_UNUSED_BIT,
+ FD_DISK_CHANGED_BIT, /* disk has been changed since last i/o */
+ FD_DISK_WRITABLE_BIT /* disk is writable */
+};
+
+#define FDSETDRVPRM _IOW(2, 0x90, struct floppy_drive_params)
+#define FDGETDRVPRM _IOR(2, 0x11, struct floppy_drive_params)
+/* set/get drive parameters */
+
+
+/*
+ * Current drive state (not directly modifiable by user, readonly)
+ */
+struct floppy_drive_struct {
+ signed char flags;
+/* values for these flags */
+#define FD_NEED_TWADDLE (1 << FD_NEED_TWADDLE_BIT)
+#define FD_VERIFY (1 << FD_VERIFY_BIT)
+#define FD_DISK_NEWCHANGE (1 << FD_DISK_NEWCHANGE_BIT)
+#define FD_DISK_CHANGED (1 << FD_DISK_CHANGED_BIT)
+#define FD_DISK_WRITABLE (1 << FD_DISK_WRITABLE_BIT)
+
+ unsigned long spinup_date;
+ unsigned long select_date;
+ unsigned long first_read_date;
+ short probed_format;
+ short track; /* current track */
+ short maxblock; /* id of highest block read */
+ short maxtrack; /* id of highest half track read */
+ int generation; /* how many diskchanges? */
+
+/*
+ * (User-provided) media information is _not_ discarded after a media change
+ * if the corresponding keep_data flag is non-zero. Positive values are
+ * decremented after each probe.
+ */
+ int keep_data;
+
+ /* Prevent "aliased" accesses. */
+ int fd_ref;
+ int fd_device;
+ int last_checked; /* when was the drive last checked for a disk
+ * change? */
+
+ char *dmabuf;
+ int bufblocks;
+};
+
+#define FDGETDRVSTAT _IOR(2, 0x12, struct floppy_drive_struct)
+#define FDPOLLDRVSTAT _IOR(2, 0x13, struct floppy_drive_struct)
+/* get drive state: GET returns the cached state, POLL polls for new state */
+
+
+/*
+ * reset FDC
+ */
+enum reset_mode {
+ FD_RESET_IF_NEEDED, /* reset only if the reset flag is set */
+ FD_RESET_IF_RAWCMD, /* obsolete */
+ FD_RESET_ALWAYS /* reset always */
+};
+#define FDRESET _IO(2, 0x54)
+
+
+/*
+ * FDC state
+ */
+struct floppy_fdc_state {
+ int spec1; /* spec1 value last used */
+ int spec2; /* spec2 value last used */
+ int dtr;
+ unsigned char version; /* FDC version code */
+ unsigned char dor;
+ int address; /* io address */
+ unsigned int rawcmd:2;
+ unsigned int reset:1;
+ unsigned int need_configure:1;
+ unsigned int perp_mode:2;
+ unsigned int has_fifo:1;
+ unsigned int driver_version; /* version code for floppy driver */
+#define FD_DRIVER_VERSION 0x100
+/* user programs using the floppy API should use floppy_fdc_state to
+ * get the version number of the floppy driver that they are running
+ * on. If this version number is bigger than the one compiled into the
+ * user program (the FD_DRIVER_VERSION define), it should be prepared
+ * to bigger structures
+ */
+
+ unsigned char track[4];
+ /* Position of the heads of the 4 units attached to this FDC,
+ * as stored on the FDC. In the future, the position as stored
+ * on the FDC might not agree with the actual physical
+ * position of these drive heads. By allowing such
+ * disagreement, it will be possible to reset the FDC without
+ * incurring the cost of repositioning all heads.
+ * Right now, these positions are hard wired to 0. */
+
+};
+
+#define FDGETFDCSTAT _IOR(2, 0x15, struct floppy_fdc_state)
+
+
+/*
+ * Asynchronous Write error tracking
+ */
+struct floppy_write_errors {
+ /* Write error logging.
+ *
+ * These fields can be cleared with the FDWERRORCLR ioctl.
+ * Only writes that were attempted but failed due to a physical media
+ * error are logged. write(2) calls that fail and return an error code
+ * to the user process are not counted.
+ */
+
+ unsigned int write_errors; /* number of physical write errors
+ * encountered */
+
+ /* position of first and last write errors */
+ unsigned long first_error_sector;
+ int first_error_generation;
+ unsigned long last_error_sector;
+ int last_error_generation;
+
+ unsigned int badness; /* highest retry count for a read or write
+ * operation */
+};
+
+#define FDWERRORCLR _IO(2, 0x56)
+/* clear write error and badness information */
+#define FDWERRORGET _IOR(2, 0x17, struct floppy_write_errors)
+/* get write error and badness information */
+
+
+/*
+ * Raw commands
+ */
+/* new interface flag: now we can do them in batches */
+#define FDHAVEBATCHEDRAWCMD
+
+struct floppy_raw_cmd {
+ unsigned int flags;
+#define FD_RAW_READ 1
+#define FD_RAW_WRITE 2
+#define FD_RAW_NO_MOTOR 4
+#define FD_RAW_DISK_CHANGE 4 /* out: disk change flag was set */
+#define FD_RAW_INTR 8 /* wait for an interrupt */
+#define FD_RAW_SPIN 0x10 /* spin up the disk for this command */
+#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command
+ * completion */
+#define FD_RAW_NEED_DISK 0x40 /* this command needs a disk to be present */
+#define FD_RAW_NEED_SEEK 0x80 /* this command uses an implied seek (soft) */
+
+/* more "in" flags */
+#define FD_RAW_MORE 0x100 /* more records follow */
+#define FD_RAW_STOP_IF_FAILURE 0x200 /* stop if we encounter a failure */
+#define FD_RAW_STOP_IF_SUCCESS 0x400 /* stop if command successful */
+#define FD_RAW_SOFTFAILURE 0x800 /* consider the return value for failure
+ * detection too */
+
+/* more "out" flags */
+#define FD_RAW_FAILURE 0x10000 /* command sent to fdc, fdc returned error */
+#define FD_RAW_HARDFAILURE 0x20000 /* fdc had to be reset, or timed out */
+
+ void *data;
+ char *kernel_data; /* location of data buffer in the kernel */
+ struct floppy_raw_cmd *next; /* used for chaining of raw cmd's
+ * within the kernel */
+ long length; /* in: length of dma transfer. out: remaining bytes */
+ long phys_length; /* physical length, if different from dma length */
+ int buffer_length; /* length of allocated buffer */
+
+ unsigned char rate;
+ unsigned char cmd_count;
+ unsigned char cmd[16];
+ unsigned char reply_count;
+ unsigned char reply[16];
+ int track;
+ int resultcode;
+
+ int reserved1;
+ int reserved2;
+};
+
+#define FDRAWCMD _IO(2, 0x58)
+/* send a raw command to the fdc. Structure size not included, because of
+ * batches */
+
+#define FDTWADDLE _IO(2, 0x59)
+/* flicker motor-on bit before reading a sector. Experimental */
+
+
+#define FDEJECT _IO(2, 0x5a)
+/* eject the disk */
+
+
+#ifdef __KERNEL__
+/* eject the boot floppy (if we need the drive for a different root floppy) */
+void floppy_eject(void);
+#endif
+
+#endif
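The FDGETPRM, FDGETFDCSTAT and related ioctls above are driven from user space through a file descriptor on the floppy device node. Below is a minimal sketch, assuming a /dev/fd0 node and that the definitions above are visible via <linux/fd.h>, which reads the current media geometry and checks the running driver's version against FD_DRIVER_VERSION as the floppy_fdc_state comment recommends:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fd.h>

int main(void)
{
        struct floppy_struct g;
        struct floppy_fdc_state fdc;
        int fd = open("/dev/fd0", O_RDONLY | O_NONBLOCK);   /* assumed device node */

        if (fd < 0)
                return 1;
        if (ioctl(fd, FDGETPRM, &g) == 0)                    /* current media geometry */
                printf("%u sectors/track, %u heads, %u tracks, %u sectors total\n",
                       g.sect, g.head, g.track, g.size);
        if (ioctl(fd, FDGETFDCSTAT, &fdc) == 0 &&            /* FDC and driver state */
            fdc.driver_version > FD_DRIVER_VERSION)
                printf("driver newer than these headers; expect larger structures\n");
        close(fd);
        return 0;
}

Opening with O_NONBLOCK is the usual way to issue these ioctls without requiring a readable disk in the drive.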
diff --git a/linux/src/include/linux/fddidevice.h b/linux/src/include/linux/fddidevice.h
new file mode 100644
index 0000000..bb0b298
--- /dev/null
+++ b/linux/src/include/linux/fddidevice.h
@@ -0,0 +1,42 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the FDDI handlers.
+ *
+ * Version: @(#)fddidevice.h 1.0.0 08/12/96
+ *
+ * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * fddidevice.h is based on previous trdevice.h work by
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_FDDIDEVICE_H
+#define _LINUX_FDDIDEVICE_H
+
+#include <linux/if_fddi.h>
+
+#ifdef __KERNEL__
+extern int fddi_header(struct sk_buff *skb,
+ struct device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+extern int fddi_rebuild_header(void *buff,
+ struct device *dev,
+ unsigned long dest,
+ struct sk_buff *skb);
+extern unsigned short fddi_type_trans(struct sk_buff *skb,
+ struct device *dev);
+#endif
+
+#endif /* _LINUX_FDDIDEVICE_H */
diff --git a/linux/src/include/linux/fdreg.h b/linux/src/include/linux/fdreg.h
new file mode 100644
index 0000000..1d9026e
--- /dev/null
+++ b/linux/src/include/linux/fdreg.h
@@ -0,0 +1,143 @@
+#ifndef _LINUX_FDREG_H
+#define _LINUX_FDREG_H
+/*
+ * This file contains some defines for the floppy disk controller.
+ * Various sources. Mostly "IBM Microcomputers: A Programmers
+ * Handbook", Sanches and Canton.
+ */
+
+#ifdef FDPATCHES
+
+#define FD_IOPORT fdc_state[fdc].address
+
+/* Fd controller regs. S&C, about page 340 */
+#define FD_STATUS (4 + FD_IOPORT )
+#define FD_DATA (5 + FD_IOPORT )
+
+/* Digital Output Register */
+#define FD_DOR (2 + FD_IOPORT )
+
+/* Digital Input Register (read) */
+#define FD_DIR (7 + FD_IOPORT )
+
+/* Diskette Control Register (write)*/
+#define FD_DCR (7 + FD_IOPORT )
+
+#else
+
+#define FD_STATUS 0x3f4
+#define FD_DATA 0x3f5
+#define FD_DOR 0x3f2 /* Digital Output Register */
+#define FD_DIR 0x3f7 /* Digital Input Register (read) */
+#define FD_DCR 0x3f7 /* Diskette Control Register (write)*/
+
+#endif
+
+/* Bits of main status register */
+#define STATUS_BUSYMASK 0x0F /* drive busy mask */
+#define STATUS_BUSY 0x10 /* FDC busy */
+#define STATUS_DMA 0x20 /* 0- DMA mode */
+#define STATUS_DIR 0x40 /* 0- cpu->fdc */
+#define STATUS_READY 0x80 /* Data reg ready */
+
+/* Bits of FD_ST0 */
+#define ST0_DS 0x03 /* drive select mask */
+#define ST0_HA 0x04 /* Head (Address) */
+#define ST0_NR 0x08 /* Not Ready */
+#define ST0_ECE 0x10 /* Equipment check error */
+#define ST0_SE 0x20 /* Seek end */
+#define ST0_INTR 0xC0 /* Interrupt code mask */
+
+/* Bits of FD_ST1 */
+#define ST1_MAM 0x01 /* Missing Address Mark */
+#define ST1_WP 0x02 /* Write Protect */
+#define ST1_ND 0x04 /* No Data - unreadable */
+#define ST1_OR 0x10 /* OverRun */
+#define ST1_CRC 0x20 /* CRC error in data or addr */
+#define ST1_EOC 0x80 /* End Of Cylinder */
+
+/* Bits of FD_ST2 */
+#define ST2_MAM 0x01 /* Missing Address Mark (again) */
+#define ST2_BC 0x02 /* Bad Cylinder */
+#define ST2_SNS 0x04 /* Scan Not Satisfied */
+#define ST2_SEH 0x08 /* Scan Equal Hit */
+#define ST2_WC 0x10 /* Wrong Cylinder */
+#define ST2_CRC 0x20 /* CRC error in data field */
+#define ST2_CM 0x40 /* Control Mark = deleted */
+
+/* Bits of FD_ST3 */
+#define ST3_HA 0x04 /* Head (Address) */
+#define ST3_DS 0x08 /* drive is double-sided */
+#define ST3_TZ 0x10 /* Track Zero signal (1=track 0) */
+#define ST3_RY 0x20 /* drive is ready */
+#define ST3_WP 0x40 /* Write Protect */
+#define ST3_FT 0x80 /* Drive Fault */
+
+/* Values for FD_COMMAND */
+#define FD_RECALIBRATE 0x07 /* move to track 0 */
+#define FD_SEEK 0x0F /* seek track */
+#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
+#define FD_WRITE 0xC5 /* write with MT, MFM */
+#define FD_SENSEI 0x08 /* Sense Interrupt Status */
+#define FD_SPECIFY 0x03 /* specify HUT etc */
+#define FD_FORMAT 0x4D /* format one track */
+#define FD_VERSION 0x10 /* get version code */
+#define FD_CONFIGURE 0x13 /* configure FIFO operation */
+#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
+#define FD_GETSTATUS 0x04 /* read ST3 */
+#define FD_DUMPREGS 0x0E /* dump the contents of the fdc regs */
+#define FD_READID 0xEA /* prints the header of a sector */
+#define FD_UNLOCK 0x14 /* Fifo config unlock */
+#define FD_LOCK 0x94 /* Fifo config lock */
+#define FD_RSEEK_OUT 0x8f /* seek out (i.e. to lower tracks) */
+#define FD_RSEEK_IN 0xcf /* seek in (i.e. to higher tracks) */
+
+/* the following commands are new in the 82078. They are not used in the
+ * floppy driver, except the first three. These commands may be useful for apps
+ * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at
+ * http://www-techdoc.intel.com/docs/periph/fd_contr/datasheets/ */
+
+#define FD_PARTID 0x18 /* part id ("extended" version cmd) */
+#define FD_SAVE 0x2e /* save fdc regs for later restore */
+#define FD_DRIVESPEC 0x8e /* drive specification: Access to the
+ * 2 Mbps data transfer rate for tape
+ * drives */
+
+#define FD_RESTORE 0x4e /* later restore */
+#define FD_POWERDOWN 0x27 /* configure FDC's powersave features */
+#define FD_FORMAT_N_WRITE 0xef /* format and write in one go. */
+#define FD_OPTION 0x33 /* ISO format (which is a clean way to
+ * pack more sectors on a track) */
+
+/* DMA commands */
+#define DMA_READ 0x46
+#define DMA_WRITE 0x4A
+
+/* FDC version return types */
+#define FDC_NONE 0x00
+#define FDC_UNKNOWN 0x10 /* DO NOT USE THIS TYPE EXCEPT IF IDENTIFICATION
+ FAILS EARLY */
+#define FDC_8272A 0x20 /* Intel 8272a, NEC 765 */
+#define FDC_765ED 0x30 /* Non-Intel 1MB-compatible FDC, can't detect */
+#define FDC_82072 0x40 /* Intel 82072; 8272a + FIFO + DUMPREGS */
+#define FDC_82072A 0x45 /* 82072A (on Sparcs) */
+#define FDC_82077_ORIG 0x51 /* Original version of 82077AA, sans LOCK */
+#define FDC_82077 0x52 /* 82077AA-1 */
+#define FDC_82078_UNKN 0x5f /* Unknown 82078 variant */
+#define FDC_82078 0x60 /* 44pin 82078 or 64pin 82078SL */
+#define FDC_82078_1 0x61 /* 82078-1 (2Mbps fdc) */
+#define FDC_S82078B 0x62 /* S82078B (first seen on Adaptec AVA-2825 VLB
+ * SCSI/EIDE/Floppy controller) */
+#define FDC_87306 0x63 /* National Semiconductor PC 87306 */
+
+/*
+ * Beware: the fdc type list is roughly sorted by increasing features.
+ * Presence of features is tested by comparing the FDC version id with the
+ * "oldest" version that has the needed feature.
+ * If during FDC detection, an obscure test fails late in the sequence, don't
+ * assign FDC_UNKNOWN. Else the FDC will be treated as a dumb 8272a, or worse.
+ * This is especially true if the tests are unneeded.
+ */
+
+#define FD_RESET_DELAY 20
+#endif
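The ST0..ST3 bit masks above are what the reply bytes of a raw floppy command (see FDRAWCMD and struct floppy_raw_cmd in fd.h) are decoded against. A hedged sketch, assuming reply[0..2] hold ST0/ST1/ST2 as they do for read/write commands and that the definitions above are visible via <linux/fdreg.h>:

#include <stdio.h>
#include <linux/fdreg.h>

/* Decode the ST0/ST1/ST2 bytes found in floppy_raw_cmd.reply[] after a
 * read/write command; this reply layout is an assumption of the sketch. */
static void decode_fdc_status(const unsigned char *reply)
{
        unsigned char st0 = reply[0], st1 = reply[1], st2 = reply[2];

        if ((st0 & ST0_INTR) == 0)
                printf("normal termination\n");
        if (st0 & ST0_NR)
                printf("drive not ready\n");
        if (st1 & ST1_WP)
                printf("medium is write protected\n");
        if ((st1 & ST1_CRC) || (st2 & ST2_CRC))
                printf("CRC error\n");
        if (st2 & ST2_WC)
                printf("wrong cylinder\n");
}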
diff --git a/linux/src/include/linux/fs.h b/linux/src/include/linux/fs.h
new file mode 100644
index 0000000..b698b3f
--- /dev/null
+++ b/linux/src/include/linux/fs.h
@@ -0,0 +1,728 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table
+ * structures etc.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/limits.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/vfs.h>
+#include <linux/net.h>
+#include <linux/kdev_t.h>
+#include <linux/ioctl.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but I'll fix
+ * that later. Anyway, now the file code is no longer dependent
+ * on bitmaps in unsigned longs, but uses the new fd_set structure..
+ *
+ * Some programs (notably those using select()) may have to be
+ * recompiled to take full advantage of the new limits..
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define NR_OPEN 256
+
+#define NR_SUPER 64
+#define BLOCK_SIZE 1024
+#define BLOCK_SIZE_BITS 10
+
+/* And dynamically-tunable limits and defaults: */
+extern int max_inodes, nr_inodes;
+extern int max_files, nr_files;
+#define NR_INODE 3072 /* this should be bigger than NR_FILE */
+#define NR_FILE 1024 /* this can well be larger on a larger system */
+
+#define MAY_EXEC 1
+#define MAY_WRITE 2
+#define MAY_READ 4
+
+#define FMODE_READ 1
+#define FMODE_WRITE 2
+
+#define READ 0
+#define WRITE 1
+#define READA 2 /* read-ahead - don't block if no resources */
+#define WRITEA 3 /* write-ahead - don't block if no resources */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define NIL_FILP ((struct file *)0)
+#define SEL_IN 1
+#define SEL_OUT 2
+#define SEL_EX 4
+
+/*
+ * These are the fs-independent mount-flags: up to 16 flags are supported
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define S_WRITE 128 /* Write on file/directory/symlink */
+#define S_APPEND 256 /* Append-only file */
+#define S_IMMUTABLE 512 /* Immutable file */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define S_BAD_INODE 2048 /* Marker for unreadable inodes */
+#define S_ZERO_WR 4096 /* Device accepts 0 length writes */
+
+/*
+ * Flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME)
+
+/*
+ * Magic mount flag number. Has to be or-ed to the flag values.
+ */
+#define MS_MGC_VAL 0xC0ED0000 /* magic flag number to indicate "new" flags */
+#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */
+
+/*
+ * Note that read-only etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ */
+#define IS_RDONLY(inode) (((inode)->i_sb) && ((inode)->i_sb->s_flags & MS_RDONLY))
+#define IS_NOSUID(inode) ((inode)->i_flags & MS_NOSUID)
+#define IS_NODEV(inode) ((inode)->i_flags & MS_NODEV)
+#define IS_NOEXEC(inode) ((inode)->i_flags & MS_NOEXEC)
+#define IS_SYNC(inode) ((inode)->i_flags & MS_SYNCHRONOUS)
+#define IS_MANDLOCK(inode) ((inode)->i_flags & MS_MANDLOCK)
+
+#define IS_WRITABLE(inode) ((inode)->i_flags & S_WRITE)
+#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+#define IS_NOATIME(inode) ((inode)->i_flags & MS_NOATIME)
+#define IS_ZERO_WR(inode) ((inode)->i_flags & S_ZERO_WR)
+
+#define UPDATE_ATIME(inode) \
+ if (!IS_NOATIME(inode) && !IS_RDONLY(inode)) { \
+ inode->i_atime = CURRENT_TIME; \
+ inode->i_dirt = 1; \
+ }
+
+/* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
+#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96) /* return device size */
+#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */
+#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+
+#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+#define FIBMAP _IO(0x00,1) /* bmap access */
+#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+
+#ifdef __KERNEL__
+
+#include <asm/semaphore.h>
+#include <asm/bitops.h>
+
+extern void buffer_init(void);
+extern unsigned long inode_init(unsigned long start, unsigned long end);
+extern unsigned long file_table_init(unsigned long start, unsigned long end);
+extern unsigned long name_cache_init(unsigned long start, unsigned long end);
+
+typedef char buffer_block[BLOCK_SIZE];
+
+/* bh state bits */
+#define BH_Uptodate 0 /* 1 if the buffer contains valid data */
+#define BH_Dirty 1 /* 1 if the buffer is dirty */
+#define BH_Lock 2 /* 1 if the buffer is locked */
+#define BH_Req 3 /* 0 if the buffer has been invalidated */
+#define BH_Touched 4 /* 1 if the buffer has been touched (aging) */
+#define BH_Has_aged 5 /* 1 if the buffer has been aged (aging) */
+#define BH_Protected 6 /* 1 if the buffer is protected */
+#define BH_FreeOnIO 7 /* 1 to discard the buffer_head after IO */
+#define BH_MD 8 /* 1 if the buffer is an MD request */
+
+/*
+ * Try to keep the most commonly used fields in single cache lines (16
+ * bytes) to improve performance. This ordering should be
+ * particularly beneficial on 32-bit processors.
+ *
+ * We use the first 16 bytes for the data which is used in searches
+ * over the block hash lists (ie. getblk(), find_buffer() and
+ * friends).
+ *
+ * The second 16 bytes we use for lru buffer scans, as used by
+ * sync_buffers() and refill_freelist(). -- sct
+ */
+struct buffer_head {
+ /* First cache line: */
+ unsigned long b_blocknr; /* block number */
+ kdev_t b_dev; /* device (B_FREE = free) */
+ kdev_t b_rdev; /* Real device */
+ unsigned long b_rsector; /* Real buffer location on disk */
+ struct buffer_head * b_next; /* Hash queue list */
+ struct buffer_head * b_this_page; /* circular list of buffers in one page */
+
+ /* Second cache line: */
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head * b_next_free;
+ unsigned int b_count; /* users using this block */
+ unsigned long b_size; /* block size */
+
+ /* Non-performance-critical data follows. */
+ char * b_data; /* pointer to data block (1024 bytes) */
+ unsigned int b_list; /* List that this buffer appears on */
+ unsigned long b_flushtime; /* Time when this (dirty) buffer
+ * should be written */
+ unsigned long b_lru_time; /* Time when this buffer was
+ * last used. */
+ struct wait_queue * b_wait;
+ struct buffer_head * b_prev; /* doubly linked list of hash-queue */
+ struct buffer_head * b_prev_free; /* doubly linked list of buffers */
+ struct buffer_head * b_reqnext; /* request queue */
+
+/*
+ * Some MD stuff like RAID5 needs special event handlers and
+ * special private buffer_head fields:
+ */
+ void * personality;
+ void * private_bh;
+};
+
+static inline int buffer_uptodate(struct buffer_head * bh)
+{
+ return test_bit(BH_Uptodate, &bh->b_state);
+}
+
+static inline int buffer_dirty(struct buffer_head * bh)
+{
+ return test_bit(BH_Dirty, &bh->b_state);
+}
+
+static inline int buffer_locked(struct buffer_head * bh)
+{
+ return test_bit(BH_Lock, &bh->b_state);
+}
+
+static inline int buffer_req(struct buffer_head * bh)
+{
+ return test_bit(BH_Req, &bh->b_state);
+}
+
+static inline int buffer_touched(struct buffer_head * bh)
+{
+ return test_bit(BH_Touched, &bh->b_state);
+}
+
+static inline int buffer_has_aged(struct buffer_head * bh)
+{
+ return test_bit(BH_Has_aged, &bh->b_state);
+}
+
+static inline int buffer_protected(struct buffer_head * bh)
+{
+ return test_bit(BH_Protected, &bh->b_state);
+}
+
+#include <linux/pipe_fs_i.h>
+#include <linux/minix_fs_i.h>
+#include <linux/ext_fs_i.h>
+#include <linux/ext2_fs_i.h>
+#include <linux/hpfs_fs_i.h>
+#include <linux/msdos_fs_i.h>
+#include <linux/umsdos_fs_i.h>
+#include <linux/iso_fs_i.h>
+#include <linux/nfs_fs_i.h>
+#include <linux/xia_fs_i.h>
+#include <linux/sysv_fs_i.h>
+#include <linux/affs_fs_i.h>
+#include <linux/ufs_fs_i.h>
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+#define ATTR_ATIME_SET 128
+#define ATTR_MTIME_SET 256
+#define ATTR_FORCE 512 /* Not a change, but change it anyway */
+
+/*
+ * This is the Inode Attributes structure, used for notify_change(). It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about. Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ off_t ia_size;
+ time_t ia_atime;
+ time_t ia_mtime;
+ time_t ia_ctime;
+};
+
+#include <linux/quota.h>
+
+struct inode {
+ kdev_t i_dev;
+ unsigned long i_ino;
+ umode_t i_mode;
+ nlink_t i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ kdev_t i_rdev;
+ off_t i_size;
+ time_t i_atime;
+ time_t i_mtime;
+ time_t i_ctime;
+ unsigned long i_blksize;
+ unsigned long i_blocks;
+ unsigned long i_version;
+ unsigned long i_nrpages;
+ struct semaphore i_sem;
+ struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct wait_queue *i_wait;
+ struct file_lock *i_flock;
+ struct vm_area_struct *i_mmap;
+ struct page *i_pages;
+ struct dquot *i_dquot[MAXQUOTAS];
+ struct inode *i_next, *i_prev;
+ struct inode *i_hash_next, *i_hash_prev;
+ struct inode *i_bound_to, *i_bound_by;
+ struct inode *i_mount;
+ unsigned long i_count; /* needs to be > (address_space * tasks)>>pagebits */
+ unsigned short i_flags;
+ unsigned short i_writecount;
+ unsigned char i_lock;
+ unsigned char i_dirt;
+ unsigned char i_pipe;
+ unsigned char i_sock;
+ unsigned char i_seek;
+ unsigned char i_update;
+ unsigned char i_condemned;
+ union {
+ struct pipe_inode_info pipe_i;
+ struct minix_inode_info minix_i;
+ struct ext_inode_info ext_i;
+ struct ext2_inode_info ext2_i;
+ struct hpfs_inode_info hpfs_i;
+ struct msdos_inode_info msdos_i;
+ struct umsdos_inode_info umsdos_i;
+ struct iso_inode_info isofs_i;
+ struct nfs_inode_info nfs_i;
+ struct xiafs_inode_info xiafs_i;
+ struct sysv_inode_info sysv_i;
+ struct affs_inode_info affs_i;
+ struct ufs_inode_info ufs_i;
+ struct socket socket_i;
+ void * generic_ip;
+ } u;
+};
+
+struct fown_struct {
+ int pid; /* pid or -pgrp where SIGIO should be sent */
+ uid_t uid, euid; /* uid/euid of process setting the owner */
+};
+
+struct file {
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ unsigned short f_count;
+ unsigned long f_reada, f_ramax, f_raend, f_ralen, f_rawin;
+ struct file *f_next, *f_prev;
+ struct fown_struct f_owner;
+ struct inode * f_inode;
+ struct file_operations * f_op;
+ unsigned long f_version;
+ void *private_data; /* needed for tty driver, and maybe others */
+};
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_BROKEN 4 /* broken flock() emulation */
+#define FL_ACCESS 8 /* for processes suspended by mandatory locking */
+
+struct file_lock {
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_nextlink; /* doubly linked list of all locks */
+ struct file_lock *fl_prevlink; /* used to simplify lock removal */
+ struct file_lock *fl_nextblock; /* circular list of blocked processes */
+ struct file_lock *fl_prevblock;
+ struct task_struct *fl_owner;
+ struct wait_queue *fl_wait;
+ struct file *fl_file;
+ unsigned char fl_flags;
+ unsigned char fl_type;
+ off_t fl_start;
+ off_t fl_end;
+};
+
+#include <linux/fcntl.h>
+
+extern int fcntl_getlk(unsigned int fd, struct flock *l);
+extern int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l);
+extern void locks_remove_locks(struct task_struct *task, struct file *filp);
+
+#include <linux/stat.h>
+
+#define FLOCK_VERIFY_READ 1
+#define FLOCK_VERIFY_WRITE 2
+
+extern int locks_mandatory_locked(struct inode *inode);
+extern int locks_mandatory_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count);
+
+extern inline int locks_verify_locked(struct inode *inode)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_locked(inode));
+ return (0);
+}
+extern inline int locks_verify_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_area(read_write, inode, filp, offset,
+ count));
+ return (0);
+}
+
+struct fasync_struct {
+ int magic;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+extern int fasync_helper(struct inode *, struct file *, int, struct fasync_struct **);
+
+#include <linux/minix_fs_sb.h>
+#include <linux/ext_fs_sb.h>
+#include <linux/ext2_fs_sb.h>
+#include <linux/hpfs_fs_sb.h>
+#include <linux/msdos_fs_sb.h>
+#include <linux/iso_fs_sb.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/xia_fs_sb.h>
+#include <linux/sysv_fs_sb.h>
+#include <linux/affs_fs_sb.h>
+#include <linux/ufs_fs_sb.h>
+
+struct super_block {
+ kdev_t s_dev;
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_lock;
+ unsigned char s_rd_only;
+ unsigned char s_dirt;
+ struct file_system_type *s_type;
+ struct super_operations *s_op;
+ struct dquot_operations *dq_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ unsigned long s_time;
+ struct inode * s_covered;
+ struct inode * s_mounted;
+ struct wait_queue * s_wait;
+ union {
+ struct minix_sb_info minix_sb;
+ struct ext_sb_info ext_sb;
+ struct ext2_sb_info ext2_sb;
+ struct hpfs_sb_info hpfs_sb;
+ struct msdos_sb_info msdos_sb;
+ struct isofs_sb_info isofs_sb;
+ struct nfs_sb_info nfs_sb;
+ struct xiafs_sb_info xiafs_sb;
+ struct sysv_sb_info sysv_sb;
+ struct affs_sb_info affs_sb;
+ struct ufs_sb_info ufs_sb;
+ void *generic_sbp;
+ } u;
+};
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+typedef int (*filldir_t)(void *, const char *, int, off_t, ino_t);
+
+struct file_operations {
+ int (*lseek) (struct inode *, struct file *, off_t, int);
+ int (*read) (struct inode *, struct file *, char *, int);
+ int (*write) (struct inode *, struct file *, const char *, int);
+ int (*readdir) (struct inode *, struct file *, void *, filldir_t);
+ int (*select) (struct inode *, struct file *, int, select_table *);
+ int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct inode *, struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ void (*release) (struct inode *, struct file *);
+ int (*fsync) (struct inode *, struct file *);
+ int (*fasync) (struct inode *, struct file *, int);
+ int (*check_media_change) (kdev_t dev);
+ int (*revalidate) (kdev_t dev);
+};
+
+struct inode_operations {
+ struct file_operations * default_file_ops;
+ int (*create) (struct inode *,const char *,int,int,struct inode **);
+ int (*lookup) (struct inode *,const char *,int,struct inode **);
+ int (*link) (struct inode *,struct inode *,const char *,int);
+ int (*unlink) (struct inode *,const char *,int);
+ int (*symlink) (struct inode *,const char *,int,const char *);
+ int (*mkdir) (struct inode *,const char *,int,int);
+ int (*rmdir) (struct inode *,const char *,int);
+ int (*mknod) (struct inode *,const char *,int,int,int);
+ int (*rename) (struct inode *,const char *,int,struct inode *,const char *,int, int);
+ int (*readlink) (struct inode *,char *,int);
+ int (*follow_link) (struct inode *,struct inode *,int,int,struct inode **);
+ int (*readpage) (struct inode *, struct page *);
+ int (*writepage) (struct inode *, struct page *);
+ int (*bmap) (struct inode *,int);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
+ int (*smap) (struct inode *,int);
+};
+
+struct super_operations {
+ void (*read_inode) (struct inode *);
+ int (*notify_change) (struct inode *, struct iattr *);
+ void (*write_inode) (struct inode *);
+ void (*put_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
+ void (*statfs) (struct super_block *, struct statfs *, int);
+ int (*remount_fs) (struct super_block *, int *, char *);
+};
+
+struct dquot_operations {
+ void (*initialize) (struct inode *, short);
+ void (*drop) (struct inode *);
+ int (*alloc_block) (const struct inode *, unsigned long);
+ int (*alloc_inode) (const struct inode *, unsigned long);
+ void (*free_block) (const struct inode *, unsigned long);
+ void (*free_inode) (const struct inode *, unsigned long);
+ int (*transfer) (struct inode *, struct iattr *, char);
+};
+
+struct file_system_type {
+ struct super_block *(*read_super) (struct super_block *, void *, int);
+ const char *name;
+ int requires_dev;
+ struct file_system_type * next;
+};
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+
+asmlinkage int sys_open(const char *, int, int);
+asmlinkage int sys_close(unsigned int); /* yes, it's really unsigned */
+asmlinkage int sys_read(unsigned int, char *, int);
+
+extern void kill_fasync(struct fasync_struct *fa, int sig);
+
+extern int getname(const char * filename, char **result);
+extern void putname(char * name);
+extern int do_truncate(struct inode *, unsigned long);
+extern int register_blkdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_blkdev(unsigned int major, const char * name);
+extern int blkdev_open(struct inode * inode, struct file * filp);
+extern void blkdev_release (struct inode * inode);
+extern struct file_operations def_blk_fops;
+extern struct inode_operations blkdev_inode_operations;
+
+extern int register_chrdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_chrdev(unsigned int major, const char * name);
+extern int chrdev_open(struct inode * inode, struct file * filp);
+extern struct file_operations def_chr_fops;
+extern struct inode_operations chrdev_inode_operations;
+
+extern void init_fifo(struct inode * inode);
+extern struct inode_operations fifo_inode_operations;
+
+extern struct file_operations connecting_fifo_fops;
+extern struct file_operations read_fifo_fops;
+extern struct file_operations write_fifo_fops;
+extern struct file_operations rdwr_fifo_fops;
+extern struct file_operations read_pipe_fops;
+extern struct file_operations write_pipe_fops;
+extern struct file_operations rdwr_pipe_fops;
+
+extern struct file_system_type *get_fs_type(const char *name);
+
+extern int fs_may_mount(kdev_t dev);
+extern int fs_may_umount(kdev_t dev, struct inode * mount_root);
+extern int fs_may_remount_ro(kdev_t dev);
+
+extern struct file *first_file;
+extern struct super_block super_blocks[NR_SUPER];
+
+extern void refile_buffer(struct buffer_head * buf);
+extern void set_writetime(struct buffer_head * buf, int flag);
+extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**, int);
+
+extern int nr_buffers;
+extern int buffermem;
+extern int nr_buffer_heads;
+
+#define BUF_CLEAN 0
+#define BUF_LOCKED 1 /* Buffers scheduled for write */
+#define BUF_LOCKED1 2 /* Supers, inodes */
+#define BUF_DIRTY 3 /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST 4
+
+void mark_buffer_uptodate(struct buffer_head * bh, int on);
+
+extern inline void mark_buffer_clean(struct buffer_head * bh)
+{
+ if (clear_bit(BH_Dirty, &bh->b_state)) {
+ if (bh->b_list == BUF_DIRTY)
+ refile_buffer(bh);
+ }
+}
+
+extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
+{
+ if (!set_bit(BH_Dirty, &bh->b_state)) {
+ set_writetime(bh, flag);
+ if (bh->b_list != BUF_DIRTY)
+ refile_buffer(bh);
+ }
+}
+
+extern int check_disk_change(kdev_t dev);
+extern void invalidate_inodes(kdev_t dev);
+extern void invalidate_inode_pages(struct inode *);
+extern void invalidate_buffers(kdev_t dev);
+extern int floppy_is_wp(int minor);
+extern void sync_inodes(kdev_t dev);
+extern void sync_dev(kdev_t dev);
+extern int fsync_dev(kdev_t dev);
+extern void sync_supers(kdev_t dev);
+extern int bmap(struct inode * inode,int block);
+extern int notify_change(struct inode *, struct iattr *);
+extern int namei(const char * pathname, struct inode ** res_inode);
+extern int lnamei(const char * pathname, struct inode ** res_inode);
+extern int permission(struct inode * inode,int mask);
+extern int get_write_access(struct inode *inode);
+extern void put_write_access(struct inode *inode);
+extern int open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base);
+extern int do_mknod(const char * filename, int mode, dev_t dev);
+extern int do_pipe(int *);
+extern void iput(struct inode * inode);
+extern struct inode * __iget(struct super_block * sb,int nr,int crsmnt);
+extern struct inode * get_empty_inode(void);
+extern void insert_inode_hash(struct inode *);
+extern void clear_inode(struct inode *);
+extern struct inode * get_pipe_inode(void);
+extern void make_bad_inode(struct inode *);
+extern int get_unused_fd(void);
+extern void put_unused_fd(int);
+extern struct file * get_empty_filp(void);
+extern int close_fp(struct file *filp);
+extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size);
+extern struct buffer_head * getblk(kdev_t dev, int block, int size);
+extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[]);
+extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer);
+extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer);
+extern int is_read_only(kdev_t dev);
+extern void __brelse(struct buffer_head *buf);
+extern inline void brelse(struct buffer_head *buf)
+{
+ if (buf)
+ __brelse(buf);
+}
+extern void __bforget(struct buffer_head *buf);
+extern inline void bforget(struct buffer_head *buf)
+{
+ if (buf)
+ __bforget(buf);
+}
+extern void set_blocksize(kdev_t dev, int size);
+extern struct buffer_head * bread(kdev_t dev, int block, int size);
+extern struct buffer_head * breada(kdev_t dev,int block, int size,
+ unsigned int pos, unsigned int filesize);
+
+extern int generic_readpage(struct inode *, struct page *);
+extern int generic_file_read(struct inode *, struct file *, char *, int);
+extern int generic_file_mmap(struct inode *, struct file *, struct vm_area_struct *);
+extern int brw_page(int, struct page *, kdev_t, int [], int, int);
+
+extern void put_super(kdev_t dev);
+unsigned long generate_cluster(kdev_t dev, int b[], int size);
+extern kdev_t ROOT_DEV;
+
+extern void show_buffers(void);
+extern void mount_root(void);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern kdev_t real_root_dev;
+extern int change_root(kdev_t new_root_dev,const char *put_old);
+#endif
+
+extern int char_read(struct inode *, struct file *, char *, int);
+extern int block_read(struct inode *, struct file *, char *, int);
+extern int read_ahead[];
+
+extern int char_write(struct inode *, struct file *, const char *, int);
+extern int block_write(struct inode *, struct file *, const char *, int);
+
+extern int block_fsync(struct inode *, struct file *);
+extern int file_fsync(struct inode *, struct file *);
+
+extern void dcache_add(struct inode *, const char *, int, unsigned long);
+extern int dcache_lookup(struct inode *, const char *, int, unsigned long *);
+
+extern int inode_change_ok(struct inode *, struct iattr *);
+extern void inode_setattr(struct inode *, struct iattr *);
+
+extern inline struct inode * iget(struct super_block * sb,int nr)
+{
+ return __iget(sb, nr, 1);
+}
+
+/* kludge to get SCSI modules working */
+#include <linux/minix_fs.h>
+#include <linux/minix_fs_sb.h>
+
+#endif /* __KERNEL__ */
+
+#endif
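The buffer-cache entry points declared above (getblk, bread, brelse, mark_buffer_dirty) follow a get/use/release pattern inside the kernel. A minimal in-kernel sketch, assuming a valid kdev_t whose block 0 exists at the default BLOCK_SIZE:

#include <linux/fs.h>

/* In-kernel sketch only: read block 0 of `dev`, modify it, schedule writeback.
 * Assumes `dev` names a valid block device known to the buffer cache. */
static int touch_first_block(kdev_t dev)
{
        struct buffer_head *bh = bread(dev, 0, BLOCK_SIZE);

        if (!bh)
                return -1;              /* I/O error or out of buffers */
        bh->b_data[0] = 0;              /* modify the cached block */
        mark_buffer_dirty(bh, 0);       /* mark dirty; no urgent flush time */
        brelse(bh);                     /* drop our reference */
        return 0;
}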
diff --git a/linux/src/include/linux/genhd.h b/linux/src/include/linux/genhd.h
new file mode 100644
index 0000000..dafeff7
--- /dev/null
+++ b/linux/src/include/linux/genhd.h
@@ -0,0 +1,136 @@
+#ifndef _LINUX_GENHD_H
+#define _LINUX_GENHD_H
+
+/*
+ * genhd.h Copyright (C) 1992 Drew Eckhardt
+ * Generic hard disk header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#include <linux/config.h>
+
+#define CONFIG_MSDOS_PARTITION 1
+
+#ifdef __alpha__
+#define CONFIG_OSF_PARTITION 1
+#endif
+
+#if defined(__sparc__) || defined(CONFIG_SMD_DISKLABEL)
+#define CONFIG_SUN_PARTITION 1
+#endif
+
+/* These three have identical behaviour; use the second one if DOS fdisk gets
+ confused about extended/logical partitions starting past cylinder 1023. */
+#define DOS_EXTENDED_PARTITION 5
+#define LINUX_EXTENDED_PARTITION 0x85
+#define WIN98_EXTENDED_PARTITION 0x0f
+
+#define DM6_PARTITION 0x54 /* has DDO: use xlated geom & offset */
+#define EZD_PARTITION 0x55 /* EZ-DRIVE: same as DM6 (we think) */
+#define DM6_AUX1PARTITION 0x51 /* no DDO: use xlated geom */
+#define DM6_AUX3PARTITION 0x53 /* no DDO: use xlated geom */
+
+struct partition {
+ unsigned char boot_ind; /* 0x80 - active */
+ unsigned char head; /* starting head */
+ unsigned char sector; /* starting sector */
+ unsigned char cyl; /* starting cylinder */
+ unsigned char sys_ind; /* What partition type */
+ unsigned char end_head; /* end head */
+ unsigned char end_sector; /* end sector */
+ unsigned char end_cyl; /* end cylinder */
+ unsigned int start_sect; /* starting sector counting from 0 */
+ unsigned int nr_sects; /* nr of sectors in partition */
+} __attribute((packed)); /* Give a polite hint to egcs/alpha to generate
+ unaligned operations */
+
+struct hd_struct {
+ long start_sect;
+ long nr_sects;
+};
+
+struct gendisk {
+ int major; /* major number of driver */
+ const char *major_name; /* name of major driver */
+ int minor_shift; /* number of times minor is shifted to
+ get real minor */
+ int max_p; /* maximum partitions per device */
+ int max_nr; /* maximum number of real devices */
+
+ void (*init)(struct gendisk *); /* Initialization called before we do our thing */
+ struct hd_struct *part; /* partition table */
+ int *sizes; /* device size in blocks, copied to blk_size[] */
+ int nr_real; /* number of real devices */
+
+ void *real_devices; /* internal use */
+ struct gendisk *next;
+};
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
+ */
+
+#define BSD_PARTITION 0xa5 /* Partition ID */
+
+#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
+#define BSD_MAXPARTITIONS 8
+#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
+struct bsd_disklabel {
+ __u32 d_magic; /* the magic number */
+ __s16 d_type; /* drive type */
+ __s16 d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ char d_packname[16]; /* pack identifier */
+ __u32 d_secsize; /* # of bytes per sector */
+ __u32 d_nsectors; /* # of data sectors per track */
+ __u32 d_ntracks; /* # of tracks per cylinder */
+ __u32 d_ncylinders; /* # of data cylinders per unit */
+ __u32 d_secpercyl; /* # of data sectors per cylinder */
+ __u32 d_secperunit; /* # of data sectors per unit */
+ __u16 d_sparespertrack; /* # of spare sectors per track */
+ __u16 d_sparespercyl; /* # of spare sectors per cylinder */
+ __u32 d_acylinders; /* # of alt. cylinders per unit */
+ __u16 d_rpm; /* rotational speed */
+ __u16 d_interleave; /* hardware sector interleave */
+ __u16 d_trackskew; /* sector 0 skew, per track */
+ __u16 d_cylskew; /* sector 0 skew, per cylinder */
+ __u32 d_headswitch; /* head switch time, usec */
+ __u32 d_trkseek; /* track-to-track seek, usec */
+ __u32 d_flags; /* generic flags */
+#define NDDATA 5
+ __u32 d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ __u32 d_spare[NSPARE]; /* reserved for future use */
+ __u32 d_magic2; /* the magic number (again) */
+ __u16 d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ __u16 d_npartitions; /* number of partitions in following */
+ __u32 d_bbsize; /* size of boot area at sn0, bytes */
+ __u32 d_sbsize; /* max size of fs superblock, bytes */
+ struct bsd_partition { /* the partition table */
+ __u32 p_size; /* number of sectors in partition */
+ __u32 p_offset; /* starting sector */
+ __u32 p_fsize; /* filesystem basic fragment size */
+ __u8 p_fstype; /* filesystem type, see below */
+ __u8 p_frag; /* filesystem fragments per block */
+ __u16 p_cpg; /* filesystem cylinders per group */
+ } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
+};
+
+#endif /* CONFIG_BSD_DISKLABEL */
+
+extern struct gendisk *gendisk_head; /* linked list of disks */
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf);
+
+#endif
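Block drivers chain a struct gendisk onto gendisk_head and fill part[] with one hd_struct per minor. A hedged kernel-side sketch, assuming the drivers' init() hooks have already populated the list, that walks the chain and prints every non-empty partition with disk_name():

#include <linux/genhd.h>
#include <linux/kernel.h>

/* Kernel-side sketch: list every populated partition of every registered disk.
 * Assumes gendisk_head has been filled in by the drivers' init() hooks. */
static void dump_partitions(void)
{
        struct gendisk *gd;
        char buf[16];
        int minor;

        for (gd = gendisk_head; gd; gd = gd->next)
                for (minor = 0; minor < (gd->nr_real << gd->minor_shift); minor++)
                        if (gd->part[minor].nr_sects)
                                printk("%s: start %ld, %ld sectors\n",
                                       disk_name(gd, minor, buf),
                                       gd->part[minor].start_sect,
                                       gd->part[minor].nr_sects);
}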
diff --git a/linux/src/include/linux/hdreg.h b/linux/src/include/linux/hdreg.h
new file mode 100644
index 0000000..4a388c5
--- /dev/null
+++ b/linux/src/include/linux/hdreg.h
@@ -0,0 +1,240 @@
+#ifndef _LINUX_HDREG_H
+#define _LINUX_HDREG_H
+
+/*
+ * This file contains some defines for the AT-hd-controller.
+ * Various sources.
+ */
+
+#define HD_IRQ 14 /* the standard disk interrupt */
+
+/* ide.c has its own port definitions in "ide.h" */
+
+/* Hd controller regs. Ref: IBM AT Bios-listing */
+#define HD_DATA 0x1f0 /* _CTL when writing */
+#define HD_ERROR 0x1f1 /* see err-bits */
+#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
+#define HD_SECTOR 0x1f3 /* starting sector */
+#define HD_LCYL 0x1f4 /* starting cylinder */
+#define HD_HCYL 0x1f5 /* high byte of starting cyl */
+#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
+#define HD_STATUS 0x1f7 /* see status-bits */
+#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
+#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
+#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
+
+#define HD_CMD 0x3f6 /* used for resets */
+#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
+
+/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
+
+/* Bits of HD_STATUS */
+#define ERR_STAT 0x01
+#define INDEX_STAT 0x02
+#define ECC_STAT 0x04 /* Corrected error */
+#define DRQ_STAT 0x08
+#define SEEK_STAT 0x10
+#define WRERR_STAT 0x20
+#define READY_STAT 0x40
+#define BUSY_STAT 0x80
+
+/* Values for HD_COMMAND */
+#define WIN_RESTORE 0x10
+#define WIN_READ 0x20
+#define WIN_WRITE 0x30
+#define WIN_VERIFY 0x40
+#define WIN_FORMAT 0x50
+#define WIN_INIT 0x60
+#define WIN_SEEK 0x70
+#define WIN_DIAGNOSE 0x90
+#define WIN_SPECIFY 0x91 /* set drive geometry translation */
+#define WIN_SETIDLE1 0xE3
+#define WIN_SETIDLE2 0x97
+
+#define WIN_DOORLOCK 0xde /* lock door on removable drives */
+#define WIN_DOORUNLOCK 0xdf /* unlock door on removable drives */
+#define WIN_ACKMC 0xdb /* acknowledge media change */
+
+#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode */
+#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */
+#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */
+#define WIN_IDENTIFY 0xEC /* ask drive to identify itself */
+#define WIN_SETFEATURES 0xEF /* set special drive features */
+#define WIN_READDMA 0xc8 /* read sectors using DMA transfers */
+#define WIN_WRITEDMA 0xca /* write sectors using DMA transfers */
+#define WIN_READDMA_EXT 0x25 /* read sectors using LBA48 DMA transfers */
+#define WIN_WRITEDMA_EXT 0x35 /* write sectors using LBA48 DMA transfers */
+
+/* Additional drive command codes used by ATAPI devices. */
+#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */
+#define WIN_SRST 0x08 /* ATAPI soft reset command */
+#define WIN_PACKETCMD 0xa0 /* Send a packet command. */
+
+/* Non-standard commands */
+#define EXABYTE_ENABLE_NEST 0xf0
+
+/* Bits for HD_ERROR */
+#define MARK_ERR 0x01 /* Bad address mark */
+#define TRK0_ERR 0x02 /* couldn't find track 0 */
+#define ABRT_ERR 0x04 /* Command aborted */
+#define ID_ERR 0x10 /* ID field not found */
+#define MC_ERR 0x20 /* media changed */
+#define ECC_ERR 0x40 /* Uncorrectable ECC error */
+#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
+#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
+
+struct hd_geometry {
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ unsigned long start;
+};
+
+/* hd/ide ctl's that pass (arg) ptrs to user space are numbered 0x030n/0x031n */
+#define HDIO_GETGEO 0x0301 /* get device geometry */
+#define HDIO_GET_UNMASKINTR 0x0302 /* get current unmask setting */
+#define HDIO_GET_MULTCOUNT 0x0304 /* get current IDE blockmode setting */
+#define HDIO_OBSOLETE_IDENTITY 0x0307 /* OBSOLETE, DO NOT USE: returns 142 bytes */
+#define HDIO_GET_KEEPSETTINGS 0x0308 /* get keep-settings-on-reset flag */
+#define HDIO_GET_32BIT 0x0309 /* get current io_32bit setting */
+#define HDIO_GET_NOWERR 0x030a /* get ignore-write-error flag */
+#define HDIO_GET_DMA 0x030b /* get use-dma flag */
+#define HDIO_GET_IDENTITY 0x030d /* get IDE identification info */
+#define HDIO_DRIVE_CMD 0x031f /* execute a special drive command */
+
+/* hd/ide ctl's that pass (arg) non-ptr values are numbered 0x032n/0x033n */
+#define HDIO_SET_MULTCOUNT 0x0321 /* change IDE blockmode */
+#define HDIO_SET_UNMASKINTR 0x0322 /* permit other irqs during I/O */
+#define HDIO_SET_KEEPSETTINGS 0x0323 /* keep ioctl settings on reset */
+#define HDIO_SET_32BIT 0x0324 /* change io_32bit flags */
+#define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */
+#define HDIO_SET_DMA 0x0326 /* change use-dma flag */
+#define HDIO_SET_PIO_MODE 0x0327 /* reconfig interface to new speed */
+
+/* structure returned by HDIO_GET_IDENTITY, as per ANSI ATA2 rev.2f spec */
+struct hd_driveid {
+ unsigned short config; /* lots of obsolete bit flags */
+ unsigned short cyls; /* "physical" cyls */
+ unsigned short reserved2; /* reserved (word 2) */
+ unsigned short heads; /* "physical" heads */
+ unsigned short track_bytes; /* unformatted bytes per track */
+ unsigned short sector_bytes; /* unformatted bytes per sector */
+ unsigned short sectors; /* "physical" sectors per track */
+ unsigned short vendor0; /* vendor unique */
+ unsigned short vendor1; /* vendor unique */
+ unsigned short vendor2; /* vendor unique */
+ unsigned char serial_no[20]; /* 0 = not_specified */
+ unsigned short buf_type;
+ unsigned short buf_size; /* 512 byte increments; 0 = not_specified */
+ unsigned short ecc_bytes; /* for r/w long cmds; 0 = not_specified */
+ unsigned char fw_rev[8]; /* 0 = not_specified */
+ unsigned char model[40]; /* 0 = not_specified */
+ unsigned char max_multsect; /* 0=not_implemented */
+ unsigned char vendor3; /* vendor unique */
+ unsigned short dword_io; /* 0=not_implemented; 1=implemented */
+ unsigned char vendor4; /* vendor unique */
+ unsigned char capability; /* bits 0:DMA 1:LBA 2:IORDYsw 3:IORDYsup*/
+ unsigned short reserved50; /* reserved (word 50) */
+ unsigned char vendor5; /* vendor unique */
+ unsigned char tPIO; /* 0=slow, 1=medium, 2=fast */
+ unsigned char vendor6; /* vendor unique */
+ unsigned char tDMA; /* 0=slow, 1=medium, 2=fast */
+ unsigned short field_valid; /* bits 0:cur_ok 1:eide_ok */
+ unsigned short cur_cyls; /* logical cylinders */
+ unsigned short cur_heads; /* logical heads */
+ unsigned short cur_sectors; /* logical sectors per track */
+ unsigned short cur_capacity0; /* logical total sectors on drive */
+ unsigned short cur_capacity1; /* (2 words, misaligned int) */
+ unsigned char multsect; /* current multiple sector count */
+ unsigned char multsect_valid; /* when (bit0==1) multsect is ok */
+ unsigned int lba_capacity; /* total number of sectors */
+ unsigned short dma_1word; /* single-word dma info */
+ unsigned short dma_mword; /* multiple-word dma info */
+ unsigned short eide_pio_modes; /* bits 0:mode3 1:mode4 */
+ unsigned short eide_dma_min; /* min mword dma cycle time (ns) */
+ unsigned short eide_dma_time; /* recommended mword dma cycle time (ns) */
+ unsigned short eide_pio; /* min cycle time (ns), no IORDY */
+ unsigned short eide_pio_iordy; /* min cycle time (ns), with IORDY */
+ unsigned short word69;
+ unsigned short word70;
+ /* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
+ unsigned short word71;
+ unsigned short word72;
+ unsigned short word73;
+ unsigned short word74;
+ unsigned short word75;
+ unsigned short word76;
+ unsigned short word77;
+ unsigned short word78;
+ unsigned short word79;
+ unsigned short word80;
+ unsigned short word81;
+ unsigned short command_sets; /* bits 0:Smart 1:Security 2:Removable 3:PM */
+ unsigned short command_set_2; /* bits 14:Smart Enabled 13:0 zero */
+ unsigned short word84;
+ unsigned short word85;
+ unsigned short word86;
+ unsigned short word87;
+ unsigned short dma_ultra;
+ unsigned short word89; /* reserved (word 89) */
+ unsigned short word90; /* reserved (word 90) */
+ unsigned short word91; /* reserved (word 91) */
+ unsigned short word92; /* reserved (word 92) */
+ unsigned short word93; /* reserved (word 93) */
+ unsigned short word94; /* reserved (word 94) */
+ unsigned short word95; /* reserved (word 95) */
+ unsigned short word96; /* reserved (word 96) */
+ unsigned short word97; /* reserved (word 97) */
+ unsigned short word98; /* reserved (word 98) */
+ unsigned short word99; /* reserved (word 99) */
+ unsigned long long lba_capacity_2; /* 48-bit total number of sectors */
+ unsigned short word104; /* reserved (word 104) */
+ unsigned short word105; /* reserved (word 105) */
+ unsigned short word106; /* reserved (word 106) */
+ unsigned short word107; /* reserved (word 107) */
+ unsigned short word108; /* reserved (word 108) */
+ unsigned short word109; /* reserved (word 109) */
+ unsigned short word110; /* reserved (word 110) */
+ unsigned short word111; /* reserved (word 111) */
+ unsigned short word112; /* reserved (word 112) */
+ unsigned short word113; /* reserved (word 113) */
+ unsigned short word114; /* reserved (word 114) */
+ unsigned short word115; /* reserved (word 115) */
+ unsigned short word116; /* reserved (word 116) */
+ unsigned short word117; /* reserved (word 117) */
+ unsigned short word118; /* reserved (word 118) */
+ unsigned short word119; /* reserved (word 119) */
+ unsigned short word120; /* reserved (word 120) */
+ unsigned short word121; /* reserved (word 121) */
+ unsigned short word122; /* reserved (word 122) */
+ unsigned short word123; /* reserved (word 123) */
+ unsigned short word124; /* reserved (word 124) */
+ unsigned short word125; /* reserved (word 125) */
+ unsigned short word126; /* reserved (word 126) */
+ unsigned short word127; /* reserved (word 127) */
+ unsigned short security; /* bits 0:support 1:enabled 2:locked 3:frozen */
+ unsigned short reserved[127];
+};
+
+#ifdef __KERNEL__
+/*
+ * These routines are used for kernel command line parameters from main.c:
+ */
+#include <linux/config.h>
+
+#ifdef CONFIG_BLK_DEV_HD
+void hd_setup(char *, int *);
+#endif /* CONFIG_BLK_DEV_HD */
+#ifdef CONFIG_BLK_DEV_IDE
+void ide_setup(char *);
+
+#ifdef CONFIG_BLK_DEV_IDE_PCMCIA
+int ide_register(int io_port, int ctl_port, int irq);
+void ide_unregister(unsigned int);
+#endif /* CONFIG_BLK_DEV_IDE_PCMCIA */
+
+#endif /* CONFIG_BLK_DEV_IDE */
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_HDREG_H */
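
The hd_driveid layout above mirrors the ATA IDENTIFY DEVICE words, and the inline note at word 70 is a reminder that the HDIO_GET_IDENTITY ioctl only returns words 0 through 70. A minimal user-space sketch of reading the 28-bit capacity field through that ioctl might look like this (the /dev/hda path and the terse error handling are illustrative only):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hdreg.h>

	int main(void)
	{
		struct hd_driveid id;
		int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

		if (fd < 0 || ioctl(fd, HDIO_GET_IDENTITY, &id) < 0)
			return 1;		/* no drive, or ioctl unsupported */

		/* lba_capacity is the 28-bit sector count (IDENTIFY words 60-61) */
		printf("%u sectors (%u MiB)\n", id.lba_capacity,
		       id.lba_capacity / 2048);
		close(fd);
		return 0;
	}
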
diff --git a/linux/src/include/linux/head.h b/linux/src/include/linux/head.h
new file mode 100644
index 0000000..c501f21
--- /dev/null
+++ b/linux/src/include/linux/head.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_HEAD_H
+#define _LINUX_HEAD_H
+
+typedef struct desc_struct {
+ unsigned long a,b;
+} desc_table[256];
+
+/* XXX Linux code shouldn't use idt/gdt directly */
+/* extern desc_table idt,gdt; */
+
+#define GDT_NUL 0
+#define GDT_CODE 1
+#define GDT_DATA 2
+#define GDT_TMP 3
+
+#define LDT_NUL 0
+#define LDT_CODE 1
+#define LDT_DATA 2
+
+#endif
diff --git a/linux/src/include/linux/icmp.h b/linux/src/include/linux/icmp.h
new file mode 100644
index 0000000..611c41d
--- /dev/null
+++ b/linux/src/include/linux/icmp.h
@@ -0,0 +1,85 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP protocol.
+ *
+ * Version: @(#)icmp.h 1.0.3 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_ICMP_H
+#define _LINUX_ICMP_H
+
+#define ICMP_ECHOREPLY 0 /* Echo Reply */
+#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */
+#define ICMP_SOURCE_QUENCH 4 /* Source Quench */
+#define ICMP_REDIRECT 5 /* Redirect (change route) */
+#define ICMP_ECHO 8 /* Echo Request */
+#define ICMP_TIME_EXCEEDED 11 /* Time Exceeded */
+#define ICMP_PARAMETERPROB 12 /* Parameter Problem */
+#define ICMP_TIMESTAMP 13 /* Timestamp Request */
+#define ICMP_TIMESTAMPREPLY 14 /* Timestamp Reply */
+#define ICMP_INFO_REQUEST 15 /* Information Request */
+#define ICMP_INFO_REPLY 16 /* Information Reply */
+#define ICMP_ADDRESS 17 /* Address Mask Request */
+#define ICMP_ADDRESSREPLY 18 /* Address Mask Reply */
+
+
+/* Codes for UNREACH. */
+#define ICMP_NET_UNREACH 0 /* Network Unreachable */
+#define ICMP_HOST_UNREACH 1 /* Host Unreachable */
+#define ICMP_PROT_UNREACH 2 /* Protocol Unreachable */
+#define ICMP_PORT_UNREACH 3 /* Port Unreachable */
+#define ICMP_FRAG_NEEDED 4 /* Fragmentation Needed/DF set */
+#define ICMP_SR_FAILED 5 /* Source Route failed */
+#define ICMP_NET_UNKNOWN 6
+#define ICMP_HOST_UNKNOWN 7
+#define ICMP_HOST_ISOLATED 8
+#define ICMP_NET_ANO 9
+#define ICMP_HOST_ANO 10
+#define ICMP_NET_UNR_TOS 11
+#define ICMP_HOST_UNR_TOS 12
+#define ICMP_PKT_FILTERED 13 /* Packet filtered */
+#define ICMP_PREC_VIOLATION 14 /* Precedence violation */
+#define ICMP_PREC_CUTOFF 15 /* Precedence cut off */
+#define NR_ICMP_UNREACH 15 /* instead of hardcoding immediate value */
+
+/* Codes for REDIRECT. */
+#define ICMP_REDIR_NET 0 /* Redirect Net */
+#define ICMP_REDIR_HOST 1 /* Redirect Host */
+#define ICMP_REDIR_NETTOS 2 /* Redirect Net for TOS */
+#define ICMP_REDIR_HOSTTOS 3 /* Redirect Host for TOS */
+
+/* Codes for TIME_EXCEEDED. */
+#define ICMP_EXC_TTL 0 /* TTL count exceeded */
+#define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */
+
+
+struct icmphdr {
+ __u8 type;
+ __u8 code;
+ __u16 checksum;
+ union {
+ struct {
+ __u16 id;
+ __u16 sequence;
+ } echo;
+ __u32 gateway;
+ } un;
+};
+
+
+struct icmp_err {
+ int errno;
+ unsigned fatal:1;
+};
+
+
+#endif /* _LINUX_ICMP_H */
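
As a rough illustration of how the icmphdr layout and the type constants above fit together, the sketch below fills in an echo request header. The checksum helper is a generic RFC 1071 ones'-complement sum written for this example; it is not something this header provides.

	#include <string.h>
	#include <arpa/inet.h>
	#include <linux/icmp.h>

	static unsigned short icmp_cksum(const unsigned short *p, int len)
	{
		unsigned long sum = 0;

		while (len > 1) {
			sum += *p++;
			len -= 2;
		}
		if (len)				/* trailing odd byte */
			sum += *(const unsigned char *)p;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);
		return (unsigned short)~sum;
	}

	void build_echo_request(struct icmphdr *h, unsigned short id,
				unsigned short seq)
	{
		memset(h, 0, sizeof(*h));
		h->type = ICMP_ECHO;			/* type 8, code 0 */
		h->un.echo.id = htons(id);
		h->un.echo.sequence = htons(seq);
		h->checksum = icmp_cksum((unsigned short *)h, sizeof(*h));
	}
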
diff --git a/linux/src/include/linux/if.h b/linux/src/include/linux/if.h
new file mode 100644
index 0000000..7dee13a
--- /dev/null
+++ b/linux/src/include/linux/if.h
@@ -0,0 +1,155 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the INET interface module.
+ *
+ * Version: @(#)if.h 1.0.2 04/18/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+
+/* Standard interface flags. */
+#define IFF_UP 0x1 /* interface is up */
+#define IFF_BROADCAST 0x2 /* broadcast address valid */
+#define IFF_DEBUG 0x4 /* turn on debugging */
+#define IFF_LOOPBACK 0x8 /* is a loopback net */
+#define IFF_POINTOPOINT 0x10		/* interface has p-p link	*/
+#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_NOARP 0x80 /* no ARP protocol */
+#define IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define IFF_MASTER 0x400 /* master of a load balancer */
+#define IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define IFF_MULTICAST 0x1000 /* Supports multicast */
+#define IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address
+ * families, are allocated and attached when an address is set,
+ * and are linked together so all addresses for an interface can
+ * be located.
+ */
+
+struct ifaddr
+{
+ struct sockaddr ifa_addr; /* address of interface */
+ union {
+ struct sockaddr ifu_broadaddr;
+ struct sockaddr ifu_dstaddr;
+ } ifa_ifu;
+ struct iface *ifa_ifp; /* back-pointer to interface */
+ struct ifaddr *ifa_next; /* next address for interface */
+};
+
+#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */
+#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */
+
+/*
+ * Device mapping structure. I'd just gone off and designed a
+ * beautiful scheme using only loadable modules with arguments
+ * for driver options and along come the PCMCIA people 8)
+ *
+ * Ah well. The get() side of this is good for WDSETUP, and it'll
+ * be handy for debugging things. The set side is fine for now and
+ * being very small might be worth keeping for clean configuration.
+ */
+
+struct ifmap
+{
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+ /* 3 bytes spare */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+
+struct ifreq
+{
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_map ifr_ifru.ifru_map /* device map */
+#define ifr_slave ifr_ifru.ifru_slave /* slave device */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for machine (useful for programs which
+ * must know all networks accessible).
+ */
+
+struct ifconf
+{
+ int ifc_len; /* size of buffer */
+ union
+ {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
+
+#endif /* _LINUX_IF_H */
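
The ifreq union and the ifr_* accessor macros above are the currency of the SIOC* socket ioctls. A hedged user-space sketch of testing IFF_UP on an interface (the AF_INET datagram socket is just a conventional handle for these ioctls; SIOCGIFFLAGS comes from <linux/sockios.h>) could read:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/if.h>
	#include <linux/sockios.h>

	int interface_is_up(const char *name)
	{
		struct ifreq ifr;
		int s = socket(AF_INET, SOCK_DGRAM, 0);
		int up = 0;

		if (s < 0)
			return 0;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
		if (ioctl(s, SIOCGIFFLAGS, &ifr) == 0)
			up = (ifr.ifr_flags & IFF_UP) != 0;
		close(s);
		return up;
	}
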
diff --git a/linux/src/include/linux/if_arp.h b/linux/src/include/linux/if_arp.h
new file mode 100644
index 0000000..6104ee2
--- /dev/null
+++ b/linux/src/include/linux/if_arp.h
@@ -0,0 +1,130 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ARP (RFC 826) protocol.
+ *
+ * Version: @(#)if_arp.h 1.0.2 08/12/96
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source.
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche,
+ * Jonathan Layes, <layes@loran.com>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_ARP_H
+#define _LINUX_IF_ARP_H
+
+#include <linux/netdevice.h>
+
+/* ARP protocol HARDWARE identifiers. */
+#define ARPHRD_NETROM 0 /* from KA9Q: NET/ROM pseudo */
+#define ARPHRD_ETHER 1 /* Ethernet 10Mbps */
+#define ARPHRD_EETHER 2 /* Experimental Ethernet */
+#define ARPHRD_AX25 3 /* AX.25 Level 2 */
+#define ARPHRD_PRONET 4 /* PROnet token ring */
+#define ARPHRD_CHAOS 5 /* Chaosnet */
+#define ARPHRD_IEEE802 6 /* IEEE 802.2 Ethernet/TR/TB */
+#define ARPHRD_ARCNET 7 /* ARCnet */
+#define ARPHRD_APPLETLK 8 /* APPLEtalk */
+#define ARPHRD_DLCI 15 /* Frame Relay DLCI */
+#define ARPHRD_METRICOM 23 /* Metricom STRIP (new IANA id) */
+
+/* Dummy types for non ARP hardware */
+#define ARPHRD_SLIP 256
+#define ARPHRD_CSLIP 257
+#define ARPHRD_SLIP6 258
+#define ARPHRD_CSLIP6 259
+#define ARPHRD_RSRVD 260 /* Notional KISS type */
+#define ARPHRD_ADAPT 264
+#define ARPHRD_ROSE 270
+#define ARPHRD_PPP 512
+
+#define ARPHRD_TUNNEL 768 /* IPIP tunnel */
+#define ARPHRD_TUNNEL6 769 /* IPIP6 tunnel */
+#define ARPHRD_FRAD 770 /* Frame Relay Access Device */
+#define ARPHRD_SKIP 771 /* SKIP vif */
+#define ARPHRD_LOOPBACK 772 /* Loopback device */
+#define ARPHRD_LOCALTLK 773 /* Localtalk device */
+#define ARPHRD_FDDI 774 /* Fiber Distributed Data Interface */
+
+/* ARP protocol opcodes. */
+#define ARPOP_REQUEST 1 /* ARP request */
+#define ARPOP_REPLY 2 /* ARP reply */
+#define ARPOP_RREQUEST 3 /* RARP request */
+#define ARPOP_RREPLY 4 /* RARP reply */
+
+
+/* ARP ioctl request. */
+struct arpreq {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ char arp_dev[16];
+};
+
+struct arpreq_old {
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+};
+
+/* ARP Flag values. */
+#define ATF_COM 0x02 /* completed entry (ha valid) */
+#define ATF_PERM 0x04 /* permanent entry */
+#define ATF_PUBL 0x08 /* publish entry */
+#define ATF_USETRAILERS 0x10 /* has requested trailers */
+#define ATF_NETMASK 0x20 /* want to use a netmask (only
+ for proxy entries) */
+
+/*
+ * This structure defines an ethernet arp header.
+ */
+
+struct arphdr
+{
+ unsigned short ar_hrd; /* format of hardware address */
+ unsigned short ar_pro; /* format of protocol address */
+ unsigned char ar_hln; /* length of hardware address */
+ unsigned char ar_pln; /* length of protocol address */
+ unsigned short ar_op; /* ARP opcode (command) */
+
+#if 0
+ /*
+ * Ethernet looks like this : This bit is variable sized however...
+ */
+ unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */
+ unsigned char ar_sip[4]; /* sender IP address */
+ unsigned char ar_tha[ETH_ALEN]; /* target hardware address */
+ unsigned char ar_tip[4]; /* target IP address */
+#endif
+
+};
+
+/* Support for the user space arp daemon, arpd */
+
+#define ARPD_UPDATE 0x01
+#define ARPD_LOOKUP 0x02
+#define ARPD_FLUSH 0x03
+
+struct arpd_request
+{
+ unsigned short req; /* request type */
+ __u32 ip; /* ip address of entry */
+ unsigned long dev; /* Device entry is tied to */
+ unsigned long stamp;
+ unsigned long updated;
+ unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
+};
+
+#endif /* _LINUX_IF_ARP_H */
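
For orientation, the fixed part of an Ethernet/IPv4 ARP request can be built from the arphdr layout and the opcode constants above; the variable-length sender/target addresses then follow the header on the wire, as the #if 0 block in the struct notes. This is a kernel-style sketch, not code from this tree:

	#include <asm/byteorder.h>
	#include <linux/if_arp.h>
	#include <linux/if_ether.h>

	static void init_arp_request(struct arphdr *arp)
	{
		arp->ar_hrd = htons(ARPHRD_ETHER);	/* hardware type: Ethernet */
		arp->ar_pro = htons(ETH_P_IP);		/* protocol type: IPv4 */
		arp->ar_hln = ETH_ALEN;			/* 6-byte MAC addresses */
		arp->ar_pln = 4;			/* 4-byte IPv4 addresses */
		arp->ar_op  = htons(ARPOP_REQUEST);
	}
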
diff --git a/linux/src/include/linux/if_ether.h b/linux/src/include/linux/if_ether.h
new file mode 100644
index 0000000..dd09d83
--- /dev/null
+++ b/linux/src/include/linux/if_ether.h
@@ -0,0 +1,119 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Ethernet IEEE 802.3 interface.
+ *
+ * Version: @(#)if_ether.h 1.0.1a 02/08/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@cymru.net>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_ETHER_H
+#define _LINUX_IF_ETHER_H
+
+/*
+ * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
+ * and FCS/CRC (frame check sequence).
+ */
+
+#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+
+/*
+ * These are the defined Ethernet Protocol ID's.
+ */
+
+#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
+#define ETH_P_ECHO 0x0200 /* Ethernet Echo packet */
+#define ETH_P_PUP 0x0400 /* Xerox PUP packet */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_X25 0x0805 /* CCITT X.25 */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
+#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
+#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
+#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
+#define ETH_P_LAT 0x6004 /* DEC LAT */
+#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
+#define ETH_P_CUST 0x6006 /* DEC Customer use */
+#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+#define ETH_P_ATALK 0x809B /* Appletalk DDP */
+#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
+#define ETH_P_IPX 0x8137 /* IPX over DIX */
+#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
+
+/*
+ * Non DIX types. Won't clash for 1500 types.
+ */
+
+#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
+#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
+#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
+#define ETH_P_802_2 0x0004 /* 802.2 frames */
+#define ETH_P_SNAP 0x0005 /* Internal only */
+#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
+#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
+#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
+#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
+#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
+#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
+
+/*
+ * This is an Ethernet frame header.
+ */
+
+struct ethhdr
+{
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned short h_proto; /* packet type ID field */
+};
+
+/*
+ * Ethernet statistics collection data.
+ */
+
+struct enet_statistics
+{
+ int rx_packets; /* total packets received */
+ int tx_packets; /* total packets transmitted */
+ int rx_errors; /* bad packets received */
+ int tx_errors; /* packet transmit problems */
+ int rx_dropped; /* no space in linux buffers */
+ int tx_dropped; /* no space available in linux */
+ int multicast; /* multicast packets received */
+ int collisions;
+
+ /* detailed rx_errors: */
+ int rx_length_errors;
+ int rx_over_errors; /* receiver ring buff overflow */
+ int rx_crc_errors; /* recved pkt with crc error */
+ int rx_frame_errors; /* recv'd frame alignment error */
+ int rx_fifo_errors; /* recv'r fifo overrun */
+ int rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ int tx_aborted_errors;
+ int tx_carrier_errors;
+ int tx_fifo_errors;
+ int tx_heartbeat_errors;
+ int tx_window_errors;
+};
+
+
+#endif /* _LINUX_IF_ETHER_H */
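
A corresponding sketch for the 14-byte header defined by struct ethhdr above; the destination and source MACs are whatever the caller supplies, and ETH_P_IP marks an IPv4 payload:

	#include <string.h>
	#include <asm/byteorder.h>
	#include <linux/if_ether.h>

	static void fill_eth_header(struct ethhdr *eh,
				    const unsigned char dst[ETH_ALEN],
				    const unsigned char src[ETH_ALEN])
	{
		memcpy(eh->h_dest, dst, ETH_ALEN);
		memcpy(eh->h_source, src, ETH_ALEN);
		eh->h_proto = htons(ETH_P_IP);	/* payload is an IPv4 packet */
	}
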
diff --git a/linux/src/include/linux/if_fddi.h b/linux/src/include/linux/if_fddi.h
new file mode 100644
index 0000000..6db6745
--- /dev/null
+++ b/linux/src/include/linux/if_fddi.h
@@ -0,0 +1,202 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ANSI FDDI interface.
+ *
+ * Version: @(#)if_fddi.h 1.0.1 09/16/96
+ *
+ * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * if_fddi.h is based on previous if_ether.h and if_tr.h work by
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@cymru.net>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_FDDI_H
+#define _LINUX_IF_FDDI_H
+
+/*
+ * Define max and min legal sizes. The frame sizes do not include
+ * 4 byte FCS/CRC (frame check sequence).
+ */
+#define FDDI_K_ALEN 6 /* Octets in one FDDI address */
+#define FDDI_K_8022_HLEN 16 /* Total octets in 802.2 header */
+#define FDDI_K_SNAP_HLEN 21 /* Total octets in 802.2 SNAP header */
+#define FDDI_K_8022_ZLEN 16 /* Min octets in 802.2 frame sans FCS */
+#define FDDI_K_SNAP_ZLEN 21 /* Min octets in 802.2 SNAP frame sans FCS */
+#define FDDI_K_8022_DLEN 4475 /* Max octets in 802.2 payload */
+#define FDDI_K_SNAP_DLEN 4470 /* Max octets in 802.2 SNAP payload */
+#define FDDI_K_LLC_ZLEN 13 /* Min octets in LLC frame sans FCS */
+#define FDDI_K_LLC_LEN 4491 /* Max octets in LLC frame sans FCS */
+
+/* Define FDDI Frame Control (FC) Byte values */
+#define FDDI_FC_K_VOID 0x00
+#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
+#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0
+#define FDDI_FC_K_SMT_MIN 0x41
+#define FDDI_FC_K_SMT_MAX 0x4F
+#define FDDI_FC_K_MAC_MIN 0xC1
+#define FDDI_FC_K_MAC_MAX 0xCF
+#define FDDI_FC_K_ASYNC_LLC_MIN 0x50
+#define FDDI_FC_K_ASYNC_LLC_DEF 0x54
+#define FDDI_FC_K_ASYNC_LLC_MAX 0x5F
+#define FDDI_FC_K_SYNC_LLC_MIN 0xD0
+#define FDDI_FC_K_SYNC_LLC_MAX 0xD7
+#define FDDI_FC_K_IMPLEMENTOR_MIN 0x60
+#define FDDI_FC_K_IMPLEMENTOR_MAX 0x6F
+#define FDDI_FC_K_RESERVED_MIN 0x70
+#define FDDI_FC_K_RESERVED_MAX 0x7F
+
+/* Define LLC and SNAP constants */
+#define FDDI_EXTENDED_SAP 0xAA
+#define FDDI_UI_CMD 0x03
+
+/* Define 802.2 Type 1 header */
+struct fddi_8022_1_hdr
+ {
+ __u8 dsap; /* destination service access point */
+ __u8 ssap; /* source service access point */
+ __u8 ctrl; /* control byte #1 */
+ } __attribute__ ((packed));
+
+/* Define 802.2 Type 2 header */
+struct fddi_8022_2_hdr
+ {
+ __u8 dsap; /* destination service access point */
+ __u8 ssap; /* source service access point */
+ __u8 ctrl_1; /* control byte #1 */
+ __u8 ctrl_2; /* control byte #2 */
+ } __attribute__ ((packed));
+
+/* Define 802.2 SNAP header */
+#define FDDI_K_OUI_LEN 3
+struct fddi_snap_hdr
+ {
+ __u8 dsap; /* always 0xAA */
+ __u8 ssap; /* always 0xAA */
+ __u8 ctrl; /* always 0x03 */
+ __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */
+ __u16 ethertype; /* packet type ID field */
+ } __attribute__ ((packed));
+
+/* Define FDDI LLC frame header */
+struct fddihdr
+ {
+ __u8 fc; /* frame control */
+ __u8 daddr[FDDI_K_ALEN]; /* destination address */
+ __u8 saddr[FDDI_K_ALEN]; /* source address */
+ union
+ {
+ struct fddi_8022_1_hdr llc_8022_1;
+ struct fddi_8022_2_hdr llc_8022_2;
+ struct fddi_snap_hdr llc_snap;
+ } hdr;
+ } __attribute__ ((packed));
+
+/* Define FDDI statistics structure */
+struct fddi_statistics
+ {
+ __u32 rx_packets; /* total packets received */
+ __u32 tx_packets; /* total packets transmitted */
+ __u32 rx_errors; /* bad packets received */
+ __u32 tx_errors; /* packet transmit problems */
+ __u32 rx_dropped; /* no space in linux buffers */
+ __u32 tx_dropped; /* no space available in linux */
+ __u32 multicast; /* multicast packets received */
+ __u32 transmit_collision; /* always 0 for FDDI */
+
+ /* Detailed FDDI statistics. Adopted from RFC 1512 */
+
+ __u8 smt_station_id[8];
+ __u32 smt_op_version_id;
+ __u32 smt_hi_version_id;
+ __u32 smt_lo_version_id;
+ __u8 smt_user_data[32];
+ __u32 smt_mib_version_id;
+ __u32 smt_mac_cts;
+ __u32 smt_non_master_cts;
+ __u32 smt_master_cts;
+ __u32 smt_available_paths;
+ __u32 smt_config_capabilities;
+ __u32 smt_config_policy;
+ __u32 smt_connection_policy;
+ __u32 smt_t_notify;
+ __u32 smt_stat_rpt_policy;
+ __u32 smt_trace_max_expiration;
+ __u32 smt_bypass_present;
+ __u32 smt_ecm_state;
+ __u32 smt_cf_state;
+ __u32 smt_remote_disconnect_flag;
+ __u32 smt_station_status;
+ __u32 smt_peer_wrap_flag;
+ __u32 smt_time_stamp;
+ __u32 smt_transition_time_stamp;
+ __u32 mac_frame_status_functions;
+ __u32 mac_t_max_capability;
+ __u32 mac_tvx_capability;
+ __u32 mac_available_paths;
+ __u32 mac_current_path;
+ __u8 mac_upstream_nbr[FDDI_K_ALEN];
+ __u8 mac_downstream_nbr[FDDI_K_ALEN];
+ __u8 mac_old_upstream_nbr[FDDI_K_ALEN];
+ __u8 mac_old_downstream_nbr[FDDI_K_ALEN];
+ __u32 mac_dup_address_test;
+ __u32 mac_requested_paths;
+ __u32 mac_downstream_port_type;
+ __u8 mac_smt_address[FDDI_K_ALEN];
+ __u32 mac_t_req;
+ __u32 mac_t_neg;
+ __u32 mac_t_max;
+ __u32 mac_tvx_value;
+ __u32 mac_frame_cts;
+ __u32 mac_copied_cts;
+ __u32 mac_transmit_cts;
+ __u32 mac_error_cts;
+ __u32 mac_lost_cts;
+ __u32 mac_frame_error_threshold;
+ __u32 mac_frame_error_ratio;
+ __u32 mac_rmt_state;
+ __u32 mac_da_flag;
+ __u32 mac_una_da_flag;
+ __u32 mac_frame_error_flag;
+ __u32 mac_ma_unitdata_available;
+ __u32 mac_hardware_present;
+ __u32 mac_ma_unitdata_enable;
+ __u32 path_tvx_lower_bound;
+ __u32 path_t_max_lower_bound;
+ __u32 path_max_t_req;
+ __u32 path_configuration[8];
+ __u32 port_my_type[2];
+ __u32 port_neighbor_type[2];
+ __u32 port_connection_policies[2];
+ __u32 port_mac_indicated[2];
+ __u32 port_current_path[2];
+ __u8 port_requested_paths[3*2];
+ __u32 port_mac_placement[2];
+ __u32 port_available_paths[2];
+ __u32 port_pmd_class[2];
+ __u32 port_connection_capabilities[2];
+ __u32 port_bs_flag[2];
+ __u32 port_lct_fail_cts[2];
+ __u32 port_ler_estimate[2];
+ __u32 port_lem_reject_cts[2];
+ __u32 port_lem_cts[2];
+ __u32 port_ler_cutoff[2];
+ __u32 port_ler_alarm[2];
+ __u32 port_connect_state[2];
+ __u32 port_pcm_state[2];
+ __u32 port_pc_withhold[2];
+ __u32 port_ler_flag[2];
+ __u32 port_hardware_present[2];
+ };
+
+#endif /* _LINUX_IF_FDDI_H */
diff --git a/linux/src/include/linux/if_tr.h b/linux/src/include/linux/if_tr.h
new file mode 100644
index 0000000..545f1b7
--- /dev/null
+++ b/linux/src/include/linux/if_tr.h
@@ -0,0 +1,102 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Token-Ring IEEE 802.5 interface.
+ *
+ * Version: @(#)if_tr.h 0.0 07/11/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_TR_H
+#define _LINUX_IF_TR_H
+
+
+/* IEEE 802.5 Token-Ring magic constants. The frame sizes omit the preamble
+ and FCS/CRC (frame check sequence). */
+#define TR_ALEN 6 /* Octets in one ethernet addr */
+#define TR_HLEN (sizeof(struct trh_hdr)+sizeof(struct trllc))
+#define AC 0x10
+#define LLC_FRAME 0x40
+#if 0
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+#endif
+
+
+/* These are some defined Ethernet Protocol ID's. */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+
+/* LLC and SNAP constants */
+#define EXTENDED_SAP 0xAA
+#define UI_CMD 0x03
+
+/* This is a Token-Ring frame header. */
+struct trh_hdr {
+ __u8 ac; /* access control field */
+ __u8 fc; /* frame control field */
+ __u8 daddr[TR_ALEN]; /* destination address */
+ __u8 saddr[TR_ALEN]; /* source address */
+ __u16 rcf; /* route control field */
+ __u16 rseg[8]; /* routing registers */
+};
+
+/* This is a Token-Ring LLC structure */
+struct trllc {
+ __u8 dsap; /* destination SAP */
+ __u8 ssap; /* source SAP */
+ __u8 llc; /* LLC control field */
+ __u8 protid[3]; /* protocol id */
+ __u16 ethertype; /* ether type field */
+};
+
+/* Token-Ring statistics collection data. */
+struct tr_statistics {
+ int rx_packets; /* total packets received */
+ int tx_packets; /* total packets transmitted */
+ int rx_errors; /* bad packets received */
+ int tx_errors; /* packet transmit problems */
+ int rx_dropped; /* no space in linux buffers */
+ int tx_dropped; /* no space available in linux */
+ int multicast; /* multicast packets received */
+ int transmit_collision;
+
+ /* detailed Token-Ring errors. See IBM Token-Ring Network
+ Architecture for more info */
+
+ int line_errors;
+ int internal_errors;
+ int burst_errors;
+ int A_C_errors;
+ int abort_delimiters;
+ int lost_frames;
+ int recv_congest_count;
+ int frame_copied_errors;
+ int frequency_errors;
+ int token_errors;
+ int dummy1;
+};
+
+/* source routing stuff */
+
+#define TR_RII 0x80
+#define TR_RCF_DIR_BIT 0x80
+#define TR_RCF_LEN_MASK 0x1f00
+#define TR_RCF_BROADCAST 0x8000
+#define TR_RCF_LIMITED_BROADCAST 0xA000
+#define TR_RCF_FRAME2K 0x20
+#define TR_RCF_BROADCAST_MASK 0xC000
+
+#endif /* _LINUX_IF_TR_H */
diff --git a/linux/src/include/linux/igmp.h b/linux/src/include/linux/igmp.h
new file mode 100644
index 0000000..82569a6
--- /dev/null
+++ b/linux/src/include/linux/igmp.h
@@ -0,0 +1,119 @@
+/*
+ * Linux NET3: Internet Group Management Protocol [IGMP]
+ *
+ * Authors:
+ * Alan Cox <Alan.Cox@linux.org>
+ *
+ * Extended to talk the BSD extended IGMP protocol of mrouted 3.6
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_IGMP_H
+#define _LINUX_IGMP_H
+
+/*
+ * IGMP protocol structures
+ */
+
+/*
+ *	Header, in on-the-wire (cable) format
+ */
+
+struct igmphdr
+{
+ __u8 type;
+ __u8 code; /* For newer IGMP */
+ __u16 csum;
+ __u32 group;
+};
+
+#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */
+#define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */
+#define IGMP_DVMRP 0x13 /* DVMRP routing */
+#define IGMP_PIM 0x14 /* PIM routing */
+#define IGMP_TRACE 0x15 /* CISCO trace */
+#define IGMP_HOST_NEW_MEMBERSHIP_REPORT 0x16 /* New version of 0x11 */
+#define IGMP_HOST_LEAVE_MESSAGE 0x17 /* An extra BSD seems to send */
+
+#define IGMP_MTRACE_RESP 0x1e
+#define IGMP_MTRACE 0x1f
+
+
+/*
+ * Use the BSD names for these for compatibility
+ */
+
+#define IGMP_DELAYING_MEMBER 0x01
+#define IGMP_IDLE_MEMBER 0x02
+#define IGMP_LAZY_MEMBER 0x03
+#define IGMP_SLEEPING_MEMBER 0x04
+#define IGMP_AWAKENING_MEMBER 0x05
+
+#define IGMP_OLD_ROUTER 0x00
+#define IGMP_NEW_ROUTER 0x01
+
+#define IGMP_MINLEN 8
+
+#define IGMP_MAX_HOST_REPORT_DELAY 10 /* max delay for response to */
+ /* query (in seconds) */
+
+#define IGMP_TIMER_SCALE 10 /* denotes that the igmphdr->timer field */
+ /* specifies time in 10th of seconds */
+
+#define IGMP_AGE_THRESHOLD		540	/* If this host doesn't hear any IGMP V1 */
+ /* message in this period of time, */
+ /* revert to IGMP v2 router. */
+
+#define IGMP_ALL_HOSTS htonl(0xE0000001L)
+#define IGMP_ALL_ROUTER htonl(0xE0000002L)
+#define IGMP_LOCAL_GROUP htonl(0xE0000000L)
+#define IGMP_LOCAL_GROUP_MASK htonl(0xFFFFFF00L)
+
+/*
+ * struct for keeping the multicast list in
+ */
+
+#ifdef __KERNEL__
+struct ip_mc_socklist
+{
+ unsigned long multiaddr[IP_MAX_MEMBERSHIPS]; /* This is a speed trade off */
+ struct device *multidev[IP_MAX_MEMBERSHIPS];
+};
+
+struct ip_mc_list
+{
+ struct device *interface;
+ unsigned long multiaddr;
+ struct ip_mc_list *next;
+ struct timer_list timer;
+ short tm_running;
+ short reporter;
+ int users;
+};
+
+struct ip_router_info
+{
+ struct device *dev;
+ int type; /* type of router which is querier on this interface */
+ int time; /* # of slow timeouts since last old query */
+ struct timer_list timer;
+ struct ip_router_info *next;
+};
+
+extern struct ip_mc_list *ip_mc_head;
+
+
+extern int igmp_rcv(struct sk_buff *, struct device *, struct options *, __u32, unsigned short,
+ __u32, int , struct inet_protocol *);
+extern void ip_mc_drop_device(struct device *dev);
+extern int ip_mc_join_group(struct sock *sk, struct device *dev, unsigned long addr);
+extern int ip_mc_leave_group(struct sock *sk, struct device *dev,unsigned long addr);
+extern void ip_mc_drop_socket(struct sock *sk);
+extern void ip_mr_init(void);
+#endif
+#endif
diff --git a/linux/src/include/linux/in.h b/linux/src/include/linux/in.h
new file mode 100644
index 0000000..b2a44f5
--- /dev/null
+++ b/linux/src/include/linux/in.h
@@ -0,0 +1,149 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the Internet Protocol.
+ *
+ * Version: @(#)in.h 1.0.1 04/21/93
+ *
+ * Authors: Original taken from the GNU Project <netinet/in.h> file.
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IN_H
+#define _LINUX_IN_H
+
+#include <linux/types.h>
+
+/* Standard well-defined IP protocols. */
+enum {
+ IPPROTO_IP = 0, /* Dummy protocol for TCP */
+ IPPROTO_ICMP = 1, /* Internet Control Message Protocol */
+ IPPROTO_IGMP = 2, /* Internet Group Management Protocol */
+ IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+ IPPROTO_TCP = 6, /* Transmission Control Protocol */
+ IPPROTO_EGP = 8, /* Exterior Gateway Protocol */
+ IPPROTO_PUP = 12, /* PUP protocol */
+ IPPROTO_UDP = 17, /* User Datagram Protocol */
+ IPPROTO_IDP = 22, /* XNS IDP protocol */
+
+ IPPROTO_RAW = 255, /* Raw IP packets */
+ IPPROTO_MAX
+};
+
+
+/* Internet address. */
+struct in_addr {
+ __u32 s_addr;
+};
+
+/* Request struct for multicast socket ops */
+
+struct ip_mreq
+{
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+
+/* Structure describing an Internet (IP) socket address. */
+#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
+struct sockaddr_in {
+ short int sin_family; /* Address family */
+ unsigned short int sin_port; /* Port number */
+ struct in_addr sin_addr; /* Internet address */
+
+ /* Pad to size of `struct sockaddr'. */
+ unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) -
+ sizeof(unsigned short int) - sizeof(struct in_addr)];
+};
+#define sin_zero __pad /* for BSD UNIX comp. -FvK */
+
+
+/*
+ * Definitions of the bits in an Internet address integer.
+ * On subnets, host and network parts are found according
+ * to the subnet mask, not these masks.
+ */
+#define IN_CLASSA(a) ((((long int) (a)) & 0x80000000) == 0)
+#define IN_CLASSA_NET 0xff000000
+#define IN_CLASSA_NSHIFT 24
+#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET)
+#define IN_CLASSA_MAX 128
+
+#define IN_CLASSB(a) ((((long int) (a)) & 0xc0000000) == 0x80000000)
+#define IN_CLASSB_NET 0xffff0000
+#define IN_CLASSB_NSHIFT 16
+#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET)
+#define IN_CLASSB_MAX 65536
+
+#define IN_CLASSC(a) ((((long int) (a)) & 0xe0000000) == 0xc0000000)
+#define IN_CLASSC_NET 0xffffff00
+#define IN_CLASSC_NSHIFT 8
+#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET)
+
+#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
+#define IN_MULTICAST(a) IN_CLASSD(a)
+#define IN_MULTICAST_NET 0xF0000000
+
+#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xe0000000) == 0xe0000000)
+#define IN_BADCLASS(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+
+/* Address to accept any incoming messages. */
+#define INADDR_ANY ((unsigned long int) 0x00000000)
+
+/* Address to send to all hosts. */
+#define INADDR_BROADCAST ((unsigned long int) 0xffffffff)
+
+/* Address indicating an error return. */
+#define INADDR_NONE ((unsigned long int) 0xffffffff)
+
+/* Network number for local host loopback. */
+#define IN_LOOPBACKNET 127
+
+/* Address to loopback in software to local host. */
+#define INADDR_LOOPBACK 0x7f000001 /* 127.0.0.1 */
+#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
+
+/* Defines for Multicast INADDR */
+#define INADDR_UNSPEC_GROUP 0xe0000000 /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001 /* 224.0.0.1 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ff /* 224.0.0.255 */
+
+/* <asm/byteorder.h> contains the htonl type stuff.. */
+
+#include <asm/byteorder.h>
+
+/* Some random defines to make it easier in the kernel.. */
+#ifdef __KERNEL__
+
+#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+#define MULTICAST(x) (((x) & htonl(0xf0000000)) == htonl(0xe0000000))
+
+#endif
+
+/*
+ * IPv6 definitions as we start to include them. This is just
+ * a beginning -- don't get excited 8)
+ */
+
+struct in_addr6
+{
+ unsigned char s6_addr[16];
+};
+
+struct sockaddr_in6
+{
+ unsigned short sin6_family;
+ unsigned short sin6_port;
+ unsigned long sin6_flowinfo;
+ struct in_addr6 sin6_addr;
+};
+
+
+#endif /* _LINUX_IN_H */
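
To show how the pieces above combine, here is a small kernel-style sketch that fills a sockaddr_in for the loopback address; AF_INET comes from <linux/socket.h>, and htons/htonl from the <asm/byteorder.h> include that this header already pulls in. The port number is arbitrary.

	#include <linux/string.h>
	#include <linux/socket.h>
	#include <linux/in.h>

	static void fill_loopback_addr(struct sockaddr_in *sin)
	{
		memset(sin, 0, sizeof(*sin));	/* also clears the __pad bytes */
		sin->sin_family = AF_INET;
		sin->sin_port = htons(7);	/* echo, as an example */
		sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* 127.0.0.1 */
	}
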
diff --git a/linux/src/include/linux/inet.h b/linux/src/include/linux/inet.h
new file mode 100644
index 0000000..0d0fbd6
--- /dev/null
+++ b/linux/src/include/linux/inet.h
@@ -0,0 +1,52 @@
+/*
+ * Swansea University Computer Society NET3
+ *
+ * This work is derived from NET2Debugged, which is in turn derived
+ * from NET2D which was written by:
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This work was derived from Ross Biro's inspirational work
+ * for the LINUX operating system. His version numbers were:
+ *
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ * $Id: inet.h,v 1.1 1999/04/26 05:56:34 tb Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_INET_H
+#define _LINUX_INET_H
+
+#ifdef __KERNEL__
+
+extern void inet_proto_init(struct net_proto *pro);
+extern char *in_ntoa(unsigned long in);
+extern unsigned long in_aton(const char *str);
+
+#endif
+#endif /* _LINUX_INET_H */
diff --git a/linux/src/include/linux/init.h b/linux/src/include/linux/init.h
new file mode 100644
index 0000000..d4798b2
--- /dev/null
+++ b/linux/src/include/linux/init.h
@@ -0,0 +1,30 @@
+#ifndef _COMPAT_INIT_H
+#define _COMPAT_INIT_H
+
+#include <linux/compiler.h>
+
+#ifdef MODULE
+#define __exitused
+#else
+#define __exitused __used
+#endif
+
+#define __init
+#define __initdata
+#define __exit __exitused __cold notrace
+#define __exitdata
+#define __devinit
+#define __devinitdata
+#define __devexit
+#define __devexitdata
+
+#ifndef module_init
+#define module_init(x)
+#define module_exit(x)
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p(x) (x)
+#endif
+
+#endif /* _COMPAT_INIT_H */
diff --git a/linux/src/include/linux/interrupt.h b/linux/src/include/linux/interrupt.h
new file mode 100644
index 0000000..0224475
--- /dev/null
+++ b/linux/src/include/linux/interrupt.h
@@ -0,0 +1,120 @@
+/* interrupt.h */
+#ifndef _LINUX_INTERRUPT_H
+#define _LINUX_INTERRUPT_H
+
+#include <linux/kernel.h>
+#include <asm/bitops.h>
+
+struct irqaction {
+ void (*handler)(int, void *, struct pt_regs *);
+ unsigned long flags;
+ unsigned long mask;
+ const char *name;
+ void *dev_id;
+ struct irqaction *next;
+};
+
+extern unsigned int intr_count;
+
+extern int bh_mask_count[32];
+extern unsigned int bh_active;
+extern unsigned int bh_mask;
+extern void (*bh_base[32])(void);
+
+asmlinkage void do_bottom_half(void);
+
+/* Who gets which entry in bh_base. Things which will occur most often
+ should come first - in which case NET should be up the top with SERIAL/TQUEUE! */
+
+enum {
+ TIMER_BH = 0,
+ CONSOLE_BH,
+ TQUEUE_BH,
+ DIGI_BH,
+ SERIAL_BH,
+ RISCOM8_BH,
+ SPECIALIX_BH,
+ BAYCOM_BH,
+ NET_BH,
+ IMMEDIATE_BH,
+ KEYBOARD_BH,
+ CYCLADES_BH,
+ CM206_BH,
+ ISICOM_BH
+};
+
+static inline void init_bh(int nr, void (*routine)(void))
+{
+ bh_base[nr] = routine;
+ bh_mask_count[nr] = 0;
+ bh_mask |= 1 << nr;
+}
+
+static inline void mark_bh(int nr)
+{
+ set_bit(nr, &bh_active);
+}
+
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+static inline void disable_bh(int nr)
+{
+ bh_mask &= ~(1 << nr);
+ bh_mask_count[nr]++;
+}
+
+static inline void enable_bh(int nr)
+{
+ if (!--bh_mask_count[nr])
+ bh_mask |= 1 << nr;
+}
+
+/*
+ * start_bh_atomic/end_bh_atomic also nest
+ * naturally by using a counter
+ */
+static inline void start_bh_atomic(void)
+{
+ intr_count++;
+ barrier();
+}
+
+static inline void end_bh_atomic(void)
+{
+ barrier();
+ intr_count--;
+}
+
+/*
+ * Autoprobing for irqs:
+ *
+ * probe_irq_on() and probe_irq_off() provide robust primitives
+ * for accurate IRQ probing during kernel initialization. They are
+ * reasonably simple to use, are not "fooled" by spurious interrupts,
+ * and, unlike other attempts at IRQ probing, they do not get hung on
+ * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
+ *
+ * For reasonably foolproof probing, use them as follows:
+ *
+ * 1. clear and/or mask the device's internal interrupt.
+ * 2. sti();
+ * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
+ * 4. enable the device and cause it to trigger an interrupt.
+ * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
+ * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
+ * 7. service the device to clear its pending interrupt.
+ * 8. loop again if paranoia is required.
+ *
+ * probe_irq_on() returns a mask of allocated irq's.
+ *
+ * probe_irq_off() takes the mask as a parameter,
+ * and returns the irq number which occurred,
+ * or zero if none occurred, or a negative irq number
+ * if more than one irq occurred.
+ */
+extern unsigned long probe_irq_on(void); /* returns 0 on failure */
+extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
+
+#endif
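
The numbered autoprobe recipe in the comment above translates into driver code roughly as follows. This is only a sketch: outb_p(), the register offsets, and the delay are invented for an imaginary ISA board, and real drivers add retries and sanity checks.

	#include <linux/interrupt.h>
	#include <linux/delay.h>
	#include <asm/io.h>
	#include <asm/system.h>

	static int probe_board_irq(int ioaddr)
	{
		unsigned long mask;
		int irq;

		outb_p(0x00, ioaddr + 4);	/* 1. mask the board's own interrupt */
		sti();				/* 2. enable CPU interrupts */
		mask = probe_irq_on();		/* 3. take over unassigned IRQs */
		outb_p(0x01, ioaddr + 4);	/* 4. ask the board to raise an IRQ */
		udelay(100);			/* 5. give it time to fire */
		irq = probe_irq_off(mask);	/* 6. 0 = none, negative = several */
		outb_p(0x00, ioaddr + 4);	/* 7. quiesce the board again */

		return irq;
	}
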
diff --git a/linux/src/include/linux/ioctl.h b/linux/src/include/linux/ioctl.h
new file mode 100644
index 0000000..aa91eb3
--- /dev/null
+++ b/linux/src/include/linux/ioctl.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_IOCTL_H
+#define _LINUX_IOCTL_H
+
+#include <asm/ioctl.h>
+
+#endif /* _LINUX_IOCTL_H */
+
diff --git a/linux/src/include/linux/ioport.h b/linux/src/include/linux/ioport.h
new file mode 100644
index 0000000..293b468
--- /dev/null
+++ b/linux/src/include/linux/ioport.h
@@ -0,0 +1,31 @@
+/*
+ * portio.h Definitions of routines for detecting, reserving and
+ * allocating system resources.
+ *
+ * Version: 0.01 8/30/93
+ *
+ * Author: Donald Becker (becker@super.org)
+ */
+
+#ifndef _LINUX_PORTIO_H
+#define _LINUX_PORTIO_H
+
+#define HAVE_PORTRESERVE
+/*
+ * Call check_region() before probing for your hardware.
+ * Once you have found your hardware, register it with request_region().
+ * If you unload the driver, use release_region to free ports.
+ */
+extern void reserve_setup(char *str, int *ints);
+extern int check_region(unsigned int from, unsigned int extent);
+extern void request_region(unsigned int from, unsigned int extent,const char *name);
+extern void release_region(unsigned int from, unsigned int extent);
+extern int get_ioport_list(char *);
+
+
+#define HAVE_AUTOIRQ
+extern void *irq2dev_map[]; /* Use only if you own the IRQ. */
+extern int autoirq_setup(int waittime);
+extern int autoirq_report(int waittime);
+
+#endif /* _LINUX_PORTIO_H */
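
The probe protocol described above (check_region() before touching the ports, request_region() once the hardware is confirmed, release_region() on unload) looks roughly like this in a driver; the base address, extent, and name are placeholders:

	#include <linux/ioport.h>

	#define FOO_BASE	0x300	/* hypothetical card's I/O base */
	#define FOO_EXTENT	8	/* number of ports it decodes */

	static int foo_probe(void)
	{
		if (check_region(FOO_BASE, FOO_EXTENT))
			return -1;	/* ports already owned by someone else */

		/* ... poke the ports to verify the card really is there ... */

		request_region(FOO_BASE, FOO_EXTENT, "foo");
		return 0;
	}

	static void foo_unload(void)
	{
		release_region(FOO_BASE, FOO_EXTENT);
	}
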
diff --git a/linux/src/include/linux/ip.h b/linux/src/include/linux/ip.h
new file mode 100644
index 0000000..6bbe740
--- /dev/null
+++ b/linux/src/include/linux/ip.h
@@ -0,0 +1,112 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP protocol.
+ *
+ * Version: @(#)ip.h 1.0.2 04/28/93
+ *
+ * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IP_H
+#define _LINUX_IP_H
+#include <asm/byteorder.h>
+
+#define IPOPT_END 0
+#define IPOPT_NOOP 1
+#define IPOPT_SEC 130
+#define IPOPT_LSRR 131
+#define IPOPT_SSRR 137
+#define IPOPT_RR 7
+#define IPOPT_SID 136
+#define IPOPT_TIMESTAMP 68
+
+
+#define MAXTTL 255
+
+struct timestamp {
+ __u8 len;
+ __u8 ptr;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 flags:4,
+ overflow:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 overflow:4,
+ flags:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u32 data[9];
+};
+
+
+#define MAX_ROUTE 16
+
+struct route {
+ char route_size;
+ char pointer;
+ unsigned long route[MAX_ROUTE];
+};
+
+#define IPOPT_OPTVAL 0
+#define IPOPT_OLEN 1
+#define IPOPT_OFFSET 2
+#define IPOPT_MINOFF 4
+#define MAX_IPOPTLEN 40
+#define IPOPT_NOP IPOPT_NOOP
+#define IPOPT_EOL IPOPT_END
+#define IPOPT_TS IPOPT_TIMESTAMP
+
+#define IPOPT_TS_TSONLY 0 /* timestamps only */
+#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
+#define IPOPT_TS_PRESPEC 3 /* specified modules only */
+
+struct options {
+ __u32 faddr; /* Saved first hop address */
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_setbyuser:1, /* Set by setsockopt? */
+ is_data:1, /* Options in __data, rather than skb */
+ is_strictroute:1, /* Strict source route */
+			srr_is_hit:1,		/* Packet destination addr was one of ours */
+			is_changed:1,		/* IP checksum no longer valid	*/
+ rr_needaddr:1, /* Need to record addr of outgoing dev */
+ ts_needtime:1, /* Need to record timestamp */
+ ts_needaddr:1; /* Need to record addr of outgoing dev */
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __pad3;
+ unsigned char __data[0];
+};
+
+struct iphdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ ihl:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 tos;
+ __u16 tot_len;
+ __u16 id;
+ __u16 frag_off;
+ __u8 ttl;
+ __u8 protocol;
+ __u16 check;
+ __u32 saddr;
+ __u32 daddr;
+ /*The options start here. */
+};
+
+#endif /* _LINUX_IP_H */
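
A sketch of filling the fixed 20-byte header declared by struct iphdr above; the header checksum is deliberately left at zero here, since it is normally computed (or recomputed) just before transmission, and protocol 17 (UDP) is only an example:

	#include <string.h>
	#include <arpa/inet.h>
	#include <linux/types.h>
	#include <linux/ip.h>

	void fill_ip_header(struct iphdr *ip, __u32 saddr, __u32 daddr,
			    unsigned short payload_len)
	{
		memset(ip, 0, sizeof(*ip));
		ip->version  = 4;
		ip->ihl      = 5;		/* 5 * 4 = 20 bytes, no options */
		ip->tot_len  = htons(sizeof(*ip) + payload_len);
		ip->ttl      = MAXTTL;		/* 255, from the define above */
		ip->protocol = 17;		/* e.g. UDP */
		ip->saddr    = saddr;		/* already in network byte order */
		ip->daddr    = daddr;
		ip->check    = 0;		/* filled in by the sender */
	}
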
diff --git a/linux/src/include/linux/ipc.h b/linux/src/include/linux/ipc.h
new file mode 100644
index 0000000..3878e02
--- /dev/null
+++ b/linux/src/include/linux/ipc.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_IPC_H
+#define _LINUX_IPC_H
+#include <linux/types.h>
+
+typedef int key_t; /* should go in <types.h> type for IPC key */
+#define IPC_PRIVATE ((key_t) 0)
+
+struct ipc_perm
+{
+ key_t key;
+ ushort uid; /* owner euid and egid */
+ ushort gid;
+ ushort cuid; /* creator euid and egid */
+ ushort cgid;
+ ushort mode; /* access modes see mode flags below */
+ ushort seq; /* sequence number */
+};
+
+
+/* resource get request flags */
+#define IPC_CREAT 00001000 /* create if key is nonexistent */
+#define IPC_EXCL 00002000 /* fail if key exists */
+#define IPC_NOWAIT 00004000 /* return error on wait */
+
+
+/*
+ * Control commands used with semctl, msgctl and shmctl
+ * see also specific commands in sem.h, msg.h and shm.h
+ */
+#define IPC_RMID 0 /* remove resource */
+#define IPC_SET 1 /* set ipc_perm options */
+#define IPC_STAT 2 /* get ipc_perm options */
+#define IPC_INFO 3 /* see ipcs */
+
+#ifdef __KERNEL__
+
+/* special shmsegs[id], msgque[id] or semary[id] values */
+#define IPC_UNUSED ((void *) -1)
+#define IPC_NOID ((void *) -2) /* being allocated/destroyed */
+
+/*
+ * These are used to wrap system calls. See ipc/util.c.
+ */
+struct ipc_kludge {
+ struct msgbuf *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+#define IPCCALL(version,op) ((version)<<16 | (op))
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_IPC_H */
+
+
diff --git a/linux/src/include/linux/ipx.h b/linux/src/include/linux/ipx.h
new file mode 100644
index 0000000..6fb26f7
--- /dev/null
+++ b/linux/src/include/linux/ipx.h
@@ -0,0 +1,80 @@
+#ifndef _IPX_H_
+#define _IPX_H_
+#include <linux/sockios.h>
+#define IPX_NODE_LEN 6
+#define IPX_MTU 576
+
+struct sockaddr_ipx
+{
+ short sipx_family;
+ short sipx_port;
+ unsigned long sipx_network;
+ unsigned char sipx_node[IPX_NODE_LEN];
+ unsigned char sipx_type;
+ unsigned char sipx_zero; /* 16 byte fill */
+};
+
+/*
+ * So we can fit the extra info for SIOCSIFADDR into the address nicely
+ */
+
+#define sipx_special sipx_port
+#define sipx_action sipx_zero
+#define IPX_DLTITF 0
+#define IPX_CRTITF 1
+
+typedef struct ipx_route_definition
+{
+ unsigned long ipx_network;
+ unsigned long ipx_router_network;
+ unsigned char ipx_router_node[IPX_NODE_LEN];
+} ipx_route_definition;
+
+typedef struct ipx_interface_definition
+{
+ unsigned long ipx_network;
+ unsigned char ipx_device[16];
+ unsigned char ipx_dlink_type;
+#define IPX_FRAME_NONE 0
+#define IPX_FRAME_SNAP 1
+#define IPX_FRAME_8022 2
+#define IPX_FRAME_ETHERII 3
+#define IPX_FRAME_8023 4
+#define IPX_FRAME_TR_8022 5
+ unsigned char ipx_special;
+#define IPX_SPECIAL_NONE 0
+#define IPX_PRIMARY 1
+#define IPX_INTERNAL 2
+ unsigned char ipx_node[IPX_NODE_LEN];
+} ipx_interface_definition;
+
+typedef struct ipx_config_data
+{
+ unsigned char ipxcfg_auto_select_primary;
+ unsigned char ipxcfg_auto_create_interfaces;
+} ipx_config_data;
+
+/*
+ * OLD Route Definition for backward compatibility.
+ */
+
+struct ipx_route_def
+{
+ unsigned long ipx_network;
+ unsigned long ipx_router_network;
+#define IPX_ROUTE_NO_ROUTER 0
+ unsigned char ipx_router_node[IPX_NODE_LEN];
+ unsigned char ipx_device[16];
+ unsigned short ipx_flags;
+#define IPX_RT_SNAP 8
+#define IPX_RT_8022 4
+#define IPX_RT_BLUEBOOK 2
+#define IPX_RT_ROUTED 1
+};
+
+#define SIOCAIPXITFCRT (SIOCPROTOPRIVATE)
+#define SIOCAIPXPRISLT (SIOCPROTOPRIVATE+1)
+#define SIOCIPXCFGDATA (SIOCPROTOPRIVATE+2)
+#define SIOCIPXNCPCONN (SIOCPROTOPRIVATE+3)
+#endif
+
diff --git a/linux/src/include/linux/kcomp.h b/linux/src/include/linux/kcomp.h
new file mode 100644
index 0000000..5e06d7e
--- /dev/null
+++ b/linux/src/include/linux/kcomp.h
@@ -0,0 +1,52 @@
+/*
+ * Kernel compatibility glue to allow USB compile on 2.2.x kernels
+ */
+
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/pagemap.h>
+
+#define pci_enable_device(x) 0
+
+#define page_address(x) (x | PAGE_OFFSET)
+
+#define TTY_DRIVER_NO_DEVFS 0
+
+#define net_device device
+#define dev_kfree_skb_irq(a) dev_kfree_skb(a, FREE_WRITE)
+#define netif_wake_queue(dev) do { clear_bit(0, &dev->tbusy); mark_bh(NET_BH); } while(0)
+#define netif_stop_queue(dev) test_and_set_bit(0, &dev->tbusy)
+#define netif_start_queue(dev) do { dev->tbusy = 0; dev->interrupt = 0; dev->start = 1; } while (0)
+#define netif_queue_stopped(dev) dev->tbusy
+#define netif_running(dev) dev->start
+
+/* hot-(un)plugging stuff */
+static inline int netif_device_present(struct net_device *dev)
+{
+ return test_bit(0, &dev->start);
+}
+
+static inline void netif_device_detach(struct net_device *dev)
+{
+ if ( test_and_clear_bit(0, &dev->start) )
+ netif_stop_queue(dev);
+}
+
+static inline void netif_device_attach(struct net_device *dev)
+{
+ if ( !test_and_set_bit(0, &dev->start) )
+ netif_wake_queue(dev);
+}
+
+#define NET_XMIT_SUCCESS 0
+#define NET_XMIT_DROP 1
+#define NET_XMIT_CN 2
+
+#define IORESOURCE_IO 1
+#define pci_resource_start(dev,bar) \
+(((dev)->base_address[(bar)] & PCI_BASE_ADDRESS_SPACE) ? \
+ ((dev)->base_address[(bar)] & PCI_BASE_ADDRESS_IO_MASK) : \
+ ((dev)->base_address[(bar)] & PCI_BASE_ADDRESS_MEM_MASK))
+#define pci_resource_flags(dev, i) (dev->base_address[i] & IORESOURCE_IO)
+
diff --git a/linux/src/include/linux/kdev_t.h b/linux/src/include/linux/kdev_t.h
new file mode 100644
index 0000000..0497ea8
--- /dev/null
+++ b/linux/src/include/linux/kdev_t.h
@@ -0,0 +1,114 @@
+#ifndef _LINUX_KDEV_T_H
+#define _LINUX_KDEV_T_H
+#ifdef __KERNEL__
+/*
+As a preparation for the introduction of larger device numbers,
+we introduce a type kdev_t to hold them. No information about
+this type is known outside of this include file.
+
+Objects of type kdev_t designate a device. Outside of the kernel
+the corresponding things are objects of type dev_t - usually an
+integral type with the device major and minor in the high and low
+bits, respectively. Conversion is done by
+
+extern kdev_t to_kdev_t(int);
+
+It is up to the various file systems to decide how objects of type
+dev_t are stored on disk.
+The only other point of contact between kernel and outside world
+are the system calls stat and mknod, new versions of which will
+eventually have to be used in libc.
+
+[Unfortunately, the floppy control ioctls fail to hide the internal
+kernel structures, and the fd_device field of a struct floppy_drive_struct
+is user-visible. So, it remains a dev_t for the moment, with some ugly
+conversions in floppy.c.]
+
+Inside the kernel, we aim for a kdev_t type that is a pointer
+to a structure with information about the device (like major,
+minor, size, blocksize, sectorsize, name, read-only flag,
+struct file_operations etc.).
+
+However, for the time being we let kdev_t be almost the same as dev_t:
+
+typedef struct { unsigned short major, minor; } kdev_t;
+
+Admissible operations on an object of type kdev_t:
+- passing it along
+- comparing it for equality with another such object
+- storing it in ROOT_DEV, inode->i_dev, inode->i_rdev, sb->s_dev,
+ bh->b_dev, req->rq_dev, de->dc_dev, tty->device
+- using its bit pattern as argument in a hash function
+- finding its major and minor
+- complaining about it
+
+An object of type kdev_t is created only by the function MKDEV(),
+with the single exception of the constant 0 (no device).
+
+Right now the other information mentioned above is usually found
+in static arrays indexed by major or major,minor.
+
+An obstacle to immediately using
+ typedef struct { ... (* lots of information *) } *kdev_t
+is the case of mknod used to create a block device that the
+kernel doesn't know about at present (but first learns about
+when some module is inserted).
+
+aeb - 950811
+*/
+
+/* Since MINOR(dev) is used as index in static arrays,
+ the kernel is not quite ready yet for larger minors.
+ However, everything runs fine with an arbitrary kdev_t type. */
+
+#define MINORBITS 8
+#define MINORMASK ((1<<MINORBITS) - 1)
+
+typedef unsigned short kdev_t;
+
+#define MAJOR(dev) ((dev) >> MINORBITS)
+#define MINOR(dev) ((dev) & MINORMASK)
+#define HASHDEV(dev) (dev)
+#define NODEV 0
+#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
+#define B_FREE 0xffff /* yuk */
+
+extern char * kdevname(kdev_t); /* note: returns pointer to static data! */
+
+/*
+As long as device numbers in the outside world have 16 bits only,
+we use these conversions.
+*/
+
+static inline unsigned int kdev_t_to_nr(kdev_t dev) {
+ return (MAJOR(dev)<<8) | MINOR(dev);
+}
+
+static inline kdev_t to_kdev_t(int dev)
+{
+ int major, minor;
+#if 0
+ major = (dev >> 16);
+ if (!major) {
+ major = (dev >> 8);
+ minor = (dev & 0xff);
+ } else
+ minor = (dev & 0xffff);
+#else
+ major = (dev >> 8);
+ minor = (dev & 0xff);
+#endif
+ return MKDEV(major, minor);
+}
+
+#else /* __KERNEL__ */
+
+/*
+Some programs want their definitions of MAJOR and MINOR and MKDEV
+from the kernel sources. These must be the externally visible ones.
+*/
+#define MAJOR(dev) ((dev)>>8)
+#define MINOR(dev) ((dev) & 0xff)
+#define MKDEV(ma,mi) ((ma)<<8 | (mi))
+#endif /* __KERNEL__ */
+#endif
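
The "admissible operations" listed in the comment above boil down to the MKDEV/MAJOR/MINOR macros and the two conversion helpers. Inside the kernel (__KERNEL__ defined) a round trip looks like this; major 3, minor 65 is the traditional hdb1, used only as an example:

	#include <linux/kdev_t.h>

	static void kdev_demo(void)
	{
		kdev_t dev = MKDEV(3, 65);		/* e.g. hdb1 */
		unsigned int major = MAJOR(dev);	/* -> 3 */
		unsigned int minor = MINOR(dev);	/* -> 65 */
		unsigned int nr = kdev_t_to_nr(dev);	/* 16-bit major<<8 | minor */
		kdev_t back = to_kdev_t(nr);		/* round-trips to dev */

		(void)major; (void)minor; (void)back;
	}
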
diff --git a/linux/src/include/linux/kernel.h b/linux/src/include/linux/kernel.h
new file mode 100644
index 0000000..e05912b
--- /dev/null
+++ b/linux/src/include/linux/kernel.h
@@ -0,0 +1,97 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often-used function prototypes etc
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+
+/* Optimization barrier */
+#define barrier() __asm__("": : :"memory")
+
+#define INT_MAX ((int)(~0U>>1))
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define ULONG_MAX (~0UL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+
+# define NORET_TYPE /**/
+# define ATTRIB_NORET __attribute__((noreturn))
+# define NORET_AND noreturn,
+
+extern void math_error(void);
+NORET_TYPE void panic(const char * fmt, ...)
+ __attribute__ ((NORET_AND format (printf, 1, 2)));
+NORET_TYPE void do_exit(long error_code)
+ ATTRIB_NORET;
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern int sprintf(char * buf, const char * fmt, ...);
+extern int vsprintf(char *buf, const char *, va_list);
+
+extern int session_of_pgrp(int pgrp);
+
+extern int kill_proc(int pid, int sig, int priv);
+extern int kill_pg(int pgrp, int sig, int priv);
+extern int kill_sl(int sess, int sig, int priv);
+
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+#if DEBUG
+#define pr_debug(fmt,arg...) \
+ printk(KERN_DEBUG fmt,##arg)
+#else
+#define pr_debug(fmt,arg...) \
+ do { } while (0)
+#endif
+
+#define pr_info(fmt,arg...) \
+ printk(KERN_INFO fmt,##arg)
+
+/*
+ * "suser()" checks against the effective user id, while "fsuser()"
+ * is used for file permission checking and checks against the fsuid..
+ */
+#define fsuser() (current->fsuid == 0)
+
+/*
+ * Display an IP address in readable format.
+ */
+
+#define NIPQUAD(addr) \
+ (((addr) >> 0) & 0xff), \
+ (((addr) >> 8) & 0xff), \
+ (((addr) >> 16) & 0xff), \
+ (((addr) >> 24) & 0xff)
+
+#endif /* __KERNEL__ */
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ long uptime; /* Seconds since boot */
+ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
+ unsigned long totalram; /* Total usable main memory size */
+ unsigned long freeram; /* Available memory size */
+ unsigned long sharedram; /* Amount of shared memory */
+ unsigned long bufferram; /* Memory used by buffers */
+ unsigned long totalswap; /* Total swap space size */
+ unsigned long freeswap; /* swap space still available */
+ unsigned short procs; /* Number of current processes */
+ char _f[22]; /* Pads structure to 64 bytes */
+};
+
+#endif
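
[Editorial note: a short sketch of how the log-level prefixes, pr_debug() and NIPQUAD() above
are meant to be used; the function and the address value are made up for illustration.]

    /* Sketch: NIPQUAD() expands to four byte-sized values, one per "%u". */
    static void report_addr(unsigned int addr)
    {
            printk(KERN_INFO "interface up, addr %u.%u.%u.%u\n", NIPQUAD(addr));

            /* Compiled away unless DEBUG is set, per the macro above. */
            pr_debug("raw addr %#x\n", addr);
    }
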
diff --git a/linux/src/include/linux/kernel_stat.h b/linux/src/include/linux/kernel_stat.h
new file mode 100644
index 0000000..1966490
--- /dev/null
+++ b/linux/src/include/linux/kernel_stat.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_KERNEL_STAT_H
+#define _LINUX_KERNEL_STAT_H
+
+#include <asm/irq.h>
+
+/*
+ * 'kernel_stat.h' contains the definitions needed for doing
+ * some kernel statistics (cpu usage, context switches ...),
+ * used by rstatd/perfmeter
+ */
+
+#define DK_NDRIVE 4
+
+struct kernel_stat {
+ unsigned int cpu_user, cpu_nice, cpu_system;
+ unsigned int dk_drive[DK_NDRIVE];
+ unsigned int dk_drive_rio[DK_NDRIVE];
+ unsigned int dk_drive_wio[DK_NDRIVE];
+ unsigned int dk_drive_rblk[DK_NDRIVE];
+ unsigned int dk_drive_wblk[DK_NDRIVE];
+ unsigned int pgpgin, pgpgout;
+ unsigned int pswpin, pswpout;
+ unsigned int interrupts[NR_IRQS];
+ unsigned int ipackets, opackets;
+ unsigned int ierrors, oerrors;
+ unsigned int collisions;
+ unsigned int context_swtch;
+};
+
+extern struct kernel_stat kstat;
+
+#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/linux/src/include/linux/limits.h b/linux/src/include/linux/limits.h
new file mode 100644
index 0000000..d0f300c
--- /dev/null
+++ b/linux/src/include/linux/limits.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_LIMITS_H
+#define _LINUX_LIMITS_H
+
+#define NR_OPEN 256
+
+#define NGROUPS_MAX 32 /* supplemental group IDs are available */
+#define ARG_MAX 131072 /* # bytes of args + environ for exec() */
+#define CHILD_MAX 999 /* no limit :-) */
+#define OPEN_MAX 256 /* # open files a process may have */
+#define LINK_MAX 127 /* # links a file may have */
+#define MAX_CANON 255 /* size of the canonical input queue */
+#define MAX_INPUT 255 /* size of the type-ahead buffer */
+#define NAME_MAX 255 /* # chars in a file name */
+#define PATH_MAX 1024 /* # chars in a path name */
+#define PIPE_BUF 4096 /* # bytes in atomic write to a pipe */
+
+#endif
diff --git a/linux/src/include/linux/linkage.h b/linux/src/include/linux/linkage.h
new file mode 100644
index 0000000..c8a7a49
--- /dev/null
+++ b/linux/src/include/linux/linkage.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_LINKAGE_H
+#define _LINUX_LINKAGE_H
+
+#ifdef __cplusplus
+#define asmlinkage extern "C"
+#else
+#define asmlinkage
+#endif
+
+#ifdef __ELF__
+#define SYMBOL_NAME_STR(X) #X
+#define SYMBOL_NAME(X) X
+#ifdef __STDC__
+#define SYMBOL_NAME_LABEL(X) X##:
+#else
+#define SYMBOL_NAME_LABEL(X) X/**/:
+#endif
+#else
+#define SYMBOL_NAME_STR(X) "_"#X
+#ifdef __STDC__
+#define SYMBOL_NAME(X) _##X
+#define SYMBOL_NAME_LABEL(X) _##X##:
+#else
+#define SYMBOL_NAME(X) _/**/X
+#define SYMBOL_NAME_LABEL(X) _/**/X/**/:
+#endif
+#endif
+
+#if !defined(__i486__) && !defined(__i586__)
+#ifdef __ELF__
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#else /* __ELF__ */
+#define __ALIGN .align 2,0x90
+#define __ALIGN_STR ".align 2,0x90"
+#endif /* __ELF__ */
+#else /* __i486__/__i586__ */
+#ifdef __ELF__
+#define __ALIGN .align 16,0x90
+#define __ALIGN_STR ".align 16,0x90"
+#else /* __ELF__ */
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#endif /* __ELF__ */
+#endif /* __i486__/__i586__ */
+
+#ifdef __ASSEMBLY__
+
+#define ALIGN __ALIGN
+#define ALIGN_STRING __ALIGN_STRING
+
+#define ENTRY(name) \
+ .globl SYMBOL_NAME(name); \
+ ALIGN; \
+ SYMBOL_NAME_LABEL(name)
+
+#endif
+
+#endif
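
[Editorial note: a hedged sketch of the intended use of asmlinkage and ENTRY(); the handler
and label names are hypothetical.]

    /* Sketch: asmlinkage gives a function plain C linkage so hand-written
     * assembly (or C++ glue) can reference it by its undecorated symbol name. */
    asmlinkage void example_trap_handler(void)
    {
            /* ... */
    }

    /* In a .S file (compiled with __ASSEMBLY__ defined), the matching entry
     * point would be emitted with ENTRY(), which produces a global, aligned
     * label under either a.out or ELF name decoration:
     *
     *      ENTRY(example_entry)
     *              call    SYMBOL_NAME(example_trap_handler)
     *              ret
     */
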
diff --git a/linux/src/include/linux/list.h b/linux/src/include/linux/list.h
new file mode 100644
index 0000000..27a6ff4
--- /dev/null
+++ b/linux/src/include/linux/list.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_LIST_H
+#define _LINUX_LIST_H
+
+#ifdef __KERNEL__
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = { &name, &name }
+
+#define INIT_LIST_HEAD(ptr) do { \
+ (ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_add(struct list_head * new,
+ struct list_head * prev,
+ struct list_head * next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/*
+ * Insert a new entry after the specified head..
+ */
+static __inline__ void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/*
+ * Insert a new entry at the tail
+ */
+static __inline__ void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static __inline__ void __list_del(struct list_head * prev,
+ struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+static __inline__ void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static __inline__ int list_empty(struct list_head *head)
+{
+ return head->next == head;
+}
+
+/*
+ * Splice in "list" into "head"
+ */
+static __inline__ void list_splice(struct list_head *list, struct list_head *head)
+{
+ struct list_head *first = list->next;
+
+ if (first != list) {
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ first->prev = head;
+ head->next = first;
+
+ last->next = at;
+ at->prev = last;
+ }
+}
+
+#define list_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+#endif /* __KERNEL__ */
+
+#endif
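
[Editorial note: a minimal sketch of the usual embedding pattern for the list primitives
above; "struct item" and the field names are made up, and printk is assumed available.]

    struct item {
            int value;
            struct list_head node;     /* links this item into a list */
    };

    static LIST_HEAD(item_list);       /* empty list: head points at itself */

    static void item_demo(struct item *it)
    {
            struct list_head *pos;

            list_add_tail(&it->node, &item_list);        /* append */

            list_for_each(pos, &item_list) {
                    struct item *cur = list_entry(pos, struct item, node);
                    printk("value = %d\n", cur->value);
            }

            list_del(&it->node);                         /* unlink again */
    }
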
diff --git a/linux/src/include/linux/locks.h b/linux/src/include/linux/locks.h
new file mode 100644
index 0000000..9735bc6
--- /dev/null
+++ b/linux/src/include/linux/locks.h
@@ -0,0 +1,65 @@
+#ifndef _LINUX_LOCKS_H
+#define _LINUX_LOCKS_H
+
+#ifndef _LINUX_MM_H
+#include <linux/mm.h>
+#endif
+#ifndef _LINUX_PAGEMAP_H
+#include <linux/pagemap.h>
+#endif
+
+/*
+ * Unlocked, temporary IO buffer_heads get moved to the reuse_list
+ * once their page becomes unlocked.
+ */
+extern struct buffer_head *reuse_list;
+
+/*
+ * Buffer cache locking - note that interrupts may only unlock, not
+ * lock buffers.
+ */
+extern void __wait_on_buffer(struct buffer_head *);
+
+extern inline void wait_on_buffer(struct buffer_head * bh)
+{
+ if (test_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+extern inline void lock_buffer(struct buffer_head * bh)
+{
+ while (set_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+void unlock_buffer(struct buffer_head *);
+
+
+/*
+ * super-block locking. Again, interrupts may only unlock
+ * a super-block (although even this isn't done right now.
+ * nfs may need it).
+ */
+extern void __wait_on_super(struct super_block *);
+
+extern inline void wait_on_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+}
+
+extern inline void lock_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+ sb->s_lock = 1;
+}
+
+extern inline void unlock_super(struct super_block * sb)
+{
+ sb->s_lock = 0;
+ wake_up(&sb->s_wait);
+}
+
+#endif /* _LINUX_LOCKS_H */
+
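[Editorial note: a sketch of the locking pattern the header above describes. lock_buffer()
spins on set_bit() and sleeps in __wait_on_buffer() until it wins the BH_Lock bit;
interrupts are only ever allowed to unlock. The function is hypothetical.]

    static void touch_buffer_contents(struct buffer_head *bh)
    {
            lock_buffer(bh);
            /* ... modify bh->b_data while nobody else can touch it ... */
            unlock_buffer(bh);      /* clears BH_Lock and wakes waiters */
    }
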
diff --git a/linux/src/include/linux/major.h b/linux/src/include/linux/major.h
new file mode 100644
index 0000000..97d9926
--- /dev/null
+++ b/linux/src/include/linux/major.h
@@ -0,0 +1,88 @@
+#ifndef _LINUX_MAJOR_H
+#define _LINUX_MAJOR_H
+
+/*
+ * This file has definitions for major device numbers.
+ * For the device number assignments, see Documentation/devices.txt.
+ */
+
+/* limits */
+
+#define MAX_CHRDEV 128
+#define MAX_BLKDEV 128
+
+#define UNNAMED_MAJOR 0
+#define MEM_MAJOR 1
+#define RAMDISK_MAJOR 1
+#define FLOPPY_MAJOR 2
+#define PTY_MASTER_MAJOR 2
+#define IDE0_MAJOR 3
+#define PTY_SLAVE_MAJOR 3
+#define HD_MAJOR IDE0_MAJOR
+#define TTY_MAJOR 4
+#define TTYAUX_MAJOR 5
+#define LP_MAJOR 6
+#define VCS_MAJOR 7
+#define LOOP_MAJOR 7
+#define SCSI_DISK_MAJOR 8
+#define SCSI_TAPE_MAJOR 9
+#define MD_MAJOR 9
+#define MISC_MAJOR 10
+#define SCSI_CDROM_MAJOR 11
+#define QIC02_TAPE_MAJOR 12
+#define XT_DISK_MAJOR 13
+#define SOUND_MAJOR 14
+#define CDU31A_CDROM_MAJOR 15
+#define JOYSTICK_MAJOR 15
+#define GOLDSTAR_CDROM_MAJOR 16
+#define OPTICS_CDROM_MAJOR 17
+#define SANYO_CDROM_MAJOR 18
+#define CYCLADES_MAJOR 19
+#define CYCLADESAUX_MAJOR 20
+#define MITSUMI_X_CDROM_MAJOR 20
+#define SCSI_GENERIC_MAJOR 21
+#define Z8530_MAJOR 34
+#define DIGI_MAJOR 23
+#define IDE1_MAJOR 22
+#define DIGICU_MAJOR 22
+#define MITSUMI_CDROM_MAJOR 23
+#define CDU535_CDROM_MAJOR 24
+#define STL_SERIALMAJOR 24
+#define MATSUSHITA_CDROM_MAJOR 25
+#define STL_CALLOUTMAJOR 25
+#define MATSUSHITA_CDROM2_MAJOR 26
+#define QIC117_TAPE_MAJOR 27
+#define MATSUSHITA_CDROM3_MAJOR 27
+#define MATSUSHITA_CDROM4_MAJOR 28
+#define STL_SIOMEMMAJOR 28
+#define ACSI_MAJOR 28
+#define AZTECH_CDROM_MAJOR 29
+#define GRAPHDEV_MAJOR 29 /* SparcLinux & Linux/68k /dev/fb */
+#define CM206_CDROM_MAJOR 32
+#define IDE2_MAJOR 33
+#define IDE3_MAJOR 34
+#define NETLINK_MAJOR 36
+#define IDETAPE_MAJOR 37
+#define Z2RAM_MAJOR 37
+#define RISCOM8_NORMAL_MAJOR 48
+#define RISCOM8_CALLOUT_MAJOR 49
+#define MKISS_MAJOR 55
+#define APBLOCK_MAJOR 60 /* AP1000 Block device */
+#define DDV_MAJOR 61 /* AP1000 DDV block device */
+
+#define SPECIALIX_NORMAL_MAJOR 75
+#define SPECIALIX_CALLOUT_MAJOR 76
+
+/*
+ * Tests for SCSI devices.
+ */
+
+#define SCSI_BLK_MAJOR(M) \
+ ((M) == SCSI_DISK_MAJOR \
+ || (M) == SCSI_CDROM_MAJOR)
+
+static __inline__ int scsi_blk_major(int m) {
+ return SCSI_BLK_MAJOR(m);
+}
+
+#endif
diff --git a/linux/src/include/linux/malloc.h b/linux/src/include/linux/malloc.h
new file mode 100644
index 0000000..0ef0857
--- /dev/null
+++ b/linux/src/include/linux/malloc.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_MALLOC_H
+#define _LINUX_MALLOC_H
+
+#include <linux/mm.h>
+
+void * kmalloc(unsigned int size, int priority);
+void kfree(void * obj);
+
+#define kfree_s(a,b) kfree(a)
+
+#endif /* _LINUX_MALLOC_H */
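
[Editorial note: a small sketch of typical kmalloc()/kfree() use. GFP_KERNEL comes from
<linux/mm.h>, included above, and may sleep, so it is only safe in process context;
"struct pkt_buf" is made up for illustration.]

    struct pkt_buf { int len, used; };

    static struct pkt_buf *pkt_buf_alloc(void)
    {
            struct pkt_buf *p = kmalloc(sizeof(*p), GFP_KERNEL);

            if (!p)
                    return NULL;          /* allocation can fail */
            p->len = p->used = 0;
            return p;
    }

    static void pkt_buf_free(struct pkt_buf *p)
    {
            kfree(p);                     /* kfree_s(p, sizeof(*p)) would do the same */
    }
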
diff --git a/linux/src/include/linux/mc146818rtc.h b/linux/src/include/linux/mc146818rtc.h
new file mode 100644
index 0000000..0a2efb6
--- /dev/null
+++ b/linux/src/include/linux/mc146818rtc.h
@@ -0,0 +1,149 @@
+/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
+ * Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993
+ * derived from Data Sheet, Copyright Motorola 1984 (!).
+ * It was written to be part of the Linux operating system.
+ */
+/* permission is hereby granted to copy, modify and redistribute this code
+ * in terms of the GNU Library General Public License, Version 2 or later,
+ * at your option.
+ */
+
+#ifndef _MC146818RTC_H
+#define _MC146818RTC_H
+#include <asm/io.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1
+#endif
+
+#define CMOS_READ(addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+inb_p(RTC_PORT(1)); \
+})
+#define CMOS_WRITE(val, addr) ({ \
+outb_p((addr),RTC_PORT(0)); \
+outb_p((val),RTC_PORT(1)); \
+})
+
+/**********************************************************************
+ * register summary
+ **********************************************************************/
+#define RTC_SECONDS 0
+#define RTC_SECONDS_ALARM 1
+#define RTC_MINUTES 2
+#define RTC_MINUTES_ALARM 3
+#define RTC_HOURS 4
+#define RTC_HOURS_ALARM 5
+/* RTC_*_alarm is always true if 2 MSBs are set */
+# define RTC_ALARM_DONT_CARE 0xC0
+
+#define RTC_DAY_OF_WEEK 6
+#define RTC_DAY_OF_MONTH 7
+#define RTC_MONTH 8
+#define RTC_YEAR 9
+
+/* control registers - Moto names
+ */
+#define RTC_REG_A 10
+#define RTC_REG_B 11
+#define RTC_REG_C 12
+#define RTC_REG_D 13
+
+/**********************************************************************
+ * register details
+ **********************************************************************/
+#define RTC_FREQ_SELECT RTC_REG_A
+
+/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus,
+ * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete,
+ * totalling to a max high interval of 2.228 ms.
+ */
+# define RTC_UIP 0x80
+# define RTC_DIV_CTL 0x70
+ /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */
+# define RTC_REF_CLCK_4MHZ 0x00
+# define RTC_REF_CLCK_1MHZ 0x10
+# define RTC_REF_CLCK_32KHZ 0x20
+ /* 2 values for divider stage reset, others for "testing purposes only" */
+# define RTC_DIV_RESET1 0x60
+# define RTC_DIV_RESET2 0x70
+ /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
+# define RTC_RATE_SELECT 0x0F
+
+/**********************************************************************/
+#define RTC_CONTROL RTC_REG_B
+# define RTC_SET 0x80 /* disable updates for clock setting */
+# define RTC_PIE 0x40 /* periodic interrupt enable */
+# define RTC_AIE 0x20 /* alarm interrupt enable */
+# define RTC_UIE 0x10 /* update-finished interrupt enable */
+# define RTC_SQWE 0x08 /* enable square-wave output */
+# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+# define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
+
+/**********************************************************************/
+#define RTC_INTR_FLAGS RTC_REG_C
+/* caution - cleared by read */
+# define RTC_IRQF 0x80 /* any of the following 3 is active */
+# define RTC_PF 0x40
+# define RTC_AF 0x20
+# define RTC_UF 0x10
+
+/**********************************************************************/
+#define RTC_VALID RTC_REG_D
+# define RTC_VRT 0x80 /* valid RAM and time */
+/**********************************************************************/
+
+/* example: !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+ * determines if the following two #defines are needed
+ */
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+/*
+ * The struct used to pass data via the following ioctl. Similar to the
+ * struct tm in <time.h>, but it needs to be here so that the kernel
+ * source is self contained, allowing cross-compiles, etc. etc.
+ */
+
+struct rtc_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+};
+
+/*
+ * ioctl calls that are permitted to the /dev/rtc interface, if
+ * CONFIG_RTC was enabled.
+ */
+
+#define RTC_AIE_ON _IO('p', 0x01) /* Alarm int. enable on */
+#define RTC_AIE_OFF _IO('p', 0x02) /* ... off */
+#define RTC_UIE_ON _IO('p', 0x03) /* Update int. enable on */
+#define RTC_UIE_OFF _IO('p', 0x04) /* ... off */
+#define RTC_PIE_ON _IO('p', 0x05) /* Periodic int. enable on */
+#define RTC_PIE_OFF _IO('p', 0x06) /* ... off */
+
+#define RTC_ALM_SET _IOW('p', 0x07, struct rtc_time) /* Set alarm time */
+#define RTC_ALM_READ _IOR('p', 0x08, struct rtc_time) /* Read alarm time */
+#define RTC_RD_TIME _IOR('p', 0x09, struct rtc_time) /* Read RTC time */
+#define RTC_SET_TIME _IOW('p', 0x0a, struct rtc_time) /* Set RTC time */
+#define RTC_IRQP_READ _IOR('p', 0x0b, unsigned long) /* Read IRQ rate */
+#define RTC_IRQP_SET _IOW('p', 0x0c, unsigned long) /* Set IRQ rate */
+#define RTC_EPOCH_READ _IOR('p', 0x0d, unsigned long) /* Read epoch */
+#define RTC_EPOCH_SET _IOW('p', 0x0e, unsigned long) /* Set epoch */
+
+
+#endif /* _MC146818RTC_H */
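
[Editorial note: a sketch of reading the current seconds value the way the register
descriptions above suggest: wait for UIP to clear, read, then convert from BCD unless
the clock runs in binary mode. The helper is hypothetical.]

    static unsigned int read_rtc_seconds(void)
    {
            unsigned int sec;

            while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
                    ;                               /* update in progress (< 2.3 ms) */

            sec = CMOS_READ(RTC_SECONDS);
            if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
                    BCD_TO_BIN(sec);                /* converts in place */
            return sec;
    }
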
diff --git a/linux/src/include/linux/md.h b/linux/src/include/linux/md.h
new file mode 100644
index 0000000..413beb7
--- /dev/null
+++ b/linux/src/include/linux/md.h
@@ -0,0 +1,275 @@
+/*
+ md.h : Multiple Devices driver for Linux
+ Copyright (C) 1994-96 Marc ZYNGIER
+ <zyngier@ufr-info-p7.ibp.fr> or
+ <maz@gloups.fdn.fr>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _MD_H
+#define _MD_H
+
+#include <asm/segment.h>
+#include <linux/major.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * Different major versions are not compatible.
+ * Different minor versions are only downward compatible.
+ * Different patchlevel versions are downward and upward compatible.
+ */
+#define MD_MAJOR_VERSION 0
+#define MD_MINOR_VERSION 36
+#define MD_PATCHLEVEL_VERSION 3
+
+/* ioctls */
+#define REGISTER_DEV _IO (MD_MAJOR, 1)
+#define START_MD _IO (MD_MAJOR, 2)
+#define STOP_MD _IO (MD_MAJOR, 3)
+
+/*
+ personalities :
+ Byte 0 : Chunk size factor
+ Byte 1 : Fault tolerance count for each physical device
+ ( 0 means no fault tolerance,
+ 0xFF means always tolerate faults), not used by now.
+ Byte 2 : Personality
+ Byte 3 : Reserved.
+ */
+
+#define FAULT_SHIFT 8
+#define PERSONALITY_SHIFT 16
+
+#define FACTOR_MASK 0x000000FFUL
+#define FAULT_MASK 0x0000FF00UL
+#define PERSONALITY_MASK 0x00FF0000UL
+
+#define MD_RESERVED 0 /* Not used by now */
+#define LINEAR (1UL << PERSONALITY_SHIFT)
+#define STRIPED (2UL << PERSONALITY_SHIFT)
+#define RAID0 STRIPED
+#define RAID1 (3UL << PERSONALITY_SHIFT)
+#define RAID5 (4UL << PERSONALITY_SHIFT)
+#define MAX_PERSONALITY 5
+
+/*
+ * MD superblock.
+ *
+ * The MD superblock maintains some statistics on each MD configuration.
+ * Each real device in the MD set contains it near the end of the device.
+ * Some of the ideas are copied from the ext2fs implementation.
+ *
+ * We currently use 4096 bytes as follows:
+ *
+ * word offset function
+ *
+ * 0 - 31 Constant generic MD device information.
+ * 32 - 63 Generic state information.
+ * 64 - 127 Personality specific information.
+ * 128 - 511 12 32-words descriptors of the disks in the raid set.
+ * 512 - 911 Reserved.
+ * 912 - 1023 Disk specific descriptor.
+ */
+
+/*
+ * If x is the real device size in bytes, we return an apparent size of:
+ *
+ * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
+ *
+ * and place the 4kB superblock at offset y.
+ */
+#define MD_RESERVED_BYTES (64 * 1024)
+#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
+#define MD_RESERVED_BLOCKS (MD_RESERVED_BYTES / BLOCK_SIZE)
+
+#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
+#define MD_NEW_SIZE_BLOCKS(x) ((x & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS)
+
+#define MD_SB_BYTES 4096
+#define MD_SB_WORDS (MD_SB_BYTES / 4)
+#define MD_SB_BLOCKS (MD_SB_BYTES / BLOCK_SIZE)
+#define MD_SB_SECTORS (MD_SB_BYTES / 512)
+
+/*
+ * The following are counted in 32-bit words
+ */
+#define MD_SB_GENERIC_OFFSET 0
+#define MD_SB_PERSONALITY_OFFSET 64
+#define MD_SB_DISKS_OFFSET 128
+#define MD_SB_DESCRIPTOR_OFFSET 992
+
+#define MD_SB_GENERIC_CONSTANT_WORDS 32
+#define MD_SB_GENERIC_STATE_WORDS 32
+#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
+#define MD_SB_PERSONALITY_WORDS 64
+#define MD_SB_DISKS_WORDS 384
+#define MD_SB_DESCRIPTOR_WORDS 32
+#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
+#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
+#define MD_SB_DISKS (MD_SB_DISKS_WORDS / MD_SB_DESCRIPTOR_WORDS)
+
+/*
+ * Device "operational" state bits
+ */
+#define MD_FAULTY_DEVICE 0 /* Device is faulty / operational */
+#define MD_ACTIVE_DEVICE 1 /* Device is a part of the raid set / spare disk */
+#define MD_SYNC_DEVICE 2 /* Device is in sync with the raid set */
+
+typedef struct md_device_descriptor_s {
+ __u32 number; /* 0 Device number in the entire set */
+ __u32 major; /* 1 Device major number */
+ __u32 minor; /* 2 Device minor number */
+ __u32 raid_disk; /* 3 The role of the device in the raid set */
+ __u32 state; /* 4 Operational state */
+ __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
+} md_descriptor_t;
+
+#define MD_SB_MAGIC 0xa92b4efc
+
+/*
+ * Superblock state bits
+ */
+#define MD_SB_CLEAN 0
+#define MD_SB_ERRORS 1
+
+typedef struct md_superblock_s {
+
+ /*
+ * Constant generic information
+ */
+ __u32 md_magic; /* 0 MD identifier */
+ __u32 major_version; /* 1 major version to which the set conforms */
+ __u32 minor_version; /* 2 minor version to which the set conforms */
+ __u32 patch_version; /* 3 patchlevel version to which the set conforms */
+ __u32 gvalid_words; /* 4 Number of non-reserved words in this section */
+ __u32 set_magic; /* 5 Raid set identifier */
+ __u32 ctime; /* 6 Creation time */
+ __u32 level; /* 7 Raid personality (mirroring, raid5, ...) */
+ __u32 size; /* 8 Apparent size of each individual disk, in kB */
+ __u32 nr_disks; /* 9 Number of total disks in the raid set */
+ __u32 raid_disks; /* 10 Number of disks in a fully functional raid set */
+ __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 11];
+
+ /*
+ * Generic state information
+ */
+ __u32 utime; /* 0 Superblock update time */
+ __u32 state; /* 1 State bits (clean, ...) */
+ __u32 active_disks; /* 2 Number of currently active disks (some non-faulty disks might not be in sync) */
+ __u32 working_disks; /* 3 Number of working disks */
+ __u32 failed_disks; /* 4 Number of failed disks */
+ __u32 spare_disks; /* 5 Number of spare disks */
+ __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 6];
+
+ /*
+ * Personality information
+ */
+ __u32 parity_algorithm;
+ __u32 chunk_size;
+ __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 2];
+
+ /*
+ * Disks information
+ */
+ md_descriptor_t disks[MD_SB_DISKS];
+
+ /*
+ * Reserved
+ */
+ __u32 reserved[MD_SB_RESERVED_WORDS];
+
+ /*
+ * Active descriptor
+ */
+ md_descriptor_t descriptor;
+} md_superblock_t;
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+
+#define MAX_REAL 8 /* Max number of physical dev per md dev */
+#define MAX_MD_DEV 4 /* Max number of md dev */
+#define MAX_MD_THREADS 2 /* Max number of kernel threads */
+
+#define FACTOR(a) ((a)->repartition & FACTOR_MASK)
+#define MAX_FAULT(a) (((a)->repartition & FAULT_MASK)>>8)
+#define PERSONALITY(a) ((a)->repartition & PERSONALITY_MASK)
+
+#define FACTOR_SHIFT(a) (PAGE_SHIFT + (a) - 10)
+
+struct real_dev
+{
+ kdev_t dev; /* Device number */
+ int size; /* Device size (in blocks) */
+ int offset; /* Real device offset (in blocks) in md dev
+ (only used in linear mode) */
+ struct inode *inode; /* Lock inode */
+ md_superblock_t *sb;
+ u32 sb_offset;
+};
+
+struct md_dev;
+
+struct md_personality
+{
+ char *name;
+ int (*map)(struct md_dev *md_dev, kdev_t *rdev,
+ unsigned long *rsector, unsigned long size);
+ int (*make_request)(struct md_dev *md_dev, int rw, struct buffer_head * bh);
+ void (*end_request)(struct buffer_head * bh, int uptodate);
+ int (*run)(int minor, struct md_dev *md_dev);
+ int (*stop)(int minor, struct md_dev *md_dev);
+ int (*status)(char *page, int minor, struct md_dev *md_dev);
+ int (*ioctl)(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+ int max_invalid_dev;
+ int (*error_handler)(struct md_dev *md_dev, kdev_t dev);
+};
+
+struct md_dev
+{
+ struct real_dev devices[MAX_REAL];
+ struct md_personality *pers;
+ md_superblock_t *sb;
+ int sb_dirty;
+ int repartition;
+ int busy;
+ int nb_dev;
+ void *private;
+};
+
+struct md_thread {
+ void (*run) (void *data);
+ void *data;
+ struct wait_queue *wqueue;
+ __u32 flags;
+};
+
+#define THREAD_WAKEUP 0
+
+extern struct md_dev md_dev[MAX_MD_DEV];
+extern int md_size[MAX_MD_DEV];
+
+extern char *partition_name (kdev_t dev);
+
+extern int register_md_personality (int p_num, struct md_personality *p);
+extern int unregister_md_personality (int p_num);
+extern struct md_thread *md_register_thread (void (*run) (void *data), void *data);
+extern void md_unregister_thread (struct md_thread *thread);
+extern void md_wakeup_thread(struct md_thread *thread);
+extern int md_update_sb (int minor);
+
+#endif /* __KERNEL__ */
+#endif /* _MD_H */
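
[Editorial note: a sketch of where the 4 kB MD superblock lands on a member device,
using the size macros above. Sizes are in 512-byte sectors; the device size and the
helper are made up, and printk is assumed available.]

    static void md_sb_location(unsigned long dev_sectors)
    {
            /* Apparent size: device rounded down to a 64 kB boundary,
             * minus one 64 kB reserved area; the superblock starts there. */
            unsigned long apparent = MD_NEW_SIZE_SECTORS(dev_sectors);

            printk("superblock at sector %lu, %d sectors long\n",
                   apparent, MD_SB_SECTORS);
    }
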
diff --git a/linux/src/include/linux/mm.h b/linux/src/include/linux/mm.h
new file mode 100644
index 0000000..39522dd
--- /dev/null
+++ b/linux/src/include/linux/mm.h
@@ -0,0 +1,375 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+extern unsigned long high_memory;
+
+#include <asm/page.h>
+#include <asm/atomic.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+extern int verify_area(int, const void *, unsigned long);
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* VM area parameters */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ pgprot_t vm_page_prot;
+ unsigned short vm_flags;
+/* AVL tree of VM areas per task, sorted by address */
+ short vm_avl_height;
+ struct vm_area_struct * vm_avl_left;
+ struct vm_area_struct * vm_avl_right;
+/* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct * vm_next;
+/* for areas with inode, the circular list inode->i_mmap */
+/* for shm areas, the circular list of attaches */
+/* otherwise unused */
+ struct vm_area_struct * vm_next_share;
+ struct vm_area_struct * vm_prev_share;
+/* more */
+ struct vm_operations_struct * vm_ops;
+ unsigned long vm_offset;
+ struct inode * vm_inode;
+ unsigned long vm_pte; /* shared mem */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x0001 /* currently active flags */
+#define VM_WRITE 0x0002
+#define VM_EXEC 0x0004
+#define VM_SHARED 0x0008
+
+#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x0020
+#define VM_MAYEXEC 0x0040
+#define VM_MAYSHARE 0x0080
+
+#define VM_GROWSDOWN 0x0100 /* general info on the segment */
+#define VM_GROWSUP 0x0200
+#define VM_SHM 0x0400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x1000
+#define VM_LOCKED 0x2000
+
+#define VM_STACK_FLAGS 0x0177
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
+ void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
+ int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
+ void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
+ unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
+ unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
+ unsigned long page);
+ int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
+ pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
+};
+
+/*
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in page cache lookup, the second line
+ * is used for linear searches (eg. clock algorithm scans).
+ */
+typedef struct page {
+ /* these must be first (free area handling) */
+ struct page *next;
+ struct page *prev;
+ struct inode *inode;
+ unsigned long offset;
+ struct page *next_hash;
+ atomic_t count;
+ unsigned flags; /* atomic flags, some possibly updated asynchronously */
+ unsigned dirty:16,
+ age:8;
+ struct wait_queue *wait;
+ struct page *prev_hash;
+ struct buffer_head * buffers;
+ unsigned long swap_unlock_entry;
+ unsigned long map_nr; /* page->map_nr == page - mem_map */
+} mem_map_t;
+
+/* Page flag bit values */
+#define PG_locked 0
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+#define PG_free_after 4
+#define PG_decr_after 5
+#define PG_swap_unlock_after 6
+#define PG_DMA 7
+#define PG_reserved 31
+
+/* Make it prettier to test the above... */
+#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
+#define PageError(page) (test_bit(PG_error, &(page)->flags))
+#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
+#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
+#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
+#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
+#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
+#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
+#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
+#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
+
+/*
+ * page->reserved denotes a page which must never be accessed (which
+ * may not even be present).
+ *
+ * page->dma is set for those pages which lie in the range of
+ * physical addresses capable of carrying DMA transfers.
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used for kmalloc() or anyone else who does a
+ * get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->inode is the inode, and page->offset is the file offset
+ * of the page (not necessarily a multiple of PAGE_SIZE).
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page.
+ *
+ * All pages belonging to an inode make up a doubly linked list
+ * inode->i_pages, using the fields page->next and page->prev. (These
+ * fields are also used for freelist management when page->count==0.)
+ * There is also a hash table mapping (inode,offset) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->prev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, page->locked is true. This bit is set before I/O
+ * and reset when I/O completes. page->wait is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * page->uptodate tells whether the page's contents is valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ * When a write completes, and page->free_after is true, the page is
+ * freed without any further delay.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * page->referenced bit, which is set any time the system accesses
+ * that page through the (inode,offset) hash table.
+ * There is also the page->age counter, which implements a linear
+ * decay (why not an exponential decay?), see swapctl.h.
+ */
+
+extern mem_map_t * mem_map;
+
+/*
+ * This is timing-critical - most of the time in getting a new page
+ * goes to clearing the page. If you want a page without the clearing
+ * overhead, just use __get_free_page() directly..
+ */
+#define __get_free_page(priority) __get_free_pages((priority),0,0)
+#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
+extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
+
+extern inline unsigned long get_free_page(int priority)
+{
+ unsigned long page;
+
+ page = __get_free_page(priority);
+ if (page)
+ memset((void *) page, 0, PAGE_SIZE);
+ return page;
+}
+
+/* memory.c & swap.c*/
+
+#define free_page(addr) free_pages((addr),0)
+extern void free_pages(unsigned long addr, unsigned long order);
+extern void __free_page(struct page *);
+
+extern void show_free_areas(void);
+extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
+ unsigned long address);
+
+extern void free_page_tables(struct mm_struct * mm);
+extern void clear_page_tables(struct task_struct * tsk);
+extern int new_page_tables(struct task_struct * tsk);
+extern int copy_page_tables(struct task_struct * to);
+
+extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern void vmtruncate(struct inode * inode, unsigned long offset);
+extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
+extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+
+extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
+extern void mem_init(unsigned long start_mem, unsigned long end_mem);
+extern void show_mem(void);
+extern void oom(struct task_struct * tsk);
+extern void si_meminfo(struct sysinfo * val);
+
+/* vmalloc.c */
+
+extern void * vmalloc(unsigned long size);
+extern void * vremap(unsigned long offset, unsigned long size);
+extern void vfree(void * addr);
+extern int vread(char *buf, char *addr, int count);
+
+/* mmap.c */
+extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long off);
+extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
+extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void remove_shared_vm_struct(struct vm_area_struct *);
+extern void build_mmap_avl(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+extern int do_munmap(unsigned long, size_t);
+extern unsigned long get_unmapped_area(unsigned long, unsigned long);
+
+/* filemap.c */
+extern unsigned long page_unuse(unsigned long);
+extern int shrink_mmap(int, int, int);
+extern void truncate_inode_pages(struct inode *, unsigned long);
+
+#define GFP_BUFFER 0x00
+#define GFP_ATOMIC 0x01
+#define GFP_USER 0x02
+#define GFP_KERNEL 0x03
+#define GFP_NOBUFFER 0x04
+#define GFP_NFS 0x05
+#define GFP_IO 0x06
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA 0x80
+
+#define GFP_LEVEL_MASK 0xf
+
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ address &= PAGE_MASK;
+ grow = vma->vm_start - address;
+ if (vma->vm_end - address
+ > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
+ (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
+ > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+ vma->vm_start = address;
+ vma->vm_offset -= grow;
+ vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
+ return 0;
+}
+
+#define avl_empty (struct vm_area_struct *) NULL
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct * result = NULL;
+
+ if (mm) {
+ struct vm_area_struct * tree = mm->mmap_avl;
+ for (;;) {
+ if (tree == avl_empty)
+ break;
+ if (tree->vm_end > addr) {
+ result = tree;
+ if (tree->vm_start <= addr)
+ break;
+ tree = tree->vm_avl_left;
+ } else
+ tree = tree->vm_avl_right;
+ }
+ }
+ return result;
+}
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma;
+
+ vma = find_vma(mm,start_addr);
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
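
[Editorial note: a sketch of how a fault path might use find_vma(), following the comment
above it: the function returns the first VMA with addr < vm_end, which may still start
above addr (a hole), so both ends have to be checked. The helper is hypothetical.]

    static int addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma = find_vma(mm, addr);

            if (!vma)
                    return 0;               /* above every mapping */
            if (vma->vm_start > addr)
                    return 0;               /* falls in a hole below this VMA */
            return 1;                       /* addr lies inside [vm_start, vm_end) */
    }
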
diff --git a/linux/src/include/linux/module.h b/linux/src/include/linux/module.h
new file mode 100644
index 0000000..acc2540
--- /dev/null
+++ b/linux/src/include/linux/module.h
@@ -0,0 +1,116 @@
+/*
+ * Dynamic loading of modules into the kernel.
+ *
+ * Modified by Bjorn Ekwall <bj0rn@blox.se>
+ */
+
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+
+#ifdef __GENKSYMS__
+# define _set_ver(sym,vers) sym
+# undef MODVERSIONS
+# define MODVERSIONS
+#else /* ! __GENKSYMS__ */
+# if defined(MODVERSIONS) && !defined(MODULE) && defined(EXPORT_SYMTAB)
+# define _set_ver(sym,vers) sym
+# include <linux/modversions.h>
+# endif
+#endif /* __GENKSYMS__ */
+
+/* values of module.state */
+#define MOD_UNINITIALIZED 0
+#define MOD_RUNNING 1
+#define MOD_DELETED 2
+
+/* maximum length of module name */
+#define MOD_MAX_NAME 64
+
+/* magic marker for modules inserted from kerneld, to be auto-reaped */
+#define MOD_AUTOCLEAN 0x40000000 /* big enough, but no sign problems... */
+#define MOD_VISITED 0x20000000 /* Thanks Jacques! */
+
+/* maximum length of symbol name */
+#define SYM_MAX_NAME 60
+
+struct kernel_sym { /* sent to "insmod" */
+ unsigned long value; /* value of symbol */
+ char name[SYM_MAX_NAME]; /* name of symbol */
+};
+
+struct module_ref {
+ struct module *module;
+ struct module_ref *next;
+};
+
+struct internal_symbol {
+ void *addr;
+ const char *name;
+};
+
+struct symbol_table { /* received from "insmod" */
+ int size; /* total, including string table!!! */
+ int n_symbols;
+ int n_refs;
+ struct internal_symbol symbol[0]; /* actual size defined by n_symbols */
+ struct module_ref ref[0]; /* actual size defined by n_refs */
+};
+/*
+ * Note: The string table follows immediately after the symbol table in memory!
+ */
+
+struct module {
+ struct module *next;
+ struct module_ref *ref; /* the list of modules that refer to me */
+ struct symbol_table *symtab;
+ const char *name;
+ int size; /* size of module in pages */
+ void *addr; /* address of module */
+ int state;
+ void (*cleanup)(void); /* cleanup routine */
+};
+
+struct mod_routines {
+ int (*init)(void); /* initialization routine */
+ void (*cleanup)(void); /* cleanup routine */
+};
+
+/*
+ * The first word of the module contains the use count.
+ */
+#define GET_USE_COUNT(module) (* (long *) (module)->addr)
+/*
+ * define the count variable, and usage macros.
+ */
+
+#ifdef MODULE
+
+extern long mod_use_count_;
+#define MOD_INC_USE_COUNT (mod_use_count_++, mod_use_count_ |= MOD_VISITED)
+#define MOD_DEC_USE_COUNT (mod_use_count_--, mod_use_count_ |= MOD_VISITED)
+#define MOD_IN_USE ((mod_use_count_ & ~(MOD_AUTOCLEAN | MOD_VISITED)) != 0)
+
+#ifndef __NO_VERSION__
+#include <linux/version.h>
+char kernel_version[]=UTS_RELEASE;
+#endif
+
+#if defined(MODVERSIONS) && !defined(__GENKSYMS__)
+int Using_Versions; /* gcc will handle this global (used as a flag) correctly */
+#endif
+
+#else
+
+#define EXPORT_SYMBOL(sym)
+
+#define MOD_INC_USE_COUNT do { } while (0)
+#define MOD_DEC_USE_COUNT do { } while (0)
+#define MOD_IN_USE 1
+#define SET_MODULE_OWNER(dev) do{ } while(0)
+
+#endif
+
+/* insert new symbol table */
+#define register_symtab(symtab)
+
+#endif
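
[Editorial note: a sketch of the use-count discipline for a loadable driver built with
MODULE defined. The open/close pair is hypothetical; the point is that the count stays
raised for as long as the module can be re-entered, so MOD_IN_USE keeps a premature
rmmod away.]

    static int example_open(void)
    {
            MOD_INC_USE_COUNT;      /* also marks the module MOD_VISITED for kerneld */
            return 0;
    }

    static void example_close(void)
    {
            MOD_DEC_USE_COUNT;
    }
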
diff --git a/linux/src/include/linux/mount.h b/linux/src/include/linux/mount.h
new file mode 100644
index 0000000..a2f941b
--- /dev/null
+++ b/linux/src/include/linux/mount.h
@@ -0,0 +1,30 @@
+/*
+ *
+ * Definitions for mount interface. This describes the linked list of
+ * mounted filesystems that the kernel maintains.
+ *
+ * Author: Marco van Wieringen <mvw@mcs.ow.nl> <mvw@tnix.net> <mvw@cistron.nl>
+ *
+ * Version: $Id: mount.h,v 1.1 1999/04/26 05:56:47 tb Exp $
+ *
+ */
+#ifndef _LINUX_MOUNT_H
+#define _LINUX_MOUNT_H
+
+struct vfsmount
+{
+ kdev_t mnt_dev; /* Device this applies to */
+ char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
+ char *mnt_dirname; /* Name of directory mounted on */
+ unsigned int mnt_flags; /* Flags of this device */
+ struct semaphore mnt_sem; /* lock device while I/O in progress */
+ struct super_block *mnt_sb; /* pointer to superblock */
+ struct file *mnt_quotas[MAXQUOTAS]; /* fp's to quotafiles */
+ time_t mnt_iexp[MAXQUOTAS]; /* expiretime for inodes */
+ time_t mnt_bexp[MAXQUOTAS]; /* expiretime for blocks */
+ struct vfsmount *mnt_next; /* pointer to next in linkedlist */
+};
+
+struct vfsmount *lookup_vfsmnt(kdev_t dev);
+
+#endif /* _LINUX_MOUNT_H */
diff --git a/linux/src/include/linux/net.h b/linux/src/include/linux/net.h
new file mode 100644
index 0000000..a338a8e
--- /dev/null
+++ b/linux/src/include/linux/net.h
@@ -0,0 +1,130 @@
+/*
+ * NET An implementation of the SOCKET network access protocol.
+ * This is the master header file for the Linux NET layer,
+ * or, in plain English: the networking handling part of the
+ * kernel.
+ *
+ * Version: @(#)net.h 1.0.3 05/25/93
+ *
+ * Authors: Orest Zborowski, <obz@Kodak.COM>
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NET_H
+#define _LINUX_NET_H
+
+
+#include <linux/wait.h>
+#include <linux/socket.h>
+
+#define NPROTO 16 /* should be enough for now.. */
+
+
+#define SYS_SOCKET 1 /* sys_socket(2) */
+#define SYS_BIND 2 /* sys_bind(2) */
+#define SYS_CONNECT 3 /* sys_connect(2) */
+#define SYS_LISTEN 4 /* sys_listen(2) */
+#define SYS_ACCEPT 5 /* sys_accept(2) */
+#define SYS_GETSOCKNAME 6 /* sys_getsockname(2) */
+#define SYS_GETPEERNAME 7 /* sys_getpeername(2) */
+#define SYS_SOCKETPAIR 8 /* sys_socketpair(2) */
+#define SYS_SEND 9 /* sys_send(2) */
+#define SYS_RECV 10 /* sys_recv(2) */
+#define SYS_SENDTO 11 /* sys_sendto(2) */
+#define SYS_RECVFROM 12 /* sys_recvfrom(2) */
+#define SYS_SHUTDOWN 13 /* sys_shutdown(2) */
+#define SYS_SETSOCKOPT 14 /* sys_setsockopt(2) */
+#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
+#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
+#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
+
+
+typedef enum {
+ SS_FREE = 0, /* not allocated */
+ SS_UNCONNECTED, /* unconnected to any socket */
+ SS_CONNECTING, /* in process of connecting */
+ SS_CONNECTED, /* connected to socket */
+ SS_DISCONNECTING /* in process of disconnecting */
+} socket_state;
+
+#define SO_ACCEPTCON (1<<16) /* performed a listen */
+#define SO_WAITDATA (1<<17) /* wait data to read */
+#define SO_NOSPACE (1<<18) /* no space to write */
+
+#ifdef __KERNEL__
+/*
+ * Internal representation of a socket. not all the fields are used by
+ * all configurations:
+ *
+ * server client
+ * conn client connected to server connected to
+ * iconn list of clients -unused-
+ * awaiting connections
+ * wait sleep for clients, sleep for connection,
+ * sleep for i/o sleep for i/o
+ */
+struct socket {
+ short type; /* SOCK_STREAM, ... */
+ socket_state state;
+ long flags;
+ struct proto_ops *ops; /* protocols do most everything */
+ void *data; /* protocol data */
+ struct socket *conn; /* server socket connected to */
+ struct socket *iconn; /* incomplete client conn.s */
+ struct socket *next;
+ struct wait_queue **wait; /* ptr to place to wait on */
+ struct inode *inode;
+ struct fasync_struct *fasync_list; /* Asynchronous wake up list */
+ struct file *file; /* File back pointer for gc */
+};
+
+#define SOCK_INODE(S) ((S)->inode)
+
+struct proto_ops {
+ int family;
+
+ int (*create) (struct socket *sock, int protocol);
+ int (*dup) (struct socket *newsock, struct socket *oldsock);
+ int (*release) (struct socket *sock, struct socket *peer);
+ int (*bind) (struct socket *sock, struct sockaddr *umyaddr,
+ int sockaddr_len);
+ int (*connect) (struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags);
+ int (*socketpair) (struct socket *sock1, struct socket *sock2);
+ int (*accept) (struct socket *sock, struct socket *newsock,
+ int flags);
+ int (*getname) (struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer);
+ int (*select) (struct socket *sock, int sel_type,
+ select_table *wait);
+ int (*ioctl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ int (*listen) (struct socket *sock, int len);
+ int (*shutdown) (struct socket *sock, int flags);
+ int (*setsockopt) (struct socket *sock, int level, int optname,
+ char *optval, int optlen);
+ int (*getsockopt) (struct socket *sock, int level, int optname,
+ char *optval, int *optlen);
+ int (*fcntl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ int (*sendmsg) (struct socket *sock, struct msghdr *m, int total_len, int nonblock, int flags);
+ int (*recvmsg) (struct socket *sock, struct msghdr *m, int total_len, int nonblock, int flags, int *addr_len);
+};
+
+struct net_proto {
+ const char *name; /* Protocol name */
+ void (*init_func)(struct net_proto *); /* Bootstrap */
+};
+
+extern int sock_wake_async(struct socket *sock, int how);
+extern int sock_register(int family, struct proto_ops *ops);
+extern int sock_unregister(int family);
+extern struct socket *sock_alloc(void);
+extern void sock_release(struct socket *sock);
+#endif /* __KERNEL__ */
+#endif /* _LINUX_NET_H */
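
[Editorial note: a sketch of how a protocol family hooks itself up through this interface.
The ops table is left mostly unfilled; a real family implements every callback in struct
proto_ops. The names are hypothetical, and printk is assumed available.]

    static struct proto_ops example_proto_ops = {
            AF_UNIX,        /* family; the remaining callbacks would follow here */
    };

    static void example_proto_init(struct net_proto *pro)
    {
            (void) pro;
            if (sock_register(example_proto_ops.family, &example_proto_ops) < 0)
                    printk(KERN_ERR "example: sock_register failed\n");
    }
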
diff --git a/linux/src/include/linux/netdevice.h b/linux/src/include/linux/netdevice.h
new file mode 100644
index 0000000..5de278a
--- /dev/null
+++ b/linux/src/include/linux/netdevice.h
@@ -0,0 +1,313 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.11 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@super.org>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ * Added extern for fddi_setup()
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <linux/config.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+
+/* for future expansion when we will have different priorities. */
+#define DEV_NUMBUFFS 3
+#define MAX_ADDR_LEN 7
+#ifndef CONFIG_AX25
+#ifndef CONFIG_AX25_MODULE
+#ifndef CONFIG_TR
+#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE)
+#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
+#else
+#define MAX_HEADER 80 /* We need to allow for having tunnel headers */
+#endif /* IPIP */
+#else
+#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
+#endif /* TR */
+#else
+#define MAX_HEADER 96 /* AX.25 + NET/ROM module*/
+#endif /* AX.25 module */
+#else
+#define MAX_HEADER 96 /* AX.25 + NET/ROM */
+#endif /* AX.25 */
+
+#define IS_MYADDR 1 /* address is (one of) our own */
+#define IS_LOOPBACK 2 /* address is for LOOPBACK */
+#define IS_BROADCAST 3 /* address is a valid broadcast */
+#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
+#define IS_MULTICAST 5 /* Multicast IP address */
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+
+/*
+ * We tag multicasts with these structures.
+ */
+
+struct dev_mc_list
+{
+ struct dev_mc_list *next;
+ char dmi_addr[MAX_ADDR_LEN];
+ unsigned short dmi_addrlen;
+ unsigned short dmi_users;
+};
+
+struct hh_cache
+{
+ struct hh_cache *hh_next;
+ void *hh_arp; /* Opaque pointer, used by
+ * any address resolution module,
+ * not only ARP.
+ */
+ int hh_refcnt; /* number of users */
+ unsigned short hh_type; /* protocol identifier, f.e ETH_P_IP */
+ char hh_uptodate; /* hh_data is valid */
+ char hh_data[16]; /* cached hardware header */
+};
+
+/*
+ * The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ */
+struct device
+{
+
+ /*
+ * This is the first field of the "visible" part of this structure
+ * (i.e. as seen by users in the "Space.c" file). It is the name
+ * the interface.
+ */
+ char *name;
+
+ /* I/O specific fields - FIXME: Merge these and struct ifmap into one */
+ unsigned long rmem_end; /* shmem "recv" end */
+ unsigned long rmem_start; /* shmem "recv" start */
+ unsigned long mem_end; /* shared mem end */
+ unsigned long mem_start; /* shared mem start */
+ unsigned long base_addr; /* device I/O address */
+ unsigned char irq; /* device IRQ number */
+
+ /* Low-level status flags. */
+ volatile unsigned char start, /* start an operation */
+ interrupt; /* interrupt arrived */
+ unsigned long tbusy; /* transmitter busy must be long for bitops */
+
+ struct device *next;
+
+ /* The device initialization function. Called only once. */
+ int (*init)(struct device *dev);
+
+ /* Some hardware also needs these fields, but they are not part of the
+ usual set specified in Space.c. */
+ unsigned char if_port; /* Selectable AUI, TP,..*/
+ unsigned char dma; /* DMA channel */
+
+ struct enet_statistics* (*get_stats)(struct device *dev);
+
+ /*
+ * This marks the end of the "visible" part of the structure. All
+ * fields hereafter are internal to the system, and may change at
+ * will (read: may be cleaned up at will).
+ */
+
+ /* These may be needed for future network-power-down code. */
+ unsigned long trans_start; /* Time (in jiffies) of last Tx */
+ unsigned long last_rx; /* Time of last Rx */
+
+ unsigned short flags; /* interface flags (a la BSD) */
+ unsigned short family; /* address family ID (AF_INET) */
+ unsigned short metric; /* routing metric (not used) */
+ unsigned short mtu; /* interface MTU value */
+ unsigned short type; /* interface hardware type */
+ unsigned short hard_header_len; /* hardware hdr length */
+ void *priv; /* pointer to private data */
+
+ /* Interface address info. */
+ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+ unsigned char pad; /* make dev_addr aligned to 8 bytes */
+ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
+ unsigned char addr_len; /* hardware address length */
+ unsigned long pa_addr; /* protocol address */
+ unsigned long pa_brdaddr; /* protocol broadcast addr */
+ unsigned long pa_dstaddr; /* protocol P-P other side addr */
+ unsigned long pa_mask; /* protocol netmask */
+ unsigned short pa_alen; /* protocol address length */
+
+ struct dev_mc_list *mc_list; /* Multicast mac addresses */
+ int mc_count; /* Number of installed mcasts */
+
+ struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */
+ __u32 tx_queue_len; /* Max frames per queue allowed */
+
+ /* For load balancing driver pair support */
+
+ unsigned long pkt_queue; /* Packets queued */
+ struct device *slave; /* Slave device */
+ struct net_alias_info *alias_info; /* main dev alias info */
+ struct net_alias *my_alias; /* alias devs */
+
+ /* Pointer to the interface buffers. */
+ struct sk_buff_head buffs[DEV_NUMBUFFS];
+
+ /* Pointers to interface service routines. */
+ int (*open)(struct device *dev);
+ int (*stop)(struct device *dev);
+ int (*hard_start_xmit) (struct sk_buff *skb,
+ struct device *dev);
+ int (*hard_header) (struct sk_buff *skb,
+ struct device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+ int (*rebuild_header)(void *eth, struct device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+#define HAVE_MULTICAST
+ void (*set_multicast_list)(struct device *dev);
+#define HAVE_SET_MAC_ADDR
+ int (*set_mac_address)(struct device *dev, void *addr);
+#define HAVE_PRIVATE_IOCTL
+ int (*do_ioctl)(struct device *dev, struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+ int (*set_config)(struct device *dev, struct ifmap *map);
+#define HAVE_HEADER_CACHE
+ void (*header_cache_bind)(struct hh_cache **hhp, struct device *dev, unsigned short htype, __u32 daddr);
+ void (*header_cache_update)(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+#define HAVE_CHANGE_MTU
+ int (*change_mtu)(struct device *dev, int new_mtu);
+
+ struct iw_statistics* (*get_wireless_stats)(struct device *dev);
+};
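+
+/*
+ * Illustrative sketch, not part of the original interface: a driver for a
+ * hypothetical card would normally fill in the service routines above from
+ * its init function before the interface can be opened, e.g.
+ *
+ *	static int mydev_init(struct device *dev)
+ *	{
+ *		ether_setup(dev);
+ *		dev->open            = mydev_open;
+ *		dev->stop            = mydev_close;
+ *		dev->hard_start_xmit = mydev_start_xmit;
+ *		dev->get_stats       = mydev_get_stats;
+ *		return 0;
+ *	}
+ *
+ * where the mydev_* routines are hypothetical names and ether_setup()
+ * (declared further down in this file) fills in sensible Ethernet defaults
+ * for the remaining fields.
+ */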
+
+
+struct packet_type {
+ unsigned short type; /* This is really htons(ether_type). */
+ struct device * dev;
+ int (*func) (struct sk_buff *, struct device *,
+ struct packet_type *);
+ void *data;
+ struct packet_type *next;
+};
+
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+
+/* Used by dev_rint */
+#define IN_SKBUFF 1
+
+extern volatile unsigned long in_bh;
+
+extern struct device loopback_dev;
+extern struct device *dev_base;
+extern struct packet_type *ptype_base[16];
+
+
+extern int ip_addr_match(unsigned long addr1, unsigned long addr2);
+extern int ip_chk_addr(unsigned long addr);
+extern struct device *ip_dev_bynet(unsigned long daddr, unsigned long mask);
+extern unsigned long ip_my_addr(void);
+extern unsigned long ip_get_mask(unsigned long addr);
+extern struct device *ip_dev_find(unsigned long addr);
+extern struct device *dev_getbytype(unsigned short type);
+
+extern void dev_add_pack(struct packet_type *pt);
+extern void dev_remove_pack(struct packet_type *pt);
+extern struct device *dev_get(const char *name);
+extern int dev_open(struct device *dev);
+extern int dev_close(struct device *dev);
+extern void dev_queue_xmit(struct sk_buff *skb, struct device *dev,
+ int pri);
+
+#define HAVE_NETIF_RX 1
+extern void netif_rx(struct sk_buff *skb);
+extern void net_bh(void);
+extern void dev_tint(struct device *dev);
+extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int dev_ioctl(unsigned int cmd, void *);
+
+extern void dev_init(void);
+
+/* Locking protection for page faults during outputs to devices unloaded during the fault */
+
+extern int dev_lockct;
+
+/*
+ * These two don't currently need to be interrupt-safe
+ * but they may do soon. Do it properly anyway.
+ */
+
+extern __inline__ void dev_lock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct++;
+ restore_flags(flags);
+}
+
+extern __inline__ void dev_unlock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct--;
+ restore_flags(flags);
+}
+
+/*
+ * This almost never occurs, isn't in performance critical paths
+ * and we can thus be relaxed about it
+ */
+
+extern __inline__ void dev_lock_wait(void)
+{
+ while(dev_lockct)
+ schedule();
+}
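+
+/*
+ * Illustrative sketch, not part of the original interface: the intended
+ * pattern is for code that walks the device list and may sleep to bracket
+ * the walk with dev_lock_list()/dev_unlock_list(), while code about to
+ * remove a device waits for the count to drop first, e.g.
+ *
+ *	dev_lock_list();
+ *	for (dev = dev_base; dev; dev = dev->next)
+ *		... examine dev, possibly sleeping ...
+ *	dev_unlock_list();
+ *
+ * and, on the unload side, dev_lock_wait() before the device is removed.
+ */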
+
+
+/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+
+extern void ether_setup(struct device *dev);
+extern void tr_setup(struct device *dev);
+extern void fddi_setup(struct device *dev);
+extern int ether_config(struct device *dev, struct ifmap *map);
+/* Support for loadable net-drivers */
+extern int register_netdev(struct device *dev);
+extern void unregister_netdev(struct device *dev);
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+/* Functions used for multicast support */
+extern void dev_mc_upload(struct device *dev);
+extern void dev_mc_delete(struct device *dev, void *addr, int alen, int all);
+extern void dev_mc_add(struct device *dev, void *addr, int alen, int newonly);
+extern void dev_mc_discard(struct device *dev);
+/* This is the wrong place but it'll do for the moment */
+extern void ip_mc_allhost(struct device *dev);
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DEV_H */
diff --git a/linux/src/include/linux/netrom.h b/linux/src/include/linux/netrom.h
new file mode 100644
index 0000000..6939b32
--- /dev/null
+++ b/linux/src/include/linux/netrom.h
@@ -0,0 +1,34 @@
+/*
+ * These are the public elements of the Linux kernel NET/ROM implementation.
+ * For kernel AX.25 see the file ax25.h. This file requires ax25.h for the
+ * definition of the ax25_address structure.
+ */
+
+#ifndef NETROM_KERNEL_H
+#define NETROM_KERNEL_H
+
+#define NETROM_MTU 236
+
+#define NETROM_T1 1
+#define NETROM_T2 2
+#define NETROM_N2 3
+#define NETROM_T4 6
+#define NETROM_IDLE 7
+
+#define SIOCNRDECOBS (SIOCPROTOPRIVATE+2)
+
+struct nr_route_struct {
+#define NETROM_NEIGH 0
+#define NETROM_NODE 1
+ int type;
+ ax25_address callsign;
+ char device[16];
+ unsigned int quality;
+ char mnemonic[7];
+ ax25_address neighbour;
+ unsigned int obs_count;
+ unsigned int ndigis;
+ ax25_address digipeaters[AX25_MAX_DIGIS];
+};
+
+#endif
diff --git a/linux/src/include/linux/notifier.h b/linux/src/include/linux/notifier.h
new file mode 100644
index 0000000..b3c9ccf
--- /dev/null
+++ b/linux/src/include/linux/notifier.h
@@ -0,0 +1,96 @@
+/*
+ * Routines to manage notifier chains for passing status changes to any
+ * interested routines. We need this instead of hard coded call lists so
+ * that modules can poke their nose into the innards. The network devices
+ * needed them so here they are for the rest of you.
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+
+struct notifier_block
+{
+ int (*notifier_call)(struct notifier_block *this, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+};
+
+
+#ifdef __KERNEL__
+
+#define NOTIFY_DONE 0x0000 /* Don't care */
+#define NOTIFY_OK 0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+
+extern __inline__ int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+{
+ while(*list)
+ {
+ if(n->priority > (*list)->priority)
+ break;
+ list= &((*list)->next);
+ }
+ n->next = *list;
+ *list=n;
+ return 0;
+}
+
+/*
+ * Warning to any non-GPL module writers out there: these functions
+ * are GPL'd.
+ */
+
+extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+{
+ while((*nl)!=NULL)
+ {
+ if((*nl)==n)
+ {
+ *nl=n->next;
+ return 0;
+ }
+ nl=&((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+/*
+ * This is one of those things that is generally shorter inline
+ */
+
+extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+{
+ int ret=NOTIFY_DONE;
+ struct notifier_block *nb = *n;
+ while(nb)
+ {
+ ret=nb->notifier_call(nb,val,v);
+ if(ret&NOTIFY_STOP_MASK)
+ return ret;
+ nb=nb->next;
+ }
+ return ret;
+}
+
+
+/*
+ * Declared notifiers so far. I can imagine quite a few more chains
+ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+
+/* netdevice notifier chain */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+ detected a hardware crash and restarted
+ - we can use this eg to kick tcp sessions
+ once done */
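+
+/*
+ * Illustrative sketch, not part of the original header: a protocol or
+ * module interested in these events registers a notifier_block on the
+ * netdevice chain and is called back with the event code and the device,
+ * e.g. (mymod_* are hypothetical names)
+ *
+ *	static int mymod_event(struct notifier_block *this,
+ *			       unsigned long event, void *ptr)
+ *	{
+ *		struct device *dev = (struct device *) ptr;
+ *
+ *		if (event == NETDEV_DOWN)
+ *			... drop any state kept for dev ...
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block mymod_notifier = {
+ *		mymod_event, NULL, 0
+ *	};
+ *
+ *	register_netdevice_notifier(&mymod_notifier);
+ *
+ * register_netdevice_notifier() is declared in <linux/netdevice.h>.
+ */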
+#endif
+#endif
diff --git a/linux/src/include/linux/pagemap.h b/linux/src/include/linux/pagemap.h
new file mode 100644
index 0000000..ac85c78
--- /dev/null
+++ b/linux/src/include/linux/pagemap.h
@@ -0,0 +1,146 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+#include <asm/system.h>
+
+/*
+ * Page-mapping primitive inline functions
+ *
+ * Copyright 1995 Linus Torvalds
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/swapctl.h>
+
+static inline unsigned long page_address(struct page * page)
+{
+ return PAGE_OFFSET + PAGE_SIZE * page->map_nr;
+}
+
+#define PAGE_HASH_BITS 11
+#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
+
+#define PAGE_AGE_VALUE ((PAGE_INITIAL_AGE)+(PAGE_ADVANCE))
+
+extern unsigned long page_cache_size; /* # of pages currently in the hash table */
+extern struct page * page_hash_table[PAGE_HASH_SIZE];
+
+/*
+ * We use a power-of-two hash table to avoid a modulus,
+ * and get a reasonable hash by knowing roughly how the
+ * inode pointer and offsets are distributed (ie, we
+ * roughly know which bits are "significant")
+ */
+static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
+{
+#define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
+#define o (offset >> PAGE_SHIFT)
+#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
+ return s(i+o) & (PAGE_HASH_SIZE-1);
+#undef i
+#undef o
+#undef s
+}
+
+#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset))
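+
+/*
+ * Worked example (illustrative only): with PAGE_HASH_BITS == 11 the table
+ * has 2048 buckets, and for a given (inode, offset) pair the bucket index
+ * computed above is
+ *
+ *	s(i + o) & 2047,   where s(x) = x + (x >> 11)
+ *
+ * with i derived from the inode pointer and o from the page-aligned offset.
+ * page_hash() yields a pointer to the head of that bucket's chain; the
+ * helpers below (find_page, add_page_to_hash_queue, ...) walk and update
+ * the chain through page->next_hash/prev_hash.
+ */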
+
+static inline struct page * __find_page(struct inode * inode, unsigned long offset, struct page *page)
+{
+ goto inside;
+ for (;;) {
+ page = page->next_hash;
+inside:
+ if (!page)
+ goto not_found;
+ if (page->inode != inode)
+ continue;
+ if (page->offset == offset)
+ break;
+ }
+ /* Found the page. */
+ atomic_inc(&page->count);
+ set_bit(PG_referenced, &page->flags);
+not_found:
+ return page;
+}
+
+static inline struct page *find_page(struct inode * inode, unsigned long offset)
+{
+ return __find_page(inode, offset, *page_hash(inode, offset));
+}
+
+static inline void remove_page_from_hash_queue(struct page * page)
+{
+ struct page **p;
+ struct page *next_hash, *prev_hash;
+
+ next_hash = page->next_hash;
+ prev_hash = page->prev_hash;
+ page->next_hash = NULL;
+ page->prev_hash = NULL;
+ if (next_hash)
+ next_hash->prev_hash = prev_hash;
+ if (prev_hash)
+ prev_hash->next_hash = next_hash;
+ p = page_hash(page->inode,page->offset);
+ if (*p == page)
+ *p = next_hash;
+ page_cache_size--;
+}
+
+static inline void __add_page_to_hash_queue(struct page * page, struct page **p)
+{
+ page_cache_size++;
+ set_bit(PG_referenced, &page->flags);
+ page->age = PAGE_AGE_VALUE;
+ page->prev_hash = NULL;
+ if ((page->next_hash = *p) != NULL)
+ page->next_hash->prev_hash = page;
+ *p = page;
+}
+
+static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
+{
+ __add_page_to_hash_queue(page, page_hash(inode,offset));
+}
+
+
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+ struct inode * inode = page->inode;
+
+ page->inode = NULL;
+ inode->i_nrpages--;
+ if (inode->i_pages == page)
+ inode->i_pages = page->next;
+ if (page->next)
+ page->next->prev = page->prev;
+ if (page->prev)
+ page->prev->next = page->next;
+ page->next = NULL;
+ page->prev = NULL;
+}
+
+static inline void add_page_to_inode_queue(struct inode * inode, struct page * page)
+{
+ struct page **p = &inode->i_pages;
+
+ inode->i_nrpages++;
+ page->inode = inode;
+ page->prev = NULL;
+ if ((page->next = *p) != NULL)
+ page->next->prev = page;
+ *p = page;
+}
+
+extern void __wait_on_page(struct page *);
+static inline void wait_on_page(struct page * page)
+{
+ if (PageLocked(page))
+ __wait_on_page(page);
+}
+
+extern void update_vm_cache(struct inode *, unsigned long, const char *, int);
+
+#endif
diff --git a/linux/src/include/linux/param.h b/linux/src/include/linux/param.h
new file mode 100644
index 0000000..092e92f
--- /dev/null
+++ b/linux/src/include/linux/param.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_PARAM_H
+#define _LINUX_PARAM_H
+
+#include <asm/param.h>
+
+#endif
diff --git a/linux/src/include/linux/pci.h b/linux/src/include/linux/pci.h
new file mode 100644
index 0000000..8aad3d5
--- /dev/null
+++ b/linux/src/include/linux/pci.h
@@ -0,0 +1,1116 @@
+/*
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ *
+ * For more information, please consult
+ *
+ * PCI BIOS Specification Revision
+ * PCI Local Bus Specification
+ * PCI System Design Guide
+ *
+ * PCI Special Interest Group
+ * M/S HF3-15A
+ * 5200 N.E. Elam Young Parkway
+ * Hillsboro, Oregon 97124-6497
+ * +1 (503) 696-2000
+ * +1 (800) 433-5177
+ *
+ * Manuals are $25 each or $50 for all three, plus $7 shipping
+ * within the United States, $35 abroad.
+ */
+
+
+
+/* PROCEDURE TO REPORT NEW PCI DEVICES
+ * We are trying to collect information on new PCI devices, using
+ * the standard PCI identification procedure. If some warning is
+ * displayed at boot time, please report
+ * - /proc/pci
+ * - your exact hardware description. Try to find out
+ *     which device is unknown. It may be your mainboard chipset,
+ *     PCI-CPU bridge, or PCI-ISA bridge.
+ * - If you can't find the actual information in your hardware
+ * booklet, try to read the references of the chip on the board.
+ * - Send all that to linux-pcisupport@cck.uni-kl.de
+ * and I'll add your device to the list as soon as possible
+ *
+ * BEFORE you send a mail, please check the latest linux releases
+ * to be sure it has not been recently added.
+ *
+ * Thanks
+ * Jens Maurer
+ */
+
+
+
+#ifndef LINUX_PCI_H
+#define LINUX_PCI_H
+
+/*
+ * Under PCI, each device has 256 bytes of configuration address space,
+ * of which the first 64 bytes are standardized as follows:
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_DEVICE_ID 0x02 /* 16 bits */
+#define PCI_COMMAND 0x04 /* 16 bits */
+#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
+#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
+#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
+#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
+#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_66MHZ	0x20	/* Support 66 MHz PCI 2.1 bus */
+#define PCI_STATUS_UDF 0x40 /* Support User Definable Features */
+
+#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
+#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
+#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
+#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_MEDIUM 0x200
+#define PCI_STATUS_DEVSEL_SLOW 0x400
+#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
+#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
+#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
+#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
+#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8
+ revision */
+#define PCI_REVISION_ID 0x08 /* Revision ID */
+#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
+#define PCI_CLASS_DEVICE 0x0a /* Device class */
+
+#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
+#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_BIST 0x0f /* 8 bits */
+#define PCI_BIST_CODE_MASK 0x0f /* Return result */
+#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
+#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
+
+/*
+ * Base addresses specify locations in memory or I/O space.
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
+ * 1 bits are decoded.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits */
+#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits */
+#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
+#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
+#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
+#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
+#define PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
+#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
+#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
+#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define PCI_BASE_ADDRESS_MEM_MASK (~0x0f)
+#define PCI_BASE_ADDRESS_IO_MASK (~0x03)
+/* bit 1 is reserved if address_space = 1 */
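+
+/*
+ * Illustrative sketch, not part of the original interface: to size a
+ * memory BAR as described above, write all ones, read the value back,
+ * mask off the flag bits and take the two's complement, e.g.
+ *
+ *	pcibios_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, ~0);
+ *	pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &val);
+ *	size = ~(val & PCI_BASE_ADDRESS_MEM_MASK) + 1;
+ *
+ * so a masked read-back of 0xfffe0000 means a 128 KB region.  (The
+ * pcibios_* config-space accessors are declared elsewhere, in
+ * <linux/bios32.h> in this kernel generation, not in this file.)
+ */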
+
+#define PCI_CARDBUS_CIS 0x28
+#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
+#define PCI_SUBSYSTEM_ID 0x2e
+#define PCI_ROM_ADDRESS 0x30 /* 32 bits */
+#define PCI_ROM_ADDRESS_ENABLE 0x01 /* Write 1 to enable ROM,
+ bits 31..11 are address,
+ 10..2 are reserved */
+/* 0x34-0x3b are reserved */
+#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
+#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
+#define PCI_MIN_GNT 0x3e /* 8 bits */
+#define PCI_MAX_LAT 0x3f /* 8 bits */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_SATA 0x0106
+#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a01
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+
+#define PCI_CLASS_OTHERS 0xff
+
+/*
+ * Vendor and card ID's: sort these numerically according to vendor
+ * (and according to card ID within vendor). Send all updates to
+ * <linux-pcisupport@cck.uni-kl.de>.
+ */
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_1280 0x3033
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX 0x4000
+#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+#define PCI_DEVICE_ID_NCR_53C860 0x0006
+#define PCI_DEVICE_ID_NCR_53C896 0x000b
+#define PCI_DEVICE_ID_NCR_53C895 0x000c
+#define PCI_DEVICE_ID_NCR_53C885 0x000d
+#define PCI_DEVICE_ID_NCR_53C875 0x000f
+#define PCI_DEVICE_ID_NCR_53C875J 0x008f
+
+#define PCI_VENDOR_ID_ATI 0x1002
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_215GB 0x4742
+#define PCI_DEVICE_ID_ATI_215GD 0x4744
+#define PCI_DEVICE_ID_ATI_215GI 0x4749
+#define PCI_DEVICE_ID_ATI_215GP 0x4750
+#define PCI_DEVICE_ID_ATI_215GQ 0x4751
+#define PCI_DEVICE_ID_ATI_215GT 0x4754
+#define PCI_DEVICE_ID_ATI_215GTB 0x4755
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+#define PCI_DEVICE_ID_ATI_215LG 0x4c47
+#define PCI_DEVICE_ID_ATI_264LT 0x4c54
+#define PCI_DEVICE_ID_ATI_264VT 0x5654
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+#define PCI_DEVICE_ID_VLSI_82C541 0x000c
+#define PCI_DEVICE_ID_VLSI_82C543 0x000d
+#define PCI_DEVICE_ID_VLSI_82C532 0x0101
+#define PCI_DEVICE_ID_VLSI_82C534 0x0102
+#define PCI_DEVICE_ID_VLSI_82C535 0x0104
+#define PCI_DEVICE_ID_VLSI_82C147 0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87415 0x0002
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_TGA2 0x000D
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+#define PCI_DEVICE_ID_DEC_21142 0x0019
+#define PCI_DEVICE_ID_DEC_21052 0x0021
+#define PCI_DEVICE_ID_DEC_21150 0x0022
+#define PCI_DEVICE_ID_DEC_21152 0x0024
+#define PCI_DEVICE_ID_DEC_21154 0x0026
+#define PCI_DEVICE_ID_DEC_21285 0x1065
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#define PCI_DEVICE_ID_CIRRUS_7542 0x1200
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+#define PCI_DEVICE_ID_CIRRUS_7541 0x1204
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a
+#define PCI_DEVICE_ID_IBM_TR 0x0018
+#define PCI_DEVICE_ID_IBM_82G2675 0x001d
+#define PCI_DEVICE_ID_IBM_MCA 0x0020
+#define PCI_DEVICE_ID_IBM_82351 0x0022
+#define PCI_DEVICE_ID_IBM_SERVERAID 0x002e
+#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
+#define PCI_DEVICE_ID_IBM_3780IDSP 0x007d
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_7197 0x3296
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MYS 0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
+#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
+#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000
+#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+#define PCI_DEVICE_ID_CT_65548 0x00dc
+#define PCI_DEVICE_ID_CT_65550 0x00e0
+#define PCI_DEVICE_ID_CT_65554 0x00e4
+#define PCI_DEVICE_ID_CT_65555 0x00e5
+
+#define PCI_VENDOR_ID_MIRO 0x1031
+#define PCI_DEVICE_ID_MIRO_36050 0x5601
+
+#define PCI_VENDOR_ID_NEC 0x1033
+#define PCI_DEVICE_ID_NEC_PCX2 0x0046
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_ACPI 0x0009
+#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
+#define PCI_DEVICE_ID_SI_6205 0x0205
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_601 0x0601
+#define PCI_DEVICE_ID_SI_5107 0x5107
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+#define PCI_DEVICE_ID_SI_5571 0x5571
+#define PCI_DEVICE_ID_SI_5591 0x5591
+#define PCI_DEVICE_ID_SI_5597 0x5597
+#define PCI_DEVICE_ID_SI_7001 0x7001
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_0 0x3000
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_92C178 0xc178
+#define PCI_DEVICE_ID_OPTI_82C557 0xc557
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C700 0xc700
+#define PCI_DEVICE_ID_OPTI_82C701 0xc701
+#define PCI_DEVICE_ID_OPTI_82C814 0xc814
+#define PCI_DEVICE_ID_OPTI_82C822 0xc822
+#define PCI_DEVICE_ID_OPTI_82C825 0xd568
+
+#define PCI_VENDOR_ID_SGS 0x104a
+#define PCI_DEVICE_ID_SGS_2000 0x0008
+#define PCI_DEVICE_ID_SGS_1764 0x0009
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
+
+#define PCI_VENDOR_ID_TI 0x104c
+#define PCI_DEVICE_ID_TI_TVP4010 0x3d04
+#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
+#define PCI_DEVICE_ID_TI_PCI1130 0xac12
+#define PCI_DEVICE_ID_TI_PCI1131 0xac15
+#define PCI_DEVICE_ID_TI_PCI1250 0xac16
+
+#define PCI_VENDOR_ID_OAK 0x104e
+#define PCI_DEVICE_ID_OAK_OTI107 0x0107
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2 0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940 0x0940
+
+#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
+#define PCI_DEVICE_ID_PROMISE_20262 0x4d38
+#define PCI_DEVICE_ID_PROMISE_5300 0x5300
+
+#define PCI_VENDOR_ID_N9 0x105d
+#define PCI_DEVICE_ID_N9_I128 0x2309
+#define PCI_DEVICE_ID_N9_I128_2 0x2339
+#define PCI_DEVICE_ID_N9_I128_T2R 0x493d
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8891A 0x0891
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+#define PCI_DEVICE_ID_UMC_UM8881F 0x8881
+#define PCI_DEVICE_ID_UMC_UM8886F 0x8886
+#define PCI_DEVICE_ID_UMC_UM9017F 0x9017
+#define PCI_DEVICE_ID_UMC_UM8886N 0xe886
+#define PCI_DEVICE_ID_UMC_UM8891N 0xe891
+
+#define PCI_VENDOR_ID_X 0x1061
+#define PCI_DEVICE_ID_X_AGX016 0x0001
+
+#define PCI_VENDOR_ID_PICOP 0x1066
+#define PCI_DEVICE_ID_PICOP_PT86C52X 0x0001
+#define PCI_DEVICE_ID_PICOP_PT80C524 0x8002
+
+#define PCI_VENDOR_ID_MYLEX 0x1069
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V2 0x0001
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V3 0x0002
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V4 0x0010
+#define PCI_DEVICE_ID_MYLEX_DAC960P_V5 0x0020
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
+#define PCI_DEVICE_ID_APPLE_GC 0x0002
+#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
+
+#define PCI_VENDOR_ID_NEXGEN 0x1074
+#define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022
+
+#define PCI_VENDOR_ID_CYRIX 0x1078
+#define PCI_DEVICE_ID_CYRIX_5510 0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
+#define PCI_DEVICE_ID_CYRIX_5520 0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_SMI 0x0101
+#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
+
+#define PCI_VENDOR_ID_LEADTEK 0x107d
+#define PCI_DEVICE_ID_LEADTEK_805 0x0000
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C599 0x0600
+#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
+
+#define PCI_VENDOR_ID_FOREX 0x1083
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+#define PCI_DEVICE_ID_OLICOM_OC3136 0x0001
+#define PCI_DEVICE_ID_OLICOM_OC2315 0x0011
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#define PCI_DEVICE_ID_OLICOM_OC6151 0x0021
+
+#define PCI_VENDOR_ID_SUN 0x108e
+#define PCI_DEVICE_ID_SUN_EBUS 0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
+#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
+#define PCI_DEVICE_ID_SUN_PBM 0x8000
+#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_640 0x0640
+#define PCI_DEVICE_ID_CMD_643 0x0643
+#define PCI_DEVICE_ID_CMD_646 0x0646
+#define PCI_DEVICE_ID_CMD_670 0x0670
+
+#define PCI_VENDOR_ID_VISION 0x1098
+#define PCI_DEVICE_ID_VISION_QD8500 0x0001
+#define PCI_DEVICE_ID_VISION_QD8580 0x0002
+
+#define PCI_VENDOR_ID_BROOKTREE 0x109e
+#define PCI_DEVICE_ID_BROOKTREE_848 0x0350
+#define PCI_DEVICE_ID_BROOKTREE_849A 0x0351
+#define PCI_DEVICE_ID_BROOKTREE_8474 0x8474
+
+#define PCI_VENDOR_ID_SIERRA 0x10a8
+#define PCI_DEVICE_ID_SIERRA_STB 0x0000
+
+#define PCI_VENDOR_ID_ACC 0x10aa
+#define PCI_DEVICE_ID_ACC_2056 0x0000
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_83769 0x0001
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
+
+#define PCI_VENDOR_ID_DATABOOK 0x10b3
+#define PCI_DEVICE_ID_DATABOOK_87144 0xb106
+
+#define PCI_VENDOR_ID_PLX 0x10b5
+#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9080 0x9080
+
+#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103
+
+#define PCI_VENDOR_ID_MADGE 0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2 0x0002
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C339 0x3390
+#define PCI_DEVICE_ID_3COM_3C590 0x5900
+#define PCI_DEVICE_ID_3COM_3C595TX 0x5950
+#define PCI_DEVICE_ID_3COM_3C595T4 0x5951
+#define PCI_DEVICE_ID_3COM_3C595MII 0x5952
+#define PCI_DEVICE_ID_3COM_3C900TPO 0x9000
+#define PCI_DEVICE_ID_3COM_3C900COMBO 0x9001
+#define PCI_DEVICE_ID_3COM_3C905TX 0x9050
+#define PCI_DEVICE_ID_3COM_3C905T4 0x9051
+#define PCI_DEVICE_ID_3COM_3C905B_TX 0x9055
+
+#define PCI_VENDOR_ID_SMC 0x10b8
+#define PCI_DEVICE_ID_SMC_EPIC100 0x0005
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1445 0x1445
+#define PCI_DEVICE_ID_AL_M1449 0x1449
+#define PCI_DEVICE_ID_AL_M1451 0x1451
+#define PCI_DEVICE_ID_AL_M1461 0x1461
+#define PCI_DEVICE_ID_AL_M1489 0x1489
+#define PCI_DEVICE_ID_AL_M1511 0x1511
+#define PCI_DEVICE_ID_AL_M1513 0x1513
+#define PCI_DEVICE_ID_AL_M1521 0x1521
+#define PCI_DEVICE_ID_AL_M1523 0x1523
+#define PCI_DEVICE_ID_AL_M1531 0x1531
+#define PCI_DEVICE_ID_AL_M1533 0x1533
+#define PCI_DEVICE_ID_AL_M1541 0x1541
+#define PCI_DEVICE_ID_AL_M1543 0x1543
+#define PCI_DEVICE_ID_AL_M3307 0x3307
+#define PCI_DEVICE_ID_AL_M4803 0x5215
+#define PCI_DEVICE_ID_AL_M5219 0x5219
+#define PCI_DEVICE_ID_AL_M5229 0x5229
+#define PCI_DEVICE_ID_AL_M5237 0x5237
+#define PCI_DEVICE_ID_AL_M7101 0x7101
+
+#define PCI_VENDOR_ID_MITSUBISHI 0x10ba
+
+#define PCI_VENDOR_ID_SURECOM 0x10bd
+#define PCI_DEVICE_ID_SURECOM_NE34 0x0e34
+
+#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2070 0x0001
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128V 0x0002
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZV 0x0003
+#define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004
+
+#define PCI_VENDOR_ID_ASP 0x10cd
+#define PCI_DEVICE_ID_ASP_ABP940 0x1200
+#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
+#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
+
+#define PCI_VENDOR_ID_MACRONIX 0x10d9
+#define PCI_DEVICE_ID_MACRONIX_MX98713 0x0512
+#define PCI_DEVICE_ID_MACRONIX_MX987x5 0x0531
+
+#define PCI_VENDOR_ID_CERN 0x10dc
+#define PCI_DEVICE_ID_CERN_SPSB_PMC 0x0001
+#define PCI_DEVICE_ID_CERN_SPSB_PCI 0x0002
+#define PCI_DEVICE_ID_CERN_HIPPI_DST 0x0021
+#define PCI_DEVICE_ID_CERN_HIPPI_SRC 0x0022
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_8849 0x8849
+
+#define PCI_VENDOR_ID_TEKRAM2 0x10e1
+#define PCI_DEVICE_ID_TEKRAM2_690c 0x690c
+
+#define PCI_VENDOR_ID_TUNDRA 0x10e3
+#define PCI_DEVICE_ID_TUNDRA_CA91C042 0x0000
+
+#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_DEVICE_ID_AMCC_MYRINET 0x8043
+#define PCI_DEVICE_ID_AMCC_S5933 0x807d
+#define PCI_DEVICE_ID_AMCC_S5933_HEPC3 0x809c
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1680 0x1680
+#define PCI_DEVICE_ID_INTERG_1682 0x1682
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8029 0x8029
+#define PCI_DEVICE_ID_REALTEK_8129 0x8129
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+
+#define PCI_VENDOR_ID_TRUEVISION 0x10fa
+#define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c
+
+#define PCI_VENDOR_ID_INIT 0x1101
+#define PCI_DEVICE_ID_INIT_320P 0x9100
+#define PCI_DEVICE_ID_INIT_360P 0x9500
+
+#define PCI_VENDOR_ID_TTI 0x1103
+#define PCI_DEVICE_ID_TTI_HPT343 0x0003
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_82C505 0x0505
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_82C585 0x0585
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#define PCI_DEVICE_ID_VIA_82C595 0x0595
+#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
+#define PCI_DEVICE_ID_VIA_82C598_0 0x0598
+#define PCI_DEVICE_ID_VIA_82C926 0x0926
+#define PCI_DEVICE_ID_VIA_82C416 0x1571
+#define PCI_DEVICE_ID_VIA_82C595_97 0x1595
+#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
+#define PCI_DEVICE_ID_VIA_86C100A 0x6100
+#define PCI_DEVICE_ID_VIA_82C597_1 0x8597
+#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
+
+#define PCI_VENDOR_ID_SMC2 0x1113
+#define PCI_DEVICE_ID_SMC2_1211TX 0x1211
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x0110
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x0111
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x0112
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x0113
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x0114
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x0115
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x0120
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x0121
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x0122
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x0123
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x0124
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x0125
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200PC 0x0210
+#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
+
+#define PCI_VENDOR_ID_IMAGINGTECH 0x112f
+#define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000
+
+#define PCI_VENDOR_ID_PHILIPS 0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
+
+#define PCI_VENDOR_ID_CYCLONE 0x113c
+#define PCI_DEVICE_ID_CYCLONE_SDK 0x0001
+
+#define PCI_VENDOR_ID_ALLIANCE 0x1142
+#define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210
+#define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422
+#define PCI_DEVICE_ID_ALLIANCE_AT24 0x6424
+#define PCI_DEVICE_ID_ALLIANCE_AT3D 0x643d
+
+#define PCI_VENDOR_ID_VMIC 0x114a
+#define PCI_DEVICE_ID_VMIC_VME 0x7587
+
+#define PCI_VENDOR_ID_DIGI 0x114f
+#define PCI_DEVICE_ID_DIGI_EPC 0x0002
+#define PCI_DEVICE_ID_DIGI_RIGHTSWITCH 0x0003
+#define PCI_DEVICE_ID_DIGI_XEM 0x0004
+#define PCI_DEVICE_ID_DIGI_XR 0x0005
+#define PCI_DEVICE_ID_DIGI_CX 0x0006
+#define PCI_DEVICE_ID_DIGI_XRJ 0x0009
+#define PCI_DEVICE_ID_DIGI_EPCJ 0x000a
+#define PCI_DEVICE_ID_DIGI_XR_920 0x0027
+
+#define PCI_VENDOR_ID_MUTECH 0x1159
+#define PCI_DEVICE_ID_MUTECH_MV1000 0x0001
+
+#define PCI_VENDOR_ID_RENDITION 0x1163
+#define PCI_DEVICE_ID_RENDITION_VERITE 0x0001
+#define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000
+
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_DEVICE_ID_TOSHIBA_601 0x0601
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+
+#define PCI_VENDOR_ID_RICOH 0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+
+#define PCI_VENDOR_ID_ARTOP 0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+#define PCI_VENDOR_ID_OMEGA 0x119b
+#define PCI_DEVICE_ID_OMEGA_82C092G 0x1221
+
+#define PCI_VENDOR_ID_LITEON 0x11ad
+#define PCI_DEVICE_ID_LITEON_LNE100TX 0x0002
+
+#define PCI_VENDOR_ID_NP 0x11bc
+#define PCI_DEVICE_ID_NP_PCI_FDDI 0x0001
+
+#define PCI_VENDOR_ID_ATT 0x11c1
+#define PCI_DEVICE_ID_ATT_L56XMF 0x0440
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000
+#define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000
+#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
+
+#define PCI_VENDOR_ID_AURAVISION 0x11d1
+#define PCI_DEVICE_ID_AURAVISION_VXP524 0x01f7
+
+#define PCI_VENDOR_ID_IKON 0x11d5
+#define PCI_DEVICE_ID_IKON_10115 0x0115
+#define PCI_DEVICE_ID_IKON_10117 0x0117
+
+#define PCI_VENDOR_ID_ZORAN 0x11de
+#define PCI_DEVICE_ID_ZORAN_36057 0x6057
+#define PCI_DEVICE_ID_ZORAN_36120 0x6120
+
+#define PCI_VENDOR_ID_KINETIC 0x11f4
+#define PCI_DEVICE_ID_KINETIC_2915 0x2915
+
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+#define PCI_DEVICE_ID_COMPEX_RL2000 0x1401
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP8OCTA 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP32INTF 0x0004
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
+#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
+#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
+#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
+#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
+
+#define PCI_VENDOR_ID_ESSENTIAL 0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
+
+#define PCI_VENDOR_ID_O2 0x1217
+#define PCI_DEVICE_ID_O2_6832 0x6832
+
+#define PCI_VENDOR_ID_3DFX 0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
+
+#define PCI_VENDOR_ID_SIGMADES 0x1236
+#define PCI_DEVICE_ID_SIGMADES_6425 0x6401
+
+#define PCI_VENDOR_ID_CCUBE 0x123f
+
+#define PCI_VENDOR_ID_DIPIX 0x1246
+
+#define PCI_VENDOR_ID_STALLION 0x124d
+#define PCI_DEVICE_ID_STALLION_ECHPCI832 0x0000
+#define PCI_DEVICE_ID_STALLION_ECHPCI864 0x0002
+#define PCI_DEVICE_ID_STALLION_EIOPCI 0x0003
+
+#define PCI_VENDOR_ID_OPTIBASE 0x1255
+#define PCI_DEVICE_ID_OPTIBASE_FORGE 0x1110
+#define PCI_DEVICE_ID_OPTIBASE_FUSION 0x1210
+#define PCI_DEVICE_ID_OPTIBASE_VPLEX 0x2110
+#define PCI_DEVICE_ID_OPTIBASE_VPLEXCC 0x2120
+#define PCI_DEVICE_ID_OPTIBASE_VQUEST 0x2130
+
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_88140 0x1400
+
+#define PCI_VENDOR_ID_SATSAGEM 0x1267
+#define PCI_DEVICE_ID_SATSAGEM_PCR2101 0x5352
+#define PCI_DEVICE_ID_SATSAGEM_TELSATTURBO 0x5a4b
+
+#define PCI_VENDOR_ID_ENSONIQ 0x1274
+#define PCI_DEVICE_ID_ENSONIQ_AUDIOPCI 0x5000
+
+#define PCI_VENDOR_ID_PICTUREL 0x12c5
+#define PCI_DEVICE_ID_PICTUREL_PCIVST 0x0081
+
+#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_VENDOR_ID_CBOARDS 0x1307
+#define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
+
+#define PCI_VENDOR_ID_SYMPHONY 0x1c1c
+#define PCI_DEVICE_ID_SYMPHONY_101 0x0001
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_3DLABS 0x3d3d
+#define PCI_DEVICE_ID_3DLABS_300SX 0x0001
+#define PCI_DEVICE_ID_3DLABS_500TX 0x0002
+#define PCI_DEVICE_ID_3DLABS_DELTA 0x0003
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA 0x0004
+#define PCI_DEVICE_ID_3DLABS_MX 0x0006
+
+#define PCI_VENDOR_ID_AVANCE 0x4005
+#define PCI_DEVICE_ID_AVANCE_ALG2064 0x2064
+#define PCI_DEVICE_ID_AVANCE_2302 0x2302
+
+#define PCI_VENDOR_ID_NETVIN 0x4a14
+#define PCI_DEVICE_ID_NETVIN_NV5000SC 0x5000
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_PLATO_PXS 0x0551
+#define PCI_DEVICE_ID_S3_ViRGE 0x5631
+#define PCI_DEVICE_ID_S3_TRIO 0x8811
+#define PCI_DEVICE_ID_S3_AURORA64VP 0x8812
+#define PCI_DEVICE_ID_S3_TRIO64UVP 0x8814
+#define PCI_DEVICE_ID_S3_ViRGE_VX 0x883d
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_928 0x88b0
+#define PCI_DEVICE_ID_S3_864_1 0x88c0
+#define PCI_DEVICE_ID_S3_864_2 0x88c1
+#define PCI_DEVICE_ID_S3_964_1 0x88d0
+#define PCI_DEVICE_ID_S3_964_2 0x88d1
+#define PCI_DEVICE_ID_S3_968 0x88f0
+#define PCI_DEVICE_ID_S3_TRIO64V2 0x8901
+#define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902
+#define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01
+#define PCI_DEVICE_ID_S3_ViRGE_GX2 0x8a10
+#define PCI_DEVICE_ID_S3_ViRGE_MX 0x8c01
+#define PCI_DEVICE_ID_S3_ViRGE_MXP 0x8c02
+#define PCI_DEVICE_ID_S3_ViRGE_MXPMV 0x8c03
+#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
+
+#define PCI_VENDOR_ID_DCI 0x6666
+#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82430 0x0486
+#define PCI_DEVICE_ID_INTEL_82434 0x04a3
+#define PCI_DEVICE_ID_INTEL_I960 0x0960
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
+#define PCI_DEVICE_ID_INTEL_7116 0x1223
+#define PCI_DEVICE_ID_INTEL_82596 0x1226
+#define PCI_DEVICE_ID_INTEL_82865 0x1227
+#define PCI_DEVICE_ID_INTEL_82557 0x1229
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371_1 0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
+#define PCI_DEVICE_ID_INTEL_82437MX 0x1235
+#define PCI_DEVICE_ID_INTEL_82441 0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
+#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
+#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
+#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71A0
+#define PCI_DEVICE_ID_INTEL_82443GX_1 0x71A1
+#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71A2
+#define PCI_DEVICE_ID_INTEL_P6 0x84c4
+#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
+
+#define PCI_VENDOR_ID_KTI 0x8e2e
+#define PCI_DEVICE_ID_KTI_ET32P2 0x3000
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
+#define PCI_DEVICE_ID_ADAPTEC_5800 0x5800
+#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878
+#define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78
+
+#define PCI_VENDOR_ID_ADAPTEC2 0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051
+#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf
+
+#define PCI_VENDOR_ID_ATRONICS 0x907f
+#define PCI_DEVICE_ID_ATRONICS_2015 0x2015
+
+#define PCI_VENDOR_ID_HOLTEK 0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+
+#define PCI_VENDOR_ID_TIGERJET 0xe159
+#define PCI_DEVICE_ID_TIGERJET_300 0x0001
+
+#define PCI_VENDOR_ID_ARK 0xedd8
+#define PCI_DEVICE_ID_ARK_STING 0xa091
+#define PCI_DEVICE_ID_ARK_STINGARK 0xa099
+#define PCI_DEVICE_ID_ARK_2000MT 0xa0a1
+
+#ifdef __KERNEL__
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ */
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+
+/*
+ * There is one pci_dev structure for each slot-number/function-number
+ * combination:
+ */
+struct pci_dev {
+ struct pci_bus *bus; /* bus this device is on */
+ struct pci_dev *sibling; /* next device on this bus */
+ struct pci_dev *next; /* chain of all devices */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor;
+ unsigned short device;
+ unsigned int class; /* 3 bytes: (base,sub,prog-if) */
+ unsigned int master : 1; /* set if device is master capable */
+ /*
+ * In theory, the irq level can be read from configuration
+ * space and all would be fine. However, old PCI chips don't
+ * support these registers and return 0 instead. For example,
+ * the Vision864-P rev 0 chip can use INTA, but returns 0 in
+ * the interrupt line and pin registers. pci_init()
+ * initializes this field with the value at PCI_INTERRUPT_LINE
+ * and it is the job of pcibios_fixup() to change it if
+ * necessary. The field must not be 0 unless the device
+ * cannot generate interrupts at all.
+ */
+ unsigned char irq; /* irq generated by this device */
+};
+
+struct pci_bus {
+ struct pci_bus *parent; /* parent bus this bridge is on */
+ struct pci_bus *children; /* chain of P2P bridges on this bus */
+ struct pci_bus *next; /* chain of all PCI buses */
+
+ struct pci_dev *self; /* bridge device as seen by parent */
+ struct pci_dev *devices; /* devices behind this bridge */
+
+ void *sysdata; /* hook for sys-specific extension */
+
+ unsigned char number; /* bus number */
+ unsigned char primary; /* number of primary bridge */
+ unsigned char secondary; /* number of secondary bridge */
+ unsigned char subordinate; /* max number of subordinate buses */
+};
+
+/*
+ * This is used to map a vendor-id/device-id pair into device-specific
+ * information.
+ */
+struct pci_dev_info {
+ unsigned short vendor; /* vendor id */
+ unsigned short device; /* device id */
+
+ const char *name; /* device name */
+ unsigned char bridge_type; /* bridge type or 0xff */
+};
+
+extern struct pci_bus pci_root; /* root bus */
+extern struct pci_dev *pci_devices; /* list of all devices */
+
+
+extern unsigned long pci_init (unsigned long mem_start, unsigned long mem_end);
+
+extern struct pci_dev_info *pci_lookup_dev (unsigned int vendor,
+ unsigned int dev);
+extern const char *pci_strclass (unsigned int class);
+extern const char *pci_strvendor (unsigned int vendor);
+extern const char *pci_strdev (unsigned int vendor, unsigned int device);
+
+extern int get_pci_list (char *buf);
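+
+/*
+ * Illustrative sketch, not part of the original interface: once pci_init()
+ * has scanned the buses, a driver typically walks the global device chain
+ * looking for IDs it knows about, e.g.
+ *
+ *	struct pci_dev *pdev;
+ *
+ *	for (pdev = pci_devices; pdev; pdev = pdev->next) {
+ *		if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
+ *		    pdev->device == PCI_DEVICE_ID_REALTEK_8029)
+ *			... found one; pdev->irq and its base addresses
+ *			    can now be read from configuration space ...
+ *	}
+ */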
+
+#endif /* __KERNEL__ */
+#endif /* LINUX_PCI_H */
diff --git a/linux/src/include/linux/personality.h b/linux/src/include/linux/personality.h
new file mode 100644
index 0000000..aa09f46
--- /dev/null
+++ b/linux/src/include/linux/personality.h
@@ -0,0 +1,55 @@
+#ifndef _PERSONALITY_H
+#define _PERSONALITY_H
+
+#include <linux/linkage.h>
+#include <linux/ptrace.h>
+
+
+/* Flags for bug emulation. These occupy the top three bytes. */
+#define STICKY_TIMEOUTS 0x4000000
+#define WHOLE_SECONDS 0x2000000
+#define ADDR_MAX_32BIT 0x1000000
+#define ADDR_MAX_31BIT 0x0800000
+
+/* Personality types. These go in the low byte. Avoid using the top bit,
+ * it will conflict with error returns.
+ */
+#define PER_MASK (0x00ff)
+#define PER_LINUX (0x0000)
+#define PER_LINUX_32BIT (PER_LINUX | ADDR_MAX_32BIT)
+#define PER_LINUX_EM86 (PER_LINUX | ADDR_MAX_31BIT)
+#define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
+#define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
+#define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
+#define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
+#define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
+#define PER_BSD (0x0006)
+#define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
+
+/* Prototype for an lcall7 syscall handler. */
+typedef void (*lcall7_func)(struct pt_regs *);
+
+
+/* Description of an execution domain - personality range supported,
+ * lcall7 syscall handler, start up / shut down functions etc.
+ * N.B. The name and lcall7 handler must be where they are since the
+ * offset of the handler is hard coded in kernel/sys_call.S.
+ */
+struct exec_domain {
+ const char *name;
+ lcall7_func handler;
+ unsigned char pers_low, pers_high;
+ unsigned long * signal_map;
+ unsigned long * signal_invmap;
+ long *use_count;
+ struct exec_domain *next;
+};
+
+extern struct exec_domain default_exec_domain;
+
+extern struct exec_domain *lookup_exec_domain(unsigned long personality);
+extern int register_exec_domain(struct exec_domain *it);
+extern int unregister_exec_domain(struct exec_domain *it);
+asmlinkage int sys_personality(unsigned long personality);
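+
+/*
+ * Illustrative sketch, not part of the original interface: an emulation
+ * module would describe the personality range it handles and register it,
+ * e.g. (the my_* names and signal maps are hypothetical)
+ *
+ *	static struct exec_domain my_domain = {
+ *		"MyOS",
+ *		my_lcall7_handler,
+ *		PER_SVR4 & PER_MASK, PER_SVR4 & PER_MASK,
+ *		my_signal_map, my_signal_invmap,
+ *		&my_use_count, NULL
+ *	};
+ *
+ *	register_exec_domain(&my_domain);
+ */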
+
+#endif /* _PERSONALITY_H */
diff --git a/linux/src/include/linux/posix_types.h b/linux/src/include/linux/posix_types.h
new file mode 100644
index 0000000..d3ceb0b
--- /dev/null
+++ b/linux/src/include/linux/posix_types.h
@@ -0,0 +1,50 @@
+#ifndef _LINUX_POSIX_TYPES_H
+#define _LINUX_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+#ifndef NULL
+# define NULL ((void *) 0)
+#endif
+
+/*
+ * This allows for 1024 file descriptors: if NR_OPEN is ever grown
+ * beyond that you'll have to change this too. But 1024 fd's seem to be
+ * enough even for such "real" unices as OSF/1, so hopefully this is
+ * one limit that doesn't have to be changed [again].
+ *
+ * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
+ * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
+ * place for them. Solved by having dummy defines in <sys/time.h>.
+ */
+
+/*
+ * Those macros may have been defined in <gnu/types.h>. But we always
+ * use the ones here.
+ */
+#undef __NFDBITS
+#define __NFDBITS (8 * sizeof(unsigned long))
+
+#undef __FD_SETSIZE
+#define __FD_SETSIZE 1024
+
+#undef __FDSET_LONGS
+#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
+
+#undef __FDELT
+#define __FDELT(d) ((d) / __NFDBITS)
+
+#undef __FDMASK
+#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
+
+typedef struct {
+ unsigned long fds_bits [__FDSET_LONGS];
+} __kernel_fd_set;
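+
+/*
+ * Worked example (illustrative only, assuming a 32-bit unsigned long so
+ * that __NFDBITS == 32): descriptor 70 lives in word __FDELT(70) == 2 of
+ * fds_bits[], and its bit within that word is __FDMASK(70) == 1UL << 6.
+ * The per-architecture __FD_SET-style helpers (see <asm/posix_types.h>,
+ * included below) implement this same word/bit split.
+ */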
+
+#include <asm/posix_types.h>
+
+#endif /* _LINUX_POSIX_TYPES_H */
diff --git a/linux/src/include/linux/proc_fs.h b/linux/src/include/linux/proc_fs.h
new file mode 100644
index 0000000..bbb74dd
--- /dev/null
+++ b/linux/src/include/linux/proc_fs.h
@@ -0,0 +1,292 @@
+#ifndef _LINUX_PROC_FS_H
+#define _LINUX_PROC_FS_H
+
+#include <linux/fs.h>
+#include <linux/malloc.h>
+
+/*
+ * The proc filesystem constants/structures
+ */
+
+/*
+ * We always define these enumerators
+ */
+
+enum root_directory_inos {
+ PROC_ROOT_INO = 1,
+ PROC_LOADAVG,
+ PROC_UPTIME,
+ PROC_MEMINFO,
+ PROC_KMSG,
+ PROC_VERSION,
+ PROC_CPUINFO,
+ PROC_PCI,
+ PROC_SELF, /* will change inode # */
+ PROC_NET,
+ PROC_SCSI,
+ PROC_MALLOC,
+ PROC_KCORE,
+ PROC_MODULES,
+ PROC_STAT,
+ PROC_DEVICES,
+ PROC_INTERRUPTS,
+ PROC_FILESYSTEMS,
+ PROC_KSYMS,
+ PROC_DMA,
+ PROC_IOPORTS,
+#ifdef __SMP_PROF__
+ PROC_SMP_PROF,
+#endif
+ PROC_PROFILE, /* whether enabled or not */
+ PROC_CMDLINE,
+ PROC_SYS,
+ PROC_MTAB,
+ PROC_MD,
+ PROC_RTC,
+ PROC_LOCKS
+};
+
+enum pid_directory_inos {
+ PROC_PID_INO = 2,
+ PROC_PID_STATUS,
+ PROC_PID_MEM,
+ PROC_PID_CWD,
+ PROC_PID_ROOT,
+ PROC_PID_EXE,
+ PROC_PID_FD,
+ PROC_PID_ENVIRON,
+ PROC_PID_CMDLINE,
+ PROC_PID_STAT,
+ PROC_PID_STATM,
+ PROC_PID_MAPS
+};
+
+enum pid_subdirectory_inos {
+ PROC_PID_FD_DIR = 1
+};
+
+enum net_directory_inos {
+ PROC_NET_UNIX = 128,
+ PROC_NET_ARP,
+ PROC_NET_ROUTE,
+ PROC_NET_DEV,
+ PROC_NET_RAW,
+ PROC_NET_TCP,
+ PROC_NET_UDP,
+ PROC_NET_SNMP,
+ PROC_NET_RARP,
+ PROC_NET_IGMP,
+ PROC_NET_IPMR_VIF,
+ PROC_NET_IPMR_MFC,
+ PROC_NET_IPFWFWD,
+ PROC_NET_IPFWIN,
+ PROC_NET_IPFWOUT,
+ PROC_NET_IPACCT,
+ PROC_NET_IPMSQHST,
+ PROC_NET_WIRELESS,
+ PROC_NET_IPX_INTERFACE,
+ PROC_NET_IPX_ROUTE,
+ PROC_NET_IPX,
+ PROC_NET_ATALK,
+ PROC_NET_AT_ROUTE,
+ PROC_NET_ATIF,
+ PROC_NET_AX25_ROUTE,
+ PROC_NET_AX25,
+ PROC_NET_AX25_CALLS,
+ PROC_NET_NR_NODES,
+ PROC_NET_NR_NEIGH,
+ PROC_NET_NR,
+ PROC_NET_SOCKSTAT,
+ PROC_NET_RTCACHE,
+ PROC_NET_AX25_BPQETHER,
+ PROC_NET_ALIAS_TYPES,
+ PROC_NET_ALIASES,
+ PROC_NET_IP_MASQ_APP,
+ PROC_NET_STRIP_STATUS,
+ PROC_NET_STRIP_TRACE,
+ PROC_NET_IPAUTOFW,
+ PROC_NET_RS_NODES,
+ PROC_NET_RS_NEIGH,
+ PROC_NET_RS_ROUTES,
+ PROC_NET_RS,
+ PROC_NET_Z8530,
+ PROC_NET_LAST
+};
+
+enum scsi_directory_inos {
+ PROC_SCSI_SCSI = 256,
+ PROC_SCSI_ADVANSYS,
+ PROC_SCSI_EATA,
+ PROC_SCSI_EATA_PIO,
+ PROC_SCSI_AHA152X,
+ PROC_SCSI_AHA1542,
+ PROC_SCSI_AHA1740,
+ PROC_SCSI_AIC7XXX,
+ PROC_SCSI_BUSLOGIC,
+ PROC_SCSI_U14_34F,
+ PROC_SCSI_FDOMAIN,
+ PROC_SCSI_GENERIC_NCR5380,
+ PROC_SCSI_IN2000,
+ PROC_SCSI_PAS16,
+ PROC_SCSI_QLOGICFAS,
+ PROC_SCSI_QLOGICISP,
+ PROC_SCSI_SEAGATE,
+ PROC_SCSI_T128,
+ PROC_SCSI_DC390T,
+ PROC_SCSI_NCR53C7xx,
+ PROC_SCSI_NCR53C8XX,
+ PROC_SCSI_ULTRASTOR,
+ PROC_SCSI_7000FASST,
+ PROC_SCSI_EATA2X,
+ PROC_SCSI_AM53C974,
+ PROC_SCSI_SSC,
+ PROC_SCSI_NCR53C406A,
+ PROC_SCSI_MEGARAID,
+ PROC_SCSI_PPA,
+ PROC_SCSI_ESP,
+ PROC_SCSI_A3000,
+ PROC_SCSI_A2091,
+ PROC_SCSI_GVP11,
+ PROC_SCSI_ATARI,
+ PROC_SCSI_GDTH,
+ PROC_SCSI_IDESCSI,
+ PROC_SCSI_SCSI_DEBUG,
+ PROC_SCSI_NOT_PRESENT,
+ PROC_SCSI_FILE, /* I'm assuming here that we */
+ PROC_SCSI_LAST = (PROC_SCSI_FILE + 16) /* won't ever see more than */
+}; /* 16 HBAs in one machine */
+
+/* Finally, the dynamically allocatable proc entries are reserved: */
+
+#define PROC_DYNAMIC_FIRST 4096
+#define PROC_NDYNAMIC 4096
+
+#define PROC_SUPER_MAGIC 0x9fa0
+
+/*
+ * This is not completely implemented yet. The idea is to
+ * create an in-memory tree (like the actual /proc filesystem
+ * tree) of these proc_dir_entries, so that we can dynamically
+ * add new files to /proc.
+ *
+ * The "next" pointer creates a linked list of one /proc directory,
+ * while parent/subdir create the directory structure (every
+ * /proc file has a parent, but "subdir" is NULL for all
+ * non-directory entries).
+ *
+ * "get_info" is called at "read", while "fill_inode" is used to
+ * fill in file type/protection/owner information specific to the
+ * particular /proc file.
+ */
+struct proc_dir_entry {
+ unsigned short low_ino;
+ unsigned short namelen;
+ const char *name;
+ mode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ unsigned long size;
+ struct inode_operations * ops;
+ int (*get_info)(char *, char **, off_t, int, int);
+ void (*fill_inode)(struct inode *);
+ struct proc_dir_entry *next, *parent, *subdir;
+ void *data;
+};
+
+extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+extern struct proc_dir_entry proc_root;
+extern struct proc_dir_entry proc_net;
+extern struct proc_dir_entry proc_scsi;
+extern struct proc_dir_entry proc_sys;
+extern struct proc_dir_entry proc_pid;
+extern struct proc_dir_entry proc_pid_fd;
+
+extern struct inode_operations proc_scsi_inode_operations;
+
+extern void proc_root_init(void);
+extern void proc_base_init(void);
+extern void proc_net_init(void);
+
+extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *);
+extern int proc_register_dynamic(struct proc_dir_entry *,
+ struct proc_dir_entry *);
+extern int proc_unregister(struct proc_dir_entry *, int);
+
+static inline int proc_net_register(struct proc_dir_entry * x)
+{
+ return proc_register(&proc_net, x);
+}
+
+static inline int proc_net_unregister(int x)
+{
+ return proc_unregister(&proc_net, x);
+}
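
Drivers normally fill in a static proc_dir_entry and hand it to proc_net_register()
at init time.  A hedged sketch of that pattern (the name, the inode number and the
get_info callback below are placeholders, not definitions from this header):

/* Sketch only -- assumes the usual 2.0-era kernel environment. */
static int mydrv_get_info(char *page, char **start, off_t off, int count, int dummy)
{
	int len = sprintf(page, "mydrv: stub statistics\n");

	*start = page + off;			/* crude partial-read handling */
	return (len > off) ? len - off : 0;
}

static struct proc_dir_entry mydrv_proc_entry = {
	0,					/* low_ino: placeholder; real drivers use an
						   enum value above or proc_register_dynamic() */
	5, "mydrv",				/* namelen, name */
	S_IFREG | S_IRUGO, 1, 0, 0,		/* mode, nlink, uid, gid */
	0, NULL,				/* size, ops */
	mydrv_get_info,				/* get_info: called on every read */
};

/* init:    proc_net_register(&mydrv_proc_entry);
 * cleanup: proc_net_unregister(mydrv_proc_entry.low_ino);
 */
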
+
+static inline int proc_scsi_register(struct proc_dir_entry *driver,
+ struct proc_dir_entry *x)
+{
+ x->ops = &proc_scsi_inode_operations;
+ if(x->low_ino < PROC_SCSI_FILE){
+ return(proc_register(&proc_scsi, x));
+ }else{
+ return(proc_register(driver, x));
+ }
+}
+
+static inline int proc_scsi_unregister(struct proc_dir_entry *driver, int x)
+{
+ extern void scsi_init_free(char *ptr, unsigned int size);
+
+ if(x <= PROC_SCSI_FILE)
+ return(proc_unregister(&proc_scsi, x));
+ else {
+ struct proc_dir_entry **p = &driver->subdir, *dp;
+ int ret;
+
+ while ((dp = *p) != NULL) {
+ if (dp->low_ino == x)
+ break;
+ p = &dp->next;
+ }
+ ret = proc_unregister(driver, x);
+ scsi_init_free((char *) dp, sizeof(struct proc_dir_entry) + 4);
+ return(ret);
+ }
+}
+
+extern struct super_block *proc_read_super(struct super_block *,void *,int);
+extern int init_proc_fs(void);
+extern struct inode * proc_get_inode(struct super_block *, int, struct proc_dir_entry *);
+extern void proc_statfs(struct super_block *, struct statfs *, int);
+extern void proc_read_inode(struct inode *);
+extern void proc_write_inode(struct inode *);
+extern int proc_match(int, const char *, struct proc_dir_entry *);
+
+/*
+ * These are generic /proc routines that use the internal
+ * "struct proc_dir_entry" tree to traverse the filesystem.
+ *
+ * The /proc root directory has extended versions to take care
+ * of the /proc/<pid> subdirectories.
+ */
+extern int proc_readdir(struct inode *, struct file *, void *, filldir_t);
+extern int proc_lookup(struct inode *, const char *, int, struct inode **);
+
+extern struct inode_operations proc_dir_inode_operations;
+extern struct inode_operations proc_net_inode_operations;
+extern struct inode_operations proc_netdir_inode_operations;
+extern struct inode_operations proc_scsi_inode_operations;
+extern struct inode_operations proc_mem_inode_operations;
+extern struct inode_operations proc_sys_inode_operations;
+extern struct inode_operations proc_array_inode_operations;
+extern struct inode_operations proc_arraylong_inode_operations;
+extern struct inode_operations proc_kcore_inode_operations;
+extern struct inode_operations proc_profile_inode_operations;
+extern struct inode_operations proc_kmsg_inode_operations;
+extern struct inode_operations proc_link_inode_operations;
+extern struct inode_operations proc_fd_inode_operations;
+
+#endif
diff --git a/linux/src/include/linux/ptrace.h b/linux/src/include/linux/ptrace.h
new file mode 100644
index 0000000..0a02879
--- /dev/null
+++ b/linux/src/include/linux/ptrace.h
@@ -0,0 +1,26 @@
+#ifndef _LINUX_PTRACE_H
+#define _LINUX_PTRACE_H
+/* ptrace.h */
+/* structs and defines to help the user use the ptrace system call. */
+
+/* has the defines to get at the registers. */
+
+#define PTRACE_TRACEME 0
+#define PTRACE_PEEKTEXT 1
+#define PTRACE_PEEKDATA 2
+#define PTRACE_PEEKUSR 3
+#define PTRACE_POKETEXT 4
+#define PTRACE_POKEDATA 5
+#define PTRACE_POKEUSR 6
+#define PTRACE_CONT 7
+#define PTRACE_KILL 8
+#define PTRACE_SINGLESTEP 9
+
+#define PTRACE_ATTACH 0x10
+#define PTRACE_DETACH 0x11
+
+#define PTRACE_SYSCALL 24
+
+#include <asm/ptrace.h>
+
+#endif
diff --git a/linux/src/include/linux/quota.h b/linux/src/include/linux/quota.h
new file mode 100644
index 0000000..5a394c5
--- /dev/null
+++ b/linux/src/include/linux/quota.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Robert Elz at The University of Melbourne.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Version: $Id: quota.h,v 1.1 1999/04/26 05:56:57 tb Exp $
+ */
+
+#ifndef _LINUX_QUOTA_
+#define _LINUX_QUOTA_
+
+#include <linux/errno.h>
+
+/*
+ * Convert diskblocks to blocks and the other way around.
+ * currently only to fool the BSD source. :-)
+ */
+#define dbtob(num) (num << 10)
+#define btodb(num) (num >> 10)
+
+/*
+ * Convert count of filesystem blocks to diskquota blocks, meant
+ * for filesystems where i_blksize != BLOCK_SIZE
+ */
+#define fs_to_dq_blocks(num, blksize) (((num) * (blksize)) / BLOCK_SIZE)
+
+/*
+ * Definitions for disk quotas imposed on the average user
+ * (big brother finally hits Linux).
+ *
+ * The following constants define the amount of time given a user
+ * before the soft limits are treated as hard limits (usually resulting
+ * in an allocation failure). The timer is started when the user crosses
+ * their soft limit and is reset when they drop back below it.
+ */
+#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
+#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */
+
+#define MAXQUOTAS 2
+#define USRQUOTA 0 /* element used for user quotas */
+#define GRPQUOTA 1 /* element used for group quotas */
+
+/*
+ * Definitions for the default names of the quotas files.
+ */
+#define INITQFNAMES { \
+ "user", /* USRQUOTA */ \
+ "group", /* GRPQUOTA */ \
+ "undefined", \
+};
+
+#define QUOTAFILENAME "quota"
+#define QUOTAGROUP "staff"
+
+#define NR_DQHASH 43 /* Just an arbitrary number; any suggestions? */
+#define NR_DQUOTS 256 /* Number of quotas active at one time */
+
+/*
+ * Command definitions for the 'quotactl' system call.
+ * The commands are broken into a main command defined below
+ * and a subcommand that is used to convey the type of
+ * quota that is being manipulated (see above).
+ */
+#define SUBCMDMASK 0x00ff
+#define SUBCMDSHIFT 8
+#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK))
+
+#define Q_QUOTAON 0x0100 /* enable quotas */
+#define Q_QUOTAOFF 0x0200 /* disable quotas */
+#define Q_GETQUOTA 0x0300 /* get limits and usage */
+#define Q_SETQUOTA 0x0400 /* set limits and usage */
+#define Q_SETUSE 0x0500 /* set usage */
+#define Q_SYNC 0x0600 /* sync disk copy of a filesystems quotas */
+#define Q_SETQLIM 0x0700 /* set limits */
+#define Q_GETSTATS 0x0800 /* get collected stats */
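
QCMD() packs the main command into the upper bits and the quota type into the low
byte.  A small standalone sketch of composing a command word; the quotactl()
arguments mentioned in the comment are purely illustrative:

/* User-space sketch of the QCMD() encoding. */
#include <stdio.h>

#define SUBCMDMASK  0x00ff
#define SUBCMDSHIFT 8
#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK))

#define Q_QUOTAON 0x0100
#define USRQUOTA  0

int main(void)
{
	/* 0x0100 << 8 | 0 == 0x10000 */
	printf("QCMD(Q_QUOTAON, USRQUOTA) = 0x%x\n", QCMD(Q_QUOTAON, USRQUOTA));

	/* A real call would then look like:
	 *   quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/hda1", 0, "/quota");
	 * where the device and quota-file paths are examples only.
	 */
	return 0;
}
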
+
+/*
+ * The following structure defines the format of the disk quota file
+ * (as it appears on disk) - the file is an array of these structures
+ * indexed by user or group number.
+ */
+struct dqblk {
+ __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */
+ __u32 dqb_bsoftlimit; /* preferred limit on disk blks */
+ __u32 dqb_curblocks; /* current block count */
+ __u32 dqb_ihardlimit; /* maximum # allocated inodes */
+ __u32 dqb_isoftlimit; /* preferred inode limit */
+ __u32 dqb_curinodes; /* current # allocated inodes */
+ time_t dqb_btime; /* time limit for excessive disk use */
+ time_t dqb_itime; /* time limit for excessive files */
+};
+
+/*
+ * Shorthand notation.
+ */
+#define dq_bhardlimit dq_dqb.dqb_bhardlimit
+#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit
+#define dq_curblocks dq_dqb.dqb_curblocks
+#define dq_ihardlimit dq_dqb.dqb_ihardlimit
+#define dq_isoftlimit dq_dqb.dqb_isoftlimit
+#define dq_curinodes dq_dqb.dqb_curinodes
+#define dq_btime dq_dqb.dqb_btime
+#define dq_itime dq_dqb.dqb_itime
+
+#define dqoff(UID) ((off_t)((UID) * sizeof (struct dqblk)))
+
+struct dqstats {
+ __u32 lookups;
+ __u32 drops;
+ __u32 reads;
+ __u32 writes;
+ __u32 cache_hits;
+ __u32 pages_allocated;
+ __u32 allocated_dquots;
+ __u32 free_dquots;
+ __u32 syncs;
+};
+
+#ifdef __KERNEL__
+
+#include <linux/mount.h>
+
+/*
+ * Maximum length of a message generated in the quota system,
+ * that needs to be kicked onto the tty.
+ */
+#define MAX_QUOTA_MESSAGE 75
+
+#define DQ_LOCKED 0x01 /* locked for update */
+#define DQ_WANT 0x02 /* wanted for update */
+#define DQ_MOD 0x04 /* dquot modified since read */
+#define DQ_BLKS 0x10 /* uid/gid has been warned about blk limit */
+#define DQ_INODES 0x20 /* uid/gid has been warned about inode limit */
+#define DQ_FAKE 0x40 /* no limits only usage */
+
+struct dquot {
+ unsigned int dq_id; /* id this applies to (uid, gid) */
+ short dq_type; /* type of quota */
+ kdev_t dq_dev; /* Device this applies to */
+ short dq_flags; /* see DQ_* */
+ short dq_count; /* reference count */
+ short dq_locknest; /* lock nesting */
+ struct task_struct *dq_lockproc; /* process holding the lock */
+ struct vfsmount *dq_mnt; /* vfsmountpoint this applies to */
+ struct dqblk dq_dqb; /* diskquota usage */
+ struct wait_queue *dq_wait; /* pointer to waitqueue */
+ struct dquot *dq_prev; /* pointer to prev dquot */
+ struct dquot *dq_next; /* pointer to next dquot */
+ struct dquot *dq_hash_prev; /* pointer to prev dquot */
+ struct dquot *dq_hash_next; /* pointer to next dquot */
+};
+
+#define NODQUOT (struct dquot *)NULL
+
+/*
+ * Flags used for set_dqblk.
+ */
+#define QUOTA_SYSCALL 0x01
+#define SET_QUOTA 0x02
+#define SET_USE 0x04
+#define SET_QLIMIT 0x08
+
+#define QUOTA_OK 0
+#define NO_QUOTA 1
+
+/*
+ * declaration of quota_function calls in kernel.
+ */
+
+extern void dquot_initialize(struct inode *inode, short type);
+extern void dquot_drop(struct inode *inode);
+extern int dquot_alloc_block(const struct inode *inode, unsigned long number);
+extern int dquot_alloc_inode(const struct inode *inode, unsigned long number);
+extern void dquot_free_block(const struct inode *inode, unsigned long number);
+extern void dquot_free_inode(const struct inode *inode, unsigned long number);
+extern int dquot_transfer(struct inode *inode, struct iattr *iattr, char direction);
+
+extern void invalidate_dquots(kdev_t dev, short type);
+extern int quota_off(kdev_t dev, short type);
+extern int sync_dquots(kdev_t dev, short type);
+
+#else
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+int quotactl __P ((int, const char *, int, caddr_t));
+__END_DECLS
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_QUOTA_ */
diff --git a/linux/src/include/linux/random.h b/linux/src/include/linux/random.h
new file mode 100644
index 0000000..b47367a
--- /dev/null
+++ b/linux/src/include/linux/random.h
@@ -0,0 +1,70 @@
+/*
+ * include/linux/random.h
+ *
+ * Include file for the random number generator.
+ */
+
+#ifndef _LINUX_RANDOM_H
+#define _LINUX_RANDOM_H
+
+#include <linux/ioctl.h>
+
+/* ioctl()'s for the random number generator */
+
+/* Get the entropy count. */
+#define RNDGETENTCNT _IOR( 'R', 0x00, int )
+
+/* Add to (or subtract from) the entropy count. (Superuser only.) */
+#define RNDADDTOENTCNT _IOW( 'R', 0x01, int )
+
+/* Get the contents of the entropy pool. (Superuser only.) */
+#define RNDGETPOOL _IOR( 'R', 0x02, int [2] )
+
+/*
+ * Write bytes into the entropy pool and add to the entropy count.
+ * (Superuser only.)
+ */
+#define RNDADDENTROPY _IOW( 'R', 0x03, int [2] )
+
+/* Clear entropy count to 0. (Superuser only.) */
+#define RNDZAPENTCNT _IO( 'R', 0x04 )
+
+/* Clear the entropy pool and associated counters. (Superuser only.) */
+#define RNDCLEARPOOL _IO( 'R', 0x06 )
+
+struct rand_pool_info {
+ int entropy_count;
+ int buf_size;
+ __u32 buf[0];
+};
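
These ioctls are issued against the random device node.  A minimal user-space
sketch that queries the entropy count (assuming the device is /dev/random):

/* User-space sketch: read the entropy estimate via RNDGETENTCNT. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int main(void)
{
	int entropy = 0;
	int fd = open("/dev/random", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/random");
		return 1;
	}
	if (ioctl(fd, RNDGETENTCNT, &entropy) < 0)
		perror("RNDGETENTCNT");
	else
		printf("entropy available: %d bits\n", entropy);
	close(fd);
	return 0;
}
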
+
+/* Exported functions */
+
+#ifdef __KERNEL__
+
+extern void rand_initialize(void);
+extern void rand_initialize_irq(int irq);
+extern void rand_initialize_blkdev(int irq, int mode);
+
+extern void add_keyboard_randomness(unsigned char scancode);
+extern void add_mouse_randomness(__u32 mouse_data);
+extern void add_interrupt_randomness(int irq);
+extern void add_blkdev_randomness(int major);
+
+extern void get_random_bytes(void *buf, int nbytes);
+
+extern __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr,
+ __u16 sport, __u16 dport);
+__u32 secure_tcp_probe_number(__u32 saddr, __u32 daddr,
+ __u16 sport, __u16 dport, __u32 sseq, int validate);
+
+__u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr,
+ __u16 sport, __u16 dport, __u32 sseq, __u32 count);
+
+#ifndef MODULE
+extern struct file_operations random_fops, urandom_fops;
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_RANDOM_H */
diff --git a/linux/src/include/linux/resource.h b/linux/src/include/linux/resource.h
new file mode 100644
index 0000000..f3bffbd
--- /dev/null
+++ b/linux/src/include/linux/resource.h
@@ -0,0 +1,60 @@
+#ifndef _LINUX_RESOURCE_H
+#define _LINUX_RESOURCE_H
+
+#include <linux/time.h>
+
+/*
+ * Resource control/accounting header file for linux
+ */
+
+/*
+ * Definition of struct rusage taken from BSD 4.3 Reno
+ *
+ * We don't support all of these yet, but we might as well have them....
+ * Otherwise, each time we add new items, programs which depend on this
+ * structure will lose. This reduces the chances of that happening.
+ */
+#define RUSAGE_SELF 0
+#define RUSAGE_CHILDREN (-1)
+#define RUSAGE_BOTH (-2) /* sys_wait4() uses this */
+
+struct rusage {
+ struct timeval ru_utime; /* user time used */
+ struct timeval ru_stime; /* system time used */
+ long ru_maxrss; /* maximum resident set size */
+ long ru_ixrss; /* integral shared memory size */
+ long ru_idrss; /* integral unshared data size */
+ long ru_isrss; /* integral unshared stack size */
+ long ru_minflt; /* page reclaims */
+ long ru_majflt; /* page faults */
+ long ru_nswap; /* swaps */
+ long ru_inblock; /* block input operations */
+ long ru_oublock; /* block output operations */
+ long ru_msgsnd; /* messages sent */
+ long ru_msgrcv; /* messages received */
+ long ru_nsignals; /* signals received */
+ long ru_nvcsw; /* voluntary context switches */
+ long ru_nivcsw; /* involuntary " */
+};
+
+#define RLIM_INFINITY ((long)(~0UL>>1))
+
+struct rlimit {
+ long rlim_cur;
+ long rlim_max;
+};
+
+#define PRIO_MIN (-20)
+#define PRIO_MAX 20
+
+#define PRIO_PROCESS 0
+#define PRIO_PGRP 1
+#define PRIO_USER 2
+
+/*
+ * Due to binary compatibility, the actual resource numbers
+ * may be different for different linux versions..
+ */
+#include <asm/resource.h>
+
+#endif
diff --git a/linux/src/include/linux/rose.h b/linux/src/include/linux/rose.h
new file mode 100644
index 0000000..9fb1efc
--- /dev/null
+++ b/linux/src/include/linux/rose.h
@@ -0,0 +1,88 @@
+/*
+ * These are the public elements of the Linux kernel Rose implementation.
+ * For kernel AX.25 see the file ax25.h. This file requires ax25.h for the
+ * definition of the ax25_address structure.
+ */
+
+#ifndef ROSE_KERNEL_H
+#define ROSE_KERNEL_H
+
+#define ROSE_MTU 251
+
+#define ROSE_MAX_DIGIS 6
+
+#define ROSE_DEFER 1
+#define ROSE_T1 2
+#define ROSE_T2 3
+#define ROSE_T3 4
+#define ROSE_IDLE 5
+#define ROSE_QBITINCL 6
+#define ROSE_HOLDBACK 7
+
+#define SIOCRSGCAUSE (SIOCPROTOPRIVATE+0)
+#define SIOCRSSCAUSE (SIOCPROTOPRIVATE+1)
+#define SIOCRSL2CALL (SIOCPROTOPRIVATE+2)
+#define SIOCRSSL2CALL (SIOCPROTOPRIVATE+2)
+#define SIOCRSACCEPT (SIOCPROTOPRIVATE+3)
+#define SIOCRSCLRRT (SIOCPROTOPRIVATE+4)
+#define SIOCRSGL2CALL (SIOCPROTOPRIVATE+5)
+#define SIOCRSGFACILITIES (SIOCPROTOPRIVATE+6)
+
+#define ROSE_DTE_ORIGINATED 0x00
+#define ROSE_NUMBER_BUSY 0x01
+#define ROSE_INVALID_FACILITY 0x03
+#define ROSE_NETWORK_CONGESTION 0x05
+#define ROSE_OUT_OF_ORDER 0x09
+#define ROSE_ACCESS_BARRED 0x0B
+#define ROSE_NOT_OBTAINABLE 0x0D
+#define ROSE_REMOTE_PROCEDURE 0x11
+#define ROSE_LOCAL_PROCEDURE 0x13
+#define ROSE_SHIP_ABSENT 0x39
+
+typedef struct {
+ char rose_addr[5];
+} rose_address;
+
+struct sockaddr_rose {
+ unsigned short srose_family;
+ rose_address srose_addr;
+ ax25_address srose_call;
+ unsigned int srose_ndigis;
+ ax25_address srose_digi;
+};
+
+struct full_sockaddr_rose {
+ unsigned short srose_family;
+ rose_address srose_addr;
+ ax25_address srose_call;
+ unsigned int srose_ndigis;
+ ax25_address srose_digis[ROSE_MAX_DIGIS];
+};
+
+struct rose_route_struct {
+ rose_address address;
+ unsigned short mask;
+ ax25_address neighbour;
+ char device[16];
+ unsigned char ndigis;
+ ax25_address digipeaters[AX25_MAX_DIGIS];
+};
+
+struct rose_cause_struct {
+ unsigned char cause;
+ unsigned char diagnostic;
+};
+
+struct rose_facilities_struct {
+ rose_address source_addr, dest_addr;
+ ax25_address source_call, dest_call;
+ unsigned char source_ndigis, dest_ndigis;
+ ax25_address source_digis[ROSE_MAX_DIGIS];
+ ax25_address dest_digis[ROSE_MAX_DIGIS];
+ unsigned int rand;
+ rose_address fail_addr;
+ ax25_address fail_call;
+};
+
+
+#endif
diff --git a/linux/src/include/linux/route.h b/linux/src/include/linux/route.h
new file mode 100644
index 0000000..8f210b6
--- /dev/null
+++ b/linux/src/include/linux/route.h
@@ -0,0 +1,79 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the IP router interface.
+ *
+ * Version: @(#)route.h 1.0.3 05/27/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * for the purposes of compatibility only.
+ *
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_ROUTE_H
+#define _LINUX_ROUTE_H
+
+#include <linux/if.h>
+
+
+/* This structure gets passed by the SIOCADDRT and SIOCDELRT calls. */
+struct rtentry
+{
+ unsigned long rt_hash; /* hash key for lookups */
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ short rt_flags;
+ short rt_refcnt;
+ unsigned long rt_use;
+ struct ifnet *rt_ifp;
+ short rt_metric; /* +1 for binary compatibility! */
+ char *rt_dev; /* forcing the device at add */
+ unsigned long rt_mss; /* per route MTU/Window */
+ unsigned long rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+};
+
+
+#define RTF_UP 0x0001 /* route usable */
+#define RTF_GATEWAY 0x0002 /* destination is a gateway */
+#define RTF_HOST 0x0004 /* host entry (net otherwise) */
+#define RTF_REINSTATE 0x0008 /* reinstate route after tmout */
+#define RTF_DYNAMIC 0x0010 /* created dyn. (by redirect) */
+#define RTF_MODIFIED 0x0020 /* modified dyn. (by redirect) */
+#define RTF_MSS 0x0040 /* specific MSS for this route */
+#define RTF_WINDOW 0x0080 /* per route window clamping */
+#define RTF_IRTT 0x0100 /* Initial round trip time */
+#define RTF_REJECT 0x0200 /* Reject route */
+#define RTF_NOTCACHED 0x0400 /* this route isn't cached */
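
A route is added from user space by filling an rtentry and issuing SIOCADDRT on any
socket.  The sketch below uses the user-space <net/route.h> view of the structure;
the gateway address is made up and the call needs superuser privileges:

/* User-space sketch: install a default route through a gateway. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/route.h>

int main(void)
{
	struct rtentry rt;
	struct sockaddr_in *sin;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&rt, 0, sizeof(rt));

	sin = (struct sockaddr_in *) &rt.rt_dst;	/* destination 0.0.0.0 */
	sin->sin_family = AF_INET;

	sin = (struct sockaddr_in *) &rt.rt_gateway;	/* example gateway */
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet_addr("192.168.1.1");

	sin = (struct sockaddr_in *) &rt.rt_genmask;	/* mask 0.0.0.0 */
	sin->sin_family = AF_INET;

	rt.rt_flags = RTF_UP | RTF_GATEWAY;

	if (ioctl(fd, SIOCADDRT, &rt) < 0)
		perror("SIOCADDRT");
	close(fd);
	return 0;
}
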
+
+/*
+ * This structure is passed from the kernel to user space by netlink
+ * routing/device announcements
+ */
+
+struct netlink_rtinfo
+{
+ unsigned long rtmsg_type;
+ struct sockaddr rtmsg_dst;
+ struct sockaddr rtmsg_gateway;
+ struct sockaddr rtmsg_genmask;
+ short rtmsg_flags;
+ short rtmsg_metric;
+ char rtmsg_device[16];
+};
+
+#define RTMSG_NEWROUTE 0x01
+#define RTMSG_DELROUTE 0x02
+#define RTMSG_NEWDEVICE 0x11
+#define RTMSG_DELDEVICE 0x12
+
+#endif /* _LINUX_ROUTE_H */
+
diff --git a/linux/src/include/linux/sched.h b/linux/src/include/linux/sched.h
new file mode 100644
index 0000000..523d4c4
--- /dev/null
+++ b/linux/src/include/linux/sched.h
@@ -0,0 +1,496 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+/*
+ * define DEBUG if you want the wait-queues to have some extra
+ * debugging code. It's not normally used, but might catch some
+ * wait-queue coding errors.
+ *
+ * #define DEBUG
+ */
+
+#include <asm/param.h> /* for HZ */
+
+extern unsigned long event;
+
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/tasks.h>
+#include <linux/kernel.h>
+
+#include <asm/system.h>
+#include <asm/semaphore.h>
+#include <asm/page.h>
+
+#include <linux/smp.h>
+#include <linux/tty.h>
+#include <linux/sem.h>
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+#define CLONE_VM 0x00000100 /* set if VM shared between processes */
+#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
+#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
+#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */
+#define CLONE_PID 0x00001000 /* set if pid shared */
+
+/*
+ * These are the constant used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
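
A standalone illustration of the fixed-point arithmetic described above, using the
constants from this header and an invented count of three runnable tasks:

/* Standalone demo of the CALC_LOAD() fixed-point decay. */
#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1 << FSHIFT)
#define EXP_1   1884			/* 1/exp(5sec/1min) in fixed point */

#define CALC_LOAD(load, exp, n) \
	load *= exp; \
	load += n * (FIXED_1 - exp); \
	load >>= FSHIFT;

int main(void)
{
	unsigned long avenrun = 0;		/* one-minute average, fixed point */
	unsigned long active = 3 * FIXED_1;	/* pretend 3 runnable tasks */
	int tick;

	for (tick = 1; tick <= 5; tick++) {
		CALC_LOAD(avenrun, EXP_1, active);
		printf("after update %d: %lu.%02lu\n", tick,
		       avenrun >> FSHIFT,
		       ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}
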
+
+#define CT_TO_SECS(x) ((x) / HZ)
+#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
+
+extern int nr_running, nr_tasks;
+extern int last_pid;
+
+#define FIRST_TASK task[0]
+#define LAST_TASK task[NR_TASKS-1]
+
+#include <linux/head.h>
+#include <linux/fs.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+
+#include <asm/processor.h>
+
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define TASK_ZOMBIE 3
+#define TASK_STOPPED 4
+#define TASK_SWAPPING 5
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+
+struct sched_param {
+ int sched_priority;
+};
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __KERNEL__
+
+extern void sched_init(void);
+extern void show_state(void);
+extern void trap_init(void);
+
+asmlinkage void schedule(void);
+
+/* Open file table structure */
+struct files_struct {
+ int count;
+ fd_set close_on_exec;
+ fd_set open_fds;
+ struct file * fd[NR_OPEN];
+};
+
+#define INIT_FILES { \
+ 1, \
+ { { 0, } }, \
+ { { 0, } }, \
+ { NULL, } \
+}
+
+struct fs_struct {
+ int count;
+ unsigned short umask;
+ struct inode * root, * pwd;
+};
+
+#define INIT_FS { \
+ 1, \
+ 0022, \
+ NULL, NULL \
+}
+
+struct mm_struct {
+ int count;
+ pgd_t * pgd;
+ unsigned long context;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack, start_mmap;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+ struct vm_area_struct * mmap;
+ struct vm_area_struct * mmap_avl;
+ struct semaphore mmap_sem;
+};
+
+#define INIT_MM { \
+ 1, \
+ swapper_pg_dir, \
+ 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, \
+ 0, \
+ &init_mmap, &init_mmap, MUTEX }
+
+struct signal_struct {
+ int count;
+ struct sigaction action[32];
+};
+
+#define INIT_SIGNALS { \
+ 1, \
+ { {0,}, } }
+
+struct task_struct {
+/* these are hardcoded - don't touch */
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ long counter;
+ long priority;
+ unsigned long signal;
+ unsigned long blocked; /* bitmap of masked signals */
+ unsigned long flags; /* per process flags, defined below */
+ int errno;
+ long debugreg[8]; /* Hardware debugging registers */
+ struct exec_domain *exec_domain;
+/* various fields */
+ struct linux_binfmt *binfmt;
+ struct task_struct *next_task, *prev_task;
+ struct task_struct *next_run, *prev_run;
+ unsigned long saved_kernel_stack;
+ unsigned long kernel_stack_page;
+ int exit_code, exit_signal;
+ /* ??? */
+ unsigned long personality;
+ int dumpable:1;
+ int did_exec:1;
+ /* shouldn't this be pid_t? */
+ int pid;
+ int pgrp;
+ int tty_old_pgrp;
+ int session;
+ /* boolean value for session group leader */
+ int leader;
+ int groups[NGROUPS];
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->p_pptr->pid)
+ */
+ struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
+ struct wait_queue *wait_chldexit; /* for wait4() */
+ unsigned short uid,euid,suid,fsuid;
+ unsigned short gid,egid,sgid,fsgid;
+ unsigned long timeout, policy, rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+ struct timer_list real_timer;
+ long utime, stime, cutime, cstime, start_time;
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
+ int swappable:1;
+ unsigned long swap_address;
+ unsigned long old_maj_flt; /* old value of maj_flt */
+ unsigned long dec_flt; /* page fault count of the last time */
+ unsigned long swap_cnt; /* number of pages to swap on next pass */
+/* limits */
+ struct rlimit rlim[RLIM_NLIMITS];
+ unsigned short used_math;
+ char comm[16];
+/* file system info */
+ int link_count;
+ struct tty_struct *tty; /* NULL if no tty */
+/* ipc stuff */
+ struct sem_undo *semundo;
+ struct sem_queue *semsleeping;
+/* ldt for this task - used by Wine. If NULL, default_ldt is used */
+ struct desc_struct *ldt;
+/* tss for this task */
+ struct thread_struct tss;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* memory management info */
+ struct mm_struct *mm;
+/* signal handlers */
+ struct signal_struct *sig;
+#ifdef __SMP__
+ int processor;
+ int last_processor;
+ int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+#endif
+};
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+#define PF_PTRACED 0x00000010 /* set if ptrace (0) has been called. */
+#define PF_TRACESYS 0x00000020 /* tracing system calls */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+
+#define PF_STARTING 0x00000002 /* being created */
+#define PF_EXITING 0x00000004 /* getting shut down */
+
+#define PF_USEDFPU 0x00100000 /* Process used the FPU this quantum (SMP only) */
+#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k) */
+
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed. 8MB seems reasonable.
+ */
+#define _STK_LIM (8*1024*1024)
+
+#define DEF_PRIORITY (20*HZ/100) /* 200 ms time slices */
+
+/*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK \
+/* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \
+/* debugregs */ { 0, }, \
+/* exec domain */&default_exec_domain, \
+/* binfmt */ NULL, \
+/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* stack */ 0,(unsigned long) &init_kernel_stack, \
+/* ec,brk... */ 0,0,0,0,0, \
+/* pid etc.. */ 0,0,0,0,0, \
+/* suppl grps*/ {NOGROUP,}, \
+/* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \
+/* uid etc */ 0,0,0,0,0,0,0,0, \
+/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \
+/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
+/* utime */ 0,0,0,0,0, \
+/* flt */ 0,0,0,0,0,0, \
+/* swp */ 0,0,0,0,0, \
+/* rlimits */ INIT_RLIMITS, \
+/* math */ 0, \
+/* comm */ "swapper", \
+/* fs info */ 0,NULL, \
+/* ipc */ NULL, NULL, \
+/* ldt */ NULL, \
+/* tss */ INIT_TSS, \
+/* fs */ &init_fs, \
+/* files */ &init_files, \
+/* mm */ &init_mm, \
+/* signals */ &init_signals, \
+}
+
+extern struct mm_struct init_mm;
+extern struct task_struct init_task;
+extern struct task_struct *task[NR_TASKS];
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *current_set[NR_CPUS];
+/*
+ * On a single processor system this comes out as current_set[0] when cpp
+ * has finished with it, which gcc will optimise away.
+ */
+#define current (0+current_set[smp_processor_id()]) /* Current on this processor */
+extern unsigned long volatile jiffies;
+extern unsigned long itimer_ticks;
+extern unsigned long itimer_next;
+extern struct timeval xtime;
+extern int need_resched;
+extern void do_timer(struct pt_regs *);
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+extern int securelevel; /* system security level */
+
+#define CURRENT_TIME (xtime.tv_sec)
+
+extern void sleep_on(struct wait_queue ** p);
+extern void interruptible_sleep_on(struct wait_queue ** p);
+extern void wake_up(struct wait_queue ** p);
+extern void wake_up_interruptible(struct wait_queue ** p);
+extern void wake_up_process(struct task_struct * tsk);
+
+extern void notify_parent(struct task_struct * tsk, int signal);
+extern void force_sig(unsigned long sig,struct task_struct * p);
+extern int send_sig(unsigned long sig,struct task_struct * p,int priv);
+extern int in_group_p(gid_t grp);
+
+extern int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id);
+extern void free_irq(unsigned int irq, void *dev_id);
+
+/*
+ * This has now become a routine instead of a macro, it sets a flag if
+ * it returns true (to do BSD-style accounting where the process is flagged
+ * if it uses root privs). The implication of this is that you should do
+ * normal permissions checks first, and check suser() last.
+ */
+extern inline int suser(void)
+{
+ if (current->euid == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
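
A typical caller performs its ordinary ownership test first and only falls back on
suser(), so that root privilege is recorded only when it was actually relied on.  A
minimal kernel-side sketch (the inode field comes from <linux/fs.h>; the function
itself is invented):

/* Sketch: check the cheap condition first, suser() last. */
static int may_modify(struct inode *inode)
{
	if (current->fsuid == inode->i_uid)
		return 1;
	return suser();		/* sets PF_SUPERPRIV only when we rely on root */
}
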
+
+extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_mm(struct task_struct *);
+extern void exit_fs(struct task_struct *);
+extern void exit_files(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void release_thread(struct task_struct *);
+
+extern int do_execve(char *, char **, char **, struct pt_regs *);
+extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
+
+
+/* See if we have a valid user level fd.
+ * If it makes sense, return the file structure it references.
+ * Otherwise return NULL.
+ */
+extern inline struct file *file_from_fd(const unsigned int fd)
+{
+
+ if (fd >= NR_OPEN)
+ return NULL;
+ /* either valid or null */
+ return current->files->fd[fd];
+}
+
+/*
+ * The wait-queues are circular lists, and you have to be *very* sure
+ * to keep them correct. Use only these two functions to add/remove
+ * entries in the queues.
+ */
+extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue *head = *p;
+ struct wait_queue *next = WAIT_QUEUE_HEAD(p);
+
+ if (head)
+ next = head;
+ *p = wait;
+ wait->next = next;
+}
+
+extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __add_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue * next = wait->next;
+ struct wait_queue * head = next;
+
+ for (;;) {
+ struct wait_queue * nextlist = head->next;
+ if (nextlist == wait)
+ break;
+ head = nextlist;
+ }
+ head->next = next;
+}
+
+extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __remove_wait_queue(p, wait);
+ restore_flags(flags);
+}
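
These helpers are normally wrapped in the classic interruptible sleep loop; a
condensed kernel-side sketch, with the queue, the condition flag and the function
names all invented for illustration:

/* Kernel-side sketch of the usual wait-queue sleep pattern. */
static struct wait_queue *mydrv_wait = NULL;
static volatile int mydrv_ready = 0;

static void mydrv_wait_for_data(void)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&mydrv_wait, &wait);
	while (!mydrv_ready) {
		current->state = TASK_INTERRUPTIBLE;
		if (current->signal & ~current->blocked)
			break;			/* a signal is pending */
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&mydrv_wait, &wait);
}

/* The interrupt-handler side would then do:
 *	mydrv_ready = 1;
 *	wake_up_interruptible(&mydrv_wait);
 */
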
+
+extern inline void select_wait(struct wait_queue ** wait_address, select_table * p)
+{
+ struct select_table_entry * entry;
+
+ if (!p || !wait_address)
+ return;
+ if (p->nr >= __MAX_SELECT_TABLE_ENTRIES)
+ return;
+ entry = p->entry + p->nr;
+ entry->wait_address = wait_address;
+ entry->wait.task = current;
+ entry->wait.next = NULL;
+ add_wait_queue(wait_address,&entry->wait);
+ p->nr++;
+}
+
+#define REMOVE_LINKS(p) do { unsigned long flags; \
+ save_flags(flags) ; cli(); \
+ (p)->next_task->prev_task = (p)->prev_task; \
+ (p)->prev_task->next_task = (p)->next_task; \
+ restore_flags(flags); \
+ if ((p)->p_osptr) \
+ (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
+ if ((p)->p_ysptr) \
+ (p)->p_ysptr->p_osptr = (p)->p_osptr; \
+ else \
+ (p)->p_pptr->p_cptr = (p)->p_osptr; \
+ } while (0)
+
+#define SET_LINKS(p) do { unsigned long flags; \
+ save_flags(flags); cli(); \
+ (p)->next_task = &init_task; \
+ (p)->prev_task = init_task.prev_task; \
+ init_task.prev_task->next_task = (p); \
+ init_task.prev_task = (p); \
+ restore_flags(flags); \
+ (p)->p_ysptr = NULL; \
+ if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
+ (p)->p_osptr->p_ysptr = p; \
+ (p)->p_pptr->p_cptr = p; \
+ } while (0)
+
+#define for_each_task(p) \
+ for (p = &init_task ; (p = p->next_task) != &init_task ; )
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/src/include/linux/sem.h b/linux/src/include/linux/sem.h
new file mode 100644
index 0000000..0eb1d02
--- /dev/null
+++ b/linux/src/include/linux/sem.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_SEM_H
+#define _LINUX_SEM_H
+#include <linux/ipc.h>
+
+/* semop flags */
+#define SEM_UNDO 0x1000 /* undo the operation on exit */
+
+/* semctl Command Definitions. */
+#define GETPID 11 /* get sempid */
+#define GETVAL 12 /* get semval */
+#define GETALL 13 /* get all semval's */
+#define GETNCNT 14 /* get semncnt */
+#define GETZCNT 15 /* get semzcnt */
+#define SETVAL 16 /* set semval */
+#define SETALL 17 /* set all semval's */
+
+/* One semid data structure for each set of semaphores in the system. */
+struct semid_ds {
+ struct ipc_perm sem_perm; /* permissions .. see ipc.h */
+ time_t sem_otime; /* last semop time */
+ time_t sem_ctime; /* last change time */
+ struct sem *sem_base; /* ptr to first semaphore in array */
+ struct sem_queue *sem_pending; /* pending operations to be processed */
+ struct sem_queue **sem_pending_last; /* last pending operation */
+ struct sem_undo *undo; /* undo requests on this array */
+ ushort sem_nsems; /* no. of semaphores in array */
+};
+
+/* semop system calls takes an array of these. */
+struct sembuf {
+ ushort sem_num; /* semaphore index in array */
+ short sem_op; /* semaphore operation */
+ short sem_flg; /* operation flags */
+};
+
+/* arg for semctl system calls. */
+union semun {
+ int val; /* value for SETVAL */
+ struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */
+ ushort *array; /* array for GETALL & SETALL */
+ struct seminfo *__buf; /* buffer for IPC_INFO */
+ void *__pad;
+};
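
From user space the matching calls are semget/semctl/semop.  A minimal sketch that
creates one semaphore, initialises it to 1 and performs a P operation; note that
glibc deliberately leaves union semun for the application to define:

/* User-space sketch: create and use one System V semaphore. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; struct semid_ds *buf; unsigned short *array; };

int main(void)
{
	union semun arg;
	struct sembuf op = { 0, -1, SEM_UNDO };	/* P (wait) on semaphore 0 */
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0) { perror("semget"); return 1; }

	arg.val = 1;				/* one available resource */
	if (semctl(id, 0, SETVAL, arg) < 0) { perror("semctl"); return 1; }

	if (semop(id, &op, 1) < 0)		/* acquire it */
		perror("semop");

	semctl(id, 0, IPC_RMID);		/* clean up the set */
	return 0;
}
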
+
+struct seminfo {
+ int semmap;
+ int semmni;
+ int semmns;
+ int semmnu;
+ int semmsl;
+ int semopm;
+ int semume;
+ int semusz;
+ int semvmx;
+ int semaem;
+};
+
+#define SEMMNI 128 /* ? max # of semaphore identifiers */
+#define SEMMSL 32 /* <= 512 max num of semaphores per id */
+#define SEMMNS (SEMMNI*SEMMSL) /* ? max # of semaphores in system */
+#define SEMOPM 32 /* ~ 100 max num of ops per semop call */
+#define SEMVMX 32767 /* semaphore maximum value */
+
+/* unused */
+#define SEMUME SEMOPM /* max num of undo entries per process */
+#define SEMMNU SEMMNS /* num of undo structures system wide */
+#define SEMAEM (SEMVMX >> 1) /* adjust on exit max value */
+#define SEMMAP SEMMNS /* # of entries in semaphore map */
+#define SEMUSZ 20 /* sizeof struct sem_undo */
+
+#ifdef __KERNEL__
+
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+ short semval; /* current value */
+ short sempid; /* pid of last operation */
+};
+
+/* ipcs ctl cmds */
+#define SEM_STAT 18
+#define SEM_INFO 19
+
+/* One queue for each semaphore set in the system. */
+struct sem_queue {
+ struct sem_queue * next; /* next entry in the queue */
+ struct sem_queue ** prev; /* previous entry in the queue, *(q->prev) == q */
+ struct wait_queue * sleeper; /* sleeping process */
+ struct sem_undo * undo; /* undo structure */
+ int pid; /* process id of requesting process */
+ int status; /* completion status of operation */
+ struct semid_ds * sma; /* semaphore array for operations */
+ struct sembuf * sops; /* array of pending operations */
+ int nsops; /* number of operations */
+};
+
+/* Each task has a list of undo requests. They are executed automatically
+ * when the process exits.
+ */
+struct sem_undo {
+ struct sem_undo * proc_next; /* next entry on this process */
+ struct sem_undo * id_next; /* next entry on this semaphore set */
+ int semid; /* semaphore set identifier */
+ short * semadj; /* array of adjustments, one per semaphore */
+};
+
+asmlinkage int sys_semget (key_t key, int nsems, int semflg);
+asmlinkage int sys_semop (int semid, struct sembuf *sops, unsigned nsops);
+asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SEM_H */
diff --git a/linux/src/include/linux/signal.h b/linux/src/include/linux/signal.h
new file mode 100644
index 0000000..9d1afa9
--- /dev/null
+++ b/linux/src/include/linux/signal.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SIGNAL_H
+#define _LINUX_SIGNAL_H
+
+#include <asm/signal.h>
+
+#endif
diff --git a/linux/src/include/linux/skbuff.h b/linux/src/include/linux/skbuff.h
new file mode 100644
index 0000000..e4c77b4
--- /dev/null
+++ b/linux/src/include/linux/skbuff.h
@@ -0,0 +1,467 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/time.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+
+#define CONFIG_SKB_CHECK 0
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+
+
+#define FREE_READ 1
+#define FREE_WRITE 0
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+struct sk_buff_head
+{
+ struct sk_buff * next;
+ struct sk_buff * prev;
+ __u32 qlen; /* Must be same length as a pointer
+ so the list head can be checked like an sk_buff when debugging */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+};
+
+
+struct sk_buff
+{
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+ struct sk_buff_head * list; /* List we are on */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+ struct sk_buff *link3; /* Link for IP protocol level buffer chains */
+ struct sock *sk; /* Socket we are owned by */
+ unsigned long when; /* used to compute rtt's */
+ struct timeval stamp; /* Time we arrived */
+ struct device *dev; /* Device we arrived on/are leaving by */
+ union
+ {
+ struct tcphdr *th;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ unsigned char *raw;
+ /* for passing file handles in a unix domain socket */
+ void *filp;
+ } h;
+
+ union
+ {
+ /* As yet incomplete physical layer views */
+ unsigned char *raw;
+ struct ethhdr *ethernet;
+ } mac;
+
+ struct iphdr *ip_hdr; /* For IPPROTO_RAW */
+ unsigned long len; /* Length of actual data */
+ unsigned long csum; /* Checksum */
+ __u32 saddr; /* IP source address */
+ __u32 daddr; /* IP target address */
+ __u32 raddr; /* IP next hop address */
+ __u32 seq; /* TCP sequence number */
+ __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
+ __u32 ack_seq; /* TCP ack sequence number */
+ unsigned char proto_priv[16]; /* Protocol private data */
+ volatile char acked, /* Are we acked ? */
+ used, /* Are we in use ? */
+ free, /* How to free this buffer */
+ arp; /* Has IP/ARP resolution finished */
+ unsigned char tries, /* Times tried */
+ lock, /* Are we locked ? */
+ localroute, /* Local routing asserted for this frame */
+ pkt_type, /* Packet class */
+ pkt_bridged, /* Tracker for bridging */
+ ip_summed; /* Driver fed us an IP checksum */
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+ unsigned short users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned int truesize; /* Buffer size */
+
+ atomic_t count; /* reference count */
+ struct sk_buff *data_skb; /* Link to the actual data skb */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+ __u16 redirport; /* Redirect port */
+
+ /*
+ * Keep this at the end so we won't break stuff.
+ */
+#if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
+ __u32 shapelatency; /* Latency on frame */
+ __u32 shapeclock; /* Time it should go out */
+ __u32 shapelen; /* Frame length in clocks */
+ __u32 shapestamp; /* Stamp for shaper */
+ __u16 shapepend; /* Pending */
+#endif
+};
+
+#ifdef CONFIG_SKB_LARGE
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+#else
+#define SK_WMEM_MAX 32767
+#define SK_RMEM_MAX 32767
+#endif
+
+#if CONFIG_SKB_CHECK
+#define SK_FREED_SKB 0x0DE2C0DE
+#define SK_GOOD_SKB 0xDEC0DED1
+#define SK_HEAD_SKB 0x12231298
+#endif
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/malloc.h>
+
+#include <asm/system.h>
+
+#if 0
+extern void print_skb(struct sk_buff *);
+#endif
+extern void kfree_skb(struct sk_buff *skb, int rw);
+extern void skb_queue_head_init(struct sk_buff_head *list);
+extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
+extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
+extern struct sk_buff * skb_dequeue(struct sk_buff_head *list);
+extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_append(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_unlink(struct sk_buff *buf);
+extern __u32 skb_queue_len(struct sk_buff_head *list);
+extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * dev_alloc_skb(unsigned int size);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
+extern void skb_device_lock(struct sk_buff *skb);
+extern void skb_device_unlock(struct sk_buff *skb);
+extern void dev_kfree_skb(struct sk_buff *skb, int mode);
+extern int skb_device_locked(struct sk_buff *skb);
+extern unsigned char * skb_put(struct sk_buff *skb, int len);
+extern unsigned char * skb_push(struct sk_buff *skb, int len);
+extern unsigned char * skb_pull(struct sk_buff *skb, int len);
+extern int skb_headroom(struct sk_buff *skb);
+extern int skb_tailroom(struct sk_buff *skb);
+extern void skb_reserve(struct sk_buff *skb, int len);
+extern void skb_trim(struct sk_buff *skb, int len);
+
+extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/*
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. On an interrupt-driven
+ * system: cli(), peek the buffer, copy the data, then sti();
+ */
+extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/*
+ * Return the length of an sk_buff queue
+ */
+
+extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+#if CONFIG_SKB_CHECK
+extern int skb_check(struct sk_buff *skb,int,int, char *);
+#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__)
+#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__)
+#else
+#define IS_SKB(skb)
+#define IS_SKB_HEAD(skb)
+
+extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_head(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+
+extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_tail(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list.
+ */
+
+extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ save_flags(flags);
+ cli();
+ result = __skb_dequeue(list);
+ restore_flags(flags);
+ return result;
+}
+
+/*
+ * Insert a packet on a list.
+ */
+
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/*
+ * Place a packet before a given packet in a list
+ */
+extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old->prev, old, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old, old->next, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_.
+ */
+
+extern __inline__ void skb_unlink(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if(skb->list)
+ __skb_unlink(skb, skb->list);
+ restore_flags(flags);
+}
+
+/*
+ * Add data to an sk_buff
+ */
+
+extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end)
+ {
+ panic("skput:over: %d", len);
+ }
+ return tmp;
+}
+
+extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head)
+ {
+ panic("skpush:under: %d", len);
+ }
+ return skb->data;
+}
+
+extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len)
+{
+ if(len > skb->len)
+ return NULL;
+ skb->data+=len;
+ skb->len-=len;
+ return skb->data;
+}
+
+extern __inline__ int skb_headroom(struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+extern __inline__ int skb_tailroom(struct sk_buff *skb)
+{
+ return skb->end-skb->tail;
+}
+
+extern __inline__ void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+
+extern __inline__ void skb_trim(struct sk_buff *skb, int len)
+{
+ if(skb->len>len)
+ {
+ skb->len=len;
+ skb->tail=skb->data+len;
+ }
+}
+
+#endif
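
A driver's receive path usually combines these helpers as below.  Kernel-side
sketch only: eth_type_trans() and netif_rx() are assumed from the etherdevice and
netdevice headers, and the function itself is invented:

/* Kernel-side sketch: queue a received Ethernet frame to the stack. */
static void mydrv_rx(struct device *dev, unsigned char *frame, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (skb == NULL)
		return;				/* drop on allocation failure */
	skb_reserve(skb, 2);			/* align the IP header */
	memcpy(skb_put(skb, len), frame, len);	/* skb_put grows the data area */
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* hand the buffer to the stack */
}
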
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern int datagram_select(struct sock *sk, int sel_type, select_table *wait);
+extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
+extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux/src/include/linux/smp.h b/linux/src/include/linux/smp.h
new file mode 100644
index 0000000..72984f1
--- /dev/null
+++ b/linux/src/include/linux/smp.h
@@ -0,0 +1,54 @@
+#ifndef __LINUX_SMP_H
+#define __LINUX_SMP_H
+
+/*
+ * Generic SMP support
+ * Alan Cox. <alan@cymru.net>
+ */
+
+#ifdef __SMP__
+#include <asm/smp.h>
+
+extern void smp_message_pass(int target, int msg, unsigned long data, int wait);
+extern void smp_boot_cpus(void); /* Boot processor call to load the other CPU's */
+extern void smp_callin(void); /* Processor call in. Must hold processors until .. */
+extern void smp_commence(void); /* Multiprocessors may now schedule */
+extern int smp_num_cpus;
+extern int smp_threads_ready; /* True once the per process idle is forked */
+#ifdef __SMP_PROF__
+extern volatile unsigned long smp_spins[NR_CPUS]; /* count of interrupt spins */
+extern volatile unsigned long smp_spins_sys_idle[]; /* count of idle spins */
+extern volatile unsigned long smp_spins_syscall[]; /* count of syscall spins */
+extern volatile unsigned long smp_spins_syscall_cur[]; /* count of syscall spins for the current
+ call */
+extern volatile unsigned long smp_idle_count[1+NR_CPUS];/* count idle ticks */
+extern volatile unsigned long smp_idle_map; /* map with idle cpus */
+#else
+extern volatile unsigned long smp_spins;
+#endif
+
+
+extern volatile unsigned long smp_msg_data;
+extern volatile int smp_src_cpu;
+extern volatile int smp_msg_id;
+
+#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
+#define MSG_ALL 0x8001
+
+#define MSG_INVALIDATE_TLB 0x0001 /* Remote processor TLB invalidate */
+#define MSG_STOP_CPU 0x0002 /* Sent to shut down slave CPU's when rebooting */
+#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU */
+
+#else
+
+/*
+ * These macros fold the SMP functionality into a single CPU system
+ */
+
+#define smp_num_cpus 1
+#define smp_processor_id() 0
+#define smp_message_pass(t,m,d,w)
+#define smp_threads_ready 1
+#define kernel_lock()
+#endif
+#endif
diff --git a/linux/src/include/linux/socket.h b/linux/src/include/linux/socket.h
new file mode 100644
index 0000000..96c04ec
--- /dev/null
+++ b/linux/src/include/linux/socket.h
@@ -0,0 +1,147 @@
+#ifndef _LINUX_SOCKET_H
+#define _LINUX_SOCKET_H
+
+#include <asm/socket.h> /* arch-dependent defines */
+#include <linux/sockios.h> /* the SIOCxxx I/O controls */
+#include <linux/uio.h> /* iovec support */
+
+struct sockaddr
+{
+ unsigned short sa_family; /* address family, AF_xxx */
+ char sa_data[14]; /* 14 bytes of protocol address */
+};
+
+struct linger {
+ int l_onoff; /* Linger active */
+ int l_linger; /* How long to linger for */
+};
+
+/*
+ * Since we use 4.4BSD-style message passing rather than 4.3BSD, the
+ * msg_accrights(len) fields are now missing.  Compatibility for them
+ * belongs in an obscure libc emulation, not here.
+ */
+
+struct msghdr
+{
+ void * msg_name; /* Socket name */
+ int msg_namelen; /* Length of name */
+ struct iovec * msg_iov; /* Data blocks */
+ int msg_iovlen; /* Number of blocks */
+ void * msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
+ int msg_controllen; /* Length of rights list */
+ int msg_flags; /* 4.4 BSD item we don't use */
+};
+
+/* Control Messages */
+
+#define SCM_RIGHTS 1
+
+/* Socket types. */
+#define SOCK_STREAM 1 /* stream (connection) socket */
+#define SOCK_DGRAM 2 /* datagram (conn.less) socket */
+#define SOCK_RAW 3 /* raw socket */
+#define SOCK_RDM 4 /* reliably-delivered message */
+#define SOCK_SEQPACKET 5 /* sequential packet socket */
+#define SOCK_PACKET 10 /* linux specific way of */
+ /* getting packets at the dev */
+ /* level. For writing rarp and */
+ /* other similar things on the */
+ /* user level. */
+
+/* Supported address families. */
+#define AF_UNSPEC 0
+#define AF_UNIX 1 /* Unix domain sockets */
+#define AF_INET 2 /* Internet IP Protocol */
+#define AF_AX25 3 /* Amateur Radio AX.25 */
+#define AF_IPX 4 /* Novell IPX */
+#define AF_APPLETALK 5 /* Appletalk DDP */
+#define AF_NETROM 6 /* Amateur radio NetROM */
+#define AF_BRIDGE 7 /* Multiprotocol bridge */
+#define AF_AAL5 8 /* Reserved for Werner's ATM */
+#define AF_X25 9 /* Reserved for X.25 project */
+#ifdef LINUX_2_1_X
+#define AF_INET6 10 /* IP version 6 */
+#endif
+#define AF_ROSE 11 /* Amateur Radio X.25 PLP */
+#define AF_MAX 13 /* For now.. */
+#define AF_PACKET 17 /* Forward compat hook */
+
+/* Protocol families, same as address families. */
+#define PF_UNSPEC AF_UNSPEC
+#define PF_UNIX AF_UNIX
+#define PF_INET AF_INET
+#define PF_AX25 AF_AX25
+#define PF_IPX AF_IPX
+#define PF_APPLETALK AF_APPLETALK
+#define PF_NETROM AF_NETROM
+#define PF_BRIDGE AF_BRIDGE
+#define PF_AAL5 AF_AAL5
+#define PF_X25 AF_X25
+#ifdef LINUX_2_1_X
+#define PF_INET6 AF_INET6
+#endif
+#define PF_ROSE AF_ROSE
+#define PF_MAX AF_MAX
+#define PF_PACKET AF_PACKET
+/* Maximum queue length specifiable by listen. */
+#define SOMAXCONN 128
+
+/* Flags we can use with send and recv. */
+#define MSG_OOB 1
+#define MSG_PEEK 2
+#define MSG_DONTROUTE 4
+/*#define MSG_CTRUNC 8 - We need to support this for BSD oddments */
+#define MSG_PROXY 16 /* Supply or ask second address. */
+
+/* setsockopt(2) level. Thanks to BSD these must match IPPROTO_xxx */
+#define SOL_IP 0
+#define SOL_IPX 256
+#define SOL_AX25 257
+#define SOL_ATALK 258
+#define SOL_NETROM 259
+#define SOL_ROSE 260
+#define SOL_TCP 6
+#define SOL_UDP 17
+
+/* IP options */
+#define IP_TOS 1
+#define IPTOS_LOWDELAY 0x10
+#define IPTOS_THROUGHPUT 0x08
+#define IPTOS_RELIABILITY 0x04
+#define IPTOS_MINCOST 0x02
+#define IP_TTL 2
+#define IP_HDRINCL 3
+#define IP_OPTIONS 4
+
+#define IP_MULTICAST_IF 32
+#define IP_MULTICAST_TTL 33
+#define IP_MULTICAST_LOOP 34
+#define IP_ADD_MEMBERSHIP 35
+#define IP_DROP_MEMBERSHIP 36
+
+/* These need to appear somewhere around here */
+#define IP_DEFAULT_MULTICAST_TTL 1
+#define IP_DEFAULT_MULTICAST_LOOP 1
+#define IP_MAX_MEMBERSHIPS 20
+
+/* IPX options */
+#define IPX_TYPE 1
+
+/* TCP options - this way around because someone left a set in the c library includes */
+#define TCP_NODELAY 1
+#define TCP_MAXSEG 2
+
+/* The various priorities. */
+#define SOPRI_INTERACTIVE 0
+#define SOPRI_NORMAL 1
+#define SOPRI_BACKGROUND 2
+
+#ifdef __KERNEL__
+extern void memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode);
+extern void memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
+extern int move_addr_to_user(void *kaddr, int klen, void *uaddr, int *ulen);
+extern int move_addr_to_kernel(void *uaddr, int ulen, void *kaddr);
+#endif
+#endif /* _LINUX_SOCKET_H */
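Since msg_iov describes scattered buffers, a protocol's send path usually flattens it with the helper declared above before building a packet. A hedged sketch, where example_collect and the length handling are purely illustrative:

/* Hedged sketch: flatten a scatter/gather msghdr into one kernel buffer. */
static int example_collect(struct msghdr *msg, unsigned char *kbuf, int maxlen)
{
	int i, total = 0;

	for (i = 0; i < msg->msg_iovlen; i++)
		total += msg->msg_iov[i].iov_len;
	if (total > maxlen)
		return -1;		/* caller's buffer is too small */
	memcpy_fromiovec(kbuf, msg->msg_iov, total);
	return total;
}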
diff --git a/linux/src/include/linux/sockios.h b/linux/src/include/linux/sockios.h
new file mode 100644
index 0000000..12a8ae4
--- /dev/null
+++ b/linux/src/include/linux/sockios.h
@@ -0,0 +1,98 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the socket-level I/O control calls.
+ *
+ * Version: @(#)sockios.h 1.0.2 03/09/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_SOCKIOS_H
+#define _LINUX_SOCKIOS_H
+
+#include <asm/sockios.h>
+
+/* Routing table calls. */
+#define SIOCADDRT 0x890B /* add routing table entry */
+#define SIOCDELRT 0x890C /* delete routing table entry */
+
+/* Socket configuration controls. */
+#define SIOCGIFNAME 0x8910 /* get iface name */
+#define SIOCSIFLINK 0x8911 /* set iface channel */
+#define SIOCGIFCONF 0x8912 /* get iface list */
+#define SIOCGIFFLAGS 0x8913 /* get flags */
+#define SIOCSIFFLAGS 0x8914 /* set flags */
+#define SIOCGIFADDR 0x8915 /* get PA address */
+#define SIOCSIFADDR 0x8916 /* set PA address */
+#define SIOCGIFDSTADDR 0x8917 /* get remote PA address */
+#define SIOCSIFDSTADDR 0x8918 /* set remote PA address */
+#define SIOCGIFBRDADDR 0x8919 /* get broadcast PA address */
+#define SIOCSIFBRDADDR 0x891a /* set broadcast PA address */
+#define SIOCGIFNETMASK 0x891b /* get network PA mask */
+#define SIOCSIFNETMASK 0x891c /* set network PA mask */
+#define SIOCGIFMETRIC 0x891d /* get metric */
+#define SIOCSIFMETRIC 0x891e /* set metric */
+#define SIOCGIFMEM 0x891f /* get memory address (BSD) */
+#define SIOCSIFMEM 0x8920 /* set memory address (BSD) */
+#define SIOCGIFMTU 0x8921 /* get MTU size */
+#define SIOCSIFMTU 0x8922 /* set MTU size */
+#define SIOCSIFHWADDR 0x8924 /* set hardware address (NI) */
+#define SIOCGIFENCAP 0x8925 /* get/set slip encapsulation */
+#define SIOCSIFENCAP 0x8926
+#define SIOCGIFHWADDR 0x8927 /* Get hardware address */
+#define SIOCGIFSLAVE 0x8929 /* Driver slaving support */
+#define SIOCSIFSLAVE 0x8930
+#define SIOCADDMULTI 0x8931 /* Multicast address lists */
+#define SIOCDELMULTI 0x8932
+
+#define SIOCGIFBR 0x8940 /* Bridging support */
+#define SIOCSIFBR 0x8941 /* Set bridging options */
+
+/* ARP cache control calls. */
+#define OLD_SIOCDARP 0x8950 /* old delete ARP table entry */
+#define OLD_SIOCGARP 0x8951 /* old get ARP table entry */
+#define OLD_SIOCSARP 0x8952 /* old set ARP table entry */
+#define SIOCDARP 0x8953 /* delete ARP table entry */
+#define SIOCGARP 0x8954 /* get ARP table entry */
+#define SIOCSARP 0x8955 /* set ARP table entry */
+
+/* RARP cache control calls. */
+#define SIOCDRARP 0x8960 /* delete RARP table entry */
+#define SIOCGRARP 0x8961 /* get RARP table entry */
+#define SIOCSRARP 0x8962 /* set RARP table entry */
+
+/* Driver configuration calls */
+
+#define SIOCGIFMAP 0x8970 /* Get device parameters */
+#define SIOCSIFMAP 0x8971 /* Set device parameters */
+
+/* DLCI configuration calls */
+
+#define SIOCADDDLCI 0x8980 /* Create new DLCI device */
+#define SIOCDELDLCI 0x8981 /* Delete DLCI device */
+
+/* Device private ioctl calls */
+
+/*
+ * These 16 ioctls are available to devices via the do_ioctl() device
+ * vector. Each device should include this file and redefine these names
+ * as their own. Because these are device dependent it is a good idea
+ * _NOT_ to issue them to random objects and hope.
+ */
+
+#define SIOCDEVPRIVATE 0x89F0 /* to 89FF */
+
+/*
+ * These 16 ioctl calls are protocol private
+ */
+
+#define SIOCPROTOPRIVATE 0x89E0 /* to 89EF */
+#endif /* _LINUX_SOCKIOS_H */
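The private range is meant to be re-labelled by each driver and dispatched from its do_ioctl hook (declared in netdevice.h). A hedged sketch of the pattern; every mydev_* name and both command numbers are illustrative:

#define MYDEV_GET_DEBUG	(SIOCDEVPRIVATE + 0)	/* illustrative private commands */
#define MYDEV_SET_DEBUG	(SIOCDEVPRIVATE + 1)

static int mydev_debug;

static int mydev_do_ioctl(struct device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case MYDEV_GET_DEBUG:
		ifr->ifr_data = (void *) (long) mydev_debug;
		return 0;
	case MYDEV_SET_DEBUG:
		mydev_debug = (int) (long) ifr->ifr_data;
		return 0;
	default:
		return -EOPNOTSUPP;	/* not one of this device's commands */
	}
}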
diff --git a/linux/src/include/linux/spinlock.h b/linux/src/include/linux/spinlock.h
new file mode 100644
index 0000000..790ac18
--- /dev/null
+++ b/linux/src/include/linux/spinlock.h
@@ -0,0 +1,4 @@
+#ifndef __LINUX_SPINLOCK_H
+#define __LINUX_SPINLOCK_H
+#include <asm/spinlock.h>
+#endif
diff --git a/linux/src/include/linux/stat.h b/linux/src/include/linux/stat.h
new file mode 100644
index 0000000..d86b164
--- /dev/null
+++ b/linux/src/include/linux/stat.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_STAT_H
+#define _LINUX_STAT_H
+
+#ifdef __KERNEL__
+
+#include <asm/stat.h>
+
+#endif
+
+#define S_IFMT 00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK 0120000
+#define S_IFREG 0100000
+#define S_IFBLK 0060000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFIFO 0010000
+#define S_ISUID 0004000
+#define S_ISGID 0002000
+#define S_ISVTX 0001000
+
+#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#ifdef __KERNEL__
+#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
+#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
+#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
+#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
+#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
+#endif
+
+#endif
diff --git a/linux/src/include/linux/stddef.h b/linux/src/include/linux/stddef.h
new file mode 100644
index 0000000..488d49c
--- /dev/null
+++ b/linux/src/include/linux/stddef.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef unsigned long size_t;
+#endif
+
+#undef NULL
+#define NULL ((void *)0)
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+#endif
diff --git a/linux/src/include/linux/string.h b/linux/src/include/linux/string.h
new file mode 100644
index 0000000..62ff880
--- /dev/null
+++ b/linux/src/include/linux/string.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_STRING_H_
+#define _LINUX_STRING_H_
+
+#include <linux/types.h> /* for size_t */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern char * ___strtok;
+#if 0
+extern char * strcpy(char *,const char *);
+extern char * strncpy(char *,const char *, __kernel_size_t);
+extern char * strcat(char *, const char *);
+extern char * strncat(char *, const char *, __kernel_size_t);
+extern char * strchr(const char *,int);
+extern char * strrchr(const char *,int);
+#endif
+extern char * strpbrk(const char *,const char *);
+extern char * strtok(char *,const char *);
+extern char * strstr(const char *,const char *);
+#if 0
+extern __kernel_size_t strlen(const char *);
+extern __kernel_size_t strnlen(const char *,__kernel_size_t);
+#endif
+extern __kernel_size_t strspn(const char *,const char *);
+#if 0
+extern int strcmp(const char *,const char *);
+extern int strncmp(const char *,const char *,__kernel_size_t);
+#endif
+
+extern void * memset(void *,int,__kernel_size_t);
+extern void * memcpy(void *,const void *,__kernel_size_t);
+#if 0
+extern void * memmove(void *,const void *,__kernel_size_t);
+extern void * memscan(void *,int,__kernel_size_t);
+#endif
+extern int memcmp(const void *,const void *,__kernel_size_t);
+
+/*
+ * Include machine specific inline routines
+ */
+#include <asm/string.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LINUX_STRING_H_ */
diff --git a/linux/src/include/linux/symtab_begin.h b/linux/src/include/linux/symtab_begin.h
new file mode 100644
index 0000000..65a8700
--- /dev/null
+++ b/linux/src/include/linux/symtab_begin.h
@@ -0,0 +1,45 @@
+#include <linux/linkage.h>
+
+#ifdef MODVERSIONS
+# undef _set_ver
+# undef X
+/*
+ * These two macros _will_ get enough arguments from the X* macros
+ * since "sym" expands to "symaddr, symstr" from the #define in *.ver
+ */
+# define _basic_version(symaddr,symstr) symaddr, symstr
+# define _alias_version(really,symaddr,symstr) (void *) & really , symstr
+
+# ifndef __GENKSYMS__
+# ifdef MODULE
+# define _set_ver(sym,ver) \
+ (void *) & sym ## _R ## ver, SYMBOL_NAME_STR(sym) "_R" #ver
+# else /* !MODULE */
+# define _set_ver(sym,ver) \
+ (void *) & sym, SYMBOL_NAME_STR(sym) "_R" #ver
+# endif /* !MODULE */
+# define X(sym) { _basic_version(sym) }
+/*
+ * For _really_ stacked modules:
+ *
+ * Use "Xalias(local_symbol, symbol_from_other_module)"
+ * to make subsequent modules really use "local_symbol"
+ * when they think that they are using "symbol_from_other_module"
+ *
+ * The "aliasing" module can still use "symbol_from_other_module",
+ * but can now replace and/or modify the behaviour of that symbol.
+ */
+# define Xalias(really,sym) { _alias_version(really,sym) }
+# endif /* !__GENKSYMS__ */
+#else /* !MODVERSIONS */
+# define X(sym) { (void *) & sym, SYMBOL_NAME_STR(sym)}
+# define Xalias(really,sym) { (void *) & really, SYMBOL_NAME_STR(sym)}
+#endif /* MODVERSIONS */
+/*
+ * Some symbols always need to be unversioned. This includes
+ * compiler generated calls to functions.
+ */
+#define XNOVERS(sym) { (void *) & sym, SYMBOL_NAME_STR(sym)}
+
+#define EMPTY {0,0}
+ 0, 0, 0, {
diff --git a/linux/src/include/linux/symtab_end.h b/linux/src/include/linux/symtab_end.h
new file mode 100644
index 0000000..91b92e2
--- /dev/null
+++ b/linux/src/include/linux/symtab_end.h
@@ -0,0 +1,15 @@
+#ifdef MODVERSIONS
+#undef _set_ver
+#if defined(MODULE) && !defined(__GENKSYMS__)
+#define _set_ver(sym,vers) sym ## _R ## vers
+#else
+#define _set_ver(a,b) a
+#endif
+#endif /* MODVERSIONS */
+#undef X
+#undef EMPTY
+ /* mark end of table, last entry above ended with a comma! */
+ { (void *)0, (char *)0 }
+ },
+ /* no module refs, insmod will take care of that instead! */
+ { { (struct module *)0, (struct module_ref *)0 } }
diff --git a/linux/src/include/linux/tasks.h b/linux/src/include/linux/tasks.h
new file mode 100644
index 0000000..466560e
--- /dev/null
+++ b/linux/src/include/linux/tasks.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_TASKS_H
+#define _LINUX_TASKS_H
+
+/*
+ * This is the maximum nr of tasks - change it if you need to
+ */
+
+#define NR_CPUS NCPUS /* Max processors that can be running in SMP */
+
+#define NR_TASKS 512
+
+#define NO_PROC_ID -1
+
+#define MAX_TASKS_PER_USER (NR_TASKS/2)
+#define MIN_TASKS_LEFT_FOR_ROOT 4
+
+#endif
diff --git a/linux/src/include/linux/tcp.h b/linux/src/include/linux/tcp.h
new file mode 100644
index 0000000..ae6a063
--- /dev/null
+++ b/linux/src/include/linux/tcp.h
@@ -0,0 +1,71 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP protocol.
+ *
+ * Version: @(#)tcp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_TCP_H
+#define _LINUX_TCP_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+struct tcphdr {
+ __u16 source;
+ __u16 dest;
+ __u32 seq;
+ __u32 ack_seq;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 res1:4,
+ doff:4,
+ fin:1,
+ syn:1,
+ rst:1,
+ psh:1,
+ ack:1,
+ urg:1,
+ res2:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 doff:4,
+ res1:4,
+ res2:2,
+ urg:1,
+ ack:1,
+ psh:1,
+ rst:1,
+ syn:1,
+ fin:1;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __u16 window;
+ __u16 check;
+ __u16 urg_ptr;
+};
+
+
+enum {
+ TCP_ESTABLISHED = 1,
+ TCP_SYN_SENT,
+ TCP_SYN_RECV,
+ TCP_FIN_WAIT1,
+ TCP_FIN_WAIT2,
+ TCP_TIME_WAIT,
+ TCP_CLOSE,
+ TCP_CLOSE_WAIT,
+ TCP_LAST_ACK,
+ TCP_LISTEN,
+ TCP_CLOSING /* now a valid state */
+};
+
+#endif /* _LINUX_TCP_H */
diff --git a/linux/src/include/linux/termios.h b/linux/src/include/linux/termios.h
new file mode 100644
index 0000000..4786628
--- /dev/null
+++ b/linux/src/include/linux/termios.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_TERMIOS_H
+#define _LINUX_TERMIOS_H
+
+#include <linux/types.h>
+#include <asm/termios.h>
+
+#endif
diff --git a/linux/src/include/linux/time.h b/linux/src/include/linux/time.h
new file mode 100644
index 0000000..d929c9c
--- /dev/null
+++ b/linux/src/include/linux/time.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_TIME_H
+#define _LINUX_TIME_H
+
+#ifndef _STRUCT_TIMESPEC
+#define _STRUCT_TIMESPEC
+struct timespec {
+ long tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+#endif /* _STRUCT_TIMESPEC */
+
+struct timeval {
+ int tv_sec; /* seconds */
+ int tv_usec; /* microseconds */
+};
+
+struct timezone {
+ int tz_minuteswest; /* minutes west of Greenwich */
+ int tz_dsttime; /* type of dst correction */
+};
+
+#define NFDBITS __NFDBITS
+
+#ifdef __KERNEL__
+void do_gettimeofday(struct timeval *tv);
+void do_settimeofday(struct timeval *tv);
+#endif
+
+#define FD_SETSIZE __FD_SETSIZE
+#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
+#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp)
+#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp)
+#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp)
+
+/*
+ * Names of the interval timers, and structure
+ * defining a timer setting.
+ */
+#define ITIMER_REAL 0
+#define ITIMER_VIRTUAL 1
+#define ITIMER_PROF 2
+
+struct itimerspec {
+ struct timespec it_interval; /* timer period */
+ struct timespec it_value; /* timer expiration */
+};
+
+struct itimerval {
+ struct timeval it_interval; /* timer interval */
+ struct timeval it_value; /* current value */
+};
+
+#endif
diff --git a/linux/src/include/linux/timer.h b/linux/src/include/linux/timer.h
new file mode 100644
index 0000000..b922d0d
--- /dev/null
+++ b/linux/src/include/linux/timer.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_TIMER_H
+#define _LINUX_TIMER_H
+
+/*
+ * DON'T CHANGE THESE!! Most of them are hardcoded into some assembly language
+ * as well as being defined here.
+ */
+
+/*
+ * The timers are:
+ *
+ * BLANK_TIMER console screen-saver timer
+ *
+ * BEEP_TIMER console beep timer
+ *
+ * RS_TIMER timer for the RS-232 ports
+ *
+ * SWAP_TIMER timer for the background pageout daemon
+ *
+ * HD_TIMER harddisk timer
+ *
+ * HD_TIMER2 (atdisk2 patches)
+ *
+ * FLOPPY_TIMER floppy disk timer (not used right now)
+ *
+ * SCSI_TIMER scsi.c timeout timer
+ *
+ * NET_TIMER tcp/ip timeout timer
+ *
+ * COPRO_TIMER 387 timeout for buggy hardware..
+ *
+ * QIC02_TAPE_TIMER timer for QIC-02 tape driver (it's not hardcoded)
+ *
+ * MCD_TIMER Mitsumi CD-ROM Timer
+ *
+ * GSCD_TIMER Goldstar CD-ROM Timer
+ *
+ */
+
+#define BLANK_TIMER 0
+#define BEEP_TIMER 1
+#define RS_TIMER 2
+#define SWAP_TIMER 3
+
+#define HD_TIMER 16
+#define FLOPPY_TIMER 17
+#define SCSI_TIMER 18
+#define NET_TIMER 19
+#define SOUND_TIMER 20
+#define COPRO_TIMER 21
+
+#define QIC02_TAPE_TIMER 22 /* hhb */
+#define MCD_TIMER 23
+
+#define HD_TIMER2 24
+#define GSCD_TIMER 25
+
+#define DIGI_TIMER 29
+
+struct timer_struct {
+ unsigned long expires;
+ void (*fn)(void);
+};
+
+extern unsigned long timer_active;
+extern struct timer_struct timer_table[32];
+
+/*
+ * This is completely separate from the above, and is the
+ * "new and improved" way of handling timers more dynamically.
+ * Hopefully efficient and general enough for most things.
+ *
+ * The "hardcoded" timers above are still useful for well-
+ * defined problems, but the timer-list is probably better
+ * when you need multiple outstanding timers or similar.
+ *
+ * The "data" field is in case you want to use the same
+ * timeout function for several timeouts. You can use this
+ * to distinguish between the different invocations.
+ */
+struct timer_list {
+ struct timer_list *next;
+ struct timer_list *prev;
+ unsigned long expires;
+ unsigned long data;
+ void (*function)(unsigned long);
+};
+
+extern void add_timer(struct timer_list * timer);
+extern int del_timer(struct timer_list * timer);
+
+extern void it_real_fn(unsigned long);
+
+extern inline void init_timer(struct timer_list * timer)
+{
+ timer->next = NULL;
+ timer->prev = NULL;
+}
+
+#endif
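A hedged sketch of the dynamic timer interface the comment above describes; the mydev_* names are illustrative, and jiffies/HZ are assumed from the scheduler headers:

struct mydev_state { int timed_out; };

static struct timer_list mydev_timer;

static void mydev_timeout(unsigned long data)
{
	struct mydev_state *sp = (struct mydev_state *) data;
	sp->timed_out = 1;		/* record the expiry; a driver could also re-arm here */
}

static void mydev_start_timer(struct mydev_state *sp)
{
	init_timer(&mydev_timer);
	mydev_timer.function = mydev_timeout;
	mydev_timer.data = (unsigned long) sp;
	mydev_timer.expires = jiffies + 2 * HZ;	/* fire roughly two seconds from now */
	add_timer(&mydev_timer);
}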
diff --git a/linux/src/include/linux/tqueue.h b/linux/src/include/linux/tqueue.h
new file mode 100644
index 0000000..d38e1df
--- /dev/null
+++ b/linux/src/include/linux/tqueue.h
@@ -0,0 +1,143 @@
+/*
+ * tqueue.h --- task queue handling for Linux.
+ *
+ * Mostly based on a proposed bottom-half replacement code written by
+ * Kai Petzke, wpp@marie.physik.tu-berlin.de.
+ *
+ * Modified for use in the Linux kernel by Theodore Ts'o,
+ * tytso@mit.edu. Any bugs are my fault, not Kai's.
+ *
+ * The original comment follows below.
+ */
+
+#ifndef _LINUX_TQUEUE_H
+#define _LINUX_TQUEUE_H
+
+#include <asm/bitops.h>
+#include <asm/system.h>
+
+/*
+ * New proposed "bottom half" handlers:
+ * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
+ *
+ * Advantages:
+ * - Bottom halves are implemented as a linked list. You can have as many
+ * of them as you want.
+ * - No more scanning of a bit field is required upon call of a bottom half.
+ * - Support for chained bottom half lists. The run_task_queue() function can be
+ * used as a bottom half handler. This is for example useful for bottom
+ * halves which want to be delayed until the next clock tick.
+ *
+ * Problems:
+ * - The queue_task_irq() inline function is only atomic with respect to itself.
+ * Problems can occur, when queue_task_irq() is called from a normal system
+ * call, and an interrupt comes in. No problems occur, when queue_task_irq()
+ * is called from an interrupt or bottom half, and interrupted, as run_task_queue()
+ * will not be executed/continued before the last interrupt returns. If in
+ * doubt, use queue_task(), not queue_task_irq().
+ * - Bottom halves are called in the reverse order from that in which they
+ * were linked into the list.
+ */
+
+struct tq_struct {
+ struct tq_struct *next; /* linked list of active bh's */
+ int sync; /* must be initialized to zero */
+ void (*routine)(void *); /* function to call */
+ void *data; /* argument to function */
+};
+
+typedef struct tq_struct * task_queue;
+
+#define DECLARE_TASK_QUEUE(q) task_queue q = NULL
+
+extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
+
+/*
+ * To implement your own list of active bottom halves, use the following
+ * two definitions:
+ *
+ * struct tq_struct *my_bh = NULL;
+ * struct tq_struct run_my_bh = {
+ * 0, 0, (void *)(void *) run_task_queue, &my_bh
+ * };
+ *
+ * To activate a bottom half on your list, use:
+ *
+ * queue_task(tq_pointer, &my_bh);
+ *
+ * To run the bottom halves on your list, put them on the immediate list by:
+ *
+ * queue_task(&run_my_bh, &tq_immediate);
+ *
+ * This allows you to do deferred processing. For example, you could
+ * have a bottom half list tq_timer, which is marked active by the timer
+ * interrupt.
+ */
+
+/*
+ * queue_task_irq: put the bottom half handler "bh_pointer" on the list
+ * "bh_list". You may call this function only from an interrupt
+ * handler or a bottom half handler.
+ */
+static __inline__ void queue_task_irq(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!set_bit(0,&bh_pointer->sync)) {
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ }
+}
+
+/*
+ * queue_task_irq_off: put the bottom half handler "bh_pointer" on the list
+ * "bh_list". You may call this function only when interrupts are off.
+ */
+static __inline__ void queue_task_irq_off(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!(bh_pointer->sync & 1)) {
+ bh_pointer->sync = 1;
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ }
+}
+
+
+/*
+ * queue_task: as queue_task_irq, but can be called from anywhere.
+ */
+static __inline__ void queue_task(struct tq_struct *bh_pointer,
+ task_queue *bh_list)
+{
+ if (!set_bit(0,&bh_pointer->sync)) {
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ bh_pointer->next = *bh_list;
+ *bh_list = bh_pointer;
+ restore_flags(flags);
+ }
+}
+
+/*
+ * Call all "bottom halfs" on a given list.
+ */
+static __inline__ void run_task_queue(task_queue *list)
+{
+ struct tq_struct *p;
+
+ p = xchg(list,NULL);
+ while (p) {
+ void *arg;
+ void (*f) (void *);
+ struct tq_struct *save_p;
+ arg = p -> data;
+ f = p -> routine;
+ save_p = p;
+ p = p -> next;
+ save_p -> sync = 0;
+ (*f)(arg);
+ }
+}
+
+#endif /* _LINUX_TQUEUE_H */
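A hedged sketch of the deferral pattern the comments above describe, as an interrupt handler would use it; all mydev_* names are illustrative, and mark_bh()/IMMEDIATE_BH are assumed from <linux/interrupt.h>:

struct mydev_work { int events; };

static struct mydev_work mydev_work;

static void mydev_deferred(void *arg)
{
	struct mydev_work *w = arg;
	w->events++;			/* runs later, from the immediate bottom half */
}

static struct tq_struct mydev_task = {
	NULL, 0, mydev_deferred, &mydev_work	/* next, sync, routine, data */
};

static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	queue_task_irq(&mydev_task, &tq_immediate);
	mark_bh(IMMEDIATE_BH);		/* assumed from <linux/interrupt.h> */
}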
diff --git a/linux/src/include/linux/trdevice.h b/linux/src/include/linux/trdevice.h
new file mode 100644
index 0000000..9680176
--- /dev/null
+++ b/linux/src/include/linux/trdevice.h
@@ -0,0 +1,40 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Token Ring handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_TRDEVICE_H
+#define _LINUX_TRDEVICE_H
+
+
+#include <linux/if_tr.h>
+
+#ifdef __KERNEL__
+extern int tr_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int tr_rebuild_header(void *buff, struct device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+extern unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev);
+
+#endif
+
+#endif /* _LINUX_TRDEVICE_H */
diff --git a/linux/src/include/linux/tty.h b/linux/src/include/linux/tty.h
new file mode 100644
index 0000000..6fe00af
--- /dev/null
+++ b/linux/src/include/linux/tty.h
@@ -0,0 +1,351 @@
+#ifndef _LINUX_TTY_H
+#define _LINUX_TTY_H
+
+/*
+ * 'tty.h' defines some structures used by tty_io.c and some defines.
+ */
+
+/*
+ * These constants are also useful for user-level apps (e.g., VC
+ * resizing).
+ */
+#define MIN_NR_CONSOLES 1 /* must be at least 1 */
+#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
+#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
+ /* Note: the ioctl VT_GETSTATE does not work for
+ consoles 16 and higher (since it returns a short) */
+
+#ifdef __KERNEL__
+#include <linux/fs.h>
+#include <linux/termios.h>
+#include <linux/tqueue.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_ldisc.h>
+
+#include <asm/system.h>
+
+
+/*
+ * Note: don't mess with NR_PTYS until you understand the tty minor
+ * number allocation game...
+ * (Note: the *_driver.minor_start values 1, 64, 128, 192 are
+ * hardcoded at present.)
+ */
+#define NR_PTYS 256
+#define NR_LDISCS 16
+
+/*
+ * These are set up by the setup-routine at boot-time:
+ */
+
+struct screen_info {
+ unsigned char orig_x;
+ unsigned char orig_y;
+ unsigned char unused1[2];
+ unsigned short orig_video_page;
+ unsigned char orig_video_mode;
+ unsigned char orig_video_cols;
+ unsigned short unused2;
+ unsigned short orig_video_ega_bx;
+ unsigned short unused3;
+ unsigned char orig_video_lines;
+ unsigned char orig_video_isVGA;
+ unsigned short orig_video_points;
+};
+
+extern struct screen_info screen_info;
+
+#define ORIG_X (screen_info.orig_x)
+#define ORIG_Y (screen_info.orig_y)
+#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
+#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
+#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
+#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
+#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
+#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
+
+#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+#define VIDEO_TYPE_CGA 0x11 /* CGA Display */
+#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */
+#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */
+#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */
+
+#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */
+
+#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */
+
+/*
+ * This character is the same as _POSIX_VDISABLE: it cannot be used as
+ * a c_cc[] character, but indicates that a particular special character
+ * isn't in use (eg VINTR has no character etc)
+ */
+#define __DISABLED_CHAR '\0'
+
+/*
+ * This is the flip buffer used for the tty driver. The buffer is
+ * located in the tty structure, and is used as a high speed interface
+ * between the tty driver and the tty line discipline.
+ */
+#define TTY_FLIPBUF_SIZE 512
+
+struct tty_flip_buffer {
+ struct tq_struct tqueue;
+ unsigned char char_buf[2*TTY_FLIPBUF_SIZE];
+ char flag_buf[2*TTY_FLIPBUF_SIZE];
+ char *char_buf_ptr;
+ unsigned char *flag_buf_ptr;
+ int count;
+ int buf_num;
+};
+
+/*
+ * When a break, frame error, or parity error happens, these codes are
+ * stuffed into the flags buffer.
+ */
+#define TTY_NORMAL 0
+#define TTY_BREAK 1
+#define TTY_FRAME 2
+#define TTY_PARITY 3
+#define TTY_OVERRUN 4
+
+#define INTR_CHAR(tty) ((tty)->termios->c_cc[VINTR])
+#define QUIT_CHAR(tty) ((tty)->termios->c_cc[VQUIT])
+#define ERASE_CHAR(tty) ((tty)->termios->c_cc[VERASE])
+#define KILL_CHAR(tty) ((tty)->termios->c_cc[VKILL])
+#define EOF_CHAR(tty) ((tty)->termios->c_cc[VEOF])
+#define TIME_CHAR(tty) ((tty)->termios->c_cc[VTIME])
+#define MIN_CHAR(tty) ((tty)->termios->c_cc[VMIN])
+#define SWTC_CHAR(tty) ((tty)->termios->c_cc[VSWTC])
+#define START_CHAR(tty) ((tty)->termios->c_cc[VSTART])
+#define STOP_CHAR(tty) ((tty)->termios->c_cc[VSTOP])
+#define SUSP_CHAR(tty) ((tty)->termios->c_cc[VSUSP])
+#define EOL_CHAR(tty) ((tty)->termios->c_cc[VEOL])
+#define REPRINT_CHAR(tty) ((tty)->termios->c_cc[VREPRINT])
+#define DISCARD_CHAR(tty) ((tty)->termios->c_cc[VDISCARD])
+#define WERASE_CHAR(tty) ((tty)->termios->c_cc[VWERASE])
+#define LNEXT_CHAR(tty) ((tty)->termios->c_cc[VLNEXT])
+#define EOL2_CHAR(tty) ((tty)->termios->c_cc[VEOL2])
+
+#define _I_FLAG(tty,f) ((tty)->termios->c_iflag & (f))
+#define _O_FLAG(tty,f) ((tty)->termios->c_oflag & (f))
+#define _C_FLAG(tty,f) ((tty)->termios->c_cflag & (f))
+#define _L_FLAG(tty,f) ((tty)->termios->c_lflag & (f))
+
+#define I_IGNBRK(tty) _I_FLAG((tty),IGNBRK)
+#define I_BRKINT(tty) _I_FLAG((tty),BRKINT)
+#define I_IGNPAR(tty) _I_FLAG((tty),IGNPAR)
+#define I_PARMRK(tty) _I_FLAG((tty),PARMRK)
+#define I_INPCK(tty) _I_FLAG((tty),INPCK)
+#define I_ISTRIP(tty) _I_FLAG((tty),ISTRIP)
+#define I_INLCR(tty) _I_FLAG((tty),INLCR)
+#define I_IGNCR(tty) _I_FLAG((tty),IGNCR)
+#define I_ICRNL(tty) _I_FLAG((tty),ICRNL)
+#define I_IUCLC(tty) _I_FLAG((tty),IUCLC)
+#define I_IXON(tty) _I_FLAG((tty),IXON)
+#define I_IXANY(tty) _I_FLAG((tty),IXANY)
+#define I_IXOFF(tty) _I_FLAG((tty),IXOFF)
+#define I_IMAXBEL(tty) _I_FLAG((tty),IMAXBEL)
+
+#define O_OPOST(tty) _O_FLAG((tty),OPOST)
+#define O_OLCUC(tty) _O_FLAG((tty),OLCUC)
+#define O_ONLCR(tty) _O_FLAG((tty),ONLCR)
+#define O_OCRNL(tty) _O_FLAG((tty),OCRNL)
+#define O_ONOCR(tty) _O_FLAG((tty),ONOCR)
+#define O_ONLRET(tty) _O_FLAG((tty),ONLRET)
+#define O_OFILL(tty) _O_FLAG((tty),OFILL)
+#define O_OFDEL(tty) _O_FLAG((tty),OFDEL)
+#define O_NLDLY(tty) _O_FLAG((tty),NLDLY)
+#define O_CRDLY(tty) _O_FLAG((tty),CRDLY)
+#define O_TABDLY(tty) _O_FLAG((tty),TABDLY)
+#define O_BSDLY(tty) _O_FLAG((tty),BSDLY)
+#define O_VTDLY(tty) _O_FLAG((tty),VTDLY)
+#define O_FFDLY(tty) _O_FLAG((tty),FFDLY)
+
+#define C_BAUD(tty) _C_FLAG((tty),CBAUD)
+#define C_CSIZE(tty) _C_FLAG((tty),CSIZE)
+#define C_CSTOPB(tty) _C_FLAG((tty),CSTOPB)
+#define C_CREAD(tty) _C_FLAG((tty),CREAD)
+#define C_PARENB(tty) _C_FLAG((tty),PARENB)
+#define C_PARODD(tty) _C_FLAG((tty),PARODD)
+#define C_HUPCL(tty) _C_FLAG((tty),HUPCL)
+#define C_CLOCAL(tty) _C_FLAG((tty),CLOCAL)
+#define C_CIBAUD(tty) _C_FLAG((tty),CIBAUD)
+#define C_CRTSCTS(tty) _C_FLAG((tty),CRTSCTS)
+
+#define L_ISIG(tty) _L_FLAG((tty),ISIG)
+#define L_ICANON(tty) _L_FLAG((tty),ICANON)
+#define L_XCASE(tty) _L_FLAG((tty),XCASE)
+#define L_ECHO(tty) _L_FLAG((tty),ECHO)
+#define L_ECHOE(tty) _L_FLAG((tty),ECHOE)
+#define L_ECHOK(tty) _L_FLAG((tty),ECHOK)
+#define L_ECHONL(tty) _L_FLAG((tty),ECHONL)
+#define L_NOFLSH(tty) _L_FLAG((tty),NOFLSH)
+#define L_TOSTOP(tty) _L_FLAG((tty),TOSTOP)
+#define L_ECHOCTL(tty) _L_FLAG((tty),ECHOCTL)
+#define L_ECHOPRT(tty) _L_FLAG((tty),ECHOPRT)
+#define L_ECHOKE(tty) _L_FLAG((tty),ECHOKE)
+#define L_FLUSHO(tty) _L_FLAG((tty),FLUSHO)
+#define L_PENDIN(tty) _L_FLAG((tty),PENDIN)
+#define L_IEXTEN(tty) _L_FLAG((tty),IEXTEN)
+
+/*
+ * Where all of the state associated with a tty is kept while the tty
+ * is open. Since the termios state should be kept even if the tty
+ * has been closed --- for things like the baud rate, etc --- it is
+ * not stored here, but rather a pointer to the real state is stored
+ * here. Possibly the winsize structure should have the same
+ * treatment, but (1) the default 80x24 is usually right and (2) it's
+ * most often used by a windowing system, which will set the correct
+ * size each time the window is created or resized anyway.
+ * IMPORTANT: since this structure is dynamically allocated, it must
+ * be no larger than 4096 bytes. Changing TTY_BUF_SIZE will change
+ * the size of this structure, and it needs to be done with care.
+ * - TYT, 9/14/92
+ */
+struct tty_struct {
+ int magic;
+ struct tty_driver driver;
+ struct tty_ldisc ldisc;
+ struct termios *termios, *termios_locked;
+ int pgrp;
+ int session;
+ kdev_t device;
+ unsigned long flags;
+ int count;
+ struct winsize winsize;
+ unsigned char stopped:1, hw_stopped:1, packet:1;
+ unsigned char ctrl_status;
+
+ struct tty_struct *link;
+ struct fasync_struct *fasync;
+ struct tty_flip_buffer flip;
+ int max_flip_cnt;
+ struct wait_queue *write_wait;
+ struct wait_queue *read_wait;
+ void *disc_data;
+ void *driver_data;
+
+#define N_TTY_BUF_SIZE 4096
+
+ /*
+ * The following is data for the N_TTY line discipline. For
+ * historical reasons, this is included in the tty structure.
+ */
+ unsigned int column;
+ unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
+ unsigned char closing:1;
+ unsigned short minimum_to_wake;
+ unsigned overrun_time;
+ int num_overrun;
+ unsigned long process_char_map[256/(8*sizeof(unsigned long))];
+ char *read_buf;
+ int read_head;
+ int read_tail;
+ int read_cnt;
+ unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))];
+ int canon_data;
+ unsigned long canon_head;
+ unsigned int canon_column;
+};
+
+/* tty magic number */
+#define TTY_MAGIC 0x5401
+
+/*
+ * These bits are used in the flags field of the tty structure.
+ *
+ * So that interrupts won't be able to mess up the queues,
+ * copy_to_cooked must be atomic with respect to itself, as must
+ * tty->write. Thus, you must use the inline functions set_bit() and
+ * clear_bit() to make things atomic.
+ */
+#define TTY_THROTTLED 0
+#define TTY_IO_ERROR 1
+#define TTY_OTHER_CLOSED 2
+#define TTY_EXCLUSIVE 3
+#define TTY_DEBUG 4
+#define TTY_DO_WRITE_WAKEUP 5
+#define TTY_PUSH 6
+#define TTY_CLOSING 7
+
+#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
+
+extern void tty_write_flush(struct tty_struct *);
+
+extern struct termios tty_std_termios;
+extern struct tty_struct * redirect;
+extern struct tty_ldisc ldiscs[];
+extern int fg_console, last_console, want_console;
+
+extern int kmsg_redirect;
+extern struct wait_queue * keypress_wait;
+
+extern unsigned long con_init(unsigned long);
+
+extern int rs_init(void);
+extern int lp_init(void);
+extern int pty_init(void);
+extern int tty_init(void);
+extern int pcxe_init(void);
+extern int vcs_init(void);
+extern int cy_init(void);
+extern int stl_init(void);
+extern int stli_init(void);
+extern int riscom8_init(void);
+extern int specialix_init(void);
+extern int baycom_init(void);
+
+extern int tty_paranoia_check(struct tty_struct *tty, kdev_t device,
+ const char *routine);
+extern char *_tty_name(struct tty_struct *tty, char *buf);
+extern char *tty_name(struct tty_struct *tty);
+extern void tty_wait_until_sent(struct tty_struct * tty, int timeout);
+extern int tty_check_change(struct tty_struct * tty);
+extern void stop_tty(struct tty_struct * tty);
+extern void start_tty(struct tty_struct * tty);
+extern int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc);
+extern int tty_register_driver(struct tty_driver *driver);
+extern int tty_unregister_driver(struct tty_driver *driver);
+extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
+ int buflen);
+extern void tty_write_message(struct tty_struct *tty, char *msg);
+
+extern int is_orphaned_pgrp(int pgrp);
+extern int is_ignored(int sig);
+extern int tty_signal(int sig, struct tty_struct *tty);
+extern void tty_hangup(struct tty_struct * tty);
+extern void tty_vhangup(struct tty_struct * tty);
+extern void tty_unhangup(struct file *filp);
+extern int tty_hung_up_p(struct file * filp);
+extern void do_SAK(struct tty_struct *tty);
+extern void disassociate_ctty(int priv);
+
+/* n_tty.c */
+extern struct tty_ldisc tty_ldisc_N_TTY;
+
+/* tty_ioctl.c */
+extern int n_tty_ioctl(struct tty_struct * tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+
+/* serial.c */
+
+extern int rs_open(struct tty_struct * tty, struct file * filp);
+
+/* pty.c */
+
+extern int pty_open(struct tty_struct * tty, struct file * filp);
+extern int pcxe_open(struct tty_struct *tty, struct file *filp);
+
+/* console.c */
+
+extern int con_open(struct tty_struct * tty, struct file * filp);
+extern void update_screen(int new_console);
+extern void console_print(const char *);
+
+/* vt.c */
+
+extern int vt_ioctl(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/linux/src/include/linux/tty_driver.h b/linux/src/include/linux/tty_driver.h
new file mode 100644
index 0000000..3468fa2
--- /dev/null
+++ b/linux/src/include/linux/tty_driver.h
@@ -0,0 +1,189 @@
+#ifndef _LINUX_TTY_DRIVER_H
+#define _LINUX_TTY_DRIVER_H
+
+/*
+ * This structure defines the interface between the low-level tty
+ * driver and the tty routines. The following routines can be
+ * defined; unless noted otherwise, they are optional, and can be
+ * filled in with a null pointer.
+ *
+ * int (*open)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is opened.
+ * This routine is mandatory; if this routine is not filled in,
+ * the attempted open will fail with ENODEV.
+ *
+ * void (*close)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is closed.
+ *
+ * int (*write)(struct tty_struct * tty, int from_user,
+ * const unsigned char *buf, int count);
+ *
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device. The characters may come from
+ * user space or kernel space. This routine will return the
+ * number of characters actually accepted for writing. This
+ * routine is mandatory.
+ *
+ * void (*put_char)(struct tty_struct *tty, unsigned char ch);
+ *
+ * This routine is called by the kernel to write a single
+ * character to the tty device. If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver. If there is no room
+ * in the queue, the character is ignored.
+ *
+ * void (*flush_chars)(struct tty_struct *tty);
+ *
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().
+ *
+ * int (*write_room)(struct tty_struct *tty);
+ *
+ * This routine returns the number of characters the tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if output flow
+ * control is asserted.
+ *
+ * int (*ioctl)(struct tty_struct *tty, struct file * file,
+ * unsigned int cmd, unsigned long arg);
+ *
+ * This routine allows the tty driver to implement
+ * device-specific ioctl's. If the ioctl number passed in cmd
+ * is not recognized by the driver, it should return ENOIOCTLCMD.
+ *
+ * void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ *
+ * This routine allows the tty driver to be notified when
+ * device's termios settings have changed. Note that a
+ * well-designed tty driver should be prepared to accept the case
+ * where old == NULL, and try to do something rational.
+ *
+ * void (*set_ldisc)(struct tty_struct *tty);
+ *
+ * This routine allows the tty driver to be notified when the
+ * device's line discipline is changed.
+ *
+ * void (*throttle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that input buffers for
+ * the line discipline are close to full, and it should somehow
+ * signal that no more characters should be sent to the tty.
+ *
+ * void (*unthrottle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that it should signal
+ * that characters can now be sent to the tty without fear of
+ * overrunning the input buffers of the line disciplines.
+ *
+ * void (*stop)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should stop
+ * outputting characters to the tty device.
+ *
+ * void (*start)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should resume sending
+ * characters to the tty device.
+ *
+ * void (*hangup)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should hangup the
+ * tty device.
+ *
+ */
+
+#include <linux/fs.h>
+
+struct tty_driver {
+ int magic; /* magic number for this structure */
+ const char *name;
+ int name_base; /* offset of printed name */
+ short major; /* major device number */
+ short minor_start; /* start of minor device number*/
+ short num; /* number of devices */
+ short type; /* type of tty driver */
+ short subtype; /* subtype of tty driver */
+ struct termios init_termios; /* Initial termios */
+ int flags; /* tty driver flags */
+ int *refcount; /* for loadable tty drivers */
+ struct tty_driver *other; /* only used for the PTY driver */
+
+ /*
+ * Pointer to the tty data structures
+ */
+ struct tty_struct **table;
+ struct termios **termios;
+ struct termios **termios_locked;
+
+ /*
+ * Interface routines from the upper tty layer to the tty
+ * driver.
+ */
+ int (*open)(struct tty_struct * tty, struct file * filp);
+ void (*close)(struct tty_struct * tty, struct file * filp);
+ int (*write)(struct tty_struct * tty, int from_user,
+ const unsigned char *buf, int count);
+ void (*put_char)(struct tty_struct *tty, unsigned char ch);
+ void (*flush_chars)(struct tty_struct *tty);
+ int (*write_room)(struct tty_struct *tty);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ int (*ioctl)(struct tty_struct *tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ void (*throttle)(struct tty_struct * tty);
+ void (*unthrottle)(struct tty_struct * tty);
+ void (*stop)(struct tty_struct *tty);
+ void (*start)(struct tty_struct *tty);
+ void (*hangup)(struct tty_struct *tty);
+ void (*flush_buffer)(struct tty_struct *tty);
+ void (*set_ldisc)(struct tty_struct *tty);
+
+ /*
+ * linked list pointers
+ */
+ struct tty_driver *next;
+ struct tty_driver *prev;
+};
+
+/* tty driver magic number */
+#define TTY_DRIVER_MAGIC 0x5402
+
+/*
+ * tty driver flags
+ *
+ * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the
+ * termios setting when the last process has closed the device.
+ * Used for PTY's, in particular.
+ *
+ * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
+ * guarantee never to set any special character handling
+ * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
+ * !INPCK)). That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the
+ * line driver, it won't do so. This allows the line driver to
+ * optimize for this case if this flag is set. (Note that there
+ * is also a promise, if the above case is true, not to signal
+ * overruns, either.)
+ */
+#define TTY_DRIVER_INSTALLED 0x0001
+#define TTY_DRIVER_RESET_TERMIOS 0x0002
+#define TTY_DRIVER_REAL_RAW 0x0004
+
+/* tty driver types */
+#define TTY_DRIVER_TYPE_SYSTEM 0x0001
+#define TTY_DRIVER_TYPE_CONSOLE 0x0002
+#define TTY_DRIVER_TYPE_SERIAL 0x0003
+#define TTY_DRIVER_TYPE_PTY 0x0004
+#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
+
+/* system subtypes (magic, used by tty_io.c) */
+#define SYSTEM_TYPE_TTY 0x0001
+#define SYSTEM_TYPE_CONSOLE 0x0002
+
+/* pty subtypes (magic, used by tty_io.c) */
+#define PTY_TYPE_MASTER 0x0001
+#define PTY_TYPE_SLAVE 0x0002
+
+#endif /* #ifdef _LINUX_TTY_DRIVER_H */
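A compressed, hedged sketch of how a low-level driver fills this structure in and registers it; only the mandatory open/write entries plus write_room are shown, every mydrv_* name (and the major number) is illustrative, and tty_std_termios/tty_register_driver come from <linux/tty.h>:

static struct tty_struct *mydrv_table[1];
static struct termios *mydrv_termios[1];
static struct termios *mydrv_termios_locked[1];
static int mydrv_refcount;
static struct tty_driver mydrv_driver;		/* static, so unset fields stay zero */

static int mydrv_open(struct tty_struct *tty, struct file *filp) { return 0; }
static void mydrv_close(struct tty_struct *tty, struct file *filp) { }
static int mydrv_write(struct tty_struct *tty, int from_user,
		       const unsigned char *buf, int count) { return count; }
static int mydrv_write_room(struct tty_struct *tty) { return 255; }

int mydrv_init(void)
{
	mydrv_driver.magic = TTY_DRIVER_MAGIC;
	mydrv_driver.name = "mydrv";
	mydrv_driver.major = 240;		/* illustrative major number */
	mydrv_driver.num = 1;
	mydrv_driver.type = TTY_DRIVER_TYPE_SERIAL;
	mydrv_driver.init_termios = tty_std_termios;
	mydrv_driver.refcount = &mydrv_refcount;
	mydrv_driver.table = mydrv_table;
	mydrv_driver.termios = mydrv_termios;
	mydrv_driver.termios_locked = mydrv_termios_locked;
	mydrv_driver.open = mydrv_open;
	mydrv_driver.close = mydrv_close;
	mydrv_driver.write = mydrv_write;
	mydrv_driver.write_room = mydrv_write_room;
	return tty_register_driver(&mydrv_driver);
}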
diff --git a/linux/src/include/linux/tty_ldisc.h b/linux/src/include/linux/tty_ldisc.h
new file mode 100644
index 0000000..87b54ca
--- /dev/null
+++ b/linux/src/include/linux/tty_ldisc.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_TTY_LDISC_H
+#define _LINUX_TTY_LDISC_H
+
+/*
+ * Definitions for the tty line discipline
+ */
+
+#include <linux/fs.h>
+#include <linux/wait.h>
+
+struct tty_ldisc {
+ int magic;
+ int num;
+ int flags;
+ /*
+ * The following routines are called from above.
+ */
+ int (*open)(struct tty_struct *);
+ void (*close)(struct tty_struct *);
+ void (*flush_buffer)(struct tty_struct *tty);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ int (*read)(struct tty_struct * tty, struct file * file,
+ unsigned char * buf, unsigned int nr);
+ int (*write)(struct tty_struct * tty, struct file * file,
+ const unsigned char * buf, unsigned int nr);
+ int (*ioctl)(struct tty_struct * tty, struct file * file,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct termios * old);
+ int (*select)(struct tty_struct * tty, struct inode * inode,
+ struct file * file, int sel_type,
+ struct select_table_struct *wait);
+
+ /*
+ * The following routines are called from below.
+ */
+ void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+ char *fp, int count);
+ int (*receive_room)(struct tty_struct *);
+ void (*write_wakeup)(struct tty_struct *);
+};
+
+#define TTY_LDISC_MAGIC 0x5403
+
+#define LDISC_FLAG_DEFINED 0x00000001
+
+#endif /* _LINUX_TTY_LDISC_H */
diff --git a/linux/src/include/linux/types.h b/linux/src/include/linux/types.h
new file mode 100644
index 0000000..0b9999f
--- /dev/null
+++ b/linux/src/include/linux/types.h
@@ -0,0 +1,96 @@
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+#ifdef __i386__
+#if defined(__KERNEL__) && !defined(STDC_HEADERS)
+#if ((__GNUC_MINOR__ >= 8) || (__GNUC__ >= 3))
+#warning "This code is tested with gcc 2.7.2.x only. Using egcs/gcc 2.8.x needs"
+#warning "additional patches that have not been sufficiently tested to include by"
+#warning "default."
+#warning "See http://www.suse.de/~florian/kernel+egcs.html for more information"
+#error "Remove this if you have applied the gcc 2.8/egcs patches and wish to use them"
+#endif
+#endif
+#endif
+
+#include <linux/posix_types.h>
+#include <asm/types.h>
+
+#ifndef __KERNEL_STRICT_NAMES
+
+typedef __kernel_fd_set fd_set;
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+typedef __kernel_off_t off_t;
+typedef __kernel_pid_t pid_t;
+typedef __kernel_uid_t uid_t;
+typedef __kernel_gid_t gid_t;
+typedef __kernel_daddr_t daddr_t;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __kernel_loff_t loff_t;
+#endif
+
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef __kernel_size_t size_t;
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef __kernel_ssize_t ssize_t;
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+typedef __kernel_time_t time_t;
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+
+#ifndef _CADDR_T
+#define _CADDR_T
+typedef __kernel_caddr_t caddr_t;
+#endif
+
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#endif /* __KERNEL_STRICT_NAMES */
+
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+#endif /* _LINUX_TYPES_H */
diff --git a/linux/src/include/linux/ucdrom.h b/linux/src/include/linux/ucdrom.h
new file mode 100644
index 0000000..3d8b358
--- /dev/null
+++ b/linux/src/include/linux/ucdrom.h
@@ -0,0 +1,96 @@
+/* ucdrom.h. Uniform cdrom data structures for cdrom.c. -*- linux-c -*-
+ Copyright (c) 1996 David van Leeuwen.
+ */
+
+#ifndef LINUX_UCDROM_H
+#define LINUX_UCDROM_H
+
+#ifdef __KERNEL__
+struct cdrom_device_ops {
+/* routines */
+ int (*open) (kdev_t, int);
+ void (*release) (kdev_t);
+ int (*open_files) (kdev_t); /* number of open files */
+ int (*drive_status) (kdev_t);
+ int (*disc_status) (kdev_t);
+ int (*media_changed) (kdev_t);
+ int (*tray_move) (kdev_t, int);
+ int (*lock_door) (kdev_t, int);
+ int (*select_speed) (kdev_t, int);
+ int (*select_disc) (kdev_t, int);
+ int (*get_last_session) (kdev_t, struct cdrom_multisession *);
+ int (*get_mcn) (kdev_t, struct cdrom_mcn *);
+ int (*reset) (kdev_t dev); /* hard reset device */
+ int (*audio_ioctl) (kdev_t, unsigned int, void *); /* play stuff */
+ int (*dev_ioctl) (kdev_t, unsigned int, unsigned long); /* dev-specific */
+/* specifications */
+ const int capability; /* capability flags */
+ int mask; /* mask of capability: disables them */
+ const int speed; /* maximum speed for reading data */
+ const int minors; /* number of minor devs supported */
+ const int capacity; /* number of discs in jukebox */
+/* device-related storage */
+ int options; /* options flags */
+ long mc_flags; /* media change buffer flags (2*16) */
+};
+#endif
+
+/* capability flags */
+#define CDC_CLOSE_TRAY 0x1 /* caddy systems _can't_ close */
+#define CDC_OPEN_TRAY 0x2 /* but _can_ eject. */
+#define CDC_LOCK 0x4 /* disable manual eject */
+#define CDC_SELECT_SPEED 0x8 /* programmable speed */
+#define CDC_SELECT_DISC 0x10 /* select disc from juke-box */
+#define CDC_MULTI_SESSION 0x20 /* read sessions>1 */
+#define CDC_MCN 0x40 /* Medium Catalog Number */
+#define CDC_MEDIA_CHANGED 0x80 /* media changed */
+#define CDC_PLAY_AUDIO 0x100 /* audio functions */
+
+/* drive status possibilities */
+#define CDS_NO_INFO 0 /* if not implemented */
+#define CDS_NO_DISC 1
+#define CDS_TRAY_OPEN 2
+#define CDS_DRIVE_NOT_READY 3
+#define CDS_DISC_OK 4
+
+/* disc status possibilities, other than CDS_NO_DISC */
+#define CDS_AUDIO 100
+#define CDS_DATA_1 101
+#define CDS_DATA_2 102
+#define CDS_XA_2_1 103
+#define CDS_XA_2_2 104
+
+/* User-configurable behavior options */
+#define CDO_AUTO_CLOSE 0x1 /* close tray on first open() */
+#define CDO_AUTO_EJECT 0x2 /* open tray on last release() */
+#define CDO_USE_FFLAGS 0x4 /* use O_NONBLOCK information on open */
+#define CDO_LOCK 0x8 /* lock tray on open files */
+#define CDO_CHECK_TYPE 0x10 /* check type on open for data */
+
+/* Some more ioctls to control these options */
+#define CDROM_SET_OPTIONS 0x5320
+#define CDROM_CLEAR_OPTIONS 0x5321
+#define CDROM_SELECT_SPEED 0x5322 /* head-speed */
+#define CDROM_SELECT_DISC 0x5323 /* for juke-boxes */
+#define CDROM_MEDIA_CHANGED 0x5325
+#define CDROM_DRIVE_STATUS 0x5326 /* tray position, etc. */
+#define CDROM_DISC_STATUS 0x5327 /* disc type etc. */
+
+/* Rename an old ioctl */
+#define CDROM_GET_MCN CDROM_GET_UPC /* medium catalog number */
+
+#ifdef __KERNEL__
+/* the general file operations structure: */
+extern struct file_operations cdrom_fops;
+
+extern int register_cdrom(int major, char *name,
+ struct cdrom_device_ops *cdo);
+extern int unregister_cdrom(int major, char *name);
+#endif
+
+#endif /* LINUX_UCDROM_H */
+/*
+ * Local variables:
+ * comment-column: 40
+ * End:
+ */
diff --git a/linux/src/include/linux/udp.h b/linux/src/include/linux/udp.h
new file mode 100644
index 0000000..471301a
--- /dev/null
+++ b/linux/src/include/linux/udp.h
@@ -0,0 +1,29 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP protocol.
+ *
+ * Version: @(#)udp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_UDP_H
+#define _LINUX_UDP_H
+
+
+struct udphdr {
+ unsigned short source;
+ unsigned short dest;
+ unsigned short len;
+ unsigned short check;
+};
+
+
+#endif /* _LINUX_UDP_H */
diff --git a/linux/src/include/linux/uio.h b/linux/src/include/linux/uio.h
new file mode 100644
index 0000000..8027bc8
--- /dev/null
+++ b/linux/src/include/linux/uio.h
@@ -0,0 +1,26 @@
+#ifndef __LINUX_UIO_H
+#define __LINUX_UIO_H
+
+/*
+ * Berkeley style UIO structures - Alan Cox 1994.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+/* A word of warning: Our uio structure will clash with the C library one (which is now obsolete). Remove the C
+ library one from sys/uio.h if you have a very old library set */
+
+struct iovec
+{
+ void *iov_base; /* BSD uses caddr_t (same thing in effect) */
+ int iov_len;
+};
+
+#define UIO_MAXIOV 16 /* Maximum iovec's in one operation
+ 16 matches BSD */
+
+#endif
diff --git a/linux/src/include/linux/unistd.h b/linux/src/include/linux/unistd.h
new file mode 100644
index 0000000..10ed983
--- /dev/null
+++ b/linux/src/include/linux/unistd.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_UNISTD_H_
+#define _LINUX_UNISTD_H_
+
+extern int errno;
+
+/*
+ * Include machine specific syscallX macros
+ */
+#include <asm/unistd.h>
+
+#endif /* _LINUX_UNISTD_H_ */
diff --git a/linux/src/include/linux/utsname.h b/linux/src/include/linux/utsname.h
new file mode 100644
index 0000000..7aef28f
--- /dev/null
+++ b/linux/src/include/linux/utsname.h
@@ -0,0 +1,35 @@
+#ifndef _LINUX_UTSNAME_H
+#define _LINUX_UTSNAME_H
+
+#define __OLD_UTS_LEN 8
+
+struct oldold_utsname {
+ char sysname[9];
+ char nodename[9];
+ char release[9];
+ char version[9];
+ char machine[9];
+};
+
+#define __NEW_UTS_LEN 64
+
+struct old_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+};
+
+struct new_utsname {
+ char sysname[65];
+ char nodename[65];
+ char release[65];
+ char version[65];
+ char machine[65];
+ char domainname[65];
+};
+
+extern struct new_utsname system_utsname;
+
+#endif
diff --git a/linux/src/include/linux/version.h b/linux/src/include/linux/version.h
new file mode 100644
index 0000000..1a8bd9f
--- /dev/null
+++ b/linux/src/include/linux/version.h
@@ -0,0 +1,2 @@
+#define UTS_RELEASE "2.0.36"
+#define LINUX_VERSION_CODE 131108
diff --git a/linux/src/include/linux/vfs.h b/linux/src/include/linux/vfs.h
new file mode 100644
index 0000000..b3a5865
--- /dev/null
+++ b/linux/src/include/linux/vfs.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_VFS_H
+#define _LINUX_VFS_H
+
+#include <asm/statfs.h>
+
+#endif
diff --git a/linux/src/include/linux/wait.h b/linux/src/include/linux/wait.h
new file mode 100644
index 0000000..96de4aa
--- /dev/null
+++ b/linux/src/include/linux/wait.h
@@ -0,0 +1,53 @@
+#ifndef _LINUX_WAIT_H
+#define _LINUX_WAIT_H
+
+#define WNOHANG 0x00000001
+#define WUNTRACED 0x00000002
+
+#define __WALL 0x40000000 /* Wait on all children, regardless of type */
+#define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+struct wait_queue {
+ struct task_struct * task;
+ struct wait_queue * next;
+};
+
+typedef struct wait_queue wait_queue_t;
+typedef struct wait_queue *wait_queue_head_t;
+
+#define WAIT_QUEUE_HEAD(x) ((struct wait_queue *)((x)-1))
+#define DECLARE_WAITQUEUE(wait, current) struct wait_queue wait = { current, NULL }
+#define DECLARE_WAIT_QUEUE_HEAD(wait) wait_queue_head_t wait
+#define init_waitqueue_head(x) *(x)=NULL
+#define init_waitqueue_entry(q,p) ((q)->task)=(p)
+
+static inline void init_waitqueue(struct wait_queue **q)
+{
+ *q = WAIT_QUEUE_HEAD(q);
+}
+
+static inline int waitqueue_active(struct wait_queue **q)
+{
+ struct wait_queue *head = *q;
+ return head && head != WAIT_QUEUE_HEAD(q);
+}
+
+struct select_table_entry {
+ struct wait_queue wait;
+ struct wait_queue ** wait_address;
+};
+
+typedef struct select_table_struct {
+ int nr;
+ struct select_table_entry * entry;
+} select_table;
+
+#define __MAX_SELECT_TABLE_ENTRIES (4096 / sizeof (struct select_table_entry))
+
+#endif /* __KERNEL__ */
+
+#endif
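The queue head in this header is simply a pointer to the first entry, and
init_waitqueue() parks it on the WAIT_QUEUE_HEAD sentinel (a non-NULL value
derived from the head's own address) so that "initialised but empty" can be
told apart from NULL. A small user-space simulation of that list discipline
(illustration only; task_struct is a stand-in):

#include <stdio.h>

struct task_struct { int pid; };        /* stand-in, not the real thing */

struct wait_queue {
        struct task_struct *task;
        struct wait_queue  *next;
};

#define WAIT_QUEUE_HEAD(x) ((struct wait_queue *)((x) - 1))

static void init_waitqueue(struct wait_queue **q)
{
        *q = WAIT_QUEUE_HEAD(q);        /* non-NULL "empty" sentinel */
}

static int waitqueue_active(struct wait_queue **q)
{
        struct wait_queue *head = *q;

        return head && head != WAIT_QUEUE_HEAD(q);
}

int main(void)
{
        struct task_struct me = { 1 };
        struct wait_queue *queue;                 /* the queue head */
        struct wait_queue entry = { &me, NULL };

        init_waitqueue(&queue);
        printf("active after init: %d\n", waitqueue_active(&queue));       /* 0 */

        entry.next = queue;                       /* link one sleeper */
        queue = &entry;
        printf("active with one sleeper: %d\n", waitqueue_active(&queue)); /* 1 */
        return 0;
}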
diff --git a/linux/src/include/linux/wireless.h b/linux/src/include/linux/wireless.h
new file mode 100644
index 0000000..c552ff2
--- /dev/null
+++ b/linux/src/include/linux/wireless.h
@@ -0,0 +1,479 @@
+/*
+ * This file defines a set of standard wireless extensions
+ *
+ * Version : 9 16.10.99
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ */
+
+#ifndef _LINUX_WIRELESS_H
+#define _LINUX_WIRELESS_H
+
+/************************** DOCUMENTATION **************************/
+/*
+ * Basically, the wireless extensions are for now a set of standard ioctl
+ * calls + /proc/net/wireless
+ *
+ * The entry /proc/net/wireless gives statistics and information on the
+ * driver.
+ * This is better than having each driver provide its own entry because
+ * it is centralised and we may remove the driver module safely.
+ *
+ * Ioctl are used to configure the driver and issue commands. This is
+ * better than command line options of insmod because we may want to
+ * change dynamically (while the driver is running) some parameters.
+ *
+ * The ioctl mechanism is copied from the standard device ioctls.
+ * We have the list of commands plus a structure describing the
+ * data exchanged...
+ * Note that to add these ioctls, I was obliged to modify :
+ *	net/core/dev.c (two places + add include)
+ * net/ipv4/af_inet.c (one place + add include)
+ *
+ * /proc/net/wireless is a copy of /proc/net/dev.
+ * We have a structure for data passed from the driver to /proc/net/wireless
+ * To add this, I've modified :
+ * net/core/dev.c (two other places)
+ * include/linux/netdevice.h (one place)
+ * include/linux/proc_fs.h (one place)
+ *
+ * Do not add here things that are redundant with other mechanisms
+ * (drivers init, ifconfig, /proc/net/dev, ...) and which are not
+ * wireless specific.
+ *
+ * These wireless extensions are not magic : each driver has to provide
+ * support for them...
+ *
+ * IMPORTANT NOTE : As with everything in the kernel, this is very much a
+ * work in progress. Contact me if you have ideas of improvements...
+ */
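A concrete user-space illustration of the ioctl side described above (not
part of the header; the interface name "eth0" and the lack of error checking
are just for brevity): SIOCGIWNAME doubles as the presence test for wireless
extensions, since it fails on non-wireless interfaces.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

int main(void)
{
        struct iwreq wrq;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&wrq, 0, sizeof wrq);
        strncpy(wrq.ifr_ifrn.ifrn_name, "eth0", IFNAMSIZ);

        if (ioctl(fd, SIOCGIWNAME, &wrq) < 0)
                printf("eth0: no wireless extensions\n");
        else
                printf("eth0: %s\n", wrq.u.name);
        return 0;
}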
+
+/***************************** INCLUDES *****************************/
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+#include <linux/if.h> /* for IFNAMSIZ and co... */
+
+/**************************** CONSTANTS ****************************/
+
+/* --------------------------- VERSION --------------------------- */
+/*
+ * This constant is used to know the availability of the wireless
+ * extensions and to know which version of wireless extensions it is
+ * (there is some stuff that will be added in the future...)
+ * I just plan to increment it with each new version.
+ */
+#define WIRELESS_EXT 10
+
+/*
+ * Changes :
+ *
+ * V2 to V3
+ * --------
+ * Alan Cox started some incompatible changes. I've integrated a bit more.
+ * - Encryption renamed to Encode to avoid US regulation problems
+ * - Frequency changed from float to struct to avoid problems on old 386
+ *
+ * V3 to V4
+ * --------
+ * - Add sensitivity
+ *
+ * V4 to V5
+ * --------
+ * - Missing encoding definitions in range
+ * - Access points stuff
+ *
+ * V5 to V6
+ * --------
+ * - 802.11 support (ESSID ioctls)
+ *
+ * V6 to V7
+ * --------
+ * - define IW_ESSID_MAX_SIZE and IW_MAX_AP
+ *
+ * V7 to V8
+ * --------
+ * - Changed my e-mail address
+ * - More 802.11 support (nickname, rate, rts, frag)
+ * - List index in frequencies
+ *
+ * V8 to V9
+ * --------
+ * - Support for 'mode of operation' (ad-hoc, managed...)
+ * - Support for unicast and multicast power saving
+ * - Change encoding to support larger tokens (>64 bits)
+ * - Updated iw_params (disable, flags) and use it for NWID
+ * - Extracted iw_point from iwreq for clarity
+ *
+ * V9 to V10
+ * ---------
+ * - Add PM capability to range structure
+ * - Add PM modifier : MAX/MIN/RELATIVE
+ * - Add encoding option : IW_ENCODE_NOKEY
+ * - Add TxPower ioctls (work like TxRate)
+ */
+
+/* -------------------------- IOCTL LIST -------------------------- */
+
+/* Basic operations */
+#define SIOCSIWNAME 0x8B00 /* Unused */
+#define SIOCGIWNAME 0x8B01 /* get name == wireless protocol */
+#define SIOCSIWNWID 0x8B02 /* set network id (the cell) */
+#define SIOCGIWNWID 0x8B03 /* get network id */
+#define SIOCSIWFREQ 0x8B04 /* set channel/frequency (Hz) */
+#define SIOCGIWFREQ 0x8B05 /* get channel/frequency (Hz) */
+#define SIOCSIWMODE 0x8B06 /* set operation mode */
+#define SIOCGIWMODE 0x8B07 /* get operation mode */
+#define SIOCSIWSENS 0x8B08 /* set sensitivity (dBm) */
+#define SIOCGIWSENS 0x8B09 /* get sensitivity (dBm) */
+
+/* Informative stuff */
+#define SIOCSIWRANGE 0x8B0A /* Unused */
+#define SIOCGIWRANGE 0x8B0B /* Get range of parameters */
+#define SIOCSIWPRIV 0x8B0C /* Unused */
+#define SIOCGIWPRIV 0x8B0D /* get private ioctl interface info */
+
+/* Mobile IP support */
+#define SIOCSIWSPY 0x8B10 /* set spy addresses */
+#define SIOCGIWSPY 0x8B11 /* get spy info (quality of link) */
+
+/* Access Point manipulation */
+#define SIOCSIWAP 0x8B14 /* set access point MAC addresses */
+#define SIOCGIWAP 0x8B15 /* get access point MAC addresses */
+#define SIOCGIWAPLIST 0x8B17 /* get list of access point in range */
+
+/* 802.11 specific support */
+#define SIOCSIWESSID 0x8B1A /* set ESSID (network name) */
+#define SIOCGIWESSID 0x8B1B /* get ESSID */
+#define SIOCSIWNICKN 0x8B1C /* set node name/nickname */
+#define SIOCGIWNICKN 0x8B1D /* get node name/nickname */
+/* As the ESSID and NICKN are strings up to 32 bytes long, they don't fit
+ * within the 'iwreq' structure, so we need to use the 'data' member to
+ * point to a string in user space, as is done for RANGE...
+ * The "flags" member indicates if the ESSID is active or not (promiscuous).
+ */
+
+/* Other parameters useful in 802.11 and some other devices */
+#define SIOCSIWRATE 0x8B20 /* set default bit rate (bps) */
+#define SIOCGIWRATE 0x8B21 /* get default bit rate (bps) */
+#define SIOCSIWRTS 0x8B22 /* set RTS/CTS threshold (bytes) */
+#define SIOCGIWRTS 0x8B23 /* get RTS/CTS threshold (bytes) */
+#define SIOCSIWFRAG 0x8B24 /* set fragmentation thr (bytes) */
+#define SIOCGIWFRAG 0x8B25 /* get fragmentation thr (bytes) */
+#define SIOCSIWTXPOW 0x8B26 /* set transmit power (dBm) */
+#define SIOCGIWTXPOW 0x8B27 /* get transmit power (dBm) */
+
+/* Encoding stuff (scrambling, hardware security, WEP...) */
+#define SIOCSIWENCODE 0x8B2A /* set encoding token & mode */
+#define SIOCGIWENCODE 0x8B2B /* get encoding token & mode */
+/* Power saving stuff (power management, unicast and multicast) */
+#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */
+#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */
+
+/* ------------------------- IOCTL STUFF ------------------------- */
+
+/* The first and the last (range) */
+#define SIOCIWFIRST 0x8B00
+#define SIOCIWLAST 0x8B30
+
+/* Even : get (world access), odd : set (root access) */
+#define IW_IS_SET(cmd) (!((cmd) & 0x1))
+#define IW_IS_GET(cmd) ((cmd) & 0x1)
+
+/* ------------------------- PRIVATE INFO ------------------------- */
+/*
+ * The following is used with SIOCGIWPRIV. It allows a driver to define
+ * the interface (name, type of data) for its private ioctls.
+ * Private ioctls are SIOCDEVPRIVATE -> SIOCDEVPRIVATE + 0xF
+ */
+
+#define IW_PRIV_TYPE_MASK 0x7000 /* Type of arguments */
+#define IW_PRIV_TYPE_NONE 0x0000
+#define IW_PRIV_TYPE_BYTE 0x1000 /* Char as number */
+#define IW_PRIV_TYPE_CHAR 0x2000 /* Char as character */
+#define IW_PRIV_TYPE_INT 0x4000 /* 32 bits int */
+#define IW_PRIV_TYPE_FLOAT 0x5000
+
+#define IW_PRIV_SIZE_FIXED 0x0800 /* Variable or fixed number of args */
+
+#define IW_PRIV_SIZE_MASK 0x07FF /* Max number of those args */
+
+/*
+ * Note : if the number of args is fixed and the size < 16 octets,
+ * instead of passing a pointer we will put args in the iwreq struct...
+ */
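As an illustration of this encoding (not part of the header), a driver
exporting one private ioctl that takes a single fixed integer argument could
describe it with an iw_priv_args entry like the one below; the command slot
and the "set_quality" name are hypothetical, and struct iw_priv_args itself
is defined at the end of this header.

#include <linux/sockios.h>      /* SIOCDEVPRIVATE */
#include <linux/wireless.h>     /* IW_PRIV_*, struct iw_priv_args */

static const struct iw_priv_args example_priv[] = {
        { SIOCDEVPRIVATE + 1,                           /* hypothetical cmd */
          IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,    /* set: one fixed int */
          0,                                            /* get: nothing back */
          "set_quality" },                              /* hypothetical name */
};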
+
+/* ----------------------- OTHER CONSTANTS ----------------------- */
+
+/* Maximum frequencies in the range struct */
+#define IW_MAX_FREQUENCIES 16
+/* Note : if you have something like 80 frequencies,
+ * don't increase this constant and don't fill the frequency list.
+ * The user will be able to set by channel anyway... */
+
+/* Maximum bit rates in the range struct */
+#define IW_MAX_BITRATES 8
+
+/* Maximum tx powers in the range struct */
+#define IW_MAX_TXPOWER 8
+
+/* Maximum number of addresses that you may set with SPY */
+#define IW_MAX_SPY 8
+
+/* Maximum number of addresses that you may get in the
+ list of access points in range */
+#define IW_MAX_AP 8
+
+/* Maximum size of the ESSID and NICKN strings */
+#define IW_ESSID_MAX_SIZE 32
+
+/* Modes of operation */
+#define IW_MODE_AUTO 0 /* Let the driver decide */
+#define IW_MODE_ADHOC 1 /* Single cell network */
+#define IW_MODE_INFRA 2 /* Multi cell network, roaming, ... */
+#define IW_MODE_MASTER 3 /* Synchronisation master or Access Point */
+#define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */
+#define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */
+
+/* Maximum number of encoding token sizes available;
+ * they are listed in the range structure */
+#define IW_MAX_ENCODING_SIZES 8
+
+/* Maximum size of the encoding token in bytes */
+#define IW_ENCODING_TOKEN_MAX 32 /* 256 bits (for now) */
+
+/* Flags for encoding (along with the token) */
+#define IW_ENCODE_INDEX 0x00FF /* Token index (if needed) */
+#define IW_ENCODE_FLAGS 0xFF00 /* Flags defined below */
+#define IW_ENCODE_MODE 0xF000 /* Modes defined below */
+#define IW_ENCODE_DISABLED 0x8000 /* Encoding disabled */
+#define IW_ENCODE_ENABLED 0x0000 /* Encoding enabled */
+#define IW_ENCODE_RESTRICTED 0x4000 /* Refuse non-encoded packets */
+#define IW_ENCODE_OPEN 0x2000 /* Accept non-encoded packets */
+#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
+
+/* Power management flags available (along with the value, if any) */
+#define IW_POWER_ON 0x0000 /* No details... */
+#define IW_POWER_TYPE 0xF000 /* Type of parameter */
+#define IW_POWER_PERIOD 0x1000 /* Value is a period/duration of */
+#define IW_POWER_TIMEOUT 0x2000 /* Value is a timeout (to go asleep) */
+#define IW_POWER_MODE 0x0F00 /* Power Management mode */
+#define IW_POWER_UNICAST_R 0x0100 /* Receive only unicast messages */
+#define IW_POWER_MULTICAST_R 0x0200 /* Receive only multicast messages */
+#define IW_POWER_ALL_R 0x0300 /* Receive all messages though PM */
+#define IW_POWER_FORCE_S 0x0400 /* Force PM procedure for sending unicast */
+#define IW_POWER_REPEATER 0x0800 /* Repeat broadcast messages in PM period */
+#define IW_POWER_MODIFIER 0x000F /* Modify a parameter */
+#define IW_POWER_MIN 0x0001 /* Value is a minimum */
+#define IW_POWER_MAX 0x0002 /* Value is a maximum */
+#define IW_POWER_RELATIVE 0x0004 /* Value is not in seconds/ms/us */
+
+/* Transmit Power flags available */
+#define IW_TXPOW_DBM 0x0000 /* Value is in dBm */
+#define IW_TXPOW_MWATT 0x0001 /* Value is in mW */
+
+/****************************** TYPES ******************************/
+
+/* --------------------------- SUBTYPES --------------------------- */
+/*
+ * Generic format for most parameters that fit in an int
+ */
+struct iw_param
+{
+ __s32 value; /* The value of the parameter itself */
+ __u8 fixed; /* Hardware should not use auto select */
+ __u8 disabled; /* Disable the feature */
+ __u16 flags; /* Various specific flags (if any) */
+};
+
+/*
+ * For all data larger than 16 octets, we need to use a
+ * pointer to memory allocated in user space.
+ */
+struct iw_point
+{
+ caddr_t pointer; /* Pointer to the data (in user space) */
+ __u16 length; /* number of fields or size in bytes */
+ __u16 flags; /* Optional params */
+};
+
+/*
+ * A frequency
+ * For numbers lower than 10^9, we encode the number in 'm' and
+ * set 'e' to 0
+ * For numbers greater than 10^9, we divide by the lowest power
+ * of 10 to get 'm' lower than 10^9, with 'm'= f / (10^'e')...
+ * The power of 10 is in 'e', the result of the division is in 'm'.
+ */
+struct iw_freq
+{
+ __u32 m; /* Mantissa */
+ __u16 e; /* Exponent */
+ __u8 i; /* List index (when in range struct) */
+};
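A short sketch of the mantissa/exponent convention just described
(illustration only): pack a frequency given in Hz into an iw_freq, and
expand one back into Hz.

#include <linux/wireless.h>

static void hz_to_iw_freq(unsigned long long hz, struct iw_freq *f)
{
        int e = 0;

        while (hz >= 1000000000ULL) {   /* keep the mantissa below 10^9 */
                hz /= 10;
                e++;
        }
        f->m = hz;
        f->e = e;
}

static double iw_freq_to_hz(const struct iw_freq *f)
{
        double hz = f->m;
        int i;

        for (i = 0; i < f->e; i++)
                hz *= 10;
        return hz;      /* e.g. m = 2412, e = 6  ->  2.412e9 Hz */
}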
+
+/*
+ * Quality of the link
+ */
+struct iw_quality
+{
+ __u8 qual; /* link quality (%retries, SNR or better...) */
+ __u8 level; /* signal level */
+ __u8 noise; /* noise level */
+ __u8 updated; /* Flags to know if updated */
+};
+
+/*
+ * Packets discarded in the wireless adapter due to
+ * "wireless" specific problems...
+ */
+struct iw_discarded
+{
+ __u32 nwid; /* Wrong nwid */
+ __u32 code; /* Unable to code/decode */
+ __u32 misc; /* Other cases */
+};
+
+/* ------------------------ WIRELESS STATS ------------------------ */
+/*
+ * Wireless statistics (used for /proc/net/wireless)
+ */
+struct iw_statistics
+{
+ __u16 status; /* Status
+ * - device dependent for now */
+
+ struct iw_quality qual; /* Quality of the link
+ * (instant/mean/max) */
+ struct iw_discarded discard; /* Packet discarded counts */
+};
+
+/* ------------------------ IOCTL REQUEST ------------------------ */
+/*
+ * The structure to exchange data for ioctl.
+ * This structure is the same as 'struct ifreq', but (re)defined for
+ * convenience...
+ *
+ * Note that it should fit in the same memory footprint!
+ * You should check this when increasing the above structures (16 octets)
+ * 16 octets = 128 bits. Warning, pointers might be 64 bits wide...
+ */
+struct iwreq
+{
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "eth0" */
+ } ifr_ifrn;
+
+ /* Data part */
+ union
+ {
+ /* Config - generic */
+ char name[IFNAMSIZ];
+ /* Name : used to verify the presence of wireless extensions.
+ * Name of the protocol/provider... */
+
+ struct iw_point essid; /* Extended network name */
+ struct iw_param nwid; /* network id (or domain - the cell) */
+ struct iw_freq freq; /* frequency or channel :
+ * 0-1000 = channel
+ * > 1000 = frequency in Hz */
+
+ struct iw_param sens; /* signal level threshold */
+ struct iw_param bitrate; /* default bit rate */
+ struct iw_param txpower; /* default transmit power */
+ struct iw_param rts; /* RTS threshold */
+ struct iw_param frag; /* Fragmentation threshold */
+ __u32 mode; /* Operation mode */
+
+ struct iw_point encoding; /* Encoding stuff : tokens */
+ struct iw_param power; /* PM duration/timeout */
+
+ struct sockaddr ap_addr; /* Access point address */
+
+ struct iw_point data; /* Other large parameters */
+ } u;
+};
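Since an ESSID does not fit in the union, the caller hands the driver a
user-space buffer through the iw_point member, as described earlier. A
user-space sketch (illustration only; "eth0" and the missing error handling
are just for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

int main(void)
{
        char essid[IW_ESSID_MAX_SIZE + 1];
        struct iwreq wrq;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&wrq, 0, sizeof wrq);
        memset(essid, 0, sizeof essid);
        strncpy(wrq.ifr_ifrn.ifrn_name, "eth0", IFNAMSIZ);

        wrq.u.essid.pointer = essid;            /* buffer lives in user space */
        wrq.u.essid.length  = IW_ESSID_MAX_SIZE;
        wrq.u.essid.flags   = 0;

        if (ioctl(fd, SIOCGIWESSID, &wrq) >= 0)
                printf("ESSID: \"%s\" (%s)\n", essid,
                       wrq.u.essid.flags ? "active" : "any");
        return 0;
}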
+
+/* -------------------------- IOCTL DATA -------------------------- */
+/*
+ * For those ioctls which want to exchange more data than would
+ * fit in the above structure...
+ */
+
+/*
+ * Range of parameters
+ */
+
+struct iw_range
+{
+ /* Informative stuff (to choose between different interfaces) */
+ __u32 throughput; /* To give an idea... */
+ /* In theory this value should be the maximum benchmarked
+ * TCP/IP throughput, because with most of these devices the
+ * bit rate is meaningless (overhead and co) to estimate how
+ * fast the connection will go and pick the fastest one.
+ * I suggest people play with Netperf or any benchmark...
+ */
+
+ /* NWID (or domain id) */
+ __u32 min_nwid; /* Minimal NWID we are able to set */
+ __u32 max_nwid; /* Maximal NWID we are able to set */
+
+ /* Frequency */
+ __u16 num_channels; /* Number of channels [0; num - 1] */
+ __u8 num_frequency; /* Number of entries in the list */
+ struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */
+ /* Note : this frequency list doesn't need to fit channel numbers */
+
+ /* signal level threshold range */
+ __s32 sensitivity;
+
+ /* Quality of link & SNR stuff */
+ struct iw_quality max_qual; /* Quality of the link */
+
+ /* Rates */
+ __u8 num_bitrates; /* Number of entries in the list */
+ __s32 bitrate[IW_MAX_BITRATES]; /* list, in bps */
+
+ /* RTS threshold */
+ __s32 min_rts; /* Minimal RTS threshold */
+ __s32 max_rts; /* Maximal RTS threshold */
+
+ /* Frag threshold */
+ __s32 min_frag; /* Minimal frag threshold */
+ __s32 max_frag; /* Maximal frag threshold */
+
+ /* Power Management duration & timeout */
+ __s32 min_pmp; /* Minimal PM period */
+ __s32 max_pmp; /* Maximal PM period */
+ __s32 min_pmt; /* Minimal PM timeout */
+ __s32 max_pmt; /* Maximal PM timeout */
+ __u16 pmp_flags; /* How to decode max/min PM period */
+ __u16 pmt_flags; /* How to decode max/min PM timeout */
+ __u16 pm_capa; /* What PM options are supported */
+
+ /* Encoder stuff */
+ __u16 encoding_size[IW_MAX_ENCODING_SIZES]; /* Different token sizes */
+ __u8 num_encoding_sizes; /* Number of entries in the list */
+ __u8 max_encoding_tokens; /* Max number of tokens */
+
+ /* Transmit power */
+ __u16 txpower_capa; /* What options are supported */
+ __u8 num_txpower; /* Number of entries in the list */
+ __s32 txpower[IW_MAX_TXPOWER]; /* list, in dBm (see txpower_capa) */
+};
+
+/*
+ * Private ioctl interface information
+ */
+
+struct iw_priv_args
+{
+ __u32 cmd; /* Number of the ioctl to issue */
+ __u16 set_args; /* Type and number of args */
+ __u16 get_args; /* Type and number of args */
+ char name[IFNAMSIZ]; /* Name of the extension */
+};
+
+#endif /* _LINUX_WIRELESS_H */
diff --git a/linux/src/include/net/af_unix.h b/linux/src/include/net/af_unix.h
new file mode 100644
index 0000000..86b0e1e
--- /dev/null
+++ b/linux/src/include/net/af_unix.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_NET_AFUNIX_H
+#define __LINUX_NET_AFUNIX_H
+extern void unix_proto_init(struct net_proto *pro);
+extern struct proto_ops unix_proto_ops;
+extern void unix_inflight(struct file *fp);
+extern void unix_notinflight(struct file *fp);
+typedef struct sock unix_socket;
+extern void unix_gc(void);
+
+extern unix_socket *unix_socket_list;
+
+#define UNIX_MAX_FD 8
+
+#endif
diff --git a/linux/src/include/net/arp.h b/linux/src/include/net/arp.h
new file mode 100644
index 0000000..db7a29c
--- /dev/null
+++ b/linux/src/include/net/arp.h
@@ -0,0 +1,17 @@
+/* linux/net/inet/arp.h */
+#ifndef _ARP_H
+#define _ARP_H
+
+extern void arp_init(void);
+extern int arp_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern int arp_query(unsigned char *haddr, u32 paddr, struct device *dev);
+extern int arp_find(unsigned char *haddr, u32 paddr,
+ struct device *dev, u32 saddr, struct sk_buff *skb);
+extern int arp_ioctl(unsigned int cmd, void *arg);
+extern void arp_send(int type, int ptype, u32 dest_ip,
+ struct device *dev, u32 src_ip,
+ unsigned char *dest_hw, unsigned char *src_hw, unsigned char *th);
+extern int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short type, __u32 daddr);
+extern int arp_update_cache(struct hh_cache * hh);
+#endif /* _ARP_H */
diff --git a/linux/src/include/net/atalkcall.h b/linux/src/include/net/atalkcall.h
new file mode 100644
index 0000000..726e33c
--- /dev/null
+++ b/linux/src/include/net/atalkcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void atalk_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/ax25.h b/linux/src/include/net/ax25.h
new file mode 100644
index 0000000..fce33cf
--- /dev/null
+++ b/linux/src/include/net/ax25.h
@@ -0,0 +1,292 @@
+/*
+ * Declarations of AX.25 type objects.
+ *
+ * Alan Cox (GW4PTS) 10/11/93
+ */
+
+#ifndef _AX25_H
+#define _AX25_H
+#include <linux/ax25.h>
+
+#define AX25_SLOWHZ 10 /* Run timing at 1/10 second - gives us better resolution for 56kbit links */
+
+#define AX25_T1CLAMPLO (1 * AX25_SLOWHZ) /* If defined, clamp at 1 second **/
+#define AX25_T1CLAMPHI (30 * AX25_SLOWHZ) /* If defined, clamp at 30 seconds **/
+
+#define AX25_BPQ_HEADER_LEN 16
+#define AX25_KISS_HEADER_LEN 1
+
+#define AX25_HEADER_LEN 17
+#define AX25_ADDR_LEN 7
+#define AX25_DIGI_HEADER_LEN (AX25_MAX_DIGIS * AX25_ADDR_LEN)
+#define AX25_MAX_HEADER_LEN (AX25_HEADER_LEN + AX25_DIGI_HEADER_LEN)
+
+/* AX.25 Protocol IDs */
+#define AX25_P_ROSE 0x01
+#define AX25_P_IP 0xCC
+#define AX25_P_ARP 0xCD
+#define AX25_P_TEXT 0xF0
+#define AX25_P_NETROM 0xCF
+#define AX25_P_SEGMENT 0x08
+
+/* AX.25 Segment control values */
+#define AX25_SEG_REM 0x7F
+#define AX25_SEG_FIRST 0x80
+
+#define AX25_CBIT 0x80 /* Command/Response bit */
+#define AX25_EBIT 0x01 /* HDLC Address Extension bit */
+#define AX25_HBIT 0x80 /* Has been repeated bit */
+
+#define AX25_SSSID_SPARE 0x60 /* Unused bits in SSID for standard AX.25 */
+#define AX25_ESSID_SPARE 0x20 /* Unused bits in SSID for extended AX.25 */
+#define AX25_DAMA_FLAG 0x20 /* Well, it is *NOT* unused! (dl1bke 951121) */
+
+#define AX25_COND_ACK_PENDING 0x01
+#define AX25_COND_REJECT 0x02
+#define AX25_COND_PEER_RX_BUSY 0x04
+#define AX25_COND_OWN_RX_BUSY 0x08
+
+#ifndef _LINUX_NETDEVICE_H
+#include <linux/netdevice.h>
+#endif
+
+/*
+ * These headers are taken from the KA9Q package by Phil Karn. These specific
+ * files have been placed under the GPL (not the whole package) by Phil.
+ *
+ *
+ * Copyright 1991 Phil Karn, KA9Q
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 dated June, 1991.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave., Cambridge, MA 02139, USA.
+ */
+
+/* Upper sub-layer (LAPB) definitions */
+
+/* Control field templates */
+#define AX25_I 0x00 /* Information frames */
+#define AX25_S 0x01 /* Supervisory frames */
+#define AX25_RR 0x01 /* Receiver ready */
+#define AX25_RNR 0x05 /* Receiver not ready */
+#define AX25_REJ 0x09 /* Reject */
+#define AX25_U 0x03 /* Unnumbered frames */
+#define AX25_SABM 0x2f /* Set Asynchronous Balanced Mode */
+#define AX25_SABME 0x6f /* Set Asynchronous Balanced Mode Extended */
+#define AX25_DISC 0x43 /* Disconnect */
+#define AX25_DM 0x0f /* Disconnected mode */
+#define AX25_UA 0x63 /* Unnumbered acknowledge */
+#define AX25_FRMR 0x87 /* Frame reject */
+#define AX25_UI 0x03 /* Unnumbered information */
+
+#define AX25_PF 0x10 /* Poll/final bit for standard AX.25 */
+#define AX25_EPF 0x01 /* Poll/final bit for extended AX.25 */
+
+#define AX25_ILLEGAL 0x100 /* Impossible to be a real frame type */
+
+#define AX25_POLLOFF 0
+#define AX25_POLLON 1
+
+/* AX25 L2 C-bit */
+#define AX25_COMMAND 1
+#define AX25_RESPONSE 2
+
+/* Define Link State constants. */
+
+enum {
+ AX25_STATE_0,
+ AX25_STATE_1,
+ AX25_STATE_2,
+ AX25_STATE_3,
+ AX25_STATE_4
+};
+
+#define AX25_MAX_DEVICES 20 /* Max No of AX.25 devices */
+
+#define AX25_MODULUS 8 /* Standard AX.25 modulus */
+#define AX25_EMODULUS 128 /* Extended AX.25 modulus */
+
+enum {
+ AX25_VALUES_IPDEFMODE, /* 0=DG 1=VC */
+ AX25_VALUES_AXDEFMODE, /* 0=Normal 1=Extended Seq Nos */
+ AX25_VALUES_BACKOFF, /* 0=None 1=Linear 2=Exponential */
+ AX25_VALUES_CONMODE, /* Allow connected modes - 0=No 1=no "PID text" 2=all PIDs */
+ AX25_VALUES_WINDOW, /* Default window size for standard AX.25 */
+ AX25_VALUES_EWINDOW, /* Default window size for extended AX.25 */
+ AX25_VALUES_T1, /* Default T1 timeout value */
+ AX25_VALUES_T2, /* Default T2 timeout value */
+ AX25_VALUES_T3, /* Default T3 timeout value */
+ AX25_VALUES_IDLE, /* Connected mode idle timer */
+ AX25_VALUES_N2, /* Default N2 value */
+ AX25_VALUES_PACLEN, /* AX.25 MTU */
+ AX25_MAX_VALUES /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */
+};
+
+#define AX25_DEF_IPDEFMODE 0 /* Datagram */
+#define AX25_DEF_AXDEFMODE 0 /* Normal */
+#define AX25_DEF_BACKOFF 1 /* Linear backoff */
+#define AX25_DEF_CONMODE 2 /* Connected mode allowed */
+#define AX25_DEF_WINDOW 2 /* Window=2 */
+#define AX25_DEF_EWINDOW 32 /* Modulo-128 Window=32 */
+#define AX25_DEF_T1 (10 * AX25_SLOWHZ) /* T1=10s */
+#define AX25_DEF_T2 (3 * AX25_SLOWHZ) /* T2=3s */
+#define AX25_DEF_T3 (300 * AX25_SLOWHZ) /* T3=300s */
+#define AX25_DEF_N2 10 /* N2=10 */
+#define AX25_DEF_IDLE (0 * 60 * AX25_SLOWHZ) /* Idle=None */
+#define AX25_DEF_PACLEN 256 /* Paclen=256 */
+
+typedef struct ax25_uid_assoc {
+ struct ax25_uid_assoc *next;
+ uid_t uid;
+ ax25_address call;
+} ax25_uid_assoc;
+
+typedef struct {
+ ax25_address calls[AX25_MAX_DIGIS];
+ unsigned char repeated[AX25_MAX_DIGIS];
+ unsigned char ndigi;
+ char lastrepeat;
+} ax25_digi;
+
+typedef struct ax25_cb {
+ struct ax25_cb *next;
+ ax25_address source_addr, dest_addr;
+ struct device *device;
+ unsigned char dama_slave, iamdigi;
+ unsigned char state, modulus, pidincl;
+ unsigned short vs, vr, va;
+ unsigned char condition, backoff;
+ unsigned char n2, n2count;
+ unsigned short t1, t2, t3, idle, rtt;
+ unsigned short t1timer, t2timer, t3timer, idletimer;
+ unsigned short paclen;
+ unsigned short fragno, fraglen;
+ ax25_digi *digipeat;
+ struct sk_buff_head write_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head frag_queue;
+ unsigned char window;
+ struct timer_list timer;
+ struct sock *sk; /* Backlink to socket */
+} ax25_cb;
+
+#ifndef _LINUX_SYSCTL_H
+#include <linux/sysctl.h>
+#endif
+
+struct ax25_dev {
+ char name[20];
+ struct device *dev;
+ struct device *forward;
+ struct ctl_table systable[AX25_MAX_VALUES+1];
+ int values[AX25_MAX_VALUES];
+};
+
+/* af_ax25.c */
+extern ax25_address null_ax25_address;
+extern char *ax2asc(ax25_address *);
+extern ax25_address *asc2ax(char *);
+extern int ax25cmp(ax25_address *, ax25_address *);
+extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct device *);
+extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct device *);
+extern void ax25_destroy_socket(ax25_cb *);
+extern struct device *ax25rtr_get_dev(ax25_address *);
+extern int ax25_encapsulate(struct sk_buff *, struct device *, unsigned short,
+ void *, void *, unsigned int);
+extern int ax25_rebuild_header(void *, struct device *, unsigned long, struct sk_buff *);
+extern ax25_uid_assoc *ax25_uid_list;
+extern int ax25_uid_policy;
+extern ax25_address *ax25_findbyuid(uid_t);
+extern void ax25_queue_xmit(struct sk_buff *, struct device *, int);
+extern int ax25_dev_is_dama_slave(struct device *); /* dl1bke 951121 */
+
+#include <net/ax25call.h>
+
+/* ax25_in.c */
+extern int ax25_process_rx_frame(ax25_cb *, struct sk_buff *, int, int);
+
+/* ax25_out.c */
+extern void ax25_output(ax25_cb *, int, struct sk_buff *);
+extern void ax25_kick(ax25_cb *);
+extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+extern void ax25_nr_error_recovery(ax25_cb *);
+extern void ax25_establish_data_link(ax25_cb *);
+extern void ax25_transmit_enquiry(ax25_cb *);
+extern void ax25_enquiry_response(ax25_cb *);
+extern void ax25_timeout_response(ax25_cb *);
+extern void ax25_check_iframes_acked(ax25_cb *, unsigned short);
+extern void dama_enquiry_response(ax25_cb *); /* dl1bke 960114 */
+extern void dama_establish_data_link(ax25_cb *);
+
+/* ax25_route.c */
+extern struct ax25_dev ax25_device[];
+extern int ax25_rt_get_info(char *, char **, off_t, int, int);
+extern int ax25_cs_get_info(char *, char **, off_t, int, int);
+extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
+extern ax25_digi *ax25_rt_find_path(ax25_address *, struct device *);
+extern void ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
+extern void ax25_rt_device_down(struct device *);
+extern int ax25_rt_ioctl(unsigned int, void *);
+extern char ax25_rt_mode_get(ax25_address *, struct device *);
+extern int ax25_dev_get_value(struct device *, int);
+extern void ax25_dev_device_up(struct device *);
+extern void ax25_dev_device_down(struct device *);
+extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+extern struct device *ax25_fwd_dev(struct device *);
+extern void ax25_rt_free(void);
+
+/* ax25_subr.c */
+extern void ax25_clear_queues(ax25_cb *);
+extern void ax25_frames_acked(ax25_cb *, unsigned short);
+extern void ax25_requeue_frames(ax25_cb *);
+extern int ax25_validate_nr(ax25_cb *, unsigned short);
+extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+extern void ax25_send_control(ax25_cb *, int, int, int);
+extern unsigned short ax25_calculate_t1(ax25_cb *);
+extern void ax25_calculate_rtt(ax25_cb *);
+extern unsigned char *ax25_parse_addr(unsigned char *, int, ax25_address *,
+ ax25_address *, ax25_digi *, int *, int *); /* dl1bke 951121 */
+extern int build_ax25_addr(unsigned char *, ax25_address *, ax25_address *,
+ ax25_digi *, int, int);
+extern int size_ax25_addr(ax25_digi *);
+extern void ax25_digi_invert(ax25_digi *, ax25_digi *);
+extern void ax25_return_dm(struct device *, ax25_address *, ax25_address *, ax25_digi *);
+extern void ax25_dama_on(ax25_cb *); /* dl1bke 951121 */
+extern void ax25_dama_off(ax25_cb *); /* dl1bke 951121 */
+extern void ax25_disconnect(ax25_cb *, int);
+
+/* ax25_timer.c */
+extern void ax25_set_timer(ax25_cb *);
+extern void ax25_t1_timeout(ax25_cb *);
+extern void ax25_link_failed(ax25_cb *, int);
+extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+extern int ax25_listen_mine(ax25_address *, struct device *);
+
+/* sysctl_net_ax25.c */
+extern void ax25_register_sysctl(void);
+extern void ax25_unregister_sysctl(void);
+
+/* ... */
+
+extern ax25_cb *volatile ax25_list;
+
+/* support routines for modules that use AX.25, in ax25_timer.c */
+extern int ax25_protocol_register(unsigned int, int (*)(struct sk_buff *, ax25_cb *));
+extern void ax25_protocol_release(unsigned int);
+extern int ax25_linkfail_register(void (*)(ax25_cb *, int));
+extern void ax25_linkfail_release(void (*)(ax25_cb *, int));
+extern int ax25_listen_register(ax25_address *, struct device *);
+extern void ax25_listen_release(ax25_address *, struct device *);
+extern int ax25_protocol_is_registered(unsigned int);
+
+#endif
diff --git a/linux/src/include/net/ax25call.h b/linux/src/include/net/ax25call.h
new file mode 100644
index 0000000..68b8a70
--- /dev/null
+++ b/linux/src/include/net/ax25call.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void ax25_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/br.h b/linux/src/include/net/br.h
new file mode 100644
index 0000000..f1d6678
--- /dev/null
+++ b/linux/src/include/net/br.h
@@ -0,0 +1,270 @@
+/*
+ * Constants and structure definitions for the bridging code
+ */
+
+#if !defined(One)
+#define Zero 0
+#define One 1
+#endif /* !defined(One) */
+
+#if !defined(TRUE)
+#define FALSE 0
+#define TRUE 1
+#endif /* !defined(TRUE) */
+
+/** port states. **/
+#define Disabled 0 /* (4.4.5) */
+#define Listening 1 /* (4.4.2) */
+#define Learning 2 /* (4.4.3) */
+#define Forwarding 3 /* (4.4.4) */
+#define Blocking 4 /* (4.4.1) */
+
+#define No_of_ports 8
+/* arbitrary choice, to allow the code below to compile */
+
+#define All_ports (No_of_ports + 1)
+
+/*
+ * We time out our entries in the FDB after this many seconds.
+ */
+#define FDB_TIMEOUT 300
+
+/*
+ * the following defines are the initial values used when the
+ * bridge is booted. These may be overridden when this bridge is
+ * not the root bridge. These are the recommended default values
+ * from the 802.1d specification.
+ */
+#define BRIDGE_MAX_AGE 20
+#define BRIDGE_HELLO_TIME 2
+#define BRIDGE_FORWARD_DELAY 15
+#define HOLD_TIME 1
+
+#define Default_path_cost 10
+
+/*
+ * minimum increment possible to avoid underestimating age, allows for BPDU
+ * transmission time
+ */
+#define Message_age_increment 1
+
+#define No_port 0
+/*
+ * reserved value for Bridge's root port parameter indicating no root port,
+ * used when Bridge is the root - also used to indicate the source when
+ * a frame is being generated by a higher layer protocol on this host
+ */
+
+/** Configuration BPDU Parameters (4.5.1) **/
+
+typedef struct {
+ union {
+ struct {
+ unsigned short priority;
+ unsigned char ula[6];
+ } p_u;
+ unsigned int id[2];
+ } bi;
+} bridge_id_t;
+
+#define BRIDGE_PRIORITY bi.p_u.priority
+#define BRIDGE_ID_ULA bi.p_u.ula
+#define BRIDGE_ID bi.id
+
+typedef struct {
+ unsigned short protocol_id;
+ unsigned char protocol_version_id;
+ unsigned char type;
+ unsigned char flags;
+#define TOPOLOGY_CHANGE 0x01
+#define TOPOLOGY_CHANGE_ACK 0x80
+ bridge_id_t root_id; /* (4.5.1.1) */
+ unsigned int root_path_cost; /* (4.5.1.2) */
+ bridge_id_t bridge_id; /* (4.5.1.3) */
+ unsigned short port_id; /* (4.5.1.4) */
+ unsigned short message_age; /* (4.5.1.5) */
+ unsigned short max_age; /* (4.5.1.6) */
+ unsigned short hello_time; /* (4.5.1.7) */
+ unsigned short forward_delay; /* (4.5.1.8) */
+} Config_bpdu;
+
+
+/** Topology Change Notification BPDU Parameters (4.5.2) **/
+
+typedef struct {
+ unsigned short protocol_id;
+ unsigned char protocol_version_id;
+ unsigned char type;
+} Tcn_bpdu;
+
+#define BPDU_TYPE_CONFIG 0
+#define BPDU_TYPE_TOPO_CHANGE 128
+
+/** Bridge Parameters (4.5.3) **/
+typedef struct {
+ bridge_id_t designated_root; /* (4.5.3.1) */
+ unsigned int root_path_cost; /* (4.5.3.2) */
+ unsigned int root_port; /* (4.5.3.3) */
+ unsigned short max_age; /* (4.5.3.4) */
+ unsigned short hello_time; /* (4.5.3.5) */
+ unsigned short forward_delay; /* (4.5.3.6) */
+ bridge_id_t bridge_id; /* (4.5.3.7) */
+ unsigned short bridge_max_age; /* (4.5.3.8) */
+ unsigned short bridge_hello_time; /* (4.5.3.9) */
+ unsigned short bridge_forward_delay; /* (4.5.3.10) */
+ unsigned int topology_change_detected; /* (4.5.3.11) */
+ unsigned int topology_change; /* (4.5.3.12) */
+ unsigned short topology_change_time; /* (4.5.3.13) */
+ unsigned short hold_time; /* (4.5.3.14) */
+ unsigned int top_change;
+ unsigned int top_change_detected;
+} Bridge_data;
+
+/** Port Parameters (4.5.5) **/
+typedef struct {
+ unsigned short port_id; /* (4.5.5.1) */
+ unsigned int state; /* (4.5.5.2) */
+ unsigned int path_cost; /* (4.5.5.3) */
+ bridge_id_t designated_root; /* (4.5.5.4) */
+ unsigned int designated_cost; /* (4.5.5.5) */
+ bridge_id_t designated_bridge; /* (4.5.5.6) */
+ unsigned short designated_port; /* (4.5.5.7) */
+ unsigned int top_change_ack; /* (4.5.5.8) */
+ unsigned int config_pending; /* (4.5.5.9) */
+ struct device *dev;
+ struct fdb *fdb; /* head of per port fdb chain */
+} Port_data;
+
+
+
+/** types to support timers for this pseudo-implementation. **/
+typedef struct {
+ unsigned int active; /* timer in use. */
+ unsigned int value; /* current value of timer,
+ * counting up. */
+} Timer;
+
+struct fdb {
+ unsigned char ula[6];
+ unsigned char pad[2];
+ unsigned short port;
+ unsigned int timer;
+ unsigned int flags;
+#define FDB_ENT_VALID 0x01
+/* AVL tree of all addresses, sorted by address */
+ short fdb_avl_height;
+ struct fdb *fdb_avl_left;
+ struct fdb *fdb_avl_right;
+/* linked list of addresses for each port */
+ struct fdb *fdb_next;
+};
+
+#define IS_BRIDGED 0x2e
+
+struct br_stat {
+ unsigned int flags;
+ Bridge_data bridge_data;
+ Port_data port_data[No_of_ports];
+};
+
+/* defined flags for br_stat.flags */
+#define BR_UP 0x0001 /* bridging enabled */
+#define BR_DEBUG 0x0002 /* debugging enabled */
+
+struct br_cf {
+ unsigned int cmd;
+ unsigned int arg1;
+ unsigned int arg2;
+};
+
+/* defined cmds */
+#define BRCMD_BRIDGE_ENABLE 1
+#define BRCMD_BRIDGE_DISABLE 2
+#define BRCMD_PORT_ENABLE 3 /* arg1 = port */
+#define BRCMD_PORT_DISABLE 4 /* arg1 = port */
+#define BRCMD_SET_BRIDGE_PRIORITY 5 /* arg1 = priority */
+#define BRCMD_SET_PORT_PRIORITY 6 /* arg1 = port, arg2 = priority */
+#define BRCMD_SET_PATH_COST 7 /* arg1 = port, arg2 = cost */
+#define BRCMD_DISPLAY_FDB 8 /* arg1 = port */
+#define BRCMD_ENABLE_DEBUG 9
+#define BRCMD_DISABLE_DEBUG 10
+
+/* prototypes of all bridging functions... */
+
+void transmit_config(int port_no);
+int root_bridge(void);
+int supersedes_port_info(int port_no, Config_bpdu *config);
+void record_config_information(int port_no, Config_bpdu *config);
+void record_config_timeout_values(Config_bpdu *config);
+void config_bpdu_generation(void);
+int designated_port(int port_no);
+void reply(int port_no);
+void transmit_tcn(void);
+void configuration_update(void);
+void root_selection(void);
+void designated_port_selection(void);
+void become_designated_port(int port_no);
+void port_state_selection(void);
+void make_forwarding(int port_no);
+void topology_change_detection(void);
+void topology_change_acknowledged(void);
+void acknowledge_topology_change(int port_no);
+void make_blocking(int port_no);
+void set_port_state(int port_no, int state);
+void received_config_bpdu(int port_no, Config_bpdu *config);
+void received_tcn_bpdu(int port_no, Tcn_bpdu *tcn);
+void hello_timer_expiry(void);
+void message_age_timer_expiry(int port_no);
+void forward_delay_timer_expiry(int port_no);
+int designated_for_some_port(void);
+void tcn_timer_expiry(void);
+void topology_change_timer_expiry(void);
+void hold_timer_expiry(int port_no);
+void br_init(void);
+void br_init_port(int port_no);
+void enable_port(int port_no);
+void disable_port(int port_no);
+void set_bridge_priority(bridge_id_t *new_bridge_id);
+void set_port_priority(int port_no, unsigned short new_port_id);
+void set_path_cost(int port_no, unsigned short path_cost);
+void start_hello_timer(void);
+void stop_hello_timer(void);
+int hello_timer_expired(void);
+void start_tcn_timer(void);
+void stop_tcn_timer(void);
+int tcn_timer_expired(void);
+void start_topology_change_timer(void);
+void stop_topology_change_timer(void);
+int topology_change_timer_expired(void);
+void start_message_age_timer(int port_no, unsigned short message_age);
+void stop_message_age_timer(int port_no);
+int message_age_timer_expired(int port_no);
+void start_forward_delay_timer(int port_no);
+void stop_forward_delay_timer(int port_no);
+int forward_delay_timer_expired(int port_no);
+void start_hold_timer(int port_no);
+void stop_hold_timer(int port_no);
+int hold_timer_expired(int port_no);
+
+struct fdb *br_avl_find_addr(unsigned char addr[6]);
+int br_avl_insert (struct fdb * new_node);
+int br_avl_remove (struct fdb * node_to_delete);
+
+int send_tcn_bpdu(int port_no, Tcn_bpdu *bpdu);
+int send_config_bpdu(int port_no, Config_bpdu *config_bpdu);
+int find_port(struct device *dev);
+int br_flood(struct sk_buff *skb, int port);
+int br_drop(struct sk_buff *skb);
+int br_learn(struct sk_buff *skb, int port); /* 3.8 */
+
+int br_receive_frame(struct sk_buff *skb); /* 3.5 */
+int br_tx_frame(struct sk_buff *skb);
+int br_ioctl(unsigned int cmd, void *arg);
+
+void free_fdb(struct fdb *);
+struct fdb *get_fdb(void);
+
+/* externs */
+
+extern struct br_stat br_stats;
+
diff --git a/linux/src/include/net/checksum.h b/linux/src/include/net/checksum.h
new file mode 100644
index 0000000..aee4fd4
--- /dev/null
+++ b/linux/src/include/net/checksum.h
@@ -0,0 +1,25 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Checksumming functions for IP, TCP, UDP and so on
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Borrows very liberally from tcp.c and ip.c, see those
+ * files for more names.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _CHECKSUM_H
+#define _CHECKSUM_H
+
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <asm/checksum.h>
+
+#endif
diff --git a/linux/src/include/net/datalink.h b/linux/src/include/net/datalink.h
new file mode 100644
index 0000000..44e5699
--- /dev/null
+++ b/linux/src/include/net/datalink.h
@@ -0,0 +1,16 @@
+#ifndef _NET_INET_DATALINK_H_
+#define _NET_INET_DATALINK_H_
+
+struct datalink_proto {
+ unsigned short type_len;
+ unsigned char type[8];
+ const char *string_name;
+ unsigned short header_length;
+ int (*rcvfunc)(struct sk_buff *, struct device *,
+ struct packet_type *);
+ void (*datalink_header)(struct datalink_proto *, struct sk_buff *,
+ unsigned char *);
+ struct datalink_proto *next;
+};
+
+#endif
diff --git a/linux/src/include/net/gc.h b/linux/src/include/net/gc.h
new file mode 100644
index 0000000..0b28c09
--- /dev/null
+++ b/linux/src/include/net/gc.h
@@ -0,0 +1,46 @@
+/*
+ * Interface routines assumed by gc()
+ *
+ * Copyright (C) Barak A. Pearlmutter.
+ * Released under the GPL version 2 or later.
+ *
+ */
+
+typedef struct object *pobj; /* pointer to a guy of the type we gc */
+
+/*
+ * How to mark and unmark objects
+ */
+
+extern void gc_mark(pobj);
+extern void gc_unmark(pobj);
+extern int gc_marked(pobj);
+
+/*
+ * How to count and access an object's children
+ */
+
+extern int n_children(pobj); /* how many children */
+extern pobj child_n(pobj, int); /* child i, numbered 0..n-1 */
+
+/*
+ * How to access the root set
+ */
+
+extern int root_size(void); /* number of things in root set */
+extern pobj root_elt(int); /* element i of root set, numbered 0..n-1 */
+
+/*
+ * How to access the free list
+ */
+
+extern void clear_freelist(void);
+extern void add_to_free_list(pobj);
+
+/*
+ * How to iterate through all objects in memory
+ */
+
+extern int N_OBJS;
+extern pobj obj_number(int);
+
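Taken together, these hooks are enough to drive a mark-and-sweep collector.
A sketch of one plausible gc() built on them (illustration only; marking is
done recursively for brevity, and the extern hooks are assumed to be
implemented elsewhere):

static void mark_recursive(pobj p)
{
        int i;

        if (gc_marked(p))
                return;
        gc_mark(p);
        for (i = 0; i < n_children(p); i++)
                mark_recursive(child_n(p, i));
}

static void gc_example(void)
{
        int i;

        /* Mark everything reachable from the root set... */
        for (i = 0; i < root_size(); i++)
                mark_recursive(root_elt(i));

        /* ...then sweep: unmarked objects go on the free list. */
        clear_freelist();
        for (i = 0; i < N_OBJS; i++) {
                pobj p = obj_number(i);

                if (gc_marked(p))
                        gc_unmark(p);   /* reset for the next collection */
                else
                        add_to_free_list(p);
        }
}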
diff --git a/linux/src/include/net/icmp.h b/linux/src/include/net/icmp.h
new file mode 100644
index 0000000..fa770d2
--- /dev/null
+++ b/linux/src/include/net/icmp.h
@@ -0,0 +1,43 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP module.
+ *
+ * Version: @(#)icmp.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ICMP_H
+#define _ICMP_H
+
+#include <linux/icmp.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/protocol.h>
+
+extern struct icmp_err icmp_err_convert[];
+extern struct icmp_mib icmp_statistics;
+
+extern void icmp_send(struct sk_buff *skb_in, int type, int code,
+ unsigned long info, struct device *dev);
+extern int icmp_rcv(struct sk_buff *skb1, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+extern int icmp_ioctl(struct sock *sk, int cmd,
+ unsigned long arg);
+extern void icmp_init(struct proto_ops *ops);
+
+/* CONFIG_IP_TRANSPARENT_PROXY */
+extern int icmp_chkaddr(struct sk_buff *skb);
+
+#endif /* _ICMP_H */
diff --git a/linux/src/include/net/ip.h b/linux/src/include/net/ip.h
new file mode 100644
index 0000000..5437b3d
--- /dev/null
+++ b/linux/src/include/net/ip.h
@@ -0,0 +1,159 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP module.
+ *
+ * Version: @(#)ip.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _IP_H
+#define _IP_H
+
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/route.h>
+
+#ifndef _SNMP_H
+#include <net/snmp.h>
+#endif
+
+#include <net/sock.h> /* struct sock */
+
+/* IP flags. */
+#define IP_CE 0x8000 /* Flag: "Congestion" */
+#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
+#define IP_MF 0x2000 /* Flag: "More Fragments" */
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+
+#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
+
+#ifdef CONFIG_IP_MULTICAST
+extern void ip_mc_dropsocket(struct sock *);
+extern void ip_mc_dropdevice(struct device *dev);
+extern int ip_mc_procinfo(char *, char **, off_t, int, int);
+#endif
+
+#include <net/ip_forward.h>
+
+/* Describe an IP fragment. */
+struct ipfrag
+{
+ int offset; /* offset of fragment in IP datagram */
+ int end; /* last byte of data in datagram */
+ int len; /* length of this fragment */
+ struct sk_buff *skb; /* complete received fragment */
+ unsigned char *ptr; /* pointer into real fragment data */
+ struct ipfrag *next; /* linked list pointers */
+ struct ipfrag *prev;
+};
+
+/*
+ * Describe an entry in the "incomplete datagrams" queue.
+ */
+
+struct ipq
+{
+ unsigned char *mac; /* pointer to MAC header */
+ struct iphdr *iph; /* pointer to IP header */
+ int len; /* total length of original datagram */
+ short ihlen; /* length of the IP header */
+ short maclen; /* length of the MAC header */
+ struct timer_list timer; /* when will this queue expire? */
+ struct ipfrag *fragments; /* linked list of received fragments */
+ struct ipq *next; /* linked list pointers */
+ struct ipq *prev;
+ struct device *dev; /* Device - for icmp replies */
+};
+
+/*
+ * Functions provided by ip.c
+ */
+
+extern void ip_print(const struct iphdr *ip);
+extern int ip_ioctl(struct sock *sk, int cmd, unsigned long arg);
+extern void ip_route_check(__u32 daddr);
+extern int ip_send(struct rtable *rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr);
+extern int ip_build_header(struct sk_buff *skb,
+ __u32 saddr,
+ __u32 daddr,
+ struct device **dev, int type,
+ struct options *opt, int len,
+ int tos,int ttl,struct rtable **rp);
+extern int ip_rcv(struct sk_buff *skb, struct device *dev,
+ struct packet_type *pt);
+extern int ip_options_echo(struct options * dopt, struct options * sopt,
+ __u32 daddr, __u32 saddr,
+ struct sk_buff * skb);
+extern int ip_options_compile(struct options * opt, struct sk_buff * skb);
+extern void ip_send_check(struct iphdr *ip);
+extern int ip_id_count;
+extern void ip_queue_xmit(struct sock *sk,
+ struct device *dev, struct sk_buff *skb,
+ int free);
+extern void ip_init(void);
+extern int ip_build_xmit(struct sock *sk,
+ void getfrag (const void *,
+ __u32,
+ char *,
+ unsigned int,
+ unsigned int),
+ const void *frag,
+ unsigned short int length,
+ __u32 daddr,
+ __u32 saddr,
+ struct options * opt,
+ int flags,
+ int type,
+ int noblock);
+
+extern struct ip_mib ip_statistics;
+
+extern int sysctl_ip_dynaddr;
+int ip_rewrite_addrs(struct sock *sk, struct sk_buff *skb, struct device *dev);
+
+/*
+ * Functions provided by ip_fragment.o
+ */
+
+struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev);
+void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag);
+
+/*
+ * Functions provided by ip_forward.c
+ */
+
+extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, __u32 target_addr);
+extern int sysctl_ip_forward;
+
+
+/*
+ * Functions provided by ip_options.c
+ */
+
+extern void ip_options_build(struct sk_buff *skb, struct options *opt, __u32 daddr, __u32 saddr, int is_frag);
+extern int ip_options_echo(struct options *dopt, struct options *sopt, __u32 daddr, __u32 saddr, struct sk_buff *skb);
+extern void ip_options_fragment(struct sk_buff *skb);
+extern int ip_options_compile(struct options *opt, struct sk_buff *skb);
+
+/*
+ * Functions provided by ip_sockglue.c
+ */
+
+extern int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen);
+extern int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen);
+
+#endif /* _IP_H */
diff --git a/linux/src/include/net/ip_alias.h b/linux/src/include/net/ip_alias.h
new file mode 100644
index 0000000..ee9aa33
--- /dev/null
+++ b/linux/src/include/net/ip_alias.h
@@ -0,0 +1,23 @@
+/*
+ * IP_ALIAS (AF_INET) aliasing definitions.
+ *
+ *
+ * Version: @(#)ip_alias.h 0.50 4/20/97
+ *
+ * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _IP_ALIAS_H
+#define _IP_ALIAS_H
+
+extern int ip_alias_init(void);
+extern int ip_alias_done(void);
+
+#endif /* _IP_ALIAS_H */
diff --git a/linux/src/include/net/ip_forward.h b/linux/src/include/net/ip_forward.h
new file mode 100644
index 0000000..f1b532f
--- /dev/null
+++ b/linux/src/include/net/ip_forward.h
@@ -0,0 +1,11 @@
+#ifndef __NET_IP_FORWARD_H
+#define __NET_IP_FORWARD_H
+
+#define IPFWD_FRAGMENT 1
+#define IPFWD_LASTFRAG 2
+#define IPFWD_MASQUERADED 4
+#define IPFWD_MULTICASTING 8
+#define IPFWD_MULTITUNNEL 0x10
+#define IPFWD_NOTTLDEC 0x20
+
+#endif
diff --git a/linux/src/include/net/ip_masq.h b/linux/src/include/net/ip_masq.h
new file mode 100644
index 0000000..ccfb646
--- /dev/null
+++ b/linux/src/include/net/ip_masq.h
@@ -0,0 +1,205 @@
+/*
+ * IP masquerading functionality definitions
+ */
+
+#ifndef _IP_MASQ_H
+#define _IP_MASQ_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/config.h>
+
+/*
+ * This define affects the number of ports that can be handled
+ * by each of the protocol helper modules.
+ */
+#define MAX_MASQ_APP_PORTS 12
+
+/*
+ * Linux ports don't normally get allocated above 32K.
+ * I used an extra 4K port-space
+ */
+
+#define PORT_MASQ_BEGIN 61000
+#define PORT_MASQ_END (PORT_MASQ_BEGIN+4096)
+
+/*
+ * Default timeouts for masquerade functions. The control channels now
+ * expire the same as TCP channels (other than being updated by
+ * packets on their associated data channels).
+ */
+#define MASQUERADE_EXPIRE_TCP 15*60*HZ
+#define MASQUERADE_EXPIRE_TCP_FIN 2*60*HZ
+#define MASQUERADE_EXPIRE_UDP 5*60*HZ
+/*
+ * ICMP can no longer be modified on the fly using an ioctl - this
+ * define is the only way to change the timeouts
+ */
+#define MASQUERADE_EXPIRE_ICMP 125*HZ
+
+#define IP_AUTOFW_EXPIRE 15*HZ
+
+#define IP_MASQ_F_OUT_SEQ 0x01 /* must do output seq adjust */
+#define IP_MASQ_F_IN_SEQ 0x02 /* must do input seq adjust */
+#define IP_MASQ_F_NO_DPORT 0x04 /* no dport set yet */
+#define IP_MASQ_F_NO_DADDR 0x08 /* no daddr yet */
+#define IP_MASQ_F_HASHED 0x10 /* hashed entry */
+#define IP_MASQ_F_SAW_RST 0x20 /* tcp rst pkt seen */
+#define IP_MASQ_F_SAW_FIN_IN 0x40 /* tcp fin pkt seen incoming */
+#define IP_MASQ_F_SAW_FIN_OUT 0x80 /* tcp fin pkt seen outgoing */
+#define IP_MASQ_F_SAW_FIN (IP_MASQ_F_SAW_FIN_IN | \
+ IP_MASQ_F_SAW_FIN_OUT)
+ /* tcp fin pkts seen */
+#define IP_MASQ_F_CONTROL 0x100 /* this is a control channel */
+#define IP_MASQ_F_NO_SPORT 0x200 /* no sport set yet */
+#define IP_MASQ_F_FTP_PASV 0x400 /* ftp PASV command just issued */
+#define IP_MASQ_F_NO_REPLY 0x800 /* no reply yet from outside */
+#define IP_MASQ_F_AFW_PORT 0x1000
+
+#ifdef __KERNEL__
+
+/*
+ * Delta seq. info structure
+ * Each MASQ struct has 2 (output AND input seq. changes).
+ */
+
+struct ip_masq_seq {
+ __u32 init_seq; /* Add delta from this seq */
+ short delta; /* Delta in sequence numbers */
+ short previous_delta; /* Delta in sequence numbers before last resized pkt */
+};
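One plausible way such a record is applied to an outgoing TCP sequence
number (illustration only; sequence-number wraparound is ignored for
brevity): packets numbered before init_seq were produced before the last
payload-resizing rewrite, so they get previous_delta instead of delta.

#include <linux/types.h>        /* __u32; struct ip_masq_seq is from this header */

static __u32 masq_adjust_seq(__u32 seq, const struct ip_masq_seq *ms_seq)
{
        if (ms_seq->delta || ms_seq->previous_delta) {
                if (seq >= ms_seq->init_seq)
                        return seq + ms_seq->delta;
                return seq + ms_seq->previous_delta;
        }
        return seq;
}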
+
+/*
+ * MASQ structure allocated for each masqueraded association
+ */
+struct ip_masq {
+ struct ip_masq *m_link, *s_link; /* hashed link ptrs */
+ struct timer_list timer; /* Expiration timer */
+ __u16 protocol; /* Which protocol are we talking? */
+ __u16 sport, dport, mport; /* src, dst & masq ports */
+ __u32 saddr, daddr, maddr; /* src, dst & masq addresses */
+ struct ip_masq_seq out_seq, in_seq;
+ struct ip_masq_app *app; /* bound ip_masq_app object */
+ void *app_data; /* Application private data */
+ unsigned flags; /* status flags */
+ struct ip_masq *control; /* Corresponding control connection */
+};
+
+/*
+ * timeout values
+ */
+
+struct ip_fw_masq {
+ int tcp_timeout;
+ int tcp_fin_timeout;
+ int udp_timeout;
+};
+
+extern struct ip_fw_masq *ip_masq_expire;
+
+/*
+ * [0]: UDP free_ports
+ * [1]: TCP free_ports
+ * [2]: ICMP free ids
+ */
+
+extern int ip_masq_free_ports[3];
+
+/*
+ * ip_masq initializer (registers symbols and /proc/net entries)
+ */
+extern int ip_masq_init(void);
+
+/*
+ * functions called from ip layer
+ */
+extern int ip_fw_masquerade(struct sk_buff **, struct device *);
+extern int ip_fw_masq_icmp(struct sk_buff **, struct device *);
+extern int ip_fw_demasquerade(struct sk_buff **, struct device *);
+
+/*
+ * ip_masq obj creation/deletion functions.
+ */
+extern struct ip_masq *ip_masq_new(struct device *dev, int proto, __u32 saddr, __u16 sport, __u32 daddr, __u16 dport, unsigned flags);
+extern void ip_masq_set_expire(struct ip_masq *ms, unsigned long tout);
+
+#ifdef CONFIG_IP_MASQUERADE_IPAUTOFW
+extern void ip_autofw_expire(unsigned long data);
+#endif
+
+/*
+ *
+ * IP_MASQ_APP: IP application masquerading definitions
+ *
+ */
+
+struct ip_masq_app
+{
+ struct ip_masq_app *next;
+ char *name; /* name of application proxy */
+ unsigned type; /* type = proto<<16 | port (host byte order)*/
+ int n_attach;
+ int (*masq_init_1) /* ip_masq initializer */
+ (struct ip_masq_app *, struct ip_masq *);
+ int (*masq_done_1) /* ip_masq fin. */
+ (struct ip_masq_app *, struct ip_masq *);
+ int (*pkt_out) /* output (masquerading) hook */
+ (struct ip_masq_app *, struct ip_masq *, struct sk_buff **, struct device *);
+ int (*pkt_in) /* input (demasq) hook */
+ (struct ip_masq_app *, struct ip_masq *, struct sk_buff **, struct device *);
+};
+
+/*
+ * ip_masq_app initializer
+ */
+extern int ip_masq_app_init(void);
+
+/*
+ * ip_masq_app object registration functions (port: host byte order)
+ */
+extern int register_ip_masq_app(struct ip_masq_app *mapp, unsigned short proto, __u16 port);
+extern int unregister_ip_masq_app(struct ip_masq_app *mapp);
+
+/*
+ * get ip_masq_app obj by proto,port(net_byte_order)
+ */
+extern struct ip_masq_app * ip_masq_app_get(unsigned short proto, __u16 port);
+
+/*
+ * ip_masq TO ip_masq_app (un)binding functions.
+ */
+extern struct ip_masq_app * ip_masq_bind_app(struct ip_masq *ms);
+extern int ip_masq_unbind_app(struct ip_masq *ms);
+
+/*
+ * output and input app. masquerading hooks.
+ *
+ */
+extern int ip_masq_app_pkt_out(struct ip_masq *, struct sk_buff **skb_p, struct device *dev);
+extern int ip_masq_app_pkt_in(struct ip_masq *, struct sk_buff **skb_p, struct device *dev);
+
+/*
+ * service routine(s).
+ */
+extern struct ip_masq * ip_masq_out_get_2(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
+extern struct ip_masq * ip_masq_in_get_2(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
+
+/*
+ * /proc/net entry
+ */
+extern int ip_masq_app_getinfo(char *buffer, char **start, off_t offset, int length, int dummy);
+
+/*
+ * skb_replace function used by "client" modules to replace
+ * a segment of skb.
+ */
+extern struct sk_buff * ip_masq_skb_replace(struct sk_buff *skb, int pri, char *o_buf, int o_len, char *n_buf, int n_len);
+
+#ifdef CONFIG_IP_MASQUERADE_IPAUTOFW
+extern struct ip_autofw * ip_autofw_hosts;
+#endif /* CONFIG_IP_MASQUERADE_IPAUTOFW */
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_MASQ_H */
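Purely as an orientation aid, a minimal sketch of how a protocol helper might hook into the masquerading application interface declared above. Every identifier beginning with example_ is hypothetical and the port 2121 is arbitrary; only the structure layout, the register_ip_masq_app() signature and the type = proto<<16 | port convention come from the header itself.

#include <net/ip_masq.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>                   /* IPPROTO_TCP */

static int example_masq_init(struct ip_masq_app *mapp, struct ip_masq *ms)
{
        return 0;                       /* no per-connection state to set up */
}

static int example_masq_done(struct ip_masq_app *mapp, struct ip_masq *ms)
{
        return 0;                       /* nothing to tear down either */
}

static int example_pkt_out(struct ip_masq_app *mapp, struct ip_masq *ms,
                           struct sk_buff **skb_p, struct device *dev)
{
        return 0;                       /* leave the outgoing payload alone */
}

static int example_pkt_in(struct ip_masq_app *mapp, struct ip_masq *ms,
                          struct sk_buff **skb_p, struct device *dev)
{
        return 0;                       /* leave the incoming payload alone */
}

static struct ip_masq_app example_app = {
        NULL,                           /* next: maintained by the registry */
        "example",                      /* name of the application proxy */
        0,                              /* type: filled in below */
        0,                              /* n_attach */
        example_masq_init,
        example_masq_done,
        example_pkt_out,
        example_pkt_in,
};

int example_masq_register(void)
{
        /* type = proto<<16 | port, port in host byte order (see above) */
        example_app.type = (IPPROTO_TCP << 16) | 2121;
        return register_ip_masq_app(&example_app, IPPROTO_TCP, 2121);
}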
diff --git a/linux/src/include/net/ipip.h b/linux/src/include/net/ipip.h
new file mode 100644
index 0000000..bba1492
--- /dev/null
+++ b/linux/src/include/net/ipip.h
@@ -0,0 +1,4 @@
+extern int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
+ __u32 daddr, unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+
diff --git a/linux/src/include/net/ipx.h b/linux/src/include/net/ipx.h
new file mode 100644
index 0000000..13d3dbb
--- /dev/null
+++ b/linux/src/include/net/ipx.h
@@ -0,0 +1,88 @@
+
+/*
+ * The following information is in its entirety obtained from:
+ *
+ * Novell 'IPX Router Specification' Version 1.10
+ * Part No. 107-000029-001
+ *
+ * Which is available from ftp.novell.com
+ */
+
+#ifndef _NET_INET_IPX_H_
+#define _NET_INET_IPX_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/datalink.h>
+#include <linux/ipx.h>
+
+/* #define CONFIG_IPX_INTERN 1 */
+
+typedef struct
+{
+ unsigned long net;
+ unsigned char node[IPX_NODE_LEN];
+ unsigned short sock;
+} ipx_address;
+
+#define ipx_broadcast_node "\377\377\377\377\377\377"
+#define ipx_this_node "\0\0\0\0\0\0"
+
+typedef struct ipx_packet
+{
+ unsigned short ipx_checksum;
+#define IPX_NO_CHECKSUM 0xFFFF
+ unsigned short ipx_pktsize;
+ unsigned char ipx_tctrl;
+ unsigned char ipx_type;
+#define IPX_TYPE_UNKNOWN 0x00
+#define IPX_TYPE_RIP 0x01 /* may also be 0 */
+#define IPX_TYPE_SAP 0x04 /* may also be 0 */
+#define IPX_TYPE_SPX 0x05 /* Not yet implemented */
+#define IPX_TYPE_NCP 0x11 /* $lots for docs on this (SPIT) */
+#define IPX_TYPE_PPROP 0x14 /* complicated flood fill brdcast [Not supported] */
+ ipx_address ipx_dest __attribute__ ((packed));
+ ipx_address ipx_source __attribute__ ((packed));
+} ipx_packet;
+
+
+typedef struct sock ipx_socket;
+
+#include <net/ipxcall.h>
+extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt);
+extern void ipxrtr_device_down(struct device *dev);
+
+typedef struct ipx_interface {
+ /* IPX address */
+ unsigned long if_netnum;
+ unsigned char if_node[IPX_NODE_LEN];
+
+ /* physical device info */
+ struct device *if_dev;
+ struct datalink_proto *if_dlink;
+ unsigned short if_dlink_type;
+
+ /* socket support */
+ unsigned short if_sknum;
+ ipx_socket *if_sklist;
+
+ /* administrative overhead */
+ int if_ipx_offset;
+ unsigned char if_internal;
+ unsigned char if_primary;
+
+ struct ipx_interface *if_next;
+} ipx_interface;
+
+typedef struct ipx_route {
+ unsigned long ir_net;
+ ipx_interface *ir_intrfc;
+ unsigned char ir_routed;
+ unsigned char ir_router_node[IPX_NODE_LEN];
+ struct ipx_route *ir_next;
+} ipx_route;
+
+#define IPX_MIN_EPHEMERAL_SOCKET 0x4000
+#define IPX_MAX_EPHEMERAL_SOCKET 0x7fff
+
+#endif
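As a reading aid for the ipx_packet/ipx_address layout above, a small sketch that fills in the fixed IPX header fields for a broadcast datagram. The helper name, the byte-order calls and the assumption that IPX_NODE_LEN names the 6-byte node length are illustrative; only the field names and the IPX_* constants come from the header.

#include <linux/string.h>
#include <net/ipx.h>
#include <asm/byteorder.h>              /* htons/htonl, assumed available */

static void example_fill_ipx_header(ipx_packet *p, unsigned long dest_net,
                                    unsigned short dest_sock, int payload_len)
{
        p->ipx_checksum = IPX_NO_CHECKSUM;      /* 0xFFFF: checksum unused */
        p->ipx_pktsize  = htons(sizeof(ipx_packet) + payload_len);
        p->ipx_tctrl    = 0;                    /* hop count starts at zero */
        p->ipx_type     = IPX_TYPE_UNKNOWN;

        p->ipx_dest.net  = htonl(dest_net);
        memcpy(p->ipx_dest.node, ipx_broadcast_node, IPX_NODE_LEN);
        p->ipx_dest.sock = htons(dest_sock);
        /* ipx_source is left for the interface code to fill in */
}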
diff --git a/linux/src/include/net/ipxcall.h b/linux/src/include/net/ipxcall.h
new file mode 100644
index 0000000..eb5bd2b
--- /dev/null
+++ b/linux/src/include/net/ipxcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void ipx_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/netlink.h b/linux/src/include/net/netlink.h
new file mode 100644
index 0000000..0d7cf3f
--- /dev/null
+++ b/linux/src/include/net/netlink.h
@@ -0,0 +1,32 @@
+#ifndef __NET_NETLINK_H
+#define __NET_NETLINK_H
+
+#define NET_MAJOR 36 /* Major 18 is reserved for networking */
+#define MAX_LINKS 11 /* 18,0 for route updates, 18,1 for SKIP, 18,2 debug tap 18,3 PPP reserved */
+ /* 4-7 are psi0-psi3 8 is arpd 9 is ppp */
+ /* 10 is for IPSEC <John Ioannidis> */
+#define MAX_QBYTES 32768 /* Maximum bytes in the queue */
+
+#include <linux/config.h>
+
+extern int netlink_attach(int unit, int (*function)(struct sk_buff *skb));
+extern int netlink_donothing(struct sk_buff *skb);
+extern void netlink_detach(int unit);
+extern int netlink_post(int unit, struct sk_buff *skb);
+extern int init_netlink(void);
+
+#define NETLINK_ROUTE 0 /* Routing/device hook */
+#define NETLINK_SKIP 1 /* Reserved for ENskip */
+#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
+#define NETLINK_FIREWALL 3 /* Firewalling hook */
+#define NETLINK_PSI 4 /* PSI devices - 4 to 7 */
+#define NETLINK_ARPD 8
+#define NETLINK_NET_PPP 9 /* Non tty PPP devices */
+#define NETLINK_IPSEC 10 /* IPSEC */
+
+#ifdef CONFIG_RTNETLINK
+extern void ip_netlink_msg(unsigned long, __u32, __u32, __u32, short, short, char *);
+#else
+#define ip_netlink_msg(a,b,c,d,e,f,g)
+#endif
+#endif
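For orientation, a sketch of how a kernel-side consumer would claim one of the units listed above. The handler name, its habit of freeing the buffer itself and the choice of NETLINK_USERSOCK are assumptions for illustration; only the netlink_attach()/netlink_detach() signatures and the unit numbers come from the header.

#include <linux/skbuff.h>
#include <net/netlink.h>

static int example_netlink_rcv(struct sk_buff *skb)
{
        /* assumed convention: consume the buffer and report success */
        kfree_skb(skb, FREE_READ);
        return 0;
}

int example_netlink_setup(void)
{
        /* fails if somebody already attached to this unit */
        return netlink_attach(NETLINK_USERSOCK, example_netlink_rcv);
}

void example_netlink_teardown(void)
{
        netlink_detach(NETLINK_USERSOCK);
}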
diff --git a/linux/src/include/net/netrom.h b/linux/src/include/net/netrom.h
new file mode 100644
index 0000000..0920f8d
--- /dev/null
+++ b/linux/src/include/net/netrom.h
@@ -0,0 +1,166 @@
+/*
+ * Declarations of NET/ROM type objects.
+ *
+ * Jonathan Naylor G4KLX 9/4/95
+ */
+
+#ifndef _NETROM_H
+#define _NETROM_H
+#include <linux/netrom.h>
+
+#define NR_SLOWHZ 10 /* Run timing at 1/10 second */
+
+#define NR_NETWORK_LEN 15
+#define NR_TRANSPORT_LEN 5
+
+#define NR_PROTO_IP 0x0C
+
+#define NR_PROTOEXT 0x00
+#define NR_CONNREQ 0x01
+#define NR_CONNACK 0x02
+#define NR_DISCREQ 0x03
+#define NR_DISCACK 0x04
+#define NR_INFO 0x05
+#define NR_INFOACK 0x06
+
+#define NR_CHOKE_FLAG 0x80
+#define NR_NAK_FLAG 0x40
+#define NR_MORE_FLAG 0x20
+
+/* Define Link State constants. */
+
+#define NR_STATE_0 0
+#define NR_STATE_1 1
+#define NR_STATE_2 2
+#define NR_STATE_3 3
+
+#define NR_COND_ACK_PENDING 0x01
+#define NR_COND_REJECT 0x02
+#define NR_COND_PEER_RX_BUSY 0x04
+#define NR_COND_OWN_RX_BUSY 0x08
+
+#define NR_DEFAULT_T1 (120 * NR_SLOWHZ) /* Outstanding frames - 120 seconds */
+#define NR_DEFAULT_T2 (5 * NR_SLOWHZ) /* Response delay - 5 seconds */
+#define NR_DEFAULT_N2 3 /* Number of Retries - 3 */
+#define NR_DEFAULT_T4 (180 * NR_SLOWHZ) /* Busy Delay - 180 seconds */
+#define NR_DEFAULT_WINDOW 4 /* Default Window Size - 4 */
+#define NR_DEFAULT_OBS 6 /* Default Obsolescence Count - 6 */
+#define NR_DEFAULT_QUAL 10 /* Default Neighbour Quality - 10 */
+#define NR_DEFAULT_TTL 16 /* Default Time To Live - 16 */
+#define NR_DEFAULT_ROUTING 1 /* Is routing enabled ? */
+#define NR_DEFAULT_FAILS 2 /* Link fails until route fails */
+
+#define NR_MODULUS 256
+#define NR_MAX_WINDOW_SIZE 127 /* Maximum Window Allowable - 127 */
+#define NR_MAX_PACKET_SIZE 236 /* Maximum Packet Length - 236 */
+
+typedef struct {
+ ax25_address user_addr, source_addr, dest_addr;
+ struct device *device;
+ unsigned char my_index, my_id;
+ unsigned char your_index, your_id;
+ unsigned char state, condition, bpqext, window;
+ unsigned short vs, vr, va, vl;
+ unsigned char n2, n2count;
+ unsigned short t1, t2, t4;
+ unsigned short t1timer, t2timer, t4timer;
+ unsigned short fraglen;
+ struct sk_buff_head ack_queue;
+ struct sk_buff_head reseq_queue;
+ struct sk_buff_head frag_queue;
+ struct sock *sk; /* Backlink to socket */
+} nr_cb;
+
+struct nr_neigh {
+ struct nr_neigh *next;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct device *dev;
+ unsigned char quality;
+ unsigned char locked;
+ unsigned short count;
+ unsigned int number;
+ unsigned char failed;
+};
+
+struct nr_route {
+ unsigned char quality;
+ unsigned char obs_count;
+ struct nr_neigh *neighbour;
+};
+
+struct nr_node {
+ struct nr_node *next;
+ ax25_address callsign;
+ char mnemonic[7];
+ unsigned char which;
+ unsigned char count;
+ struct nr_route routes[3];
+};
+
+/* af_netrom.c */
+extern int sysctl_netrom_default_path_quality;
+extern int sysctl_netrom_obsolescence_count_initialiser;
+extern int sysctl_netrom_network_ttl_initialiser;
+extern int sysctl_netrom_transport_timeout;
+extern int sysctl_netrom_transport_maximum_tries;
+extern int sysctl_netrom_transport_acknowledge_delay;
+extern int sysctl_netrom_transport_busy_delay;
+extern int sysctl_netrom_transport_requested_window_size;
+extern int sysctl_netrom_routing_control;
+extern int sysctl_netrom_link_fails_count;
+extern int nr_rx_frame(struct sk_buff *, struct device *);
+extern void nr_destroy_socket(struct sock *);
+
+/* nr_dev.c */
+extern int nr_rx_ip(struct sk_buff *, struct device *);
+extern int nr_init(struct device *);
+
+#include <net/nrcall.h>
+
+/* nr_in.c */
+extern int nr_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* nr_out.c */
+extern void nr_output(struct sock *, struct sk_buff *);
+extern void nr_send_nak_frame(struct sock *);
+extern void nr_kick(struct sock *);
+extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
+extern void nr_establish_data_link(struct sock *);
+extern void nr_enquiry_response(struct sock *);
+extern void nr_check_iframes_acked(struct sock *, unsigned short);
+
+/* nr_route.c */
+extern void nr_rt_device_down(struct device *);
+extern struct device *nr_dev_first(void);
+extern struct device *nr_dev_get(ax25_address *);
+extern int nr_rt_ioctl(unsigned int, void *);
+extern void nr_link_failed(ax25_cb *, int);
+extern int nr_route_frame(struct sk_buff *, ax25_cb *);
+extern int nr_nodes_get_info(char *, char **, off_t, int, int);
+extern int nr_neigh_get_info(char *, char **, off_t, int, int);
+extern void nr_rt_free(void);
+
+/* nr_subr.c */
+extern void nr_clear_queues(struct sock *);
+extern void nr_frames_acked(struct sock *, unsigned short);
+extern void nr_requeue_frames(struct sock *);
+extern int nr_validate_nr(struct sock *, unsigned short);
+extern int nr_in_rx_window(struct sock *, unsigned short);
+extern void nr_write_internal(struct sock *, int);
+extern void nr_transmit_dm(struct sk_buff *, int);
+
+/* nr_timer.c */
+extern void nr_set_timer(struct sock *);
+
+/* sysctl_net_netrom.c */
+extern void nr_register_sysctl(void);
+extern void nr_unregister_sysctl(void);
+
+/* nr_loopback.c */
+extern void nr_loopback_init(void);
+extern void nr_loopback_clear(void);
+extern int nr_loopback_queue(struct sk_buff *);
+
+#endif
diff --git a/linux/src/include/net/nrcall.h b/linux/src/include/net/nrcall.h
new file mode 100644
index 0000000..09ee699
--- /dev/null
+++ b/linux/src/include/net/nrcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void nr_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/p8022.h b/linux/src/include/net/p8022.h
new file mode 100644
index 0000000..03d7c3d
--- /dev/null
+++ b/linux/src/include/net/p8022.h
@@ -0,0 +1,7 @@
+#ifndef _NET_P8022_H
+#define _NET_P8022_H
+
+extern struct datalink_proto *register_8022_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+extern void unregister_8022_client(unsigned char type);
+
+#endif
diff --git a/linux/src/include/net/p8022call.h b/linux/src/include/net/p8022call.h
new file mode 100644
index 0000000..14f0c2c
--- /dev/null
+++ b/linux/src/include/net/p8022call.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void p8022_proto_init(struct net_proto *);
diff --git a/linux/src/include/net/p8022tr.h b/linux/src/include/net/p8022tr.h
new file mode 100644
index 0000000..f4231ec
--- /dev/null
+++ b/linux/src/include/net/p8022tr.h
@@ -0,0 +1,8 @@
+#ifndef _NET_P8022TR_H
+#define _NET_P8022TR_H
+
+extern struct datalink_proto *register_8022tr_client(unsigned char type, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+extern void unregister_8022tr_client(unsigned char type);
+
+#endif
+
diff --git a/linux/src/include/net/p8022trcall.h b/linux/src/include/net/p8022trcall.h
new file mode 100644
index 0000000..3ce6f3c
--- /dev/null
+++ b/linux/src/include/net/p8022trcall.h
@@ -0,0 +1,3 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void p8022tr_proto_init(struct net_proto *);
+
diff --git a/linux/src/include/net/protocol.h b/linux/src/include/net/protocol.h
new file mode 100644
index 0000000..c21f845
--- /dev/null
+++ b/linux/src/include/net/protocol.h
@@ -0,0 +1,55 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the protocol dispatcher.
+ *
+ * Version: @(#)protocol.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Changes:
+ * Alan Cox : Added a name field and a frag handler
+ * field for later.
+ * Alan Cox : Cleaned up, and sorted types.
+ */
+
+#ifndef _PROTOCOL_H
+#define _PROTOCOL_H
+
+#define MAX_INET_PROTOS 32 /* Must be a power of 2 */
+
+
+/* This is used to register protocols. */
+struct inet_protocol {
+ int (*handler)(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+ void (*err_handler)(int type, int code, unsigned char *buff,
+ __u32 daddr,
+ __u32 saddr,
+ struct inet_protocol *protocol, int len);
+ struct inet_protocol *next;
+ unsigned char protocol;
+ unsigned char copy:1;
+ void *data;
+ const char *name;
+};
+
+
+extern struct inet_protocol *inet_protocol_base;
+extern struct inet_protocol *inet_protos[MAX_INET_PROTOS];
+
+
+extern void inet_add_protocol(struct inet_protocol *prot);
+extern int inet_del_protocol(struct inet_protocol *prot);
+
+
+#endif /* _PROTOCOL_H */
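A sketch of the registration pattern the dispatcher above expects from a transport protocol. The example_* names, the drop-everything handler and the protocol number 253 (an unassigned, experimental value) are illustrative; the field order and the handler signature are exactly those of struct inet_protocol.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/protocol.h>

static int example_rcv(struct sk_buff *skb, struct device *dev,
                       struct options *opt, __u32 daddr,
                       unsigned short len, __u32 saddr,
                       int redo, struct inet_protocol *protocol)
{
        kfree_skb(skb, FREE_READ);      /* accept the datagram and drop it */
        return 0;
}

static struct inet_protocol example_protocol = {
        example_rcv,                    /* handler */
        NULL,                           /* err_handler: ignore ICMP errors */
        NULL,                           /* next: maintained by the dispatcher */
        253,                            /* protocol number in the IP header */
        0,                              /* copy: sole consumer of this proto */
        NULL,                           /* data */
        "EXAMPLE"                       /* name */
};

void example_protocol_init(void)
{
        inet_add_protocol(&example_protocol);
}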
diff --git a/linux/src/include/net/psnap.h b/linux/src/include/net/psnap.h
new file mode 100644
index 0000000..49a68f7
--- /dev/null
+++ b/linux/src/include/net/psnap.h
@@ -0,0 +1,7 @@
+#ifndef _NET_PSNAP_H
+#define _NET_PSNAP_H
+
+extern struct datalink_proto *register_snap_client(unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct device *, struct packet_type *));
+extern void unregister_snap_client(unsigned char *desc);
+
+#endif
diff --git a/linux/src/include/net/psnapcall.h b/linux/src/include/net/psnapcall.h
new file mode 100644
index 0000000..9da5763
--- /dev/null
+++ b/linux/src/include/net/psnapcall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of Space.c simpler */
+extern void snap_proto_init(struct net_proto *);
diff --git a/linux/src/include/net/rarp.h b/linux/src/include/net/rarp.h
new file mode 100644
index 0000000..7bfb08e
--- /dev/null
+++ b/linux/src/include/net/rarp.h
@@ -0,0 +1,12 @@
+/* linux/net/inet/rarp.h */
+#ifndef _RARP_H
+#define _RARP_H
+
+extern int rarp_ioctl(unsigned int cmd, void *arg);
+extern int rarp_get_info(char *buffer,
+ char **start,
+ off_t offset,
+ int length,
+ int dummy);
+#endif /* _RARP_H */
+
diff --git a/linux/src/include/net/raw.h b/linux/src/include/net/raw.h
new file mode 100644
index 0000000..5b2f97f
--- /dev/null
+++ b/linux/src/include/net/raw.h
@@ -0,0 +1,44 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the RAW-IP module.
+ *
+ * Version: @(#)raw.h 1.0.2 05/07/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _RAW_H
+#define _RAW_H
+
+
+extern struct proto raw_prot;
+
+
+extern void raw_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32 saddr, struct inet_protocol *protocol);
+extern int raw_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int raw_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+extern int raw_rcv(struct sock *, struct sk_buff *, struct device *,
+ __u32, __u32);
+
+/* Note: v4 ICMP wants to get at this stuff; if you change the
+ * hashing mechanism, make sure you update icmp.c as well.

+ */
+#define RAWV4_HTABLE_SIZE MAX_INET_PROTOS
+extern struct sock *raw_v4_htable[RAWV4_HTABLE_SIZE];
+
+
+extern struct sock *raw_v4_lookup(struct sock *sk, unsigned short num,
+ unsigned long raddr, unsigned long laddr);
+
+#endif /* _RAW_H */
diff --git a/linux/src/include/net/rose.h b/linux/src/include/net/rose.h
new file mode 100644
index 0000000..e868e51
--- /dev/null
+++ b/linux/src/include/net/rose.h
@@ -0,0 +1,233 @@
+/*
+ * Declarations of Rose type objects.
+ *
+ * Jonathan Naylor G4KLX 25/8/96
+ */
+
+#ifndef _ROSE_H
+#define _ROSE_H
+#include <linux/rose.h>
+
+#define ROSE_SLOWHZ 10 /* Run timing at 1/10 second */
+
+#define ROSE_ADDR_LEN 5
+
+#define ROSE_MIN_LEN 3
+
+#define ROSE_GFI 0x10
+#define ROSE_Q_BIT 0x80
+#define ROSE_D_BIT 0x40
+#define ROSE_M_BIT 0x10
+#define M_BIT 0x10
+
+#define ROSE_CALL_REQUEST 0x0B
+#define ROSE_CALL_ACCEPTED 0x0F
+#define ROSE_CLEAR_REQUEST 0x13
+#define ROSE_CLEAR_CONFIRMATION 0x17
+#define ROSE_DATA 0x00
+#define ROSE_INTERRUPT 0x23
+#define ROSE_INTERRUPT_CONFIRMATION 0x27
+#define ROSE_RR 0x01
+#define ROSE_RNR 0x05
+#define ROSE_REJ 0x09
+#define ROSE_RESET_REQUEST 0x1B
+#define ROSE_RESET_CONFIRMATION 0x1F
+#define ROSE_REGISTRATION_REQUEST 0xF3
+#define ROSE_REGISTRATION_CONFIRMATION 0xF7
+#define ROSE_RESTART_REQUEST 0xFB
+#define ROSE_RESTART_CONFIRMATION 0xFF
+#define ROSE_DIAGNOSTIC 0xF1
+#define ROSE_ILLEGAL 0xFD
+
+/* Define Link State constants. */
+
+#define ROSE_STATE_0 0 /* Ready */
+#define ROSE_STATE_1 1 /* Awaiting Call Accepted */
+#define ROSE_STATE_2 2 /* Awaiting Clear Confirmation */
+#define ROSE_STATE_3 3 /* Data Transfer */
+#define ROSE_STATE_4 4 /* Awaiting Reset Confirmation */
+#define ROSE_STATE_5 5 /* Deferred Call Acceptance */
+
+#define ROSE_DEFAULT_T0 (180 * ROSE_SLOWHZ) /* Default T10 T20 value */
+#define ROSE_DEFAULT_T1 (200 * ROSE_SLOWHZ) /* Default T11 T21 value */
+#define ROSE_DEFAULT_T2 (180 * ROSE_SLOWHZ) /* Default T12 T22 value */
+#define ROSE_DEFAULT_T3 (180 * ROSE_SLOWHZ) /* Default T13 T23 value */
+#define ROSE_DEFAULT_HB (5 * ROSE_SLOWHZ) /* Default Holdback value */
+#define ROSE_DEFAULT_ROUTING 1 /* Default routing flag */
+#define ROSE_DEFAULT_FAIL_TIMEOUT (120 * ROSE_SLOWHZ) /* Time until link considered usable */
+#define ROSE_DEFAULT_MAXVC 50 /* Maximum number of VCs per neighbour */
+#define ROSE_DEFAULT_WINDOW_SIZE 7 /* Default window value */
+
+#define ROSE_MODULUS 8
+#define ROSE_MAX_PACKET_SIZE 251 /* Maximum Packet Size */
+
+#define ROSE_MAX_WINDOW_LEN ((AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 300) * 7)
+
+#define ROSE_COND_ACK_PENDING 0x01
+#define ROSE_COND_PEER_RX_BUSY 0x02
+#define ROSE_COND_OWN_RX_BUSY 0x04
+
+#define FAC_NATIONAL 0x00
+#define FAC_CCITT 0x0F
+
+#define FAC_NATIONAL_RAND 0x7F
+#define FAC_NATIONAL_FLAGS 0x3F
+#define FAC_NATIONAL_DEST_DIGI 0xE9
+#define FAC_NATIONAL_SRC_DIGI 0xEB
+#define FAC_NATIONAL_FAIL_CALL 0xED
+#define FAC_NATIONAL_FAIL_ADD 0xEE
+#define FAC_NATIONAL_DIGIS 0xEF
+
+#define FAC_CCITT_DEST_NSAP 0xC9
+#define FAC_CCITT_SRC_NSAP 0xCB
+
+struct rose_neigh {
+ struct rose_neigh *next;
+ ax25_address callsign;
+ ax25_digi *digipeat;
+ ax25_cb *ax25;
+ struct device *dev;
+ unsigned short count;
+ unsigned short use;
+ unsigned int number;
+ char restarted;
+ char dce_mode;
+ struct sk_buff_head queue;
+ unsigned short t0timer, ftimer;
+ struct timer_list timer;
+};
+
+#define ROSE_MAX_ALTERNATE 3
+struct rose_node {
+ struct rose_node *next;
+ rose_address address;
+ unsigned short mask;
+ unsigned char count;
+ struct rose_neigh *neighbour[ROSE_MAX_ALTERNATE];
+};
+
+typedef struct {
+ unsigned int lci;
+ struct rose_neigh *neigh;
+ unsigned short vs, vr, va, vl;
+ unsigned short pending;
+ unsigned char state, condition;
+ struct timer_list timer;
+} rose_tr;
+
+struct rose_route {
+ struct rose_route *next;
+ rose_address src_addr, dest_addr;
+ ax25_address src_call, dest_call;
+ rose_tr tr1, tr2;
+ unsigned int rand;
+};
+
+typedef struct {
+ rose_address source_addr, dest_addr;
+ ax25_address source_call, dest_call;
+ unsigned char source_ndigis, dest_ndigis;
+ ax25_address source_digis[ROSE_MAX_DIGIS];
+ ax25_address dest_digis[ROSE_MAX_DIGIS];
+ struct rose_neigh *neighbour;
+ struct device *device;
+ unsigned int lci, rand;
+ unsigned char state, condition, qbitincl, defer;
+ unsigned char cause, diagnostic;
+ unsigned short vs, vr, va, vl;
+ unsigned short timer;
+ unsigned short t1, t2, t3, hb;
+#ifdef M_BIT
+ unsigned short fraglen;
+ struct sk_buff_head frag_queue;
+#endif
+ struct sk_buff_head ack_queue;
+ struct rose_facilities_struct facilities;
+ struct sock *sk; /* Backlink to socket */
+} rose_cb;
+
+/* af_rose.c */
+extern ax25_address rose_callsign;
+extern int sysctl_rose_restart_request_timeout;
+extern int sysctl_rose_call_request_timeout;
+extern int sysctl_rose_reset_request_timeout;
+extern int sysctl_rose_clear_request_timeout;
+extern int sysctl_rose_ack_hold_back_timeout;
+extern int sysctl_rose_routing_control;
+extern int sysctl_rose_link_fail_timeout;
+extern int sysctl_rose_maximum_vcs;
+extern int sysctl_rose_window_size;
+extern int rosecmp(rose_address *, rose_address *);
+extern int rosecmpm(rose_address *, rose_address *, unsigned short);
+extern char *rose2asc(rose_address *);
+extern void rose_kill_by_neigh(struct rose_neigh *);
+extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+extern unsigned int rose_new_lci(struct rose_neigh *);
+extern int rose_rx_call_request(struct sk_buff *, struct device *, struct rose_neigh *, unsigned int);
+extern void rose_destroy_socket(struct sock *);
+
+/* rose_dev.c */
+extern int rose_rx_ip(struct sk_buff *, struct device *);
+extern int rose_init(struct device *);
+
+#include <net/rosecall.h>
+
+/* rose_in.c */
+extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
+
+/* rose_link.c */
+extern void rose_link_set_timer(struct rose_neigh *);
+extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
+extern void rose_transmit_restart_request(struct rose_neigh *);
+extern void rose_transmit_restart_confirmation(struct rose_neigh *);
+extern void rose_transmit_diagnostic(struct rose_neigh *, unsigned char);
+extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
+extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+
+/* rose_loopback.c */
+extern void rose_loopback_init(void);
+extern void rose_loopback_clear(void);
+extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+
+/* rose_out.c */
+extern void rose_kick(struct sock *);
+extern void rose_enquiry_response(struct sock *);
+
+/* rose_route.c */
+extern void rose_rt_device_down(struct device *);
+extern void rose_link_device_down(struct device *);
+extern void rose_clean_neighbour(struct rose_neigh *);
+extern struct device *rose_dev_first(void);
+extern struct device *rose_dev_get(rose_address *);
+extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+extern struct device *rose_ax25_dev_get(char *);
+extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *);
+extern int rose_rt_ioctl(unsigned int, void *);
+extern void rose_link_failed(ax25_cb *, int);
+extern int rose_route_frame(struct sk_buff *, ax25_cb *);
+extern int rose_nodes_get_info(char *, char **, off_t, int, int);
+extern int rose_neigh_get_info(char *, char **, off_t, int, int);
+extern int rose_routes_get_info(char *, char **, off_t, int, int);
+extern void rose_rt_free(void);
+
+/* rose_subr.c */
+extern void rose_clear_queues(struct sock *);
+extern void rose_frames_acked(struct sock *, unsigned short);
+extern int rose_validate_nr(struct sock *, unsigned short);
+extern void rose_write_internal(struct sock *, int);
+extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *);
+extern int rose_create_facilities(unsigned char *, rose_cb *);
+
+/* rose_timer.c */
+extern void rose_set_timer(struct sock *);
+
+/* sysctl_net_rose.c */
+extern void rose_register_sysctl(void);
+extern void rose_unregister_sysctl(void);
+
+/* rose_transit.c */
+void rose_transit(struct sk_buff *, rose_tr *, rose_tr *);
+void rose_init_transit(rose_tr *, unsigned int, struct rose_neigh *);
+
+#endif
diff --git a/linux/src/include/net/rosecall.h b/linux/src/include/net/rosecall.h
new file mode 100644
index 0000000..5bbe69c
--- /dev/null
+++ b/linux/src/include/net/rosecall.h
@@ -0,0 +1,2 @@
+/* Separate to keep compilation of protocols.c simpler */
+extern void rose_proto_init(struct net_proto *pro);
diff --git a/linux/src/include/net/route.h b/linux/src/include/net/route.h
new file mode 100644
index 0000000..2af1a41
--- /dev/null
+++ b/linux/src/include/net/route.h
@@ -0,0 +1,189 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP router.
+ *
+ * Version: @(#)route.h 1.0.4 05/27/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Fixes:
+ * Alan Cox : Reformatted. Added ip_rt_local()
+ * Alan Cox : Support for TCP parameters.
+ * Alexey Kuznetsov: Major changes for new routing code.
+ * Elliot Poger : Added support for SO_BINDTODEVICE.
+ * Wolfgang Walter,
+ * Daniel Ryde,
+ * Ingo Molinar : fixed bug in ip_rt_put introduced
+ * by SO_BINDTODEVICE support causing
+ * a memory leak
+ *
+ * FIXME:
+ * Make atomic ops more generic and hide them in asm/...
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ROUTE_H
+#define _ROUTE_H
+
+#include <linux/config.h>
+
+/*
+ * 0 - no debugging messages
+ * 1 - rare events and bugs situations (default)
+ * 2 - trace mode.
+ */
+#define RT_CACHE_DEBUG 0
+
+#define RT_HASH_DIVISOR 256
+#define RT_CACHE_SIZE_MAX 256
+
+#define RTZ_HASH_DIVISOR 256
+
+#if RT_CACHE_DEBUG >= 2
+#define RTZ_HASHING_LIMIT 0
+#else
+#define RTZ_HASHING_LIMIT 16
+#endif
+
+/*
+ * Maximal time to live for unused entry.
+ */
+#define RT_CACHE_TIMEOUT (HZ*300)
+
+/*
+ * Prevents LRU thrashing: entries are considered equivalent
+ * if the difference between their last-use times is less than this number.
+ */
+#define RT_CACHE_BUBBLE_THRESHOLD (HZ*5)
+
+#include <linux/route.h>
+
+#ifdef __KERNEL__
+#define RTF_LOCAL 0x8000
+#endif
+
+struct rtable
+{
+ struct rtable *rt_next;
+ __u32 rt_dst;
+ __u32 rt_src;
+ __u32 rt_gateway;
+ atomic_t rt_refcnt;
+ atomic_t rt_use;
+ unsigned long rt_window;
+ atomic_t rt_lastuse;
+ struct hh_cache *rt_hh;
+ struct device *rt_dev;
+ unsigned short rt_flags;
+ unsigned short rt_mtu;
+ unsigned short rt_irtt;
+ unsigned char rt_tos;
+};
+
+extern void ip_rt_flush(struct device *dev);
+extern void ip_rt_update(int event, struct device *dev);
+extern void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev);
+extern struct rtable *ip_rt_slow_route(__u32 daddr, int local, struct device *dev);
+extern struct device *ip_rt_dev(__u32 addr);
+extern int rt_get_info(char * buffer, char **start, off_t offset, int length, int dummy);
+extern int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int ip_rt_ioctl(unsigned int cmd, void *arg);
+extern int ip_rt_new(struct rtentry *rt);
+extern int ip_rt_kill(struct rtentry *rt);
+extern void ip_rt_check_expire(void);
+extern void ip_rt_advice(struct rtable **rp, int advice);
+
+extern void ip_rt_run_bh(void);
+extern atomic_t ip_rt_lock;
+extern unsigned ip_rt_bh_mask;
+extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];
+extern void rt_free(struct rtable * rt);
+
+static __inline__ void ip_rt_fast_lock(void)
+{
+ atomic_inc(&ip_rt_lock);
+}
+
+static __inline__ void ip_rt_fast_unlock(void)
+{
+ atomic_dec(&ip_rt_lock);
+}
+
+static __inline__ void ip_rt_unlock(void)
+{
+ if (atomic_dec_and_test(&ip_rt_lock) && ip_rt_bh_mask)
+ ip_rt_run_bh();
+}
+
+static __inline__ unsigned ip_rt_hash_code(__u32 addr)
+{
+ unsigned tmp = addr + (addr>>16);
+ return (tmp + (tmp>>8)) & 0xFF;
+}
+
+
+static __inline__ void ip_rt_put(struct rtable * rt)
+#ifndef MODULE
+{
+ /* If this rtable entry is not in the cache, we'd better free
+ * it once the refcnt goes to zero, because nobody else will.
+ */
+ if (rt&&atomic_dec_and_test(&rt->rt_refcnt)&&(rt->rt_flags&RTF_NOTCACHED))
+ rt_free(rt);
+}
+#else
+;
+#endif
+
+#ifdef CONFIG_KERNELD
+static struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev);
+#else
+static __inline__ struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev)
+#ifndef MODULE
+{
+ struct rtable * rth;
+
+ ip_rt_fast_lock();
+
+ for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
+ {
+ /* If an interface is specified, make sure this route points to it. */
+ if ( (rth->rt_dst == daddr) && ((dev==NULL) || (dev==rth->rt_dev)) )
+ {
+ rth->rt_lastuse = jiffies;
+ atomic_inc(&rth->rt_use);
+ atomic_inc(&rth->rt_refcnt);
+ ip_rt_unlock();
+ return rth;
+ }
+ }
+ return ip_rt_slow_route (daddr, local, dev);
+}
+#else
+;
+#endif
+#endif
+
+static __inline__ struct rtable * ip_check_route(struct rtable ** rp, __u32 daddr,
+ int local, struct device *dev)
+{
+ struct rtable * rt = *rp;
+
+ if (!rt || rt->rt_dst != daddr || !(rt->rt_flags&RTF_UP) || (dev!=NULL)
+ || ((local==1)^((rt->rt_flags&RTF_LOCAL) != 0)))
+ {
+ ip_rt_put(rt);
+ rt = ip_rt_route(daddr, local, dev);
+ *rp = rt;
+ }
+ return rt;
+}
+
+
+#endif /* _ROUTE_H */
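The inline helpers above imply a lookup/take-reference, use, ip_rt_put() discipline; the short sketch below makes that pairing explicit. The helper name and its use of rt_mtu are illustrative only.

#include <net/route.h>

static unsigned short example_route_mtu(__u32 daddr)
{
        struct rtable *rt;
        unsigned short mtu = 0;

        /* cache hit or slow path; on success we hold a reference */
        rt = ip_rt_route(daddr, 0, NULL);
        if (rt) {
                mtu = rt->rt_mtu;
                ip_rt_put(rt);          /* release the reference we took */
        }
        return mtu;
}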
diff --git a/linux/src/include/net/slhc.h b/linux/src/include/net/slhc.h
new file mode 100644
index 0000000..c7b39db
--- /dev/null
+++ b/linux/src/include/net/slhc.h
@@ -0,0 +1,6 @@
+#ifndef __NET_SLHC_H
+#define __NET_SLHC_H
+
+extern void slhc_install(void);
+
+#endif
diff --git a/linux/src/include/net/slhc_vj.h b/linux/src/include/net/slhc_vj.h
new file mode 100644
index 0000000..471cf71
--- /dev/null
+++ b/linux/src/include/net/slhc_vj.h
@@ -0,0 +1,187 @@
+#ifndef _SLHC_H
+#define _SLHC_H
+/*
+ * Definitions for tcp compression routines.
+ *
+ * $Header: cvs/gnumach/linux/src/include/net/Attic/slhc_vj.h,v 1.1 1999/04/26 05:57:54 tb Exp $
+ *
+ * Copyright (c) 1989 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley. The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989:
+ * - Initial distribution.
+ *
+ *
+ * modified for KA9Q Internet Software Package by
+ * Katie Stevens (dkstevens@ucdavis.edu)
+ * University of California, Davis
+ * Computing Services
+ * - 01-31-90 initial adaptation
+ *
+ * - Feb 1991 Bill_Simpson@um.cc.umich.edu
+ * variable number of conversation slots
+ * allow zero or one slots
+ * separate routines
+ * status display
+ */
+
+/*
+ * Compressed packet format:
+ *
+ * The first octet contains the packet type (top 3 bits), TCP
+ * 'push' bit, and flags that indicate which of the 4 TCP sequence
+ * numbers have changed (bottom 5 bits). The next octet is a
+ * conversation number that associates a saved IP/TCP header with
+ * the compressed packet. The next two octets are the TCP checksum
+ * from the original datagram. The next 0 to 15 octets are
+ * sequence number changes, one change per bit set in the header
+ * (there may be no changes and there are two special cases where
+ * the receiver implicitly knows what changed -- see below).
+ *
+ * There are 5 numbers which can change (they are always inserted
+ * in the following order): TCP urgent pointer, window,
+ * acknowledgment, sequence number and IP ID. (The urgent pointer
+ * is different from the others in that its value is sent, not the
+ * change in value.) Since typical use of SLIP links is biased
+ * toward small packets (see comments on MTU/MSS below), changes
+ * use a variable length coding with one octet for numbers in the
+ * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the
+ * range 256 - 65535 or 0. (If the change in sequence number or
+ * ack is more than 65535, an uncompressed packet is sent.)
+ */
+
+/*
+ * Packet types (must not conflict with IP protocol version)
+ *
+ * The top nibble of the first octet is the packet type. There are
+ * three possible types: IP (not proto TCP or tcp with one of the
+ * control flags set); uncompressed TCP (a normal IP/TCP packet but
+ * with the 8-bit protocol field replaced by an 8-bit connection id --
+ * this type of packet syncs the sender & receiver); and compressed
+ * TCP (described above).
+ *
+ * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and
+ * is logically part of the 4-bit "changes" field that follows. Top
+ * three bits are actual packet type. For backward compatibility
+ * and in the interest of conserving bits, numbers are chosen so the
+ * IP protocol version number (4) which normally appears in this nibble
+ * means "IP packet".
+ */
+
+/* SLIP compression masks for len/vers byte */
+#define SL_TYPE_IP 0x40
+#define SL_TYPE_UNCOMPRESSED_TCP 0x70
+#define SL_TYPE_COMPRESSED_TCP 0x80
+#define SL_TYPE_ERROR 0x00
+
+/* Bits in first octet of compressed packet */
+#define NEW_C 0x40 /* flag bits for what changed in a packet */
+#define NEW_I 0x20
+#define NEW_S 0x08
+#define NEW_A 0x04
+#define NEW_W 0x02
+#define NEW_U 0x01
+
+/* reserved, special-case values of above */
+#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */
+#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */
+#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U)
+
+#define TCP_PUSH_BIT 0x10
+
+/*
+ * data type and sizes conversion assumptions:
+ *
+ * VJ code KA9Q style generic
+ * u_char byte_t unsigned char 8 bits
+ * u_short int16 unsigned short 16 bits
+ * u_int int16 unsigned short 16 bits
+ * u_long unsigned long unsigned long 32 bits
+ * int int32 long 32 bits
+ */
+
+typedef unsigned char byte_t;
+typedef unsigned long int32;
+
+/*
+ * "state" data for each active tcp conversation on the wire. This is
+ * basically a copy of the entire IP/TCP header from the last packet
+ * we saw from the conversation together with a small identifier
+ * the transmit & receive ends of the line use to locate saved header.
+ */
+struct cstate {
+ byte_t cs_this; /* connection id number (xmit) */
+ struct cstate *next; /* next in ring (xmit) */
+ struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
+ struct tcphdr cs_tcp;
+ unsigned char cs_ipopt[64];
+ unsigned char cs_tcpopt[64];
+ int cs_hsize;
+};
+#define NULLSLSTATE (struct cstate *)0
+
+/*
+ * all the state data for one serial line (we need one of these per line).
+ */
+struct slcompress {
+ struct cstate *tstate; /* transmit connection states (array)*/
+ struct cstate *rstate; /* receive connection states (array)*/
+
+ byte_t tslot_limit; /* highest transmit slot id (0-l)*/
+ byte_t rslot_limit; /* highest receive slot id (0-l)*/
+
+ byte_t xmit_oldest; /* oldest xmit in ring */
+ byte_t xmit_current; /* most recent xmit id */
+ byte_t recv_current; /* most recent rcvd id */
+
+ byte_t flags;
+#define SLF_TOSS 0x01 /* tossing rcvd frames until id received */
+
+ int32 sls_o_nontcp; /* outbound non-TCP packets */
+ int32 sls_o_tcp; /* outbound TCP packets */
+ int32 sls_o_uncompressed; /* outbound uncompressed packets */
+ int32 sls_o_compressed; /* outbound compressed packets */
+ int32 sls_o_searches; /* searches for connection state */
+ int32 sls_o_misses; /* times couldn't find conn. state */
+
+ int32 sls_i_uncompressed; /* inbound uncompressed packets */
+ int32 sls_i_compressed; /* inbound compressed packets */
+ int32 sls_i_error; /* inbound error packets */
+ int32 sls_i_tossed; /* inbound packets tossed because of error */
+
+ int32 sls_i_runt;
+ int32 sls_i_badcheck;
+};
+#define NULLSLCOMPR (struct slcompress *)0
+
+#define __ARGS(x) x
+
+/* In slhc.c: */
+struct slcompress *slhc_init __ARGS((int rslots, int tslots));
+void slhc_free __ARGS((struct slcompress *comp));
+
+int slhc_compress __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize, unsigned char *ocp, unsigned char **cpp,
+ int compress_cid));
+int slhc_uncompress __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize));
+int slhc_remember __ARGS((struct slcompress *comp, unsigned char *icp,
+ int isize));
+int slhc_toss __ARGS((struct slcompress *comp));
+
+void slhc_i_status __ARGS((struct slcompress *comp));
+void slhc_o_status __ARGS((struct slcompress *comp));
+
+#endif /* _SLHC_H */
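To make the packet-type comment above concrete, a sketch that classifies the first octet of a received SLIP frame using the masks defined in this header. The function name and the printk reporting are illustrative; the bit tests follow the comment and the SL_TYPE_*/NEW_* definitions.

#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/slhc_vj.h>

static void example_classify(unsigned char first_octet)
{
        if (first_octet & SL_TYPE_COMPRESSED_TCP) {
                /* low bits say which header fields follow as deltas */
                printk("compressed TCP:%s%s%s%s%s%s\n",
                       (first_octet & NEW_C) ? " conn-id" : "",
                       (first_octet & NEW_I) ? " ip-id"   : "",
                       (first_octet & NEW_S) ? " seq"     : "",
                       (first_octet & NEW_A) ? " ack"     : "",
                       (first_octet & NEW_W) ? " window"  : "",
                       (first_octet & NEW_U) ? " urgent"  : "");
        } else if ((first_octet & 0xF0) == SL_TYPE_UNCOMPRESSED_TCP) {
                printk("uncompressed TCP: protocol byte holds the conn id\n");
        } else if ((first_octet & 0xF0) == SL_TYPE_IP) {
                /* version nibble 4: an ordinary, untouched IP packet */
                printk("plain IP packet\n");
        } else {
                printk("unrecognised type, frame should be tossed\n");
        }
}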
diff --git a/linux/src/include/net/snmp.h b/linux/src/include/net/snmp.h
new file mode 100644
index 0000000..552292b
--- /dev/null
+++ b/linux/src/include/net/snmp.h
@@ -0,0 +1,107 @@
+/*
+ *
+ * SNMP MIB entries for the IP subsystem.
+ *
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ *
+ * We don't choose to implement SNMP in the kernel (this would
+ * be silly as SNMP is a pain in the backside in places). We do
+ * however need to collect the MIB statistics and export them
+ * out of /proc (eventually)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _SNMP_H
+#define _SNMP_H
+
+/*
+ * We use all unsigned longs. Linux will soon be so reliable that even these
+ * will rapidly get too small 8-). Seriously consider the IpInReceives count
+ * on the 20Gb/s + networks people expect in a few years time!
+ */
+
+struct ip_mib
+{
+ unsigned long IpForwarding;
+ unsigned long IpDefaultTTL;
+ unsigned long IpInReceives;
+ unsigned long IpInHdrErrors;
+ unsigned long IpInAddrErrors;
+ unsigned long IpForwDatagrams;
+ unsigned long IpInUnknownProtos;
+ unsigned long IpInDiscards;
+ unsigned long IpInDelivers;
+ unsigned long IpOutRequests;
+ unsigned long IpOutDiscards;
+ unsigned long IpOutNoRoutes;
+ unsigned long IpReasmTimeout;
+ unsigned long IpReasmReqds;
+ unsigned long IpReasmOKs;
+ unsigned long IpReasmFails;
+ unsigned long IpFragOKs;
+ unsigned long IpFragFails;
+ unsigned long IpFragCreates;
+};
+
+
+struct icmp_mib
+{
+ unsigned long IcmpInMsgs;
+ unsigned long IcmpInErrors;
+ unsigned long IcmpInDestUnreachs;
+ unsigned long IcmpInTimeExcds;
+ unsigned long IcmpInParmProbs;
+ unsigned long IcmpInSrcQuenchs;
+ unsigned long IcmpInRedirects;
+ unsigned long IcmpInEchos;
+ unsigned long IcmpInEchoReps;
+ unsigned long IcmpInTimestamps;
+ unsigned long IcmpInTimestampReps;
+ unsigned long IcmpInAddrMasks;
+ unsigned long IcmpInAddrMaskReps;
+ unsigned long IcmpOutMsgs;
+ unsigned long IcmpOutErrors;
+ unsigned long IcmpOutDestUnreachs;
+ unsigned long IcmpOutTimeExcds;
+ unsigned long IcmpOutParmProbs;
+ unsigned long IcmpOutSrcQuenchs;
+ unsigned long IcmpOutRedirects;
+ unsigned long IcmpOutEchos;
+ unsigned long IcmpOutEchoReps;
+ unsigned long IcmpOutTimestamps;
+ unsigned long IcmpOutTimestampReps;
+ unsigned long IcmpOutAddrMasks;
+ unsigned long IcmpOutAddrMaskReps;
+};
+
+struct tcp_mib
+{
+ unsigned long TcpRtoAlgorithm;
+ unsigned long TcpRtoMin;
+ unsigned long TcpRtoMax;
+ unsigned long TcpMaxConn;
+ unsigned long TcpActiveOpens;
+ unsigned long TcpPassiveOpens;
+ unsigned long TcpAttemptFails;
+ unsigned long TcpEstabResets;
+ unsigned long TcpCurrEstab;
+ unsigned long TcpInSegs;
+ unsigned long TcpOutSegs;
+ unsigned long TcpRetransSegs;
+};
+
+struct udp_mib
+{
+ unsigned long UdpInDatagrams;
+ unsigned long UdpNoPorts;
+ unsigned long UdpInErrors;
+ unsigned long UdpOutDatagrams;
+};
+
+
+#endif
diff --git a/linux/src/include/net/sock.h b/linux/src/include/net/sock.h
new file mode 100644
index 0000000..25a9044
--- /dev/null
+++ b/linux/src/include/net/sock.h
@@ -0,0 +1,613 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the AF_INET socket handler.
+ *
+ * Version: @(#)sock.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Florian La Roche <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ * Alan Cox : Volatiles in skbuff pointers. See
+ * skbuff comments. May be overdone,
+ * better to prove they can be removed
+ * than the reverse.
+ * Alan Cox : Added a zapped field for tcp to note
+ * a socket is reset and must stay shut up
+ * Alan Cox : New fields for options
+ * Pauline Middelink : identd support
+ * Alan Cox : Eliminate low level recv/recvfrom
+ * David S. Miller : New socket lookup architecture for ISS.
+ * Elliot Poger : New field for SO_BINDTODEVICE option.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _SOCK_H
+#define _SOCK_H
+
+#include <linux/timer.h>
+#include <linux/ip.h> /* struct options */
+#include <linux/in.h> /* struct sockaddr_in */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/config.h>
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <net/protocol.h> /* struct inet_protocol */
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#include <net/ax25.h>
+#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#include <net/netrom.h>
+#endif
+#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
+#include <net/rose.h>
+#endif
+#endif
+
+#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
+#include <net/ipx.h>
+#endif
+
+#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
+#include <linux/atalk.h>
+#endif
+
+#include <linux/igmp.h>
+
+#include <asm/atomic.h>
+
+/*
+ * The AF_UNIX specific socket options
+ */
+
+struct unix_opt
+{
+ int family;
+ char * name;
+ int locks;
+ struct inode * inode;
+ struct semaphore readsem;
+ struct sock * other;
+ int marksweep;
+#define MARKED 1
+ int inflight;
+};
+
+/*
+ * IP packet socket options
+ */
+
+struct inet_packet_opt
+{
+ struct notifier_block notifier; /* Used when bound */
+ struct device *bound_dev;
+ unsigned long dev_stamp;
+ struct packet_type *prot_hook;
+ char device_name[15];
+};
+
+/*
+ * Once the IPX ncpd patches are in these are going into protinfo
+ */
+
+#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
+struct ipx_opt
+{
+ ipx_address dest_addr;
+ ipx_interface *intrfc;
+ unsigned short port;
+#ifdef CONFIG_IPX_INTERN
+ unsigned char node[IPX_NODE_LEN];
+#endif
+ unsigned short type;
+/*
+ * To handle asynchronous messages from the NetWare server, we have to
+ * know the connection this socket belongs to.
+ */
+ struct ncp_server *ncp_server;
+/*
+ * To handle special ncp connection-handling sockets for mars_nwe,
+ * the connection number must be stored in the socket.
+ */
+ unsigned short ipx_ncp_conn;
+};
+#endif
+
+#ifdef CONFIG_NUTCP
+struct tcp_opt
+{
+/*
+ * RFC793 variables by their proper names. This means you can
+ * read the code and the spec side by side (and laugh ...)
+ * See RFC793 and RFC1122. The RFC writes these in capitals.
+ */
+ __u32 rcv_nxt; /* What we want to receive next */
+ __u32 rcv_up; /* The urgent point (may not be valid) */
+ __u32 rcv_wnd; /* Current receiver window */
+ __u32 snd_nxt; /* Next sequence we send */
+ __u32 snd_una; /* First byte we want an ack for */
+ __u32 snd_up; /* Outgoing urgent pointer */
+ __u32 snd_wl1; /* Sequence for window update */
+ __u32 snd_wl2; /* Ack sequence for update */
+/*
+ * Slow start and congestion control (see also Nagle, and Karn & Partridge)
+ */
+ __u32 snd_cwnd; /* Sending congestion window */
+ __u32 snd_ssthresh; /* Slow start size threshold */
+/*
+ * Timers used by the TCP protocol layer
+ */
+ struct timer_list delack_timer; /* Ack delay */
+ struct timer_list idle_timer; /* Idle watch */
+ struct timer_list completion_timer; /* Up/Down timer */
+ struct timer_list probe_timer; /* Probes */
+ struct timer_list retransmit_timer; /* Resend (no ack) */
+};
+#endif
+
+/*
+ * This structure really needs to be cleaned up.
+ * Most of it is for TCP, and not used by any of
+ * the other protocols.
+ */
+struct sock
+{
+ /* This must be first. */
+ struct sock *sklist_next;
+ struct sock *sklist_prev;
+
+ struct options *opt;
+ atomic_t wmem_alloc;
+ atomic_t rmem_alloc;
+ unsigned long allocation; /* Allocation mode */
+ __u32 write_seq;
+ __u32 sent_seq;
+ __u32 acked_seq;
+ __u32 copied_seq;
+ __u32 rcv_ack_seq;
+ unsigned short rcv_ack_cnt; /* count of same ack */
+ __u32 window_seq;
+ __u32 fin_seq;
+ __u32 urg_seq;
+ __u32 urg_data;
+ __u32 syn_seq;
+ int users; /* user count */
+ /*
+ * Not all are volatile, but some are, so we
+ * might as well say they all are.
+ */
+ volatile char dead,
+ urginline,
+ intr,
+ blog,
+ done,
+ reuse,
+ keepopen,
+ linger,
+ delay_acks,
+ destroy,
+ ack_timed,
+ no_check,
+ zapped, /* In ax25 & ipx means not linked */
+ broadcast,
+ nonagle,
+ bsdism;
+ struct device * bound_device;
+ unsigned long lingertime;
+ int proc;
+
+ struct sock *next;
+ struct sock **pprev;
+ struct sock *bind_next;
+ struct sock **bind_pprev;
+ struct sock *pair;
+ int hashent;
+ struct sock *prev;
+ struct sk_buff * volatile send_head;
+ struct sk_buff * volatile send_next;
+ struct sk_buff * volatile send_tail;
+ struct sk_buff_head back_log;
+ struct sk_buff *partial;
+ struct timer_list partial_timer;
+ long retransmits;
+ struct sk_buff_head write_queue,
+ receive_queue;
+ struct proto *prot;
+ struct wait_queue **sleep;
+ __u32 daddr;
+ __u32 saddr; /* Sending source */
+ __u32 rcv_saddr; /* Bound address */
+ unsigned short max_unacked;
+ unsigned short window;
+ __u32 lastwin_seq; /* sequence number when we last updated the window we offer */
+ __u32 high_seq; /* sequence number when we did current fast retransmit */
+ volatile unsigned long ato; /* ack timeout */
+ volatile unsigned long lrcvtime; /* jiffies at last data rcv */
+ volatile unsigned long idletime; /* jiffies at last rcv */
+ unsigned int bytes_rcv;
+/*
+ * mss is min(mtu, max_window)
+ */
+ unsigned short mtu; /* mss negotiated in the syn's */
+ volatile unsigned short mss; /* current eff. mss - can change */
+ volatile unsigned short user_mss; /* mss requested by user in ioctl */
+ volatile unsigned short max_window;
+ unsigned long window_clamp;
+ unsigned int ssthresh;
+ unsigned short num;
+ volatile unsigned short cong_window;
+ volatile unsigned short cong_count;
+ volatile unsigned short packets_out;
+ volatile unsigned short shutdown;
+ volatile unsigned long rtt;
+ volatile unsigned long mdev;
+ volatile unsigned long rto;
+
+/*
+ * currently backoff isn't used, but I'm maintaining it in case
+ * we want to go back to a backoff formula that needs it
+ */
+
+ volatile unsigned short backoff;
+ int err, err_soft; /* Soft holds errors that don't
+ cause failure but are the cause
+ of a persistent failure not just
+ 'timed out' */
+ unsigned char protocol;
+ volatile unsigned char state;
+ unsigned short ack_backlog;
+ unsigned char priority;
+ unsigned char debug;
+ int rcvbuf;
+ int sndbuf;
+ unsigned short type;
+ unsigned char localroute; /* Route locally only */
+
+/*
+ * This is where all the private (optional) areas that don't
+ * overlap will eventually live.
+ */
+
+ union
+ {
+ struct unix_opt af_unix;
+#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+ ax25_cb *ax25;
+#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+ nr_cb *nr;
+#endif
+#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
+ rose_cb *rose;
+#endif
+#endif
+#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
+ struct atalk_sock af_at;
+#endif
+#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
+ struct ipx_opt af_ipx;
+#endif
+#ifdef CONFIG_INET
+ struct inet_packet_opt af_packet;
+#ifdef CONFIG_NUTCP
+ struct tcp_opt af_tcp;
+#endif
+#endif
+ } protinfo;
+
+/*
+ * IP 'private area' or will be eventually
+ */
+ int ip_ttl; /* TTL setting */
+ int ip_tos; /* TOS */
+ struct tcphdr dummy_th;
+ struct timer_list keepalive_timer; /* TCP keepalive hack */
+ struct timer_list retransmit_timer; /* TCP retransmit timer */
+ struct timer_list delack_timer; /* TCP delayed ack timer */
+ int ip_xmit_timeout; /* Why the timeout is running */
+ struct rtable *ip_route_cache; /* Cached output route */
+ unsigned char ip_hdrincl; /* Include headers ? */
+#ifdef CONFIG_IP_MULTICAST
+ int ip_mc_ttl; /* Multicasting TTL */
+ int ip_mc_loop; /* Loopback */
+ char ip_mc_name[MAX_ADDR_LEN];/* Multicast device name */
+ struct ip_mc_socklist *ip_mc_list; /* Group array */
+#endif
+
+/*
+ * This part is used for the timeout functions (timer.c).
+ */
+
+ int timeout; /* What are we waiting for? */
+ struct timer_list timer; /* This is the TIME_WAIT/receive timer
+ * when we are doing IP
+ */
+ struct timeval stamp;
+
+ /*
+ * Identd
+ */
+
+ struct socket *socket;
+
+ /*
+ * Callbacks
+ */
+
+ void (*state_change)(struct sock *sk);
+ void (*data_ready)(struct sock *sk,int bytes);
+ void (*write_space)(struct sock *sk);
+ void (*error_report)(struct sock *sk);
+
+ /*
+ * Moved solely for 2.0 to keep binary module compatibility stuff straight.
+ */
+
+ unsigned short max_ack_backlog;
+ struct sock *listening;
+};
+
+/*
+ * IP protocol blocks we attach to sockets.
+ */
+
+struct proto
+{
+ /* These must be first. */
+ struct sock *sklist_next;
+ struct sock *sklist_prev;
+
+ void (*close)(struct sock *sk, unsigned long timeout);
+ int (*build_header)(struct sk_buff *skb,
+ __u32 saddr,
+ __u32 daddr,
+ struct device **dev, int type,
+ struct options *opt, int len,
+ int tos, int ttl, struct rtable ** rp);
+ int (*connect)(struct sock *sk,
+ struct sockaddr_in *usin, int addr_len);
+ struct sock * (*accept) (struct sock *sk, int flags);
+ void (*queue_xmit)(struct sock *sk,
+ struct device *dev, struct sk_buff *skb,
+ int free);
+ void (*retransmit)(struct sock *sk, int all);
+ void (*write_wakeup)(struct sock *sk);
+ void (*read_wakeup)(struct sock *sk);
+ int (*rcv)(struct sk_buff *buff, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr,
+ int redo, struct inet_protocol *protocol);
+ int (*select)(struct sock *sk, int which,
+ select_table *wait);
+ int (*ioctl)(struct sock *sk, int cmd,
+ unsigned long arg);
+ int (*init)(struct sock *sk);
+ void (*shutdown)(struct sock *sk, int how);
+ int (*setsockopt)(struct sock *sk, int level, int optname,
+ char *optval, int optlen);
+ int (*getsockopt)(struct sock *sk, int level, int optname,
+ char *optval, int *option);
+ int (*sendmsg)(struct sock *sk, struct msghdr *msg, int len,
+ int noblock, int flags);
+ int (*recvmsg)(struct sock *sk, struct msghdr *msg, int len,
+ int noblock, int flags, int *addr_len);
+ int (*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+ /* Keeping track of sk's, looking them up, and port selection methods. */
+ void (*hash)(struct sock *sk);
+ void (*unhash)(struct sock *sk);
+ void (*rehash)(struct sock *sk);
+ unsigned short (*good_socknum)(void);
+ int (*verify_bind)(struct sock *sk, unsigned short snum);
+
+ unsigned short max_header;
+ unsigned long retransmits;
+ char name[32];
+ int inuse, highestinuse;
+};
+
+#define TIME_WRITE 1
+#define TIME_CLOSE 2
+#define TIME_KEEPOPEN 3
+#define TIME_DESTROY 4
+#define TIME_DONE 5 /* Used to absorb those last few packets */
+#define TIME_PROBE0 6
+
+/*
+ * About 10 seconds
+ */
+
+#define SOCK_DESTROY_TIME (10*HZ)
+
+/*
+ * Sockets 0-1023 can't be bound to unless you are superuser
+ */
+
+#define PROT_SOCK 1024
+
+#define SHUTDOWN_MASK 3
+#define RCV_SHUTDOWN 1
+#define SEND_SHUTDOWN 2
+
+/* Per-protocol hash table implementations use this to make sure
+ * nothing changes.
+ */
+#define SOCKHASH_LOCK() start_bh_atomic()
+#define SOCKHASH_UNLOCK() end_bh_atomic()
+
+/* Some things in the kernel just want to get at a protocols
+ * entire socket list commensurate, thus...
+ */
+static __inline__ void add_to_prot_sklist(struct sock *sk)
+{
+ SOCKHASH_LOCK();
+ if(!sk->sklist_next) {
+ struct proto *p = sk->prot;
+
+ sk->sklist_prev = (struct sock *) p;
+ sk->sklist_next = p->sklist_next;
+ p->sklist_next->sklist_prev = sk;
+ p->sklist_next = sk;
+
+ /* Charge the protocol. */
+ sk->prot->inuse += 1;
+ if(sk->prot->highestinuse < sk->prot->inuse)
+ sk->prot->highestinuse = sk->prot->inuse;
+ }
+ SOCKHASH_UNLOCK();
+}
+
+static __inline__ void del_from_prot_sklist(struct sock *sk)
+{
+ SOCKHASH_LOCK();
+ if(sk->sklist_next) {
+ sk->sklist_next->sklist_prev = sk->sklist_prev;
+ sk->sklist_prev->sklist_next = sk->sklist_next;
+ sk->sklist_next = NULL;
+ sk->prot->inuse--;
+ }
+ SOCKHASH_UNLOCK();
+}
+
+/*
+ * Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * Note the 'barrier()' calls: gcc may not move a lock
+ * "downwards" or a unlock "upwards" when optimizing.
+ */
+extern void __release_sock(struct sock *sk);
+
+static inline void lock_sock(struct sock *sk)
+{
+#if 0
+/* debugging code: the test isn't even 100% correct, but it can catch bugs */
+/* Note that a double lock is ok in theory - it's just _usually_ a bug */
+ if (sk->users) {
+ __label__ here;
+ printk("double lock on socket at %p\n", &&here);
+here:
+ }
+#endif
+ sk->users++;
+ barrier();
+}
+
+static inline void release_sock(struct sock *sk)
+{
+ barrier();
+#if 0
+/* debugging code: remove me when ok */
+ if (sk->users == 0) {
+ __label__ here;
+ sk->users = 1;
+ printk("trying to unlock unlocked socket at %p\n", &&here);
+here:
+ }
+#endif
+ if ((sk->users = sk->users-1) == 0)
+ __release_sock(sk);
+}
+
+
+extern struct sock * sk_alloc(int priority);
+extern void sk_free(struct sock *sk);
+extern void destroy_sock(struct sock *sk);
+
+extern struct sk_buff *sock_wmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern struct sk_buff *sock_rmalloc(struct sock *sk,
+ unsigned long size, int force,
+ int priority);
+extern void sock_wfree(struct sock *sk,
+ struct sk_buff *skb);
+extern void sock_rfree(struct sock *sk,
+ struct sk_buff *skb);
+extern unsigned long sock_rspace(struct sock *sk);
+extern unsigned long sock_wspace(struct sock *sk);
+
+extern int sock_setsockopt(struct sock *sk, int level,
+ int op, char *optval,
+ int optlen);
+
+extern int sock_getsockopt(struct sock *sk, int level,
+ int op, char *optval,
+ int *optlen);
+extern struct sk_buff *sock_alloc_send_skb(struct sock *skb,
+ unsigned long size,
+ unsigned long fallback,
+ int noblock,
+ int *errcode);
+
+/*
+ * Queue a received datagram if it will fit. Stream and sequenced
+ * protocols can't normally use this as they need to fit buffers in
+ * and play with them.
+ *
+ * Inlined as it's very short and called for pretty much every
+ * packet ever received.
+ */
+
+static __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
+ return -ENOMEM;
+ atomic_add(skb->truesize, &sk->rmem_alloc);
+ skb->sk=sk;
+ skb_queue_tail(&sk->receive_queue,skb);
+ if (!sk->dead)
+ sk->data_ready(sk,skb->len);
+ return 0;
+}
+
+static __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
+ return -ENOMEM;
+ atomic_add(skb->truesize, &sk->rmem_alloc);
+ skb->sk=sk;
+ __skb_queue_tail(&sk->receive_queue,skb);
+ if (!sk->dead)
+ sk->data_ready(sk,skb->len);
+ return 0;
+}
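+
+/*
+ * Illustrative sketch (editor's addition): the admission test above is
+ * plain receive-buffer accounting: a datagram is queued only while its
+ * truesize still fits under the socket's receive-buffer limit, and the
+ * charge is dropped again when the buffer is freed (sock_rfree).
+ * Hypothetical stand-alone model:
+ */
+struct rcv_budget { unsigned long alloc, limit; };
+
+static __inline__ int budget_charge(struct rcv_budget *b, unsigned long truesize)
+{
+ if (b->alloc + truesize >= b->limit)
+ return -1; /* caller drops the packet (-ENOMEM above) */
+ b->alloc += truesize;
+ return 0;
+}
+
+static __inline__ void budget_uncharge(struct rcv_budget *b, unsigned long truesize)
+{
+ b->alloc -= truesize; /* as sock_rfree() does via atomic_sub */
+}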
+
+/*
+ * Recover an error report and clear atomically
+ */
+
+static __inline__ int sock_error(struct sock *sk)
+{
+ int err=xchg(&sk->err,0);
+ return -err;
+}
+
+/*
+ * Declarations from timer.c
+ */
+
+extern struct sock *timer_base;
+
+extern void delete_timer (struct sock *);
+extern void reset_timer (struct sock *, int, unsigned long);
+extern void net_timer (unsigned long);
+
+
+/*
+ * Enable debug/info messages
+ */
+
+#define NETDEBUG(x) do { } while (0)
+
+#endif /* _SOCK_H */
diff --git a/linux/src/include/net/spx.h b/linux/src/include/net/spx.h
new file mode 100644
index 0000000..3e9b1d1
--- /dev/null
+++ b/linux/src/include/net/spx.h
@@ -0,0 +1,38 @@
+#ifndef __NET_SPX_H
+#define __NET_SPX_H
+
+/*
+ * Internal definitions for the SPX protocol.
+ */
+
+/*
+ * The SPX header following an IPX header.
+ */
+
+struct spxhdr
+{
+ __u8 cctl;
+#define CCTL_SPXII_XHD 0x01 /* SPX2 extended header */
+#define CCTL_SPX_UNKNOWN 0x02 /* Unknown (unused ??) */
+#define CCTL_SPXII_NEG 0x04 /* Negotiate size */
+#define CCTL_SPXII 0x08 /* Set for SPX2 */
+#define CCTL_EOM 0x10 /* End of message marker */
+#define CCTL_URG 0x20 /* Urgent marker in SPP (not used in SPX?) */
+#define CCTL_ACK 0x40 /* Send me an ACK */
+#define CCTL_CTL 0x80 /* Control message */
+ __u8 dtype;
+#define SPX_DTYPE_ECONN 0xFE /* Finished */
+#define SPX_DTYPE_ECACK 0xFF /* Ok */
+ __u16 sconn; /* Connection ID */
+ __u16 dconn; /* Connection ID */
+ __u16 sequence;
+ __u16 ackseq;
+ __u16 allocseq;
+};
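+
+/*
+ * Illustrative note (editor's addition): cctl is a bit mask, so a
+ * receiver tests the individual connection-control bits, e.g.
+ *
+ * if (hdr->cctl & CCTL_CTL) ... system/control packet ...
+ * if (hdr->cctl & CCTL_ACK) ... peer asked for an acknowledgement ...
+ * if (hdr->cctl & CCTL_EOM) ... last fragment of a message ...
+ */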
+
+#define IPXTYPE_SPX 5
+
+
+
+
+#endif
diff --git a/linux/src/include/net/tcp.h b/linux/src/include/net/tcp.h
new file mode 100644
index 0000000..b2534ba
--- /dev/null
+++ b/linux/src/include/net/tcp.h
@@ -0,0 +1,374 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP module.
+ *
+ * Version: @(#)tcp.h 1.0.5 05/23/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _TCP_H
+#define _TCP_H
+
+#include <linux/tcp.h>
+#include <net/checksum.h>
+
+/* This is for all connections with a full identity, no wildcards. */
+#define TCP_HTABLE_SIZE 256
+
+/* This is for listening sockets, thus all sockets which possess wildcards. */
+#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
+
+/* This is for all sockets, to keep track of the local port allocations. */
+#define TCP_BHTABLE_SIZE 64
+
+/* tcp_ipv4.c: These need to be shared by v4 and v6 because the lookup
+ * and hashing code needs to work with different AF's yet
+ * the port space is shared.
+ */
+extern struct sock *tcp_established_hash[TCP_HTABLE_SIZE];
+extern struct sock *tcp_listening_hash[TCP_LHTABLE_SIZE];
+extern struct sock *tcp_bound_hash[TCP_BHTABLE_SIZE];
+
+/* These are AF independent. */
+static __inline__ int tcp_bhashfn(__u16 lport)
+{
+ return (lport ^ (lport >> 7)) & (TCP_BHTABLE_SIZE-1);
+}
+
+/* Find the next port, larger than lport, that hashes into bucket h.
+ * If you change the hash, change this function to match, or you will
+ * break TCP port selection. This function must also NOT wrap around
+ * when the next number exceeds the largest possible port (2^16-1).
+ */
+static __inline__ int tcp_bhashnext(__u16 lport, __u16 h)
+{
+ __u32 s; /* don't change this to a smaller type! */
+
+ s = (lport ^ (h ^ tcp_bhashfn(lport)));
+ if (s > lport)
+ return s;
+ s = lport + TCP_BHTABLE_SIZE;
+ return (s ^ (h ^ tcp_bhashfn(s)));
+}
+
+static __inline__ int tcp_sk_bhashfn(struct sock *sk)
+{
+ __u16 lport = sk->num;
+ return tcp_bhashfn(lport);
+}
+
+/* These can have wildcards, don't try too hard.
+ * XXX deal with thousands of IP aliases for listening ports later
+ */
+static __inline__ int tcp_lhashfn(unsigned short num)
+{
+ return num & (TCP_LHTABLE_SIZE - 1);
+}
+
+static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
+{
+ return tcp_lhashfn(sk->num);
+}
+
+/* This is IPv4 specific. */
+static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
+ __u32 faddr, __u16 fport)
+{
+ return ((laddr ^ lport) ^ (faddr ^ fport)) & (TCP_HTABLE_SIZE - 1);
+}
+
+static __inline__ int tcp_sk_hashfn(struct sock *sk)
+{
+ __u32 laddr = sk->rcv_saddr;
+ __u16 lport = sk->num;
+ __u32 faddr = sk->daddr;
+ __u16 fport = sk->dummy_th.dest;
+
+ return tcp_hashfn(laddr, lport, faddr, fport);
+}
+
+/* Only those holding the sockhash lock call these two things here.
+ * Note the slightly gross overloading of sk->prev, AF_UNIX is the
+ * only other main benefactor of that member of SK, so who cares.
+ */
+static __inline__ void tcp_sk_bindify(struct sock *sk)
+{
+ int hashent = tcp_sk_bhashfn(sk);
+ struct sock **htable = &tcp_bound_hash[hashent];
+
+ if((sk->bind_next = *htable) != NULL)
+ (*htable)->bind_pprev = &sk->bind_next;
+ *htable = sk;
+ sk->bind_pprev = htable;
+}
+
+static __inline__ void tcp_sk_unbindify(struct sock *sk)
+{
+ if(sk->bind_next)
+ sk->bind_next->bind_pprev = sk->bind_pprev;
+ *(sk->bind_pprev) = sk->bind_next;
+}
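+
+/*
+ * Illustrative sketch (editor's addition): bind_pprev holds the address
+ * of whichever pointer currently points at this socket (either the
+ * hash-table slot or the previous socket's bind_next), so
+ * tcp_sk_unbindify() can unlink in O(1) without re-walking the chain.
+ * A port lookup then only scans the single chain picked by
+ * tcp_bhashfn(); hypothetical helper:
+ */
+static __inline__ int tcp_toy_local_port_in_use(__u16 lport)
+{
+ struct sock *sk;
+
+ for (sk = tcp_bound_hash[tcp_bhashfn(lport)]; sk; sk = sk->bind_next)
+ if (sk->num == lport)
+ return 1;
+ return 0;
+}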
+
+/*
+ * 40 is maximal IP options size
+ * 4 is TCP option size (MSS)
+ */
+#define MAX_SYN_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + 4 + MAX_HEADER + 15)
+#define MAX_FIN_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + MAX_HEADER + 15)
+#define MAX_ACK_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + MAX_HEADER + 15)
+#define MAX_RESET_SIZE (sizeof(struct iphdr) + 40 + sizeof(struct tcphdr) + MAX_HEADER + 15)
+
+#define MAX_WINDOW 32767 /* Never offer a window over 32767 without using
+ window scaling (not yet supported). Some poor
+ stacks do signed 16bit maths! */
+#define MIN_WINDOW 2048
+#define MAX_ACK_BACKLOG 2
+#define MAX_DUP_ACKS 3
+#define MIN_WRITE_SPACE 2048
+#define TCP_WINDOW_DIFF 2048
+
+/* urg_data states */
+#define URG_VALID 0x0100
+#define URG_NOTYET 0x0200
+#define URG_READ 0x0400
+
+#define TCP_RETR1 7 /*
+ * This is how many retries it does before it
+ * tries to figure out if the gateway is
+ * down.
+ */
+
+#define TCP_RETR2 15 /*
+ * This should take at least
+ * 90 minutes to time out.
+ */
+
+#define TCP_TIMEOUT_LEN (15*60*HZ) /* should be about 15 mins */
+#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully
+ * close the socket, about 60 seconds */
+#define TCP_FIN_TIMEOUT (3*60*HZ) /* BSD style FIN_WAIT2 deadlock breaker */
+#define TCP_ACK_TIME (3*HZ) /* time to delay before sending an ACK */
+#define TCP_DONE_TIME (5*HZ/2)/* maximum time to wait before actually
+ * destroying a socket */
+#define TCP_WRITE_TIME (30*HZ) /* initial time to wait for an ACK,
+ * after last transmit */
+#define TCP_TIMEOUT_INIT (3*HZ) /* RFC 1122 initial timeout value */
+#define TCP_SYN_RETRIES 5 /* number of times to retry opening a
+ * connection (TCP_RETR2-....) */
+#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
+ * I've got something to write and
+ * there is no window */
+
+#define TCP_NO_CHECK 0 /* turn to one if you want the default
+ * to be no checksum */
+
+
+/*
+ * TCP option
+ */
+
+#define TCPOPT_NOP 1 /* Padding */
+#define TCPOPT_EOL 0 /* End of options */
+#define TCPOPT_MSS 2 /* Segment size negotiating */
+/*
+ * We don't use these yet, but they are for PAWS and big windows
+ */
+#define TCPOPT_WINDOW 3 /* Window scaling */
+#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
+
+
+/*
+ * The next routines deal with comparing 32 bit unsigned ints
+ * and worry about wraparound (automatic with unsigned arithmetic).
+ */
+
+extern __inline int before(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq1-seq2) < 0;
+}
+
+extern __inline int after(__u32 seq1, __u32 seq2)
+{
+ return (__s32)(seq2-seq1) < 0;
+}
+
+
+/* is s2<=s1<=s3 ? */
+extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+{
+ return (after(seq1+1, seq2) && before(seq1, seq3+1));
+}
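+
+/*
+ * Worked example (editor's addition): the signed cast is what makes the
+ * comparison survive 32-bit wraparound.  With seq1 = 0xfffffff0 and
+ * seq2 = 0x00000010, seq1 - seq2 = 0xffffffe0, which is -32 as an
+ * __s32, so before(0xfffffff0, 0x00000010) is true even though seq1 is
+ * the larger plain unsigned value; likewise after(0x00000010,
+ * 0xfffffff0) and between(0x00000000, 0xfffffff0, 0x00000010) hold.
+ * Sequence numbers more than 2^31 apart are ambiguous, but TCP windows
+ * are far smaller than that.
+ */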
+
+static __inline__ int min(unsigned int a, unsigned int b)
+{
+ if (a > b)
+ a = b;
+ return a;
+}
+
+static __inline__ int max(unsigned int a, unsigned int b)
+{
+ if (a < b)
+ a = b;
+ return a;
+}
+
+extern struct proto tcp_prot;
+extern struct tcp_mib tcp_statistics;
+
+extern unsigned short tcp_good_socknum(void);
+
+extern void tcp_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32, struct inet_protocol *protocol, int len);
+extern void tcp_shutdown (struct sock *sk, int how);
+extern int tcp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr, int redo,
+ struct inet_protocol *protocol);
+
+extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+extern void tcp_v4_unhash(struct sock *sk);
+
+extern void tcp_read_wakeup(struct sock *);
+extern void tcp_write_xmit(struct sock *);
+extern void tcp_time_wait(struct sock *);
+extern void tcp_retransmit(struct sock *, int);
+extern void tcp_do_retransmit(struct sock *, int);
+extern void tcp_send_check(struct tcphdr *th, unsigned long saddr,
+ unsigned long daddr, int len, struct sk_buff *skb);
+
+/* tcp_output.c */
+
+extern void tcp_send_probe0(struct sock *);
+extern void tcp_send_partial(struct sock *);
+extern void tcp_write_wakeup(struct sock *);
+extern void tcp_send_fin(struct sock *sk);
+extern void tcp_send_synack(struct sock *, struct sock *, struct sk_buff *, int);
+extern void tcp_send_skb(struct sock *, struct sk_buff *);
+extern void tcp_send_ack(struct sock *sk);
+extern void tcp_send_delayed_ack(struct sock *sk, int max_timeout, unsigned long timeout);
+extern void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
+ struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl);
+
+extern void tcp_enqueue_partial(struct sk_buff *, struct sock *);
+extern struct sk_buff * tcp_dequeue_partial(struct sock *);
+extern void tcp_shrink_skb(struct sock *,struct sk_buff *,u32);
+
+/* CONFIG_IP_TRANSPARENT_PROXY */
+extern int tcp_chkaddr(struct sk_buff *);
+
+/* tcp_timer.c */
+#define tcp_reset_msl_timer(x,y,z) reset_timer(x,y,z)
+extern void tcp_reset_xmit_timer(struct sock *, int, unsigned long);
+extern void tcp_delack_timer(unsigned long);
+extern void tcp_retransmit_timer(unsigned long);
+
+static __inline__ int tcp_old_window(struct sock * sk)
+{
+ return sk->window - (sk->acked_seq - sk->lastwin_seq);
+}
+
+extern int tcp_new_window(struct sock *);
+
+/*
+ * Return true if we should raise the window when we
+ * have cleaned up the receive queue. We don't want to
+ * do this normally, only if it makes sense to avoid
+ * zero window probes..
+ *
+ * We do this only if we can raise the window noticeably.
+ */
+static __inline__ int tcp_raise_window(struct sock * sk)
+{
+ int new = tcp_new_window(sk);
+ return new && (new >= 2*tcp_old_window(sk));
+}
+
+static __inline__ unsigned short tcp_select_window(struct sock *sk)
+{
+ int window = tcp_new_window(sk);
+ int oldwin = tcp_old_window(sk);
+
+ /* Don't allow a shrinking window */
+ if (window > oldwin) {
+ sk->window = window;
+ sk->lastwin_seq = sk->acked_seq;
+ oldwin = window;
+ }
+ return oldwin;
+}
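+
+/*
+ * Worked example (editor's addition, informal reading): sk->window was
+ * advertised relative to sk->lastwin_seq.  If acked_seq has since
+ * advanced by, say, 3000 bytes without a new advertisement, the offer
+ * still outstanding from the peer's point of view is sk->window - 3000
+ * (tcp_old_window).  tcp_select_window() only switches to the freshly
+ * computed window when it is larger than that, so the receiver never
+ * appears to take back sequence space it has already offered.
+ */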
+
+/*
+ * List all states of a TCP socket that can be viewed as a "connected"
+ * state. This now includes TCP_SYN_RECV, although I am not yet fully
+ * convinced that this is the solution for the 'getpeername(2)'
+ * problem. Thanks to Stephen A. Wood <saw@cebaf.gov> -FvK
+ */
+
+extern __inline const int tcp_connected(const int state)
+{
+ return(state == TCP_ESTABLISHED || state == TCP_CLOSE_WAIT ||
+ state == TCP_FIN_WAIT1 || state == TCP_FIN_WAIT2 ||
+ state == TCP_SYN_RECV);
+}
+
+/*
+ * Calculate(/check) TCP checksum
+ */
+static __inline__ u16 tcp_check(struct tcphdr *th, int len,
+ unsigned long saddr, unsigned long daddr, unsigned long base)
+{
+ return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
+}
+
+#undef STATE_TRACE
+
+#ifdef STATE_TRACE
+static char *statename[]={
+ "Unused","Established","Syn Sent","Syn Recv",
+ "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
+ "Close Wait","Last ACK","Listen","Closing"
+};
+#endif
+
+static __inline__ void tcp_set_state(struct sock *sk, int state)
+{
+ int oldstate = sk->state;
+
+ sk->state = state;
+
+#ifdef STATE_TRACE
+ if(sk->debug)
+ printk("TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
+#endif
+
+ switch (state) {
+ case TCP_ESTABLISHED:
+ if (oldstate != TCP_ESTABLISHED) {
+ tcp_statistics.TcpCurrEstab++;
+ }
+ break;
+
+ case TCP_CLOSE:
+ /* Preserve the invariant */
+ tcp_v4_unhash(sk);
+ /* Should be about 2 rtt's */
+ reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME));
+ /* fall through */
+ default:
+ if (oldstate==TCP_ESTABLISHED)
+ tcp_statistics.TcpCurrEstab--;
+ }
+}
+
+#endif /* _TCP_H */
diff --git a/linux/src/include/net/udp.h b/linux/src/include/net/udp.h
new file mode 100644
index 0000000..d2c7476
--- /dev/null
+++ b/linux/src/include/net/udp.h
@@ -0,0 +1,63 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP module.
+ *
+ * Version: @(#)udp.h 1.0.2 05/07/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Alan Cox : Turned on udp checksums. I don't want to
+ * chase 'memory corruption' bugs that aren't!
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UDP_H
+#define _UDP_H
+
+#include <linux/udp.h>
+
+#define UDP_HTABLE_SIZE 128
+
+/* udp.c: This needs to be shared by v4 and v6 because the lookup
+ * and hashing code needs to work with different AF's yet
+ * the port space is shared.
+ */
+extern struct sock *udp_hash[UDP_HTABLE_SIZE];
+
+extern unsigned short udp_good_socknum(void);
+
+#define UDP_NO_CHECK 0
+
+
+extern struct proto udp_prot;
+
+
+extern void udp_err(int type, int code, unsigned char *header, __u32 daddr,
+ __u32 saddr, struct inet_protocol *protocol, int len);
+extern void udp_send_check(struct udphdr *uh, __u32 saddr,
+ __u32 daddr, int len, struct sock *sk);
+extern int udp_recvfrom(struct sock *sk, unsigned char *to,
+ int len, int noblock, unsigned flags,
+ struct sockaddr_in *sin, int *addr_len);
+extern int udp_read(struct sock *sk, unsigned char *buff,
+ int len, int noblock, unsigned flags);
+extern int udp_connect(struct sock *sk,
+ struct sockaddr_in *usin, int addr_len);
+extern int udp_rcv(struct sk_buff *skb, struct device *dev,
+ struct options *opt, __u32 daddr,
+ unsigned short len, __u32 saddr, int redo,
+ struct inet_protocol *protocol);
+extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+
+/* CONFIG_IP_TRANSPARENT_PROXY */
+extern int udp_chkaddr(struct sk_buff *skb);
+
+#endif /* _UDP_H */
diff --git a/linux/src/include/scsi/scsi.h b/linux/src/include/scsi/scsi.h
new file mode 100644
index 0000000..cd44f52
--- /dev/null
+++ b/linux/src/include/scsi/scsi.h
@@ -0,0 +1,205 @@
+#ifndef _LINUX_SCSI_H
+#define _LINUX_SCSI_H
+
+/*
+ * This header file contains public constants and structures used by
+ * the scsi code for linux.
+ */
+
+/*
+ $Header: cvs/gnumach/linux/src/include/scsi/Attic/scsi.h,v 1.1 1999/04/26 05:58:14 tb Exp $
+
+ For documentation on the OPCODES, MESSAGES, and SENSE values,
+ please consult the SCSI standard.
+
+*/
+
+/*
+ * SCSI opcodes
+ */
+
+#define TEST_UNIT_READY 0x00
+#define REZERO_UNIT 0x01
+#define REQUEST_SENSE 0x03
+#define FORMAT_UNIT 0x04
+#define READ_BLOCK_LIMITS 0x05
+#define REASSIGN_BLOCKS 0x07
+#define READ_6 0x08
+#define WRITE_6 0x0a
+#define SEEK_6 0x0b
+#define READ_REVERSE 0x0f
+#define WRITE_FILEMARKS 0x10
+#define SPACE 0x11
+#define INQUIRY 0x12
+#define RECOVER_BUFFERED_DATA 0x14
+#define MODE_SELECT 0x15
+#define RESERVE 0x16
+#define RELEASE 0x17
+#define COPY 0x18
+#define ERASE 0x19
+#define MODE_SENSE 0x1a
+#define START_STOP 0x1b
+#define RECEIVE_DIAGNOSTIC 0x1c
+#define SEND_DIAGNOSTIC 0x1d
+#define ALLOW_MEDIUM_REMOVAL 0x1e
+
+#define SET_WINDOW 0x24
+#define READ_CAPACITY 0x25
+#define READ_10 0x28
+#define WRITE_10 0x2a
+#define SEEK_10 0x2b
+#define WRITE_VERIFY 0x2e
+#define VERIFY 0x2f
+#define SEARCH_HIGH 0x30
+#define SEARCH_EQUAL 0x31
+#define SEARCH_LOW 0x32
+#define SET_LIMITS 0x33
+#define PRE_FETCH 0x34
+#define READ_POSITION 0x34
+#define SYNCHRONIZE_CACHE 0x35
+#define LOCK_UNLOCK_CACHE 0x36
+#define READ_DEFECT_DATA 0x37
+#define MEDIUM_SCAN 0x38
+#define COMPARE 0x39
+#define COPY_VERIFY 0x3a
+#define WRITE_BUFFER 0x3b
+#define READ_BUFFER 0x3c
+#define UPDATE_BLOCK 0x3d
+#define READ_LONG 0x3e
+#define WRITE_LONG 0x3f
+#define CHANGE_DEFINITION 0x40
+#define WRITE_SAME 0x41
+#define READ_TOC 0x43
+#define LOG_SELECT 0x4c
+#define LOG_SENSE 0x4d
+#define MODE_SELECT_10 0x55
+#define MODE_SENSE_10 0x5a
+#define MOVE_MEDIUM 0xa5
+#define READ_12 0xa8
+#define WRITE_12 0xaa
+#define WRITE_VERIFY_12 0xae
+#define SEARCH_HIGH_12 0xb0
+#define SEARCH_EQUAL_12 0xb1
+#define SEARCH_LOW_12 0xb2
+#define READ_ELEMENT_STATUS 0xb8
+#define SEND_VOLUME_TAG 0xb6
+#define WRITE_LONG_2 0xea
+
+/*
+ * Status codes
+ */
+
+#define GOOD 0x00
+#define CHECK_CONDITION 0x01
+#define CONDITION_GOOD 0x02
+#define BUSY 0x04
+#define INTERMEDIATE_GOOD 0x08
+#define INTERMEDIATE_C_GOOD 0x0a
+#define RESERVATION_CONFLICT 0x0c
+#define COMMAND_TERMINATED 0x11
+#define QUEUE_FULL 0x14
+
+#define STATUS_MASK 0x3e
+
+/*
+ * SENSE KEYS
+ */
+
+#define NO_SENSE 0x00
+#define RECOVERED_ERROR 0x01
+#define NOT_READY 0x02
+#define MEDIUM_ERROR 0x03
+#define HARDWARE_ERROR 0x04
+#define ILLEGAL_REQUEST 0x05
+#define UNIT_ATTENTION 0x06
+#define DATA_PROTECT 0x07
+#define BLANK_CHECK 0x08
+#define COPY_ABORTED 0x0a
+#define ABORTED_COMMAND 0x0b
+#define VOLUME_OVERFLOW 0x0d
+#define MISCOMPARE 0x0e
+
+
+/*
+ * DEVICE TYPES
+ */
+
+#define TYPE_DISK 0x00
+#define TYPE_TAPE 0x01
+#define TYPE_PROCESSOR 0x03 /* HP scanners use this */
+#define TYPE_WORM 0x04 /* Treated as ROM by our system */
+#define TYPE_ROM 0x05
+#define TYPE_SCANNER 0x06
+#define TYPE_MOD 0x07 /* Magneto-optical disk -
+ * - treated as TYPE_DISK */
+#define TYPE_MEDIUM_CHANGER 0x08
+#define TYPE_NO_LUN 0x7f
+
+
+/*
+ * MESSAGE CODES
+ */
+
+#define COMMAND_COMPLETE 0x00
+#define EXTENDED_MESSAGE 0x01
+#define EXTENDED_MODIFY_DATA_POINTER 0x00
+#define EXTENDED_SDTR 0x01
+#define EXTENDED_EXTENDED_IDENTIFY 0x02 /* SCSI-I only */
+#define EXTENDED_WDTR 0x03
+#define SAVE_POINTERS 0x02
+#define RESTORE_POINTERS 0x03
+#define DISCONNECT 0x04
+#define INITIATOR_ERROR 0x05
+#define ABORT 0x06
+#define MESSAGE_REJECT 0x07
+#define NOP 0x08
+#define MSG_PARITY_ERROR 0x09
+#define LINKED_CMD_COMPLETE 0x0a
+#define LINKED_FLG_CMD_COMPLETE 0x0b
+#define BUS_DEVICE_RESET 0x0c
+
+#define INITIATE_RECOVERY 0x0f /* SCSI-II only */
+#define RELEASE_RECOVERY 0x10 /* SCSI-II only */
+
+#define SIMPLE_QUEUE_TAG 0x20
+#define HEAD_OF_QUEUE_TAG 0x21
+#define ORDERED_QUEUE_TAG 0x22
+
+/*
+ * Here are some scsi specific ioctl commands which are sometimes useful.
+ */
+/* These are a few other constants only used by scsi devices */
+
+#define SCSI_IOCTL_GET_IDLUN 0x5382
+
+/* Used to turn on and off tagged queuing for scsi devices */
+
+#define SCSI_IOCTL_TAGGED_ENABLE 0x5383
+#define SCSI_IOCTL_TAGGED_DISABLE 0x5384
+
+/* Used to obtain the host number of a device. */
+#define SCSI_IOCTL_PROBE_HOST 0x5385
+
+/* Used to get the bus number for a device */
+#define SCSI_IOCTL_GET_BUS_NUMBER 0x5386
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
+
+#endif
diff --git a/linux/src/include/scsi/scsi_ioctl.h b/linux/src/include/scsi/scsi_ioctl.h
new file mode 100644
index 0000000..a90abf6
--- /dev/null
+++ b/linux/src/include/scsi/scsi_ioctl.h
@@ -0,0 +1,28 @@
+#ifndef _SCSI_IOCTL_H
+#define _SCSI_IOCTL_H
+
+#define SCSI_IOCTL_SEND_COMMAND 1
+#define SCSI_IOCTL_TEST_UNIT_READY 2
+#define SCSI_IOCTL_BENCHMARK_COMMAND 3
+#define SCSI_IOCTL_SYNC 4 /* Request synchronous parameters */
+#define SCSI_IOCTL_START_UNIT 5
+#define SCSI_IOCTL_STOP_UNIT 6
+/* The door lock/unlock constants are compatible with Sun constants for
+ the cdrom */
+#define SCSI_IOCTL_DOORLOCK 0x5380 /* lock the eject mechanism */
+#define SCSI_IOCTL_DOORUNLOCK 0x5381 /* unlock the mechanism */
+
+#define SCSI_REMOVAL_PREVENT 1
+#define SCSI_REMOVAL_ALLOW 0
+
+#ifdef __KERNEL__
+
+extern int scsi_ioctl (Scsi_Device *dev, int cmd, void *arg);
+extern int kernel_scsi_ioctl (Scsi_Device *dev, int cmd, void *arg);
+extern int scsi_ioctl_send_command(Scsi_Device *dev, void *buffer);
+
+#endif
+
+#endif
+
+
diff --git a/linux/src/include/scsi/scsicam.h b/linux/src/include/scsi/scsicam.h
new file mode 100644
index 0000000..954e140
--- /dev/null
+++ b/linux/src/include/scsi/scsicam.h
@@ -0,0 +1,17 @@
+/*
+ * scsicam.h - SCSI CAM support functions, use for HDIO_GETGEO, etc.
+ *
+ * Copyright 1993, 1994 Drew Eckhardt
+ * Visionary Computing
+ * (Unix and Linux consulting and custom programming)
+ * drew@Colorado.EDU
+ * +1 (303) 786-7975
+ *
+ * For more information, please consult the SCSI-CAM draft.
+ */
+
+#ifndef SCSICAM_H
+#define SCSICAM_H
+#include <linux/kdev_t.h>
+extern int scsicam_bios_param (Disk *disk, kdev_t dev, int *ip);
+#endif /* def SCSICAM_H */
diff --git a/linux/src/init/main.c b/linux/src/init/main.c
new file mode 100644
index 0000000..1aa15b9
--- /dev/null
+++ b/linux/src/init/main.c
@@ -0,0 +1,1135 @@
+/*
+ * linux/init/main.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * GK 2/5/95 - Changed to support mounting root fs via NFS
+ * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
+ * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/head.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/utsname.h>
+#include <linux/ioport.h>
+#include <linux/hdreg.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/blk.h>
+#ifdef CONFIG_ROOT_NFS
+#include <linux/nfs_fs.h>
+#endif
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#include <asm/bugs.h>
+
+#include <linux/dev/glue/glue.h>
+
+/*
+ * Versions of gcc older than that listed below may actually compile
+ * and link okay, but the end product can have subtle run time bugs.
+ * To avoid associated bogus bug reports, we flatly refuse to compile
+ * with a gcc that is known to be too old from the very beginning.
+ */
+#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 6)
+#error sorry, your GCC is too old. It builds incorrect kernels.
+#endif
+
+extern char _stext, _etext;
+extern const char *linux_banner;
+
+static char printbuf[1024];
+
+extern int console_loglevel;
+
+static int init(void *);
+extern int bdflush(void *);
+extern int kswapd(void *);
+extern void kswapd_setup(void);
+
+extern void init_modules(void);
+extern long console_init(long, long);
+extern long kmalloc_init(long,long);
+extern void sock_init(void);
+extern unsigned long pci_init(unsigned long, unsigned long);
+extern void sysctl_init(void);
+
+extern void no_scroll(char *str, int *ints);
+extern void swap_setup(char *str, int *ints);
+extern void buff_setup(char *str, int *ints);
+extern void panic_setup(char *str, int *ints);
+extern void bmouse_setup(char *str, int *ints);
+extern void msmouse_setup(char *str, int *ints);
+extern void lp_setup(char *str, int *ints);
+extern void eth_setup(char *str, int *ints);
+extern void xd_setup(char *str, int *ints);
+extern void xd_manual_geo_init(char *str, int *ints);
+extern void floppy_setup(char *str, int *ints);
+extern void st_setup(char *str, int *ints);
+extern void st0x_setup(char *str, int *ints);
+extern void advansys_setup(char *str, int *ints);
+extern void tmc8xx_setup(char *str, int *ints);
+extern void t128_setup(char *str, int *ints);
+extern void pas16_setup(char *str, int *ints);
+extern void generic_NCR5380_setup(char *str, int *intr);
+extern void generic_NCR53C400_setup(char *str, int *intr);
+extern void aha152x_setup(char *str, int *ints);
+extern void aha1542_setup(char *str, int *ints);
+extern void gdth_setup(char *str, int *ints);
+extern void aic7xxx_setup(char *str, int *ints);
+extern void AM53C974_setup(char *str, int *ints);
+extern void BusLogic_Setup(char *str, int *ints);
+extern void ncr53c8xx_setup(char *str, int *ints);
+extern void eata2x_setup(char *str, int *ints);
+extern void u14_34f_setup(char *str, int *ints);
+extern void fdomain_setup(char *str, int *ints);
+extern void in2000_setup(char *str, int *ints);
+extern void NCR53c406a_setup(char *str, int *ints);
+extern void wd7000_setup(char *str, int *ints);
+extern void ppa_setup(char *str, int *ints);
+extern void scsi_luns_setup(char *str, int *ints);
+extern void sound_setup(char *str, int *ints);
+extern void apm_setup(char *str, int *ints);
+extern void reboot_setup(char *str, int *ints);
+#ifdef CONFIG_CDU31A
+extern void cdu31a_setup(char *str, int *ints);
+#endif CONFIG_CDU31A
+#ifdef CONFIG_MCD
+extern void mcd_setup(char *str, int *ints);
+#endif CONFIG_MCD
+#ifdef CONFIG_MCDX
+extern void mcdx_setup(char *str, int *ints);
+#endif CONFIG_MCDX
+#ifdef CONFIG_SBPCD
+extern void sbpcd_setup(char *str, int *ints);
+#endif CONFIG_SBPCD
+#ifdef CONFIG_AZTCD
+extern void aztcd_setup(char *str, int *ints);
+#endif CONFIG_AZTCD
+#ifdef CONFIG_CDU535
+extern void sonycd535_setup(char *str, int *ints);
+#endif CONFIG_CDU535
+#ifdef CONFIG_GSCD
+extern void gscd_setup(char *str, int *ints);
+#endif CONFIG_GSCD
+#ifdef CONFIG_CM206
+extern void cm206_setup(char *str, int *ints);
+#endif CONFIG_CM206
+#ifdef CONFIG_OPTCD
+extern void optcd_setup(char *str, int *ints);
+#endif CONFIG_OPTCD
+#ifdef CONFIG_SJCD
+extern void sjcd_setup(char *str, int *ints);
+#endif CONFIG_SJCD
+#ifdef CONFIG_ISP16_CDI
+extern void isp16_setup(char *str, int *ints);
+#endif CONFIG_ISP16_CDI
+#ifdef CONFIG_BLK_DEV_RAM
+static void ramdisk_start_setup(char *str, int *ints);
+static void load_ramdisk(char *str, int *ints);
+static void prompt_ramdisk(char *str, int *ints);
+static void ramdisk_size(char *str, int *ints);
+#ifdef CONFIG_BLK_DEV_INITRD
+static void no_initrd(char *s,int *ints);
+#endif
+#endif CONFIG_BLK_DEV_RAM
+#ifdef CONFIG_ISDN_DRV_ICN
+extern void icn_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_ISDN_DRV_HISAX
+extern void HiSax_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_ISDN_DRV_PCBIT
+extern void pcbit_setup(char *str, int *ints);
+#endif
+
+#ifdef CONFIG_ATARIMOUSE
+extern void atari_mouse_setup (char *str, int *ints);
+#endif
+#ifdef CONFIG_DMASOUND
+extern void dmasound_setup (char *str, int *ints);
+#endif
+#ifdef CONFIG_ATARI_SCSI
+extern void atari_scsi_setup (char *str, int *ints);
+#endif
+extern void wd33c93_setup (char *str, int *ints);
+extern void gvp11_setup (char *str, int *ints);
+
+#ifdef CONFIG_CYCLADES
+extern void cy_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_DIGI
+extern void pcxx_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_RISCOM8
+extern void riscom8_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_SPECIALIX
+extern void specialix_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_BAYCOM
+extern void baycom_setup(char *str, int *ints);
+#endif
+
+#ifdef CONFIG_PARIDE_PD
+extern void pd_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PF
+extern void pf_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PT
+extern void pt_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PG
+extern void pg_setup(char *str, int *ints);
+#endif
+#ifdef CONFIG_PARIDE_PCD
+extern void pcd_setup(char *str, int *ints);
+#endif
+
+#if defined(CONFIG_SYSVIPC) || defined(CONFIG_KERNELD)
+extern void ipc_init(void);
+#endif
+
+/*
+ * Boot command-line arguments
+ */
+#define MAX_INIT_ARGS 8
+#define MAX_INIT_ENVS 8
+
+extern void time_init(void);
+
+static unsigned long memory_start = 0;
+static unsigned long memory_end = 0;
+
+int rows, cols;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_size; /* Size of the ramdisk(s) */
+extern int rd_image_start; /* starting block # of image */
+#ifdef CONFIG_BLK_DEV_INITRD
+kdev_t real_root_dev;
+#endif
+#endif
+
+int root_mountflags = MS_RDONLY;
+char *execute_command = 0;
+
+#ifdef CONFIG_ROOT_NFS
+char nfs_root_name[NFS_ROOT_NAME_LEN] = { "default" };
+char nfs_root_addrs[NFS_ROOT_ADDRS_LEN] = { "" };
+#endif
+
+extern void dquot_init(void);
+
+static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+static char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+
+static char * argv_rc[] = { "/bin/sh", NULL };
+static char * envp_rc[] = { "HOME=/", "TERM=linux", NULL };
+
+static char * argv[] = { "-/bin/sh",NULL };
+static char * envp[] = { "HOME=/usr/root", "TERM=linux", NULL };
+
+char *get_options(char *str, int *ints)
+{
+ char *cur = str;
+ int i=1;
+
+ while (cur && isdigit(*cur) && i <= 10) {
+ ints[i++] = simple_strtoul(cur,NULL,0);
+ if ((cur = strchr(cur,',')) != NULL)
+ cur++;
+ }
+ ints[0] = i-1;
+ return(cur);
+}
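+
+/*
+ * Worked example (editor's addition): for a fragment such as
+ * "0x300,10,eth0" the loop above stops at the first field that does
+ * not start with a digit, leaving ints[0] = 2, ints[1] = 0x300,
+ * ints[2] = 10 and returning a pointer to "eth0" for the caller to
+ * handle as a string.  At most 10 numbers are stored, so ints[] must
+ * have room for 11 entries (see checksetup() below).
+ */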
+
+static void profile_setup(char *str, int *ints)
+{
+ if (ints[0] > 0)
+ prof_shift = (unsigned long) ints[1];
+ else
+#ifdef CONFIG_PROFILE_SHIFT
+ prof_shift = CONFIG_PROFILE_SHIFT;
+#else
+ prof_shift = 2;
+#endif
+}
+
+struct kernel_param {
+ const char *str;
+ void (*setup_func)(char *, int *);
+} ;
+
+struct kernel_param bootsetups[] = {
+ { "reserve=", reserve_setup },
+ { "profile=", profile_setup },
+#ifdef CONFIG_BLK_DEV_RAM
+ { "ramdisk_start=", ramdisk_start_setup },
+ { "load_ramdisk=", load_ramdisk },
+ { "prompt_ramdisk=", prompt_ramdisk },
+ { "ramdisk=", ramdisk_size },
+ { "ramdisk_size=", ramdisk_size },
+#ifdef CONFIG_BLK_DEV_INITRD
+ { "noinitrd", no_initrd },
+#endif
+#endif
+ { "swap=", swap_setup },
+ { "buff=", buff_setup },
+ { "panic=", panic_setup },
+ { "no-scroll", no_scroll },
+#ifdef CONFIG_BUGi386
+ { "no-hlt", no_halt },
+ { "no387", no_387 },
+ { "reboot=", reboot_setup },
+#endif
+#ifdef CONFIG_INET
+ { "ether=", eth_setup },
+#endif
+#ifdef CONFIG_PRINTER
+ { "lp=", lp_setup },
+#endif
+#ifdef CONFIG_SCSI
+ { "max_scsi_luns=", scsi_luns_setup },
+#endif
+#ifdef CONFIG_SCSI_ADVANSYS
+ { "advansys=", advansys_setup },
+#endif
+#if defined(CONFIG_BLK_DEV_HD)
+ { "hd=", hd_setup },
+#endif
+#ifdef CONFIG_CHR_DEV_ST
+ { "st=", st_setup },
+#endif
+#ifdef CONFIG_BUSMOUSE
+ { "bmouse=", bmouse_setup },
+#endif
+#ifdef CONFIG_MS_BUSMOUSE
+ { "msmouse=", msmouse_setup },
+#endif
+#ifdef CONFIG_SCSI_SEAGATE
+ { "st0x=", st0x_setup },
+ { "tmc8xx=", tmc8xx_setup },
+#endif
+#ifdef CONFIG_SCSI_T128
+ { "t128=", t128_setup },
+#endif
+#ifdef CONFIG_SCSI_PAS16
+ { "pas16=", pas16_setup },
+#endif
+#ifdef CONFIG_SCSI_GENERIC_NCR5380
+ { "ncr5380=", generic_NCR5380_setup },
+ { "ncr53c400=", generic_NCR53C400_setup },
+#endif
+#ifdef CONFIG_SCSI_AHA152X
+ { "aha152x=", aha152x_setup},
+#endif
+#ifdef CONFIG_SCSI_AHA1542
+ { "aha1542=", aha1542_setup},
+#endif
+#ifdef CONFIG_SCSI_GDTH
+ { "gdth=", gdth_setup},
+#endif
+#ifdef CONFIG_SCSI_AIC7XXX
+ { "aic7xxx=", aic7xxx_setup},
+#endif
+#ifdef CONFIG_SCSI_BUSLOGIC
+ { "BusLogic=", BusLogic_Setup},
+#endif
+#ifdef CONFIG_SCSI_NCR53C8XX
+ { "ncr53c8xx=", ncr53c8xx_setup},
+#endif
+#ifdef CONFIG_SCSI_EATA
+ { "eata=", eata2x_setup},
+#endif
+#ifdef CONFIG_SCSI_U14_34F
+ { "u14-34f=", u14_34f_setup},
+#endif
+#ifdef CONFIG_SCSI_AM53C974
+ { "AM53C974=", AM53C974_setup},
+#endif
+#ifdef CONFIG_SCSI_NCR53C406A
+ { "ncr53c406a=", NCR53c406a_setup},
+#endif
+#ifdef CONFIG_SCSI_FUTURE_DOMAIN
+ { "fdomain=", fdomain_setup},
+#endif
+#ifdef CONFIG_SCSI_IN2000
+ { "in2000=", in2000_setup},
+#endif
+#ifdef CONFIG_SCSI_7000FASST
+ { "wd7000=", wd7000_setup},
+#endif
+#ifdef CONFIG_SCSI_PPA
+ { "ppa=", ppa_setup },
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+ { "xd=", xd_setup },
+ { "xd_geo=", xd_manual_geo_init },
+#endif
+#ifdef CONFIG_BLK_DEV_FD
+ { "floppy=", floppy_setup },
+#endif
+#ifdef CONFIG_CDU31A
+ { "cdu31a=", cdu31a_setup },
+#endif CONFIG_CDU31A
+#ifdef CONFIG_MCD
+ { "mcd=", mcd_setup },
+#endif CONFIG_MCD
+#ifdef CONFIG_MCDX
+ { "mcdx=", mcdx_setup },
+#endif CONFIG_MCDX
+#ifdef CONFIG_SBPCD
+ { "sbpcd=", sbpcd_setup },
+#endif CONFIG_SBPCD
+#ifdef CONFIG_AZTCD
+ { "aztcd=", aztcd_setup },
+#endif CONFIG_AZTCD
+#ifdef CONFIG_CDU535
+ { "sonycd535=", sonycd535_setup },
+#endif CONFIG_CDU535
+#ifdef CONFIG_GSCD
+ { "gscd=", gscd_setup },
+#endif CONFIG_GSCD
+#ifdef CONFIG_CM206
+ { "cm206=", cm206_setup },
+#endif CONFIG_CM206
+#ifdef CONFIG_OPTCD
+ { "optcd=", optcd_setup },
+#endif CONFIG_OPTCD
+#ifdef CONFIG_SJCD
+ { "sjcd=", sjcd_setup },
+#endif CONFIG_SJCD
+#ifdef CONFIG_ISP16_CDI
+ { "isp16=", isp16_setup },
+#endif CONFIG_ISP16_CDI
+#ifdef CONFIG_SOUND
+ { "sound=", sound_setup },
+#endif
+#ifdef CONFIG_ISDN_DRV_ICN
+ { "icn=", icn_setup },
+#endif
+#ifdef CONFIG_ISDN_DRV_HISAX
+ { "hisax=", HiSax_setup },
+ { "HiSax=", HiSax_setup },
+#endif
+#ifdef CONFIG_ISDN_DRV_PCBIT
+ { "pcbit=", pcbit_setup },
+#endif
+#ifdef CONFIG_ATARIMOUSE
+ { "atamouse=", atari_mouse_setup },
+#endif
+#ifdef CONFIG_DMASOUND
+ { "dmasound=", dmasound_setup },
+#endif
+#ifdef CONFIG_ATARI_SCSI
+ { "atascsi=", atari_scsi_setup },
+#endif
+#if defined(CONFIG_A3000_SCSI) || defined(CONFIG_A2091_SCSI) \
+ || defined(CONFIG_GVP11_SCSI)
+ { "wd33c93=", wd33c93_setup },
+#endif
+#if defined(CONFIG_GVP11_SCSI)
+ { "gvp11=", gvp11_setup },
+#endif
+#ifdef CONFIG_CYCLADES
+ { "cyclades=", cy_setup },
+#endif
+#ifdef CONFIG_DIGI
+ { "digi=", pcxx_setup },
+#endif
+#ifdef CONFIG_RISCOM8
+ { "riscom8=", riscom8_setup },
+#endif
+#ifdef CONFIG_SPECIALIX
+ { "specialix=", specialix_setup },
+#endif
+#ifdef CONFIG_BAYCOM
+ { "baycom=", baycom_setup },
+#endif
+#ifdef CONFIG_APM
+ { "apm=", apm_setup },
+#endif
+ { 0, 0 }
+};
+
+static struct kernel_param raw_params[] = {
+
+#ifdef CONFIG_PARIDE_PD
+ { "pd.", pd_setup },
+#endif
+#ifdef CONFIG_PARIDE_PCD
+ { "pcd.", pcd_setup },
+#endif
+#ifdef CONFIG_PARIDE_PF
+ { "pf.", pf_setup },
+#endif
+#ifdef CONFIG_PARIDE_PT
+ { "pt.", pt_setup },
+#endif
+#ifdef CONFIG_PARIDE_PG
+ { "pg.", pg_setup },
+#endif
+ { 0, 0 }
+} ;
+
+
+#ifdef CONFIG_BLK_DEV_RAM
+static void ramdisk_start_setup(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_image_start = ints[1];
+}
+
+static void load_ramdisk(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_doload = ints[1] & 3;
+}
+
+static void prompt_ramdisk(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_prompt = ints[1] & 1;
+}
+
+static void ramdisk_size(char *str, int *ints)
+{
+ if (ints[0] > 0 && ints[1] >= 0)
+ rd_size = ints[1];
+}
+
+#endif
+
+static int checksetup(char *line)
+{
+ int i = 0;
+ int ints[11];
+
+#ifdef CONFIG_BLK_DEV_IDE
+ /* ide driver needs the basic string, rather than pre-processed values */
+ if (!strncmp(line,"ide",3) || (!strncmp(line,"hd",2) && line[2] != '=')) {
+ ide_setup(line);
+ return 1;
+ }
+#endif
+ while (bootsetups[i].str) {
+ int n = strlen(bootsetups[i].str);
+ if (!strncmp(line,bootsetups[i].str,n)) {
+ bootsetups[i].setup_func(get_options(line+n,ints), ints);
+ return 1;
+ }
+ i++;
+ }
+
+ for (i=0; raw_params[i].str; i++) {
+ int n = strlen(raw_params[i].str);
+ if (!strncmp(line,raw_params[i].str,n)) {
+ raw_params[i].setup_func(line+n, NULL);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* This should be approx 2 Bo*oMips to start (note the initial shift); it will
+ still work even if it is initially too large, it will just take slightly longer. */
+unsigned long loops_per_sec = (1<<12);
+
+#if defined(__SMP__) && defined(__i386__)
+unsigned long smp_loops_per_tick = 1000000;
+#endif
+
+/* This is the number of bits of precision for the loops_per_second. Each
+ bit takes on average 1.5/HZ seconds. This (like the original) is a little
+ better than 1% */
+#define LPS_PREC 8
+
+void calibrate_delay(void)
+{
+ int ticks;
+ int loopbit;
+ int lps_precision = LPS_PREC;
+
+ loops_per_sec = (1<<12);
+
+ printk("Calibrating delay loop.. ");
+ while (loops_per_sec <<= 1) {
+ /* wait for "start of" clock tick */
+ ticks = jiffies;
+ while (ticks == jiffies)
+ /* nothing */;
+ /* Go .. */
+ ticks = jiffies;
+ __delay(loops_per_sec);
+ ticks = jiffies - ticks;
+ if (ticks)
+ break;
+ }
+
+/* Do a binary approximation to get loops_per_second set to equal one clock
+ (up to lps_precision bits) */
+ loops_per_sec >>= 1;
+ loopbit = loops_per_sec;
+ while ( lps_precision-- && (loopbit >>= 1) ) {
+ loops_per_sec |= loopbit;
+ ticks = jiffies;
+ while (ticks == jiffies);
+ ticks = jiffies;
+ __delay(loops_per_sec);
+ if (jiffies != ticks) /* longer than 1 tick */
+ loops_per_sec &= ~loopbit;
+ }
+
+/* finally, adjust loops per second in terms of seconds instead of clocks */
+ loops_per_sec *= HZ;
+/* Round the value and print it */
+ printk("ok - %lu.%02lu BogoMIPS\n",
+ (loops_per_sec+2500)/500000,
+ ((loops_per_sec+2500)/5000) % 100);
+
+#if defined(__SMP__) && defined(__i386__)
+ smp_loops_per_tick = loops_per_sec / 400;
+#endif
+}
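+
+/*
+ * Illustrative sketch (editor's addition): the calibration above is a
+ * doubling search followed by a bitwise binary refinement: find the
+ * largest loop count whose delay still fits inside one clock tick,
+ * then scale by HZ.  The same pattern with a hypothetical
+ * fits_in_one_tick() predicate:
+ */
+static unsigned long toy_calibrate(int (*fits_in_one_tick)(unsigned long))
+{
+ unsigned long count = 1<<12, bit;
+ int bits = LPS_PREC;
+
+ while (fits_in_one_tick(count)) /* doubling phase */
+ count <<= 1;
+ count >>= 1; /* last value that still fit */
+ bit = count;
+ while (bits-- && (bit >>= 1)) { /* refine the low-order bits */
+ count |= bit;
+ if (!fits_in_one_tick(count))
+ count &= ~bit;
+ }
+ return count; /* loops per tick; multiply by HZ for loops per second */
+}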
+
+static void parse_root_dev(char * line)
+{
+ int base = 0;
+ static struct dev_name_struct {
+ const char *name;
+ const int num;
+ } devices[] = {
+ { "nfs", 0x00ff },
+ { "loop", 0x0700 },
+ { "hda", 0x0300 },
+ { "hdb", 0x0340 },
+ { "hdc", 0x1600 },
+ { "hdd", 0x1640 },
+ { "hde", 0x2100 },
+ { "hdf", 0x2140 },
+ { "hdg", 0x2200 },
+ { "hdh", 0x2240 },
+ { "sda", 0x0800 },
+ { "sdb", 0x0810 },
+ { "sdc", 0x0820 },
+ { "sdd", 0x0830 },
+ { "sde", 0x0840 },
+ { "sdf", 0x0850 },
+ { "sdg", 0x0860 },
+ { "sdh", 0x0870 },
+ { "sdi", 0x0880 },
+ { "sdj", 0x0890 },
+ { "sdk", 0x08a0 },
+ { "sdl", 0x08b0 },
+ { "sdm", 0x08c0 },
+ { "sdn", 0x08d0 },
+ { "sdo", 0x08e0 },
+ { "sdp", 0x08f0 },
+ { "fd", 0x0200 },
+ { "xda", 0x0d00 },
+ { "xdb", 0x0d40 },
+ { "ram", 0x0100 },
+ { "scd", 0x0b00 },
+ { "mcd", 0x1700 },
+ { "cdu535", 0x1800 },
+ { "aztcd", 0x1d00 },
+ { "cm206cd", 0x2000 },
+ { "gscd", 0x1000 },
+ { "sbpcd", 0x1900 },
+ { "sonycd", 0x1800 },
+#ifdef CONFIG_PARIDE_PD
+ { "pda", 0x2d00 },
+ { "pdb", 0x2d10 },
+ { "pdc", 0x2d20 },
+ { "pdd", 0x2d30 },
+#endif
+#ifdef CONFIG_PARIDE_PCD
+ { "pcd", 0x2e00 },
+#endif
+#ifdef CONFIG_PARIDE_PF
+ { "pf", 0x2f00 },
+#endif
+ { NULL, 0 }
+ };
+
+ if (strncmp(line,"/dev/",5) == 0) {
+ struct dev_name_struct *dev = devices;
+ line += 5;
+ do {
+ int len = strlen(dev->name);
+ if (strncmp(line,dev->name,len) == 0) {
+ line += len;
+ base = dev->num;
+ break;
+ }
+ dev++;
+ } while (dev->name);
+ }
+ ROOT_DEV = to_kdev_t(base + simple_strtoul(line,NULL,base?10:16));
+}
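+
+/*
+ * Worked example (editor's addition): "root=/dev/hda2" matches the
+ * "hda" entry, so base = 0x0300 and the remaining "2" is parsed in
+ * decimal, giving ROOT_DEV = 0x0302.  Without the "/dev/" prefix the
+ * whole argument is taken as a hexadecimal device number, so
+ * "root=0301" selects /dev/hda1 directly.
+ */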
+
+/*
+ * This is a simple kernel command line parsing function: it parses
+ * the command line, and fills in the arguments/environment to init
+ * as appropriate. Any cmd-line option is taken to be an environment
+ * variable if it contains the character '='.
+ *
+ *
+ * This routine also checks for options meant for the kernel.
+ * These options are not given to init - they are for internal kernel use only.
+ */
+static void parse_options(char *line)
+{
+ char *next;
+ int args, envs;
+
+ if (!*line)
+ return;
+ args = 0;
+ envs = 1; /* TERM is set to 'linux' by default */
+ next = line;
+ while ((line = next) != NULL) {
+ if ((next = strchr(line,' ')) != NULL)
+ *next++ = 0;
+ /*
+ * check for kernel options first..
+ */
+ if (!strncmp(line,"root=",5)) {
+ parse_root_dev(line+5);
+ continue;
+ }
+#ifdef CONFIG_ROOT_NFS
+ if (!strncmp(line, "nfsroot=", 8)) {
+ int n;
+ line += 8;
+ ROOT_DEV = MKDEV(UNNAMED_MAJOR, 255);
+ if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
+ strncpy(nfs_root_name, line, sizeof(nfs_root_name));
+ nfs_root_name[sizeof(nfs_root_name)-1] = '\0';
+ continue;
+ }
+ n = strlen(line) + strlen(NFS_ROOT);
+ if (n >= sizeof(nfs_root_name))
+ line[sizeof(nfs_root_name) - strlen(NFS_ROOT) - 1] = '\0';
+ sprintf(nfs_root_name, NFS_ROOT, line);
+ continue;
+ }
+ if (!strncmp(line, "nfsaddrs=", 9)) {
+ line += 9;
+ strncpy(nfs_root_addrs, line, sizeof(nfs_root_addrs));
+ nfs_root_addrs[sizeof(nfs_root_addrs)-1] = '\0';
+ continue;
+ }
+#endif
+ if (!strcmp(line,"ro")) {
+ root_mountflags |= MS_RDONLY;
+ continue;
+ }
+ if (!strcmp(line,"rw")) {
+ root_mountflags &= ~MS_RDONLY;
+ continue;
+ }
+ if (!strcmp(line,"debug")) {
+ console_loglevel = 10;
+ continue;
+ }
+ if (!strncmp(line,"init=",5)) {
+ line += 5;
+ execute_command = line;
+ continue;
+ }
+ if (checksetup(line))
+ continue;
+ /*
+ * Then check if it's an environment variable or
+ * an option.
+ */
+ if (strchr(line,'=')) {
+ if (envs >= MAX_INIT_ENVS)
+ break;
+ envp_init[++envs] = line;
+ } else {
+ if (args >= MAX_INIT_ARGS)
+ break;
+ argv_init[++args] = line;
+ }
+ }
+ argv_init[args+1] = NULL;
+ envp_init[envs+1] = NULL;
+}
+
+
+extern void setup_arch(char **, unsigned long *, unsigned long *);
+extern void arch_syms_export(void);
+
+#ifndef __SMP__
+
+/*
+ * Uniprocessor idle thread
+ */
+
+int cpu_idle(void *unused)
+{
+ for(;;)
+ idle();
+}
+
+#else
+
+/*
+ * Multiprocessor idle thread is in arch/...
+ */
+
+extern int cpu_idle(void * unused);
+
+/*
+ * Activate a secondary processor.
+ */
+
+asmlinkage void start_secondary(void)
+{
+ trap_init();
+ init_IRQ();
+ smp_callin();
+ cpu_idle(NULL);
+}
+
+
+
+/*
+ * Called by CPU#0 to activate the rest.
+ */
+
+static void smp_init(void)
+{
+ int i, j;
+ smp_boot_cpus();
+
+ /*
+ * Create the slave init tasks as sharing pid 0.
+ *
+ * This should only happen if we have virtual CPU numbers
+ * higher than 0.
+ */
+
+ for (i=1; i<smp_num_cpus; i++)
+ {
+ struct task_struct *n, *p;
+
+ j = cpu_logical_map[i];
+ /*
+ * We use kernel_thread for the idlers which are
+ * unlocked tasks running in kernel space.
+ */
+ kernel_thread(cpu_idle, NULL, CLONE_PID);
+ /*
+ * Don't assume linear processor numbering
+ */
+ current_set[j]=task[i];
+ current_set[j]->processor=j;
+ cli();
+ n = task[i]->next_run;
+ p = task[i]->prev_run;
+ nr_running--;
+ n->prev_run = p;
+ p->next_run = n;
+ task[i]->next_run = task[i]->prev_run = task[i];
+ sti();
+ }
+}
+
+/*
+ * The autoprobe routines assume CPU#0 on the i386
+ * so we don't actually set the game in motion until
+ * they are finished.
+ */
+
+static void smp_begin(void)
+{
+ smp_threads_ready=1;
+ smp_commence();
+}
+
+#endif
+
+/*
+ * Activate the first processor.
+ */
+
+asmlinkage void start_kernel(void)
+{
+ char * command_line;
+
+/*
+ * This little check will move.
+ */
+
+#ifdef __SMP__
+ static int first_cpu=1;
+
+ if(!first_cpu)
+ start_secondary();
+ first_cpu=0;
+
+#endif
+/*
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
+ setup_arch(&command_line, &memory_start, &memory_end);
+ memory_start = paging_init(memory_start,memory_end);
+ trap_init();
+ init_IRQ();
+ sched_init();
+ time_init();
+ parse_options(command_line);
+#ifdef CONFIG_MODULES
+ init_modules();
+#endif
+#ifdef CONFIG_PROFILE
+ if (!prof_shift)
+#ifdef CONFIG_PROFILE_SHIFT
+ prof_shift = CONFIG_PROFILE_SHIFT;
+#else
+ prof_shift = 2;
+#endif
+#endif
+ if (prof_shift) {
+ prof_buffer = (unsigned int *) memory_start;
+ /* only text is profiled */
+ prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
+ prof_len >>= prof_shift;
+ memory_start += prof_len * sizeof(unsigned int);
+ memset(prof_buffer, 0, prof_len * sizeof(unsigned int));
+ }
+ memory_start = console_init(memory_start,memory_end);
+#ifdef CONFIG_PCI
+ memory_start = pci_init(memory_start,memory_end);
+#endif
+ memory_start = kmalloc_init(memory_start,memory_end);
+ sti();
+ calibrate_delay();
+ memory_start = inode_init(memory_start,memory_end);
+ memory_start = file_table_init(memory_start,memory_end);
+ memory_start = name_cache_init(memory_start,memory_end);
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start && initrd_start < memory_start) {
+ printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
+ "disabling it.\n",initrd_start,memory_start);
+ initrd_start = 0;
+ }
+#endif
+ mem_init(memory_start,memory_end);
+ buffer_init();
+ sock_init();
+#if defined(CONFIG_SYSVIPC) || defined(CONFIG_KERNELD)
+ ipc_init();
+#endif
+ dquot_init();
+ arch_syms_export();
+ sti();
+ check_bugs();
+
+#if defined(CONFIG_MTRR) && defined(__SMP__)
+ init_mtrr_config();
+#endif
+
+
+ printk(linux_banner);
+#ifdef __SMP__
+ smp_init();
+#endif
+ sysctl_init();
+ /*
+ * We count on the initial thread going ok.
+ * Like the idlers, init is an unlocked kernel thread, which will
+ * make syscalls (and thus be locked).
+ */
+ kernel_thread(init, NULL, 0);
+/*
+ * task[0] is meant to be used as an "idle" task: it may not sleep, but
+ * it might do some general things like count free pages or it could be
+ * used to implement a reasonable LRU algorithm for the paging routines:
+ * anything that can be useful, but shouldn't take time from the real
+ * processes.
+ *
+ * Right now task[0] just does an infinite idle loop.
+ */
+ cpu_idle(NULL);
+}
+
+static int printf(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ write(1,printbuf,i=vsprintf(printbuf, fmt, args));
+ va_end(args);
+ return i;
+}
+
+static int do_rc(void * rc)
+{
+ close(0);
+ if (open(rc,O_RDONLY,0))
+ return -1;
+ return execve("/bin/sh", argv_rc, envp_rc);
+}
+
+static int do_shell(void * shell)
+{
+ close(0);close(1);close(2);
+ setsid();
+ (void) open("/dev/tty1",O_RDWR,0);
+ (void) dup(0);
+ (void) dup(0);
+ return execve(shell, argv, envp);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static int do_linuxrc(void * shell)
+{
+ static char *argv[] = { "linuxrc", NULL, };
+
+ close(0);close(1);close(2);
+ setsid();
+ (void) open("/dev/tty1",O_RDWR,0);
+ (void) dup(0);
+ (void) dup(0);
+ return execve(shell, argv, envp_init);
+}
+
+static void no_initrd(char *s,int *ints)
+{
+ mount_initrd = 0;
+}
+#endif
+
+static int init(void * unused)
+{
+ int pid,i;
+#ifdef CONFIG_BLK_DEV_INITRD
+ int real_root_mountflags;
+#endif
+
+ /* Launch bdflush from here, instead of the old syscall way. */
+ kernel_thread(bdflush, NULL, 0);
+ /* Start the background pageout daemon. */
+ kswapd_setup();
+ kernel_thread(kswapd, NULL, 0);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ real_root_dev = ROOT_DEV;
+ real_root_mountflags = root_mountflags;
+ if (initrd_start && mount_initrd) root_mountflags &= ~MS_RDONLY;
+ else mount_initrd =0;
+#endif
+ setup();
+
+#ifdef __SMP__
+ /*
+ * With the devices probed and setup we can
+ * now enter SMP mode.
+ */
+
+ smp_begin();
+#endif
+
+ #ifdef CONFIG_UMSDOS_FS
+ {
+ /*
+ When mounting a umsdos fs as root, we detect
+ the pseudo_root (/linux) and initialise it here.
+ pseudo_root is defined in fs/umsdos/inode.c
+ */
+ extern struct inode *pseudo_root;
+ if (pseudo_root != NULL){
+ current->fs->root = pseudo_root;
+ current->fs->pwd = pseudo_root;
+ }
+ }
+ #endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ root_mountflags = real_root_mountflags;
+ if (mount_initrd && ROOT_DEV != real_root_dev && ROOT_DEV == MKDEV(RAMDISK_MAJOR,0)) {
+ int error;
+
+ pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
+ if (pid>0)
+ while (pid != wait(&i));
+ if (real_root_dev != MKDEV(RAMDISK_MAJOR, 0)) {
+ error = change_root(real_root_dev,"/initrd");
+ if (error)
+ printk(KERN_ERR "Change root to /initrd: "
+ "error %d\n",error);
+ }
+ }
+#endif
+
+ /*
+ * This keeps serial console MUCH cleaner, but does assume
+ * the console driver checks there really is a video device
+ * attached (Sparc effectively does).
+ */
+
+ if ((open("/dev/tty1",O_RDWR,0) < 0) &&
+ (open("/dev/ttyS0",O_RDWR,0) < 0))
+ printk("Unable to open an initial console.\n");
+
+ (void) dup(0);
+ (void) dup(0);
+
+ if (!execute_command) {
+ execve("/etc/init",argv_init,envp_init);
+ execve("/bin/init",argv_init,envp_init);
+ execve("/sbin/init",argv_init,envp_init);
+ /* if this fails, fall through to original stuff */
+
+ pid = kernel_thread(do_rc, "/etc/rc", SIGCHLD);
+ if (pid>0)
+ while (pid != wait(&i))
+ /* nothing */;
+ }
+
+ while (1) {
+ pid = kernel_thread(do_shell,
+ execute_command ? execute_command : "/bin/sh",
+ SIGCHLD);
+ if (pid < 0) {
+ printf("Fork failed in init\n\r");
+ continue;
+ }
+ while (1)
+ if (pid == wait(&i))
+ break;
+ printf("\n\rchild %d died with code %04x\n\r",pid,i);
+ sync();
+ }
+ return -1;
+}
diff --git a/linux/src/init/version.c b/linux/src/init/version.c
new file mode 100644
index 0000000..0196fdf
--- /dev/null
+++ b/linux/src/init/version.c
@@ -0,0 +1,30 @@
+/*
+ * linux/version.c
+ *
+ * Copyright (C) 1992 Theodore Ts'o
+ *
+ * May be freely distributed as part of Linux.
+ */
+
+#include <linux/config.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/compile.h>
+
+/* make the "checkconfig" script happy: we really need to include config.h */
+#ifdef CONFIG_BOGUS
+#endif
+
+#define version(a) Version_ ## a
+#define version_string(a) version(a)
+
+int version_string(LINUX_VERSION_CODE) = 0;
+
+struct new_utsname system_utsname = {
+ UTS_SYSNAME, UTS_NODENAME, UTS_RELEASE, UTS_VERSION,
+ UTS_MACHINE, UTS_DOMAINNAME
+};
+
+const char *linux_banner =
+ "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
diff --git a/linux/src/kernel/dma.c b/linux/src/kernel/dma.c
new file mode 100644
index 0000000..724d8ca
--- /dev/null
+++ b/linux/src/kernel/dma.c
@@ -0,0 +1,99 @@
+/* $Id: dma.c,v 1.1 1999/04/26 05:58:29 tb Exp $
+ * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
+ *
+ * Written by Hennus Bergman, 1992.
+ *
+ * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
+ * In the previous version the reported device could end up being wrong,
+ * if a device requested a DMA channel that was already in use.
+ * [It also happened to remove the sizeof(char *) == sizeof(int)
+ * assumption introduced because of those /proc/dma patches. -- Hennus]
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+
+
+/* A note on resource allocation:
+ *
+ * All drivers needing DMA channels, should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
+ * When releasing them, first release the DMA, then release the IRQ.
+ * If you don't, you may cause allocation requests to fail unnecessarily.
+ * This doesn't really matter now, but it will once we get real semaphores
+ * in the kernel.
+ */
+
+
+
+/* Channel n is busy iff dma_chan_busy[n].lock != 0.
+ * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
+ * DMA4 is reserved for cascading.
+ */
+
+struct dma_chan {
+ int lock;
+ const char *device_id;
+};
+
+static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 1, "cascade" },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 }
+};
+
+int get_dma_list(char *buf)
+{
+ int i, len = 0;
+
+ for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
+ if (dma_chan_busy[i].lock) {
+ len += sprintf(buf+len, "%2d: %s\n",
+ i,
+ dma_chan_busy[i].device_id);
+ }
+ }
+ return len;
+} /* get_dma_list */
+
+
+int request_dma(unsigned int dmanr, const char * device_id)
+{
+ if (dmanr >= MAX_DMA_CHANNELS)
+ return -EINVAL;
+
+ if (xchg(&dma_chan_busy[dmanr].lock, 1) != 0)
+ return -EBUSY;
+
+ dma_chan_busy[dmanr].device_id = device_id;
+
+ /* old flag was 0, now contains 1 to indicate busy */
+ return 0;
+} /* request_dma */
+
+
+void free_dma(unsigned int dmanr)
+{
+ if (dmanr >= MAX_DMA_CHANNELS) {
+ printk("Trying to free DMA%d\n", dmanr);
+ return;
+ }
+
+ if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
+ printk("Trying to free free DMA%d\n", dmanr);
+ return;
+ }
+
+} /* free_dma */
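+
+/*
+ * Illustrative sketch (editor's addition): request_dma()/free_dma()
+ * above are a per-channel test-and-set built on xchg(): the old value
+ * of the lock word tells the caller whether the channel was already
+ * claimed, and storing the new value is one indivisible step.
+ * Hypothetical userspace rendering (C11 atomics standing in for the
+ * kernel's xchg()):
+ */
+#include <stdatomic.h>
+
+static atomic_int toy_chan_busy[8]; /* zero-initialised: all channels free */
+
+static int toy_request_dma(unsigned int ch)
+{
+ if (ch >= 8)
+ return -1; /* like -EINVAL above */
+ if (atomic_exchange(&toy_chan_busy[ch], 1) != 0)
+ return -2; /* like -EBUSY: someone else owns it */
+ return 0;
+}
+
+static void toy_free_dma(unsigned int ch)
+{
+ if (ch >= 8)
+ return;
+ if (atomic_exchange(&toy_chan_busy[ch], 0) == 0)
+ return; /* was already free: the "double free" printk case above */
+}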
diff --git a/linux/src/kernel/printk.c b/linux/src/kernel/printk.c
new file mode 100644
index 0000000..da8ffca
--- /dev/null
+++ b/linux/src/kernel/printk.c
@@ -0,0 +1,253 @@
+/*
+ * linux/kernel/printk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Modified to make sys_syslog() more flexible: added commands to
+ * return the last 4k of kernel messages, regardless of whether
+ * they've been read or not. Added option to suppress kernel printk's
+ * to the console. Added hook for sending the console messages
+ * elsewhere, in preparation for a serial line console (someday).
+ * Ted Ts'o, 2/11/93.
+ */
+
+#include <stdarg.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+
+#define LOG_BUF_LEN 8192
+
+static char buf[1024];
+
+extern void console_print(const char *);
+
+/* printk's without a loglevel use this.. */
+#define DEFAULT_MESSAGE_LOGLEVEL 4 /* KERN_WARNING */
+
+/* We show everything that is MORE important than this.. */
+#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
+#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
+
+unsigned long log_size = 0;
+struct wait_queue * log_wait = NULL;
+int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+
+static void (*console_print_proc)(const char *) = 0;
+static char log_buf[LOG_BUF_LEN];
+static unsigned long log_start = 0;
+static unsigned long logged_chars = 0;
+
+/*
+ * Commands to sys_syslog:
+ *
+ * 0 -- Close the log. Currently a NOP.
+ * 1 -- Open the log. Currently a NOP.
+ * 2 -- Read from the log.
+ * 3 -- Read up to the last 4k of messages in the ring buffer.
+ * 4 -- Read and clear last 4k of messages in the ring buffer
+ * 5 -- Clear ring buffer.
+ * 6 -- Disable printk's to console
+ * 7 -- Enable printk's to console
+ * 8 -- Set level of messages printed to console
+ */
+asmlinkage int sys_syslog(int type, char * buf, int len)
+{
+ unsigned long i, j, count;
+ int do_clear = 0;
+ char c;
+ int error;
+
+ if ((type != 3) && !suser())
+ return -EPERM;
+ switch (type) {
+ case 0: /* Close log */
+ return 0;
+ case 1: /* Open log */
+ return 0;
+ case 2: /* Read from log */
+ if (!buf || len < 0)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ error = verify_area(VERIFY_WRITE,buf,len);
+ if (error)
+ return error;
+ cli();
+ while (!log_size) {
+ if (current->signal & ~current->blocked) {
+ sti();
+ return -ERESTARTSYS;
+ }
+ interruptible_sleep_on(&log_wait);
+ }
+ i = 0;
+ while (log_size && i < len) {
+ c = *((char *) log_buf+log_start);
+ log_start++;
+ log_size--;
+ log_start &= LOG_BUF_LEN-1;
+ sti();
+ put_user(c,buf);
+ buf++;
+ i++;
+ cli();
+ }
+ sti();
+ return i;
+ case 4: /* Read/clear last kernel messages */
+ do_clear = 1;
+ /* FALL THRU */
+ case 3: /* Read last kernel messages */
+ if (!buf || len < 0)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ error = verify_area(VERIFY_WRITE,buf,len);
+ if (error)
+ return error;
+ count = len;
+ if (count > LOG_BUF_LEN)
+ count = LOG_BUF_LEN;
+ if (count > logged_chars)
+ count = logged_chars;
+ j = log_start + log_size - count;
+ for (i = 0; i < count; i++) {
+ c = *((char *) log_buf+(j++ & (LOG_BUF_LEN-1)));
+ put_user(c, buf++);
+ }
+ if (do_clear)
+ logged_chars = 0;
+ return i;
+ case 5: /* Clear ring buffer */
+ logged_chars = 0;
+ return 0;
+ case 6: /* Disable logging to console */
+ console_loglevel = MINIMUM_CONSOLE_LOGLEVEL;
+ return 0;
+ case 7: /* Enable logging to console */
+ console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+ return 0;
+ case 8:
+ if (len < 1 || len > 8)
+ return -EINVAL;
+ if (len < MINIMUM_CONSOLE_LOGLEVEL)
+ len = MINIMUM_CONSOLE_LOGLEVEL;
+ console_loglevel = len;
+ return 0;
+ }
+ return -EINVAL;
+}
+
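+/*
+ * Illustrative sketch (not part of the original file): the ring-buffer
+ * indices above rely on LOG_BUF_LEN being a power of two, so that
+ * "index & (LOG_BUF_LEN-1)" is equivalent to "index % LOG_BUF_LEN" but
+ * cheaper.  A minimal model of the same wrap-around logic:
+ */
+#if 0	/* example only, never compiled */
+static void log_putc_example(char c)
+{
+	/* append one character, dropping the oldest one when full */
+	log_buf[(log_start + log_size) & (LOG_BUF_LEN-1)] = c;
+	if (log_size < LOG_BUF_LEN)
+		log_size++;
+	else
+		log_start = (log_start + 1) & (LOG_BUF_LEN-1);
+}
+#endif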
+
+asmlinkage int printk(const char *fmt, ...)
+{
+ va_list args;
+ int i;
+ char *msg, *p, *buf_end;
+ static char msg_level = -1;
+ long flags;
+
+ save_flags(flags);
+ cli();
+ va_start(args, fmt);
+ i = vsprintf(buf + 3, fmt, args); /* hopefully i < sizeof(buf)-4 */
+ buf_end = buf + 3 + i;
+ va_end(args);
+ for (p = buf + 3; p < buf_end; p++) {
+ msg = p;
+ if (msg_level < 0) {
+ if (
+ p[0] != '<' ||
+ p[1] < '0' ||
+ p[1] > '7' ||
+ p[2] != '>'
+ ) {
+ p -= 3;
+ p[0] = '<';
+ p[1] = DEFAULT_MESSAGE_LOGLEVEL + '0';
+ p[2] = '>';
+ } else
+ msg += 3;
+ msg_level = p[1] - '0';
+ }
+ for (; p < buf_end; p++) {
+ log_buf[(log_start+log_size) & (LOG_BUF_LEN-1)] = *p;
+ if (log_size < LOG_BUF_LEN)
+ log_size++;
+ else {
+ log_start++;
+ log_start &= LOG_BUF_LEN-1;
+ }
+ logged_chars++;
+ if (*p == '\n')
+ break;
+ }
+ if (msg_level < console_loglevel && console_print_proc) {
+ char tmp = p[1];
+ p[1] = '\0';
+ (*console_print_proc)(msg);
+ p[1] = tmp;
+ }
+ if (*p == '\n')
+ msg_level = -1;
+ }
+ restore_flags(flags);
+ wake_up_interruptible(&log_wait);
+ return i;
+}
+
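+/*
+ * Illustrative sketch (not part of the original file): callers may prefix
+ * a message with "<n>" (n = 0..7) to select a loglevel; printk() above
+ * inserts "<4>" (DEFAULT_MESSAGE_LOGLEVEL) when no prefix is given, and
+ * only messages with a level below console_loglevel reach the console.
+ */
+#if 0	/* example only, never compiled */
+static void printk_loglevel_example(void)
+{
+	printk("<1>" "disk on fire\n");	/* almost always hits the console */
+	printk("plain message\n");	/* treated as "<4>plain message\n" */
+}
+#endif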
+/*
+ * The console driver calls this routine during kernel initialization
+ * to register the console printing procedure with printk() and to
+ * print any messages that were printed by the kernel before the
+ * console driver was initialized.
+ */
+void register_console(void (*proc)(const char *))
+{
+ int i,j;
+ int p = log_start;
+ char buf[16];
+ char msg_level = -1;
+ char *q;
+
+ console_print_proc = proc;
+
+ for (i=0,j=0; i < log_size; i++) {
+ buf[j++] = log_buf[p];
+ p++; p &= LOG_BUF_LEN-1;
+ if (buf[j-1] != '\n' && i < log_size - 1 && j < sizeof(buf)-1)
+ continue;
+ buf[j] = 0;
+ q = buf;
+ if (msg_level < 0) {
+ msg_level = buf[1] - '0';
+ q = buf + 3;
+ }
+ if (msg_level < console_loglevel)
+ (*proc)(q);
+ if (buf[j-1] == '\n')
+ msg_level = -1;
+ j = 0;
+ }
+}
+
+/*
+ * Write a message to a certain tty, not just the console. This is used for
+ * messages that need to be redirected to a specific tty.
+ * We don't put it into the syslog queue right now; maybe in the future,
+ * if really needed.
+ */
+void tty_write_message(struct tty_struct *tty, char *msg)
+{
+ if (tty && tty->driver.write)
+ tty->driver.write(tty, 0, msg, strlen(msg));
+ return;
+}
diff --git a/linux/src/kernel/resource.c b/linux/src/kernel/resource.c
new file mode 100644
index 0000000..7d7a4ad
--- /dev/null
+++ b/linux/src/kernel/resource.c
@@ -0,0 +1,129 @@
+/*
+ * linux/kernel/resource.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * David Hinds
+ *
+ * Kernel io-region resource management
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+#define IOTABLE_SIZE 128
+
+typedef struct resource_entry_t {
+ u_long from, num;
+ const char *name;
+ struct resource_entry_t *next;
+} resource_entry_t;
+
+static resource_entry_t iolist = { 0, 0, "", NULL };
+
+static resource_entry_t iotable[IOTABLE_SIZE];
+
+/*
+ * This generates the report for /proc/ioports
+ */
+int get_ioport_list(char *buf)
+{
+ resource_entry_t *p;
+ int len = 0;
+
+ for (p = iolist.next; (p) && (len < 4000); p = p->next)
+ len += sprintf(buf+len, "%04lx-%04lx : %s\n",
+ p->from, p->from+p->num-1, p->name);
+ if (p)
+ len += sprintf(buf+len, "4K limit reached!\n");
+ return len;
+}
+
+/*
+ * The workhorse function: find where to put a new entry
+ */
+static resource_entry_t *find_gap(resource_entry_t *root,
+ u_long from, u_long num)
+{
+ unsigned long flags;
+ resource_entry_t *p;
+
+ if (from > from+num-1)
+ return NULL;
+ save_flags(flags);
+ cli();
+ for (p = root; ; p = p->next) {
+ if ((p != root) && (p->from+p->num-1 >= from)) {
+ p = NULL;
+ break;
+ }
+ if ((p->next == NULL) || (p->next->from > from+num-1))
+ break;
+ }
+ restore_flags(flags);
+ return p;
+}
+
+/*
+ * Call this from the device driver to register the ioport region.
+ */
+void request_region(unsigned int from, unsigned int num, const char *name)
+{
+ resource_entry_t *p;
+ int i;
+
+ for (i = 0; i < IOTABLE_SIZE; i++)
+ if (iotable[i].num == 0)
+ break;
+ if (i == IOTABLE_SIZE)
+ printk("warning: ioport table is full\n");
+ else {
+ p = find_gap(&iolist, from, num);
+ if (p == NULL)
+ return;
+ iotable[i].name = name;
+ iotable[i].from = from;
+ iotable[i].num = num;
+ iotable[i].next = p->next;
+ p->next = &iotable[i];
+ return;
+ }
+}
+
+/*
+ * Call this when the device driver is unloaded
+ */
+void release_region(unsigned int from, unsigned int num)
+{
+ resource_entry_t *p, *q;
+
+ for (p = &iolist; ; p = q) {
+ q = p->next;
+ if (q == NULL)
+ break;
+ if ((q->from == from) && (q->num == num)) {
+ q->num = 0;
+ p->next = q->next;
+ return;
+ }
+ }
+}
+
+/*
+ * Call this to check the ioport region before probing
+ */
+int check_region(unsigned int from, unsigned int num)
+{
+ return (find_gap(&iolist, from, num) == NULL) ? -EBUSY : 0;
+}
+
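+/*
+ * Illustrative sketch (not part of the original file): the usual driver
+ * sequence is check_region() before probing the hardware, then
+ * request_region() once the device has been found, and release_region()
+ * when the driver shuts down.  The names and port range below are
+ * hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static int example_probe(void)
+{
+	if (check_region(0x300, 16))	/* ports already claimed? */
+		return -EBUSY;
+	/* ... probe the hardware at 0x300-0x30f ... */
+	request_region(0x300, 16, "example");
+	return 0;
+}
+
+static void example_remove(void)
+{
+	release_region(0x300, 16);
+}
+#endif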
+/* Called from init/main.c to reserve IO ports. */
+void reserve_setup(char *str, int *ints)
+{
+ int i;
+
+ for (i = 1; i < ints[0]; i += 2)
+ request_region(ints[i], ints[i+1], "reserved");
+}
diff --git a/linux/src/kernel/sched.c b/linux/src/kernel/sched.c
new file mode 100644
index 0000000..0904f59
--- /dev/null
+++ b/linux/src/kernel/sched.c
@@ -0,0 +1,1747 @@
+/*
+ * linux/kernel/sched.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1996-04-21 Modified by Ulrich Windl to make NTP work
+ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
+ * make semaphores SMP safe
+ * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+
+/*
+ * 'sched.c' is the main kernel file. It contains scheduling primitives
+ * (sleep_on, wakeup, schedule etc.) as well as a number of simple system
+ * call functions (of the type getpid()), which just extract a field from
+ * the current task.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/fdreg.h>
+#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/tqueue.h>
+#include <linux/resource.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+
+#include <linux/timex.h>
+
+/*
+ * kernel variables
+ */
+
+int securelevel = 0; /* system security level */
+
+long tick = (1000000 + HZ/2) / HZ; /* timer interrupt period */
+volatile struct timeval xtime; /* The current time */
+int tickadj = 500/HZ ? 500/HZ : 1; /* microsecs */
+
+DECLARE_TASK_QUEUE(tq_timer);
+DECLARE_TASK_QUEUE(tq_immediate);
+DECLARE_TASK_QUEUE(tq_scheduler);
+
+/*
+ * phase-lock loop variables
+ */
+/* TIME_ERROR prevents overwriting the CMOS clock */
+int time_state = TIME_ERROR; /* clock synchronization status */
+int time_status = STA_UNSYNC; /* clock status bits */
+long time_offset = 0; /* time adjustment (us) */
+long time_constant = 2; /* pll time constant */
+long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
+long time_precision = 1; /* clock precision (us) */
+long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
+long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
+long time_phase = 0; /* phase offset (scaled us) */
+long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC; /* frequency offset (scaled ppm) */
+long time_adj = 0; /* tick adjust (scaled 1 / HZ) */
+long time_reftime = 0; /* time at last adjustment (s) */
+
+long time_adjust = 0;
+long time_adjust_step = 0;
+
+int need_resched = 0;
+unsigned long event = 0;
+
+extern int _setitimer(int, struct itimerval *, struct itimerval *);
+unsigned int * prof_buffer = NULL;
+unsigned long prof_len = 0;
+unsigned long prof_shift = 0;
+
+#define _S(nr) (1<<((nr)-1))
+
+extern void mem_use(void);
+extern unsigned long get_wchan(struct task_struct *);
+
+static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
+unsigned long init_user_stack[1024] = { STACK_MAGIC, };
+static struct vm_area_struct init_mmap = INIT_MMAP;
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+
+struct mm_struct init_mm = INIT_MM;
+struct task_struct init_task = INIT_TASK;
+
+unsigned long volatile jiffies=0;
+
+struct task_struct *current_set[NR_CPUS];
+struct task_struct *last_task_used_math = NULL;
+
+struct task_struct * task[NR_TASKS] = {&init_task, };
+
+struct kernel_stat kstat = { 0 };
+
+static inline void add_to_runqueue(struct task_struct * p)
+{
+#ifdef __SMP__
+ int cpu=smp_processor_id();
+#endif
+#if 1 /* sanity tests */
+ if (p->next_run || p->prev_run) {
+ printk("task already on run-queue\n");
+ return;
+ }
+#endif
+ if (p->policy != SCHED_OTHER || p->counter > current->counter + 3)
+ need_resched = 1;
+ nr_running++;
+ (p->prev_run = init_task.prev_run)->next_run = p;
+ p->next_run = &init_task;
+ init_task.prev_run = p;
+#ifdef __SMP__
+ /* this is safe only if called with cli()*/
+ while(set_bit(31,&smp_process_available))
+ {
+ while(test_bit(31,&smp_process_available))
+ {
+ if(clear_bit(cpu,&smp_invalidate_needed))
+ {
+ local_flush_tlb();
+ set_bit(cpu,&cpu_callin_map[0]);
+ }
+ }
+ }
+ smp_process_available++;
+ clear_bit(31,&smp_process_available);
+ if ((0!=p->pid) && smp_threads_ready)
+ {
+ int i;
+ for (i=0;i<smp_num_cpus;i++)
+ {
+ if (0==current_set[cpu_logical_map[i]]->pid)
+ {
+ smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
+ break;
+ }
+ }
+ }
+#endif
+}
+
+static inline void del_from_runqueue(struct task_struct * p)
+{
+ struct task_struct *next = p->next_run;
+ struct task_struct *prev = p->prev_run;
+
+#if 1 /* sanity tests */
+ if (!next || !prev) {
+ printk("task not on run-queue\n");
+ return;
+ }
+#endif
+ if (p == &init_task) {
+ static int nr = 0;
+ if (nr < 5) {
+ nr++;
+ printk("idle task may not sleep\n");
+ }
+ return;
+ }
+ nr_running--;
+ next->prev_run = prev;
+ prev->next_run = next;
+ p->next_run = NULL;
+ p->prev_run = NULL;
+}
+
+static inline void move_last_runqueue(struct task_struct * p)
+{
+ struct task_struct *next = p->next_run;
+ struct task_struct *prev = p->prev_run;
+
+ /* remove from list */
+ next->prev_run = prev;
+ prev->next_run = next;
+ /* add back to list */
+ p->next_run = &init_task;
+ prev = init_task.prev_run;
+ init_task.prev_run = p;
+ p->prev_run = prev;
+ prev->next_run = p;
+}
+
+/*
+ * Wake up a process. Put it on the run-queue if it's not
+ * already there. The "current" process is always on the
+ * run-queue (except when the actual re-schedule is in
+ * progress), and as such you're allowed to do the simpler
+ * "current->state = TASK_RUNNING" to mark yourself runnable
+ * without the overhead of this.
+ */
+inline void wake_up_process(struct task_struct * p)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ p->state = TASK_RUNNING;
+ if (!p->next_run)
+ add_to_runqueue(p);
+ restore_flags(flags);
+}
+
+static void process_timeout(unsigned long __data)
+{
+ struct task_struct * p = (struct task_struct *) __data;
+
+ p->timeout = 0;
+ wake_up_process(p);
+}
+
+/*
+ * This is the function that decides how desirable a process is..
+ * You can weigh different processes against each other depending
+ * on what CPU they've run on lately etc to try to handle cache
+ * and TLB miss penalties.
+ *
+ * Return values:
+ * -1000: never select this
+ * 0: out of time, recalculate counters (but it might still be
+ * selected)
+ * +ve: "goodness" value (the larger, the better)
+ * +1000: realtime process, select this.
+ */
+static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
+{
+ int weight;
+
+#ifdef __SMP__
+ /* We are not permitted to run a task someone else is running */
+ if (p->processor != NO_PROC_ID)
+ return -1000;
+#ifdef PAST_2_0
+ /* This process is locked to a processor group */
+	if (p->processor_mask && !(p->processor_mask & (1<<this_cpu)))
+ return -1000;
+#endif
+#endif
+
+ /*
+ * Realtime process, select the first one on the
+ * runqueue (taking priorities within processes
+ * into account).
+ */
+ if (p->policy != SCHED_OTHER)
+ return 1000 + p->rt_priority;
+
+ /*
+ * Give the process a first-approximation goodness value
+ * according to the number of clock-ticks it has left.
+ *
+ * Don't do any other calculations if the time slice is
+ * over..
+ */
+ weight = p->counter;
+ if (weight) {
+
+#ifdef __SMP__
+ /* Give a largish advantage to the same processor... */
+ /* (this is equivalent to penalizing other processors) */
+ if (p->last_processor == this_cpu)
+ weight += PROC_CHANGE_PENALTY;
+#endif
+
+ /* .. and a slight advantage to the current process */
+ if (p == prev)
+ weight += 1;
+ }
+
+ return weight;
+}
+
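+/*
+ * Worked example (not part of the original file, numbers are only
+ * illustrative): with a SCHED_FIFO task of rt_priority 10, goodness()
+ * returns 1010; a SCHED_OTHER task with counter 5 returns 5, or 6 if it
+ * is the previously running task, and on SMP it additionally gains
+ * PROC_CHANGE_PENALTY when it last ran on the current CPU.
+ */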
+
+/*
+ The following allow_interrupts function is used to workaround a rare but
+ nasty deadlock situation that is possible for 2.0.x Intel SMP because it uses
+ a single kernel lock and interrupts are only routed to the boot CPU. There
+ are two deadlock scenarios this code protects against.
+
+ The first scenario is that if a CPU other than the boot CPU holds the kernel
+ lock and needs to wait for an operation to complete that itself requires an
+ interrupt, there is a deadlock since the boot CPU may be able to accept the
+ interrupt but will not be able to acquire the kernel lock to process it.
+
+ The workaround for this deadlock requires adding calls to allow_interrupts to
+ places where this deadlock is possible. These places are known to be present
+ in buffer.c and keyboard.c. It is also possible that there are other such
+ places which have not been identified yet. In order to break the deadlock,
+ the code in allow_interrupts temporarily yields the kernel lock directly to
+ the boot CPU to allow the interrupt to be processed. The boot CPU interrupt
+ entry code indicates that it is spinning waiting for the kernel lock by
+ setting the smp_blocked_interrupt_pending variable. This code notices that
+ and manipulates the active_kernel_processor variable to yield the kernel lock
+ without ever clearing it. When the interrupt has been processed, the
+ saved_active_kernel_processor variable contains the value for the interrupt
+ exit code to restore, either the APICID of the CPU that granted it the kernel
+ lock, or NO_PROC_ID in the normal case where no yielding occurred. Restoring
+ active_kernel_processor from saved_active_kernel_processor returns the kernel
+ lock back to the CPU that yielded it.
+
+ The second form of deadlock is even more insidious. Suppose the boot CPU
+ takes a page fault and then the previous scenario ensues. In this case, the
+ boot CPU would spin with interrupts disabled waiting to acquire the kernel
+ lock. To resolve this deadlock, the kernel lock acquisition code must enable
+ interrupts briefly so that the pending interrupt can be handled as in the
+ case above.
+
+ An additional form of deadlock is where kernel code running on a non-boot CPU
+ waits for the jiffies variable to be incremented. This deadlock is avoided
+ by having the spin loops in ENTER_KERNEL increment jiffies approximately
+ every 10 milliseconds. Finally, if approximately 60 seconds elapse waiting
+ for the kernel lock, a message will be printed if possible to indicate that a
+ deadlock has been detected.
+
+ Leonard N. Zubkoff
+ 4 August 1997
+*/
+
+#if defined(__SMP__) && defined(__i386__)
+
+volatile unsigned char smp_blocked_interrupt_pending = 0;
+
+volatile unsigned char saved_active_kernel_processor = NO_PROC_ID;
+
+void allow_interrupts(void)
+{
+ if (smp_processor_id() == boot_cpu_id) return;
+ if (smp_blocked_interrupt_pending)
+ {
+ unsigned long saved_kernel_counter;
+ long timeout_counter;
+ saved_active_kernel_processor = active_kernel_processor;
+ saved_kernel_counter = kernel_counter;
+ kernel_counter = 0;
+ active_kernel_processor = boot_cpu_id;
+ timeout_counter = 6000000;
+ while (active_kernel_processor != saved_active_kernel_processor &&
+ --timeout_counter >= 0)
+ {
+ udelay(10);
+ barrier();
+ }
+ if (timeout_counter < 0)
+ panic("FORWARDED INTERRUPT TIMEOUT (AKP = %d, Saved AKP = %d)\n",
+ active_kernel_processor, saved_active_kernel_processor);
+ kernel_counter = saved_kernel_counter;
+ saved_active_kernel_processor = NO_PROC_ID;
+ }
+}
+
+#else
+
+void allow_interrupts(void) {}
+
+#endif
+
+
+/*
+ * 'schedule()' is the scheduler function. It's a very simple and nice
+ * scheduler: it's not perfect, but certainly works for most things.
+ *
+ * The goto is "interesting".
+ *
+ * NOTE!! Task 0 is the 'idle' task, which gets called when no other
+ * tasks can run. It can not be killed, and it cannot sleep. The 'state'
+ * information in task[0] is never used.
+ */
+asmlinkage void schedule(void)
+{
+ int c;
+ struct task_struct * p;
+ struct task_struct * prev, * next;
+ unsigned long timeout = 0;
+ int this_cpu=smp_processor_id();
+
+/* check alarm, wake up any interruptible tasks that have got a signal */
+
+ allow_interrupts();
+
+ if (intr_count)
+ goto scheduling_in_interrupt;
+
+ if (bh_active & bh_mask) {
+ intr_count = 1;
+ do_bottom_half();
+ intr_count = 0;
+ }
+
+ run_task_queue(&tq_scheduler);
+
+ need_resched = 0;
+ prev = current;
+ cli();
+ /* move an exhausted RR process to be last.. */
+ if (!prev->counter && prev->policy == SCHED_RR) {
+ prev->counter = prev->priority;
+ move_last_runqueue(prev);
+ }
+ switch (prev->state) {
+ case TASK_INTERRUPTIBLE:
+ if (prev->signal & ~prev->blocked)
+ goto makerunnable;
+ timeout = prev->timeout;
+ if (timeout && (timeout <= jiffies)) {
+ prev->timeout = 0;
+ timeout = 0;
+ makerunnable:
+ prev->state = TASK_RUNNING;
+ break;
+ }
+ default:
+ del_from_runqueue(prev);
+ case TASK_RUNNING:
+ }
+ p = init_task.next_run;
+ sti();
+
+#ifdef __SMP__
+ /*
+ * This is safe as we do not permit re-entry of schedule()
+ */
+ prev->processor = NO_PROC_ID;
+#define idle_task (task[cpu_number_map[this_cpu]])
+#else
+#define idle_task (&init_task)
+#endif
+
+/*
+ * Note! there may appear new tasks on the run-queue during this, as
+ * interrupts are enabled. However, they will be put on front of the
+ * list, so our list starting at "p" is essentially fixed.
+ */
+/* this is the scheduler proper: */
+ c = -1000;
+ next = idle_task;
+ while (p != &init_task) {
+ int weight = goodness(p, prev, this_cpu);
+ if (weight > c)
+ c = weight, next = p;
+ p = p->next_run;
+ }
+
+ /* if all runnable processes have "counter == 0", re-calculate counters */
+ if (!c) {
+ for_each_task(p)
+ p->counter = (p->counter >> 1) + p->priority;
+ }
+#ifdef __SMP__
+ /*
+ * Allocate process to CPU
+ */
+
+ next->processor = this_cpu;
+ next->last_processor = this_cpu;
+#endif
+#ifdef __SMP_PROF__
+ /* mark processor running an idle thread */
+ if (0==next->pid)
+ set_bit(this_cpu,&smp_idle_map);
+ else
+ clear_bit(this_cpu,&smp_idle_map);
+#endif
+ if (prev != next) {
+ struct timer_list timer;
+
+ kstat.context_swtch++;
+ if (timeout) {
+ init_timer(&timer);
+ timer.expires = timeout;
+ timer.data = (unsigned long) prev;
+ timer.function = process_timeout;
+ add_timer(&timer);
+ }
+ get_mmu_context(next);
+ switch_to(prev,next);
+ if (timeout)
+ del_timer(&timer);
+ }
+ return;
+
+scheduling_in_interrupt:
+ printk("Aiee: scheduling in interrupt %p\n",
+ __builtin_return_address(0));
+}
+
+#ifndef __alpha__
+
+/*
+ * For backwards compatibility? This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+asmlinkage int sys_pause(void)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
+
+#endif
+
+/*
+ * wake_up doesn't wake up stopped processes - they have to be awakened
+ * with signals or similar.
+ *
+ * Note that this doesn't need cli-sti pairs: interrupts may not change
+ * the wait-queue structures directly, but only call wake_up() to wake
+ * a process. The process must remove itself from the queue once it has woken.
+ */
+void wake_up(struct wait_queue **q)
+{
+ struct wait_queue *next;
+ struct wait_queue *head;
+
+ if (!q || !(next = *q))
+ return;
+ head = WAIT_QUEUE_HEAD(q);
+ while (next != head) {
+ struct task_struct *p = next->task;
+ next = next->next;
+ if (p != NULL) {
+ if ((p->state == TASK_UNINTERRUPTIBLE) ||
+ (p->state == TASK_INTERRUPTIBLE))
+ wake_up_process(p);
+ }
+ if (!next)
+ goto bad;
+ }
+ return;
+bad:
+ printk("wait_queue is bad (eip = %p)\n",
+ __builtin_return_address(0));
+ printk(" q = %p\n",q);
+ printk(" *q = %p\n",*q);
+}
+
+void wake_up_interruptible(struct wait_queue **q)
+{
+ struct wait_queue *next;
+ struct wait_queue *head;
+
+ if (!q || !(next = *q))
+ return;
+ head = WAIT_QUEUE_HEAD(q);
+ while (next != head) {
+ struct task_struct *p = next->task;
+ next = next->next;
+ if (p != NULL) {
+ if (p->state == TASK_INTERRUPTIBLE)
+ wake_up_process(p);
+ }
+ if (!next)
+ goto bad;
+ }
+ return;
+bad:
+ printk("wait_queue is bad (eip = %p)\n",
+ __builtin_return_address(0));
+ printk(" q = %p\n",q);
+ printk(" *q = %p\n",*q);
+}
+
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * This routine must execute atomically.
+ */
+static inline int waking_non_zero(struct semaphore *sem)
+{
+ int ret ;
+ long flags ;
+
+ get_buzz_lock(&sem->lock) ;
+ save_flags(flags) ;
+ cli() ;
+
+ if ((ret = (sem->waking > 0)))
+ sem->waking-- ;
+
+ restore_flags(flags) ;
+ give_buzz_lock(&sem->lock) ;
+ return(ret) ;
+}
+
+/*
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+ atomic_inc(&sem->waking) ;
+ wake_up(&sem->wait);
+}
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+int __do_down(struct semaphore * sem, int task_state)
+{
+ struct task_struct *tsk = current;
+ struct wait_queue wait = { tsk, NULL };
+ int ret = 0 ;
+
+ tsk->state = task_state;
+ add_wait_queue(&sem->wait, &wait);
+
+ /*
+ * Ok, we're set up. sem->count is known to be less than zero
+ * so we must wait.
+ *
+ * We can let go the lock for purposes of waiting.
+ * We re-acquire it after awaking so as to protect
+ * all semaphore operations.
+ *
+ * If "up()" is called before we call waking_non_zero() then
+ * we will catch it right away. If it is called later then
+ * we will have to go through a wakeup cycle to catch it.
+ *
+ * Multiple waiters contend for the semaphore lock to see
+ * who gets to gate through and who has to wait some more.
+ */
+ for (;;)
+ {
+ if (waking_non_zero(sem)) /* are we waking up? */
+ break ; /* yes, exit loop */
+
+ if ( task_state == TASK_INTERRUPTIBLE
+ && (tsk->signal & ~tsk->blocked) /* signalled */
+ )
+ {
+ ret = -EINTR ; /* interrupted */
+ atomic_inc(&sem->count) ; /* give up on down operation */
+ break ;
+ }
+
+ schedule();
+ tsk->state = task_state;
+ }
+
+ tsk->state = TASK_RUNNING;
+ remove_wait_queue(&sem->wait, &wait);
+ return(ret) ;
+
+} /* __do_down */
+
+void __down(struct semaphore * sem)
+{
+ __do_down(sem,TASK_UNINTERRUPTIBLE) ;
+}
+
+int __down_interruptible(struct semaphore * sem)
+{
+ return(__do_down(sem,TASK_INTERRUPTIBLE)) ;
+}
+
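+/*
+ * Illustrative sketch (not part of the original file): a typical use of
+ * the semaphore primitives built on __do_down()/__up().  The driver-side
+ * names are hypothetical; down(), down_interruptible() and up() are the
+ * inline wrappers from <asm/semaphore.h>.
+ */
+#if 0	/* example only, never compiled */
+static struct semaphore example_sem = MUTEX;	/* initialised to 1 */
+
+static int example_critical_section(void)
+{
+	if (down_interruptible(&example_sem))
+		return -EINTR;		/* interrupted by a signal */
+	/* ... touch the shared state ... */
+	up(&example_sem);
+	return 0;
+}
+#endif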
+
+static inline void __sleep_on(struct wait_queue **p, int state)
+{
+ unsigned long flags;
+ struct wait_queue wait = { current, NULL };
+
+ if (!p)
+ return;
+ if (current == task[0])
+ panic("task[0] trying to sleep");
+ current->state = state;
+ save_flags(flags);
+ cli();
+ __add_wait_queue(p, &wait);
+ sti();
+ schedule();
+ cli();
+ __remove_wait_queue(p, &wait);
+ restore_flags(flags);
+}
+
+void interruptible_sleep_on(struct wait_queue **p)
+{
+ __sleep_on(p,TASK_INTERRUPTIBLE);
+}
+
+void sleep_on(struct wait_queue **p)
+{
+ __sleep_on(p,TASK_UNINTERRUPTIBLE);
+}
+
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+#define SLOW_BUT_DEBUGGING_TIMERS 0
+
+struct timer_vec {
+ int index;
+ struct timer_list *vec[TVN_SIZE];
+};
+
+struct timer_vec_root {
+ int index;
+ struct timer_list *vec[TVR_SIZE];
+};
+
+static struct timer_vec tv5 = { 0 };
+static struct timer_vec tv4 = { 0 };
+static struct timer_vec tv3 = { 0 };
+static struct timer_vec tv2 = { 0 };
+static struct timer_vec_root tv1 = { 0 };
+
+static struct timer_vec * const tvecs[] = {
+ (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
+};
+
+#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+
+static unsigned long timer_jiffies = 0;
+
+static inline void insert_timer(struct timer_list *timer,
+ struct timer_list **vec, int idx)
+{
+ if ((timer->next = vec[idx]))
+ vec[idx]->prev = timer;
+ vec[idx] = timer;
+ timer->prev = (struct timer_list *)&vec[idx];
+}
+
+static inline void internal_add_timer(struct timer_list *timer)
+{
+ /*
+ * must be cli-ed when calling this
+ */
+ unsigned long expires = timer->expires;
+ unsigned long idx = expires - timer_jiffies;
+
+ if (idx < TVR_SIZE) {
+ int i = expires & TVR_MASK;
+ insert_timer(timer, tv1.vec, i);
+ } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
+ int i = (expires >> TVR_BITS) & TVN_MASK;
+ insert_timer(timer, tv2.vec, i);
+ } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv3.vec, i);
+ } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
+ int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv4.vec, i);
+ } else if (expires < timer_jiffies) {
+ /* can happen if you add a timer with expires == jiffies,
+ * or you set a timer to go off in the past
+ */
+ insert_timer(timer, tv1.vec, tv1.index);
+ } else if (idx < 0xffffffffUL) {
+ int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+ insert_timer(timer, tv5.vec, i);
+ } else {
+ /* Can only get here on architectures with 64-bit jiffies */
+ timer->next = timer->prev = timer;
+ }
+}
+
+void add_timer(struct timer_list *timer)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+#if SLOW_BUT_DEBUGGING_TIMERS
+ if (timer->next || timer->prev) {
+ printk("add_timer() called with non-zero list from %p\n",
+ __builtin_return_address(0));
+ goto out;
+ }
+#endif
+ internal_add_timer(timer);
+#if SLOW_BUT_DEBUGGING_TIMERS
+out:
+#endif
+ restore_flags(flags);
+}
+
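+/*
+ * Illustrative sketch (not part of the original file): how a driver would
+ * normally arm one of these timers.  The callback name and the 2-second
+ * delay are made up; expires is an absolute jiffies value.
+ */
+#if 0	/* example only, never compiled */
+static void example_timeout(unsigned long data)
+{
+	printk("timer %lu fired\n", data);
+}
+
+static void example_arm_timer(struct timer_list *t)
+{
+	init_timer(t);
+	t->expires = jiffies + 2*HZ;	/* roughly 2 seconds from now */
+	t->data = 42;
+	t->function = example_timeout;
+	add_timer(t);			/* del_timer(t) cancels it */
+}
+#endif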
+static inline int detach_timer(struct timer_list *timer)
+{
+ int ret = 0;
+ struct timer_list *next, *prev;
+ next = timer->next;
+ prev = timer->prev;
+ if (next) {
+ next->prev = prev;
+ }
+ if (prev) {
+ ret = 1;
+ prev->next = next;
+ }
+ return ret;
+}
+
+
+int del_timer(struct timer_list * timer)
+{
+ int ret;
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ ret = detach_timer(timer);
+ timer->next = timer->prev = 0;
+ restore_flags(flags);
+ return ret;
+}
+
+static inline void cascade_timers(struct timer_vec *tv)
+{
+ /* cascade all the timers from tv up one level */
+ struct timer_list *timer;
+ timer = tv->vec[tv->index];
+ /*
+ * We are removing _all_ timers from the list, so we don't have to
+ * detach them individually, just clear the list afterwards.
+ */
+ while (timer) {
+ struct timer_list *tmp = timer;
+ timer = timer->next;
+ internal_add_timer(tmp);
+ }
+ tv->vec[tv->index] = NULL;
+ tv->index = (tv->index + 1) & TVN_MASK;
+}
+
+static inline void run_timer_list(void)
+{
+ cli();
+ while ((long)(jiffies - timer_jiffies) >= 0) {
+ struct timer_list *timer;
+ if (!tv1.index) {
+ int n = 1;
+ do {
+ cascade_timers(tvecs[n]);
+ } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+ }
+ while ((timer = tv1.vec[tv1.index])) {
+ void (*fn)(unsigned long) = timer->function;
+ unsigned long data = timer->data;
+ detach_timer(timer);
+ timer->next = timer->prev = NULL;
+ sti();
+ fn(data);
+ cli();
+ }
+ ++timer_jiffies;
+ tv1.index = (tv1.index + 1) & TVR_MASK;
+ }
+ sti();
+}
+
+static inline void run_old_timers(void)
+{
+ struct timer_struct *tp;
+ unsigned long mask;
+
+ for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
+ if (mask > timer_active)
+ break;
+ if (!(mask & timer_active))
+ continue;
+ if (tp->expires > jiffies)
+ continue;
+ timer_active &= ~mask;
+ tp->fn();
+ sti();
+ }
+}
+
+void tqueue_bh(void)
+{
+ run_task_queue(&tq_timer);
+}
+
+void immediate_bh(void)
+{
+ run_task_queue(&tq_immediate);
+}
+
+unsigned long timer_active = 0;
+struct timer_struct timer_table[32];
+
+/*
+ * Hmm.. Changed this, as the GNU make sources (load.c) seem to
+ * imply that avenrun[] is the standard name for this kind of thing.
+ * Nothing else seems to be standardized: the fractional size etc
+ * all seem to differ on different machines.
+ */
+unsigned long avenrun[3] = { 0,0,0 };
+
+/*
+ * Nr of active tasks - counted in fixed-point numbers
+ */
+static unsigned long count_active_tasks(void)
+{
+ struct task_struct **p;
+ unsigned long nr = 0;
+
+ for(p = &LAST_TASK; p > &FIRST_TASK; --p)
+ if (*p && ((*p)->state == TASK_RUNNING ||
+ (*p)->state == TASK_UNINTERRUPTIBLE ||
+ (*p)->state == TASK_SWAPPING))
+ nr += FIXED_1;
+#ifdef __SMP__
+ nr-=(smp_num_cpus-1)*FIXED_1;
+#endif
+ return nr;
+}
+
+static inline void calc_load(unsigned long ticks)
+{
+ unsigned long active_tasks; /* fixed-point */
+ static int count = LOAD_FREQ;
+
+ count -= ticks;
+ if (count < 0) {
+ count += LOAD_FREQ;
+ active_tasks = count_active_tasks();
+ CALC_LOAD(avenrun[0], EXP_1, active_tasks);
+ CALC_LOAD(avenrun[1], EXP_5, active_tasks);
+ CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+ }
+}
+
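+/*
+ * Illustrative sketch (not part of the original file): avenrun[] holds the
+ * load averages as fixed-point numbers scaled by FIXED_1, so a stored
+ * value of 3*FIXED_1/2 corresponds to the familiar load figure 1.50.
+ * Something like /proc/loadavg converts it roughly as below (assuming the
+ * LOAD_INT/LOAD_FRAC helpers from <linux/sched.h>).
+ */
+#if 0	/* example only, never compiled */
+static int example_loadavg(char *buf)
+{
+	return sprintf(buf, "%lu.%02lu\n",
+		       LOAD_INT(avenrun[0]), LOAD_FRAC(avenrun[0]));
+}
+#endif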
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ */
+static void second_overflow(void)
+{
+ long ltemp;
+
+ /* Bump the maxerror field */
+ time_maxerror += time_tolerance >> SHIFT_USEC;
+ if ( time_maxerror > NTP_PHASE_LIMIT ) {
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_state = TIME_ERROR; /* p. 17, sect. 4.3, (b) */
+ time_status |= STA_UNSYNC;
+ }
+
+ /*
+ * Leap second processing. If in leap-insert state at
+ * the end of the day, the system clock is set back one
+ * second; if in leap-delete state, the system clock is
+ * set ahead one second. The microtime() routine or
+ * external clock driver will insure that reported time
+ * is always monotonic. The ugly divides should be
+ * replaced.
+ */
+ switch (time_state) {
+
+ case TIME_OK:
+ if (time_status & STA_INS)
+ time_state = TIME_INS;
+ else if (time_status & STA_DEL)
+ time_state = TIME_DEL;
+ break;
+
+ case TIME_INS:
+ if (xtime.tv_sec % 86400 == 0) {
+ xtime.tv_sec--;
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+ }
+ break;
+
+ case TIME_DEL:
+ if ((xtime.tv_sec + 1) % 86400 == 0) {
+ xtime.tv_sec++;
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+ }
+ break;
+
+ case TIME_OOP:
+ time_state = TIME_WAIT;
+ break;
+
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ }
+
+ /*
+ * Compute the phase adjustment for the next second. In
+ * PLL mode, the offset is reduced by a fixed factor
+ * times the time constant. In FLL mode the offset is
+ * used directly. In either mode, the maximum phase
+ * adjustment for each second is clamped so as to spread
+ * the adjustment over not more than the number of
+ * seconds between updates.
+ */
+ if (time_offset < 0) {
+ ltemp = -time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset += ltemp;
+ time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ } else {
+ ltemp = time_offset;
+ if (!(time_status & STA_FLL))
+ ltemp >>= SHIFT_KG + time_constant;
+ if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
+ ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+ time_offset -= ltemp;
+ time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
+ }
+
+ /*
+ * Compute the frequency estimate and additional phase
+ * adjustment due to frequency error for the next
+ * second. When the PPS signal is engaged, gnaw on the
+ * watchdog counter and update the frequency computed by
+ * the pll and the PPS signal.
+ */
+ pps_valid++;
+ if (pps_valid == PPS_VALID) { /* PPS signal lost */
+ pps_jitter = MAXTIME;
+ pps_stabil = MAXFREQ;
+ time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+ STA_PPSWANDER | STA_PPSERROR);
+ }
+ ltemp = time_freq + pps_freq;
+ if (ltemp < 0)
+ time_adj -= -ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+ else
+ time_adj += ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+
+#if HZ == 100
+ /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
+ * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
+ */
+ if (time_adj < 0)
+ time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
+ else
+ time_adj += (time_adj >> 2) + (time_adj >> 5);
+#endif
+}
+
+/* in the NTP reference this is called "hardclock()" */
+static void update_wall_time_one_tick(void)
+{
+ if ( (time_adjust_step = time_adjust) != 0 ) {
+ /* We are doing an adjtime thing.
+ *
+ * Prepare time_adjust_step to be within bounds.
+ * Note that a positive time_adjust means we want the clock
+ * to run faster.
+ *
+ * Limit the amount of the step to be in the range
+ * -tickadj .. +tickadj
+ */
+ if (time_adjust > tickadj)
+ time_adjust_step = tickadj;
+ else if (time_adjust < -tickadj)
+ time_adjust_step = -tickadj;
+
+ /* Reduce by this step the amount of time left */
+ time_adjust -= time_adjust_step;
+ }
+ xtime.tv_usec += tick + time_adjust_step;
+ /*
+ * Advance the phase, once it gets to one microsecond, then
+ * advance the tick more.
+ */
+ time_phase += time_adj;
+ if (time_phase <= -FINEUSEC) {
+ long ltemp = -time_phase >> SHIFT_SCALE;
+ time_phase += ltemp << SHIFT_SCALE;
+ xtime.tv_usec -= ltemp;
+ }
+ else if (time_phase >= FINEUSEC) {
+ long ltemp = time_phase >> SHIFT_SCALE;
+ time_phase -= ltemp << SHIFT_SCALE;
+ xtime.tv_usec += ltemp;
+ }
+}
+
+/*
+ * Using a loop looks inefficient, but "ticks" is
+ * usually just one (we shouldn't be losing ticks,
+ * we're doing it this way mainly for interrupt
+ * latency reasons, not because we think we'll
+ * have lots of lost timer ticks).
+ */
+static void update_wall_time(unsigned long ticks)
+{
+ do {
+ ticks--;
+ update_wall_time_one_tick();
+ } while (ticks);
+
+ if (xtime.tv_usec >= 1000000) {
+ xtime.tv_usec -= 1000000;
+ xtime.tv_sec++;
+ second_overflow();
+ }
+}
+
+static inline void do_process_times(struct task_struct *p,
+ unsigned long user, unsigned long system)
+{
+ long psecs;
+
+ p->utime += user;
+ p->stime += system;
+
+ psecs = (p->stime + p->utime) / HZ;
+ if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
+ /* Send SIGXCPU every second.. */
+ if (psecs * HZ == p->stime + p->utime)
+ send_sig(SIGXCPU, p, 1);
+ /* and SIGKILL when we go over max.. */
+ if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
+ send_sig(SIGKILL, p, 1);
+ }
+}
+
+static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
+{
+ unsigned long it_virt = p->it_virt_value;
+
+ if (it_virt) {
+ if (it_virt <= ticks) {
+ it_virt = ticks + p->it_virt_incr;
+ send_sig(SIGVTALRM, p, 1);
+ }
+ p->it_virt_value = it_virt - ticks;
+ }
+}
+
+static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
+{
+ unsigned long it_prof = p->it_prof_value;
+
+ if (it_prof) {
+ if (it_prof <= ticks) {
+ it_prof = ticks + p->it_prof_incr;
+ send_sig(SIGPROF, p, 1);
+ }
+ p->it_prof_value = it_prof - ticks;
+ }
+}
+
+static __inline__ void update_one_process(struct task_struct *p,
+ unsigned long ticks, unsigned long user, unsigned long system)
+{
+ do_process_times(p, user, system);
+ do_it_virt(p, user);
+ do_it_prof(p, ticks);
+}
+
+static void update_process_times(unsigned long ticks, unsigned long system)
+{
+#ifndef __SMP__
+ struct task_struct * p = current;
+ unsigned long user = ticks - system;
+ if (p->pid) {
+ p->counter -= ticks;
+ if (p->counter < 0) {
+ p->counter = 0;
+ need_resched = 1;
+ }
+ if (p->priority < DEF_PRIORITY)
+ kstat.cpu_nice += user;
+ else
+ kstat.cpu_user += user;
+ kstat.cpu_system += system;
+ }
+ update_one_process(p, ticks, user, system);
+#else
+ int cpu,j;
+ cpu = smp_processor_id();
+ for (j=0;j<smp_num_cpus;j++)
+ {
+ int i = cpu_logical_map[j];
+ struct task_struct *p;
+
+#ifdef __SMP_PROF__
+ if (test_bit(i,&smp_idle_map))
+ smp_idle_count[i]++;
+#endif
+ p = current_set[i];
+ /*
+ * Do we have a real process?
+ */
+ if (p->pid) {
+ /* assume user-mode process */
+ unsigned long utime = ticks;
+ unsigned long stime = 0;
+ if (cpu == i) {
+ utime = ticks-system;
+ stime = system;
+ } else if (smp_proc_in_lock[j]) {
+ utime = 0;
+ stime = ticks;
+ }
+ update_one_process(p, ticks, utime, stime);
+
+ if (p->priority < DEF_PRIORITY)
+ kstat.cpu_nice += utime;
+ else
+ kstat.cpu_user += utime;
+ kstat.cpu_system += stime;
+
+ p->counter -= ticks;
+ if (p->counter >= 0)
+ continue;
+ p->counter = 0;
+ } else {
+ /*
+ * Idle processor found, do we have anything
+ * we could run?
+ */
+ if (!(0x7fffffff & smp_process_available))
+ continue;
+ }
+ /* Ok, we should reschedule, do the magic */
+ if (i==cpu)
+ need_resched = 1;
+ else
+ smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
+ }
+#endif
+}
+
+static unsigned long lost_ticks = 0;
+static unsigned long lost_ticks_system = 0;
+
+static inline void update_times(void)
+{
+ unsigned long ticks;
+
+ ticks = xchg(&lost_ticks, 0);
+
+ if (ticks) {
+ unsigned long system;
+
+ system = xchg(&lost_ticks_system, 0);
+ calc_load(ticks);
+ update_wall_time(ticks);
+ update_process_times(ticks, system);
+ }
+}
+
+static void timer_bh(void)
+{
+ update_times();
+ run_old_timers();
+ run_timer_list();
+}
+
+void do_timer(struct pt_regs * regs)
+{
+ (*(unsigned long *)&jiffies)++;
+ lost_ticks++;
+ mark_bh(TIMER_BH);
+ if (!user_mode(regs)) {
+ lost_ticks_system++;
+ if (prof_buffer && current->pid) {
+ extern int _stext;
+ unsigned long ip = instruction_pointer(regs);
+ ip -= (unsigned long) &_stext;
+ ip >>= prof_shift;
+ if (ip < prof_len)
+ prof_buffer[ip]++;
+ }
+ }
+ if (tq_timer)
+ mark_bh(TQUEUE_BH);
+}
+
+#ifndef __alpha__
+
+/*
+ * For backwards compatibility? This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+asmlinkage unsigned int sys_alarm(unsigned int seconds)
+{
+ struct itimerval it_new, it_old;
+ unsigned int oldalarm;
+
+ it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+ it_new.it_value.tv_sec = seconds;
+ it_new.it_value.tv_usec = 0;
+ _setitimer(ITIMER_REAL, &it_new, &it_old);
+ oldalarm = it_old.it_value.tv_sec;
+ /* ehhh.. We can't return 0 if we have an alarm pending.. */
+ /* And we'd better return too much than too little anyway */
+ if (it_old.it_value.tv_usec)
+ oldalarm++;
+ return oldalarm;
+}
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
+ * should be moved into arch/i386 instead?
+ */
+asmlinkage int sys_getpid(void)
+{
+ return current->pid;
+}
+
+asmlinkage int sys_getppid(void)
+{
+ return current->p_opptr->pid;
+}
+
+asmlinkage int sys_getuid(void)
+{
+ return current->uid;
+}
+
+asmlinkage int sys_geteuid(void)
+{
+ return current->euid;
+}
+
+asmlinkage int sys_getgid(void)
+{
+ return current->gid;
+}
+
+asmlinkage int sys_getegid(void)
+{
+ return current->egid;
+}
+
+/*
+ * This has been replaced by sys_setpriority. Maybe it should be
+ * moved into the arch dependent tree for those ports that require
+ * it for backward compatibility?
+ */
+asmlinkage int sys_nice(int increment)
+{
+ unsigned long newprio;
+ int increase = 0;
+
+ newprio = increment;
+ if (increment < 0) {
+ if (!suser())
+ return -EPERM;
+ newprio = -increment;
+ increase = 1;
+ }
+ if (newprio > 40)
+ newprio = 40;
+ /*
+ * do a "normalization" of the priority (traditionally
+ * unix nice values are -20..20, linux doesn't really
+ * use that kind of thing, but uses the length of the
+	 * timeslice instead (default 150 msec)). The rounding is
+ * why we want to avoid negative values.
+ */
+ newprio = (newprio * DEF_PRIORITY + 10) / 20;
+ increment = newprio;
+ if (increase)
+ increment = -increment;
+ newprio = current->priority - increment;
+ if ((signed) newprio < 1)
+ newprio = 1;
+ if (newprio > DEF_PRIORITY*2)
+ newprio = DEF_PRIORITY*2;
+ current->priority = newprio;
+ return 0;
+}
+
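+/*
+ * Worked example (not part of the original file, assuming DEF_PRIORITY is
+ * 20 with HZ=100): "nice 10" maps to (10*20+10)/20 = 10, so a task at the
+ * default priority 20 drops to 10, i.e. roughly half of its previous
+ * timeslice; negative increments raise the priority symmetrically, and the
+ * result is clamped to the range 1..2*DEF_PRIORITY.
+ */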
+#endif
+
+static struct task_struct *find_process_by_pid(pid_t pid) {
+ struct task_struct *p, *q;
+
+ if (pid == 0)
+ p = current;
+ else {
+ p = 0;
+ for_each_task(q) {
+ if (q && q->pid == pid) {
+ p = q;
+ break;
+ }
+ }
+ }
+ return p;
+}
+
+static int setscheduler(pid_t pid, int policy,
+ struct sched_param *param)
+{
+ int error;
+ struct sched_param lp;
+ struct task_struct *p;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+
+ error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
+ if (error)
+ return error;
+ memcpy_fromfs(&lp, param, sizeof(struct sched_param));
+
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ if (policy < 0)
+ policy = p->policy;
+ else if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ policy != SCHED_OTHER)
+ return -EINVAL;
+
+ /*
+ * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
+ * priority for SCHED_OTHER is 0.
+ */
+ if (lp.sched_priority < 0 || lp.sched_priority > 99)
+ return -EINVAL;
+ if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
+ return -EINVAL;
+
+ if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
+ return -EPERM;
+ if ((current->euid != p->euid) && (current->euid != p->uid) &&
+ !suser())
+ return -EPERM;
+
+ p->policy = policy;
+ p->rt_priority = lp.sched_priority;
+ cli();
+ if (p->next_run)
+ move_last_runqueue(p);
+ sti();
+ need_resched = 1;
+ return 0;
+}
+
+asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
+ struct sched_param *param)
+{
+ return setscheduler(pid, policy, param);
+}
+
+asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
+{
+ return setscheduler(pid, -1, param);
+}
+
+asmlinkage int sys_sched_getscheduler(pid_t pid)
+{
+ struct task_struct *p;
+
+ if (pid < 0)
+ return -EINVAL;
+
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ return p->policy;
+}
+
+asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
+{
+ int error;
+ struct task_struct *p;
+ struct sched_param lp;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+
+ error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
+ if (error)
+ return error;
+
+ p = find_process_by_pid(pid);
+ if (!p)
+ return -ESRCH;
+
+ lp.sched_priority = p->rt_priority;
+ memcpy_tofs(param, &lp, sizeof(struct sched_param));
+
+ return 0;
+}
+
+asmlinkage int sys_sched_yield(void)
+{
+ cli();
+ move_last_runqueue(current);
+ current->counter = 0;
+ need_resched = 1;
+ sti();
+ return 0;
+}
+
+asmlinkage int sys_sched_get_priority_max(int policy)
+{
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ return 99;
+ case SCHED_OTHER:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+asmlinkage int sys_sched_get_priority_min(int policy)
+{
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ return 1;
+ case SCHED_OTHER:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
+{
+ int error;
+ struct timespec t;
+
+ error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
+ if (error)
+ return error;
+
+ /* Values taken from 2.1.38 */
+ t.tv_sec = 0;
+ t.tv_nsec = 150000; /* is this right for non-intel architecture too?*/
+ memcpy_tofs(interval, &t, sizeof(struct timespec));
+
+ return 0;
+}
+
+/*
+ * change timespec to jiffies, trying to avoid the
+ * most obvious overflows..
+ */
+static unsigned long timespectojiffies(struct timespec *value)
+{
+ unsigned long sec = (unsigned) value->tv_sec;
+ long nsec = value->tv_nsec;
+
+ if (sec > (LONG_MAX / HZ))
+ return LONG_MAX;
+ nsec += 1000000000L / HZ - 1;
+ nsec /= 1000000000L / HZ;
+ return HZ * sec + nsec;
+}
+
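+/*
+ * Worked example (not part of the original file, assuming HZ=100 so one
+ * jiffy is 10 ms): a request of 25 ms has tv_nsec = 25000000; adding
+ * 1000000000/HZ - 1 and dividing by 1000000000/HZ rounds it up to
+ * 3 jiffies, so the caller never sleeps for less than it asked for.
+ */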
+static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
+{
+ value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+ return;
+}
+
+asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
+{
+ int error;
+ struct timespec t;
+ unsigned long expire;
+
+ error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
+ if (error)
+ return error;
+ memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
+ if (rmtp) {
+ error = verify_area(VERIFY_WRITE, rmtp,
+ sizeof(struct timespec));
+ if (error)
+ return error;
+ }
+
+ if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
+ return -EINVAL;
+
+ if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
+ current->policy != SCHED_OTHER) {
+ /*
+ * Short delay requests up to 2 ms will be handled with
+ * high precision by a busy wait for all real-time processes.
+ */
+ udelay((t.tv_nsec + 999) / 1000);
+ return 0;
+ }
+
+ expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
+ current->timeout = expire;
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+
+ if (expire > jiffies) {
+ if (rmtp) {
+ jiffiestotimespec(expire - jiffies -
+ (expire > jiffies + 1), &t);
+ memcpy_tofs(rmtp, &t, sizeof(struct timespec));
+ }
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static void show_task(int nr,struct task_struct * p)
+{
+ unsigned long free;
+ static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
+
+ printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
+ if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
+ printk(stat_nam[p->state]);
+ else
+ printk(" ");
+#if ((~0UL) == 0xffffffff)
+ if (p == current)
+ printk(" current ");
+ else
+ printk(" %08lX ", thread_saved_pc(&p->tss));
+ printk("%08lX ", get_wchan(p));
+#else
+ if (p == current)
+ printk(" current task ");
+ else
+ printk(" %016lx ", thread_saved_pc(&p->tss));
+ printk("%08lX ", get_wchan(p) & 0xffffffffL);
+#endif
+ for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
+ if (((unsigned long *)p->kernel_stack_page)[free])
+ break;
+ }
+ printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
+ if (p->p_cptr)
+ printk("%5d ", p->p_cptr->pid);
+ else
+ printk(" ");
+ if (p->p_ysptr)
+ printk("%7d", p->p_ysptr->pid);
+ else
+ printk(" ");
+ if (p->p_osptr)
+ printk(" %5d\n", p->p_osptr->pid);
+ else
+ printk("\n");
+}
+
+void show_state(void)
+{
+ int i;
+
+#if ((~0UL) == 0xffffffff)
+ printk("\n"
+ " free sibling\n");
+ printk(" task PC wchan stack pid father child younger older\n");
+#else
+ printk("\n"
+ " free sibling\n");
+ printk(" task PC wchan stack pid father child younger older\n");
+#endif
+ for (i=0 ; i<NR_TASKS ; i++)
+ if (task[i])
+ show_task(i,task[i]);
+}
+
+void sched_init(void)
+{
+ /*
+ * We have to do a little magic to get the first
+ * process right in SMP mode.
+ */
+ int cpu=smp_processor_id();
+#ifndef __SMP__
+ current_set[cpu]=&init_task;
+#else
+ init_task.processor=cpu;
+ for(cpu = 0; cpu < NR_CPUS; cpu++)
+ current_set[cpu] = &init_task;
+#endif
+ init_bh(TIMER_BH, timer_bh);
+ init_bh(TQUEUE_BH, tqueue_bh);
+ init_bh(IMMEDIATE_BH, immediate_bh);
+}
diff --git a/linux/src/kernel/softirq.c b/linux/src/kernel/softirq.c
new file mode 100644
index 0000000..32038b1
--- /dev/null
+++ b/linux/src/kernel/softirq.c
@@ -0,0 +1,54 @@
+/*
+ * linux/kernel/softirq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ *
+ * do_bottom_half() runs at normal kernel priority: all interrupts
+ * enabled. do_bottom_half() is atomic with respect to itself: a
+ * bottom_half handler need not be re-entrant.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+
+unsigned int intr_count = 0;
+
+int bh_mask_count[32];
+unsigned int bh_active = 0;
+unsigned int bh_mask = 0;
+void (*bh_base[32])(void);
+
+
+asmlinkage void do_bottom_half(void)
+{
+ unsigned int active;
+ unsigned int mask, left;
+ void (**bh)(void);
+
+ sti();
+ bh = bh_base;
+ active = bh_active & bh_mask;
+ for (mask = 1, left = ~0 ; left & active ; bh++,mask += mask,left += left) {
+ if (mask & active) {
+ void (*fn)(void);
+ bh_active &= ~mask;
+ fn = *bh;
+ if (!fn)
+ goto bad_bh;
+ fn();
+ }
+ }
+ return;
+bad_bh:
+ printk ("irq.c:bad bottom half entry %08lx\n", mask);
+}
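+
+/*
+ * Illustrative sketch (not part of the original file): a handler is
+ * installed once with init_bh() and then scheduled from interrupt context
+ * with mark_bh(); do_bottom_half() above runs it with interrupts enabled.
+ * The handler name and the bh slot used here are hypothetical.
+ */
+#if 0	/* example only, never compiled */
+static void example_bh(void)
+{
+	/* deferred, non-reentrant work goes here */
+}
+
+static void example_setup(void)
+{
+	init_bh(CONSOLE_BH, example_bh);	/* claim a slot */
+	mark_bh(CONSOLE_BH);			/* request a run */
+}
+#endif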
diff --git a/linux/src/lib/ctype.c b/linux/src/lib/ctype.c
new file mode 100644
index 0000000..26baa62
--- /dev/null
+++ b/linux/src/lib/ctype.c
@@ -0,0 +1,36 @@
+/*
+ * linux/lib/ctype.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+
+EXPORT_SYMBOL(_ctype);
diff --git a/linux/src/lib/vsprintf.c b/linux/src/lib/vsprintf.c
new file mode 100644
index 0000000..8f813c6
--- /dev/null
+++ b/linux/src/lib/vsprintf.c
@@ -0,0 +1,306 @@
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
+{
+ unsigned long result = 0,value;
+
+ if (!base) {
+ base = 10;
+ if (*cp == '0') {
+ base = 8;
+ cp++;
+ if ((*cp == 'x') && isxdigit(cp[1])) {
+ cp++;
+ base = 16;
+ }
+ }
+ }
+ while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp)
+ ? toupper(*cp) : *cp)-'A'+10) < base) {
+ result = result*base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+ return result;
+}
+
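+/*
+ * Illustrative sketch (not part of the original file): with base 0 the
+ * function auto-detects the radix, so "0x1a" parses as 26, "017" as 15
+ * and "42" as 42; *endp is left pointing at the first unparsed character.
+ */
+#if 0	/* example only, never compiled */
+static void simple_strtoul_example(void)
+{
+	char *end;
+	unsigned long val = simple_strtoul("0x1a beef", &end, 0);
+	/* val == 26, end points at the ' ' before "beef" */
+}
+#endif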
+/* we use this so that we can do without the ctype library */
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (is_digit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) n) % (unsigned) base; \
+n = ((unsigned long) n) / (unsigned) base; \
+__res; })
+
+static char * number(char * str, long num, int base, int size, int precision
+ ,int type)
+{
+ char c,sign,tmp[66];
+ const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(ZEROPAD+LEFT)))
+ while(size-->0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+ if (type & SPECIAL)
+ if (base==8)
+ *str++ = '0';
+ else if (base==16) {
+ *str++ = '0';
+ *str++ = digits[33];
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ *str++ = c;
+ while (i < precision--)
+ *str++ = '0';
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long num;
+ int i, base;
+ char * str;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+				   number of chars from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+
+ for (str=buf ; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= LEFT; goto repeat;
+ case '+': flags |= PLUS; goto repeat;
+ case ' ': flags |= SPACE; goto repeat;
+ case '#': flags |= SPECIAL; goto repeat;
+ case '0': flags |= ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (is_digit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (is_digit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ *str++ = ' ';
+ *str++ = (unsigned char) va_arg(args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ *str++ = ' ';
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ str = number(str,
+ (unsigned long) va_arg(args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (*fmt != '%')
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'l')
+ num = va_arg(args, unsigned long);
+ else if (qualifier == 'h')
+ if (flags & SIGN)
+ num = va_arg(args, short);
+ else
+ num = va_arg(args, unsigned short);
+ else if (flags & SIGN)
+ num = va_arg(args, int);
+ else
+ num = va_arg(args, unsigned int);
+ str = number(str, num, base, field_width, precision, flags);
+ }
+ *str = '\0';
+ return str-buf;
+}
+
+int sprintf(char * buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i=vsprintf(buf,fmt,args);
+ va_end(args);
+ return i;
+}
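+
+/*
+ * Usage sketch (kept under #if 0, illustrative only): these calls exercise
+ * the flag, width and precision handling implemented by number() above.
+ * The function and buffer names are arbitrary examples.
+ */
+#if 0
+static void vsprintf_example(void)
+{
+	char buf[64];
+
+	sprintf(buf, "%-8s|", "eth0");		/* left-justified, padded: "eth0    |" */
+	sprintf(buf, "%#010x", 0xbeef);		/* SPECIAL + ZEROPAD:      "0x0000beef" */
+	sprintf(buf, "%+d %+d", -3, 3);		/* PLUS flag:              "-3 +3" */
+	sprintf(buf, "%.3u", 7);		/* precision pads digits:  "007" */
+}
+#endif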
+
diff --git a/linux/src/net/core/dev.c b/linux/src/net/core/dev.c
new file mode 100644
index 0000000..4e46f9f
--- /dev/null
+++ b/linux/src/net/core/dev.c
@@ -0,0 +1,1629 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dhinds@allegro.stanford.edu>
+ *
+ * Changes:
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant stunts to
+ * keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
+ * a function call a packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close changes.
+ * Rudi Cilibrasi : Pass the right thing to set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to make it work out
+ * on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
+ * is no device open function.
+ * Lawrence V. Stefani : Changed set MTU ioctl to not assume
+ * min MTU of 68 bytes for devices
+ * that have change MTU functions.
+ *
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/slhc.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <net/br.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h>
+#endif /* CONFIG_NET_RADIO */
+
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ */
+
+struct packet_type *ptype_base[16];
+struct packet_type *ptype_all = NULL; /* Taps */
+
+/*
+ * Device list lock
+ */
+
+int dev_lockct=0;
+
+/*
+ * Our notifier list
+ */
+
+struct notifier_block *netdev_chain=NULL;
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the bottom half handler.
+ */
+
+static struct sk_buff_head backlog;
+
+/*
+ * We don't overdo the queue or we will thrash memory badly.
+ */
+
+static int backlog_size = 0;
+
+/*
+ * Return the lesser of the two values.
+ */
+
+static __inline__ unsigned long min(unsigned long a, unsigned long b)
+{
+ return (a < b)? a : b;
+}
+
+
+/******************************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************************/
+
+/*
+ * For efficiency
+ */
+
+static int dev_nit=0;
+
+/*
+ * Add a protocol ID to the list. Now that the input handler is
+ * smarter we can dispense with all the messy stuff that used to be
+ * here.
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ int hash;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit++;
+ pt->next=ptype_all;
+ ptype_all=pt;
+ }
+ else
+ {
+ hash=ntohs(pt->type)&15;
+ pt->next = ptype_base[hash];
+ ptype_base[hash] = pt;
+ }
+}
+
+
+/*
+ * Remove a protocol ID from the list.
+ */
+
+void dev_remove_pack(struct packet_type *pt)
+{
+ struct packet_type **pt1;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit--;
+ pt1=&ptype_all;
+ }
+ else
+ pt1=&ptype_base[ntohs(pt->type)&15];
+ for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
+ {
+ if(pt==(*pt1))
+ {
+ *pt1=pt->next;
+ return;
+ }
+ }
+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+}
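+
+/*
+ * Minimal sketch of a client of these two routines, e.g. a tap on all
+ * packet types.  The handler and the packet_type instance are illustrative
+ * only, and the initialiser assumes the usual type/dev/func/data/next
+ * layout of struct packet_type in this tree.
+ */
+#if 0
+static int example_tap_rcv(struct sk_buff *skb, struct device *dev,
+			   struct packet_type *pt)
+{
+	/* ... look at the frame ... */
+	kfree_skb(skb, FREE_READ);
+	return 0;
+}
+
+static struct packet_type example_tap = {
+	0,			/* type: set to htons(ETH_P_ALL) before use */
+	NULL,			/* dev: NULL means any device */
+	example_tap_rcv,	/* func: called from net_bh() */
+	NULL,			/* data: private pointer, e.g. a socket */
+	NULL			/* next: maintained by dev_add_pack() */
+};
+
+static void example_tap_init(void)
+{
+	example_tap.type = htons(ETH_P_ALL);
+	dev_add_pack(&example_tap);	/* later: dev_remove_pack(&example_tap) */
+}
+#endif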
+
+/*****************************************************************************************
+
+ Device Interface Subroutines
+
+******************************************************************************************/
+
+/*
+ * Find an interface by name.
+ */
+
+struct device *dev_get(const char *name)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (strcmp(dev->name, name) == 0)
+ return(dev);
+ }
+ return NULL;
+}
+
+/*
+ * Find and possibly load an interface.
+ */
+
+#ifdef CONFIG_KERNELD
+
+extern __inline__ void dev_load(const char *name)
+{
+ if(!dev_get(name) && suser()) {
+#ifdef CONFIG_NET_ALIAS
+ const char *sptr;
+
+ for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break;
+ if (!(*sptr && *(sptr+1)))
+#endif
+ request_module(name);
+ }
+}
+
+#endif
+
+/*
+ * Prepare an interface for use.
+ */
+
+int dev_open(struct device *dev)
+{
+ int ret = -ENODEV;
+
+ /*
+ * Call device private open method
+ */
+ if (dev->open)
+ ret = dev->open(dev);
+
+ /*
+ * If it went open OK then set the flags
+ */
+
+ if (ret == 0)
+ {
+ dev->flags |= (IFF_UP | IFF_RUNNING);
+ /*
+ * Initialise multicasting status
+ */
+ dev_mc_upload(dev);
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ }
+ return(ret);
+}
+
+
+/*
+ * Completely shutdown an interface.
+ */
+
+int dev_close(struct device *dev)
+{
+ int ct=0;
+
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ */
+
+ if ((dev->flags & IFF_UP) && dev->stop)
+ dev->stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags&=~(IFF_UP|IFF_RUNNING);
+
+ /*
+ * Tell people we are going down
+ */
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+ /*
+ * Flush the multicast chain
+ */
+ dev_mc_discard(dev);
+
+ /*
+ * Purge any queued packets when we down the link
+ */
+ while(ct<DEV_NUMBUFFS)
+ {
+ struct sk_buff *skb;
+ while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
+ if(skb->free)
+ kfree_skb(skb,FREE_WRITE);
+ ct++;
+ }
+ return(0);
+}
+
+
+/*
+ * Device change register/unregister. These are not inline or static
+ * as we export them to the world.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_register(&netdev_chain, nb);
+}
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&netdev_chain,nb);
+}
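+
+/*
+ * Minimal sketch of a notifier client; the handler name is illustrative and
+ * the initialiser assumes the usual notifier_call/next/priority layout of
+ * struct notifier_block.
+ */
+#if 0
+static int example_netdev_event(struct notifier_block *self,
+				unsigned long event, void *ptr)
+{
+	struct device *dev = (struct device *) ptr;
+
+	if (event == NETDEV_UP)
+		printk(KERN_INFO "%s is up\n", dev->name);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block example_notifier = {
+	example_netdev_event,	/* notifier_call */
+	NULL,			/* next */
+	0			/* priority */
+};
+
+/* register_netdevice_notifier(&example_notifier); */
+#endif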
+
+/*
+ * Send (or queue for sending) a packet.
+ *
+ *	IMPORTANT: When this is called to resend frames, the caller MUST
+ *	already have locked the sk_buff. Apart from that we do the
+ *	rest of the magic.
+ */
+
+static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ unsigned long flags;
+ struct sk_buff_head *list;
+ int retransmission = 0; /* used to say if the packet should go */
+ /* at the front or the back of the */
+ /* queue - front is a retransmit try */
+
+ if(pri>=0 && !skb_device_locked(skb))
+ skb_device_lock(skb); /* Shove a lock on the frame */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb->dev = dev;
+
+ /*
+	 *	Negative priority is used to flag a frame that is being pulled from
+	 *	the front of the queue as a retransmit attempt. It therefore goes
+	 *	back at the front of the queue on failure.
+ */
+
+ if (pri < 0)
+ {
+ pri = -pri-1;
+ retransmission = 1;
+ }
+
+#ifdef CONFIG_NET_DEBUG
+ if (pri >= DEV_NUMBUFFS)
+ {
+ printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
+ pri = 1;
+ }
+#endif
+
+ /*
+	 *	If the address has not been resolved, call the device header rebuilder.
+	 *	This can technically cover all protocols, not just ARP.
+ */
+
+ if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
+ return;
+ }
+
+ /*
+ *
+ * If dev is an alias, switch to its main device.
+ * "arp" resolution has been made with alias device, so
+ * arp entries refer to alias, not main.
+ *
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ skb->dev = dev = net_alias_dev_tx(dev);
+#endif
+
+ /*
+ * If we are bridging and this is directly generated output
+ * pass the frame via the bridge.
+ */
+
+#ifdef CONFIG_BRIDGE
+ if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
+ {
+ if(br_tx_frame(skb))
+ return;
+ }
+#endif
+
+ list = dev->buffs + pri;
+
+ save_flags(flags);
+ /* if this isn't a retransmission, use the first packet instead... */
+ if (!retransmission) {
+ if (skb_queue_len(list)) {
+ /* avoid overrunning the device queue.. */
+ if (skb_queue_len(list) > dev->tx_queue_len) {
+ dev_kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+ }
+
+ /* copy outgoing packets to any sniffer packet handlers */
+ if (dev_nit) {
+ struct packet_type *ptype;
+ skb->stamp=xtime;
+ for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
+ {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ ((struct sock *)ptype->data != skb->sk))
+ {
+ struct sk_buff *skb2;
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+ break;
+ /* FIXME?: Wrong when the hard_header_len
+ * is an upper bound. Is this even
+ * used anywhere?
+ */
+ skb2->h.raw = skb2->data + dev->hard_header_len;
+ /* On soft header devices we
+ * yank the header before mac.raw
+ * back off. This is set by
+ * dev->hard_header().
+ */
+ if (dev->flags&IFF_SOFTHEADERS)
+ skb_pull(skb2,skb2->mac.raw-skb2->data);
+ skb2->mac.raw = skb2->data;
+ ptype->func(skb2, skb->dev, ptype);
+ }
+ }
+ }
+
+ if (skb_queue_len(list)) {
+ cli();
+ skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
+ __skb_queue_tail(list, skb);
+ skb = __skb_dequeue(list);
+ skb_device_lock(skb); /* New buffer needs locking down */
+ restore_flags(flags);
+ }
+ }
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ /*
+ * Packet is now solely the responsibility of the driver
+ */
+ return;
+ }
+
+ /*
+ * Transmission failed, put skb back into a list. Once on the list it's safe and
+ * no longer device locked (it can be freed safely from the device queue)
+ */
+ cli();
+ skb_device_unlock(skb);
+ __skb_queue_head(list,skb);
+ restore_flags(flags);
+}
+
+void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ start_bh_atomic();
+ do_dev_queue_xmit(skb, dev, pri);
+ end_bh_atomic();
+}
+
+/*
+ * Receive a packet from a device driver and queue it for the upper
+ * (protocol) levels. It always succeeds. This is the recommended
+ * interface to use.
+ */
+
+void netif_rx(struct sk_buff *skb)
+{
+ static int dropping = 0;
+
+ /*
+ * Any received buffers are un-owned and should be discarded
+ * when freed. These will be updated later as the frames get
+ * owners.
+ */
+
+ skb->sk = NULL;
+ skb->free = 1;
+ if(skb->stamp.tv_sec==0)
+ skb->stamp = xtime;
+
+ /*
+ * Check that we aren't overdoing things.
+ */
+
+ if (!backlog_size)
+ dropping = 0;
+ else if (backlog_size > 300)
+ dropping = 1;
+
+ if (dropping)
+ {
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Add it to the "backlog" queue.
+ */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb_queue_tail(&backlog,skb);
+ backlog_size++;
+
+ /*
+ * If any packet arrived, mark it for processing after the
+ * hardware interrupt returns.
+ */
+
+ mark_bh(NET_BH);
+ return;
+}
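+
+/*
+ * Sketch of the driver side of this interface, e.g. from an ethernet
+ * receive interrupt.  Names are illustrative, and it assumes the usual
+ * dev_alloc_skb()/skb_reserve()/skb_put()/eth_type_trans() helpers.
+ */
+#if 0
+static void example_driver_rx(struct device *dev, unsigned char *data, int len)
+{
+	struct sk_buff *skb = dev_alloc_skb(len + 2);
+
+	if (skb == NULL)
+		return;			/* drop the frame if no memory */
+	skb_reserve(skb, 2);		/* align the IP header */
+	memcpy(skb_put(skb, len), data, len);
+	skb->dev = dev;
+	skb->protocol = eth_type_trans(skb, dev);
+	netif_rx(skb);			/* queue on the backlog, mark_bh(NET_BH) */
+}
+#endif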
+
+/*
+ * This routine causes all interfaces to try to send some data.
+ */
+
+static void dev_transmit(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags != 0 && !dev->tbusy) {
+ /*
+ * Kick the device
+ */
+ dev_tint(dev);
+ }
+ }
+}
+
+
+/**********************************************************************************
+
+ Receive Queue Processor
+
+***********************************************************************************/
+
+/*
+ *	When we are called the queue is ready to grab; interrupts are
+ *	on, so the hardware can interrupt and queue onto the receive queue
+ *	while we run, with no problems.
+ * This is run as a bottom half after an interrupt handler that does
+ * mark_bh(NET_BH);
+ */
+
+void net_bh(void)
+{
+ struct packet_type *ptype;
+ struct packet_type *pt_prev;
+ unsigned short type;
+
+ /*
+ * Can we send anything now? We want to clear the
+ * decks for any more sends that get done as we
+ * process the input. This also minimises the
+ * latency on a transmit interrupt bh.
+ */
+
+ dev_transmit();
+
+ /*
+ * Any data left to process. This may occur because a
+	 *	Any data left to process? This may occur because a
+	 *	mark_bh() is done after we empty the queue, including
+	 *	one from a device that does a mark_bh() just after we finish.
+
+ /*
+ * While the queue is not empty..
+ *
+ * Note that the queue never shrinks due to
+ * an interrupt, so we can do this test without
+ * disabling interrupts.
+ */
+
+ while (!skb_queue_empty(&backlog)) {
+ struct sk_buff * skb = backlog.next;
+
+ /*
+ * We have a packet. Therefore the queue has shrunk
+ */
+ cli();
+ __skb_unlink(skb, &backlog);
+ backlog_size--;
+ sti();
+
+
+#ifdef CONFIG_BRIDGE
+
+ /*
+ * If we are bridging then pass the frame up to the
+ * bridging code. If it is bridged then move on
+ */
+
+ if (br_stats.flags & BR_UP)
+ {
+ /*
+ * We pass the bridge a complete frame. This means
+ * recovering the MAC header first.
+ */
+
+ int offset=skb->data-skb->mac.raw;
+ cli();
+ skb_push(skb,offset); /* Put header back on for bridge */
+ if(br_receive_frame(skb))
+ {
+ sti();
+ continue;
+ }
+ /*
+ * Pull the MAC header off for the copy going to
+ * the upper layers.
+ */
+ skb_pull(skb,offset);
+ sti();
+ }
+#endif
+
+ /*
+ * Bump the pointer to the next structure.
+ *
+		 *	On entry to the protocol layer, skb->data and
+		 *	skb->h.raw point to the MAC and encapsulated data.
+ */
+
+ skb->h.raw = skb->data;
+
+ /*
+ * Fetch the packet protocol ID.
+ */
+
+ type = skb->protocol;
+
+ /*
+ * We got a packet ID. Now loop over the "known protocols"
+		 *	list. There are two lists: the ptype_all list of taps (normally empty)
+		 *	and the main protocol list, which is hashed perfectly for normal protocols.
+ */
+
+ pt_prev = NULL;
+ for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
+ {
+ if(!ptype->dev || ptype->dev == skb->dev) {
+ if(pt_prev) {
+ struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
+ if(skb2)
+ pt_prev->func(skb2,skb->dev, pt_prev);
+ }
+ pt_prev=ptype;
+ }
+ }
+
+ for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
+ {
+ /*
+ * We already have a match queued. Deliver
+ * to it and then remember the new match
+ */
+ if(pt_prev)
+ {
+ struct sk_buff *skb2;
+
+ skb2=skb_clone(skb, GFP_ATOMIC);
+
+ /*
+ * Kick the protocol handler. This should be fast
+ * and efficient code.
+ */
+
+ if(skb2)
+ pt_prev->func(skb2, skb->dev, pt_prev);
+ }
+ /* Remember the current last to do */
+ pt_prev=ptype;
+ }
+ } /* End of protocol list loop */
+
+ /*
+ * Is there a last item to send to ?
+ */
+
+ if(pt_prev)
+ pt_prev->func(skb, skb->dev, pt_prev);
+ /*
+		 *	Has an unknown packet been received?
+ */
+
+ else
+ kfree_skb(skb, FREE_WRITE);
+ /*
+ * Again, see if we can transmit anything now.
+		 *	[Ought to take this out; judging by tests it slows
+		 *	us down rather than speeding us up]
+ */
+#ifdef XMIT_EVERY
+ dev_transmit();
+#endif
+ } /* End of queue loop */
+
+ /*
+ * We have emptied the queue
+ */
+
+ /*
+ * One last output flush.
+ */
+
+#ifdef XMIT_AFTER
+ dev_transmit();
+#endif
+}
+
+
+/*
+ *	This routine is called when a device driver (i.e. an
+ * interface) is ready to transmit a packet.
+ */
+
+void dev_tint(struct device *dev)
+{
+ int i;
+ unsigned long flags;
+ struct sk_buff_head * head;
+
+ /*
+ * aliases do not transmit (for now :) )
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev)) return;
+#endif
+ head = dev->buffs;
+ save_flags(flags);
+ cli();
+
+ /*
+ * Work the queues in priority order
+ */
+ for(i = 0;i < DEV_NUMBUFFS; i++,head++)
+ {
+
+ while (!skb_queue_empty(head)) {
+ struct sk_buff *skb;
+
+ skb = head->next;
+ __skb_unlink(skb, head);
+ /*
+ * Stop anyone freeing the buffer while we retransmit it
+ */
+ skb_device_lock(skb);
+ restore_flags(flags);
+ /*
+			 *	Feed them to the output stage; if it fails,
+			 *	they are re-queued at the front.
+ */
+ do_dev_queue_xmit(skb,dev,-i - 1);
+ /*
+ * If we can take no more then stop here.
+ */
+ if (dev->tbusy)
+ return;
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size shortly, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char *arg)
+{
+ struct ifconf ifc;
+ struct ifreq ifr;
+ struct device *dev;
+ char *pos;
+ int len;
+ int err;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
+ if(err)
+ return err;
+ memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
+ len = ifc.ifc_len;
+ pos = ifc.ifc_buf;
+
+ /*
+ * We now walk the device list filling each active device
+ * into the array.
+ */
+
+ err=verify_area(VERIFY_WRITE,pos,len);
+ if(err)
+ return err;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
+ continue;
+ /*
+ * Have we run out of space here ?
+ */
+
+ if (len < sizeof(struct ifreq))
+ break;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ strcpy(ifr.ifr_name, dev->name);
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+
+
+ /*
+ * Write this block to the caller's space.
+ */
+
+ memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
+ pos += sizeof(struct ifreq);
+ len -= sizeof(struct ifreq);
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+
+ ifc.ifc_len = (pos - ifc.ifc_buf);
+ ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
+ memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
+
+ /*
+ * Report how much was filled in
+ */
+
+ return(pos - arg);
+}
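+
+/*
+ * For reference, the user-space side of this call; a sketch only, assuming
+ * the standard <sys/socket.h>, <sys/ioctl.h> and <net/if.h> declarations
+ * rather than anything defined in this file.
+ */
+#if 0
+	struct ifconf ifc;
+	struct ifreq reqs[16];
+	int i, n, fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+	ifc.ifc_len = sizeof(reqs);
+	ifc.ifc_req = reqs;
+	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
+		n = ifc.ifc_len / sizeof(struct ifreq);
+		for (i = 0; i < n; i++)
+			printf("%s\n", reqs[i].ifr_name);	/* one entry per UP interface */
+	}
+#endif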
+
+
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+
+#ifdef CONFIG_PROC_FS
+static int sprintf_stats(char *buffer, struct device *dev)
+{
+ struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
+ int size;
+
+ if (stats)
+ size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
+ dev->name,
+ stats->rx_packets, stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors
+ + stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors + stats->tx_aborted_errors
+ + stats->tx_window_errors + stats->tx_heartbeat_errors);
+ else
+ size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ return size;
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
+ * to create /proc/net/dev
+ */
+
+int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+
+ struct device *dev;
+
+
+ size = sprintf(buffer, "Inter-| Receive | Transmit\n"
+ " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Print one entry of /proc/net/wireless
+ * This is a clone of /proc/net/dev (just above)
+ */
+static int
+sprintf_wireless_stats(char * buffer,
+ struct device * dev)
+{
+ /* Get stats from the driver */
+ struct iw_statistics *stats = (dev->get_wireless_stats ?
+ dev->get_wireless_stats(dev) :
+ (struct iw_statistics *) NULL);
+ int size;
+
+ if(stats != (struct iw_statistics *) NULL)
+ size = sprintf(buffer,
+ "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n",
+ dev->name,
+ stats->status,
+ stats->qual.qual,
+ stats->qual.updated & 1 ? '.' : ' ',
+ stats->qual.level,
+ stats->qual.updated & 2 ? '.' : ' ',
+ stats->qual.noise,
+ stats->qual.updated & 3 ? '.' : ' ',
+ stats->discard.nwid,
+ stats->discard.code,
+ stats->discard.misc);
+ else
+ size = 0;
+
+ return size;
+}
+
+/*
+ * Print info for /proc/net/wireless (print all entries)
+ * This is a clone of /proc/net/dev (just above)
+ */
+int
+dev_get_wireless_info(char * buffer,
+ char ** start,
+ off_t offset,
+ int length,
+ int dummy)
+{
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ int size;
+
+ struct device * dev;
+
+ size = sprintf(buffer,
+ "Inter-|sta| Quality | Discarded packets\n"
+ " face |tus|link level noise| nwid crypt misc\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for(dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_wireless_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos < offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos > offset + length)
+ break;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if(len > length)
+ len = length; /* Ending slop */
+
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+
+/*
+ *	This checks that a netmask passed in from an ioctl is sane: no address
+ *	bits may fall outside the mask, and the mask itself must be a single
+ *	contiguous run of high-order one bits (e.g. 255.255.255.0 is valid,
+ *	255.0.255.0 is not).
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
+
+/*
+ * Perform the SIOCxIFxxx calls.
+ *
+ *	The socket layer has seen an ioctl which the address family thinks
+ *	is meant for a device. At this point we get invoked to make a decision.
+ */
+
+static int dev_ifsioc(void *arg, unsigned int getset)
+{
+ struct ifreq ifr;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+
+ int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
+ if(err)
+ return err;
+
+ memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ /*
+ *
+ * net_alias_dev_get(): dev_get() with added alias naming magic.
+ * only allow alias creation/deletion if (getset==SIOCSIFADDR)
+ *
+ */
+
+#ifdef CONFIG_KERNELD
+ dev_load(ifr.ifr_name);
+#endif
+
+#ifdef CONFIG_NET_ALIAS
+ if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
+ return(err);
+#else
+ if ((dev = dev_get(ifr.ifr_name)) == NULL)
+ return(-ENODEV);
+#endif
+ switch(getset)
+ {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr.ifr_flags = (dev->flags & ~IFF_SOFTHEADERS);
+ goto rarok;
+
+ case SIOCSIFFLAGS: /* Set interface flags */
+ {
+ int old_flags = dev->flags;
+
+ if(securelevel>0)
+ ifr.ifr_flags&=~IFF_PROMISC;
+ /*
+ * We are not allowed to potentially close/unload
+ * a device until we get this lock.
+ */
+
+ dev_lock_wait();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (ifr.ifr_flags & (
+ IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
+ | IFF_MULTICAST)) | (dev->flags & (IFF_SOFTHEADERS|IFF_UP));
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+
+ /*
+			 *	Have we downed the interface? We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ if ((old_flags^ifr.ifr_flags)&IFF_UP) /* Bit is different ? */
+ {
+ if(old_flags&IFF_UP) /* Gone down */
+ ret=dev_close(dev);
+ else /* Come up */
+ {
+ ret=dev_open(dev);
+ if(ret<0)
+ dev->flags&=~IFF_UP; /* Open failed */
+ }
+ }
+ else
+ ret=0;
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+ }
+ break;
+
+ case SIOCGIFADDR: /* Get interface address (and family) */
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+ }
+ else
+ {
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_port = 0;
+ }
+ goto rarok;
+
+ case SIOCSIFADDR: /* Set interface address (and family) */
+
+ /*
+ * BSDism. SIOCSIFADDR family=AF_UNSPEC sets the
+ * physical address. We can cope with this now.
+ */
+
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel>0)
+ return -EPERM;
+ ret=dev->set_mac_address(dev,&ifr.ifr_addr);
+ }
+ else
+ {
+ u32 new_pa_addr = (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr;
+ u16 new_family = ifr.ifr_addr.sa_family;
+
+ if (new_family == dev->family &&
+ new_pa_addr == dev->pa_addr) {
+ ret =0;
+ break;
+ }
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+
+ /*
+ * if dev is an alias, must rehash to update
+ * address change
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ net_alias_dev_rehash(dev ,&ifr.ifr_addr);
+#endif
+ dev->pa_addr = new_pa_addr;
+ dev->family = new_family;
+
+#ifdef CONFIG_INET
+			/* This is naughty. When net-032e comes out it wants moving into the
+			   net032 code, not the kernel. Till then it can sit here (SIGH). */
+ if (!dev->pa_mask)
+ dev->pa_mask = ip_get_mask(dev->pa_addr);
+#endif
+ if (!dev->pa_brdaddr)
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFBRDADDR: /* Get the broadcast address */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+ dev->pa_brdaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
+ dev->pa_dstaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ {
+ unsigned long mask = (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr;
+ ret = -EINVAL;
+ /*
+ * The mask we set must be legal.
+ */
+ if (bad_mask(mask,0))
+ break;
+ dev->pa_mask = mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
+
+ ifr.ifr_metric = dev->metric;
+ goto rarok;
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
+ dev->metric = ifr.ifr_metric;
+ ret=0;
+ break;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr.ifr_mtu = dev->mtu;
+ goto rarok;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+
+ if (dev->change_mtu)
+ ret = dev->change_mtu(dev, ifr.ifr_mtu);
+ else
+ {
+ /*
+ * MTU must be positive.
+ */
+
+ if(ifr.ifr_mtu<68)
+ return -EINVAL;
+
+ dev->mtu = ifr.ifr_mtu;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
+ do not support it */
+ ret = -EINVAL;
+ break;
+
+ case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
+ ret = -EINVAL;
+ break;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+
+ case SIOCSIFHWADDR:
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel > 0)
+ return -EPERM;
+ if(ifr.ifr_hwaddr.sa_family!=dev->type)
+ return -EINVAL;
+ ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
+ break;
+
+ case SIOCGIFMAP:
+ ifr.ifr_map.mem_start=dev->mem_start;
+ ifr.ifr_map.mem_end=dev->mem_end;
+ ifr.ifr_map.base_addr=dev->base_addr;
+ ifr.ifr_map.irq=dev->irq;
+ ifr.ifr_map.dma=dev->dma;
+ ifr.ifr_map.port=dev->if_port;
+ goto rarok;
+
+ case SIOCSIFMAP:
+ if(dev->set_config==NULL)
+ return -EOPNOTSUPP;
+ return dev->set_config(dev,&ifr.ifr_map);
+
+ case SIOCADDMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
+ return 0;
+
+ case SIOCDELMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
+ return 0;
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if((getset >= SIOCDEVPRIVATE) &&
+ (getset <= (SIOCDEVPRIVATE + 15))) {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ break;
+ }
+
+#ifdef CONFIG_NET_RADIO
+ if((getset >= SIOCIWFIRST) &&
+ (getset <= SIOCIWLAST))
+ {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ /* Perform the ioctl */
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ /* If return args... */
+ if(IW_IS_GET(getset))
+ memcpy_tofs(arg, &ifr,
+ sizeof(struct ifreq));
+ break;
+ }
+#endif /* CONFIG_NET_RADIO */
+
+ ret = -EINVAL;
+ }
+ return(ret);
+/*
+ *	Shared exit for the many calls that return an ifreq and 0 for OK (saves memory).
+ */
+rarok:
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ return 0;
+}
+
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+int dev_ioctl(unsigned int cmd, void *arg)
+{
+ switch(cmd)
+ {
+ case SIOCGIFCONF:
+ (void) dev_ifconf((char *) arg);
+ return 0;
+
+ /*
+ * Ioctl calls that can be done by all.
+ */
+
+ case SIOCGIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ return dev_ifsioc(arg, cmd);
+
+ /*
+ * Ioctl calls requiring the power of a superuser
+ */
+
+ case SIOCSIFFLAGS:
+ case SIOCSIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMEM:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMAP:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+
+ default:
+ if((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15))) {
+ return dev_ifsioc(arg, cmd);
+ }
+#ifdef CONFIG_NET_RADIO
+ if((cmd >= SIOCIWFIRST) &&
+ (cmd <= SIOCIWLAST))
+ {
+ if((IW_IS_SET(cmd)) && (!suser()))
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+ }
+#endif /* CONFIG_NET_RADIO */
+ return -EINVAL;
+ }
+}
+
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+extern int lance_init(void);
+extern int pi_init(void);
+extern int pt_init(void);
+extern int bpq_init(void);
+extern void sdla_setup(void);
+extern int dlci_setup(void);
+extern int sm_init(void);
+extern int baycom_init(void);
+
+int net_dev_init(void)
+{
+ struct device *dev, **dp;
+
+ /*
+ * Initialise the packet receive queue.
+ */
+
+ skb_queue_head_init(&backlog);
+
+ /*
+ * The bridge has to be up before the devices
+ */
+
+#ifdef CONFIG_BRIDGE
+ br_init();
+#endif
+
+ /*
+ * This is Very Ugly(tm).
+ *
+ * Some devices want to be initialized early..
+ */
+#if defined(CONFIG_PI)
+ pi_init();
+#endif
+#if defined(CONFIG_PT)
+ pt_init();
+#endif
+#if defined(CONFIG_BPQETHER)
+ bpq_init();
+#endif
+#if defined(CONFIG_DLCI)
+ dlci_setup();
+#endif
+#if defined(CONFIG_SDLA)
+ sdla_setup();
+#endif
+#if defined(CONFIG_BAYCOM)
+ baycom_init();
+#endif
+#if defined(CONFIG_SOUNDMODEM)
+ sm_init();
+#endif
+ /*
+ * SLHC if present needs attaching so other people see it
+ * even if not opened.
+ */
+#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
+ || defined(CONFIG_PPP) \
+ || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
+ slhc_install();
+#endif
+
+ /*
+ * Add the devices.
+ * If the call to dev->init fails, the dev is removed
+ * from the chain disconnecting the device until the
+ * next reboot.
+ */
+
+ dp = &dev_base;
+ while ((dev = *dp) != NULL)
+ {
+ int i;
+ for (i = 0; i < DEV_NUMBUFFS; i++) {
+ skb_queue_head_init(dev->buffs + i);
+ }
+
+ if (dev->init && dev->init(dev))
+ {
+ /*
+ * It failed to come up. Unhook it.
+ */
+ *dp = dev->next;
+ }
+ else
+ {
+ dp = &dev->next;
+ }
+ }
+
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_DEV, 3, "dev",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_info
+ });
+#endif
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_WIRELESS, 8, "wireless",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_wireless_info
+ });
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+ /*
+ * Initialise net_alias engine
+ *
+ * - register net_alias device notifier
+ * - register proc entries: /proc/net/alias_types
+ * /proc/net/aliases
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ net_alias_init();
+#endif
+
+ init_bh(NET_BH, net_bh);
+ return 0;
+}